id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
23254 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.utils.translation import ugettext_lazy as _
from dataflow.batch.exceptions.comp_execptions import BatchTimeCompareError, BatchUnsupportedOperationError
from dataflow.batch.periodic.param_info.builder.periodic_batch_job_builder import PeriodicBatchJobBuilder
from dataflow.batch.utils.time_util import BatchTimeTuple
class ProcessingsValidator(object):
    """Validates the window configuration of periodic batch processings.

    ``validate`` is the entry point: it checks every input result table's
    window parameters and then verifies that a default output storage
    offset can be derived from the configuration.
    """

    def validate(self, periodic_batch_info_params_obj):
        """
        Run all validations on the given parameter object.

        :param periodic_batch_info_params_obj:
        :type periodic_batch_info_params_obj:
            dataflow.batch.periodic.param_info.periodic_batch_info_params.PeriodicBatchInfoParams
        :raises BatchUnsupportedOperationError: if any check fails
        """
        self.validate_input(periodic_batch_info_params_obj)
        self.validate_output_data_offset(periodic_batch_info_params_obj)

    def validate_input(self, periodic_batch_info_params_obj):
        """
        Validate the per-input-table window parameters.

        :param periodic_batch_info_params_obj:
        :type periodic_batch_info_params_obj:
            dataflow.batch.periodic.param_info.periodic_batch_info_params.PeriodicBatchInfoParams
        """
        for input_table in periodic_batch_info_params_obj.input_result_tables:
            # Normalise the window type once instead of calling .lower()
            # on every comparison.
            window_type = input_table.window_type.lower()
            if window_type in ("scroll", "slide", "accumulate"):
                # The check raises only for values strictly below the
                # limit, so "0H" itself is accepted.
                self.__check_greater_than_value(input_table.window_offset, "window_offset", "0H")
            if window_type in ("slide", "accumulate"):
                self.__check_greater_than_value(input_table.window_size, "window_size", "0H")
            if window_type == "accumulate":
                self.__check_greater_than_value(input_table.window_start_offset, "window_start_offset", "0H")
                self.__check_greater_than_value(input_table.window_end_offset, "window_end_offset", "0H")
                # Both accumulate offsets must stay inside the window.
                self.__check_less_than_value(
                    input_table.window_start_offset,
                    "window_start_offset",
                    input_table.window_size,
                )
                self.__check_less_than_value(
                    input_table.window_end_offset,
                    "window_end_offset",
                    input_table.window_size,
                )
                self.__check_if_null(input_table.accumulate_start_time, "accumulate_start_time")

    def __check_greater_than_value(self, check_value, check_name, limit_value):
        """Raise unless ``check_value`` (jobnavi time string) >= ``limit_value``."""
        self.__check_if_null(check_value, check_name)
        limit_time_tuple = BatchTimeTuple()
        limit_time_tuple.from_jobnavi_format(limit_value)
        check_value_tuple = BatchTimeTuple()
        check_value_tuple.from_jobnavi_format(check_value)
        if check_value_tuple < limit_time_tuple:
            # i18n fix: translate the template first, then format.  The
            # original formatted before the _() lookup, so the string could
            # never match a translation catalogue entry.
            raise BatchUnsupportedOperationError(_("{}数值必须大于{}").format(check_name, limit_value))

    def __check_less_than_value(self, check_value, check_name, limit_value):
        """Raise unless ``check_value`` (jobnavi time string) <= ``limit_value``."""
        self.__check_if_null(check_value, check_name)
        limit_time_tuple = BatchTimeTuple()
        limit_time_tuple.from_jobnavi_format(limit_value)
        check_value_tuple = BatchTimeTuple()
        check_value_tuple.from_jobnavi_format(check_value)
        if check_value_tuple > limit_time_tuple:
            raise BatchUnsupportedOperationError(_("{}数值必须小于{}").format(check_name, limit_value))

    def __check_if_null(self, check_value, check_name):
        """Raise if ``check_value`` is None."""
        if check_value is None:
            raise BatchUnsupportedOperationError(_("{}数值不能是null").format(check_name))

    def validate_output_data_offset(self, periodic_batch_info_params_obj):
        """
        Verify that a default output storage offset can be computed from the
        current configuration.

        :param periodic_batch_info_params_obj:
        :type periodic_batch_info_params_obj:
            dataflow.batch.periodic.param_info.periodic_batch_info_params.PeriodicBatchInfoParams
        :raises BatchUnsupportedOperationError: if the offset cannot be derived
        """
        try:
            PeriodicBatchJobBuilder.calculate_output_offset(
                periodic_batch_info_params_obj.input_result_tables,
                periodic_batch_info_params_obj.output_result_tables[0],
                periodic_batch_info_params_obj.count_freq,
                periodic_batch_info_params_obj.schedule_period,
            )
        except BatchTimeCompareError:
            raise BatchUnsupportedOperationError(_("当前配置无法算出默认存储分区,请激活自定义出库配置"))
| StarcoderdataPython |
3348455 | from nose.tools import eq_, ok_
from django.core.urlresolvers import resolve, reverse
def assert_routing(url, view_function_or_class, name='', kwargs=None):
    """Assert that *url* resolves to the given view (function- or class-based).

    If *kwargs* is given, also assert the resolver captured exactly those
    keyword arguments; if *name* is given, assert that reversing *name* with
    *kwargs* reproduces *url*.
    """
    # Fix: avoid the shared mutable-default-argument pitfall (was kwargs={}).
    kwargs = {} if kwargs is None else kwargs
    resolved_route = resolve(url)
    ok_((resolved_route.func is view_function_or_class)
        or (type(resolved_route.func) is view_function_or_class))
    if kwargs:
        eq_(resolved_route.kwargs, kwargs)
    if name:
        eq_(reverse(name, kwargs=kwargs), url)
def assert_redirects_to_named_url(response, name, kwargs=None, permanent=False):
    """Assert *response* redirects (301 if *permanent* else 302) to the URL
    obtained by reversing *name* with *kwargs*."""
    # Fix: avoid the shared mutable-default-argument pitfall (was kwargs={}).
    kwargs = {} if kwargs is None else kwargs
    expected_status = 301 if permanent else 302
    expected_redirect_url = reverse(name, kwargs=kwargs)
    eq_(response.status_code, expected_status)
    eq_(response['Location'], expected_redirect_url)
| StarcoderdataPython |
1679726 | import uharfbuzz as hb
import re
from pathlib import Path
from fontTools.ttLib import TTFont
from beziers.path import BezierPath
from beziers.path.geometricshapes import Rectangle
from beziers.utils.linesweep import bbox_intersections
from beziers.point import Point
from beziers.boundingbox import BoundingBox
from glyphtools import categorize_glyph
import sys
from typing import NamedTuple
class Collision(NamedTuple):
    """A single detected overlap between two glyph outline paths."""
    glyph1: str         # name of the first glyph involved
    glyph2: str         # name of the second glyph involved
    path1: BezierPath   # the colliding contour belonging to glyph1
    path2: BezierPath   # the colliding contour belonging to glyph2
    point: Point        # one representative intersection point of the two paths
class Collidoscope:
    """Detect collisions between font glyphs"""

    def __init__(self, fontfilename, rules, direction="LTR", ttFont=None):
        """Create a collision detector.

        The rules dictionary may contain the following entries:

        faraway (boolean): If true, non-adjacent base glyphs are tested for
            overlap. Mark glyphs are ignored. All collisions are reported.
        marks (boolean): If true, collisions between all pairs of marks in
            the string are reported.
        cursive (boolean): If true, adjacent glyphs are tested for overlap.
            Paths containing cursive anchors are allowed to overlap, but
            collisions between other paths are reported.
        area (float): If provided, adjacent glyphs are tested for overlap.
            Collisions are reported if the intersection area is greater than
            the given proportion of the smallest path. (i.e. where cursive
            connection anchors are not used in an Arabic font, you may wish
            to ignore collisions if the overlaid area is less than 5% of the
            smallest path, because this is likely to be the connection point
            between the glyphs. But collisions affecting more than 5% of the
            glyph will be reported.)

        Args:
            fontfilename: file name of font.
            rules: dictionary of collision rules.
            ttFont: fontTools object (loaded from file if not given).
            direction: "LTR" or "RTL"
        """
        self.fontfilename = fontfilename
        self.glyphcache = {}
        # NOTE: shape_a_text() overwrites this with the direction harfbuzz
        # guesses for the shaped text.
        self.direction = direction
        if ttFont:
            self.font = ttFont
            self.fontbinary = ttFont.reader.file.read()
        else:
            self.fontbinary = Path(fontfilename).read_bytes()
            self.font = TTFont(fontfilename)
        self.rules = rules
        self.prep_shaper()
        if "cursive" in self.rules and self.rules["cursive"]:
            self.get_anchors()
        else:
            self.anchors = {}

    def prep_shaper(self):
        """Build a harfbuzz font, scaled to units-per-em, for shaping."""
        face = hb.Face(self.fontbinary)
        font = hb.Font(face)
        upem = face.upem
        font.scale = (upem, upem)
        hb.ot_font_set_funcs(font)
        self.hbfont = font

    def shape_a_text(self, text):
        """Shape *text* with harfbuzz and return the shaped buffer.

        Side effect: updates ``self.direction`` with the buffer's direction.
        """
        buf = hb.Buffer()
        buf.add_str(text)
        buf.guess_segment_properties()
        hb.shape(self.hbfont, buf)
        self.direction = buf.direction
        return buf

    @staticmethod
    def bb2path(bb):
        """Convert a BoundingBox into a rectangular BezierPath.

        Bug fix: this was declared as an instance method without ``self``,
        so calling it on an instance would have bound the instance to ``bb``.
        It uses no instance state, so it is now a staticmethod.
        """
        vec = bb.tr - bb.bl
        return Rectangle(vec.x, vec.y, origin=bb.bl + vec * 0.5)

    def get_anchors(self):
        """Collect cursive-attachment anchor points from GPOS type-3 lookups
        into ``self.anchors`` (glyph name -> list of (x, y) tuples)."""
        # Find the GPOS CursiveAttachment lookups
        cursives = filter(lambda x: x.LookupType == 3, self.font["GPOS"].table.LookupList.Lookup)
        anchors = {}
        for c in cursives:
            for s in c.SubTable:
                for glyph, record in zip(s.Coverage.glyphs, s.EntryExitRecord):
                    anchors[glyph] = []
                    if record.EntryAnchor:
                        anchors[glyph].append((record.EntryAnchor.XCoordinate, record.EntryAnchor.YCoordinate))
                    if record.ExitAnchor:
                        anchors[glyph].append((record.ExitAnchor.XCoordinate, record.ExitAnchor.YCoordinate))
        self.anchors = anchors

    def get_cached_glyph(self, name):
        """Return (and memoise) outline data for glyph *name*.

        The cache entry holds the glyph's Bezier paths (zero-length paths
        dropped), per-path and whole-glyph bounding boxes, and the glyph's
        category from glyphtools.
        """
        if name in self.glyphcache:
            return self.glyphcache[name]
        paths = BezierPath.fromFonttoolsGlyph(self.font, name)
        pathbounds = []
        paths = list(filter(lambda p: p.length > 0, paths))
        for p in paths:
            p.hasAnchor = False
            p.glyphname = name
            if name in self.anchors:
                # Mark paths that contain a cursive anchor; these are allowed
                # to overlap under the "cursive" rule.
                for a in self.anchors[name]:
                    if p.pointIsInside(Point(*a)):
                        p.hasAnchor = True
            bounds = p.bounds()
            pathbounds.append(bounds)
        glyphbounds = BoundingBox()
        if pathbounds:
            for p in pathbounds:
                glyphbounds.extend(p)
        else:
            # Degenerate (empty) glyph: give it a zero-size box at the origin.
            glyphbounds.tr = Point(0, 0)
            glyphbounds.bl = Point(0, 0)
        self.glyphcache[name] = {
            "name": name,
            "paths": paths,
            "pathbounds": pathbounds,
            "glyphbounds": glyphbounds,
            "category": categorize_glyph(self.font, name)[0],
            "pathconvexhull": None  # XXX not yet computed
        }
        assert(len(self.glyphcache[name]["pathbounds"]) == len(self.glyphcache[name]["paths"]))
        return self.glyphcache[name]

    def get_positioned_glyph(self, name, pos):
        """Return a copy of the cached glyph translated to position *pos*."""
        g = self.get_cached_glyph(name)
        positioned = {
            "name": g["name"],
            "paths": [p.clone().translate(pos) for p in g["paths"]],
            "pathbounds": [b.translated(pos) for b in g["pathbounds"]],
            "glyphbounds": g["glyphbounds"].translated(pos),
            "category": g["category"]
        }
        assert(len(positioned["pathbounds"]) == len(positioned["paths"]))
        # Copy path info
        for old, new in zip(g["paths"], positioned["paths"]):
            new.hasAnchor = old.hasAnchor
            new.glyphname = old.glyphname
        return positioned

    def find_overlaps(self, g1, g2):
        """Return a list of Collision objects for intersections between the
        positioned glyphs *g1* and *g2* (empty list if none)."""
        if not (g1["glyphbounds"].overlaps(g2["glyphbounds"])):
            return []
        # Cheap bounding-box sweep first; only run segment intersection on
        # path pairs whose boxes actually touch.
        overlappingPathBounds = bbox_intersections(g1["paths"], g2["paths"])
        if not overlappingPathBounds:
            return []
        overlappingPaths = {}
        for p1, p2 in overlappingPathBounds:
            left_segs = p1.asSegments()
            right_segs = p2.asSegments()
            overlappingSegBounds = bbox_intersections(left_segs, right_segs)
            for s1, s2 in overlappingSegBounds:
                intersects = s1.intersections(s2)
                if len(intersects) > 0:
                    # Keyed by path pair so each pair is reported once.
                    overlappingPaths[(p1, p2)] = Collision(
                        glyph1=g1["name"],
                        glyph2=g2["name"],
                        path1=p1,
                        path2=p2,
                        point=intersects[0].point
                    )
        return list(overlappingPaths.values())

    def get_glyphs(self, text, buf=None):
        """Returns a list of dictionaries representing a shaped string.

        Args:
            text: text to check
            buf: (Optional) already shaped uharfbuzz buffer.

        This is the first step in collision detection; the dictionaries
        returned can be fed to ``draw_overlaps`` and ``has_collisions``."""
        if not buf:
            buf = self.shape_a_text(text)
        glyf = self.font["glyf"]
        cursor = 0
        glyphs = []
        ix = 0
        for info, pos in zip(buf.glyph_infos, buf.glyph_positions):
            position = Point(cursor + pos.position[0], pos.position[1])
            name = glyf.getGlyphName(info.codepoint)
            g = self.get_positioned_glyph(name, position)
            g["advance"] = pos.position[2]
            for p in g["paths"]:
                # Remember which cluster/glyph each path came from.
                p.origin = info.cluster
                p.glyphIndex = ix
            glyphs.append(g)
            ix = ix + 1
            cursor = cursor + pos.position[2]
        return glyphs

    def draw_overlaps(self, glyphs, collisions, attribs=""):
        """Return an SVG string displaying the collisions.

        Args:
            glyphs: A list of glyphs dictionaries.
            collisions: A list of Collision objects.
            attribs: String of attributes added to SVG header.
        """
        svgpaths = []
        # NOTE(review): extend() below mutates the first glyph's bounding box
        # in place; harmless for positioned copies, but worth confirming.
        bbox = glyphs[0]["glyphbounds"]
        col = ["green", "red", "purple", "blue", "yellow"]
        for ix, g in enumerate(glyphs):
            bbox.extend(g["glyphbounds"])
            for p in g["paths"]:
                svgpaths.append(
                    "<path d=\"%s\" fill=\"%s\"/>" %
                    (p.asSVGPath(), col[ix % len(col)])
                )
        for c in collisions:
            intersect = c.path1.intersection(c.path2)
            for i in intersect:
                svgpaths.append(
                    "<path d=\"%s\" fill=\"black\"/>" %
                    (i.asSVGPath())
                )
        return "<svg %s viewBox=\"%i %i %i %i\">%s</svg>\n" % (attribs,
            bbox.left, bbox.bottom, bbox.width, bbox.height, "\n".join(svgpaths)
        )

    def has_collisions(self, glyphs_in):
        """Run the collision detection algorithm according to the rules provided.

        Note that this does not find *all* overlaps, but returns as soon
        as some collisions are found.

        Args:
            glyphs: A list of glyph dictionaries returned by ``get_glyphs``.

        Returns: A list of Collision objects.
        """
        # Rules for collision detection:
        # "Far away" (adjacency > 1) glyphs should not interact at all
        glyphs = glyphs_in
        if self.direction == "rtl":
            glyphs = list(reversed(glyphs))
        if "faraway" in self.rules:
            for firstIx, first in enumerate(glyphs):
                nonAdjacent = firstIx + 1
                if first["category"] == "base":
                    # Skip mark and next base
                    while nonAdjacent < len(glyphs) and glyphs[nonAdjacent]["category"] == "mark":
                        nonAdjacent = nonAdjacent + 1
                    nonAdjacent = nonAdjacent + 1
                if nonAdjacent >= len(glyphs):
                    continue
                for secondIx in range(nonAdjacent, len(glyphs)):
                    second = glyphs[secondIx]
                    overlaps = self.find_overlaps(first, second)
                    if overlaps:
                        return overlaps
        if "marks" in self.rules:
            # Test every pair of marks.  (Index 0 is never used as the first
            # of a pair -- presumably a string cannot start with a mark.)
            for i in range(1, len(glyphs) - 1):
                if glyphs[i]["category"] != "mark":
                    continue
                for j in range(i + 1, len(glyphs)):
                    if glyphs[j]["category"] != "mark":
                        continue
                    overlaps = self.find_overlaps(glyphs[i], glyphs[j])
                    if overlaps:
                        return overlaps
        # Where there are anchors between a glyph pair, the anchored paths
        # should be allowed to collide but others should not
        # XX this rule does not work when cursive attachment is used occasionally
        if "cursive" in self.rules or "area" in self.rules:
            for firstIx in range(0, len(glyphs) - 1):
                first = glyphs[firstIx]
                second = glyphs[firstIx + 1]
                if "cursive" in self.rules and self.rules["cursive"]:
                    firstHasAnchors = any([x.hasAnchor for x in first["paths"]])
                    # Bug fix: the original re-tested first["paths"] here, so
                    # the second glyph's anchors were never consulted.
                    secondHasAnchors = any([x.hasAnchor for x in second["paths"]])
                    if firstHasAnchors or secondHasAnchors:
                        overlaps = self.find_overlaps(first, second)
                        # Only report collisions where exactly one path is an
                        # anchored (cursive-connection) path.
                        overlaps = list(filter(lambda x: ((x.path1.hasAnchor and not x.path2.hasAnchor) or (x.path2.hasAnchor and not x.path1.hasAnchor)), overlaps))
                        if not overlaps:
                            continue
                        return overlaps
                if "area" in self.rules:
                    overlaps = self.find_overlaps(first, second)
                    if not overlaps:
                        continue
                    newoverlaps = []
                    for i1 in overlaps:
                        intersect = i1.path1.intersection(i1.path2, flat=True)
                        for i in intersect:
                            ia = i.area
                            # Report only if the overlap exceeds the configured
                            # proportion of either path's area.
                            if ia > i1.path1.area * self.rules["area"] or ia > i1.path2.area * self.rules["area"]:
                                newoverlaps.append(i1)
                    if newoverlaps:
                        return newoverlaps
        return []
| StarcoderdataPython |
155452 | <gh_stars>0
import uuid
def genaratorActiveCode(number=200):
    """Generate *number* unique activation codes.

    Each code is a UUID1 hex string with the first four characters dropped
    (28 characters long).  Prints and returns the list of codes.

    Args:
        number: how many unique codes to produce (default 200).

    Returns:
        list of unique 28-character code strings.
    """
    result = []
    seen = set()  # O(1) membership test instead of scanning the list
    # Bug fixes vs. original: `while True is True` -> plain loop condition,
    # and `len(result) is number` (identity comparison, unreliable for
    # ints > 256) -> length comparison.
    while len(result) < number:
        code = str(uuid.uuid1()).replace('-', '')[4:]
        if code not in seen:
            seen.add(code)
            result.append(code)
    print(result)
    return result
if __name__=='__main__':
genaratorActiveCode(10)
print"Finished." | StarcoderdataPython |
3271696 | <gh_stars>0
# Demonstrate dynamic typing: the same name can be rebound to values of
# different types, and type() reports the current runtime type.
for x in (5, 2.0):
    print(x, "tipenya adalah ", type(x))
| StarcoderdataPython |
import imp
import sys
import types
def new_module(name):
    """
    Do all of the gruntwork associated with creating a new module.

    Creates an empty module object for *name*, registers it in
    ``sys.modules`` and, for dotted names, imports the parent package and
    binds the new module as an attribute on it.

    Returns the new module object.
    """
    parent = None
    if '.' in name:
        parent_name = name.rsplit('.', 1)[0]
        parent = __import__(parent_name, fromlist=[''])
    # types.ModuleType replaces the deprecated imp.new_module (the imp
    # module was removed in Python 3.12); behaviour is identical.
    module = types.ModuleType(name)
    sys.modules[name] = module
    if parent:
        # Bind under the last dotted component, e.g. "pkg.sub" -> parent.sub
        setattr(parent, name.rsplit('.', 1)[1], module)
    return module
class SettingsImporter(object):
    """Legacy (PEP 302-style) import hook that synthesises a settings module.

    When ``module_name`` is imported, the hook builds a fresh module whose
    attributes come from the ``settings`` mapping; values that are callable
    and not marked with ``is_callable_setting`` are invoked and their results
    stored instead.
    """

    def __init__(self, module_name, settings):
        self.module_name = module_name
        self.settings = settings

    def find_module(self, name, path=None):
        # Claim only the single module this importer was configured for.
        return self if name == self.module_name else None

    def load_module(self, name):
        if name in sys.modules:
            return sys.modules[name]
        # Unroll the settings into a freshly created module object.
        module = new_module(self.module_name)
        for attr, value in self.settings.items():
            invoke = callable(value) and not getattr(value, 'is_callable_setting', False)
            setattr(module, attr, value() if invoke else value)
        return module
| StarcoderdataPython |
1672911 | <reponame>leytes/scona
#!/usr/bin/env python
import pandas as pd
import numpy as np
import os
def read_in_data(
        data,
        names_file,
        covars_file=None,
        centroids_file=None,
        data_as_df=True):
    '''
    Load the inputs needed for network analysis from disk.

    Parameters
    ----------
    data : str
        path to a csv file; read in as a :class:`pandas.DataFrame` unless
        ``data_as_df=False``, in which case :func:`numpy.loadtxt` is used
        and a :class:`numpy.ndarray` is returned.
    names_file : str
        path to a text file of brain-region names, one per line.
    covars_file : str, optional
        path to a text file listing covariates to correct for.
    centroids_file : str, optional
        path to a text file of cartesian coordinates of brain regions,
        aligned line-by-line with ``names_file``.
    data_as_df : bool, optional
        see ``data`` above.

    Returns
    -------
    :class:`pandas.DataFrame`, list, list or None, list or None
        ``data, names, covars, centroids``
    '''
    def _read_lines(path):
        # One stripped entry per line of the file.
        with open(path) as handle:
            return [entry.strip() for entry in handle]

    names = _read_lines(names_file)
    covars_list = _read_lines(covars_file) if covars_file is not None else []
    centroids = list(np.loadtxt(centroids_file)) if centroids_file is not None else None
    df = pd.read_csv(data) if data_as_df else np.loadtxt(data)
    return df, names, covars_list, centroids
def write_out_measures(df, output_dir, name, first_columns=None):
    '''
    Write out a DataFrame as a csv

    Parameters
    ----------
    df : :class:`pandas.DataFrame`
        A dataframe of measures to write out
    output_dir, name : str
        The output directory and filename to write out to. Creates
        output_dir if it does not exist
    first_columns : list, optional
        Columns you want on the left hand side of the csv for readability.
        Columns go left to right in the order specified by first_columns,
        followed by any columns of ``df`` not in first_columns.
    '''
    # None default avoids the mutable-default pitfall; copying also keeps the
    # caller's list untouched (the original mutated it via extend()).
    first_columns = list(first_columns) if first_columns else []
    # Make the output directory if it doesn't exist already
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    output_f_name = os.path.join(output_dir, name)
    # Bug fix: the original assigned the result of list.extend(), which is
    # always None, so the requested column ordering was silently ignored.
    ordered_columns = first_columns + [
        col_name for col_name in df.columns if col_name not in first_columns]
    df.to_csv(output_f_name, columns=ordered_columns)
1695280 | from flask import Flask
from app.routes.routes import blueprint
from app.auth.auth import auth_blueprint
from app.fine_tune.fine_tune import fine_tune_blueprint
from app.select_tracks.select_tracks import select_blueprint
from app.result.result import result_blueprint
from app.home.home import home_blueprint
from app.loading.loading import loading_blueprint
from app.error.error import error_blueprint
def create_app():
    """
    Build and configure the Flask application.

    Registers every feature blueprint so the app-factory pattern keeps a
    single construction point for deployments and tests.
    """
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'JUSTARANDOMKEY'
    # Registration order matches the original module.
    feature_blueprints = (
        blueprint,
        home_blueprint,
        auth_blueprint,
        fine_tune_blueprint,
        select_blueprint,
        result_blueprint,
        loading_blueprint,
        error_blueprint,
    )
    for feature_blueprint in feature_blueprints:
        app.register_blueprint(feature_blueprint)
    return app
| StarcoderdataPython |
1749873 | from seahub.views.repo import get_upload_url
from seahub.test_utils import BaseTestCase
class GetUploadUrlTest(BaseTestCase):
    """Tests for ``seahub.views.repo.get_upload_url``."""

    def test_can_get(self):
        # The returned upload URL should point at the file-server port
        # (8082 -- presumably the test-config default; confirm against the
        # test settings).
        rst = get_upload_url(self.fake_request, self.repo.id)
        assert '8082' in rst
| StarcoderdataPython |
3236248 | <reponame>davan690/talks.ox<filename>talks/events/urls.py<gh_stars>0
from django.conf.urls import patterns, url
from talks.events.views import (upcoming_events, show_person, show_event, events_for_day, show_department_organiser,
events_for_month, events_for_year, list_event_groups,show_event_group, show_topic, list_topics,
show_department_descendant, list_departments)
from talks.contributors.views import (create_person, edit_person, edit_event, create_event, create_event_group,
edit_event_group, delete_event, delete_event_group)
# URL routing for the events app.
# NOTE(review): ``patterns()`` with a prefix string is the pre-Django-1.10
# routing API; this file targets an older Django release.
urlpatterns = patterns('',
    url(r'^$', upcoming_events, name='upcoming_events'),
    # People
    url(r'^persons/new$', create_person, name='create-person'),
    url(r'^persons/id/(?P<person_slug>[^/]+)$', show_person, name='show-person'),
    url(r'^persons/id/(?P<person_slug>[^/]+)/edit$', edit_person, name='edit-person'),
    # Individual events
    url(r'^new$', create_event, name='create-event'),
    url(r'^id/(?P<event_slug>[^/]+)/$', show_event, name='show-event'),
    url(r'^id/(?P<event_slug>[^/]+)/edit$', edit_event, name='edit-event'),
    url(r'^id/(?P<event_slug>[^/]+)/delete', delete_event, name='delete-event'),
    url(r'^series/(?P<group_slug>[^/]+)/new$', create_event, name='create-event-in-group'),
    # Date-based listings
    url(r'^date/(?P<year>\d{4})/$', events_for_year, name='events_year'),
    url(r'^date/(?P<year>\d{4})/(?P<month>\d{2})/$', events_for_month, name='events_month'),
    url(r'^date/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/$', events_for_day, name='events_day'),
    # Event series (groups)
    url(r'^series/$', list_event_groups, name='list-event-groups'),
    url(r'^series/new$', create_event_group, name='create-event-group'),
    url(r'^series/id/(?P<event_group_slug>[^/]+)$', show_event_group, name='show-event-group'),
    url(r'^series/id/(?P<event_group_slug>[^/]+)/edit$', edit_event_group, name='edit-event-group'),
    url(r'^series/id/(?P<event_group_slug>[^/]+)/delete', delete_event_group, name='delete-event-group'),
    # Topics and departments
    url(r'^topics/id/$', show_topic, name="show-topic"),
    url(r'^topics$', list_topics, name='browse-topics'),
    url(r'^department$', list_departments, name='browse-departments'),
    url(r'^department/id/(?P<org_id>[^/]+)$', show_department_descendant, name="show-department"),
)
| StarcoderdataPython |
3323479 | <reponame>mommermi/cloudynight
""" Licensed under a 3-clause BSD style license - see LICENSE.rst
This script shows how to extract features from raw images.
The use of this script requires a mask file,
which has to be created with the script generate_mask.py
(c) 2020, <NAME> (<EMAIL>)
"""
import os
import requests
import numpy as np
import cloudynight
# Instantiate an AllskyCamera object for the example image repository
# (path is relative to the base directory defined in __init__.py:
# example_data/).  This will create a directory `workbench/images` in the
# repository root; `images` is named after the raw image directory (could
# be a night directory).
cam = cloudynight.AllskyCamera('images')

# Read in the mask file; it has to be created with generate_mask.py first.
cam.read_mask(filename='../workbench/images/mask.fits')

# Read in image data (images are automatically cropped on read).
# only_new_data=False is necessary to read *all* data in the directory,
# not just files newer than the previous run.
cam.read_data_from_directory(only_new_data=False)

# Generate subregions.
cam.generate_subregions()

# Use the wrapper to process all images.
# `no_upload=True` can be removed if the webapp is set up properly.
cam.process_and_upload_data(no_upload=True)

# Plot background median values per subregion for all images.
for img in cam.imgdata:
    sourcedens_overlay = img.create_overlay(overlaytype='bkgmedian')
    img.write_image(overlay=sourcedens_overlay, mask=cam.maskdata,
                    filename=
                    os.path.join(cloudynight.conf.DIR_ARCHIVE,
                                 '{}_bkgmedian.png'.format(
                                     img.filename[:img.filename.find('.fit')])))
| StarcoderdataPython |
1702518 | <reponame>spotlightpa/covid-alerts-emailer<gh_stars>1-10
from src.definitions import (
DIR_TEMPLATES,
DIR_TESTS_OUTPUT,
)
from src.modules.gen_html.gen_html import gen_html
from src.modules.gen_html.gen_jinja_vars import gen_jinja_vars
def test_gen_html_dauphin(dauphin_info, dauphin_payload, stories_clean):
    """Render the Dauphin county newsletter and dump it for inspection."""
    template_vars = gen_jinja_vars(
        dauphin_info["name"],
        county_payload=dauphin_payload,
        newsletter_browser_link="",
        story_promo=stories_clean,
    )
    print("Newsletter vars", template_vars)
    rendered = gen_html(templates_path=DIR_TEMPLATES, template_vars=template_vars)
    with open(DIR_TESTS_OUTPUT / "newsletter-test.html", "w") as handle:
        handle.writelines(rendered)
def test_gen_html_greene(greene_county_dict, greene_payload, stories_clean):
    """Render the Greene county newsletter and dump it for inspection."""
    template_vars = gen_jinja_vars(
        greene_county_dict["42059"]["name"],
        county_payload=greene_payload,
        newsletter_browser_link="",
        story_promo=stories_clean,
    )
    rendered = gen_html(templates_path=DIR_TEMPLATES, template_vars=template_vars)
    with open(DIR_TESTS_OUTPUT / "newsletter-test.html", "w") as handle:
        handle.writelines(rendered)
| StarcoderdataPython |
106775 | import requests
# ANSI colour escape codes for console output.
r = "\033[1;31m"  # red
g = "\033[1;32m"  # green
y = "\033[1;33m"  # yellow
b = "\033[1;34m"  # blue
x = "\033[0;0m"   # reset
# ASCII-art startup banner.  NOTE(review): the art's internal spacing
# appears to have been mangled by whitespace stripping; restore from the
# upstream repository if available.
banner="""
_______________________________________
| .__ .___ .__ |
| ______ |__| __| _/_____ |__| ____ |
| \____ \| |/ __ |/ \| |/ \ |
| | |_> > / /_/ | Y Y \ | | \ |
| | __/|__\____ |__|_| /__|___| / |
| |__| \/ \/ \/ |
| |
| developed by: <NAME> | egg sec |
| http://github.com/salman1410/ |
|_____________________________________|
"""
def main():
    """Prompt for a host and probe it for common admin-panel paths.

    Reads candidate paths from panels.txt (one per line) and reports any
    path for which a HEAD request succeeds.  Python 2 only (raw_input and
    print-statement syntax).
    """
    # NOTE(review): the file handle is never closed.
    o = open("panels.txt","r");
    url = raw_input("enter url:~# ")
    print y+"\n[!]searching, please wait..."+x
    while True:
        panel = o.readline()
        if not panel:
            break
        # NOTE(review): readline() keeps the trailing newline, so the probed
        # URL contains "\n" -- presumably requests tolerates/strips it, but
        # an explicit .strip() would be safer; confirm.
        x_url = "http://"+url+"/"+panel
        search = requests.head(x_url)
        # NOTE(review): "< 400" also counts 3xx redirects as "found".
        if search.status_code < 400:
            print g+"\n[+]Found:"+x, x_url
    print y+"\n[!]Done"+x
# Script entry point: show the banner and run the scanner; exit quietly on
# Ctrl-C.  NOTE(review): no ``if __name__ == '__main__'`` guard, so this
# runs on import as well.
try:
    print(b+banner+x)
    main()
except (KeyboardInterrupt, SystemExit):
    print r+"\n[x]Aborted"+x
| StarcoderdataPython |
1649863 | <gh_stars>1-10
import requests
import time
import os
import sqlite3
def init_make_request():
    """Initialise the module globals used by make_request.

    Sets up the rate-limiting timestamps/counters and opens the sqlite
    database connection shared by the other functions in this module.
    """
    global conn
    global last_hour
    global last_minute
    global queries_for_last_minute
    global queries_for_last_hour
    # NOTE(review): time.clock() was removed in Python 3.8; this module is
    # Python 2 code (see the print statements elsewhere in the file).
    last_hour = time.clock()
    last_minute = time.clock()
    queries_for_last_minute = 0
    queries_for_last_hour = 0
    conn = sqlite3.connect('chitanka.db')
def make_request(req):
    """Perform a rate-limited GET request and return the response.

    Enforces at most ~175 requests per hour and ~18 per minute using the
    module globals set up by init_make_request; sleeps until a fresh
    window opens when a limit is hit.
    """
    global last_minute
    global last_hour
    global queries_for_last_minute
    global queries_for_last_hour
    # Base pacing: at least 2 seconds between requests.
    time.sleep(2)
    while queries_for_last_hour > 175:
        delta = time.clock() - last_hour
        if delta < 3600:
            print "queries limit for hour reached, %d minutes remaining" % int(60-delta/60)
            time.sleep(60)
        else:
            last_hour = time.clock()
            queries_for_last_hour = 0
    while queries_for_last_minute > 18:
        # NOTE(review): this measures against last_hour, not last_minute --
        # looks like a copy-paste bug; the per-minute window is effectively
        # compared to the hourly timestamp.  Confirm and fix upstream.
        delta = time.clock() - last_hour
        if delta < 60:
            print "queries limit for minute reached, %d seconds remaining" % int(60-delta)
            time.sleep(10)
        else:
            last_minute = time.clock()
            queries_for_last_minute = 0
    queries_for_last_hour += 1
    queries_for_last_minute += 1
    # Proxy support is currently disabled (see the commented request below).
    proxy = {'http': 'http://93.123.45.23:8008'}
    #r = requests.get(req, proxies = proxy)
    r = requests.get(req)
    return r
def find_books_in_text(text):
    """Scan a chitanka HTML page for epub download entries and record any
    previously unseen book names in the ``books`` table.

    Relies on the module-global sqlite3 connection ``conn`` set up by
    ``init_make_request``.
    """
    global conn
    c = conn.cursor()
    marker = '<span>epub</span></a></li>'
    ind = text.find(marker)
    while ind != -1:
        # The book identifier is the quoted href that follows the marker.
        ind = text.find('"', ind + len(marker)) + 1
        book_name = text[ind:text.find('"', ind)]
        # Bug/security fix: the original interpolated book_name straight into
        # the SQL string, which broke (and was injectable) for names
        # containing quotes.  Use parameterised queries instead.
        c.execute('select * from books where name=?', (book_name,))
        if len(c.fetchall()) == 0:
            c.execute('insert into books values (?, 0)', (book_name,))
            conn.commit()
            print('new book found: %s' % book_name)
        ind = text.find(marker, ind)
    c.close()
def main():
    """Crawl every chitanka category page (and its pagination) and record
    newly seen books via find_books_in_text.

    Uses the module-global sqlite3 connection ``conn``.
    """
    global conn
    c = conn.cursor()
    c.execute('select * from categories')
    cats = c.fetchall()
    # Skip categories until 'savremenni-romani-i-povesti' is reached --
    # a crude resume mechanism.
    flag = True
    for category in cats:
        print 'getting books in %s' % str(category[0])
        if str(category[0]) == 'savremenni-romani-i-povesti':
            flag = False
        if flag:
            continue
        tries = 5
        while tries:
            try:
                # NOTE(review): `--tries` is two unary minuses, a no-op in
                # Python -- tries is never decremented, so this retry loop
                # can spin forever on a persistent error.  Should be
                # `tries -= 1`.
                --tries
                r = make_request('http://www.chitanka.info/books/category/'+category[0])
                break
            except:
                # NOTE(review): bare except hides all errors, including
                # KeyboardInterrupt on Python 2.
                print "exception"
                time.sleep(30)
        find_books_in_text(r.text)
        pagination = r.text.find('<ul class="pagination">')
        if pagination != -1:
            # Extract the number of pages by scanning backwards/forwards
            # around the "next" pagination link.
            ind = r.text.find('<li class="next">')
            while r.text[ind] != '"':
                ind = ind - 1
            ind = ind + 2
            second_ind = ind + 1
            while r.text[second_ind] != '<':
                second_ind = second_ind + 1
            pages_count = int(r.text[ind:second_ind])
            for i in range(1, pages_count):
                print 'category page %d' % (i+1)
                tries = 5
                while tries:
                    try:
                        # NOTE(review): same `--tries` no-op as above.
                        --tries
                        r = make_request('http://www.chitanka.info/books/category/'+category[0]+'.html/'+str(i+1))
                        break
                    except:
                        print "except"
                        time.sleep(30)
                find_books_in_text(r.text)
    c.close()
if __name__ == '__main__':
init_make_request()
main() | StarcoderdataPython |
1756505 | from setuptools import setup, find_packages
import os
from datarobot_drum.drum.description import version, project_name
from datarobot_drum.drum.common import extra_deps, SupportedFrameworks
# The directory containing this file
root = os.path.dirname(os.path.abspath(__file__))

# Runtime dependencies, one requirement specifier per line.
with open(os.path.join(root, "requirements.txt")) as f:
    requirements = f.read().splitlines()

# Long description for PyPI comes straight from the README.
with open(os.path.join(root, "README.md")) as f:
    long_desc = f.read()

# Optional dependency groups, keyed by the ML framework (or feature) they
# enable; most are defined centrally in datarobot_drum.drum.common.
extras_require = {
    "scikit-learn": extra_deps[SupportedFrameworks.SKLEARN],
    "torch": extra_deps[SupportedFrameworks.TORCH],
    "keras": extra_deps[SupportedFrameworks.KERAS],
    "xgboost": extra_deps[SupportedFrameworks.XGBOOST],
    "R": ["rpy2;python_version>='3.6'"],
    "pypmml": extra_deps[SupportedFrameworks.PYPMML],
    "trainingModels": ["datarobot==2.24.0"],
}

setup(
    name=project_name,
    version=version,
    description="Custom Model Runner",
    long_description=long_desc,
    long_description_content_type="text/markdown",
    url="http://datarobot.com",
    author="DataRobot",
    author_email="<EMAIL>",
    license="Apache License, Version 2.0",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: MacOS",
        "Operating System :: POSIX",
        "Operating System :: Unix",
    ],
    zip_safe=False,
    include_package_data=True,
    packages=find_packages("."),
    # Non-Python artefacts shipped inside the package.
    package_data={
        "": ["*.json", "*.jar", "*.R", "*.j2", "*.jl", "*.toml"],
        "datarobot_drum.resource.pipelines": ["*"],
    },
    scripts=["bin/drum"],
    install_requires=requirements,
    extras_require=extras_require,
    python_requires=">=3.4,<3.9",
)
| StarcoderdataPython |
1655660 | <filename>fplib/misc.py<gh_stars>0
from fplib.curry import curry
def fail(x):
    """Raise ``ValueError(x)``; usable where an expression-like failure is needed."""
    raise ValueError(x)
def ident(x):
    """Identity function: return *x* unchanged."""
    return x
@curry
def compose(f, g, x):
    """Curried function composition: ``compose(f, g)(x) == f(g(x))``."""
    return f(g(x))
@curry
def const(x, y):
    """Curried constant function: ``const(x)`` always returns *x*, ignoring *y*."""
    return x
| StarcoderdataPython |
1783839 | import unittest
from unittest import TestCase
from algorithms.dataStructures.LinkedList import Node
from algorithms.dataStructures.LinkedList import LinkedList
class linked_list_Test(TestCase):
    """Construction tests for Node and LinkedList."""

    def test_create_node(self):
        n = Node(1)
        self.assertIsInstance(n, Node)

    def test_create_node_assigns_correct_data(self):
        n = Node(1)
        self.assertEqual(1, n.data)

    def test_create_node_assigns_None_to_next(self):
        n = Node(1)
        self.assertIsNone(n.next)

    def test_create_linked_list(self):
        ll = LinkedList()
        self.assertIsInstance(ll, LinkedList)

    def test_create_linked_list_with_node(self):
        n = Node(1)
        ll = LinkedList(n)
        self.assertEqual(n, ll.head)
        self.assertEqual(1, ll.head.data)

    def test_create_linked_list_with_multiple_nodes(self):
        # Nodes are chained via the second constructor argument; the last
        # node created ('a') becomes the head: 'a' -> 2 -> 1.
        n = Node(1)
        n2 = Node(2, n)
        n3 = Node('a', n2)
        ll = LinkedList(n3)
        self.assertEqual(n3, ll.head)
        self.assertEqual('a', ll.head.data)
        self.assertEqual(n2, ll.head.next)
        self.assertEqual(2, ll.head.next.data)
        self.assertEqual(n, ll.head.next.next)
        self.assertEqual(1, ll.head.next.next.data)
class get_element_Test(TestCase):
    """Tests for LinkedList.get_element and get_head."""

    def setUp(self):
        # Fixture list reads head-to-tail as: 'a' -> 2 -> 1.
        n = Node(1)
        n2 = Node(2, n)
        n3 = Node('a', n2)
        self.ll = LinkedList(n3)

    def test_access_first_element(self):
        n = self.ll.get_element(0)
        self.assertEqual('a', n.data)

    def test_access_second_element(self):
        n = self.ll.get_element(1)
        self.assertEqual(2, n.data)

    def test_access_third_element(self):
        n = self.ll.get_element(2)
        self.assertEqual(1, n.data)

    def test_get_head_single_element_list(self):
        n = Node(1)
        ll2 = LinkedList(n)
        data = ll2.get_head()
        self.assertEqual(n, data)

    def test_get_head_multiple_element_list(self):
        n = self.ll.get_head()
        self.assertEqual('a', n.data)
class empty_linked_list_Test(TestCase):
    """Insertions into an initially empty list."""

    def setUp(self):
        self.ll = LinkedList()

    def _assert_single_node(self, value):
        # Helper: the list holds exactly one node carrying *value*.
        self.assertEqual(value, self.ll.head.data)
        self.assertIsNone(self.ll.head.next)

    def test_insert_end(self):
        self.ll.insert_end(1)
        self._assert_single_node(1)

    def test_insert_start(self):
        self.ll.insert_start(1)
        self._assert_single_node(1)

    def test_insert_position_0(self):
        self.ll.insert(1, 0)
        self._assert_single_node(1)
class linked_list_insert_Test(TestCase):
    """Insertions into a pre-populated list (fixture: 'a' -> 2 -> 1)."""

    def setUp(self):
        self.ll = LinkedList(Node('a', Node(2, Node(1))))

    def test_insert_node_at_end(self):
        self.ll.insert_end(5)
        appended = self.ll.head.next.next.next
        self.assertEqual(5, appended.data)
        self.assertIsNone(appended.next)

    def test_insert_node_at_end_doesnt_change_other_nodes(self):
        self.ll.insert_end(5)
        head = self.ll.head
        self.assertEqual(1, head.next.next.data)
        self.assertEqual(2, head.next.data)
        self.assertEqual('a', head.data)

    def test_insert_node_at_start(self):
        self.ll.insert_start(5)
        self.assertEqual(5, self.ll.head.data)
        self.assertIsNotNone(self.ll.head.next)

    def test_insert_node_at_start_doesnt_change_other_nodes(self):
        self.ll.insert_start(5)
        head = self.ll.head
        self.assertEqual(1, head.next.next.next.data)
        self.assertEqual(2, head.next.next.data)
        self.assertEqual('a', head.next.data)

    def test_insert_position_1(self):
        self.ll.insert(5, 1)
        self.assertEqual(5, self.ll.head.next.data)
        self.assertIsNotNone(self.ll.head.next.next)

    def test_insert_position_1_doesnt_change_other_nodes(self):
        self.ll.insert(5, 1)
        head = self.ll.head
        self.assertEqual(1, head.next.next.next.data)
        self.assertEqual(2, head.next.next.data)
        self.assertEqual('a', head.data)

    def test_insert_position_2(self):
        self.ll.insert(5, 2)
        self.assertEqual(5, self.ll.head.next.next.data)
        self.assertIsNotNone(self.ll.head.next.next.next)

    def test_insert_position_2_doesnt_change_other_nodes(self):
        self.ll.insert(5, 2)
        head = self.ll.head
        self.assertEqual(1, head.next.next.next.data)
        self.assertEqual(2, head.next.data)
        self.assertEqual('a', head.data)

    def test_insert_position_3(self):
        self.ll.insert(5, 3)
        last = self.ll.head.next.next.next
        self.assertEqual(5, last.data)
        self.assertIsNone(last.next)

    def test_insert_position_3_doesnt_change_other_nodes(self):
        self.ll.insert(5, 3)
        head = self.ll.head
        self.assertEqual(1, head.next.next.data)
        self.assertEqual(2, head.next.data)
        self.assertEqual('a', head.data)
class linked_list_delete_Test(TestCase):
    """Deletion by position (fixture: 'a' -> 2 -> 1)."""

    def setUp(self):
        self.ll = LinkedList(Node('a', Node(2, Node(1))))

    def test_delete_only_element(self):
        # Deleting the sole node leaves an empty list.
        single = LinkedList()
        single.insert_start(1)
        single.delete(0)
        self.assertIsNone(single.head)

    def test_delete_head(self):
        self.ll.delete(0)
        self.assertEqual(2, self.ll.head.data)
        self.assertEqual(1, self.ll.head.next.data)
        self.assertIsNone(self.ll.head.next.next)

    def test_delete_end(self):
        self.ll.delete(2)
        self.assertIsNone(self.ll.head.next.next)
        self.assertEqual(2, self.ll.head.next.data)
        self.assertEqual('a', self.ll.head.data)
class linked_list_print_Test(TestCase):
    """display() writes one element per line to stdout."""

    def setUp(self):
        self.ll = LinkedList(Node('a', Node(2, Node(1))))

    def test_display(self):
        import sys
        from io import StringIO
        captured = StringIO()
        original_stdout = sys.stdout
        sys.stdout = captured
        try:
            self.ll.display()
        finally:
            # Always restore stdout, even if display() raises.
            sys.stdout = original_stdout
        self.assertEqual(captured.getvalue().strip(), "a\n2\n1")
# Allow the test module to be executed directly: python <this_file>.py
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1796362 | <filename>bin/pointGravityInversion.py
#!/usr/bin/python3
__copyright__ = "Copyright (c) 2021 by University of Queensland http://www.uq.edu.au"
__license__ = "Licensed under the Apache License, version 2.0 http://www.apache.org/licenses/LICENSE-2.0"
__credits__ = "<NAME>"
import importlib, sys, os
sys.path.insert(0, os.getcwd())
import argparse
from esys.escript import *
from esys.finley import ReadGmsh, ReadMesh
import esys.escript.unitsSI as U
import numpy as np
from esys.escript.linearPDEs import LinearSinglePDE, LinearPDE, SolverOptions
from esys.escript.pdetools import PCG
from esys.downunder import *
from esys.weipa import *
class FOSLSGravity(object):
    """First-order-system least-squares (FOSLS) gravity inversion driver.

    Inverts point gravity observations ``gz`` measured at ``recorders`` for a
    relative density correction, using a PCG iteration on the FOSLS normal
    equations over an esys-escript ``domain``.

    Parameters
    ----------
    domain : escript domain with Dirac points at the recorder locations.
    gz : sequence of observed (downward) gravity values, one per recorder.
    recorders : list of (x, y, z) observation points.
    rho_0 : reference density (escript ``Scalar`` or float).
    P0 : initial guess for the 4-component solution vector P.
    wdsq : per-observation squared data weights.
    mu : regularisation trade-off weight.
    a, b : smoothing-operator parameters.
    atol, rtol : absolute/relative PCG stopping tolerances.
    iter_max : PCG iteration cap.
    pde_tol : tolerance handed to the inner PDE solvers.
    name : prefix for output CSV/Silo files.
    verboseLevel : "low" | "medium" (library PCG) | "high" (logging myPCG).
    """

    def __init__(self, domain, gz, recorders, rho_0, P0, wdsq,
                 mu, a=0., b=1., atol=1.0, rtol=1.0, iter_max=100,
                 pde_tol=1e-8, name='bob', verboseLevel="low"):
        self.domain = domain
        self.gz = np.array(gz)
        self.w = - kronecker(3)[2]  # unit vector pointing "down" (negative z)
        self.locG = Locator(ContinuousFunction(self.domain), recorders)
        self.rho_0 = rho_0
        self.P0 = P0
        self.wdsq = np.array(wdsq)
        self.mu = mu
        self.a = a
        self.b = b
        self.atol = atol
        self.rtol = rtol
        self.iter_max = iter_max
        # BUG FIX: the original assigned `self.pdetol = pdetol`, silently
        # picking up a module-level global and ignoring the pde_tol argument.
        self.pdetol = pde_tol
        self.name = name
        self.numes = len(self.gz)
        self.verboseLevel = verboseLevel
        self.beta = -4.0*np.pi*U.Gravitational_Constant
        # boundary masks (1 where the coordinate touches the bounding box)
        coord = self.domain.getX()
        self.qtop = whereZero(coord[2]-sup(coord[2]))
        self.qbottom = whereZero(coord[2]-inf(coord[2]))
        self.qleft = whereZero(coord[0]-inf(coord[0]))
        self.qright = whereZero(coord[0]-sup(coord[0]))
        self.qfront = whereZero(coord[1]-inf(coord[1]))
        self.qback = whereZero(coord[1]-sup(coord[1]))
        # inner PDE solvers, configured once and reused
        self.dPpde = self.setupdPpde()
        self.dppde = self.setupdppde()
        self.FOSLSpde = self.setupFOSLSpde()

    def setupFOSLSpde(self):
        """Build the symmetric 3-equation FOSLS PDE with div/curl coupling."""
        FOSLSpde = LinearPDE(self.domain, numEquations=3, numSolutions=3)
        FOSLSpde.setSymmetryOn()
        q = Data(0, (3,), Solution(self.domain))
        q[0] = self.qleft+self.qright+self.qtop
        q[1] = self.qfront+self.qback+self.qtop
        q[2] = self.qbottom
        FOSLSpde.setValue(q=q)
        A = Data(0, (3, 3, 3, 3), Function(self.domain))
        for jj in range(3):
            for kk in range(3):
                A[jj, jj, kk, kk] = Scalar(1., Function(self.domain))
                if kk < jj:
                    # antisymmetric curl-coupling entries
                    A[kk, jj, kk, jj] = Scalar(1., Function(self.domain))
                    A[jj, kk, jj, kk] = Scalar(1., Function(self.domain))
                    A[kk, jj, jj, kk] = -Scalar(1., Function(self.domain))
                    A[jj, kk, kk, jj] = -Scalar(1., Function(self.domain))
        FOSLSpde.setValue(A=A)
        Foptions = FOSLSpde.getSolverOptions()
        Foptions.setPackage(SolverOptions.TRILINOS)
        Foptions.setSolverMethod(SolverOptions.PCG)
        Foptions.setPreconditioner(SolverOptions.AMG)
        Foptions.setTolerance(self.pdetol)
        Foptions.setTrilinosParameter("number of equations", 3)
        Foptions.setTrilinosParameter("reuse: type", "full")
        return FOSLSpde

    def setupdPpde(self):
        """Preconditioner PDE for the first three solution components."""
        aa = self.a*self.a
        bb = self.b*self.b
        pde = LinearSinglePDE(self.domain, isComplex=False)
        pde.setValue(A=aa*kronecker(3))
        pde.setValue(D=Scalar(bb, Function(self.domain)))
        pde.setSymmetryOn()
        q = self.qleft+self.qright+self.qfront+self.qback+self.qtop+self.qbottom
        pde.setValue(q=q)
        Foptions = pde.getSolverOptions()
        Foptions.setPackage(SolverOptions.TRILINOS)
        Foptions.setSolverMethod(SolverOptions.PCG)
        Foptions.setPreconditioner(SolverOptions.AMG)
        Foptions.setTolerance(self.pdetol)
        Foptions.setTrilinosParameter("reuse: type", "full")
        return pde

    def setupdppde(self):
        """Preconditioner PDE for the fourth (density-potential) component."""
        aabb = self.a*self.a*self.b*self.b
        pde = LinearSinglePDE(self.domain, isComplex=False)
        pde.setValue(A=aabb*kronecker(3))
        pde.setValue(D=Scalar(1., Function(self.domain)))
        q = self.qleft+self.qright+self.qfront+self.qback+self.qtop+self.qbottom
        pde.setValue(q=q)
        Foptions = pde.getSolverOptions()
        Foptions.setPackage(SolverOptions.TRILINOS)
        Foptions.setSolverMethod(SolverOptions.PCG)
        Foptions.setPreconditioner(SolverOptions.AMG)
        Foptions.setTolerance(self.pdetol)
        Foptions.setTrilinosParameter("reuse: type", "full")
        return pde

    def getRHSsolve(self, f):
        """Solve the FOSLS PDE with point loads w*wdsq[e]*f[e] at each recorder."""
        Y = Data(0, (3,), Function(self.domain))
        X = Data(0, (3, 3), Function(self.domain))
        wtWd = Data(0, (3,), DiracDeltaFunctions(self.domain))
        for e in range(self.numes):
            # BUG FIX: use the instance weights; the original read the
            # module-level global `wdsq` instead of self.wdsq.
            load = self.w*self.wdsq[e]*f[e]
            wtWd.setTaggedValue(e, load)
        self.FOSLSpde.setValue(X=X, Y=Y, y_dirac=wtWd)
        return self.FOSLSpde.getSolution()

    def RHS(self):
        """Assemble the right-hand side of the normal equations from the data."""
        U = self.getRHSsolve(self.gz)
        NewY = Data(0., (4,), Function(self.domain))
        NewY[3] = (self.beta*self.rho_0/self.mu)*div(U)
        NewX = Data(0., (4, 3), Function(self.domain))
        return ArithmeticTuple(NewY, NewX)

    def Aprod(self, P):
        """Apply the normal operator A to P.

        Left hand side <AP,Q> = (SP, SQ) + (DP, Q) = (S1 P + D P, Q) + (S2 P, grad(Q));
        returns the tuple (S1 P + D P, S2 P).
        """
        Udown = - self.getGravity(P)[2]
        U2pts = np.array(self.locG(Udown))
        # NOTE(review): applying the Locator to the already-extracted numpy
        # array looks redundant; presumably cU should simply be U2pts -- confirm.
        cU = self.locG(U2pts)
        U = self.getRHSsolve(cU)
        a = self.a
        aa = a*a
        bb = self.b*self.b
        NewY = Data(0., (4,), Function(self.domain))
        NewX = Data(0., (4, 3), Function(self.domain))
        gradp0 = grad(P[0])
        gradp1 = grad(P[1])
        gradp2 = grad(P[2])
        gradp3 = grad(P[3])
        curl2 = gradp1[0]-gradp0[1]
        curl1 = gradp0[2]-gradp2[0]
        curl0 = gradp2[1]-gradp1[2]
        y3 = -a*(gradp0[0] + gradp1[1] + gradp2[2]) + P[3]
        NewY[0] = bb*(P[0]-a*gradp3[0])
        NewY[1] = bb*(P[1]-a*gradp3[1])
        NewY[2] = bb*(P[2]-a*gradp3[2])
        NewY[3] = (self.beta*self.rho_0/self.mu)*div(U) + y3
        for kk in range(3):
            NewX[kk, kk] = -a*y3
            NewX[3, kk] = -a*NewY[kk]  # aa*gradp3[kk]-a*P[kk]
        NewX[0, 1] = -aa*curl2
        NewX[0, 2] = -aa*curl1
        NewX[1, 0] = aa*curl2
        NewX[1, 2] = -aa*curl0
        NewX[2, 0] = aa*curl1
        NewX[2, 1] = aa*curl0
        return ArithmeticTuple(NewY, NewX)

    def Msolve(self, R):
        """Preconditioner solve: (S*S U, V) = (R[0], V) + (R[1], grad(V)).

        The first three components use dPpde, the fourth uses dppde.
        """
        U = Data(0., (4,), Solution(self.domain))
        for ind1 in range(3):
            Y = R[0][ind1]
            X = R[1][ind1]
            self.dPpde.setValue(Y=Y, X=X)
            U[ind1] = self.dPpde.getSolution()
        Y = R[0][3]
        X = R[1][3]
        self.dppde.setValue(Y=Y, X=X)
        U[3] = self.dppde.getSolution()
        return U

    def bilinearform(self, P, R):
        """Duality pairing integrate((P, R[0]) + (grad(P), R[1]))."""
        PR0 = inner(P, R[0])
        PR1 = inner(grad(P), R[1])  # grad(a)[i,j,k] = partial a[i,j]/partial k
        return integrate(PR0+PR1)

    def getGravity(self, P):
        """Gravity field generated by the density correction encoded in P[3]."""
        Y = Data(0, (3,), Function(self.domain))
        X = Data(0, (3, 3), Function(self.domain))
        wtWd = Data(0, (3,), DiracDeltaFunctions(self.domain))
        for jj in range(3):
            X[jj, jj] = self.beta*self.rho_0*P[3]
        self.FOSLSpde.setValue(Y=Y, X=X, y_dirac=wtWd)
        U = self.FOSLSpde.getSolution()
        return U

    def getSPSP(self, P):
        """Smoothness functional ||S P||^2 (sum of component-wise integrals)."""
        a = self.a
        b = self.b
        gradp0 = grad(P[0])
        gradp1 = grad(P[1])
        gradp2 = grad(P[2])
        gradp3 = grad(P[3])
        SP0 = integrate((b*(P[0]-a*gradp3[0]))**2)
        SP1 = integrate((b*(P[1]-a*gradp3[1]))**2)
        SP2 = integrate((b*(P[2]-a*gradp3[2]))**2)
        SP3 = integrate((-a*(gradp0[0] + gradp1[1]+gradp2[2])+P[3])**2)
        SP4 = integrate((a*(-gradp1[2] + gradp2[1]))**2)
        SP5 = integrate((a*(gradp0[2] - gradp2[0]))**2)
        SP6 = integrate((a*(-gradp0[1] + gradp1[0]))**2)
        SPSP = SP0+SP1+SP2+SP3+SP4+SP5+SP6
        return SPSP

    def myPCG(self, x, r, itermax, rtol):
        """Hand-rolled PCG that logs misfit/smoothness per iteration to CSV.

        x -- initial approximation P0 (4 components)
        r -- initial residual (4 components; the first three are zero)
        itermax -- iteration cap; rtol -- relative stopping tolerance
        """
        piter = 0  # iteration count
        mfs = []
        smooths = []
        rzrzs = []
        rhat = self.Msolve(r)
        d = rhat
        rhat_dot_r = self.bilinearform(rhat, r)
        if rhat_dot_r < 0:
            print("negative norm.")
        rzrz0 = rhat_dot_r
        norm_r0 = np.sqrt(rhat_dot_r)
        # BUG FIX: honour the rtol argument (the original re-read self.rtol).
        atol2 = rtol*norm_r0
        if atol2 <= 0:
            print("Non-positive tolerance.")
        print(("PCG: initial residual norm = %e (absolute tolerance = %e)"%(norm_r0, atol2)))
        # Initial misfit/smoothness entries for the CSV logs.
        # NOTE(review): assumes the "down" direction is -z; will need fixing
        # for a varying down direction.
        Udown = - self.getGravity(x)[2]
        U2pts = np.array(self.locG(Udown))
        diffG = U2pts - self.gz
        mf = np.inner(diffG, diffG*self.wdsq)
        smooth = self.getSPSP(x)
        smooths.append(smooth)
        mfs.append(mf)
        rzrzs.append(1.0)
        print(piter, 'mf', mf, 'smooth', smooth, 'rzrz 1.0')
        # ROBUSTNESS: make sure U2data exists even if the loop never runs
        # (the original raised NameError at the savetxt calls in that case).
        U2data = U2pts
        while not np.sqrt(rhat_dot_r) <= atol2:
            piter += 1
            # BUG FIX: compare against the itermax argument (the original read
            # a module-level global `iter_max`).
            if piter >= itermax:
                print("maximum number of %s steps reached."%itermax)
                break
            q = self.Aprod(d)
            alpha = rhat_dot_r / self.bilinearform(d, q)
            x += alpha * d
            r += q * (-alpha)
            rhat = self.Msolve(r)
            rhat_dot_r_new = self.bilinearform(rhat, r)
            beta = rhat_dot_r_new / rhat_dot_r
            rhat += beta * d
            d = rhat
            rhat_dot_r = rhat_dot_r_new
            if rhat_dot_r < 0:
                print("negative norm.")
            U = - self.getGravity(x)
            U2data = np.array(self.locG(U[2]))
            diffG = U2data-self.gz
            mf = np.inner(diffG, diffG*self.wdsq)
            smooth = self.getSPSP(x)
            print(piter, 'mf', mf, 'smooth', smooth, 'rzrz', rhat_dot_r/rzrz0)
            mfs.append(mf)
            smooths.append(smooth)
            rzrzs.append(np.single(rhat_dot_r/rzrz0))
        print(("PCG: tolerance reached after %s steps."%piter))
        smooths = np.array(smooths)
        mfs = np.array(mfs)
        rzrzs = np.array(rzrzs)
        np.savetxt(self.name+'smooths.csv', smooths, delimiter=",")
        np.savetxt(self.name+'mfs.csv', mfs, delimiter=",")
        np.savetxt(self.name+'rzrzs.csv', rzrzs, delimiter=",")
        np.savetxt(self.name+'compg.csv', U2data, delimiter=",")
        np.savetxt(self.name+'diffG.csv', diffG, delimiter=",")
        return x  # smooths, mfs, rzrzs are persisted to CSV instead

    def solve(self):
        """Run the inversion and return the relative density correction P[3].

        Also writes a Silo file "<name>_final.silo" with the recovered
        gravity and density fields.
        """
        r = self.RHS()
        if self.verboseLevel == "low":
            P, r, rhatr = PCG(r, self.Aprod, self.P0, self.Msolve, self.bilinearform,
                              atol=self.atol, rtol=self.rtol, iter_max=self.iter_max,
                              initial_guess=True, verbose=False)
        elif self.verboseLevel == "medium":
            P, r, rhatr = PCG(r, self.Aprod, self.P0, self.Msolve, self.bilinearform,
                              atol=self.atol, rtol=self.rtol, iter_max=self.iter_max,
                              initial_guess=True, verbose=True)
        elif self.verboseLevel == "high":
            P = self.myPCG(self.P0, r, self.iter_max, self.rtol)
        else:
            # ROBUSTNESS: previously an unknown level fell through and crashed
            # later with a NameError on P.
            raise ValueError("unknown verboseLevel: %r" % (self.verboseLevel,))
        U = self.getGravity(P)
        pdeG = LinearSinglePDE(self.domain)
        pdeG.setSymmetryOn()
        pdeG.setValue(A=kronecker(3))
        pdeG.setValue(q=self.qtop)
        pdeG.setValue(Y=- self.beta*self.rho_0*P[3])
        optionsG = pdeG.getSolverOptions()
        optionsG.setPackage(SolverOptions.TRILINOS)
        optionsG.setSolverMethod(SolverOptions.PCG)
        optionsG.setPreconditioner(SolverOptions.AMG)
        u = pdeG.getSolution()
        # BUG FIX: the original referenced the module-level global `dom` here,
        # breaking the class for any domain other than the script's own.
        gradu = grad(u, ReducedFunction(self.domain))
        saveSilo(self.name+"_final", gravity=-U[2], grav2=-gradu[2], rho=P[3]*self.rho_0)
        print('results silo saved to '+self.name+"_final"+'.silo')
        return P[3]
########################################################################
### Input files and variables from file
# The single positional argument names a python module (importable from the
# current working directory, without ".py") whose attributes supply every
# configuration value read below.
parser = argparse.ArgumentParser(description='Gravity inversion for point data in csv format.', epilog="version 01/2021 by <EMAIL>")
parser.add_argument(dest='config', metavar='CONFIG', type=str, help='configuration file.')
args = parser.parse_args()
config = importlib.import_module(args.config)
print("Configuration "+args.config+".py imported.")
# Inversion parameters from the configuration module.
rho_0 = config.rho_0        # reference density
atol = config.atol          # absolute PCG tolerance
rtol = config.rtol          # relative PCG tolerance
pdetol = config.pdetol      # tolerance for the inner PDE solves
iter_max = config.iter_max  # PCG iteration cap
data_scale = config.data_scale  # unit scaling applied to the input data
s = config.s                # smoothing strength
a = config.a                # smoothing length scale
b = config.b                # smoothing operator parameter
# Trade-off weight derived from the smoothing parameters.
mu = 1./(8*np.pi*s*a**3)
# Observed data: gravity values, their accuracies and the observation points
# (CSV files; one row per measurement).
gz = np.loadtxt(config.gravity_data_file, delimiter=',')*data_scale
acc = np.array(np.loadtxt(config.acc_data_file, delimiter=','))*data_scale
MeasEs = np.loadtxt(config.obsPts_file, delimiter=',')
recorders = []
for bob in MeasEs:
    recorders.append((bob[0], bob[1], bob[2]))
gz = np.array(gz)
measnum = len(gz)
norm_data_sq = np.inner(gz, gz)
print(measnum)
dataWt = config.dataWt
depthWeight = config.depthWeight
# data weighting: default is uniform, normalised by the data norm; 'relative'
# weights each datum by its own magnitude, 'accuracy' by its stated accuracy.
wdsq = 1./(2.*norm_data_sq)*np.ones(measnum)
if dataWt == 'relative':
    wdsq = 1./(2.*measnum*gz**2)
if dataWt == 'accuracy':
    wdsq = np.array(1./(measnum*acc**2))
# build domain: Gmsh (.msh) or generic finley mesh, with Dirac points placed
# at the observation locations (tag e == index of the e-th recorder).
filename, file_extension = os.path.splitext(config.mesh_name)
if file_extension == ".msh":
    dom = ReadGmsh(config.mesh_name, numDim=3,
                   diracPoints=[sp for sp in recorders],
                   diracTags=[st for st in range(len(recorders))])
else:
    dom = ReadMesh(config.mesh_name, numDim=3,
                   diracPoints=[sp for sp in recorders],
                   diracTags=[st for st in range(len(recorders))])
print("Mesh read from "+config.mesh_name)
coord = dom.getX()
coord2 = ReducedFunction(dom).getX()
# depth weighting: restrict the density correction to z < 0
# (assumes the ground surface sits at z == 0 -- TODO confirm for the mesh used)
depthWt = whereNegative(coord2[2])
minZ = inf(coord[2])
gd = 0.0  # ground level used for the volume of the inversion region
xmin = inf(coord[0])
xmax = sup(coord[0])
ymin = inf(coord[1])
ymax = sup(coord[1])
gVol = (xmax-xmin)*(ymax-ymin)*(gd-minZ)
if depthWeight == "coreWt":
    coreD = config.coreD
    factor = 1.+coord2[2]/coreD*wherePositive(coord2[2]-coreD)+(1.-wherePositive(coord2[2]-coreD))
    depthWt = depthWt*(factor)
if depthWeight == "baseWt":
    # weight grows linearly with depth towards the bottom of the mesh
    depthWt = depthWt*coord2[2]/inf(coord2[2])
rho_e = Scalar(rho_0, ReducedFunction(dom))*depthWt
#saveSilo("bobish", rhoref=rho_e)
# NOTE(review): the saveSilo call above is commented out, so this message is
# misleading -- nothing was saved.
print("saved bobbish")
# zero initial guess for the 4-component solution, then run the inversion
P0 = Data(0., (4,), Solution(dom))
grav = FOSLSGravity(dom, gz=gz, recorders=recorders, rho_0=rho_e, P0=P0,
                    wdsq=wdsq, mu=mu, a=a, b=b, atol=atol, rtol=rtol,
                    iter_max=iter_max, pde_tol=pdetol, name=config.output_name,
                    verboseLevel=config.VerboseLevel)
p = grav.solve()
# summary statistics of the recovered density over the inversion volume
rho = p*rho_e
rhobar = (1./gVol)*integrate(rho)
sigma = (1./gVol)*integrate((rho-rhobar)**2)
stddev = np.sqrt(sigma)
print('Min density ', inf(rho))
print('Max density ', sup(rho))
print('mean density ', rhobar)
print('variance ', sigma)
print('stddev ', stddev)
print("finished")
| StarcoderdataPython |
1729506 | from dataclasses import dataclass
import dataclass_factory
from dataclass_factory import Schema
@dataclass
class Book:
    """Simple record used to demonstrate dataclass_factory schemas."""
    title: str  # required title
    price: int  # required price
    extra: str = ""  # optional; ignored by the `only`/`exclude` schemas below
# Sample payload; note the "extra" key that the schemas below leave out.
data = {
    "title": "Fahrenheit 451",
    "price": 100,
    "extra": "some extra string"
}
# using `only`: whitelist the fields that take part in (de)serialization.
factory = dataclass_factory.Factory(schemas={Book: Schema(only=["title", "price"])})
book: Book = factory.load(data, Book)  # Same as Book(title="Fahrenheit 451", price=100)
serialized = factory.dump(book)  # no `extra` key will be in serialized
# using `exclude`: blacklist fields instead; the result here is equivalent.
factory = dataclass_factory.Factory(schemas={Book: Schema(exclude=["extra"])})
book: Book = factory.load(data, Book)  # Same as Book(title="Fahrenheit 451", price=100)
serialized = factory.dump(book)  # no `extra` key will be in serialized
| StarcoderdataPython |
17363 | from collections import OrderedDict
from unittest import TestCase
from frozenordereddict import FrozenOrderedDict
class TestFrozenOrderedDict(TestCase):
    """FrozenOrderedDict: construction, immutability, and copy semantics."""

    ITEMS_1 = (
        ("b", 2),
        ("a", 1),
    )
    ITEMS_2 = (
        ("d", 4),
        ("c", 3),
    )
    ODICT_1 = OrderedDict(ITEMS_1)
    ODICT_2 = OrderedDict(ITEMS_2)

    def test_init_from_items(self):
        # Item order from the tuple of pairs must be preserved.
        frozen = FrozenOrderedDict(self.ITEMS_1)
        self.assertEqual(list(self.ITEMS_1), list(frozen.items()))

    def test_init_from_ordereddict(self):
        frozen = FrozenOrderedDict(self.ODICT_1)
        self.assertEqual(list(self.ITEMS_1), list(frozen.items()))

    def test_setitem(self):
        # Item assignment must be rejected: the mapping is immutable.
        with self.assertRaises(TypeError):
            fod = FrozenOrderedDict()
            fod[1] = "b"

    def test_delitem(self):
        # Item deletion must be rejected as well.
        with self.assertRaises(TypeError):
            fod = FrozenOrderedDict(self.ITEMS_1)
            del fod[1]

    def test_copy_no_items(self):
        original = FrozenOrderedDict(self.ITEMS_1)
        duplicate = original.copy()
        # A distinct object that is indistinguishable by value.
        self.assertNotEqual(id(original), id(duplicate))
        self.assertEqual(original.items(), duplicate.items())
        self.assertEqual(repr(original), repr(duplicate))
        self.assertEqual(len(original), len(duplicate))
        self.assertEqual(hash(original), hash(duplicate))

    def test_copy_tuple_items(self):
        original = FrozenOrderedDict(self.ITEMS_1)
        extended = original.copy(self.ITEMS_2)
        self.assertNotEqual(id(original), id(extended))
        self.assertEqual(list(original.items()) + list(self.ITEMS_2), list(extended.items()))

    def test_copy_ordereddict_items(self):
        original = FrozenOrderedDict(self.ITEMS_1)
        extended = original.copy(self.ODICT_2)
        self.assertNotEqual(id(original), id(extended))
        self.assertEqual(list(original.items()) + list(self.ITEMS_2), list(extended.items()))

    def test_copy_kwargs(self):
        original = FrozenOrderedDict(self.ITEMS_1)
        extended = original.copy(**self.ODICT_2)
        self.assertNotEqual(id(original), id(extended))
        self.assertEqual(dict(list(original.items()) + list(self.ODICT_2.items())), extended)
| StarcoderdataPython |
134197 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
# region VirtualHub
helps['network vhub'] = """
type: group
short-summary: Manage virtual hubs.
"""
helps['network vhub create'] = """
type: command
short-summary: Create a virtual hub.
"""
helps['network vhub list'] = """
type: command
short-summary: List virtual hubs.
"""
helps['network vhub show'] = """
type: command
short-summary: Get the details of a virtual hub.
"""
helps['network vhub update'] = """
type: command
short-summary: Update settings of a virtual hub.
"""
helps['network vhub delete'] = """
type: command
short-summary: Delete a virtual hub.
"""
helps['network vhub get-effective-routes'] = """
type: command
short-summary: Get the effective routes configured for the Virtual Hub resource or the specified resource.
examples:
- name: Get the effective routes configured for route table in the virtual hub.
text: |
az network vhub get-effective-routes --resource-type RouteTable --resource-id /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable -g MyRG -n MyHub
- name: Get the effective routes configured for P2S connection in the virtual hub.
text: |
az network vhub get-effective-routes --resource-type P2SConnection --resource-id /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/p2sVpnGateways/MyGateway/p2sConnectionConfigurations/MyConnection -g MyRG -n MyHub
"""
helps['network vhub connection'] = """
type: group
short-summary: Manage virtual hub VNet connections.
"""
helps['network vhub connection create'] = """
type: command
short-summary: Create a virtual hub VNet connection.
examples:
- name: Create a virtual hub VNet connection without routing configuration.
text: |
az network vhub connection create -n MyConnection --vhub-name MyHub -g MyRG --remote-vnet MyVNet
- name: Create a virtual hub VNet connection with routing configuration.
text: |
az network vhub connection create -n MyConnection --vhub-name MyHub -g MyRG --remote-vnet MyVNet --associated-route-table /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/RouteTable1 --propagated-route-tables /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/RouteTable1 /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/RouteTable2 --labels label1 label2 --route-name route1 --next-hop 172.16.17.32 --address-prefixes 10.80.0.0/16 10.90.0.0/16
"""
helps['network vhub connection list'] = """
type: command
short-summary: List virtual hub VNet connections.
examples:
- name: List VNet connections in a given virtual hub.
text: |
az network vhub connection list --vhub-name MyHub -g MyRG
"""
helps['network vhub connection show'] = """
type: command
short-summary: Get the details of a virtual hub VNet connection.
examples:
- name: Get the details of a virtual hub VNet connection.
text: |
az network vhub connection show -n MyConnection --vhub-name MyHub -g MyRG
"""
helps['network vhub connection delete'] = """
type: command
short-summary: Delete a virtual hub VNet connection.
examples:
- name: Delete a virtual hub VNet connection.
text: |
az network vhub connection delete -n MyConnection --vhub-name MyHub -g MyRG
"""
helps['network vhub connection wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of virtual hub VNet connection is met.
"""
# Help for the (v2) `network vhub route` sub-group (hub route table entries).
helps['network vhub route'] = """
type: group
short-summary: Manage entries in the virtual hub route table.
"""

helps['network vhub route add'] = """
type: command
short-summary: Add a route to the virtual hub route table.
"""

helps['network vhub route list'] = """
type: command
short-summary: List routes in the virtual hub route table.
"""

helps['network vhub route remove'] = """
type: command
short-summary: Remove a route from the virtual hub route table.
"""

helps['network vhub route reset'] = """
type: command
short-summary: Reset virtual hub route when the route state is failed.
"""
helps['network vhub route-table'] = """
type: group
short-summary: Manage route table in the virtual hub.
"""
helps['network vhub route-table create'] = """
type: command
short-summary: Create a route table in the virtual hub.
examples:
- name: Create a v2 route table in the virtual hub.
text: |
az network vhub route-table create -n MyRouteTable -g MyResourceGroup --vhub-name MyVhub --connections All_Vnets --destination-type CIDR --destinations "10.4.0.0/16" "10.6.0.0/16" --next-hop-type IPAddress --next-hops "10.0.0.68"
- name: Create a v3 route table in the virtual hub.
text: |
az network vhub route-table create -n MyRouteTable -g MyResourceGroup --vhub-name MyVhub --route-name MyRoute --destination-type CIDR --destinations "10.4.0.0/16" "10.6.0.0/16" --next-hop-type ResourceId --next-hop /subscriptions/MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/azureFirewalls/MyFirewall --labels label1 label2
"""
helps['network vhub route-table update'] = """
type: command
short-summary: Update a route table in the virtual hub.
examples:
- name: Update the connections for a v2 route table in the virtual hub.
text: |
az network vhub route-table update -n MyRouteTable -g MyResourceGroup --vhub-name MyVhub --connections All_Vnets All_Branches
- name: Update the labels for a v3 route table in the virtual hub.
text: |
az network vhub route-table update -n MyRouteTable -g MyResourceGroup --vhub-name MyVhub --labels label1 label2
"""
helps['network vhub route-table delete'] = """
type: command
short-summary: Delete a route table in the virtual hub.
examples:
- name: Delete a route table in the virtual hub.
text: |
az network vhub route-table delete -n MyRouteTable -g MyResourceGroup --vhub-name MyVhub
"""
helps['network vhub route-table show'] = """
type: command
short-summary: Show a route table in the virtual hub.
"""
helps['network vhub route-table list'] = """
type: command
short-summary: List all route tables in the virtual hub.
"""
helps['network vhub route-table wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the vhub route-table is met.
examples:
- name: Pause executing next line of CLI script until the route table is successfully provisioned.
text: az network vhub route-table wait -n MyRouteTable -g MyResourceGroup --vhub-name MyVhub --created
"""
helps['network vhub route-table route'] = """
type: group
short-summary: Manage routes of route table in the virtual hub.
"""
helps['network vhub route-table route add'] = """
type: command
short-summary: Add a route into route table of the virtual hub.
examples:
- name: Add a route with CIDR destination into route table of the virtual hub (route table v2).
text: |
az network vhub route-table route add -n MyRouteTable -g MyResourceGroup --vhub-name MyVhub --destination-type CIDR --destinations "10.4.0.0/16" "10.6.0.0/16" --next-hop-type IPAddress --next-hops "10.0.0.68"
- name: Add a route with Service destination into route table of the virtual hub (route table v2).
text: |
az network vhub route-table route add -n MyRouteTable -g MyResourceGroup --vhub-name MyVhub --destination-type Service --destinations Skype Sharepoint --next-hop-type IPAddress --next-hops "10.0.0.68"
- name: Add a route with firewall as next hop into route table of the virtual hub (route table v3).
text: |
az network vhub route-table route add -n MyRouteTable -g MyResourceGroup --vhub-name MyVhub --destination-type CIDR --destinations "10.4.0.0/16" "10.6.0.0/16" --next-hop-type ResourceId --next-hop /subscriptions/MySub/resourceGroups/MyResourceGroup/providers/Microsoft.Network/azureFirewalls/MyFirewall
"""
helps['network vhub route-table route list'] = """
type: command
short-summary: List routes in the virtual hub route table.
"""
helps['network vhub route-table route remove'] = """
type: command
short-summary: Remove a route from route table of the virtual hub.
"""
# endregion
# region VirtualWAN
# Help for the `network vwan` command group (virtual WAN CRUD).
helps['network vwan'] = """
type: group
short-summary: Manage virtual WANs.
"""

helps['network vwan create'] = """
type: command
short-summary: Create a virtual WAN.
"""

helps['network vwan list'] = """
type: command
short-summary: List virtual WANs.
"""

helps['network vwan show'] = """
type: command
short-summary: Get the details of a virtual WAN.
"""

helps['network vwan update'] = """
type: command
short-summary: Update settings of a virtual WAN.
"""

helps['network vwan delete'] = """
type: command
short-summary: Delete a virtual WAN.
"""
# endregion
# region VpnGateway
helps['network vpn-gateway'] = """
type: group
short-summary: Manage VPN gateways.
"""
helps['network vpn-gateway create'] = """
type: command
short-summary: Create a VPN gateway.
"""
helps['network vpn-gateway list'] = """
type: command
short-summary: List VPN gateways.
"""
helps['network vpn-gateway show'] = """
type: command
short-summary: Get the details of a VPN gateway.
"""
helps['network vpn-gateway update'] = """
type: command
short-summary: Update settings of a VPN gateway.
"""
helps['network vpn-gateway delete'] = """
type: command
short-summary: Delete a VPN gateway.
"""
helps['network vpn-gateway connection'] = """
type: group
short-summary: Manage VPN gateway connections.
"""
helps['network vpn-gateway connection create'] = """
type: command
short-summary: Create a VPN gateway connection.
examples:
- name: Create a VPN gateway connection
text: |
az network vpn-gateway connection create -g MyRG -n MyConnection --gateway-name MyGateway --remote-vpn-site /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/vpnSites/MyVPNSite --associated-route-table /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 --propagated-route-tables /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable2 --labels label1 label2
"""
helps['network vpn-gateway connection list'] = """
type: command
short-summary: List VPN gateway connections.
examples:
- name: List all connections for a given VPN gateway
text: |
az network vpn-gateway connection list -g MyRG --gateway-name MyGateway
"""
helps['network vpn-gateway connection show'] = """
type: command
short-summary: Get the details of a VPN gateway connection.
examples:
- name: Get the details of a VPN gateway connection
text: |
az network vpn-gateway connection show -g MyRG -n MyConnection --gateway-name MyGateway
"""
helps['network vpn-gateway connection delete'] = """
type: command
short-summary: Delete a VPN gateway connection.
examples:
- name: Delete a VPN gateway connection
text: |
az network vpn-gateway connection delete -g MyRG -n MyConnection --gateway-name MyGateway
"""
helps['network vpn-gateway connection wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the VPN gateway connection is met.
"""
helps['network vpn-gateway connection ipsec-policy'] = """
type: group
short-summary: Manage VPN gateway connection IPSec policies.
"""
helps['network vpn-gateway connection ipsec-policy add'] = """
type: command
short-summary: Add an IPSec policy to a VPN gateway connection.
"""
helps['network vpn-gateway connection ipsec-policy list'] = """
type: command
short-summary: List VPN gateway connection IPSec policies.
"""
helps['network vpn-gateway connection ipsec-policy remove'] = """
type: command
short-summary: Remove an IPSec policy from a VPN gateway connection.
"""
# endregion
# region VpnSite
# Help for the `network vpn-site` command group (VPN site configurations).
helps['network vpn-site'] = """
type: group
short-summary: Manage VPN site configurations.
"""

helps['network vpn-site create'] = """
type: command
short-summary: Create a VPN site configuration.
"""

helps['network vpn-site list'] = """
type: command
short-summary: List VPN site configurations.
"""

helps['network vpn-site show'] = """
type: command
short-summary: Get the details of a VPN site configuration.
"""

helps['network vpn-site update'] = """
type: command
short-summary: Update settings of a VPN site configuration.
"""

helps['network vpn-site delete'] = """
type: command
short-summary: Delete a VPN site configuration.
"""

helps['network vpn-site download'] = """
type: command
short-summary: Provide a SAS-URL to download the configuration for a VPN site.
"""
# endregion
# region VpnServerConfig
helps['network vpn-server-config'] = """
type: group
short-summary: Manage VPN server configuration.
"""
helps['network vpn-server-config create'] = """
type: command
short-summary: Create a VPN server configuration.
examples:
- name: Create a VPN server configuration with VPN auth type
text: |
az network vpn-server-config create -n MyVPNServerConfig -g MyRG --vpn-client-root-certs "ApplicationGatewayAuthCert.cer" --vpn-client-revoked-certs "ApplicationGatewayAuthCert.pem"
"""
helps['network vpn-server-config list'] = """
type: command
short-summary: List all VPN server configuration.
"""
helps['network vpn-server-config show'] = """
type: command
short-summary: Show the details of a VPN server configuration.
"""
helps['network vpn-server-config set'] = """
type: command
short-summary: Set settings of a VPN server configuration.
examples:
- name: Set a VPN server configuration with Radius auth type
text: |
az network vpn-server-config set -n MyVPNServerConfig -g MyRG --radius-client-root-certs "ApplicationGatewayAuthCert.cer" --radius-server-root-certs "ApplicationGatewayAuthCert.pem" --radius-servers address=test1 secret=clitest score=10 --radius-servers address=test2 secret=clitest score=10
"""
helps['network vpn-server-config delete'] = """
type: command
short-summary: Delete a VPN server configuration.
"""
helps['network vpn-server-config wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the VPN server configuration is met.
"""
helps['network vpn-server-config ipsec-policy'] = """
type: group
short-summary: Manage VPN server configuration IPSec policies.
"""
helps['network vpn-server-config ipsec-policy add'] = """
type: command
short-summary: Add an IPSec policy to a VPN server configuration.
"""
helps['network vpn-server-config ipsec-policy list'] = """
type: command
short-summary: List VPN server configuration IPSec policies.
"""
helps['network vpn-server-config ipsec-policy remove'] = """
type: command
short-summary: Remove an IPSec policy from a VPN server configuration.
"""
helps['network vpn-server-config ipsec-policy wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the IPSec policy of a VPN server configuration is met.
"""
# endregion
# region VpnServerConfig
helps['network p2s-vpn-gateway'] = """
type: group
short-summary: Manage point-to-site VPN gateway.
"""
helps['network p2s-vpn-gateway create'] = """
type: command
short-summary: Create a point-to-site VPN gateway.
examples:
- name: Create a point-to-site VPN gateway.
text: |
az network p2s-vpn-gateway create -g MyRG -n MyP2SVPNGateway --scale-unit 2 --vhub MyVhub --vpn-server-config MyVPNServerConfig --address-space 10.0.0.0/24 172.16.31.10/24
- name: Create a point-to-site VPN gateway with routing configuration.
text: |
az network p2s-vpn-gateway create -g MyRG -n MyP2SVPNGateway --scale-unit 2 --vhub MyVhub --vpn-server-config MyVPNServerConfig --address-space 10.0.0.0/24 172.16.31.10/24 --associated-route-table /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 --propagated-route-tables /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable1 /subscriptions/MySub/resourceGroups/MyRG/providers/Microsoft.Network/virtualHubs/MyHub/hubRouteTables/MyRouteTable2 --labels label1 label2
"""
helps['network p2s-vpn-gateway list'] = """
type: command
short-summary: List all point-to-site VPN gateway.
"""
helps['network p2s-vpn-gateway show'] = """
type: command
short-summary: Show the details of a point-to-site VPN gateway.
"""
helps['network p2s-vpn-gateway update'] = """
type: command
short-summary: Update settings of a point-to-site VPN gateway.
"""
helps['network p2s-vpn-gateway delete'] = """
type: command
short-summary: Delete a point-to-site VPN gateway.
"""
helps['network p2s-vpn-gateway wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the point-to-site VPN gateway is met.
"""
helps['network p2s-vpn-gateway connection'] = """
type: group
short-summary: Manage point-to-site VPN gateway connections.
"""
helps['network p2s-vpn-gateway connection list'] = """
type: command
short-summary: List all connections for a given point-to-site VPN gateway.
examples:
- name: List all connections for a given point-to-site VPN gateway
text: |
az network p2s-vpn-gateway connection list -g MyRG --gateway-name MyP2SVPNGateway
"""
helps['network p2s-vpn-gateway connection show'] = """
type: command
short-summary: Show the details of a point-to-site VPN gateway connection.
examples:
- name: Show the details of a point-to-site VPN gateway connection
text: |
az network p2s-vpn-gateway connection show -g MyRG -n connection --gateway-name MyP2SVPNGateway
"""
helps['network p2s-vpn-gateway vpn-client'] = """
type: group
short-summary: Download a VPN client configuration required to connect to Azure via point-to-site
"""
helps['network p2s-vpn-gateway vpn-client generate'] = """
type: command
short-summary: Generate VPN profile for P2S client of the P2SVpnGateway in the specified resource group
"""
# endregion
| StarcoderdataPython |
1691002 | """
Solution class
problemId 128
@author wanghaogang
@date 2018/6/29
"""
class Solution:
    def longestConsecutive(self, nums):
        """Return the length of the longest run of consecutive integers in nums.

        LeetCode 128. O(n) time: only numbers that start a run (i.e. ``x - 1``
        is absent) trigger the inner walk, so each element is visited O(1)
        times in total.

        :type nums: List[int]
        :rtype: int
        """
        num_set = set(nums)
        best = 0
        # Iterate the set, not the raw list: duplicates in `nums` would
        # otherwise repeat the same inner walk (the original iterated `nums`).
        for x in num_set:
            if x - 1 in num_set:
                continue  # x is inside a run, not its start
            run = 1
            while x + run in num_set:
                run += 1
            best = max(best, run)
        return best
| StarcoderdataPython |
1704375 | from django.conf.urls import url
from django.contrib import admin
from revenue.views import (
revenue_list,
revenue_detail,
)
# URL routes for the revenue app: list view at the root, detail view keyed by id.
urlpatterns = [
    #url(r'^admin/', admin.site.urls),
    url(r'^$', revenue_list, name="list"),
    # `id` accepts word characters and hyphens (slug-style identifiers).
    url(r'^(?P<id>[\w-]+)/$', revenue_detail, name="detail"),
]
3389154 | <gh_stars>1-10
from cyder.cydns.domain.tests.all import *
from cyder.cydns.soa.tests.all import *
from cyder.cydns.tests.test_models import *
from cyder.cydns.tests.test_views import *
| StarcoderdataPython |
1622414 | <gh_stars>0
"""Load data from database for training the wordchooser."""
import os
import re
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchtext
import nltk
#raise RuntimeError("Not ready yet")
class WordChooserDataset(torch.utils.data.IterableDataset):
    """Dataset of (token, selected) sequences for training the wordchooser.

    Every input file becomes one sample: a list of ``(token, selected)``
    pairs, where a token written as ``_token_`` in the file is marked as
    selected (1) and every other token as not selected (0).

    :param str filename: a file or a directory containing data files
    :param callable tokenizer: the tokenizer to use
        (default: torchtext basic_english)
    """

    def __init__(self, filename, tokenizer=None):
        self._data = []
        self.tokenizer = tokenizer
        if self.tokenizer is None:
            # torchtext is only touched when no tokenizer is supplied.
            self.tokenizer = torchtext.data.get_tokenizer("basic_english")
        if os.path.isdir(filename):
            # BUG FIX: was `os.path.listdir(filename)` (no such function) and the
            # bare entry names were passed to _load without rejoining the
            # directory path, so directory loading always failed.
            for entry in os.listdir(filename):
                path = os.path.join(filename, entry)
                if os.path.isfile(path):
                    self._load(path)
        else:
            self._load(filename)

    def _load(self, file):
        """Parse one data file into a single (token, selected) sample."""
        data = []
        with open(file) as f:
            for passage in f:
                for token in self.tokenizer(passage):
                    selected = 0
                    # A token wrapped in underscores is the "chosen" word.
                    match = re.fullmatch(r"_(.+)_", token)
                    if match:
                        token = match.group(1)
                        selected = 1
                    data.append((token, selected))
        # One sample per file (all passages concatenated), as before.
        self._data.append(data)

    def __iter__(self):
        yield from self._data

    def __len__(self):
        return len(self._data)

    def __getitem__(self, index):
        return self._data[index]
3354424 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Callable, Dict, List, Optional, Type, Union
import pandas as pd
import torch
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data import Sampler
from flash.core.data.io.classification_input import ClassificationInputMixin
from flash.core.data.io.input import DataKeys, Input, IterableInput
from flash.core.data.utilities.classification import MultiBinaryTargetFormatter, TargetFormatter
from flash.core.data.utilities.data_frame import read_csv, resolve_files, resolve_targets
from flash.core.data.utilities.paths import list_valid_files, make_dataset, PATH_TYPE
from flash.core.integrations.fiftyone.utils import FiftyOneLabelUtilities
from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, lazy_import, requires
# Optional fiftyone dependency: import lazily so fiftyone is only loaded on
# first attribute access; keep SampleCollection as a string so annotations
# still resolve when fiftyone is not installed.
if _FIFTYONE_AVAILABLE:
    fol = lazy_import("fiftyone.core.labels")
    SampleCollection = "fiftyone.core.collections.SampleCollection"
else:
    fol = None
    SampleCollection = None
# Optional pytorchvideo dependency. When it is missing, every imported name is
# bound to None so later references fail with a clear value check rather than
# a NameError.
if _PYTORCHVIDEO_AVAILABLE:
    from pytorchvideo.data.clip_sampling import ClipSampler, make_clip_sampler
    from pytorchvideo.data.encoded_video import EncodedVideo
    from pytorchvideo.data.labeled_video_dataset import LabeledVideoDataset
    from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths
else:
    # BUG FIX: the fallback previously omitted LabeledVideoPaths and
    # make_clip_sampler (causing a NameError instead of a clean failure when
    # pytorchvideo is absent). ApplyTransformToKey is kept for backward
    # compatibility even though this module never imports it.
    ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None
    LabeledVideoPaths, make_clip_sampler = None, None
def _make_clip_sampler(
    clip_sampler: Union[str, "ClipSampler"] = "random",
    clip_duration: float = 2,
    clip_sampler_kwargs: Dict[str, Any] = None,
) -> "ClipSampler":
    """Normalize optional kwargs and build a PyTorchVideo clip sampler."""
    kwargs = dict(clip_sampler_kwargs) if clip_sampler_kwargs else {}
    return make_clip_sampler(clip_sampler, clip_duration, **kwargs)
class VideoClassificationInput(IterableInput, ClassificationInputMixin):
    """Base iterable input serving video clips paired with classification targets.

    ``load_data`` wraps ``(file, target)`` pairs in a PyTorchVideo
    ``LabeledVideoDataset``; ``load_sample`` converts each clip's raw label
    into a formatted target.
    """

    def load_data(
        self,
        files: List[PATH_TYPE],
        targets: List[Any],
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
        target_formatter: Optional[TargetFormatter] = None,
    ) -> "LabeledVideoDataset":
        """Build the clip dataset and (unless predicting) register target metadata.

        Args:
            files: Video file paths.
            targets: One classification target per file.
            clip_sampler: Strategy (or the name of one) used to cut clips.
            clip_duration: Clip length in seconds.
            clip_sampler_kwargs: Extra kwargs for a clip sampler given by name.
            video_sampler: Sampler type controlling the order videos are visited.
            decode_audio: Whether to decode the audio track as well.
            decoder: Video decoding backend (e.g. ``"pyav"``).
            target_formatter: Optional formatter applied to the raw targets.

        Returns:
            The constructed ``LabeledVideoDataset``.
        """
        dataset = LabeledVideoDataset(
            LabeledVideoPaths(list(zip(files, targets))),
            _make_clip_sampler(clip_sampler, clip_duration, clip_sampler_kwargs),
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
        )
        if not self.predicting:
            # Read targets back from the dataset's internal pairing so metadata
            # matches exactly what PyTorchVideo retained (private API —
            # NOTE(review): depends on pytorchvideo internals, confirm on upgrade).
            self.load_target_metadata(
                [sample[1] for sample in dataset._labeled_videos._paths_and_labels], target_formatter=target_formatter
            )
        return dataset

    def load_sample(self, sample):
        # Replace the raw label with the formatted target expected downstream.
        sample["label"] = self.format_target(sample["label"])
        return sample
class VideoClassificationFoldersInput(VideoClassificationInput):
    """Loads a folder-per-class layout of ``mp4``/``avi`` videos."""

    def load_data(
        self,
        path: str,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
        target_formatter: Optional[TargetFormatter] = None,
    ) -> "LabeledVideoDataset":
        """Scan ``path`` for videos and defer to the base input."""
        files, targets = make_dataset(path, extensions=("mp4", "avi"))
        return super().load_data(
            files,
            targets,
            clip_sampler=clip_sampler,
            clip_duration=clip_duration,
            clip_sampler_kwargs=clip_sampler_kwargs,
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
            target_formatter=target_formatter,
        )
class VideoClassificationFilesInput(VideoClassificationInput):
    """Loads videos from an explicit path list plus a parallel target list."""

    def load_data(
        self,
        paths: List[str],
        targets: List[Any],
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
        target_formatter: Optional[TargetFormatter] = None,
    ) -> "LabeledVideoDataset":
        """Defer directly to :meth:`VideoClassificationInput.load_data`."""
        clip_options = dict(
            clip_sampler=clip_sampler,
            clip_duration=clip_duration,
            clip_sampler_kwargs=clip_sampler_kwargs,
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
            target_formatter=target_formatter,
        )
        return super().load_data(paths, targets, **clip_options)
class VideoClassificationDataFrameInput(VideoClassificationInput):
    """Loads videos and targets from columns of a ``pandas.DataFrame``."""

    def load_data(
        self,
        data_frame: pd.DataFrame,
        input_key: str,
        target_keys: Union[str, List[str]],
        root: Optional[PATH_TYPE] = None,
        resolver: Optional[Callable[[Optional[PATH_TYPE], Any], PATH_TYPE]] = None,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
        target_formatter: Optional[TargetFormatter] = None,
    ) -> "LabeledVideoDataset":
        """Resolve files from ``data_frame[input_key]`` and targets from
        ``target_keys``, then defer to the base input.

        When training with one-hot multi-label columns, the column names
        themselves become the class labels.
        """
        result = super().load_data(
            resolve_files(data_frame, input_key, root, resolver),
            resolve_targets(data_frame, target_keys),
            clip_sampler=clip_sampler,
            clip_duration=clip_duration,
            clip_sampler_kwargs=clip_sampler_kwargs,
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
            target_formatter=target_formatter,
        )
        # If we had binary multi-class targets then we also know the labels (column names)
        if (
            self.training
            and isinstance(self.target_formatter, MultiBinaryTargetFormatter)
            and isinstance(target_keys, List)
        ):
            self.labels = target_keys
        return result
class VideoClassificationCSVInput(VideoClassificationDataFrameInput):
    """Loads videos and targets described by columns of a CSV file."""

    def load_data(
        self,
        csv_file: PATH_TYPE,
        input_key: str,
        target_keys: Optional[Union[str, List[str]]] = None,
        root: Optional[PATH_TYPE] = None,
        resolver: Optional[Callable[[Optional[PATH_TYPE], Any], PATH_TYPE]] = None,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
        target_formatter: Optional[TargetFormatter] = None,
    ) -> "LabeledVideoDataset":
        """Read the CSV into a data frame and defer to the data-frame input."""
        frame = read_csv(csv_file)
        # Relative paths in the CSV resolve against the CSV's own directory
        # unless an explicit root was supplied.
        base = os.path.dirname(csv_file) if root is None else root
        return super().load_data(
            frame,
            input_key,
            target_keys,
            base,
            resolver,
            clip_sampler=clip_sampler,
            clip_duration=clip_duration,
            clip_sampler_kwargs=clip_sampler_kwargs,
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
            target_formatter=target_formatter,
        )
class VideoClassificationFiftyOneInput(VideoClassificationInput):
    """Loads videos and labels from a FiftyOne ``SampleCollection``."""

    @requires("fiftyone")
    def load_data(
        self,
        sample_collection: SampleCollection,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
        label_field: str = "ground_truth",
        target_formatter: Optional[TargetFormatter] = None,
    ) -> "LabeledVideoDataset":
        """Validate ``label_field`` holds classifications, then defer to the
        base input using each sample's ``filepath`` and its label string.
        """
        label_utilities = FiftyOneLabelUtilities(label_field, fol.Classification)
        label_utilities.validate(sample_collection)
        return super().load_data(
            sample_collection.values("filepath"),
            sample_collection.values(label_field + ".label"),
            clip_sampler=clip_sampler,
            clip_duration=clip_duration,
            clip_sampler_kwargs=clip_sampler_kwargs,
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
            target_formatter=target_formatter,
        )
class VideoClassificationPathsPredictInput(Input):
    """Predict-time input: loads one clip per video file for inference."""

    def predict_load_data(
        self,
        paths: List[str],
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        decode_audio: bool = False,
        decoder: str = "pyav",
    ) -> List[str]:
        """Filter to ``mp4``/``avi`` files and stash decoding state for
        :meth:`predict_load_sample`.
        """
        paths = list_valid_files(paths, valid_extensions=("mp4", "avi"))
        # State is carried on the instance between load_data and load_sample.
        self._clip_sampler = _make_clip_sampler(clip_sampler, clip_duration, clip_sampler_kwargs)
        self._decode_audio = decode_audio
        self._decoder = decoder
        return paths

    def predict_load_sample(self, sample: str) -> Dict[str, Any]:
        """Decode one video path into a single clip dict for prediction."""
        video = EncodedVideo.from_path(sample, decode_audio=self._decode_audio, decoder=self._decoder)
        # Clip samplers return a 5-tuple; only one clip is taken per video here.
        (
            clip_start,
            clip_end,
            clip_index,
            aug_index,
            is_last_clip,
        ) = self._clip_sampler(0.0, video.duration, None)
        loaded_clip = video.get_clip(clip_start, clip_end)
        # A clip is unusable if decoding failed outright or the requested
        # audio track is missing.
        clip_is_null = (
            loaded_clip is None or loaded_clip["video"] is None or (loaded_clip["audio"] is None and self._decode_audio)
        )
        if clip_is_null:
            raise MisconfigurationException(
                f"The provided video is too short {video.duration} to be clipped at {self._clip_sampler._clip_duration}"
            )
        frames = loaded_clip["video"]
        audio_samples = loaded_clip["audio"]
        return {
            "video": frames,
            "video_name": video.name,
            "video_index": 0,
            "clip_index": clip_index,
            "aug_index": aug_index,
            **({"audio": audio_samples} if audio_samples is not None else {}),
            DataKeys.METADATA: {"filepath": sample},
        }
class VideoClassificationDataFramePredictInput(VideoClassificationPathsPredictInput):
    """Predict input whose video paths come from a data-frame column."""

    def predict_load_data(
        self,
        data_frame: pd.DataFrame,
        input_key: str,
        root: Optional[PATH_TYPE] = None,
        resolver: Optional[Callable[[Optional[PATH_TYPE], Any], PATH_TYPE]] = None,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        decode_audio: bool = False,
        decoder: str = "pyav",
    ) -> List[str]:
        """Resolve paths from ``data_frame[input_key]`` and defer upwards."""
        resolved = resolve_files(data_frame, input_key, root, resolver)
        return super().predict_load_data(
            resolved,
            clip_sampler=clip_sampler,
            clip_duration=clip_duration,
            clip_sampler_kwargs=clip_sampler_kwargs,
            decode_audio=decode_audio,
            decoder=decoder,
        )
class VideoClassificationCSVPredictInput(VideoClassificationDataFramePredictInput):
    """Predict input whose video paths come from a column of a CSV file."""

    def predict_load_data(
        self,
        csv_file: PATH_TYPE,
        input_key: str,
        root: Optional[PATH_TYPE] = None,
        resolver: Optional[Callable[[Optional[PATH_TYPE], Any], PATH_TYPE]] = None,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        decode_audio: bool = False,
        decoder: str = "pyav",
    ) -> List[str]:
        """Read the CSV into a data frame and defer to the data-frame input."""
        frame = read_csv(csv_file)
        # Relative paths resolve against the CSV's directory unless a root is given.
        base = os.path.dirname(csv_file) if root is None else root
        return super().predict_load_data(
            frame,
            input_key,
            base,
            resolver,
            clip_sampler=clip_sampler,
            clip_duration=clip_duration,
            clip_sampler_kwargs=clip_sampler_kwargs,
            decode_audio=decode_audio,
            decoder=decoder,
        )
class VideoClassificationFiftyOnePredictInput(VideoClassificationPathsPredictInput):
    """Predict input whose video paths come from a FiftyOne sample collection."""

    @requires("fiftyone")
    def predict_load_data(
        self,
        data: SampleCollection,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        decode_audio: bool = False,
        decoder: str = "pyav",
    ) -> List[str]:
        """Extract every sample's ``filepath`` and defer upwards."""
        filepaths = data.values("filepath")
        return super().predict_load_data(
            filepaths,
            clip_sampler=clip_sampler,
            clip_duration=clip_duration,
            clip_sampler_kwargs=clip_sampler_kwargs,
            decode_audio=decode_audio,
            decoder=decoder,
        )
| StarcoderdataPython |
3340691 | from datetime import datetime
def get_current_month():
    """Return the current local month as an integer in 1..12."""
    return datetime.now().month
def get_current_year():
    """Return the current local four-digit year as an integer."""
    return datetime.now().year
| StarcoderdataPython |
1605212 | <gh_stars>1-10
import os
from fase_lib import fase
from fase_lib import fase_config
from fase_lib import fase_application
import config as notes_config
import service as notes_service
# Register the notes service with the fase framework so it can be served.
fase.Service.RegisterService(notes_service.NotesService)
# Config file paths are supplied through environment variables (raises
# KeyError at import time if either variable is unset).
notes_config.Configurate(os.environ['NOTES_CONFIG_FILENAME'])
fase_config.Configurate(os.environ['FASE_CONFIG_FILENAME'])
# Application object re-exported for the server entry point.
application = fase_application.application
| StarcoderdataPython |
1768504 | #!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START program]
"""Vehicles Routing Problem (VRP)."""
# [START import]
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
# [END import]
# [START solution_printer]
def print_solution(manager, routing, solution):
    """Print every vehicle's route and distance, then the longest route."""
    print(f'Objective: {solution.ObjectiveValue()}')
    longest = 0
    for vehicle_id in range(manager.GetNumberOfVehicles()):
        segments = [f'Route for vehicle {vehicle_id}:\n']
        total = 0
        index = routing.Start(vehicle_id)
        while not routing.IsEnd(index):
            segments.append(f' {manager.IndexToNode(index)} -> ')
            prev = index
            index = solution.Value(routing.NextVar(index))
            total += routing.GetArcCostForVehicle(prev, index, vehicle_id)
        segments.append(f'{manager.IndexToNode(index)}\n')
        segments.append(f'Distance of the route: {total}m\n')
        print(''.join(segments))
        longest = max(longest, total)
    print(f'Maximum of the route distances: {longest}m')
# [END solution_printer]
def main():
    """Build and solve a small VRP whose objective balances route lengths."""
    # Instantiate the data problem.
    # [START data]
    num_locations = 20
    num_vehicles = 5
    depot = 0
    # [END data]
    # Create the routing index manager.
    # [START index_manager]
    manager = pywrapcp.RoutingIndexManager(num_locations, num_vehicles, depot)
    # [END index_manager]
    # Create Routing Model.
    # [START routing_model]
    routing = pywrapcp.RoutingModel(manager)
    # [END routing_model]
    # Create and register a transit callback.
    # [START transit_callback]
    def distance_callback(from_index, to_index):
        # pylint: disable=unused-argument
        """Returns the distance between the two nodes."""
        # Constant cost of 1 per arc: "distance" is simply the hop count.
        return 1

    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    # [END transit_callback]
    # Define cost of each arc.
    # [START arc_cost]
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
    # [END arc_cost]
    # Add Distance constraint.
    # [START distance_constraint]
    dimension_name = 'Distance'
    routing.AddDimension(
        transit_callback_index,
        0,  # no slack
        3000,  # vehicle maximum travel distance
        True,  # start cumul to zero
        dimension_name)
    distance_dimension = routing.GetDimensionOrDie(dimension_name)
    # A large span coefficient penalizes the gap between the longest and
    # shortest routes, spreading work across vehicles.
    distance_dimension.SetGlobalSpanCostCoefficient(100)
    # [END distance_constraint]
    # Setting first solution heuristic.
    # [START parameters]
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    search_parameters.local_search_metaheuristic = (
        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
    search_parameters.log_search = True
    search_parameters.time_limit.FromSeconds(10)
    # [END parameters]
    # Solve the problem.
    # [START solve]
    solution = routing.SolveWithParameters(search_parameters)
    # [END solve]
    # Print solution on console.
    # [START print_solution]
    if solution:
        print_solution(manager, routing, solution)
    # [END print_solution]
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
# [END program]
| StarcoderdataPython |
1655602 | #
import torch
import os
import numpy
import cv2
#
def generate_triplets(bags):
    """Build (anchor, positive, negatives) triplets from labelled bags.

    For every unordered pair of bags sharing a label, emit a triplet
    ``[anchor_bag, positive_bag, negatives]`` where ``negatives`` is a list of
    6 bags (uniformly sampled, with replacement) whose label differs from the
    anchor's.

    :param bags: list of ``[bag, label]`` pairs
    :return: list of ``[anchor, positive, list-of-6-negatives]`` triplets
    """
    triplets = []
    for i in range(0, len(bags)):
        # Precompute the usable negative indices for this anchor.
        # BUG FIX: the original rejection-sampling `while` loop never
        # terminated when no bag with a different label existed.
        neg_indices = [q for q in range(0, len(bags)) if bags[q][1] != bags[i][1]]
        if not neg_indices:
            continue
        for j in range(i + 1, len(bags)):
            if bags[i][1] != bags[j][1]:  # compare labels
                continue
            # Uniform sampling over the negatives — same distribution as the
            # original rejection sampling, but guaranteed to terminate.
            negbags = [
                bags[neg_indices[numpy.random.randint(0, len(neg_indices))]][0]
                for _ in range(0, 6)
            ]
            # "Hard negatives" mode: one triplet carrying all 6 negatives
            # (the original dead `usehardnegs=False` branch has been removed).
            triplets.append([
                bags[i][0],
                bags[j][0],
                negbags
            ])
    return triplets
#
def extract_patches(img, keypoints, npix, size):
    """Warp an oriented, scaled npix x npix patch out of `img` per keypoint.

    keypoints: iterable of (x, y, scale, angle_degrees) tuples.
    Returns a stacked tensor of patches (C first), or None when fewer than
    16 keypoints produced patches.
    """
    patches = []
    for x, y, s, a in keypoints:
        # Per-pixel scale of the sampling grid relative to the keypoint size.
        s = size*s/npix
        cos = numpy.cos(a*numpy.pi/180.0)
        sin = numpy.sin(a*numpy.pi/180.0)
        # Affine map from patch coordinates to image coordinates: rotate by
        # the keypoint angle, scale, and center the npix grid on (x, y).
        M = numpy.matrix([
            [+s*cos, -s*sin, (-s*cos+s*sin)*npix/2.0 + x],
            [+s*sin, +s*cos, (-s*sin-s*cos)*npix/2.0 + y]
        ])
        # WARP_INVERSE_MAP: M maps destination (patch) pixels to source pixels.
        p = cv2.warpAffine(img, M, (npix, npix), flags=cv2.WARP_INVERSE_MAP+cv2.INTER_CUBIC+cv2.WARP_FILL_OUTLIERS)
        patches.append( torch.from_numpy(p).permute(2, 0, 1) )
    # Bags with too few patches are rejected (threshold 16 — presumably the
    # minimum bag size for training; confirm against the training code).
    if len(patches) < 16:
        return None
    else:
        return torch.stack(patches)
#
def load_keypoint_bags(imgpaths, prob):
    """Load a random subset of images and convert each into a keypoint-patch bag.

    Each image is kept with probability `prob`; for kept images, ORB keypoints
    are detected, patches are extracted, and the bag is labelled from the
    file name.
    """
    orb = cv2.ORB_create(nfeatures=512, patchSize=16)
    #surf = cv2.xfeatures2d.SURF_create(1024, 4, 2, True, False) # number of features detected per image varies drastically (in our experiments we used binary search over the hess parameter value to obtain the desired number of keypoints <- slow!)
    def get_keypoints(img):
        # Return keypoints as plain (x, y, size, angle) tuples.
        keypoints = []
        keypoints.extend(orb.detect(img, None))
        #keypoints.extend(surf.detect(img, None))
        return [(kp.pt[0], kp.pt[1], kp.size, kp.angle) for kp in keypoints]
    bags = []
    for imgpath in imgpaths:
        if numpy.random.random()<=prob:
            #print('* processing ' + imgpath.split('/')[-1])
            img = cv2.imread(imgpath)
            keypoints = get_keypoints(img)
            patches = extract_patches(img, keypoints, 32, 1.5)
            # ukbench naming: digits 7..11 of "ukbenchNNNNN.jpg" give the image
            # number; groups of 4 consecutive images share one object/label.
            label = int(imgpath.split('/')[-1][7:12])//4
            # extract_patches returns None for images with too few keypoints.
            if patches is not None:
                bags.append( [patches, label] )
    return bags
def init(folder='datasets/ukbench'):
    """Split the ukbench images into train/validation sets and return two
    zero-argument loaders producing triplets for each split.

    Labels below 300 (image number // 4) go to validation, the rest to
    training. Training samples each image with probability 0.33; validation
    uses every image.
    """
    trn = []
    vld = []
    for root, _dirs, names in os.walk(folder):
        for name in names:
            if not name.endswith('.jpg'):
                continue
            # "ukbenchNNNNN.jpg": digits 7..11 give the image number; groups
            # of 4 consecutive images share one label.
            bucket = vld if int(name[7:12])//4 < 300 else trn
            bucket.append(os.path.join(root, name))
    return (lambda: generate_triplets(load_keypoint_bags(trn, 0.33))), (lambda: generate_triplets(load_keypoint_bags(vld, 1.00)))
#
'''
a, b = init('ukbench')
import time
start = time.time()
trn = a()
print('* elapsed time: %f [s]' % (time.time()-start))
print(len(trn))
''' | StarcoderdataPython |
3338903 | urls = [
"pagecounts-20121001-000000.gz",
"pagecounts-20121001-010000.gz",
"pagecounts-20121001-020000.gz",
"pagecounts-20121001-030000.gz",
"pagecounts-20121001-040000.gz",
"pagecounts-20121001-050000.gz",
"pagecounts-20121001-060001.gz",
"pagecounts-20121001-070000.gz",
"pagecounts-20121001-080000.gz",
"pagecounts-20121001-090000.gz",
"pagecounts-20121001-100000.gz",
"pagecounts-20121001-110000.gz",
"pagecounts-20121001-120000.gz",
"pagecounts-20121001-130000.gz",
"pagecounts-20121001-140000.gz",
"pagecounts-20121001-150000.gz",
"pagecounts-20121001-160000.gz",
"pagecounts-20121001-170000.gz",
"pagecounts-20121001-180000.gz",
"pagecounts-20121001-190000.gz",
"pagecounts-20121001-200001.gz",
"pagecounts-20121001-210000.gz",
"pagecounts-20121001-220000.gz",
"pagecounts-20121001-230000.gz",
"pagecounts-20121002-000000.gz",
"pagecounts-20121002-010000.gz",
"pagecounts-20121002-020000.gz",
"pagecounts-20121002-030000.gz",
"pagecounts-20121002-040000.gz",
"pagecounts-20121002-050000.gz",
"pagecounts-20121002-060000.gz",
"pagecounts-20121002-070000.gz",
"pagecounts-20121002-080000.gz",
"pagecounts-20121002-090000.gz",
"pagecounts-20121002-100001.gz",
"pagecounts-20121002-110000.gz",
"pagecounts-20121002-120000.gz",
"pagecounts-20121002-130000.gz",
"pagecounts-20121002-140000.gz",
"pagecounts-20121002-150000.gz",
"pagecounts-20121002-160000.gz",
"pagecounts-20121002-170000.gz",
"pagecounts-20121002-180000.gz",
"pagecounts-20121002-190000.gz",
"pagecounts-20121002-200000.gz",
"pagecounts-20121002-210000.gz",
"pagecounts-20121002-220000.gz",
"pagecounts-20121002-230000.gz",
"pagecounts-20121003-000001.gz",
"pagecounts-20121003-010000.gz",
"pagecounts-20121003-020000.gz",
"pagecounts-20121003-030000.gz",
"pagecounts-20121003-040000.gz",
"pagecounts-20121003-050000.gz",
"pagecounts-20121003-060000.gz",
"pagecounts-20121003-070000.gz",
"pagecounts-20121003-080000.gz",
"pagecounts-20121003-090000.gz",
"pagecounts-20121003-100000.gz",
"pagecounts-20121003-110000.gz",
"pagecounts-20121003-120000.gz",
"pagecounts-20121003-130001.gz",
"pagecounts-20121003-140000.gz",
"pagecounts-20121003-150000.gz",
"pagecounts-20121003-160000.gz",
"pagecounts-20121003-170000.gz",
"pagecounts-20121003-180000.gz",
"pagecounts-20121003-190000.gz",
"pagecounts-20121003-200000.gz",
"pagecounts-20121003-210000.gz",
"pagecounts-20121003-220000.gz",
"pagecounts-20121003-230000.gz",
"pagecounts-20121004-000000.gz",
"pagecounts-20121004-010000.gz",
"pagecounts-20121004-020000.gz",
"pagecounts-20121004-030001.gz",
"pagecounts-20121004-040000.gz",
"pagecounts-20121004-050000.gz",
"pagecounts-20121004-060000.gz",
"pagecounts-20121004-070000.gz",
"pagecounts-20121004-080000.gz",
"pagecounts-20121004-090000.gz",
"pagecounts-20121004-100000.gz",
"pagecounts-20121004-110000.gz",
"pagecounts-20121004-120000.gz",
"pagecounts-20121004-130000.gz",
"pagecounts-20121004-140000.gz",
"pagecounts-20121004-150000.gz",
"pagecounts-20121004-160000.gz",
"pagecounts-20121004-170001.gz",
"pagecounts-20121004-180000.gz",
"pagecounts-20121004-190000.gz",
"pagecounts-20121004-200000.gz",
"pagecounts-20121004-210000.gz",
"pagecounts-20121004-220000.gz",
"pagecounts-20121004-230000.gz",
"pagecounts-20121005-000000.gz",
"pagecounts-20121005-010000.gz",
"pagecounts-20121005-020000.gz",
"pagecounts-20121005-030000.gz",
"pagecounts-20121005-040000.gz",
"pagecounts-20121005-050000.gz",
"pagecounts-20121005-060000.gz",
"pagecounts-20121005-070001.gz",
"pagecounts-20121005-080000.gz",
"pagecounts-20121005-090000.gz",
"pagecounts-20121005-100000.gz",
"pagecounts-20121005-110000.gz",
"pagecounts-20121005-120000.gz",
"pagecounts-20121005-130000.gz",
"pagecounts-20121005-140000.gz",
"pagecounts-20121005-150000.gz",
"pagecounts-20121005-160000.gz",
"pagecounts-20121005-170000.gz",
"pagecounts-20121005-180000.gz",
"pagecounts-20121005-190000.gz",
"pagecounts-20121005-200000.gz",
"pagecounts-20121005-210001.gz",
"pagecounts-20121005-220000.gz",
"pagecounts-20121005-230000.gz",
"pagecounts-20121006-000000.gz",
"pagecounts-20121006-010000.gz",
"pagecounts-20121006-020000.gz",
"pagecounts-20121006-030000.gz",
"pagecounts-20121006-040000.gz",
"pagecounts-20121006-050000.gz",
"pagecounts-20121006-060000.gz",
"pagecounts-20121006-070000.gz",
"pagecounts-20121006-080000.gz",
"pagecounts-20121006-090000.gz",
"pagecounts-20121006-100000.gz",
"pagecounts-20121006-110000.gz",
"pagecounts-20121006-120001.gz",
"pagecounts-20121006-130000.gz",
"pagecounts-20121006-140000.gz",
"pagecounts-20121006-150000.gz",
"pagecounts-20121006-160000.gz",
"pagecounts-20121006-170000.gz",
"pagecounts-20121006-180000.gz",
"pagecounts-20121006-190000.gz",
"pagecounts-20121006-200000.gz",
"pagecounts-20121006-210000.gz",
"pagecounts-20121006-220000.gz",
"pagecounts-20121006-230000.gz",
"pagecounts-20121007-000000.gz",
"pagecounts-20121007-010000.gz",
"pagecounts-20121007-020001.gz",
"pagecounts-20121007-030000.gz",
"pagecounts-20121007-040000.gz",
"pagecounts-20121007-050000.gz",
"pagecounts-20121007-060000.gz",
"pagecounts-20121007-070000.gz",
"pagecounts-20121007-080000.gz",
"pagecounts-20121007-090000.gz",
"pagecounts-20121007-100000.gz",
"pagecounts-20121007-110000.gz",
"pagecounts-20121007-120000.gz",
"pagecounts-20121007-130000.gz",
"pagecounts-20121007-140000.gz",
"pagecounts-20121007-150001.gz",
"pagecounts-20121007-160000.gz",
"pagecounts-20121007-170000.gz",
"pagecounts-20121007-180000.gz",
"pagecounts-20121007-190000.gz",
"pagecounts-20121007-200000.gz",
"pagecounts-20121007-210000.gz",
"pagecounts-20121007-220000.gz",
"pagecounts-20121007-230000.gz",
"pagecounts-20121008-000000.gz",
"pagecounts-20121008-010000.gz",
"pagecounts-20121008-020000.gz",
"pagecounts-20121008-030000.gz",
"pagecounts-20121008-040001.gz",
"pagecounts-20121008-050000.gz",
"pagecounts-20121008-060000.gz",
"pagecounts-20121008-070000.gz",
"pagecounts-20121008-080000.gz",
"pagecounts-20121008-090000.gz",
"pagecounts-20121008-100000.gz",
"pagecounts-20121008-110000.gz",
"pagecounts-20121008-120000.gz",
"pagecounts-20121008-130000.gz",
"pagecounts-20121008-140000.gz",
"pagecounts-20121008-150000.gz",
"pagecounts-20121008-160000.gz",
"pagecounts-20121008-170000.gz",
"pagecounts-20121008-180001.gz",
"pagecounts-20121008-190000.gz",
"pagecounts-20121008-200000.gz",
"pagecounts-20121008-210000.gz",
"pagecounts-20121008-220000.gz",
"pagecounts-20121008-230000.gz",
"pagecounts-20121009-000000.gz",
"pagecounts-20121009-010000.gz",
"pagecounts-20121009-020000.gz",
"pagecounts-20121009-030000.gz",
"pagecounts-20121009-040000.gz",
"pagecounts-20121009-050000.gz",
"pagecounts-20121009-060000.gz",
"pagecounts-20121009-070001.gz",
"pagecounts-20121009-080000.gz",
"pagecounts-20121009-090000.gz",
"pagecounts-20121009-100000.gz",
"pagecounts-20121009-110000.gz",
"pagecounts-20121009-120000.gz",
"pagecounts-20121009-130000.gz",
"pagecounts-20121009-140000.gz",
"pagecounts-20121009-150000.gz",
"pagecounts-20121009-160000.gz",
"pagecounts-20121009-170000.gz",
"pagecounts-20121009-180000.gz",
"pagecounts-20121009-190000.gz",
"pagecounts-20121009-200001.gz",
"pagecounts-20121009-210000.gz",
"pagecounts-20121009-220000.gz",
"pagecounts-20121009-230000.gz",
"pagecounts-20121010-000000.gz",
"pagecounts-20121010-010000.gz",
"pagecounts-20121010-020000.gz",
"pagecounts-20121010-030000.gz",
"pagecounts-20121010-040000.gz",
"pagecounts-20121010-050000.gz",
"pagecounts-20121010-060000.gz",
"pagecounts-20121010-070000.gz",
"pagecounts-20121010-080000.gz",
"pagecounts-20121010-090000.gz",
"pagecounts-20121010-100000.gz",
"pagecounts-20121010-110001.gz",
"pagecounts-20121010-120000.gz",
"pagecounts-20121010-130000.gz",
"pagecounts-20121010-140000.gz",
"pagecounts-20121010-150000.gz",
"pagecounts-20121010-160000.gz",
"pagecounts-20121010-170000.gz",
"pagecounts-20121010-180000.gz",
"pagecounts-20121010-190000.gz",
"pagecounts-20121010-200000.gz",
"pagecounts-20121010-210000.gz",
"pagecounts-20121010-220000.gz",
"pagecounts-20121010-230000.gz",
"pagecounts-20121011-000000.gz",
"pagecounts-20121011-010001.gz",
"pagecounts-20121011-020000.gz",
"pagecounts-20121011-030000.gz",
"pagecounts-20121011-040000.gz",
"pagecounts-20121011-050000.gz",
"pagecounts-20121011-060000.gz",
"pagecounts-20121011-070000.gz",
"pagecounts-20121011-080000.gz",
"pagecounts-20121011-090000.gz",
"pagecounts-20121011-100000.gz",
"pagecounts-20121011-110000.gz",
"pagecounts-20121011-120000.gz",
"pagecounts-20121011-130000.gz",
"pagecounts-20121011-140000.gz",
"pagecounts-20121011-150001.gz",
"pagecounts-20121011-160000.gz",
"pagecounts-20121011-170000.gz",
"pagecounts-20121011-180000.gz",
"pagecounts-20121011-190000.gz",
"pagecounts-20121011-200000.gz",
"pagecounts-20121011-210000.gz",
"pagecounts-20121011-220000.gz",
"pagecounts-20121011-230000.gz",
"pagecounts-20121012-000000.gz",
"pagecounts-20121012-010000.gz",
"pagecounts-20121012-020000.gz",
"pagecounts-20121012-030000.gz",
"pagecounts-20121012-040000.gz",
"pagecounts-20121012-050000.gz",
"pagecounts-20121012-060001.gz",
"pagecounts-20121012-070000.gz",
"pagecounts-20121012-080000.gz",
"pagecounts-20121012-090000.gz",
"pagecounts-20121012-100000.gz",
"pagecounts-20121012-110000.gz",
"pagecounts-20121012-120000.gz",
"pagecounts-20121012-130000.gz",
"pagecounts-20121012-140000.gz",
"pagecounts-20121012-150000.gz",
"pagecounts-20121012-160000.gz",
"pagecounts-20121012-170000.gz",
"pagecounts-20121012-180000.gz",
"pagecounts-20121012-190000.gz",
"pagecounts-20121012-200001.gz",
"pagecounts-20121012-210000.gz",
"pagecounts-20121012-220000.gz",
"pagecounts-20121012-230000.gz",
"pagecounts-20121013-000000.gz",
"pagecounts-20121013-010000.gz",
"pagecounts-20121013-020000.gz",
"pagecounts-20121013-030000.gz",
"pagecounts-20121013-040000.gz",
"pagecounts-20121013-050000.gz",
"pagecounts-20121013-060000.gz",
"pagecounts-20121013-070000.gz",
"pagecounts-20121013-080000.gz",
"pagecounts-20121013-090001.gz",
"pagecounts-20121013-100000.gz",
"pagecounts-20121013-110000.gz",
"pagecounts-20121013-120000.gz",
"pagecounts-20121013-130000.gz",
"pagecounts-20121013-140000.gz",
"pagecounts-20121013-150000.gz",
"pagecounts-20121013-160000.gz",
"pagecounts-20121013-170000.gz",
"pagecounts-20121013-180000.gz",
"pagecounts-20121013-190000.gz",
"pagecounts-20121013-200000.gz",
"pagecounts-20121013-210000.gz",
"pagecounts-20121013-220001.gz",
"pagecounts-20121013-230000.gz",
"pagecounts-20121014-000000.gz",
"pagecounts-20121014-010000.gz",
"pagecounts-20121014-020000.gz",
"pagecounts-20121014-030000.gz",
"pagecounts-20121014-040000.gz",
"pagecounts-20121014-050000.gz",
"pagecounts-20121014-060000.gz",
"pagecounts-20121014-070000.gz",
"pagecounts-20121014-080000.gz",
"pagecounts-20121014-090000.gz",
"pagecounts-20121014-100000.gz",
"pagecounts-20121014-110000.gz",
"pagecounts-20121014-120001.gz",
"pagecounts-20121014-130000.gz",
"pagecounts-20121014-140000.gz",
"pagecounts-20121014-150000.gz",
"pagecounts-20121014-160000.gz",
"pagecounts-20121014-170000.gz",
"pagecounts-20121014-180000.gz",
"pagecounts-20121014-190000.gz",
"pagecounts-20121014-200000.gz",
"pagecounts-20121014-210000.gz",
"pagecounts-20121014-220000.gz",
"pagecounts-20121014-230000.gz",
"pagecounts-20121015-000000.gz",
"pagecounts-20121015-010000.gz",
"pagecounts-20121015-020001.gz",
"pagecounts-20121015-030000.gz",
"pagecounts-20121015-040000.gz",
"pagecounts-20121015-050000.gz",
"pagecounts-20121015-060000.gz",
"pagecounts-20121015-070000.gz",
"pagecounts-20121015-080000.gz",
"pagecounts-20121015-090000.gz",
"pagecounts-20121015-100000.gz",
"pagecounts-20121015-110000.gz",
"pagecounts-20121015-120000.gz",
"pagecounts-20121015-130000.gz",
"pagecounts-20121015-140001.gz",
"pagecounts-20121015-150000.gz",
"pagecounts-20121015-160000.gz",
"pagecounts-20121015-170000.gz",
"pagecounts-20121015-180000.gz",
"pagecounts-20121015-190000.gz",
"pagecounts-20121015-200000.gz",
"pagecounts-20121015-210000.gz",
"pagecounts-20121015-220000.gz",
"pagecounts-20121015-230000.gz",
"pagecounts-20121016-000000.gz",
"pagecounts-20121016-010000.gz",
"pagecounts-20121016-020000.gz",
"pagecounts-20121016-030000.gz",
"pagecounts-20121016-040001.gz",
"pagecounts-20121016-050000.gz",
"pagecounts-20121016-060000.gz",
"pagecounts-20121016-070000.gz",
"pagecounts-20121016-080000.gz",
"pagecounts-20121016-090000.gz",
"pagecounts-20121016-100000.gz",
"pagecounts-20121016-110000.gz",
"pagecounts-20121016-120000.gz",
"pagecounts-20121016-130000.gz",
"pagecounts-20121016-140000.gz",
"pagecounts-20121016-150000.gz",
"pagecounts-20121016-160001.gz",
"pagecounts-20121016-170000.gz",
"pagecounts-20121016-180000.gz",
"pagecounts-20121016-190000.gz",
"pagecounts-20121016-200000.gz",
"pagecounts-20121016-210000.gz",
"pagecounts-20121016-220000.gz",
"pagecounts-20121016-230000.gz",
"pagecounts-20121017-000000.gz",
"pagecounts-20121017-010000.gz",
"pagecounts-20121017-020000.gz",
"pagecounts-20121017-030000.gz",
"pagecounts-20121017-040000.gz",
"pagecounts-20121017-050000.gz",
"pagecounts-20121017-060001.gz",
"pagecounts-20121017-070000.gz",
"pagecounts-20121017-080000.gz",
"pagecounts-20121017-090000.gz",
"pagecounts-20121017-100000.gz",
"pagecounts-20121017-110000.gz",
"pagecounts-20121017-120000.gz",
"pagecounts-20121017-130000.gz",
"pagecounts-20121017-140000.gz",
"pagecounts-20121017-150000.gz",
"pagecounts-20121017-160000.gz",
"pagecounts-20121017-170000.gz",
"pagecounts-20121017-180000.gz",
"pagecounts-20121017-190000.gz",
"pagecounts-20121017-200001.gz",
"pagecounts-20121017-210000.gz",
"pagecounts-20121017-220000.gz",
"pagecounts-20121017-230000.gz",
"pagecounts-20121018-000000.gz",
"pagecounts-20121018-010000.gz",
"pagecounts-20121018-020000.gz",
"pagecounts-20121018-030000.gz",
"pagecounts-20121018-040000.gz",
"pagecounts-20121018-050000.gz",
"pagecounts-20121018-060000.gz",
"pagecounts-20121018-070000.gz",
"pagecounts-20121018-080000.gz",
"pagecounts-20121018-090000.gz",
"pagecounts-20121018-100001.gz",
"pagecounts-20121018-110000.gz",
"pagecounts-20121018-120000.gz",
"pagecounts-20121018-130000.gz",
"pagecounts-20121018-140000.gz",
"pagecounts-20121018-150000.gz",
"pagecounts-20121018-160000.gz",
"pagecounts-20121018-170000.gz",
"pagecounts-20121018-180000.gz",
"pagecounts-20121018-190000.gz",
"pagecounts-20121018-200000.gz",
"pagecounts-20121018-210000.gz",
"pagecounts-20121018-220000.gz",
"pagecounts-20121018-230000.gz",
"pagecounts-20121019-000001.gz",
"pagecounts-20121019-010000.gz",
"pagecounts-20121019-020000.gz",
"pagecounts-20121019-030000.gz",
"pagecounts-20121019-040000.gz",
"pagecounts-20121019-050000.gz",
"pagecounts-20121019-060000.gz",
"pagecounts-20121019-070000.gz",
"pagecounts-20121019-080000.gz",
"pagecounts-20121019-090000.gz",
"pagecounts-20121019-100000.gz",
"pagecounts-20121019-110000.gz",
"pagecounts-20121019-120000.gz",
"pagecounts-20121019-130000.gz",
"pagecounts-20121019-140001.gz",
"pagecounts-20121019-150000.gz",
"pagecounts-20121019-160000.gz",
"pagecounts-20121019-170000.gz",
"pagecounts-20121019-180000.gz",
"pagecounts-20121019-190000.gz",
"pagecounts-20121019-200000.gz",
"pagecounts-20121019-210000.gz",
"pagecounts-20121019-220000.gz",
"pagecounts-20121019-230000.gz",
"pagecounts-20121020-000000.gz",
"pagecounts-20121020-010000.gz",
"pagecounts-20121020-020000.gz",
"pagecounts-20121020-030000.gz",
"pagecounts-20121020-040001.gz",
"pagecounts-20121020-050000.gz",
"pagecounts-20121020-060000.gz",
"pagecounts-20121020-070000.gz",
"pagecounts-20121020-080000.gz",
"pagecounts-20121020-090000.gz",
"pagecounts-20121020-100000.gz",
"pagecounts-20121020-110000.gz",
"pagecounts-20121020-120000.gz",
"pagecounts-20121020-130000.gz",
"pagecounts-20121020-140000.gz",
"pagecounts-20121020-150000.gz",
"pagecounts-20121020-160000.gz",
"pagecounts-20121020-170000.gz",
"pagecounts-20121020-180001.gz",
"pagecounts-20121020-190000.gz",
"pagecounts-20121020-200000.gz",
"pagecounts-20121020-210000.gz",
"pagecounts-20121020-220000.gz",
"pagecounts-20121020-230000.gz",
"pagecounts-20121021-000000.gz",
"pagecounts-20121021-010000.gz",
"pagecounts-20121021-020000.gz",
"pagecounts-20121021-030000.gz",
"pagecounts-20121021-040000.gz",
"pagecounts-20121021-050000.gz",
"pagecounts-20121021-060000.gz",
"pagecounts-20121021-070000.gz",
"pagecounts-20121021-080001.gz",
"pagecounts-20121021-090000.gz",
"pagecounts-20121021-100000.gz",
"pagecounts-20121021-110000.gz",
"pagecounts-20121021-120000.gz",
"pagecounts-20121021-130000.gz",
"pagecounts-20121021-140000.gz",
"pagecounts-20121021-150000.gz",
"pagecounts-20121021-160000.gz",
"pagecounts-20121021-170000.gz",
"pagecounts-20121021-180000.gz",
"pagecounts-20121021-190000.gz",
"pagecounts-20121021-200000.gz",
"pagecounts-20121021-210000.gz",
"pagecounts-20121021-220001.gz",
"pagecounts-20121021-230000.gz",
"pagecounts-20121022-000000.gz",
"pagecounts-20121022-010000.gz",
"pagecounts-20121022-020000.gz",
"pagecounts-20121022-030000.gz",
"pagecounts-20121022-040000.gz",
"pagecounts-20121022-050000.gz",
"pagecounts-20121022-060000.gz",
"pagecounts-20121022-070000.gz",
"pagecounts-20121022-080000.gz",
"pagecounts-20121022-090000.gz",
"pagecounts-20121022-100000.gz",
"pagecounts-20121022-110001.gz",
"pagecounts-20121022-120000.gz",
"pagecounts-20121022-130000.gz",
"pagecounts-20121022-140000.gz",
"pagecounts-20121022-150000.gz",
"pagecounts-20121022-160000.gz",
"pagecounts-20121022-170000.gz",
"pagecounts-20121022-180000.gz",
"pagecounts-20121022-190000.gz",
"pagecounts-20121022-200000.gz",
"pagecounts-20121022-210000.gz",
"pagecounts-20121022-220000.gz",
"pagecounts-20121022-230000.gz",
"pagecounts-20121023-000001.gz",
"pagecounts-20121023-010000.gz",
"pagecounts-20121023-020000.gz",
"pagecounts-20121023-030000.gz",
"pagecounts-20121023-040000.gz",
"pagecounts-20121023-050000.gz",
"pagecounts-20121023-060000.gz",
"pagecounts-20121023-070000.gz",
"pagecounts-20121023-080000.gz",
"pagecounts-20121023-090000.gz",
"pagecounts-20121023-100000.gz",
"pagecounts-20121023-110000.gz",
"pagecounts-20121023-120000.gz",
"pagecounts-20121023-130000.gz",
"pagecounts-20121023-140000.gz",
"pagecounts-20121023-150001.gz",
"pagecounts-20121023-160000.gz",
"pagecounts-20121023-170000.gz",
"pagecounts-20121023-180000.gz",
"pagecounts-20121023-190000.gz",
"pagecounts-20121023-200000.gz",
"pagecounts-20121023-210000.gz",
"pagecounts-20121023-220000.gz",
"pagecounts-20121023-230000.gz",
"pagecounts-20121024-000000.gz",
"pagecounts-20121024-010000.gz",
"pagecounts-20121024-020000.gz",
"pagecounts-20121024-030001.gz",
"pagecounts-20121024-040000.gz",
"pagecounts-20121024-050000.gz",
"pagecounts-20121024-060000.gz",
"pagecounts-20121024-070000.gz",
"pagecounts-20121024-080000.gz",
"pagecounts-20121024-090000.gz",
"pagecounts-20121024-100000.gz",
"pagecounts-20121024-110000.gz",
"pagecounts-20121024-120000.gz",
"pagecounts-20121024-130000.gz",
"pagecounts-20121024-140000.gz",
"pagecounts-20121024-150000.gz",
"pagecounts-20121024-160001.gz",
"pagecounts-20121024-170000.gz",
"pagecounts-20121024-180000.gz",
"pagecounts-20121024-190000.gz",
"pagecounts-20121024-200000.gz",
"pagecounts-20121024-210000.gz",
"pagecounts-20121024-220000.gz",
"pagecounts-20121024-230000.gz",
"pagecounts-20121025-000000.gz",
"pagecounts-20121025-010000.gz",
"pagecounts-20121025-020000.gz",
"pagecounts-20121025-030000.gz",
"pagecounts-20121025-040000.gz",
"pagecounts-20121025-050001.gz",
"pagecounts-20121025-060000.gz",
"pagecounts-20121025-070000.gz",
"pagecounts-20121025-080000.gz",
"pagecounts-20121025-090000.gz",
"pagecounts-20121025-100000.gz",
"pagecounts-20121025-110000.gz",
"pagecounts-20121025-120000.gz",
"pagecounts-20121025-130000.gz",
"pagecounts-20121025-140000.gz",
"pagecounts-20121025-150000.gz",
"pagecounts-20121025-160000.gz",
"pagecounts-20121025-170001.gz",
"pagecounts-20121025-180000.gz",
"pagecounts-20121025-190000.gz",
"pagecounts-20121025-200000.gz",
"pagecounts-20121025-210000.gz",
"pagecounts-20121025-220000.gz",
"pagecounts-20121025-230000.gz",
"pagecounts-20121026-000000.gz",
"pagecounts-20121026-010000.gz",
"pagecounts-20121026-020000.gz",
"pagecounts-20121026-030000.gz",
"pagecounts-20121026-040000.gz",
"pagecounts-20121026-050000.gz",
"pagecounts-20121026-060000.gz",
"pagecounts-20121026-070001.gz",
"pagecounts-20121026-080000.gz",
"pagecounts-20121026-090000.gz",
"pagecounts-20121026-100000.gz",
"pagecounts-20121026-110000.gz",
"pagecounts-20121026-120000.gz",
"pagecounts-20121026-130000.gz",
"pagecounts-20121026-140000.gz",
"pagecounts-20121026-150000.gz",
"pagecounts-20121026-160000.gz",
"pagecounts-20121026-170000.gz",
"pagecounts-20121026-180000.gz",
"pagecounts-20121026-190000.gz",
"pagecounts-20121026-200001.gz",
"pagecounts-20121026-210000.gz",
"pagecounts-20121026-220000.gz",
"pagecounts-20121026-230000.gz",
"pagecounts-20121027-000000.gz",
"pagecounts-20121027-010000.gz",
"pagecounts-20121027-020000.gz",
"pagecounts-20121027-030000.gz",
"pagecounts-20121027-040000.gz",
"pagecounts-20121027-050000.gz",
"pagecounts-20121027-060000.gz",
"pagecounts-20121027-070000.gz",
"pagecounts-20121027-080000.gz",
"pagecounts-20121027-090001.gz",
"pagecounts-20121027-100000.gz",
"pagecounts-20121027-110000.gz",
"pagecounts-20121027-120000.gz",
"pagecounts-20121027-130000.gz",
"pagecounts-20121027-140000.gz",
"pagecounts-20121027-150000.gz",
"pagecounts-20121027-160000.gz",
"pagecounts-20121027-170000.gz",
"pagecounts-20121027-180000.gz",
"pagecounts-20121027-190000.gz",
"pagecounts-20121027-200000.gz",
"pagecounts-20121027-210000.gz",
"pagecounts-20121027-220001.gz",
"pagecounts-20121027-230000.gz",
"pagecounts-20121028-000000.gz",
"pagecounts-20121028-010000.gz",
"pagecounts-20121028-020000.gz",
"pagecounts-20121028-030000.gz",
"pagecounts-20121028-040000.gz",
"pagecounts-20121028-050000.gz",
"pagecounts-20121028-060000.gz",
"pagecounts-20121028-070000.gz",
"pagecounts-20121028-080000.gz",
"pagecounts-20121028-090000.gz",
"pagecounts-20121028-100000.gz",
"pagecounts-20121028-110000.gz",
"pagecounts-20121028-120001.gz",
"pagecounts-20121028-130000.gz",
"pagecounts-20121028-140000.gz",
"pagecounts-20121028-150000.gz",
"pagecounts-20121028-160000.gz",
"pagecounts-20121028-170000.gz",
"pagecounts-20121028-180000.gz",
"pagecounts-20121028-190000.gz",
"pagecounts-20121028-200000.gz",
"pagecounts-20121028-210000.gz",
"pagecounts-20121028-220000.gz",
"pagecounts-20121028-230000.gz",
"pagecounts-20121029-000000.gz",
"pagecounts-20121029-010001.gz",
"pagecounts-20121029-020000.gz",
"pagecounts-20121029-030000.gz",
"pagecounts-20121029-040000.gz",
"pagecounts-20121029-050000.gz",
"pagecounts-20121029-060000.gz",
"pagecounts-20121029-070000.gz",
"pagecounts-20121029-080000.gz",
"pagecounts-20121029-090000.gz",
"pagecounts-20121029-100000.gz",
"pagecounts-20121029-110000.gz",
"pagecounts-20121029-120000.gz",
"pagecounts-20121029-130000.gz",
"pagecounts-20121029-140000.gz",
"pagecounts-20121029-150001.gz",
"pagecounts-20121029-160000.gz",
"pagecounts-20121029-170000.gz",
"pagecounts-20121029-180000.gz",
"pagecounts-20121029-190000.gz",
"pagecounts-20121029-200000.gz",
"pagecounts-20121029-210000.gz",
"pagecounts-20121029-220000.gz",
"pagecounts-20121029-230000.gz",
"pagecounts-20121030-000000.gz",
"pagecounts-20121030-010000.gz",
"pagecounts-20121030-020000.gz",
"pagecounts-20121030-030000.gz",
"pagecounts-20121030-040001.gz",
"pagecounts-20121030-050000.gz",
"pagecounts-20121030-060000.gz",
"pagecounts-20121030-070000.gz",
"pagecounts-20121030-080000.gz",
"pagecounts-20121030-090000.gz",
"pagecounts-20121030-100000.gz",
"pagecounts-20121030-110000.gz",
"pagecounts-20121030-120000.gz",
"pagecounts-20121030-130000.gz",
"pagecounts-20121030-140000.gz",
"pagecounts-20121030-150000.gz",
"pagecounts-20121030-160000.gz",
"pagecounts-20121030-170001.gz",
"pagecounts-20121030-180000.gz",
"pagecounts-20121030-190000.gz",
"pagecounts-20121030-200000.gz",
"pagecounts-20121030-210000.gz",
"pagecounts-20121030-220000.gz",
"pagecounts-20121030-230000.gz",
"pagecounts-20121031-000000.gz",
"pagecounts-20121031-010000.gz",
"pagecounts-20121031-020000.gz",
"pagecounts-20121031-030000.gz",
"pagecounts-20121031-040000.gz",
"pagecounts-20121031-050000.gz",
"pagecounts-20121031-060001.gz",
"pagecounts-20121031-070000.gz",
"pagecounts-20121031-080000.gz",
"pagecounts-20121031-090000.gz",
"pagecounts-20121031-100000.gz",
"pagecounts-20121031-110000.gz",
"pagecounts-20121031-120000.gz",
"pagecounts-20121031-130000.gz",
"pagecounts-20121031-140000.gz",
"pagecounts-20121031-150000.gz",
"pagecounts-20121031-160000.gz",
"pagecounts-20121031-170000.gz",
"pagecounts-20121031-180000.gz",
"pagecounts-20121031-190001.gz",
"pagecounts-20121031-200000.gz",
"pagecounts-20121031-210000.gz",
"pagecounts-20121031-220000.gz",
"pagecounts-20121031-230000.gz",
]
# Download the October 2012 Wikimedia hourly pagecount dumps listed in the
# `urls` list above into the current working directory via curl.
import os

base = "http://dumps.wikimedia.org/other/pagecounts-raw/"
tail = "2012/2012-10/"
for i, url in enumerate(urls):
    # Progress line before each fetch; fixes the old "completeted" typo and
    # replaces the hand-maintained counter with enumerate().
    print("%d completed of %d total. %d remaining" % (i, len(urls), len(urls) - i))
    # Shell out to curl rather than urllib to keep the script dependency-free.
    os.system("curl -O %s" % (base + tail + url))
| StarcoderdataPython |
4836988 | from openerp.osv import fields, osv
class stock_move(osv.Model):
    """Extend stock.move with a product thumbnail field for the move views."""
    _name = 'stock.move'
    _inherit = 'stock.move'

    def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False,
                            loc_dest_id=False, partner_id=False):
        # Delegate to the base onchange, then push the selected product's
        # thumbnail into the returned values so the move line displays it.
        res_prod = super(stock_move, self).onchange_product_id(cr, uid, ids, prod_id, loc_id, loc_dest_id, partner_id)
        prod_obj = self.pool.get('product.product')
        obj = prod_obj.browse(cr, uid, prod_id)
        res_prod['value'].update({'image_small': obj.image_small})
        return res_prod

    _columns = {
        # thumbnail copied over from product.product on product change
        'image_small': fields.binary('Product Image'),
    }
stock_move()  # legacy OpenERP model registration
class sale_order_line(osv.Model):
    """Extend sale.order.line with the product's small image."""
    _name = 'sale.order.line'
    _inherit = 'sale.order.line'
    _columns = {
        # thumbnail shown on the order line; filled by product_id_change
        'image_small': fields.binary('Product Image'),
    }

    def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
                          uom=False, qty_uos=0, uos=False, name='', partner_id=False,
                          lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, image_small=False, context=None):
        # `image_small` is accepted so the view can pass the extra field
        # through the onchange signature; it is not used in this override.
        context = context or {}
        res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
            uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
            lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
        product_obj = self.pool.get('product.product')
        product_obj = product_obj.browse(cr, uid, product, context=context)
        # Copy the product thumbnail onto the line; False when absent.
        res['value'].update({'image_small': product_obj.image_small or False})
        return res
sale_order_line()  # legacy OpenERP model registration
class sale_order(osv.Model):
    """Extend sale.order so generated stock moves carry the line's image."""
    _name = 'sale.order'
    _inherit = 'sale.order'

    def _prepare_order_line_move(self, cr, uid, order, line, picking_id, date_planned, context=None):
        # The base method builds the stock.move values dict; append the
        # thumbnail from the sale order line before it is created.
        res = super(sale_order, self)._prepare_order_line_move(cr, uid, order=order, line=line, picking_id=picking_id, date_planned=date_planned, context=context)
        res['image_small'] = line.image_small
        return res
sale_order()  # legacy OpenERP model registration
| StarcoderdataPython |
3201109 | <filename>scrapper/web_scrapper.py
from bs4 import BeautifulSoup
import requests
import re
from math import ceil
# Accumulates scraped results as {quote_text: author_name}; shared and
# mutated by the scraping helpers below.
all_quotes = {}
# NOTE(review): global_countr appears unused anywhere in this module —
# confirm before removing.
global_countr = 1
def clean_quote(quote_uncleaned):
    """Normalise a raw goodreads quote string.

    Strips newlines, collapses runs of spaces, removes the decorative
    opening quote mark (with its leading space), removes each closing
    quote mark together with the character before it, and trims the
    surrounding whitespace.
    """
    text = quote_uncleaned
    # NOTE(review): in the '.”' pattern the dot is a regex wildcard, so the
    # character preceding every closing mark is removed too — confirm this
    # is the intended behaviour.
    for pattern, replacement in (('\n', ''), (' +', ' '), (' “', ''), ('.”', '')):
        text = re.sub(pattern, replacement, text)
    return text.strip()
def clean_author(author_txt):
    """Extract and tidy the author name from goodreads attribution text.

    The name is everything before the first comma; newlines are dropped,
    runs of spaces collapsed, and surrounding whitespace trimmed.
    """
    name_part, _, _ = author_txt.partition(',')
    without_newlines = re.sub('\n+', '', name_part)
    single_spaced = re.sub(' +', ' ', without_newlines)
    return single_spaced.strip()
def clean_quote_and_author(quote_txt):
    """Split a raw goodreads entry on the em-dash '―' and return a
    two-element list: [cleaned_quote, cleaned_author]."""
    # Text before '―' is the quote; text after it is the author line.
    parts = quote_txt.split('―')
    return [clean_quote(parts[0]), clean_author(parts[1])]
def makeSoup(url):
    """Fetch *url* over HTTP and return the parsed BeautifulSoup document.

    Uses the stdlib 'html.parser' backend so no extra parser dependency
    is required.
    """
    resp_by_category = requests.get(url)
    htmlContent = resp_by_category.content
    soup = BeautifulSoup(htmlContent, 'html.parser')
    return soup
def get_cloud_quotes_quantity(category):
    """Return (total_quote_count, soup) for a goodreads quote category.

    Fetches page 1 of the category and parses the trailing total out of the
    "smallText" summary span. Returns (0, soup) when the span's text is
    empty (unknown category).
    """
    soup = makeSoup(f'https://www.goodreads.com/quotes/tag/{category}?page=1&utf8=%E2%9C%93')
    res = soup.find('span', class_='smallText')
    results = res.text
    if results:
        # Walk backwards from the end of the summary text, collecting the
        # digits of the trailing total, skipping thousands separators and
        # stopping at the first space. The lower bound of 8 keeps the scan
        # out of the leading prefix of the summary string.
        tmp_str_val = ''
        for indx in range(len(results) - 1, 8, -1):
            if results[indx] == ' ':
                break
            if results[indx] == ',':
                continue
            tmp_str_val += results[indx]
        # BUG FIX: the characters were gathered last-digit-first, so they
        # must be reversed before conversion (previously a total such as
        # "2,647" parsed as 7462).
        final_extracted_val = int(tmp_str_val[::-1])
        return final_extracted_val, soup
    else:
        return 0, soup
def scrap_single_page_data(soup, quantity, total_quotes_to_scrap):
    """Scrape up to *quantity* quotes from one results page into the
    module-level `all_quotes` dict and print overall progress.

    soup -- parsed goodreads results page
    quantity -- maximum quotes to take from this page (a page holds up to 30)
    total_quotes_to_scrap -- overall target, used only for the progress line
    """
    countr = 1
    all_quotes_div = soup.findAll('div', class_='quoteText')
    for quote_div in all_quotes_div:
        quote_txt = quote_div.text
        quot_and_author = clean_quote_and_author(quote_txt)
        quote = quot_and_author[0]
        author = quot_and_author[1]
        # Dict keyed by quote text, so duplicate quotes collapse silently.
        all_quotes[quote] = author
        if countr == quantity:
            break
        countr += 1
    # Progress is measured against everything collected so far, not just
    # this page.
    process_unformatted = (len(all_quotes) / total_quotes_to_scrap) * 100
    process_round_off = round(process_unformatted, 2)
    print(f"Processing....{process_round_off}%")
def scrap_multi_page_data(no_of_pages, category, quantity):
    """Scrape *quantity* quotes for *category* across *no_of_pages* result
    pages, accumulating into the module-level `all_quotes` dict."""
    global all_quotes
    # NOTE(review): because duplicate quotes collapse in the dict,
    # len(all_quotes) may never reach `quantity`, making this loop re-fetch
    # the same pages indefinitely — confirm termination is guaranteed.
    while len(all_quotes) < quantity:
        for page_no in range(1, no_of_pages + 1):
            soup = makeSoup(f'https://www.goodreads.com/quotes/tag/{category}?page={page_no}&utf8=%E2%9C%93')
            if page_no != no_of_pages:
                # Full page: take all 30 quotes.
                scrap_single_page_data(soup, 30, quantity)
            else:
                # Last page: take only the remainder.
                # NOTE(review): when quantity is an exact multiple of 30 this
                # is 0, and scrap_single_page_data's break condition never
                # fires, so the whole page is taken — confirm intended.
                last_page_quantity = quantity % 30
                scrap_single_page_data(soup, last_page_quantity, quantity)
def get_quotes(category, quantity):
    """Validate the request and scrape *quantity* quotes for *category*.

    Prints an error message when quantity < 1, when the category yields no
    results, or when fewer quotes exist than requested; otherwise fills the
    module-level `all_quotes` dict via the page scrapers.
    """
    if quantity < 1:
        print('please enter a greater number than 0')
        return
    cloud_quotes_quantity, soup = get_cloud_quotes_quantity(category)
    if cloud_quotes_quantity <= 0:
        print(f"Sorry! we can't find any data for that Query. \nPlease Enter proper category like 'life', 'water',etc.,")
    elif cloud_quotes_quantity < quantity:
        print(f"Sorry, we can't find that much quantity of results\nwe only have {cloud_quotes_quantity} results.")
    else:
        if quantity <= 30:
            # Everything fits on the first page, which was already fetched.
            scrap_single_page_data(soup, quantity, quantity)
        elif quantity > 30:
            res = quantity
            no_of_pages = ceil(res / 30)
            scrap_multi_page_data(no_of_pages, category, quantity)
def scrap_quotes(category, quantity):
    """Public entry point: scrape *quantity* quotes for *category*.

    Returns the module-level `all_quotes` dict ({quote: author}); prints
    'No Quote Found' when the scrape produced nothing.
    """
    get_quotes(category, quantity)
    # BUG FIX: the original tested `if get_quotes:` — the function object,
    # which is always truthy — so the empty-result message could never be
    # printed. Test the scraped results instead.
    if not all_quotes:
        print('No Quote Found')
    return all_quotes
| StarcoderdataPython |
3367343 | <filename>Numbers/change.py<gh_stars>10-100
#!/usr/bin/env python3
# Change Calculator
# Calculates the change for US dollar
# Prints out the type of bills and coins
# that needs to be given to the customer
def changeCoins(change):
    """Print the coin breakdown (quarters, dimes, nickels, pennies) for the
    fractional-dollar part of *change*.

    change -- amount owed in dollars, e.g. 4.29 -> 29 cents of coins
    """
    p = 0  # pennies (0.01)
    n = 0  # nickels (0.05)
    d = 0  # dimes (0.10)
    q = 0  # quarters (0.25)
    # BUG FIX: the original truncated with int(), so binary floating-point
    # error such as (4.29 - 4) * 100 == 28.999... yielded 28 cents instead
    # of 29; rounding to the nearest cent recovers the intended value. The
    # local was also renamed from `changeCoins`, which shadowed this
    # function's own name.
    cents = int(round((change - int(change)) * 100))
    if cents >= 25:
        q = cents // 25
        cents -= q * 25
    if cents >= 10:
        d = cents // 10
        cents -= d * 10
    if cents >= 5:
        n = cents // 5
        cents -= n * 5
    if cents >= 1:
        p = cents
    print('\nPlease give the customer the following coins\n'
          'Quarters:', q, 'Dimes:', d, 'Nickels:', n, 'Pennies: ', p)
def changeBill(change):
    """Print the bill breakdown (hundreds, fifties, twenties, tens, ones)
    for the whole-dollar part of *change*."""
    remaining = int(change)
    # Greedy breakdown, largest denomination first.
    counts = {100: 0, 50: 0, 20: 0, 10: 0, 1: 0}
    for denomination in (100, 50, 20, 10, 1):
        if remaining >= denomination:
            counts[denomination] = int(remaining / denomination)
            remaining -= counts[denomination] * denomination
    print('\nPlease give the customer the following bills:\n',
          'Hundreds: ', counts[100], ' Fifties: ', counts[50],
          ' Twenties: ', counts[20], ' Tens: ', counts[10], ' Ones: ', counts[1], '\n', sep='')
def changeCalc(cost, money):
    """Compare payment to cost: either ask for the shortfall or print the
    change due and delegate the bill/coin breakdown.

    cost -- purchase price in dollars
    money -- amount tendered in dollars
    """
    if money < cost:
        shortfall = cost - money
        # BUG FIX: the format string was passed alongside the amount as a
        # second print() argument instead of being %-interpolated, printing
        # the literal '%.2f' placeholder.
        print('\nPlease pay %.2f$ more\n' % shortfall)
    else:
        change = money - cost
        print('\nThe change is: %.2f$\n' % change)
        changeBill(change)
        changeCoins(change)
def main():
    """Prompt for purchase cost and payment, then print the change breakdown."""
    # Round to cents up front since the inputs are dollar amounts.
    cost = round(float(input('Enter the cost of the purchase: ')), 2)
    money = round(float(input('Enter the money given: ')), 2)
    changeCalc(cost, money)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
# Re-export the solver front-ends at package level.
from satsolver import SatSolver
from dimacs import Glucose, RSat

# CryptoMiniSat bindings are optional; the import is skipped silently when
# the extension module is not installed, so CryptoMiniSat may be undefined.
try:
    from cryptominisat import CryptoMiniSat
except ImportError:
    pass
| StarcoderdataPython |
3331740 | # app/robo_advisor.py
import requests
import dotenv
import json
import datetime
import csv
import os
from dotenv import load_dotenv
import plotly.graph_objects as go
import operator
# LOAD .ENV ----------------------------------------------------------------------
# Read credentials from a local .env file so the key is never hard-coded;
# api_key is None when ALPHAVANTAGE_API_KEY is unset.
load_dotenv()
api_key = os.environ.get('ALPHAVANTAGE_API_KEY')
# FUNCTIONS ----------------------------------------------------------------------
def to_usd(my_price):
    """Format a numeric dollar amount as a USD display string.

    Example: to_usd(4000.444444) -> '$4,000.44'
    """
    # Thousands separator plus two decimal places, then the currency sign.
    formatted = format(my_price, ',.2f')
    return '$' + formatted
def date_suffix(dt_for_suf):
    """Return the ordinal suffix ('st'/'nd'/'rd'/'th') for the day of the
    month of *dt_for_suf* (any object exposing a .day attribute)."""
    day = dt_for_suf.day
    # Days 4-20 and 24-30 all take 'th' (covers 11th/12th/13th etc.).
    if 4 <= day <= 20 or 24 <= day <= 30:
        return 'th'
    # Remaining days end in 1, 2 or 3.
    return ('st', 'nd', 'rd')[day % 10 - 1]
def hasnum(ticker_input_str):
    """Return True if the string contains at least one decimal digit."""
    for character in ticker_input_str:
        if character.isdigit():
            return True
    return False
# REQUEST API DATA ----------------------------------------------------------------
## TICKER INPUT AND VALIDATION ----------------------------------------------------
failed_tickers = []
init_tk_str = os.environ.get('INIT_TICKER_LIST')
working_tk = init_tk_str.split(',')
initial_tickers = [str(t).strip() for t in working_tk]
for t in initial_tickers:
if hasnum(t) == True:
failed_tickers.append({'ticker':t.upper(),'err_type':'Discarded from ticker list for presence of invalid numeric characters'})
initial_tickers = [t for t in initial_tickers if hasnum(t) == False]
initial_tickers = [t for t in initial_tickers if t != '']
if len(initial_tickers) > 0:
print('ROBO ADVISOR IS INITIALIZED WITH THE FOLLOWING TICKER(S):')
for t in initial_tickers:
print(f"---{t}")
if len(failed_tickers) > 0:
print("-------------------------")
print('The following initialized tickers were discarded for invalid numeric characters:')
for ft in failed_tickers:
print(f"---{ft['ticker'].upper()}")
print("-------------------------")
add_tick_yn = input('Would you like to add more tickers? [y/n]')
while str(add_tick_yn).lower() not in ["y","n"]:
add_tick_yn=input("Response not recognized. Please respond with 'y' for yes or 'n' for no.\nWould you like to add more tickers? [y/n]")
if str(add_tick_yn).lower() == "n":
raw_input_tickers = initial_tickers
else:
add_tick = input('Enter tickers (separated by comma if more than one - e.g. MSFT,IBM):')
working_add_tick = str(add_tick).split(',')
fin_add_tick = [str(t).strip() for t in working_add_tick]
for t in fin_add_tick:
if hasnum(t) == True:
failed_tickers.append({'ticker': t.upper(), 'err_type': 'Discarded from ticker list for presence of invalid numeric characters'})
fin_add_tick = [t for t in fin_add_tick if hasnum(t) == False]
fin_add_tick = [t for t in fin_add_tick if t != '']
raw_input_tickers = initial_tickers
for t in fin_add_tick:
raw_input_tickers.append(t)
else:
add_tick = input('Enter tickers (separated by comma if more than one - e.g. MSFT,IBM):')
working_add_tick = str(add_tick).split(',')
working_add_tick = [str(t).strip() for t in working_add_tick]
for t in working_add_tick:
if hasnum(t) == True:
failed_tickers.append({'ticker': t.upper(), 'err_type': 'Discarded from ticker list for presence of invalid numeric characters'})
working_add_tick = [t for t in working_add_tick if hasnum(t) == False]
raw_input_tickers=[t for t in working_add_tick if t!='']
raw_input_tickers=[t.upper() for t in raw_input_tickers]
input_ticker = [str(t).replace(" ", "") for t in raw_input_tickers]
spchk = [str(t).find(" ") for t in raw_input_tickers]
# PULL DATE AND TIME OF EXECUTION (CURRENT DATE AND TIME)----------------------------------------
dt_exec = datetime.datetime.now()
# PRINT FIRST LINES DESCRIBING PROGRAM EXECUTION-------------------------------------------------
print("-------------------------")
print("REQUESTING STOCK MARKET DATA...")
print(f"REQUEST AT: {dt_exec.strftime('%#I:%M%p').lower()} on {dt_exec.strftime('%A, %B %#d')}{date_suffix(dt_exec)}, {dt_exec.strftime('%Y')}")
# REQUEST DATA FROM API AND RUN CALCULATIONS FOR EACH TICKER (LOOP)-------------------------------
for tkr in input_ticker:
request_url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={tkr}&apikey={api_key}"
response = requests.get(request_url)
# PARSE API DATA -----------------------------------------------------------------------
parsed_response = json.loads(response.text)
error_check_list = list(parsed_response.keys())
error_check = error_check_list[0]
if error_check=='Meta Data': # IF TICKER IS ABLE TO PULL ACTUAL DATA
# PULL LAST REFRESH DATE FROM DATA ------------------------------------------------------------------
last_refreshed = parsed_response['Meta Data']['3. Last Refreshed']
last_ref_dt = datetime.datetime.fromisoformat(last_refreshed)
# PULL SYMBOL FROM DATA ------------------------------------------------------------------
symbol = parsed_response['Meta Data']['2. Symbol']
# PULL LATEST CLOSE FROM DATA ------------------------------------------------------------
close_days = list(parsed_response['Time Series (Daily)'].keys())
latest_day = close_days[0]
px_last = parsed_response['Time Series (Daily)'][latest_day]['4. close']
# PULL HIGH AND LOW FROM DATA----------------------------------------------------------------
highlow_pd = min(100,len(close_days))
high_px = []
for d in close_days[0:highlow_pd]:
high_px.append(float(parsed_response['Time Series (Daily)'][d]['2. high']))
recent_high = max(high_px)
low_px = []
for d in close_days[0:highlow_pd]:
low_px.append(float(parsed_response['Time Series (Daily)'][d]['3. low']))
recent_low = min(low_px)
# PULL MOST RECENT DATE OF HIGH/LOW PRICE FOR USE IN CHART--------------------------------
high_date = []
low_date= []
for k, d in parsed_response['Time Series (Daily)'].items():
if float(d['2. high']) == recent_high:
high_date.append(k)
elif float(d['3. low']) == recent_low:
low_date.append(k)
recent_high_dt = datetime.datetime.fromisoformat(high_date[0])
recent_low_dt = datetime.datetime.fromisoformat(low_date[0])
# WRITE CSV DATA ------------------------------------------------------------------------
headers = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
csv_filepath = os.path.join(os.path.dirname(os.path.abspath(
__file__)), '..', 'data', f"{symbol}.csv")
chart_data=[]
with open(csv_filepath,'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=headers)
writer.writeheader()
for k in close_days:
writer.writerow({
'timestamp': k,
'open': parsed_response['Time Series (Daily)'][k]['1. open'],
'high': parsed_response['Time Series (Daily)'][k]['2. high'],
'low': parsed_response['Time Series (Daily)'][k]['3. low'],
'close': parsed_response['Time Series (Daily)'][k]['4. close'],
'volume': parsed_response['Time Series (Daily)'][k]['5. volume']
})
chart_data.append({
'timestamp': k,
'open': parsed_response['Time Series (Daily)'][k]['1. open'],
'high': parsed_response['Time Series (Daily)'][k]['2. high'],
'low': parsed_response['Time Series (Daily)'][k]['3. low'],
'close': parsed_response['Time Series (Daily)'][k]['4. close'],
'volume': parsed_response['Time Series (Daily)'][k]['5. volume']
})
# RECOMMENDATION ------------------------------------------------------------------------
rec_criteria = float(px_last) / float(recent_low)
if rec_criteria >= 1.2:
rec = f"DO NOT BUY {symbol}!"
reason = f"{symbol} most recently closed at or above 20% of its recent low."
rec_cht=f"Do Not Buy: currently trading at or above 20% of its recent low"
else:
rec = f"BUY {symbol}!"
reason = f"{symbol} most recently closed within 20% of its recent low"
rec_cht=f"Buy: currently trading within 20% of recent low"
# PRINT INFORMATION ---------------------------------------------------------------------
print("-------------------------")
print(f"SELECTED SYMBOL: {symbol}")
print("-------------------------")
print(f"LATEST DAY: {last_ref_dt.strftime('%A, %B %#d')}{date_suffix(last_ref_dt)}, {last_ref_dt.strftime('%Y')}")
print(f"LATEST CLOSE: {to_usd(float(px_last))}")
print(f"RECENT HIGH: {to_usd(recent_high)}")
print(f"RECENT LOW: {to_usd(recent_low)}")
print("-------------------------")
print(f"ANALYSIS: {symbol} is trading at {(100*rec_criteria):.1f}% of its recent low")
print(f"RECOMMENDATION: {rec}")
print(f"RECOMMENDATION REASON: {reason}")
print("-------------------------")
print(f"WRITING DATA TO CSV: {os.path.abspath(csv_filepath)}")
print("-------------------------")
#PREP CHART----------------------------------------------------------------------------------
sorted_chart_data = sorted(
chart_data, key=operator.itemgetter('timestamp'), reverse=False)
#print(sorted_chart_data)
cht_timestamp = [p['timestamp'] for p in sorted_chart_data]
cht_open = [p['open'] for p in sorted_chart_data]
cht_close = [p['close'] for p in sorted_chart_data]
cht_high = [p['high'] for p in sorted_chart_data]
cht_low = [p['low'] for p in sorted_chart_data]
#print(cht_timestamp)
anno = [dict(x=last_ref_dt, y=px_last, xref='x', yref='y', text=f"Last Close: {to_usd(float(px_last))}", showarrow=True, arrowhead=7, ax=-40, ay=80),
dict(x=recent_high_dt, y=recent_high, xref='x', yref='y', text=f"Recent High: {to_usd(recent_high)}", showarrow=True, arrowhead=7, ax=-40, ay=-40),
dict(x=recent_low_dt, y=recent_low, xref='x', yref='y', text=f"Recent Low: {to_usd(recent_low)}", showarrow=True, arrowhead=7, ax=-40, ay=40),
dict(x=last_ref_dt, y=(1.2*recent_low), xref='x', yref='y', text=f"Price Threshhold for Purchase: {to_usd(1.2*recent_low)}", showarrow=False, yanchor='bottom',xanchor='right')]
thresh=[dict(x0=min(cht_timestamp),x1=max(cht_timestamp),y0=(1.2*recent_low),y1=(1.2*recent_low),xref='x',yref='y',line_width=1)]
#print(anno)
fig = go.Figure(data=[go.Candlestick(
x=cht_timestamp, open=cht_open, high=cht_high, low=cht_low, close=cht_close)],
layout=go.Layout(title=go.layout.Title(text=f"{symbol} - {rec_cht}"), shapes=thresh, annotations=anno, yaxis_title="Price per Share (USD)"))
fig.show()
else: #IF TICKER NOT FOUND ON API
if error_check == "Error Message":
failed_tickers.append({'ticker': tkr, 'err_type': 'Invalid API Call'})
elif error_check == "Note":
failed_tickers.append({'ticker': tkr, 'err_type': 'Exceeds API Call Limit (5 per minute and 500 per day)'})
else:
failed_tickers.append({'ticker': tkr, 'err_type': 'Other'})
# ERROR SUMMARY -----------------------------------------------------------------
if len(failed_tickers) > 0:
if len(failed_tickers) == len(input_ticker):
print("-------------------------")
print("UNABLE TO GENERATE REPORT FOR THE SPECIFIED TICKER(S).\nSEE ERROR SUMMARY")
print("-------------------------")
print("-------------------------")
print("ERROR SUMMARY:")
print("The program discarded or was unable to pull data from the API for the following ticker(s):")
for t in failed_tickers:
print(f"----{t['ticker']}: {t['err_type']}")
print("Please check the accuracy of the ticker(s) and try again.")
if len(spchk) > 0:
if max(spchk) > -1:
print("Note: spaces found in ticker inputs are automatically removed")
print("-------------------------")
print("HAPPY INVESTING!")
print("-------------------------")
| StarcoderdataPython |
170290 | # Generated by Django 3.0.3 on 2020-03-18 19:02
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``text_history`` field to ``history`` on both the
    Instructor and Student models of the ``staff`` app."""

    dependencies = [
        ('staff', '0010_auto_20200318_1846'),
    ]

    operations = [
        migrations.RenameField(
            model_name='instructor',
            old_name='text_history',
            new_name='history',
        ),
        migrations.RenameField(
            model_name='student',
            old_name='text_history',
            new_name='history',
        ),
    ]
| StarcoderdataPython |
3394529 | <reponame>tektecher/micropython
# Dev by <NAME>
from machine import Pin, ADC
# Configure the ADC on GPIO36 (an input-only pin on the ESP32).
adc = ADC(Pin(36))
adc.atten(ADC.ATTN_11DB)    # 11 dB attenuation: widest input range (up to ~3.6 V per MicroPython ESP32 docs)
adc.width(ADC.WIDTH_12BIT)  # 12-bit resolution: readings span 0..4095


def read():
    """Return the current raw ADC reading (0..4095 with 12-bit width)."""
    return adc.read()
| StarcoderdataPython |
4823285 | <reponame>liuyangdh/multimodal-vae-public
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import random
import numpy as np
from copy import deepcopy
from PIL import Image
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
N_MODALITIES = 6
VALID_PARTITIONS = {'train': 0, 'val': 1, 'test': 2}
class CelebVision(Dataset):
    """Define dataset of images of celebrities with a series of
    transformations applied to it.

    The user needs to have pre-defined the Anno and Eval folder from
    http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html

    @param partition: string
                      train|val|test [default: train]
                      See VALID_PARTITIONS global variable.
    @param data_dir: string
                     path to root of dataset images [default: ./data]
    """

    def __init__(self, partition='train', data_dir='./data'):
        super(CelebVision, self).__init__()
        self.partition = partition
        self.data_dir = data_dir
        assert partition in VALID_PARTITIONS.keys()
        # load a list of images for the user-chosen partition
        self.image_paths = load_eval_partition(partition, data_dir=data_dir)
        self.size = int(len(self.image_paths))
        # resize image to 64 x 64
        self.image_transform = transforms.Compose([transforms.Resize(64),
                                                   transforms.CenterCrop(64),
                                                   transforms.ToTensor()])

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: the six modalities for one sample --
            (rgb, grayscale, edge, mask, obscured, watermarked),
            each transformed to a 64x64 tensor.
        """
        image_path = os.path.join(self.data_dir, 'img_align_celeba',
                                  self.image_paths[index])
        gray_path = os.path.join(self.data_dir, 'img_align_celeba_grayscale',
                                 self.image_paths[index])
        edge_path = os.path.join(self.data_dir, 'img_align_celeba_edge',
                                 self.image_paths[index])
        mask_path = os.path.join(self.data_dir, 'img_align_celeba_mask',
                                 self.image_paths[index])

        # open PIL Image -- these are fixed versions of image that we save
        image = Image.open(image_path).convert('RGB')
        gray_image = Image.open(gray_path).convert('L')
        edge_image = Image.open(edge_path).convert('L')
        mask_image = Image.open(mask_path).convert('L')

        # add blocked to image
        obscured_image = Image.open(image_path).convert('RGB')
        obscured_image = obscure_image(obscured_image)

        # add watermark to a fresh copy of the image.
        # BUGFIX: the original passed ``obscured_image`` here, which discarded
        # this freshly opened copy AND stamped the watermark onto the obscured
        # modality too (add_watermark pastes in place).
        watermark_image = Image.open(image_path).convert('RGB')
        watermark_image = add_watermark(watermark_image,
                                        watermark_path='./watermark.png')

        image = self.image_transform(image)
        # BUGFIX: the original referenced the undefined name
        # ``grayscale_image`` (NameError at runtime); the variable defined
        # above is ``gray_image``.
        gray_image = self.image_transform(gray_image)
        edge_image = self.image_transform(edge_image)
        mask_image = self.image_transform(mask_image)
        obscured_image = self.image_transform(obscured_image)
        watermark_image = self.image_transform(watermark_image)

        # masks are normally white with black lines but we want to
        # be consistent with edges and MNIST-stuff, we so make the background
        # black and the lines white.
        mask_image = 1 - mask_image

        # return everything as a bundle
        return (image, gray_image, edge_image,
                mask_image, obscured_image, watermark_image)

    def __len__(self):
        """Number of samples in the chosen partition."""
        return self.size
def obscure_image(image):
    """Black out half of the image with zero (black) pixels.

    @param image: PIL.Image
                  color image
    @return: PIL.Image
             color image with half of its pixels blocked out
    """
    # np.array (unlike np.asarray) always returns a fresh writable copy,
    # so the deepcopy the original wrapped around asarray is unnecessary.
    image_npy = np.array(image)
    # NOTE(review): a PIL image converts to a (height, width, channels)
    # array, so slicing axis 1 past the midpoint zeroes the RIGHT half of
    # the frame.  The original comment claimed "half height" -- confirm
    # which split was actually intended.
    center_h = image_npy.shape[1] // 2
    image_npy[:, center_h + 1:, :] = 0
    image = Image.fromarray(image_npy)
    return image
def add_watermark(image, watermark_path='./watermark.png'):
    """Overlay a watermark image on top of *image* (pastes in place).

    @param image: PIL.Image
                  color image
    @param watermark_path: string
                           path to fixed watermark image
                           [default: ./watermark.png]
    @return: PIL.Image
             color image with overlayed watermark
    """
    overlay = Image.open(watermark_path)
    # Stretch the watermark to cover the whole target image.
    width, height = image.size
    overlay = overlay.resize((width, height), Image.BICUBIC)
    # Third argument uses the watermark itself as the alpha mask.
    image.paste(overlay, (0, 0), overlay)
    return image
| StarcoderdataPython |
3285748 | <gh_stars>1-10
from asyncio import Future, ensure_future, get_running_loop # no
from functools import partial
from typing import Awaitable
from ..command import Command
from ..constants import SessionState
from ..message import Message
from ..notification import Notification
from ..security import Authentication
from ..session import Session
from .channel import Channel
class ClientChannel(Channel):
    """Client channel representation.

    Drives the LIME session lifecycle
    (new -> negotiating -> authenticating -> established -> finished/failed)
    over the underlying transport.  Each ``*_async`` step arms one-shot
    callbacks with a Future's ``set_result``/``set_exception`` so that the
    matching incoming session envelope (dispatched by :meth:`on_session`)
    resolves the awaited step.
    """

    async def establish_session_async(
        self,
        compression: str,
        encryption: str,
        identity: str,
        authentication: Authentication,
        instance: str
    ) -> Session:
        """Establish a new session.

        Args:
            compression (str): compression type
            encryption (str): encryption type
            identity (str): identity str
            authentication (Authentication): Authentication specs
            instance (str): instance name

        Returns:
            Session: An established Session
        """
        self.ensure_state([SessionState.NEW], True)
        session: Session = await self.start_new_session_async()
        if session.encryption_options or session.compression_options:
            # Server offered options: fall back to the first offered value
            # when the caller did not supply one, then negotiate.
            compression = compression if compression else session.compression_options[0]  # noqa: E501
            encryption = encryption if encryption else session.encryption_options[0]  # noqa: E501
            session: Session = await self.negotiate_session_async(compression, encryption)  # noqa: E501
        else:
            # No negotiation required; align the transport with whatever
            # the server already selected.
            if session.compression != self.transport.compression:
                self.transport.set_compression(session.compression)
            if session.encryption != self.transport.encryption:
                self.transport.set_encryption(session.encryption)
        session: Session = await self.authenticate_session_async(identity, authentication, instance)  # noqa: E501
        self.__reset_session_listeners()
        return session

    def start_new_session_async(self) -> Awaitable[Session]:
        """Start new session.

        Returns:
            Future: A new Session
        """
        self.ensure_state([SessionState.NEW], True)
        loop = get_running_loop()
        future = loop.create_future()
        # Either a 'negotiating' or an 'authenticating' envelope resolves
        # this step, depending on what the server requires next.
        self.on_session_negotiating = future.set_result
        self.on_session_authenticating = future.set_result
        # NOTE(review): the failure path calls this with a Session object,
        # but Future.set_exception expects an exception instance -- confirm
        # upstream behaviour.
        self.__on_session_failed = future.set_exception
        session = Session(SessionState.NEW)
        self.send_session(session)
        return future

    def negotiate_session_async(
        self,
        session_compression: str,
        session_encryption: str
    ) -> Awaitable[Session]:
        """Handle session in negotiating state.

        Args:
            session_compression (str): session compression type
            session_encryption (str): session encryption type

        Returns:
            Future: A negotiated Session
        """
        self.ensure_state([SessionState.NEGOTIATING], True)
        loop = get_running_loop()
        future = loop.create_future()
        self.on_session_authenticating = future.set_result
        self.__on_session_failed = future.set_exception
        session = Session(
            SessionState.NEGOTIATING,
            encryption=session_encryption,
            compression=session_compression
        )
        session.id = self.session_id
        self.send_session(session)
        return future

    def authenticate_session_async(
        self,
        identity: str,
        authentication: Authentication,
        instance: str
    ) -> Awaitable[Session]:
        """Authenticate session.

        Args:
            identity (str): Identity to authenticate
            authentication (Authentication): Authentication object
            instance (str): Instance to authenticate

        Returns:
            Future: An authenticated Session
        """
        self.ensure_state([SessionState.AUTHENTICATING], True)
        loop = get_running_loop()
        future = loop.create_future()
        self.on_session_established = future.set_result
        self.__on_session_failed = future.set_exception
        session = Session(
            SessionState.AUTHENTICATING,
            scheme=authentication.scheme if authentication.scheme else 'unknown',  # noqa: E501
            authentication=authentication
        )
        # LIME "from" node is "identity/instance".
        session.from_n = f'{identity}/{instance}'
        session.id = self.session_id
        self.send_session(session)
        return future

    def send_finishing_session_async(self) -> Awaitable[Session]:
        """Request termination of an established session.

        Returns:
            Future: session future
        """
        self.ensure_state([SessionState.ESTABLISHED], True)
        loop = get_running_loop()
        future = loop.create_future()
        self.__on_session_finished = future.set_result
        self.__on_session_failed = future.set_exception
        session = Session(SessionState.FINISHING)
        session.id = self.session_id
        self.send_session(session)
        return future

    def on_session_finished(self, session: Session) -> None:
        """Handle callback on session finished.

        Args:
            session (Session): Received session
        """
        pass

    def on_session_failed(self, session: Session) -> None:
        """Handle callback on session failed.

        Args:
            session (Session): Received Session
        """
        pass

    def on_session(self, session: Session) -> None:  # noqa: WPS213
        """Dispatch a received session envelope to the per-state handler.

        Args:
            session (Session): Received Session
        """
        self.session_id = session.id
        self.state = session.state
        if session.state == SessionState.ESTABLISHED:
            self.local_node = str(session.to)
            self.remote_node = session.from_n
        # Switch case
        if session.state == SessionState.NEGOTIATING:
            self.on_session_negotiating(session)
            return
        if session.state == SessionState.AUTHENTICATING:
            self.on_session_authenticating(session)
            return
        if session.state == SessionState.ESTABLISHED:
            self.on_session_established(session)
            return
        if session.state == SessionState.FINISHED:
            # Close the transport first, then fire finished callbacks.
            task = ensure_future(self.transport.close_async())
            task.add_done_callback(
                partial(self.__on_session_finished_callbacks, session=session)
            )
            return
        if session.state == SessionState.FAILED:
            task = ensure_future(self.transport.close_async())
            task.add_done_callback(
                partial(self.__on_session_failed_callbacks, session=session)
            )
            return

    def on_message(self, message: Message) -> None:  # noqa: D102
        pass

    def on_command(self, command: Command) -> None:  # noqa: D102
        pass

    def on_notification(  # noqa: D102
        self,
        notification: Notification
    ) -> None:
        pass

    def on_session_authenticating(self, session: Session) -> None:
        """Handle session authenticating callback.

        Args:
            session (Session): received Session
        """
        pass

    def on_session_negotiating(self, session: Session) -> None:
        """Handle session negotiating callback.

        Args:
            session (Session): received Session
        """
        pass

    def on_session_established(self, session: Session) -> None:
        """Handle session established callback.

        Args:
            session (Session): received Session
        """
        pass

    def __reset_session_listeners(self) -> None:
        # Re-point every one-shot listener at a no-op so stale Futures are
        # never resolved twice after the session is established.
        self.__on_session_finished = \
            self.on_session_negotiating = \
            self.on_session_established = \
            self.on_session_authenticating = \
            self.__on_session_failed = self.__empty_method  # noqa: WPS429

    def __on_session_failed(self, session: Session) -> None:
        # Default (unarmed) failure listener; replaced per step above.
        pass

    def __on_session_finished(self, session: Session) -> None:
        # Default (unarmed) finished listener; replaced per step above.
        pass

    def __empty_method(self) -> None:
        pass

    def __on_session_finished_callbacks(
        self,
        fut: Future,
        session: Session
    ) -> None:
        # Runs after the transport has closed (done-callback of close task).
        self.__on_session_finished(session)
        self.on_session_finished(session)

    def __on_session_failed_callbacks(
        self,
        fut: Future,
        session: Session
    ) -> None:
        self.__on_session_failed(session)
        self.on_session_failed(session)
| StarcoderdataPython |
110493 | #%%
from datetime import datetime
import numpy as np
import pandas as pd
from tqdm import tqdm
# %%
picks = pd.read_csv('gamma_picks.csv', sep="\t")
events = pd.read_csv('gamma_catalog.csv', sep="\t")
# %%
# Join key tying picks to their parent event: "<event_idx>_<file_index>".
events["match_id"] = events.apply(lambda x: f'{x["event_idx"]}_{x["file_index"]}', axis=1)
picks["match_id"] = picks.apply(lambda x: f'{x["event_idx"]}_{x["file_index"]}', axis=1)
# %%
# Write events and their picks in HYPOINVERSE-style fixed-width phase format.
picks_by_event = picks.groupby("match_id").groups
with open("hypoInput.arc", "w") as out_file:
    for i in tqdm(range(len(events))):
        event = events.iloc[i]

        # Header line: origin time, lat/lon as degrees + hundredths of
        # minutes, depth in hundredths of km.
        event_time = datetime.strptime(event["time"], "%Y-%m-%dT%H:%M:%S.%f").strftime("%Y%m%d%H%M%S%f")[:-4]
        lat_degree = int(event["latitude"])
        lat_minute = (event["latitude"] - lat_degree) * 60 * 100
        south = "S" if lat_degree <= 0 else " "
        lng_degree = int(event["longitude"])
        lng_minute = (event["longitude"] - lng_degree) * 60 * 100
        east = "E" if lng_degree >= 0 else " "
        depth = event["depth(m)"] / 1e3 * 100
        event_line = f"{event_time}{abs(lat_degree):2d}{south}{abs(lat_minute):4.0f}{abs(lng_degree):3d}{east}{abs(lng_minute):4.0f}{depth:5.0f}"
        out_file.write(event_line + "\n")

        picks_idx = picks_by_event[event["match_id"]]
        for j in picks_idx:
            pick = picks.iloc[j]
            network_code, station_code, comp_code, channel_code = pick['id'].split('.')
            phase_type = pick['type']
            # Map pick probability onto HYPOINVERSE weight code 0 (best)..3.
            phase_weight = min(max(int((1 - pick['prob']) / (1 - 0.3) * 4) - 1, 0), 3)
            pick_time = datetime.strptime(pick["timestamp"], "%Y-%m-%dT%H:%M:%S.%f")
            phase_time_minute = pick_time.strftime("%Y%m%d%H%M")
            phase_time_second = pick_time.strftime("%S%f")[:-4]
            tmp_line = f"{station_code:<5}{network_code:<2} {comp_code:<1}{channel_code:<3}"
            if phase_type.upper() == 'P':
                pick_line = f"{tmp_line:<13} P {phase_weight:<1d}{phase_time_minute} {phase_time_second}"
            elif phase_type.upper() == 'S':
                pick_line = f"{tmp_line:<13} 4{phase_time_minute} {'':<12}{phase_time_second} S {phase_weight:<1d}"
            else:
                # BUGFIX: the original did `raise (f"...")`, which raises a
                # bare string and is itself a TypeError ("exceptions must
                # derive from BaseException").
                raise ValueError(f"Phase type error {phase_type}")
            out_file.write(pick_line + "\n")
        out_file.write("\n")

        # Debug limit carried over from the original script: stop after
        # ~1000 events.  Remove to convert the full catalog.
        if i > 1e3:
            break
| StarcoderdataPython |
7155 | <reponame>jeffkimbrel/MergeMetabolicAnnotations<filename>lib/MergeMetabolicAnnotations/utils/CompareAnnotationsUtil.py<gh_stars>1-10
import os
import datetime
import logging
import json
import uuid
from installed_clients.WorkspaceClient import Workspace as Workspace
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.annotation_ontology_apiServiceClient import annotation_ontology_api
import MergeMetabolicAnnotations.utils.functions as f
class CompareAnnotationsUtil:
    """Builds a KBase HTML report comparing the annotation ontology events
    attached to a genome object."""

    def __init__(self, config):
        """Wire up service clients and scratch paths from the SDK config."""
        self.config = config
        self.timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        self.callback_url = config['SDK_CALLBACK_URL']
        self.scratch = config['scratch']
        self.kbr = KBaseReport(self.callback_url)
        self.anno_api = annotation_ontology_api()
        self.ws_client = Workspace(config["workspace-url"])

    def run(self, ctx, params):
        """Fetch ontology events for the genome in *params*, keep the ones
        the user selected, render comparison HTML, and return the
        name/ref pair the KBase SDK expects."""
        ontology_events = self.anno_api.get_annotation_ontology_events({
            "input_ref": params['genome'],
            "workspace-url": self.config["workspace-url"]
        })
        selected = f.filter_selected_ontologies(ontology_events, params,
                                                workflow="compare")

        # Dump the filtered events beside the report inputs for debugging.
        dump_path = os.path.join(self.scratch, "get_ontology_dump.json")
        with open(dump_path, 'w') as outfile:
            json.dump(selected, outfile, indent=2)

        # Render the HTML comparison pages into a fresh scratch directory.
        report_dir = os.path.join(self.scratch, str(uuid.uuid4()))
        os.mkdir(report_dir)
        summary = f.get_event_lists(selected)
        html_reports = f.compare_report_stack([], summary, report_dir)

        # Package everything as an extended KBase report.
        report_output = self.kbr.create_extended_report({
            'message': '',
            'html_links': html_reports,
            'direct_html_link_index': 0,
            'workspace_name': params['workspace_name'],
            'report_object_name': f'compare_annotations_{uuid.uuid4()}'})
        return {'report_name': report_output['name'],
                'report_ref': report_output['ref']}
| StarcoderdataPython |
47096 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-01-25 09:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Set human-readable (Chinese) ``verbose_name`` options on the focus
    app's models.  AlterModelOptions changes model metadata only."""

    dependencies = [
        ('focus', '0003_auto_20190125_1721'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='article',
            options={'verbose_name': '文章', 'verbose_name_plural': '文章'},
        ),
        migrations.AlterModelOptions(
            name='author',
            options={'verbose_name': '作者', 'verbose_name_plural': '作者'},
        ),
        migrations.AlterModelOptions(
            name='column',
            options={'ordering': ['name'], 'verbose_name': '类别', 'verbose_name_plural': '类别'},
        ),
        migrations.AlterModelOptions(
            name='comment',
            options={'verbose_name': '评论', 'verbose_name_plural': '评论'},
        ),
        migrations.AlterModelOptions(
            name='poll',
            options={'verbose_name': '点赞', 'verbose_name_plural': '点赞'},
        ),
    ]
| StarcoderdataPython |
3318634 | <filename>miners/__init__.py
from miners.Miner import Miner
from miners.imdb.ImdbMiner import IMDB
| StarcoderdataPython |
50425 | import cv2
import numpy as np
from shapes import Myinit
class Triangle(Myinit):
    """A filled magenta triangle rendered onto the canvas set up by Myinit."""

    def __init__(self):
        super().__init__()
        # Three corner points, reshaped to the (N, 1, 2) layout cv2 expects.
        corners = np.array([[100, 50], [150, 150], [50, 150]], np.int32)
        self.vertices = corners.reshape((-1, 1, 2))
        self.color = (255, 0, 255)

    def form_shape(self):
        """Outline the triangle on the canvas, then fill it with the same colour."""
        self.img = cv2.polylines(self.img, [self.vertices], True, self.color)
        cv2.fillPoly(self.img, [self.vertices], self.color)

    def welcome(self):
        """Announce that the triangle is about to be drawn."""
        print('Printing Triangle...!')

    def sides(self):
        """Report how many sides the shape has."""
        print("Triangle has 3 sides.")

    def draw_shape(self):
        """Greet, render, report sides, then display until a key is pressed."""
        self.welcome()
        self.form_shape()
        self.sides()
        cv2.imshow("Triangle", self.img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
| StarcoderdataPython |
1632347 | #!/usr/bin/env python
import rospy
import math
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
import numpy as np
from geometry_msgs.msg import Twist, Pose
from move_base_msgs.msg import MoveBaseActionGoal
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
class Debris_Photography:
    """ROS node behaviour: drive toward reported debris, photograph each
    piece once, and remember photographed locations to avoid repeats."""

    def __init__(self):
        self.currentGoal = None
        self._cvbridge = CvBridge()
        self._image = None
        self.obstacles = []
        self.max_speed = rospy.get_param('~max_speed', 0.5)
        self.max_steering = rospy.get_param('~max_steering', 0.37)
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
        self.debris_pos = rospy.Subscriber('/racecar/object_coords', Pose, self.debris_callback, queue_size=1)
        self.cam_sub = rospy.Subscriber('/racecar/raspicam_node/image', Image, self.cam_cb, queue_size=1)

    def clamp(self, value, max_value):
        """Saturate *value* into the symmetric range [-max_value, max_value]."""
        if abs(value) > max_value:
            value = -max_value if value < 0 else max_value
        return value

    def approachDebris(self, angle, distance, x, y):
        """Steer toward the debris; once aligned and close enough, stop,
        wait, photograph it, and record its map position."""
        goal_distance = 1.5
        if abs(angle) < 0.05 and distance < 1.9:
            rospy.loginfo("New object")
            wait = 5  # seconds to sit still before taking the shot
            for _ in range(10 * wait):
                self.cmd_vel_pub.publish(Twist())
                rospy.sleep(wait / 100)
            cv_image = self._cvbridge.imgmsg_to_cv2(self._image, desired_encoding='passthrough')
            rospy.loginfo("Registered image:")
            photo_str = "photo_" + str(len(self.obstacles)) + ".png"
            rospy.loginfo(cv2.imwrite(photo_str, cv_image))
            self.obstacles.append((x, y))
        else:
            cmd = Twist()
            cmd.linear.x = self.clamp(distance - goal_distance, self.max_speed)
            cmd.angular.z = self.clamp(angle, self.max_steering)
            rospy.loginfo("twist")
            rospy.loginfo(cmd)
            self.cmd_vel_pub.publish(cmd)

    def debris_callback(self, msg):
        """Pose message -- the publisher appears to encode map x/y in
        position.x/y, distance in position.z and bearing in orientation.z
        (confirm against the publisher node)."""
        if abs(msg.orientation.z) > 1:
            return
        # Skip anything within 1 m of a previously photographed object.
        for seen in self.obstacles:
            if math.sqrt((seen[0] - msg.position.x) ** 2 + (seen[1] - msg.position.y) ** 2) <= 1:
                rospy.loginfo("Object already detected")
                return
        self.approachDebris(msg.orientation.z, msg.position.z, msg.position.x, msg.position.y)

    def cam_cb(self, msg):
        """Cache the latest camera frame for the next photograph."""
        self._image = msg
self._image = msg
def main():
    """Initialise the ROS node and spin until shutdown."""
    rospy.init_node('Debris_Photography')
    node = Debris_Photography()
    rospy.spin()


if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node.
        pass
| StarcoderdataPython |
3287330 | <reponame>y-agg/pywakit
from termcolor import colored
from urllib.request import urlopen
from selenium import webdriver
from selenium.webdriver.common.by import By
from time import sleep
from datetime import datetime
from win32com.client import Dispatch
from win32com.client import Dispatch
import platform, requests, zipfile, os, sys, urllib
from clint.textui import progress
sys.setrecursionlimit(10**6)
class CountryCodeException(Exception):
    """Raised when a phone number lacks the leading '+<country code>'."""
    pass
class IllegalArgumentError(Exception):
    """Raised when a caller passes an invalid or malformed argument."""
    pass
class MaximumTimeAttemptException(Exception):
    """Raised when a retry loop exhausts its attempt budget."""
    pass
class InternetConnectionException(Exception):
    """Raised when no internet connection can be detected."""
    pass
class WhatsApp():
def __init__(self):
self.very_short_time_break = 0.5
self.short_time_break = 2
self.long_time_break = 7
self.medium_time_break = 5
self.retry= 10
self.log_file = self.open_file("log.txt")
self.message_log_file = self.open_file("pywakit_db.txt")
self.webpage_url = "https://web.whatsapp.com/"
self.webpage_xpath = '//div[@class = "_3FRCZ copyable-text selectable-text" and @dir="ltr" and @data-tab="3"]'
self.canvas_qr_xpath = '//canvas[@aria-label="Scan me!" and @role="img"]'
self.send_button_xpath = '//span[@data-testid="send" and @data-icon="send"]'
self.invalid_number_modal_xpath= '//div[@class="_2HE5l" and @data-animate-modal-body="true:"]'
self.default_chromedriver_path = './pywakit/chromedriver'
def setup_driver(self, chromedriver=None):
if chromedriver==None and (os.path.isfile(self.default_chromedriver_path+".exe") or os.path.isfile(self.default_chromedriver_path)):
self.log_data("// Status: Chromedriver found on this system", 'yellow')
self.setup_driver(self.default_chromedriver_path)
return
if chromedriver==None:
self.log_data("// Status: Downloading chromedriver", 'yellow')
self.download_chrome_driver()
self.setup_driver(self.chromedriver_path)
return
if isinstance(chromedriver, str) != True:
self.log_data("// Warning: The path must be of string type", 'red')
raise IllegalArgumentError("The path must be of string type")
if chromedriver.strip() == '':
self.log_data("// Warning: chromedriver path can't be empty or null. Proivde Valid Path to Function", 'red')
raise IllegalArgumentError("chromedriver path can't be empty or null")
if not(os.path.isfile(chromedriver+".exe") or os.path.isfile(chromedriver)):
self.log_data("// Error: chromedriver.exe or chromedriver not found at provided path", 'red')
raise IllegalArgumentError("chromedriver.exe or chromedriver not found at provided path")
self.check_internet_connection()
self.driver = webdriver.Chrome(chromedriver)
return
def check_valid_phone_number(self, number):
if isinstance(number, str):
if "+" not in number:
self.log_data("// Warning: Country code missing from phone_no", 'red')
raise CountryCodeException("Country code missing from phone_no")
return
self.log_data("// Warning: Invalid Number Entered", 'red')
IllegalArgumentError("Invalid Number Entered")
def call_sleep_function(self, time):
self.only_log_data(f"// Process: Sleep Cycle Excuted for {time} seconds")
sleep(time)
def get_retry_val(self):
return self.retry
def very_short_break(self):
self.call_sleep_function(self.very_short_time_break)
def medium_short_break(self):
self.call_sleep_function(self.medium_time_break)
def short_break(self):
self.call_sleep_function(self.short_time_break)
def long_break(self):
self.call_sleep_function(self.long_time_break)
def check_message_sent(self):
data = self.driver.find_elements_by_xpath('//span[@data-testid="msg-time" and @data-icon="msg-time"]')
if len(data) > 0:
self.log_data("// Waiting: Message Is Not Yet Sent ..", 'yellow')
self.short_break()
self.check_message_sent()
def open_file(self, filenname):
if os.path.isfile(filenname):
pointer = open(filenname, 'a')
pointer.write(f"[{datetime.now()}] // Process: {filenname} is opened. \n")
return pointer
pointer = open(filenname, 'w')
pointer.write(f"File Created: {filenname} created at {datetime.now()} \n")
pointer.write(f"pywakit {filenname} file\n")
pointer.write("________________________________________________________\n")
return pointer
def check_internet_connection(self):
self.only_log_data("Status: Checking Internet Connection Status")
try:
urlopen('https://www.google.com', timeout=1)
except urllib.error.URLError:
self.log_data("You are not connected to internet", 'red')
raise InternetConnectionException("You are not connected to internet")
def destroy(self):
self.log_data("// Closing: Closing All pointers...", 'yellow')
self.log_data("// Status: All pointers Closed...", 'green')
self.only_log_data("\n++++++++++\n----------\n++++++++++")
self.driver.close()
self.log_file.close()
self.message_log_file.close()
def log_data(self, message, color, file_log=None):
if file_log == 'log_message_file':
self.message_log_file.write(f"[{datetime.now()}] {message} \n")
print(colored(message, color))
self.log_file.write(f"[{datetime.now()}] {message} \n")
def only_log_data(self, message):
self.log_file.write(f"[{datetime.now()}] {message} \n")
def get_google_chrome_version(self):
self.only_log_data("// Process: get_google_chrome_version() is called...")
paths = [r"C:\Program Files\Google\Chrome\Application\chrome.exe",
r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe"]
version = list(filter(None, [self.get_version_via_com(p) for p in paths]))[0]
return version
def get_version_via_com(self,filename):
self.only_log_data("// Process: get_version_via_com() is called...")
parser = Dispatch("Scripting.FileSystemObject")
try:
version = parser.GetFileVersion(filename)
except Exception:
return None
return version
def download(self,url):
file_name= url.split('/')[-1]
self.only_log_data("// Process: download() is called...")
r = requests.get(url, stream=True)
if r.status_code ==200:
self.log_data(f"Status: Downloading chromedriver.zip file",'yellow')
with open(f"./pywakit/{file_name}", "wb") as chrome_exe:
total_length = int(r.headers.get('content-length'))
for ch in progress.bar(r.iter_content(chunk_size = 2391975), expected_size=(total_length/1024) + 1):
if ch:
chrome_exe.write(ch)
self.log_data(f"Status: Download successful",'green')
self.log_data(f"Status: Unziping {file_name}",'yellow')
with zipfile.ZipFile(f"./pywakit/{file_name}", 'r') as zip_ref:
zip_ref.extractall("./pywakit/")
self.log_data(f"Status: Unziping of {file_name} successfull",'yellow')
os.remove(f"./pywakit/{file_name}")
self.log_data(f"Status: Removing {file_name} successfull",'green')
self.chromedriver_path = './pywakit/chromedriver'
else:
self.log_data(f'''// Error:{url} generated is invalid somehow. Please download the chromedriver Manually from https://sites.google.com/a/chromium.org/chromedriver/ and after downloading copy the path of extracted chromedriver.exe file and pass it to setup_driver function as arugument like Object_name.setup_driver('PATH_OF_CROME_DRIVER')\n''', 'yellow')
sys.exit()
def download_chrome_driver(self):
chromedriver_version= self.get_google_chrome_version()
self.log_data(f"Info: Chrome {chromedriver_version} on your System.",'yellow')
architecture_version= platform.architecture()[1].lower()
if architecture_version.startswith('darwin'):
os_ = 'mac'
architecture = 64 if float(chromedriver_version) >= 2.23 else 32
elif architecture_version.startswith('linux'):
os_ = 'linux'
architecture = platform.architecture()[0][:-3]
elif architecture_version.startswith('win'):
os_ = 'win'
architecture = 32
else:
raise Exception('Unsupported platform: {0}'.format(architecture_version))
self.log_data(f"Info: Chomedriver based on your system config is {os_}{architecture}.",'yellow')
link= f"https://chromedriver.storage.googleapis.com/{chromedriver_version}/chromedriver_{os_}{architecture}.zip"
self.log_data(f"Chrome Driver link generated {link}",'yellow')
self.download(link)
def scan_code(self):
self.log_data(
f"Process: Opening {self.webpage_url} in Web Broswer.", 'yellow')
self.driver.get(self.webpage_url)
self.check_qr_code_is_avaiable(self.canvas_qr_xpath, self.get_retry_val())
self.check_qr_code_scanned(self.canvas_qr_xpath, self.get_retry_val())
def check_qr_code_is_avaiable(self, xpath, retry):
self.only_log_data("Process: check_qr_code_is_avaiable() is called")
if retry == 0:
self.check_internet_connection()
self.log_data("//Warning: Some Element Are Working Right.", 'red')
raise MaximumTimeAttemptException("Exiting Program, Limit reached")
if len(self.driver.find_elements_by_xpath(xpath)) == 0:
self.log_data("//Checking : Looking for QR code", 'yellow')
if not(self.quick_check_webpage(self.webpage_xpath)):
self.long_break()
self.check_qr_code_is_avaiable(xpath, retry-1)
return
self.log_data("// Status: QR code is avaiable to scan...", 'green')
return
def check_qr_code_scanned(self, xpath, retry):
self.only_log_data("Process: check_qr_code_scanned() is called")
if retry == 0:
self.log_data("//Warning: Some Element Are Working Right.", 'red')
raise MaximumTimeAttemptException("Exiting Program, Limit reached")
if len(self.driver.find_elements_by_xpath(xpath)) != 0:
self.log_data("// Status: Scan QR code...", 'yellow')
self.long_break()
self.check_qr_code_scanned(xpath, retry-1)
return
self.log_data("// Status: QR Code is Scanned...", 'green')
return
def quick_check_webpage(self, xpath):
return True if len(self.driver.find_elements_by_xpath(xpath)) > 0 else False
def check_webpage_loaded(self, xpath, retry=20):
self.only_log_data("Process: check_webpage_loaded() is Called.")
if retry == 0:
self.log_data("//Warning: Some Element Are Working Right.", 'red')
raise MaximumTimeAttemptException("Program Terminated.")
if retry == 2:
self.check_internet_connection()
if retry == 5:
self.log_data("// Warning: Your Internet Connection Is Not Stable.Check Your Internet Speed...", 'yellow')
if not(len(self.driver.find_elements_by_xpath(xpath)) > 0):
self.long_break()
self.check_webpage_loaded(xpath, retry-1)
def is_given_number_avaiable(self, number):
self.only_log_data("// Process: Checking is given number avaiable on whatsapp .")
if len(self.driver.find_elements_by_xpath(self.send_button_xpath)) == 0:
self.log_data(f"// Warning: {number} is Invalid, Does'nt exists in whatsapp database.", 'red')
return False
return True
def is_send_button_avaiable(self, number):
    """True when the chat's send button is present; otherwise defer to the
    number-validity check (which logs a warning and returns its verdict)."""
    self.only_log_data("Process: Validating Number and Button.")
    button_present = len(self.driver.find_elements_by_xpath(self.send_button_xpath)) != 0
    if button_present:
        return True
    return self.is_given_number_avaiable(number)
def send_message(self, number, message):
    """Navigate to the pre-filled chat URL for ``number`` and click send.

    Raises via check_valid_phone_number / check_webpage_loaded on failure.
    NOTE(review): ``message`` is concatenated into the URL without
    percent-encoding — presumably the caller pre-encodes it; verify.
    """
    self.only_log_data("// Process: send_message() is called.")
    self.check_valid_phone_number(number)
    # open the "send?phone=...&text=..." deep link directly
    self.driver.get(f'{self.webpage_url}send?phone='+number+'&text='+message)
    self.check_webpage_loaded(self.webpage_xpath)
    self.short_break()
    if not(self.quick_check_webpage(self.webpage_xpath)):
        # page disappeared after the pause: wait for it again
        self.check_webpage_loaded(self.webpage_xpath)
    if self.is_send_button_avaiable(number):
        self.driver.find_element_by_xpath(self.send_button_xpath).click()
        self.check_message_sent()
        self.log_data(f"// Status: Message is successfully sent to {number}", 'green', 'log_message_file')
def show_log(self):
    """Dump the raw contents of log.txt to stdout."""
    self.log_data('// Process: User Requested to print Log File data.', 'yellow')
    with open('log.txt', 'r') as log_file:
        contents = log_file.read()
    print(contents)
def show_history(self):
    """Print every successfully-sent-message line recorded in pywakit_db.txt."""
    self.log_data('// Process: User Requested to print Message File data', 'yellow')
    with open('pywakit_db.txt', 'r') as db_file:
        history_lines = db_file.read().split('\n')
    for line in history_lines:
        if "Message is successfully" in line:
            print(line)
# Entry point: instantiate the bot and dump the send-history to stdout.
if __name__ == "__main__":
    ob = WhatsApp()  # NOTE(review): WhatsApp is defined earlier in this file
    ob.show_history()
| StarcoderdataPython |
1790326 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from decouple import config
# App wiring: the settings class to load is named by the APP_SETTINGS
# environment variable (read through python-decouple's config()).
app = Flask(__name__)
app.config.from_object(config("APP_SETTINGS"))
db = SQLAlchemy(app)        # ORM handle used by the app's models
migrate = Migrate(app, db)  # enables `flask db ...` migration commands
from core import routes | StarcoderdataPython |
43616 | <filename>cluster_scripts/gen_train_exp.py
#!/usr/bin/env python3
"""Script for generating experiments.txt"""
import os
import sys
from pathlib import Path

from lxml import etree
from dotenv import load_dotenv, find_dotenv

sys.path.append(str(Path(__file__).parent.parent))
from config import ANALYSIS as cfg

load_dotenv(find_dotenv('.env'))

FEATS_DIR = os.getenv('FEATS_DIR')
# Name of the experiment; fix idiom: use getenv's default instead of
# `os.getenv('NAME') if os.getenv('NAME') != None else 'exp1'` (same behavior,
# single lookup, no `!= None` comparison)
NAME = os.getenv('NAME', 'exp1')
NUM_EPOCHS = int(os.getenv('EPOCHS'))
def parse_preambles(filename):
    '''
    Input: filepath of the preambles.mrt
    Output: dict mapping each meeting_id (Session attribute) to the list of
    per-channel audio files declared in that meeting's preamble.
    (Fixed docstring: the original claimed two dicts were returned; only one is.)
    '''
    chan_audio_in_meeting = {}
    tree = etree.parse(filename)
    for meeting in tree.xpath('//Meeting'):
        # renamed from `id` to avoid shadowing the builtin
        session = meeting.get('Session')
        for part in meeting.xpath('./Preamble/Channels/Channel'):
            # setdefault replaces the `if key in dict.keys()` append/create dance
            chan_audio_in_meeting.setdefault(session, []).append(part.get('AudioFile'))
    return chan_audio_in_meeting
PREAMBLES_PATH = os.path.join(cfg['transcript_dir'], 'preambles.mrt')
CHAN_AUDIO_IN_MEETING = parse_preambles(PREAMBLES_PATH)

# The home dir on the node's scratch disk
USER = os.getenv('USER')
# This may need changing to e.g. /disk/scratch_fast depending on the cluster
SCRATCH_DISK = '/disk/scratch'
SCRATCH_HOME = f'{SCRATCH_DISK}/{USER}'
DATA_HOME = f'{SCRATCH_HOME}/icsi/data'

# One identical training command is emitted per epoch; the scheduler runs
# one line of the output file per job.
base_call = (f"python train.py --config resnet_base --checkpoint_dir {SCRATCH_HOME}/icsi/checkpoints --data_root {SCRATCH_HOME}/icsi --lhotse_dir {FEATS_DIR} --num_epochs=1")

out_file_name = f"{NAME}_exp_train.txt"
exp_counter = 0
# fix: the file handle was opened without ever being closed on error; use a
# context manager so it is closed deterministically
with open(out_file_name, "w") as output_file:
    for _ in range(NUM_EPOCHS):
        exp_counter += 1
        # Note that we don't set a seed for rep - a seed is selected at random
        # and recorded in the output data by the python script
        expt_call = (
            f"{base_call} "
        )
        print(expt_call, file=output_file)

print(f'Generated commands for {exp_counter} training epochs.\nSaved in: {out_file_name}')
| StarcoderdataPython |
# Simple carpool arithmetic exercise: fixed fleet/driver/passenger counts.
cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90

# Derived quantities.
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_carpacity = cars_driven * space_in_a_car
average_passengers_per_car = passengers / cars_driven

# Report the results (f-strings produce the same output as comma-separated
# print arguments).
print(f"There are {cars} cars available")
print(f"There are only {drivers} drivers available")
print(f"There will be {cars_not_driven} empty cars today")
print(f"We can transport {carpool_carpacity} people today")
print(f"We have {passengers} to carpool today")
print(f"We need to put about {average_passengers_per_car} people in each car")
4811220 | /home/runner/.cache/pip/pool/d0/ae/4d/24abaf2af3445a9f845fb8f43838b765042020c1e86f348526adc6ef23 | StarcoderdataPython |
21726 | <reponame>gitter-badger/DHOD
import numpy as np
import sys, os
from scipy.optimize import minimize
import json
import matplotlib.pyplot as plt
#
sys.path.append('./utils')
import tools
#
# Simulation parameters: box size, mesh size, number of time steps.
bs, ncf, stepf = 400, 512, 40
path = '../data/z00/'
# Directory-name templates filled with (bs, ncf, seed, stepf) below.
ftype = 'L%04d_N%04d_S%04d_%02dstep/'
ftypefpm = 'L%04d_N%04d_S%04d_%02dstep_fpm/'
# Illustris halo/group masses, transposed so rows index quantities:
# mm[1] = halo mass, mm[2] = stellar mass (both scaled by 1e10 where used —
# presumably 1e10 Msun units; confirm against the data file).
mm = np.load('../data/Illustris_halo_groupmass.npy').T
mh = mm[1]*1e10  # halo masses
ms = mm[2]*1e10  # stellar masses
def getstellar(mbins):
    """Bin Illustris halos by halo mass and compute per-bin stellar statistics.

    Parameters:
        mbins: array of halo-mass bin edges.
    Returns:
        scount: number of halos per bin
        smass:  mean stellar mass per bin
        lsstd:  std of log stellar mass per bin
        hmass:  mean halo mass per bin
    All outputs have the same length as ``mbins``; the final entry is never
    filled and stays 0 (only ``mbins.size - 1`` bins exist).

    Fix: removed the original ``if i == mbins.size-1`` open-ended-bin branch —
    it was unreachable because the loop index never exceeds ``mbins.size - 2``.
    """
    scount = np.zeros_like(mbins)
    smass = np.zeros_like(mbins)
    lsstd = np.zeros_like(mbins)
    hmass = np.zeros_like(mbins)
    for i in range(mbins.size - 1):
        # halos whose mass falls strictly inside (mbins[i], mbins[i+1])
        mask = (mm[1] * 1e10 > mbins[i]) & (mm[1] * 1e10 < mbins[i + 1])
        scount[i] = mask.sum()
        smass[i] = mm[2][mask].mean() * 1e10
        lsstd[i] = np.log(mm[2][mask] * 1e10).std()
        hmass[i] = mm[1][mask].mean() * 1e10
    return scount, smass, lsstd, hmass
def fitstellar(p, smass, hmass, rety=False):
    """Power-law model of stellar mass vs halo mass, fit in log space.

    p = (intercept, slope) of log(smass) ~ slope*log(hmass) + intercept.
    With rety=True, return the model prediction exp(yy); otherwise return
    the sum of squared log-residuals against ``smass`` (last bin excluded,
    since getstellar leaves it unfilled).
    """
    intercept, slope = p
    log_model = slope * np.log(hmass) + intercept
    if rety:
        return np.exp(log_model)
    residuals = np.log(smass[:-1]) - log_model[:-1]
    return sum(residuals ** 2)
def fitscatter(p, hmass, rstd, rety=False):
    """Quadratic-in-log(hmass) model for the stellar-mass scatter.

    p = (c0, c1, c2) coefficients of c0 + c1*x + c2*x**2 with x = log(hmass).
    With rety=True, return the model values; otherwise return the sum of
    squared residuals against ``rstd`` (last bin excluded).
    """
    c0, c1, c2 = p
    logm = np.log(hmass)
    model = c0 + c1 * logm + c2 * logm ** 2
    if rety:
        return model
    diff = model[:-1] - rstd[:-1]
    return sum(diff ** 2)
def dofit():
    """Fit the stellar-mass relation and its log-scatter on Illustris bins,
    then cache both parameter sets (plus the bin edges) to
    ../data/stellar.json for scattercatalog() to reuse."""
    mbins = 10**np.arange(12, 14, 0.1)
    scount, smass, lsstd, hmass = getstellar(mbins)
    # power-law fit of log(smass) vs log(hmass)
    pp = minimize(lambda p: fitstellar(p, smass, hmass), [1, 1])
    # quadratic fit of the log-scatter vs log(hmass)
    pps = minimize(lambda p: fitscatter(p, hmass, lsstd), [0.3, 0.0, .0])
    fname = '../data/stellar.json'
    data = {'stellarfit': list(pp.x), 'scatterfit': list(pps.x)}
    data['mbins'] = list(mbins)
    data['NOTE'] = 'Fit b/w range 1e12, 1e14'
    with open(fname, "w") as write_file:
        json.dump(data, write_file, indent=4)
def scattercatalog(seed, mmin=1e12):
    """Build a mock stellar-mass catalog for one FastPM seed.

    Applies the cached stellar-mass fit plus log-normal scatter to the FOF
    halo masses, saves the result beside the halo catalog, and writes a
    side-by-side sanity plot against Illustris. Halos below ``mmin`` get a
    sentinel stellar mass of -999.
    """
    hmass = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/Mass/')[1:].reshape(-1)*1e10
    print(hmass.max()/1e12, hmass.min()/1e12)
    # load the fit parameters cached by dofit()
    with open('../data/stellar.json', "r") as read_file:
        p = json.load(read_file)
    mbins = p['mbins']
    pm = p['stellarfit']
    ps = p['scatterfit']
    print(pm, ps)
    smassmean = fitstellar(pm, None, hmass, True)  # mean stellar mass per halo
    smasssig = fitscatter(ps, hmass, None, True)   # log-scatter per halo
    print(fitstellar(pm, None, 1e12, True))
    print(fitscatter(ps, 1e12, None, True))
    smasssig[smasssig < 0.1] = 0.1  # floor the scatter
    np.random.seed(seed)
    scatter = np.random.normal(scale=smasssig)
    smass = np.exp(np.log(smassmean) + scatter)
    mask = hmass >= mmin
    smass[~mask] = -999  # sentinel for halos below the mass cut
    np.save(path + ftype%(bs, ncf, seed, stepf) + '/stellarmass', smass)
    # comparison plot: mock (FastPM) on the left, Illustris on the right
    fig, ax = plt.subplots(1, 2, figsize=(9, 4), sharex=True, sharey=True)
    axis = ax[0]
    axis.plot(hmass[mask], smass[mask], '.')
    axis.plot(hmass[mask], smassmean[mask], '.')
    axis.loglog()
    axis.grid()
    axis.set_title('FastPM')
    axis = ax[1]
    axis.plot(mh[mh>mmin], ms[mh>mmin], '.')
    axis.plot(hmass[mask], smassmean[mask], '.')
    axis.loglog()
    axis.grid()
    axis.set_title('Illustris')
    plt.savefig(path + ftype%(bs, ncf, seed, stepf) + '/stellarmass.png')
    plt.close()
if __name__=='__main__':
    # fix: the original called dofit() in the else-branch AND again
    # unconditionally, so a missing cache file triggered two identical fits;
    # one unconditional refit preserves the effective behavior.
    if os.path.isfile('../data/stellar.json'):
        print('Stellar fit exits')
    dofit()
    for seed in range(100, 1000, 100):
        scattercatalog(seed)
| StarcoderdataPython |
117853 | <reponame>sfox14/butterfly
import numpy as np
import torch
from torch.nn import functional as F
from numpy.polynomial import chebyshev, legendre
from utils import bitreversal_permutation
def polymatmul(A, B):
    """Batch-multiply two matrices of polynomials

    Polynomial coefficients live on the last axis, so each scalar product of
    two polynomials is a 1-d convolution — implemented below with a single
    grouped conv1d over the whole batch.

    Parameters:
        A: (N, batch_size, n, m, d1)
        B: (batch_size, m, p, d2)
    Returns:
        AB: (N, batch_size, n, p, d1 + d2 - 1)
    """
    unsqueezed = False
    if A.dim() == 4:
        # allow a single (batch, n, m, d1) input; shape restored on return
        unsqueezed = True
        A = A.unsqueeze(0)
    N, batch_size, n, m, d1 = A.shape
    batch_size_, m_, p, d2 = B.shape
    assert batch_size == batch_size_
    assert m == m_
    # Naive implementation using conv1d and loop, slower but easier to understand
    # Bt_flipped = B.transpose(1, 2).flip(-1)
    # result = torch.stack([
    #     F.conv1d(A[:, i].reshape(-1, m, d1), Bt_flipped[i], padding=d2 - 1).reshape(N, n, p, -1)
    #     for i in range(batch_size)
    # ], dim=1)
    # Batched implementation using grouped convolution, faster:
    # one group per batch element; B (flipped, since conv1d cross-correlates)
    # acts as the convolution kernel.
    result = F.conv1d(A.transpose(1, 2).reshape(N * n, batch_size * m, d1),
                      B.transpose(1, 2).reshape(batch_size * p, m, d2).flip(-1),
                      padding=d2 - 1,
                      groups=batch_size).reshape(N, n, batch_size, p, d1 + d2 - 1).transpose(1, 2)
    return result.squeeze(0) if unsqueezed else result
def ops_transpose_mult(a, b, c, p0, p1, v):
    """Fast algorithm to multiply P^T v where P is the matrix of coefficients of
    OPs, specified by the coefficients a, b, c, and the starting polynomials p0,
    p_1.

    In particular, the recurrence is
        P_{n+2}(x) = (a[n] x + b[n]) P_{n+1}(x) + c[n] P_n(x).

    Parameters:
        a: array of length n
        b: array of length n
        c: array of length n
        p0: real number representing P_0(x).
        p1: pair of real numbers representing P_1(x).
        v: (batch_size, n)
    Return:
        result: P^T v.

    NOTE(review): the two S[0] slicing lines had been corrupted by a dataset
    anonymization pass (IPv6-looking tokens in place of slices); they are
    reconstructed as ``v[:, 1::2]`` / ``v[:, ::2]`` by analogy with the
    bit-reversed implementation below (odd positions multiply T, even
    positions multiply the identity).
    """
    n = v.shape[-1]
    m = int(np.log2(n))
    assert n == 1 << m, "Length n must be a power of 2."
    # Preprocessing: compute T_{i:j}, the transition matrix from p_i to p_j.
    T = [None] * (m + 1)
    # Lowest level, filled with T_{i:i+1}
    # n matrices, each 2 x 2, with coefficients being polynomials of degree <= 1
    T[0] = torch.zeros(n, 2, 2, 2)
    T[0][:, 0, 0, 1] = a
    T[0][:, 0, 0, 0] = b
    T[0][:, 0, 1, 0] = c
    T[0][:, 1, 0, 0] = 1.0
    for i in range(1, m + 1):
        T[i] = polymatmul(T[i - 1][1::2], T[i - 1][::2])
    P_init = torch.tensor([p1, [p0, 0.0]], dtype=torch.float)  # [p_1, p_0]
    P_init = P_init.unsqueeze(0).unsqueeze(-2)
    # Check that T is computed correctly
    # These should be the polynomials P_{n+1} and P_n
    # Pnp1n = polymatmul(T[m], P_init).squeeze()
    # Bottom-up multiplication algorithm to avoid recursion
    S = [None] * m
    Tidentity = torch.eye(2).unsqueeze(0).unsqueeze(3)
    S[0] = v[:, 1::2, None, None, None] * T[0][::2]
    S[0][:, :, :, :, :1] += v[:, ::2, None, None, None] * Tidentity
    for i in range(1, m):
        S[i] = polymatmul(S[i - 1][:, 1::2], T[i][::2])
        S[i][:, :, :, :, :S[i - 1].shape[-1]] += S[i - 1][:, ::2]
    result = polymatmul(S[m - 1][:, :, [1], :, :n-1], P_init).squeeze(1).squeeze(1).squeeze(1)
    return result
def ops_transpose_mult_br(a, b, c, p0, p1, v):
    """Fast algorithm to multiply P^T v where P is the matrix of coefficients of
    OPs, specified by the coefficients a, b, c, and the starting polynomials p0,
    p_1. Implementation with bit-reversal.

    In particular, the recurrence is
        P_{n+2}(x) = (a[n] x + b[n]) P_{n+1}(x) + c[n] P_n(x).

    Parameters:
        a: array of length n
        b: array of length n
        c: array of length n
        p0: real number representing P_0(x).
        p1: pair of real numbers representing P_1(x).
        v: (batch_size, n)
    Return:
        result: P^T v.
    """
    n = v.shape[-1]
    m = int(np.log2(n))
    assert n == 1 << m, "Length n must be a power of 2."
    # Preprocessing: compute T_{i:j}, the transition matrix from p_i to p_j.
    T_br = [None] * (m + 1)
    # Lowest level, filled with T_{i:i+1}
    # n matrices, each 2 x 2, with coefficients being polynomials of degree <= 1
    T_br[0] = torch.zeros(n, 2, 2, 2)
    T_br[0][:, 0, 0, 1] = a
    T_br[0][:, 0, 0, 0] = b
    T_br[0][:, 0, 1, 0] = c
    T_br[0][:, 1, 0, 0] = 1.0
    # Bit-reversed ordering puts the two halves to be combined at each level
    # into contiguous slices, so one batched polymatmul per level suffices.
    br_perm = bitreversal_permutation(n)
    T_br[0] = T_br[0][br_perm]
    for i in range(1, m + 1):
        T_br[i] = polymatmul(T_br[i - 1][n >> i:], T_br[i - 1][:n >> i])
    P_init = torch.tensor([p1, [p0, 0.0]], dtype=torch.float)  # [p_1, p_0]
    P_init = P_init.unsqueeze(0).unsqueeze(-2)
    # Check that T_br is computed correctly
    # These should be the polynomials P_{n+1} and P_n
    # Pnp1n = polymatmul(T_br[m], P_init).squeeze()
    v_br = v[:, br_perm]
    # Bottom-up multiplication algorithm to avoid recursion
    S_br = [None] * m
    Tidentity = torch.eye(2).unsqueeze(0).unsqueeze(3)
    S_br[0] = v_br[:, n//2:, None, None, None] * T_br[0][:n // 2]
    S_br[0][:, :, :, :, :1] += v_br[:, :n//2, None, None, None] * Tidentity
    for i in range(1, m):
        S_br[i] = polymatmul(S_br[i - 1][:, (n >> (i + 1)):], T_br[i][:(n >> (i + 1))])
        S_br[i][:, :, :, :, :S_br[i - 1].shape[-1]] += S_br[i - 1][:, :(n >> (i + 1))]
    result = polymatmul(S_br[m - 1][:, :, [1], :, :n-1], P_init).squeeze(1).squeeze(1).squeeze(1)
    return result
def chebyshev_transpose_mult_slow(v):
    """Naive O(n^2) multiplication P^T v, where row i of P holds the monomial
    coefficients of the i-th Chebyshev polynomial.

    Parameters:
        v: (batch_size, n)
    Return:
        P^T v: (batch_size, n)
    """
    n = v.shape[-1]
    # Build the coefficient matrix row by row from unit coefficient vectors.
    coeff_rows = np.zeros((n, n), dtype=np.float32)
    unit = np.eye(n)
    for row in range(n):
        coeff_rows[row, :row + 1] = chebyshev.cheb2poly(unit[row])
    return v @ torch.tensor(coeff_rows)
def legendre_transpose_mult_slow(v):
    """Naive O(n^2) multiplication P^T v, where row i of P holds the monomial
    coefficients of the i-th Legendre polynomial.

    Parameters:
        v: (batch_size, n)
    Return:
        P^T v: (batch_size, n)
    """
    n = v.shape[-1]
    # Build the coefficient matrix row by row from unit coefficient vectors.
    coeff_rows = np.zeros((n, n), dtype=np.float32)
    unit = np.eye(n)
    for row in range(n):
        coeff_rows[row, :row + 1] = legendre.leg2poly(unit[row])
    return v @ torch.tensor(coeff_rows)
def ops_transpose_mult_test():
    """Check the fast transpose-multiply (natural and bit-reversed variants)
    against the naive O(n^2) references for Chebyshev and Legendre
    polynomials."""
    # Trying to find memory leak
    # n = 64
    # batch_size = 1000
    n = 8
    batch_size = 2
    v = torch.randn(batch_size, n)
    # Chebyshev polynomials: recurrence coefficients a=2, b=0, c=-1
    result = ops_transpose_mult(2.0 * torch.ones(n), torch.zeros(n), -torch.ones(n), 1.0, (0.0, 1.0), v)
    result_br = ops_transpose_mult_br(2.0 * torch.ones(n), torch.zeros(n), -torch.ones(n), 1.0, (0.0, 1.0), v)
    result_slow = chebyshev_transpose_mult_slow(v)
    assert torch.allclose(result, result_slow)
    assert torch.allclose(result, result_br)
    # Legendre polynomials: a=(2n+3)/(n+2), b=0, c=-(n+1)/(n+2)
    n_range = torch.arange(n, dtype=torch.float)
    result = ops_transpose_mult((2 * n_range + 3) / (n_range + 2), torch.zeros(n), -(n_range + 1) / (n_range + 2), 1.0, (0.0, 1.0), v)
    result_br = ops_transpose_mult_br((2 * n_range + 3) / (n_range + 2), torch.zeros(n), -(n_range + 1) / (n_range + 2), 1.0, (0.0, 1.0), v)
    result_slow = legendre_transpose_mult_slow(v)
    assert torch.allclose(result, result_slow)
    assert torch.allclose(result, result_br)
# Entry point: run the self-test comparing fast vs naive implementations.
if __name__ == '__main__':
    ops_transpose_mult_test()
    # TODO: there might be a memory leak, trying to find it here
    # for _ in range(1000):
    #     temp = polymatmul(A, B)
| StarcoderdataPython |
1604991 | # Problem Link : Check Sheet link below
# Excel-Sheet Link : https://drive.google.com/file/d/1L3EOLDMs-Fx2XoKclkCg1OVymDGh6psP/view?usp=sharing
# Youtube Video Link :
# Q> Find Union and Intersections of 2 Arrays
# Fundamental / Naive Approach
def Solution_1(Array_1, Array_2):  # Time: O(m*n), Space: O(m+n) for the outputs
    """Return (union, intersection) of two lists, preserving first-seen order.

    Fix: operate on a copy of Array_1 — the original aliased the union to
    Array_1 and therefore mutated the caller's list.
    """
    union = list(Array_1)  # copy: do not mutate the caller's input
    intersection = []
    for value in Array_2:
        if value in union:  # O(m) membership test -> O(m*n) total
            intersection.append(value)
        else:
            union.append(value)
    return union, intersection
# Time : O((m(log(m) + n(log(n)) + (m + n)) ,Space : O(1)
# Time: O(m log m + n log n + m + n), Space: O(m + n) for the output lists.
def Solution_2(Array_1, Array_2):
    """Sorted two-pointer union/intersection.

    Note: sorts both input lists in place (as the original did).
    Fix: after the merge loop, drain BOTH arrays. The original only drained
    Array_2, silently dropping leftover elements of Array_1 — e.g.
    Solution_2([10], [1, 2]) lost the 10. (The author's own commented-out
    attempt noting this has been removed.)
    """
    Array_1.sort()
    Array_2.sort()
    # Make Array_1 the shorter list (pure convention; both tails are drained).
    if len(Array_1) > len(Array_2):
        Array_1, Array_2 = Array_2, Array_1
    i = 0
    j = 0
    union = []
    intersection = []
    while i <= len(Array_1) - 1 and j <= len(Array_2) - 1:
        if Array_1[i] > Array_2[j]:
            union.append(Array_2[j])
            j += 1
        elif Array_1[i] < Array_2[j]:
            union.append(Array_1[i])
            i += 1
        else:
            union.append(Array_1[i])
            intersection.append(Array_1[i])
            i += 1
            j += 1
    # Drain whichever list still has elements left.
    while i < len(Array_1):
        union.append(Array_1[i])
        i += 1
    while j < len(Array_2):
        union.append(Array_2[j])
        j += 1
    return union, intersection
# Demo driver: compute union/intersection of two sample arrays with the
# naive approach and print them (union sorted for readability).
Array_1 = [2, 1, 3, 4, 5, 6, 7]
Array_2 = [2, 5, 4, 9, 8, 7, 10, 11, 13, 45]
UNION, INTERSECTION = Solution_1(Array_1, Array_2)
UNION.sort()
print(f"The union of 2 Arrays is : {UNION}")
print(f"The intersections of 2 Arrays is : {INTERSECTION}")
# This Code is Written by <NAME> aka The "Dead_Coder"
| StarcoderdataPython |
1729301 | from django.contrib import admin
from .models import Thing, Country, Continent, AdministrativeArea, Landform, Place, Text
# Register your models here.
# All place-related models use the default ModelAdmin; register them in bulk.
for model in (Thing, Country, Continent, AdministrativeArea, Landform, Place, Text):
    admin.site.register(model)
3219955 | <gh_stars>0
"""
Constants and other config variables used throughout the packagemanager module
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
# Configuration command/response channel prefixes (presumably message-bus
# topics — confirm against the publisher/subscriber code).
CONFIGURATION_CMD_CHANNEL = 'configuration/command/'
CONFIGURATION_RESP_CHANNEL = 'configuration/response/'
# Configuration paths
TRUSTED_REPOSITORIES_LIST = 'trustedRepositories'
# Configuration paths that support append and remove
CONFIGURATION_APPEND_REMOVE_PATHS_LIST = [
    'sotaSW', 'trustedRepositories', 'ubuntuAptSource']
| StarcoderdataPython |
120125 | <gh_stars>1-10
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['Config', 'URLs', 'download_data', 'file_extract', 'download_file_from_google_drive', 'untar_data']
# Cell
import os
import shutil
import requests
from pathlib import Path
from tqdm.notebook import tqdm
import zipfile, tarfile
# Cell
class Config:
    """Holds the on-disk cache location (~/.aiadv).

    Instantiating ensures the cache directory exists.
    """
    # Shared cache root, expanded once at class-definition time.
    config_path = Path('~/.aiadv').expanduser()

    def __init__(self):
        self.config_path.mkdir(parents=True, exist_ok=True)
class URLs:
    """Google-Drive-hosted dataset descriptors plus local-path helpers.

    Each dataset is a dict with a Drive file 'id' and a local 'fname'.
    """
    # Datasets
    YELP_REIVEWS = {'id': '1G42LXv72DrhK4QKJoFhabVL4IU6v2ZvB',
                    'fname': 'yelp_reveiw.csv'}
    MOVIE_LENS_SAMPLE = {'id': '1k2y0qC0E3oHeGA5a427hRgfbW7hnQBgF',
                         'fname': 'movie_lens_sample.zip'}
    ENG_FRA = {'id': '1dU-cTcPxHlpoFMnWe21jB4n6GRJdLdJO',
               'fname': 'eng_fra.txt'}

    # fix: these helpers take no self/cls and only worked because they were
    # always called on the class; mark them as the staticmethods they are
    # (class-level calls keep working; instance calls now work too).
    @staticmethod
    def path(ds=None):
        """Local cache path for dataset descriptor ``ds``."""
        fname = ds['fname']
        path = Config.config_path/fname
        return path

    @staticmethod
    def stem(path):
        """Drop a trailing .gz/.zip suffix; other paths are returned unchanged."""
        if str(path).endswith('gz') or str(path).endswith('zip'):
            parent = path.parent
            return parent/path.stem
        else:
            return path
# Cell
def download_data(ds, force_download=False):
    """Download dataset ``ds`` into the local cache unless already present.

    ``ds`` is a descriptor dict with 'id' (Drive file id) and 'fname' keys,
    e.g. URLs.YELP_REIVEWS. Returns the local destination Path.
    (Docstring corrected: the original said "Download `url` to `fname`".)
    """
    dest = URLs.path(ds)
    dest.parent.mkdir(parents=True, exist_ok=True)
    if not dest.exists() or force_download:
        download_file_from_google_drive(ds['id'], dest, overwrite=force_download)
    return dest
# Cell
def file_extract(fname):
    """Extract ``fname`` with tarfile/zipfile and delete the archive.

    .gz -> extracted to its stem directory; .zip likewise; .csv/.txt are
    returned untouched; anything else raises.
    """
    fname_str = str(fname)
    if fname_str.endswith('gz'):
        dest = URLs.stem(fname)
        # fix: close the archive deterministically (it was left open)
        with tarfile.open(fname, 'r:gz') as tar:
            tar.extractall(dest)
        os.remove(fname)
        return dest
    elif fname_str.endswith('zip'):
        dest = URLs.stem(fname)
        with zipfile.ZipFile(fname) as archive:
            archive.extractall(dest)
        os.remove(fname)
        return dest
    elif fname_str.endswith('csv') or fname_str.endswith('txt'):
        return fname
    else:
        raise Exception(f'Unrecognized archive: {fname}')
# Cell
def download_file_from_google_drive(id, dest, overwrite=False):
    """Download the Google Drive file ``id`` to ``dest`` unless it exists and
    not ``overwrite``. Handles Drive's large-file confirmation token and
    streams the body to disk with a tqdm progress bar."""
    if os.path.exists(dest) and not overwrite: return
    print("Trying to fetch {}".format(dest.name))

    def get_confirm_token(response):
        # Drive gates large files behind a 'download_warning' cookie token.
        for key, value in response.cookies.items():
            if key.startswith('download_warning'):
                return value
        return None

    def save_response_content(response, destination, pbar=None):
        # Stream the body in 1 MB chunks; fall back to an unbounded bar when
        # the server sends no Content-Length.
        CHUNK_SIZE = 1024*1024
        show_progress = True
        try: file_size = int(response.headers["Content-Length"])
        except: show_progress = False
        with open(destination, "wb") as f:
            if show_progress: pbar = tqdm(unit="MB", total=int(file_size/CHUNK_SIZE))
            else: pbar = tqdm(unit="MB")
            for chunk in response.iter_content(CHUNK_SIZE):
                if chunk: # filter out keep-alive new chunks
                    pbar.update()
                    f.write(chunk)

    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params = { 'id' : id }, stream = True)
    token = get_confirm_token(response)
    if token:
        # re-request with the confirmation token to get the real payload
        params = { 'id' : id, 'confirm' : token }
        response = session.get(URL, params = params, stream = True)
    save_response_content(response, dest)
# Cell
def untar_data(ds, force_download=False, extract_func=file_extract):
    """Ensure dataset ``ds`` is downloaded and extracted; return the local path.

    With ``force_download`` the cached extraction is removed first.
    """
    dest = URLs.path(ds)
    stem = URLs.stem(dest)
    # fix: replaced the bare `try: os.remove except: shutil.rmtree` with an
    # explicit file-vs-directory check; also dropped the unused local `fname`.
    if force_download and stem.exists():
        if stem.is_dir():
            shutil.rmtree(stem)
        else:
            os.remove(stem)
    if not stem.exists():
        download_data(ds)
        path = extract_func(dest)
        return path
    return stem
27796 | # -*- coding: UTF-8 -*-
import numpy as np
from numpy.testing import assert_array_almost_equal
from spectral_clustering.spectral_embedding_ import spectral_embedding
def assert_first_col_equal(maps):
    """Assert that the first embedding column is constant (up to scale)."""
    n_rows = maps.shape[0]
    normalized_first_col = maps[:, 0] / maps[0, 0]
    assert_array_almost_equal(normalized_first_col, [1] * n_rows)
def test_spectral_embedding():
    """
    By the definition of the spectral embedding, the first column of the
    returned maps is a constant vector. (Translated from the original
    Chinese docstring.)
    """
    # Small symmetric weighted adjacency matrix.
    adjacency = np.array([
        [0., 0.8, 0.9, 0.],
        [0.8, 0., 0., 0.],
        [0.9, 0., 0., 1.],
        [0., 0., 1., 0.]])
    # Same embedding computed with both eigensolver backends.
    maps = spectral_embedding(
        adjacency, n_components=2, drop_first=False, eigen_solver="arpack")
    assert_first_col_equal(maps)
    maps_1 = spectral_embedding(
        adjacency, n_components=2, drop_first=False, eigen_solver="lobpcg")
    assert_first_col_equal(maps_1)
| StarcoderdataPython |
1603026 | import unittest2 as unittest
import pymongo
import time
import random
import threading
from oplogreplay import OplogReplayer
SOURCE_HOST = '127.0.0.1:27017'
DEST_HOST = '127.0.0.1:27018'
TESTDB = 'testdb'
# Inherit from OplogReplayer to count number of processed_op methodcalls.
class CountingOplogReplayer(OplogReplayer):
    """OplogReplayer that counts every replayed op in a class-level counter."""
    # Shared across instances; reset in TestOplogReplayer.setUp.
    count = 0

    def process_op(self, ns, raw):
        OplogReplayer.process_op(self, ns, raw)
        CountingOplogReplayer.count += 1
class TestOplogReplayer(unittest.TestCase):
    """ TestCase for the OplogReplayer.

    Each test performs the following (see setUp and tearDown for more details):
        * delete test databases
        * start an OplogReplayer
        * perform some actions (inserts, etc.)
        * wait for the OplogReplayer to finish replaying ops
        * assertions
        * stop the OplogReplayer

    NOTE(review): this is Python 2 era code (``xrange``, ``pymongo.Connection``)
    and requires live mongods on SOURCE_HOST and DEST_HOST.
    """

    @classmethod
    def setUpClass(cls):
        # Create connections to both test databases.
        cls.source = pymongo.Connection(SOURCE_HOST)
        cls.dest = pymongo.Connection(DEST_HOST)

    def _start_replay(self, **kwargs):
        """Start a fresh CountingOplogReplayer in a background thread."""
        # Stop the OplogReplayer before starting a new one.
        self._stop_replay()
        # Init & start OplogReplayer, in a separate thread.
        self.oplogreplayer = CountingOplogReplayer(
            SOURCE_HOST, DEST_HOST, poll_time=0.1, **kwargs)
        self.thread = threading.Thread(target=self.oplogreplayer.start)
        self.thread.start()

    def _stop_replay(self):
        """Stop the replayer (if any) and join its worker thread."""
        # Stop OplogReplayer & join its thread.
        if getattr(self, 'oplogreplayer', None):
            self.oplogreplayer.stop()
        if getattr(self, 'thread', None):
            self.thread.join()
        # Delete oplogreplayer & thread.
        self.oplogreplayer = None
        self.thread = None

    def setUp(self):
        # Drop test databases.
        self.source.drop_database(TESTDB)
        self.dest.drop_database(TESTDB)
        self.dest.drop_database('oplogreplay')
        # Sleep a little to allow drop database operations to complete.
        time.sleep(0.05)
        # Remember Database objects.
        self.sourcedb = self.source.testdb
        self.destdb = self.dest.testdb
        # Stop replay, in case it was still running from a previous test.
        self._stop_replay()
        # Reset global counter & start OplogReplayer.
        CountingOplogReplayer.count = 0
        self._start_replay()

    def tearDown(self):
        self._stop_replay()

    def _synchronous_wait(self, target, timeout=3.0):
        """ Synchronously wait for the oplogreplay to finish.

        Waits until the oplog's retry_count hits target, but at most
        timeout seconds.
        """
        wait_until = time.time() + timeout
        while time.time() < wait_until:
            if CountingOplogReplayer.count == target:
                return
            time.sleep(0.05)
        # Synchronously waiting timed out - we should alert this.
        raise Exception('retry_count was only %s/%s after a %.2fsec wait' % \
                        (CountingOplogReplayer.count, target, timeout))

    def assertCollectionEqual(self, coll1, coll2):
        """Assert both collections contain exactly the same documents."""
        self.assertEqual(coll1.count(), coll2.count(),
                         msg='Collections have different count.')
        for obj1 in coll1.find():
            obj2 = coll2.find_one(obj1)
            self.assertEqual(obj1, obj2)

    def assertDatabaseEqual(self, db1, db2):
        """Assert both databases have identical collections and contents."""
        self.assertListEqual(db1.collection_names(), db2.collection_names(),
                             msg='Databases have different collections.')
        for coll in db1.collection_names():
            self.assertCollectionEqual(db1[coll], db2[coll])

    def test_writes(self):
        """Mixed inserts/updates/removes/upserts are replayed faithfully."""
        self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 1})
        self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 2})
        self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 3})
        self.sourcedb.testcoll.remove({'nr': 3})
        self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 4})
        self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 5})
        self.sourcedb.testcoll.insert({'content': '...', 'nr': 6})
        self.sourcedb.testcoll.update({'nr': 6}, {'$set': {'content': 'newContent'}})
        self.sourcedb.testcoll.update({'nr': 97}, {'$set': {'content': 'newContent'}})
        self.sourcedb.testcoll.update({'nr': 8}, {'$set': {'content': 'newContent'}}, upsert=True)
        self.sourcedb.testcoll.remove({'nr': 99})
        self.sourcedb.testcoll.remove({'nr': 3})
        self.sourcedb.testcoll.remove({'nr': 4})
        self.sourcedb.testcoll.insert({'content': 'new content', 'nr': 3})
        self.sourcedb.testcoll.insert({'content': 'new content', 'nr': 4})
        # Removes and updates that don't do anything will not hit the oplog:
        self._synchronous_wait(12)
        # Test that the 2 test databases are identical.
        self.assertDatabaseEqual(self.sourcedb, self.destdb)

    def _perform_bulk_inserts(self, nr=100):
        """Insert ``nr`` random documents into the source test collection."""
        for i in xrange(nr):
            obj = { 'content': '%s' % random.random(),
                    'nr': random.randrange(100000) }
            self.sourcedb.testcoll.insert(obj)

    def test_bulk_inserts(self):
        self._perform_bulk_inserts(1000)
        self._synchronous_wait(1000)
        # Test that the 2 test databases are identical.
        self.assertDatabaseEqual(self.sourcedb, self.destdb)

    def test_discontinued_replay(self):
        """Replayer catches up after being stopped mid-stream, replaying each op once."""
        self._perform_bulk_inserts(200)
        self._stop_replay()
        self._perform_bulk_inserts(150)
        self._start_replay()
        self._perform_bulk_inserts(100)
        self._synchronous_wait(450)
        # Test that the 2 test databases are identical.
        self.assertDatabaseEqual(self.sourcedb, self.destdb)
        # Test that no operation was replayed twice.
        self.assertEqual(CountingOplogReplayer.count, 450)

    def test_index_operations(self):
        # Create an index, then test that it was created on destionation.
        index = self.sourcedb.testidx.ensure_index('idxfield')
        self._synchronous_wait(1)
        self.assertIn(index, self.destdb.testidx.index_information())
        # Delete the index, and test that it was deleted from destination.
        self.sourcedb.testidx.drop_index(index)
        self._synchronous_wait(2)
        self.assertNotIn(index, self.destdb.testidx.index_information())

    def test_replay_indexes(self):
        """With replay_indexes=False, index ops are not propagated."""
        # Create index1 on source + dest.
        index1 = self.sourcedb.testidx.ensure_index('idxfield1')
        # Restart OplogReplayer, without replaying indexes.
        self._start_replay(replay_indexes=False)
        # Create index2 on source only.
        index2 = self.sourcedb.testidx.ensure_index('idxfield2')
        # Delete index1 from source only.
        self.sourcedb.testidx.drop_index(index1)
        self._synchronous_wait(3)
        # Test indexes on source and destination.
        source_indexes = self.sourcedb.testidx.index_information()
        self.assertNotIn(index1, source_indexes)
        self.assertIn(index2, source_indexes)
        dest_indexes = self.destdb.testidx.index_information()
        self.assertIn(index1, dest_indexes)
        self.assertNotIn(index2, dest_indexes)

    def test_start_from_ts(self):
        """Starting from a given oplog timestamp skips earlier operations."""
        self._stop_replay()
        # Should not be replayed:
        self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 1})
        # Get last timestamp.
        obj = self.source.local.oplog.rs.find().sort('$natural', -1).limit(1)[0]
        lastts = obj['ts']
        # Should be replayed.
        self.sourcedb.testcoll.insert({'content': 'mycontent', 'nr': 1})
        self._start_replay(ts=lastts)
        self._synchronous_wait(1)
        self.assertEqual(self.destdb.testcoll.count(), 1)
| StarcoderdataPython |
3234570 | <filename>tests/performance_lighthouse.py
#-*- coding: utf-8 -*-
import sys
import socket
import ssl
import json
import requests
import urllib # https://docs.python.org/3/library/urllib.parse.html
import uuid
import re
from bs4 import BeautifulSoup
import config
from tests.utils import *
import gettext
_ = gettext.gettext
### DEFAULTS
# Google PageSpeed Insights API key, supplied by the project's config module.
googlePageSpeedApiKey = config.googlePageSpeedApiKey
def run_test(langCode, url, strategy='mobile', category='performance'):
    """Run a Google PageSpeed (Lighthouse) audit for ``url`` and grade it.

    Returns (points 1-5 from the observed speed index, localized review text,
    dict of raw numeric audit values).

    API endpoints per category:
    perf = https://www.googleapis.com/pagespeedonline/v5/runPagespeed?category=performance&strategy=mobile&url=YOUR-SITE&key=YOUR-KEY
    a11y = https://www.googleapis.com/pagespeedonline/v5/runPagespeed?category=accessibility&strategy=mobile&url=YOUR-SITE&key=YOUR-KEY
    practise = https://www.googleapis.com/pagespeedonline/v5/runPagespeed?category=best-practices&strategy=mobile&url=YOUR-SITE&key=YOUR-KEY
    pwa = https://www.googleapis.com/pagespeedonline/v5/runPagespeed?category=pwa&strategy=mobile&url=YOUR-SITE&key=YOUR-KEY
    seo = https://www.googleapis.com/pagespeedonline/v5/runPagespeed?category=seo&strategy=mobile&url=YOUR-SITE&key=YOUR-KEY
    """
    # Install the per-language translation so _() resolves localized strings.
    language = gettext.translation('performance_lighthouse', localedir='locales', languages=[langCode])
    language.install()
    _ = language.gettext
    print(_('TEXT_RUNNING_TEST'))
    check_url = url.strip()
    pagespeed_api_request = 'https://www.googleapis.com/pagespeedonline/v5/runPagespeed?category={0}&url={1}&strategy={2}&key={3}'.format(category, check_url, strategy, googlePageSpeedApiKey)
    get_content = ''
    try:
        get_content = httpRequestGetContent(pagespeed_api_request)
    except: # breaking and hoping for more luck with the next URL
        print(
            'Error! Unfortunately the request for URL "{0}" failed, message:\n{1}'.format(
                check_url, sys.exc_info()[0]))
        pass
    json_content = ''
    try:
        json_content = json.loads(get_content)
    except: # might crash if checked resource is not a webpage
        print('Error! JSON failed parsing for the URL "{0}"\nMessage:\n{1}'.format(
            check_url, sys.exc_info()[0]))
        pass
    return_dict = {}
    # Start from the top-level metric items, then merge in every audit that
    # exposes a numericValue.
    return_dict = json_content['lighthouseResult']['audits']['metrics']['details']['items'][0]
    for item in json_content['lighthouseResult']['audits'].keys():
        try:
            return_dict[item] = json_content['lighthouseResult']['audits'][item]['numericValue']
        except:
            # this audit has no 'numericValue'; skip it
            pass
    # Grade the observed speed index (milliseconds) on a 1-5 scale.
    speedindex = int(return_dict['observedSpeedIndex'])
    review = ''
    if speedindex <= 500:
        points = 5
        review = _("TEXT_REVIEW_VERY_GOOD")
    elif speedindex <= 1200:
        points = 4
        review = _("TEXT_REVIEW_IS_GOOD")
    elif speedindex <= 2500:
        points = 3
        review = _("TEXT_REVIEW_IS_OK")
    elif speedindex <= 3999:
        points = 2
        review = _("TEXT_REVIEW_IS_BAD")
    elif speedindex > 3999:
        points = 1
        review = _("TEXT_REVIEW_IS_VERY_BAD")
    # Append localized metric lines (values converted from ms to seconds).
    review += _("TEXT_REVIEW_OBSERVED_SPEED").format(convert_to_seconds(return_dict["observedSpeedIndex"], False))
    review += _("TEXT_REVIEW_FIRST_MEANINGFUL_PAINT").format(convert_to_seconds(return_dict["firstMeaningfulPaint"], False))
    review += _("TEXT_REVIEW_FIRST_MEANINGFUL_PAINT_3G").format(convert_to_seconds(return_dict["first-contentful-paint-3g"], False))
    review += _("TEXT_REVIEW_CPU_IDLE").format(convert_to_seconds(return_dict["firstCPUIdle"], False))
    review += _("TEXT_REVIEW_INTERACTIVE").format(convert_to_seconds(return_dict["interactive"], False))
    review += _("TEXT_REVIEW_REDIRECTS").format(convert_to_seconds(return_dict["redirects"], False))
    review += _("TEXT_REVIEW_TOTAL_WEIGHT").format(int(return_dict["total-byte-weight"]/1000))
    return (points, review, return_dict)
| StarcoderdataPython |
1670964 | from __future__ import absolute_import
from functools import wraps
import types
from django.views.decorators.http import require_GET
from django.views.generic import View
from .response import get_jsonp_response
from .utils import get_callback
def jsonp(view):
    """Adapt a Django view so its payload is delivered as a JSONP response.

    Accepts either a function-based view or a class-based view (a ``View``
    subclass); anything else is rejected. Only GET requests are allowed,
    as is conventional for JSONP endpoints.
    """
    if isinstance(view, types.FunctionType):
        @require_GET
        @wraps(view)
        def jsonpfied_view(request, *args, **kwargs):
            payload = view(request, *args, **kwargs)
            return get_jsonp_response(payload, callback=get_callback(request))

        return jsonpfied_view

    if issubclass(view, View):
        class JSONPfiedCBV(view):
            http_method_names = ['get']  # only GET method is allowed for JSONP

            def get(self, request, *args, **kwargs):
                payload = super(JSONPfiedCBV, self).get(request, *args, **kwargs)
                return get_jsonp_response(payload, callback=get_callback(request))

        return JSONPfiedCBV

    raise NotImplementedError('Only django CBVs and FBVs are supported')
| StarcoderdataPython |
1797811 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Box Least Squares
=================
AstroPy-compatible reference implementation of the transit periorogram used
to discover transiting exoplanets.
"""
__all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"]
from .core import BoxLeastSquares, BoxLeastSquaresResults
| StarcoderdataPython |
54720 | <filename>direct/fsm/StatePush.py
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: direct.fsm.StatePush
__all__ = [
'StateVar', 'FunctionCall', 'EnterExit', 'Pulse', 'EventPulse',
'EventArgument']
from direct.showbase.DirectObject import DirectObject
class PushesStateChanges:
    """Base class for objects that hold a value and notify subscribers
    whenever that value changes.

    Subscribers must implement ``_recvStatePush(source)``.
    """

    def __init__(self, value):
        self._value = value
        self._subscribers = set()

    def destroy(self):
        # Destroying while subscribers remain would leave dangling
        # references. The original raised a bare string here, which is a
        # TypeError in Python 3 -- raise a real exception instead.
        if len(self._subscribers) != 0:
            raise RuntimeError('%s object still has subscribers in destroy(): %s' % (
                self.__class__.__name__, self._subscribers))
        del self._subscribers
        del self._value

    def getState(self):
        """Return the currently stored value."""
        return self._value

    def pushCurrentState(self):
        """Re-broadcast the current value to all subscribers; returns self."""
        self._handleStateChange()
        return self

    def _addSubscription(self, subscriber):
        # New subscribers immediately receive the current state.
        self._subscribers.add(subscriber)
        subscriber._recvStatePush(self)

    def _removeSubscription(self, subscriber):
        self._subscribers.remove(subscriber)

    def _handlePotentialStateChange(self, value):
        # Only notify when the value actually changed.
        oldValue = self._value
        self._value = value
        if oldValue != value:
            self._handleStateChange()

    def _handleStateChange(self):
        for subscriber in self._subscribers:
            subscriber._recvStatePush(self)
class ReceivesStateChanges:
    """Mixin for objects that subscribe to a single PushesStateChanges
    source and receive its value via ``_recvStatePush``."""

    def __init__(self, source):
        # Subscription is deferred until _finishInit so subclasses can
        # complete their own setup before the first push arrives.
        self._source = None
        self._initSource = source

    def _finishInit(self):
        self._subscribeTo(self._initSource)
        del self._initSource

    def destroy(self):
        self._unsubscribe()
        del self._source

    def _subscribeTo(self, source):
        # Drop any existing subscription before taking the new one.
        self._unsubscribe()
        self._source = source
        if source:
            source._addSubscription(self)

    def _unsubscribe(self):
        current = self._source
        if current:
            current._removeSubscription(self)
            self._source = None

    def _recvStatePush(self, source):
        # Default handler does nothing; subclasses override.
        pass
class StateVar(PushesStateChanges):
    """Mutable state container; set() notifies subscribers on change."""

    def set(self, value):
        # Calls the base implementation explicitly (not via self) so
        # subclass overrides of _handlePotentialStateChange are bypassed.
        PushesStateChanges._handlePotentialStateChange(self, value)

    def get(self):
        return PushesStateChanges.getState(self)
class StateChangeNode(PushesStateChanges, ReceivesStateChanges):
    """Node that subscribes to a source and re-publishes its state,
    allowing chains of state-change processors to be built."""

    def __init__(self, source):
        # Order matters: record the source, seed our own value from it,
        # then subscribe (which immediately pushes the current state).
        ReceivesStateChanges.__init__(self, source)
        PushesStateChanges.__init__(self, source.getState())
        ReceivesStateChanges._finishInit(self)

    def destroy(self):
        PushesStateChanges.destroy(self)
        ReceivesStateChanges.destroy(self)

    def _recvStatePush(self, source):
        # Forward the source's new value through our own change filter.
        self._handlePotentialStateChange(source._value)
class ReceivesMultipleStateChanges:
    """Mixin for objects that subscribe to multiple state sources, each
    identified by a caller-supplied key."""

    def __init__(self):
        self._key2source = {}
        self._source2key = {}

    def destroy(self):
        # Snapshot the keys first: _unsubscribe mutates _key2source while
        # we iterate, which raises RuntimeError on a live dict view in
        # Python 3 (the original iterated .keys() directly).
        keys = list(self._key2source.keys())
        for key in keys:
            self._unsubscribe(key)
        del self._key2source
        del self._source2key

    def _subscribeTo(self, source, key):
        # Replace any existing subscription registered under this key.
        self._unsubscribe(key)
        self._key2source[key] = source
        self._source2key[source] = key
        source._addSubscription(self)

    def _unsubscribe(self, key):
        if key in self._key2source:
            source = self._key2source[key]
            source._removeSubscription(self)
            del self._key2source[key]
            del self._source2key[source]

    def _recvStatePush(self, source):
        # Route single-source pushes to the keyed handler.
        self._recvMultiStatePush(self._source2key[source], source)

    def _recvMultiStatePush(self, key, source):
        # Default handler does nothing; subclasses override.
        pass
class FunctionCall(ReceivesMultipleStateChanges, PushesStateChanges):
    """Calls ``func`` whenever any of its PushesStateChanges arguments
    changes, substituting the current value of each such argument.

    Positional arguments are keyed by index, keyword arguments by name.
    """

    def __init__(self, func, *args, **kArgs):
        self._initialized = False
        ReceivesMultipleStateChanges.__init__(self)
        PushesStateChanges.__init__(self, None)
        self._func = func
        self._args = args
        self._kArgs = kArgs
        # "Baked" copies hold plain values; state-pushing arguments are
        # replaced by their current state and subscribed to for updates.
        # (Fixed: the original used Python-2-only xrange/iteritems.)
        self._bakedArgs = []
        self._bakedKargs = {}
        for key, arg in enumerate(self._args):
            if isinstance(arg, PushesStateChanges):
                self._bakedArgs.append(arg.getState())
                self._subscribeTo(arg, key)
            else:
                self._bakedArgs.append(arg)
        for key, arg in self._kArgs.items():
            if isinstance(arg, PushesStateChanges):
                self._bakedKargs[key] = arg.getState()
                self._subscribeTo(arg, key)
            else:
                self._bakedKargs[key] = arg
        self._initialized = True

    def destroy(self):
        ReceivesMultipleStateChanges.destroy(self)
        PushesStateChanges.destroy(self)
        del self._func
        del self._args
        del self._kArgs
        del self._bakedArgs
        del self._bakedKargs

    def getState(self):
        """Return the current (args, kwargs) snapshot that would be passed
        to the wrapped function."""
        return (
            tuple(self._bakedArgs), dict(self._bakedKargs))

    def _recvMultiStatePush(self, key, source):
        # String keys came from keyword arguments, integer keys from
        # positional arguments.
        if isinstance(key, str):
            self._bakedKargs[key] = source.getState()
        else:
            self._bakedArgs[key] = source.getState()
        self._handlePotentialStateChange(self.getState())

    def _handleStateChange(self):
        # Suppress calls triggered during __init__'s initial subscriptions.
        if self._initialized:
            self._func(*self._bakedArgs, **self._bakedKargs)
        PushesStateChanges._handleStateChange(self)
class EnterExit(StateChangeNode):
    """Calls ``enterFunc`` when the source state becomes true and
    ``exitFunc`` when it becomes false; the tracked state is coerced
    to bool, so e.g. a 1 -> 2 change is not a transition."""

    def __init__(self, source, enterFunc, exitFunc):
        self._enterFunc = enterFunc
        self._exitFunc = exitFunc
        StateChangeNode.__init__(self, source)

    def destroy(self):
        StateChangeNode.destroy(self)
        del self._exitFunc
        del self._enterFunc

    def _handlePotentialStateChange(self, value):
        # Track truthiness only.
        StateChangeNode._handlePotentialStateChange(self, bool(value))

    def _handleStateChange(self):
        callback = self._enterFunc if self._value else self._exitFunc
        callback()
        StateChangeNode._handleStateChange(self)
class Pulse(PushesStateChanges):
    """Boolean state that is normally False; sendPulse() flips it True and
    immediately back to False, producing two change notifications."""

    def __init__(self):
        PushesStateChanges.__init__(self, False)

    def sendPulse(self):
        self._handlePotentialStateChange(True)
        self._handlePotentialStateChange(False)
class EventPulse(Pulse, DirectObject):
    """Pulse that fires whenever the given messenger event occurs."""

    def __init__(self, event):
        Pulse.__init__(self)
        self.accept(event, self.sendPulse)

    def destroy(self):
        # Stop listening before tearing down the pulse state.
        self.ignoreAll()
        Pulse.destroy(self)
class EventArgument(PushesStateChanges, DirectObject):
    """Publishes the value of a chosen positional argument of an event."""

    def __init__(self, event, index=0):
        PushesStateChanges.__init__(self, None)
        # Which positional event argument to publish.
        self._index = index
        self.accept(event, self._handleEvent)
        return

    def destroy(self):
        self.ignoreAll()
        del self._index
        PushesStateChanges.destroy(self)

    def _handleEvent(self, *args):
        # Raises IndexError if the event supplies fewer arguments.
        self._handlePotentialStateChange(args[self._index])
class AttrSetter(StateChangeNode):
    """Assigns the tracked state to ``object.attrName`` on every change."""

    def __init__(self, source, object, attrName):
        self._object = object
        self._attrName = attrName
        StateChangeNode.__init__(self, source)
        # Push once so the attribute reflects the current state immediately.
        self._handleStateChange()

    def _handleStateChange(self):
        setattr(self._object, self._attrName, self._value)
        StateChangeNode._handleStateChange(self)
150172 | <reponame>edith007/The-Movie-Database
from django.shortcuts import render, redirect
import urllib.request
from random import shuffle
from .models import Show, UserRating
from django.contrib.auth.models import User
from django.core.paginator import Paginator
def home(request):
    """Render the landing page with five randomly chosen shows."""
    all_shows = list(Show.objects.all())
    shuffle(all_shows)
    context = {'shows': all_shows[:5]}
    return render(request, 'movie/home.html', context)
def about(request):
    """Render the static about page."""
    return render(request, 'movie/about.html', {'title': "About"})
def search(request, query: str = ""):
    """Search TVMaze for shows matching ``query`` (taken from POST data or
    the URL) and render the result page.

    NOTE(review): this view calls ``requests`` and ``BeautifulSoup``, but
    only ``urllib.request`` is imported at the top of this module --
    confirm that ``import requests`` and ``from bs4 import BeautifulSoup``
    exist elsewhere, otherwise this raises NameError at runtime.
    """
    if request.method == 'POST' and 'query' in request.POST:
        query = request.POST['query']
    if not query:
        return render(request, 'movie/search.html')
    url = "http://api.tvmaze.com/search/shows?q=%s" % query
    resp = requests.get(url).json()
    if not resp:
        # No matches: re-render the search page, echoing the query.
        return render(request, 'movie/search.html', {'query': query})
    else:
        for each in resp:
            if each["show"]["summary"]:
                # Strip HTML tags from the summary for plain-text display.
                each["show"]["summary"] = BeautifulSoup(each["show"]["summary"], "lxml").text
            if not each["show"]["image"]:
                # Fall back to a bundled placeholder image.
                each["show"]["image"] = {"original": "/static/movie/default.png"}
    context = {
        'results': resp,
        'query': query,
        'title': "Search"
    }
    return render(request, 'movie/search.html', context)
def showlist(request, username: str = ""):
    """Render a user's rating list, paginated (25 per page) and sorted.

    ``?sort=`` values: 2 = oldest first, 3 = highest rating first,
    4 = lowest rating first; anything else falls back to newest first.
    """
    if not username:
        return redirect('movie-home')
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        return redirect('movie-home')
    sort_type = request.GET.get('sort')
    if not sort_type or not sort_type.isdigit():
        sort_type = 1
    sort_type = int(sort_type)
    # Map the sort selector to an ORM ordering; default is newest first.
    ordering = {2: 'pk', 3: '-rating', 4: 'rating'}.get(sort_type, '-pk')
    results = UserRating.objects.filter(user=user).order_by(ordering)
    paginator = Paginator(results, 25)
    page_obj = paginator.get_page(request.GET.get('page'))
    for result in page_obj:
        # Genres are stored comma-separated; split for template iteration.
        result.show.genres = result.show.genres.split(",")
    context = {
        'username': username,
        'title': f"{username}'s list",
        'page_obj': page_obj,
        'total': paginator.count,
        'sort_type': sort_type,
    }
    return render(request, 'movie/list.html', context)
def show(request, showid: int = 0):
    """Render a single show's detail page; for authenticated users also
    sync the show into the local DB and handle rating/position updates
    submitted via POST.

    NOTE(review): uses ``requests``, which is not imported in the visible
    module header -- confirm the import exists elsewhere.
    """
    if not showid:
        return redirect('movie-home')
    url = f"http://api.tvmaze.com/shows/{showid}"
    resp = requests.get(url).json()
    # NOTE(review): assumes TVMaze error payloads carry a numeric
    # ``status`` of 404 while valid shows carry a string status -- and
    # 'movie-show' looks like it may redirect back to this view; verify.
    if resp['status'] == 404:
        return redirect('movie-show')
    if resp["summary"]:
        # Strip HTML tags from the summary for plain-text display.
        resp["summary"] = BeautifulSoup(resp["summary"], "lxml").text
    if not resp["image"]:
        resp["image"] = {"original": "/static/movie/default.png"}
    if request.user.is_authenticated:
        # Create or refresh the cached Show row from the API response.
        try:
            sh = Show.objects.get(showid=showid)
            sh.image = resp['image']['original']
            sh.status = resp['status']
            sh.genres = ','.join(resp['genres'])
            sh.name = resp['name']
            sh.save()
        except Show.DoesNotExist:
            sh = Show.objects.create(showid=showid,
                                     image=resp['image']['original'],
                                     status=resp['status'],
                                     genres=','.join(resp['genres']),
                                     name=resp['name'])
        if request.method == 'POST':
            if "rating" in request.POST:
                value = int(request.POST['rating'])
                if value == 0:
                    # Rating 0 means "remove my rating".
                    UserRating.objects.get(show=sh, user=request.user).delete()
                    return render(request, 'movie/show.html', {'show': resp, 'title': resp['name'],
                                                               'watched': False, 'rating': 0})
                else:
                    try:
                        ur = UserRating.objects.get(show=sh, user=request.user)
                    except UserRating.DoesNotExist:
                        ur = UserRating(show=sh, user=request.user)
                    ur.rating = value
                    ur.save()
                    return render(request, 'movie/show.html', {'show': resp, 'title': resp['name'], 'watched': True,
                                                               'rating': value,
                                                               'position': ur.position})
            if "#" in request.POST:
                # '#' is the user's ranked position; steal it from whichever
                # rating currently holds it, then assign it to this show.
                try:
                    value = int(request.POST['#'])
                except ValueError:
                    value = 0
                try:
                    ur = UserRating.objects.get(user=request.user, position=value)
                    ur.position = 0
                    ur.save()
                except UserRating.DoesNotExist:
                    pass
                # NOTE(review): raises DoesNotExist if the user has not
                # rated this show yet -- confirm the template prevents that.
                ur = UserRating.objects.get(show=sh, user=request.user)
                ur.position = value
                ur.save()
                return render(request, 'movie/show.html', {'show': resp, 'title': resp['name'], 'watched': True,
                                                           'rating': ur.rating,
                                                           'position': ur.position})
    if not request.user.is_authenticated:
        return render(request, 'movie/show.html', {'show': resp, 'title': resp['name'], "watched": False, "rating": 0})
    try:
        ur = UserRating.objects.get(show=Show.objects.get(showid=showid), user=request.user)
        return render(request, 'movie/show.html', {'show': resp, 'title': resp['name'], 'watched': True,
                                                   'rating': ur.rating,
                                                   'position': ur.position})
    except UserRating.DoesNotExist:
        return render(request, 'movie/show.html', {'show': resp, 'title': resp['name'], "watched": False, "rating": 0})
| StarcoderdataPython |
167909 | <reponame>apampuch/PySRCG
from abc import ABC
from tkinter import *
from tkinter import ttk
from src import app_data
from src.CharData.accessory import *
from src.CharData.vehicle import Vehicle
from src.Tabs.three_column_buy_tab import ThreeColumnBuyTab
class VehicleAccessoriesTab(ThreeColumnBuyTab, ABC):
    """Buy/sell tab for vehicle accessories, mounts and vehicle weapons.

    Accessories may be attached to a specific vehicle (constrained by the
    vehicle's cargo space, and by body rating for mounts) or kept in the
    character's pool of unattached accessories.
    """

    def __init__(self, parent, buy_button_text, sell_button_text):
        super().__init__(parent, buy_button_text, sell_button_text)

        # Maps a display name to the container of accessories: either a
        # vehicle object, or the list of unattached accessories.
        self.accobj_dict = {}

        # Combobox selecting which vehicle (or the unattached pool) the
        # inventory list currently shows.
        self.accessory_things_box = ttk.Combobox(self, values=self.accobj_dict.keys(), state="readonly", width=30)
        self.fill_combobox()
        self.accessory_things_box.bind("<<ComboboxSelected>>", self.get_accobj)
        self.accessory_things_box.grid(column=2, row=1)

    def fill_combobox(self):
        """Rebuild the selector from the statblock's vehicles plus the
        unattached-accessories pool, and reset it to the pool."""
        self.accobj_dict = {}
        self.fill_stuff_with_accessories(self.statblock.vehicles)
        self.accobj_dict["Unattached Accessories"] = self.statblock.other_accessories
        self.accessory_things_box["values"] = list(self.accobj_dict.keys())
        self.accessory_things_box.set("Unattached Accessories")

    def fill_stuff_with_accessories(self, char_list):
        """Traverses entire character looking for stuff with an accessories property.

        Duplicate names get a numeric suffix so combobox keys stay unique.

        :param char_list: something in self.statblock that could have something with accessories
        :type char_list: list
        """
        for node in char_list:
            # check for duplicate names
            key = node.name
            # count names that contain the key we want to use;
            # regex strips any existing "(N)" dupe suffix before comparing
            dupe_count = 1
            for k in self.accobj_dict.keys():
                k = re.sub(r"\s*\(\d+\)", "", k)
                if k == key:
                    dupe_count += 1
            # if we have more than one of the thing we want, add the dupe count to the key
            if dupe_count > 1:
                key += " ({})".format(dupe_count)
            if hasattr(node, "accessories"):
                self.accobj_dict[key] = node  # .accessories

    def get_accobj(self, event):
        """Refill the inventory list box with the selected container's accessories."""
        # clear list box
        self.inventory_list.delete(0, END)
        for obj in self.statblock_inventory:
            self.inventory_list.insert(END, obj.name)

    @property
    def library_source(self):
        # Game-data catalog this tab buys from.
        return self.parent.game_data["Vehicle Accessories"]

    @property
    def statblock_inventory(self):
        # return self.accobj_dict[self.accessory_things_box.get()]
        # this one is done differently
        # we store the entire vehicle in the dict instead of the inventory unless it's other_accessories
        # so if it's an actual vehicle we need to get the accessories property from it and return that
        key = self.accessory_things_box.get()
        if key == "Unattached Accessories":
            return self.accobj_dict[key]
        else:
            return self.accobj_dict[key].accessories

    @property
    def selected_vehicle(self):
        # None when the unattached pool is selected.
        key = self.accessory_things_box.get()
        if key == "Unattached Accessories":
            return None
        else:
            return self.accobj_dict[key]

    @property
    def attributes_to_calculate(self):
        return ["cost"]

    def buy_callback(self, item):
        """Buy an item if cash, cargo space and (for mounts) body allow."""
        enough_cargo = True
        if hasattr(item, "cf_cost"):
            enough_cargo = self.has_cargo(item)
        can_mount = True
        if type(item) == Mount:
            can_mount = self.mount_callback(item)
        if app_data.pay_cash(item.cost, can_mount, enough_cargo):
            self.add_inv_item(item)

    def has_cargo(self, new_item):
        """Checks the accessories on the vehicle, returns true if we can fit the new one, false if we can't."""
        v = self.selected_vehicle
        # return true if not a vehicle, should only be true if it's other_accessories
        if v is None:
            return True
        cargo_total = 0
        for item in self.statblock_inventory:
            if hasattr(item, "cf_cost"):
                cargo_total += item.cf_cost
        return cargo_total + new_item.cf_cost <= v.cargo

    def mount_callback(self, new_mount) -> bool:
        """Checks the mounts already on the vehicle, returns true if we can fit the new mount, false if we can't."""
        v = self.selected_vehicle
        # return true if not a vehicle, should only be true if it's other_accessories
        if v is None:
            return True
        mount_total = 0
        for item in self.statblock_inventory:
            if type(item) == Mount:
                mount_total += item.body_cost
        return mount_total + new_mount.body_cost <= v.body

    def sell_callback(self, item_index):
        # Full refund of the item's cost, then remove it from inventory.
        self.statblock.cash += self.statblock_inventory[item_index].cost
        self.remove_inv_item(item_index)

    @property
    def recurse_check_func(self):
        # A catalog entry without a "cost" key is a category to recurse into.
        def recurse_check(val):
            return "cost" not in val.keys()
        return recurse_check

    @property
    def recurse_end_func(self):
        # Leaf entries become Mount / VehicleWeapon / Accessory objects,
        # distinguished by which keys their data dict carries.
        def recurse_end_callback(key, val, iid):
            try:
                if "recoil_compensation" in val.keys():
                    self.tree_item_dict[iid] = Mount(name=key, **val)
                elif "damage" in val.keys():
                    self.tree_item_dict[iid] = VehicleWeapon(name=key, **val)
                else:
                    self.tree_item_dict[iid] = Accessory(name=key, **val)
            except TypeError as e:
                print("Error with {}:".format(key))
                print(e)
                print()
        return recurse_end_callback

    def on_switch(self):
        self.fill_combobox()
        self.get_accobj(None)

    def load_character(self):
        self.on_switch()
| StarcoderdataPython |
91705 | from __future__ import division, print_function
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import os
from collections import OrderedDict
import time
from tcapy.util.mediator import Mediator
from tcapy.conf.constants import Constants
# Module-level singletons shared by both examples below.
constants = Constants()
# Directory containing the sample trade/order CSV files.
folder = constants.test_data_harness_folder
volatile_cache = Mediator.get_volatile_cache()
def tca_example_csv_trade_data_dukascopy():
    """Loads up trade/order data from CSV files and market data externally from Dukascopy. Does not use any databases, if
    you rarely use TCA, this is fine. However, for heavy use of TCA, we strongly recommend maintaining an internal tick
    database, as external downloading of data can be very slow.

    In this case we are simply calculating the slippage of every trade and orders above them.
    """
    # Imports are local so the heavy tcapy stack loads only when the example runs.
    from tcapy.analysis.tcaengine import TCAEngineImpl
    from tcapy.analysis.tcarequest import TCARequest
    from tcapy.analysis.algos.benchmark import BenchmarkArrival, BenchmarkMarketSpreadToMid
    from tcapy.analysis.algos.metric import MetricSlippage
    from tcapy.analysis.algos.resultsform import TimelineResultsForm
    tca_version = constants.tcapy_version
    tca_engine = TCAEngineImpl(version=tca_version)
    # The test trade/order data is populated between 25 Apr 2017-05 Jun 2017
    # with trades/orders for 'EURUSD', 'USDJPY' and 'EURJPY'
    csv_trade_order_mapping = OrderedDict([('trade_df', os.path.join(folder, 'small_test_trade_df.csv')),
                                           ('order_df', os.path.join(folder, 'small_test_order_df.csv'))])
    # Specify the TCA request (note: by specifiying multithreading is False, we avoid dependencies like Celery
    # Depending on how the caching is setup, tcapy may try to download market data in monthly/weekly chunks and cache them,
    # To force deletion of the cache you can run the below
    # volatile_cache.clear_cache()
    # However if you run TCA for the same period, it will load the market data from Redis/in-memory, rather than
    # downloading it externally from Dukascopy
    tca_request = TCARequest(start_date='05 May 2017', finish_date='10 May 2017', ticker=['EURUSD'],
                             tca_type='detailed',
                             trade_data_store='csv', market_data_store='dukascopy',
                             trade_order_mapping=csv_trade_order_mapping,
                             metric_calcs=[MetricSlippage()],
                             results_form=[TimelineResultsForm(metric_name='slippage', by_date='datehour', scalar=10000.0)],
                             benchmark_calcs=[BenchmarkArrival(), BenchmarkMarketSpreadToMid()],
                             use_multithreading=False)
    # Dictionary of dataframes as output from TCA calculation
    dict_of_df = tca_engine.calculate_tca(tca_request)
    print(dict_of_df.keys())
def tca_example_csv_trade_data_dukascopy_no_redis():
    """Running TCA calculation but without any Redis caching at all. In practice, this should be avoided, since it will
    likely be much slower, given we'll end up accessing market data/trade data a lot more often from a slow source.

    This is particularly an issue when we're downloading large samples of market data from an external source. For very small
    time periods this might be fine.
    """
    # Imports are local so the heavy tcapy stack loads only when the example runs.
    from tcapy.analysis.tcaengine import TCAEngineImpl
    from tcapy.analysis.tcarequest import TCARequest
    from tcapy.analysis.algos.benchmark import BenchmarkArrival, BenchmarkMarketSpreadToMid
    from tcapy.analysis.algos.metric import MetricSlippage
    from tcapy.analysis.algos.resultsform import TimelineResultsForm
    tca_version = constants.tcapy_version
    tca_engine = TCAEngineImpl(version=tca_version)
    # The test trade/order data is populated between 25 Apr 2017-05 Jun 2017
    # with trades/orders for 'EURUSD', 'USDJPY' and 'EURJPY'
    csv_trade_order_mapping = OrderedDict([('trade_df', os.path.join(folder, 'small_test_trade_df.csv')),
                                           ('order_df', os.path.join(folder, 'small_test_order_df.csv'))])
    # Specify the TCA request (note: by specifiying multithreading is False, we avoid dependencies like Celery
    # Depending on how the caching is setup, tcapy may try to download market data in monthly/weekly chunks and cache them,
    # To force deletion of the cache you can run the below
    # volatile_cache.clear_cache()
    # However if you run TCA for the same period, it will load the market data from Redis/in-memory, rather than
    # downloading it externally from Dukascopy
    tca_request = TCARequest(start_date='05 May 2017', finish_date='06 May 2017', ticker=['EURUSD'],
                             tca_type='detailed',
                             trade_data_store='csv', market_data_store='dukascopy',
                             trade_order_mapping=csv_trade_order_mapping,
                             metric_calcs=[MetricSlippage()],
                             results_form=[
                                 TimelineResultsForm(metric_name='slippage', by_date='datehour', scalar=10000.0)],
                             benchmark_calcs=[BenchmarkArrival(), BenchmarkMarketSpreadToMid()],
                             use_multithreading=False)
    # Disable every caching/handle option so nothing touches Redis.
    tca_request.multithreading_params = {'splice_request_by_dates': False,  # True or False
                                         'cache_period': 'month',  # month or week
                                         # Cache trade data in monthly/periodic chunks in Redis (reduces database calls a lot)
                                         'cache_period_trade_data': False,
                                         # Cache market data in monthly/periodic chunks in Redis (reduces database calls a lot)
                                         'cache_period_market_data': False,
                                         # Return trade data internally as handles (usually necessary for Celery)
                                         'return_cache_handles_trade_data': False,
                                         # Return market data internally as handles (usually necessary for Celery)
                                         'return_cache_handles_market_data': False,
                                         # Recommend using Celery, which allows us to reuse Python processes
                                         'parallel_library': 'single'
                                         }
    # Dictionary of dataframes as output from TCA calculation
    dict_of_df = tca_engine.calculate_tca(tca_request)
    print(dict_of_df.keys())
    market_df = dict_of_df['market_df']
    # Downsample the tick data to 1-minute bars for a quick inspection.
    market_df_minute = market_df.resample('1min').last()
    print(market_df_minute)
if __name__ == '__main__':
    # Time the selected example end-to-end.
    start = time.time()

    # tca_example_csv_trade_data_dukascopy()
    tca_example_csv_trade_data_dukascopy_no_redis()

    finish = time.time()
    print('Status: calculated ' + str(round(finish - start, 3)) + "s")
| StarcoderdataPython |
55584 | from setuptools import setup
# Minimal packaging configuration for the ``geo`` helper library.
setup(name='geo',
      version='0.1',
      description='Useful geoprospection processing methods',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['geo'],
      zip_safe=False)
| StarcoderdataPython |
3274238 | from rest_framework import serializers
from koalixcrm.accounting.accounting.product_category import ProductCategory
from koalixcrm.accounting.rest.product_categorie_rest import ProductCategoryMinimalJSONSerializer
from koalixcrm.crm.product.product_type import ProductType
from koalixcrm.crm.product.tax import Tax
from koalixcrm.crm.product.unit import Unit
from koalixcrm.crm.rest.tax_rest import OptionTaxJSONSerializer
from koalixcrm.crm.rest.unit_rest import OptionUnitJSONSerializer
class ProductJSONSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for ProductType with nested unit, tax and accounting
    product category references.

    Nested objects are resolved by their ``id`` field only.
    """

    productNumber = serializers.IntegerField(source='product_number', allow_null=False)
    unit = OptionUnitJSONSerializer(source='default_unit', allow_null=False)
    tax = OptionTaxJSONSerializer(allow_null=False)
    productCategory = ProductCategoryMinimalJSONSerializer(source='accounting_product_categorie', allow_null=False)

    class Meta:
        model = ProductType
        fields = ('id',
                  'productNumber',
                  'title',
                  'unit',
                  'tax',
                  'productCategory')
        depth = 1

    @staticmethod
    def _resolve(model, data):
        """Return the ``model`` instance referenced by ``data['id']``, or
        None when no dict/id was supplied."""
        if data and data.get('id') is not None:
            return model.objects.get(id=data['id'])
        return None

    def create(self, validated_data):
        """Create a new ProductType from deserialized nested data."""
        product = ProductType()
        product.product_number = validated_data['product_number']
        product.title = validated_data['title']
        product.default_unit = self._resolve(Unit, validated_data.pop('default_unit'))
        product.tax = self._resolve(Tax, validated_data.pop('tax'))
        # Fixed: the original assigned ``accounting_product_category``, a
        # stray attribute; the model field (see the serializer source= and
        # update() below) is ``accounting_product_categorie``.
        product.accounting_product_categorie = self._resolve(
            ProductCategory, validated_data.pop('accounting_product_categorie'))
        product.save()
        return product

    def update(self, instance, validated_data):
        """Update an existing ProductType.

        A nested dict without an id keeps the current related object; a
        missing/empty dict clears it. (The original used the current
        instance value as the ``get('id', ...)`` fallback, which could end
        up calling ``objects.get(id=None)`` and raising DoesNotExist.)
        """
        instance.title = validated_data['title']
        instance.product_number = validated_data['product_number']
        # Deserialize default_unit
        default_unit = validated_data.pop('default_unit')
        if default_unit:
            if default_unit.get('id') is not None:
                instance.default_unit = Unit.objects.get(id=default_unit['id'])
            # no id supplied: keep the existing unit
        else:
            instance.default_unit = None
        # Deserialize tax
        tax = validated_data.pop('tax')
        if tax:
            if tax.get('id') is not None:
                instance.tax = Tax.objects.get(id=tax['id'])
            # no id supplied: keep the existing tax
        else:
            instance.tax = None
        # Deserialize product category
        product_category = validated_data.pop('accounting_product_categorie')
        if product_category:
            if product_category.get('id') is not None:
                instance.accounting_product_categorie = ProductCategory.objects.get(
                    id=product_category['id'])
            # no id supplied: keep the existing category
        else:
            instance.accounting_product_categorie = None
        instance.save()
        return instance
| StarcoderdataPython |
141501 | <reponame>EricRemmerswaal/tensorflow<gh_stars>1000+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraphs for V2 control flow."""
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
class ControlFlowFuncGraph(func_graph.FuncGraph):
    """Contains control flow-specific FuncGraph logic."""

    def __init__(self, *args, **kwargs):
        super(ControlFlowFuncGraph, self).__init__(*args, **kwargs)
        outer_graph = self.outer_graph
        # Unlike tf.function, control flow FuncGraphs are generally created one per
        # op. This means hard-coding any outer device scopes in the body (rather
        # than inspecting the call-time placement of the control flow op) makes
        # sense.
        self._device_function_stack = outer_graph._device_function_stack.copy() # pylint: disable=protected-access

        self.is_control_flow_graph = True

        # In eager-outside-functions mode, inherit the outer graph's name
        # scope so ops created here are named consistently with the caller.
        if ops.executing_eagerly_outside_functions():
            func_graph.override_func_graph_name_scope(
                self, self.outer_graph.get_name_scope())
class CondBranchFuncGraph(ControlFlowFuncGraph):
  """FuncGraph for branches of tf.cond().

  This is used to distinguish cond branches from other functions.
  """
class WhileCondFuncGraph(ControlFlowFuncGraph):
  """FuncGraph for the condition of tf.while_loop().

  This is used to distinguish while conditions from other functions.
  """
class WhileBodyFuncGraph(ControlFlowFuncGraph):
  """FuncGraph for the body of tf.while_loop().

  This is used to distinguish while bodies from other functions.
  """
| StarcoderdataPython |
1691775 | import asyncio
import datetime
import json
import logging
import os
import queue
import re
import sys
import threading
import time
import typing
from dataclasses import dataclass
import aiofiles
import requests
logger = logging.getLogger(__name__)

# Type alias: Hyper-V VM identifiers are GUID strings.
VmGuid = str
@dataclass
class VMInfo:
    """Descriptor for a virtual machine exposing a serial port."""
    # VM GUID (WMI "Name" field)
    id: VmGuid
    # Human-readable VM name (WMI "ElementName" field)
    name: str
    # Path of the VM's COM port 1
    serial_port_path: str
class LogShipper:
    """
    Send logs over tcp

    Events are queued and posted by a background worker thread; ``stop()``
    enqueues a ``None`` sentinel and waits for the worker to drain.
    """

    def __init__(self, start: bool = False):
        self.queue = queue.Queue()
        self._socket_mutex = threading.Lock()
        self._worker = threading.Thread(target=self._process_events)
        self._session = requests.Session()
        if start:
            self._worker.start()

    def start(self):
        self._worker.start()

    def stop(self):
        logger.debug("Putting log shipper stop message")
        self.queue.put(None)
        logger.info("Waiting for log queue to empty")
        self._worker.join()
        logger.debug("Log queue is empty")

    def _process_events(self):
        # Only the None sentinel stops the loop. The original tested plain
        # truthiness, so any falsy payload (0, "", {}) would silently shut
        # the worker down and strand later events.
        while (event := self.queue.get()) is not None:
            self._write(event)

    def _write(self, data):
        # TODO, check for errors? retry?
        self._session.post(
            f"http://{sys.argv[1]}",
            json=data,
        )
class MachineEventEmitter:
    """
    Process VM change events using WMI vm_event subscription
    """

    # Human-readable names for Msvm_ComputerSystem WMI enum values.
    ENUMS = {
        "Msvm_ComputerSystem": {
            "EnabledState": {
                0: "Unknown",
                1: "Other",
                2: "Enabled",
                3: "Disabled",
                4: "Shutting Down",
                5: "Not Applicable",
                6: "Enabled but Offline",
                7: "In Test",
                8: "Deferred",
                9: "Quiesce",
                10: "Starting",
            },
            "HealthState": {
                5: "OK",
                20: "Major Failure",
                25: "Critical Failure",
            },
            "OperationalStatus": {
                2: "OK",
                3: "Degraded",
                5: "Predictive Failure",
                10: "Stopped",
                11: "In Service",
                15: "Dormant",
            },
        }
    }
    # RequestedState uses the same value mapping as EnabledState.
    ENUMS["Msvm_ComputerSystem"]["RequestedState"] = ENUMS["Msvm_ComputerSystem"][
        "EnabledState"
    ]

    def __init__(self):
        self.watcher = None
        self.ready = asyncio.Event()

    async def events(self) -> typing.AsyncGenerator[VMInfo, None]:
        """Yield a VMInfo for every VM that transitions to the Enabled state."""
        await self._create_event_monitor_process()
        logger.info("Processing events")
        while True:
            logger.debug("Waiting for event")
            # TODO handle powershell crashes
            event = await self.watcher.stdout.readline()
            event = event.decode()
            if not event:
                break
            event = event.strip()
            # Fixed: guard against blank lines -- the original indexed
            # event[0] unconditionally, which raises IndexError when the
            # stripped line is empty.
            if not event or event[0] != "{" or event[-1] != "}":
                logger.debug("%s", event)
                continue
            logger.info("Got event %s", event)
            data = json.loads(event)
            data["ComPort1Path"] = await self._get_serial_path(data["Name"])
            yield VMInfo(
                id=data["Name"],
                name=data["ElementName"],
                serial_port_path=data["ComPort1Path"],
            )

    async def list_virtual_machine_ports(self):
        """Return VMInfo for every VM that has a COM port 1 path configured."""
        vm_data = await self._ps_exec(
            "Get-VM"
            " | Select Id, VMName, @{n='Path'; e={$_.ComPort1.Path}}"
            " | Where-Object {$_.Path}"
        )
        return [
            VMInfo(
                id=vm["Id"],
                name=vm["VMName"],
                serial_port_path=vm["Path"],
            )
            for vm in vm_data
        ]

    async def _signal_ready(self):
        # WMI Event subscription has 2 second interval
        await asyncio.sleep(2)
        logger.info("Setting event watcher task ready")
        self.ready.set()

    async def _create_event_monitor_process(self):
        """Spawn a persistent powershell process and register a WMI event
        subscription that prints enabled-VM changes as compact JSON lines."""
        logger.info("Creating event watcher process")
        self.watcher = await asyncio.create_subprocess_exec(
            "powershell",
            "-Command",
            "-",
            stdin=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
        )
        logger.info("Registering WMI event")
        event_command = r"""
            $ew = Register-WmiEvent
                -Namespace root\virtualization\v2
                -Query "SELECT * FROM __InstanceModificationEvent WITHIN 2 WHERE TargetInstance ISA 'Msvm_ComputerSystem' AND TargetInstance.EnabledState = 2"
                -Action {
                    $e = $EventArgs.NewEvent.TargetInstance | Select HealthState, EnabledState, RequestedState, ElementName, Name;
                    Write-Host ($e | ConvertTo-Json -Compress)
                }
        """.replace(
            "\n", ""
        ).encode()
        logger.debug("Event command %s", event_command)
        self.watcher.stdin.write(event_command)
        self.watcher.stdin.write(b"\r\n")
        logger.debug("Waiting for watcher process stdin to drain")
        await self.watcher.stdin.drain()

        async def check_errors():
            # Surface powershell stderr output; terminate the watcher when
            # powershell reports a fully-qualified error.
            while not self.watcher.stderr.at_eof():
                err = await self.watcher.stderr.readline()
                err = err.decode().strip()
                if err:
                    logger.warning("%s", err)
                    if " FullyQualifiedErrorId " in err:
                        logger.error(
                            "Event watcher process logged an error. Terminating process"
                        )
                        self.watcher.terminate()

        asyncio.create_task(check_errors())
        await self._signal_ready()

    async def _get_serial_path(self, vm_id: VmGuid) -> str:
        """Return the COM port 1 path for a VM, or "" if unavailable."""
        # Validate before interpolation into the powershell command.
        if not re.match(r"^[A-Fa-f0-9-]+$", vm_id):
            logger.warning("Not a VmGuid %s", vm_id)
            raise ValueError("VM GUID required")
        serial_port_path = ""
        try:
            serial_port_path = await self._ps_exec(
                f"(Get-VM -Id {vm_id}).ComPort1.Path"
            )
        except AttributeError as e:
            logger.warning("Couldn't get serial port data for %s", vm_id)
            logger.error(e)
        return serial_port_path

    @staticmethod
    async def _ps_exec(command: str) -> str:
        """Run a powershell command and return its JSON-decoded output
        ("" on a non-zero exit code)."""
        command += " | ConvertTo-Json"
        logger.debug("PS EXEC %s", command)
        start = time.time()
        exec_result = await asyncio.create_subprocess_exec(
            "powershell", "-Command", command, stdout=asyncio.subprocess.PIPE
        )
        stdout, _ = await exec_result.communicate()
        stdout = stdout.decode().strip()
        for line in stdout.splitlines():
            logger.debug("PS OUT %s", line)
        rc = exec_result.returncode
        if rc != 0:
            logger.warning("PS ERR %d", rc)
            decoded_result = ""
        else:
            decoded_result = json.loads(stdout)
        logger.debug("Completed `%s` in %.2f seconds", command, time.time() - start)
        return decoded_result
class SerialTail:
    """Tails the serial-port named pipes of Hyper-V VMs.

    One asyncio task per VM reads the pipe line by line, strips ANSI escape
    sequences, and pushes cleaned messages onto the consumer-supplied queue.
    """

    def __init__(self, *, message_queue: queue.Queue):
        self._watchers = {}          # vm.id -> asyncio.Task
        self._queue = message_queue  # consumer-owned output queue

    def shutdown(self):
        """Cancel and discard every watcher task."""
        self._prune_watchers(prune_all=True)

    def watch(self, vm: VMInfo) -> None:
        """Start a watcher task for *vm*, replacing a finished one if present."""
        logger.info("Attempting to start watcher for %s (%s)", vm.name, vm.id)
        existing = self._watchers.get(vm.id)
        if existing is not None:
            if not existing.done():
                logger.warning("Logger already running for %s", vm.id)
                return
            logger.info(
                "Serial watcher is terminated for %s and will be replaced",
                vm.name,
            )
            # Retrieve the old task's outcome so a stored exception is logged
            # instead of re-raised (the original's bare .result() would crash
            # watch() whenever the previous task had failed).
            try:
                existing.result()
            except asyncio.CancelledError:
                pass
            except Exception as e:
                logger.warning("Previous watcher for %s failed: %s", vm.id, e)
            del self._watchers[vm.id]
        self._watchers[vm.id] = asyncio.create_task(self._watch_events(vm, self._queue))
        self._prune_watchers()

    @classmethod
    async def _watch_events(cls, vm: VMInfo, out_q: queue.Queue):
        """Read lines from the VM's serial pipe and enqueue them until EOF,
        retrying a limited number of times when the pipe is busy."""
        ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
        initial_tries = 10
        remaining_tries = initial_tries
        while remaining_tries > 0:
            try:
                async with aiofiles.open(
                    vm.serial_port_path, mode="r", encoding="utf8"
                ) as pipe:
                    logger.info("Successfully opened %s", vm.serial_port_path)
                    try:
                        while event := await pipe.readline():
                            message = ansi_escape.sub("", event.strip("\r\n"))
                            logger.debug("Got %s", message)
                            if not message:
                                continue
                            out_q.put(
                                {
                                    "time": datetime.datetime.now().isoformat(),
                                    "id": vm.id,
                                    "hostname": vm.name,
                                    "message": message,
                                }
                            )
                    except UnicodeDecodeError as e:
                        logger.exception(e)
                        remaining_tries -= 1
                        continue
            # Not sure if there's any other case this can happen besides VM is powered off
            except FileNotFoundError:
                # These cases can be ignored since the event watching code will catch
                # startup events and retry the port watcher thread for that VM
                logger.info(
                    "Pipe %s (for %s/%s) doesn't currently exist. VM likely isn't running",
                    vm.serial_port_path,
                    vm.name,
                    vm.id,
                )
                break
            except OSError as e:
                # When the pipe is already open somewhere else
                if e.errno == 22:
                    remaining_tries -= 1
                    logger.info(
                        "Pipe %s (for %s/%s) isn't available. Retrying",
                        vm.serial_port_path,
                        vm.name,
                        vm.id,
                    )
                    # Exponential backoff: .05 -> [0.1, 0.2, 0.4, ..., 25.6].
                    # Must be asyncio.sleep: the original time.sleep blocked
                    # the whole event loop, stalling every other watcher.
                    await asyncio.sleep(0.05 * 2 ** (initial_tries - remaining_tries))
                    continue
                logger.debug("Error attributes %s", dir(e))
                logger.exception(e)
                break
        if remaining_tries == 0:
            logger.warning(
                "Ran out of retries waiting on %s (for %s/%s)",
                vm.serial_port_path,
                vm.name,
                vm.id,
            )
        else:
            logger.info("Stopping logger for %s", vm.serial_port_path)

    def _prune_watchers(self, prune_all: bool = False):
        """Remove finished watcher tasks; with prune_all, cancel live ones too."""
        for vm_id in list(self._watchers):
            task = self._watchers[vm_id]
            if not (task.done() or prune_all):
                continue
            logger.warning("Removing dead watcher for %s", vm_id)
            task.cancel()
            # Only finished tasks have a retrievable result: the original
            # called .result() right after cancel(), which raises
            # InvalidStateError for a task still cancelling and
            # CancelledError for a cancelled one.
            if task.done():
                try:
                    task.result()
                except asyncio.CancelledError:
                    pass
                except Exception as e:
                    logger.warning("Watcher for %s failed: %s", vm_id, e)
            del self._watchers[vm_id]
async def watch_events(*, event_emitter: MachineEventEmitter) -> None:
    """Consume VM lifecycle events forever, attaching a serial watcher to
    each VM that reports a serial port."""
    async for vm_event in event_emitter.events():
        logger.info("Got vm_event %s", vm_event)
        if vm_event.serial_port_path:
            watcher.watch(vm_event)
        else:
            logger.warning("No serial port found for %s", vm_event.name)
async def process_events(*, event_emitter: MachineEventEmitter) -> None:
    """Seed watchers for the VMs already running, then follow lifecycle
    events until the event stream ends."""
    watch_task = asyncio.create_task(watch_events(event_emitter=event_emitter))
    await event_emitter.ready.wait()
    vms = await event_emitter.list_virtual_machine_ports()
    logger.info("Found %s with COM ports", [v.name for v in vms])
    for vm in vms:
        watcher.watch(vm)
    logger.info("Waiting on event watcher task")
    await watch_task
    logger.info("Event watcher task completed")
if __name__ == "__main__":
    import signal

    # Restore default SIGINT behaviour so Ctrl+C kills the process at once
    # (see https://stackoverflow.com/a/37420223/2751619).
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    log_level = os.environ.get("LOG_LEVEL", "INFO").upper()
    if log_level not in ("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"):
        log_level = "INFO"
    logging.basicConfig(
        format="%(asctime)s:%(levelname)7s:%(process)8d:%(name)s: %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S%z",
        level=getattr(logging, log_level),
    )

    report_errors = False
    rollbar_token = os.environ.get("ROLLBAR_API_TOKEN")
    if rollbar_token:
        logger.info("Initializing rollbar")
        import rollbar

        rollbar.init(
            rollbar_token,
            os.environ.get("ENVIRONMENT", "production"),
        )
        report_errors = True

    try:
        mem = MachineEventEmitter()
        shipper = LogShipper(start=True)
        watcher = SerialTail(message_queue=shipper.queue)
        # Deliberately kept as get_event_loop()/run_until_complete:
        # MachineEventEmitter creates asyncio primitives in __init__, which on
        # older Pythons bind to the default loop -- asyncio.run would run a
        # different loop and break them.
        loop = asyncio.get_event_loop()
        loop.run_until_complete(process_events(event_emitter=mem))
        logger.info("Event loop complete")
        logger.info("Shutting down watcher")
        watcher.shutdown()
        logger.info("Shutting down log shipper")
        shipper.stop()
    except Exception as exc:  # pylint: disable=broad-except
        if report_errors:
            rollbar.report_exc_info()
        # logger.exception preserves the traceback that logger.error(exc) lost
        logger.exception(exc)
        sys.exit(1)
| StarcoderdataPython |
1791972 | #!/usr/bin/env python
"""
Created on Fri Oct 7 13:27:14 2016
@author: dennis
"""
import numpy as np
from tf.transformations import *
from geometry_msgs.msg import Point, Vector3
from geometry_msgs.msg import Quaternion
def is_at_orientation(q_current, q_desired, offset_rad):
    """Return True if q_current is within *offset_rad* of q_desired.

    Both arguments are geometry_msgs Quaternions. The angular distance is
    2*acos(|w|) of the error quaternion q_desired * q_current^-1.
    """
    q_des = np.array((q_desired.x, q_desired.y, q_desired.z, q_desired.w))
    q_cur = np.array((q_current.x, q_current.y, q_current.z, q_current.w))
    q_err = quaternion_multiply(q_des, quaternion_inverse(q_cur))
    # Clamp |w| to 1.0: floating point error can push it slightly above 1,
    # which would make arccos return NaN (and the comparison always False)
    # even for identical orientations.
    return 2.0 * np.arccos(min(np.abs(q_err[3]), 1.0)) < offset_rad
def is_at_position(p_current, p_desired, offset):
    """Return True if p_current lies within *offset* (Euclidean distance)
    of p_desired. Both arguments expose .x/.y/.z attributes."""
    delta = np.array((p_desired.x - p_current.x,
                      p_desired.y - p_current.y,
                      p_desired.z - p_current.z))
    return np.linalg.norm(delta) < offset
def q_ros_to_numpy(q_ros):
    """Convert a geometry_msgs Quaternion to a numpy array [x, y, z, w]."""
    return np.array([q_ros.x, q_ros.y, q_ros.z, q_ros.w], dtype=float)
def p_numpy_to_ros(p_np):
    """Convert a length-3 array to a geometry_msgs Point."""
    point = Point()
    point.x, point.y, point.z = p_np[0], p_np[1], p_np[2]
    return point
def p_numpy_to_ros_vector(p_np):
    """Convert a length-3 array to a geometry_msgs Vector3."""
    vec = Vector3()
    vec.x, vec.y, vec.z = p_np[0], p_np[1], p_np[2]
    return vec
def p_ros_to_numpy(p_ros):
    """Convert a geometry_msgs Point to a numpy array [x, y, z]."""
    return np.array([p_ros.x, p_ros.y, p_ros.z], dtype=float)
def print_arrays(arrays, format_str='{0:.3f}'):
    """Print a sequence of arrays as 'index: formatted-values' pairs on one line."""
    formatted = [ndprint(vec, format_str) for vec in arrays]
    print(' '.join('{}: {}'.format(i, vals) for i, vals in enumerate(formatted)))
def ndprint(a, format_string='{0:.3f}'):
    """Format each element of *a* with *format_string*; the format may also
    reference the element's index as argument {1}."""
    return [format_string.format(value, index) for index, value in enumerate(a)]
# from diebel with changed order: q = [x,y,z,w]
def rotation_from_q(q):
    """Build the 3x3 rotation matrix for quaternion q = [x, y, z, w].

    Formula follows Diebel with the scalar component stored last (index 3).
    """
    x, y, z, w = q[0], q[1], q[2], q[3]
    return np.array([
        [w*w + x*x - y*y - z*z, 2*x*y + 2*w*z,         2*x*z - 2*w*y],
        [2*x*y - 2*w*z,         w*w - x*x + y*y - z*z, 2*y*z + 2*w*x],
        [2*x*z + 2*w*y,         2*y*z - 2*w*x,         w*w - x*x - y*y + z*z],
    ], dtype=float)
def rotation_from_q_transpose(q):
    """Return the transpose of rotation_from_q(q)."""
    return rotation_from_q(q).T
def threshold(vec, min_val, max_val):
    """Return *vec* rescaled so that its magnitude lies in [min_val, max_val].

    A zero vector is returned unchanged: it has no direction to scale along,
    and the original implementation divided by zero in that case.
    """
    magnitude = np.linalg.norm(vec)
    if magnitude == 0:
        return vec
    clipped = min(max(magnitude, min_val), max_val)
    return (clipped / magnitude) * vec
3371578 | <gh_stars>0
from urllib.error import URLError
from urllib.request import urlopen
import re
import pymysql
import ssl
from pymysql import Error
def decode_page(page_bytes, charsets=('utf-8',)):
    """Decode raw page bytes, trying each charset in turn.

    Returns the first successful decoding, or None if every charset fails
    (not every site serves utf-8, hence the fallback list).
    """
    for charset in charsets:
        try:
            return page_bytes.decode(charset)
        except UnicodeDecodeError:
            # try the next charset
            continue
    return None
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
    """Fetch *seed_url* and decode its body.

    On URLError the fetch is retried up to *retry_times* additional times
    (iteratively, where the original recursed); returns None when every
    attempt fails.
    """
    for _ in range(retry_times + 1):
        try:
            return decode_page(urlopen(seed_url).read(), charsets)
        except URLError:
            continue
    return None
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
    """Return all matches of *pattern_str* in *page_html*.

    Matching is case-insensitive by default; a falsy page yields [].
    """
    if not page_html:
        return []
    return re.compile(pattern_str, pattern_ignore_case).findall(page_html)
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
    """Run the crawler from *seed_url* and persist matched headings to MySQL.

    Links on the seed page are matched with *match_pattern*; each linked
    page's <h1> heading is stored together with its URL.
    """
    conn = pymysql.connect(host='localhost', port=3306,
                           database='crawler', user='root',
                           password='<PASSWORD>', charset='utf8')
    try:
        with conn.cursor() as cursor:
            url_list = [seed_url]
            # the dict below prevents re-crawling and tracks crawl depth
            visited_url_list = {seed_url: 0}
            while url_list:
                current_url = url_list.pop(0)
                depth = visited_url_list[current_url]
                if depth != max_depth:
                    # try utf-8 / gbk / gb2312 in turn when decoding the page
                    page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))
                    links_list = get_matched_parts(page_html, match_pattern)
                    param_list = []
                    # NOTE(review): discovered links are never appended to
                    # url_list, so the crawl never proceeds past pages linked
                    # directly from the seed, regardless of max_depth --
                    # confirm whether deeper crawling was intended.
                    for link in links_list:
                        if link not in visited_url_list:
                            visited_url_list[link] = depth + 1
                            page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))
                            headings = get_matched_parts(page_html, r'<h1>(.*)<span')
                            if headings:
                                param_list.append((headings[0], link))
                    cursor.executemany('insert into tb_result values (default, %s, %s)',
                                       param_list)
                    conn.commit()
    except Error:
        # NOTE(review): database errors are silently swallowed here
        pass
        # logging.error('SQL:', error)
    finally:
        conn.close()
def main():
    """Entry point: crawl the Sohu NBA page, following links matched by the
    given pattern up to depth 2."""
    # Disable HTTPS certificate verification so legacy/self-signed certs do
    # not abort the crawl. NOTE: this weakens transport security.
    ssl._create_default_https_context = ssl._create_unverified_context
    start_crawl('http://sports.sohu.com/nba_a.shtml',
                r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']',
                max_depth=2)


if __name__ == '__main__':
    main()
1742619 | import pygame
import random
import math
import os
pygame.init()

# Working directory with forward slashes so it composes into pygame-friendly
# paths on every platform.
PATH = os.getcwd().replace('\\', '/')

# Window icon must be registered with the display module before use.
img = pygame.image.load('./img/icon.png')
pygame.display.set_icon(img)
class DrawInformation:
    """Holds the pygame window plus the layout and color constants used to
    render the list being sorted."""

    BLACK = 0, 0, 0
    WHITE = 255, 255, 255
    RED = 255, 0, 0
    GREEN = 0, 255, 0
    BLUE = 0, 0, 255
    BACKGROUND_COLOR = BLACK

    FONT = pygame.font.SysFont('consolas', 20)
    LARGE_FONT = pygame.font.SysFont('consolas', 40)

    SIDE_PAD = 100  # total horizontal padding, split between left and right
    TOP_PAD = 150   # vertical space reserved for title/controls text

    def __init__(self, width, height, lst):
        self.width = width
        self.height = height

        self.window = pygame.display.set_mode((width, height))
        pygame.display.set_caption("Visualizador de Algoritmo")
        self.set_list(lst)

    def set_list(self, lst):
        """Store *lst* and recompute per-bar geometry for the window size."""
        self.lst = lst
        self.min_val = min(lst)
        self.max_val = max(lst)

        self.block_width = round((self.width - self.SIDE_PAD) / len(lst))
        # Guard the span at >= 1: if every value in the list is equal the
        # original divided by zero here.
        value_span = max(self.max_val - self.min_val, 1)
        self.block_height = math.floor((self.height - self.TOP_PAD) / value_span)
        self.start_x = self.SIDE_PAD // 2
def draw(draw_info, algo_name, ascending):
    """Redraw the full frame: title, control hints, and the list bars."""
    window = draw_info.window
    window.fill(draw_info.BACKGROUND_COLOR)

    direction = 'Ascendente' if ascending else 'Descendente'
    title = draw_info.LARGE_FONT.render(f"{algo_name} - {direction}", 1, draw_info.GREEN)
    window.blit(title, ((draw_info.width - title.get_width()) / 2, 10))

    controls = draw_info.FONT.render('R - Resetar | ESPAÇO - Ordenar | A - Ascendente | D - Descendente', 1, draw_info.WHITE)
    window.blit(controls, ((draw_info.width - controls.get_width()) / 2, 50))

    sorting = draw_info.FONT.render('I - Insertion Sort | B - Bubble Sort | C - Comb Sort', 1, draw_info.WHITE)
    window.blit(sorting, ((draw_info.width - sorting.get_width()) / 2, 80))

    draw_list(draw_info)
    pygame.display.update()
def draw_list(draw_info, color_positions={}, clear_bg=False):
    """Draw every value as a bar; *color_positions* overrides colors by index.

    With *clear_bg*, only the list area is wiped and the display updated --
    used for cheap incremental redraws while a sort is running.
    """
    lst = draw_info.lst

    if clear_bg:
        clear_rect = (draw_info.SIDE_PAD // 2, draw_info.TOP_PAD,
                      draw_info.width - draw_info.SIDE_PAD,
                      draw_info.height - draw_info.TOP_PAD)
        pygame.draw.rect(draw_info.window, draw_info.BACKGROUND_COLOR, clear_rect)

    for idx, value in enumerate(lst):
        x = draw_info.start_x + idx * draw_info.block_width
        y = draw_info.height - (value - draw_info.min_val) * draw_info.block_height

        # default color is a blue->yellow-ish gradient keyed on the value
        hue = int(math.floor(value * 1.2))
        color = color_positions.get(idx, (hue, hue, 255 - hue))

        pygame.draw.rect(draw_info.window, color, (x, y, draw_info.block_width, draw_info.height))

    if clear_bg:
        pygame.display.update()
def generate_starting_list(n, min_val, max_val):
    """Return *n* random integers drawn uniformly from [min_val, max_val]."""
    return [random.randint(min_val, max_val) for _ in range(n)]
def bubble_sort(draw_info, ascending=True):
    """Generator implementing bubble sort; yields after every swap so the
    caller can animate one step per frame."""
    lst = draw_info.lst
    n = len(lst)

    for i in range(n - 1):
        for j in range(n - 1 - i):
            left, right = lst[j], lst[j + 1]
            out_of_order = left > right if ascending else left < right
            if out_of_order:
                lst[j], lst[j + 1] = lst[j + 1], lst[j]
                draw_list(draw_info, {j: draw_info.GREEN, j + 1: draw_info.RED}, True)
                yield True

    return lst
def insertion_sort(draw_info, ascending=True):
    """Generator implementing insertion sort; yields after every shift so the
    caller can animate one step per frame."""
    lst = draw_info.lst

    for start in range(1, len(lst)):
        current = lst[start]
        pos = start
        while pos > 0:
            prev = lst[pos - 1]
            needs_shift = prev > current if ascending else prev < current
            if not needs_shift:
                break
            lst[pos] = prev
            pos -= 1
            lst[pos] = current
            draw_list(draw_info, {pos - 1: draw_info.GREEN, pos: draw_info.RED}, True)
            yield True

    return lst
def comb_sort(draw_info, ascending=True):
    """Generator implementing comb sort; yields once per gap pass.

    Fix over the original: the gap is clamped to a minimum of 1 and passes
    repeat until a gap-1 pass makes no swaps. The original let the gap fall
    to 0 after a single gap-1 pass, which could leave the list unsorted.
    """
    lst = draw_info.lst
    gap = math.floor(len(lst) / 1.3)
    swapped = True

    while gap > 1 or swapped:
        gap = max(gap, 1)
        swapped = False
        i = 0
        while i + gap < len(lst):
            out_of_order = lst[i] > lst[i + gap] if ascending else lst[i] < lst[i + gap]
            if out_of_order:
                lst[i], lst[i + gap] = lst[i + gap], lst[i]
                swapped = True
                draw_list(draw_info, {i: draw_info.GREEN, i + gap: draw_info.RED}, True)
            i += 1
        yield True
        gap = math.floor(gap / 1.3)
def main():
    """Run the visualizer: poll keyboard input and advance the active sorting
    generator one swap per frame, capped at 60 FPS."""
    run = True
    clock = pygame.time.Clock()

    n = 50
    min_val = 0
    max_val = 200

    lst = generate_starting_list(n, min_val, max_val)
    draw_info = DrawInformation(800, 600, lst)
    sorting = False
    ascending = True

    sorting_algorithm = bubble_sort
    sorting_algo_name = 'Bubble Sort'
    sorting_algorithm_generator = None

    while run:
        clock.tick(60)

        if sorting:
            try:
                # advance the sort one step; StopIteration means it finished
                next(sorting_algorithm_generator)
            except StopIteration:
                sorting = False
                pygame.mixer.music.stop()
                pygame.mixer.music.load(f'{PATH}/SFX/resolve.mp3')
                pygame.mixer.music.play()
        else:
            draw(draw_info, sorting_algo_name, ascending)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False

            if event.type != pygame.KEYDOWN:
                continue

            if event.key == pygame.K_r:
                # reset: fresh random list, abort any running sort
                lst = generate_starting_list(n, min_val, max_val)
                draw_info.set_list(lst)
                sorting = False
                pygame.mixer.music.stop()
            elif event.key == pygame.K_SPACE and sorting == False:
                sorting = True
                pygame.mixer.music.load(f'{PATH}/SFX/expect.mp3')
                pygame.mixer.music.play()
                sorting_algorithm_generator = sorting_algorithm(draw_info, ascending)
            elif event.key == pygame.K_a and not sorting:
                ascending = True
            elif event.key == pygame.K_d and not sorting:
                ascending = False
            elif event.key == pygame.K_i and not sorting:
                sorting_algorithm = insertion_sort
                sorting_algo_name = 'Insertion Sort'
            elif event.key == pygame.K_b and not sorting:
                sorting_algorithm = bubble_sort
                sorting_algo_name = 'Bubble Sort'
            elif event.key == pygame.K_c and not sorting:
                sorting_algorithm = comb_sort
                sorting_algo_name = 'Comb Sort'

    pygame.quit()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3247843 | import inflection
class ClassDefinitionError(ValueError):
    """Base error for problems detected while defining a class."""
    ...


class OrphanedListenersError(ClassDefinitionError):
    """Class definition contains orphaned listeners."""
    ...


class MissingGetterSetterTemplateError(ClassDefinitionError):
    """Class definition lacks a required getter/setter template."""
    ...


class InvalidPostCoerceAttributeNames(ClassDefinitionError):
    """Class definition uses invalid post-coerce attribute names."""
    ...


class CoerceMappingValueError(ClassDefinitionError):
    """Class definition contains an invalid coerce-mapping value."""
    ...
class ClassCreationFailed(ValueError, TypeError):
    """Aggregate error wrapping one or more underlying errors raised while
    creating a class; can flatten them into JSON-ready dicts."""

    def __init__(self, message, *errors):
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # raise would enforce the invariant in optimized runs as well.
        assert len(errors) > 0, "Must have varargs of errors!"
        self.errors = errors
        self.message = message
        super().__init__(message, *errors)

    @classmethod
    def _convert_exception_to_json(cls, item):
        """Render one exception as a dict, honoring a custom to_json()."""
        assert isinstance(item, Exception), f"item {item!r} ({type(item)}) is not an exception!"
        if hasattr(item, "to_json"):
            return item.to_json()
        return {"type": inflection.titleize(type(item).__name__), "message": str(item)}

    def to_json(self):
        """Flatten this error (and any nested aggregate) into a tuple of
        JSON-ready dicts annotated with the parent message/type."""
        stack = list(self.errors)
        results = []
        while stack:
            # NOTE(review): list.pop(0) is O(n); collections.deque would be
            # O(1), though error counts are presumably small.
            item = stack.pop(0)
            if hasattr(item, "errors"):
                # NOTE(review): nested results are dicts that already carry a
                # parent_message; it gets overwritten with *this* error's
                # message below -- confirm that is the intended behaviour.
                stack.extend(item.to_json())
                continue
            if isinstance(item, Exception):
                item = self.__class__._convert_exception_to_json(item)
            item["parent_message"] = self.message
            item["parent_type"] = inflection.titleize(type(self).__name__)
            results.append(item)
        return tuple(results)
| StarcoderdataPython |
1696849 | from bioimageio.spec.model import raw_nodes
def test_load_raw_model(unet2d_nuclei_broad_any):
    """Any unet2d-nuclei-broad flavour should load into a truthy raw node."""
    from bioimageio.spec import load_raw_resource_description

    assert load_raw_resource_description(unet2d_nuclei_broad_any)


def test_loaded_remote_raw_model_is_valid(unet2d_nuclei_broad_url):
    """A remotely loaded model should survive being re-loaded from its raw node."""
    from bioimageio.spec import load_raw_resource_description

    remote = load_raw_resource_description(unet2d_nuclei_broad_url)
    assert load_raw_resource_description(remote)


def test_load_raw_model_fixed_shape(unet2d_fixed_shape):
    """The fixed-shape unet2d example should load."""
    from bioimageio.spec import load_raw_resource_description

    assert load_raw_resource_description(unet2d_fixed_shape)


def test_load_raw_model_diff_output_shape(unet2d_diff_output_shape):
    """The unet2d example with a differing output shape should load."""
    from bioimageio.spec import load_raw_resource_description

    assert load_raw_resource_description(unet2d_diff_output_shape)


def test_load_raw_model_multi_tensor(unet2d_multi_tensor):
    """The multi-tensor unet2d example should load."""
    from bioimageio.spec import load_raw_resource_description

    assert load_raw_resource_description(unet2d_multi_tensor)


def test_load_raw_model_hpa(hpa_model):
    """The HPA example model should load."""
    from bioimageio.spec import load_raw_resource_description

    assert load_raw_resource_description(hpa_model)


def test_load_raw_model_stardist(stardist_model):
    """The StarDist example model should load."""
    from bioimageio.spec import load_raw_resource_description

    assert load_raw_resource_description(stardist_model)
def test_load_raw_model_unet2d_keras_tf(unet2d_keras_tf):
    """Keras/TF model should load as a Model node and resolve its attachment."""
    from bioimageio.spec import load_raw_resource_description

    raw_model = load_raw_resource_description(unet2d_keras_tf, update_to_format="latest")
    assert isinstance(raw_model, raw_nodes.Model)

    # exactly one attachment, and it must exist on disk relative to the root
    attachments = raw_model.attachments.files
    assert len(attachments) == 1
    assert (raw_model.root_path / attachments[0]).exists()
def test_load_raw_model_to_format(unet2d_nuclei_broad_before_latest):
    """A pre-latest model should be upgradable to each newer minor format."""
    from bioimageio.spec import load_raw_resource_description

    current = tuple(int(p) for p in unet2d_nuclei_broad_before_latest["format_version"].split(".")[:2])

    for target in [(0, 3), (0, 4)]:
        if current > target:
            # cannot downgrade; only test targets at or above the current version
            continue
        to_format = ".".join(str(p) for p in target)
        raw_model = load_raw_resource_description(unet2d_nuclei_broad_before_latest, update_to_format=to_format)
        # resulting format_version must begin with the requested major.minor
        assert raw_model.format_version[: raw_model.format_version.rfind(".")] == to_format
| StarcoderdataPython |
1705658 | <gh_stars>0
import os
import json
from tqdm import tqdm
import pandas as pd
def concat_text_and_save_as_jsonlist(df, outfile, text_cols):
    """Write *df* to *outfile* as a jsonlist, one JSON object per row.

    Each row's *text_cols* are joined (space separated) into a single 'text'
    field and the original text columns are dropped. Concatenating whole
    DataFrame columns up front is expensive, so rows are streamed to disk
    one at a time.

    Fix over the original: the open file handle shadowed the *outfile*
    parameter, which only worked by accident of evaluation order.
    """
    with open(outfile, 'w', encoding='utf-8') as fh:
        for _, row in tqdm(df.iterrows()):
            row['text'] = ' '.join(row[text_cols])
            row.drop(text_cols).to_json(fh, orient='index', force_ascii=False)
            fh.write('\n')
if __name__ == "__main__":
    # load in party matters data
    party_matters = pd.read_csv(
        'data/party_matters_200512_expanded.csv', index_col=0, dtype={'govtrack_id': str}
    )
    party_matters['congress_id'] = party_matters.natural_id.str.extract('(^[0-9]+)')

    # load in speeches data
    with open("data/speeches/debate_speeches_for_legislators_per_congress.json", "r") as infile:
        speeches = json.load(infile)

    congress_year_dict = {
        '109': '20052006', '110': '20072008', '111': '20092010', '112': '20112012'
    }

    # Build one frame per (congress, legislator) and concatenate once:
    # appending to a DataFrame in a loop is quadratic, and DataFrame.append
    # was removed in pandas >= 2.0.
    frames = []
    for congress in speeches:
        for legislator_id in tqdm(speeches[congress]):
            frames.append(pd.DataFrame({
                'congress_num': congress,
                'congress_id': congress_year_dict[congress],
                'govtrack_id': legislator_id,
                'speech': speeches[congress][legislator_id],
            }))
    speeches_data = pd.concat(frames, ignore_index=True)
    del speeches, frames

    # merge speeches with party matters data: for now, we group all legislator's
    # speech in each session
    speeches_data = (
        speeches_data.groupby(['congress_id', 'govtrack_id'])['speech']
        .apply(' '.join)
        .reset_index()
    )
    party_matters = party_matters.merge(speeches_data).reset_index()

    # process data (a dead, unassigned `party_matters.reset_index()` no-op
    # from the original was removed here)
    party_matters = party_matters.rename({'index': 'id'}, axis=1)
    party_matters = party_matters.replace({"vote": {1.0: 'yay', 0.0: 'nay'}})
    # TODO: add majority cosponsor
    # nb: concatenating votes and speech all at once is expensive, we do in a loop below

    # filter and save
    os.makedirs('data/congress_votes_speech', exist_ok=True)

    # training -- fix over the original, which passed the output path as the
    # first positional argument (binding it to *df*) while also passing df=
    # by keyword: a guaranteed TypeError.
    concat_text_and_save_as_jsonlist(
        df=party_matters.loc[party_matters.congress_num.isin(['109', '110', '111'])],
        outfile="data/congress_votes_speech/train-bill_plus_all_speech-109_111.jsonlist",
        text_cols=['summary', 'speech'],
    )
| StarcoderdataPython |
1621443 | import keras
# import keras_retinanet
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color
# import miscellaneous modules
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import time
from tqdm import tqdm
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
def get_session():
    """Return a TF session whose GPU memory allocation grows on demand
    instead of pre-claiming all device memory."""
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    return tf.Session(config=config)
# use this environment flag to change which GPU to use
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())

# adjust this to point to your downloaded/trained model
# models can be downloaded here: https://github.com/fizyr/keras-retinanet/releases
# model_path = os.path.join('..', 'snapshots', 'resnet50_coco_best_v2.1.0.h5')
model_path = 'resnet50_log.h5'

# load retinanet model
model = models.load_model(model_path, backbone_name='resnet50')

# if the model is not converted to an inference model, use the line below
# see: https://github.com/fizyr/keras-retinanet#converting-a-training-model-to-inference-model
#model = models.convert_model(model)

#print(model.summary())

# load label to names mapping for visualization purposes
labels_to_names = {0: 'plastic_bag', 1: 'plastic_wrapper', 2: 'plastic_bottle', 3: 'plastic_cap', 4: 'shoes',
                   5: 'decor', 6: 'cigarette', 7: 'paper_wrapper', 8: 'cardboard', 9: 'tetrapak', 10: 'cluster',
                   11: 'other'}

# base_path = '/Ted/datasets'
# folders = ['VOC_Test_Easy','VOC_Test_Hard']
# split = 'test' #can be train, train_val or test
# savedir = '/mnt/8A2A8B2E2A8B15FB/Ted/models/results/retinanet/predict'

# VOC-style dataset layout: {base_path}{folder}/ImageSets/Main/{split}.txt
# lists image names, JPEGImages/ holds the frames.
base_path = '/Ted/datasets/VOC_Test_'
folders = ['VOC_Test_Easy', 'VOC_Test_Hard']
split = 'test'  # can be train, train_val or test
savedir = '/Ted/results/retinanet50_log'
if not os.path.exists(savedir):
    os.mkdir(savedir)
# Run detection over every image listed in each split file, writing one
# annotation text file per image and (when detections exist) an annotated JPEG.
for folder in folders:
    txt_file = os.path.join(base_path, folder, 'ImageSets/Main', split + '.txt')
    # with-block closes the split file (the original leaked this handle and
    # later rebound the same variable to the annotation file)
    with open(txt_file, 'r') as split_file:
        lines = split_file.readlines()

    out_folder = os.path.join(savedir, folder)
    if not os.path.exists(out_folder):
        os.mkdir(out_folder)

    for line in tqdm(lines):
        img_name = line.strip()
        img = os.path.join(base_path, folder, 'JPEGImages', img_name + '.jpg')
        # cv2.imread does not raise on a missing file -- it returns None, so
        # the original try/except never fired; test the return value instead.
        image = cv2.imread(img)
        if image is None:
            print(img + ' does not exist')
            continue

        # copy to draw on
        draw = image.copy()

        # preprocess image for network
        image = preprocess_image(image)
        image, scale = resize_image(image)

        # run inference and map boxes back to the original image scale
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
        boxes /= scale

        annot = []
        last_box = None
        last_caption = None
        # detections are sorted by score, so stop at the first one below 0.5
        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            if score < 0.5:
                break
            color = label_color(label)
            last_box = box.astype(int)
            draw_box(draw, last_box, color=color)
            last_caption = "{} {:.2f}".format(labels_to_names[label], score)
            annot.append(last_caption + ' ' + str(last_box[0]) + ' ' + str(last_box[1])
                         + ' ' + str(last_box[2]) + ' ' + str(last_box[3]))

        with open(os.path.join(out_folder, img_name + '.txt'), 'w+') as annot_file:
            for annotation in annot:
                annot_file.write(annotation + '\n')

        # The original tested `if b:` on a 4-element ndarray, which raises
        # ValueError (ambiguous truth value) whenever detections exist; use an
        # explicit None check instead.
        if last_box is not None:
            # NOTE(review): as in the original, only the last detection gets a
            # text caption drawn on the image -- confirm whether all captions
            # were intended.
            draw_caption(draw, last_box, last_caption)
            cv2.imwrite(os.path.join(out_folder, img_name + '.jpg'), draw)
134882 | <filename>pytrx/transformation.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 17:33:17 2016
@author: darren
A library of predefined moves for the Molecule class.
"""
import numpy as np
import math
import copy
from pytrx.utils import AtomicMass
from abc import (ABC as _ABC, abstractmethod as _abstractmethod)
class Transformation(_ABC):
    """Abstract base for geometric transformations applied to a molecule.

    Every instance carries a mandatory string *name* and an optional *dw*
    parameter (deep-copied so the caller's object is never shared). Concrete
    subclasses implement prepare(), describe() and transform().
    """

    def __init__(self, dw=None, name=None):
        self.dw = copy.deepcopy(dw)
        self.name = name
        assert self.name is not None, 'Transofrmation must have name keyword that is not None'
        assert type(self.name) == str, 'Transformation name must be a string'

    @_abstractmethod
    def prepare(self, xyz, Z_num):
        """Precompute whatever the transformation needs from the geometry."""

    @_abstractmethod
    def describe(self):
        """Print a human-readable description of the transformation."""

    @_abstractmethod
    def transform(self, xyz, Z_num, amplitude=None):
        """Apply the transformation to *xyz* and return the new coordinates."""
class Transformation_move_vector(Transformation):
    """Move a group of atoms along a fixed (normalized) vector.

    group1 : indices of the atoms to move
    vector : direction of the move (normalized internally)
    amplitude0 : default displacement when transform() gets no amplitude
    reprep : if True, re-run prepare() on every transform() call
    """

    def __init__(self, group1, vector, amplitude0=0, reprep=True, **kwargs):
        super().__init__(**kwargs)
        self.group1 = np.array(group1)
        self.vector = np.array(vector)
        self.unit_vector = np.array(vector) / np.linalg.norm(vector)
        self.amplitude0 = amplitude0
        self.reprep = reprep

    def prepare(self, xyz, Z_num):
        # Strict < : an index equal to len(xyz) is already out of range
        # (the original <= comparison let that case slip through).
        assert (np.max(self.group1) < len(xyz)), \
            "Index out of bound: largest index of group 1 > length of supplied molecule"

    def describe(self):
        print(" Moving group1 along a predefined vector.")
        print(f' Group 1: {self.group1}')
        print(f' Vector : {self.unit_vector}')

    def transform(self, xyz, Z_num, amplitude=None):
        if amplitude is None:
            amplitude = self.amplitude0
        if self.reprep:
            self.prepare(xyz, Z_num)
        xyz[self.group1] += self.unit_vector * amplitude
        return xyz
class Transformation_group_vector(Transformation):
    """Move group1 along the axis defined by two other atom groups.

    The direction is taken from the mean position of vector_groups[0] to the
    mean position of vector_groups[1] and is re-evaluated in prepare().
    """

    def __init__(self, group1, vector_groups, amplitude0=0, reprep=True, **kwargs):
        super().__init__(**kwargs)
        self.group1 = np.array(group1)
        self.vector_groups = vector_groups
        self.amplitude0 = amplitude0
        self.reprep = reprep

    def prepare(self, xyz, Z_num):
        # Strict < : an index equal to len(xyz) is already out of range (the
        # original <= comparison missed that case); the second and third
        # messages also wrongly said "group 1" for the vector groups.
        assert (np.max(self.group1) < len(xyz)), \
            "Index out of bound: largest index of group 1 > length of supplied molecule"
        assert (np.max(self.vector_groups[0]) < len(xyz)), \
            "Index out of bound: largest index of vector group 1 > length of supplied molecule"
        assert (np.max(self.vector_groups[1]) < len(xyz)), \
            "Index out of bound: largest index of vector group 2 > length of supplied molecule"
        self.vector = np.mean(xyz[self.vector_groups[1]], 0) - np.mean(xyz[self.vector_groups[0]], 0)
        self.unit_vector = np.array(self.vector) / np.linalg.norm(self.vector)

    def describe(self):
        print(" Moving group1 along a vector constructed by two other groups (mean of coordinates).")
        print(" Vector will be from vector_group 1 to vector_group 2")
        print(f' Group 1: {self.group1}')
        print(f' Vector_group 1: {self.vector_groups[0]}')
        print(f' Vector_group 2: {self.vector_groups[1]}')

    def transform(self, xyz, Z_num, amplitude=None):
        if amplitude is None:
            amplitude = self.amplitude0
        if self.reprep:
            self.prepare(xyz, Z_num)
        # NOTE(review): a positive amplitude moves group1 *against* the
        # group1->group2 direction (-=); confirm this sign convention matches
        # the describe() text.
        xyz[self.group1] -= self.unit_vector * amplitude
        return xyz
class Transformation_vibration(Transformation):
    """Displace every atom along a precomputed vibrational-mode vector *dxyz*."""

    def __init__(self, dxyz, amplitude0=0, reprep=True, **kwargs):
        super().__init__(**kwargs)
        self.dxyz = dxyz
        self.amplitude0 = amplitude0
        self.reprep = reprep

    def prepare(self, xyz, Z_num):
        assert self.dxyz.shape[0] == xyz.shape[0], \
            'number of atoms in transformation and in the molecule must match'
        return self

    def describe(self):
        print("Move all atoms along a predefined vibrational mode.")

    def transform(self, xyz, Z_num, amplitude=None):
        amplitude = self.amplitude0 if amplitude is None else amplitude
        if self.reprep:
            self.prepare(xyz, Z_num)
        return xyz + self.dxyz * amplitude
class Transformation_distance(Transformation):
    """Change the distance between two atom groups symmetrically.

    Centers are the plain means of each group's coordinates; the move axis
    points from group1 to group2 and each group travels amplitude/2 in
    opposite directions (negative amplitude contracts).
    """

    def __init__(self, group1, group2, amplitude0=0, reprep=True, **kwargs):
        super().__init__(**kwargs)
        assert (len(group1) > 0) and (len(group2) > 0), 'Cannot operate on empty set'
        self.group1 = np.array(group1)
        self.group2 = np.array(group2)
        self.amplitude0 = amplitude0
        self.reprep = reprep

    def prepare(self, xyz, Z_num):
        # Strict < : an index equal to len(xyz) is already out of range
        # (the original <= comparison let that case slip through).
        assert (np.max(self.group1) < len(xyz)), \
            "Index out of bound: largest index of group 1 > length of supplied molecule"
        assert (np.max(self.group2) < len(xyz)), \
            "Index out of bound: largest index of group 2 > length of supplied molecule"
        self.group1_mean = np.mean(xyz[self.group1], 0)
        self.group2_mean = np.mean(xyz[self.group2], 0)
        self.unit_vec = (self.group2_mean - self.group1_mean) / np.linalg.norm(self.group2_mean - self.group1_mean)

    def describe(self):
        print(f' Increasing / decreasing distance between group1 and group2 using '
              f'simple mean of coordinates as centers.\n'
              f' Both groups move.')
        print(f' Group 1: {self.group1}')
        print(f' Group 2: {self.group2}')

    def transform(self, xyz, Z_num, amplitude=None):
        if amplitude is None:
            amplitude = self.amplitude0
        if self.reprep:
            self.prepare(xyz, Z_num)
        xyz[self.group1] -= self.unit_vec * amplitude / 2
        xyz[self.group2] += self.unit_vec * amplitude / 2
        return xyz
class Transformation_distance_1side(Transformation):
    """Move group2 toward / away from group1 along their center-to-center axis.

    Group1 stays fixed; positive amplitude moves group2 away from group1
    and negative amplitude moves it closer.
    """

    def __init__(self, group1, group2, amplitude0=0, reprep=True, **kwargs):
        super().__init__(**kwargs)
        assert (len(group1) > 0) and (len(group2) > 0), 'Cannot operate on empty set'
        self.group1 = np.array(group1)
        self.group2 = np.array(group2)
        self.amplitude0 = amplitude0
        self.reprep = reprep

    def prepare(self, xyz, Z_num):
        # Strict < : an index equal to len(xyz) is already out of range
        # (the original <= comparison let that case slip through).
        assert (np.max(self.group1) < len(xyz)), \
            "Index out of bound: largest index of group 1 > length of supplied molecule"
        assert (np.max(self.group2) < len(xyz)), \
            "Index out of bound: largest index of group 2 > length of supplied molecule"
        self.group1_mean = np.mean(xyz[self.group1], 0)
        self.group2_mean = np.mean(xyz[self.group2], 0)
        self.unit_vec = (self.group2_mean - self.group1_mean) / np.linalg.norm(self.group2_mean - self.group1_mean)

    def describe(self):
        print(f' Increasing / decreasing distance between group1 and group2 using '
              f'simple mean of coordinates as centers.\n'
              f' Only group 2 moves.')
        print(f' Group 1: {self.group1}')
        print(f' Group 2: {self.group2}')

    def transform(self, xyz, Z_num, amplitude=None):
        if amplitude is None:
            amplitude = self.amplitude0
        if self.reprep:
            self.prepare(xyz, Z_num)
        xyz[self.group2] += self.unit_vec * amplitude
        return xyz
class Transformation_distance_1side_expand(Transformation):
    """Expand/contract GROUP 2 ATOMWISE away from / toward GROUP 1's center.

    Each atom of group 2 moves along its own vector from group 1's center;
    vectors are normalized by the mean of their norms, so the average
    per-atom shift equals the amplitude.  Negative amplitude shrinks.
    GROUP 1 is fixed.
    """
    def __init__(self, group1, group2, amplitude0=0, reprep=True, **kwargs):
        # group1/group2: index lists into the molecule's coordinate array.
        # reprep: if True, recompute the vectors on every transform() call.
        super().__init__(**kwargs)
        assert (len(group1) > 0) and (len(group2) > 0), 'Cannot operate on empty set'
        self.group1 = np.array(group1)
        self.group2 = np.array(group2)
        self.amplitude0 = amplitude0
        self.reprep = reprep
    def prepare(self, xyz, Z_num):
        # Valid indices are 0..len(xyz)-1, so the bound check must be strict
        # (`<=` previously allowed an index equal to len(xyz)).
        assert (np.max(self.group1) < len(xyz)), \
            "Index out of bound: largest index of group 1 >= length of supplied molecule"
        assert (np.max(self.group2) < len(xyz)), \
            "Index out of bound: largest index of group 2 >= length of supplied molecule"
        self.group1_mean = np.mean(xyz[self.group1], 0)
        # Per-atom vectors from group 1's center, normalized by the MEAN of
        # their norms (average displacement per atom == amplitude).
        self.vecs = (xyz[self.group2] - self.group1_mean) / np.mean(np.linalg.norm(xyz[self.group2] - self.group1_mean, axis=1))
        # Alternative kept for reference: per-atom normalization, where every
        # atom would move exactly |amplitude|:
        # self.vecs = (xyz[self.group2] - self.group1_mean) / np.linalg.norm(xyz[self.group2] - self.group1_mean, axis=1)[:,None]
    def describe(self):
        """Print a human-readable description of this transformation."""
        print(f' Increasing / decreasing distance between group1 and group2, ATOMWISE, using '
              f'simple mean of coordinates as centers.\n'
              f' Only group 2 moves.')
        print(f' Group 1: {self.group1}')
        print(f' Group 2: {self.group2}')
    def transform(self, xyz, Z_num, amplitude=None):
        # Mutates xyz in place and also returns it.
        if amplitude is None:
            amplitude = self.amplitude0
        if self.reprep:
            self.prepare(xyz, Z_num)
        xyz[self.group2] += self.vecs * amplitude
        return xyz
class Transformation_distanceCOM(Transformation):
    """Move two groups of atoms closer/further apart, using each group's
    center of mass as the reference center.

    The axis runs from group 1 to group 2; negative amplitude shrinks the
    distance.  Both groups move by half the amplitude each.
    """
    def __init__(self, group1, group2, amplitude0=0, reprep=True, **kwargs):
        # group1/group2: index lists into the molecule's coordinate array.
        # reprep: if True, recompute centers/axis on every transform() call.
        super().__init__(**kwargs)
        assert (len(group1) > 0) and (len(group2) > 0), 'Cannot operate on empty set'
        self.group1 = np.array(group1)
        self.group2 = np.array(group2)
        self.amplitude0 = amplitude0
        self.reprep = reprep
    def prepare(self, xyz, Z_num):
        # Valid indices are 0..len(xyz)-1, so the bound check must be strict
        # (`<=` previously allowed an index equal to len(xyz)).
        assert (np.max(self.group1) < len(xyz)), \
            "Index out of bound: largest index of group 1 >= length of supplied molecule"
        assert (np.max(self.group2) < len(xyz)), \
            "Index out of bound: largest index of group 2 >= length of supplied molecule"
        # Mass-weighted centers; AtomicMass() is indexed by atomic number - 1
        # (defined elsewhere in this module).
        self.group1_COM = np.sum(xyz[self.group1].T * AtomicMass()[Z_num[self.group1] - 1],
                                 1) / np.sum(AtomicMass()[Z_num[self.group1] - 1])
        self.group2_COM = np.sum(xyz[self.group2].T * AtomicMass()[Z_num[self.group2] - 1],
                                 1) / np.sum(AtomicMass()[Z_num[self.group2] - 1])
        self.unit_vec = (self.group2_COM - self.group1_COM) / np.linalg.norm(self.group2_COM - self.group1_COM)
        self.unit_vec = self.unit_vec.T
    def describe(self):
        """Print a human-readable description of this transformation."""
        print(f' Increasing / decreasing distance between group1 and group2 using centers of masses as centers.\n'
              f' Both groups move.')
        print(f' Group 1: {self.group1}')
        print(f' Group 2: {self.group2}')
    def transform(self, xyz, Z_num, amplitude=None):
        # Mutates xyz in place and also returns it.
        if amplitude is None:
            amplitude = self.amplitude0
        if self.reprep:
            self.prepare(xyz, Z_num)
        xyz[self.group1] -= self.unit_vec * amplitude / 2
        xyz[self.group2] += self.unit_vec * amplitude / 2
        return xyz
class Transformation_distanceCOM_1side(Transformation):
    """Move GROUP 2 toward/away from GROUP 1, using each group's center of
    mass as the reference center.

    The axis runs from group 1 to group 2; negative amplitude shrinks the
    distance.  GROUP 1 is fixed.
    """
    def __init__(self, group1, group2, amplitude0=0, reprep=True, **kwargs):
        # group1/group2: index lists into the molecule's coordinate array.
        # reprep: if True, recompute centers/axis on every transform() call.
        super().__init__(**kwargs)
        assert (len(group1) > 0) and (len(group2) > 0), 'Cannot operate on empty set'
        self.group1 = np.array(group1)
        self.group2 = np.array(group2)
        self.amplitude0 = amplitude0
        self.reprep = reprep
    def prepare(self, xyz, Z_num):
        # Valid indices are 0..len(xyz)-1, so the bound check must be strict
        # (`<=` previously allowed an index equal to len(xyz)).
        assert (np.max(self.group1) < len(xyz)), \
            "Index out of bound: largest index of group 1 >= length of supplied molecule"
        assert (np.max(self.group2) < len(xyz)), \
            "Index out of bound: largest index of group 2 >= length of supplied molecule"
        # Mass-weighted centers; AtomicMass() is indexed by atomic number - 1
        # (defined elsewhere in this module).
        self.group1_COM = np.sum(xyz[self.group1].T * AtomicMass()[Z_num[self.group1] - 1],
                                 1) / np.sum(AtomicMass()[Z_num[self.group1] - 1])
        self.group2_COM = np.sum(xyz[self.group2].T * AtomicMass()[Z_num[self.group2] - 1],
                                 1) / np.sum(AtomicMass()[Z_num[self.group2] - 1])
        self.unit_vec = (self.group2_COM - self.group1_COM) / np.linalg.norm(self.group2_COM - self.group1_COM)
        self.unit_vec = self.unit_vec.T
    def describe(self):
        """Print a human-readable description of this transformation."""
        print(f' Increasing / decreasing distance between group1 and group2 using centers of masses as centers.\n'
              f' Only group 2 moves.')
        print(f' Group 1: {self.group1}')
        print(f' Group 2: {self.group2}')
    def transform(self, xyz, Z_num, amplitude=None):
        # Mutates xyz in place and also returns it.
        if amplitude is None:
            amplitude = self.amplitude0
        if self.reprep:
            self.prepare(xyz, Z_num)
        xyz[self.group2] += self.unit_vec * amplitude
        return xyz
class Transformation_rotation(Transformation):
    """Rotate group 1 about an axis defined by 2 or 3 groups of atoms.

    Each axis group is reduced to the simple mean of its coordinates.
    With 2 axis groups (A, B): rotate about the vector A->B, centered on A.
    With 3 axis groups (A, B, C): rotate about cross(AB, BC), centered on B.
    Amplitude is in degrees; rotation is counterclockwise for an observer
    toward whom the axis vector points (right-hand rule).
    """
    def __init__(self, group1, axis_groups, amplitude0=0, reprep=True, **kwargs):
        # group1: indices of the atoms to rotate.
        # axis_groups: 2 or 3 index lists defining the rotation axis (above).
        # reprep: if True, recompute centers/axis on every transform() call.
        super().__init__(**kwargs)
        assert (len(group1) > 0) and (len(axis_groups) > 0), 'Cannot operate on empty set'
        assert (len(axis_groups) == 2) or (len(axis_groups) == 3), 'Axis must be defined with 2 or 3 groups'
        for i, grp in enumerate(axis_groups):
            assert (len(grp) > 0), f'Axis group {i} is empty'
        self.group1 = group1
        self.axis_groups = axis_groups
        self.amplitude0 = amplitude0
        self.reprep = reprep
    def prepare(self, xyz, Z_num):
        # Valid indices are 0..len(xyz)-1, so the bound checks must be strict
        # (`<=` previously allowed an index equal to len(xyz)).
        assert (np.max(self.group1) < len(xyz)), \
            "Index out of bound: largest index of group 1 >= length of supplied molecule"
        for i, grp in enumerate(self.axis_groups):
            # Message previously blamed "group 1" for any axis-group failure.
            assert (np.max(grp) < len(xyz)), \
                f"Index out of bound: largest index of axis group {i} >= length of supplied molecule"
        self.group1_mean = np.mean(xyz[self.group1], 0)
        self.A_mean = np.mean(xyz[self.axis_groups[0]], 0)
        self.B_mean = np.mean(xyz[self.axis_groups[1]], 0)
        if len(self.axis_groups) == 3:
            self.C_mean = np.mean(xyz[self.axis_groups[2]], 0)
        if len(self.axis_groups) == 2:  # then use AB as the rotation axis
            self.axis = self.B_mean - self.A_mean
        if len(self.axis_groups) == 3:  # use cross product of AB and BC
            self.axis = np.cross(self.B_mean - self.A_mean, self.C_mean - self.B_mean)
    def describe(self):
        """Print a human-readable description of this transformation."""
        if len(self.axis_groups) == 2:
            print(f' Rotate group 1 along the axis from center of axis_group 1 to center of axis_group 2.\n')
        elif len(self.axis_groups) == 3:
            print(f' Rotate group 1 along the axis normal to the plane spanned by\n'
                  f' center of axis_group 1 to center of axis_group 2 and \n'
                  f' center of axis_group 2 to center of axis_group 3. \n'
                  f' Center is defined as simple mean of coordinates in that group.')
        print(f' Group 1: {self.group1}')
        print(f' Axis_group 1: {self.axis_groups[0]}')
        print(f' Axis_group 2: {self.axis_groups[1]}')
        if len(self.axis_groups) == 3:
            print(f' Axis_group 3: {self.axis_groups[2]}')
    def transform(self, xyz, Z_num, amplitude=None):
        # Mutates xyz in place and also returns it.
        if amplitude is None:
            amplitude = self.amplitude0
        if self.reprep:
            self.prepare(xyz, Z_num)
        # Shift reference frame to the rotation center, rotate, shift back.
        if len(self.axis_groups) == 2:
            xyz[self.group1] = rotation3D((xyz[self.group1] - self.A_mean).T, self.axis, amplitude).T + self.A_mean
        if len(self.axis_groups) == 3:
            xyz[self.group1] = rotation3D((xyz[self.group1] - self.B_mean).T, self.axis, amplitude).T + self.B_mean
        return xyz
class Transformation_rotationCOM(Transformation):
    """Rotate group 1 about an axis defined by 2 or 3 groups of atoms, with
    each axis group reduced to its center of mass.

    With 2 axis groups (A, B): rotate about the vector A->B, centered on A.
    With 3 axis groups (A, B, C): rotate about cross(AB, BC), centered on B.
    Amplitude is in degrees; rotation is counterclockwise for an observer
    toward whom the axis vector points (right-hand rule).
    """
    def __init__(self, group1, axis_groups, amplitude0=0, reprep=True, **kwargs):
        # group1: indices of the atoms to rotate.
        # axis_groups: 2 or 3 index lists defining the rotation axis (above).
        # reprep: if True, recompute centers/axis on every transform() call.
        super().__init__(**kwargs)
        assert (len(group1) > 0) and (len(axis_groups) > 0), 'Cannot operate on empty set'
        assert (len(axis_groups) == 2) or (len(axis_groups) == 3), 'Axis must be defined with 2 or 3 groups'
        for i, grp in enumerate(axis_groups):
            assert (len(grp) > 0), f'Axis group {i} is empty'
        self.group1 = group1
        self.axis_groups = axis_groups
        self.amplitude0 = amplitude0
        self.reprep = reprep
    def prepare(self, xyz, Z_num):
        # Valid indices are 0..len(xyz)-1, so the bound checks must be strict
        # (`<=` previously allowed an index equal to len(xyz)).
        assert (np.max(self.group1) < len(xyz)), \
            "Index out of bound: largest index of group 1 >= length of supplied molecule"
        for i, grp in enumerate(self.axis_groups):
            # Bug fix: previously evaluated np.max(self.axis_groups) -- the
            # whole (possibly ragged) list -- instead of the i-th group, so
            # the per-group bound was never actually checked.
            assert (np.max(grp) < len(xyz)), \
                f"Index out of bound: largest index of axis group {i} >= length of supplied molecule"
        # Mass-weighted centers; AtomicMass() is indexed by atomic number - 1
        # (defined elsewhere in this module).
        self.A_COM = np.sum(xyz[self.axis_groups[0]].T * AtomicMass()[Z_num[self.axis_groups[0]] - 1],
                            1) / np.sum(AtomicMass()[Z_num[self.axis_groups[0]] - 1])
        self.B_COM = np.sum(xyz[self.axis_groups[1]].T * AtomicMass()[Z_num[self.axis_groups[1]] - 1],
                            1) / np.sum(AtomicMass()[Z_num[self.axis_groups[1]] - 1])
        if len(self.axis_groups) == 3:
            self.C_COM = np.sum(xyz[self.axis_groups[2]].T * AtomicMass()[Z_num[self.axis_groups[2]] - 1],
                                1) / np.sum(AtomicMass()[Z_num[self.axis_groups[2]] - 1])
        if len(self.axis_groups) == 2:  # then use AB as the rotation axis
            self.axis = self.B_COM - self.A_COM
        if len(self.axis_groups) == 3:  # use cross product of AB and BC
            self.axis = np.cross(self.B_COM - self.A_COM, self.C_COM - self.B_COM)
    def describe(self):
        """Print a human-readable description of this transformation."""
        if len(self.axis_groups) == 2:
            print(f' Rotate group 1 along the axis from center of axis_group 1 to center of axis_group 2\n')
        elif len(self.axis_groups) == 3:
            print(f' Rotate group 1 along the axis normal to the plane spanned by\n'
                  f' center of axis_group 1 to center of axis_group 2 and \n'
                  f' center of axis_group 2 to center of axis_group 3. \n'
                  f' Center is defined as center of mass in that group.')
        print(f' Group 1: {self.group1}')
        print(f' Axis_group 1: {self.axis_groups[0]}')
        print(f' Axis_group 2: {self.axis_groups[1]}')
        if len(self.axis_groups) == 3:
            print(f' Axis_group 3: {self.axis_groups[2]}')
    def transform(self, xyz, Z_num, amplitude=None):
        # Mutates xyz in place and also returns it.
        if amplitude is None:
            amplitude = self.amplitude0
        if self.reprep:
            self.prepare(xyz, Z_num)
        # Shift reference frame to the rotation center, rotate, shift back.
        if len(self.axis_groups) == 2:
            xyz[self.group1] = rotation3D((xyz[self.group1] - self.A_COM).T, self.axis, amplitude).T + self.A_COM
        if len(self.axis_groups) == 3:
            xyz[self.group1] = rotation3D((xyz[self.group1] - self.B_COM).T, self.axis, amplitude).T + self.B_COM
        return xyz
def rotation3D(v, axis, degrees):
    """Rotate *v* about *axis* by *degrees*, counterclockwise for an observer
    toward whom the axis vector points (right-hand rule).

    Builds the rotation matrix via the Euler-Rodrigues formula and returns
    ``np.dot(rot_mat, v)``; *v* may be a 3-vector or a 3xN array of column
    vectors.  Reference:
    https://stackoverflow.com/questions/6802577/rotation-of-3d-vector
    """
    unit_axis = np.asarray(axis)
    unit_axis = unit_axis / math.sqrt(np.dot(unit_axis, unit_axis))
    half_theta = degrees * math.pi / 180 / 2.0
    a = math.cos(half_theta)
    b, c, d = -unit_axis * math.sin(half_theta)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    row0 = [aa + bb - cc - dd, 2 * (b * c + a * d), 2 * (b * d - a * c)]
    row1 = [2 * (b * c - a * d), aa + cc - bb - dd, 2 * (c * d + a * b)]
    row2 = [2 * (b * d + a * c), 2 * (c * d - a * b), aa + dd - bb - cc]
    return np.dot(np.array([row0, row1, row2]), v)
# this seems to fix the issues with links to the classes/subclasses
# Registry of every concrete Transformation subclass defined in this module.
all_classes = [Transformation_distance,
               Transformation_distance_1side,
               Transformation_distance_1side_expand,
               Transformation_move_vector,
               Transformation_group_vector,
               Transformation_vibration,
               Transformation_distanceCOM,
               Transformation_distanceCOM_1side,
               Transformation_rotation,
               Transformation_rotationCOM]
# The list result is intentionally discarded; touching issubclass() once per
# class is kept only for the side effect noted above (documentation links).
[issubclass(i, Transformation) for i in all_classes]
| StarcoderdataPython |
"""
tracked_object.py defines TrackedObj -> a datatype which tracks on object methods
"""
import numpy as np
import operator
import functools
import copy
import math
from collections.abc import Iterable
import uuid
def reset_array_prov(array, array_id=None):
    """Assign fresh provenance to every element of *array* (in place).

    Each element gets a new random UUID via its ``set_provenance`` method,
    and a record ``(array_id, array.shape, index)`` is written through its
    ``write`` method, tying the element back to its position in the array.

    Parameters
    ----------
    array : numpy object array whose elements expose set_provenance()/write()
        (TrackedObj instances in this module's usage).
    array_id : identifier for the whole array; a fresh UUID when None.

    Returns the same (mutated) array.
    """
    # `is None`, not `== None`: identity is the correct idiom and is safe
    # even for ids with unusual __eq__ behavior.
    if array_id is None:
        array_id = uuid.uuid4()
    for index, x in np.ndenumerate(array):
        x.set_provenance(uuid.uuid4())
        x.write((array_id, array.shape, index))
    return array
def set_array_prov(array, fp, array_id=None):
    """Wrap every element of *array* in a TrackedObj and return the wrapped
    array.

    Unlike reset_array_prov, the input array is left untouched: a new object
    array of the same shape is built, each element wrapped in a TrackedObj
    (logging to *fp*) and stamped with a ``(array_id, array.shape, index)``
    provenance record.
    """
    if array_id is None:
        array_id = uuid.uuid4()
    new_array = np.empty(array.shape, dtype=object)
    for index, x in np.ndenumerate(array):
        new_array[index] = TrackedObj(x, fp, uuid.uuid4())
        new_array[index].write((array_id, array.shape, index))
    # Bug fix: previously returned the original, unwrapped `array`, silently
    # discarding the freshly built TrackedObj array.
    return new_array
def add_provenance_copy(orig_func):
    """Decorator: run *orig_func* on unwrapped values and re-wrap the result.

    Tracked arguments (anything exposing both ``id`` and ``value``) are
    unwrapped to their raw values before calling *orig_func*.  The ids of
    *ref* and of every tracked argument are collected and written to each
    output object, recording the data-flow edges that produced it.

    NOTE(review, from original TODO): when the result is not a float the
    wrapper may hand back an object lacking some expected methods -- the
    wrapping class is always ``ref.__class__``.
    """
    @functools.wraps(orig_func)
    def funct(ref, *args):
        unwrapped = []
        provenance = [ref.id]
        for arg in args:
            if hasattr(arg, 'id') and hasattr(arg, 'value'):
                # Bug fix: `provenance += arg.id` tried to iterate the id
                # itself (a TypeError for UUID ids, silently wrong for
                # strings); the id must be appended as a single item.
                provenance.append(arg.id)
                unwrapped.append(arg.value)
            else:
                unwrapped.append(arg)
        value = orig_func(ref, *unwrapped)
        if not isinstance(value, Iterable):
            output = ref.__class__(value, ref.fp)
            for prov_id in provenance:
                output.write(prov_id)
        else:
            # Iterable results: wrap each element separately and stamp each
            # with the full provenance list.
            outputs = []
            for v in value:
                out = ref.__class__(v, ref.fp)
                outputs.append(out)
                for prov_id in provenance:
                    out.write(prov_id)
            output = tuple(outputs)
        return output
    return funct
class TrackedObj(object):
    """A provenance-tracking wrapper around a value (primarily floats, but
    technically any type).

    Each instance carries a UUID identity (``id``) and a file-like log
    (``fp``).  Operator dunders are wrapped with ``add_provenance_copy`` so
    every operation yields a new TrackedObj whose lineage (input id ->
    output id) is written to the log.

    NOTE(review): wrapping dunders such as __bool__, __hash__, __float__ and
    the comparisons means they return TrackedObj rather than the native type
    Python expects, so contexts that require the native type (``bool(x)``,
    dict membership, sorting) will fail -- see the TODO on __float__.
    """
    def __init__(self, value, file_pointer, id=None):
        # value: the wrapped raw value; id: provenance identity (fresh UUID
        # unless explicitly supplied); fp: file-like sink for lineage records.
        self.value = value
        if id is None:
            self.id = uuid.uuid4()
        else:
            self.id = id
        self.fp = file_pointer
    def write(self, prev, next=None):
        # Record a provenance edge (prev -> next); `next` defaults to this
        # object's own id.
        if next is None:
            next = self.id
        tup = str((prev, next))
        self.fp.write(tup)
    def set_provenance(self, id):
        # Replace this object's provenance identity.
        self.id = id
    def set_value(self, value):
        # Replace the wrapped raw value (identity is unchanged).
        self.value = value
    # ------------------------------------------------------------------
    # Wrapped operators: each returns a new TrackedObj (or tuple of them)
    # whose provenance records the operands that produced it.
    # ------------------------------------------------------------------
    @add_provenance_copy
    def __abs__(self):
        return abs(self.value)
    @add_provenance_copy
    def __add__(self, other):
        return self.value + other
    @add_provenance_copy
    def __bool__(self):
        return bool(self.value)
    @add_provenance_copy
    def __divmod__(self, other):
        return divmod(self.value, other)
    @add_provenance_copy
    def __eq__(self, other):
        return self.value == other
    @add_provenance_copy
    def __float__(self):
        #TODO: We might want to change to datatype of the value without losing the provenance
        # How can we differentiate between the two cases?
        return float(self.value)
    @add_provenance_copy
    def __floordiv__(self, other):
        return self.value//other
    @add_provenance_copy
    def __ge__(self, other):
        return self.value >= other
    # def __format__(self, format_spec: str):
    #     return self.value.__format__(format_spec)
    # def __get_format__(self):
    #     return self.value.__getformat__()
    @add_provenance_copy
    def __gt__(self, other):
        return self.value > other
    @add_provenance_copy
    def __hash__(self):
        return hash(self.value)
    @add_provenance_copy
    def __le__(self, other):
        return self.value <= other
    @add_provenance_copy
    def __lt__(self, other):
        return self.value < other
    @add_provenance_copy
    def __mod__(self, other):
        return self.value % other
    @add_provenance_copy
    def __mul__(self, other):
        return self.value * other
    @add_provenance_copy
    def __ne__(self, other):
        return self.value != other
    @add_provenance_copy
    def __neg__(self):
        return -self.value
    @add_provenance_copy
    def __pos__(self):
        return +self.value
    @add_provenance_copy
    def __pow__(self, other):
        return self.value ** other
    @add_provenance_copy
    def __radd__(self, other):
        return other + self.value
    @add_provenance_copy
    def __rdivmod__(self, other):
        return divmod(other, self.value)
    #TODO: __reduce__ __reduce_ex__ function not defined?
    @add_provenance_copy
    def __rfloordiv__(self, other):
        return other //self.value
    @add_provenance_copy
    def __rmod__(self, other):
        return other % self.value
    @add_provenance_copy
    def __rmul__(self, other):
        return other * self.value
    @add_provenance_copy
    def __round__(self):
        return round(self.value)
    @add_provenance_copy
    def __rpow__(self, other):
        return other ** self.value
    @add_provenance_copy
    def __rsub__(self, other):
        return other - self.value
    @add_provenance_copy
    def __rtruediv__(self, other):
        return other / self.value
    #def __setformat__
    @add_provenance_copy
    def __sub__(self, other):
        return self.value - other
    @add_provenance_copy
    def __truediv__(self, other):
        return self.value/other
    @add_provenance_copy
    def __trunc__(self):
        return math.trunc(self.value)
    @add_provenance_copy
    def as_integer_ratio(self):
        return self.value.as_integer_ratio()
    @add_provenance_copy
    def conjugate(self):
        return self.value.conjugate()
    # def from_hex
    @add_provenance_copy
    def hex(self):
        return self.value.hex()
    @add_provenance_copy
    def imag(self):
        return self.value.imag()
    @add_provenance_copy
    def is_integer(self):
        return self.value.is_integer()
    @add_provenance_copy
    def real(self):
        return self.value.real()
    def __str__(self):
        # Bug fix: there is no `provenance` attribute on this class; the
        # identity lives in `self.id` (previously raised AttributeError).
        return str((self.value, self.id))
    def __repr__(self):
        return str((self.value, self.id))
# why get and set? -> set might make sense
# arr = np.empty((3, 1), dtype=object)
# arr[0] = TrackedObj(0, None)
# arr[1] = TrackedObj(5, None)
# arr[2] = TrackedObj(8, None)
# reset_array_prov(arr)
# print(arr.sum().provenance)
# save_array_prov(arr, './logs')
# x = TrackedObj(10.10, 1)
# import math
# print(math.trunc(x))
from bluetooth_shower_head import bluetoothManager_head
from bluetooth_scale import bluetoothManager_scale
from influx_poster import database_post
import signal
import sys
import time
if __name__ == '__main__':
    # Total data-collection window, in seconds.
    runtime = 480
    # Defined before the try so the exception handlers can safely attempt
    # cleanup even when the failure happens before the threads exist
    # (previously the handlers could raise NameError themselves).
    shower_head_thread = None
    shower_scale_thread = None

    def _stop_threads():
        """Best-effort shutdown of whichever bluetooth threads were started."""
        if shower_head_thread is not None:
            shower_head_thread.endBluetooth()
        if shower_scale_thread is not None:
            shower_scale_thread.endBluetooth()

    try:
        # Mark the start of a collection session in the database.
        database_post("indicator", 1.0, "startstop").start()
        print("instantiating threads")
        shower_head_thread = bluetoothManager_head()
        shower_scale_thread = bluetoothManager_scale()
        print("starting head thread")
        shower_head_thread.start()
        # Give the head connection time to settle before starting the scale.
        time.sleep(5)
        print("starting scale thread")
        shower_scale_thread.start()
        print("running for " + str(runtime) + " please wait")
        time.sleep(runtime)
        print("ending thread")
        # Mark the end of the collection session.
        database_post("indicator", 0.0, "startstop").start()
        _stop_threads()
    except KeyboardInterrupt:
        print("W: interrupt received, stopping")
        _stop_threads()
    except Exception as error:
        _stop_threads()
        print(repr(error))
    finally:
        print("cleaning up")
import numpy as np
from skimage import exposure
def image_from_separated_rgb(image_r, image_g, image_b, clip_min_pct=2, clip_max_pct=98, equalize_hist=True):
    '''
    Combine three single-channel arrays into one RGB image.

    Each channel is independently percentile-clipped to
    [clip_min_pct, clip_max_pct], optionally histogram-equalized, then
    min-max rescaled to 0-255.

    Parameters
    ----------
    image_r, image_g, image_b : 2-D arrays for the red/green/blue channels
        (assumed to share the same shape -- TODO confirm callers guarantee
        this).
    clip_min_pct, clip_max_pct : percentile bounds used for contrast clipping.
    equalize_hist : apply skimage.exposure.equalize_hist per channel if True.

    Returns
    -------
    array(m, n, 3) of RGB values (0-255, dtype int16)
    '''
    def _prep(channel):
        # Clip outliers, optionally equalize, then scale to 0..255.
        channel = np.clip(channel,
                          np.percentile(channel, clip_min_pct),
                          np.percentile(channel, clip_max_pct))
        if equalize_hist:
            channel = exposure.equalize_hist(channel)
        # NOTE(review): a constant channel makes max == min and divides by
        # zero here -- confirm callers never pass flat channels.
        channel = (channel - channel.min()) / (channel.max() - channel.min())
        return channel * 255
    image_r = _prep(image_r)
    image_g = _prep(image_g)
    image_b = _prep(image_b)
    # Stack the channels along a new last axis; astype(int16) truncates the
    # floats exactly as the previous zip/np.array construction did, but
    # without a Python-level loop over every pixel.
    return np.stack((image_r, image_g, image_b), axis=-1).astype(np.int16)
from django.db import models
from django.db.models import Sum, Q
from django.utils import timezone
from decimal import Decimal
class LedgerAccount(models.Model):
    """A particular account in the accounting ledger system.
    All transactions must have a left side (debit) and a right side (credit),
    and they must add up to zero.
    This implements the double-entry bookkeeping system. More background
    information is available in the Wikipedia:
    https://en.wikipedia.org/wiki/Double-entry_bookkeeping_system
    The normal balance for each account is determined by the
    :py:attr:`account_type`, in particular whether the integer value
    is less than zero:
    +-----------+-----+---------+
    | Type      | Int | Balance |
    +===========+=====+=========+
    | Expense   | -2  | debit   |
    +-----------+-----+---------+
    | Asset     | -1  | debit   |
    +-----------+-----+---------+
    | Equity    | 0   | credit  |
    +-----------+-----+---------+
    | Liability | 1   | credit  |
    +-----------+-----+---------+
    | Income    | 2   | credit  |
    +-----------+-----+---------+
    :param gnucash_account:
        Corresponding Gnucash account. Used for data exchange with the core
        accounting system.
    :param account_type:
        Type of account, based on double-entry bookkeeping principles. This
        is from *our* perspective: an account where a member deposits money
        would be a Liability account.
    """
    TYPE_ASSET = -1
    TYPE_EXPENSE = -2
    TYPE_EQUITY = 0
    TYPE_LIABILITY = 1
    TYPE_INCOME = 2
    TYPE_CHOICES = (
        (TYPE_ASSET, 'Asset'),
        (TYPE_EXPENSE, 'Expense'),
        (TYPE_EQUITY, 'Equity'),
        (TYPE_LIABILITY, 'Liability'),
        (TYPE_INCOME, 'Income'),
    )
    gnucash_account = models.TextField(blank=True)
    account_type = models.SmallIntegerField(choices=TYPE_CHOICES)
    def __str__(self):
        # Prefer the Gnucash account name; otherwise fall back to the linked
        # member's name (reverse relation `member`), then to the primary key.
        if self.gnucash_account is not None and len(self.gnucash_account) > 0:
            return "%s account '%s'" % (self.get_account_type_display(),
                                        self.gnucash_account)
        else:
            try:
                return "%s account for member %s" % (
                    self.get_account_type_display(), self.member.name)
            except models.fields.related.RelatedObjectDoesNotExist:
                return "%s account %d" % (self.get_account_type_display(),
                                          self.pk)
    def _sum_amounts(self, transactions):
        """Sum the ``amount`` column of *transactions*.
        SQL ``SUM`` over an empty queryset yields NULL (None), which is
        normalized to ``Decimal('0.00')``.  Shared by the credit and debit
        properties to avoid duplicated aggregation logic.
        """
        agg = transactions.all().aggregate(Sum('amount'))
        the_sum = agg['amount__sum']
        return Decimal('0.00') if the_sum is None else the_sum
    def _get_credits(self):
        """The sum of all credit transactions on this account.
        >>> acct.credits
        Decimal('42.00')
        """
        return self._sum_amounts(self.credit_transactions)
    credits = property(_get_credits)
    def _get_debits(self):
        """The sum of all debit transactions on this account.
        >>> acct.debits
        Decimal('23.23')
        """
        return self._sum_amounts(self.debit_transactions)
    debits = property(_get_debits)
    def _get_balance(self):
        """The raw balance of debits and credits on this account. Simply
        put, this returns debits minus credits without any regard for the
        account's normal balance.
        >>> acct.debits
        Decimal('42.00')
        >>> acct.credits
        Decimal('69.00')
        >>> acct.balance
        Decimal('-27.00')
        The summation of balance across all :py:class:`LedgerAccount` will
        (should?) equal *exactly* zero.
        See also: :py:attr:`account_balance`
        """
        return self.debits - self.credits
    balance = property(_get_balance)
    def _get_account_balance(self):
        """The balance of this account.
        Unlike :py:attr:`balance`, this flips the sign for credit accounts
        (e.g. Equity, Liability, Income).
        >>> acct.account_type is TYPE_LIABILITY
        True
        >>> acct.debits
        Decimal('42.00')
        >>> acct.credits
        Decimal('69.00')
        >>> acct.account_balance
        Decimal('-27.00')
        """
        # Debit-normal accounts (type < 0) keep the raw sign; credit-normal
        # accounts flip it.
        if self.account_type < 0:
            return self.balance
        else:
            return -self.balance
    account_balance = property(_get_account_balance)
    def get_account_transactions(self):
        """Returns a :class:`django.db.models.query.QuerySet` with all
        :py:class:`LedgerEntry` instances referencing this account, whether
        credit or debit.
        Example to print all effective dates and amounts:
        >>> for txn in acct.get_account_transactions():
        ...     print(txn.effective_date, txn.account_net(acct))
        """
        qq = Q(debit_account=self) | Q(credit_account=self)
        txns = LedgerEntry.objects.filter(qq)
        txns = txns.order_by('effective_date', 'created_date')
        return txns
class LedgerEntry(models.Model):
    """A financial transaction, implemented as a transfer between two
    :py:class:`LedgerAccount` instances.
    :param effective_date:
        :class:`django.db.models.DateField` containing the effective date of
        this transaction. Defaults to :func:`django.utils.timezone.now`.
    :param created_date:
        :class:`django.db.models.DateTimeField` containing the time this
        instance was created. Immutable.
    :param modified_date:
        :class:`django.db.models.DateTimeField` containing the time this
        instance was last modified. Immutable.
    :param debit_account:
        :class:`LedgerAccount` for the *left side* of a transaction.
    :param credit_account:
        :class:`LedgerAccount` for the *right side* of a transaction.
    :param amount:
        :class:`django.db.models.DecimalField` with the exact amount of this
        transaction.
    :param details:
        :class:`django.db.models.TextField` free-form description for this
        transaction.
    """
    effective_date = models.DateField(default=timezone.now)
    created_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 style
    # (on_delete is required from 2.0 on) -- confirm the project's Django
    # version before modernizing.
    debit_account = models.ForeignKey(LedgerAccount,
                                      related_name="debit_transactions")
    credit_account = models.ForeignKey(LedgerAccount,
                                       related_name="credit_transactions")
    amount = models.DecimalField(max_digits=8, decimal_places=2)
    details = models.TextField()
    class Meta:
        verbose_name = 'ledger entry'
        verbose_name_plural = 'ledger entries'
    def __str__(self):
        return "Amount %.2f (debit '%s', credit '%s', description '%s')" % (
            self.amount, self.debit_account, self.credit_account, self.details)
    def account_net(self, account):
        """Net effect of this transaction on *account*.
        Equation:
            :py:attr:`amount` = debit - credit
        This method returns :py:attr:`amount` if *account* is the debit
        (left-hand) account and -:py:attr:`amount` if *account* is the credit
        (right-hand) account.
        :param account:
            Account to compute net effect upon.
        """
        # Start from a Decimal zero so the return type is consistently
        # Decimal (previously a bare int 0 leaked out when *account* matched
        # neither side of the transaction).
        amt = Decimal('0.00')
        if self.debit_account == account:
            amt += self.amount
        if self.credit_account == account:
            amt -= self.amount
        return amt
class PaymentMethod(models.Model):
    """A valid method of payment, for adding money to a member's account.
    This provides a mapping between a payment processor's API and two
    :py:class:`LedgerAccount` instances, one for revenue and another for
    fees.
    Equation:
        revenue = payment - fees
    where *payment* is the amount credited to the member's account.
    :param name:
        User-friendly description of this payment method.
    :param api:
        The API to use on the back end, selected from :py:attr:`API_CHOICES`
    :param is_recurring:
        True if this method can be used for recurring payments (e.g. Stripe),
        False if it cannot (e.g. Cash).
    :param is_automated:
        True if this method is completely automated from the end user's
        perspective (e.g. Stripe), False if it requires intervention (e.g.
        Cheque).
    :param revenue_account:
        :py:class:`LedgerAccount` for the net transaction amount. Must
        be TYPE_ASSET.
    :param fee_account:
        :py:class:`LedgerAccount` for the transaction fees. Must be
        TYPE_EXPENSE.
    """
    API_NONE = 0
    API_STRIPEIO = 1
    API_STRIPEIO_BITCOIN = 2
    API_PAYPAL = 3
    API_CHOICES = (
        (API_NONE, 'None'),
        (API_STRIPEIO, 'Stripe'),
        #(API_STRIPEIO_BITCOIN, 'Stripe Bitcoin'),
        #(API_PAYPAL, 'PayPal'),
    )
    name = models.CharField(max_length=200)
    is_recurring = models.BooleanField()
    is_automated = models.BooleanField()
    api = models.PositiveSmallIntegerField(choices=API_CHOICES,
                                           default=API_NONE)
    revenue_account = models.ForeignKey(
        LedgerAccount, related_name="+",
        limit_choices_to={'account_type': LedgerAccount.TYPE_ASSET})
    fee_account = models.ForeignKey(
        LedgerAccount, related_name="+",
        limit_choices_to={'account_type': LedgerAccount.TYPE_EXPENSE},
        blank=True, null=True)
    def __str__(self):
        # Bug fix: `self.api is not self.API_NONE` compared int *identity*,
        # which only worked via CPython's small-int caching; equality is the
        # correct comparison for values loaded from the database.
        if self.api != self.API_NONE:
            return "%s (via %s)" % (self.name, self.get_api_display())
        else:
            return "%s" % self.name
1770868 | <gh_stars>1-10
import unittest
import numpy as np
from intfft import fft, ifft
class TestFFT(unittest.TestCase):
# confirm the input arguments .
    def _test_fft_input_type(self, dtype):
        # Helper: fft() should accept real/imag input arrays of this dtype
        # without raising; the outputs themselves are ignored here.
        xr1 = np.arange(2**7, dtype=dtype)
        xi1 = np.arange(2**7, dtype=dtype)
        _, _ = fft(xr1, xi1)
    # One concrete case per supported input dtype.
    def test_fft_input_type_i8(self):self._test_fft_input_type(np.int8)
    def test_fft_input_type_i16(self):self._test_fft_input_type(np.int16)
    def test_fft_input_type_i32(self):self._test_fft_input_type(np.int32)
    def test_fft_input_type_u8(self):self._test_fft_input_type(np.uint8)
    def test_fft_input_type_u16(self):self._test_fft_input_type(np.uint16)
# confirm the input arguments .
    def _test_ifft_input_type(self, dtype):
        # Helper: ifft() should accept real/imag input arrays of this dtype
        # without raising; the outputs themselves are ignored here.
        xr1 = np.arange(2**7, dtype=dtype)
        xi1 = np.arange(2**7, dtype=dtype)
        _, _ = ifft(xr1, xi1)
    # One concrete case per supported input dtype.
    def test_ifft_input_type_i8(self):self._test_ifft_input_type(np.int8)
    def test_ifft_input_type_i16(self):self._test_ifft_input_type(np.int16)
    def test_ifft_input_type_i32(self):self._test_ifft_input_type(np.int32)
    def test_ifft_input_type_u8(self):self._test_ifft_input_type(np.uint8)
    def test_ifft_input_type_u16(self):self._test_ifft_input_type(np.uint16)
# confirm the input arguments type(error)
    def _test_fft_input_ar_type_error(self, dtype):
        # Helper: fft() must reject a real-part array of an unsupported dtype
        # (imag part held at supported int32) with an
        # "incompatible function arguments" error.
        ar = np.arange(2**7, dtype=dtype)
        ai = np.arange(2**7, dtype=np.int32)
        with self.assertRaisesRegex(Exception, "incompatible function arguments. The following argument types are supported:"):
            _, _ = fft(ar, ai)
    # One concrete case per unsupported dtype.
    def test_fft_input_ar_type_error_i64(self):self._test_fft_input_ar_type_error(np.int64)
    def test_fft_input_ar_type_error_u32(self):self._test_fft_input_ar_type_error(np.uint32)
    def test_fft_input_ar_type_error_u64(self):self._test_fft_input_ar_type_error(np.uint64)
    def test_fft_input_ar_type_error_f32(self):self._test_fft_input_ar_type_error(np.float32)
    def test_fft_input_ar_type_error_f64(self):self._test_fft_input_ar_type_error(np.float64)
    def test_fft_input_ar_type_error_c64(self):self._test_fft_input_ar_type_error(np.complex64)
    def test_fft_input_ar_type_error_c128(self):self._test_fft_input_ar_type_error(np.complex128)
# confirm the input arguments type(error)
    def _test_fft_input_ai_type_error(self, dtype):
        # Helper: fft() must reject an imag-part array of an unsupported
        # dtype (real part held at supported int32) with an
        # "incompatible function arguments" error.
        ar = np.arange(2**7, dtype=np.int32)
        ai = np.arange(2**7, dtype=dtype)
        with self.assertRaisesRegex(Exception, "incompatible function arguments. The following argument types are supported:"):
            _, _ = fft(ar, ai)
    # One concrete case per unsupported dtype.
    def test_fft_input_ai_type_error_i64(self):self._test_fft_input_ai_type_error(np.int64)
    def test_fft_input_ai_type_error_u32(self):self._test_fft_input_ai_type_error(np.uint32)
    def test_fft_input_ai_type_error_u64(self):self._test_fft_input_ai_type_error(np.uint64)
    def test_fft_input_ai_type_error_f32(self):self._test_fft_input_ai_type_error(np.float32)
    def test_fft_input_ai_type_error_f64(self):self._test_fft_input_ai_type_error(np.float64)
    def test_fft_input_ai_type_error_c64(self):self._test_fft_input_ai_type_error(np.complex64)
    def test_fft_input_ai_type_error_c128(self):self._test_fft_input_ai_type_error(np.complex128)
# confirm the input arguments type(error)
    def _test_ifft_input_ar_error(self, dtype):
        # Helper: ifft() must reject a real-part array of an unsupported
        # dtype (imag part held at supported int32) with an
        # "incompatible function arguments" error.
        ar = np.arange(2**7, dtype=dtype)
        ai = np.arange(2**7, dtype=np.int32)
        with self.assertRaisesRegex(Exception, "incompatible function arguments. The following argument types are supported:"):
            _, _ = ifft(ar, ai)
    # One concrete case per unsupported dtype.
    def test_ifft_input_ar_error_i64(self):self._test_ifft_input_ar_error(np.int64)
    def test_ifft_input_ar_error_u32(self):self._test_ifft_input_ar_error(np.uint32)
    def test_ifft_input_ar_error_u64(self):self._test_ifft_input_ar_error(np.uint64)
    def test_ifft_input_ar_error_f32(self):self._test_ifft_input_ar_error(np.float32)
    def test_ifft_input_ar_error_f64(self):self._test_ifft_input_ar_error(np.float64)
    def test_ifft_input_ar_error_c64(self):self._test_ifft_input_ar_error(np.complex64)
    def test_ifft_input_ar_error_c128(self):self._test_ifft_input_ar_error(np.complex128)
# ifft() mirrors fft(): a non-int32 imaginary input ``ai`` must be rejected.
def _test_ifft_input_ai_error(self, dtype):
    """Call ifft() with a non-int32 ``ai`` and expect an argument-type error."""
    real_part = np.arange(2 ** 7, dtype=np.int32)
    imag_part = np.arange(2 ** 7, dtype=dtype)
    expected = "incompatible function arguments. The following argument types are supported:"
    with self.assertRaisesRegex(Exception, expected):
        _, _ = ifft(real_part, imag_part)

def test_ifft_input_ai_error_i64(self):
    self._test_ifft_input_ai_error(np.int64)

def test_ifft_input_ai_error_u32(self):
    self._test_ifft_input_ai_error(np.uint32)

def test_ifft_input_ai_error_u64(self):
    self._test_ifft_input_ai_error(np.uint64)

def test_ifft_input_ai_error_f32(self):
    self._test_ifft_input_ai_error(np.float32)

def test_ifft_input_ai_error_f64(self):
    self._test_ifft_input_ai_error(np.float64)

def test_ifft_input_ai_error_c64(self):
    self._test_ifft_input_ai_error(np.complex64)

def test_ifft_input_ai_error_c128(self):
    self._test_ifft_input_ai_error(np.complex128)
# Whatever accepted integer dtype goes in, fft() must hand back int32 arrays.
def _test_fft_output_type(self, dtype):
    """Both output arrays of fft() have dtype int32."""
    re_in = np.arange(2 ** 7, dtype=dtype)
    im_in = np.arange(2 ** 7, dtype=dtype)
    re_out, im_out = fft(re_in, im_in)
    for out in (re_out, im_out):
        self.assertTrue(out.dtype == np.int32)

def test_fft_output_type_i8(self):
    self._test_fft_output_type(np.int8)

def test_fft_output_type_i16(self):
    self._test_fft_output_type(np.int16)

def test_fft_output_type_i32(self):
    self._test_fft_output_type(np.int32)

def test_fft_output_type_u8(self):
    self._test_fft_output_type(np.uint8)

def test_fft_output_type_u16(self):
    self._test_fft_output_type(np.uint16)
# fft() must never modify its input arrays in place.
def _test_fft_input_immutable(self, dtype):
    """Inputs to fft() are byte-identical before and after the call."""
    re_in = np.arange(2 ** 7, dtype=dtype)
    im_in = np.arange(2 ** 7, dtype=dtype)
    re_snapshot = re_in.copy()
    im_snapshot = im_in.copy()
    fft(re_in, im_in)
    self.assertTrue(np.array_equal(re_snapshot, re_in))
    self.assertTrue(np.array_equal(im_snapshot, im_in))

def test_fft_input_immutable_i8(self):
    self._test_fft_input_immutable(np.int8)

def test_fft_input_immutable_i16(self):
    self._test_fft_input_immutable(np.int16)

def test_fft_input_immutable_i32(self):
    self._test_fft_input_immutable(np.int32)

def test_fft_input_immutable_u8(self):
    self._test_fft_input_immutable(np.uint8)

def test_fft_input_immutable_u16(self):
    self._test_fft_input_immutable(np.uint16)
# ifft() must never modify its input arrays in place.
def _test_ifft_input_immutable(self, dtype):
    """Inputs to ifft() are byte-identical before and after the call."""
    re_in = np.arange(2 ** 7, dtype=dtype)
    im_in = np.arange(2 ** 7, dtype=dtype)
    re_snapshot = re_in.copy()
    im_snapshot = im_in.copy()
    ifft(re_in, im_in)
    self.assertTrue(np.array_equal(re_snapshot, re_in))
    self.assertTrue(np.array_equal(im_snapshot, im_in))

def test_ifft_input_immutable_i8(self):
    self._test_ifft_input_immutable(np.int8)

def test_ifft_input_immutable_i16(self):
    self._test_ifft_input_immutable(np.int16)

def test_ifft_input_immutable_i32(self):
    self._test_ifft_input_immutable(np.int32)

def test_ifft_input_immutable_u8(self):
    self._test_ifft_input_immutable(np.uint8)

def test_ifft_input_immutable_u16(self):
    self._test_ifft_input_immutable(np.uint16)
# ifft(fft(x)) must reproduce the original signal exactly (lossless round trip).
def _test_fft_invertible(self, dtype):
    """Round-trip a ramp signal through fft()/ifft() and compare."""
    re_in = np.arange(2 ** 7, dtype=dtype)
    im_in = np.arange(2 ** 7, dtype=dtype)
    re_back, im_back = ifft(*fft(re_in, im_in))
    self.assertTrue(np.array_equal(re_back, re_in))
    self.assertTrue(np.array_equal(im_back, im_in))

def test_fft_invertible_i8(self):
    self._test_fft_invertible(np.int8)

def test_fft_invertible_i16(self):
    self._test_fft_invertible(np.int16)

def test_fft_invertible_i32(self):
    self._test_fft_invertible(np.int32)

def test_fft_invertible_u8(self):
    self._test_fft_invertible(np.uint8)

def test_fft_invertible_u16(self):
    self._test_fft_invertible(np.uint16)
# Largest accepted magnitude: per-sample values up to (2**31)//n - 1 must
# round-trip losslessly for every supported transform length n.
def _test_fft_input_max(self, n):
    """All-max constant signals of length n survive fft()+ifft() unchanged."""
    limit = (2 ** 31) // n - 1
    re_in = np.full(n, limit, dtype=np.int32)
    im_in = np.full(n, limit, dtype=np.int32)
    re_back, im_back = ifft(*fft(re_in, im_in))
    self.assertTrue(np.array_equal(re_back, re_in))
    self.assertTrue(np.array_equal(im_back, im_in))

def test_fft_input_max_01(self): self._test_fft_input_max(2 ** 1)
def test_fft_input_max_02(self): self._test_fft_input_max(2 ** 2)
def test_fft_input_max_03(self): self._test_fft_input_max(2 ** 3)
def test_fft_input_max_04(self): self._test_fft_input_max(2 ** 4)
def test_fft_input_max_05(self): self._test_fft_input_max(2 ** 5)
def test_fft_input_max_06(self): self._test_fft_input_max(2 ** 6)
def test_fft_input_max_07(self): self._test_fft_input_max(2 ** 7)
def test_fft_input_max_08(self): self._test_fft_input_max(2 ** 8)
def test_fft_input_max_09(self): self._test_fft_input_max(2 ** 9)
def test_fft_input_max_10(self): self._test_fft_input_max(2 ** 10)
def test_fft_input_max_11(self): self._test_fft_input_max(2 ** 11)
def test_fft_input_max_12(self): self._test_fft_input_max(2 ** 12)
def test_fft_input_max_13(self): self._test_fft_input_max(2 ** 13)
def test_fft_input_max_14(self): self._test_fft_input_max(2 ** 14)
def test_fft_input_max_15(self): self._test_fft_input_max(2 ** 15)
def test_fft_input_max_16(self): self._test_fft_input_max(2 ** 16)
def test_fft_input_max_17(self): self._test_fft_input_max(2 ** 17)
def test_fft_input_max_18(self): self._test_fft_input_max(2 ** 18)
def test_fft_input_max_19(self): self._test_fft_input_max(2 ** 19)
def test_fft_input_max_20(self): self._test_fft_input_max(2 ** 20)
# Real samples at or above (2**31)//n overflow the fixed-point scaling and
# must be rejected with a range error.
def _test_fft_input_ar_max_error(self, n):
    """fft() raises when a real sample reaches the (2**31)//n limit."""
    xr1 = np.array([(2**31)//n]*n, dtype=np.int32)
    xi1 = np.zeros(n, dtype=np.int32)
    # Raw string: "\[" and "\d" are invalid escape sequences in a plain
    # string literal and raise SyntaxWarning on modern Python.
    with self.assertRaisesRegex(Exception, r"value range is assumed to be \[-\d+, \d+\]"):
        _, _ = fft(xr1, xi1)
def test_fft_input_ar_max_error_01(self):self._test_fft_input_ar_max_error(2**1)
def test_fft_input_ar_max_error_02(self):self._test_fft_input_ar_max_error(2**2)
def test_fft_input_ar_max_error_03(self):self._test_fft_input_ar_max_error(2**3)
def test_fft_input_ar_max_error_04(self):self._test_fft_input_ar_max_error(2**4)
def test_fft_input_ar_max_error_05(self):self._test_fft_input_ar_max_error(2**5)
def test_fft_input_ar_max_error_06(self):self._test_fft_input_ar_max_error(2**6)
def test_fft_input_ar_max_error_07(self):self._test_fft_input_ar_max_error(2**7)
def test_fft_input_ar_max_error_08(self):self._test_fft_input_ar_max_error(2**8)
def test_fft_input_ar_max_error_09(self):self._test_fft_input_ar_max_error(2**9)
def test_fft_input_ar_max_error_10(self):self._test_fft_input_ar_max_error(2**10)
def test_fft_input_ar_max_error_11(self):self._test_fft_input_ar_max_error(2**11)
def test_fft_input_ar_max_error_12(self):self._test_fft_input_ar_max_error(2**12)
def test_fft_input_ar_max_error_13(self):self._test_fft_input_ar_max_error(2**13)
def test_fft_input_ar_max_error_14(self):self._test_fft_input_ar_max_error(2**14)
def test_fft_input_ar_max_error_15(self):self._test_fft_input_ar_max_error(2**15)
def test_fft_input_ar_max_error_16(self):self._test_fft_input_ar_max_error(2**16)
def test_fft_input_ar_max_error_17(self):self._test_fft_input_ar_max_error(2**17)
def test_fft_input_ar_max_error_18(self):self._test_fft_input_ar_max_error(2**18)
def test_fft_input_ar_max_error_19(self):self._test_fft_input_ar_max_error(2**19)
def test_fft_input_ar_max_error_20(self):self._test_fft_input_ar_max_error(2**20)
# Imaginary samples at or above (2**31)//n overflow the fixed-point scaling
# and must be rejected with a range error.
def _test_fft_input_ai_max_error(self, n):
    """fft() raises when an imaginary sample reaches the (2**31)//n limit."""
    xr1 = np.zeros(n, dtype=np.int32)
    xi1 = np.array([(2**31)//n]*n, dtype=np.int32)
    # Raw string: "\[" and "\d" are invalid escape sequences in a plain
    # string literal and raise SyntaxWarning on modern Python.
    with self.assertRaisesRegex(Exception, r"value range is assumed to be \[-\d+, \d+\]"):
        _, _ = fft(xr1, xi1)
def test_fft_input_ai_max_error_01(self):self._test_fft_input_ai_max_error(2**1)
def test_fft_input_ai_max_error_02(self):self._test_fft_input_ai_max_error(2**2)
def test_fft_input_ai_max_error_03(self):self._test_fft_input_ai_max_error(2**3)
def test_fft_input_ai_max_error_04(self):self._test_fft_input_ai_max_error(2**4)
def test_fft_input_ai_max_error_05(self):self._test_fft_input_ai_max_error(2**5)
def test_fft_input_ai_max_error_06(self):self._test_fft_input_ai_max_error(2**6)
def test_fft_input_ai_max_error_07(self):self._test_fft_input_ai_max_error(2**7)
def test_fft_input_ai_max_error_08(self):self._test_fft_input_ai_max_error(2**8)
def test_fft_input_ai_max_error_09(self):self._test_fft_input_ai_max_error(2**9)
def test_fft_input_ai_max_error_10(self):self._test_fft_input_ai_max_error(2**10)
def test_fft_input_ai_max_error_11(self):self._test_fft_input_ai_max_error(2**11)
def test_fft_input_ai_max_error_12(self):self._test_fft_input_ai_max_error(2**12)
def test_fft_input_ai_max_error_13(self):self._test_fft_input_ai_max_error(2**13)
def test_fft_input_ai_max_error_14(self):self._test_fft_input_ai_max_error(2**14)
def test_fft_input_ai_max_error_15(self):self._test_fft_input_ai_max_error(2**15)
def test_fft_input_ai_max_error_16(self):self._test_fft_input_ai_max_error(2**16)
def test_fft_input_ai_max_error_17(self):self._test_fft_input_ai_max_error(2**17)
def test_fft_input_ai_max_error_18(self):self._test_fft_input_ai_max_error(2**18)
def test_fft_input_ai_max_error_19(self):self._test_fft_input_ai_max_error(2**19)
def test_fft_input_ai_max_error_20(self):self._test_fft_input_ai_max_error(2**20)
# Most negative accepted magnitude: per-sample values down to -(2**31)//n
# must round-trip losslessly for every supported transform length n.
def _test_fft_input_min(self, n):
    """All-min constant signals of length n survive fft()+ifft() unchanged."""
    limit = -(2 ** 31) // n
    re_in = np.full(n, limit, dtype=np.int32)
    im_in = np.full(n, limit, dtype=np.int32)
    re_back, im_back = ifft(*fft(re_in, im_in))
    self.assertTrue(np.array_equal(re_back, re_in))
    self.assertTrue(np.array_equal(im_back, im_in))

def test_fft_input_min_01(self): self._test_fft_input_min(2 ** 1)
def test_fft_input_min_02(self): self._test_fft_input_min(2 ** 2)
def test_fft_input_min_03(self): self._test_fft_input_min(2 ** 3)
def test_fft_input_min_04(self): self._test_fft_input_min(2 ** 4)
def test_fft_input_min_05(self): self._test_fft_input_min(2 ** 5)
def test_fft_input_min_06(self): self._test_fft_input_min(2 ** 6)
def test_fft_input_min_07(self): self._test_fft_input_min(2 ** 7)
def test_fft_input_min_08(self): self._test_fft_input_min(2 ** 8)
def test_fft_input_min_09(self): self._test_fft_input_min(2 ** 9)
def test_fft_input_min_10(self): self._test_fft_input_min(2 ** 10)
def test_fft_input_min_11(self): self._test_fft_input_min(2 ** 11)
def test_fft_input_min_12(self): self._test_fft_input_min(2 ** 12)
def test_fft_input_min_13(self): self._test_fft_input_min(2 ** 13)
def test_fft_input_min_14(self): self._test_fft_input_min(2 ** 14)
def test_fft_input_min_15(self): self._test_fft_input_min(2 ** 15)
def test_fft_input_min_16(self): self._test_fft_input_min(2 ** 16)
def test_fft_input_min_17(self): self._test_fft_input_min(2 ** 17)
def test_fft_input_min_18(self): self._test_fft_input_min(2 ** 18)
def test_fft_input_min_19(self): self._test_fft_input_min(2 ** 19)
def test_fft_input_min_20(self): self._test_fft_input_min(2 ** 20)
# confirm input min values(error) -- header previously said "max" by mistake.
# Real samples below -(2**31)//n underflow the fixed-point scaling and must
# be rejected with a range error.
def _test_fft_input_ar_min_error(self, n):
    """fft() raises when a real sample goes below the -(2**31)//n limit."""
    xr1 = np.array([-(2**31)//n-1]*n, dtype=np.int32)
    xi1 = np.zeros(n, dtype=np.int32)
    # Raw string: "\[" and "\d" are invalid escape sequences in a plain
    # string literal and raise SyntaxWarning on modern Python.
    with self.assertRaisesRegex(Exception, r"value range is assumed to be \[-\d+, \d+\]"):
        _, _ = fft(xr1, xi1)
def test_fft_input_ar_min_error_01(self):self._test_fft_input_ar_min_error(2**1)
def test_fft_input_ar_min_error_02(self):self._test_fft_input_ar_min_error(2**2)
def test_fft_input_ar_min_error_03(self):self._test_fft_input_ar_min_error(2**3)
def test_fft_input_ar_min_error_04(self):self._test_fft_input_ar_min_error(2**4)
def test_fft_input_ar_min_error_05(self):self._test_fft_input_ar_min_error(2**5)
def test_fft_input_ar_min_error_06(self):self._test_fft_input_ar_min_error(2**6)
def test_fft_input_ar_min_error_07(self):self._test_fft_input_ar_min_error(2**7)
def test_fft_input_ar_min_error_08(self):self._test_fft_input_ar_min_error(2**8)
def test_fft_input_ar_min_error_09(self):self._test_fft_input_ar_min_error(2**9)
def test_fft_input_ar_min_error_10(self):self._test_fft_input_ar_min_error(2**10)
def test_fft_input_ar_min_error_11(self):self._test_fft_input_ar_min_error(2**11)
def test_fft_input_ar_min_error_12(self):self._test_fft_input_ar_min_error(2**12)
def test_fft_input_ar_min_error_13(self):self._test_fft_input_ar_min_error(2**13)
def test_fft_input_ar_min_error_14(self):self._test_fft_input_ar_min_error(2**14)
def test_fft_input_ar_min_error_15(self):self._test_fft_input_ar_min_error(2**15)
def test_fft_input_ar_min_error_16(self):self._test_fft_input_ar_min_error(2**16)
def test_fft_input_ar_min_error_17(self):self._test_fft_input_ar_min_error(2**17)
def test_fft_input_ar_min_error_18(self):self._test_fft_input_ar_min_error(2**18)
def test_fft_input_ar_min_error_19(self):self._test_fft_input_ar_min_error(2**19)
def test_fft_input_ar_min_error_20(self):self._test_fft_input_ar_min_error(2**20)
# confirm input min values(error) -- header previously said "max" by mistake.
# Imaginary samples below -(2**31)//n underflow the fixed-point scaling and
# must be rejected with a range error.
def _test_fft_input_ai_min_error(self, n):
    """fft() raises when an imaginary sample goes below the -(2**31)//n limit."""
    xr1 = np.zeros(n, dtype=np.int32)
    xi1 = np.array([-(2**31)//n-1]*n, dtype=np.int32)
    # Raw string: "\[" and "\d" are invalid escape sequences in a plain
    # string literal and raise SyntaxWarning on modern Python.
    with self.assertRaisesRegex(Exception, r"value range is assumed to be \[-\d+, \d+\]"):
        _, _ = fft(xr1, xi1)
def test_fft_input_ai_min_error_01(self):self._test_fft_input_ai_min_error(2**1)
def test_fft_input_ai_min_error_02(self):self._test_fft_input_ai_min_error(2**2)
def test_fft_input_ai_min_error_03(self):self._test_fft_input_ai_min_error(2**3)
def test_fft_input_ai_min_error_04(self):self._test_fft_input_ai_min_error(2**4)
def test_fft_input_ai_min_error_05(self):self._test_fft_input_ai_min_error(2**5)
def test_fft_input_ai_min_error_06(self):self._test_fft_input_ai_min_error(2**6)
def test_fft_input_ai_min_error_07(self):self._test_fft_input_ai_min_error(2**7)
def test_fft_input_ai_min_error_08(self):self._test_fft_input_ai_min_error(2**8)
def test_fft_input_ai_min_error_09(self):self._test_fft_input_ai_min_error(2**9)
def test_fft_input_ai_min_error_10(self):self._test_fft_input_ai_min_error(2**10)
def test_fft_input_ai_min_error_11(self):self._test_fft_input_ai_min_error(2**11)
def test_fft_input_ai_min_error_12(self):self._test_fft_input_ai_min_error(2**12)
def test_fft_input_ai_min_error_13(self):self._test_fft_input_ai_min_error(2**13)
def test_fft_input_ai_min_error_14(self):self._test_fft_input_ai_min_error(2**14)
def test_fft_input_ai_min_error_15(self):self._test_fft_input_ai_min_error(2**15)
def test_fft_input_ai_min_error_16(self):self._test_fft_input_ai_min_error(2**16)
def test_fft_input_ai_min_error_17(self):self._test_fft_input_ai_min_error(2**17)
def test_fft_input_ai_min_error_18(self):self._test_fft_input_ai_min_error(2**18)
def test_fft_input_ai_min_error_19(self):self._test_fft_input_ai_min_error(2**19)
def test_fft_input_ai_min_error_20(self):self._test_fft_input_ai_min_error(2**20)
# Ten random in-range signals per length must round-trip losslessly.
def _test_fft_invertible_random(self, n):
    """ifft(fft(x)) == x for random int32 signals within the legal range."""
    bound = (2 ** 31) // n
    for _ in range(10):
        re_in = np.random.randint(-bound, bound, n, dtype=np.int32)
        im_in = np.random.randint(-bound, bound, n, dtype=np.int32)
        re_back, im_back = ifft(*fft(re_in, im_in))
        self.assertTrue(np.array_equal(re_back, re_in))
        self.assertTrue(np.array_equal(im_back, im_in))

def test_fft_invertible_random_00(self): self._test_fft_invertible_random(2 ** 0)
def test_fft_invertible_random_01(self): self._test_fft_invertible_random(2 ** 1)
def test_fft_invertible_random_02(self): self._test_fft_invertible_random(2 ** 2)
def test_fft_invertible_random_03(self): self._test_fft_invertible_random(2 ** 3)
def test_fft_invertible_random_04(self): self._test_fft_invertible_random(2 ** 4)
def test_fft_invertible_random_05(self): self._test_fft_invertible_random(2 ** 5)
def test_fft_invertible_random_06(self): self._test_fft_invertible_random(2 ** 6)
def test_fft_invertible_random_07(self): self._test_fft_invertible_random(2 ** 7)
def test_fft_invertible_random_08(self): self._test_fft_invertible_random(2 ** 8)
def test_fft_invertible_random_09(self): self._test_fft_invertible_random(2 ** 9)
def test_fft_invertible_random_10(self): self._test_fft_invertible_random(2 ** 10)
def test_fft_invertible_random_11(self): self._test_fft_invertible_random(2 ** 11)
def test_fft_invertible_random_12(self): self._test_fft_invertible_random(2 ** 12)
def test_fft_invertible_random_13(self): self._test_fft_invertible_random(2 ** 13)
def test_fft_invertible_random_14(self): self._test_fft_invertible_random(2 ** 14)
def test_fft_invertible_random_15(self): self._test_fft_invertible_random(2 ** 15)
def test_fft_invertible_random_16(self): self._test_fft_invertible_random(2 ** 16)
def test_fft_invertible_random_17(self): self._test_fft_invertible_random(2 ** 17)
def test_fft_invertible_random_18(self): self._test_fft_invertible_random(2 ** 18)
def test_fft_invertible_random_19(self): self._test_fft_invertible_random(2 ** 19)
def test_fft_invertible_random_20(self): self._test_fft_invertible_random(2 ** 20)
# Golden-value check: fft() of a 32-point ramp against precomputed output.
def test_fft_output_value(self):
    re_in = np.arange(2 ** 5, dtype=np.int32)
    im_in = np.zeros(2 ** 5, dtype=np.int32)
    re_out, im_out = fft(re_in, im_in)
    expected_re = [496, -20, -18, -16, -17, -15, -17, -16, -16, -17, -15, -17, -17, -15, -14, -16, -16, -16, -16, -18, -15, -15, -17, -12, -16, -15, -15, -17, -15, -15, -16, -16]
    expected_im = [0, 158, 79, 51, 39, 34, 25, 18, 16, 15, 12, 2, 6, 6, 2, 3, 0, 0, -1, -1, -7, -10, -13, -14, -16, -17, -26, -28, -38, -58, -78, -159]
    self.assertTrue(np.array_equal(re_out, expected_re))
    self.assertTrue(np.array_equal(im_out, expected_im))
# fft() must also accept plain Python lists (not just ndarrays).
def test_fft_input_list(self):
    re_out, im_out = fft(list(range(2 ** 5)), [0] * (2 ** 5))
    expected_re = [496, -20, -18, -16, -17, -15, -17, -16, -16, -17, -15, -17, -17, -15, -14, -16, -16, -16, -16, -18, -15, -15, -17, -12, -16, -15, -15, -17, -15, -15, -16, -16]
    expected_im = [0, 158, 79, 51, 39, 34, 25, 18, 16, 15, 12, 2, 6, 6, 2, 3, 0, 0, -1, -1, -7, -10, -13, -14, -16, -17, -26, -28, -38, -58, -78, -159]
    self.assertTrue(np.array_equal(re_out, expected_re))
    self.assertTrue(np.array_equal(im_out, expected_im))
# ifft() must also accept plain Python lists (not just ndarrays).
def test_ifft_input_list(self):
    spectrum_re = [496, -20, -18, -16, -17, -15, -17, -16, -16, -17, -15, -17, -17, -15, -14, -16, -16, -16, -16, -18, -15, -15, -17, -12, -16, -15, -15, -17, -15, -15, -16, -16]
    spectrum_im = [0, 158, 79, 51, 39, 34, 25, 18, 16, 15, 12, 2, 6, 6, 2, 3, 0, 0, -1, -1, -7, -10, -13, -14, -16, -17, -26, -28, -38, -58, -78, -159]
    re_out, im_out = ifft(spectrum_re, spectrum_im)
    self.assertTrue(np.array_equal(re_out, np.arange(2 ** 5, dtype=np.int32)))
    self.assertTrue(np.array_equal(im_out, np.zeros(2 ** 5, dtype=np.int32)))
# Non-contiguous (strided) input views must produce the same result as
# contiguous arrays.
def test_fft_input_with_strides(self):
    ramp = np.arange(2 ** 5, dtype=np.int32)
    interleaved = np.c_[ramp, ramp].flatten()  # every other element is the ramp
    zeros = np.zeros(2 ** 6, dtype=np.int32)
    re_out, im_out = fft(interleaved[::2], zeros[::2])
    expected_re = [496, -20, -18, -16, -17, -15, -17, -16, -16, -17, -15, -17, -17, -15, -14, -16, -16, -16, -16, -18, -15, -15, -17, -12, -16, -15, -15, -17, -15, -15, -16, -16]
    expected_im = [0, 158, 79, 51, 39, 34, 25, 18, 16, 15, 12, 2, 6, 6, 2, 3, 0, 0, -1, -1, -7, -10, -13, -14, -16, -17, -26, -28, -38, -58, -78, -159]
    self.assertTrue(np.array_equal(re_out, expected_re))
    self.assertTrue(np.array_equal(im_out, expected_im))
# Shape validation: inputs must be 1-D, equal length, and a power of two.
def _test_fft_input_shape_error(self, shape1, shape2, regex):
    """fft() with invalid/mismatched shapes raises matching ``regex``."""
    x1 = np.zeros(shape1, dtype=np.int32)
    x2 = np.zeros(shape2, dtype=np.int32)
    with self.assertRaisesRegex(Exception, regex):
        _, _ = fft(x1, x2)
# Raw strings below: "\." / "\(" in plain literals are invalid escape
# sequences and raise SyntaxWarning on modern Python.
def test_fft_input_error_unexpected_ndim_ar(self):self._test_fft_input_shape_error((2**10,1), (2**10,), r"ar\.ndim != 1")
def test_fft_input_error_unexpected_ndim_ai(self):self._test_fft_input_shape_error((2**10,), (2**10,1), r"ai\.ndim != 1")
def test_fft_input_error_different_shape0(self):self._test_fft_input_shape_error(2**9, 2**10, r"ar\.shape\(0\) != ai\.shape\(0\)")
def test_fft_input_error_not_pow2(self):self._test_fft_input_shape_error((2**10+1,), (2**10+1), r"ar\.shape\(0\) is not a power of 2")
# Shape validation for ifft(): same rules as fft().
def _test_ifft_input_shape_error(self, shape1, shape2, regex):
    """ifft() with invalid/mismatched shapes raises matching ``regex``."""
    x1 = np.zeros(shape1, dtype=np.int32)
    x2 = np.zeros(shape2, dtype=np.int32)
    with self.assertRaisesRegex(Exception, regex):
        _, _ = ifft(x1, x2)
# Raw strings below: "\." / "\(" in plain literals are invalid escape
# sequences and raise SyntaxWarning on modern Python.
def test_ifft_input_error_unexpected_ndim_ar(self):self._test_ifft_input_shape_error((2**10,1), (2**10,), r"ar\.ndim != 1")
def test_ifft_input_error_unexpected_ndim_ai(self):self._test_ifft_input_shape_error((2**10,), (2**10,1), r"ai\.ndim != 1")
def test_ifft_input_error_different_shape0(self):self._test_ifft_input_shape_error(2**9, 2**10, r"ar\.shape\(0\) != ai\.shape\(0\)")
def test_ifft_input_error_not_pow2(self):self._test_ifft_input_shape_error((2**10+1,), (2**10+1), r"ar\.shape\(0\) is not a power of 2")
# Allow running this test module directly: `python <file>` runs the suite
# with per-test verbose output.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| StarcoderdataPython |
106546 | <gh_stars>10-100
from glob import glob
from os import chdir, getcwd, makedirs, path
from subprocess import call
from .get_app import get_app_name
from .logic import check_app_path, check_injection, check_pkg_injection
# ANSI terminal escape sequences used to colorize console output.
C_None = "\x1b[0;39m"  # reset to the default foreground color
C_BRed = "\x1b[1;31m"  # bold red, used for error messages
def remove_app_injection(args):
    """Interactively remove an injectra payload from a macOS .app bundle.

    Deletes the injected launcher and payload (prompting the user via
    ``rm -i``) and restores the original executable, which the injector
    stashed under the name ``injectra``.  Exits the process on success or
    failure; never returns normally.
    """
    # 1 | Check for valid folder
    check_app_path(args)
    # 2 | Get the application name
    appname = get_app_name(args)
    # 3 | Check for injection
    check_injection(verbose_mode=False)
    # 4 | Swap the injected launcher back for the original binary.
    print("[i] Changing the start order.")
    try:
        print("[i] Please enter yes twice.")
        call(f"rm -i '{appname}'", shell=True)
        call("rm -i 'payload'", shell=True)
        # If the launcher still exists the user declined the rm prompt;
        # overwriting it with `mv` would lose the original binary.
        if path.isfile(path.abspath(getcwd()) + "/" + appname):
            print(C_BRed + "[!] Cannot continue without your permission." + C_None)
            raise Exception("Permission denied")
        call(f"mv 'injectra' '{appname}'", shell=True)
        # Fixed typo in the user-facing message ("sucessful").
        print("[+] The operation was successful.")
        quit()  # SystemExit is not an Exception, so this is not re-caught below
    except Exception:
        print(C_BRed + "[!] Cannot remove the injection." + C_None)
        quit()
def remove_pkg_injection(args):
    """Remove an injectra payload from a macOS ``.pkg`` installer.

    Unpacks ``args.pkg[0]`` into a scratch directory, restores the
    original pre/postinstall script in every injected (sub-)package,
    re-flattens the package in place and deletes the scratch directory.
    Exits the process on any failure.
    """
    # 1 | Derive the package file name from the supplied path.
    output = args.pkg[0]
    # Fixed: the original test was `output[:-1] == "/"`, which compared
    # everything but the last character and never detected a trailing slash.
    if output.endswith("/"):
        output = output.split("/")[-2]
    else:
        output = output.split("/")[-1]
    # 2 | Creating temporary directory
    print("[i] Creating temporary directory...")
    try:
        makedirs(f".tmp_{output}")
        chdir(f".tmp_{output}")
        # Fixed typo in the user-facing message ("sucessfully").
        print("[+] The package was successfully cloned.")
    except Exception:
        print(C_BRed + f"[!] Cannot write to output: .tmp_{output}" + C_None)
        quit()
    # 3 | Decompression of package
    print("[i] Decompressing the package...")
    try:
        # NOTE(review): args.pkg[0] is used unchanged after chdir into the
        # scratch dir -- a relative path would no longer resolve; confirm
        # callers always pass an absolute path.
        call(f"xar -xf '{args.pkg[0]}'", shell=True)
    except Exception:
        print(C_BRed + "[!] Cannot decompress the package." + C_None)
        print("[i] Make sure xar is available.")
        quit()
    # 4 | Collect the (sub-)packages that actually contain an injection.
    print("[i] Fetching injectable packages.")
    injectable_pkgs = list(glob("*.pkg"))
    injected_pkgs = []
    if len(injectable_pkgs) == 0:
        # Flat package: the scripts live at the top level.
        if check_pkg_injection():
            injected_pkgs.append(".")
        chdir("../")
    else:
        for pkg in injectable_pkgs:
            chdir(pkg)
            if check_pkg_injection():
                injected_pkgs.append(pkg)
            # NOTE(review): two levels up after one level down -- presumably
            # check_pkg_injection() changes directory too; confirm.
            chdir("../../")
    # 5 | Restore the original install scripts.
    print("[i] Removing injections...")
    identify_string = '#!/bin/sh\nDIR=$(cd "$(dirname "$0")"; pwd) ; cd $DIR\n$DIR/payload &\n$DIR/injectra &'
    for pkg_path in injected_pkgs:
        chdir(pkg_path + "/Scripts/")
        print("[i] Identifying previous state.")
        scripts = list(glob("*"))
        # Fixed: the original if/elif only inspected postinstall when
        # preinstall was absent, and left `injection_point` unbound
        # (UnboundLocalError) when a script existed but did not match.
        injection_point = None
        for candidate in ("preinstall", "postinstall"):
            if candidate in scripts:
                with open(candidate, "r") as file:
                    if file.read() == identify_string:
                        injection_point = candidate
                        break
        if injection_point is None:
            print(C_BRed + "[!] Fatal Error, while removing injection." + C_None)
            quit()
        print("[i] Removing an injection.")
        try:
            call(f"rm {injection_point}", shell=True)
            call("rm payload", shell=True)
            call(f"mv injectra {injection_point}", shell=True)
        except Exception:
            print(C_BRed + "[!] Could not remove injection." + C_None)
            quit()
        chdir("../../")
    # 6 | Repacking the package
    print("[i] Repacking the package...")
    try:
        print("Please answer with yes...")
        # Fixed: `call(f"rm {args.pkg[0]}")` was missing shell=True, so
        # subprocess tried to execute the whole string as a single program
        # name and raised FileNotFoundError.
        call(f"rm '{args.pkg[0]}'", shell=True)
        call(f"pkgutil --flatten . {args.pkg[0]}", shell=True)
    except Exception:
        print(C_BRed + "[!] Could not repack the package." + C_None)
        print("[i] Make sure pkgutil is available.")
        quit()
    # 7 | Cleaning up
    print("[i] Cleaning up.")
    try:
        call(f"rm -rf '.tmp_{output}'", shell=True)
    except Exception:
        print(C_BRed + "[!] Could not clean up." + C_None)
        quit()
    print("[i] Removed the injections.")
1677995 | <gh_stars>1-10
from pyramid.httpexceptions import HTTPBadRequest
from chsdi.lib.helpers import get_from_configuration
from chsdi.lib.helpers import float_raise_nan
class HeightValidation:
    """Validate lon/lat/elevation-model request parameters.

    Each setter validates its input and raises ``HTTPBadRequest`` with a
    user-facing message when the value is missing or malformed; validated
    values are stored on private attributes exposed via read-only
    properties.
    """

    def __init__(self):
        self._lon = None
        self._lat = None
        self._elevation_models = None

    @property
    def lon(self):
        """Validated longitude/easting as a float, or None if unset."""
        return self._lon

    @property
    def lat(self):
        """Validated latitude/northing as a float, or None if unset."""
        return self._lat

    @property
    def elevation_models(self):
        """List of validated elevation model names, or None if unset."""
        return self._elevation_models

    @lon.setter
    def lon(self, value):
        if value is None:
            raise HTTPBadRequest("Missing parameter 'easting'/'lon'")
        try:
            self._lon = float_raise_nan(value)
        except ValueError:
            raise HTTPBadRequest(
                "Please provide numerical values for the parameter 'easting'/'lon'")

    @lat.setter
    def lat(self, value):
        if value is None:
            # Fixed typo in the user-facing message: was "norhting".
            raise HTTPBadRequest("Missing parameter 'northing'/'lat'")
        try:
            self._lat = float_raise_nan(value)
        except ValueError:
            raise HTTPBadRequest(
                "Please provide numerical values for the parameter 'northing'/'lat'")

    @elevation_models.setter
    def elevation_models(self, value):
        # None means "all configured models"; a comma-separated string is
        # split into a list before validation.
        if value is None:
            value = get_from_configuration('raster.available')
        if not isinstance(value, list):
            value = [model.strip() for model in value.split(',')]
        for i in value:
            if i not in get_from_configuration('raster.available'):
                # NOTE(review): the message hardcodes DTM25/DTM2/COMB while
                # the actual whitelist comes from configuration -- confirm
                # they stay in sync.
                raise HTTPBadRequest(
                    "Please provide a valid name for the elevation model DTM25, DTM2 or COMB")
        self._elevation_models = value
| StarcoderdataPython |
103044 | <gh_stars>1-10
# Generated by Django 3.2.7 on 2021-10-05 19:06
import django.core.validators
from django.db import migrations, models
from django.db.migrations.operations.fields import RemoveField
import django.db.models.deletion
from supplemental_content.models import AbstractModel
def make_category(id, title, description, order):
    """Build the plain-dict node used while assembling the category tree."""
    node = dict(id=id, title=title, description=description, order=order)
    node["children"] = []
    return node
def migrate_categories(apps, schema_editor):
    """Copy OldCategory rows into the new Category/SubCategory models.

    Parents become Category rows and their children become SubCategory
    rows; each original primary key is preserved in ``old_id`` so that
    migrate_supplemental_content() can look the new rows up later.
    """
    OldCategory = apps.get_model("supplemental_content", "OldCategory")
    Category = apps.get_model("supplemental_content", "Category")
    SubCategory = apps.get_model("supplemental_content", "SubCategory")
    # no cases of 3-level depth before now, so deal with 2 levels only
    old_categories = OldCategory.objects.all()
    parent_categories = [i for i in old_categories if i.parent is None]
    child_categories = [i for i in old_categories if i.parent is not None]
    new_categories = {}
    # construct tree of old parent categories, keyed by old primary key
    for category in parent_categories:
        new_categories[category.id] = make_category(
            category.id, category.title, category.description, category.order
        )
    # append child categories under their parent node
    for child in child_categories:
        try:
            new_categories[child.parent.id]["children"].append(make_category(
                child.id, child.title, child.description, child.order
            ))
        except KeyError:
            # Child whose parent is missing is silently dropped --
            # presumably intentional best-effort migration; confirm.
            pass
    # create new category objects (parents first, then their children)
    for category in list(new_categories.values()):
        parent = Category.objects.create(
            old_id=category["id"],
            name=category["title"],
            description=category["description"],
            order=category["order"],
        )
        for child in category["children"]:
            SubCategory.objects.create(
                old_id=child["id"],
                name=child["title"],
                description=child["description"],
                order=child["order"],
                parent=parent,
            )
def migrate_sections(apps, schema_editor):
    """Recreate every OldRegulationSection as a Section, keeping its old id."""
    OldRegulationSection = apps.get_model("supplemental_content", "OldRegulationSection")
    Section = apps.get_model("supplemental_content", "Section")
    for old_section in OldRegulationSection.objects.all():
        Section.objects.create(
            title=int(old_section.title),
            part=int(old_section.part),
            section_id=int(old_section.section),
            old_id=old_section.id,
        )
def migrate_supplemental_content(apps, schema_editor):
    """Copy OldSupplementaryContent rows into SupplementalContent.

    Categories and sections are re-linked through the ``old_id`` columns
    populated by migrate_categories()/migrate_sections(); rows whose
    category or sections cannot be resolved are migrated without them.
    """
    OldSupplementaryContent = apps.get_model("supplemental_content", "OldSupplementaryContent")
    SupplementalContent = apps.get_model("supplemental_content", "SupplementalContent")
    AbstractCategory = apps.get_model("supplemental_content", "AbstractCategory")
    Section = apps.get_model("supplemental_content", "Section")
    for content in OldSupplementaryContent.objects.all():
        # acquire category from old ID (missing category -> left as None)
        new_category = None
        try:
            if content.category:
                new_category = AbstractCategory.objects.filter(old_id=content.category.id)[0]
        except IndexError:
            pass
        # acquire list of sections from old ID's
        new_sections = []
        # NOTE(review): `content.sections` is a related manager and is
        # presumably always truthy -- this check may be redundant; confirm.
        if content.sections:
            for section in content.sections.all():
                try:
                    new_sections.append(
                        Section.objects.filter(old_id=section.id)[0]
                    )
                except IndexError:
                    # Section with no migrated counterpart is skipped.
                    pass
        # build new supplemental content object
        new_content = SupplementalContent.objects.create(
            name=content.title,
            description=content.description,
            url=content.url,
            date=content.date,
            approved=content.approved,
            created_at=content.created_at,
            updated_at=content.updated_at,
            category=new_category,
        )
        # M2M links can only be set after the row exists.
        new_content.locations.set(new_sections)
        new_content.save()
class Migration(migrations.Migration):
    """Restructure the supplemental-content schema and migrate its data.

    Renames the legacy models to Old*, creates the new Abstract* class
    hierarchy (categories, locations, content), copies the data across
    with the RunPython functions above, then drops the temporary
    ``old_id`` lookup columns and the legacy models.
    """

    dependencies = [
        ('supplemental_content', '0007_auto_20210831_1612'),
    ]

    operations = [
        # Keep the legacy tables around (renamed) so the data-migration
        # functions can still read from them via the historical models.
        migrations.RenameModel(
            old_name='Category',
            new_name='OldCategory',
        ),
        migrations.RenameModel(
            old_name='RegulationSection',
            new_name='OldRegulationSection',
        ),
        migrations.RenameModel(
            old_name='SupplementaryContent',
            new_name='OldSupplementaryContent',
        ),
        # New multi-table-inheritance hierarchy. The ``old_id`` fields are
        # temporary lookup keys, removed again at the end of this migration.
        migrations.CreateModel(
            name='AbstractCategory',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512, unique=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('order', models.IntegerField(blank=True, default=0)),
                ('show_if_empty', models.BooleanField(default=False)),
                ('old_id', models.IntegerField()),
            ],
            bases=(models.Model, AbstractModel),
        ),
        migrations.CreateModel(
            name='AbstractLocation',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.IntegerField()),
                ('part', models.IntegerField()),
            ],
            bases=(models.Model, AbstractModel),
        ),
        migrations.CreateModel(
            name='AbstractSupplementalContent',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('approved', models.BooleanField(default=False)),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='supplemental_content', to='supplemental_content.abstractcategory')),
                ('locations', models.ManyToManyField(blank=True, null=True, related_name='supplemental_content', to='supplemental_content.AbstractLocation')),
            ],
            bases=(models.Model, AbstractModel),
        ),
        # Concrete subclasses of the abstract bases above.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('abstractcategory_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractcategory')),
            ],
            options={
                'verbose_name': 'Category',
                'verbose_name_plural': 'Categories',
            },
            bases=('supplemental_content.abstractcategory',),
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('abstractlocation_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractlocation')),
                ('section_id', models.IntegerField()),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='supplemental_content.abstractlocation')),
                ('old_id', models.IntegerField()),
            ],
            options={
                'verbose_name': 'Section',
                'verbose_name_plural': 'Sections',
            },
            bases=('supplemental_content.abstractlocation',),
        ),
        migrations.CreateModel(
            name='SubCategory',
            fields=[
                ('abstractcategory_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractcategory')),
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_categories', to='supplemental_content.category')),
            ],
            options={
                'verbose_name': 'Sub-category',
                'verbose_name_plural': 'Sub-categories',
            },
            bases=('supplemental_content.abstractcategory',),
        ),
        migrations.CreateModel(
            name='SubjectGroup',
            fields=[
                ('abstractlocation_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractlocation')),
                ('subject_group_id', models.CharField(max_length=512)),
            ],
            options={
                'verbose_name': 'Subject Group',
                'verbose_name_plural': 'Subject Groups',
            },
            bases=('supplemental_content.abstractlocation',),
        ),
        migrations.CreateModel(
            name='Subpart',
            fields=[
                ('abstractlocation_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractlocation')),
                ('subpart_id', models.CharField(max_length=12)),
            ],
            options={
                'verbose_name': 'Subpart',
                'verbose_name_plural': 'Subparts',
            },
            bases=('supplemental_content.abstractlocation',),
        ),
        migrations.CreateModel(
            name='SubSubCategory',
            fields=[
                ('abstractcategory_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractcategory')),
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_sub_categories', to='supplemental_content.subcategory')),
            ],
            options={
                'verbose_name': 'Sub-sub-category',
                'verbose_name_plural': 'Sub-sub-categories',
            },
            bases=('supplemental_content.abstractcategory',),
        ),
        migrations.CreateModel(
            name='SupplementalContent',
            fields=[
                ('abstractsupplementalcontent_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='supplemental_content.abstractsupplementalcontent')),
                ('name', models.CharField(blank=True, max_length=512, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('url', models.URLField(blank=True, max_length=512, null=True)),
                ('date', models.CharField(blank=True, help_text='Leave blank or enter one of: "YYYY", "YYYY-MM", or "YYYY-MM-DD".', max_length=10, null=True, validators=[django.core.validators.RegexValidator(message='Date field must be blank or of format "YYYY", "YYYY-MM", or "YYYY-MM-DD"! For example: 2021, 2021-01, or 2021-01-31.', regex='^\\d{4}((-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]))|(-(0[1-9]|1[0-2])))?$')])),
            ],
            options={
                'verbose_name': 'Supplemental Content',
                'verbose_name_plural': 'Supplemental Content',
            },
            bases=('supplemental_content.abstractsupplementalcontent',),
        ),
        # Data migration: sections and categories must exist before content
        # so the old_id lookups succeed.
        migrations.RunPython(migrate_sections),
        migrations.RunPython(migrate_categories),
        migrations.RunPython(migrate_supplemental_content),
        migrations.AlterModelOptions(
            name='section',
            options={'ordering': ['title', 'part', 'section_id'], 'verbose_name': 'Section', 'verbose_name_plural': 'Sections'},
        ),
        migrations.AlterModelOptions(
            name='subjectgroup',
            options={'ordering': ['title', 'part', 'subject_group_id'], 'verbose_name': 'Subject Group', 'verbose_name_plural': 'Subject Groups'},
        ),
        migrations.AlterModelOptions(
            name='subpart',
            options={'ordering': ['title', 'part', 'subpart_id'], 'verbose_name': 'Subpart', 'verbose_name_plural': 'Subparts'},
        ),
        migrations.AlterModelOptions(
            name='abstractlocation',
            options={'ordering': ['title', 'part', 'section__section_id', 'subpart__subpart_id', 'subjectgroup__subject_group_id']},
        ),
        # The temporary lookup keys are no longer needed once the data
        # migration has run.
        migrations.RemoveField(
            model_name='AbstractCategory',
            name='old_id',
        ),
        migrations.RemoveField(
            model_name='Section',
            name='old_id',
        ),
        # Tear down the legacy models.
        migrations.AlterUniqueTogether(
            name='oldregulationsection',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='oldregulationsection',
            name='supplementary_content',
        ),
        migrations.RemoveField(
            model_name='oldsupplementarycontent',
            name='category',
        ),
        migrations.DeleteModel(
            name='OldCategory',
        ),
        migrations.DeleteModel(
            name='OldRegulationSection',
        ),
        migrations.DeleteModel(
            name='OldSupplementaryContent',
        ),
    ]
| StarcoderdataPython |
3375273 | import json
import sys
import time
import os
import boto3
# Module-level AWS clients: created once per Lambda container and reused
# across invocations.
rek = boto3.client('rekognition')
sqs = boto3.client('sqs')
sns = boto3.client('sns')
s3 = boto3.client('s3')
# NOTE(review): unused module global -- GetJobID assigns a local of the
# same name, so this value is never read.
start_job_id = ''
def GetJobID(event):
    """Extract the Rekognition job id from the SQS event.

    Each SQS record body wraps an SNS notification whose JSON payload
    carries a ``JobId`` field; the id from the last record is returned.
    """
    for record in event['Records']:
        body = json.loads(record['body'])
        print(body)
        notification = json.loads(body["Message"])
        job_id = notification["JobId"]
        print('JOB ID: ', job_id)
    return job_id
def GetVideoName(event):
    """Extract the analyzed video's S3 object name from the SQS event."""
    for record in event['Records']:
        notification = json.loads(json.loads(record['body'])["Message"])
        video_name = notification["Video"]["S3ObjectName"]
        print('Nome do Video: ', video_name)
    return video_name
def GetFaceDetectionResults(start_job_id):
    """Collect per-face emotion confidences from a finished Rekognition job.

    Pages through ``get_face_detection`` and returns a list of dicts, one
    per detected face, each with ``Timestamp``, ``Confidence`` and one
    entry per emotion type.
    """
    max_results = 10
    pagination_token = ''
    finished = False
    content = []
    while not finished:
        # BUG FIX: the first request must not send an empty NextToken --
        # the Rekognition API rejects NextToken='' (minimum length 1).
        kwargs = {'JobId': start_job_id, 'MaxResults': max_results}
        if pagination_token:
            kwargs['NextToken'] = pagination_token
        response = rek.get_face_detection(**kwargs)
        for FaceDetection in response['Faces']:
            reaction = {}
            face = FaceDetection['Face']
            reaction['Timestamp'] = FaceDetection['Timestamp']
            reaction['Confidence'] = face['Confidence']
            for instance in face['Emotions']:
                reaction[instance['Type']] = instance['Confidence']
            content.append(reaction)
        if 'NextToken' in response:
            pagination_token = response['NextToken']
        else:
            finished = True
    return content
def UploadVideo(content, video_name):
    """Serialize *content* to JSON and upload it to the results bucket.

    The result object key mirrors the video name with a .json suffix
    under the analyzed_videos/ prefix.
    """
    video_name = video_name.replace('.mp4', '.json')
    # Destination bucket comes from the Lambda environment.
    bucket = os.getenv('BUCKET_NAME')
    key = f'analyzed_videos/{video_name}'
    print('Enviando arquivo ao S3')
    s3.put_object(
        Body=json.dumps(content),
        Bucket=bucket,
        Key=key
    )
def lambda_handler(event, context):
    """Lambda entry point: fetch the face-detection results for the job
    announced in the SQS event and upload them to S3 as JSON."""
    start_job_id = GetJobID(event)
    content = GetFaceDetectionResults(start_job_id)
    video_name = GetVideoName(event)
    UploadVideo(content, video_name)
| StarcoderdataPython |
191284 | <gh_stars>0
import pytest
from serverless_tasks import task
# Prove that a task behaves like a normal function when called
# like a normal function
def test_a_task_can_be_called_normally():
    """A @task()-wrapped sync function can still be invoked directly."""
    @task()
    def a_function():
        return 42
    assert a_function() == 42
def test_a_task_can_be_called_with_args():
    """Positional arguments pass through the task wrapper unchanged."""
    @task()
    def a_function(x):
        return x
    assert a_function(54) == 54
@pytest.mark.asyncio
async def test_an_async_task_can_be_called_normally():
    """An async task can be awaited like the original coroutine."""
    @task()
    async def a_function():
        return 42
    assert await a_function() == 42
@pytest.mark.asyncio
async def test_an_async_task_can_be_called_with_args():
    """Arguments pass through the async task wrapper unchanged."""
    @task()
    async def a_function(x):
        return x
    assert await a_function(54) == 54
| StarcoderdataPython |
198642 |
#%%
from datetime import datetime
import pandas as pd
import numpy as np
from pandas.core import frame
import geopandas as gpd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import statsmodels.api as sm
# %%
def process_index_pivot(vi_link, v_index):
    """Load a vegetation-index CSV and pivot it to one row per year.

    The input must have a ``month`` column formatted 'YYYY-MM...' and a
    column named after *v_index*.  The result has columns
    ``<v_index>_year`` plus one ``<v_index>_<MM>`` mean value per month.
    """
    index_name = str(v_index)
    table = pd.read_csv(vi_link)
    # Split the 'YYYY-MM' stamp into separate year and month strings.
    stamps = table.month.astype(str)
    table['year'] = stamps.str[0:4]
    table['month'] = stamps.str[5:7]
    pivoted = (
        table.groupby(['year', 'month'])[index_name]
        .aggregate('mean')
        .unstack()
        .reset_index()
    )
    # Prefix every column with the index name, e.g. 'EVI_year', 'EVI_01'.
    pivoted.columns = index_name + "_" + pivoted.columns
    pivoted.index = pivoted.index.astype(int)
    return pivoted
# %%
# Pivot each vegetation-index CSV to one row per year and align all
# tables (plus the yield data) on the year index.
sa_evi = process_index_pivot('sa_all_polygons_evi.csv', 'EVI').set_index('EVI_year')
sa_ndsi = process_index_pivot('sa_all_polygons_ndsi.csv', 'NDSI').set_index('NDSI_year')
sa_nbr = process_index_pivot('sa_all_polygons_nbrt.csv', 'NBRT').set_index('NBRT_year')
sa_yield = pd.read_csv('sa_yield.csv', index_col = 'year')
sa_yield.index = sa_yield.index.astype(str)
sa_rain = pd.read_csv('sa_crop_rain.csv', index_col = 'year')
sa_rain.index = sa_rain.index.astype(str)
# NOTE(review): sa_rain is loaded but never joined into the feature
# table below -- confirm whether rainfall was meant to be a feature.
sa_rand_forrest_table = pd.concat([sa_evi, sa_ndsi, sa_nbr, sa_yield], axis = 1)
# Drop the incomplete 2021 season and back-fill remaining gaps.
sa_rand_forrest_table = sa_rand_forrest_table[sa_rand_forrest_table.index != '2021'].fillna(method='bfill')
# %%
# Labels are the values we want to predict
labels = np.array(sa_rand_forrest_table['Yield'])
# Remove the labels from the features
# axis 1 refers to the columns
features = sa_rand_forrest_table.drop(['Yield', 'production', 'hectares'], axis = 1)
# Saving feature names for later use
feature_list = list(features.columns)
# Convert to numpy array
features = np.array(features)
# %%
# Using Skicit-learn to split data into training and testing sets
from sklearn.model_selection import train_test_split
# Split the data into training and testing sets
train_features, test_features, train_labels, test_labels = \
    train_test_split(features,
                     labels,
                     test_size = 0.25,
                     random_state = 42)
print('Training Features Shape:', train_features.shape)
print('Training Labels Shape:', train_labels.shape)
print('Testing Features Shape:', test_features.shape)
print('Testing Labels Shape:', test_labels.shape)
# %%
# Import the model we are using
from sklearn.ensemble import RandomForestRegressor
# Instantiate model with 1000 decision trees
rf = RandomForestRegressor(n_estimators = 1000, random_state = 42)
# Train the model on training data
rf.fit(train_features, train_labels)
# %%
# Use the forest's predict method on the test data
predictions = rf.predict(test_features)
# Calculate the absolute errors
errors = abs(predictions - test_labels)
# Print out the mean absolute error (mae)
print('Mean Absolute Error:', round(np.mean(errors), 2), 'tonnes per hectare.')
# %%
# Calculate mean absolute percentage error (MAPE)
mape = 100 * (errors / test_labels)
# Calculate and display accuracy
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
# %%
# Import tools needed for visualization
from sklearn.tree import export_graphviz
import pydot
# Pull out one tree from the forest.  (BUG FIX: the original repeated
# this import/tree-extraction block twice; the duplicate was removed.)
tree = rf.estimators_[5]
# Export the image to a dot file
export_graphviz(tree, out_file = 'tree.dot',
                feature_names = feature_list,
                rounded = True,
                precision = 1)
# Use dot file to create a graph
(graph, ) = pydot.graph_from_dot_file('tree.dot')
# Write graph to a png file
# graph.write_png('tree.png')
# %%
# Get numerical feature importances
importances = list(rf.feature_importances_)
# List of tuples with variable and importance
feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(feature_list, importances)]
# Sort the feature importances by most important first
feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True)
# Print out the feature and importances
[print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances];
# %%
# Import matplotlib for plotting and use magic command for Jupyter Notebooks
import matplotlib.pyplot as plt
# Set the style
plt.style.use('fivethirtyeight')
# list of x locations for plotting
x_values = list(range(len(importances)))
# Make a bar chart
plt.bar(x_values, importances, orientation = 'vertical')
# Tick labels for x axis
plt.xticks(x_values, feature_list, rotation='vertical')
# Axis labels and title
plt.ylabel('Importance'); plt.xlabel('Variable'); plt.title('Variable Importances')
| StarcoderdataPython |
3322325 | <reponame>acc-cosc-1336-spring-2022/acc-cosc-1336-spring-2022-DaltonTaff
import unittest
from src.homework.i_dictionaries_sets.dictionary import get_p_distance
from src.homework.i_dictionaries_sets.dictionary import get_p_distance_matrix
class Test_Config(unittest.TestCase):
    """Unit tests for the p-distance helper functions."""

    def test_p_distance(self):
        # The two sequences differ at 4 of 10 positions -> 0.4.
        list1 = ['T','T','T','C','C','A','T','T','T','A']
        list2 = ['G','A','T','T','C','A','T','T','T','C']
        self.assertEqual(0.4, get_p_distance(list1, list2))

    def test_get_p_distance_matrix(self):
        list1 = [
            ['T','T','T','C','C','A','T','T','T','A'],
            ['G','A','T','T','C','A','T','T','T','C'],
            ['T','T','T','C','C','A','T','T','T','T'],
            ['G','T','T','C','C','A','T','T','T','A'] ]
        # Expected pairwise p-distance matrix for the four sequences.
        distance_matrix = [ [0.0, 0.4, 0.1, 0.1],
                            [0.4, 0.0, 0.4, 0.3],
                            [0.1, 0.4, 0.0, 0.2],
                            [0.1, 0.3, 0.2, 0.0] ]
        # BUG FIX: the original asserted against the *function object*
        # get_p_distance, so the expected matrix was never checked.
        self.assertEqual(distance_matrix, get_p_distance_matrix(list1))
1629277 | <filename>experiment.py
'''
Created on 08.06.2015
@author: marinavidovic
'''
import os
import utils
import motif
from datetime import datetime
import pdb
import numpy as np
from openopt import NLP
import func
import grad
import pickle
import view
class Experiment:
    '''
    One motif-finding experiment: loads a POIM file, initialises the
    PWM / mu / sigma / lambda model parameters, runs the OpenOpt NLP
    optimisation and writes the results (pickles, text report, JASPAR
    motif, sequence logo) into a timestamped experiment directory.
    '''
    # Class-level defaults.
    # NOTE(review): vecsize and optvar are mutable numpy arrays shared by
    # all instances until setup()/input() overwrite them -- confirm intended.
    num_motifs = 1
    poimdegree = 2
    lamall_k = 2
    vecsize=np.zeros(4)
    optvar = np.ones(4)

    def __init__(self, type):
        '''
        type -> real or toy
        repetitions
        '''
        self.type = type
        # POIM data and derived quantities, filled in by read().
        self.motif = None
        self.poim = None
        self.aQ = None
        self.Q = None
        self.maxdegree = None
        self.maxPOIM = None
        self.diffPOIM = None
        self.L = None
        # Optimisation results, filled in by output().
        self.result = None
        self.out_pwm = None
        self.out_sig = None
        self.out_lam = None
        self.out_mu = None
        self.out_fopt = None
        self.out_time = None
        self.out_iter = None
        self.out_eavals = None
        self.out_istop = None
        self.small_k = None
        self.dictname = None

    def setup(self,path,poimpath,savepoims,optvar,ini_pwm,ini_mu,ini_sig,ini_lam,P,M,replacenucleotids=[],replaceposition=[],num_motifs=1,maxdegree=6,small_k=2,solver='ralg',iprint=2, maxIter=10000, ftol=1e-03, gradtol=1e-03, diffInt=1e-03, contol=1e-03, maxFunEvals=1e04, maxTime=1000):
        '''
        Configure the experiment: store solver settings, load the POIM
        file, initialise the PWM and create the output folder; optionally
        save POIM figures.
        NOTE(review): replacenucleotids/replaceposition use mutable
        default arguments -- safe only while they are never mutated.
        '''
        self.num_motifs = num_motifs
        self.small_k = small_k
        self.maxdegree = maxdegree
        self.path = path
        self.poimpath = poimpath
        self.motif = motif.Motif()
        self.solver = solver
        self.optvar =optvar
        self.ini_mu = ini_mu
        self.ini_sig = ini_sig
        self.ini_lam = ini_lam
        self.P = P
        self.M = M
        self.replacenucleotids = replacenucleotids
        self.replaceposition = replaceposition
        self.iprint = iprint
        self.maxIter = maxIter
        self.ftol = ftol
        self.gradtol = gradtol
        self.diffInt = diffInt
        self.contol = contol
        self.maxFunEvals = maxFunEvals
        self.maxTime = maxTime
        self.read(poimpath)
        self.iniPWM(ini_pwm)
        self.folderStruct(self.path)
        if savepoims:
            view.figurepoimsimple(self.Q[1], self.path + "/poim.eps", 0)
            view.figuremaxPOIM(self.diffPOIM, self.path + "/diffpoim.eps", 0)

    def iniPWM(self, ini_pwm):
        # Initialise the PWM greedily from the POIM, randomly, or from an
        # explicitly supplied matrix.
        if ini_pwm == 'greedy':
            self.ini_pwm = utils.greedy_init(self.Q, self.P, self.M, self.ini_mu)
        elif ini_pwm == 'random':
            self.ini_pwm=utils.random_init(self.P, self.M)
        else:
            self.ini_pwm = ini_pwm

    def read(self,poimpath):
        # Load the POIM data and derived quantities from disk.
        self.aQ, self.Q, self.maxdegree, self.maxPOIM, self.diffPOIM, self.L = utils.read_data_from_file(poimpath)

    def folderStruct(self,path):
        # Create a timestamped experiment directory "exp_Y_M_D_h_m_s".
        dt = datetime.now()
        tt = dt.timetuple()
        self.dictname = "exp"
        for i in range(6):
            self.dictname += "_"+str(tt[i])
        self.path = path+self.dictname
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        else:
            print "Experiment path already exists."

    def input(self):
        '''
        organize optimization variable x regarding
        optvar[0]: pwm
        optvar[1]: lam
        optvar[2]: sig
        optvar[3]: mu
        the values which are set to one in optvar are optimization variables
        '''
        x1, x2, x3, x4 = 0, 0, 0, 0
        x = []
        # Flatten each selected parameter block and record the cumulative
        # offsets in self.vecsize so output() can slice the result apart.
        if (self.optvar[0] == 1):
            x1, self.vecsize[0] = utils.matr2vec(self.ini_pwm, self.P, self.M)
            x = np.append(x, x1)
        if (self.optvar[1] == 1):
            x2, self.vecsize[1] = utils.list2vec(self.ini_lam, self.P, self.M)
            self.vecsize[1] = self.vecsize[1] + self.vecsize[0]
        if (self.optvar[2] == 1):
            x3, self.vecsize[2] = utils.list2vec(self.ini_sig, self.P, self.M)
            self.vecsize[2] = self.vecsize[1] + self.vecsize[2]
        if (self.optvar[3] == 1):
            x4, self.vecsize[3] = utils.list2vec(self.ini_mu, self.P, self.M)
            self.vecsize[3] = self.vecsize[2] + self.vecsize[3]
        # Rebuild x so the sub-vectors appear in the fixed order pwm,
        # lam, sig, mu (only those that are optimised).
        x = []
        if (self.vecsize[0] != 0):
            x = np.append(x, x1)
        if (self.vecsize[1] != 0):
            x = np.append(x, x2)
        if (self.vecsize[2] != 0):
            x = np.append(x, x3)
        if (self.vecsize[3] != 0):
            x = np.append(x, x4)
        # Lower/upper bounds per parameter block.
        lb_mu, lb_r_lam, lb_sig = [], [], []
        ub_mu, ub_r_lam, ub_sig = [], [], []
        lb_r_lam = np.ones(self.vecsize[1]) * 0.005
        if (self.vecsize[2] != 0):
            lb_sig = np.ones(self.vecsize[2] - self.vecsize[1]) * 0.0001
            ub_sig = np.ones(self.vecsize[2] - self.vecsize[1]) * (self.L - self.maxdegree + 1)
        if (self.vecsize[3] != 0):
            lb_mu = np.zeros(max(0, self.vecsize[3] - self.vecsize[2]))
            ub_mu = np.ones(max(0, self.vecsize[3] - self.vecsize[2])) * (self.L - self.maxdegree + 1)
        lb = np.append(np.append(lb_r_lam, lb_sig), lb_mu)
        ub_r_lam = np.ones(self.vecsize[1]) * 10
        ub = np.append(np.append(ub_r_lam, ub_sig), ub_mu)
        # Equality constraints: presumably each PWM column sums to one --
        # TODO confirm the intent of the "lenA-2" loop bound.
        lenA=int(np.max(self.vecsize))
        lenk=int(self.vecsize[0])/4
        Aeq=np.zeros((lenk,lenA))
        beq=np.ones(lenk)
        for i in range(lenk):
            for pk in range(i,lenA-2,lenk):
                Aeq[i,pk] = 1
        return x,Aeq,beq,lb,ub

    def optimize(self):
        # Build the optimisation vector/bounds and run the OpenOpt NLP
        # solver; the equality constraints Aeq/beq are currently unused.
        x,Aeq,beq,lb,ub=self.input()
        #p = NLP(func.extend_callfunc, x, df=grad.extend_callgrad, Aeq=Aeq,beq=beq, lb=lb, ub=ub, args=(self.Q, self.P, self.M, self.L,self.ini_pwm, self.ini_mu, self.ini_sig, self.ini_lam, self.maxdegree, self.vecsize, "5", self.optvar, self.lamall_k), diffInt=self.diffInt, ftol=self.ftol, plot=0, iprint=self.iprint,maxIter = self.maxIter, maxFunEvals = self.maxFunEvals, show=False, contol=self.contol)
        self.parameter_prompt()
        p = NLP(func.extend_callfunc, x, df=grad.extend_callgrad, lb=lb, ub=ub, args=(self.Q, self.P, self.M, self.L,self.ini_pwm, self.ini_mu, self.ini_sig, self.ini_lam, self.maxdegree, self.vecsize, "5", self.optvar, self.small_k), diffInt=self.diffInt, ftol=self.ftol, plot=0, iprint=self.iprint,maxIter = self.maxIter, maxFunEvals = self.maxFunEvals, show=False, contol=self.contol)
        #p.checkdf()
        #p.checkdc()
        #p.checkdh()
        self.result = p._solve(self.solver)
        self.output()

    def output(self):
        #==========================================================================
        #
        #p.iterTime = []
        # p.iterValues.x = [] # iter points
        # p.iterValues.f = [] # iter ObjFunc Values
        # p.iterValues.r = [] # iter MaxResidual
        # p.iterValues.rt = [] # iter MaxResidual Type: 'c', 'h', 'lb' etc
        # p.iterValues.ri = [] # iter MaxResidual Index
        # p.solutions = [] # list of solutions, may contain several elements for interalg and mb other solvers
        #==========================================================================
        '''
        save raw result vector
        '''
        fobj = open(self.path+ "/result_raw_" + self.dictname + ".pkl", 'wb')
        pickle.dump(self.result, fobj)
        fobj.close()
        # Unpack the flat solver vector back into PWM / lambda / sigma /
        # mu, falling back to the initial values for fixed parameters.
        if (self.optvar[0] == 1):
            self.out_pwm = utils.converter(self.result.xf[:self.vecsize[0]], self.P, self.M)
        else:
            self.out_pwm = self.ini_pwm
        if (self.optvar[1] == 1):
            self.out_lam = utils.listconverter(self.result.xf[self.vecsize[0] :self.vecsize[1]], self.P, self.M)
        else:
            self.out_lam = self.ini_lam
        if (self.optvar[2] == 1):
            self.out_sig = utils.listconverter(self.result.xf[self.vecsize[1]:self.vecsize[2]], self.P, self.M)
        else:
            self.out_sig = self.ini_sig
        if (self.optvar[3] == 1):
            self.out_mu = utils.listconverter(self.result.xf[self.vecsize[2]:], self.P, self.M)
        else:
            self.out_mu = self.ini_mu
        # Solver diagnostics.
        self.out_fopt = self.result.ff
        self.out_time = self.result.elapsed['solver_time']
        self.out_iter = None
        self.out_eavals = self.result.evals['f']
        self.out_istop = self.result.istop
        fobj = open(self.path+ "/output_"+self.dictname+".pkl", 'wb')
        pickle.dump([self.out_pwm, self.out_lam, self.out_sig, self.out_mu,self.out_fopt, self.out_time,self.out_eavals,self.out_iter,self.out_istop ], fobj)
        fobj.close()
        # Human-readable report, JASPAR motif file and sequence logo.
        utils.parameter2file(self.path + "/result_prepared_" + self.dictname + ".txt",self.solver,self.small_k,self.ini_pwm,self.ini_lam,self.ini_sig,self.ini_mu,self.out_pwm, self.out_lam, self.out_sig, self.out_mu,self.out_fopt, self.out_time,self.out_eavals,self.out_iter,self.out_istop,self.optvar, self.M, self.P,self.maxIter,self.ftol,self.gradtol,self.diffInt,self.contol,self.maxFunEvals,self.maxTime)
        utils.makeJasparMotif(self.out_pwm, 1, 1, self.P, self.M, self.path+ "/jaspar_"+self.dictname+".txt",self.replacenucleotids,self.replaceposition)
        utils.seqLogo(self.path,"/jaspar_"+self.dictname)

    def parameter_prompt(self):
        # Print the current POIM / parameter state for inspection.
        utils.parameter_prompt(self.Q, self.ini_pwm, self.ini_mu, self.ini_sig, self.ini_lam, self.M, self.P, self.L, self.diffPOIM, "optimization parameters")
| StarcoderdataPython |
3248141 | from random import randint
def generate_auth_code() -> str:
    """Generate a six-digit random numeric code as a string."""
    return ''.join(str(randint(0, 9)) for _ in range(6))
class Get_Auth_Code:
    """Monostate (Borg) holder for the current authentication code.

    All instances share one ``_state`` dict, so they all see the same
    ``AuthCode`` value.
    """

    _state: dict = {
        'AuthCode': generate_auth_code()
    }

    def __new__(cls, *args, **kwargs):
        instance = super().__new__(cls)
        # Every instance aliases the shared class-level state dict.
        instance.__dict__ = cls._state
        return instance

    def __init__(self, flag=None):
        """When *flag* is given, the current code is considered consumed
        and a fresh one is generated for subsequent use."""
        if flag is not None:
            self._state['AuthCode'] = generate_auth_code()

    def __str__(self):
        """Render the currently stored code."""
        return self._state['AuthCode']
| StarcoderdataPython |
1799344 | <filename>main.py
from numpy import cos, sin, pi
import matplotlib.pyplot as plt
import functions as func
np = func.np
'''
Constantes
'''
phi = -28.93 / 180*pi # Latitude do local (negativo se for latitude Sul).
e = 23.4 / 180*pi # Obliquidade da eclíptica.
w_s = 850 # Constante solar na superfície da Terra em W/s.
v_sa = 2*pi/365.25 # Velocidade angular do Sol em seu caminho na eclíptica em rad/dia.
v_sd = 2*pi # Velocidade angular do Sol na coordenada ângulo horário em rad/dia.
'''
Posição dos dois vértices, adjacentes ao vértice na origem, do quadrilátero.
'''
inclination = 27.5
p_1 = np.array([1,0,0])
p_2 = np.array([0, cos(inclination/180*pi), -sin(inclination/180*pi)])
'''
Limites de integração da integral da área projetada em função do tempo. A integração é feita numéricamente divindo a área em retângulos
'''
delta_t = 0.1 # largura dos retângulos.
t_x_max = 7000 # número máximo de retângulos que são calculados por vez.
t_i = 0 # tempo inicial (limite inferior)
t_f = 365.25 # tempo final (limite superior)
solar_energy = func.LigthArea(phi, e, w_s, v_sa, v_sd, p_1, p_2)
solar_energy.time_interval_steps(delta_t, t_x_max, t_i, t_f) # Definindo o intervalo de integração.
total_energy = solar_energy.calculate_energy() # Calculando a energia incidente
print(total_energy)
# solar_energy.range_orientation([-4, 4], [27-1, 27+6], 1)
| StarcoderdataPython |
3397007 | <filename>task_adaptation/data/clevr_test.py
# coding=utf-8
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clevr.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from task_adaptation.data import clevr
from task_adaptation.data import data_testing_lib
import tensorflow.compat.v1 as tf
class CLEVRDataCountAllTest(data_testing_lib.BaseVTABDataTest):
    """See base class for usage and test descriptions."""

    def setUp(self):
        # "count_all" task: 8 classes (object count per scene).
        super(CLEVRDataCountAllTest, self).setUp(
            data_wrapper=clevr.CLEVRData(task="count_all"),
            num_classes=8,
            expected_num_samples=dict(
                train=63000,
                val=7000,
                trainval=70000,
                test=15000,
                train800val200=1000,
                train800=800,
                val200=200,
            ),
            required_tensors_shapes={
                "image": (None, None, 3),
                "label": (),
            },
            tfds_label_key_map={})
class CLEVRDataCountCylindersTest(data_testing_lib.BaseVTABDataTest):
    """See base class for usage and test descriptions."""

    def setUp(self):
        # "count_cylinders" task: 11 classes.
        super(CLEVRDataCountCylindersTest, self).setUp(
            data_wrapper=clevr.CLEVRData(task="count_cylinders"),
            num_classes=11,
            expected_num_samples=dict(
                train=63000,
                val=7000,
                trainval=70000,
                test=15000,
                train800val200=1000,
                train800=800,
                val200=200,
            ),
            required_tensors_shapes={
                "image": (None, None, 3),
                "label": (),
            },
            tfds_label_key_map={})
class CLEVRDataClosestTest(data_testing_lib.BaseVTABDataTest):
    """See base class for usage and test descriptions."""

    def setUp(self):
        # "closest_object_distance" task: 6 distance-bin classes.
        super(CLEVRDataClosestTest, self).setUp(
            data_wrapper=clevr.CLEVRData(task="closest_object_distance"),
            num_classes=6,
            expected_num_samples=dict(
                train=63000,
                val=7000,
                trainval=70000,
                test=15000,
                train800val200=1000,
                train800=800,
                val200=200,
            ),
            required_tensors_shapes={
                "image": (None, None, 3),
                "label": (),
            },
            tfds_label_key_map={})
# Run the test suite when executed directly.
if __name__ == "__main__":
    tf.test.main()
| StarcoderdataPython |
1794003 | <filename>mods/tests/mocks/BirModule/back/module.py
from libvis.modules.Base import BaseModule
from .bottles import beer
class Bir(BaseModule):
    """Visualization module rendering the '99 bottles of beer' rhyme."""
    name="BirModule"

    def __init__(self, count):
        super().__init__()
        self.text = beer(count)

    def vis_set(self, key, value):
        """Store *key*; when 'count' changes, re-render the rhyme text."""
        super().vis_set(key, value)  # same as self[key] = value
        if key == 'count':
            try:
                self.text = beer(int(value))
            except (TypeError, ValueError):
                # FIX: was a bare `except: pass`.  Only swallow values
                # that cannot be parsed as an integer count; any other
                # failure now surfaces instead of being silently hidden.
                pass
| StarcoderdataPython |
126539 | <gh_stars>0
class ClassDictionary:
    """Attribute-style access proxy around a plain dict.

    Reading an unknown attribute returns ``dict_value.get(name)`` (None
    when absent); assigning any attribute other than ``dict_value``
    writes into the backing dict.
    """

    def __init__(self, value):
        # Routed through __setattr__, which special-cases this attribute.
        self.dict_value = value

    def __getattr__(self, name):
        # BUG FIX: the original overrode __getattribute__, which also
        # intercepted real attributes -- instance.pop resolved to
        # dict_value.get("pop") (None), making the pop() method
        # uncallable.  __getattr__ runs only when normal lookup fails,
        # so defined methods work while unknown names still proxy.
        if name == "dict_value":
            # Guard against recursion if accessed before __init__ ran.
            raise AttributeError(name)
        return self.dict_value.get(name)

    def __setattr__(self, name, value):
        if name == "dict_value":
            super().__setattr__(name, value)
        else:
            self.dict_value[name] = value

    def pop(self, name, default=None):
        """Remove *name* from the backing dict and return its value."""
        return self.dict_value.pop(name, default)
| StarcoderdataPython |
3289374 | <filename>tests/khmer_tst_utils.py
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: <EMAIL>
#
import tempfile
import os
import shutil
# Anchor relative paths at this test module's directory and make
# tempfile create temporary directories alongside the tests.
thisdir = os.path.dirname(__file__)
thisdir = os.path.abspath(thisdir)
tempfile.tempdir = thisdir
def get_test_data(filename):
    """Return the absolute path of *filename* inside the test-data folder."""
    return os.path.join(thisdir, 'test-data', filename)
cleanup_list = []
def get_temp_filename(filename, tempdir=None):
    """Return a path for *filename* inside *tempdir*.

    When no directory is given, a fresh one is created and registered
    for later removal by cleanup().
    """
    if tempdir is None:
        tempdir = tempfile.mkdtemp(prefix='khmertest_')
        cleanup_list.append(tempdir)
    return os.path.join(tempdir, filename)
def cleanup():
    """Remove every registered temporary directory, ignoring errors."""
    global cleanup_list
    for path in cleanup_list:
        shutil.rmtree(path, ignore_errors=True)
    cleanup_list = []
| StarcoderdataPython |
# Program to locate the Python site-packages directories.
import site


def main():
    """Return the interpreter's site-packages directories."""
    return site.getsitepackages()


print(main())
1756179 | import matplotlib.colors as colors
import matplotlib.cm as cmx
import matplotlib.pyplot as plt
import cv2
import scipy.io as sio
import numpy as np
# Render per-superpixel attention weights as jet-colormapped PNGs.
img=cv2.imread('./att_seg_rgb.jpg')
# img=img.transpose((1,2,0))
print(img.shape)
# Load the saved superpixel attention weights from the .mat file.
att_dict=sio.loadmat('/data2/gyang/PGA-net/segmentation/sp_ag_weights_2020-06-05-16-13-46.mat')
print(att_dict['sp_feat'].shape)
atts=np.squeeze(att_dict['sp_feat'])
num=atts.shape
for i in range(num[0]):
    att=atts[i]
    # print(att)
    # Resize the map to the image's (width, height) -- presumably
    # 500x375 matches the source image; confirm against img.shape.
    att=cv2.resize(att,(500,375))
    jet = plt.get_cmap('jet')
    cNorm = colors.Normalize()
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
    # Invert (255-att) before colormapping, then save one PNG per map.
    colorVal = scalarMap.to_rgba(255-att)
    filename='./att/sp_'+str(i).zfill(3)+'.png'
    plt.imsave(filename, colorVal)
# att_dict=sio.loadmat('/data2/gyang/PGA-net/segmentation/att2020-06-05-15-51-20.mat')
# atts = np.squeeze(att_dict['att'])
# print(att_dict['att'].shape)
#
# num=atts.shape
# for i in range(num[0]):
#     for j in range(num[1]):
#         att=atts[i,j]
#         # print(att)
#         att=cv2.resize(att,(375,500))
#         jet = plt.get_cmap('jet')
#         cNorm = colors.Normalize()
#         scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
#         colorVal = scalarMap.to_rgba(att)
#         filename='./att/att_'+str(i)+'_'+str(j).zfill(3)+'.png'
#         plt.imsave(filename, colorVal)
| StarcoderdataPython |
4814858 | <reponame>p517332051/face_benchmark
from .OctResNet import *
from .Octconv import OctaveConv,Conv_BN_ACT,Conv_BN,Conv_ACT | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.