Schema — one row per source file (ranges are min/max from the dataset viewer; ⌀ marks nullable fields):

blob_id: string (40 chars) | directory_id: string (40 chars) | content_id: string (40 chars)
path: string (4–721 chars) | filename: string (3–113 chars) | extension: string (115 values)
repo_name: string (5–91 chars) | snapshot_id: string (40 chars) | revision_id: string (40 chars) | branch_name: string (321 values)
detected_licenses: list (0–57 items) | license_type: string (2 values) | gha_license_id: string (23 values)
visit_date: timestamp[ns] (2016-08-12 09:31:09 to 2023-09-06 10:45:07)
revision_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
committer_date: timestamp[ns] (2010-09-28 14:01:40 to 2023-09-06 06:22:19)
github_id: int64 (426 to 681M) | star_events_count: int64 (101 to 243k) | fork_events_count: int64 (0 to 110k)
gha_event_created_at: timestamp[ns] (2012-06-28 18:51:49 to 2023-09-14 21:59:16, ⌀)
gha_created_at: timestamp[ns] (2008-02-11 22:55:26 to 2023-08-10 11:14:58, ⌀)
gha_language: string (147 values) | src_encoding: string (26 values) | language: string (2 values)
is_vendor: bool | is_generated: bool
length_bytes: int64 (6 to 10.2M)
content: string (6–10.2M chars)
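
Each row can be consumed programmatically. A minimal loading sketch, assuming the dump is published as a Hugging Face dataset; the dataset path "org/dataset-name" is a placeholder, not something this dump confirms:

from datasets import load_dataset

# Stream rows matching the schema above without downloading the whole dump.
ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds:
    # Keep small, permissively licensed Python files.
    if row["license_type"] == "permissive" and row["length_bytes"] < 10_000:
        print(row["repo_name"], row["path"], row["detected_licenses"])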

repo_name: walkccc/LeetCode | path: /solutions/1935. Maximum Number of Words You Can Type/1935.py
blob_id: 6f8876fff305088abd093d8ebb3d9b549b9a3963 | directory_id: 518bf342bc4138982af3e2724e75f1d9ca3ba56c | content_id: 0494b94282b003cc4172cbabc98fe33c719f2a07
snapshot_id: dae85af7cc689882a84ee5011f0a13a19ad97f18 | revision_id: a27be41c174565d365cbfe785f0633f634a01b2a | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-28T01:32:43.384999 | revision_date: 2023-08-20T19:00:45 | committer_date: 2023-08-20T19:00:45
github_id: 172231974 | star_events_count: 692 | fork_events_count: 302
gha_event_created_at: 2023-08-13T14:48:42 | gha_created_at: 2019-02-23T15:46:23 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 222 | extension: py | filename: 1935.py
content:
class Solution:
    def canBeTypedWords(self, text: str, brokenLetters: str) -> int:
        ans = 0
        broken = set(brokenLetters)
        for word in text.split():
            ans += all(c not in broken for c in word)
        return ans
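
A quick sanity check of the row above (the input is illustrative, not from the dataset): with brokenLetters "ad", "hello" is typable and "world" is not, so the expected count is 1.

print(Solution().canBeTypedWords("hello world", "ad"))  # -> 1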

repo_name: chromium/chromium | path: /third_party/blink/web_tests/external/wpt/workers/support/imported_script.py
blob_id: 30f23775bb356fe02c72c373b093dfce7b621dbb | directory_id: a3d6556180e74af7b555f8d47d3fea55b94bcbda | content_id: 2f9c6a81d9d92a1a9f6d660abce74adc694f00e4
snapshot_id: aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | revision_id: a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | branch_name: refs/heads/main
detected_licenses: ["LGPL-2.0-or-later", "LicenseRef-scancode-warranty-disclaimer", "LGPL-2.1-only", "GPL-1.0-or-later", "GPL-2.0-only", "LGPL-2.0-only", "BSD-2-Clause", "LicenseRef-scancode-other-copyleft", "BSD-3-Clause", "MIT", "Apache-2.0"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-24T00:35:12.585945 | revision_date: 2023-08-23T22:01:11 | committer_date: 2023-08-23T22:01:11
github_id: 120360765 | star_events_count: 17408 | fork_events_count: 7102
gha_event_created_at: 2023-09-10T23:44:27 | gha_created_at: 2018-02-05T20:55:32 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 87 | extension: py | filename: imported_script.py
content:
def main(request, response):
    return [(b'Content-Type', request.GET[b'mime'])], u""
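
This is a wptserve handler: it echoes the requested MIME type back as the Content-Type of an empty response body. A hypothetical request it would serve (the URL shape is inferred from request.GET[b'mime'], not stated in this row):

#   GET /workers/support/imported_script.py?mime=text/javascript
#   -> 200 OK, Content-Type: text/javascript, empty body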

repo_name: yihong0618/running_page | path: /scripts/generator/db.py
blob_id: 723979b07a124c7858019e53fbefefa4dc07ce24 | directory_id: de4d86d528f6a45943e7f369144b91740b7ad67b | content_id: 689c048b07a941dbef2c519207f8fc29e256d799
snapshot_id: 4bf1fe654631cb2fd58c082d97cb990da34ce266 | revision_id: 07e7c3219634ffa181b8f07defee9c75b9e68e31 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-03T03:14:47.726778 | revision_date: 2023-08-27T12:54:29 | committer_date: 2023-08-27T12:54:29
github_id: 296233312 | star_events_count: 2875 | fork_events_count: 1095
gha_event_created_at: 2023-09-13T10:07:16 | gha_created_at: 2020-09-17T05:59:22 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 4434 | extension: py | filename: db.py
content:
import datetime
import random
import string
import time

from geopy.geocoders import Nominatim
from sqlalchemy import Column, Float, Integer, Interval, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


# random user name, 8 letters
def randomword():
    letters = string.ascii_lowercase
    return "".join(random.choice(letters) for i in range(8))


# reverse the location (lat, lon) -> location detail
g = Nominatim(user_agent=randomword())

ACTIVITY_KEYS = [
    "run_id",
    "name",
    "distance",
    "moving_time",
    "type",
    "start_date",
    "start_date_local",
    "location_country",
    "summary_polyline",
    "average_heartrate",
    "average_speed",
]


class Activity(Base):
    __tablename__ = "activities"

    run_id = Column(Integer, primary_key=True)
    name = Column(String)
    distance = Column(Float)
    moving_time = Column(Interval)
    elapsed_time = Column(Interval)
    type = Column(String)
    start_date = Column(String)
    start_date_local = Column(String)
    location_country = Column(String)
    summary_polyline = Column(String)
    average_heartrate = Column(Float)
    average_speed = Column(Float)
    streak = None

    def to_dict(self):
        out = {}
        for key in ACTIVITY_KEYS:
            attr = getattr(self, key)
            if isinstance(attr, (datetime.timedelta, datetime.datetime)):
                out[key] = str(attr)
            else:
                out[key] = attr
        if self.streak:
            out["streak"] = self.streak
        return out


def update_or_create_activity(session, run_activity):
    created = False
    try:
        activity = (
            session.query(Activity).filter_by(run_id=int(run_activity.id)).first()
        )
        if not activity:
            start_point = run_activity.start_latlng
            location_country = getattr(run_activity, "location_country", "")
            # re-resolve when the country is missing, or equals "China" (see #176)
            if (not location_country and start_point) or location_country == "China":
                try:
                    location_country = str(
                        g.reverse(f"{start_point.lat}, {start_point.lon}")
                    )
                # geocoder rate limit (usually only hit the first time)
                except Exception:
                    print("+++++++limit+++++++")
                    time.sleep(2)
                    try:
                        location_country = str(
                            g.reverse(f"{start_point.lat}, {start_point.lon}")
                        )
                    except Exception:
                        pass
            activity = Activity(
                run_id=run_activity.id,
                name=run_activity.name,
                distance=run_activity.distance,
                moving_time=run_activity.moving_time,
                elapsed_time=run_activity.elapsed_time,
                type=run_activity.type,
                start_date=run_activity.start_date,
                start_date_local=run_activity.start_date_local,
                location_country=location_country,
                average_heartrate=run_activity.average_heartrate,
                average_speed=float(run_activity.average_speed),
                summary_polyline=(
                    run_activity.map and run_activity.map.summary_polyline or ""
                ),
            )
            session.add(activity)
            created = True
        else:
            activity.name = run_activity.name
            activity.distance = float(run_activity.distance)
            activity.moving_time = run_activity.moving_time
            activity.elapsed_time = run_activity.elapsed_time
            activity.type = run_activity.type
            activity.average_heartrate = run_activity.average_heartrate
            activity.average_speed = float(run_activity.average_speed)
            activity.summary_polyline = (
                run_activity.map and run_activity.map.summary_polyline or ""
            )
    except Exception as e:
        print(f"something wrong with {run_activity.id}")
        print(str(e))
    return created


def init_db(db_path):
    engine = create_engine(
        f"sqlite:///{db_path}", connect_args={"check_same_thread": False}
    )
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)
    return session()
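
A hedged usage sketch for the helpers above; run_activity is a stand-in for an activity object from a tracker API and is not defined in this file:

session = init_db("data.db")  # creates the activities table on first use
# `run_activity` is hypothetical: an object with id, name, distance, etc.
created = update_or_create_activity(session, run_activity)
session.commit()
print("created" if created else "updated")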

repo_name: pmret/papermario | path: /tools/splat/segtypes/n64/segment.py
blob_id: b5b0442768432487e0413e22d8a215ec4ab1f3cb | directory_id: e73547787354afd9b717ea57fe8dd0695d161821 | content_id: 8f9c3cccd35b5640776ca2bdb3b30385c335f1a5
snapshot_id: 8b514b19653cef8d6145e47499b3636b8c474a37 | revision_id: 9774b26d93f1045dd2a67e502b6efc9599fb6c31 | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null
visit_date: 2023-08-31T07:09:48.951514 | revision_date: 2023-08-21T18:07:08 | committer_date: 2023-08-21T18:07:08
github_id: 287151133 | star_events_count: 904 | fork_events_count: 139
gha_event_created_at: 2023-09-14T02:44:23 | gha_created_at: 2020-08-13T01:22:57 | gha_language: C
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 75 | extension: py | filename: segment.py
content:
from segtypes.segment import Segment


class N64Segment(Segment):
    pass

repo_name: hydroshare/hydroshare | path: /hs_tracking/views.py
blob_id: 4678d72cfef22f308bd5b5027d2dc1f695ca362b | directory_id: 07e810873aa0134ba5017ccfef641d1038ca9b92 | content_id: b58d4aca18cc1d4763ba6b51f25309c284d50e4e
snapshot_id: 9093e6dce047a30d4b2b7720257a7841d209353f | revision_id: 69855813052243c702c9b0108d2eac3f4f1a768f | branch_name: refs/heads/develop
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-09-04T12:52:30.816709 | revision_date: 2023-08-30T16:46:20 | committer_date: 2023-08-30T16:46:20
github_id: 24703136 | star_events_count: 207 | fork_events_count: 57
gha_event_created_at: 2023-09-14T20:20:16 | gha_created_at: 2014-10-02T02:19:41 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 4286 | extension: py | filename: views.py
content:
import csv
import urllib.parse
from io import StringIO

from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView

from . import models as hs_tracking
from .models import Session, Variable
from .utils import authentic_redirect_url, get_std_log_fields


class AppLaunch(TemplateView):
    def get(self, request, **kwargs):
        # get the query parameters and remove the redirect url b/c
        # we don't need to log this.
        querydict = dict(request.GET)
        url = querydict.pop('url', [None])[0]
        if not authentic_redirect_url(url):
            return HttpResponseForbidden()
        # encode url placeholder values received from the front-end
        url_placeholders = ["HS_JS_AGG_KEY", "HS_JS_MAIN_FILE_KEY", "HS_JS_FILE_KEY"]
        for placeholder in url_placeholders:
            placeholder_value = querydict.pop(placeholder, [''])[0]
            if placeholder in url:
                encoded_value = urllib.parse.quote(placeholder_value)
                url = url.replace(placeholder, encoded_value)
        # log app launch details if user is logged in
        if request.user.is_authenticated:
            # get user session and standard fields
            session = Session.objects.for_request(request, request.user)
            fields = get_std_log_fields(request, session)
            # parse the query and param portions of the url
            purl = urllib.parse.urlparse(url)
            # extract the app url args so they can be logged
            app_args = urllib.parse.parse_qs(purl.query)
            # update the log fields with the extracted request and url params
            fields.update(querydict)
            fields.update(app_args)
            # clean up the formatting of the query and app arg dicts
            # i.e. represent lists in csv format without brackets [ ]
            # so that the log records don't need to be cleaned later.
            fields.update(dict((k, ','.join(v)) for k, v in list(fields.items())
                               if type(v) == list))
            # format and save the log message
            msg = Variable.format_kwargs(**fields)
            session.record('app_launch', value=msg)
        return HttpResponseRedirect(url)


class UseTrackingView(TemplateView):
    template_name = 'hs_tracking/tracking.html'

    @method_decorator(user_passes_test(lambda u: u.is_staff))
    def dispatch(self, *args, **kwargs):
        return super(UseTrackingView, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        return {}


class VisitorProfileReport(TemplateView):
    @method_decorator(user_passes_test(lambda u: u.is_staff))
    def dispatch(self, *args, **kwargs):
        return super(VisitorProfileReport, self).dispatch(*args, **kwargs)

    def get(self, request, **kwargs):
        """Download a CSV report of use tracking data."""
        f = StringIO()
        w = csv.writer(f)
        w.writerow(hs_tracking.VISITOR_FIELDS)
        visitors = hs_tracking.Visitor.objects.all()
        for v in visitors:
            info = v.export_visitor_information()
            row = [info[field] for field in hs_tracking.VISITOR_FIELDS]
            w.writerow(row)
        f.seek(0)
        return HttpResponse(f.read(), content_type="text/csv")


class HistoryReport(TemplateView):
    # @method_decorator(user_passes_test(lambda u: u.is_staff))
    def dispatch(self, *args, **kwargs):
        return super(HistoryReport, self).dispatch(*args, **kwargs)

    def get(self, request, **kwargs):
        """Download a CSV report of use tracking data."""
        f = StringIO()
        w = csv.writer(f)
        w.writerow(
            ['visitor', 'session', 'session start', 'timestamp', 'variable', 'type', 'value'])
        variables = hs_tracking.Variable.objects.all().order_by('timestamp')
        for v in variables:
            row = [v.session.visitor.id, v.session.id, v.session.begin, v.timestamp,
                   v.name, v.get_type_display(), v.value]
            w.writerow(row)
        f.seek(0)
        return HttpResponse(f.read(), content_type="text/csv")
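
These class-based views would normally be mounted in a Django URLconf; a minimal sketch (the route paths are illustrative, not taken from this row):

from django.urls import path

urlpatterns = [
    path("tracking/applaunch/", AppLaunch.as_view()),
    path("tracking/reports/profiles/", VisitorProfileReport.as_view()),
    path("tracking/reports/history/", HistoryReport.as_view()),
]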

repo_name: xhtml2pdf/xhtml2pdf | path: /manual_test/cookbook.py
blob_id: eb5a1445c23ede7b1a8eba786e0ef9080895cf4e | directory_id: dbd1399c4e8d01e02f4f94de7b1bbdb123838c0c | content_id: 7702a6ccec3c8b30fa4cb3908710ede9ae339f8b
snapshot_id: fe4416904bf2cedcce5af67b413152545c7d2499 | revision_id: f5bd8520699a2742aa2d960826b19d9594864fe0 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0", "GPL-1.0-or-later"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-09T23:27:01.613275 | revision_date: 2023-07-24T12:19:19 | committer_date: 2023-07-24T12:19:19
github_id: 1755413 | star_events_count: 1218 | fork_events_count: 398
gha_event_created_at: 2023-07-24T12:19:20 | gha_created_at: 2011-05-16T13:45:29 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3709 | extension: py | filename: cookbook.py
content:
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = "$Revision: 176 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-03-15 00:11:47 +0100 (Sa, 15 Mrz 2008) $"

"""
HTML/CSS to PDF converter

Most people know how to write a page with HTML and CSS. Why not use these
skills to dynamically generate PDF documents? The "pisa" project
(http://www.htmltopdf.org) enables you to do this quite simply.
"""

import six

from xhtml2pdf import pisa
from faker import Faker

# Shortcut for dumping all logs to the screen
pisa.showLogging()


def html2pdf(data, filename, open_file=False):
    """
    Simple test showing how to create a PDF file from
    PML Source String. Also shows errors and tries to start
    the resulting PDF
    """
    pdf = pisa.CreatePDF(
        six.StringIO(data),
        open(filename, "wb"))
    if open_file and (not pdf.err):
        pisa.startViewer(filename)
    return not pdf.err


if __name__ == "__main__":
    HTMLTEST = """
<html>
<style>
@page {
size: a4 portrait;
@frame header_frame { /* Static Frame */
-pdf-frame-content: header_content;
left: 50pt; width: 512pt; top: 50pt; height: 40pt;
}
@frame content_frame { /* Content Frame */
left: 50pt; width: 512pt; top: 90pt; height: 632pt;
}
@frame footer_frame { /* Another static Frame */
-pdf-frame-content: footer_content;
left: 50pt; width: 512pt; top: 772pt; height: 20pt;
}
}
</style>
<body dir="rtl">
<p>Hello <strong style="color: #f00;">World</strong>
<hr>
<table border="1" style="background: #eee; padding: 0.5em;">
<tr>
<td>Amount</td>
<td>Description</td>
<td>Total</td>
</tr>
<tr>
<td>1</td>
<td>Good weather</td>
<td>0 EUR</td>
</tr>
<tr style="font-weight: bold">
<td colspan="2" align="right">Sum</td>
<td>0 EUR</td>
</tr>
</table>
<p> Esto es un texto al revez</p>
<p dir="rtl">Esto es un texto al revez</p>
<p>
أشار إنفرسيني ، في تصريحات نُشرت يوم الأحد على الموقع الإلكتروني لصحيفة Il Post ، إلى حقيقة أن سفينة Geo Barents ، التي تديرها منظمة أطباء بلا حدود الإنسانية (MSF) ، نفذت عمليتي إنقاذ في وسط البحر الأبيض المتوسط دون الحاجة إلى تفويض من الحكومة الإيطالية.
</p>
<a name="anchorpoint">abc</a>
%s
<div id="header_content" >Lyrics-R-Us</div>
<div id="footer_content" >(c) - page <pdf:pagenumber>
of <pdf:pagecount>
</div>
</body>
</html>
"""
    fake = Faker()
    html = HTMLTEST % (
        "<br>".join(["<p>%s <span style=\"color: #f00;\"><pdf:pagenumber> of <pdf:pagecount> </span></p>" % fake.text() for x in range(1)])
    )
    html2pdf(html, "test.pdf", open_file=False)

repo_name: tensorly/tensorly | path: /tensorly/__init__.py
blob_id: 2d9ef3dc420309241820c5b443f562257215d668 | directory_id: 353730afc44b31cf4efded67a4e2835d19c75922 | content_id: acce8186fe3e30202c34e80e9916770183825c64
snapshot_id: 605529bf5206f1977c6067f96f47bec439355246 | revision_id: de05e178850eb2abe43ec1a40f80624ca606807d | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-31T14:01:45.527525 | revision_date: 2023-08-20T18:28:25 | committer_date: 2023-08-20T18:28:25
github_id: 71603727 | star_events_count: 1533 | fork_events_count: 334
gha_event_created_at: 2023-09-08T18:10:37 | gha_created_at: 2016-10-21T23:14:52 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2890 | extension: py | filename: __init__.py
content:
__version__ = "0.8.1"

import sys

from .base import unfold, fold
from .base import tensor_to_vec, vec_to_tensor
from .base import partial_unfold, partial_fold
from .base import partial_tensor_to_vec, partial_vec_to_tensor
from .cp_tensor import (
    cp_to_tensor,
    cp_to_unfolded,
    cp_to_vec,
    cp_norm,
    cp_mode_dot,
    cp_normalize,
    validate_cp_rank,
)
from .tucker_tensor import (
    tucker_to_tensor,
    tucker_to_unfolded,
    tucker_to_vec,
    tucker_mode_dot,
    validate_tucker_rank,
)
from .tt_tensor import (
    tt_to_tensor,
    tt_to_unfolded,
    tt_to_vec,
    validate_tt_rank,
    pad_tt_rank,
)
from .tt_matrix import (
    tt_matrix_to_tensor,
    validate_tt_matrix_rank,
    tt_matrix_to_unfolded,
    tt_matrix_to_vec,
    tt_matrix_to_matrix,
)
from .tr_tensor import tr_to_tensor, tr_to_unfolded, tr_to_vec, validate_tr_rank
from .tenalg import SVD_FUNS, svd_interface, truncated_svd
from .backend import (
    set_backend,
    get_backend,
    # backend_context,
    # backend_manager,
    # _get_backend_dir, _get_backend_method,
)

# from . import backend as backend_manager
from .backend import (
    context,
    tensor,
    is_tensor,
    shape,
    ndim,
    to_numpy,
    copy,
    float64,
    concatenate,
    reshape,
    transpose,
    moveaxis,
    arange,
    any,
    ones,
    zeros,
    zeros_like,
    eye,
    where,
    conj,
    index,
    index_update,
    clip,
    max,
    min,
    argmax,
    argmin,
    all,
    mean,
    sum,
    prod,
    sign,
    abs,
    sqrt,
    norm,
    dot,
    kron,
    solve,
    lstsq,
    qr,
    kr,
    stack,
    maximum,
    eps,
    finfo,
    matmul,
    check_random_state,
    randn,
    log,
    log2,
    exp,
    sin,
    cos,
    tan,
    asin,
    acos,
    atan,
    arcsin,
    arccos,
    arctan,
    sinh,
    sort,
    cosh,
    tanh,
    arcsinh,
    arccosh,
    arctanh,
    asinh,
    acosh,
    atanh,
    e,
    pi,
    inf,
    nan,
)

from . import backend
from . import decomposition
from . import plugins
from . import metrics
from . import regression
from . import tenalg
from . import random
from . import datasets


# Add Backend functions, dynamically dispatched
def __dir__():
    """Returns the module's __dir__, including the local variables
    and augmenting it with the dynamically dispatched variables from backend.
    """
    static_items = list(sys.modules[__name__].__dict__.keys())
    return backend.get_backend_dir() + static_items
    # return _get_backend_dir() + static_items


__getattr__ = backend.__getattribute__

# override_module_dispatch(__name__,
#                          backend_manager.__getattribute__,
#                          full_dir)
# # override_module_dispatch(__name__, _get_backend_method, full_dir)
# del override_module_dispatch, full_dir#, _get_backend_method
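
The dynamic dispatch above means top-level tensorly functions resolve against the active backend; a small sketch using the public API:

import tensorly as tl

tl.set_backend("numpy")  # resolved through the backend dispatch shown above
t = tl.tensor([[1.0, 2.0], [3.0, 4.0]])
print(tl.unfold(t, mode=0).shape)  # (2, 2)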

repo_name: opsdroid/opsdroid | path: /opsdroid/cli/logs.py
blob_id: 2c6707d8ac6cbd755eb1078bb5adcde4d980c0f8 | directory_id: 99a5229ba31d633b202252e1fda6194c70c83c38 | content_id: 4a11feb9ae79deace303ce46e5f30de0d65a419f
snapshot_id: 1f5aeaa9a18e5c268ad7bfb46664f969f243814d | revision_id: 41246da2f6f379a889dadd1d3b4e139b65d3c9fb | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-31T11:54:51.735969 | revision_date: 2023-08-15T12:21:27 | committer_date: 2023-08-15T12:21:27
github_id: 64034523 | star_events_count: 835 | fork_events_count: 593
gha_event_created_at: 2023-08-27T13:54:59 | gha_created_at: 2016-07-23T20:18:56 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1061 | extension: py | filename: logs.py
content:
"""The logs subcommand for opsdroid cli."""
import click
import tailer
from opsdroid.const import DEFAULT_LOG_FILENAME
@click.group(invoke_without_command=True)
@click.option("-f", "follow", is_flag=True, help="Print the logs in real time")
@click.pass_context
def logs(ctx, follow):
"""Print the content of the log file into the terminal.
Open opsdroid logs and prints the contents of the file into the terminal.
If you wish to follow the logs in real time you can use the `-f` flag which
will allow you to do this.
Args:
ctx (:obj:`click.Context`): The current click cli context.
follow(bool): Set by the `-f` flag to trigger the print of the logs in real time.
Returns:
int: the exit code. Always returns 0 in this case.
"""
with open(DEFAULT_LOG_FILENAME, "r") as log:
if follow:
click.echo("Now following logs in real time, press CTRL+C to stop.")
for line in tailer.follow(log):
click.echo(line)
click.echo(log.read())
ctx.exit(0)
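
For reference, the subcommand above would be driven from a shell; the invocations follow from the decorators (the "opsdroid" CLI name is the package's entry point):

#   opsdroid logs       # print DEFAULT_LOG_FILENAME and exit
#   opsdroid logs -f    # follow the log in real time (CTRL+C to stop)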

repo_name: chromium/chromium | path: /ios/build/bots/scripts/plugin/test_plugins_test.py
blob_id: 99685cff9e253b395dc2c8e55e02eecc2c7ff08a | directory_id: a3d6556180e74af7b555f8d47d3fea55b94bcbda | content_id: 0fa254d31bd8bb23cdc2b308d2c883739e65b87a
snapshot_id: aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | revision_id: a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-24T00:35:12.585945 | revision_date: 2023-08-23T22:01:11 | committer_date: 2023-08-23T22:01:11
github_id: 120360765 | star_events_count: 17408 | fork_events_count: 7102
gha_event_created_at: 2023-09-10T23:44:27 | gha_created_at: 2018-02-05T20:55:32 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 12407 | extension: py | filename: test_plugins_test.py
content:
#!/usr/bin/env vpython3
# Copyright 2022 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest.mock as mock
import sys
import unittest
import subprocess
import os
import signal
import glob

# if the current directory is in scripts (pwd), then we need to
# add plugin in order to import from that directory
if os.path.split(os.path.dirname(__file__))[1] != 'plugin':
    sys.path.append(
        os.path.join(os.path.abspath(os.path.dirname(__file__)), 'plugin'))
# if executing from plugin directory, pull in scripts
else:
    sys.path.append(
        os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))

from plugin_constants import PLUGIN_PROTOS_PATH, MAX_RECORDED_COUNT
from test_plugins import VideoRecorderPlugin, BasePlugin, FileCopyPlugin
import iossim_util

sys.path.append(PLUGIN_PROTOS_PATH)
import test_plugin_service_pb2
import test_plugin_service_pb2_grpc

TEST_DEVICE_ID = '123'
TEST_DEVICE_NAME = 'simulator_x_y'
TEST_DEVICE_PATH = '/root/dir'
TEST_CASE_NAME = '[AAA_BBB]'
TEST_CASE_INFO = test_plugin_service_pb2.TestCaseInfo(name=TEST_CASE_NAME)
TEST_DEVICE_INFO = test_plugin_service_pb2.DeviceInfo(name=TEST_DEVICE_NAME)
OUT_DIR = 'out/dir'


class BasePluginTest(unittest.TestCase):

    @mock.patch("iossim_util.get_simulator_list")
    def test_get_udid_and_path_for_device_name_no_cache(self, mock_get_list):
        mock_get_list.return_value = {
            'devices': {
                'RUNTIME': [{
                    'name': TEST_DEVICE_NAME,
                    'udid': TEST_DEVICE_ID
                }]
            }
        }
        base_plugin = BasePlugin('DEVICE_ID', 'OUT_DIR')
        self.assertEqual(
            base_plugin.get_udid_and_path_for_device_name(TEST_DEVICE_NAME,
                                                          [TEST_DEVICE_PATH]),
            (TEST_DEVICE_ID, TEST_DEVICE_PATH))
        mock_get_list.assert_called_once_with(TEST_DEVICE_PATH)
        self.assertEqual(
            base_plugin.devices.get(TEST_DEVICE_NAME), {
                'UDID': TEST_DEVICE_ID,
                'path': TEST_DEVICE_PATH,
            })

    @mock.patch('iossim_util.get_simulator_list')
    def test_get_udid_and_path_for_device_name_with_cache(self, mock_get_list):
        base_plugin = BasePlugin('DEVICE_ID', 'OUT_DIR')
        base_plugin.devices['NAME'] = {
            'UDID': TEST_DEVICE_ID,
            'path': TEST_DEVICE_PATH
        }
        self.assertEqual(
            base_plugin.get_udid_and_path_for_device_name('NAME'),
            (TEST_DEVICE_ID, TEST_DEVICE_PATH))
        mock_get_list.assert_not_called()


class VideoRecorderPluginTest(unittest.TestCase):

    @mock.patch("subprocess.Popen")
    def test_test_case_will_start_succeed(self, mock_popen):
        video_recorder_plugin = VideoRecorderPlugin(TEST_DEVICE_ID, OUT_DIR)
        request = test_plugin_service_pb2.TestCaseWillStartRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_will_start(request)
        file_name = video_recorder_plugin.get_video_file_name(TEST_CASE_NAME, 0)
        file_dir = os.path.join(OUT_DIR, file_name)
        cmd = [
            'xcrun', 'simctl', 'io', TEST_DEVICE_ID, 'recordVideo', '--codec=h264',
            '-f', file_dir
        ]
        mock_popen.assert_called_once_with(cmd)
        self.assertTrue(video_recorder_plugin.recording_process.test_case_name ==
                        TEST_CASE_NAME)

    @mock.patch("subprocess.Popen")
    def test_test_case_will_start_exceedMaxRecordedCount(self, mock_popen):
        video_recorder_plugin = VideoRecorderPlugin(TEST_DEVICE_ID, OUT_DIR)
        request = test_plugin_service_pb2.TestCaseWillStartRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.testcase_recorded_count[
            TEST_CASE_NAME] = MAX_RECORDED_COUNT
        video_recorder_plugin.test_case_will_start(request)
        mock_popen.assert_not_called()

    @mock.patch("subprocess.Popen")
    @mock.patch("os.kill")
    @mock.patch("os.remove")
    def test_test_case_will_start_previousProcessNotTerminated(
            self, mock_os_remove, mock_os_kill, mock_popen):
        video_recorder_plugin = VideoRecorderPlugin(TEST_DEVICE_ID, OUT_DIR)
        request = test_plugin_service_pb2.TestCaseWillStartRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_will_start(request)
        video_recorder_plugin.test_case_will_start(request)
        mock_os_kill.assert_called_once_with(mock.ANY, signal.SIGTERM)
        file_name = video_recorder_plugin.get_video_file_name(TEST_CASE_NAME, 0)
        file_dir = os.path.join(OUT_DIR, file_name)
        mock_os_remove.assert_called_once_with(file_dir)
        cmd = [
            'xcrun', 'simctl', 'io', TEST_DEVICE_ID, 'recordVideo', '--codec=h264',
            '-f', file_dir
        ]
        mock_popen.assert_called_with(cmd)

    @mock.patch("subprocess.Popen")
    @mock.patch("os.kill")
    @mock.patch("os.remove")
    def test_test_case_did_fail_succeed(self, mock_os_remove, mock_os_kill,
                                        mock_popen):
        # first, start recording
        video_recorder_plugin = VideoRecorderPlugin(TEST_DEVICE_ID, OUT_DIR)
        request = test_plugin_service_pb2.TestCaseWillStartRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_will_start(request)
        # then test case fails
        request = test_plugin_service_pb2.TestCaseDidFailRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_did_fail(request)
        mock_os_kill.assert_called_once_with(mock.ANY, signal.SIGINT)
        mock_os_remove.assert_not_called()
        self.assertTrue(video_recorder_plugin.recording_process.process == None)
        self.assertTrue(
            video_recorder_plugin.recording_process.test_case_name == None)
        self.assertTrue(
            video_recorder_plugin.testcase_recorded_count[TEST_CASE_NAME] == 1)

    @mock.patch("os.kill")
    @mock.patch("os.remove")
    def test_test_case_did_fail_noRecordingRunning(self, mock_os_remove,
                                                   mock_os_kill):
        video_recorder_plugin = VideoRecorderPlugin(TEST_DEVICE_ID, OUT_DIR)
        request = test_plugin_service_pb2.TestCaseDidFailRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_did_fail(request)
        mock_os_kill.assert_not_called()
        mock_os_remove.assert_not_called()

    @mock.patch("subprocess.Popen")
    @mock.patch("os.kill")
    @mock.patch("os.remove")
    def test_test_case_did_finish_succeed(self, mock_os_remove, mock_os_kill,
                                          mock_popen):
        # first, start recording
        video_recorder_plugin = VideoRecorderPlugin(TEST_DEVICE_ID, OUT_DIR)
        request = test_plugin_service_pb2.TestCaseWillStartRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_will_start(request)
        # then test case finishes
        request = test_plugin_service_pb2.TestCaseDidFinishRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_did_finish(request)
        mock_os_kill.assert_called_once_with(mock.ANY, signal.SIGTERM)
        file_name = video_recorder_plugin.get_video_file_name(TEST_CASE_NAME, 0)
        file_dir = os.path.join(OUT_DIR, file_name)
        mock_os_remove.assert_called_once_with(file_dir)
        self.assertTrue(video_recorder_plugin.recording_process.process == None)
        self.assertTrue(
            video_recorder_plugin.recording_process.test_case_name == None)
        self.assertTrue(
            TEST_CASE_NAME not in video_recorder_plugin.testcase_recorded_count)

    @mock.patch("subprocess.Popen")
    @mock.patch("os.kill")
    @mock.patch("os.remove")
    def test_test_case_did_finish_remove_file_failed(self, mock_os_remove,
                                                     mock_os_kill, mock_popen):
        # first, start recording
        video_recorder_plugin = VideoRecorderPlugin(TEST_DEVICE_ID, OUT_DIR)
        request = test_plugin_service_pb2.TestCaseWillStartRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_will_start(request)
        # then test case finishes
        mock_os_remove.side_effect = FileNotFoundError
        request = test_plugin_service_pb2.TestCaseDidFinishRequest(
            test_case_info=TEST_CASE_INFO)
        # this should not throw exception because it's caught
        video_recorder_plugin.test_case_did_finish(request)
        mock_os_kill.assert_called_once_with(mock.ANY, signal.SIGTERM)
        file_name = video_recorder_plugin.get_video_file_name(TEST_CASE_NAME, 0)
        file_dir = os.path.join(OUT_DIR, file_name)
        mock_os_remove.assert_called_once_with(file_dir)
        self.assertTrue(video_recorder_plugin.recording_process.process == None)
        self.assertTrue(
            video_recorder_plugin.recording_process.test_case_name == None)
        self.assertTrue(
            TEST_CASE_NAME not in video_recorder_plugin.testcase_recorded_count)

    @mock.patch("os.kill")
    @mock.patch("os.remove")
    def test_test_case_did_finish_noRecordingRunning(self, mock_os_remove,
                                                     mock_os_kill):
        video_recorder_plugin = VideoRecorderPlugin(TEST_DEVICE_ID, OUT_DIR)
        request = test_plugin_service_pb2.TestCaseDidFinishRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_did_finish(request)
        mock_os_kill.assert_not_called()
        mock_os_remove.assert_not_called()

    @mock.patch("subprocess.Popen")
    @mock.patch("os.kill")
    @mock.patch("os.remove")
    def test_reset_succeed(self, mock_os_remove, mock_os_kill, mock_popen):
        # first, start recording
        video_recorder_plugin = VideoRecorderPlugin(TEST_DEVICE_ID, OUT_DIR)
        request = test_plugin_service_pb2.TestCaseWillStartRequest(
            test_case_info=TEST_CASE_INFO)
        video_recorder_plugin.test_case_will_start(request)
        # reset
        video_recorder_plugin.reset()
        mock_os_kill.assert_called_once_with(mock.ANY, signal.SIGTERM)
        file_name = video_recorder_plugin.get_video_file_name(TEST_CASE_NAME, 0)
        file_dir = os.path.join(OUT_DIR, file_name)
        mock_os_remove.assert_called_once_with(file_dir)
        self.assertTrue(video_recorder_plugin.recording_process.process == None)
        self.assertTrue(
            video_recorder_plugin.recording_process.test_case_name == None)
        # reset again to make sure no exception is thrown
        video_recorder_plugin.reset()


class FileCopyPluginTest(unittest.TestCase):

    @mock.patch("os.path.exists")
    @mock.patch("os.mkdir")
    @mock.patch("glob.glob")
    @mock.patch("shutil.move")
    def testOutputPathExists(self, move_mock: mock.MagicMock,
                             glob_mock: mock.MagicMock,
                             mkdir_mock: mock.MagicMock,
                             path_mock: mock.MagicMock):
        path_mock.return_value = True
        glob_mock.return_value = ["glob_return_value"]
        file_copy_plugin = FileCopyPlugin('GLOB_PATTERN', OUT_DIR)
        file_copy_plugin.devices[TEST_DEVICE_NAME] = {
            'UDID': TEST_DEVICE_ID,
            'path': TEST_DEVICE_PATH
        }
        request = test_plugin_service_pb2.TestBundleWillFinishRequest(
            device_info=TEST_DEVICE_INFO)
        file_copy_plugin.test_bundle_will_finish(request)
        mkdir_mock.assert_not_called()
        path_mock.assert_called_once_with(OUT_DIR)
        glob_mock.assert_called_once_with(
            os.path.join(TEST_DEVICE_PATH, TEST_DEVICE_ID, "GLOB_PATTERN"))
        move_mock.assert_called_once_with("glob_return_value", OUT_DIR)

    @mock.patch("os.path.exists")
    @mock.patch("os.mkdir")
    @mock.patch("glob.glob")
    @mock.patch("shutil.move")
    def testOutputPathDoesNotExist(self, move_mock: mock.MagicMock,
                                   glob_mock: mock.MagicMock,
                                   mkdir_mock: mock.MagicMock,
                                   path_mock: mock.MagicMock):
        path_mock.return_value = False
        glob_mock.return_value = ["glob_return_value"]
        file_copy_plugin = FileCopyPlugin('GLOB_PATTERN', OUT_DIR)
        file_copy_plugin.devices[TEST_DEVICE_NAME] = {
            'UDID': TEST_DEVICE_ID,
            'path': TEST_DEVICE_PATH
        }
        request = test_plugin_service_pb2.TestBundleWillFinishRequest(
            device_info=TEST_DEVICE_INFO)
        file_copy_plugin.test_bundle_will_finish(request)
        mkdir_mock.assert_called_once_with(OUT_DIR)
        path_mock.assert_called_once_with(OUT_DIR)
        glob_mock.assert_called_once_with(
            os.path.join(TEST_DEVICE_PATH, TEST_DEVICE_ID, "GLOB_PATTERN"))
        move_mock.assert_called_once_with("glob_return_value", OUT_DIR)


if __name__ == '__main__':
    unittest.main()

repo_name: zgpio/tree.nvim | path: /gen_api.py
blob_id: 40aa492ac5493a569409a7a9869e6cc751314e1e | directory_id: 3c30ac9d17f355e39111595ec8c0709b862769c7 | content_id: 318e6a519dacc4456e92ea2c84955e6102e7b339
snapshot_id: 4804b2632f719f04c0b4fa8fe0c5c78bbf6d5255 | revision_id: 2f540c2d00a2573b8b694a4d9e512f379cd5ea86 | branch_name: refs/heads/master
detected_licenses: [] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2022-06-21T12:06:23.308637 | revision_date: 2022-06-10T09:46:54 | committer_date: 2022-06-10T09:46:54
github_id: 213804520 | star_events_count: 221 | fork_events_count: 10
gha_event_created_at: 2020-08-07T05:22:23 | gha_created_at: 2019-10-09T02:42:39 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3076 | extension: py | filename: gen_api.py
content:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from jinja2 import Environment, FileSystemLoader
import msgpack, sys, os, subprocess
import re


class InvalidType(Exception):
    pass


class NativeType:
    def __init__(self, name, expect_ref=False):
        self.name = name
        self.expect_ref = expect_ref


REMAP_T = {
    'ArrayOf(Integer, 2)': NativeType('std::vector<Integer>', True),
    'Boolean': NativeType('bool'),
    'String': NativeType('std::string', True),
    'void': NativeType('void'),
    'Window': NativeType('Window'),
    'Buffer': NativeType('Buffer'),
    'Tabpage': NativeType('Tabpage'),
    'Integer': NativeType('Integer'),
    'Object': NativeType('Object', True),
    'Array': NativeType('Array', True),
    'Dictionary': NativeType('Dictionary', True)
}


def convert_type_to_native(nvim_t, enable_ref_op, const_ref=True, ref=False):
    array_of = r'ArrayOf\(\s*(\w+)\s*\)'
    # print(nvim_t.__class__)
    # print(array_of.__class__)
    obj = re.match(array_of, nvim_t)
    if obj:
        ret = 'std::vector<%s>' % convert_type_to_native(obj.groups()[0], False)
        return 'const ' + ret + '&' if enable_ref_op else ret
    if nvim_t in REMAP_T:
        native_t = REMAP_T[nvim_t]
        if const_ref:
            return 'const ' + native_t.name + '&' if enable_ref_op and native_t.expect_ref else native_t.name
        elif ref:
            return native_t.name + '&' if enable_ref_op and native_t.expect_ref else native_t.name
        else:
            return native_t.name
    else:
        print("unknown nvim type name: " + str(nvim_t))
        raise InvalidType()
        # TODO: implement error handler
        # return nvim_t


def main():
    env = Environment(loader=FileSystemLoader('templates', encoding='utf8'))
    api_info = subprocess.check_output(["nvim", '--api-info'])
    unpacked_api = msgpack.unpackb(api_info, raw=False)
    # generate nvim.hpp
    functions = []
    for f in unpacked_api['functions']:
        if 'deprecated_since' in f and f['deprecated_since'] <= 3:
            continue
        d = {}
        # if re.match(r'(n?vim_)?(ui.*|(un)?subscribe|.*(de|a)ttach.*)', f['name']):
        #     print('This is ui function: ' + f['name'])
        #     continue
        d['name'] = f['name']
        d['short_name'] = f['name'][5:] if f['name'].startswith('nvim_') else f['name']
        try:
            d['return'] = convert_type_to_native(f['return_type'], False)
            d['args'] = [{'type': convert_type_to_native(arg[0], True),
                          'name': arg[1]} for arg in f['parameters']]
            functions.append(d)
        except InvalidType:
            print("invalid function = " + str(f))
    tpl = env.get_template('nvim.hpp')
    api = tpl.render({'functions': functions})
    with open(os.path.join("./gen", "nvim.hpp"), 'w') as f:
        f.write(api)
    tplcpp = env.get_template('nvim.cpp')
    apicpp = tplcpp.render({'functions': functions})
    with open(os.path.join("./gen", "nvim.cpp"), 'w') as f:
        f.write(apicpp)


if __name__ == '__main__':
    main()
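
Running the generator needs its external pieces in place; a hedged invocation note:

# Requires nvim on PATH, jinja2 and msgpack installed, and templates/ and gen/ directories.
#   ./gen_api.py   # writes gen/nvim.hpp and gen/nvim.cpp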

repo_name: pfalcon/pycopy | path: /examples/hwapi/button_reaction.py
blob_id: 617c1d259a78b451144bde517ebdff243af69b5f | directory_id: e910318d01528d82040507a49eeeb8dade45b31f | content_id: e5a139a575f2c27be84884d20c645fcee2987410
snapshot_id: e844480a5e5cd463530328889daed2ba87552b8a | revision_id: 3ac90ae9c3c6bbebfba9cada2d37025e35c62796 | branch_name: refs/heads/pfalcon
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-30T09:39:52.290147 | revision_date: 2022-09-08T16:42:38 | committer_date: 2022-09-08T16:42:38
github_id: 15507576 | star_events_count: 753 | fork_events_count: 71
gha_event_created_at: 2021-05-08T04:59:21 | gha_created_at: 2013-12-29T11:38:47 | gha_language: C
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 474 | extension: py | filename: button_reaction.py
content:
import utime
import machine
from hwconfig import LED, BUTTON

# machine.time_pulse_us() function demo

print(
    """\
Let's play an interesting game:
You click button as fast as you can, and I tell you how slow you are.
Ready? Cliiiiick!
"""
)

while 1:
    delay = machine.time_pulse_us(BUTTON, 1, 10 * 1000 * 1000)
    if delay < 0:
        print("Well, you're *really* slow")
    else:
        print("You are as slow as %d microseconds!" % delay)
    utime.sleep_ms(10)

repo_name: dmlc/dgl | path: /examples/pytorch/dimenet/main.py
blob_id: cbd73dfbeeb34eee5d032200c91358ac32830ce4 | directory_id: 98f1a0bfa5b20a0b81e9e555d76e706c62d949c9 | content_id: eb154bcb776bb054b0368f3539d83c4387e8181b
snapshot_id: 3a8fbca3a7f0e9adf6e69679ad62948df48dfc42 | revision_id: bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-31T16:33:21.139163 | revision_date: 2023-08-31T07:49:22 | committer_date: 2023-08-31T07:49:22
github_id: 130375797 | star_events_count: 12631 | fork_events_count: 3482
gha_event_created_at: 2023-09-14T15:48:24 | gha_created_at: 2018-04-20T14:49:09 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 9971 | extension: py | filename: main.py
content:
import copy
from pathlib import Path

import click
import dgl
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from dgl.data.utils import Subset
from logzero import logger
from modules.dimenet import DimeNet
from modules.dimenet_pp import DimeNetPP
from modules.initializers import GlorotOrthogonal
from qm9 import QM9
from ruamel.yaml import YAML
from sklearn.metrics import mean_absolute_error
from torch.utils.data import DataLoader


def split_dataset(
    dataset, num_train, num_valid, shuffle=False, random_state=None
):
    """Split dataset into training, validation and test set.

    Parameters
    ----------
    dataset
        We assume that ``len(dataset)`` gives the number of datapoints and ``dataset[i]``
        gives the ith datapoint.
    num_train : int
        Number of training datapoints.
    num_valid : int
        Number of validation datapoints.
    shuffle : bool, optional
        By default we perform a consecutive split of the dataset. If True,
        we will first randomly shuffle the dataset.
    random_state : None, int or array_like, optional
        Random seed used to initialize the pseudo-random number generator.
        This can be any integer between 0 and 2^32 - 1 inclusive, an array
        (or other sequence) of such integers, or None (the default value).
        If seed is None, then RandomState will try to read data from /dev/urandom
        (or the Windows analogue) if available or seed from the clock otherwise.

    Returns
    -------
    list of length 3
        Subsets for training, validation and test.
    """
    from itertools import accumulate

    num_data = len(dataset)
    assert num_train + num_valid < num_data
    lengths = [num_train, num_valid, num_data - num_train - num_valid]
    if shuffle:
        indices = np.random.RandomState(seed=random_state).permutation(num_data)
    else:
        indices = np.arange(num_data)
    return [
        Subset(dataset, indices[offset - length : offset])
        for offset, length in zip(accumulate(lengths), lengths)
    ]


@torch.no_grad()
def ema(ema_model, model, decay):
    msd = model.state_dict()
    for k, ema_v in ema_model.state_dict().items():
        model_v = msd[k].detach()
        ema_v.copy_(ema_v * decay + (1.0 - decay) * model_v)


def edge_init(edges):
    R_src, R_dst = edges.src["R"], edges.dst["R"]
    dist = torch.sqrt(F.relu(torch.sum((R_src - R_dst) ** 2, -1)))
    # d: bond length, o: bond orientation
    return {"d": dist, "o": R_src - R_dst}


def _collate_fn(batch):
    graphs, line_graphs, labels = map(list, zip(*batch))
    g, l_g = dgl.batch(graphs), dgl.batch(line_graphs)
    labels = torch.tensor(labels, dtype=torch.float32)
    return g, l_g, labels


def train(device, model, opt, loss_fn, train_loader):
    model.train()
    epoch_loss = 0
    num_samples = 0
    for g, l_g, labels in train_loader:
        g = g.to(device)
        l_g = l_g.to(device)
        labels = labels.to(device)
        logits = model(g, l_g)
        loss = loss_fn(logits, labels.view([-1, 1]))
        epoch_loss += loss.data.item() * len(labels)
        num_samples += len(labels)
        opt.zero_grad()
        loss.backward()
        opt.step()
    return epoch_loss / num_samples


@torch.no_grad()
def evaluate(device, model, valid_loader):
    model.eval()
    predictions_all, labels_all = [], []
    for g, l_g, labels in valid_loader:
        g = g.to(device)
        l_g = l_g.to(device)
        logits = model(g, l_g)
        labels_all.extend(labels)
        predictions_all.extend(
            logits.view(
                -1,
            )
            .cpu()
            .numpy()
        )
    return np.array(predictions_all), np.array(labels_all)


@click.command()
@click.option(
    "-m",
    "--model-cnf",
    type=click.Path(exists=True),
    help="Path of model config yaml.",
)
def main(model_cnf):
    yaml = YAML(typ="safe")
    model_cnf = yaml.load(Path(model_cnf))
    model_name, model_params, train_params, pretrain_params = (
        model_cnf["name"],
        model_cnf["model"],
        model_cnf["train"],
        model_cnf["pretrain"],
    )
    logger.info(f"Model name: {model_name}")
    logger.info(f"Model params: {model_params}")
    logger.info(f"Train params: {train_params}")
    if model_params["targets"] in ["mu", "homo", "lumo", "gap", "zpve"]:
        model_params["output_init"] = nn.init.zeros_
    else:
        # 'GlorotOrthogonal' for alpha, R2, U0, U, H, G, and Cv
        model_params["output_init"] = GlorotOrthogonal
    logger.info("Loading Data Set")
    dataset = QM9(label_keys=model_params["targets"], edge_funcs=[edge_init])
    # data split
    train_data, valid_data, test_data = split_dataset(
        dataset,
        num_train=train_params["num_train"],
        num_valid=train_params["num_valid"],
        shuffle=True,
        random_state=train_params["data_seed"],
    )
    logger.info(f"Size of Training Set: {len(train_data)}")
    logger.info(f"Size of Validation Set: {len(valid_data)}")
    logger.info(f"Size of Test Set: {len(test_data)}")
    # data loader
    train_loader = DataLoader(
        train_data,
        batch_size=train_params["batch_size"],
        shuffle=True,
        collate_fn=_collate_fn,
        num_workers=train_params["num_workers"],
    )
    valid_loader = DataLoader(
        valid_data,
        batch_size=train_params["batch_size"],
        shuffle=False,
        collate_fn=_collate_fn,
        num_workers=train_params["num_workers"],
    )
    test_loader = DataLoader(
        test_data,
        batch_size=train_params["batch_size"],
        shuffle=False,
        collate_fn=_collate_fn,
        num_workers=train_params["num_workers"],
    )
    # check cuda
    gpu = train_params["gpu"]
    device = f"cuda:{gpu}" if gpu >= 0 and torch.cuda.is_available() else "cpu"
    # model initialization
    logger.info("Loading Model")
    if model_name == "dimenet":
        model = DimeNet(
            emb_size=model_params["emb_size"],
            num_blocks=model_params["num_blocks"],
            num_bilinear=model_params["num_bilinear"],
            num_spherical=model_params["num_spherical"],
            num_radial=model_params["num_radial"],
            cutoff=model_params["cutoff"],
            envelope_exponent=model_params["envelope_exponent"],
            num_before_skip=model_params["num_before_skip"],
            num_after_skip=model_params["num_after_skip"],
            num_dense_output=model_params["num_dense_output"],
            num_targets=len(model_params["targets"]),
            output_init=model_params["output_init"],
        ).to(device)
    elif model_name == "dimenet++":
        model = DimeNetPP(
            emb_size=model_params["emb_size"],
            out_emb_size=model_params["out_emb_size"],
            int_emb_size=model_params["int_emb_size"],
            basis_emb_size=model_params["basis_emb_size"],
            num_blocks=model_params["num_blocks"],
            num_spherical=model_params["num_spherical"],
            num_radial=model_params["num_radial"],
            cutoff=model_params["cutoff"],
            envelope_exponent=model_params["envelope_exponent"],
            num_before_skip=model_params["num_before_skip"],
            num_after_skip=model_params["num_after_skip"],
            num_dense_output=model_params["num_dense_output"],
            num_targets=len(model_params["targets"]),
            extensive=model_params["extensive"],
            output_init=model_params["output_init"],
        ).to(device)
    else:
        raise ValueError(f"Invalid Model Name {model_name}")
    if pretrain_params["flag"]:
        torch_path = pretrain_params["path"]
        target = model_params["targets"][0]
        model.load_state_dict(torch.load(f"{torch_path}/{target}.pt"))
        logger.info("Testing with Pretrained model")
        predictions, labels = evaluate(device, model, test_loader)
        test_mae = mean_absolute_error(labels, predictions)
        logger.info(f"Test MAE {test_mae:.4f}")
        return
    # define loss function and optimization
    loss_fn = nn.L1Loss()
    opt = optim.Adam(
        model.parameters(),
        lr=train_params["lr"],
        weight_decay=train_params["weight_decay"],
        amsgrad=True,
    )
    scheduler = optim.lr_scheduler.StepLR(
        opt, train_params["step_size"], gamma=train_params["gamma"]
    )
    # model training
    best_mae = 1e9
    no_improvement = 0
    # EMA for valid and test
    logger.info("EMA Init")
    ema_model = copy.deepcopy(model)
    for p in ema_model.parameters():
        p.requires_grad_(False)
    best_model = copy.deepcopy(ema_model)
    logger.info("Training")
    for i in range(train_params["epochs"]):
        train_loss = train(device, model, opt, loss_fn, train_loader)
        ema(ema_model, model, train_params["ema_decay"])
        if i % train_params["interval"] == 0:
            predictions, labels = evaluate(device, ema_model, valid_loader)
            valid_mae = mean_absolute_error(labels, predictions)
            logger.info(
                f"Epoch {i} | Train Loss {train_loss:.4f} | Val MAE {valid_mae:.4f}"
            )
            if valid_mae > best_mae:
                no_improvement += 1
                if no_improvement == train_params["early_stopping"]:
                    logger.info("Early stop.")
                    break
            else:
                no_improvement = 0
                best_mae = valid_mae
                best_model = copy.deepcopy(ema_model)
        else:
            logger.info(f"Epoch {i} | Train Loss {train_loss:.4f}")
        scheduler.step()
    logger.info("Testing")
    predictions, labels = evaluate(device, best_model, test_loader)
    test_mae = mean_absolute_error(labels, predictions)
    logger.info("Test MAE {:.4f}".format(test_mae))


if __name__ == "__main__":
    main()
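
Since main() is a click command, the config arrives via -m/--model-cnf; a hypothetical invocation (the YAML filename is illustrative):

#   python main.py -m dimenet.yaml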

repo_name: tasdikrahman/vocabulary | path: /setup.py
blob_id: 44adead84336610a9048aa16631d4ad09ccc11c5 | directory_id: 55ee564f2f152dc0a38f3cbf920b242ba1d19028 | content_id: 97ec77bf7ef04407e68d22cff42b1a0dcd029ef6
snapshot_id: ab2e9aba6e3ff6fbda99b762f1a67545debe4a45 | revision_id: 54403c5981af25dc3457796b57048ae27f09e9be | branch_name: refs/heads/master
detected_licenses: ["MIT", "Python-2.0"] | license_type: permissive | gha_license_id: MIT
visit_date: 2022-03-04T22:35:01.694549 | revision_date: 2017-12-25T08:27:39 | committer_date: 2017-12-25T08:27:39
github_id: 47144923 | star_events_count: 269 | fork_events_count: 62
gha_event_created_at: 2019-11-04T04:42:35 | gha_created_at: 2015-11-30T20:45:03 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2085 | extension: py | filename: setup.py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

try:
    from os import path
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup

from vocabulary.version import VERSION

__version__ = VERSION

# here = path.abspath(path.dirname(__file__))
# # get the dependencies and installs
# if sys.version_info[:2] <= (2, 7):
#     with open(path.join(here, 'requirements.txt')) as f:
#         all_reqs = f.read().split('\n')
# else:
#     with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
#         all_reqs = f.read().split('\n')
# install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
# dependency_links = [x.strip().replace('git+', '') for x in all_reqs if 'git+' not in x]

try:
    if sys.version_info[:2] <= (2, 7):
        readme = open("README.rst")
    else:
        readme = open("README.rst", encoding="utf8")
    long_description = str(readme.read())
finally:
    readme.close()

setup(
    name='Vocabulary',
    author='Tasdik Rahman',
    version=VERSION,
    author_email='tasdik95@gmail.com',
    description="Module to get meaning, synonym, antonym, part_of_speech, usage_example, pronunciation and hyphenation for a given word",
    long_description=long_description,
    url='https://github.com/tasdikrahman/vocabulary',
    license='MIT',
    install_requires=[
        "requests==2.13.0",
        "mock==2.0.0"
    ],
    # dependency_links=dependency_links,
    # adding package data to it
    packages=find_packages(exclude=['contrib', 'docs']),
    download_url='https://github.com/tasdikrahman/vocabulary/tarball/' + __version__,
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords=['Dictionary', 'Vocabulary', 'simple dictionary', 'pydict', 'dictionary module']
)

repo_name: einsteinpy/einsteinpy | path: /src/einsteinpy/examples.py
blob_id: c209f0f6b9072245cddc6a7b90a23b6b5c8e90c7 | directory_id: 8e342677ebe58bfb6c7360b555044acc6964f333 | content_id: 6a05ffc7f404f8318d557667f0f7eab370d7829f
snapshot_id: 5d48142cab52f7c629d78b47088d804ff3eafae9 | revision_id: 1bd1b27e142b0a0ec2e26bf2611468dbf50d9cf8 | branch_name: refs/heads/main
detected_licenses: ["LicenseRef-scancode-proprietary-license", "MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-17T19:17:34.239265 | revision_date: 2023-01-31T20:43:54 | committer_date: 2023-01-31T20:43:54
github_id: 168302584 | star_events_count: 594 | fork_events_count: 292
gha_event_created_at: 2023-08-12T07:30:41 | gha_created_at: 2019-01-30T07:56:15 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 794 | extension: py | filename: examples.py
content:
import numpy as np

from einsteinpy.geodesic import Timelike

__all__ = ["precession"]


def precession():
    """
    An example to showcase the usage of the various modules in ``einsteinpy``.
    Here, we assume a Schwarzschild spacetime and obtain a test particle orbit, that
    shows apsidal precession.

    Returns
    -------
    geod: ~einsteinpy.geodesic.Timelike
        Timelike Geodesic, defining test particle trajectory
    """
    # Defining initial conditions
    metric = "Schwarzschild"
    position = [40.0, np.pi / 2, 0.0]
    momentum = [0.0, 0.0, 3.83405]

    # Calculating Geodesic
    geod = Timelike(
        metric=metric,
        metric_params=(),
        position=position,
        momentum=momentum,
        steps=5500,
        delta=1.0,
    )

    return geod
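
A minimal call of the example above; constructing the Timelike object performs the integration:

from einsteinpy.examples import precession

geod = precession()  # integrates 5500 steps on a Schwarzschild background
print(geod)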

repo_name: hypothesis/h | path: /tests/h/models/document/_meta_test.py
blob_id: d27766e6bc24ad07c877da64a5dc73b45df133d7 | directory_id: 23652304566b1869ca65b95b116ee43d16e134f3 | content_id: db2692f3fcd8ff23d955798c1bce9e3d18042bf1
snapshot_id: 29399a26990856c336b05022e827541dd8aeedab | revision_id: 232446d776fdb906d2fb253cf0a409c6813a08d6 | branch_name: refs/heads/main
detected_licenses: ["BSD-2-Clause", "BSD-3-Clause", "BSD-2-Clause-Views"] | license_type: permissive | gha_license_id: BSD-2-Clause
visit_date: 2023-08-30T16:21:33.754658 | revision_date: 2023-08-30T09:26:50 | committer_date: 2023-08-30T09:40:48
github_id: 3910945 | star_events_count: 2558 | fork_events_count: 452
gha_event_created_at: 2023-09-14T11:25:06 | gha_created_at: 2012-04-02T19:56:59 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 4312 | extension: py | filename: _meta_test.py
content:
from datetime import datetime, timedelta
from unittest.mock import Mock

import pytest
import sqlalchemy as sa
from h_matchers import Any

from h.models import Document, DocumentMeta
from h.models.document import ConcurrentUpdateError, create_or_update_document_meta


class TestDocumentMeta:
    def test_repr(self):
        meta = DocumentMeta(id=1234)
        repr_string = repr(meta)
        assert "DocumentMet" in repr_string
        assert "1234" in repr_string


class TestCreateOrUpdateDocumentMeta:
    def test_it_creates_a_new_DocumentMeta_if_there_is_no_existing_one(
        self, db_session, meta_attrs
    ):
        # Add one non-matching DocumentMeta to the database to be ignored.
        db_session.add(DocumentMeta(**dict(meta_attrs, type="noise")))

        create_or_update_document_meta(session=db_session, **meta_attrs)

        document_meta = db_session.query(DocumentMeta).all()[-1]
        assert document_meta == Any.object.with_attrs(meta_attrs)

    @pytest.mark.parametrize("correct_document", (True, False))
    def test_it_updates_an_existing_DocumentMeta_if_there_is_one(
        self, db_session, meta_attrs, correct_document
    ):
        original_attrs = meta_attrs
        updated_attrs = dict(
            original_attrs,
            value="new value",
            # This should be ignored either way.
            document=meta_attrs["document"] if correct_document else Document(),
            created=datetime.now(),  # This should be ignored.
            updated=datetime.now(),
        )
        document_meta = DocumentMeta(**original_attrs)
        db_session.add(document_meta)

        create_or_update_document_meta(session=db_session, **updated_attrs)

        assert document_meta.value == updated_attrs["value"]
        assert document_meta.updated == updated_attrs["updated"]
        assert document_meta.created == original_attrs["created"]
        assert document_meta.document == original_attrs["document"]
        assert (
            len(db_session.query(DocumentMeta).all()) == 1
        ), "It shouldn't have added any new objects to the db"

    @pytest.mark.parametrize(
        "doc_title,final_title",
        ((None, "attr_title"), ("", "attr_title"), ("doc_title", "doc_title")),
    )
    def test_it_denormalizes_title_to_document_when_falsy(
        self, db_session, meta_attrs, doc_title, final_title
    ):
        meta_attrs["value"] = ["attr_title"]
        meta_attrs["document"] = document = Document(title=doc_title)
        db_session.add(document)

        create_or_update_document_meta(session=db_session, **meta_attrs)

        document = db_session.query(Document).get(document.id)
        assert document.title == final_title

    def test_it_logs_a_warning_with_existing_meta_on_a_different_doc(
        self, log, mock_db_session, factories, meta_attrs
    ):
        document_one = factories.Document()
        document_two = factories.Document()
        existing_document_meta = factories.DocumentMeta(document=document_one)
        mock_db_session.query.return_value.filter.return_value.one_or_none.return_value = (
            existing_document_meta
        )

        create_or_update_document_meta(
            session=mock_db_session, **dict(meta_attrs, document=document_two)
        )

        assert log.warning.call_count == 1

    def test_raises_retryable_error_when_flush_fails(
        self, db_session, monkeypatch, meta_attrs
    ):
        def err():
            raise sa.exc.IntegrityError(None, None, None)

        monkeypatch.setattr(db_session, "flush", err)

        with pytest.raises(ConcurrentUpdateError):
            with db_session.no_autoflush:  # prevent premature IntegrityError
                create_or_update_document_meta(session=db_session, **meta_attrs)

    @pytest.fixture
    def meta_attrs(self):
        return {
            "claimant": "http://example.com/claimant",
            "type": "title",
            "value": "the title",
            "document": Document(),
            "created": datetime.now() - timedelta(days=1),
            "updated": datetime.now(),
        }

    @pytest.fixture()
    def mock_db_session(self, db_session):
        return Mock(spec=db_session)

    @pytest.fixture
    def log(self, patch):
        return patch("h.models.document._meta.log")

repo_name: AirtestProject/Airtest | path: /tests/test_motionevents.py
blob_id: 03b786441a9c02ac448ccea3e47087c4310bc7a2 | directory_id: 7c3bace625eb6ece1b06326940b8e89ba3fdc68f | content_id: 8d1edc1d523993ea97fd6bddc59162833fc94355
snapshot_id: 64c218a54e6a28cba42af0ffe8a81d30703ffcca | revision_id: bf49dfad0be05125df75c64ea47a282132bc03d5 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-31T05:31:32.059552 | revision_date: 2023-08-22T06:23:36 | committer_date: 2023-08-22T06:23:36
github_id: 118709540 | star_events_count: 7580 | fork_events_count: 1256
gha_event_created_at: 2023-08-22T06:23:37 | gha_created_at: 2018-01-24T04:00:35 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2025 | extension: py | filename: test_motionevents.py
content:
# encoding=utf-8
from airtest.core.android.android import ADB, Android
from airtest.core.android.touch_methods.base_touch import MotionEvent, DownEvent, UpEvent, MoveEvent, SleepEvent

import unittest
import warnings
import time

warnings.simplefilter("always")


class TestMotionEvents(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.device = Android()

    @classmethod
    def tearDownClass(cls):
        pass

    def test_multi_touch(self):
        """
        Test tapping with two fingers at the same time.
        """
        multitouch_event = [
            DownEvent((100, 100), 0),
            DownEvent((300, 300), 1),  # second finger
            SleepEvent(1),
            UpEvent(0), UpEvent(1)]
        self.device.touch_proxy.perform(multitouch_event)

    def test_swipe(self):
        """
        Test a swipe gesture.
        """
        swipe_event = [DownEvent((500, 500)), SleepEvent(0.1)]
        for i in range(5):
            swipe_event.append(MoveEvent((500 + 100 * i, 500 + 100 * i)))
            swipe_event.append(SleepEvent(0.2))
        swipe_event.append(UpEvent())
        self.device.touch_proxy.perform(swipe_event)

    def test_retry_touch(self):
        """
        Test that the connection is retried automatically when the command
        stream is abnormal.
        """
        # On some Android 10 models, sending events out of order can cause
        # maxtouch to disconnect.
        events = [MoveEvent((100, 100), 0), UpEvent(), DownEvent((165, 250), 0), SleepEvent(0.2), UpEvent(), DownEvent((165, 250), 0), SleepEvent(0.2), UpEvent()]
        self.device.touch_proxy.perform(events)
        time.sleep(3)
        self.device.touch((165, 250))

    def test_horizontal(self):
        """
        If the device is in landscape orientation, coordinate conversion must
        be applied (it is harmless in portrait too).
        """
        ori_transformer = self.device.touch_proxy.ori_transformer
        touch_landscape_point = [DownEvent(ori_transformer((100, 100))), SleepEvent(1), UpEvent()]
        self.device.touch_proxy.perform(touch_landscape_point)

repo_name: LongxingTan/Time-series-prediction | path: /tests/test_models/test_deepar.py
blob_id: cbf4c87670ea1be043918275f902a71c6de57020 | directory_id: a98c221480dea8907aede403606b4450ec86b9f3 | content_id: f6d02d93335c66937a11c9e29f7758aca2e7ef56
snapshot_id: c38c05724b50e36e43647ddef591582556578822 | revision_id: b6381e47fa110b76334b907ff3973bdb2d0f1091 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-31T18:00:31.865106 | revision_date: 2023-08-19T15:48:03 | committer_date: 2023-08-19T15:48:03
github_id: 131993562 | star_events_count: 746 | fork_events_count: 152
gha_event_created_at: 2023-09-12T11:59:21 | gha_created_at: 2018-05-03T12:43:20 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 622 | extension: py | filename: test_deepar.py
content:
import unittest

import tensorflow as tf

import tfts
from tfts import AutoModel, KerasTrainer, Trainer
from tfts.models.deepar import DeepAR


class DeepARTest(unittest.TestCase):
    def test_model(self):
        predict_sequence_length = 8
        custom_model_params = {}
        model = DeepAR(predict_sequence_length=predict_sequence_length, custom_model_params=custom_model_params)
        x = tf.random.normal([2, predict_sequence_length, 3])
        loc, scale = model(x)
        self.assertEqual(loc.shape, (2, predict_sequence_length, 1))
        self.assertEqual(scale.shape, (2, predict_sequence_length, 1))

repo_name: sanic-org/sanic | path: /sanic/base/root.py
blob_id: e261082c059ac91513602805f891b8dbc8bf8dad | directory_id: beab4b9703df6c4e9bda54fada11a6d985ea2c5a | content_id: 3f3ba58e56448a77ccf7700da95147636349b980
snapshot_id: d3db62482914061a1f6a8f7d94b6127c2876cb3e | revision_id: 47215d4635184bdfb1d5cff000d19390f19219ab | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-05T01:04:31.432228 | revision_date: 2023-08-30T17:03:22 | committer_date: 2023-08-30T17:03:22
github_id: 59720190 | star_events_count: 3523 | fork_events_count: 439
gha_event_created_at: 2023-09-14T05:45:11 | gha_created_at: 2016-05-26T04:38:22 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2075 | extension: py | filename: root.py
content:
import re
from typing import Any, Optional
from sanic.base.meta import SanicMeta
from sanic.exceptions import SanicException
from sanic.mixins.exceptions import ExceptionMixin
from sanic.mixins.listeners import ListenerMixin
from sanic.mixins.middleware import MiddlewareMixin
from sanic.mixins.routes import RouteMixin
from sanic.mixins.signals import SignalMixin
from sanic.mixins.static import StaticMixin
VALID_NAME = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_\-]*$")
class BaseSanic(
RouteMixin,
StaticMixin,
MiddlewareMixin,
ListenerMixin,
ExceptionMixin,
SignalMixin,
metaclass=SanicMeta,
):
__slots__ = ("name",)
def __init__(
self, name: Optional[str] = None, *args: Any, **kwargs: Any
) -> None:
class_name = self.__class__.__name__
if name is None:
raise SanicException(
f"{class_name} instance cannot be unnamed. "
"Please use Sanic(name='your_application_name') instead.",
)
if not VALID_NAME.match(name):
raise SanicException(
f"{class_name} instance named '{name}' uses an invalid "
"format. Names must begin with a character and may only "
"contain alphanumeric characters, _, or -."
)
self.name = name
for base in BaseSanic.__bases__:
base.__init__(self, *args, **kwargs) # type: ignore
def __str__(self) -> str:
return f"<{self.__class__.__name__} {self.name}>"
def __repr__(self) -> str:
return f'{self.__class__.__name__}(name="{self.name}")'
def __setattr__(self, name: str, value: Any) -> None:
try:
super().__setattr__(name, value)
except AttributeError as e:
raise AttributeError(
f"Setting variables on {self.__class__.__name__} instances is "
"not allowed. You should change your "
f"{self.__class__.__name__} instance to use "
f"instance.ctx.{name} instead.",
) from e
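# Hedged usage sketch: the name validation above is what application authors
# hit when constructing a Sanic app (Sanic subclasses BaseSanic):
#
#   from sanic import Sanic
#   app = Sanic("MyApp")    # valid: begins with a letter
#   Sanic("1bad name")      # raises SanicException: invalid format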
|
9a79f391b16bb0613072345a9bcfdb75c3c4fc3e
|
559f3dec0964d2e0f86c6c871371fe779cf3726c
|
/contrib/CityscapesSOTA/scripts/train.py
|
a5a41202be788049f9f4cbc638e8f05d0733fd43
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleSeg
|
319ab26665ea492527a1949671650135123ffc39
|
2c8c35a8949fef74599f5ec557d340a14415f20d
|
refs/heads/release/2.8
| 2023-08-31T09:08:06.724717
| 2023-08-18T01:59:56
| 2023-08-18T01:59:56
| 204,380,779
| 8,531
| 1,866
|
Apache-2.0
| 2023-09-12T02:30:42
| 2019-08-26T02:32:22
|
Python
|
UTF-8
|
Python
| false
| false
| 11,777
|
py
|
train.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from collections import deque
import shutil
import paddle
import paddle.nn.functional as F
from paddleseg.utils import TimeAverager, calculate_eta, resume, logger
from paddleseg.core.val import evaluate
def check_logits_losses(logits_list, losses):
len_logits = len(logits_list)
len_losses = len(losses['types'])
if len_logits != len_losses:
raise RuntimeError(
'The length of logits_list should equal to the types of loss config: {} != {}.'
.format(len_logits, len_losses))
def loss_computation(logits_list, labels, losses, edges=None):
check_logits_losses(logits_list, losses)
loss_list = []
for i in range(len(logits_list)):
logits = logits_list[i]
loss_i = losses['types'][i]
coef_i = losses['coef'][i]
if loss_i.__class__.__name__ in ('BCELoss', 'FocalLoss'
) and loss_i.edge_label:
            # Use edges as labels, depending on the loss type.
loss_list.append(coef_i * loss_i(logits, edges))
elif loss_i.__class__.__name__ == 'MixedLoss':
mixed_loss_list = loss_i(logits, labels)
for mixed_loss in mixed_loss_list:
loss_list.append(coef_i * mixed_loss)
elif loss_i.__class__.__name__ in ("KLLoss", ):
loss_list.append(coef_i *
loss_i(logits_list[0], logits_list[1].detach()))
else:
loss_list.append(coef_i * loss_i(logits, labels))
return loss_list
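# Hedged sketch of the `losses` dict this function expects: one loss type and
# one coefficient per logits output of the model. The concrete loss classes
# below are assumptions drawn from paddleseg.models.losses.
#
#   from paddleseg.models import losses as seg_losses
#   losses = {
#       'types': [seg_losses.CrossEntropyLoss(), seg_losses.CrossEntropyLoss()],
#       'coef': [1.0, 0.4],
#   }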
def train(model,
train_dataset,
val_dataset=None,
aug_eval=False,
flip_horizontal_eval=False,
optimizer=None,
save_dir='output',
iters=10000,
batch_size=2,
resume_model=None,
save_interval=1000,
log_iters=10,
num_workers=0,
use_vdl=False,
losses=None,
keep_checkpoint_max=5):
"""
Launch training.
Args:
        model(nn.Layer): A semantic segmentation model.
        train_dataset (paddle.io.Dataset): Used to read and process training datasets.
        val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets.
        aug_eval (bool, optional): Whether to use multi-scale and flip augmentation for evaluation. Default: False.
        flip_horizontal_eval (bool, optional): Whether to flip horizontally during evaluation. It is valid when `aug_eval` is True. Default: False.
        optimizer (paddle.optimizer.Optimizer): The optimizer.
        save_dir (str, optional): The directory for saving the model snapshot. Default: 'output'.
        iters (int, optional): How many iters to train the model. Default: 10000.
batch_size (int, optional): Mini batch size of one gpu or cpu. Default: 2.
resume_model (str, optional): The path of resume model.
save_interval (int, optional): How many iters to save a model snapshot once during training. Default: 1000.
log_iters (int, optional): Display logging information at every log_iters. Default: 10.
num_workers (int, optional): Num workers for data loader. Default: 0.
use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False.
losses (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']).
The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient.
keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5.
"""
nranks = paddle.distributed.ParallelEnv().nranks
local_rank = paddle.distributed.ParallelEnv().local_rank
start_iter = 0
if resume_model is not None:
start_iter = resume(model, optimizer, resume_model)
if not os.path.isdir(save_dir):
if os.path.exists(save_dir):
os.remove(save_dir)
os.makedirs(save_dir)
if nranks > 1:
# Initialize parallel training environment.
paddle.distributed.init_parallel_env()
ddp_model = paddle.DataParallel(model)
batch_sampler = paddle.io.DistributedBatchSampler(
train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
loader = paddle.io.DataLoader(
train_dataset,
batch_sampler=batch_sampler,
num_workers=num_workers,
return_list=True, )
if use_vdl:
from visualdl import LogWriter
log_writer = LogWriter(save_dir)
avg_loss = 0.0
avg_loss_list = []
iters_per_epoch = len(batch_sampler)
best_mean_iou = -1.0
best_model_iter = -1
reader_cost_averager = TimeAverager()
batch_cost_averager = TimeAverager()
save_models = deque()
batch_start = time.time()
iter = start_iter
while iter < iters:
for data in loader:
iter += 1
if iter > iters:
break
reader_cost_averager.record(time.time() - batch_start)
images = data[0]
labels = data[1].astype('int64')
edges = None
if len(data) == 3:
edges = data[2].astype('int64')
if hasattr(train_dataset,
'shuffle') and iter % iters_per_epoch == 0:
train_dataset.shuffle()
if nranks > 1:
logits_list = ddp_model(images)
else:
logits_list = model(images)
loss_list = loss_computation(
logits_list=logits_list,
labels=labels,
losses=losses,
edges=edges)
loss = sum(loss_list)
loss.backward()
optimizer.step()
lr = optimizer.get_lr()
if isinstance(optimizer._learning_rate,
paddle.optimizer.lr.LRScheduler):
optimizer._learning_rate.step()
model.clear_gradients()
avg_loss += float(loss)
if not avg_loss_list:
avg_loss_list = [l.numpy() for l in loss_list]
else:
for i in range(len(loss_list)):
avg_loss_list[i] += loss_list[i].numpy()
batch_cost_averager.record(
time.time() - batch_start, num_samples=batch_size)
if (iter) % log_iters == 0 and local_rank == 0:
avg_loss /= log_iters
avg_loss_list = [l[0] / log_iters for l in avg_loss_list]
remain_iters = iters - iter
avg_train_batch_cost = batch_cost_averager.get_average()
avg_train_reader_cost = reader_cost_averager.get_average()
eta = calculate_eta(remain_iters, avg_train_batch_cost)
logger.info(
"[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}"
.format((iter - 1
) // iters_per_epoch + 1, iter, iters, avg_loss,
lr, avg_train_batch_cost, avg_train_reader_cost,
batch_cost_averager.get_ips_average(), eta))
if use_vdl:
log_writer.add_scalar('Train/loss', avg_loss, iter)
# Record all losses if there are more than 2 losses.
if len(avg_loss_list) > 1:
avg_loss_dict = {}
for i, value in enumerate(avg_loss_list):
avg_loss_dict['loss_' + str(i)] = value
for key, value in avg_loss_dict.items():
log_tag = 'Train/' + key
log_writer.add_scalar(log_tag, value, iter)
log_writer.add_scalar('Train/lr', lr, iter)
log_writer.add_scalar('Train/batch_cost',
avg_train_batch_cost, iter)
log_writer.add_scalar('Train/reader_cost',
avg_train_reader_cost, iter)
avg_loss = 0.0
avg_loss_list = []
reader_cost_averager.reset()
batch_cost_averager.reset()
if (iter % save_interval == 0 or
iter == iters) and (val_dataset is not None):
num_workers = 1 if num_workers > 0 else 0
metrics = evaluate(
model,
val_dataset,
aug_eval=aug_eval,
scales=1.0,
flip_horizontal=flip_horizontal_eval,
flip_vertical=False,
is_slide=False,
stride=None,
crop_size=None,
num_workers=num_workers)
mean_iou, acc = metrics[0], metrics[1]
model.train()
if (iter % save_interval == 0 or iter == iters) and local_rank == 0:
current_save_dir = os.path.join(save_dir,
"iter_{}".format(iter))
if not os.path.isdir(current_save_dir):
os.makedirs(current_save_dir)
paddle.save(model.state_dict(),
os.path.join(current_save_dir, 'model.pdparams'))
paddle.save(optimizer.state_dict(),
os.path.join(current_save_dir, 'model.pdopt'))
save_models.append(current_save_dir)
if len(save_models) > keep_checkpoint_max > 0:
model_to_remove = save_models.popleft()
shutil.rmtree(model_to_remove)
if val_dataset is not None:
if mean_iou > best_mean_iou:
best_mean_iou = mean_iou
best_model_iter = iter
best_model_dir = os.path.join(save_dir, "best_model")
paddle.save(
model.state_dict(),
os.path.join(best_model_dir, 'model.pdparams'))
logger.info(
'[EVAL] The model with the best validation mIoU ({:.4f}) was saved at iter {}.'
.format(best_mean_iou, best_model_iter))
if use_vdl:
log_writer.add_scalar('Evaluate/mIoU', mean_iou, iter)
log_writer.add_scalar('Evaluate/Acc', acc, iter)
batch_start = time.time()
# Calculate flops.
if local_rank == 0:
def count_syncbn(m, x, y):
x = x[0]
nelements = x.numel()
m.total_ops += int(2 * nelements)
_, c, h, w = images.shape
flops = paddle.flops(
model, [1, c, h, w],
custom_ops={paddle.nn.SyncBatchNorm: count_syncbn})
logger.info(flops)
# Sleep for half a second to let dataloader release resources.
time.sleep(0.5)
if use_vdl:
log_writer.close()
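# Hedged usage sketch (model, datasets, optimizer and losses are assumed to
# come from the usual PaddleSeg config machinery; the values are illustrative):
#
#   train(model, train_dataset, val_dataset=val_dataset,
#         optimizer=optimizer, losses=losses,
#         iters=80000, batch_size=2, save_interval=1000, use_vdl=True)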
|
8a8c5dc873ff682a87ad3bab54fa485dc4bac398
|
fe85b4811c93510006b666858d6029156f167f89
|
/scripts/speedtest-fs.py
|
56eb234f3fb4e9a8ce4ecceea370de84c08e93e5
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
9001/copyparty
|
39207421ccdc501566105da562a168996e0f9b4c
|
48a3898aa692770735a926b0c18300d7da8b021f
|
refs/heads/hovudstraum
| 2023-08-18T15:19:36.934124
| 2023-08-16T19:57:19
| 2023-08-16T19:57:19
| 188,700,274
| 273
| 21
|
MIT
| 2023-08-09T20:50:27
| 2019-05-26T15:28:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,677
|
py
|
speedtest-fs.py
|
#!/usr/bin/env python3
import os
import sys
import stat
import time
import signal
import traceback
import threading
from queue import Queue
"""speedtest-fs: filesystem performance estimate"""
__author__ = "ed <copyparty@ocv.me>"
__copyright__ = 2020
__license__ = "MIT"
__url__ = "https://github.com/9001/copyparty/"
def get_spd(nbyte, nfiles, nsec):
if not nsec:
return "0.000 MB 0 files 0.000 sec 0.000 MB/s 0.000 f/s"
mb = nbyte / (1024 * 1024.0)
spd = mb / nsec
nspd = nfiles / nsec
return f"{mb:.3f} MB {nfiles} files {nsec:.3f} sec {spd:.3f} MB/s {nspd:.3f} f/s"
class Inf(object):
def __init__(self, t0):
self.msgs = []
self.errors = []
self.reports = []
self.mtx_msgs = threading.Lock()
self.mtx_reports = threading.Lock()
self.n_byte = 0
self.n_file = 0
self.n_sec = 0
self.n_done = 0
self.t0 = t0
thr = threading.Thread(target=self.print_msgs)
thr.daemon = True
thr.start()
def msg(self, fn, n_read):
with self.mtx_msgs:
self.msgs.append(f"{fn} {n_read}")
def err(self, fn):
with self.mtx_reports:
self.errors.append(f"{fn}\n{traceback.format_exc()}")
def print_msgs(self):
while True:
time.sleep(0.02)
with self.mtx_msgs:
msgs = self.msgs
self.msgs = []
if not msgs:
continue
msgs = msgs[-64:]
spd = get_spd(self.n_byte, len(self.reports), self.n_sec)
msgs = [f"{spd} {x}" for x in msgs]
print("\n".join(msgs))
def report(self, fn, n_byte, n_sec):
with self.mtx_reports:
self.reports.append([n_byte, n_sec, fn])
self.n_byte += n_byte
self.n_sec += n_sec
def done(self):
with self.mtx_reports:
self.n_done += 1
def get_files(dir_path):
for fn in os.listdir(dir_path):
fn = os.path.join(dir_path, fn)
st = os.stat(fn).st_mode
if stat.S_ISDIR(st):
yield from get_files(fn)
if stat.S_ISREG(st):
yield fn
def worker(q, inf, read_sz):
while True:
fn = q.get()
if not fn:
break
n_read = 0
try:
t0 = time.time()
with open(fn, "rb") as f:
while True:
buf = f.read(read_sz)
if not buf:
break
n_read += len(buf)
inf.msg(fn, n_read)
inf.report(fn, n_read, time.time() - t0)
        except Exception:
inf.err(fn)
inf.done()
def sighandler(signo, frame):
os._exit(0)
def main():
signal.signal(signal.SIGINT, sighandler)
root = "."
if len(sys.argv) > 1:
root = sys.argv[1]
t0 = time.time()
q = Queue(256)
inf = Inf(t0)
num_threads = 8
read_sz = 32 * 1024
targs = (q, inf, read_sz)
for _ in range(num_threads):
thr = threading.Thread(target=worker, args=targs)
thr.daemon = True
thr.start()
for fn in get_files(root):
q.put(fn)
for _ in range(num_threads):
q.put(None)
while inf.n_done < num_threads:
time.sleep(0.1)
t2 = time.time()
print("\n")
log = inf.reports
log.sort()
for nbyte, nsec, fn in log[-64:]:
spd = get_spd(nbyte, len(log), nsec)
print(f"{spd} {fn}")
print()
print("\n".join(inf.errors))
print(get_spd(inf.n_byte, len(log), t2 - t0))
if __name__ == "__main__":
main()
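# Hedged usage sketch: point the script at a directory tree to estimate
# sequential read throughput (it defaults to the current directory):
#
#   python3 speedtest-fs.py /path/to/dir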
|
3c29eaf0b5550cf301aaa842cfe7659f2665d945
|
c71b7a8a9dd7bf7c9496b1df2acc1e52a2a913d0
|
/onadata/libs/serializers/user_profile_serializer.py
|
e1003f2a629bfece9549736b7484d85b14fc8fd0
|
[
"BSD-2-Clause"
] |
permissive
|
kobotoolbox/kobocat
|
a5c6fb6a9d3dabe71b5e3c082e4261c4475cbf7f
|
b8d93d4da649f323af111cf7247206554be7c8b1
|
refs/heads/main
| 2023-08-10T00:05:49.384348
| 2023-07-06T04:47:59
| 2023-07-06T04:47:59
| 14,497,749
| 101
| 135
|
BSD-2-Clause
| 2023-09-13T14:57:13
| 2013-11-18T16:16:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
user_profile_serializer.py
|
# coding: utf-8
from rest_framework import serializers
from onadata.apps.main.models import UserProfile
from onadata.libs.constants import CAN_VIEW_PROFILE
from onadata.libs.serializers.fields.json_field import JsonField
class UserProfileSerializer(serializers.Serializer):
id = serializers.ReadOnlyField(source='user.id')
username = serializers.ReadOnlyField(source='user.username')
name = serializers.ReadOnlyField()
email = serializers.ReadOnlyField(source='user.email')
city = serializers.ReadOnlyField()
country = serializers.ReadOnlyField()
organization = serializers.ReadOnlyField()
website = serializers.ReadOnlyField(source='home_page')
twitter = serializers.ReadOnlyField()
gravatar = serializers.ReadOnlyField()
require_auth = serializers.ReadOnlyField()
class Meta:
model = UserProfile
fields = (
'id',
'username',
'name',
'email',
'city',
'country',
'organization',
'website',
'twitter',
'gravatar',
'require_auth',
)
def to_representation(self, obj):
"""
Serialize objects -> primitives.
"""
ret = super().to_representation(obj)
        request = self.context.get('request')
        # Hide the email unless the requester is allowed to view this profile.
        if 'email' in ret and (
            request is None
            or (request.user
                and not request.user.has_perm(CAN_VIEW_PROFILE, obj))
        ):
            del ret['email']
return ret
class UserProfileWithTokenSerializer(UserProfileSerializer):
api_token = serializers.SerializerMethodField()
temp_token = serializers.SerializerMethodField()
class Meta:
model = UserProfile
fields = (
'id',
'username',
'name',
'email',
'city',
'country',
'organization',
'website',
'twitter',
'gravatar',
'require_auth',
'user',
'api_token',
'temp_token',
)
def get_api_token(self, object):
return object.user.auth_token.key
def get_temp_token(self, object):
request = self.context['request']
session_key = None
if request:
session = request.session
session_key = session.session_key
return session_key
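# Hedged usage sketch (`profile` and `request` are assumed objects, not
# defined in this module): the serializer drops 'email' from its output
# unless the requesting user holds CAN_VIEW_PROFILE on the profile.
#
#   data = UserProfileSerializer(profile, context={'request': request}).data
#   # 'email' is absent from `data` for viewers without the permission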
|
07738ef51c6e6a0b0c2f95de84292af769799e61
|
4a6e511a6e931756390f0f26a79026e244bb8c63
|
/esda/tests/test_topo.py
|
f4c002ca3e63d60133e21f16686c41c4bcac4328
|
[
"BSD-3-Clause"
] |
permissive
|
pysal/esda
|
a19bd295d805bd70048bd9ee1e4f4a003efe7c1e
|
68125173e2992e02246711795b1e1b55c12f0db5
|
refs/heads/main
| 2023-08-22T02:12:25.327911
| 2023-08-04T00:00:22
| 2023-08-04T00:00:22
| 81,873,636
| 191
| 55
|
BSD-3-Clause
| 2023-09-05T10:39:41
| 2017-02-13T21:26:48
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,927
|
py
|
test_topo.py
|
from unittest import TestCase
import numpy
import pandas
from ..topo import isolation, prominence, to_elevation, weights
class TopoTester(TestCase):
def setUp(self):
self.points = numpy.array(
[[0, 0], [0, 1], [1, 1], [2, 0.5], [0.5, 0.5], [0.75, 0]]
)
self.marks = numpy.array([-1, 0.5, 1, 2, 3, 1.25])
self.cxn = weights.Voronoi(self.points)
def test_prominence_valid(self):
w = self.cxn
marks = self.marks
prom = prominence(marks, w, verbose=False, progressbar=False)
assert numpy.isnan(prom).sum().item() == 3
assert (prom == 0).sum().item() == 1
assert prom[-2] == 1.75
assert prom[-3] == 0.75
marks2 = marks.copy()
marks2[-2] = -3
prom = prominence(marks2, w, verbose=False, progressbar=False)
assert prom[~numpy.isnan(prom)].max() == 5
assert prom[~numpy.isnan(prom)].min() == 0
assert numpy.isnan(prom).sum() == 3
def test_prominence_options(self):
marks = self.marks
cxn = self.cxn
default = prominence(marks, cxn)
retvals = prominence(marks, cxn, return_all=True)
middle = prominence(marks, cxn, middle="median")
assert isinstance(default, numpy.ndarray)
assert isinstance(retvals, pandas.DataFrame)
numpy.testing.assert_array_equal(default, retvals.prominence.values)
assert not numpy.allclose(default, middle)
def test_isolation_options(self):
marks = self.marks
points = self.points
default = isolation(marks, points)
retvals = isolation(marks, points, return_all=True)
metrics = isolation(marks, points, metric="haversine")
middle = isolation(marks, points, middle="median")
assert isinstance(default, numpy.ndarray)
assert isinstance(retvals, pandas.DataFrame)
numpy.testing.assert_array_equal(default, retvals.isolation)
assert not numpy.allclose(default, metrics)
assert not numpy.allclose(default, middle)
def test_isolation_valid(self):
# results should be valid
marks = self.marks
points = self.points
iso = isolation(marks, points, return_all=True).assign(marks=marks)
assert iso.loc[0, "index"] == 0
assert numpy.isnan(iso.loc[4, "parent_rank"])
assert (iso.dropna().parent_index == 4).all()
assert (
iso.sort_values("marks", ascending=False).index == (
iso.sort_values("rank").index
)
).all()
assert iso.loc[3, "isolation"] == 1.5
assert iso.loc[2, "gap"] == (
marks[iso.loc[2, "parent_index"].astype(int)] - marks[2]
)
marks2 = self.marks.copy()
marks2[-2] = 0
iso = isolation(marks2, points, return_all=True).assign(marks=marks2)
assert iso.loc[0, "index"] == 0
assert numpy.isnan(iso.loc[3, "parent_index"])
assert (iso.dropna().parent_index == [4, 2, 5, 5, 3]).all()
assert (
iso.sort_values("marks", ascending=False).index == (
iso.sort_values("rank").index
)
).all()
assert iso.loc[1, "isolation"] == 1
assert iso.loc[2, "gap"] == (
marks2[iso.loc[2, "parent_index"].astype(int)] - marks2[2]
)
def test_to_elevation(self):
onedim = to_elevation(self.marks)
twodim = to_elevation(self.points)
random = to_elevation(numpy.random.normal(size=(100, 4)))
assert onedim.ndim == 1
assert onedim.min() == 0
assert onedim.max() == 4
shift = self.marks - self.marks.min()
assert (onedim == shift).all()
assert twodim.ndim == 1
assert twodim.min() == 0
assert twodim.max() > 1
assert random.ndim == 1
assert random.min() >= 0
assert random.max() >= 0
|
6fc9331ed9fc3855dac4f2f1565c421c2a35ed67
|
a9d9df92f8a61fa3f3649b408e8457b8fdb0b85a
|
/knowledge_repo/app/utils/image.py
|
4521f7cb34f6cf5196ee65484c50f7f3f1dfc8bf
|
[
"Apache-2.0"
] |
permissive
|
airbnb/knowledge-repo
|
ae7e122e10e505c568511999cf55352eb74646e1
|
71fd3fd42db7866257f86f37235ca0b5d81f5378
|
refs/heads/master
| 2023-07-23T08:06:15.180434
| 2023-04-17T04:04:39
| 2023-04-17T04:04:39
| 65,949,398
| 5,668
| 846
|
Apache-2.0
| 2023-07-20T11:16:46
| 2016-08-17T23:32:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
image.py
|
from knowledge_repo._version import __optional_dependencies__
from knowledge_repo.constants import PDF
from knowledge_repo.utils.dependencies import check_dependencies
import imghdr
import io
import os
ALLOWED_IMAGE_TYPES = ('png', 'jpeg', 'gif')
def get_file_extension(filepath):
return os.path.splitext(filepath)[1]
def is_allowed_image_format(img_file):
""" Checks if a given file is an image"""
return imghdr.what(img_file) in ALLOWED_IMAGE_TYPES
def is_pdf(filename):
return get_file_extension(filename) == '.pdf'
def pdf_page_to_png(src_pdf, pagenum=0, resolution=154):
"""
Returns specified PDF page as wand.image.Image png.
:param PyPDF2.PdfFileReader src_pdf: PDF from which to take pages.
:param int pagenum: Page number to take.
:param int resolution: Resolution for resulting png in DPI.
"""
check_dependencies(__optional_dependencies__[PDF])
# Import libraries within this function so as to avoid
# import-time dependence
import PyPDF2
# TODO: When we start using this again, document which
# system-level libraries are required.
from wand.image import Image
dst_pdf = PyPDF2.PdfFileWriter()
dst_pdf.addPage(src_pdf.getPage(pagenum))
pdf_bytes = io.BytesIO()
dst_pdf.write(pdf_bytes)
pdf_bytes.seek(0)
img = Image(file=pdf_bytes, resolution=resolution)
img.convert("png")
return img
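# Hedged usage sketch (assumes the optional PDF dependencies are installed;
# "doc.pdf" and "page0.png" are placeholder paths):
#
#   import PyPDF2
#   with open("doc.pdf", "rb") as f:
#       src = PyPDF2.PdfFileReader(f)
#       png = pdf_page_to_png(src, pagenum=0, resolution=154)
#       png.save(filename="page0.png")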
|
a38876a11d68f3db488d77498eae1eff14a04de9
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/django/contrib/auth/admin.py
|
f9532abc14162cbbc2e737f09653b2688d7d2353
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 9,153
|
py
|
admin.py
|
from django.conf import settings
from django.contrib import admin, messages
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.utils import unquote
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import (
AdminPasswordChangeForm,
UserChangeForm,
UserCreationForm,
)
from django.contrib.auth.models import Group, User
from django.core.exceptions import PermissionDenied
from django.db import router, transaction
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import path, reverse
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
@admin.register(Group)
class GroupAdmin(admin.ModelAdmin):
search_fields = ("name",)
ordering = ("name",)
filter_horizontal = ("permissions",)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == "permissions":
qs = kwargs.get("queryset", db_field.remote_field.model.objects)
# Avoid a major performance hit resolving permission names which
# triggers a content_type load:
kwargs["queryset"] = qs.select_related("content_type")
return super().formfield_for_manytomany(db_field, request=request, **kwargs)
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
add_form_template = "admin/auth/user/add_form.html"
change_user_password_template = None
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("first_name", "last_name", "email")}),
(
_("Permissions"),
{
"fields": (
"is_active",
"is_staff",
"is_superuser",
"groups",
"user_permissions",
),
},
),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
)
add_fieldsets = (
(
None,
{
"classes": ("wide",),
"fields": ("username", "password1", "password2"),
},
),
)
form = UserChangeForm
add_form = UserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ("username", "email", "first_name", "last_name", "is_staff")
list_filter = ("is_staff", "is_superuser", "is_active", "groups")
search_fields = ("username", "first_name", "last_name", "email")
ordering = ("username",)
filter_horizontal = (
"groups",
"user_permissions",
)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super().get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults["form"] = self.add_form
defaults.update(kwargs)
return super().get_form(request, obj, **defaults)
def get_urls(self):
return [
path(
"<id>/password/",
self.admin_site.admin_view(self.user_change_password),
name="auth_user_password_change",
),
] + super().get_urls()
# RemovedInDjango60Warning: when the deprecation ends, replace with:
# def lookup_allowed(self, lookup, value, request):
def lookup_allowed(self, lookup, value, request=None):
# Don't allow lookups involving passwords.
return not lookup.startswith("password") and super().lookup_allowed(
lookup, value, request
)
@sensitive_post_parameters_m
@csrf_protect_m
def add_view(self, request, form_url="", extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._add_view(request, form_url, extra_context)
def _add_view(self, request, form_url="", extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
"order to add users, Django requires that your user "
'account have both the "Add user" and "Change user" '
"permissions set."
)
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.opts.get_field(self.model.USERNAME_FIELD)
defaults = {
"auto_populated_fields": (),
"username_help_text": username_field.help_text,
}
extra_context.update(defaults)
return super().add_view(request, form_url, extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=""):
user = self.get_object(request, unquote(id))
if not self.has_change_permission(request, user):
raise PermissionDenied
if user is None:
raise Http404(
_("%(name)s object with primary key %(key)r does not exist.")
% {
"name": self.opts.verbose_name,
"key": escape(id),
}
)
if request.method == "POST":
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
change_message = self.construct_change_message(request, form, None)
self.log_change(request, user, change_message)
msg = gettext("Password changed successfully.")
messages.success(request, msg)
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(
reverse(
"%s:%s_%s_change"
% (
self.admin_site.name,
user._meta.app_label,
user._meta.model_name,
),
args=(user.pk,),
)
)
else:
form = self.change_password_form(user)
fieldsets = [(None, {"fields": list(form.base_fields)})]
admin_form = admin.helpers.AdminForm(form, fieldsets, {})
context = {
"title": _("Change password: %s") % escape(user.get_username()),
"adminForm": admin_form,
"form_url": form_url,
"form": form,
"is_popup": (IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET),
"is_popup_var": IS_POPUP_VAR,
"add": True,
"change": False,
"has_delete_permission": False,
"has_change_permission": True,
"has_absolute_url": False,
"opts": self.opts,
"original": user,
"save_as": False,
"show_save": True,
**self.admin_site.each_context(request),
}
request.current_app = self.admin_site.name
return TemplateResponse(
request,
self.change_user_password_template
or "admin/auth/user/change_password.html",
context,
)
def response_add(self, request, obj, post_url_continue=None):
"""
Determine the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if "_addanother" not in request.POST and IS_POPUP_VAR not in request.POST:
request.POST = request.POST.copy()
request.POST["_continue"] = 1
return super().response_add(request, obj, post_url_continue)
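# Hedged sketch: projects typically customize this admin by unregistering the
# stock User admin and re-registering a subclass (the extra column below is
# illustrative):
#
#   from django.contrib import admin
#   from django.contrib.auth.admin import UserAdmin
#   from django.contrib.auth.models import User
#
#   admin.site.unregister(User)
#
#   @admin.register(User)
#   class MyUserAdmin(UserAdmin):
#       list_display = UserAdmin.list_display + ("date_joined",)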
|
7d5ac3b82ce17cd409388b5bff242ad932529bc9
|
d0106bb9704e35392a96e20cc3d1ea2d33b6eab4
|
/apps/document/__init__.py
|
614a030ff8764263631334fab9f9aa2b6ffaeadc
|
[] |
no_license
|
openitsystem/itops
|
f0fb716e3061900af2dea13017d2c12ae4367a31
|
5552af663ed2c668a16b9c687c2a50ed02595a01
|
refs/heads/master
| 2023-03-09T05:57:23.373691
| 2021-05-19T07:27:59
| 2021-05-19T07:27:59
| 185,988,028
| 144
| 55
| null | 2022-12-27T15:36:28
| 2019-05-10T12:52:43
|
Python
|
UTF-8
|
Python
| false
| false
| 66
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
# @Time : 2019/2/1 11:24
# @Author :
|
690eb3c2ae0c2e6a8d88f7e8596c8594b736a549
|
ec7591c3f478c43e76257aaa500d8f6a2e763d74
|
/stanza/tests/common/test_data_conversion.py
|
d6d45da7200d3f7f5b845b89c485fe30b6ec2251
|
[
"Apache-2.0"
] |
permissive
|
stanfordnlp/stanza
|
5cc3dbe70a96dd565639b7dae1efde6b4fa76985
|
c530c9af647d521262b56b717bcc38b0cfc5f1b8
|
refs/heads/main
| 2023-09-01T12:01:38.980322
| 2023-03-14T16:10:05
| 2023-03-14T16:10:05
| 104,854,615
| 4,281
| 599
|
NOASSERTION
| 2023-09-10T00:31:36
| 2017-09-26T08:00:56
|
Python
|
UTF-8
|
Python
| false
| false
| 12,197
|
py
|
test_data_conversion.py
|
"""
Basic tests of the data conversion
"""
import os
import pytest
import tempfile
from zipfile import ZipFile
import stanza
from stanza.utils.conll import CoNLL
from stanza.models.common.doc import Document
from stanza.tests import *
pytestmark = pytest.mark.pipeline
# data for testing
CONLL = [[['1', 'Nous', 'il', 'PRON', '_', 'Number=Plur|Person=1|PronType=Prs', '3', 'nsubj', '_', 'start_char=0|end_char=4'],
['2', 'avons', 'avoir', 'AUX', '_', 'Mood=Ind|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin', '3', 'aux:tense', '_', 'start_char=5|end_char=10'],
['3', 'atteint', 'atteindre', 'VERB', '_', 'Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part', '0', 'root', '_', 'start_char=11|end_char=18'],
['4', 'la', 'le', 'DET', '_', 'Definite=Def|Gender=Fem|Number=Sing|PronType=Art', '5', 'det', '_', 'start_char=19|end_char=21'],
['5', 'fin', 'fin', 'NOUN', '_', 'Gender=Fem|Number=Sing', '3', 'obj', '_', 'start_char=22|end_char=25'],
['6-7', 'du', '_', '_', '_', '_', '_', '_', '_', 'start_char=26|end_char=28'],
['6', 'de', 'de', 'ADP', '_', '_', '8', 'case', '_', '_'],
['7', 'le', 'le', 'DET', '_', 'Definite=Def|Gender=Masc|Number=Sing|PronType=Art', '8', 'det', '_', '_'],
['8', 'sentier', 'sentier', 'NOUN', '_', 'Gender=Masc|Number=Sing', '5', 'nmod', '_', 'start_char=29|end_char=36'],
['9', '.', '.', 'PUNCT', '_', '_', '3', 'punct', '_', 'start_char=36|end_char=37']]]
DICT = [[{'id': (1,), 'text': 'Nous', 'lemma': 'il', 'upos': 'PRON', 'feats': 'Number=Plur|Person=1|PronType=Prs', 'head': 3, 'deprel': 'nsubj', 'misc': 'start_char=0|end_char=4'},
{'id': (2,), 'text': 'avons', 'lemma': 'avoir', 'upos': 'AUX', 'feats': 'Mood=Ind|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin', 'head': 3, 'deprel': 'aux:tense', 'misc': 'start_char=5|end_char=10'},
{'id': (3,), 'text': 'atteint', 'lemma': 'atteindre', 'upos': 'VERB', 'feats': 'Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part', 'head': 0, 'deprel': 'root', 'misc': 'start_char=11|end_char=18'},
{'id': (4,), 'text': 'la', 'lemma': 'le', 'upos': 'DET', 'feats': 'Definite=Def|Gender=Fem|Number=Sing|PronType=Art', 'head': 5, 'deprel': 'det', 'misc': 'start_char=19|end_char=21'},
{'id': (5,), 'text': 'fin', 'lemma': 'fin', 'upos': 'NOUN', 'feats': 'Gender=Fem|Number=Sing', 'head': 3, 'deprel': 'obj', 'misc': 'start_char=22|end_char=25'},
{'id': (6, 7), 'text': 'du', 'misc': 'start_char=26|end_char=28'},
{'id': (6,), 'text': 'de', 'lemma': 'de', 'upos': 'ADP', 'head': 8, 'deprel': 'case'},
{'id': (7,), 'text': 'le', 'lemma': 'le', 'upos': 'DET', 'feats': 'Definite=Def|Gender=Masc|Number=Sing|PronType=Art', 'head': 8, 'deprel': 'det'},
{'id': (8,), 'text': 'sentier', 'lemma': 'sentier', 'upos': 'NOUN', 'feats': 'Gender=Masc|Number=Sing', 'head': 5, 'deprel': 'nmod', 'misc': 'start_char=29|end_char=36'},
{'id': (9,), 'text': '.', 'lemma': '.', 'upos': 'PUNCT', 'head': 3, 'deprel': 'punct', 'misc': 'start_char=36|end_char=37'}]]
def test_conll_to_dict():
dicts = CoNLL.convert_conll(CONLL)
assert dicts == DICT
def test_dict_to_conll():
document = Document(DICT)
# :c = no comments
conll = [[sentence.split("\t") for sentence in doc.split("\n")] for doc in "{:c}".format(document).split("\n\n")]
assert conll == CONLL
def test_dict_to_doc_and_doc_to_dict():
"""
    Test the conversion from raw dict to Document and back.
    This code path first turns the misc start_char|end_char entries into
    separate start_char and end_char fields on the Document.
    Converting that Document back to a dict keeps them as separate fields.
    Finally, converting that dict to a list of conll entries should fold
    them back into the misc column.
"""
document = Document(DICT)
dicts = document.to_dict()
document = Document(dicts)
conll = [[sentence.split("\t") for sentence in doc.split("\n")] for doc in "{:c}".format(document).split("\n\n")]
assert conll == CONLL
# sample is two sentences long so that the tests check multiple sentences
RUSSIAN_SAMPLE="""
# sent_id = yandex.reviews-f-8xh5zqnmwak3t6p68y4rhwd4e0-1969-9253
# genre = review
# text = Как- то слишком мало цветов получают актёры после спектакля.
1 Как как-то ADV _ Degree=Pos|PronType=Ind 7 advmod _ SpaceAfter=No
2 - - PUNCT _ _ 3 punct _ _
3 то то PART _ _ 1 list _ deprel=list:goeswith
4 слишком слишком ADV _ Degree=Pos 5 advmod _ _
5 мало мало ADV _ Degree=Pos 6 advmod _ _
6 цветов цветок NOUN _ Animacy=Inan|Case=Gen|Gender=Masc|Number=Plur 7 obj _ _
7 получают получать VERB _ Aspect=Imp|Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin|Voice=Act 0 root _ _
8 актёры актер NOUN _ Animacy=Anim|Case=Nom|Gender=Masc|Number=Plur 7 nsubj _ _
9 после после ADP _ _ 10 case _ _
10 спектакля спектакль NOUN _ Animacy=Inan|Case=Gen|Gender=Masc|Number=Sing 7 obl _ SpaceAfter=No
11 . . PUNCT _ _ 7 punct _ _
# sent_id = 4
# genre = social
# text = В женщине важна верность, а не красота.
1 В в ADP _ _ 2 case _ _
2 женщине женщина NOUN _ Animacy=Anim|Case=Loc|Gender=Fem|Number=Sing 3 obl _ _
3 важна важный ADJ _ Degree=Pos|Gender=Fem|Number=Sing|Variant=Short 0 root _ _
4 верность верность NOUN _ Animacy=Inan|Case=Nom|Gender=Fem|Number=Sing 3 nsubj _ SpaceAfter=No
5 , , PUNCT _ _ 8 punct _ _
6 а а CCONJ _ _ 8 cc _ _
7 не не PART _ Polarity=Neg 8 advmod _ _
8 красота красота NOUN _ Animacy=Inan|Case=Nom|Gender=Fem|Number=Sing 4 conj _ SpaceAfter=No
9 . . PUNCT _ _ 3 punct _ _
""".strip()
RUSSIAN_TEXT = ["Как- то слишком мало цветов получают актёры после спектакля.", "В женщине важна верность, а не красота."]
RUSSIAN_IDS = ["yandex.reviews-f-8xh5zqnmwak3t6p68y4rhwd4e0-1969-9253", "4"]
def check_russian_doc(doc):
"""
Refactored the test for the Russian doc so we can use it to test various file methods
"""
lines = RUSSIAN_SAMPLE.split("\n")
assert len(doc.sentences) == 2
assert lines[0] == doc.sentences[0].comments[0]
assert lines[1] == doc.sentences[0].comments[1]
assert lines[2] == doc.sentences[0].comments[2]
for sent_idx, (expected_text, expected_id, sentence) in enumerate(zip(RUSSIAN_TEXT, RUSSIAN_IDS, doc.sentences)):
assert expected_text == sentence.text
assert expected_id == sentence.sent_id
assert sent_idx == sentence.index
assert len(sentence.comments) == 3
sentences = "{:C}".format(doc)
sentences = sentences.split("\n\n")
assert len(sentences) == 2
sentence = sentences[0].split("\n")
assert len(sentence) == 14
assert lines[0] == sentence[0]
assert lines[1] == sentence[1]
assert lines[2] == sentence[2]
# assert that the weird deprel=list:goeswith was properly handled
assert doc.sentences[0].words[2].head == 1
assert doc.sentences[0].words[2].deprel == "list:goeswith"
def test_write_russian_doc(tmp_path):
"""
Specifically test the write_doc2conll method
"""
filename = tmp_path / "russian.conll"
doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)
check_russian_doc(doc)
CoNLL.write_doc2conll(doc, filename)
with open(filename) as fin:
text = fin.read()
# the conll docs have to end with \n\n
assert text.endswith("\n\n")
# but to compare against the original, strip off the whitespace
text = text.strip()
# we skip the first sentence because the "deprel=list:goeswith" is weird
# note that the deprel itself is checked in check_russian_doc
text = text[text.find("# sent_id = 4"):]
sample = RUSSIAN_SAMPLE[RUSSIAN_SAMPLE.find("# sent_id = 4"):]
assert text == sample
doc2 = CoNLL.conll2doc(filename)
check_russian_doc(doc2)
def test_doc_with_comments():
"""
Test that a doc with comments gets converted back with comments
"""
doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)
check_russian_doc(doc)
def test_unusual_misc():
"""
The above RUSSIAN_SAMPLE resulted in a blank misc field in one particular implementation of the conll code
(the below test would fail)
"""
doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)
sentences = "{:C}".format(doc).split("\n\n")
assert len(sentences) == 2
sentence = sentences[0].split("\n")
assert len(sentence) == 14
for word in sentence:
pieces = word.split("\t")
assert len(pieces) == 1 or len(pieces) == 10
if len(pieces) == 10:
assert all(piece for piece in pieces)
def test_file():
"""
Test loading a doc from a file
"""
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, "russian.conll")
with open(filename, "w", encoding="utf-8") as fout:
fout.write(RUSSIAN_SAMPLE)
doc = CoNLL.conll2doc(input_file=filename)
check_russian_doc(doc)
def test_zip_file():
"""
Test loading a doc from a zip file
"""
with tempfile.TemporaryDirectory() as tempdir:
zip_file = os.path.join(tempdir, "russian.zip")
filename = "russian.conll"
with ZipFile(zip_file, "w") as zout:
with zout.open(filename, "w") as fout:
fout.write(RUSSIAN_SAMPLE.encode())
doc = CoNLL.conll2doc(input_file=filename, zip_file=zip_file)
check_russian_doc(doc)
SIMPLE_NER = """
# text = Teferi's best friend is Karn
# sent_id = 0
1 Teferi _ _ _ _ 0 _ _ start_char=0|end_char=6|ner=S-PERSON
2 's _ _ _ _ 1 _ _ start_char=6|end_char=8|ner=O
3 best _ _ _ _ 2 _ _ start_char=9|end_char=13|ner=O
4 friend _ _ _ _ 3 _ _ start_char=14|end_char=20|ner=O
5 is _ _ _ _ 4 _ _ start_char=21|end_char=23|ner=O
6 Karn _ _ _ _ 5 _ _ start_char=24|end_char=28|ner=S-PERSON
""".strip()
def test_simple_ner_conversion():
"""
Test that tokens get properly created with NER tags
"""
doc = CoNLL.conll2doc(input_str=SIMPLE_NER)
assert len(doc.sentences) == 1
sentence = doc.sentences[0]
assert len(sentence.tokens) == 6
EXPECTED_NER = ["S-PERSON", "O", "O", "O", "O", "S-PERSON"]
for token, ner in zip(sentence.tokens, EXPECTED_NER):
assert token.ner == ner
# check that the ner, start_char, end_char fields were not put on the token's misc
# those should all be set as specific fields on the token
assert not token.misc
assert len(token.words) == 1
# they should also not reach the word's misc field
assert not token.words[0].misc
conll = "{:C}".format(doc)
assert conll == SIMPLE_NER
MWT_NER = """
# text = This makes John's headache worse
# sent_id = 0
1 This _ _ _ _ 0 _ _ start_char=0|end_char=4|ner=O
2 makes _ _ _ _ 1 _ _ start_char=5|end_char=10|ner=O
3-4 John's _ _ _ _ _ _ _ start_char=11|end_char=17|ner=S-PERSON
3 John _ _ _ _ 2 _ _ _
4 's _ _ _ _ 3 _ _ _
5 headache _ _ _ _ 4 _ _ start_char=18|end_char=26|ner=O
6 worse _ _ _ _ 5 _ _ start_char=27|end_char=32|ner=O
""".strip()
def test_mwt_ner_conversion():
"""
Test that tokens including MWT get properly created with NER tags
Note that this kind of thing happens with the EWT tokenizer for English, for example
"""
doc = CoNLL.conll2doc(input_str=MWT_NER)
assert len(doc.sentences) == 1
sentence = doc.sentences[0]
assert len(sentence.tokens) == 5
EXPECTED_NER = ["O", "O", "S-PERSON", "O", "O"]
EXPECTED_WORDS = [1, 1, 2, 1, 1]
for token, ner, expected_words in zip(sentence.tokens, EXPECTED_NER, EXPECTED_WORDS):
assert token.ner == ner
# check that the ner, start_char, end_char fields were not put on the token's misc
# those should all be set as specific fields on the token
assert not token.misc
assert len(token.words) == expected_words
# they should also not reach the word's misc field
assert not token.words[0].misc
conll = "{:C}".format(doc)
assert conll == MWT_NER
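# Hedged round-trip sketch using only calls exercised above: parse CoNLL-U
# text into a Document, then re-serialize it; "{:C}" keeps the comment lines
# while "{:c}" drops them.
#
#   doc = CoNLL.conll2doc(input_str=SIMPLE_NER)
#   assert "{:C}".format(doc) == SIMPLE_NER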
|
8945afa9a2b5db582feeeeee5a720d46baf893a8
|
9071dc219693bde591ad12fb31c43c635f3a3f5e
|
/git_machete/bin.py
|
5f6d780e678dc1e72da35dd5fe4ce6ab749f519f
|
[
"MIT"
] |
permissive
|
VirtusLab/git-machete
|
67f51e49d44601daee4cc40fa27de87ecc029af6
|
dca261b0f8c56edb65557d178321a21177872b05
|
refs/heads/master
| 2023-08-17T07:58:32.883018
| 2023-08-12T14:52:48
| 2023-08-12T14:52:48
| 122,743,101
| 711
| 45
|
MIT
| 2023-09-08T07:39:27
| 2018-02-24T13:32:07
|
Python
|
UTF-8
|
Python
| false
| false
| 767
|
py
|
bin.py
|
import sys
# Since this shim needs to be compatible with Python 2,
# let's skip mypy checks, as type annotations were only introduced in Python 3.5.
def main(): # type: ignore
def validate_python_version(): # type: ignore
if sys.version_info[:2] < (3, 6):
# String interpolations were only introduced in Python 3.6
version_str = "{}.{}.{}".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro) # noqa: FS002
sys.stderr.write(
"Python {} is no longer supported. \n".format(version_str) + # noqa: FS002
"Please switch to Python 3.6 or higher.\n")
sys.exit(1)
validate_python_version() # type: ignore
from . import cli
cli.main()
|
e89a6e08d47f4be355950e47b4f11f86bfc0deab
|
4d28185e7a78a569f9a449f39f183cac3024f711
|
/packages/Python/lldbsuite/test/lang/c/register_variables/TestRegisterVariables.py
|
af0ad2a08719deb58fe35f946483f16316bef416
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
apple/swift-lldb
|
2789bf44f648609a1674ee520ac20b64c95de072
|
d74be846ef3e62de946df343e8c234bde93a8912
|
refs/heads/stable
| 2023-04-06T00:28:15.882479
| 2019-10-25T22:46:59
| 2019-10-25T22:46:59
| 44,838,862
| 780
| 291
|
Apache-2.0
| 2020-01-10T19:28:43
| 2015-10-23T21:13:18
|
C++
|
UTF-8
|
Python
| false
| false
| 7,610
|
py
|
TestRegisterVariables.py
|
"""Check that compiler-generated register values work correctly"""
from __future__ import print_function
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
# This method attempts to figure out if a given variable
# is in a register.
#
# Return:
#    True if the variable has a readable value and resides in a register
# False otherwise
def is_variable_in_register(frame, var_name):
# Ensure we can lookup the variable.
var = frame.FindVariable(var_name)
# print("\nchecking {}...".format(var_name))
if var is None or not var.IsValid():
# print("{} cannot be found".format(var_name))
return False
# Check that we can get its value. If not, this
# may be a variable that is just out of scope at this point.
value = var.GetValue()
# print("checking value...")
if value is None:
# print("value is invalid")
return False
# else:
# print("value is {}".format(value))
# We have a variable and we can get its value. The variable is in
# a register if we cannot get an address for it, assuming it is
# not a struct pointer. (This is an approximation - compilers can
    # do other things, like splitting a value across several registers,
    # but this check verifies much more than the test did before.)
var_addr = var.GetAddress()
# print("checking address...")
if var_addr.IsValid():
# We have an address, it must not be in a register.
# print("var {} is not in a register: has a valid address {}".format(var_name, var_addr))
return False
else:
# We don't have an address but we can read the value.
# It is likely stored in a register.
# print("var {} is in a register (we don't have an address for it)".format(var_name))
return True
def is_struct_pointer_in_register(frame, var_name, trace):
# Ensure we can lookup the variable.
var = frame.FindVariable(var_name)
if trace:
print("\nchecking {}...".format(var_name))
if var is None or not var.IsValid():
# print("{} cannot be found".format(var_name))
return False
# Check that we can get its value. If not, this
# may be a variable that is just out of scope at this point.
value = var.GetValue()
# print("checking value...")
if value is None:
if trace:
print("value is invalid")
return False
else:
if trace:
print("value is {}".format(value))
var_loc = var.GetLocation()
if trace:
print("checking location: {}".format(var_loc))
if var_loc is None or var_loc.startswith("0x"):
# The frame var is not in a register but rather a memory location.
# print("frame var {} is not in a register".format(var_name))
return False
else:
# print("frame var {} is in a register".format(var_name))
return True
def re_expr_equals(val_type, val):
    # Match lines like "({val_type}) $<digits> = {val}"
    return re.compile(r'\(' + val_type + r'\) \$\d+ = ' + str(val))
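# Hedged sketch of what the pattern matches -- an lldb `expr` result line:
#
#   assert re_expr_equals('int', 2).search('(int) $3 = 2') is not None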
class RegisterVariableTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(compiler="clang", compiler_version=['<', '3.5'])
@expectedFailureAll(compiler="gcc", compiler_version=[
'>=', '4.8.2'], archs=["i386"])
@expectedFailureAll(compiler="gcc", compiler_version=[
'<', '4.9'], archs=["x86_64"])
def test_and_run_command(self):
"""Test expressions on register values."""
# This test now ensures that each probable
# register variable location is actually a register, and
# if so, whether we can print out the variable there.
# It only requires one of them to be handled in a non-error
# way.
register_variables_count = 0
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the main.
lldbutil.run_break_set_by_source_regexp(
self, "break", num_expected_locations=3)
####################
# First breakpoint
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# Try some variables that should be visible
frame = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame()
if is_variable_in_register(frame, 'a'):
register_variables_count += 1
self.expect("expr a", VARIABLES_DISPLAYED_CORRECTLY,
patterns=[re_expr_equals('int', 2)])
if is_struct_pointer_in_register(frame, 'b', self.TraceOn()):
register_variables_count += 1
self.expect("expr b->m1", VARIABLES_DISPLAYED_CORRECTLY,
patterns=[re_expr_equals('int', 3)])
#####################
# Second breakpoint
self.runCmd("continue")
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# Try some variables that should be visible
frame = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame()
if is_struct_pointer_in_register(frame, 'b', self.TraceOn()):
register_variables_count += 1
self.expect("expr b->m2", VARIABLES_DISPLAYED_CORRECTLY,
patterns=[re_expr_equals('int', 5)])
if is_variable_in_register(frame, 'c'):
register_variables_count += 1
self.expect("expr c", VARIABLES_DISPLAYED_CORRECTLY,
patterns=[re_expr_equals('int', 5)])
#####################
# Third breakpoint
self.runCmd("continue")
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# Try some variables that should be visible
frame = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame()
if is_variable_in_register(frame, 'f'):
register_variables_count += 1
self.expect("expr f", VARIABLES_DISPLAYED_CORRECTLY,
patterns=[re_expr_equals('float', '3.1')])
# Validate that we verified at least one register variable
self.assertTrue(
register_variables_count > 0,
"expected to verify at least one variable in a register")
# print("executed {} expressions with values in registers".format(register_variables_count))
self.runCmd("kill")
|
be50da08231ee501290a3dbb011b303d31fa7513
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/template_tests/syntax_tests/test_resetcycle.py
|
fb67b3368ee6e19553cead48be1ddf70bd668861
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,329
|
py
|
test_resetcycle.py
|
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class ResetCycleTagTests(SimpleTestCase):
@setup({"resetcycle01": "{% resetcycle %}"})
def test_resetcycle01(self):
with self.assertRaisesMessage(TemplateSyntaxError, "No cycles in template."):
self.engine.get_template("resetcycle01")
@setup({"resetcycle02": "{% resetcycle undefinedcycle %}"})
def test_resetcycle02(self):
with self.assertRaisesMessage(
TemplateSyntaxError, "Named cycle 'undefinedcycle' does not exist."
):
self.engine.get_template("resetcycle02")
@setup({"resetcycle03": "{% cycle 'a' 'b' %}{% resetcycle undefinedcycle %}"})
def test_resetcycle03(self):
with self.assertRaisesMessage(
TemplateSyntaxError, "Named cycle 'undefinedcycle' does not exist."
):
self.engine.get_template("resetcycle03")
@setup({"resetcycle04": "{% cycle 'a' 'b' as ab %}{% resetcycle undefinedcycle %}"})
def test_resetcycle04(self):
with self.assertRaisesMessage(
TemplateSyntaxError, "Named cycle 'undefinedcycle' does not exist."
):
self.engine.get_template("resetcycle04")
@setup(
{
"resetcycle05": (
"{% for i in test %}{% cycle 'a' 'b' %}{% resetcycle %}{% endfor %}"
)
}
)
def test_resetcycle05(self):
output = self.engine.render_to_string("resetcycle05", {"test": list(range(5))})
self.assertEqual(output, "aaaaa")
@setup(
{
"resetcycle06": "{% cycle 'a' 'b' 'c' as abc %}"
"{% for i in test %}"
"{% cycle abc %}"
"{% cycle '-' '+' %}"
"{% resetcycle %}"
"{% endfor %}"
}
)
def test_resetcycle06(self):
output = self.engine.render_to_string("resetcycle06", {"test": list(range(5))})
self.assertEqual(output, "ab-c-a-b-c-")
@setup(
{
"resetcycle07": "{% cycle 'a' 'b' 'c' as abc %}"
"{% for i in test %}"
"{% resetcycle abc %}"
"{% cycle abc %}"
"{% cycle '-' '+' %}"
"{% endfor %}"
}
)
def test_resetcycle07(self):
output = self.engine.render_to_string("resetcycle07", {"test": list(range(5))})
self.assertEqual(output, "aa-a+a-a+a-")
@setup(
{
"resetcycle08": "{% for i in outer %}"
"{% for j in inner %}"
"{% cycle 'a' 'b' %}"
"{% endfor %}"
"{% resetcycle %}"
"{% endfor %}"
}
)
def test_resetcycle08(self):
output = self.engine.render_to_string(
"resetcycle08", {"outer": list(range(2)), "inner": list(range(3))}
)
self.assertEqual(output, "abaaba")
@setup(
{
"resetcycle09": "{% for i in outer %}"
"{% cycle 'a' 'b' %}"
"{% for j in inner %}"
"{% cycle 'X' 'Y' %}"
"{% endfor %}"
"{% resetcycle %}"
"{% endfor %}"
}
)
def test_resetcycle09(self):
output = self.engine.render_to_string(
"resetcycle09", {"outer": list(range(2)), "inner": list(range(3))}
)
self.assertEqual(output, "aXYXbXYX")
@setup(
{
"resetcycle10": "{% for i in test %}"
"{% cycle 'X' 'Y' 'Z' as XYZ %}"
"{% cycle 'a' 'b' 'c' as abc %}"
"{% if i == 1 %}"
"{% resetcycle abc %}"
"{% endif %}"
"{% endfor %}"
}
)
def test_resetcycle10(self):
output = self.engine.render_to_string("resetcycle10", {"test": list(range(5))})
self.assertEqual(output, "XaYbZaXbYc")
@setup(
{
"resetcycle11": "{% for i in test %}"
"{% cycle 'X' 'Y' 'Z' as XYZ %}"
"{% cycle 'a' 'b' 'c' as abc %}"
"{% if i == 1 %}"
"{% resetcycle XYZ %}"
"{% endif %}"
"{% endfor %}"
}
)
def test_resetcycle11(self):
output = self.engine.render_to_string("resetcycle11", {"test": list(range(5))})
self.assertEqual(output, "XaYbXcYaZb")
|
fbe1e0018fe32a5cdf401a90b67a7da2d587fb4a
|
b38247a5d84d8b52ce8363f8dd81629cfbe17f65
|
/reagent/net_builder/parametric_dqn_net_builder.py
|
d37091cc7dcf81cdafc3094a787a06c4ca774a12
|
[
"BSD-3-Clause"
] |
permissive
|
facebookresearch/ReAgent
|
7f2b82eaaf7a19e58cc50aacc307d7b001231440
|
c5f1a8371a677b4f8fb0882b600bf331eba5259d
|
refs/heads/main
| 2023-09-05T15:56:49.175072
| 2023-08-29T21:48:40
| 2023-08-29T21:48:40
| 98,565,575
| 1,480
| 290
|
BSD-3-Clause
| 2023-09-12T23:09:30
| 2017-07-27T17:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
parametric_dqn_net_builder.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import abc
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData
from reagent.models.base import ModelBase
from reagent.prediction.predictor_wrapper import ParametricDqnWithPreprocessor
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbParametricDqnPredictorWrapper as ParametricDqnPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import ParametricDqnPredictorWrapper
class ParametricDQNNetBuilder:
"""
Base class for parametric DQN net builder.
"""
@abc.abstractmethod
def build_q_network(
self,
state_normalization_data: NormalizationData,
action_normalization_data: NormalizationData,
output_dim: int = 1,
) -> ModelBase:
pass
def build_serving_module(
self,
q_network: ModelBase,
state_normalization_data: NormalizationData,
action_normalization_data: NormalizationData,
) -> torch.nn.Module:
"""
Returns a TorchScript predictor module
"""
state_preprocessor = Preprocessor(
state_normalization_data.dense_normalization_parameters, False
)
action_preprocessor = Preprocessor(
action_normalization_data.dense_normalization_parameters, False
)
dqn_with_preprocessor = ParametricDqnWithPreprocessor(
q_network.cpu_model().eval(), state_preprocessor, action_preprocessor
)
return ParametricDqnPredictorWrapper(
dqn_with_preprocessor=dqn_with_preprocessor
)
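# Hedged sketch: a concrete builder only needs to implement build_q_network;
# build_serving_module then wraps its output for TorchScript export. The
# class below is illustrative, not part of ReAgent.
#
#   class MyDQNNetBuilder(ParametricDQNNetBuilder):
#       def build_q_network(self, state_normalization_data,
#                           action_normalization_data, output_dim=1):
#           ...  # build and return a reagent ModelBase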
|
59d038437a69ab38dfccdc3468e2bf2bd532e0be
|
ebf2f274c49b5a19f02fc4bfd1eb79b31973bd6d
|
/docs/src/examples/python/mat_completion.py
|
05bf3f46c13b3d933c10f4596423da8f16c7f753
|
[
"MIT"
] |
permissive
|
cvxgrp/scs
|
6b07cfbdd1405ced6615608aa8f03c55cb809928
|
f5f054be7dd71ee0d80c4c0eec0df1e9f0ccb123
|
refs/heads/master
| 2023-09-06T02:12:13.387138
| 2023-04-13T08:51:52
| 2023-04-13T08:51:52
| 14,811,835
| 496
| 131
|
MIT
| 2023-04-12T22:38:26
| 2013-11-29T23:11:16
|
C
|
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
mat_completion.py
|
import scs
import numpy as np
import scipy as sp
from scipy import sparse
np.random.seed(1)
# The vec function as documented in api/cones
def vec(S):
n = S.shape[0]
S = np.copy(S)
S *= np.sqrt(2)
S[range(n), range(n)] /= np.sqrt(2)
return S[np.triu_indices(n)]
# The mat function as documented in api/cones
def mat(s):
n = int((np.sqrt(8 * len(s) + 1) - 1) / 2)
S = np.zeros((n, n))
S[np.triu_indices(n)] = s / np.sqrt(2)
S = S + S.T
S[range(n), range(n)] /= np.sqrt(2)
return S
dim = 15 # dim x dim matrix
vlen = int(dim * (dim + 1) / 2) # length of vector x = vec(X)
# Generate true matrix
rank = dim // 5 # low rank
X = np.random.randn(dim, rank)
X = X @ X.T
#############################################################################
# Let's first do some basic sanity checks to ensure that mat, vec are working:
# mat(vec( . )) should be identity
print(f"Should be ~ 0: {np.linalg.norm(X - mat(vec(X)))}")
# Trace( . ) should be vec(I)' vec( . )
print(f"Should be ~ 0: {np.trace(X) - vec(np.eye(dim)) @ vec(X)}")
#############################################################################
num_measurements = vlen // 2 # how many measurements are revealed
# Generate random measurement indices
measurement_idxs = np.random.choice(
np.arange(vlen), size=num_measurements, replace=False
)
# Create A matrix
Ad = np.zeros((num_measurements, vlen))
for i in range(num_measurements):
Ad[i, measurement_idxs[i]] = 1.0
# Noisy measurements of X
measurements = Ad @ vec(X) + 0.01 * np.random.randn(num_measurements) # + noise
# Auxiliary data
In = sparse.eye(vlen)
Im = sparse.eye(num_measurements)
On = sparse.csc_matrix((vlen, vlen))
Onm = sparse.csc_matrix((vlen, num_measurements))
# SCS data
P = sparse.block_diag([On, sparse.eye(num_measurements)], format="csc")
A = sparse.vstack(
[
# zero cone
sparse.hstack([Ad, -Im]),
# positive semidefinite cone
sparse.hstack([-In, Onm]),
],
format="csc",
)
b = np.hstack([measurements, np.zeros(vlen)])
c = np.hstack([np.zeros(vlen + num_measurements)])
data = dict(P=P, A=A, b=b, c=c)
cone = dict(z=num_measurements, s=dim)
# Setup workspace
solver = scs.SCS(data, cone, eps_abs=1e-6, eps_rel=1e-6)
print(f"Solving for lambda = 0")
sol = solver.solve() # lambda = 0
X_hat = mat(sol["x"][:vlen])
print(f"Error: {np.linalg.norm(X_hat - X) / np.linalg.norm(X)}")
# Solve for different values of lambda
lambdas = np.logspace(-6, 1, 11)
for lam in lambdas:
print(f"Solving for lambda = {lam}")
# Re-use workspace, just update the `c` vector
c_new = np.hstack([lam * vec(np.eye(dim)), np.zeros(num_measurements)])
solver.update(c=c_new)
# Solve updated problem
sol = solver.solve() # will warm-start automatically
X_hat = mat(sol["x"][:vlen])
# What is the norm error?
print(f"Error : {np.linalg.norm(X_hat - X) / np.linalg.norm(X)}")
|
f9a68a8f7aacfc0561d06f0436f7b15c3178be67
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python/1180.py
|
19222fbd1c668b58d35a5c5a8deb44e396134fed
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
1180.py
|
__________________________________________________________________________________________________
class Solution:
def countLetters(self, S: str) -> int:
n = len(S)
res = 1
cur = 1
for i in range(1, n):
if S[i] == S[i - 1]:
cur += 1
else:
cur = 1
res += cur
return res
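# Walkthrough (illustrative): every maximal run of one repeated letter of
# length L contributes L*(L+1)/2 substrings that use only that letter, which
# is what the running `cur` accumulates one position at a time. For example,
# Solution().countLetters("aaaba") == 8 from the runs "aaa" (6), "b" (1),
# "a" (1).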
__________________________________________________________________________________________________
__________________________________________________________________________________________________
|
aaa66432391b449effbe9801506cb74423333cde
|
b32df2ffae14c3ca8083f36f93165c220aef5e44
|
/blueoil/networks/lmnet_multi.py
|
9614de65ff82ef0c0c8affc9a8e2961f788a398a
|
[
"Apache-2.0"
] |
permissive
|
blue-oil/blueoil
|
213659909b6eac26dd249f878a03ed732b639539
|
0c9160b524b17482d59ae48a0c11384f1d26dccc
|
refs/heads/master
| 2023-01-24T05:10:54.825811
| 2021-04-22T08:46:56
| 2021-04-22T08:46:56
| 153,597,157
| 252
| 111
|
Apache-2.0
| 2021-05-10T05:02:45
| 2018-10-18T09:19:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,732
|
py
|
lmnet_multi.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import tensorflow as tf
from blueoil.metrics.metrics import tp_tn_fp_fn, tp_tn_fp_fn_for_each
def safe_log(arg):
return tf.math.log(tf.clip_by_value(arg, 1e-10, 1.0))
# TODO(wakisaka): WIP
class LmnetMulti:
"""Multi label prediction"""
version = 0.01
@property
def placeholders(self):
"""placeholders"""
images_placeholder = tf.compat.v1.placeholder(
tf.float32,
shape=(self.batch_size, self.image_size[0], self.image_size[1], 3),
name="images_placeholder")
labels_placeholder = tf.compat.v1.placeholder(
tf.bool,
shape=(self.batch_size, self.num_classes),
name="labels_placeholder")
return images_placeholder, labels_placeholder
def inference(self, images, is_training):
"""inference.
Args:
images: images tensor. shape is (batch_num, height, width, channel)
is_training:
"""
base = self.base(images, is_training)
        # Multi-label prediction applies an element-wise sigmoid, not a softmax.
        sigmoided = tf.sigmoid(base)
        self.output = tf.identity(sigmoided, name="output")
return self.output
def loss(self, output, labels):
"""loss.
Args:
output: network output sigmoided tensor.
labels: multi label encoded tensor. shape is (batch_num, num_classes)
"""
with tf.name_scope("loss"):
labels = tf.cast(labels, tf.float32)
if self.is_debug:
labels = tf.Print(labels, [tf.shape(labels), tf.argmax(labels, 1)], message="labels:", summarize=200)
output = tf.Print(output, [tf.shape(output), tf.argmax(output, 1)], message="output:", summarize=200)
loss = tf.reduce_mean(
- tf.reduce_sum(
(labels * safe_log(output)) + ((1 - labels) * safe_log(1 - output)),
axis=[1],
)
)
tf.compat.v1.summary.scalar("loss", loss)
return loss
def metrics(self, output, labels, thresholds=[0.3, 0.5, 0.7]):
self.metrics_for_each_class(output, labels, thresholds)
with tf.name_scope("metrics"):
for threshold in thresholds:
tp, tn, fp, fn = tp_tn_fp_fn(output, labels, threshold=threshold)
accuracy = (tp + tn) / (tp + tn + fp + fn)
tf.compat.v1.summary.scalar("accuracy/prob_{}".format(threshold), accuracy)
recall = (tp) / (tp + fn)
tf.compat.v1.summary.scalar("recall/prob_{}".format(threshold), recall)
precision = (tp) / (tp + fp)
tf.compat.v1.summary.scalar("precision/prob_{}".format(threshold), precision)
return accuracy
def metrics_for_each_class(self, output, labels, thresholds=[0.3, 0.5, 0.7]):
with tf.name_scope("metrics"):
for threshold in thresholds:
tp_tn_fp_fn = tp_tn_fp_fn_for_each(output, labels, threshold=threshold)
for label_i in range(len(self.classes)):
tp = tf.gather(tf.gather(tp_tn_fp_fn, 0), label_i)
tn = tf.gather(tf.gather(tp_tn_fp_fn, 1), label_i)
fp = tf.gather(tf.gather(tp_tn_fp_fn, 2), label_i)
fn = tf.gather(tf.gather(tp_tn_fp_fn, 3), label_i)
accuracy = (tp + tn) / (tp + tn + fp + fn)
tf.compat.v1.summary.scalar(
"accuracy/prob_{}/{}".format(threshold, self.classes[label_i]),
accuracy
)
recall = (tp) / (tp + fn)
tf.compat.v1.summary.scalar("recall/prob_{}/{}".format(threshold, self.classes[label_i]), recall)
precision = (tp) / (tp + fp)
tf.compat.v1.summary.scalar(
"precision/prob_{}/{}".format(threshold, self.classes[label_i]),
precision
)
return accuracy
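# Sanity note on safe_log (illustrative, TF2 eager execution):
# safe_log(tf.constant(0.0)) evaluates to log(1e-10) ~= -23.03 rather than
# -inf, which keeps the binary cross-entropy loss finite even when the
# sigmoid output saturates at exactly 0 or 1.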
|
32f6fb5dce560d167d1138216f59d118ac55e68f
|
c2c212ba42ebfa35f3b6122344978bc94ec8fa67
|
/tests/test_addapinch_1.py
|
7cc722bc157a7f4e3e3b2683c118a9140ccd42e1
|
[
"MIT"
] |
permissive
|
hhursev/recipe-scrapers
|
0cd6b7db4ef23ca825f2354f5d1ba76076a14813
|
8ced0227b3b16c532fc5ebf3060c99ee0452adab
|
refs/heads/main
| 2023-09-03T07:33:29.684121
| 2023-09-01T21:15:50
| 2023-09-01T21:15:50
| 42,446,168
| 1,276
| 443
|
MIT
| 2023-09-14T16:34:09
| 2015-09-14T12:05:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,493
|
py
|
test_addapinch_1.py
|
# mypy: allow-untyped-defs
from recipe_scrapers.addapinch import AddAPinch
from tests import ScraperTest
class TestAddAPinchScraper(ScraperTest):
scraper_class = AddAPinch
test_file_name = "addapinch_1"
def test_host(self):
self.assertEqual("addapinch.com", self.harvester_class.host())
def test_author(self):
self.assertEqual("Robyn Stone | Add a Pinch", self.harvester_class.author())
def test_title(self):
self.assertEqual(
"The Best Chocolate Cake Recipe {Ever}", self.harvester_class.title()
)
def test_category(self):
self.assertEqual("Dessert", self.harvester_class.category())
def test_total_time(self):
self.assertEqual(45, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("24 servings", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://addapinch.com/wp-content/uploads/2020/04/chocolate-cake-DSC_1768.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
expected_ingredients = [
"2 cups all-purpose flour",
"2 cups sugar",
"3/4 cup unsweetened cocoa powder",
"2 teaspoons baking powder",
"1 1/2 teaspoons baking soda",
"1 teaspoon kosher salt",
"1 teaspoon espresso powder (homemade or store-bought)",
"1 cup milk (or buttermilk, almond, or coconut milk)",
"1/2 cup vegetable oil (or canola oil, or melted coconut oil)",
"2 large eggs",
"2 teaspoons vanilla extract",
"1 cup boiling water",
"Chocolate Buttercream Frosting Recipe",
]
self.assertEqual(expected_ingredients, self.harvester_class.ingredients())
def test_instructions(self):
expected_instructions = (
"Preheat oven to 350º F. Prepare two 9-inch cake pans by spraying with baking spray or buttering and lightly flouring.\n"
"For the chocolate cake:\n"
"Add flour, sugar, cocoa, baking powder, baking soda, salt and espresso powder to a large bowl or the bowl of a stand mixer. Whisk through to combine or, using your paddle attachment, stir through flour mixture until combined well.\n"
"Add milk, vegetable oil, eggs, and vanilla to flour mixture and mix together on medium speed until well combined. Reduce speed and carefully add boiling water to the cake batter until well combined.\n"
"Distribute cake batter evenly between the two prepared cake pans. Bake for 30-35 minutes, until a toothpick or cake tester inserted in the center of the chocolate cake comes out clean.\n"
"Remove from the oven and allow to cool for about 10 minutes, remove from the pan and cool completely.\n"
"Frost cake with Chocolate Buttercream Frosting."
)
self.assertEqual(expected_instructions, self.harvester_class.instructions())
def test_ratings(self):
self.assertEqual(5.0, self.harvester_class.ratings())
def test_cuisine(self):
self.assertEqual("American", self.harvester_class.cuisine())
def test_description(self):
self.assertEqual(
"The Best Chocolate Cake Recipe - A one bowl chocolate cake recipe that is quick, easy, and delicious! Updated with gluten-free, dairy-free, and egg-free options!",
self.harvester_class.description(),
)
|
3546d75b7796cf7b6a24821552dce529209b48fe
|
e7bf1ff05319acc59bba5af5890041bd82c3e197
|
/mne/commands/__init__.py
|
813e2309557515566064002ac0c00c0b81ab9d88
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mne-tools/mne-python
|
7e8d7e945dfbbee6432a4955cf050fa823f2d34b
|
f44636f00666b8eb869417960926d01690ff4f42
|
refs/heads/main
| 2023-09-04T03:05:37.402100
| 2023-09-03T14:15:18
| 2023-09-03T14:15:18
| 1,301,584
| 2,437
| 1,418
|
BSD-3-Clause
| 2023-09-14T19:23:38
| 2011-01-28T03:31:13
|
Python
|
UTF-8
|
Python
| false
| false
| 167
|
py
|
__init__.py
|
"""Command-line utilities."""
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
submodules=["utils"],
submod_attrs={},
)
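# With lazy_loader, `utils` is imported only on first attribute access: e.g.
# `from mne.commands import utils` resolves the submodule lazily instead of
# at package import time.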
|
6f19d1a6898f24632c01c47ef6cfc6c36bc4be18
|
2247493654c160426c1655281aa7f1dca2bc98dd
|
/reg_tests/test_files/mixedTetPipe/pipe_tet_catalyst.py
|
deb8a0b3156f65788de1aaee452a726776da8cac
|
[
"BSD-2-Clause"
] |
permissive
|
NaluCFD/Nalu
|
12999b0e3b76dbeab8fc184f38b65a13b1180bce
|
3286651e494894ac5948c41bf985f987d20c2370
|
refs/heads/master
| 2023-08-10T02:48:04.179859
| 2023-08-02T19:02:46
| 2023-08-02T19:02:46
| 69,712,764
| 138
| 185
|
NOASSERTION
| 2023-09-14T16:42:19
| 2016-10-01T01:25:20
|
C
|
UTF-8
|
Python
| false
| false
| 7,190
|
py
|
pipe_tet_catalyst.py
|
# script-version: 2.0
# Catalyst state generated using paraview version 5.9.0
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# ----------------------------------------------------------------
# setup views used in the visualization
# ----------------------------------------------------------------
# get the material library
materialLibrary1 = GetMaterialLibrary()
# Create a new 'Render View'
renderView1 = CreateView('RenderView')
renderView1.ViewSize = [844, 539]
renderView1.AxesGrid = 'GridAxes3DActor'
renderView1.CenterOfRotation = [0.0, -1.043081283569336e-06, 0.0]
renderView1.StereoType = 'Crystal Eyes'
renderView1.CameraPosition = [2.443379419932841, 17.382832235131602, -20.055095079935207]
renderView1.CameraFocalPoint = [0.0, -1.043081283569336e-06, 0.0]
renderView1.CameraViewUp = [0.9500725701327958, 0.16904480929559032, 0.26227078360252315]
renderView1.CameraFocalDisk = 1.0
renderView1.CameraParallelScale = 10.09950424491012
renderView1.BackEnd = 'OSPRay raycaster'
renderView1.OSPRayMaterialLibrary = materialLibrary1
SetActiveView(None)
# ----------------------------------------------------------------
# setup view layouts
# ----------------------------------------------------------------
# create new layout object 'Layout #1'
layout1 = CreateLayout(name='Layout #1')
layout1.AssignView(0, renderView1)
layout1.SetSize(844, 539)
# ----------------------------------------------------------------
# restore active view
SetActiveView(renderView1)
# ----------------------------------------------------------------
# ----------------------------------------------------------------
# setup the data processing pipelines
# ----------------------------------------------------------------
# create a new 'ExodusIIReader'
input = ExodusIIReader(registrationName='input', FileName=['/data/tjotaha/src/nalu_catalyst_5.9/Nalu/reg_tests/mesh/pipeTet.g.8.0', '/data/tjotaha/src/nalu_catalyst_5.9/Nalu/reg_tests/mesh/pipeTet.g.8.1', '/data/tjotaha/src/nalu_catalyst_5.9/Nalu/reg_tests/mesh/pipeTet.g.8.2', '/data/tjotaha/src/nalu_catalyst_5.9/Nalu/reg_tests/mesh/pipeTet.g.8.3', '/data/tjotaha/src/nalu_catalyst_5.9/Nalu/reg_tests/mesh/pipeTet.g.8.4', '/data/tjotaha/src/nalu_catalyst_5.9/Nalu/reg_tests/mesh/pipeTet.g.8.5', '/data/tjotaha/src/nalu_catalyst_5.9/Nalu/reg_tests/mesh/pipeTet.g.8.6', '/data/tjotaha/src/nalu_catalyst_5.9/Nalu/reg_tests/mesh/pipeTet.g.8.7'])
input.SideSetArrayStatus = []
input.ElementBlocks = ['Unnamed block ID: 1']
# ----------------------------------------------------------------
# setup the visualization in view 'renderView1'
# ----------------------------------------------------------------
# show data from input
inputDisplay = Show(input, renderView1, 'UnstructuredGridRepresentation')
# get color transfer function/color map for 'vtkBlockColors'
vtkBlockColorsLUT = GetColorTransferFunction('vtkBlockColors')
vtkBlockColorsLUT.InterpretValuesAsCategories = 1
vtkBlockColorsLUT.AnnotationsInitialized = 1
vtkBlockColorsLUT.Annotations = ['0', '0', '1', '1', '2', '2', '3', '3', '4', '4', '5', '5', '6', '6', '7', '7', '8', '8', '9', '9', '10', '10', '11', '11']
vtkBlockColorsLUT.ActiveAnnotatedValues = ['0', '1']
vtkBlockColorsLUT.IndexedColors = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.63, 0.63, 1.0, 0.67, 0.5, 0.33, 1.0, 0.5, 0.75, 0.53, 0.35, 0.7, 1.0, 0.75, 0.5]
# get opacity transfer function/opacity map for 'vtkBlockColors'
vtkBlockColorsPWF = GetOpacityTransferFunction('vtkBlockColors')
# trace defaults for the display properties.
inputDisplay.Representation = 'Surface With Edges'
inputDisplay.ColorArrayName = ['FIELD', 'vtkBlockColors']
inputDisplay.LookupTable = vtkBlockColorsLUT
inputDisplay.SelectTCoordArray = 'None'
inputDisplay.SelectNormalArray = 'None'
inputDisplay.SelectTangentArray = 'None'
inputDisplay.OSPRayScaleArray = 'GlobalNodeId'
inputDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
inputDisplay.SelectOrientationVectors = 'None'
inputDisplay.ScaleFactor = 2.0
inputDisplay.SelectScaleArray = 'GlobalNodeId'
inputDisplay.GlyphType = 'Arrow'
inputDisplay.GlyphTableIndexArray = 'GlobalNodeId'
inputDisplay.GaussianRadius = 0.1
inputDisplay.SetScaleArray = ['POINTS', 'GlobalNodeId']
inputDisplay.ScaleTransferFunction = 'PiecewiseFunction'
inputDisplay.OpacityArray = ['POINTS', 'GlobalNodeId']
inputDisplay.OpacityTransferFunction = 'PiecewiseFunction'
inputDisplay.DataAxesGrid = 'GridAxesRepresentation'
inputDisplay.PolarAxes = 'PolarAxesRepresentation'
inputDisplay.ScalarOpacityFunction = vtkBlockColorsPWF
inputDisplay.ScalarOpacityUnitDistance = 0.2813007527538719
inputDisplay.OpacityArrayName = ['POINTS', 'GlobalNodeId']
inputDisplay.ExtractedBlockIndex = 2
# init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
inputDisplay.ScaleTransferFunction.Points = [1.0, 0.0, 0.5, 0.0, 69056.0, 1.0, 0.5, 0.0]
# init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
inputDisplay.OpacityTransferFunction.Points = [1.0, 0.0, 0.5, 0.0, 69056.0, 1.0, 0.5, 0.0]
# setup the color legend parameters for each legend in this view
# get color legend/bar for vtkBlockColorsLUT in view renderView1
vtkBlockColorsLUTColorBar = GetScalarBar(vtkBlockColorsLUT, renderView1)
vtkBlockColorsLUTColorBar.Title = 'vtkBlockColors'
vtkBlockColorsLUTColorBar.ComponentTitle = ''
# set color bar visibility
vtkBlockColorsLUTColorBar.Visibility = 1
# show color legend
inputDisplay.SetScalarBarVisibility(renderView1, True)
# ----------------------------------------------------------------
# setup color maps and opacity maps used in the visualization
# note: the Get..() functions create a new object, if needed
# ----------------------------------------------------------------
# ----------------------------------------------------------------
# setup extractors
# ----------------------------------------------------------------
# create extractor
pNG1 = CreateExtractor('PNG', renderView1, registrationName='PNG1')
# trace defaults for the extractor.
# init the 'PNG' selected for 'Writer'
pNG1.Writer.FileName = 'CatalystTestImage_%.6ts%cm.png'
pNG1.Writer.ImageResolution = [844, 539]
pNG1.Writer.Format = 'PNG'
# ----------------------------------------------------------------
# restore active source
SetActiveSource(pNG1)
# ----------------------------------------------------------------
# ------------------------------------------------------------------------------
# Catalyst options
from paraview import catalyst
options = catalyst.Options()
options.ExtractsOutputDirectory = 'catalyst_test_image_output'
options.GlobalTrigger = 'TimeStep'
options.CatalystLiveTrigger = 'TimeStep'
# ------------------------------------------------------------------------------
if __name__ == '__main__':
from paraview.simple import SaveExtractsUsingCatalystOptions
# Code for non in-situ environments; if executing in post-processing
# i.e. non-Catalyst mode, let's generate extracts using Catalyst options
SaveExtractsUsingCatalystOptions(options)
|
918f329e4fd99fb5b9a739d28b581dec78cbe00c
|
417e21443179541fcf48fde9407b3fd3f58d4406
|
/tests/components/test_dialogue_async.py
|
39ee9e50faa01455a56fb3c966846a9c939e3f5c
|
[
"Apache-2.0"
] |
permissive
|
cisco/mindmeld
|
549e23eb6ee1385d2d1729ca532f1265d954276f
|
bd3547d5c1bd092dbd4a64a90528dfc2e2b3844a
|
refs/heads/master
| 2023-08-28T07:34:09.771290
| 2023-01-31T18:12:37
| 2023-01-31T18:12:37
| 177,415,822
| 671
| 194
|
Apache-2.0
| 2023-03-15T06:53:24
| 2019-03-24T13:05:00
|
Python
|
UTF-8
|
Python
| false
| false
| 10,165
|
py
|
test_dialogue_async.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_dialogue
----------------------------------
Tests for dialogue module.
These tests apply only when async/await are supported.
"""
# pylint: disable=locally-disabled,redefined-outer-name
import asyncio
import pytest
from mindmeld.components import Conversation, DialogueManager
from mindmeld.components.dialogue import DialogueResponder
from mindmeld.components.request import Params
from .test_dialogue import create_request, create_responder
@pytest.fixture
def dm():
dialogue_manager = DialogueManager(async_mode=True)
dialogue_manager.called_async_handler = False
@dialogue_manager.handle(domain="domain")
async def domain(ctx, handler):
pass
@dialogue_manager.handle(intent="intent")
async def intent(ctx, handler):
pass
@dialogue_manager.handle(domain="domain", intent="intent")
async def domain_intent(ctx, handler):
pass
@dialogue_manager.handle(intent="intent", has_entity="entity_1")
async def intent_entity_1(ctx, handler):
pass
@dialogue_manager.handle(intent="intent", has_entity="entity_2")
async def intent_entity_2(ctx, handler):
pass
@dialogue_manager.handle(
intent="intent", has_entities=("entity_1", "entity_2", "entity_3")
)
async def intent_entities(ctx, handler):
pass
@dialogue_manager.handle(targeted_only=True)
async def targeted_only(ctx, handler):
pass
# Defined to test default use
@dialogue_manager.handle()
async def dummy_ruleless(ctx, handler):
pass
@dialogue_manager.handle(default=True)
async def default(ctx, handler):
pass
@dialogue_manager.handle(intent="async")
async def async_handler(_, responder):
await asyncio.sleep(0.050)
dialogue_manager.called_async_handler = True
responder.reply("this is the async handler")
return dialogue_manager
class TestDialogueManager:
"""Tests for the dialogue manager"""
@pytest.mark.asyncio
async def test_default(self, dm):
"""Default dialogue state when no rules match
This will select the rule with default=True"""
request = create_request("other", "other")
result = await dm.apply_handler(request, create_responder(request))
assert result.dialogue_state == "default"
def test_default_uniqueness(self, dm):
with pytest.raises(AssertionError):
@dm.handle(default=True)
async def default2(x, y):
pass
def test_default_kwarg_exclusion(self, dm):
with pytest.raises(ValueError):
@dm.handle(intent="intent", default=True)
async def default3(x, y):
pass
def test_sync_handler(self, dm):
with pytest.raises(TypeError):
@dm.handle(intent="sync")
def sync_handler(x, y):
pass
def test_sync_middleware(self, dm):
with pytest.raises(TypeError):
@dm.middleware
def middleware(x, y, z):
pass
@pytest.mark.asyncio
async def test_domain(self, dm):
"""Correct dialogue state is found for a domain"""
request = create_request("domain", "other")
result = await dm.apply_handler(request, create_responder(request))
assert result.dialogue_state == "domain"
@pytest.mark.asyncio
async def test_domain_intent(self, dm):
"""Correct state should be found for domain and intent"""
request = create_request("domain", "intent")
result = await dm.apply_handler(request, create_responder(request))
assert result.dialogue_state == "domain_intent"
@pytest.mark.asyncio
async def test_intent(self, dm):
"""Correct state should be found for intent"""
request = create_request("other", "intent")
result = await dm.apply_handler(request, create_responder(request))
assert result.dialogue_state == "intent"
@pytest.mark.asyncio
async def test_intent_entity(self, dm):
"""Correctly match intent and entity"""
request = create_request("domain", "intent", [{"type": "entity_2"}])
result = await dm.apply_handler(request, create_responder(request))
assert result.dialogue_state == "intent_entity_2"
@pytest.mark.asyncio
async def test_intent_entity_tiebreak(self, dm):
"""Correctly break ties between rules of equal complexity"""
request = create_request(
"domain", "intent", [{"type": "entity_1"}, {"type": "entity_2"}]
)
result = await dm.apply_handler(request, create_responder(request))
assert result.dialogue_state == "intent_entity_1"
@pytest.mark.asyncio
async def test_intent_entities(self, dm):
"""Correctly break ties between rules of equal complexity"""
context = create_request(
"domain",
"intent",
[{"type": "entity_1"}, {"type": "entity_2"}, {"type": "entity_3"}],
)
result = await dm.apply_handler(context, create_responder(context))
assert result.dialogue_state == "intent_entities"
@pytest.mark.asyncio
async def test_target_dialogue_state_management(self, dm):
"""Correctly sets the dialogue state based on the target_dialogue_state"""
context = create_request("domain", "intent")
result = await dm.apply_handler(
context, create_responder(context), target_dialogue_state="intent_entity_2"
)
assert result.dialogue_state == "intent_entity_2"
def test_targeted_only_kwarg_exclusion(self, dm):
with pytest.raises(ValueError):
@dm.handle(intent="intent", targeted_only=True)
async def targeted_only2(x, y):
pass
@pytest.mark.asyncio
async def test_middleware_single(self, dm):
"""Adding a single middleware works"""
async def _middle(request, responder, handler):
responder.middle = True
await handler(request, responder)
async def _handler(request, responder):
assert responder.middle
responder.handler = True
dm.add_middleware(_middle)
dm.add_dialogue_rule("middleware_test", _handler, intent="middle")
request = create_request("domain", "middle")
response = create_responder(request)
result = await dm.apply_handler(request, response)
assert result.dialogue_state == "middleware_test"
assert result.handler
@pytest.mark.asyncio
async def test_middleware_multiple(self, dm):
"""Adding multiple middleware works"""
async def _first(ctx, responder, handler):
responder.frame["middles"] = responder.frame.get("middles", []) + ["first"]
await handler(ctx, responder)
async def _second(ctx, responder, handler):
responder.frame["middles"] = responder.frame.get("middles", []) + ["second"]
await handler(ctx, responder)
async def _handler(ctx, responder):
# '_first' should have been called first, then '_second'
assert responder.frame["middles"] == ["first", "second"]
dm.add_middleware(_first)
dm.add_middleware(_second)
dm.add_dialogue_rule("middleware_test", _handler, intent="middle")
ctx = create_request("domain", "middle")
result = await dm.apply_handler(ctx, create_responder(ctx))
assert result.dialogue_state == "middleware_test"
@pytest.mark.asyncio
async def test_async_handler(dm):
"""Test asynchronous dialogue state handler works correctly"""
assert not dm.called_async_handler
request = create_request("domain", "async")
response = create_responder(request)
result = await dm.apply_handler(request, response)
assert dm.called_async_handler
assert result.dialogue_state == "async_handler"
assert len(result.directives) == 1
assert result.directives[0]["name"] == "reply"
assert result.directives[0]["payload"] == {"text": "this is the async handler"}
@pytest.mark.asyncio
async def test_async_middleware(dm):
"""Adding a single async middleware works"""
async def _middle(request, responder, handler):
responder.middle = True
await handler(request, responder)
async def _handler(request, responder):
assert responder.middle
responder.handler = True
dm.add_middleware(_middle)
dm.add_dialogue_rule("middleware_test", _handler, intent="middle")
request = create_request("domain", "middle")
response = create_responder(request)
    result = await dm.apply_handler(request, response)
assert result.dialogue_state == "middleware_test"
assert result.handler
@pytest.mark.conversation
@pytest.mark.asyncio
async def test_convo_params_are_cleared(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that the params are cleared in one trip from app to mm."""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path)
convo.params = Params(
allowed_intents=["store_info.find_nearest_store"],
target_dialogue_state="welcome",
)
await convo.say("close door")
assert convo.params == Params()
@pytest.mark.conversation
def test_convo_force_sync_creation(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that force sync kwarg works correctly when passed to convo
at creation.
"""
convo = Conversation(
app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path, force_sync=True
)
response = convo.process("close door")
assert isinstance(response, DialogueResponder)
@pytest.mark.conversation
def test_convo_force_sync_invocation(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that force sync kwarg works correctly when passed to convo
at invocation.
"""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path)
response = convo.process("close door", force_sync=True)
assert isinstance(response, DialogueResponder)
|
f4f23528a6a479ce55a86d4c4a8523b27d917f80
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/modules/aptly.py
|
a3409abfc3b86e8d2bef3cdb1dc1979fce291b8b
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 15,646
|
py
|
aptly.py
|
"""
Aptly Debian repository manager.
.. versionadded:: 2018.3.0
"""
import logging
import os
import re
import salt.utils.json
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
_DEFAULT_CONFIG_PATH = "/etc/aptly.conf"
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "aptly"
def __virtual__():
"""
Only works on systems with the aptly binary in the system path.
"""
if salt.utils.path.which("aptly"):
return __virtualname__
return (False, "The aptly binaries required cannot be found or are not installed.")
def _cmd_run(cmd):
"""
Run the aptly command.
:return: The string output of the command.
:rtype: str
"""
cmd.insert(0, "aptly")
cmd_ret = __salt__["cmd.run_all"](cmd, ignore_retcode=True)
if cmd_ret["retcode"] != 0:
log.debug("Unable to execute command: %s\nError: %s", cmd, cmd_ret["stderr"])
return cmd_ret["stdout"]
def _format_repo_args(
comment=None, component=None, distribution=None, uploaders_file=None, saltenv="base"
):
"""
Format the common arguments for creating or editing a repository.
:param str comment: The description of the repository.
:param str component: The default component to use when publishing.
:param str distribution: The default distribution to use when publishing.
:param str uploaders_file: The repository upload restrictions config.
:param str saltenv: The environment the file resides in.
:return: A list of the arguments formatted as aptly arguments.
:rtype: list
"""
ret = list()
cached_uploaders_path = None
settings = {
"comment": comment,
"component": component,
"distribution": distribution,
}
if uploaders_file:
cached_uploaders_path = __salt__["cp.cache_file"](uploaders_file, saltenv)
if not cached_uploaders_path:
log.error("Unable to get cached copy of file: %s", uploaders_file)
return False
for setting in settings:
if settings[setting] is not None:
ret.append("-{}={}".format(setting, settings[setting]))
if cached_uploaders_path:
ret.append("-uploaders-file={}".format(cached_uploaders_path))
return ret
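# Example (illustrative):
#   _format_repo_args(comment="Test", distribution="trusty")
#   -> ["-comment=Test", "-distribution=trusty"]
# None-valued settings are skipped, so only the passed arguments are emitted.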
def _validate_config(config_path):
"""
Validate that the configuration file exists and is readable.
:param str config_path: The path to the configuration file for the aptly instance.
:return: None
:rtype: None
"""
log.debug("Checking configuration file: %s", config_path)
if not os.path.isfile(config_path):
message = "Unable to get configuration file: {}".format(config_path)
log.error(message)
raise SaltInvocationError(message)
def get_config(config_path=_DEFAULT_CONFIG_PATH):
"""
Get the configuration data.
:param str config_path: The path to the configuration file for the aptly instance.
:return: A dictionary containing the configuration data.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_config
"""
_validate_config(config_path)
cmd = ["config", "show", "-config={}".format(config_path)]
cmd_ret = _cmd_run(cmd)
return salt.utils.json.loads(cmd_ret)
def list_repos(config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
"""
List all of the repos.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the repo.
:return: A dictionary of the repositories.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.list_repos
"""
_validate_config(config_path)
ret = dict()
cmd = ["repo", "list", "-config={}".format(config_path), "-raw=true"]
cmd_ret = _cmd_run(cmd)
repos = [line.strip() for line in cmd_ret.splitlines()]
log.debug("Found repositories: %s", len(repos))
for name in repos:
ret[name] = get_repo(
name=name, config_path=config_path, with_packages=with_packages
)
return ret
def get_repo(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
"""
Get the details of the repository.
:param str name: The name of the repository.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the repo.
:return: A dictionary containing information about the repository.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_repo name="test-repo"
"""
_validate_config(config_path)
with_packages = str(bool(with_packages)).lower()
ret = dict()
cmd = [
"repo",
"show",
"-config={}".format(config_path),
"-with-packages={}".format(with_packages),
name,
]
cmd_ret = _cmd_run(cmd)
for line in cmd_ret.splitlines():
try:
# Extract the settings and their values, and attempt to format
# them to match their equivalent setting names.
items = line.split(":")
key = items[0].lower().replace("default", "").strip()
key = " ".join(key.split()).replace(" ", "_")
ret[key] = salt.utils.stringutils.to_none(
salt.utils.stringutils.to_num(items[1].strip())
)
except (AttributeError, IndexError):
# If the line doesn't have the separator or is otherwise invalid, skip it.
log.debug("Skipping line: %s", line)
if ret:
log.debug("Found repository: %s", name)
else:
log.debug("Unable to find repository: %s", name)
return ret
def new_repo(
name,
config_path=_DEFAULT_CONFIG_PATH,
comment=None,
component=None,
distribution=None,
uploaders_file=None,
from_snapshot=None,
saltenv="base",
):
"""
Create the new repository.
:param str name: The name of the repository.
:param str config_path: The path to the configuration file for the aptly instance.
:param str comment: The description of the repository.
:param str component: The default component to use when publishing.
:param str distribution: The default distribution to use when publishing.
:param str uploaders_file: The repository upload restrictions config.
:param str from_snapshot: The snapshot to initialize the repository contents from.
:param str saltenv: The environment the file resides in.
:return: A boolean representing whether all changes succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' aptly.new_repo name="test-repo" comment="Test main repo" component="main" distribution="trusty"
"""
_validate_config(config_path)
current_repo = __salt__["aptly.get_repo"](name=name, config_path=config_path)
if current_repo:
log.debug("Repository already exists: %s", name)
return True
cmd = ["repo", "create", "-config={}".format(config_path)]
repo_params = _format_repo_args(
comment=comment,
component=component,
distribution=distribution,
uploaders_file=uploaders_file,
saltenv=saltenv,
)
cmd.extend(repo_params)
cmd.append(name)
if from_snapshot:
cmd.extend(["from", "snapshot", from_snapshot])
_cmd_run(cmd)
repo = __salt__["aptly.get_repo"](name=name, config_path=config_path)
if repo:
log.debug("Created repo: %s", name)
return True
log.error("Unable to create repo: %s", name)
return False
def set_repo(
name,
config_path=_DEFAULT_CONFIG_PATH,
comment=None,
component=None,
distribution=None,
uploaders_file=None,
saltenv="base",
):
"""
Configure the repository settings.
:param str name: The name of the repository.
:param str config_path: The path to the configuration file for the aptly instance.
:param str comment: The description of the repository.
:param str component: The default component to use when publishing.
:param str distribution: The default distribution to use when publishing.
:param str uploaders_file: The repository upload restrictions config.
:param str saltenv: The environment the file resides in.
:return: A boolean representing whether all changes succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' aptly.set_repo name="test-repo" comment="Test universe repo" component="universe" distribution="xenial"
"""
_validate_config(config_path)
failed_settings = dict()
# Only check for settings that were passed in and skip the rest.
settings = {
"comment": comment,
"component": component,
"distribution": distribution,
}
for setting in list(settings):
if settings[setting] is None:
settings.pop(setting, None)
current_settings = __salt__["aptly.get_repo"](name=name, config_path=config_path)
if not current_settings:
log.error("Unable to get repo: %s", name)
return False
# Discard any additional settings that get_repo gives
# us that are not present in the provided arguments.
for current_setting in list(current_settings):
if current_setting not in settings:
current_settings.pop(current_setting, None)
# Check the existing repo settings to see if they already have the desired values.
if settings == current_settings:
log.debug("Settings already have the desired values for repository: %s", name)
return True
cmd = ["repo", "edit", "-config={}".format(config_path)]
repo_params = _format_repo_args(
comment=comment,
component=component,
distribution=distribution,
uploaders_file=uploaders_file,
saltenv=saltenv,
)
cmd.extend(repo_params)
cmd.append(name)
_cmd_run(cmd)
new_settings = __salt__["aptly.get_repo"](name=name, config_path=config_path)
# Check the new repo settings to see if they have the desired values.
for setting in settings:
if settings[setting] != new_settings[setting]:
failed_settings.update({setting: settings[setting]})
if failed_settings:
log.error("Unable to change settings for the repository: %s", name)
return False
log.debug(
"Settings successfully changed to the desired values for repository: %s", name
)
return True
def delete_repo(name, config_path=_DEFAULT_CONFIG_PATH, force=False):
"""
Remove the repository.
:param str name: The name of the repository.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool force: Whether to remove the repository even if it is used as the source
of an existing snapshot.
:return: A boolean representing whether all changes succeeded.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' aptly.delete_repo name="test-repo"
"""
_validate_config(config_path)
force = str(bool(force)).lower()
current_repo = __salt__["aptly.get_repo"](name=name, config_path=config_path)
if not current_repo:
log.debug("Repository already absent: %s", name)
return True
cmd = [
"repo",
"drop",
"-config={}".format(config_path),
"-force={}".format(force),
name,
]
_cmd_run(cmd)
repo = __salt__["aptly.get_repo"](name=name, config_path=config_path)
if repo:
log.error("Unable to remove repo: %s", name)
return False
log.debug("Removed repo: %s", name)
return True
def list_mirrors(config_path=_DEFAULT_CONFIG_PATH):
"""
Get a list of all the mirrors.
:param str config_path: The path to the configuration file for the aptly instance.
:return: A list of the mirror names.
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' aptly.list_mirrors
"""
_validate_config(config_path)
cmd = ["mirror", "list", "-config={}".format(config_path), "-raw=true"]
cmd_ret = _cmd_run(cmd)
ret = [line.strip() for line in cmd_ret.splitlines()]
log.debug("Found mirrors: %s", len(ret))
return ret
def list_published(config_path=_DEFAULT_CONFIG_PATH):
"""
Get a list of all the published repositories.
:param str config_path: The path to the configuration file for the aptly instance.
:return: A list of the published repository names.
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' aptly.list_published
"""
_validate_config(config_path)
cmd = ["publish", "list", "-config={}".format(config_path), "-raw=true"]
cmd_ret = _cmd_run(cmd)
ret = [line.strip() for line in cmd_ret.splitlines()]
log.debug("Found published repositories: %s", len(ret))
return ret
def list_snapshots(config_path=_DEFAULT_CONFIG_PATH, sort_by_time=False):
"""
Get a list of all the snapshots.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool sort_by_time: Whether to sort by creation time instead of by name.
:return: A list of the snapshot names.
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' aptly.list_snapshots
"""
_validate_config(config_path)
cmd = ["snapshot", "list", "-config={}".format(config_path), "-raw=true"]
if sort_by_time:
cmd.append("-sort=time")
else:
cmd.append("-sort=name")
cmd_ret = _cmd_run(cmd)
ret = [line.strip() for line in cmd_ret.splitlines()]
log.debug("Found snapshots: %s", len(ret))
return ret
def cleanup_db(config_path=_DEFAULT_CONFIG_PATH, dry_run=False):
"""
Remove data regarding unreferenced packages and delete files in the package pool that
are no longer being used by packages.
    :param str config_path: The path to the configuration file for the aptly instance.
    :param bool dry_run: Report potential changes without making any changes.
:return: A dictionary of the package keys and files that were removed.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.cleanup_db
"""
_validate_config(config_path)
dry_run = str(bool(dry_run)).lower()
ret = {"deleted_keys": list(), "deleted_files": list()}
cmd = [
"db",
"cleanup",
"-config={}".format(config_path),
"-dry-run={}".format(dry_run),
"-verbose=true",
]
cmd_ret = _cmd_run(cmd)
type_pattern = r"^List\s+[\w\s]+(?P<package_type>(file|key)s)[\w\s]+:$"
list_pattern = r"^\s+-\s+(?P<package>.*)$"
current_block = None
for line in cmd_ret.splitlines():
if current_block:
match = re.search(list_pattern, line)
if match:
package_type = "deleted_{}".format(current_block)
ret[package_type].append(match.group("package"))
else:
current_block = None
# Intentionally not using an else here, in case of a situation where
# the next list header might be bordered by the previous list.
if not current_block:
match = re.search(type_pattern, line)
if match:
current_block = match.group("package_type")
log.debug("Package keys identified for deletion: %s", len(ret["deleted_keys"]))
log.debug("Package files identified for deletion: %s", len(ret["deleted_files"]))
return ret
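# Illustrative trace of the parsing above (sample lines are representative,
# not verbatim aptly output):
#
#   "List of package keys to delete:"      -> opens the deleted_keys block
#   "  - Pamd64 mypkg 1.0 3b4a5c6d"        -> appended to ret["deleted_keys"]
#   "List of files to be deleted:"         -> opens the deleted_files block
#   "  - pool/3b/4a/mypkg_1.0_amd64.deb"   -> appended to ret["deleted_files"]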
|
9da300e25592a8aa33dc19a4dfb7ae4e0da5d5e6
|
e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
|
/hummingbot/connector/exchange/bitmart/bitmart_constants.py
|
5df3f5930da9e5d640f3762aa8b2e8bd9814ce4e
|
[
"Apache-2.0"
] |
permissive
|
CoinAlpha/hummingbot
|
0d1e2bd94de1280748647108c7d7800a09546eb8
|
c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
|
refs/heads/development
| 2023-09-01T11:24:43.322137
| 2023-08-31T03:08:06
| 2023-08-31T03:08:06
| 439,330,952
| 135
| 98
|
Apache-2.0
| 2023-08-30T13:55:08
| 2021-12-17T12:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,473
|
py
|
bitmart_constants.py
|
# A single source of truth for constant variables related to the exchange
from hummingbot.core.api_throttler.data_types import RateLimit
from hummingbot.core.data_type.in_flight_order import OrderState
EXCHANGE_NAME = "bitmart"
REST_URL = "https://api-cloud.bitmart.com"
WSS_PUBLIC_URL = "wss://ws-manager-compress.bitmart.com/api?protocol=1.1"
WSS_PRIVATE_URL = "wss://ws-manager-compress.bitmart.com/user?protocol=1.1"
WS_PING_TIMEOUT = 20 * 0.8
DEFAULT_DOMAIN = ""
MAX_ORDER_ID_LEN = 32
HBOT_ORDER_ID_PREFIX = ""
BROKER_ID = "hummingbotfound"
PUBLIC_TRADE_CHANNEL_NAME = "spot/trade"
PUBLIC_DEPTH_CHANNEL_NAME = "spot/depth50"
PRIVATE_ORDER_PROGRESS_CHANNEL_NAME = "spot/user/order"
# REST API ENDPOINTS
CHECK_NETWORK_PATH_URL = "system/service"
GET_TRADING_RULES_PATH_URL = "spot/v1/symbols/details"
GET_LAST_TRADING_PRICES_PATH_URL = "spot/v1/ticker"
GET_ORDER_BOOK_PATH_URL = "spot/v1/symbols/book"
CREATE_ORDER_PATH_URL = "spot/v1/submit_order"
CANCEL_ORDER_PATH_URL = "spot/v2/cancel_order"
GET_ACCOUNT_SUMMARY_PATH_URL = "spot/v1/wallet"
GET_ORDER_DETAIL_PATH_URL = "spot/v1/order_detail"
GET_TRADE_DETAIL_PATH_URL = "spot/v1/trades"
SERVER_TIME_PATH = "system/time"
# WS API ENDPOINTS
WS_CONNECT = "WSConnect"
WS_SUBSCRIBE = "WSSubscribe"
# BitMart has a per method API limit
RATE_LIMITS = [
RateLimit(limit_id=CHECK_NETWORK_PATH_URL, limit=10, time_interval=1),
RateLimit(limit_id=GET_TRADING_RULES_PATH_URL, limit=30, time_interval=5),
RateLimit(limit_id=GET_LAST_TRADING_PRICES_PATH_URL, limit=30, time_interval=5),
RateLimit(limit_id=GET_ORDER_BOOK_PATH_URL, limit=30, time_interval=5),
RateLimit(limit_id=CREATE_ORDER_PATH_URL, limit=150, time_interval=5),
RateLimit(limit_id=CANCEL_ORDER_PATH_URL, limit=150, time_interval=5),
RateLimit(limit_id=GET_ACCOUNT_SUMMARY_PATH_URL, limit=30, time_interval=5),
RateLimit(limit_id=GET_ORDER_DETAIL_PATH_URL, limit=150, time_interval=5),
RateLimit(limit_id=GET_TRADE_DETAIL_PATH_URL, limit=30, time_interval=5),
RateLimit(limit_id=SERVER_TIME_PATH, limit=10, time_interval=1),
RateLimit(limit_id=WS_CONNECT, limit=30, time_interval=60),
RateLimit(limit_id=WS_SUBSCRIBE, limit=100, time_interval=10),
]
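# Reading the limits above (illustrative): RateLimit(limit_id=CREATE_ORDER_PATH_URL,
# limit=150, time_interval=5) permits at most 150 order submissions per
# 5-second interval before the API throttler makes further calls wait.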
ORDER_STATE = {
"1": OrderState.FAILED,
"2": OrderState.OPEN,
"3": OrderState.FAILED,
"4": OrderState.OPEN,
"5": OrderState.PARTIALLY_FILLED,
"6": OrderState.FILLED,
"7": OrderState.PENDING_CANCEL,
"8": OrderState.CANCELED,
}
|
45e4087d846cbb664e515fa63ab293a3f0fcd58b
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/programmers/난이도별/level01.자연수_뒤집어_배열로_만들기/dkdlelk99.py
|
5a2c1019a820a1e582cd1672fb8b74f3b0ba71b1
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641
| 2023-02-09T07:16:14
| 2023-02-09T07:16:14
| 258,455,710
| 175
| 49
| null | 2023-02-09T07:16:16
| 2020-04-24T08:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
dkdlelk99.py
|
def solution(n):
s = str(n)
answer = [int(i) for i in s]
answer.reverse()
return answer
print(solution(19) == [9, 1])
print(solution(12345) == [5,4,3,2,1])
|
6c99facadb0a784e84c5a1b55fd9d4cbcd406e71
|
2342b8737b9ffeb9715158b8ec74a33c7a4947f6
|
/koku/masu/test/external/accounts/labels/aws/test_aws_account_alias.py
|
e08f5aa955e7c88bab5ec5026a541dd6af845075
|
[
"Apache-2.0"
] |
permissive
|
project-koku/koku
|
444d8df05da5416c9cee606c42481c99be45f13d
|
0416e5216eb1ec4b41c8dd4999adde218b1ab2e1
|
refs/heads/main
| 2023-08-20T11:30:17.510182
| 2023-08-17T18:27:30
| 2023-08-17T18:27:30
| 126,496,611
| 225
| 94
|
Apache-2.0
| 2023-09-14T17:38:08
| 2018-03-23T14:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,215
|
py
|
test_aws_account_alias.py
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the AWSAccountAlias object."""
from unittest.mock import patch
from masu.database.account_alias_accessor import AccountAliasAccessor
from masu.external.accounts.labels.aws.aws_account_alias import AWSAccountAlias
from masu.test import MasuTestCase
from masu.util.aws.common import AwsArn
class AWSAccountAliasTest(MasuTestCase):
"""Test Cases for the AWSAccountAlias object."""
def setUp(self):
"""Set up test case."""
super().setUp()
self.account_id = "111111111111"
role_arn = f"arn:aws:iam::{self.account_id}:role/CostManagement"
self.credentials = {"role_arn": role_arn}
self.arn = AwsArn(self.credentials)
def test_initializer(self):
"""Test AWSAccountAlias initializer."""
schema = "org1234567"
accessor = AWSAccountAlias(self.credentials, schema)
self.assertEqual(accessor._arn.arn, self.arn.arn)
self.assertEqual(accessor._schema, schema)
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_names_by_organization", return_value=[])
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_alias_from_role_arn")
def test_update_account_alias_no_alias(self, mock_get_alias, mock_get_account_names):
"""Test updating alias when none is set."""
mock_get_alias.return_value = (self.account_id, None)
accessor = AWSAccountAlias(self.credentials, "org1234567")
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "org1234567")
self.assertEqual(db_access._obj.account_id, self.account_id)
self.assertIsNone(db_access._obj.account_alias)
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_names_by_organization", return_value=[])
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_alias_from_role_arn")
def test_update_account_alias_with_alias(self, mock_get_alias, mock_get_account_names):
"""Test updating alias."""
alias = "hccm-alias"
mock_get_alias.return_value = (self.account_id, alias)
accessor = AWSAccountAlias(self.credentials, "org1234567")
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "org1234567")
self.assertEqual(db_access._obj.account_id, self.account_id)
self.assertEqual(db_access._obj.account_alias, alias)
mock_get_alias.return_value = (self.account_id, None)
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "org1234567")
self.assertIsNone(db_access._obj.account_alias)
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_names_by_organization")
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_alias_from_role_arn")
def test_update_account_via_orgs(self, mock_get_alias, mock_get_account_names):
"""Test update alias with org api response."""
alias = "hccm-alias"
mock_get_alias.return_value = (self.account_id, alias)
member_account_id = "1234598760"
member_account_name = "hccm-member"
account_names = [
{"id": self.account_id, "name": alias},
{"id": member_account_id, "name": member_account_name},
]
mock_get_account_names.return_value = account_names
accessor = AWSAccountAlias(self.credentials, "org1234567")
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "org1234567")
self.assertEqual(db_access._obj.account_id, self.account_id)
self.assertEqual(db_access._obj.account_alias, alias)
member_db_access = AccountAliasAccessor(member_account_id, "org1234567")
self.assertEqual(member_db_access._obj.account_id, member_account_id)
self.assertEqual(member_db_access._obj.account_alias, member_account_name)
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_names_by_organization")
@patch("masu.external.accounts.labels.aws.aws_account_alias.get_account_alias_from_role_arn")
def test_update_account_via_orgs_partial(self, mock_get_alias, mock_get_account_names):
"""Test update alias with org api with partial response."""
alias = "hccm-alias"
mock_get_alias.return_value = (self.account_id, alias)
member_account_id = "1234596750"
account_names = [{"id": self.account_id, "name": alias}, {"id": member_account_id}]
mock_get_account_names.return_value = account_names
accessor = AWSAccountAlias(self.credentials, "org1234567")
accessor.update_account_alias()
db_access = AccountAliasAccessor(self.account_id, "org1234567")
self.assertEqual(db_access._obj.account_id, self.account_id)
self.assertEqual(db_access._obj.account_alias, alias)
member_db_access = AccountAliasAccessor(member_account_id, "org1234567")
self.assertEqual(member_db_access._obj.account_id, member_account_id)
self.assertEqual(member_db_access._obj.account_alias, member_account_id)
|
040beb9f93d4f658ead3c0a08f6b5b5616505918
|
e58aaa29a356d19f3b43b614db08e47f387dd0af
|
/sol0.py
|
87096dbc0187c9c589afbba07eb1fbec119b9f89
|
[] |
no_license
|
posquit0/PythonChallenge
|
100ad89779de24cf3039c95bf63d4c00012c2025
|
fa709fc8170d02a6511d5f07f5a7d314b180ff82
|
refs/heads/master
| 2020-05-19T21:30:40.198767
| 2015-01-27T08:12:45
| 2015-01-27T08:12:45
| 27,856,891
| 742
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
sol0.py
|
#!/usr/bin/env python
# encoding: utf-8
def main():
    print(2 ** 38)
if __name__ == '__main__':
main()
|
f998ad18b30d6be88b07d6f6cae7614982c9c950
|
3d063af394b4b55ea49ded7915d0793602015859
|
/python/news/newser.py
|
df6b57d2daf9c6fd584a99fd8756471eb796f5fa
|
[
"Apache-2.0"
] |
permissive
|
ringgaard/sling
|
00edad71195bfe71aa11e2e8dda97109c047e6e5
|
a612c5823954552ba422b441a7c7d57c1a5b4fcb
|
refs/heads/master
| 2023-08-07T15:24:10.569228
| 2023-08-02T12:25:44
| 2023-08-02T12:25:44
| 106,742,468
| 141
| 10
|
Apache-2.0
| 2020-08-03T13:25:36
| 2017-10-12T20:34:28
|
C++
|
UTF-8
|
Python
| false
| false
| 11,745
|
py
|
newser.py
|
# Copyright 2021 Ringgaard Research ApS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# News reader.
import re
import json
import sling
import sling.flags as flags
import sling.log as log
import sling.net
import sling.util
import sling.crawl.news as news
flags.define("--port",
help="port number for the HTTP server",
default=8080,
type=int,
metavar="PORT")
# Parse command line flags.
flags.parse()
news.init()
# Initialize web server.
app = sling.net.HTTPServer(flags.arg.port)
app.static("/common", "app", internal=True)
app.redirect("/", "/news/")
# Initialize web text analyzer.
webanalyzer = sling.WebsiteAnalysis()
# Initialize commons store.
commons = sling.Store()
n_name = commons["name"]
n_description = commons["description"]
n_publisher = commons["P123"]
n_publication_date = commons["P577"]
n_full_work = commons["P953"]
n_media = commons["media"]
n_author_name_string = commons["P2093"]
n_creator = commons["P170"]
n_language = commons["P407"]
n_lex = commons["lex"]
commons.freeze()
# Main page.
app.page("/news",
"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name=viewport content="width=device-width, initial-scale=1">
<title>News</title>
<link rel="icon" href="/common/image/appicon.ico" type="image/x-icon" />
<script type="module" src="/news/app.js"></script>
</head>
<body style="display: none">
<news-app id="app">
<md-toolbar>
<md-toolbar-logo></md-toolbar-logo>
<div id="title">KnolNews</div>
<md-input
id="query"
type="search"
placeholder="News URL...">
</md-input>
<md-spacer></md-spacer>
<md-icon-button id="copy" icon="content_copy"></md-icon-button>
</md-toolbar>
<md-content>
<article-panel id="article">
</article-panel>
</md-content>
</news-app>
</body>
</html>
""")
app.js("/news/app.js",
"""
import {store, frame, settings} from "/common/lib/global.js";
import {Component} from "/common/lib/component.js";
import {value_text, LabelCollector} from "/common/lib/datatype.js";
import {DocumentViewer} from "/common/lib/docviewer.js";
import {MdApp, MdCard, inform} from "/common/lib/material.js";
const n_name = frame("name");
const n_description = frame("description");
const n_media = frame("media");
const n_lex = frame("lex");
const n_publisher = frame("P123");
const n_published = frame("P577");
const n_url = frame("P953");
const n_item_type = frame("/w/item");
const n_time_type = frame("/w/time");
class NewsApp extends MdApp {
onconnected() {
this.attach(this.oncopy, "click", "#copy");
window.onkeydown = e => {
if (e.key === "Enter") this.onfetch();
if (e.key === "Escape") this.onclear();
}
}
async onfetch() {
let url = this.find("#query").value().trim();
this.style.cursor = "wait";
try {
let r = await fetch(`/news/fetch?url=${encodeURIComponent(url)}`);
let article = await store.parse(r);
this.find("#article").update(article);
this.find("md-content").scrollTop = 0;
console.log(article.text(true));
} catch (e) {
      inform("Error fetching article: " + e.toString());
}
this.style.cursor = "";
}
onclear() {
this.find("#query").clear();
}
oncopy(e) {
let article = this.find("article-panel");
if (article) article.copy();
}
static stylesheet() {
return `
$ md-input {
display: flex;
max-width: 600px;
}
$ md-input input {
font-size: 16px;
}
$ #title {
padding-right: 16px;
}
`;
}
}
Component.register(NewsApp);
class KbLink extends Component {
onconnected() {
this.attach(this.onclick, "click");
}
onclick(e) {
console.log("click", this.attrs.ref);
window.open(`${settings.kbservice}/kb/${this.attrs.ref}`, "_blank");
}
static stylesheet() {
return `
$ {
cursor: pointer;
color: #0b0080;
}
$:hover {
cursor: pointer;
text-decoration: underline;
}
`;
}
}
Component.register(KbLink);
class ArticlePanel extends MdCard {
visible() { return this.state; }
copy() {
let article = this.state;
navigator.clipboard.writeText(article.text());
}
async onupdate() {
let article = this.state;
if (article) {
let collector = new LabelCollector(store);
collector.add(article);
await collector.retrieve();
}
}
onupdated() {
let article = this.state;
this.find("#title").update(article.get(n_name));
this.find("#summary").update(article.get(n_description));
this.find("#document").update(article.get(n_lex));
this.find("#image").update(article.get(n_media));
}
render() {
function sitename(url) {
try {
let host = new URL(url).hostname;
if (host.startsWith("www.")) host = host.slice(4);
return host;
} catch (e) {
return "???";
}
}
function html(value, dt) {
if (value === undefined) return "";
let [text, encoded] = value_text(value, null, dt);
let anchor = Component.escape(text);
if (encoded && dt != n_time_type) {
let ref = value && value.id;
return `<kb-link ref="${ref}">${anchor}</kb-link>`;
} else {
return anchor;
}
}
let article = this.state;
let publisher = article.get(n_publisher);
let published = article.get(n_published);
let url = article.get(n_url);
let h = new Array();
h.push('<div class="content">');
h.push('<md-text id="title"></md-text>');
h.push('<div class="source">');
if (publisher) {
h.push(`<span id="publisher">${html(publisher, n_item_type)}</span>`);
}
if (url) {
h.push(`<a id="newsurl" href="${url}" target="_blank" rel="noreferrer">`);
h.push(sitename(url));
h.push('</a>');
}
h.push('</div>');
if (published) {
h.push(`<div id="published">${html(published, n_time_type)}</div>`);
}
h.push('<md-text id="summary"></md-text>');
h.push('<md-image id="image"></md-image>');
h.push('<document-viewer id="document"></document-viewer>');
h.push('</div>');
return h.join("");
}
static stylesheet() {
return `
$ {
display: flex;
justify-content: center;
}
$ div.content {
max-width: 800px;
}
$ #title {
display: block;
font: bold 40px helvetica;
padding: 12px 0px;
}
$ .source {
display: flex;
gap: 8px;
}
$ #publisher {
font-weight: bold;
}
$ #newsurl {
text-decoration: none;
cursor: pointer;
color: green;
}
$ #newsurl:hover {
text-decoration: underline;
}
$ #summary {
display: block;
font: 500 1.2rem anubis, serif;
line-height: 1.5;
padding: 12px 0px;
}
$ #image {
padding: 16px 0px;
}
$ #image img {
max-height: 500px;
max-width: 100%;
}
$ document-viewer {
font-size: 1.2rem;
}
`;
}
}
Component.register(ArticlePanel);
document.body.style = null;
""")
def sling_date(y, m, d):
return y * 10000 + m * 100 + d
def parse_date(s):
if s is None: return None
  m = re.match(r"^(\d\d\d\d)-(\d\d)-(\d\d)", s)
if m != None:
return sling_date(int(m[1]), int(m[2]), int(m[3]))
return s
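# Example (illustrative): parse_date("2023-05-01") returns the integer
# 20230501 via sling_date(2023, 5, 1); strings that do not start with a
# YYYY-MM-DD prefix are returned unchanged.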
checked_hostnames = set()
def fetch_news(url):
# Trim news url.
trimmed_url = news.trim_url(url)
# Try to fetch article from database.
  article = news.crawldb[trimmed_url]
# Handle redirects.
if article and article.startswith(b"#REDIRECT "):
trimmed_url = article[10:]
log.info("redirect", trimmed_url)
    article = news.crawldb[trimmed_url]
if article: print("cached", trimmed_url)
# Fetch directly if article not in database.
if article is None:
if sling.net.private(url): return 403
try:
article = news.retrieve_article(trimmed_url)
except Exception as e:
log.info("Error retrieving article:", url, e)
article = None
return trimmed_url, article
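# Note: crawl database records prefixed with b"#REDIRECT " store the
# canonical URL of the article; fetch_news follows one level of such
# redirects before falling back to a live fetch.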
ldtypes = [
"Article",
"NewsArticle",
"ReportageNewsArticle",
"article",
]
def extract_jsonld(ld, props):
  if type(ld) is not dict: return
if ld.get("@type") not in ldtypes: return
url = ld.get("url")
if url and "url" not in props: props["url"] = url
publisher = ld.get("publisher")
if publisher is not None and "publisher" not in props:
if type(publisher) is list: publisher = publisher[0]
    name = publisher.get("name")
if name is not None: props["publisher"] = name
published = ld.get("datePublished")
if published is not None and "published" not in props:
props["published"] = published
  headline = ld.get("headline")
  if headline is not None and "title" not in props:
    props["title"] = headline
description = ld.get("description")
if description is not None and "summary" not in props:
props["summary"] = description
image = ld.get("image")
if image is not None and "image" not in props:
    url = image.get("url")
if url is not None: props["image"] = url
@app.route("/news/fetch")
def handle_fetch(request):
# Get news url.
url = request.param("url")
if url is None: return 500
print("url", url)
  # Fetch news article; fetch_news returns an int status code (403) for
  # private URLs, otherwise a (url, article) pair.
  result = fetch_news(url)
  if isinstance(result, int): return result
  url, article = result
  if article is None: return 404
# Analyze web page.
webanalyzer.analyze(article)
# Extract meta data and content from article.
page = webanalyzer.extract(article)
for k, v in page.metadata().items():
print("meta", k, ":", v)
print("props start");
props = page.properties()
for k, v in props.items():
print("prop", k, ":", v)
print("props end");
for j in page.jsonld():
try:
ld = json.loads(j)
except Exception as e:
print("JSON-LD error:", e, ld)
continue
print(json.dumps(ld, indent=2))
if type(ld) is list:
for part in ld: extract_jsonld(part, props)
else:
graph = ld.get("@graph")
if graph is not None:
for part in graph: extract_jsonld(part, props)
else:
extract_jsonld(ld, props)
# Get site information.
store = sling.Store(commons)
url = props.get("url", url)
sitename = news.sitename(url)
site = news.sites.get(sitename)
if site is not None and site.qid is not None:
publisher = store[site.qid]
elif site is not None and site.name is not None:
publisher = site.name
else:
publisher = props.get("publisher")
# Get publication date.
published = parse_date(props.get("published"))
# Build article frame.
b = sling.util.FrameBuilder(store)
b.add(n_name, props.get("title"))
b.add(n_description, props.get("summary"))
b.add(n_publisher, publisher)
b.add(n_publication_date, published)
b.add(n_full_work, url)
b.add(n_media, props.get("image"))
b.add(n_author_name_string, props.get("author"))
b.add(n_creator, props.get("creator"))
b.add(n_language, props.get("language"))
text = page.text()
if text is not None and len(text) > 0: b.add(n_lex, text)
return b.create()
# Run app until shutdown.
log.info("running")
app.run()
log.info("stopped")
|
4807344f9749ea54b7e3751be4fb1ec57bc2ab36
|
b38247a5d84d8b52ce8363f8dd81629cfbe17f65
|
/reagent/test/training/cb/test_deep_represent_linucb.py
|
03f61e09238fe1e460d283930b0a584dfbe158db
|
[
"BSD-3-Clause"
] |
permissive
|
facebookresearch/ReAgent
|
7f2b82eaaf7a19e58cc50aacc307d7b001231440
|
c5f1a8371a677b4f8fb0882b600bf331eba5259d
|
refs/heads/main
| 2023-09-05T15:56:49.175072
| 2023-08-29T21:48:40
| 2023-08-29T21:48:40
| 98,565,575
| 1,480
| 290
|
BSD-3-Clause
| 2023-09-12T23:09:30
| 2017-07-27T17:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,241
|
py
|
test_deep_represent_linucb.py
|
"""
How to use:
buck test reagent:training_tests -- TestDeepRepresentLinUCB
"""
import unittest
import torch
from reagent.core.types import CBInput
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.samplers.discrete_sampler import GreedyActionSampler
from reagent.models.deep_represent_linucb import DeepRepresentLinearRegressionUCB
from reagent.models.fully_connected_network import FullyConnectedNetwork
from reagent.training.cb.deep_represent_linucb_trainer import DeepRepresentLinUCBTrainer
from reagent.training.parameters import DeepRepresentLinUCBTrainerParameters
class TestDeepRepresentLinUCB(unittest.TestCase):
"""
This tests the trainer of DeepRepresentLinUCB.
"""
def setUp(self):
self.params = DeepRepresentLinUCBTrainerParameters(lr=1e-1)
input_dim = 100
sizes = [20]
linucb_inp_dim = 5
activations = ["relu", "relu"]
customized_layers = FullyConnectedNetwork(
[input_dim] + sizes + [linucb_inp_dim],
activations,
use_batch_norm=False,
dropout_ratio=0.0,
normalize_output=False,
use_layer_norm=False,
)
policy_network = DeepRepresentLinearRegressionUCB(
input_dim=input_dim,
sizes=sizes + [linucb_inp_dim],
activations=activations,
mlp_layers=customized_layers,
)
self.policy = Policy(scorer=policy_network, sampler=GreedyActionSampler())
self.trainer = DeepRepresentLinUCBTrainer(self.policy, **self.params.asdict())
self.batch = CBInput(
context_arm_features=torch.rand(2, 2, input_dim),
action=torch.tensor([[0], [1]], dtype=torch.long),
reward=torch.tensor([[1.5], [-2.3]]),
        )  # features drawn uniformly from [0, 1) by torch.rand
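        # Shape note: context_arm_features is (batch=2, num_arms=2,
        # feature_dim=100); action selects one arm per batch element and
        # reward has shape (batch, 1).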
def test_linucb_training_step(self):
self.trainer.training_step(self.batch, 0)
assert len(self.batch.action) == len(self.batch.reward)
assert len(self.batch.action) == self.batch.context_arm_features.shape[0]
loss = self.trainer.training_step(batch=self.batch, batch_idx=0)
self.assertIsInstance(loss, torch.Tensor)
self.assertEqual(loss.size(), ())
|
d7f2383b380368828eea0319d9fe7dd9785244c1
|
399fb9306ee201a5d92d75d0981bf5529f86e0cb
|
/modules/filter_bed.py
|
a3f0c155b31d5bcdc29c912c48eed2b8a46e4abb
|
[
"MIT"
] |
permissive
|
hillerlab/TOGA
|
221b0bd52c8a62bcc59f84c1ef2145cdf7d0b3bd
|
2449d3156413fbad090a8bdf51f03e50ccb9c185
|
refs/heads/master
| 2023-09-02T22:16:22.654802
| 2023-08-16T11:16:44
| 2023-08-16T11:16:44
| 277,817,661
| 101
| 16
|
MIT
| 2023-09-09T22:58:48
| 2020-07-07T13:00:49
|
Python
|
UTF-8
|
Python
| false
| false
| 7,629
|
py
|
filter_bed.py
|
#!/usr/bin/env python3
"""Filter bed-12 file.
Remove:
- incomplete annotations
- genes without CDS
"""
import argparse
import sys
import re
from collections import Counter
from version import __version__
try:
from modules.common import die
from modules.common import eprint
except ImportError:
from common import die
    from common import eprint
__author__ = "Bogdan Kirilenko, 2020."
__email__ = "bogdan.kirilenko@senckenberg.de"
__credits__ = ["Michael Hiller", "Virag Sharma", "David Jebb"]
ALLOWED_CHARSET = "a-zA-Z0-9._-"
ALLOWED_CHARSET_RE = rf"[^{ALLOWED_CHARSET}]"
def parse_args():
"""Read args, check."""
app = argparse.ArgumentParser()
app.add_argument("input", help="Bed-12 formatted annotation track.")
app.add_argument(
"output", default="stdout", help="Output destination, stdout as default"
)
app.add_argument(
"--out_of_frame",
action="store_true",
dest="out_of_frame",
help="Do not skip out-of-frame genes.",
)
# print help if there are no args
if len(sys.argv) < 2:
app.print_help()
sys.exit(0)
args = app.parse_args()
return args
def prepare_bed_file(bed_file, output, ouf=False, save_rejected=None, only_chrom=None):
"""Filter the bed file given and save the updated version."""
new_lines = [] # keep updated lines
rejected = [] # keep IDs of skipped transcripts + the reason why
names = Counter() # we need to make sure that all names are unique
allowed_re = re.compile(ALLOWED_CHARSET_RE).search
broken_names = []
f = open(bed_file, "r")
for num, line in enumerate(f, 1):
# parse bed file according to specification
line_data = line.rstrip().split("\t")
if len(line_data) != 12:
f.close() # this is for sure an error
# it is possible only if something except a bed12 was provided
die(
"Error! Bed 12 file is required! Got a file with {len(line_data)} fields instead"
)
chrom = line_data[0]
if only_chrom and chrom != only_chrom:
            # TOGA allows performing the analysis on a specific chromosome only
            # if so, we can skip all transcripts located on other chromosomes
continue
chromStart = int(line_data[1])
chromEnd = int(line_data[2])
name = line_data[3] # gene_name usually
corr_name = not bool(allowed_re(name))
if corr_name is False:
broken_names.append(name)
# TODO: check weird characters in the transcript name
# bed_score = int(line_data[4]) # never used
# strand = line_data[5] # otherwise:
# strand = True if line_data[5] == '+' else False
thickStart = int(line_data[6])
thickEnd = int(line_data[7])
# itemRgb = line_data[8] # never used
blockCount = int(line_data[9])
blockSizes = [int(x) for x in line_data[10].split(",") if x != ""]
blockStarts = [int(x) for x in line_data[11].split(",") if x != ""]
blockEnds = [blockStarts[i] + blockSizes[i] for i in range(blockCount)]
blockAbsStarts = [blockStarts[i] + chromStart for i in range(blockCount)]
blockAbsEnds = [blockEnds[i] + chromStart for i in range(blockCount)]
blockNewStarts, blockNewEnds = [], []
names[name] += 1
if thickStart > thickEnd:
f.close() # according to bed12 specification this should never happen
sys.stderr.write(f"Problem occurred at line {num}, gene {name}\n")
die("Error! Bed file is corrupted, thickEnd MUST be >= thickStart")
elif thickStart == thickEnd:
# this means that this is a non-coding transcript
# TOGA cannot process them: we can skip it
rejected.append((name, "No CDS"))
continue
if thickStart < chromStart or thickEnd > chromEnd:
# a very strange (but still possible) case
f.close() # for sure an error with input data
sys.stderr.write(f"Problem occurred at line {num}, gene {name}\n")
die("Error! Bed file is corrupted, thickRange is outside chromRange!")
# now select CDS only
# we keep UTRs in the filtered file
# however, we need CDS to check whether it's correct (% 3 == 0)
for block_num in range(blockCount):
blockStart = blockAbsStarts[block_num]
blockEnd = blockAbsEnds[block_num]
# skip the block if it is entirely UTR
if blockEnd <= thickStart:
continue
elif blockStart >= thickEnd:
continue
# if we are here: this is not an entirely UTR exon
# it might intersect the CDS border or to be in the CDS entirely
# remove UTRs: block start must be >= CDS_start (thickStart)
# block end must be <= CDS_end (thickEnd)
blockNewStart = blockStart if blockStart >= thickStart else thickStart
blockNewEnd = blockEnd if blockEnd <= thickEnd else thickEnd
blockNewStarts.append(blockNewStart - thickStart)
blockNewEnds.append(blockNewEnd - thickStart)
if len(blockNewStarts) == 0:
            # even if thickStart != thickEnd this transcript can still be non-coding
# but if there are no blocks in the CDS -> we can catch this
rejected.append((name, "No CDS"))
continue
block_new_count = len(blockNewStarts)
blockNewSizes = [
blockNewEnds[i] - blockNewStarts[i] for i in range(block_new_count)
]
if sum(blockNewSizes) % 3 != 0 and not ouf:
            # this is an out-of-frame (or incomplete) transcript
            # ideally CDS length should be divisible by 3
            # if --out_of_frame was set (ouf is True) we keep such transcripts anyway
rejected.append((name, "Out-of-frame gene"))
continue
        # we keep this transcript: add it to the list
new_line = "\t".join([str(x) for x in line_data])
new_lines.append(new_line)
f.close()
# if not allowed characters in transcript names: list them
if len(broken_names) > 0:
eprint("Error! Some transcript names contain not allowed characters")
for t in broken_names:
eprint(t)
die(f"Allowed characters are: {ALLOWED_CHARSET}")
# if there are non-unique transcript IDs: die
# I kill it there, not earlier to show them altogether
if any(v > 1 for v in names.values()):
eprint("Error! There are non-uniq transcript IDs:")
duplicates = [k for k, v in names.items() if v > 1]
for d in duplicates:
eprint(d)
die("Abort")
if len(new_lines) == 0:
# no transcripts pass the filter: probably an input data mistake
        sys.exit(
            "Error! No reference annotation tracks left after filtering procedure! Abort"
        )
# write transcripts that passed the filter to the output file
f = open(output, "w") if output != "stdout" else sys.stdout
f.write("\n".join(new_lines) + "\n")
    if output != "stdout":
        f.close()
if save_rejected:
# save transcripts that didn't pass the filter + reason why
f = open(save_rejected, "w")
for elem in rejected:
f.write(f"{elem[0]}\t{elem[1]}\n")
f.close()
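# Worked example (hypothetical bed12 record): with chromStart=100,
# thickStart=150, thickEnd=180 and a single block of size 100 starting at
# offset 0, the CDS-clipping loop above yields blockNewStarts=[0] and
# blockNewEnds=[30] relative to thickStart; the CDS length 30 is divisible
# by 3, so the transcript passes the reading-frame check.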
def main():
"""Entry point."""
args = parse_args()
prepare_bed_file(args.input, args.output, args.out_of_frame)
sys.exit(0)
if __name__ == "__main__":
main()
|
85a157d70989303bf8b1e176439845403d92a876
|
60f51a35a7eef79b6a93b2fb048f6dddeb847a2f
|
/umap/tests/test_licence.py
|
fd5e606b4c0175b06c4764adcbcbe1529fae1c63
|
[
"WTFPL"
] |
permissive
|
umap-project/umap
|
ece2b331d18fbaac77210485f69c6c27a2514b7c
|
08f1e3f61b60e3ceb05826e389335177d7f442f8
|
refs/heads/master
| 2023-08-22T12:32:33.516511
| 2023-08-21T15:26:48
| 2023-08-21T15:26:48
| 40,087,362
| 923
| 254
|
WTFPL
| 2023-09-14T08:54:46
| 2015-08-02T17:37:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 339
|
py
|
test_licence.py
|
import pytest
from umap.models import DataLayer, Map
pytestmark = pytest.mark.django_db
def test_licence_delete_should_not_remove_linked_maps(map, licence, datalayer):
assert map.licence == licence
licence.delete()
assert Map.objects.filter(pk=map.pk).exists()
assert DataLayer.objects.filter(pk=datalayer.pk).exists()
|
6392e90a96649dbea7b82f791c4b573316c1c5d0
|
0c8ac66ae050e1a98dd8afd7525c9ed74ec5d300
|
/permission_handlers/basic.py
|
fe199aa05895cdf1da8256efec80dc4781812a3d
|
[] |
no_license
|
TareqMonwer/Django-School-Management
|
5b1c8145d04082063bc14fc9db1ce38b4db97a9d
|
3d425d300a77ad505089a3a4c0a9dc71cacbe89a
|
refs/heads/master
| 2023-08-19T23:36:34.359488
| 2023-08-13T05:53:42
| 2023-08-13T05:53:42
| 221,053,244
| 409
| 163
| null | 2023-08-13T05:53:44
| 2019-11-11T19:22:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 790
|
py
|
basic.py
|
"""
Handling permissions for users who are assigned
for basic level actions in the project. (view few data, modify some of their data etc).
UserTypes: Student, Teacher
"""
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
@login_required
def permission_error(request):
    return HttpResponse('You don\'t have the right permission to access this page.')
def user_is_verified(user):
    return user.is_authenticated and user.approval_status == 'a'
def user_is_student(user):
    return user_is_verified(user) and user.requested_role == 'student'
def user_is_teacher(user):
    return user_is_verified(user) and user.requested_role == 'teacher'
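# Usage sketch (not part of the original module, shown for illustration):
# these predicates plug into Django's user_passes_test decorator, e.g.
#
#   from django.contrib.auth.decorators import user_passes_test
#
#   @user_passes_test(user_is_teacher, login_url='/permission-error/')
#   def teacher_dashboard(request):
#       ...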
|
669ba5d3ddcb833f1e01465ccec198b7daee4b80
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/tprr/src/reader_downstream.py
|
b081a38729addd1e06a8d879010ab8225a044073
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 9,011
|
py
|
reader_downstream.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""downstream Model for reader"""
import numpy as np
from mindspore import nn, ops
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P
from mindspore import dtype as mstype
dst_type = mstype.float16
dst_type2 = mstype.float32
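# Note: matmul inputs below are cast to float16 (dst_type) for speed and the
# results are cast back to float32 (dst_type2) before numerically sensitive
# additions and normalizations.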
class Linear(nn.Cell):
"""module of reader downstream"""
def __init__(self, linear_weight_shape, linear_bias_shape):
"""init function"""
super(Linear, self).__init__()
self.matmul = nn.MatMul()
self.matmul_w = Parameter(Tensor(np.random.uniform(0, 1, linear_weight_shape).astype(np.float32)),
name=None)
self.add = P.Add()
self.add_bias = Parameter(Tensor(np.random.uniform(0, 1, linear_bias_shape).astype(np.float32)), name=None)
self.relu = nn.ReLU()
def construct(self, hidden_state):
"""construct function"""
output = self.matmul(ops.Cast()(hidden_state, dst_type), ops.Cast()(self.matmul_w, dst_type))
output = self.add(ops.Cast()(output, dst_type2), self.add_bias)
output = self.relu(output)
return output
class BertLayerNorm(nn.Cell):
"""Normalization module of reader downstream"""
def __init__(self, bert_layer_norm_weight_shape, bert_layer_norm_bias_shape, eps=1e-12):
"""init function"""
super(BertLayerNorm, self).__init__()
self.reducemean = P.ReduceMean(keep_dims=True)
self.sub = P.Sub()
self.pow = P.Pow()
self.add = P.Add()
self.sqrt = P.Sqrt()
self.div = P.Div()
self.mul = P.Mul()
self.variance_epsilon = eps
self.bert_layer_norm_weight = Parameter(Tensor(np.random.uniform(0, 1, bert_layer_norm_weight_shape)
.astype(np.float32)), name=None)
self.bert_layer_norm_bias = Parameter(Tensor(np.random.uniform(0, 1, bert_layer_norm_bias_shape)
.astype(np.float32)), name=None)
def construct(self, x):
"""construct function"""
u = self.reducemean(x, -1)
s = self.reducemean(self.pow(self.sub(x, u), 2), -1)
x = self.div(self.sub(x, u), self.sqrt(self.add(s, self.variance_epsilon)))
output = self.mul(self.bert_layer_norm_weight, x)
output = self.add(output, self.bert_layer_norm_bias)
return output
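# Note: BertLayerNorm.construct above computes the standard layer norm
# weight * (x - mean(x)) / sqrt(var(x) + eps) + bias, with mean and variance
# taken over the last dimension and built from elementary MindSpore ops.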
class SupportingOutputLayer(nn.Cell):
"""module of reader downstream"""
def __init__(self, linear_1_weight_shape, linear_1_bias_shape, bert_layer_norm_weight_shape,
bert_layer_norm_bias_shape):
"""init function"""
super(SupportingOutputLayer, self).__init__()
self.linear_1 = Linear(linear_weight_shape=linear_1_weight_shape,
linear_bias_shape=linear_1_bias_shape)
self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=bert_layer_norm_weight_shape,
bert_layer_norm_bias_shape=bert_layer_norm_bias_shape)
self.matmul = nn.MatMul()
self.matmul_w = Parameter(Tensor(np.random.uniform(0, 1, (8192, 1)).astype(np.float32)), name=None)
def construct(self, x):
"""construct function"""
output = self.linear_1(x)
output = self.bert_layer_norm(output)
output = self.matmul(ops.Cast()(output, dst_type), ops.Cast()(self.matmul_w, dst_type))
return ops.Cast()(output, dst_type2)
class PosOutputLayer(nn.Cell):
"""module of reader downstream"""
def __init__(self, linear_weight_shape, linear_bias_shape, bert_layer_norm_weight_shape,
bert_layer_norm_bias_shape):
"""init function"""
super(PosOutputLayer, self).__init__()
self.linear_1 = Linear(linear_weight_shape=linear_weight_shape,
linear_bias_shape=linear_bias_shape)
self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=bert_layer_norm_weight_shape,
bert_layer_norm_bias_shape=bert_layer_norm_bias_shape)
self.matmul = nn.MatMul()
self.linear_2_weight = Parameter(Tensor(np.random.uniform(0, 1, (4096, 1)).astype(np.float32)), name=None)
self.add = P.Add()
self.linear_2_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
def construct(self, state):
"""construct function"""
output = self.linear_1(state)
output = self.bert_layer_norm(output)
output = self.matmul(ops.Cast()(output, dst_type), ops.Cast()(self.linear_2_weight, dst_type))
output = self.add(ops.Cast()(output, dst_type2), self.linear_2_bias)
return output
class MaskInvalidPos(nn.Cell):
"""module of reader downstream"""
def __init__(self):
"""init function"""
super(MaskInvalidPos, self).__init__()
self.squeeze = P.Squeeze(2)
self.sub = P.Sub()
self.mul = P.Mul()
def construct(self, pos_pred, context_mask):
"""construct function"""
output = self.squeeze(pos_pred)
invalid_pos_mask = self.mul(self.sub(1.0, context_mask), 1e30)
output = self.sub(output, invalid_pos_mask)
return output
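# Note: MaskInvalidPos subtracts 1e30 at positions where context_mask == 0,
# effectively removing them from any subsequent softmax/argmax over start and
# end position logits.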
class Reader_Downstream(nn.Cell):
"""Downstream model for reader"""
def __init__(self):
"""init function"""
super(Reader_Downstream, self).__init__()
self.add = P.Add()
self.para_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
self.para_output_layer = SupportingOutputLayer(linear_1_weight_shape=(4096, 8192),
linear_1_bias_shape=(8192,),
bert_layer_norm_weight_shape=(8192,),
bert_layer_norm_bias_shape=(8192,))
self.sent_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
self.sent_output_layer = SupportingOutputLayer(linear_1_weight_shape=(4096, 8192),
linear_1_bias_shape=(8192,),
bert_layer_norm_weight_shape=(8192,),
bert_layer_norm_bias_shape=(8192,))
self.start_output_layer = PosOutputLayer(linear_weight_shape=(4096, 4096),
linear_bias_shape=(4096,),
bert_layer_norm_weight_shape=(4096,),
bert_layer_norm_bias_shape=(4096,))
self.end_output_layer = PosOutputLayer(linear_weight_shape=(4096, 4096),
linear_bias_shape=(4096,),
bert_layer_norm_weight_shape=(4096,),
bert_layer_norm_bias_shape=(4096,))
self.mask_invalid_pos = MaskInvalidPos()
self.gather_input_weight = Tensor(np.array(0))
self.gather = P.Gather()
self.type_linear_1 = nn.Dense(in_channels=4096, out_channels=4096, has_bias=True)
self.relu = nn.ReLU()
self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=(4096,), bert_layer_norm_bias_shape=(4096,))
self.type_linear_2 = nn.Dense(in_channels=4096, out_channels=3, has_bias=True)
def construct(self, para_state, sent_state, state, context_mask):
"""construct function"""
para_logit = self.para_output_layer(para_state)
para_logit = self.add(para_logit, self.para_bias)
sent_logit = self.sent_output_layer(sent_state)
sent_logit = self.add(sent_logit, self.sent_bias)
start = self.start_output_layer(state)
start = self.mask_invalid_pos(start, context_mask)
end = self.end_output_layer(state)
end = self.mask_invalid_pos(end, context_mask)
cls_emb = self.gather(state, self.gather_input_weight, 1)
q_type = self.type_linear_1(cls_emb)
q_type = self.relu(q_type)
q_type = self.bert_layer_norm(q_type)
q_type = self.type_linear_2(q_type)
return q_type, start, end, para_logit, sent_logit
|
92f5d22fa3a30392da22f67cc2f039ecfbf221bb
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/CreditPayCompensateDetailVO.py
|
afd8764194c85463fdd210c232f199942717b671
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,591
|
py
|
CreditPayCompensateDetailVO.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CreditPayChargePricingVO import CreditPayChargePricingVO
from alipay.aop.api.domain.CreditPayClauseVO import CreditPayClauseVO
from alipay.aop.api.domain.CreditPayIntPricingVO import CreditPayIntPricingVO
class CreditPayCompensateDetailVO(object):
def __init__(self):
self._charge_pricing_list = None
self._clauses = None
self._instal_itrv = None
self._instal_type = None
self._int_pricing = None
@property
def charge_pricing_list(self):
return self._charge_pricing_list
@charge_pricing_list.setter
def charge_pricing_list(self, value):
if isinstance(value, list):
self._charge_pricing_list = list()
for i in value:
if isinstance(i, CreditPayChargePricingVO):
self._charge_pricing_list.append(i)
else:
self._charge_pricing_list.append(CreditPayChargePricingVO.from_alipay_dict(i))
@property
def clauses(self):
return self._clauses
@clauses.setter
def clauses(self, value):
if isinstance(value, list):
self._clauses = list()
for i in value:
if isinstance(i, CreditPayClauseVO):
self._clauses.append(i)
else:
self._clauses.append(CreditPayClauseVO.from_alipay_dict(i))
@property
def instal_itrv(self):
return self._instal_itrv
@instal_itrv.setter
def instal_itrv(self, value):
self._instal_itrv = value
@property
def instal_type(self):
return self._instal_type
@instal_type.setter
def instal_type(self, value):
self._instal_type = value
@property
def int_pricing(self):
return self._int_pricing
@int_pricing.setter
def int_pricing(self, value):
if isinstance(value, CreditPayIntPricingVO):
self._int_pricing = value
else:
self._int_pricing = CreditPayIntPricingVO.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.charge_pricing_list:
if isinstance(self.charge_pricing_list, list):
for i in range(0, len(self.charge_pricing_list)):
element = self.charge_pricing_list[i]
if hasattr(element, 'to_alipay_dict'):
self.charge_pricing_list[i] = element.to_alipay_dict()
if hasattr(self.charge_pricing_list, 'to_alipay_dict'):
params['charge_pricing_list'] = self.charge_pricing_list.to_alipay_dict()
else:
params['charge_pricing_list'] = self.charge_pricing_list
if self.clauses:
if isinstance(self.clauses, list):
for i in range(0, len(self.clauses)):
element = self.clauses[i]
if hasattr(element, 'to_alipay_dict'):
self.clauses[i] = element.to_alipay_dict()
if hasattr(self.clauses, 'to_alipay_dict'):
params['clauses'] = self.clauses.to_alipay_dict()
else:
params['clauses'] = self.clauses
if self.instal_itrv:
if hasattr(self.instal_itrv, 'to_alipay_dict'):
params['instal_itrv'] = self.instal_itrv.to_alipay_dict()
else:
params['instal_itrv'] = self.instal_itrv
if self.instal_type:
if hasattr(self.instal_type, 'to_alipay_dict'):
params['instal_type'] = self.instal_type.to_alipay_dict()
else:
params['instal_type'] = self.instal_type
if self.int_pricing:
if hasattr(self.int_pricing, 'to_alipay_dict'):
params['int_pricing'] = self.int_pricing.to_alipay_dict()
else:
params['int_pricing'] = self.int_pricing
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CreditPayCompensateDetailVO()
if 'charge_pricing_list' in d:
o.charge_pricing_list = d['charge_pricing_list']
if 'clauses' in d:
o.clauses = d['clauses']
if 'instal_itrv' in d:
o.instal_itrv = d['instal_itrv']
if 'instal_type' in d:
o.instal_type = d['instal_type']
if 'int_pricing' in d:
o.int_pricing = d['int_pricing']
return o
|
cfc3409eb17bac4be849c1c684f5ceec542af3ac
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/tests/basics/list_remove.py
|
81b2b3d973c397122e90ca3bac9252eb4e00b16d
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 172
|
py
|
list_remove.py
|
a = [1, 2, 3]
print(a.remove(2))
print(a)
try:
a.remove(2)
except ValueError:
print("Raised ValueError")
else:
raise AssertionError("Did not raise ValueError")
|
2b0cd646abfbfb23aceb98e50f1666bdd1d5551b
|
2bd7a9bd2aa6ea6ef41745af4388d607ade5800e
|
/pySOT/optimization_problems/branin.py
|
19c2a77d4086aea912b91be3043e9b018149dc02
|
[
"BSD-3-Clause"
] |
permissive
|
dme65/pySOT
|
c5071eca7bc35d90d590b517bbabca13cb3e27fd
|
c8f04fd4ed30d49bb61adb008134741319b512a4
|
refs/heads/master
| 2021-11-06T02:57:12.418452
| 2021-09-07T15:33:47
| 2021-10-27T17:48:18
| 36,836,292
| 208
| 52
|
NOASSERTION
| 2021-09-07T17:49:25
| 2015-06-03T23:27:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
branin.py
|
import numpy as np
from .optimization_problem import OptimizationProblem
class Branin(OptimizationProblem):
"""Branin function
Details: http://www.sfu.ca/~ssurjano/branin.html
Global optimum: :math:`f(-\\pi,12.275)=0.397887`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self):
self.min = 0.397887
self.minimum = np.array([-np.pi, 12.275])
self.dim = 2
self.lb = -3.0 * np.ones(2)
self.ub = 3.0 * np.ones(2)
self.int_var = np.array([])
self.cont_var = np.arange(0, 2)
self.info = "2-dimensional Branin function \nGlobal optimum: " + "f(-pi, 12.275) = 0.397887"
def eval(self, x):
"""Evaluate the Branin function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
x1 = x[0]
x2 = x[1]
t = 1 / (8 * np.pi)
s = 10
r = 6
c = 5 / np.pi
b = 5.1 / (4 * np.pi ** 2)
a = 1
term1 = a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2
term2 = s * (1 - t) * np.cos(x1)
return term1 + term2 + s
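# Usage sketch (illustrative, not part of the original module): evaluate the
# function at an in-bounds point; with a=1, b=5.1/(4*pi**2), c=5/pi, r=6,
# s=10, t=1/(8*pi), the value at (0, 0) is 36 + 10*(1 - 1/(8*pi)) + 10,
# i.e. about 55.602.
if __name__ == "__main__":
    problem = Branin()
    print(problem.eval(np.array([0.0, 0.0])))  # ~55.602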
|
b7e01f205a855e5eeb4ea85f4af524fb5ae336d1
|
725ac5a0bf72829be627bf8dc82fdc51ba0f94ae
|
/LLM/peft_example/sequence_cls/bert/run_customized_by_oneself_lora.py
|
1b553a24d871f8822c59d3359ee69b6f74bfdb0f
|
[] |
no_license
|
shawroad/NLP_pytorch_project
|
fa14b6e4a156229765e1d552901d0492d8e1def3
|
1272fed2dc8fef78a9ded0f1ae1644d613a3b57b
|
refs/heads/master
| 2023-06-25T02:37:35.503251
| 2023-06-12T10:57:11
| 2023-06-12T10:57:11
| 229,694,655
| 530
| 104
| null | 2020-12-08T09:21:47
| 2019-12-23T06:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 841
|
py
|
run_customized_by_oneself_lora.py
|
"""
@file : run_customized_by_oneself_lora.py
@author : xiaolu
@email : luxiaonlp@163.com
@time : 2023-04-21
"""
from transformers.models.bert import BertForSequenceClassification, BertTokenizer
from peft import LoraConfig, get_peft_model
model = BertForSequenceClassification.from_pretrained('./mengzi_pretrain')
# # Inspect which modules the current network contains
# for x in model.modules():  # print each module
#     print(x)
# # Prepare LoRA
# Choose the modules LoRA is applied to; internally LoRA matches these names
# via regular expressions -- see the patterns accepted by LoraConfig.
TARGET_MODULES = [
"query",
"key",
]
peft_config = LoraConfig(task_type="SEQ_CLS", target_modules=TARGET_MODULES, inference_mode=False, r=8, lora_alpha=16, lora_dropout=0.1)
model = get_peft_model(model, peft_config)
print(model.print_trainable_parameters())
exit()
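# Note (assumption about peft behavior, for illustration): with r=8 and
# lora_alpha=16 only the injected low-rank adapters on the regex-matched
# "query"/"key" projections -- plus the classification head kept trainable
# for the SEQ_CLS task -- remain trainable, so print_trainable_parameters()
# should report a small fraction of the full model's parameters.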
|
76a0e7bac988ad24354644980178729153b8f620
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/test/test_modules.py
|
96b5aa4be07d6727bc74d53b9d96e1331becbf0c
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 40,743
|
py
|
test_modules.py
|
# Owner(s): ["module: nn"]
from itertools import chain, product
from inspect import signature, isgenerator
from copy import deepcopy
import tempfile
from operator import methodcaller
import torch
from torch._subclasses.meta_utils import assert_metadata_eq
from torch.testing._internal.common_cuda import with_tf32_off
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, toleranceOverride, tol, skipMeta)
from torch.testing._internal.common_modules import module_db, modules, ModuleErrorEnum, TrainEvalMode
from torch.testing._internal.common_utils import (
TestCase, run_tests, freeze_rng_state, mock_wrapper, get_tensors_from, gradcheck,
gradgradcheck)
from unittest.mock import patch, call
class TestModule(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
precision = 1e-5
rel_tol = 1e-5
def _assert_module_parameters_and_buffer_are(self, module, device, dtype):
# Check device placement and dtype for created parameters and buffers.
# Only verify floating point dtypes since that's what the kwarg or methods
# such as `float()` applies to.
if not isinstance(device, torch.device):
device = torch.device(device)
def _check_module(items, name, device=device, dtype=dtype):
for item_name, item in items:
self.assertEqual(
item.device, device,
f'{name} {item_name} is on device {item.device} instead of the expected device {device}')
if item.dtype.is_floating_point:
self.assertEqual(
item.dtype, dtype,
f'{name} {item_name} is of dtype {item.dtype} instead of the expected dtype {dtype}')
_check_module(module.named_parameters(), "Parameter")
_check_module(module.named_buffers(), "Buffer")
@modules(module_db)
def test_forward(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
dtype_to_method_caller = {
torch.float32: methodcaller("float"),
torch.float64: methodcaller("double"),
}
for module_input in module_inputs:
if module_input.forward_input is None:
continue
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Do forward pass. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
outputs = m(*args, **kwargs)
# === Compare outputs to a reference if one is specified. ===
# TODO: Handle precision
reference_fn = module_input.reference_fn
if reference_fn is not None:
ref_outputs = reference_fn(m, *args, **kwargs)
self.assertEqual(outputs, ref_outputs)
# === Use the method call and verify the parameters and buffers ===
if dtype in dtype_to_method_caller:
dtype_to_method_caller[dtype](m)
m(*args, **kwargs)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
# Tests passing factory kwargs (e.g. device / dtype) during module instantiation.
# They should be applied to any created parameters and buffers.
@modules(module_db)
def test_factory_kwargs(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for module_input in module_inputs:
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
# Check if this module creates parameters or registers buffers.
# The mock magic here passes through to the real Parameter / register_buffer
# logic and is only used to check call inputs.
module_creates_params_or_buffers = False
parameter_new = mock_wrapper(torch.nn.Parameter.__new__)
with patch.object(torch.nn.Parameter, '__new__', parameter_new):
register_buffer = mock_wrapper(torch.nn.Module.register_buffer)
with patch.object(torch.nn.Module, 'register_buffer', register_buffer):
m = module_cls(*args, **kwargs)
m.train(training)
# Check if a parameter or buffer was created with a tensor not passed to the constructor.
constructor_tensors = get_tensors_from(args, kwargs)
for mock in [parameter_new.mock, register_buffer.mock]:
for call_args, call_kwargs in mock.call_args_list:
call_tensors = get_tensors_from(call_args, call_kwargs)
if len(call_tensors) > 0 and not constructor_tensors.intersection(call_tensors):
module_creates_params_or_buffers = True
break
if not module_creates_params_or_buffers:
continue
# Instantiate module with the factory kwargs.
kwargs.update({
'device': device,
'dtype': dtype,
})
if issubclass(module_info.module_cls, torch.nn.modules.lazy.LazyModuleMixin):
# Ensure device and dtype are passed to all UninitializedParameters and UninitializedBuffers.
uninit_param_new = mock_wrapper(torch.nn.UninitializedParameter.__new__)
with patch.object(torch.nn.UninitializedParameter, '__new__', uninit_param_new):
uninit_buffer_new = mock_wrapper(torch.nn.UninitializedBuffer.__new__)
with patch.object(torch.nn.UninitializedBuffer, '__new__', uninit_buffer_new):
m = module_cls(*args, **kwargs)
m.train(training)
uninit_param_new.mock.assert_has_calls(
[call(device=device, dtype=dtype) for _ in uninit_param_new.mock.mock_calls])
uninit_buffer_new.mock.assert_has_calls(
[call(device=device, dtype=dtype) for _ in uninit_buffer_new.mock.mock_calls])
else:
# Check device placement and dtype for created parameters and buffers.
# Only verify floating point dtypes since that's what the kwarg applies to.
m = module_cls(*args, **kwargs)
m.train(training)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
@onlyCUDA
@modules(module_db)
def test_multiple_device_transfer(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs_device = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
module_inputs_cpu = module_info.module_inputs_func(module_info, device="cpu", dtype=dtype,
requires_grad=False, training=training)
for module_input_device, module_input_cpu in zip(module_inputs_device, module_inputs_cpu):
if module_input_device.forward_input is None:
continue
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input_device.constructor_input.args, module_input_device.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Do forward pass on GPU ===
input_device_args = module_input_device.forward_input.args
input_device_kwargs = module_input_device.forward_input.kwargs
m(*input_device_args, **input_device_kwargs)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
# === Move to CPU ===
input_cpu_args = module_input_cpu.forward_input.args
input_cpu_kwargs = module_input_cpu.forward_input.kwargs
m.cpu()
m(*input_cpu_args, **input_cpu_kwargs)
self._assert_module_parameters_and_buffer_are(m, "cpu", dtype)
# === Move back to GPU and forward pass ===
m.cuda()
m(*input_device_args, **input_device_kwargs)
self._assert_module_parameters_and_buffer_are(m, device, dtype)
if torch.cuda.device_count() >= 2:
# === test cross-GPU transfer works
def _to_device1(objs):
if isinstance(objs, (tuple, list)):
return type(objs)(_to_device1(item) for item in objs)
elif isinstance(objs, dict):
return {name: _to_device1(item) for name, item in objs.items()}
elif isinstance(objs, torch.Tensor):
return objs.cuda(1)
else:
return objs
input_device_1_args = _to_device1(input_device_args)
input_device_1_kwargs = _to_device1(input_device_kwargs)
m.cuda(1)
with torch.cuda.device(1):
m(*input_device_1_args, **input_device_1_kwargs)
self._assert_module_parameters_and_buffer_are(m, torch.device("cuda:1"), dtype)
@modules(module_db)
def test_repr(self, device, dtype, module_info, training):
# Test module can be represented with repr and str without errors.
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for module_input in module_inputs:
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# Check that these methods do not raise errors
m.__repr__()
str(m)
@modules(module_db)
def test_pickle(self, device, dtype, module_info, training):
# Test that module can be pickled and unpickled.
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for module_input in module_inputs:
if module_input.forward_input is None:
continue
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Do forward pass. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
output = m(*args, **kwargs)
# === Check unpickled module gives the same output. ===
with tempfile.TemporaryFile() as f:
torch.save(m, f)
f.seek(0)
m_copy = torch.load(f)
output_from_copy = m_copy(*args, **kwargs)
self.assertEqual(output, output_from_copy)
@skipMeta
@modules([module_info for module_info in module_db
if 'inplace' in signature(module_info.module_cls).parameters])
def test_check_inplace(self, device, dtype, module_info, training):
# Check if the inplace variant of the module gives the same result as the out of place
# variant.
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=True, training=training)
for module_input in module_inputs:
if module_input.forward_input is None:
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m_op = module_cls(*args, **kwargs, inplace=False)
m_op.to(device).to(dtype)
m_op.train(training)
m_inplace = module_cls(*args, **kwargs, inplace=True)
m_inplace.to(device).to(dtype)
m_inplace.train(training)
# === Inplace modules only supports inplace operations on the first argument ===
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
# === Do not allow the first input to be in input_kwargs ===
forward_sig = signature(m_op).parameters
self.assertGreaterEqual(len(forward_sig), 1)
            first_param_name = next(iter(forward_sig))
self.assertNotIn(first_param_name, input_kwargs)
# === Out of place operation does not write to original tensor ===
self.assertGreaterEqual(len(input_args), 1)
input_version = input_args[0]._version
with freeze_rng_state():
output_op = m_op(*input_args, **input_kwargs)
self.assertEqual(input_args[0]._version, input_version)
# === Check that the inplace operation gives the same result ===
input_arg_copy = deepcopy(input_args)
input_arg_clone = tuple(i.clone() for i in input_arg_copy)
input_clone_version = input_arg_clone[0]._version
with freeze_rng_state():
output_ip = m_inplace(*input_arg_clone, **input_kwargs)
self.assertGreater(input_arg_clone[0]._version, input_clone_version)
self.assertEqual(output_op, output_ip)
# === Check that the gradients are the same ===
grad = output_op.data.clone().normal_()
output_op.backward(grad)
output_ip.backward(grad)
self.assertEqual(input_args[0].grad, input_arg_copy[0].grad)
def _traverse_obj(self, obj, func):
if isinstance(obj, (tuple, list)):
return type(obj)(self._traverse_obj(o, func) for o in obj)
elif isgenerator(obj):
return tuple(self._traverse_obj(o, func) for o in obj)
elif isinstance(obj, dict):
return {name: self._traverse_obj(o, func) for name, o in obj.items()}
elif isinstance(obj, (torch.Tensor, torch.nn.Parameter)):
return func(obj)
def _retain_grad(self, obj):
# gradients needs to be retained to check for grad. This is useful when
# non-leafs are present in the graph.
def inner_retain_grad(obj):
if obj.requires_grad:
obj.retain_grad()
self._traverse_obj(obj, inner_retain_grad)
def _get_grads(self, obj):
def inner_get_grad(obj):
if obj.requires_grad:
return obj.grad
return self._traverse_obj(obj, inner_get_grad)
def _zero_grad(self, obj):
def inner_zero_grad(obj):
if obj.grad is not None:
obj.grad = None
self._traverse_obj(obj, inner_zero_grad)
@modules(module_db)
def test_non_contiguous_tensors(self, device, dtype, module_info, training):
# Check modules work with non-contiguous tensors
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=True, training=training)
def _make_non_contiguous(obj):
def inner_make_non_contiguous(obj):
# Scalar tensors can not be made non-contiguous
if not isinstance(obj, torch.Tensor) or obj.dim() == 0:
return obj
out = torch.repeat_interleave(obj, 2, dim=-1)
out = out[..., ::2].detach()
out.requires_grad = obj.requires_grad
return out
return self._traverse_obj(obj, inner_make_non_contiguous)
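        # Note: repeat_interleave along the last dim followed by a stride-2
        # slice produces a tensor with the original values but non-contiguous
        # storage, which is exactly what this test needs to exercise.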
def _can_be_noncontiguous(obj):
if isinstance(obj, (tuple, list)):
return any(_can_be_noncontiguous(o) for o in obj)
elif isinstance(obj, dict):
return any(_can_be_noncontiguous(o) for o in obj.values())
# scalar tensors can not be non-contiguous
if not isinstance(obj, torch.Tensor) or obj.dim() == 0:
return False
return True
for module_input in module_inputs:
if module_input.forward_input is None:
continue
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
if not (_can_be_noncontiguous(input_args) or _can_be_noncontiguous(input_kwargs)):
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
self._retain_grad((input_args, input_kwargs))
# === Forward with default input
with freeze_rng_state():
default_output = m(*input_args, **input_kwargs)
if isinstance(default_output, torch.Tensor):
grad_output = default_output.clone().detach_().normal_()
default_output.backward(grad_output, retain_graph=True)
else:
grad_output = tuple(self._traverse_obj(o, lambda o: o.clone().detach_().normal_() if o.requires_grad else None)
for o in default_output)
flattened_default_output, _ = torch.utils._pytree.tree_flatten(default_output)
flattened_grad_output, _ = torch.utils._pytree.tree_flatten(grad_output)
for o, g_o in zip(flattened_default_output, flattened_grad_output):
if (o.requires_grad):
o.backward(g_o, retain_graph=True)
default_input_args_grad, default_input_kwargs_grad = deepcopy(self._get_grads((input_args, input_kwargs)))
default_param_grad = deepcopy([p.grad for p in m.parameters()])
# === Construct non-contiguous tensors ===
nc_input_args, nc_input_kwargs = _make_non_contiguous((input_args, input_kwargs))
nc_grad_output = _make_non_contiguous(grad_output)
# === Compare results with non-contiguous and contiguous tensors ===
inputs = [(input_args, input_kwargs), (nc_input_args, nc_input_kwargs)]
grads = [grad_output, nc_grad_output]
for (in_args, in_kwargs), g_out in product(inputs, grads):
g_out_copy = deepcopy(g_out)
self._zero_grad((in_args, in_kwargs))
self._zero_grad(m.parameters())
with freeze_rng_state():
out = m(*in_args, **in_kwargs)
if isinstance(out, torch.Tensor):
out.backward(g_out_copy, retain_graph=True)
else:
flattened_out, _ = torch.utils._pytree.tree_flatten(out)
flattened_g_out_copy, _ = torch.utils._pytree.tree_flatten(g_out_copy)
for o, g_o in zip(flattened_out, flattened_g_out_copy):
if o.requires_grad:
o.backward(g_o, retain_graph=True)
input_args_grad, input_kwargs_grad = self._get_grads((in_args, in_kwargs))
self.assertEqual(out, default_output)
self.assertEqual(input_args_grad, default_input_args_grad, atol=1e-4, rtol=0)
self.assertEqual(input_kwargs_grad, default_input_kwargs_grad, atol=1e-4, rtol=0)
param_grad = [p.grad for p in m.parameters()]
self.assertEqual(param_grad, default_param_grad)
def _test_gradients_helper(self, device, dtype, module_info, training, check):
# Check gradients
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=True, training=training)
# === Set nondet tol for gradcheck to user-defined value if on CUDA and cudNN is enabled
gradcheck_nondet_tol = 0.0
if (torch.device(device).type == 'cuda' and torch.backends.cudnn.enabled):
gradcheck_nondet_tol = module_info.gradcheck_nondet_tol
for module_input in module_inputs:
if module_input.forward_input is None:
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
params = tuple(m.parameters())
# === Lazy modules need to see an input to initialize params before gradcheck is run. ===
input_args, input_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
if issubclass(module_info.module_cls, torch.nn.modules.lazy.LazyModuleMixin):
with torch.no_grad():
m(*input_args, **input_kwargs)
# === Perform gradient check on the input_args ===
other_kwargs = {}
kwarg_tensors = []
for name, obj in input_kwargs.items():
if isinstance(obj, torch.Tensor):
kwarg_tensors.append((name, obj))
else:
other_kwargs[name] = obj
def fn_to_gradcheck(*flat_input_and_params):
input_and_params = torch.utils._pytree.tree_unflatten(flat_input_and_params, flat_spec)
new_input_args = input_and_params[:len(input_args)]
kwarg_args = input_and_params[-len(kwarg_tensors):]
new_kwargs = {name: obj for (name, _), obj in zip(kwarg_tensors, kwarg_args)}
with freeze_rng_state():
output = m(*new_input_args, **new_kwargs, **other_kwargs)
output_flattened, _ = torch.utils._pytree.tree_flatten(output)
return output_flattened
# check total derivative
grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
self.assertTrue(check(fn_to_gradcheck, flat_input, nondet_tol=gradcheck_nondet_tol))
# check partial derivatives
old_params_requires_grad = [p.requires_grad for p in params]
for p in params:
p.requires_grad = False
old_kwargs_requires_grad = [obj.requires_grad for (_, obj) in kwarg_tensors]
for (_, obj) in kwarg_tensors:
obj.requires_grad = False
for p, old in zip(params, old_params_requires_grad):
p.requires_grad = old
grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
self.assertTrue(check(fn_to_gradcheck, flat_input, nondet_tol=gradcheck_nondet_tol))
p.requires_grad = False
for (_, obj), old in zip(kwarg_tensors, old_kwargs_requires_grad):
obj.requires_grad = old
grad_input = input_args + params + tuple(obj for (_, obj) in kwarg_tensors)
flat_input, flat_spec = torch.utils._pytree.tree_flatten(grad_input)
self.assertTrue(check(fn_to_gradcheck, flat_input, nondet_tol=gradcheck_nondet_tol))
obj.requires_grad = False
@modules(module_db, allowed_dtypes=[torch.double])
def test_grad(self, device, dtype, module_info, training):
self._test_gradients_helper(device, dtype, module_info, training, gradcheck)
@modules([m for m in module_db if m.supports_gradgrad],
allowed_dtypes=[torch.double])
def test_gradgrad(self, device, dtype, module_info, training):
self._test_gradients_helper(device, dtype, module_info, training, gradgradcheck)
@onlyCUDA
@with_tf32_off # Turn off TF32 to compute at full precision https://github.com/pytorch/pytorch/issues/86798
@toleranceOverride({torch.float32: tol(5e-2, 0),
torch.float64: tol(4e-4, 0)})
@modules(module_db)
def test_cpu_gpu_parity(self, device, dtype, module_info, training):
# TODO: RNN / GRU / LSTM don't support backwards on eval mode for cuDNN; skip this in a
# nicer way for eval mode only.
# See https://github.com/pytorch/pytorch/issues/79161
rnn_modules = {torch.nn.RNN, torch.nn.GRU, torch.nn.LSTM}
if (module_info.module_cls in rnn_modules
and not training
and 'cuda' in device
and torch.backends.cudnn.enabled):
return
# Test cpu and gpu results are the same
module_cls = module_info.module_cls
module_inputs_cpu = module_info.module_inputs_func(module_info, device="cpu", dtype=dtype,
requires_grad=True, training=training)
def _to_device(obj):
if isinstance(obj, torch.Tensor):
res = obj.detach().to(device=device)
res.requires_grad = obj.requires_grad
return res
elif isinstance(obj, tuple):
return tuple(_to_device(o) for o in obj)
elif isinstance(obj, dict):
return {key: _to_device(o) for key, o in obj.items()}
else:
return deepcopy(obj)
for module_input in module_inputs_cpu:
# === Move input from cpu to device ===
cpu_forward_args = module_input.forward_input.args
cpu_forward_kwargs = module_input.forward_input.kwargs
gpu_forward_args, gpu_forward_kwargs = _to_device((cpu_forward_args, cpu_forward_kwargs))
self._retain_grad((cpu_forward_args, cpu_forward_kwargs, gpu_forward_args, gpu_forward_kwargs))
# === Construct module on cpu and gpu ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
cpu_module = module_cls(*args, **kwargs).to(dtype).to("cpu")
cpu_module.train(training)
gpu_module = module_cls(*args, **kwargs).to(dtype).to(device)
gpu_module.train(training)
# === Lazy modules need to see an input to initialize params ===
if issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin):
with torch.no_grad():
cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
gpu_p.data.copy_(cpu_p)
# === Compare forward output between cpu and gpu ===
cpu_outputs = cpu_module(*cpu_forward_args, **cpu_forward_kwargs)
gpu_outputs = gpu_module(*gpu_forward_args, **gpu_forward_kwargs)
self.assertEqual(cpu_outputs, gpu_outputs)
# === Run backwards on CPU and GPU and compare results ===
def check_backward(cpu_output, gpu_output):
cpu_grad_output = cpu_output.clone().normal_()
gpu_grad_output = cpu_grad_output.type_as(gpu_output)
cpu_output.backward(cpu_grad_output, retain_graph=True)
gpu_output.backward(gpu_grad_output, retain_graph=True)
cpu_grad_input = self._get_grads(cpu_forward_args)
gpu_grad_input = self._get_grads(gpu_forward_args)
self.assertEqual(cpu_grad_input, gpu_grad_input)
for cpu_p, gpu_p in zip(cpu_module.parameters(), gpu_module.parameters()):
self.assertEqual(cpu_p.grad, gpu_p.grad)
cpu_grad_kwarg_input = self._get_grads(cpu_forward_kwargs)
gpu_grad_kwarg_input = self._get_grads(gpu_forward_kwargs)
self.assertEqual(cpu_grad_kwarg_input, gpu_grad_kwarg_input)
for _ in range(5):
if isinstance(cpu_outputs, torch.Tensor):
check_backward(cpu_outputs, gpu_outputs)
else:
flatten_cpu_outputs, _ = torch.utils._pytree.tree_flatten(cpu_outputs)
flatten_gpu_outputs, _ = torch.utils._pytree.tree_flatten(gpu_outputs)
for cpu_output, gpu_output in zip(flatten_cpu_outputs, flatten_gpu_outputs):
if cpu_output.requires_grad:
check_backward(cpu_output, gpu_output)
@with_tf32_off
@modules(module_db)
def test_memory_format(self, device, dtype, module_info, training):
is_sm86or80 = device.startswith("cuda") and (torch.cuda.get_device_capability(0) == (8, 6)
or torch.cuda.get_device_capability(0) == (8, 0))
# TODO tighten it to a specific module
atol, rtol = (3e-3, 7e-3) if is_sm86or80 else (None, None)
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
module_memformat_affects_out = module_info.module_memformat_affects_out
def _get_mem_formats(channels_last=False, channels_last_3d=False):
if channels_last:
return ([torch.contiguous_format, torch.channels_last],
[torch.preserve_format, torch.contiguous_format, torch.channels_last])
elif channels_last_3d:
return ([torch.contiguous_format, torch.channels_last_3d],
[torch.preserve_format, torch.contiguous_format, torch.channels_last_3d])
else:
return ([torch.contiguous_format],
[torch.preserve_format, torch.contiguous_format])
# Check that at least one Tensor input has dim == n
def _check_dims(obj, n):
if isinstance(obj, torch.Tensor):
return obj.dim() == n
elif isinstance(obj, (tuple, list)):
return any(_check_dims(o, n) for o in obj)
else:
return False
# Called after _check_dims, when we know that >= 1 tensor can be converted to mem_format
def _to_mem_format(mem_format, obj):
def inner_to_mem_format(obj):
d = obj.dim()
if ((mem_format == torch.channels_last and d != 4)
or (mem_format == torch.channels_last_3d and d != 5)):
return obj
return obj.to(memory_format=mem_format)
return self._traverse_obj(obj, inner_to_mem_format)
def _check_out_mem_format(output, input_mem_format, module_mem_format):
def inner_check_out_mem_format(output):
d = output.dim()
if (d == 4 and ((input_mem_format == torch.channels_last)
or (module_mem_format == torch.channels_last and module_memformat_affects_out))):
self.assertTrue(output.is_contiguous(memory_format=torch.channels_last))
elif (d == 5 and ((input_mem_format == torch.channels_last_3d)
or (module_mem_format == torch.channels_last_3d and module_memformat_affects_out))):
self.assertTrue(output.is_contiguous(memory_format=torch.channels_last_3d))
else:
self.assertTrue(output.is_contiguous())
return self._traverse_obj(output, inner_check_out_mem_format)
for module_input in module_inputs:
if module_input.forward_input is None:
continue
supports_channels_last = _check_dims(module_input.forward_input.args, 4)
supports_channels_last_3d = _check_dims(module_input.forward_input.args, 5)
input_mem_formats, module_mem_formats = _get_mem_formats(supports_channels_last, supports_channels_last_3d)
with freeze_rng_state():
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# === Get output in (contiguous, contiguous) configuration. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
desired_outputs = m(*args, **kwargs)
for input_mem_format in input_mem_formats:
# === Change memformat of input. ===
module_input.forward_input.args = _to_mem_format(input_mem_format,
module_input.forward_input.args)
module_input.forward_input.kwargs = _to_mem_format(input_mem_format,
module_input.forward_input.kwargs)
for module_mem_format in module_mem_formats:
# === Change memformat of module ===
m.to(memory_format=module_mem_format)
# === Do forward pass. ===
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
outputs = m(*args, **kwargs)
# === Compare outputs to (contiguous, contiguous) output. ===
                        if input_mem_format != torch.contiguous_format or module_mem_format != torch.contiguous_format:
self.assertEqual(outputs, desired_outputs, rtol=rtol, atol=atol)
# === Check mem format of output. ===
_check_out_mem_format(outputs, input_mem_format, module_mem_format)
# Test whether train and eval modes differ for each module. Use to verify
# that the ModuleInfo entry flag is correct.
@modules(module_db, train_eval_mode=TrainEvalMode.train_only)
def test_if_train_and_eval_modes_differ(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
# Run forward inputs through to see if the training flag is accessed during forward.
for module_input in module_inputs:
if module_input.forward_input is None:
continue
# === Instantiate the module. ===
args, kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
m = module_cls(*args, **kwargs)
m.to(device).to(dtype)
m.train(training)
# Remove training attribute and see if forward still works.
delattr(m, 'training')
# === Do forward pass. ===
try:
args, kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
m(*args, **kwargs)
except AttributeError as e:
if "'training'" in str(e):
self.assertTrue(module_info.train_and_eval_differ,
f"The ModuleInfo entry for {module_info.name} has "
"train_and_eval_differ=False, but the training mode was found to "
"affect the forward pass. Consider setting train_and_eval_differ=True "
"for this ModuleInfo entry.")
else:
raise e
@onlyCPU
@modules(module_db)
def test_device_ctx_init(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
module_inputs = module_info.module_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
with torch.device('meta'):
module_inputs_meta = module_info.module_inputs_func(module_info, device=None, dtype=dtype,
requires_grad=False, training=training)
for module_input, module_input_meta in zip(module_inputs, module_inputs_meta):
c_args, c_kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
fw_args, fw_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
c_args_meta, c_kwargs_meta = module_input_meta.constructor_input.args, module_input_meta.constructor_input.kwargs
fw_args_meta, fw_kwargs_meta = module_input_meta.forward_input.args, module_input_meta.forward_input.kwargs
m_cpu = module_cls(*c_args, **c_kwargs)
with torch.device('meta'):
m = module_cls(*c_args_meta, **c_kwargs_meta)
for (p_meta, p_cpu) in chain(zip(m.parameters(), m_cpu.parameters()),
zip(m.buffers(), m_cpu.buffers())):
if torch.nn.parameter.is_lazy(p_meta):
continue
self.assertTrue(p_meta.is_meta)
assert_metadata_eq(self.assertEqual, p_meta, p_cpu)
@modules([module for module in module_db if module.module_error_inputs_func is not None])
def test_errors(self, device, dtype, module_info, training):
module_cls = module_info.module_cls
error_inputs = module_info.module_error_inputs_func(module_info, device=device, dtype=dtype,
requires_grad=False, training=training)
for error_input in error_inputs:
module_input = error_input.module_error_input
c_args, c_kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
if error_input.error_on == ModuleErrorEnum.CONSTRUCTION_ERROR:
with self.assertRaisesRegex(error_input.error_type, error_input.error_regex):
m = module_cls(*c_args, **c_kwargs)
elif error_input.error_on == ModuleErrorEnum.FORWARD_ERROR:
m = module_cls(*c_args, **c_kwargs)
fw_args, fw_kwargs = module_input.forward_input.args, module_input.forward_input.kwargs
with self.assertRaisesRegex(error_input.error_type, error_input.error_regex):
m(*fw_args, **fw_kwargs)
else:
raise NotImplementedError(f"Unknown error type {error_input.error_on}")
instantiate_device_type_tests(TestModule, globals(), allow_mps=True)
if __name__ == '__main__':
run_tests()
|
5151b6327ca4e1e032e011216a1b30b39042fd56
|
5e66707ccdea0c000e6e269fce6907ee3cfcdbde
|
/galaxy/importer/loaders/module.py
|
8a0b2aa9e97eb2962f3670aa0db72a7fc27daaae
|
[
"Apache-2.0"
] |
permissive
|
ansible/galaxy
|
f629046d579d7cd4e484cdf1e27ad68fe7b170a2
|
6a374cacdf0f04de94486913bba5285e24e178d3
|
refs/heads/devel
| 2023-09-04T09:21:43.542346
| 2023-08-25T16:58:09
| 2023-08-25T16:58:09
| 24,333,272
| 972
| 419
|
Apache-2.0
| 2023-08-25T17:38:20
| 2014-09-22T15:04:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,259
|
py
|
module.py
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import ast
import os
from galaxy import constants
from galaxy.importer import linters
from galaxy.importer import models
from galaxy.importer.utils import ast as ast_utils
from galaxy.importer.loaders import base
from galaxy.importer import exceptions as exc
class ModuleLoader(base.BaseLoader):
content_types = constants.ContentType.MODULE
linters = linters.Flake8Linter
def __init__(self, content_type, path, root, logger=None):
super().__init__(content_type, path, root, logger=logger)
self.documentation = None
self.metadata = None
def make_name(self):
return base.make_module_name(self.path)
def load(self):
self._parse_module()
description = ''
if self.documentation:
description = self.documentation.get('short_description', '')
readme = self._get_readme(os.path.dirname(self.path))
return models.Content(
name=self.name,
path=self.rel_path,
content_type=self.content_type,
readme=readme,
description=description,
metadata={
'ansible_metadata': self.metadata,
'documentation': self.documentation
}
)
def _parse_module(self):
with open(self.path) as fp:
code = fp.read()
try:
module = ast.parse(code) # type: ast.Module
assert isinstance(module, ast.Module), 'Module expected'
except SyntaxError as e:
raise exc.ContentLoadError(
"Syntax error while parsing module {0}: Line {1}:{2} {3}"
.format(os.path.basename(self.path),
e.lineno, e.offset, e.text))
for node in module.body:
if not isinstance(node, ast.Assign):
continue
            target = node.targets[0]
            if not isinstance(target, ast.Name):
                continue
            name = target.id
if name == 'ANSIBLE_METADATA':
                self.metadata = self._parse_metadata(node)
elif name == 'DOCUMENTATION':
try:
self.documentation = ast_utils.parse_ast_doc(node)
except ValueError as e:
self.log.warning('Cannot parse "DOCUMENTATION": {0}'
.format(e))
    def _parse_metadata(self, node):
        # type: (ast.Dict) -> dict
if not isinstance(node.value, ast.Dict):
self.log.warning('Cannot parse "ANSIBLE_METADATA" field, '
'dict expected')
return
return ast.literal_eval(node.value)
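# Usage sketch (illustrative only; the path and root values are hypothetical):
#   loader = ModuleLoader(constants.ContentType.MODULE,
#                         path='library/my_module.py', root='/tmp/checkout')
#   content = loader.load()  # models.Content carrying ANSIBLE_METADATA
#                            # and DOCUMENTATION parsed from the module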
|
66868f5e2a055031b6304b2edabe9f3ba42ca5db
|
a41e1498e3c080f47abd8e8e57157548df3ebbf1
|
/scripts/tests/test_inconsistent_namespace_check.py
|
64f66e6168efeb54d268e2d411f0e73cea9a1fad
|
[
"BSD-3-Clause"
] |
permissive
|
pandas-dev/pandas
|
e7e639454a298bebc272622e66faa9829ea393bb
|
c7325d7e7e77ecb4a4e57b48bc25265277c75712
|
refs/heads/main
| 2023-09-01T12:42:07.927176
| 2023-09-01T11:14:10
| 2023-09-01T11:14:10
| 858,127
| 36,166
| 18,728
|
BSD-3-Clause
| 2023-09-14T21:18:41
| 2010-08-24T01:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
test_inconsistent_namespace_check.py
|
import pytest
from scripts.check_for_inconsistent_pandas_namespace import (
check_for_inconsistent_pandas_namespace,
)
BAD_FILE_0 = (
"from pandas import Categorical\n"
"cat_0 = Categorical()\n"
"cat_1 = pd.Categorical()"
)
BAD_FILE_1 = (
"from pandas import Categorical\n"
"cat_0 = pd.Categorical()\n"
"cat_1 = Categorical()"
)
BAD_FILE_2 = (
"from pandas import Categorical\n"
"cat_0 = pandas.Categorical()\n"
"cat_1 = Categorical()"
)
GOOD_FILE_0 = (
"from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = Categorical()"
)
GOOD_FILE_1 = "cat_0 = pd.Categorical()\ncat_1 = pd.Categorical()"
GOOD_FILE_2 = "from array import array\nimport pandas as pd\narr = pd.array([])"
PATH = "t.py"
@pytest.mark.parametrize(
"content, expected",
[
(BAD_FILE_0, "t.py:3:8: Found both 'pd.Categorical' and 'Categorical' in t.py"),
(BAD_FILE_1, "t.py:2:8: Found both 'pd.Categorical' and 'Categorical' in t.py"),
(
BAD_FILE_2,
"t.py:2:8: Found both 'pandas.Categorical' and 'Categorical' in t.py",
),
],
)
def test_inconsistent_usage(content, expected, capsys):
with pytest.raises(SystemExit):
check_for_inconsistent_pandas_namespace(content, PATH, replace=False)
result, _ = capsys.readouterr()
assert result == expected
@pytest.mark.parametrize("content", [GOOD_FILE_0, GOOD_FILE_1, GOOD_FILE_2])
@pytest.mark.parametrize("replace", [True, False])
def test_consistent_usage(content, replace):
# should not raise
check_for_inconsistent_pandas_namespace(content, PATH, replace=replace)
@pytest.mark.parametrize("content", [BAD_FILE_0, BAD_FILE_1, BAD_FILE_2])
def test_inconsistent_usage_with_replace(content):
result = check_for_inconsistent_pandas_namespace(content, PATH, replace=True)
expected = (
"from pandas import Categorical\ncat_0 = Categorical()\ncat_1 = Categorical()"
)
assert result == expected
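# Direct-call sketch mirroring the tests above: with replace=True the checker
# returns the rewritten content instead of printing an error and exiting.
#   fixed = check_for_inconsistent_pandas_namespace(BAD_FILE_0, PATH, replace=True)
#   assert "pd.Categorical" not in fixed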
|
a304c1061f05c195ec4d900369ed75f54b8539a6
|
479a9c76b19b84d6cde69305828031cd2531aa56
|
/testing/MLDB-986-recording.py
|
0567398ab243f066c54ddce87acf1428fe8619c8
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mldbai/mldb
|
d36801bd99dd3f82d7557cd0f438b0121f63f22c
|
19bc4bc92a41ee8ad4eab0979dffd9c985d95758
|
refs/heads/master
| 2023-09-03T22:59:11.621839
| 2022-12-30T18:42:24
| 2022-12-30T18:42:24
| 47,634,692
| 701
| 107
|
Apache-2.0
| 2023-02-10T23:08:05
| 2015-12-08T16:34:16
|
C++
|
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
MLDB-986-recording.py
|
#
# MLDB-986-recording.py
# mldb.ai inc, 2015
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
#
from mldb import mldb
dataset_config = {
'type' : 'sparse.mutable',
'id' : 'example'
}
ds = mldb.create_dataset(dataset_config)
out = mldb.post('/v1/datasets/example/rows', {
"rowName": "first row",
"columns": [["x", {"num" : "NaN"}, 0]]
})
mldb.log(out)
out = mldb.post('/v1/datasets/example/rows', {
"rowName": "second row",
"columns": [["y", {"ts" : "1969-07-20T01:02:03.000Z"}, 0]]
})
mldb.log(out)
out = mldb.post('/v1/datasets/example/rows', {
"rowName": "third row",
"columns": [["z", {"num" : "Inf"}, 0]]
})
mldb.log(out)
out = mldb.post('/v1/datasets/example/rows', {
"rowName": "fourth row",
"columns": [["w", {"interval" : "1D"}, 0]]
})
mldb.log(out)
ds.commit()
result = mldb.get('/v1/query',
q='select x + 1 as output from example where x IS NOT null')
mldb.log(result)
assert result.json()[0]['columns'][0][1]["num"] == "NaN"
result = mldb.get(
'/v1/query',
q="select y + INTERVAL '2D' as output from example where y IS NOT null")
mldb.log(result)
assert result.json()[0]['columns'][0][1]["ts"] == "1969-07-22T01:02:03Z"
result = mldb.get('/v1/query',
q='select z + 1 as output from example where z IS NOT null')
mldb.log(result)
assert result.json()[0]['columns'][0][1]["num"] == "Inf"
result = mldb.get(
'/v1/query',
q='select w + INTERVAL "1W" as output from example where w IS NOT null')
mldb.log(result)
assert result.json()[0]['columns'][0][1]["interval"] == "8D"
# MLDB-955
result = mldb.query('select x + 1 as output from example where x IS NOT null')
mldb.log(result)
assert result[1][1], "NaN"
result = mldb.query('select z + 1 as output from example where z IS NOT null')
mldb.log(result)
assert result[1][1] == "Inf"
result = mldb.query('select y as output from example where y IS NOT null')
mldb.log(result)
assert result[1][1] == "1969-07-20T01:02:03Z"
result = mldb.query('select w as output from example where w IS NOT null')
mldb.log(result)
assert result[1][1] == "1D"
request.set_return("success")
|
0c5b79c3123d324011b82fc0becfc92b94b10eb7
|
f9f074c44b67a11d4630b5e1cc15e016e8d73cc8
|
/factory-ai-vision/EdgeSolution/modules/CVCaptureModule/streams.py
|
1f85b740857d8279d243481f2d68e8fb0fca4619
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-intelligent-edge-patterns
|
361694680c7e48d3761c5416175788355b684dcd
|
1d2f42cbf9f21157c1e1abf044b26160dfed5b16
|
refs/heads/master
| 2023-05-26T13:15:47.085088
| 2023-02-28T17:25:53
| 2023-02-28T17:25:53
| 186,706,933
| 193
| 164
|
MIT
| 2023-02-28T17:25:55
| 2019-05-14T22:02:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 11,444
|
py
|
streams.py
|
import base64
import json
import logging
import os
import threading
import time
import cv2
import numpy as np
import requests
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
IMG_WIDTH = 960
IMG_HEIGHT = 540
class Stream:
def __init__(self, cam_id, cam_source, fps, endpoint, sender):
self.cam_id = cam_id
self.mutex = threading.Lock()
self.cam_source = cam_source
self.endpoint = endpoint
# if self.model.is_gpu:
# frameRate = 30
# else:
# frameRate = 10
self.cam = None
self.fps = max(0.1, fps)
self.cam_is_alive = True
self.IMG_WIDTH = 960
self.IMG_HEIGHT = 540
self.image_shape = [540, 960]
self.last_img = None
self.last_update = None
self.last_send = None
self.edge = '960'
self.zmq_sender = sender
self.start_http()
# self.start_zmq()
def start_http(self):
def _new_streaming(self):
cnt = 0
endpoint = self.endpoint + "/predict_opencv?camera_id=" + self.cam_id + '&edge=' + self.edge
if self.cam_source == "0":
self.cam = cv2.VideoCapture(0)
else:
self.cam = cv2.VideoCapture(self.cam_source)
if self.cam.isOpened():
cam_fps = self.cam.get(cv2.CAP_PROP_FPS)
if cam_fps > 0.0 and cam_fps < self.fps:
self.fps = cam_fps
while self.cam_is_alive:
cnt += 1
is_ok, img = self.cam.read()
if is_ok:
width = IMG_WIDTH
ratio = IMG_WIDTH / img.shape[1]
height = int(img.shape[0] * ratio + 0.000001)
if height >= self.IMG_HEIGHT:
height = self.IMG_HEIGHT
ratio = self.IMG_HEIGHT / img.shape[0]
width = int(img.shape[1] * ratio + 0.000001)
self.edge = '540'
img = cv2.resize(img, (width, height))
self.last_img = img
self.last_update = time.time()
time.sleep(1 / self.fps)
# print(jpg)
else:
self.restart_cam()
time.sleep(1)
logger.warning("Stream {} finished".format(self.cam_id))
self.cam.release()
def run_send(self):
cnt = 0
while self.cam_is_alive:
if self.last_img is None:
logger.warning(
"stream {} img not ready".format(self.cam_id))
time.sleep(1)
continue
if self.last_send == self.last_update:
# logger.warning('no new img')
time.sleep(1 / self.fps)
continue
cnt += 1
if cnt % 30 == 1:
logger.warning(
"send through channel {} to inference server , count = {}".format(
bytes(self.cam_id, "utf-8"), cnt
)
)
# data = cv2.imencode(".jpg", self.last_img)[1].tobytes()
data = self.last_img.tobytes()
endpoint = self.endpoint + "/predict_opencv?camera_id=" + self.cam_id + '&edge=' + self.edge
res = requests.post(endpoint, data=data)
self.last_send = self.last_update
time.sleep(1 / self.fps)
threading.Thread(target=_new_streaming,
args=(self,), daemon=True).start()
threading.Thread(target=run_send, args=(self,), daemon=True).start()
def start_zmq(self):
def run_capture(self):
if self.cam_source == "0":
self.cam = cv2.VideoCapture(0)
else:
self.cam = cv2.VideoCapture(self.cam_source)
cnt = 0
while self.cam_is_alive:
is_ok, img = self.cam.read()
if is_ok:
width = IMG_WIDTH
ratio = IMG_WIDTH / img.shape[1]
height = int(img.shape[0] * ratio + 0.000001)
if height >= self.IMG_HEIGHT:
height = self.IMG_HEIGHT
ratio = self.IMG_HEIGHT / img.shape[0]
width = int(img.shape[1] * ratio + 0.000001)
img = cv2.resize(img, (width, height))
self.last_img = img
self.last_update = time.time()
time.sleep(1 / self.fps)
else:
time.sleep(1)
self.restart_cam()
logger.warning("Stream {} finished".format(self.cam_id))
def run_send(self):
cnt = 0
while self.cam_is_alive:
cnt += 1
if self.last_img is None:
logger.warning(
"stream {} img not ready".format(self.cam_id))
time.sleep(1)
continue
if self.last_send == self.last_update:
# logger.warning('no new img')
time.sleep(1 / self.fps)
continue
if cnt % 30 == 1:
logger.warning(
"send through channel {} to inference server".format(
bytes(self.cam_id, "utf-8")
)
)
# self.mutex.acquire()
# FIXME may find a better way to deal with encoding
self.zmq_sender.send_multipart(
[
bytes(self.cam_id, "utf-8"), self.last_img.tobytes(),
]
)
self.last_send = self.last_update
time.sleep(1 / self.fps)
threading.Thread(target=run_capture, args=(self,), daemon=True).start()
threading.Thread(target=run_send, args=(self,), daemon=True).start()
def restart_cam(self):
logger.warning("Restarting Cam {}".format(self.cam_id))
cam = cv2.VideoCapture(self.cam_source)
# Protected by Mutex
self.mutex.acquire()
self.cam.release()
self.cam = cam
self.mutex.release()
    def update_cam(self, cam_id, cam_source, endpoint, frameRate, lva_mode,
                   has_aoi, aoi_info, line_info, zone_info):
        # NOTE: the original signature took only (cam_id, cam_source,
        # endpoint), but the body below also references frameRate, lva_mode,
        # has_aoi, aoi_info, line_info and zone_info, so they are added as
        # parameters here. The helpers normalize_rtsp, PartCounter,
        # DangerZone, DefeatDetection and the self.model/_update_instance
        # attributes are assumed to be provided elsewhere; they are not
        # defined in this file.
print("[INFO] Updating Cam ...", flush=True)
# if self.cam_type == cam_type and self.cam_source == cam_source:
# return
if (
self.cam_source != cam_source
or round(self.frameRate) != round(frameRate)
or self.lva_mode != lva_mode
):
self.cam_source = cam_source
self.frameRate = frameRate
self.lva_mode = lva_mode
self._update_instance(normalize_rtsp(cam_source), str(frameRate))
self.has_aoi = has_aoi
self.aoi_info = aoi_info
detection_mode = self.model.get_detection_mode()
if detection_mode == "PC":
print("[INFO] Line INFO", line_info, flush=True)
self.scenario = PartCounter()
self.scenario_type = self.model.detection_mode
try:
line_info = json.loads(line_info)
self.use_line = line_info["useCountingLine"]
lines = line_info["countingLines"]
if len(lines) > 0:
x1 = int(lines[0]["label"][0]["x"])
y1 = int(lines[0]["label"][0]["y"])
x2 = int(lines[0]["label"][1]["x"])
y2 = int(lines[0]["label"][1]["y"])
self.scenario.set_line(x1, y1, x2, y2)
print("Upading Line:", flush=True)
print(" use_line:", self.use_line, flush=True)
print(" line:", x1, y1, x2, y2, flush=True)
else:
print("Upading Line:", flush=True)
print(" use_line:", self.use_line, flush=True)
except:
self.use_line = False
print("Upading Line[*]:", flush=True)
print(" use_line :", False, flush=True)
elif detection_mode == "ES":
print("[INFO] Zone INFO", zone_info, flush=True)
self.scenario = DangerZone()
self.scenario_type = self.model.detection_mode
# FIXME
self.scenario.set_targets(["person"])
try:
zone_info = json.loads(zone_info)
self.use_zone = zone_info["useDangerZone"]
zones = zone_info["dangerZones"]
_zones = []
print("Upading Line:", flush=True)
print(" use_zone:", self.use_zone, flush=True)
for zone in zones:
x1 = int(zone["label"]["x1"])
y1 = int(zone["label"]["y1"])
x2 = int(zone["label"]["x2"])
y2 = int(zone["label"]["y2"])
_zones.append([x1, y1, x2, y2])
print(" zone:", x1, y1, x2, y2, flush=True)
self.scenario.set_zones(_zones)
            except Exception:
                self.use_zone = False
                print("Updating Zone[*]:", flush=True)
print(" use_zone :", False, flush=True)
elif detection_mode == "DD":
print("[INFO] Line INFO", line_info, flush=True)
self.scenario = DefeatDetection()
self.scenario_type = self.model.detection_mode
# FIXME
self.scenario.set_ok("Bottle - OK")
self.scenario.set_ng("Bottle - NG")
try:
line_info = json.loads(line_info)
self.use_line = line_info["useCountingLine"]
lines = line_info["countingLines"]
if len(lines) > 0:
x1 = int(lines[0]["label"][0]["x"])
y1 = int(lines[0]["label"][0]["y"])
x2 = int(lines[0]["label"][1]["x"])
y2 = int(lines[0]["label"][1]["y"])
self.scenario.set_line(x1, y1, x2, y2)
print("Upading Line:", flush=True)
print(" use_line:", self.use_line, flush=True)
print(" line:", x1, y1, x2, y2, flush=True)
else:
print("Upading Line:", flush=True)
print(" use_line:", self.use_line, flush=True)
except:
self.use_line = False
print("Upading Line[*]:", flush=True)
print(" use_line :", False, flush=True)
else:
self.scenario = None
self.scenario_type = self.model.detection_mode
def check_update(self, rtsp, fps, endpoint):
print(endpoint)
print(type(endpoint))
print(self.endpoint)
print(type(self.endpoint))
return rtsp != self.cam_source or endpoint != self.endpoint or self.fps != fps
def delete(self):
# self.mutex.acquire()
self.cam_is_alive = False
# self.mutex.release()
logging.info("Deactivate stream {}".format(self.cam_id))
|
1d6b95c055cdf0afe350f8e771c862ace9024289
|
c34703aecac2d7f0ceb29428cac5e9d1da4229d1
|
/build-binutils.py
|
9cfe016e0cb9c2ccb79975cbea7a2857e9a52e6f
|
[
"Apache-2.0"
] |
permissive
|
ClangBuiltLinux/tc-build
|
ef342da72c8b7abef399069f9dc838394b4ae82d
|
5297456f7d24470e24109023cd632ecf3823a24f
|
refs/heads/main
| 2023-08-18T08:39:09.728513
| 2023-08-07T16:07:47
| 2023-08-07T16:07:47
| 178,067,258
| 192
| 242
|
Apache-2.0
| 2023-09-05T17:28:29
| 2019-03-27T20:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,765
|
py
|
build-binutils.py
|
#!/usr/bin/env python3
# pylint: disable=invalid-name
from argparse import ArgumentParser
from pathlib import Path
import time
import tc_build.binutils
import tc_build.utils
LATEST_BINUTILS_RELEASE = (2, 41, 0)
parser = ArgumentParser()
parser.add_argument('-B',
'--binutils-folder',
help='''
By default, the script will download a copy of the binutils source in the src folder within
the same folder as this script. If you have your own copy of the binutils source that you
would like to build from, pass it to this parameter. It can be either an absolute or
relative path.
''',
type=str)
parser.add_argument('-b',
'--build-folder',
help='''
By default, the script will create a "build/binutils" folder in the same folder as this
script then build each target in its own folder within that containing folder. If you
would like the containing build folder to be somewhere else, pass it to this parameter.
It can be either an absolute or
relative path.
''',
type=str)
parser.add_argument('-i',
'--install-folder',
help='''
By default, the script will build binutils but stop before installing it. To install
them into a prefix, pass it to this parameter. This can be either an absolute or
relative path.
''',
type=str)
parser.add_argument('-m',
'--march',
metavar='ARCH',
help='''
Add -march=ARCH and -mtune=ARCH to CFLAGS to optimize the toolchain for the target
host processor.
''',
type=str)
parser.add_argument('--show-build-commands',
help='''
By default, the script only shows the output of the commands it is running. When this option
is enabled, the invocations of configure and make will be shown to help with reproducing
issues outside of the script.
''',
action='store_true')
parser.add_argument('-t',
'--targets',
help='''
The script can build binutils targeting arm-linux-gnueabi, aarch64-linux-gnu,
mips-linux-gnu, mipsel-linux-gnu, powerpc-linux-gnu, powerpc64-linux-gnu,
powerpc64le-linux-gnu, riscv64-linux-gnu, s390x-linux-gnu, and x86_64-linux-gnu.
By default, it builds all supported targets ("all"). If you would like to build
specific targets only, pass them to this script. It can be either the full target
or just the first part (arm, aarch64, x86_64, etc).
''',
nargs='+')
args = parser.parse_args()
script_start = time.time()
tc_build_folder = Path(__file__).resolve().parent
bsm = tc_build.binutils.BinutilsSourceManager()
if args.binutils_folder:
bsm.location = Path(args.binutils_folder).resolve()
if not bsm.location.exists():
raise RuntimeError(f"Provided binutils source ('{bsm.location}') does not exist?")
else:
# Turns (2, 40, 0) into 2.40 and (2, 40, 1) into 2.40.1 to follow tarball names
folder_name = 'binutils-' + '.'.join(str(x) for x in LATEST_BINUTILS_RELEASE if x)
bsm.location = Path(tc_build_folder, 'src', folder_name)
bsm.tarball.base_download_url = 'https://sourceware.org/pub/binutils/releases'
bsm.tarball.local_location = bsm.location.with_name(f"{folder_name}.tar.xz")
bsm.tarball_remote_checksum_name = 'sha512.sum'
bsm.prepare()
if args.build_folder:
build_folder = Path(args.build_folder).resolve()
else:
build_folder = Path(tc_build_folder, 'build/binutils')
default_targets = bsm.default_targets()
if args.targets:
targets = default_targets if 'all' in args.targets else set(args.targets)
else:
targets = default_targets
targets_to_builder = {
'arm': tc_build.binutils.ArmBinutilsBuilder,
'aarch64': tc_build.binutils.AArch64BinutilsBuilder,
'mips': tc_build.binutils.MipsBinutilsBuilder,
'mipsel': tc_build.binutils.MipselBinutilsBuilder,
'powerpc': tc_build.binutils.PowerPCBinutilsBuilder,
'powerpc64': tc_build.binutils.PowerPC64BinutilsBuilder,
'powerpc64le': tc_build.binutils.PowerPC64LEBinutilsBuilder,
'riscv64': tc_build.binutils.RISCV64BinutilsBuilder,
's390x': tc_build.binutils.S390XBinutilsBuilder,
'x86_64': tc_build.binutils.X8664BinutilsBuilder,
}
if 'loongarch64' in default_targets:
targets_to_builder['loongarch64'] = tc_build.binutils.LoongArchBinutilsBuilder
for item in targets:
target = item.split('-', maxsplit=1)[0]
if target in targets_to_builder:
builder = targets_to_builder[target]()
builder.folders.build = Path(build_folder, target)
if args.install_folder:
builder.folders.install = Path(args.install_folder).resolve()
builder.folders.source = bsm.location
if args.march:
builder.cflags += [f"-march={args.march}", f"-mtune={args.march}"]
builder.show_commands = args.show_build_commands
builder.build()
else:
tc_build.utils.print_warning(f"Unsupported target ('{target}'), ignoring...")
print(f"\nTotal script duration: {tc_build.utils.get_duration(script_start)}")
|
1f231961cdbc32123c67e2e59c6aeccdf3f5618d
|
57d9afb528319a1930812a25eb183f1657f68a07
|
/rl_reliability_metrics/analysis/stats.py
|
0308fca1b79408cdff994d832ec7311496106399
|
[
"Apache-2.0"
] |
permissive
|
google-research/rl-reliability-metrics
|
83a94a5e097fc196bf5ef0af4f72561d54897376
|
50da84ba3fd7c3a5aa211f3906811e1a80c8ed59
|
refs/heads/master
| 2023-08-31T07:52:05.093868
| 2023-07-31T15:05:13
| 2023-07-31T15:06:20
| 226,411,041
| 152
| 22
|
Apache-2.0
| 2019-12-13T18:05:37
| 2019-12-06T21:07:45
|
Python
|
UTF-8
|
Python
| false
| false
| 22,690
|
py
|
stats.py
|
# coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for running analyses/statistics on robustness metric results.
Two types of analyses can be performed:
(1) bootstrap runs for an algorithm to obtain confidence intervals
(2) permute runs across pairs of algorithms to compare
For online metrics, we can compute these for different timeframes along the
training runs (e.g. beginning/middle/end).
"""
from absl import logging
import numpy as np
from rl_reliability_metrics.analysis import data_def
from rl_reliability_metrics.analysis import io_utils_oss as io_utils
from rl_reliability_metrics.metrics import metrics_offline
from rl_reliability_metrics.metrics import metrics_online
import scipy.stats
# Internal gfile dependencies
class StatsRunner(object):
"""Computes statistics on robustness metric results, for a single metric."""
def __init__(self,
data,
metric,
n_timeframes=3,
n_random_samples=1000,
outfile_dir=None,
resampled_results_dir=None):
"""Initialize StatsRunner object.
Args:
data: DataDef object containing metric results.
metric: Which metric to evaluate.
n_timeframes: Number of timeframes that we are splitting each curve into.
n_random_samples: Number of random samples (number of permutations /
number of bootstraps).
outfile_dir: Path to directory in which to write outputs, if desired.
resampled_results_dir: Path to directory containing metric values
evaluated on permuted or bootstrapped runs (only necessary for
across-run metrics).
"""
self.data_def = data
self.metric = metric
self.n_timeframes = n_timeframes
self.n_random_samples = n_random_samples
self.outfile_dir = outfile_dir
self.resampled_results_dir = resampled_results_dir
# Get metric-specific information
self.result_dims, self.bigger_is_better = self._get_metric_attributes()
def compare_algorithms(self, algo1, algo2, timeframe):
"""Compute statistical significance on difference between two algorithms.
Args:
algo1: First algorithm for comparison
algo2: Second algorithm for comparison.
timeframe: Index of the timeframe that we are evaluating.
Returns:
p-value for the permutation test comparing the two algorithms.
"""
timeframe_points = self.get_timeframe_points(timeframe)
# Load the metric results.
metric_results = self.load_metric_results(self.data_def.algorithms,
timeframe_points)
# Compute the ranks of each algorithm, per task
metric_ranks_all = self.rank_per_task(metric_results)
algo1_ind = self.data_def.algorithms.index(algo1)
algo2_ind = self.data_def.algorithms.index(algo2)
metric_ranks_algo1_algo2 = metric_ranks_all[[algo1_ind, algo2_ind]]
# Compute the actual difference in mean rank between algorithms.
actual_diff = self._algo_meanrank_diff(metric_ranks_algo1_algo2)
# Get the null distribution of differences in mean rank, through permutation
if self.result_dims == 'ATP':
# For across-run metrics, we load already-computed metrics values that
# were evaluated on permuted runs. For each permutation, we rank and
# compute the difference in mean rank to obtain a null distribution.
perm_diffs = self._load_metrics_on_permuted_and_diff_rank(
algo1, algo2, timeframe_points)
else:
# Directly permute the metric ranks to obtain a null distribution.
perm_diffs = self._permute_ranks_and_diff(metric_ranks_algo1_algo2)
# Get p-value from the permutation distribution.
pval = self._get_pval(perm_diffs, actual_diff)
# Write result.
if self.outfile_dir:
self._write_pval_result(pval, algo1, algo2, timeframe)
logging.info('P-value: %g', pval)
return pval
def bootstrap_confidence_interval(self, algo, timeframe, alpha=0.05):
"""Compute confidence interval for an algorithm, by bootstrapping on runs.
Args:
algo: Algorithm to evaluate.
timeframe: Index of the timeframe that we are evaluating.
alpha: Threshold for confidence interval. Confidence level = 1 - alpha.
Returns:
(lower bound, upper bound) on the confidence interval.
"""
timeframe_points = self.get_timeframe_points(timeframe)
# Get the bootstrap distribution on the mean rank.
if self.result_dims == 'ATP':
# For across-run metrics, load the metrics evaluated on bootstrapped runs,
# then rank to get the bootstrap distribution.
algo_mean_ranks = self._bootstrap_distribution_loaded(
algo, timeframe_points)
else:
# Bootstrap the runs on the metric results directly.
metric_results = self.load_metric_results(self.data_def.algorithms,
timeframe_points)
algo_ind = self.data_def.algorithms.index(algo)
algo_mean_ranks = self._bootstrap_distribution_directly(
metric_results, algo_ind)
# Compute the confidence interval
ci_lower = np.percentile(algo_mean_ranks, 100 * alpha / 2)
ci_upper = np.percentile(algo_mean_ranks, 100 * (1 - (alpha / 2)))
# Write result.
if self.outfile_dir:
self._write_confidence_interval_result(ci_lower, ci_upper, algo,
timeframe)
logging.info('Confidence interval: (%s, %s)', ci_lower, ci_upper)
return ci_lower, ci_upper
def _get_metric_attributes(self):
registry = {}
registry.update(metrics_offline.REGISTRY)
registry.update(metrics_online.REGISTRY)
metric_cls = registry[self.metric]
return metric_cls.result_dimensions, metric_cls.bigger_is_better
def _bootstrap_distribution_loaded(self, algo, timeframe_points):
"""Get distribution on mean ranks, by loading bootstrapped metric values.
For across-run metrics (result_dims=='ATP'), we cannot directly bootstrap
the metric rankings, because the metrics need to be re-evaluated for each
bootstrap resampling.
Here we load the metric results that were evaluated on bootstrapped runs
(resampled with replacement within each algo/task combination). Then, for
each resampling, we compute rankings and mean ranking, in order to obtain a
bootstrap distribution on the mean ranking.
Args:
algo: The algorithm to be evaluated.
timeframe_points: List of indices to load, along the "eval points"
dimension.
Returns:
Bootstrap distribution on mean rank of the algorithm.
A 1-D Numpy array with length self.n_random_samples
"""
assert self.result_dims == 'ATP' # across-run metrics
# Load the metrics values, evaluated on resampled runs.
data_bootstrapped = data_def.DataDefBootstrapped(
self.resampled_results_dir,
algorithm=algo,
tasks=self.data_def.tasks,
n_runs_per_experiment=self.data_def.n_runs_per_experiment,
n_bootstrap=self.n_random_samples)
# Compute the bootstrap distribution of mean rankings.
mean_ranks = np.empty(self.n_random_samples)
for i_boot in range(self.n_random_samples):
# Load the bootstrapped metric values.
bootstrapped_metric_values = np.empty(self.data_def.n_tasks)
for i_task, task in enumerate(self.data_def.tasks):
all_timepoints = data_bootstrapped.results['%s.seed%d' %
(task, i_boot)][self.metric]
timeframe_values = np.array(all_timepoints)[timeframe_points]
bootstrapped_metric_values[i_task] = np.median(timeframe_values)
# Combine bootstrapped + non-bootstrapped metric values
other_algos = set(self.data_def.algorithms) - {algo}
nonbootstrapped_metric_values = self.load_metric_results(
other_algos, timeframe_points)
all_metric_values = np.concatenate(
(nonbootstrapped_metric_values, [bootstrapped_metric_values]), axis=0)
# Rank the metric values
metric_ranks_all = self.rank_per_task(all_metric_values)
# Compute the mean rank, for the algorithm of interest.
algo_ranks = metric_ranks_all[-1, :]
mean_ranks[i_boot] = np.mean(algo_ranks)
return mean_ranks
def _bootstrap_distribution_directly(self, metric_results, algo_ind):
"""Resample runs on the metric results, to get a distribution of mean ranks.
Args:
metric_results: Array containing metric values for all algorithms.
algo_ind: The index of the algorithm that we wish to resample for.
Returns:
Bootstrap distribution on mean rank of the algorithm.
A 1-D Numpy array with length self.n_random_samples
"""
# Bootstrap the metric results and get distribution of mean rank.
algo_mean_ranks = np.empty(self.n_random_samples)
for iboot in range(self.n_random_samples):
# Resample the per-run metric results, only for algorithm of interest.
metric_results_resampled = self._resample_metric_results(
metric_results, algo_ind)
# Compute the ranks of each algorithm, per task
metric_ranks = self.rank_per_task(metric_results_resampled)
# Get the mean rank for the algorithm of interest.
algo_mean_ranks[iboot] = np.mean(metric_ranks[algo_ind])
return algo_mean_ranks
def _resample_metric_results(self, metric_results, algo_ind):
"""Resample runs with replacement, only for the algorithm of interest.
Args:
metric_results: Array containing metric values for all algorithms.
algo_ind: The index of the algorithm that we wish to resample for.
Returns:
Array that is the same as metric_results, except that the metric values
have been resampled with replacement (within each task) for the algorithm
of interest.
"""
assert self.result_dims[:3] == 'ATR'
n_task = metric_results.shape[1]
n_runs = metric_results.shape[2]
# Resample the runs with replacement within each task, for the specified
# algorithm only.
resampled = metric_results.copy()
for task_ind in range(n_task):
# Resample.
resampling_inds = np.random.choice(
range(n_runs), size=n_runs, replace=True)
algo_task_results = metric_results[algo_ind, task_ind]
algo_task_results_resampled = algo_task_results[resampling_inds]
# Place back into full array of metric results.
resampled[algo_ind, task_ind] = algo_task_results_resampled
return resampled
def get_timeframe_points(self, timeframe):
"""Determine which timepoints are in this timeframe.
The eval points will be divided into timeframes of equal length. If the
total length of the time series is not equally divisible, the remainder will
be assigned to the last timeframe.
Args:
timeframe: Which timeframe to get. To get the indices corresponding to
*all* evaluation points, set to None.
Returns:
A list of indices corresponding to the points within the desired
timeframe.
"""
# Some metrics have no eval points (just one value per run).
if 'P' not in self.result_dims:
return None
# Get the evaluation points for this metric.
eval_points = np.array(
self.data_def.metric_params[self.metric]['eval_points'])
# Return indices corresponding to all evaluation points, if indicated.
if timeframe is None:
return range(len(eval_points))
# Get the length of each timeframe.
timeframe_len = (eval_points[-1] - eval_points[0]) / self.n_timeframes
# Get timeframe start.
if timeframe == 0:
start_idx = 0
else:
start_time = timeframe * timeframe_len
start_idx = np.where(eval_points >= start_time)[0][0]
# Get timeframe end.
if timeframe == (self.n_timeframes - 1):
end_idx = len(eval_points)
else:
end_time = (timeframe + 1) * timeframe_len
end_idx = np.where(eval_points < end_time)[0][-1] + 1
return range(start_idx, end_idx)
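  # Worked example (eval points assumed for illustration): with eval_points =
  # [0, 10, 20, ..., 90] and n_timeframes=3, timeframe_len is 30, so
  # timeframe 0 yields the indices of points in [0, 30) (i.e. 0, 10, 20),
  # timeframe 1 those in [30, 60), and timeframe 2 runs through the final
  # point, absorbing any remainder.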
def load_metric_results(self,
algos,
timeframe_points,
collapse_on_timepoints=True):
"""Load all results for the metric, for the specified timeframe.
Args:
algos: List of strings specifying which algorithms to load.
timeframe_points: List of indices to load. May be None if the metric has
no "eval points" dimension (i.e. "P" not in self.result_dims)
collapse_on_timepoints: If True, we collapse across all timepoints within
the timeframe.
Returns:
Numpy array containing the metric results for the specified metric and
algorithms. Dimensions as specified by self.result_dims.
"""
n_algo = len(algos)
n_task = len(self.data_def.tasks)
n_run = self.data_def.n_runs_per_experiment
timeframe_len = len(timeframe_points) if timeframe_points else None
# Initialize the array
if self.result_dims == 'ATP': # (algo, task, evalpoint)
metric_results = np.empty((n_algo, n_task, timeframe_len))
elif self.result_dims == 'ATRP': # (algo, task, run, evalpoint)
metric_results = np.empty((n_algo, n_task, n_run, timeframe_len))
elif self.result_dims == 'ATR': # (algo, task, run)
metric_results = np.empty((n_algo, n_task, n_run))
else:
raise ValueError('Cannot currently process dimensions: %s' %
self.result_dims)
# Load results for each algo and task
for i_algo, algo in enumerate(algos):
for i_task, task in enumerate(self.data_def.tasks):
algo_task_results = self.data_def.results['%s.%s' %
(algo, task)][self.metric]
algo_task_results = np.array(algo_task_results)
# take only the timeframe points, if needed
if self.result_dims == 'ATRP':
algo_task_results = algo_task_results[:, timeframe_points]
elif self.result_dims == 'ATP':
algo_task_results = algo_task_results[timeframe_points]
metric_results[i_algo][i_task] = algo_task_results
# Compute median across eval points within the timeframe
if collapse_on_timepoints and ('P' in self.result_dims):
metric_results = np.median(metric_results, -1)
return metric_results
def rank_per_task(self, results_array):
"""Rank all results, on a per-task basis.
Ranks start at 1, which indicates the best algorithm.
Args:
results_array: Array with dimensions self.result_dims
Returns:
Array with the rank for each result, evaluated separately for each task.
Dimensions same as results_array.
"""
task_dim = self.result_dims.find('T')
n_task = results_array.shape[task_dim]
assert task_dim == 1 # We make this assumption below.
ranks = np.empty(results_array.shape)
for i_task in range(n_task):
task_results = results_array[:, i_task]
task_ranks = scipy.stats.rankdata(task_results)
if self.bigger_is_better:
# bigger metric values are better (and should have smaller rank).
task_ranks = task_ranks.size + 1 - task_ranks
task_ranks = np.reshape(task_ranks, task_results.shape)
ranks[:, i_task] = task_ranks
return ranks
@staticmethod
def _algo_meanrank_diff(ranks_array):
"""Compute the difference in mean rank, for two algorithms.
Args:
ranks_array: Array with the ranks for each algorithm, for each task.
ranks_array[0] are the ranks for the first algorithm. ranks_array[1] are
the ranks for the second algorithm.
Returns:
Float: Mean rank of second algorithm minus mean rank of first algorithm.
"""
assert ranks_array.shape[0] == 2
# For each algo, compute mean rank across tasks (and runs, if needed).
algo1_meanrank = np.mean(ranks_array[0])
algo2_meanrank = np.mean(ranks_array[1])
# Return the difference.
meanrank_diff = algo2_meanrank - algo1_meanrank
return meanrank_diff
def _load_metrics_on_permuted_and_diff_rank(self, algo1, algo2,
timeframe_points):
"""Get null distribution on difference in rank, from permuted metric results.
For across-run metrics (with result_dims 'ATP'), we cannot directly permute
the metric rankings, because the metrics need to be re-evaluated for each
permutation of the runs.
Here we load the metric results that were evaluated on permuted runs
(permuted between algo1 and algo2). Then, for each permutation, we
re-compute rankings and differences in mean ranking, in order to obtain a
null distribution on the difference in mean ranking between algo1 and algo2.
Args:
algo1: The first algorithm to be compared.
algo2: The second algorithm to be compared.
timeframe_points: List of indices to load, along the "eval points"
dimension.
Returns:
Distribution of differences in mean rank between the two algorithms.
A 1-D Numpy array with length self.n_random_samples
"""
assert self.result_dims == 'ATP' # across-run metrics
# Load the metrics values
data_permuted = data_def.DataDefPermuted(
self.resampled_results_dir,
algorithms=[algo1, algo2],
tasks=self.data_def.tasks,
n_runs_per_experiment=self.data_def.n_runs_per_experiment,
n_permutation=self.n_random_samples)
perm_diffs = np.empty(self.n_random_samples)
for i_perm in range(self.n_random_samples):
# Load the permuted metric values
permuted_metric_values = np.empty((2, self.data_def.n_tasks))
for split in (1, 2):
for i_task, task in enumerate(self.data_def.tasks):
all_timepoints = data_permuted.results['%s.seed%d.split%d' %
(task, i_perm,
split)][self.metric]
timeframe_values = np.array(all_timepoints)[timeframe_points]
permuted_metric_values[split - 1,
i_task] = np.median(timeframe_values)
# Combine permuted + unpermuted metric values
other_algos = set(self.data_def.algorithms) - {algo1, algo2}
unpermuted_metric_values = self.load_metric_results(
other_algos, timeframe_points)
all_metric_values = np.concatenate(
(unpermuted_metric_values, permuted_metric_values), axis=0)
# Rank the metric values
metric_ranks_all = self.rank_per_task(all_metric_values)
# Compute the difference in mean rank, for the algorithms of interest.
metric_ranks_algo1_algo2 = metric_ranks_all[-2:, :]
perm_diff = self._algo_meanrank_diff(metric_ranks_algo1_algo2)
perm_diffs[i_perm] = perm_diff
return perm_diffs
def _permute_ranks_and_diff(self, metric_ranks):
"""Get null distribution of differences, by directly permuting ranks.
Permute ranks across pairs of algorithms, within each task. This allows us
to obtain a distribution of differences in mean rank
Args:
metric_ranks: Array with the rank for each metric result, evaluated
separately for each task.
Returns:
Distribution of differences in mean rank between the two algorithms.
A 1-D Numpy array with length self.n_random_samples
"""
# We assume 0th dimension is algorithm and 1st dimension is task.
assert self.result_dims[:2] == 'AT'
n_task = metric_ranks.shape[1]
perm_diffs = np.empty(self.n_random_samples)
for i_perm in range(self.n_random_samples):
# Permute across the two algorithms, within each task.
permuted_ranks = np.zeros(metric_ranks.shape)
for i_task in range(n_task):
sample1 = metric_ranks[0, i_task]
sample2 = metric_ranks[1, i_task]
all_samples = np.concatenate([sample1, sample2])
len_sample1 = sample1.shape[0]
permutation_indices = np.random.permutation(range(len_sample1 * 2))
permuted_samples = all_samples[permutation_indices]
permuted_ranks[0, i_task, :] = permuted_samples[:len_sample1]
permuted_ranks[1, i_task, :] = permuted_samples[len_sample1:]
# difference between mean ranks, for this permutation
perm_diff = self._algo_meanrank_diff(permuted_ranks)
perm_diffs[i_perm] = perm_diff
return perm_diffs
@staticmethod
def _get_pval(null_distribution, observed_value):
"""Compute p-value given an observation and a null distribution.
I.e. the proportion of the null distribution that is at least as extreme as
the observed value. Note that this is a two-sided test.
Args:
null_distribution: 1-D array, containing the null distribution of the test
statistic (e.g. on a set of permutations)
observed_value: float, the observed value of the test statistic
Returns:
float, the p-value
"""
return np.mean(np.abs(null_distribution) >= abs(observed_value))
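  # Worked example (assumed values): null_distribution = [-2, -1, 0, 1, 3]
  # with observed_value = 2 gives absolute values [2, 1, 0, 1, 3], of which
  # two are >= 2, so the two-sided p-value is 2 / 5 = 0.4.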
def _write_pval_result(self, pval, algo1, algo2, timeframe=None):
"""Write p-value to text file."""
io_utils.makedirs(self.outfile_dir)
outfile_path = ('%s/%s_%s_%s' %
(self.outfile_dir, self.metric, algo1, algo2))
if timeframe is not None:
outfile_path += '_%d' % timeframe
with open(outfile_path, 'w') as outfile:
outfile.write('%g' % pval)
logging.info('P-val result written to: %s', outfile_path)
def _write_confidence_interval_result(self,
ci_lower,
ci_upper,
algo,
timeframe=None):
"""Write confidence interval to text file."""
io_utils.makedirs(self.outfile_dir)
outfile_path = '%s/%s_%s' % (self.outfile_dir, self.metric, algo)
if timeframe is not None:
outfile_path += '_%d' % timeframe
with open(outfile_path, 'w') as outfile:
outfile.write('%g,%g' % (ci_lower, ci_upper))
logging.info('Confidence interval written to: %s', outfile_path)
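# Minimal usage sketch (hypothetical names; the DataDef construction and the
# metric name are assumptions, not taken from this file):
#   runner = StatsRunner(data=my_data_def, metric='IqrAcrossRuns',
#                        outfile_dir='/tmp/stats')
#   runner.compare_algorithms('algoA', 'algoB', timeframe=2)    # permutation test
#   runner.bootstrap_confidence_interval('algoA', timeframe=2)  # 95% CI by default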
|
fd858ecea4982bc7df0a176fbea8641d37aa7ec5
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/python/akg/ops/math/equal.py
|
fe6b8052e52d0a0e4299ea87bf51a345cecb1c7e
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,643
|
py
|
equal.py
|
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: equal"""
import akg.tvm
import akg.topi
import akg.utils as utils
from akg.utils.dsl_create import produce_shapes
from akg.utils.kernel_exec import product_is_mini
from .cast import cast
from .sub import sub
@utils.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor)
def _equal(input1, input2):
shape1 = [x.value for x in input1.shape]
shape2 = [x.value for x in input2.shape]
utils.check_shape(shape1)
utils.check_shape(shape2)
shape1, shape2, shape = produce_shapes(shape1, shape2)
utils.elemwise_dtype_check(input1.dtype, input2.dtype)
dtype = input1.dtype
# get equal compute
t_value = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(1, dtype), "T")
f_value = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(0, dtype), "F")
input1_bro = akg.topi.broadcast_to(input1, shape)
input2_bro = akg.topi.broadcast_to(input2, shape)
c_out = akg.tvm.compute(shape, lambda *indice: akg.tvm.expr.Select(input1_bro[indice] == input2_bro[indice],
t_value[indice], f_value[indice]), name="C")
res = akg.tvm.compute(shape, lambda *indice: c_out(*indice).astype("bool"), name="res")
return res
@utils.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor)
def _equal_ascend(input1, input2, target=utils.CCE):
# check shapes
shape1 = [x.value for x in input1.shape]
shape2 = [x.value for x in input2.shape]
shapes = [shape1, shape2]
    for shp in shapes:
utils.check_shape(shp)
utils.ops_dtype_check([input1.dtype, input2.dtype],
[utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32,
utils.DtypeForDavinci.INT8, utils.DtypeForDavinci.UINT8])
dtype = input1.dtype
orig_dtype = dtype
if product_is_mini() and dtype != "float16":
dtype = "float16"
if (not product_is_mini()) and dtype not in ("float16", "float32"):
# for int32, if cast to float16, there may be overflow
dtype = "float32"
if orig_dtype == "float32" and dtype == "float16":
input_sub = sub(input1, input2, target)
input_sub = cast(input_sub, dtype, target)
zero = akg.tvm.const(0.0, dtype)
res = akg.topi.equal(input_sub, zero)
else:
input1 = cast(input1, dtype, target)
input2 = cast(input2, dtype, target)
res = akg.topi.equal(input1, input2)
return res
def Equal(input1, input2, target=utils.CCE):
"""
    Check whether input1 equals input2, elementwise.
    Args:
        input1 (tvm.tensor.Tensor): Tensor.
        input2 (tvm.tensor.Tensor): Tensor.
        target (str): Backend target; utils.CCE selects the Ascend path.
    Returns:
        tvm.tensor.Tensor. True where input1 equals input2, else False.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
if target == utils.CCE:
return _equal_ascend(input1, input2)
else:
return _equal(input1, input2)
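# Usage sketch (shapes and dtype are illustrative; placeholders come from the
# standard TVM API re-exported as akg.tvm):
#   a = akg.tvm.placeholder((16, 16), name="a", dtype="float16")
#   b = akg.tvm.placeholder((16, 16), name="b", dtype="float16")
#   res = Equal(a, b, target=utils.CCE)  # bool tensor, True where a == b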
|
4ac56dee17371da184f1301ce06a844608bcd11c
|
84724b34b3f1e84dc53cbca5f3660590dbc34a9f
|
/nova/tests/unit/objects/test_trusted_certs.py
|
9029845ef34188502958e3bdb881bd75ea4a892f
|
[
"Apache-2.0"
] |
permissive
|
openstack/nova
|
2c24b64e3677595611715bae6dda14edd3f90a24
|
065c5906d2da3e2bb6eeb3a7a15d4cd8d98b35e9
|
refs/heads/master
| 2023-08-28T15:10:05.126314
| 2023-08-25T20:31:27
| 2023-08-25T20:31:27
| 790,031
| 2,287
| 2,320
|
Apache-2.0
| 2023-07-08T02:10:29
| 2010-07-22T02:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,622
|
py
|
test_trusted_certs.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from nova.objects import trusted_certs
from nova.tests.unit.objects import test_objects
from oslo_serialization import jsonutils
fake_trusted_certs = trusted_certs.TrustedCerts(ids=['fake-trusted-cert-1',
'fake-trusted-cert-2'])
fake_instance_extras = {
'trusted_certs': jsonutils.dumps(fake_trusted_certs.obj_to_primitive())
}
class _TestTrustedCertsObject(object):
@mock.patch('nova.db.main.api.instance_extra_get_by_instance_uuid')
def test_get_by_instance_uuid(self, mock_get):
mock_get.return_value = fake_instance_extras
certs = trusted_certs.TrustedCerts.get_by_instance_uuid(
self.context, 'fake_uuid')
self.assertEqual(certs.ids, fake_trusted_certs.ids)
class TestTrustedCertsObject(test_objects._LocalTest,
_TestTrustedCertsObject):
pass
class TestRemoteTrustedCertsObject(test_objects._RemoteTest,
_TestTrustedCertsObject):
pass
|
896f56c4275b5eee62024dfd38433b9d05e54d97
|
308f5596f1c7d382520cfce13ceaa5dff6f4f783
|
/third-party/thrift/src/thrift/test/python_capi/capi_test.py
|
860be0e991f67abeace3013763fbcd9fb1efc510
|
[
"MIT",
"Apache-2.0",
"PHP-3.01",
"Zend-2.0"
] |
permissive
|
facebook/hhvm
|
7e200a309a1cad5304621b0516f781c689d07a13
|
d8203129dc7e7bf8639a2b99db596baad3d56b46
|
refs/heads/master
| 2023-09-04T04:44:12.892628
| 2023-09-04T00:43:05
| 2023-09-04T00:43:05
| 455,600
| 10,335
| 2,326
|
NOASSERTION
| 2023-09-14T21:24:04
| 2010-01-02T01:17:06
|
C++
|
UTF-8
|
Python
| false
| false
| 19,564
|
py
|
capi_test.py
|
#!/usr/bin/env fbpython
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from sys import getrefcount
from typing import Generator
import thrift.python_capi.fixture as fixture
from folly.iobuf import IOBuf
from thrift.python.serializer import Protocol, serialize, serialize_iobuf
from thrift.test.python_capi.module.thrift_types import (
AdaptedFields,
AnnoyingEnum,
ComposeStruct,
DoubledPair,
EmptyStruct,
ListStruct,
MapStruct,
MyDataItem,
MyEnum,
MyStruct,
Onion as MyUnion,
PrimitiveStruct,
SetStruct,
StringPair,
)
from thrift.test.python_capi.serialized_dep.thrift_types import (
SerializedError,
SerializedStruct,
SerializedUnion,
)
from thrift.test.python_capi.thrift_dep.thrift_types import (
DepEnum,
DepStruct,
SomeError,
)
class PythonCapiFixture(unittest.TestCase):
def my_struct(self) -> MyStruct:
return MyStruct(
inty=1,
stringy="hello",
myItemy=MyDataItem(),
myEnumy=MyEnum.MyValue1,
booly=True,
floatListy=[-1.0, 1.0, 2.0, 3.0],
strMappy={b"hello": "world", b"-1": "-1"},
intSetty={-1, 1, 2, 3, 5, 8},
)
def my_union(self) -> Generator[MyUnion, None, None]:
yield MyUnion()
yield MyUnion(myEnum=MyEnum.MyValue1)
yield MyUnion(myStruct=self.primitive())
yield MyUnion(myString="acef")
yield MyUnion(doubleList=[1.0, 2.0, 3.0])
yield MyUnion(strMap={b"key": "val", b"bytes": "str"})
def primitive(self) -> PrimitiveStruct:
return PrimitiveStruct(
booly=True,
charry=-9,
shorty=2**15 - 1,
inty=2**31 - 1,
longy=2**63 - 1,
floaty=-1.0,
dubby=-1.0,
stringy="€ to £ to ₹",
bytey=b"bippity boppity boo",
buffy=IOBuf(memoryview(b" the buffest buffer ")),
pointbuffy=IOBuf(memoryview(b"the pointiest buffer")),
patched_struct=self.my_struct(),
empty_struct=EmptyStruct(),
some_error=SomeError(msg="bad math"),
fbstring=b"v fast string",
managed_string_view="I'm an rpc string utility",
)
def primitive_unset(self) -> PrimitiveStruct:
return PrimitiveStruct(
booly=True,
# charry left deliberately unset, should be 0
shorty=0,
inty=2**31 - 1,
longy=2**63 - 1,
# leave optional `floaty` `dubby`, `stringy`, `bytey` unset
)
def adapted_fields(self) -> AdaptedFields:
return AdaptedFields(
adapted_int=4247,
list_adapted_int=[1, 1, 2, 3, 5, 8],
set_adapted_int={2, 3, 5, 7, 11, 13},
inline_adapted_int=47,
)
def list_struct(self) -> ListStruct:
return ListStruct(
boolz=[True, True, False, False, False, False, True, True, False, True],
intz=[-1, -2, -1, 0, 1, 2, 2, 2, 2, 10],
stringz=["wat", "", "-1", "-1", "lol", "loool"],
encoded=[b"beep", b"boop", b"bop"],
uidz=[-(2**63), -1, 0, 1, 2**63 - 1],
matrix=[[4.0, 9.0, 2.0], [3.0, 5.0, 7.0], [8.0, 1.0, 6.0]],
ucharz=[[2, 7, 6], [9, 5, 1], [4, 3, 8]],
voxels=[
[[2, 7, 6], [9, 5, 1], [4, 3, 8]],
[[2, 7, 6], [9, 5, 1], [4, 3, 8]],
[[2, 7, 6], [9, 5, 1], [4, 3, 8]],
],
buf_ptrs=[IOBuf(memoryview(x)) for x in [b"abc", b"def", b"ghi"]],
)
def empty_lists(self) -> ListStruct:
# optional fields left unset
return ListStruct(
boolz=[],
encoded=[],
uidz=[],
matrix=[],
ucharz=[[], [9, 5, 1], []],
voxels=[[], [[]], [[], [3], []]],
)
def set_struct(self) -> SetStruct:
return SetStruct(
enumz={MyEnum.MyValue1, MyEnum.MyValue2},
intz={1, 1, 2, 3, 5, 8, 13, 23, 42},
binnaz={b"abcd", b"efgh", b"ijkl", b"mnop"},
encoded={b"abcd", b"bcda", b"cdab", b"dabc"},
uidz={0, 10, 100, 1000, 10000},
charz={0, 1, 2, 4, 8, 16},
setz=[{1, 2, 3}, {}, {2, 3}, {1, 2, 3}],
)
def empty_sets(self) -> SetStruct:
return SetStruct(
enumz={},
intz={},
binnaz={},
encoded={},
uidz={},
charz={},
setz=[{}],
)
def map_struct(self) -> MapStruct:
return MapStruct(
enumz={MyEnum.MyValue1: "V1", MyEnum.MyValue2: "V2"},
intz={i: str(i) for i in range(-3, 3)},
binnaz={b"a": self.primitive(), b"b": self.primitive()},
encoded={"wdf": 3.1, "wef": 2.9},
flotz={i: float(i) for i in range(5)},
map_list=[{i: i**2 for i in range(j)} for j in range(2)],
list_map={-1: [1, -2, 3, -5], 2: [4, -8, 16]},
fast_list_map={1: [-1.0, 1.0], -1: [1.0, -1.0]},
buf_map={x: IOBuf(memoryview(x)) for x in [b"qergq", b"", b"wefwi"]},
unsigned_list_map={1: [1, 2, 3, 5], 2: [4, 8, 16]},
)
def empty_maps(self) -> MapStruct:
return MapStruct(
enumz={},
encoded={},
flotz={},
map_list=[{}],
list_map={},
fast_list_map={},
)
def dep_struct(self) -> DepStruct:
return DepStruct(
s="blah",
i=42,
)
def composed(self) -> ComposeStruct:
return ComposeStruct(
enum_=MyEnum.MyValue2,
renamed_=AnnoyingEnum.FOO,
primitive=self.primitive(),
aliased=self.list_struct(),
xenum=DepEnum.Arm2,
xstruct=self.dep_struct(),
friends=[self.dep_struct()] * 3,
serial_struct=SerializedStruct(s="wefw", i=42),
serial_union=SerializedUnion(i=47),
serial_error=SerializedError(msg="tldr"),
)
class PythonCapiRoundtrip(PythonCapiFixture):
def test_roundtrip_struct(self) -> None:
i = MyDataItem()
empty = MyStruct()
s = self.my_struct()
self.assertEqual(i, fixture.roundtrip_MyDataItem(i))
self.assertEqual(empty, fixture.roundtrip_MyStruct(empty))
self.assertEqual(s, fixture.roundtrip_MyStruct(s))
def test_roundtrip_union(self) -> None:
for u in self.my_union():
self.assertEqual(u, fixture.roundtrip_MyUnion(u))
def test_roundtrip_enum(self) -> None:
self.assertEqual(MyEnum.MyValue1, fixture.roundtrip_MyEnum(MyEnum.MyValue1))
self.assertEqual(MyEnum.MyValue2, fixture.roundtrip_MyEnum(MyEnum.MyValue2))
def test_roundtrip_field_adapted(self) -> None:
a, b = ("TacosSalad", "DaLassoCat")
s = StringPair(normal=a, doubled=b)
        self.assertEqual(s, fixture.roundtrip_StringPair(s))
def test_roundtrip_type_adapted(self) -> None:
s = DoubledPair(s="TacosSalad", x=42)
self.assertEqual(s, fixture.roundtrip_DoubledPair(s))
def test_roundtrip_marshal_EmptyStruct(self) -> None:
self.assertEqual(EmptyStruct(), fixture.roundtrip_EmptyStruct(EmptyStruct()))
with self.assertRaises(TypeError):
fixture.roundtrip_EmptyStruct(MyStruct())
def test_roundtrip_TypeError(self) -> None:
with self.assertRaises(TypeError):
fixture.roundtrip_MyDataItem(MyEnum.MyValue1)
with self.assertRaises(TypeError):
fixture.roundtrip_MyUnion(MyEnum.MyValue1)
with self.assertRaises(TypeError):
fixture.roundtrip_MyEnum(self.my_struct())
def test_roundtrip_OverflowError(self) -> None:
## Failures on extraction to cpp
negative_msg = "can't convert negative"
with self.assertRaisesRegex(OverflowError, negative_msg):
fixture.roundtrip_PrimitiveStruct(PrimitiveStruct(shorty=-1))
with self.assertRaisesRegex(OverflowError, negative_msg):
fixture.roundtrip_PrimitiveStruct(PrimitiveStruct(longy=-1))
with self.assertRaisesRegex(OverflowError, negative_msg):
fixture.roundtrip_MapStruct(MapStruct(unsigned_list_map={1: [1, -1]}))
with self.assertRaisesRegex(OverflowError, negative_msg):
fixture.roundtrip_MapStruct(MapStruct(unsigned_list_map={-1: [1, 3]}))
## Failure on creation of thrift-python object (existing behavior)
with self.assertRaises(OverflowError):
fixture.roundtrip_PrimitiveStruct(PrimitiveStruct(shorty=2**15))
def test_roundtrip_marshal_PrimitiveStruct(self) -> None:
self.assertEqual(
PrimitiveStruct(), fixture.roundtrip_PrimitiveStruct(PrimitiveStruct())
)
self.assertEqual(
self.primitive(), fixture.roundtrip_PrimitiveStruct(self.primitive())
)
self.assertEqual(
self.primitive_unset(),
fixture.roundtrip_PrimitiveStruct(self.primitive_unset()),
)
unset_primitive = fixture.roundtrip_PrimitiveStruct(self.primitive_unset())
self.assertIsNone(unset_primitive.floaty)
self.assertIsNone(unset_primitive.dubby)
self.assertIsNone(unset_primitive.stringy)
self.assertIsNone(unset_primitive.bytey)
with self.assertRaises(TypeError):
fixture.roundtrip_PrimitiveStruct(self.my_struct())
def test_memleak_primitive(self) -> None:
# Use non-singleton objects to avoid noise from runtime
short = 9001
f = 9001.0
bytes_ = b"bippity boppity boo"
def make_primitive():
return PrimitiveStruct(
shorty=short,
inty=short,
longy=short,
floaty=f,
dubby=f,
bytey=bytes_,
)
primitive = make_primitive()
# This test works to detect leaks of primitives only because they are
# placed directly into struct internal data without conversion.
# Non-primitives can be leaked, but not detectable by this test.
self.assertIs(primitive.shorty, short)
self.assertIs(primitive.inty, short)
self.assertIs(primitive.longy, short)
self.assertIs(primitive.floaty, f)
self.assertIs(primitive.dubby, f)
self.assertIs(primitive.bytey, bytes_)
short_refcount = getrefcount(short)
f_refcount = getrefcount(f)
bytes_refcount = getrefcount(bytes_)
for _ in range(10):
fixture.roundtrip_PrimitiveStruct(make_primitive())
# These all fail if there is a leak in Extractor<PrimitiveStruct>
self.assertEqual(bytes_refcount, getrefcount(bytes_))
self.assertEqual(f_refcount, getrefcount(f))
self.assertEqual(short_refcount, getrefcount(short))
def test_roundtrip_marshal_ListStruct(self) -> None:
self.assertEqual(ListStruct(), fixture.roundtrip_ListStruct(ListStruct()))
self.assertEqual(
self.list_struct(), fixture.roundtrip_ListStruct(self.list_struct())
)
self.assertEqual(
self.empty_lists(), fixture.roundtrip_ListStruct(self.empty_lists())
)
self.assertIsNone(fixture.roundtrip_ListStruct(self.empty_lists()).intz)
self.assertIsNone(fixture.roundtrip_ListStruct(self.empty_lists()).stringz)
def test_roundtrip_marshal_SetStruct(self) -> None:
self.assertEqual(SetStruct(), fixture.roundtrip_SetStruct(SetStruct()))
self.assertEqual(
self.empty_sets(), fixture.roundtrip_SetStruct(self.empty_sets())
)
expected = self.set_struct()
actual = fixture.roundtrip_SetStruct(self.set_struct())
# sets are serialized in a non-sorted order, so compare field by field
for f in ["enumz", "intz", "binnaz", "encoded", "uidz", "charz", "setz"]:
self.assertEqual(getattr(expected, f), getattr(actual, f), f)
def test_roundtrip_marshal_MapStruct(self) -> None:
self.assertEqual(MapStruct(), fixture.roundtrip_MapStruct(MapStruct()))
self.assertEqual(
self.empty_maps(), fixture.roundtrip_MapStruct(self.empty_maps())
)
expected = self.map_struct()
actual = fixture.roundtrip_MapStruct(self.map_struct())
for f in [
"enumz",
"intz",
"binnaz",
"encoded",
"flotz",
"map_list",
"list_map",
"fast_list_map",
]:
self.assertEqual(getattr(expected, f), getattr(actual, f), f)
def test_roundtrip_marshal_ComposeStruct(self) -> None:
self.assertEqual(
ComposeStruct(), fixture.roundtrip_ComposeStruct(ComposeStruct())
)
self.assertEqual(
self.composed(), fixture.roundtrip_ComposeStruct(self.composed())
)
def test_roundtrip_marshal_AdaptedFields(self) -> None:
self.assertEqual(
AdaptedFields(), fixture.roundtrip_AdaptedFields(AdaptedFields())
)
self.assertEqual(
self.adapted_fields(),
fixture.roundtrip_AdaptedFields(self.adapted_fields()),
)
class PythonCapiTypeCheck(PythonCapiFixture):
def test_typeCheck_struct(self) -> None:
i = MyDataItem()
s = self.my_struct()
self.assertTrue(fixture.check_MyDataItem(i))
self.assertFalse(fixture.check_MyDataItem(s))
self.assertTrue(fixture.check_MyStruct(s))
self.assertFalse(fixture.check_MyStruct(i))
def test_typeCheck_union(self) -> None:
for u in self.my_union():
self.assertTrue(fixture.check_MyUnion(u))
self.assertFalse(fixture.check_MyUnion(self.my_struct()))
self.assertFalse(fixture.check_MyUnion(MyEnum.MyValue1))
def test_typeCheck_enum(self) -> None:
self.assertTrue(fixture.check_MyEnum(MyEnum.MyValue1))
self.assertTrue(fixture.check_MyEnum(MyEnum.MyValue2))
self.assertFalse(fixture.check_MyEnum(self.my_struct()))
def test_roundtrip_field_adapted(self) -> None:
a, b = ("TacosSalad", "DaLassoCat")
self.assertTrue(fixture.check_StringPair(StringPair(normal=a, doubled=b)))
self.assertFalse(fixture.check_StringPair(MyEnum.MyValue1))
def test_roundtrip_type_adapted(self) -> None:
self.assertTrue(
fixture.check_DoubledPair(DoubledPair(s="TacosSalad" * 2, x=42))
)
self.assertFalse(fixture.check_DoubledPair(MyEnum.MyValue1))
def test_typeCheck_PrimitiveStruct(self) -> None:
self.assertTrue(fixture.check_PrimitiveStruct(self.primitive()))
self.assertTrue(fixture.check_PrimitiveStruct(PrimitiveStruct()))
self.assertFalse(fixture.check_PrimitiveStruct(MyEnum.MyValue1))
self.assertFalse(fixture.check_PrimitiveStruct(self.my_struct()))
def test_typeCheck_ListStruct(self) -> None:
self.assertTrue(fixture.check_ListStruct(self.list_struct()))
self.assertTrue(fixture.check_ListStruct(self.empty_lists()))
self.assertTrue(fixture.check_ListStruct(ListStruct()))
self.assertFalse(fixture.check_ListStruct(MyEnum.MyValue1))
self.assertFalse(fixture.check_ListStruct(self.my_struct()))
def test_typeCheck_SetStruct(self) -> None:
self.assertTrue(fixture.check_SetStruct(self.set_struct()))
self.assertTrue(fixture.check_SetStruct(self.empty_sets()))
self.assertTrue(fixture.check_SetStruct(SetStruct()))
self.assertFalse(fixture.check_SetStruct(MyEnum.MyValue1))
self.assertFalse(fixture.check_SetStruct(self.my_struct()))
def test_typeCheck_MapStruct(self) -> None:
self.assertTrue(fixture.check_MapStruct(self.map_struct()))
self.assertTrue(fixture.check_MapStruct(self.empty_maps()))
self.assertTrue(fixture.check_MapStruct(MapStruct()))
self.assertFalse(fixture.check_MapStruct(MyEnum.MyValue1))
self.assertFalse(fixture.check_MapStruct(self.my_struct()))
def test_typeCheck_ComposeStruct(self) -> None:
self.assertTrue(fixture.check_ComposeStruct(self.composed()))
self.assertTrue(fixture.check_ComposeStruct(ComposeStruct()))
self.assertFalse(fixture.check_ComposeStruct(MyEnum.MyValue1))
self.assertFalse(fixture.check_ComposeStruct(self.my_struct()))
class PythonCapiSerializeParity(PythonCapiFixture):
def serialize(self, s: object) -> IOBuf:
return serialize_iobuf(s, protocol=Protocol.BINARY)
def test_PrimitiveStruct_extract(self) -> None:
self.assertEqual(
bytes(fixture.extract_and_serialize_PrimitiveStruct(PrimitiveStruct())),
serialize(PrimitiveStruct(), protocol=Protocol.BINARY),
)
# need to actually create a thrift-cpp2 struct with both methods
# to ensure consistent ordering of map and set fields
self.assertEqual(
fixture.extract_and_serialize_PrimitiveStruct(self.primitive()),
fixture.deserialize_and_serialize_PrimitiveStruct(
self.serialize(self.primitive())
),
)
def test_MyStruct_extract(self) -> None:
self.assertEqual(
fixture.extract_and_serialize_MyStruct(self.my_struct()),
fixture.deserialize_and_serialize_MyStruct(
self.serialize(self.my_struct())
),
)
def test_AdaptedFields_extract(self) -> None:
self.assertEqual(
fixture.extract_and_serialize_AdaptedFields(self.adapted_fields()),
fixture.deserialize_and_serialize_AdaptedFields(
self.serialize(self.adapted_fields())
),
)
def test_ListStruct_extract(self) -> None:
self.assertEqual(
fixture.extract_and_serialize_ListStruct(self.list_struct()),
fixture.deserialize_and_serialize_ListStruct(
self.serialize(self.list_struct())
),
)
def test_SetStruct_extract(self) -> None:
self.assertEqual(
fixture.extract_and_serialize_SetStruct(self.set_struct()),
fixture.deserialize_and_serialize_SetStruct(
self.serialize(self.set_struct())
),
)
def test_MapStruct_extract(self) -> None:
self.assertEqual(
fixture.extract_and_serialize_MapStruct(self.map_struct()),
fixture.deserialize_and_serialize_MapStruct(
self.serialize(self.map_struct())
),
)
def test_ComposeStruct_extract(self) -> None:
self.assertEqual(
fixture.extract_and_serialize_ComposeStruct(self.composed()),
fixture.deserialize_and_serialize_ComposeStruct(
self.serialize(self.composed())
),
)
|
f4e64a14a9491d135540a9937ebc5bb17a94cd1e
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/onnxruntime/python/tools/kernel_explorer/kernels/gemm_softmax_gemm_permute_test.py
|
6e1e431842a5607719f6b24b9877f3a10eb76815
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 18,575
|
py
|
gemm_softmax_gemm_permute_test.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
import os
import sys
from dataclasses import dataclass
from itertools import product
import kernel_explorer as ke
import numpy as np
import pytest
from utils import dtype_to_suffix, matmul, softmax
max_batch_size = int(os.environ.get("KERNEL_EXPLORER_BATCHED_GEMM_MAX_BATCH_SIZE", 64))
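# Note (inferred from the body below): each row of the returned array is
# drawn from its own normal distribution with a randomly chosen mean and
# standard deviation, so the generated tensors are not trivially uniform.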
def multinormal_distribution(num_distribution, num_element_per_dist):
arrays = []
for _ in range(num_distribution):
mean = np.random.rand() - 0.5
std = np.random.rand() + 0.5
arrays.append(np.random.normal(mean, std, (num_element_per_dist,)))
return np.array(arrays)
def get_ck_binding_name(dtype, biased: bool, masked: bool):
dtype_suffix = "_" + dtype_to_suffix(dtype)
ck_suffix = ""
if biased:
ck_suffix += "Biased"
if masked:
ck_suffix += "Masked"
ck_suffix += dtype_suffix
return "GemmSoftmaxGemmPermuteCK" + ck_suffix
dtypes = ["float16"]
batches = [1, max_batch_size]
seqlens = [128, 512]
total_seqlens = [128, 512]
num_heads = [8, 12]
head_sizes = [64]
biaseds = [False, True]
mask_dims = [0, 2, 3, 4]
def get_biased_id(biased):
return "biased" if biased else "nobias"
def get_mask_dim_id(dim):
if dim == 0:
return "nomask"
return f"mask_{dim}d"
def maybe_pack_q_k_v_bnsh_for_device_on_host(q, k, v, dtype, qkv_format):
q = q.astype(dtype)
k = k.astype(dtype)
v = v.astype(dtype)
if qkv_format == ke.qkv_format.Q_K_V_BNSH:
return q, k, v
# BNSH to BSNH
q = np.swapaxes(q, 2, 1)
k = np.swapaxes(k, 2, 1)
v = np.swapaxes(v, 2, 1)
if qkv_format == ke.qkv_format.Q_K_V_BSNH:
return np.ascontiguousarray(q), np.ascontiguousarray(k), np.ascontiguousarray(v)
if qkv_format == ke.qkv_format.QKV_BSN3H:
return np.ascontiguousarray(np.stack([q, k, v], axis=-2)), None, None
if qkv_format == ke.qkv_format.Q_KV_BSNH_BSN2H:
return np.ascontiguousarray(q), np.ascontiguousarray(np.stack([k, v], axis=-2)), None
raise NotImplementedError
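# Shape summary for the packing above (B=batch, N=num_heads, S=seqlen,
# H=head_size); a sketch assuming contiguous numpy inputs of shape (B, N, S, H):
#   Q_K_V_BNSH:      q, k, v returned unchanged, each (B, N, S, H)
#   Q_K_V_BSNH:      q, k, v each (B, S, N, H) after swapping axes 1 and 2
#   QKV_BSN3H:       one packed array of shape (B, S, N, 3, H); k and v are None
#   Q_KV_BSNH_BSN2H: q of shape (B, S, N, H) plus packed kv (B, S, N, 2, H); v is None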
def _test_gemm_softmax_gemm_permute(
f, dtype, batch, seqlen, total_seqlen, num_heads, head_size, biased, mask_dim, scale, qkv_format
):
v_head_size = head_size
q_shape = [batch, num_heads, seqlen, head_size]
k_shape = [batch, num_heads, total_seqlen, head_size]
v_shape = [batch, num_heads, total_seqlen, v_head_size]
out_shape = [batch, seqlen, num_heads, head_size]
attn_bias = None
bias_shape = [batch, num_heads, seqlen, total_seqlen] if biased else None
attn_mask = None
mask_shape = None
mask_shape_broadcasted = None
max_seqlen = None
if mask_dim != 0:
if mask_dim == 2:
mask_shape = [batch, total_seqlen]
mask_shape_broadcasted = [batch, 1, 1, total_seqlen]
elif mask_dim == 3:
mask_shape = [batch, seqlen, total_seqlen]
mask_shape_broadcasted = [batch, 1, seqlen, total_seqlen]
elif mask_dim == 4:
max_seqlen = ((seqlen - 1) // 1024 + 1) * 1024 # round up to multiple of 1024
mask_shape = [batch, 1, max_seqlen, max_seqlen]
else:
raise ValueError
np.random.seed(42)
q = multinormal_distribution(np.prod(q_shape[:-1]), q_shape[-1]).reshape(q_shape).astype(np.float64)
k = multinormal_distribution(np.prod(k_shape[:-1]), k_shape[-1]).reshape(k_shape).astype(np.float64)
v = multinormal_distribution(np.prod(v_shape[:-1]), v_shape[-1]).reshape(v_shape).astype(np.float64)
if bias_shape is not None:
attn_bias = np.random.uniform(-0.5, 0.5, size=bias_shape)
if mask_shape is not None:
attn_mask = (np.random.randint(0, 100, size=mask_shape) < 95).astype(np.int32)
pre_softmax_attn_scores = matmul(q, np.swapaxes(k, 2, 3))
pre_softmax_attn_scores = pre_softmax_attn_scores * scale
if attn_bias is not None:
pre_softmax_attn_scores = pre_softmax_attn_scores + attn_bias
if attn_mask is not None:
filter_value = -10000.0
if mask_dim == 4:
# equivalent to past_sequence_length = max_sequence_length - seqlen
converted_mask = (1 - attn_mask[:, :, -seqlen:, :total_seqlen]) * filter_value
else:
converted_mask = (1 - attn_mask.reshape(mask_shape_broadcasted)) * filter_value
pre_softmax_attn_scores = pre_softmax_attn_scores + converted_mask
attn_scores = softmax(pre_softmax_attn_scores, axis=-1)
attn = matmul(attn_scores, v)
ref = np.swapaxes(attn, 2, 1) # permute 0213
out = np.empty(out_shape, dtype=dtype)
host_q, host_k, host_v = maybe_pack_q_k_v_bnsh_for_device_on_host(q, k, v, dtype, qkv_format)
host_attn_bias = attn_bias.astype(dtype) if attn_bias is not None else None
dev_q = ke.DeviceArray(host_q)
dev_k = ke.DeviceArray(host_k) if host_k is not None else None
dev_v = ke.DeviceArray(host_v) if host_v is not None else None
dev_out = ke.DeviceArray(out)
dev_attn_bias = ke.DeviceArray(host_attn_bias) if host_attn_bias is not None else None
dev_attn_mask = ke.DeviceArray(attn_mask) if attn_mask is not None else None
my_gemm_softmax_gemm_permute = f(
batch,
seqlen,
total_seqlen,
max_seqlen,
num_heads,
head_size,
mask_dim,
scale,
qkv_format,
dev_q,
dev_k,
dev_v,
dev_attn_bias,
dev_attn_mask,
dev_out,
)
print() # write an empty line in case pytest ... -s -v
failures = {}
for impl in my_gemm_softmax_gemm_permute.ListOps():
if not my_gemm_softmax_gemm_permute.SelectOp(impl):
print("Unsupport", impl)
continue
print(" Support", impl)
my_gemm_softmax_gemm_permute.Run()
dev_out.UpdateHostNumpyArray()
try:
is_strict = int(os.environ.get("KERNEL_EXPLORER_STRICT_TEST", "0"))
if is_strict:
# NOTE: this will always fail, just for manual checking with:
# KERNEL_EXPLORER_STRICT_TEST=1 pytest ... -s -v
np.testing.assert_allclose(out, ref)
else:
is_zero_tol, atol, rtol = 1e-3, 2e-2, 1e-2
not_close_to_zeros = np.abs(ref) > is_zero_tol
np.testing.assert_allclose(out[not_close_to_zeros], ref[not_close_to_zeros], atol=atol, rtol=rtol)
except Exception as err:
header = "*" * 30 + impl + "*" * 30
print(header)
print(err)
print("*" * len(header))
failures[impl] = str(err)
if failures:
raise Exception(failures)
@pytest.mark.parametrize("mask_dim", mask_dims, ids=get_mask_dim_id)
@pytest.mark.parametrize("biased", biaseds, ids=get_biased_id)
@pytest.mark.parametrize("head_size", head_sizes)
@pytest.mark.parametrize("nhead", num_heads)
@pytest.mark.parametrize("total_seqlen", total_seqlens)
@pytest.mark.parametrize("seqlen", seqlens)
@pytest.mark.parametrize("batch", [16])
@pytest.mark.parametrize("dtype", ["float16", "float32"])
def test_gemm_softmax_gemm_permute_generic(dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim):
f = getattr(ke, "GemmSoftmaxGemmPermuteGeneric_" + dtype_to_suffix(dtype))
scale = 1.0 / np.sqrt(head_size)
_test_gemm_softmax_gemm_permute(
f, dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim, scale, ke.qkv_format.Q_K_V_BNSH
)
@pytest.mark.parametrize("mask_dim", [2], ids=get_mask_dim_id)
@pytest.mark.parametrize("biased", [False], ids=get_biased_id)
@pytest.mark.parametrize("head_size", [64])
@pytest.mark.parametrize("nhead", [8])
@pytest.mark.parametrize("total_seqlen", [128])
@pytest.mark.parametrize("seqlen", [64])
@pytest.mark.parametrize("batch", [16])
@pytest.mark.parametrize("dtype", ["float16", "float32"])
def test_gemm_softmax_gemm_permute_generic_nested_tunable(
dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim
):
f = getattr(ke, "GemmSoftmaxGemmPermuteGenericNestedTunable_" + dtype_to_suffix(dtype))
scale = 1.0 / np.sqrt(head_size)
_test_gemm_softmax_gemm_permute(
f, dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim, scale, ke.qkv_format.Q_K_V_BNSH
)
@pytest.mark.skipif(not ke.is_composable_kernel_available(), reason="ck is not enabled")
@pytest.mark.parametrize("mask_dim", mask_dims, ids=get_mask_dim_id)
@pytest.mark.parametrize("biased", biaseds, ids=get_biased_id)
@pytest.mark.parametrize("head_size", head_sizes)
@pytest.mark.parametrize("nhead", num_heads)
@pytest.mark.parametrize("total_seqlen", total_seqlens)
@pytest.mark.parametrize("seqlen", seqlens)
@pytest.mark.parametrize("batch", batches)
@pytest.mark.parametrize("dtype", dtypes)
def test_gemm_softmax_gemm_permute_ck(dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim):
f = getattr(ke, get_ck_binding_name(dtype, biased, mask_dim != 0))
scale = 1.0 / np.sqrt(head_size)
_test_gemm_softmax_gemm_permute(
f, dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim, scale, ke.qkv_format.Q_K_V_BNSH
)
@pytest.mark.parametrize("mask_dim", [2], ids=get_mask_dim_id)
@pytest.mark.parametrize("biased", [False], ids=get_biased_id)
@pytest.mark.parametrize("head_size", [64])
@pytest.mark.parametrize("nhead", [8])
@pytest.mark.parametrize("total_seqlen", [128])
@pytest.mark.parametrize("seqlen", [64])
@pytest.mark.parametrize("batch", [16])
@pytest.mark.parametrize("dtype", ["float16"])
def test_gemm_softmax_gemm_permute_tunable(dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim):
f = getattr(ke, "GemmSoftmaxGemmPermuteTunable_" + dtype_to_suffix(dtype))
scale = 1.0 / np.sqrt(head_size)
_test_gemm_softmax_gemm_permute(
f, dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim, scale, ke.qkv_format.Q_K_V_BNSH
)
stable_diffusion_configs = [
[2, 64, 64, 8, 160, "QKV_BSN3H"],
[2, 256, 256, 8, 160, "QKV_BSN3H"],
[2, 1024, 1024, 8, 80, "QKV_BSN3H"],
[2, 4096, 4096, 8, 40, "QKV_BSN3H"],
[2, 64, 77, 8, 160, "Q_KV_BSNH_BSN2H"],
[2, 256, 77, 8, 160, "Q_KV_BSNH_BSN2H"],
[2, 1024, 77, 8, 80, "Q_KV_BSNH_BSN2H"],
[2, 4096, 77, 8, 40, "Q_KV_BSNH_BSN2H"],
[1, 4096, 4096, 1, 512, "Q_K_V_BNSH"],
]
@pytest.mark.skipif(not ke.is_composable_kernel_available(), reason="ck is not enabled")
@pytest.mark.parametrize("mask_dim", [0], ids=get_mask_dim_id)
@pytest.mark.parametrize("biased", [False], ids=get_biased_id)
@pytest.mark.parametrize("batch, seqlen, total_seqlen, nhead, head_size, qkv_format_name", stabel_diffusion_configs)
@pytest.mark.parametrize("dtype", dtypes)
def test_gemm_softmax_gemm_permute_ck_sd(
dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim, qkv_format_name
):
qkv_format = getattr(ke.qkv_format, qkv_format_name)
f = getattr(ke, get_ck_binding_name(dtype, biased, mask_dim != 0))
scale = 1.0 / np.sqrt(head_size)
_test_gemm_softmax_gemm_permute(
f, dtype, batch, seqlen, total_seqlen, nhead, head_size, biased, mask_dim, scale, qkv_format
)
@dataclass
class GemmSoftmaxGemmPermuteMetric(ke.ComputeMetric):
batch: int
seqlen: int
total_seqlen: int
num_heads: int
head_size: int
biased: bool
mask_dim: int
def report(self):
bias_str = " biased" if self.biased else ""
mask_str = f" mask_{self.mask_dim}d" if self.mask_dim != 0 else ""
common = (
f"{self.dtype} B={self.batch} S={self.seqlen} T={self.total_seqlen} "
f"N={self.num_heads} H={self.head_size}{bias_str}{mask_str}, "
f"{self.name}"
)
if self.duration <= 0:
return "not supported " + common
return f"{self.duration:>6.2f} us {self.tflops:>5.2f} tflops " + common
def profile_gemm_softmax_gemm_permute_func(
f, dtype, batch, seqlen, total_seqlen, num_heads, head_size, biased, mask_dim, scale, qkv_format
):
v_head_size = head_size
q_shape = [batch, num_heads, seqlen, head_size]
k_shape = [batch, num_heads, total_seqlen, head_size]
v_shape = [batch, num_heads, total_seqlen, v_head_size]
out_shape = [batch, seqlen, num_heads, head_size]
attn_bias = None
bias_shape = [batch, num_heads, seqlen, total_seqlen] if biased else None
attn_mask = None
mask_shape = None
max_seqlen = None
if mask_dim != 0:
if mask_dim == 2:
mask_shape = [batch, total_seqlen]
elif mask_dim == 3:
mask_shape = [batch, seqlen, total_seqlen]
elif mask_dim == 4:
max_seqlen = ((seqlen - 1) // 1024 + 1) * 1024 # round up to multiple of 1024
mask_shape = [batch, 1, max_seqlen, max_seqlen]
else:
raise ValueError
np.random.seed(42)
q = multinormal_distribution(np.prod(q_shape[:-1]), q_shape[-1]).reshape(q_shape).astype(np.float64)
k = multinormal_distribution(np.prod(k_shape[:-1]), k_shape[-1]).reshape(k_shape).astype(np.float64)
v = multinormal_distribution(np.prod(v_shape[:-1]), v_shape[-1]).reshape(v_shape).astype(np.float64)
if bias_shape is not None:
attn_bias = np.random.uniform(-2, 2, size=bias_shape)
if mask_shape is not None:
attn_mask = (np.random.randint(0, 100, size=mask_shape) < 95).astype(np.int32)
out = np.empty(out_shape, dtype=dtype)
host_q, host_k, host_v = maybe_pack_q_k_v_bnsh_for_device_on_host(q, k, v, dtype, qkv_format)
host_attn_bias = attn_bias.astype(dtype) if attn_bias is not None else None
dev_q = ke.DeviceArray(host_q)
dev_k = ke.DeviceArray(host_k) if host_k is not None else None
dev_v = ke.DeviceArray(host_v) if host_v is not None else None
dev_out = ke.DeviceArray(out)
dev_attn_bias = ke.DeviceArray(host_attn_bias) if host_attn_bias is not None else None
dev_attn_mask = ke.DeviceArray(attn_mask) if attn_mask is not None else None
my_gemm_softmax_gemm_permute = f(
batch,
seqlen,
total_seqlen,
max_seqlen,
num_heads,
head_size,
mask_dim,
scale,
qkv_format,
dev_q,
dev_k,
dev_v,
dev_attn_bias,
dev_attn_mask,
dev_out,
)
for impl in my_gemm_softmax_gemm_permute.ListOps():
duration_ms = -1
if my_gemm_softmax_gemm_permute.SelectOp(impl):
duration_ms = my_gemm_softmax_gemm_permute.Profile()
m, n, k, o, gemm_batch = seqlen, total_seqlen, head_size, head_size, batch * num_heads
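        # Two GEMMs per attention head: (m x k) @ (k x n) for Q.K^T and
        # (m x n) @ (n x o) for scores.V, counting 2 flops per multiply-add.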
flops_per_batch = m * n * k * 2 + m * n * o * 2
flops_count_bias_and_softmax = True # set to false to be aligned with ck
if flops_count_bias_and_softmax:
flops_per_batch += 2 * n + 1
if flops_count_bias_and_softmax and attn_bias is not None:
flops_per_batch += m * n
if flops_count_bias_and_softmax and attn_mask is not None:
flops_per_batch += m * n
flops = flops_per_batch * gemm_batch
ke.report(
GemmSoftmaxGemmPermuteMetric(
impl, dtype, duration_ms, flops, batch, seqlen, total_seqlen, num_heads, head_size, biased, mask_dim
)
)
def profile_with_args(
dtype, batch, seqlen, total_seqlen, num_heads, head_size, biased, mask_dim, scale, qkv_format, *, sort=False
):
with ke.benchmark(sort):
args = (dtype, batch, seqlen, total_seqlen, num_heads, head_size, biased, mask_dim, scale, qkv_format)
if qkv_format == ke.qkv_format.Q_K_V_BNSH:
profile_gemm_softmax_gemm_permute_func(
getattr(ke, "GemmSoftmaxGemmPermuteGeneric_" + dtype_to_suffix(dtype)), *args
)
if ke.is_composable_kernel_available():
profile_gemm_softmax_gemm_permute_func(
getattr(ke, get_ck_binding_name(dtype, biased, mask_dim != 0)), *args
)
profile_gemm_softmax_gemm_permute_func(
getattr(ke, "GemmSoftmaxGemmPermuteTunable_" + dtype_to_suffix(dtype)), *args
)
def profile():
    for batch, seqlen, total_seqlen, nhead, head_size, qkv_format_name in stable_diffusion_configs:
profile_with_args(
"float16",
batch,
seqlen,
total_seqlen,
nhead,
head_size,
biased=False,
mask_dim=0,
qkv_format=getattr(ke.qkv_format, qkv_format_name),
scale=0.125,
sort=True,
)
print()
for args in product(dtypes, batches, seqlens, total_seqlens, num_heads, head_sizes, biaseds, mask_dims):
profile_with_args(*args, qkv_format=ke.qkv_format.Q_K_V_BNSH, scale=0.125, sort=True)
print()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
group = parser.add_argument_group("profile with args")
group.add_argument("--sort", action="store_true")
group.add_argument("dtype", choices=dtypes)
group.add_argument("batch", type=int)
group.add_argument("seqlen", type=int)
group.add_argument("total_seqlen", type=int)
group.add_argument("num_heads", type=int)
group.add_argument("head_size", type=int)
group.add_argument("biased", type=int, choices=[0, 1], default=0)
group.add_argument("mask_dim", type=int, choices=[0, 2, 3, 4], default=2, help="0 for mask disabled")
group.add_argument("--scale", type=float, default=None, help="default to 1.0/sqrt(head_size)")
group.add_argument(
"--qkv_format",
default="Q_K_V_BNSH",
choices=[
"Q_K_V_BNSH", # non-packed, permuted
"Q_K_V_BSNH", # non-packed, non-permuted
"Q_KV_BSNH_BSN2H", # kv packed, non-permuted
"QKV_BSN3H", # qkv packed, non-permuted
],
)
if len(sys.argv) == 1:
profile()
else:
args = parser.parse_args()
profile_with_args(
args.dtype,
args.batch,
args.seqlen,
args.total_seqlen,
args.num_heads,
args.head_size,
args.biased,
args.mask_dim,
args.scale,
getattr(ke.qkv_format, args.qkv_format),
sort=args.sort,
)
|
a1f9a9615c9cc9ffc83fdba30394066302ba15b4
|
b50df8a902f4e2c1ecd8667b7b97937da3371caf
|
/Parte002/ex1073_hackerrank_senior_anant_busqueda_capitan.py
|
941b041083d1ef20ed1997b4b980f3310a2edf90
|
[] |
no_license
|
Fhernd/PythonEjercicios
|
5a5633855979baec89a3c257eb57aac076a7465f
|
204d3d59ddeed6cbf263b23f14e950c20f81f608
|
refs/heads/master
| 2021-11-23T00:26:28.861302
| 2021-10-14T16:50:27
| 2021-10-14T16:50:27
| 230,629,743
| 124
| 84
| null | 2021-09-15T18:45:25
| 2019-12-28T15:45:28
|
Python
|
UTF-8
|
Python
| false
| false
| 629
|
py
|
ex1073_hackerrank_senior_anant_busqueda_capitan.py
|
# Exercise 1073: HackerRank - Help Mr. Anant find the captain in a group of tourists.
# Mr. Anant Asankhya is the manager at the INFINITE hotel. The hotel has an infinite number of rooms.
# One fine day, a finite number of tourists come to stay at the hotel.
# The tourists consist of:
# → A Captain.
# → An unknown group of families consisting of K members per group where K ≠ 1.
# ...
from collections import Counter
if __name__ == '__main__':
k = int(input())
rooms = list(map(int, input().split()))
count = Counter(rooms)
result = count.most_common()[-1][0]
print(result)
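# Illustrative run (made-up input, not from the original exercise): with
# k = 3 and rooms "1 2 3 2 1 3 4 1 2 3", every room occurs k times except
# the captain's room 4, which Counter.most_common() ranks last, so 4 is printed.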
|
1bf36327deb35273f31ffedd51380cd0f8fbe705
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/debug/stepping/test_step_over_yield.py
|
01762a689aff014bd16abb05986c68742ead7df7
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
test_step_over_yield.py
|
def generator2():
for i in range(4):
yield i
def generator():
a = 42 # breakpoint
yield from generator2()
return a
sum = 0
for i in generator():
sum += i
print("The end")
|
bfa747f8b68fd7590b6aa501b2a73a3ab2bffc9b
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/storefinders/geo_me.py
|
c4a9a61cd1e0d22454fcc3debabb67bb231f69c0
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,301
|
py
|
geo_me.py
|
from scrapy import Spider
from scrapy.http import JsonRequest
from scrapy.signals import spider_idle
from locations.dict_parser import DictParser
from locations.hours import DAYS, DAYS_EN, OpeningHours, day_range
from locations.items import Feature
# To use this store finder, specify key = x where x is the unique
# identifier of the store finder in domain x.geoapp.me.
#
# It is likely there are additional fields of data worth extracting
# from the store finder. These should be added by overriding the
# parse_item function. Two parameters are passed, item (and ATP
# "Feature" class) and location (a dict which is returned from the
# store locator JSON response for a particular location).
#
# This spider has two crawling steps which are executed in order:
# 1. Obtain list of all locations by using the API to do bounding
# box searches across the world. The only thing of interest
# returned for each location in this step is a unique identifier
# and coordinates.
# 2. Iterating through the all locations list produced by step (1),
# request the nearest 50 (API limit) locations for each location
# in the all locations list. Remove from the all locations list
#    any locations that were returned with a nearest location
# search. Repeat until the all locations list is empty. The
# nearest location search returns all details of a location.
#
# Note that due to the way the two crawling steps are required to
# operate, numerous duplicate locations will be dropped during
# extraction. It is common for locations to be present in more than
# one nearby cluster of locations that the "nearest to" search
# iterates through.
class GeoMeSpider(Spider):
key = ""
api_version = "2"
url_within_bounds_template = "https://{}.geoapp.me/api/v{}/locations/within_bounds?sw[]={}&sw[]={}&ne[]={}&ne[]={}"
url_nearest_to_template = "https://{}.geoapp.me/api/v{}/locations/nearest_to?lat={}&lng={}&limit=50"
locations_found = {}
def start_requests(self):
self.crawler.signals.connect(self.start_location_requests, signal=spider_idle)
yield JsonRequest(
url=self.url_within_bounds_template.format(self.key, self.api_version, -90, -180, 90, 180),
callback=self.parse_bounding_box,
)
def parse_bounding_box(self, response):
for cluster in response.json().get("clusters", []):
if b := cluster.get("bounds"):
yield JsonRequest(
url=self.url_within_bounds_template.format(
self.key, self.api_version, b["sw"][0], b["sw"][1], b["ne"][0], b["ne"][1]
),
callback=self.parse_bounding_box,
)
for location in response.json().get("locations", []):
self.locations_found[location["id"]] = (float(location["lat"]), float(location["lng"]))
def start_location_requests(self):
self.crawler.signals.disconnect(self.start_location_requests, signal=spider_idle)
if len(self.locations_found) > 0:
first_search_location = self.locations_found.popitem()
first_request = JsonRequest(
url=self.url_nearest_to_template.format(
self.key, self.api_version, first_search_location[1][0], first_search_location[1][1]
),
callback=self.parse_locations,
)
self.crawler.engine.crawl(first_request)
def parse_locations(self, response):
for location in response.json()["locations"]:
if location.get("inactive"):
continue
location["street_address"] = location.pop("address")
item = DictParser.parse(location)
self.extract_hours(item, location)
yield from self.parse_item(item, location) or []
# Remove found location from the list of locations which
# are still waiting to be found.
if self.locations_found.get(location["id"]):
self.locations_found.pop(location["id"])
# Get the next location to do a "nearest to" search from.
if len(self.locations_found) > 0:
next_search_location = self.locations_found.popitem()
yield JsonRequest(
url=self.url_nearest_to_template.format(
self.key, self.api_version, next_search_location[1][0], next_search_location[1][1]
),
callback=self.parse_locations,
)
def extract_hours(self, item, location):
item["opening_hours"] = OpeningHours()
if location.get("open_status") == "twenty_four_hour":
item["opening_hours"].add_days_range(DAYS, "00:00", "23:59")
return
open_hours = location.get("opening_hours")
if not open_hours:
return
for spec in open_hours:
days = spec["days"]
day_from = day_to = days[0]
if len(days) == 2:
day_to = days[1]
for day in day_range(DAYS_EN[day_from], DAYS_EN[day_to]):
for hours in spec["hours"]:
item["opening_hours"].add_range(day, hours[0], hours[1])
def parse_item(self, item: Feature, location: dict, **kwargs):
yield item
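# A minimal subclass sketch tying together the two-step crawl described at
# the top of this file. The spider name, key, and "telephone" response field
# below are hypothetical examples, not taken from a real geoapp.me deployment:
class AcmeSpider(GeoMeSpider):
    name = "acme_example"
    key = "acme"  # hypothetical: locations would be fetched from acme.geoapp.me
    def parse_item(self, item: Feature, location: dict, **kwargs):
        item["phone"] = location.get("telephone")  # assumed field name
        yield item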
|
9143f3e48531e61c8729050613f15d362b4a7a9d
|
7453911cee47edd9414ecfc66d189dc578f7e421
|
/src/gevent/tests/test__ssl.py
|
b80bca0fdd7e9190d61cee80d5aa393e0e3feae4
|
[
"Python-2.0",
"MIT"
] |
permissive
|
gevent/gevent
|
f20eca1852098e47f32eb062db646acfead36e71
|
6b22af0fa8eb2efa89fce36c35808948c67352b0
|
refs/heads/master
| 2023-08-31T19:27:29.410236
| 2023-08-31T10:26:35
| 2023-08-31T10:26:35
| 5,801,666
| 4,981
| 866
|
NOASSERTION
| 2023-09-13T14:16:59
| 2012-09-13T22:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 5,597
|
py
|
test__ssl.py
|
from __future__ import print_function, division, absolute_import
from gevent import monkey
monkey.patch_all()
import os
import socket
import gevent.testing as greentest
# Be careful not to have TestTCP as a bare attribute in this module,
# even aliased, to avoid running duplicate tests
from gevent.tests import test__socket
import ssl
def ssl_listener(private_key, certificate):
raw_listener = socket.socket()
greentest.bind_and_listen(raw_listener)
# pylint:disable=deprecated-method
sock = wrap_socket(raw_listener, keyfile=private_key, certfile=certificate,
server_side=True)
return sock, raw_listener
def wrap_socket(sock, *, keyfile=None, certfile=None, server_side=False):
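    # A context-based replacement for the deprecated ssl.wrap_socket():
    # verification is disabled because the tests use the self-signed
    # certificate generated by the openssl command noted in TestSSL below.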
context = ssl.SSLContext(
protocol=ssl.PROTOCOL_TLS
)
context.verify_mode = ssl.CERT_NONE
context.check_hostname = False
context.load_default_certs()
if keyfile is not None or certfile is not None:
context.load_cert_chain(certfile=certfile, keyfile=keyfile)
return context.wrap_socket(sock, server_side=server_side)
class TestSSL(test__socket.TestTCP):
# To generate:
# openssl req -x509 -newkey rsa:4096 -keyout test_server.key -out test_server.crt -days 36500 -nodes -subj '/CN=localhost'
certfile = os.path.join(os.path.dirname(__file__), 'test_server.crt')
privfile = os.path.join(os.path.dirname(__file__), 'test_server.key')
# Python 2.x has socket.sslerror (which is an alias for
# ssl.SSLError); That's gone in Py3 though. In Python 2, most timeouts are raised
# as SSLError, but Python 3 raises the normal socket.timeout instead. So this has
# the effect of making TIMEOUT_ERROR be SSLError on Py2 and socket.timeout on Py3
# See https://bugs.python.org/issue10272.
# PyPy3 7.2 has a bug, though: it shares much of the SSL implementation with Python 2,
# and it unconditionally does `socket.sslerror = SSLError` when ssl is imported.
# So we can't rely on getattr/hasattr tests, we must be explicit.
TIMEOUT_ERROR = socket.timeout # pylint:disable=no-member
def _setup_listener(self):
listener, raw_listener = ssl_listener(self.privfile, self.certfile)
self._close_on_teardown(raw_listener)
return listener
def create_connection(self, *args, **kwargs): # pylint:disable=signature-differs
return self._close_on_teardown(
# pylint:disable=deprecated-method
wrap_socket(super(TestSSL, self).create_connection(*args, **kwargs)))
# The SSL library can take a long time to buffer the large amount of data we're trying
# to send, so we can't compare to the timeout values
_test_sendall_timeout_check_time = False
# The SSL layer has extra buffering, so test_sendall needs
# to send a very large amount to make it timeout
_test_sendall_data = data_sent = b'hello' * 100000000
test_sendall_array = greentest.skipOnMacOnCI("Sometimes misses data")(
greentest.skipOnManylinux("Sometimes misses data")(
test__socket.TestTCP.test_sendall_array
)
)
test_sendall_str = greentest.skipOnMacOnCI("Sometimes misses data")(
greentest.skipOnManylinux("Sometimes misses data")(
test__socket.TestTCP.test_sendall_str
)
)
@greentest.skipOnWindows("Not clear why we're skipping")
def test_ssl_sendall_timeout0(self):
# Issue #317: SSL_WRITE_PENDING in some corner cases
server_sock = []
acceptor = test__socket.Thread(target=lambda: server_sock.append(
# pylint:disable=no-member
self.listener.accept()))
client = self.create_connection()
client.setblocking(False)
try:
# Python 3 raises ssl.SSLWantWriteError; Python 2 simply *hangs*
# on non-blocking sockets because it's a simple loop around
# send(). Python 2.6 doesn't have SSLWantWriteError
expected = getattr(ssl, 'SSLWantWriteError', ssl.SSLError)
with self.assertRaises(expected):
client.sendall(self._test_sendall_data)
finally:
acceptor.join()
client.close()
server_sock[0][0].close()
# def test_fullduplex(self):
# try:
# super(TestSSL, self).test_fullduplex()
# except LoopExit:
# if greentest.LIBUV and greentest.WIN:
# # XXX: Unable to duplicate locally
# raise greentest.SkipTest("libuv on Windows sometimes raises LoopExit")
# raise
@greentest.ignores_leakcheck
@greentest.skipOnPy310("No longer raises SSLError")
def test_empty_send(self):
# Issue 719
# Sending empty bytes with the 'send' method raises
# ssl.SSLEOFError in the stdlib. PyPy 4.0 and CPython 2.6
# both just raise the superclass, ssl.SSLError.
# Ignored during leakchecks because the third or fourth iteration of the
# test hangs on CPython 2/posix for some reason, likely due to
# the use of _close_on_teardown keeping something alive longer than intended.
# cf test__makefile_ref
with self.assertRaises(ssl.SSLError):
super(TestSSL, self).test_empty_send()
@greentest.ignores_leakcheck
def test_sendall_nonblocking(self):
# Override; doesn't work with SSL sockets.
pass
@greentest.ignores_leakcheck
def test_connect_with_type_flags_ignored(self):
# Override; doesn't work with SSL sockets.
pass
if __name__ == '__main__':
greentest.main()
|
c450a8d177c798c77e644b28d1569a94a37fba6f
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/Analysis_parts/AnalysisDisplayColoredSurfaceSettings.py
|
fc84b4bfd587f7d40ddb2c1e7480ad8049a15d3c
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,003
|
py
|
AnalysisDisplayColoredSurfaceSettings.py
|
class AnalysisDisplayColoredSurfaceSettings(object,IDisposable):
"""
Contains colored surface settings for analysis display style element.
AnalysisDisplayColoredSurfaceSettings()
AnalysisDisplayColoredSurfaceSettings(other: AnalysisDisplayColoredSurfaceSettings)
"""
def Dispose(self):
""" Dispose(self: AnalysisDisplayColoredSurfaceSettings) """
pass
def IsEqual(self,other):
"""
IsEqual(self: AnalysisDisplayColoredSurfaceSettings,other: AnalysisDisplayColoredSurfaceSettings) -> bool
Compares two colored surface settings objects.
other: Colored surface settings object to compare with.
Returns: True if objects are equal,false otherwise.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: AnalysisDisplayColoredSurfaceSettings,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,other=None):
"""
__new__(cls: type)
__new__(cls: type,other: AnalysisDisplayColoredSurfaceSettings)
"""
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
GridColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Color of grid lines.
Get: GridColor(self: AnalysisDisplayColoredSurfaceSettings) -> Color
Set: GridColor(self: AnalysisDisplayColoredSurfaceSettings)=value
"""
GridLineWeight=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Line weight of grid lines.
Get: GridLineWeight(self: AnalysisDisplayColoredSurfaceSettings) -> int
Set: GridLineWeight(self: AnalysisDisplayColoredSurfaceSettings)=value
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: AnalysisDisplayColoredSurfaceSettings) -> bool
"""
ShowContourLines=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""If true,show contour lines in the analysis display.
Get: ShowContourLines(self: AnalysisDisplayColoredSurfaceSettings) -> bool
Set: ShowContourLines(self: AnalysisDisplayColoredSurfaceSettings)=value
"""
ShowGridLines=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""If true,show grid lines in the analysis display.
Get: ShowGridLines(self: AnalysisDisplayColoredSurfaceSettings) -> bool
Set: ShowGridLines(self: AnalysisDisplayColoredSurfaceSettings)=value
"""
|
f922e072359007b62b911852c07bb1f82069e686
|
88efd76316e4184d76a5e0585d95fe734233942c
|
/yellowbrick/__init__.py
|
964afb99569b87bba95c1b0502063e12582fe60b
|
[
"Apache-2.0"
] |
permissive
|
DistrictDataLabs/yellowbrick
|
1ecd9f33e58f0d007569904401c204a6cdeb5661
|
f7a8e950bd31452ea2f5d402a1c5d519cd163fd5
|
refs/heads/develop
| 2023-08-03T12:25:26.511916
| 2023-07-05T18:14:28
| 2023-07-05T18:14:28
| 59,121,694
| 4,242
| 660
|
Apache-2.0
| 2023-07-15T17:50:31
| 2016-05-18T14:12:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,826
|
py
|
__init__.py
|
# yellowbrick
# A suite of visual analysis and diagnostic tools for machine learning.
#
# Author: Rebecca Bilbro
# Author: Benjamin Bengfort
# Created: Wed May 18 10:46:33 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: __init__.py [0c5ba04] benjamin@bengfort.com $
"""
A suite of visual analysis and diagnostic tools to facilitate feature
selection, model selection, and parameter tuning for machine learning.
"""
##########################################################################
## Imports
##########################################################################
# Capture the original matplotlib rcParams
import matplotlib as mpl
_orig_rc_params = mpl.rcParams.copy()
# Import the version number at the top level
from .version import get_version, __version_info__
# Import the style management functions
from .style.rcmod import reset_defaults, reset_orig
from .style.rcmod import set_aesthetic, set_style, set_palette
from .style.palettes import color_palette, set_color_codes
# Import yellowbrick functionality to the top level
# TODO: review top-level functionality
from .anscombe import anscombe
from .datasaurus import datasaurus
from .classifier import ROCAUC, ClassBalance, ClassificationScoreVisualizer
# from .classifier import crplot, rocplot
# from .regressor import peplot, residuals_plot
##########################################################################
## Set default aesthetics
##########################################################################
set_aesthetic() # NOTE: modifies mpl.rcParams
##########################################################################
## Package Version
##########################################################################
__version__ = get_version(short=True)
|
b28407076c8e05c5bd73cad7b271fe3e0ef34cab
|
8ed15d43652dbcab332c78923da416b91b139323
|
/python/fedml/core/dp/budget_accountant/rdp_analysis.py
|
0d11bdb0580970e0e4501c6ac234de388c184700
|
[
"Apache-2.0"
] |
permissive
|
FedML-AI/FedML
|
74d144038c9de4a0621eb328d00987abac35e2d1
|
b436fbd95cbb62f6c58d2233d7affa0f62cb1817
|
refs/heads/master
| 2023-08-31T22:15:39.786371
| 2023-08-24T03:41:58
| 2023-08-24T03:41:58
| 281,519,510
| 3,197
| 807
|
Apache-2.0
| 2023-09-14T02:14:20
| 2020-07-21T22:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 6,747
|
py
|
rdp_analysis.py
|
import numpy as np
from typing import List, Union
import math
from scipy import special
"""imported from Opacus: https://github.com/pytorch/opacus"""
def compute_rdp(
*, q: float, noise_multiplier: float, steps: int, orders: Union[List[float], float]
) -> Union[List[float], float]:
r"""Computes Renyi Differential Privacy (RDP) guarantees of the
Sampled Gaussian Mechanism (SGM) iterated ``steps`` times.
Args:
q: Sampling rate of SGM.
noise_multiplier: The ratio of the standard deviation of the
additive Gaussian noise to the L2-sensitivity of the function
            to which it is added. Note that this is the same as the standard
deviation of the additive Gaussian noise when the L2-sensitivity
of the function is 1.
steps: The number of iterations of the mechanism.
orders: An array (or a scalar) of RDP orders.
Returns:
The RDP guarantees at all orders; can be ``np.inf``.
"""
if isinstance(orders, float):
rdp = _compute_rdp(q, noise_multiplier, orders)
else:
rdp = np.array([_compute_rdp(q, noise_multiplier, order) for order in orders])
return rdp * steps
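# Minimal usage sketch (the numbers are illustrative, not from the source):
#   rdp = compute_rdp(q=0.01, noise_multiplier=1.1, steps=1000,
#                     orders=[1.5, 2.0, 4.0, 8.0, 16.0])
# This returns one RDP value per order; a privacy accountant can then pick
# the tightest order and convert it to an (epsilon, delta) guarantee.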
def _compute_rdp(q: float, sigma: float, alpha: float) -> float:
r"""Computes RDP of the Sampled Gaussian Mechanism at order ``alpha``.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at order ``alpha``; can be np.inf.
"""
if q == 0:
return 0
# no privacy
if sigma == 0:
return np.inf
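    # Full batch (q = 1, no subsampling) below: the closed-form RDP of the
    # plain Gaussian mechanism at order alpha.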
if q == 1.0:
return alpha / (2 * sigma ** 2)
if np.isinf(alpha):
return np.inf
return _compute_log_a(q, sigma, alpha) / (alpha - 1)
def _compute_log_a_for_int_alpha(q: float, sigma: float, alpha: int) -> float:
r"""Computes :math:`log(A_\alpha)` for integer ``alpha``.
Notes:
Note that
        :math:`A_\alpha` is a real-valued function of ``alpha`` and ``q``,
and that 0 < ``q`` < 1.
Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
:math:`log(A_\alpha)` as defined in Section 3.3 of
https://arxiv.org/pdf/1908.10530.pdf.
"""
# Initialize with 0 in the log space.
log_a = -np.inf
for i in range(alpha + 1):
log_coef_i = (
math.log(special.binom(alpha, i))
+ i * math.log(q)
+ (alpha - i) * math.log(1 - q)
)
s = log_coef_i + (i * i - i) / (2 * (sigma ** 2))
log_a = _log_add(log_a, s)
return float(log_a)
def _log_add(logx: float, logy: float) -> float:
r"""Adds two numbers in the log space.
Args:
logx: First term in log space.
logy: Second term in log space.
Returns:
Sum of numbers in log space.
"""
a, b = min(logx, logy), max(logx, logy)
if a == -np.inf: # adding 0
return b
# Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)
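# Worked example: _log_add(math.log(2), math.log(3)) == math.log(5),
# since log(exp(log 2) + exp(log 3)) = log(2 + 3).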
def _compute_log_a_for_frac_alpha(q: float, sigma: float, alpha: float) -> float:
r"""Computes :math:`log(A_\alpha)` for fractional ``alpha``.
Notes:
Note that
        :math:`A_\alpha` is a real-valued function of ``alpha`` and ``q``,
and that 0 < ``q`` < 1.
Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
:math:`log(A_\alpha)` as defined in Section 3.3 of
https://arxiv.org/pdf/1908.10530.pdf.
"""
# The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
# initialized to 0 in the log space:
log_a0, log_a1 = -np.inf, -np.inf
i = 0
z0 = sigma ** 2 * math.log(1 / q - 1) + 0.5
while True: # do ... until loop
coef = special.binom(alpha, i)
log_coef = math.log(abs(coef))
j = alpha - i
log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)
log_e0 = math.log(0.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
log_e1 = math.log(0.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))
log_s0 = log_t0 + (i * i - i) / (2 * (sigma ** 2)) + log_e0
log_s1 = log_t1 + (j * j - j) / (2 * (sigma ** 2)) + log_e1
if coef > 0:
log_a0 = _log_add(log_a0, log_s0)
log_a1 = _log_add(log_a1, log_s1)
else:
log_a0 = _log_sub(log_a0, log_s0)
log_a1 = _log_sub(log_a1, log_s1)
i += 1
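        # Successive terms shrink as i grows; stop once both are negligible
        # in log space (i.e. below e**-30 in linear space).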
if max(log_s0, log_s1) < -30:
break
return _log_add(log_a0, log_a1)
def _log_sub(logx: float, logy: float) -> float:
r"""Subtracts two numbers in the log space.
Args:
logx: First term in log space. Expected to be greater than the second term.
        logy: Second term in log space. Expected to be less than the first term.
Returns:
Difference of numbers in log space.
Raises:
ValueError
If the result is negative.
"""
if logx < logy:
raise ValueError("The result of subtraction must be non-negative.")
if logy == -np.inf: # subtracting 0
return logx
if logx == logy:
return -np.inf # 0 is represented as -np.inf in the log space.
try:
# Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).
return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1
except OverflowError:
return logx
def _compute_log_a(q: float, sigma: float, alpha: float) -> float:
r"""Computes :math:`log(A_\alpha)` for any positive finite ``alpha``.
Notes:
Note that
        :math:`A_\alpha` is a real-valued function of ``alpha`` and ``q``,
and that 0 < ``q`` < 1.
Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf
for details.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
:math:`log(A_\alpha)` as defined in the paper mentioned above.
"""
if float(alpha).is_integer():
return _compute_log_a_for_int_alpha(q, sigma, int(alpha))
else:
return _compute_log_a_for_frac_alpha(q, sigma, alpha)
|
f234f844bbf7fa90d3515220a89c7b552558df3b
|
6eb0ba72a576b18873e53b0ff4f86fb581c6c806
|
/docker/types/containers.py
|
a28061383d68a0f8e9bdbbeacbfddd64df0bdd62
|
[
"Apache-2.0"
] |
permissive
|
docker/docker-py
|
566f9dd69c71ef79fbe2b9dd2745c905e1c613df
|
c38656dc7894363f32317affecc3e4279e1163f8
|
refs/heads/main
| 2023-08-31T14:13:48.087317
| 2023-08-21T13:31:57
| 2023-08-21T13:31:57
| 10,247,874
| 6,473
| 1,943
|
Apache-2.0
| 2023-09-08T18:24:21
| 2013-05-23T16:15:07
|
Python
|
UTF-8
|
Python
| false
| false
| 27,380
|
py
|
containers.py
|
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
format_environment, format_extra_hosts, normalize_links, parse_bytes,
parse_devices, split_command, version_gte, version_lt,
)
from .base import DictType
from .healthcheck import Healthcheck
class LogConfigTypesEnum:
_values = (
'json-file',
'syslog',
'journald',
'gelf',
'fluentd',
'none'
)
JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
class LogConfig(DictType):
"""
Configure logging for a container, when provided as an argument to
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
You may refer to the
`official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_
for more information.
Args:
type (str): Indicate which log driver to use. A set of valid drivers
is provided as part of the :py:attr:`LogConfig.types`
enum. Other values may be accepted depending on the engine version
and available logging plugins.
config (dict): A driver-dependent configuration dictionary. Please
refer to the driver's documentation for a list of valid config
keys.
Example:
>>> from docker.types import LogConfig
>>> lc = LogConfig(type=LogConfig.types.JSON, config={
... 'max-size': '1g',
... 'labels': 'production_status,geo'
... })
>>> hc = client.create_host_config(log_config=lc)
>>> container = client.create_container('busybox', 'true',
... host_config=hc)
>>> client.inspect_container(container)['HostConfig']['LogConfig']
{
'Type': 'json-file',
'Config': {'labels': 'production_status,geo', 'max-size': '1g'}
}
"""
types = LogConfigTypesEnum
def __init__(self, **kwargs):
log_driver_type = kwargs.get('type', kwargs.get('Type'))
config = kwargs.get('config', kwargs.get('Config')) or {}
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super().__init__({
'Type': log_driver_type,
'Config': config
})
@property
def type(self):
return self['Type']
@type.setter
def type(self, value):
self['Type'] = value
@property
def config(self):
return self['Config']
def set_config_value(self, key, value):
""" Set a the value for ``key`` to ``value`` inside the ``config``
dict.
"""
self.config[key] = value
def unset_config(self, key):
""" Remove the ``key`` property from the ``config`` dict. """
if key in self.config:
del self.config[key]
class Ulimit(DictType):
"""
Create a ulimit declaration to be used with
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
Args:
        name (str): Which ulimit this applies to. The valid names can be
            found in '/etc/security/limits.conf' on a GNU/Linux system.
soft (int): The soft limit for this ulimit. Optional.
hard (int): The hard limit for this ulimit. Optional.
Example:
>>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024)
>>> hc = client.create_host_config(ulimits=[nproc_limit])
>>> container = client.create_container(
'busybox', 'true', host_config=hc
)
>>> client.inspect_container(container)['HostConfig']['Ulimits']
[{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}]
"""
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, str):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super().__init__({
'Name': name,
'Soft': soft,
'Hard': hard
})
@property
def name(self):
return self['Name']
@name.setter
def name(self, value):
self['Name'] = value
@property
def soft(self):
return self.get('Soft')
@soft.setter
def soft(self, value):
self['Soft'] = value
@property
def hard(self):
return self.get('Hard')
@hard.setter
def hard(self, value):
self['Hard'] = value
class DeviceRequest(DictType):
"""
Create a device request to be used with
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
Args:
driver (str): Which driver to use for this device. Optional.
        count (int): Number of devices to request. Optional.
Set to -1 to request all available devices.
device_ids (list): List of strings for device IDs. Optional.
Set either ``count`` or ``device_ids``.
capabilities (list): List of lists of strings to request
            capabilities. Optional. The outer list acts as an OR and
            each inner list as an AND; the driver will try to fully
            satisfy any one of the inner lists.
Available capabilities for the ``nvidia`` driver can be found
`here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
options (dict): Driver-specific options. Optional.
"""
def __init__(self, **kwargs):
driver = kwargs.get('driver', kwargs.get('Driver'))
count = kwargs.get('count', kwargs.get('Count'))
device_ids = kwargs.get('device_ids', kwargs.get('DeviceIDs'))
capabilities = kwargs.get('capabilities', kwargs.get('Capabilities'))
options = kwargs.get('options', kwargs.get('Options'))
if driver is None:
driver = ''
elif not isinstance(driver, str):
raise ValueError('DeviceRequest.driver must be a string')
if count is None:
count = 0
elif not isinstance(count, int):
raise ValueError('DeviceRequest.count must be an integer')
if device_ids is None:
device_ids = []
elif not isinstance(device_ids, list):
raise ValueError('DeviceRequest.device_ids must be a list')
if capabilities is None:
capabilities = []
elif not isinstance(capabilities, list):
raise ValueError('DeviceRequest.capabilities must be a list')
if options is None:
options = {}
elif not isinstance(options, dict):
raise ValueError('DeviceRequest.options must be a dict')
super().__init__({
'Driver': driver,
'Count': count,
'DeviceIDs': device_ids,
'Capabilities': capabilities,
'Options': options
})
@property
def driver(self):
return self['Driver']
@driver.setter
def driver(self, value):
self['Driver'] = value
@property
def count(self):
return self['Count']
@count.setter
def count(self, value):
self['Count'] = value
@property
def device_ids(self):
return self['DeviceIDs']
@device_ids.setter
def device_ids(self, value):
self['DeviceIDs'] = value
@property
def capabilities(self):
return self['Capabilities']
@capabilities.setter
def capabilities(self, value):
self['Capabilities'] = value
@property
def options(self):
return self['Options']
@options.setter
def options(self, value):
self['Options'] = value
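# Illustrative example (an addition, not part of the original module):
# requesting a single GPU through the ``nvidia`` driver. ``client`` is assumed
# to be a ``docker.APIClient`` instance negotiating API version >= 1.40, which
# ``device_requests`` requires (see the check in HostConfig below).
#   >>> gpu_request = DeviceRequest(driver='nvidia', count=1,
#   ...                             capabilities=[['gpu']])
#   >>> hc = client.create_host_config(device_requests=[gpu_request])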
class HostConfig(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
privileged=False, dns=None, dns_search=None,
volumes_from=None, network_mode=None, restart_policy=None,
cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None, log_config=None,
mem_limit=None, memswap_limit=None, mem_reservation=None,
kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
group_add=None, cpu_quota=None, cpu_period=None,
blkio_weight=None, blkio_weight_device=None,
device_read_bps=None, device_write_bps=None,
device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
cpuset_cpus=None, userns_mode=None, uts_mode=None,
pids_limit=None, isolation=None, auto_remove=False,
storage_opt=None, init=None, init_path=None,
volume_driver=None, cpu_count=None, cpu_percent=None,
nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None,
device_cgroup_rules=None, device_requests=None,
cgroupns=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
if memswap_limit is not None:
self['MemorySwap'] = parse_bytes(memswap_limit)
if mem_reservation:
self['MemoryReservation'] = parse_bytes(mem_reservation)
if kernel_memory:
self['KernelMemory'] = parse_bytes(kernel_memory)
if mem_swappiness is not None:
if not isinstance(mem_swappiness, int):
raise host_config_type_error(
'mem_swappiness', mem_swappiness, 'int'
)
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
if isinstance(shm_size, str):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
if pid_mode:
if version_lt(version, '1.24') and pid_mode != 'host':
raise host_config_value_error('pid_mode', pid_mode)
self['PidMode'] = pid_mode
if ipc_mode:
self['IpcMode'] = ipc_mode
if privileged:
self['Privileged'] = privileged
if oom_kill_disable:
self['OomKillDisable'] = oom_kill_disable
if oom_score_adj:
if version_lt(version, '1.22'):
raise host_config_version_error('oom_score_adj', '1.22')
if not isinstance(oom_score_adj, int):
raise host_config_type_error(
'oom_score_adj', oom_score_adj, 'int'
)
self['OomScoreAdj'] = oom_score_adj
if publish_all_ports:
self['PublishAllPorts'] = publish_all_ports
if read_only is not None:
self['ReadonlyRootfs'] = read_only
if dns_search:
self['DnsSearch'] = dns_search
if network_mode == 'host' and port_bindings:
raise host_config_incompatible_error(
'network_mode', 'host', 'port_bindings'
)
self['NetworkMode'] = network_mode or 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
raise host_config_type_error(
'restart_policy', restart_policy, 'dict'
)
self['RestartPolicy'] = restart_policy
if cap_add:
self['CapAdd'] = cap_add
if cap_drop:
self['CapDrop'] = cap_drop
if devices:
self['Devices'] = parse_devices(devices)
if group_add:
self['GroupAdd'] = [str(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
if dns_opt is not None:
self['DnsOptions'] = dns_opt
if security_opt is not None:
if not isinstance(security_opt, list):
raise host_config_type_error(
'security_opt', security_opt, 'list'
)
self['SecurityOpt'] = security_opt
if sysctls:
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
for k, v in sysctls.items():
self['Sysctls'][k] = str(v)
if volumes_from is not None:
if isinstance(volumes_from, str):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
if binds is not None:
self['Binds'] = convert_volume_binds(binds)
if port_bindings is not None:
self['PortBindings'] = convert_port_bindings(port_bindings)
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
extra_hosts = format_extra_hosts(extra_hosts)
self['ExtraHosts'] = extra_hosts
if links is not None:
self['Links'] = normalize_links(links)
if isinstance(lxc_conf, dict):
formatted = []
for k, v in lxc_conf.items():
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
if lxc_conf is not None:
self['LxcConf'] = lxc_conf
if cgroup_parent is not None:
self['CgroupParent'] = cgroup_parent
if ulimits is not None:
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = []
for lmt in ulimits:
if not isinstance(lmt, Ulimit):
lmt = Ulimit(**lmt)
self['Ulimits'].append(lmt)
if log_config is not None:
if not isinstance(log_config, LogConfig):
if not isinstance(log_config, dict):
raise host_config_type_error(
'log_config', log_config, 'LogConfig'
)
log_config = LogConfig(**log_config)
self['LogConfig'] = log_config
if cpu_quota:
if not isinstance(cpu_quota, int):
raise host_config_type_error('cpu_quota', cpu_quota, 'int')
self['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
raise host_config_type_error('cpu_period', cpu_period, 'int')
self['CpuPeriod'] = cpu_period
if cpu_shares:
if not isinstance(cpu_shares, int):
raise host_config_type_error('cpu_shares', cpu_shares, 'int')
self['CpuShares'] = cpu_shares
if cpuset_cpus:
self['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
if not isinstance(cpuset_mems, str):
raise host_config_type_error(
'cpuset_mems', cpuset_mems, 'str'
)
self['CpusetMems'] = cpuset_mems
if cpu_rt_period:
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_rt_period', '1.25')
if not isinstance(cpu_rt_period, int):
raise host_config_type_error(
'cpu_rt_period', cpu_rt_period, 'int'
)
self['CPURealtimePeriod'] = cpu_rt_period
if cpu_rt_runtime:
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_rt_runtime', '1.25')
if not isinstance(cpu_rt_runtime, int):
raise host_config_type_error(
'cpu_rt_runtime', cpu_rt_runtime, 'int'
)
self['CPURealtimeRuntime'] = cpu_rt_runtime
if blkio_weight:
if not isinstance(blkio_weight, int):
raise host_config_type_error(
'blkio_weight', blkio_weight, 'int'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight', '1.22')
self["BlkioWeight"] = blkio_weight
if blkio_weight_device:
if not isinstance(blkio_weight_device, list):
raise host_config_type_error(
'blkio_weight_device', blkio_weight_device, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight_device', '1.22')
self["BlkioWeightDevice"] = blkio_weight_device
if device_read_bps:
if not isinstance(device_read_bps, list):
raise host_config_type_error(
'device_read_bps', device_read_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_bps', '1.22')
self["BlkioDeviceReadBps"] = device_read_bps
if device_write_bps:
if not isinstance(device_write_bps, list):
raise host_config_type_error(
'device_write_bps', device_write_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_bps', '1.22')
self["BlkioDeviceWriteBps"] = device_write_bps
if device_read_iops:
if not isinstance(device_read_iops, list):
raise host_config_type_error(
'device_read_iops', device_read_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_iops', '1.22')
self["BlkioDeviceReadIOps"] = device_read_iops
if device_write_iops:
if not isinstance(device_write_iops, list):
raise host_config_type_error(
'device_write_iops', device_write_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_iops', '1.22')
self["BlkioDeviceWriteIOps"] = device_write_iops
if tmpfs:
if version_lt(version, '1.22'):
raise host_config_version_error('tmpfs', '1.22')
self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
if userns_mode:
if version_lt(version, '1.23'):
raise host_config_version_error('userns_mode', '1.23')
if userns_mode != "host":
raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode
if uts_mode:
if uts_mode != "host":
raise host_config_value_error("uts_mode", uts_mode)
self['UTSMode'] = uts_mode
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
if version_lt(version, '1.23'):
raise host_config_version_error('pids_limit', '1.23')
self["PidsLimit"] = pids_limit
if isolation:
if not isinstance(isolation, str):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
self['Isolation'] = isolation
if auto_remove:
if version_lt(version, '1.25'):
raise host_config_version_error('auto_remove', '1.25')
self['AutoRemove'] = auto_remove
if storage_opt is not None:
if version_lt(version, '1.24'):
raise host_config_version_error('storage_opt', '1.24')
self['StorageOpt'] = storage_opt
if init is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init', '1.25')
self['Init'] = init
if init_path is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init_path', '1.25')
if version_gte(version, '1.29'):
# https://github.com/moby/moby/pull/32470
raise host_config_version_error('init_path', '1.29', False)
self['InitPath'] = init_path
if volume_driver is not None:
self['VolumeDriver'] = volume_driver
if cpu_count:
if not isinstance(cpu_count, int):
raise host_config_type_error('cpu_count', cpu_count, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_count', '1.25')
self['CpuCount'] = cpu_count
if cpu_percent:
if not isinstance(cpu_percent, int):
raise host_config_type_error('cpu_percent', cpu_percent, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_percent', '1.25')
self['CpuPercent'] = cpu_percent
if nano_cpus:
if not isinstance(nano_cpus, int):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
self['NanoCpus'] = nano_cpus
if runtime:
if version_lt(version, '1.25'):
raise host_config_version_error('runtime', '1.25')
self['Runtime'] = runtime
if mounts is not None:
if version_lt(version, '1.30'):
raise host_config_version_error('mounts', '1.30')
self['Mounts'] = mounts
if device_cgroup_rules is not None:
if version_lt(version, '1.28'):
raise host_config_version_error('device_cgroup_rules', '1.28')
if not isinstance(device_cgroup_rules, list):
raise host_config_type_error(
'device_cgroup_rules', device_cgroup_rules, 'list'
)
self['DeviceCgroupRules'] = device_cgroup_rules
if device_requests is not None:
if version_lt(version, '1.40'):
raise host_config_version_error('device_requests', '1.40')
if not isinstance(device_requests, list):
raise host_config_type_error(
'device_requests', device_requests, 'list'
)
self['DeviceRequests'] = []
for req in device_requests:
if not isinstance(req, DeviceRequest):
req = DeviceRequest(**req)
self['DeviceRequests'].append(req)
if cgroupns:
self['CgroupnsMode'] = cgroupns
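# Illustrative construction (an addition for clarity): HostConfig is normally
# built via ``APIClient.create_host_config``, but it can be instantiated
# directly given an API version string. Values below are arbitrary examples.
#   >>> hc = HostConfig(version='1.41', mem_limit='1g',
#   ...                 restart_policy={'Name': 'on-failure',
#   ...                                 'MaximumRetryCount': 5})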
def host_config_type_error(param, param_value, expected):
return TypeError(
f'Invalid type for {param} param: expected {expected} '
f'but found {type(param_value)}'
)
def host_config_version_error(param, version, less_than=True):
operator = '<' if less_than else '>'
return errors.InvalidVersion(
f'{param} param is not supported in API versions {operator} {version}',
)
def host_config_value_error(param, param_value):
return ValueError(f'Invalid value for {param} param: {param_value}')
def host_config_incompatible_error(param, param_value, incompatible_param):
return errors.InvalidArgument(
f'\"{param_value}\" {param} is incompatible with {incompatible_param}'
)
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
stdin_open=False, tty=False, ports=None, environment=None,
volumes=None, network_disabled=False, entrypoint=None,
working_dir=None, domainname=None, host_config=None, mac_address=None,
labels=None, stop_signal=None, networking_config=None,
healthcheck=None, stop_timeout=None, runtime=None
):
if stop_timeout is not None and version_lt(version, '1.25'):
raise errors.InvalidVersion(
'stop_timeout was only introduced in API version 1.25'
)
if healthcheck is not None:
if version_lt(version, '1.24'):
raise errors.InvalidVersion(
'Health options were only introduced in API version 1.24'
)
if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
raise errors.InvalidVersion(
'healthcheck start period was introduced in API '
'version 1.29'
)
if isinstance(command, str):
command = split_command(command)
if isinstance(entrypoint, str):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
labels = {lbl: '' for lbl in labels}
if isinstance(ports, list):
exposed_ports = {}
for port_definition in ports:
port = port_definition
proto = 'tcp'
if isinstance(port_definition, tuple):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
exposed_ports[f'{port}/{proto}'] = {}
ports = exposed_ports
if isinstance(volumes, str):
volumes = [volumes, ]
if isinstance(volumes, list):
volumes_dict = {}
for vol in volumes:
volumes_dict[vol] = {}
volumes = volumes_dict
if healthcheck and isinstance(healthcheck, dict):
healthcheck = Healthcheck(**healthcheck)
attach_stdin = False
attach_stdout = False
attach_stderr = False
stdin_once = False
if not detach:
attach_stdout = True
attach_stderr = True
if stdin_open:
attach_stdin = True
stdin_once = True
self.update({
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
'User': str(user) if user is not None else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
'AttachStdin': attach_stdin,
'AttachStdout': attach_stdout,
'AttachStderr': attach_stderr,
'Env': environment,
'Cmd': command,
'Image': image,
'Volumes': volumes,
'NetworkDisabled': network_disabled,
'Entrypoint': entrypoint,
'WorkingDir': working_dir,
'HostConfig': host_config,
'NetworkingConfig': networking_config,
'MacAddress': mac_address,
'Labels': labels,
'StopSignal': stop_signal,
'Healthcheck': healthcheck,
'StopTimeout': stop_timeout,
'Runtime': runtime
})
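# Illustrative sketch (an addition, not in the original module): ContainerConfig
# mirrors the payload of the create-container endpoint. ``command`` strings are
# tokenized by split_command(), and dict environments are flattened to the
# ['FOO=bar'] form by format_environment().
#   >>> cfg = ContainerConfig(version='1.41', image='busybox',
#   ...                       command='echo hello',
#   ...                       environment={'FOO': 'bar'})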
|
cb5a2906f1b64aa4acd03103fd6b32b7d37e5098
|
ddddaa700e4642f46a2c1e1e0271a7c8ea62ba0f
|
/examples/features/hp_constraints_mnist_pytorch/layers.py
|
4b2b4fee95cc60de466c6ab9e8e3fd4a83ef8a33
|
[
"Apache-2.0"
] |
permissive
|
determined-ai/determined
|
9d563cb5ffd074c88ee5edc9bf22ab9c3cb78c7e
|
8239b1993f4f44390f4e88901ffaf3b12429b83c
|
refs/heads/main
| 2023-08-21T12:13:36.651298
| 2023-08-21T08:34:16
| 2023-08-21T08:34:16
| 253,846,879
| 2,531
| 330
|
Apache-2.0
| 2023-09-14T21:54:17
| 2020-04-07T16:12:29
|
Go
|
UTF-8
|
Python
| false
| false
| 568
|
py
|
layers.py
|
from typing import Any
import torch
from torch import nn
from determined.pytorch import TorchData
class Flatten(nn.Module):
def forward(self, *args: TorchData, **kwargs: Any) -> torch.Tensor:
assert len(args) == 1
x = args[0]
assert isinstance(x, torch.Tensor)
return x.contiguous().view(x.size(0), -1)
class Squeeze(nn.Module):
def forward(self, *args: TorchData, **kwargs: Any) -> torch.Tensor:
assert len(args) == 1
x = args[0]
assert isinstance(x, torch.Tensor)
return torch.squeeze(x)
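# Quick illustrative check (an addition, not part of the original example):
# Flatten turns a batch of 1x28x28 images into length-784 vectors, and
# Squeeze drops all singleton dimensions.
if __name__ == "__main__":
    x = torch.zeros(2, 1, 28, 28)
    assert Flatten()(x).shape == (2, 784)
    assert Squeeze()(x).shape == (2, 28, 28)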
|
6253a661461e27a4224b3681c758e110c7315b97
|
111b205910a92e5ed3fdde933ec34d5620725105
|
/tests/test_runner.py
|
8418582647918f4dfff006b429db18f8d72c6d34
|
[
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
nasa/CompDam_DGD
|
9a24fbe11ca897d784394014e7586eecd803a027
|
1c6832051ca8890fb711853bcead25ccaa9114b7
|
refs/heads/master
| 2023-07-06T00:50:50.744579
| 2023-06-26T12:04:43
| 2023-06-26T12:04:43
| 63,346,335
| 114
| 61
|
NOASSERTION
| 2020-07-28T18:39:39
| 2016-07-14T15:05:39
|
Fortran
|
UTF-8
|
Python
| false
| false
| 32,688
|
py
|
test_runner.py
|
#
# Unittest code to run tests on single element models with DGD
#
import os
import shutil
import sys
import abaverify as av
import math
import re
import subprocess
import json
def _versiontuple(v):
return tuple(map(int, (v.split("."))))
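# Illustrative note: _versiontuple('0.5.0') -> (0, 5, 0). Tuples compare
# element-wise, so _versiontuple('0.10.0') > _versiontuple('0.5.0') holds,
# whereas a plain string comparison of the two version strings would not.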
def copyMatProps():
'''
Helper for dealing with .props files
'''
# Put a copy of the properties file in the testOutput directory
propsFiles = [x for x in os.listdir(os.getcwd()) if x.endswith('.props')]
copyAdditionalFiles(propsFiles)
def copyParametersFile(jobName='CompDam'):
'''
Helper for dealing with .parameters files
'''
copyAdditionalFiles(jobName + '.parameters')
def copyAdditionalFiles(files):
'''
Helper for copying supporting files to testOutput
'''
# If testOutput doesn't exist, create it
testOutputPath = os.path.join(os.getcwd(), 'testOutput')
if not os.path.isdir(testOutputPath):
os.makedirs(testOutputPath)
# Copy files
if isinstance(files, str):
files = [files,]
for f in files:
shutil.copyfile(f, os.path.join(os.getcwd(), 'testOutput', f))
def modifyParametersFile(jobName='CompDam', **kwargs):
'''
For modifying the parameters file
Input dictionary should have key, value pairs that correspond to entries in CompDam.parameters
'''
# Copy/modify parameters file
with open(os.path.join(os.getcwd(), 'CompDam.parameters'), 'r') as f:
data = f.read()
for key, value in kwargs.items():
data = re.sub(key + r' ?= ?[-0-9\.d(TRUE)(FALSE)]+', key + ' = ' + value, data)
# Write to testOutput directory
with open(os.path.join(os.getcwd(), 'testOutput', jobName + '.parameters'), 'w') as f:
f.write(data)
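# Typical call (mirrors the usage in the test classes below); each keyword is
# substituted into CompDam.parameters via the regex above:
#   modifyParametersFile(alpha_search='.FALSE.', logLevel='2')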
def evaluate_pyextmod_output(abaverify_obj, jobName, arguments):
'''
Helper for evaluating python extension module implementation
'''
# Arguments
subroutine = arguments[0]
# Run the debug file through the python extension module helper code; outputs a json file
subprocess.check_output('bash -i pyextmod_run.sh ' + subroutine + ' ' + jobName + '-1-debug-0.py', shell=True)
# Load the json file with the state variables computed by abaqus and the Python Extension Module
with open(os.path.join(os.getcwd(), 'testOutput', jobName+'_pyextmod_results.json'), 'r') as f:
results_dict = json.load(f)
# Load the file that specifies which state variables to compare (and tolerances for comparison)
results_expected = __import__('verify_debug_' + jobName + '_expected').parameters
for sv in results_expected.keys():
abaverify_obj.assertAlmostEqual(results_dict[sv][0], results_dict[sv][1], delta=results_expected[sv]) # First value is abaqus, 2nd value is pyextmod
def plotFailureEnvelope(baseName, abscissaIdentifier, ordinateIdentifier, abcissaStrengths, ordinateStrengths):
"""
Create a plot of the failure envelope. Does nothing if matplotlib import fails.
"""
# Try to import matplotlib
try:
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
mpl.use('Agg')
import matplotlib.pyplot as plt
# Read the failure envelope data
with open(os.path.join(os.getcwd(), 'testOutput', baseName + '_failure_envelope.txt'), 'r') as fe:
data = dict()
dataHeaders = list()
for line in fe:
lineSplit = line.split(', ')
# Handle the header row separately
if len(data) == 0:
for i in range(0, len(lineSplit)):
data[lineSplit[i]] = list()
dataHeaders.append(lineSplit[i])
else:
for i in range(0, len(lineSplit)):
data[dataHeaders[i]].append(float(lineSplit[i]))
# Plot the failure envelope
fig, ax = plt.subplots()
# Reference data
dataRef = dict()
dataRef[abscissaIdentifier] = abcissaStrengths + [0]*len(ordinateStrengths)
dataRef[ordinateIdentifier] = [0]*len(abcissaStrengths) + ordinateStrengths
plt.plot(dataRef[abscissaIdentifier], dataRef[ordinateIdentifier], 'x', markeredgecolor='black')
# Data from CompDam
plt.plot(data[abscissaIdentifier], data[ordinateIdentifier], 'o', markerfacecolor='none', markeredgecolor='#ED7D31')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.xlabel(r'$\sigma_{' + abscissaIdentifier.split('S')[1] + '}$ [MPa]')
plt.ylabel(r'$\sigma_{' + ordinateIdentifier.split('S')[1] + '}$ [MPa]')
fig.savefig(os.path.join(os.getcwd(), 'testOutput', baseName + '.png'), dpi=300)
# If import fails, the above code is skipped
except ImportError:
print "INFO: matplotlib package not found. Install matplotlib to generate plots of the failure envelope automatically."
def plotStressLife(baseName, stressRatios, R_ratio=0.1):
"""
Create a stress-life plot from a series of fatigue analyses.
"""
import math
# Try to import matplotlib
try:
import matplotlib as mpl
# If import fails, the above code is skipped
except ImportError:
print "INFO: matplotlib package not found. Install matplotlib to generate stress-life plots automatically."
raise
# if os.environ.get('DISPLAY', '') == '':
# mpl.use('Agg')
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
fatigue_life = list()
for sr in stressRatios:
# Read the fatigue cycle data from the inc2cycles log files. Assume the last line represents failure.
with open(os.path.join(os.getcwd(), 'testOutput', baseName + '_stress_ratio_' + str(sr).replace('.', '') + '_inc2cycles.log'), 'r') as f:
lines = f.read().splitlines()
fatigue_life.append(float(lines[-1].split()[-1]))
# Create the stress-life plot
fig, ax = plt.subplots()
abscissaIdentifier = 'Life'
ordinateIdentifier = 'Fatigue Strength'
# Analysis data
data = dict()
data[abscissaIdentifier] = fatigue_life
data[ordinateIdentifier] = stressRatios
plt.plot(data[abscissaIdentifier], data[ordinateIdentifier], 'x', markeredgecolor='black')
with open(os.path.join(os.getcwd(), 'testOutput', baseName + '_R-ratio_' + str(R_ratio).replace('.', '') + '.txt'), 'w') as f:
f.write("{},{}\n".format(ordinateIdentifier, abscissaIdentifier))
for i in range(len(data[ordinateIdentifier])):
f.write("{},{}\n".format(data[ordinateIdentifier][i], data[abscissaIdentifier][i]))
# Formatting
ax.set_xscale('log')
ax.set_yscale('log')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xlim(xmin=1e0, xmax=1e8)
ax.set_ylim(ymin=0.5, ymax=1.0)
formatter = FuncFormatter(lambda y, _: '{:.1g}'.format(y))
ax.yaxis.set_major_formatter(formatter)
ax.yaxis.set_minor_formatter(formatter)
plt.xlabel(abscissaIdentifier + ', cycles')
plt.ylabel(ordinateIdentifier + ', ' + r'$\sigma^{max}/\sigma_{c}$')
    plt.grid(True, which='major', color='xkcd:silver', linestyle='--')  # flag passed positionally; the ``b=`` keyword is deprecated in newer matplotlib
fig.savefig(os.path.join(os.getcwd(), 'testOutput', baseName + '_R-ratio_' + str(R_ratio).replace('.', '') + '.png'), dpi=300)
class ParametricMixedModeMatrix(av.TestCase):
"""
Parametric mixed mode tests.
"""
# Specify meta class
__metaclass__ = av.ParametricMetaClass
# Refers to the template input file name
baseName = "test_C3D8R_mixedModeMatrix"
# Range of parameters to test; all combinations are tested
# alpha is the angle of the crack normal
# beta defines the direction of tensile loading in Step-1 and compressive loading in Step-2
parameters = {'alpha': range(0,50,10), 'beta': range(0,210,30), 'friction': [0.00, 0.15, 0.30, 0.45, 0.60]}
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
modifyParametersFile(alpha_search = '.FALSE.')
class ParametricElementSizeQuad(av.TestCase):
"""
vucharlength() tests for quad and hex elements not aligned with fiber material direction.
"""
# Specify meta class
__metaclass__ = av.ParametricMetaClass
# Refers to the template input file name
baseName = "test_C3D8R_elementSize"
# The angle of misalignment (psi) and the aspect ratio, i.e., L1/L2, (alpha) of the element edges are here varied.
# A misalignment angle of zero will result in an Abaqus pre error due to an *NMAP rotation command being used in the input deck
parameters = {'misalignment_angle': [-45, -30, -15, 1, 11.25, 22.5, 45], 'alpha': [1.0, 1.5]}
# Closed-form equation for the matrix characteristic element length, valid for misalignment angles between -45 and +45 degrees and gamma = 90deg
L2 = 0.2 # matrix-direction element edge length
Lc_eq = lambda L, alpha, psi: L * (alpha * math.sin(abs(math.radians(psi))) + math.cos(math.radians(psi)))
# Element sizes are dependent on the misalignment and skew angles
expectedpy_parameters = {'Lc1': [Lc_eq(L2*alpha, 1.0/alpha, psi) for alpha in parameters['alpha'] for psi in parameters['misalignment_angle']],
'Lc2': [Lc_eq(L2, alpha, psi) for alpha in parameters['alpha'] for psi in parameters['misalignment_angle']]}
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
copyParametersFile()
class ParametricElementSizeTri(av.TestCase):
"""
vucharlength() tests for tri and wedge elements not aligned with fiber material direction.
"""
# Specify meta class
__metaclass__ = av.ParametricMetaClass
# Refers to the template input file name
baseName = "test_C3D6_elementSize"
# The angle of misalignment (psi) and the aspect ratio, i.e., L1/L2, (alpha) of the element edges are here varied.
# A misalignment angle of zero will result in an Abaqus pre error due to an *NMAP rotation command being used in the input deck
parameters = {'misalignment_angle': [-45, -30, -15, 1, 5, 10, 15], 'alpha': [1.0, 1.5]}
# The maximum misalignment_angle value must be less than 0.5*atan(1/alpha) to pass the below test
# Closed-form equation for the matrix characteristic element length, valid for misalignment angles between -45 and +45 degrees and gamma = 90deg
L2 = 0.2 # matrix-direction element edge length
Lc_eq = lambda L, alpha, psi: L * (alpha * math.sin(abs(math.radians(psi))) + math.cos(math.radians(psi)))
# Element sizes are dependent on the misalignment and skew angles
expectedpy_parameters = {'Lc1': [Lc_eq(L2*alpha, 1.0/alpha, psi) for alpha in parameters['alpha'] for psi in parameters['misalignment_angle']],
'Lc2': [Lc_eq(L2, alpha, psi) for alpha in parameters['alpha'] for psi in parameters['misalignment_angle']]}
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
copyParametersFile()
class ParametricStressLife(av.TestCase):
"""
Generate data for a stress life plot with a series of fatigue analyses.
"""
# Specify meta class
__metaclass__ = av.ParametricMetaClass
# Refers to the template input file name
baseName = "test_COH3D8_fatigue_normal"
parameters = {'stress_ratio': [0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99]}
expectedpy_parameters = {'stress_ratio': parameters['stress_ratio']}
fatigue_R_ratio = 0.5
# Class-wide methods
@classmethod
def setUpClass(cls):
modifyParametersFile(
fatigue_step = '2',
fatigue_R_ratio = str(cls.fatigue_R_ratio),
fatigue_damage_min_threshold = '5.d-7',
fatigue_damage_max_threshold = '1.d-4',
cycles_per_increment_mod = '0.1d0'
)
@classmethod
def tearDownClass(cls):
plotStressLife(baseName=cls.baseName, stressRatios=cls.parameters['stress_ratio'], R_ratio=cls.fatigue_R_ratio)
class ParametricFailureEnvelope_sig12sig22(av.TestCase):
"""
Generate failure envelope in the sigma12 - sigma22 space with a C3D8R element
"""
# Specify meta class
__metaclass__ = av.ParametricMetaClass
# Refers to the template input file name
baseName = "test_C3D8R_failureEnvelope_sig12sig22"
# Range of parameters to test; all combinations are tested
abcissaStrengths = [-199.8, 62.3]
ordinateStrengths = [92.3]
parameters = {'loadRatio': [x/100. for x in range(0,101,5)], 'matrixStrength': abcissaStrengths}
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
modifyParametersFile(alpha_inc = '1')
@classmethod
def tearDownClass(cls):
plotFailureEnvelope(baseName=cls.baseName, abscissaIdentifier='S22', ordinateIdentifier='S12', abcissaStrengths=cls.abcissaStrengths, ordinateStrengths=cls.ordinateStrengths)
class ParametricFailureEnvelope_sig11sig22(av.TestCase):
"""
Generate failure envelope in the sigma11 - sigma22 space with C3D8R element
"""
# Specify meta class
__metaclass__ = av.ParametricMetaClass
# Refers to the template input file name
baseName = "test_C3D8R_failureEnvelope_sig11sig22"
# Range of parameters to test; all combinations are tested
abcissaStrengths = [-1200.1, 2326.2]
ordinateStrengths = [-199.8, 62.3]
parameters = {'loadRatio': [x/100. for x in range(0,101,10)], 'ordinateStrength': ordinateStrengths, 'abcissaStrength': abcissaStrengths}
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
copyParametersFile()
@classmethod
def tearDownClass(cls):
plotFailureEnvelope(baseName=cls.baseName, abscissaIdentifier='S11', ordinateIdentifier='S22', abcissaStrengths=cls.abcissaStrengths, ordinateStrengths=cls.ordinateStrengths)
class ParametricKinkBandWidth_twoElement(av.TestCase):
"""
Tests for fiber compression damage mode to ensure mesh objectivity
Should yield the same response as ParametricKinkBandWidth_singleElement
"""
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
copyParametersFile()
# Specify meta class
__metaclass__ = av.ParametricMetaClass
# Refers to the template input file name
baseName = "test_C3D8R_twoElement_fiberCompression_FKT"
# Use python script instead of input file
pythonScriptForModel = True
# Range of parameters to test; all combinations are tested
parameters = {'elasticElToTotal': [0.4, 0.5, 0.6, 0.7, 0.8, 0.9]}
# Crush stress is different for each kinkband size, so the expected values are specified here
expectedpy_parameters = {'crushStress': [-7.9, -8.8, -9.6, -10.3, -11, -11.5]}
class ParametricKinkBandWidth_singleElement(av.TestCase):
"""
Tests to show the effect of kinkband width relative to element size
"""
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
copyParametersFile()
# Specify meta class
__metaclass__ = av.ParametricMetaClass
# Refers to the template input file name
baseName = "test_C3D8R_fiberCompression_FKT_12"
# Range of parameters to test; all combinations are tested
parameters = {'wkbToTotal': [0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}
# Crush stress is different for each kinkband size, so the expected values are specified here
expectedpy_parameters = {'crushStress': [-7.9, -8.8, -9.6, -10.3, -11, -11.5, -11.9]}
# Ignore the displacement at peak load
    expectedpy_ignore = ('x_at_peak_in_xy',)  # trailing comma makes this a one-element tuple rather than a plain string
class VerifyDebugPy(av.TestCase):
"""
Tests to verify that the logic that writes the debug.py file is working
properly.
Single element tests are run with a special flag set
(debug_kill_at_total_time) to trigger an error and write the debug.py
file. The debug.py file is loaded and executed by the Python Extension
Module. The tests compare the state variables calculated using Abaqus to
those calculated using the Python Extension Module. Only the state
variables listed in the corresponding verify_debug_[jobname]_expected.py
are checked; the state variables must be equal to pass the test.
Several single element tests are used to exercise all the state variables.
The Python Extension Module helper routine 'verify_debug.py' produces a
json file with the state variables calculated from Abaqus and the Python
Extension Module. The first entry is the value from Abaqus and the second
entry is the value from the Python Extension Module.
"""
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
# Check for bash
try:
with open(os.path.join(os.getcwd(), os.pardir, 'etc', 'config.json'), 'r') as f:
config = json.load(f)
if not config["bash"]:
raise av.unittest.SkipTest("CompDam configuration has bash disabled; skipping")
except IOError:
print("WARNING: CompDam configuration was not set during installation. Run `python setup.py` from the CompDam root folder.")
raise av.unittest.SkipTest("Bash not found")
# Check for abaverify >= 0.5.0
installed_version = av.__version__
minimum_required_version = "0.5.0"
if _versiontuple(installed_version) < _versiontuple(minimum_required_version):
raise av.unittest.SkipTest("Abaverify 0.5.0 or newer required for these tests; skipping")
# Run compile script
sys.stdout.write('Compiling CompDam into a Python Extension Module ... ')
subprocess.check_output('bash -i pyextmod_compile.sh', stderr=subprocess.STDOUT, shell=True)
sys.stdout.write(' DONE\n')
# -----------------------------------------------------------------------------------------
# Test methods
def test_C3D8R_matrixTension(self):
modifyParametersFile(debug_kill_at_total_time='0.09d0', logLevel='3')
self.runTest("test_C3D8R_matrixTension", func=evaluate_pyextmod_output, arguments=['dgdevolve'])
modifyParametersFile(debug_kill_at_total_time = '-1.d0', logLevel='2')
def test_C3D8R_fiberCompression_FKT_12(self):
modifyParametersFile(debug_kill_at_total_time='0.08d0', logLevel='4')
self.runTest("test_C3D8R_fiberCompression_FKT_12", func=evaluate_pyextmod_output, arguments=['dgdkinkband'])
modifyParametersFile(debug_kill_at_total_time = '-1.d0', logLevel='2')
class SingleElementSchaeferTests(av.TestCase):
"""
Single element models to test the CompDam_DGD code base for Schaefer theory responses
"""
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
copyParametersFile()
# -----------------------------------------------------------------------------------------
# Test methods
def test_C3D8R_schaefer_oat90(self):
""" Simple tension applied in the matrix direction, solid element. Tests Schaefer theory """
self.runTest("test_C3D8R_schaefer_oat90")
def test_C3D8R_schaefer_oat30(self):
""" Off Axis Tension (30 deg) solid element. Tests Schaefer theory """
self.runTest("test_C3D8R_schaefer_oat30")
def test_C3D8R_schaefer_oat60(self):
""" Off Axis Tension (60 deg) solid element. Tests Schaefer theory """
self.runTest("test_C3D8R_schaefer_oat60")
def test_C3D8R_schaefer_oat75(self):
""" Off Axis Tension (75 deg) solid element. Tests Schaefer theory """
self.runTest("test_C3D8R_schaefer_oat75")
class SingleElementCohesiveTests(av.TestCase):
"""
Single element models to test the cohesive element material model features
"""
# Class-wide methods
@classmethod
def setUpClass(cls):
copyParametersFile()
# -----------------------------------------------------------------------------------------
# Test methods
def test_COH2D4_normal(self):
""" Single 2-D cohesive element test for normal loading """
self.runTest("test_COH2D4_normal")
def test_COH2D4_shear(self):
""" Single 2-D cohesive element test for shear loading """
self.runTest("test_COH2D4_shear")
def test_COH2D4_shear_compression(self):
""" Single 2-D cohesive element test for shear loading with normal compression """
self.runTest("test_COH2D4_shear_compression")
def test_COH2D4_shear_friction(self):
""" Single 2-D cohesive element test for shear loading with friction """
self.runTest("test_COH2D4_shear_friction")
def test_COH3D8_normal(self):
""" Single cohesive element test for mode I response """
self.runTest("test_COH3D8_normal")
def test_COH3D8_shear13(self):
""" Single cohesive element test for 1-3 shear loading """
self.runTest("test_COH3D8_shear13")
def test_COH3D8_shear13_compression(self):
""" Single cohesive element test for 1-3 shear loading with normal compression """
self.runTest("test_COH3D8_shear13_compression")
def test_COH3D8_shear13_friction(self):
""" Single cohesive element test for 1-3 shear loading with friction """
self.runTest("test_COH3D8_shear13_friction")
def test_COH3D8_shear23(self):
""" Single cohesive element test for 2-3 shear loading """
self.runTest("test_COH3D8_shear23")
def test_COH3D8_shear23_compression(self):
""" Single cohesive element test for 2-3 shear loading with normal compression """
self.runTest("test_COH3D8_shear23_compression")
def test_COH3D8_shear23_friction(self):
""" Single cohesive element test for 2-3 shear loading with friction """
self.runTest("test_COH3D8_shear23_friction")
def test_COH3D8_thick_normal(self):
""" Single cohesive element test for finite-thickness mode I response """
self.runTest("test_COH3D8_thick_normal")
def test_COH3D8_thick_shear13(self):
""" Single cohesive element test for finite-thickness 1-3 shear loading """
self.runTest("test_COH3D8_thick_shear13")
def test_COH3D8_thick_shear23(self):
""" Single cohesive element test for finite-thickness 2-3 shear loading """
self.runTest("test_COH3D8_thick_shear23")
class SingleElementTests(av.TestCase):
"""
Single element models to test the solid element material model features
"""
# Class-wide methods
@classmethod
def setUpClass(cls):
copyMatProps()
copyParametersFile()
# -----------------------------------------------------------------------------------------
# Test methods
def test_C3D8R_error(self):
""" Intentionally cause a DGD convergence error """
self.runTest("test_C3D8R_error")
def test_C3D8R_matrixTension(self):
""" Simple tension in the matrix direction, with damage """
self.runTest("test_C3D8R_matrixTension")
def test_C3D8R_simpleShear12(self):
""" Simple shear in the 1-2 plane, with damage """
self.runTest("test_C3D8R_simpleShear12")
def test_C3D8R_simpleShear12friction(self):
""" Compression followed by simple shear in the 1-2 plane """
modifyParametersFile(alpha_search = '.FALSE.')
self.runTest("test_C3D8R_simpleShear12friction")
def test_C3D8R_nonlinearShear12_monotonic(self):
""" Nonlinear shear model, monotonic loading """
self.runTest("test_C3D8R_nonlinearShear12_monotonic")
def test_C3D8R_nonlinearShear12_withFKT(self):
""" Nonlinear shear model, loading and unloading, with FKT """
self.runTest("test_C3D8R_nonlinearShear12_withFKT")
def test_C3D8R_fiberTension(self):
""" Simple tension in fiber direction, with damage """
self.runTest("test_C3D8R_fiberTension")
def test_C3D8R_fiberTension_FN(self):
""" Simple tension in fiber direction, with damage and fiber nonlinearity """
self.runTest("test_C3D8R_fiberTension_FN")
def test_C3D8R_fiberCompression_FKT_12(self):
""" Fiber compression: Fiber kinking theory based model, 1-2 """
self.runTest("test_C3D8R_fiberCompression_FKT_12")
def test_C3D8R_fiberCompression_FKT_13(self):
""" Fiber compression: Fiber kinking theory based model, 1-3 """
self.runTest("test_C3D8R_fiberCompression_FKT_13")
def test_C3D8R_fiberCompression_FKT_3D(self):
""" Fiber compression: Fiber kinking theory based model, 3-D """
self.runTest("test_C3D8R_fiberCompression_FKT_3D")
def test_C3D8R_fiberCompression_FKT_12_FF(self):
""" Fiber compression: Fiber kinking theory based model, fiber failure """
copyAdditionalFiles('test_C3D8R_fiberCompression_FKT_12.inp')
modifyParametersFile(fkt_fiber_failure_angle = '10.d0')
self.runTest("test_C3D8R_fiberCompression_FKT_12_FF")
modifyParametersFile(fkt_fiber_failure_angle = '-1.d0')
def test_C3D8R_fiberCompression_FKT_13_FF(self):
""" Fiber compression: Fiber kinking theory based model, fiber failure """
copyAdditionalFiles('test_C3D8R_fiberCompression_FKT_13.inp')
modifyParametersFile(fkt_fiber_failure_angle = '10.d0')
self.runTest("test_C3D8R_fiberCompression_FKT_13_FF")
modifyParametersFile(fkt_fiber_failure_angle = '-1.d0')
def test_C3D8R_fiberCompression_FKT_12_FF_negphi0(self):
""" Fiber compression: Fiber kinking theory based model, fiber failure """
modifyParametersFile(fkt_fiber_failure_angle = '10.d0')
self.runTest("test_C3D8R_fiberCompression_FKT_12_FF_negphi0")
modifyParametersFile(fkt_fiber_failure_angle = '-1.d0')
def test_C3D8R_fiberCompression_FKT_12_FN(self):
""" Fiber compression: Fiber kinking theory based model, fiber nonlinearity """
self.runTest("test_C3D8R_fiberCompression_FKT_12_FN")
def test_C3D8R_fiberCompression_FKT_13_FN(self):
""" Fiber compression: Fiber kinking theory based model, fiber nonlinearity """
self.runTest("test_C3D8R_fiberCompression_FKT_13_FN")
def test_C3D8R_fiberCompression_FKT_3D_pert(self):
""" Fiber compression: Fiber kinking theory based model, 3-D, perturbation """
self.runTest("test_C3D8R_fiberCompression_FKT_3D_pert")
def test_C3D8R_fiberCompression_FKT_3D_spring_oop(self):
""" Fiber compression: Fiber kinking theory based model, 3-D, out-of-plane stiffness """
self.runTest("test_C3D8R_fiberCompression_FKT_3D_spring_oop")
def test_C3D8R_fiberCompression_FKT_3D_spring_ip(self):
""" Fiber compression: Fiber kinking theory based model, 3-D, in-plane stiffness """
self.runTest("test_C3D8R_fiberCompression_FKT_3D_spring_ip")
def test_C3D8R_fiberCompression_BL(self):
""" Fiber compression: Bilinear softening based model """
self.runTest("test_C3D8R_fiberCompression_BL")
def test_C3D8R_fiberCompression_BL_FN(self):
""" Fiber compression: Bilinear softening based model, fiber nonlinearity """
self.runTest("test_C3D8R_fiberCompression_BL_FN")
def test_C3D8R_fiberLoadReversal(self):
""" Fiber damage model, Maimi: load reversal """
self.runTest("test_C3D8R_fiberLoadReversal")
def test_C3D8R_fiberLoadReversal_FN(self):
""" Fiber damage model, Maimi: load reversal, fiber nonlinearity """
self.runTest("test_C3D8R_fiberLoadReversal_FN")
def test_C3D8R_nonlinearShear12(self):
""" Nonlinear shear model, loading and unloading in 1-2 plane """
modifyParametersFile(alpha_search = '.FALSE.')
self.runTest("test_C3D8R_nonlinearShear12")
def test_C3D8R_nonlinearShear12_loadReversal(self):
""" Nonlinear shear model, loading and unloading in 1-2 plane, including full load reversal """
modifyParametersFile(alpha_search = '.FALSE.')
self.runTest("test_C3D8R_nonlinearShear12_loadReversal")
def test_C3D8R_nonlinearShear13(self):
""" Nonlinear shear model, loading and unloading in 1-3 plane"""
modifyParametersFile(alpha_search = '.FALSE.')
self.runTest("test_C3D8R_nonlinearShear13")
def test_C3D8R_nonlinearShear13_loadReversal(self):
""" Nonlinear shear model, loading and unloading in 1-3 plane, including full load reversal"""
modifyParametersFile(alpha_search = '.FALSE.')
self.runTest("test_C3D8R_nonlinearShear13_loadReversal")
def test_C3D8R_schapery12(self):
""" Schapery micro-damage model, loading and unloading in 1-2 plane"""
modifyParametersFile(alpha_search = '.FALSE.')
self.runTest("test_C3D8R_schapery12")
def test_C3D8R_matrixCompression(self):
""" Simple compression in the matrix direction """
copyParametersFile("test_C3D8R_matrixCompression")
self.runTest("test_C3D8R_matrixCompression")
def test_C3D8R_matrixCompression_friction(self):
""" Simple compression in the matrix direction with friction"""
copyParametersFile("test_C3D8R_matrixCompression_friction")
self.runTest("test_C3D8R_matrixCompression_friction")
def test_C3D8R_elastic_matrixTension(self):
""" Simple tension in the matrix direction, no damage """
self.runTest("test_C3D8R_elastic_matrixTension")
def test_C3D8R_elastic_fiberTension(self):
""" Simple tension in the fiber direction, no damage """
self.runTest("test_C3D8R_elastic_fiberTension")
def test_C3D8R_elastic_simpleShear12(self):
""" Simple shear in the 1-2 plane, no damage """
self.runTest("test_C3D8R_elastic_simpleShear12")
def test_CPS4R_elementSize(self):
""" Characteristic element size test, plane stress element """
self.runTest("test_CPS4R_elementSize")
def test_C3D6_matrixTension(self):
""" Simple tension in the matrix direction, two wedge elements """
self.runTest("test_C3D6_matrixTension")
def test_C3D6_simpleShear12(self):
""" Simple shear in the 1-2 plane, two wedge elements """
self.runTest("test_C3D6_simpleShear12")
def test_S4R_elementSize(self):
""" Characteristic element size test, conventional shell element """
self.runTest("test_S4R_elementSize")
def test_C3D8R_residualStress(self):
""" Residual thermal stress in a solid element """
self.runTest("test_C3D8R_residualStress")
class SingleElementFatigueTests(av.TestCase):
"""
Single element models to test the matrix crack fatigue model
"""
# -----------------------------------------------------------------------------------------
# Test methods
def test_COH3D8_fatigue_normal(self):
""" Single cohesive element fatigue test for mode I loading """
copyParametersFile("test_COH3D8_fatigue_normal")
self.runTest("test_COH3D8_fatigue_normal")
def test_COH3D8_fatigue_shear13(self):
""" Single cohesive element fatigue test for 1-3 shear loading """
copyParametersFile("test_COH3D8_fatigue_shear13")
self.runTest("test_COH3D8_fatigue_shear13")
def test_C3D8R_fatigue_matrixTension(self):
""" Single solid element fatigue test for tensile matrix loading """
copyParametersFile("test_C3D8R_fatigue_matrixTension")
self.runTest("test_C3D8R_fatigue_matrixTension")
def test_C3D8R_fatigue_simpleShear12(self):
""" Single solid element fatigue test for simple shear loading in the 1--2 plane """
copyParametersFile("test_C3D8R_fatigue_simpleShear12")
self.runTest("test_C3D8R_fatigue_simpleShear12")
if __name__ == "__main__":
av.runTests(relPathToUserSub='../for/CompDam_DGD', double=True)
|
e346c63077e783c695e792f8524e42abcd13bb7d
|
90d02fee4d02962c9e3d03314cd1597c70bf2f8c
|
/asdf/_tests/test_array_blocks.py
|
b95be394b4e7f6d1c18f7f9041a300168795db2f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
asdf-format/asdf
|
08e19f5d603c738b0ae94ccd1a339ff6b8cf4209
|
a5b2b2d94f2fc71746f896c6d322439a27dd0bdd
|
refs/heads/main
| 2023-08-17T17:06:20.828932
| 2023-08-08T10:53:27
| 2023-08-08T10:53:27
| 18,112,754
| 328
| 25
|
BSD-3-Clause
| 2023-09-13T15:57:22
| 2014-03-25T19:00:43
|
Python
|
UTF-8
|
Python
| false
| false
| 32,626
|
py
|
test_array_blocks.py
|
import io
import os
import numpy as np
import pytest
import yaml
from numpy.random import random
from numpy.testing import assert_array_equal
import asdf
from asdf import block, constants, generic_io
RNG = np.random.default_rng(6)
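# Note (an illustrative summary, not in the original file): asdf arrays can be
# stored "internal" (binary block in the main file), "external" (one block per
# separate .asdf file), or "inline" (embedded in the YAML tree); the tests
# below exercise each of these modes.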
def test_external_block(tmp_path):
tmp_path = str(tmp_path)
my_array = RNG.normal(size=(8, 8))
tree = {"my_array": my_array}
ff = asdf.AsdfFile(tree)
ff.set_array_storage(my_array, "external")
assert ff.get_array_storage(my_array) == "external"
ff.write_to(os.path.join(tmp_path, "test.asdf"))
assert "test0000.asdf" in os.listdir(tmp_path)
def test_external_block_url():
uri = "asdf://foo"
my_array = RNG.normal(size=(8, 8))
tree = {"my_array": my_array}
asdf.get_config().all_array_storage = "external"
# this should not raise a ValueError since uri is provided
asdf.AsdfFile(tree, uri=uri)
def test_external_block_non_url():
my_array = RNG.normal(size=(8, 8))
tree = {"my_array": my_array}
ff = asdf.AsdfFile(tree)
ff.set_array_storage(my_array, "external")
assert ff.get_array_storage(my_array) == "external"
buff = io.BytesIO()
with pytest.raises(ValueError, match=r"Can't write external blocks, since URI of main file is unknown."):
ff.write_to(buff)
def test_invalid_array_storage():
my_array = RNG.normal(size=(8, 8))
tree = {"my_array": my_array}
ff = asdf.AsdfFile(tree)
with pytest.raises(ValueError, match=r"array_storage must be one of.*"):
ff.set_array_storage(my_array, "foo")
b = block.Block()
b._array_storage = "foo"
with pytest.raises(ValueError, match=r"Unknown array storage type foo"):
ff._blocks.add(b)
with pytest.raises(ValueError, match=r"Unknown array storage type foo"):
ff._blocks.remove(b)
def test_transfer_array_sources(tmp_path):
tmp_path = str(tmp_path)
my_array = RNG.normal(size=(8, 8))
tree = {"my_array": my_array}
ff = asdf.AsdfFile(tree)
ff.write_to(os.path.join(tmp_path, "test.asdf"))
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert_array_equal(my_array, ff.tree["my_array"])
ff.write_to(os.path.join(tmp_path, "test2.asdf"))
# write_to should have no effect on getting the original data
assert_array_equal(my_array, ff.tree["my_array"])
assert ff._fd is None
def test_write_to_same(tmp_path):
tmp_path = str(tmp_path)
my_array = RNG.normal(size=(8, 8))
tree = {"my_array": my_array}
ff = asdf.AsdfFile(tree)
ff.write_to(os.path.join(tmp_path, "test.asdf"))
with asdf.open(os.path.join(tmp_path, "test.asdf"), mode="rw") as ff:
assert_array_equal(my_array, ff.tree["my_array"])
ff.tree["extra"] = [0] * 1000
ff.write_to(os.path.join(tmp_path, "test2.asdf"))
with asdf.open(os.path.join(tmp_path, "test2.asdf"), mode="rw") as ff:
assert_array_equal(my_array, ff.tree["my_array"])
def test_pad_blocks(tmp_path):
tmp_path = str(tmp_path)
# This is the case where the new tree can't fit in the available space
my_array = np.ones((8, 8)) * 1
my_array2 = np.ones((42, 5)) * 2
tree = {"my_array": my_array, "my_array2": my_array2}
ff = asdf.AsdfFile(tree)
ff.write_to(os.path.join(tmp_path, "test.asdf"), pad_blocks=True)
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert_array_equal(ff.tree["my_array"], my_array)
assert_array_equal(ff.tree["my_array2"], my_array2)
def test_update_expand_tree(tmp_path):
tmp_path = str(tmp_path)
testpath = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
my_array = np.arange(64) * 1
my_array2 = np.arange(64) * 2
tree = {"arrays": [my_array, my_array2, np.arange(3)]}
ff = asdf.AsdfFile(tree)
ff.set_array_storage(tree["arrays"][2], "inline")
assert len(list(ff._blocks.inline_blocks)) == 1
ff.write_to(testpath, pad_blocks=True)
with asdf.open(testpath, mode="rw") as ff:
assert_array_equal(ff.tree["arrays"][0], my_array)
orig_offset = ff._blocks[ff.tree["arrays"][0]].offset
ff.tree["extra"] = [0] * 6000
ff.update()
with asdf.open(testpath) as ff:
assert orig_offset <= ff._blocks[ff.tree["arrays"][0]].offset
assert ff._blocks[ff.tree["arrays"][2]].array_storage == "inline"
assert_array_equal(ff.tree["arrays"][0], my_array)
assert_array_equal(ff.tree["arrays"][1], my_array2)
# Now, we expand the header only by a little bit
ff = asdf.AsdfFile(tree)
ff.set_array_storage(tree["arrays"][2], "inline")
ff.write_to(os.path.join(tmp_path, "test2.asdf"), pad_blocks=True)
with asdf.open(os.path.join(tmp_path, "test2.asdf"), mode="rw") as ff:
orig_offset = ff._blocks[ff.tree["arrays"][0]].offset
ff.tree["extra"] = [0] * 2
ff.update()
with asdf.open(os.path.join(tmp_path, "test2.asdf")) as ff:
assert orig_offset == ff._blocks[ff.tree["arrays"][0]].offset
assert ff._blocks[ff.tree["arrays"][2]].array_storage == "inline"
assert_array_equal(ff.tree["arrays"][0], my_array)
assert_array_equal(ff.tree["arrays"][1], my_array2)
def test_update_all_external(tmp_path):
fn = tmp_path / "test.asdf"
my_array = np.arange(64) * 1
my_array2 = np.arange(64) * 2
tree = {"arrays": [my_array, my_array2]}
af = asdf.AsdfFile(tree)
af.write_to(fn)
with asdf.config.config_context() as cfg:
cfg.array_inline_threshold = 10
cfg.all_array_storage = "external"
with asdf.open(fn, mode="rw") as af:
af.update()
assert "test0000.asdf" in os.listdir(tmp_path)
assert "test0001.asdf" in os.listdir(tmp_path)
def _get_update_tree():
return {"arrays": [np.arange(64) * 1, np.arange(64) * 2, np.arange(64) * 3]}
def test_update_delete_first_array(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
tree = _get_update_tree()
ff = asdf.AsdfFile(tree)
ff.write_to(path, pad_blocks=True)
original_size = os.stat(path).st_size
with asdf.open(os.path.join(tmp_path, "test.asdf"), mode="rw") as ff:
del ff.tree["arrays"][0]
ff.update()
assert os.stat(path).st_size <= original_size
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert_array_equal(ff.tree["arrays"][0], tree["arrays"][1])
assert_array_equal(ff.tree["arrays"][1], tree["arrays"][2])
def test_update_delete_last_array(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
tree = _get_update_tree()
ff = asdf.AsdfFile(tree)
ff.write_to(path, pad_blocks=True)
original_size = os.stat(path).st_size
with asdf.open(os.path.join(tmp_path, "test.asdf"), mode="rw") as ff:
del ff.tree["arrays"][-1]
ff.update()
assert os.stat(path).st_size <= original_size
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert_array_equal(ff.tree["arrays"][0], tree["arrays"][0])
assert_array_equal(ff.tree["arrays"][1], tree["arrays"][1])
def test_update_delete_middle_array(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
tree = _get_update_tree()
ff = asdf.AsdfFile(tree)
ff.write_to(path, pad_blocks=True)
original_size = os.stat(path).st_size
with asdf.open(os.path.join(tmp_path, "test.asdf"), mode="rw") as ff:
del ff.tree["arrays"][1]
ff.update()
assert len(ff._blocks._internal_blocks) == 2
assert os.stat(path).st_size <= original_size
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert len(ff.tree["arrays"]) == 2
assert ff.tree["arrays"][0]._source == 0
assert ff.tree["arrays"][1]._source == 1
assert_array_equal(ff.tree["arrays"][0], tree["arrays"][0])
assert_array_equal(ff.tree["arrays"][1], tree["arrays"][2])
def test_update_replace_first_array(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
tree = _get_update_tree()
ff = asdf.AsdfFile(tree)
ff.write_to(path, pad_blocks=True)
original_size = os.stat(path).st_size
with asdf.open(os.path.join(tmp_path, "test.asdf"), mode="rw") as ff:
ff.tree["arrays"][0] = np.arange(32)
ff.update()
assert os.stat(path).st_size <= original_size
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert_array_equal(ff.tree["arrays"][0], np.arange(32))
assert_array_equal(ff.tree["arrays"][1], tree["arrays"][1])
assert_array_equal(ff.tree["arrays"][2], tree["arrays"][2])
def test_update_replace_last_array(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
tree = _get_update_tree()
ff = asdf.AsdfFile(tree)
ff.write_to(path, pad_blocks=True)
original_size = os.stat(path).st_size
with asdf.open(os.path.join(tmp_path, "test.asdf"), mode="rw") as ff:
ff.tree["arrays"][2] = np.arange(32)
ff.update()
assert os.stat(path).st_size <= original_size
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert_array_equal(ff.tree["arrays"][0], tree["arrays"][0])
assert_array_equal(ff.tree["arrays"][1], tree["arrays"][1])
assert_array_equal(ff.tree["arrays"][2], np.arange(32))
def test_update_replace_middle_array(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
tree = _get_update_tree()
ff = asdf.AsdfFile(tree)
ff.write_to(path, pad_blocks=True)
original_size = os.stat(path).st_size
with asdf.open(os.path.join(tmp_path, "test.asdf"), mode="rw") as ff:
ff.tree["arrays"][1] = np.arange(32)
ff.update()
assert os.stat(path).st_size <= original_size
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert_array_equal(ff.tree["arrays"][0], tree["arrays"][0])
assert_array_equal(ff.tree["arrays"][1], np.arange(32))
assert_array_equal(ff.tree["arrays"][2], tree["arrays"][2])
def test_update_add_array(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
tree = _get_update_tree()
ff = asdf.AsdfFile(tree)
ff.write_to(path, pad_blocks=True)
with asdf.open(os.path.join(tmp_path, "test.asdf"), mode="rw") as ff:
ff.tree["arrays"].append(np.arange(32))
ff.update()
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert_array_equal(ff.tree["arrays"][0], tree["arrays"][0])
assert_array_equal(ff.tree["arrays"][1], tree["arrays"][1])
assert_array_equal(ff.tree["arrays"][2], tree["arrays"][2])
assert_array_equal(ff.tree["arrays"][3], np.arange(32))
def test_update_add_array_at_end(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
tree = _get_update_tree()
ff = asdf.AsdfFile(tree)
ff.write_to(path, pad_blocks=True)
original_size = os.stat(path).st_size
with asdf.open(os.path.join(tmp_path, "test.asdf"), mode="rw") as ff:
ff.tree["arrays"].append(np.arange(65536, dtype="<i8"))
ff.update()
assert len(ff._blocks) == 4
assert os.stat(path).st_size >= original_size
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
assert_array_equal(ff.tree["arrays"][0], tree["arrays"][0])
assert_array_equal(ff.tree["arrays"][1], tree["arrays"][1])
assert_array_equal(ff.tree["arrays"][2], tree["arrays"][2])
assert_array_equal(ff.tree["arrays"][3], np.arange(65536, dtype="<i8"))
def test_update_replace_all_arrays(tmp_path):
tmp_path = str(tmp_path)
testpath = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
my_array = np.ones((64, 64)) * 1
tree = {
"my_array": my_array,
}
ff = asdf.AsdfFile(tree)
ff.write_to(testpath, pad_blocks=True)
with asdf.open(testpath, mode="rw") as ff:
assert_array_equal(ff.tree["my_array"], np.ones((64, 64)) * 1)
ff.tree["my_array"] = np.ones((64, 64)) * 2
ff.update()
with asdf.open(testpath) as ff:
assert_array_equal(ff.tree["my_array"], np.ones((64, 64)) * 2)
def test_update_array_in_place(tmp_path):
tmp_path = str(tmp_path)
testpath = os.path.join(tmp_path, "test.asdf")
# This is the case where the new tree can't fit in the available space
my_array = np.ones((64, 64)) * 1
tree = {
"my_array": my_array,
}
ff = asdf.AsdfFile(tree)
ff.write_to(testpath, pad_blocks=True)
with asdf.open(testpath, mode="rw") as ff:
array = np.asarray(ff.tree["my_array"])
array *= 2
ff.update()
with asdf.open(testpath) as ff:
assert_array_equal(ff.tree["my_array"], np.ones((64, 64)) * 2)
def test_init_from_asdffile(tmp_path):
tmp_path = str(tmp_path)
my_array = RNG.normal(size=(8, 8))
tree = {"my_array": my_array}
ff = asdf.AsdfFile(tree)
ff2 = asdf.AsdfFile(ff)
assert ff.tree["my_array"] is ff2.tree["my_array"]
assert_array_equal(ff.tree["my_array"], ff2.tree["my_array"])
assert ff._blocks[my_array] != ff2._blocks[my_array]
ff2.tree["my_array"] = None
assert_array_equal(ff.tree["my_array"], my_array)
ff.write_to(os.path.join(tmp_path, "test.asdf"))
with asdf.open(os.path.join(tmp_path, "test.asdf")) as ff:
ff2 = asdf.AsdfFile(ff)
assert ff.tree["my_array"] is not ff2.tree["my_array"]
assert_array_equal(ff.tree["my_array"], ff2.tree["my_array"])
assert ff._blocks[my_array] != ff2._blocks[my_array]
ff2.tree["my_array"] = None
assert_array_equal(ff.tree["my_array"], my_array)
def test_seek_until_on_block_boundary():
# Create content where the first block begins on a
# file-reading-block boundary.
content = b"""#ASDF 1.0.0
%YAML 1.1
%TAG ! tag:stsci.edu:asdf/
--- !core/asdf-1.0.0
foo : bar
...
"""
content += b"\0" * (io.DEFAULT_BUFFER_SIZE - 2) + constants.BLOCK_MAGIC + b"\0\x30" + b"\0" * 50
buff = io.BytesIO(content)
ff = asdf.open(buff)
assert len(ff._blocks) == 1
buff.seek(0)
fd = generic_io.InputStream(buff, "r")
ff = asdf.open(fd)
assert len(ff._blocks) == 1
def test_checksum(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
my_array = np.arange(0, 64, dtype="<i8").reshape((8, 8))
tree = {"my_array": my_array}
ff = asdf.AsdfFile(tree)
ff.write_to(path)
with asdf.open(path, validate_checksums=True) as ff:
assert type(ff._blocks._internal_blocks[0].checksum) == bytes
assert ff._blocks._internal_blocks[0].checksum == b"\xcaM\\\xb8t_L|\x00\n+\x01\xf1\xcfP1"
def test_checksum_update(tmp_path):
tmp_path = str(tmp_path)
path = os.path.join(tmp_path, "test.asdf")
my_array = np.arange(0, 64, dtype="<i8").reshape((8, 8))
tree = {"my_array": my_array}
ff = asdf.AsdfFile(tree)
ff.write_to(path)
with asdf.open(path, mode="rw") as ff:
ff.tree["my_array"][7, 7] = 0.0
# update() should update the checksum, even if the data itself
# is memmapped and isn't expressly re-written.
ff.update()
with asdf.open(path, validate_checksums=True) as ff:
assert ff._blocks._internal_blocks[0].checksum == b"T\xaf~[\x90\x8a\x88^\xc2B\x96D,N\xadL"
def test_deferred_block_loading(small_tree):
buff = io.BytesIO()
ff = asdf.AsdfFile(small_tree)
# Since we're testing with small arrays, force all arrays to be stored
# in internal blocks rather than letting some of them be automatically put
# inline.
ff.write_to(buff, include_block_index=False, all_array_storage="internal")
buff.seek(0)
with asdf.open(buff) as ff2:
assert len([x for x in ff2._blocks.blocks if isinstance(x, block.Block)]) == 1
ff2.tree["science_data"] * 2
ff2.tree["not_shared"] * 2
assert len([x for x in ff2._blocks.blocks if isinstance(x, block.Block)]) == 2
with pytest.raises(ValueError, match=r"Block .* not found."):
ff2._blocks.get_block(2)
def test_block_index():
buff = io.BytesIO()
arrays = []
for i in range(100):
arrays.append(np.ones((8, 8)) * i)
tree = {"arrays": arrays}
ff = asdf.AsdfFile(tree)
ff.write_to(buff)
buff.seek(0)
with asdf.open(buff) as ff2:
assert isinstance(ff2._blocks._internal_blocks[0], block.Block)
assert len(ff2._blocks._internal_blocks) == 100
for i in range(2, 99):
assert isinstance(ff2._blocks._internal_blocks[i], block.UnloadedBlock)
assert isinstance(ff2._blocks._internal_blocks[99], block.Block)
# Force the loading of one array
ff2.tree["arrays"][50] * 2
for i in range(2, 99):
if i == 50:
assert isinstance(ff2._blocks._internal_blocks[i], block.Block)
else:
assert isinstance(ff2._blocks._internal_blocks[i], block.UnloadedBlock)
def test_large_block_index():
"""
This test is designed to test reading of a block index that is
larger than a single file system block, which is why we create
io.DEFAULT_BUFFER_SIZE / 4 arrays, and assuming each entry has more
than one digit in its address, we're guaranteed to have an index
larger than a filesystem block.
"""
# TODO: It would be nice to find a way to make this test faster. The
# real bottleneck here is the enormous YAML section.
buff = io.BytesIO()
narrays = int(io.DEFAULT_BUFFER_SIZE / 4)
arrays = []
for i in range(narrays):
arrays.append(np.array([i], np.uint16))
tree = {"arrays": arrays}
ff = asdf.AsdfFile(tree)
# Since we're testing with small arrays, force all arrays to be stored
# in internal blocks rather than letting some of them be automatically put
# inline.
ff.write_to(buff, all_array_storage="internal")
buff.seek(0)
with asdf.open(buff) as ff2:
assert isinstance(ff2._blocks._internal_blocks[0], block.Block)
assert len(ff2._blocks._internal_blocks) == narrays
def test_no_block_index():
buff = io.BytesIO()
arrays = []
for i in range(10):
arrays.append(np.ones((8, 8)) * i)
tree = {"arrays": arrays}
ff = asdf.AsdfFile(tree)
ff.write_to(buff, include_block_index=False)
assert constants.INDEX_HEADER not in buff.getvalue()
def test_junk_after_index():
buff = io.BytesIO()
arrays = []
for i in range(10):
arrays.append(np.ones((8, 8)) * i)
tree = {"arrays": arrays}
ff = asdf.AsdfFile(tree)
ff.write_to(buff)
buff.write(b"JUNK")
buff.seek(0)
# This has junk after the block index, so it
# should fall back to the skip method, which
# only loads the first block.
with asdf.open(buff) as ff:
assert len(ff._blocks) == 1
def test_short_file_find_block_index():
# This tests searching for a block index in a file that looks like
# it might have an index, in the last filesystem block or so, but
# ultimately proves to not have an index.
buff = io.BytesIO()
ff = asdf.AsdfFile({"arr": np.ndarray([1]), "arr2": np.ndarray([2])})
# Since we're testing with small arrays, force all arrays to be stored
# in internal blocks rather than letting some of them be automatically put
# inline.
ff.write_to(buff, include_block_index=False, all_array_storage="internal")
buff.write(b"#ASDF BLOCK INDEX\n")
buff.write(b"0" * (io.DEFAULT_BUFFER_SIZE * 4))
buff.seek(0)
with asdf.open(buff) as ff:
assert len(ff._blocks) == 1
def test_invalid_block_index_values():
# This adds a value in the block index that points to something
# past the end of the file. In that case, we should just reject
# the index altogether.
buff = io.BytesIO()
arrays = []
for i in range(10):
arrays.append(np.ones((8, 8)) * i)
tree = {"arrays": arrays}
ff = asdf.AsdfFile(tree)
ff.write_to(buff, include_block_index=False)
ff._blocks._internal_blocks.append(block.UnloadedBlock(buff, 123456789))
ff._blocks.write_block_index(buff, ff)
buff.seek(0)
with asdf.open(buff) as ff:
assert len(ff._blocks) == 1
@pytest.mark.parametrize("block_index_index", [0, -1])
def test_invalid_block_index_offset(block_index_index):
"""
This adds a value in the block index that points to something
that isn't a block
"""
buff = io.BytesIO()
arrays = []
for i in range(10):
arrays.append(np.ones((8, 8)) * i)
tree = {"arrays": arrays}
ff = asdf.AsdfFile(tree)
ff.write_to(buff)
# now overwrite the block index with the first entry
# incorrectly pointing to a non-block offset
buff.seek(0)
bs = buff.read()
block_index_header_start = bs.index(constants.INDEX_HEADER)
block_index_start = block_index_header_start + len(constants.INDEX_HEADER)
block_index = yaml.load(bs[block_index_start:], yaml.SafeLoader)
block_index[block_index_index] -= 4
yaml_version = tuple(int(x) for x in ff.version_map["YAML_VERSION"].split("."))
buff.seek(block_index_start)
yaml.dump(
block_index,
stream=buff,
explicit_start=True,
explicit_end=True,
version=yaml_version,
allow_unicode=True,
encoding="utf-8",
)
buff.seek(0)
with asdf.open(buff) as ff:
assert len(ff._blocks) == 1
for i, a in enumerate(arrays):
assert_array_equal(ff["arrays"][i], a)
def test_unordered_block_index():
"""
This creates a block index that isn't in increasing order
"""
buff = io.BytesIO()
arrays = []
for i in range(10):
arrays.append(np.ones((8, 8)) * i)
tree = {"arrays": arrays}
ff = asdf.AsdfFile(tree)
ff.write_to(buff, include_block_index=False)
ff._blocks._internal_blocks = ff._blocks._internal_blocks[::-1]
ff._blocks.write_block_index(buff, ff)
buff.seek(0)
with asdf.open(buff) as ff:
assert len(ff._blocks) == 1
def test_invalid_block_id():
ff = asdf.AsdfFile()
with pytest.raises(ValueError, match=r"Invalid source id .*"):
ff._blocks.get_block(-2)
def test_dots_but_no_block_index():
"""
This puts `...` at the end of the file, so we sort of think
we might have a block index, but as it turns out, we don't
after reading a few chunks from the end of the file.
"""
buff = io.BytesIO()
tree = {"array": np.ones((8, 8))}
ff = asdf.AsdfFile(tree)
ff.write_to(buff, include_block_index=False)
buff.write(b"A" * 64000)
buff.write(b"...\n")
buff.seek(0)
with asdf.open(buff) as ff:
assert len(ff._blocks) == 1
def test_open_no_memmap(tmp_path):
tmpfile = os.path.join(str(tmp_path), "random.asdf")
tree = {"array": np.random.random((20, 20))}
ff = asdf.AsdfFile(tree)
ff.write_to(tmpfile)
# Test that by default we use memmapped arrays when possible
with asdf.open(tmpfile) as af:
array = af.tree["array"]
# Make sure to access the block so that it gets loaded
array[0]
assert array.block._memmapped is True
assert isinstance(array.block._data, np.memmap)
# Test that if we ask for copy, we do not get memmapped arrays
with asdf.open(tmpfile, copy_arrays=True) as af:
array = af.tree["array"]
assert array.block._memmapped is False
# We can't just check for isinstance(..., np.array) since this will
# be true for np.memmap as well
assert not isinstance(array.block._data, np.memmap)
def test_fd_not_seekable():
data = np.ones(1024)
b = block.Block(data=data)
fd = io.BytesIO()
seekable = lambda: False # noqa: E731
fd.seekable = seekable
write_array = lambda arr: fd.write(arr.tobytes()) # noqa: E731
fd.write_array = write_array
read_blocks = lambda us: [fd.read(us)] # noqa: E731
fd.read_blocks = read_blocks
fast_forward = lambda offset: fd.seek(offset, 1) # noqa: E731
fd.fast_forward = fast_forward
b.output_compression = "zlib"
b.write(fd)
fd.seek(0)
b = block.Block()
b.read(fd)
# We lost the information about the underlying array type,
    # but we can still compare the bytes.
assert b.data.tobytes() == data.tobytes()
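# A reusable form of the monkey-patching above (illustrative sketch, not part
# of the original suite). The hook names written onto the stream (seekable,
# write_array, read_blocks, fast_forward) are copied from the test body; that
# these are exactly the hooks asdf's generic_io consults is an assumption.
def make_nonseekable(fd):
    """Make a BytesIO report itself as non-seekable, with the stream hooks the test fakes."""
    fd.seekable = lambda: False
    fd.write_array = lambda arr: fd.write(arr.tobytes())
    fd.read_blocks = lambda us: [fd.read(us)]
    fd.fast_forward = lambda offset: fd.seek(offset, 1)
    return fd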
def test_add_block_before_fully_loaded(tmp_path):
"""
This test covers a subtle case where a block is added
to a file before all pre-existing internal blocks have
been located. If the BlockManager isn't careful to
locate them all first, the new block will take the index
of an existing block and views over that index will
point to the wrong data.
See https://github.com/asdf-format/asdf/issues/999
"""
file_path1 = tmp_path / "test1.asdf"
file_path2 = tmp_path / "test2.asdf"
arr0 = random(10)
arr1 = random(10)
arr2 = random(10)
with asdf.AsdfFile() as af:
af["arr0"] = None
af["arr1"] = arr1
af["arr2"] = arr2
af.write_to(file_path1, include_block_index=False)
with asdf.open(file_path1) as af:
af["arr0"] = arr0
af.write_to(file_path2)
with asdf.open(file_path2) as af:
assert_array_equal(af["arr0"], arr0)
assert_array_equal(af["arr1"], arr1)
assert_array_equal(af["arr2"], arr2)
def test_block_allocation_on_validate():
"""
Verify that no additional block is allocated when a tree
containing a fortran ordered array is validated
See https://github.com/asdf-format/asdf/issues/1205
"""
array = np.array([[11, 12, 13], [21, 22, 23]], order="F")
af = asdf.AsdfFile({"array": array})
assert len(list(af._blocks.blocks)) == 1
af.validate()
assert len(list(af._blocks.blocks)) == 1
@pytest.mark.parametrize("all_array_storage", ["internal", "external", "inline"])
@pytest.mark.parametrize("all_array_compression", [None, "", "zlib", "bzp2", "lz4", "input"])
@pytest.mark.parametrize("compression_kwargs", [None, {}])
def test_write_to_update_storage_options(tmp_path, all_array_storage, all_array_compression, compression_kwargs):
if all_array_compression == "bzp2" and compression_kwargs is not None:
compression_kwargs = {"compresslevel": 1}
def assert_result(ff):
if "array" not in ff:
# this was called from _write_to while making an external block
# so don't check the result
return
if all_array_storage == "external":
assert "test0000.asdf" in os.listdir(tmp_path)
else:
assert "test0000.asdf" not in os.listdir(tmp_path)
if all_array_storage == "internal":
assert len(ff._blocks._internal_blocks) == 1
else:
assert len(ff._blocks._internal_blocks) == 0
blk = ff._blocks[ff["array"]]
target_compression = all_array_compression or None
if target_compression == "input":
target_compression = None
assert blk.output_compression == target_compression
target_compression_kwargs = compression_kwargs or {}
assert blk._output_compression_kwargs == target_compression_kwargs
arr1 = np.ones((8, 8))
tree = {"array": arr1}
fn = tmp_path / "test.asdf"
ff1 = asdf.AsdfFile(tree)
# as a new AsdfFile is used for write_to and we want
# to check blocks here, we patch _write_to to allow us
# to inspect the blocks in the new AsdfFile before
# it falls out of scope
original = asdf.AsdfFile._write_to
def patched(self, *args, **kwargs):
original(self, *args, **kwargs)
assert_result(self)
asdf.AsdfFile._write_to = patched
# first check write_to
ff1.write_to(
fn,
all_array_storage=all_array_storage,
all_array_compression=all_array_compression,
compression_kwargs=compression_kwargs,
)
asdf.AsdfFile._write_to = original
# then reuse the file to check update
with asdf.open(fn, mode="rw") as ff2:
arr2 = np.ones((8, 8)) * 42
ff2["array"] = arr2
ff2.update(
all_array_storage=all_array_storage,
all_array_compression=all_array_compression,
compression_kwargs=compression_kwargs,
)
assert_result(ff2)
def test_block_key():
# make an AsdfFile to get a BlockManager
af = asdf.AsdfFile()
bm = af._blocks
# add a block for an array
arr = np.array([1, 2, 3], dtype="uint8")
arr_blk = bm.find_or_create_block_for_array(arr)
assert arr_blk in bm._internal_blocks
# now make a new block, add it using a key
blk = block.Block(arr)
key = "foo"
bm.add(blk, key)
assert arr_blk in bm._internal_blocks
assert blk in bm._internal_blocks
# make sure we can retrieve the block by the key
assert bm.find_or_create_block(key) is blk
assert isinstance(bm.find_or_create_block("bar"), block.Block)
# now remove it, the original array block should remain
bm.remove(blk)
assert arr_blk in bm._internal_blocks
assert blk not in bm._internal_blocks
@pytest.mark.parametrize("memmap", [True, False])
@pytest.mark.parametrize("lazy_load", [True, False])
def test_data_callback(tmp_path, memmap, lazy_load):
class Callback:
def __init__(self, data):
self.n_calls = 0
self.data = data
def __call__(self):
self.n_calls += 1
return self.data
arr = np.array([1, 2, 3], dtype="uint8")
callback = Callback(arr)
b = block.Block(memmap=memmap, lazy_load=lazy_load, data_callback=callback)
assert callback.n_calls == 0
assert b.data is arr
assert callback.n_calls == 1
assert b._data is None
assert b.data is arr
assert callback.n_calls == 2
fn = tmp_path / "test.b"
with generic_io.get_file(fn, mode="w") as f:
b.write(f)
assert callback.n_calls == 3
with generic_io.get_file(fn, mode="r") as f:
rb = block.Block(memmap=memmap, lazy_load=lazy_load)
rb.read(f, past_magic=False)
assert_array_equal(rb.data, arr)
with pytest.raises(ValueError, match=r"Block.__init__ cannot contain non-None data and a non-None data_callback"):
b = block.Block(data=arr, memmap=memmap, lazy_load=lazy_load, data_callback=callback)
rb = block.Block(memmap=memmap, lazy_load=lazy_load, data_callback=callback)
with pytest.raises(RuntimeError, match=r"read called on a Block with a data_callback"), generic_io.get_file(
fn,
mode="r",
) as f:
rb.read(f, past_magic=False)
def test_remove_blocks(tmp_path):
"""Test that writing to a new file"""
fn1 = tmp_path / "test.asdf"
fn2 = tmp_path / "test2.asdf"
tree = {"a": np.zeros(3), "b": np.ones(1)}
af = asdf.AsdfFile(tree)
af.write_to(fn1)
with asdf.open(fn1, mode="rw") as af:
assert len(af._blocks._internal_blocks) == 2
af["a"] = None
af.write_to(fn2)
with asdf.open(fn1, mode="rw") as af:
assert len(af._blocks._internal_blocks) == 2
af["a"] = None
af.update()
for fn in (fn1, fn2):
with asdf.open(fn) as af:
assert len(af._blocks._internal_blocks) == 1
def test_write_to_before_update(tmp_path):
# this is a regression test for: https://github.com/asdf-format/asdf/issues/1505
fn1 = tmp_path / "test1.asdf"
fn2 = tmp_path / "test2.asdf"
tree = {"a": np.zeros(3), "b": np.ones(3)}
af = asdf.AsdfFile(tree)
af.write_to(fn1)
with asdf.open(fn1, mode="rw") as af:
af["a"] = None
af.write_to(fn2)
af.update()
|
9d5f7d4554836fe881f9206e18e95bd5e8ee05fe
|
bffbde8cc7a544f1b5d6c1bc4b84ca607226e134
|
/tests/test_single_channel_output.py
|
99f5a8447dfd01851a94ba22d09c5fa9732cd6d9
|
[
"MIT"
] |
permissive
|
VainF/Torch-Pruning
|
c006d274e69c5c592ca1e302a70f6603504b8e07
|
e2478a72022c96af3b9053da359a726939e1adaf
|
refs/heads/master
| 2023-09-05T02:38:36.804176
| 2023-09-04T11:26:29
| 2023-09-04T11:26:29
| 228,203,350
| 1,606
| 231
|
MIT
| 2023-09-06T16:45:28
| 2019-12-15T15:07:24
|
Python
|
UTF-8
|
Python
| false
| false
| 974
|
py
|
test_single_channel_output.py
|
import torch
from torch import nn
import torch.nn.functional as F
import torch_pruning as tp
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 64, 2, stride=2)
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 1, 3, 1, 1)
self.bn3 = nn.BatchNorm2d(1)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return x
def test_single_channel_output():
model = Model()
example_inputs = torch.randn(1, 3, 224, 224)
DG = tp.DependencyGraph().build_dependency(model, example_inputs=example_inputs)
all_groups = list(DG.get_all_groups())
print(all_groups[0])
    assert len(all_groups[0]) == 3
if __name__ == "__main__":
test_single_channel_output()
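# A hedged follow-up sketch (not part of the original test): once the
# dependency graph is built, a group can actually be pruned. The
# get_pruning_group / check_pruning_group / prune calls follow Torch-Pruning's
# documented v1.x API; exact names may differ across versions.
def prune_first_conv_example():
    model = Model()
    example_inputs = torch.randn(1, 3, 224, 224)
    DG = tp.DependencyGraph().build_dependency(model, example_inputs=example_inputs)
    # Prune two output channels of conv1; bn1 and conv2's inputs follow along.
    group = DG.get_pruning_group(model.conv1, tp.prune_conv_out_channels, idxs=[0, 1])
    if DG.check_pruning_group(group):
        group.prune()
    assert model.conv1.out_channels == 14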
|
e232af2b2e686016e79e8bf89123c8bb9208e925
|
f1f21ba2236da38a49a8185ce33b3ce4a4424c1d
|
/pahelix/model_zoo/gem_model.py
|
ff4e213d30493fdd29585201b2486bbb9e31f591
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHelix
|
75a07c2f14475e56e72f4573b2cf82a91d1cbfda
|
e6ab0261eb719c21806bbadfd94001ecfe27de45
|
refs/heads/dev
| 2023-08-05T03:34:55.009355
| 2023-08-01T09:30:44
| 2023-08-01T09:30:44
| 314,704,349
| 771
| 197
|
Apache-2.0
| 2023-08-01T09:15:07
| 2020-11-21T00:53:39
|
Python
|
UTF-8
|
Python
| false
| false
| 12,170
|
py
|
gem_model.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an implementation of GeoGNN, the geometry-based graph neural network used as the compound encoder in GEM.
"""
import numpy as np
import paddle
import paddle.nn as nn
import pgl
from pgl.nn import GraphPool
from pahelix.networks.gnn_block import GIN
from pahelix.networks.compound_encoder import AtomEmbedding, BondEmbedding, \
BondFloatRBF, BondAngleFloatRBF
from pahelix.utils.compound_tools import CompoundKit
from pahelix.networks.gnn_block import MeanPool, GraphNorm
from pahelix.networks.basic_block import MLP
class GeoGNNBlock(nn.Layer):
"""
GeoGNN Block
"""
def __init__(self, embed_dim, dropout_rate, last_act):
super(GeoGNNBlock, self).__init__()
self.embed_dim = embed_dim
self.last_act = last_act
self.gnn = GIN(embed_dim)
self.norm = nn.LayerNorm(embed_dim)
self.graph_norm = GraphNorm()
if last_act:
self.act = nn.ReLU()
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, graph, node_hidden, edge_hidden):
"""tbd"""
out = self.gnn(graph, node_hidden, edge_hidden)
out = self.norm(out)
out = self.graph_norm(graph, out)
if self.last_act:
out = self.act(out)
out = self.dropout(out)
out = out + node_hidden
return out
class GeoGNNModel(nn.Layer):
"""
The GeoGNN Model used in GEM.
Args:
model_config(dict): a dict of model configurations.
"""
def __init__(self, model_config={}):
super(GeoGNNModel, self).__init__()
self.embed_dim = model_config.get('embed_dim', 32)
self.dropout_rate = model_config.get('dropout_rate', 0.2)
self.layer_num = model_config.get('layer_num', 8)
self.readout = model_config.get('readout', 'mean')
self.atom_names = model_config['atom_names']
self.bond_names = model_config['bond_names']
self.bond_float_names = model_config['bond_float_names']
self.bond_angle_float_names = model_config['bond_angle_float_names']
self.init_atom_embedding = AtomEmbedding(self.atom_names, self.embed_dim)
self.init_bond_embedding = BondEmbedding(self.bond_names, self.embed_dim)
self.init_bond_float_rbf = BondFloatRBF(self.bond_float_names, self.embed_dim)
self.bond_embedding_list = nn.LayerList()
self.bond_float_rbf_list = nn.LayerList()
self.bond_angle_float_rbf_list = nn.LayerList()
self.atom_bond_block_list = nn.LayerList()
self.bond_angle_block_list = nn.LayerList()
for layer_id in range(self.layer_num):
self.bond_embedding_list.append(
BondEmbedding(self.bond_names, self.embed_dim))
self.bond_float_rbf_list.append(
BondFloatRBF(self.bond_float_names, self.embed_dim))
self.bond_angle_float_rbf_list.append(
BondAngleFloatRBF(self.bond_angle_float_names, self.embed_dim))
self.atom_bond_block_list.append(
GeoGNNBlock(self.embed_dim, self.dropout_rate, last_act=(layer_id != self.layer_num - 1)))
self.bond_angle_block_list.append(
GeoGNNBlock(self.embed_dim, self.dropout_rate, last_act=(layer_id != self.layer_num - 1)))
# TODO: use self-implemented MeanPool due to pgl bug.
if self.readout == 'mean':
self.graph_pool = MeanPool()
else:
self.graph_pool = pgl.nn.GraphPool(pool_type=self.readout)
print('[GeoGNNModel] embed_dim:%s' % self.embed_dim)
print('[GeoGNNModel] dropout_rate:%s' % self.dropout_rate)
print('[GeoGNNModel] layer_num:%s' % self.layer_num)
print('[GeoGNNModel] readout:%s' % self.readout)
print('[GeoGNNModel] atom_names:%s' % str(self.atom_names))
print('[GeoGNNModel] bond_names:%s' % str(self.bond_names))
print('[GeoGNNModel] bond_float_names:%s' % str(self.bond_float_names))
print('[GeoGNNModel] bond_angle_float_names:%s' % str(self.bond_angle_float_names))
@property
def node_dim(self):
"""the out dim of graph_repr"""
return self.embed_dim
@property
def graph_dim(self):
"""the out dim of graph_repr"""
return self.embed_dim
def forward(self, atom_bond_graph, bond_angle_graph):
"""
Build the network.
"""
node_hidden = self.init_atom_embedding(atom_bond_graph.node_feat)
bond_embed = self.init_bond_embedding(atom_bond_graph.edge_feat)
edge_hidden = bond_embed + self.init_bond_float_rbf(atom_bond_graph.edge_feat)
node_hidden_list = [node_hidden]
edge_hidden_list = [edge_hidden]
for layer_id in range(self.layer_num):
node_hidden = self.atom_bond_block_list[layer_id](
atom_bond_graph,
node_hidden_list[layer_id],
edge_hidden_list[layer_id])
cur_edge_hidden = self.bond_embedding_list[layer_id](atom_bond_graph.edge_feat)
cur_edge_hidden = cur_edge_hidden + self.bond_float_rbf_list[layer_id](atom_bond_graph.edge_feat)
cur_angle_hidden = self.bond_angle_float_rbf_list[layer_id](bond_angle_graph.edge_feat)
edge_hidden = self.bond_angle_block_list[layer_id](
bond_angle_graph,
cur_edge_hidden,
cur_angle_hidden)
node_hidden_list.append(node_hidden)
edge_hidden_list.append(edge_hidden)
node_repr = node_hidden_list[-1]
edge_repr = edge_hidden_list[-1]
graph_repr = self.graph_pool(atom_bond_graph, node_repr)
return node_repr, edge_repr, graph_repr
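# A minimal construction sketch (not part of the original file). The feature
# names below are illustrative placeholders: in practice they must be keys
# known to CompoundKit and must match the featurizer that builds the input
# pgl graphs.
def _example_build_geognn():
    model_config = {
        'embed_dim': 32,
        'dropout_rate': 0.2,
        'layer_num': 8,
        'readout': 'mean',
        'atom_names': ['atomic_num'],  # assumed atom feature name
        'bond_names': ['bond_dir'],  # assumed bond feature name
        'bond_float_names': ['bond_length'],  # assumed float bond feature name
        'bond_angle_float_names': ['bond_angle'],  # assumed angle feature name
    }
    encoder = GeoGNNModel(model_config)
    # forward() then expects an atom-bond graph and a bond-angle graph whose
    # node_feat/edge_feat dicts contain exactly these feature names.
    return encoder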
class GeoPredModel(nn.Layer):
"""tbd"""
def __init__(self, model_config, compound_encoder):
super(GeoPredModel, self).__init__()
self.compound_encoder = compound_encoder
self.hidden_size = model_config['hidden_size']
self.dropout_rate = model_config['dropout_rate']
self.act = model_config['act']
self.pretrain_tasks = model_config['pretrain_tasks']
# context mask
if 'Cm' in self.pretrain_tasks:
self.Cm_vocab = model_config['Cm_vocab']
self.Cm_linear = nn.Linear(compound_encoder.embed_dim, self.Cm_vocab + 3)
self.Cm_loss = nn.CrossEntropyLoss()
        # functional group
self.Fg_linear = nn.Linear(compound_encoder.embed_dim, model_config['Fg_size']) # 494
self.Fg_loss = nn.BCEWithLogitsLoss()
# bond angle with regression
if 'Bar' in self.pretrain_tasks:
self.Bar_mlp = MLP(2,
hidden_size=self.hidden_size,
act=self.act,
in_size=compound_encoder.embed_dim * 3,
out_size=1,
dropout_rate=self.dropout_rate)
self.Bar_loss = nn.SmoothL1Loss()
# bond length with regression
if 'Blr' in self.pretrain_tasks:
self.Blr_mlp = MLP(2,
hidden_size=self.hidden_size,
act=self.act,
in_size=compound_encoder.embed_dim * 2,
out_size=1,
dropout_rate=self.dropout_rate)
self.Blr_loss = nn.SmoothL1Loss()
# atom distance with classification
if 'Adc' in self.pretrain_tasks:
self.Adc_vocab = model_config['Adc_vocab']
self.Adc_mlp = MLP(2,
hidden_size=self.hidden_size,
in_size=self.compound_encoder.embed_dim * 2,
act=self.act,
out_size=self.Adc_vocab + 3,
dropout_rate=self.dropout_rate)
self.Adc_loss = nn.CrossEntropyLoss()
print('[GeoPredModel] pretrain_tasks:%s' % str(self.pretrain_tasks))
def _get_Cm_loss(self, feed_dict, node_repr):
masked_node_repr = paddle.gather(node_repr, feed_dict['Cm_node_i'])
logits = self.Cm_linear(masked_node_repr)
loss = self.Cm_loss(logits, feed_dict['Cm_context_id'])
return loss
def _get_Fg_loss(self, feed_dict, graph_repr):
fg_label = paddle.concat(
[feed_dict['Fg_morgan'],
feed_dict['Fg_daylight'],
feed_dict['Fg_maccs']], 1)
logits = self.Fg_linear(graph_repr)
loss = self.Fg_loss(logits, fg_label)
return loss
def _get_Bar_loss(self, feed_dict, node_repr):
node_i_repr = paddle.gather(node_repr, feed_dict['Ba_node_i'])
node_j_repr = paddle.gather(node_repr, feed_dict['Ba_node_j'])
node_k_repr = paddle.gather(node_repr, feed_dict['Ba_node_k'])
node_ijk_repr = paddle.concat([node_i_repr, node_j_repr, node_k_repr], 1)
pred = self.Bar_mlp(node_ijk_repr)
loss = self.Bar_loss(pred, feed_dict['Ba_bond_angle'] / np.pi)
return loss
def _get_Blr_loss(self, feed_dict, node_repr):
node_i_repr = paddle.gather(node_repr, feed_dict['Bl_node_i'])
node_j_repr = paddle.gather(node_repr, feed_dict['Bl_node_j'])
node_ij_repr = paddle.concat([node_i_repr, node_j_repr], 1)
pred = self.Blr_mlp(node_ij_repr)
loss = self.Blr_loss(pred, feed_dict['Bl_bond_length'])
return loss
def _get_Adc_loss(self, feed_dict, node_repr):
node_i_repr = paddle.gather(node_repr, feed_dict['Ad_node_i'])
node_j_repr = paddle.gather(node_repr, feed_dict['Ad_node_j'])
node_ij_repr = paddle.concat([node_i_repr, node_j_repr], 1)
logits = self.Adc_mlp.forward(node_ij_repr)
atom_dist = paddle.clip(feed_dict['Ad_atom_dist'], 0.0, 20.0)
atom_dist_id = paddle.cast(atom_dist / 20.0 * self.Adc_vocab, 'int64')
loss = self.Adc_loss(logits, atom_dist_id)
return loss
def forward(self, graph_dict, feed_dict, return_subloss=False):
"""
Build the network.
"""
node_repr, edge_repr, graph_repr = self.compound_encoder.forward(
graph_dict['atom_bond_graph'], graph_dict['bond_angle_graph'])
masked_node_repr, masked_edge_repr, masked_graph_repr = self.compound_encoder.forward(
graph_dict['masked_atom_bond_graph'], graph_dict['masked_bond_angle_graph'])
sub_losses = {}
if 'Cm' in self.pretrain_tasks:
sub_losses['Cm_loss'] = self._get_Cm_loss(feed_dict, node_repr)
sub_losses['Cm_loss'] += self._get_Cm_loss(feed_dict, masked_node_repr)
if 'Fg' in self.pretrain_tasks:
sub_losses['Fg_loss'] = self._get_Fg_loss(feed_dict, graph_repr)
sub_losses['Fg_loss'] += self._get_Fg_loss(feed_dict, masked_graph_repr)
if 'Bar' in self.pretrain_tasks:
sub_losses['Bar_loss'] = self._get_Bar_loss(feed_dict, node_repr)
sub_losses['Bar_loss'] += self._get_Bar_loss(feed_dict, masked_node_repr)
if 'Blr' in self.pretrain_tasks:
sub_losses['Blr_loss'] = self._get_Blr_loss(feed_dict, node_repr)
sub_losses['Blr_loss'] += self._get_Blr_loss(feed_dict, masked_node_repr)
if 'Adc' in self.pretrain_tasks:
sub_losses['Adc_loss'] = self._get_Adc_loss(feed_dict, node_repr)
sub_losses['Adc_loss'] += self._get_Adc_loss(feed_dict, masked_node_repr)
loss = 0
for name in sub_losses:
loss += sub_losses[name]
if return_subloss:
return loss, sub_losses
else:
return loss
|
cd62c0cf60d1a2a866714dc9616443fb3a13e1c1
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/wled/test_coordinator.py
|
89817fb8569bacea9aea2c66e54aec8edca71aab
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 6,465
|
py
|
test_coordinator.py
|
"""Tests for the coordinator of the WLED integration."""
import asyncio
from collections.abc import Callable
from copy import deepcopy
from unittest.mock import MagicMock
import pytest
from wled import (
Device as WLEDDevice,
WLEDConnectionClosedError,
WLEDConnectionError,
WLEDError,
)
from homeassistant.components.wled.const import SCAN_INTERVAL
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
import homeassistant.util.dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_not_supporting_websocket(
hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock
) -> None:
"""Ensure no WebSocket attempt is made if non-WebSocket device."""
assert mock_wled.connect.call_count == 0
@pytest.mark.parametrize("device_fixture", ["rgb_websocket"])
async def test_websocket_already_connected(
hass: HomeAssistant, init_integration: MockConfigEntry, mock_wled: MagicMock
) -> None:
"""Ensure no a second WebSocket connection is made, if already connected."""
assert mock_wled.connect.call_count == 1
mock_wled.connected = True
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
assert mock_wled.connect.call_count == 1
@pytest.mark.parametrize("device_fixture", ["rgb_websocket"])
async def test_websocket_connect_error_no_listen(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Ensure we don't start listening if WebSocket connection failed."""
assert mock_wled.connect.call_count == 1
assert mock_wled.listen.call_count == 1
mock_wled.connect.side_effect = WLEDConnectionError
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await hass.async_block_till_done()
assert mock_wled.connect.call_count == 2
assert mock_wled.listen.call_count == 1
@pytest.mark.parametrize("device_fixture", ["rgb_websocket"])
async def test_websocket(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Test WebSocket connection."""
state = hass.states.get("light.wled_websocket")
assert state
assert state.state == STATE_ON
# There is no Future in place yet...
assert mock_wled.connect.call_count == 1
assert mock_wled.listen.call_count == 1
assert mock_wled.disconnect.call_count == 1
connection_connected = asyncio.Future()
connection_finished = asyncio.Future()
async def connect(callback: Callable[[WLEDDevice], None]):
connection_connected.set_result(callback)
await connection_finished
# Mock out wled.listen with a Future
mock_wled.listen.side_effect = connect
# Mock out the event bus
mock_bus = MagicMock()
hass.bus = mock_bus
# Next refresh it should connect
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
callback = await connection_connected
# Connected to WebSocket, disconnect not called
# listening for Home Assistant to stop
assert mock_wled.connect.call_count == 2
assert mock_wled.listen.call_count == 2
assert mock_wled.disconnect.call_count == 1
assert mock_bus.async_listen_once.call_count == 1
assert (
mock_bus.async_listen_once.call_args_list[0][0][0] == EVENT_HOMEASSISTANT_STOP
)
assert (
mock_bus.async_listen_once.call_args_list[0][0][1].__name__ == "close_websocket"
)
assert mock_bus.async_listen_once.return_value.call_count == 0
# Send update from WebSocket
updated_device = deepcopy(mock_wled.update.return_value)
updated_device.state.on = False
callback(updated_device)
await hass.async_block_till_done()
# Check if entity updated
state = hass.states.get("light.wled_websocket")
assert state
assert state.state == STATE_OFF
    # Resolve Future with a connection closed error.
connection_finished.set_exception(WLEDConnectionClosedError)
await hass.async_block_till_done()
# Disconnect called, unsubbed Home Assistant stop listener
assert mock_wled.disconnect.call_count == 2
assert mock_bus.async_listen_once.return_value.call_count == 1
# Light still available, as polling takes over
state = hass.states.get("light.wled_websocket")
assert state
assert state.state == STATE_OFF
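# The tests above coordinate with the mocked listen() through two asyncio
# Futures: one signals that the "connection" is up and delivers the device
# callback, the other controls when and how listen() ends. Below is a
# self-contained sketch of that rendezvous pattern, independent of WLED
# (illustrative only, not part of the original test module):
async def _future_rendezvous_demo() -> None:
    connected: asyncio.Future = asyncio.Future()
    finished: asyncio.Future = asyncio.Future()
    async def listen(callback: Callable[[str], None]) -> None:
        connected.set_result(callback)  # hand the callback to the "test"
        await finished  # block until the "test" decides how the connection ends
    task = asyncio.ensure_future(listen(print))
    callback = await connected  # rendezvous: the connection is now "up"
    callback("update pushed through the callback")
    finished.set_result(None)  # let listen() return cleanly
    await task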
@pytest.mark.parametrize("device_fixture", ["rgb_websocket"])
async def test_websocket_error(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Test WebSocket connection erroring out, marking lights unavailable."""
state = hass.states.get("light.wled_websocket")
assert state
assert state.state == STATE_ON
connection_connected = asyncio.Future()
connection_finished = asyncio.Future()
async def connect(callback: Callable[[WLEDDevice], None]):
connection_connected.set_result(None)
await connection_finished
mock_wled.listen.side_effect = connect
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await connection_connected
# Resolve Future with an error.
connection_finished.set_exception(WLEDError)
await hass.async_block_till_done()
# Light no longer available as an error occurred
state = hass.states.get("light.wled_websocket")
assert state
assert state.state == STATE_UNAVAILABLE
@pytest.mark.parametrize("device_fixture", ["rgb_websocket"])
async def test_websocket_disconnect_on_home_assistant_stop(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled: MagicMock,
) -> None:
"""Ensure WebSocket is disconnected when Home Assistant stops."""
assert mock_wled.disconnect.call_count == 1
connection_connected = asyncio.Future()
connection_finished = asyncio.Future()
async def connect(callback: Callable[[WLEDDevice], None]):
connection_connected.set_result(None)
await connection_finished
mock_wled.listen.side_effect = connect
async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
await connection_connected
assert mock_wled.disconnect.call_count == 1
hass.bus.fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert mock_wled.disconnect.call_count == 2
|
7daf98dc4b9804f8d45cf8215fedb60b538d7e4c
|
c4039d6c964407d74d8625d340d90586a611c3c7
|
/models/dcgan.py
|
291e45197dce932866094b90aac6f9add3ec2081
|
[
"MIT"
] |
permissive
|
Zeleni9/pytorch-wgan
|
2874878a1c5947bfd94e83838f2f6c6f7394804e
|
d5b9b4db573f2efbfa56e115d46b28d1f0465312
|
refs/heads/master
| 2023-04-06T03:55:31.771605
| 2022-01-06T14:30:50
| 2022-01-06T14:30:50
| 122,645,948
| 612
| 155
|
MIT
| 2023-03-25T01:34:09
| 2018-02-23T16:32:34
|
Python
|
UTF-8
|
Python
| false
| false
| 13,902
|
py
|
dcgan.py
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import time as t
import os
from utils.tensorboard_logger import Logger
from utils.inception_score import get_inception_score
from itertools import chain
from torchvision import utils
class Generator(torch.nn.Module):
def __init__(self, channels):
super().__init__()
# Filters [1024, 512, 256]
# Input_dim = 100
# Output_dim = C (number of channels)
self.main_module = nn.Sequential(
# Z latent vector 100
nn.ConvTranspose2d(in_channels=100, out_channels=1024, kernel_size=4, stride=1, padding=0),
nn.BatchNorm2d(num_features=1024),
nn.ReLU(True),
# State (1024x4x4)
nn.ConvTranspose2d(in_channels=1024, out_channels=512, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(num_features=512),
nn.ReLU(True),
# State (512x8x8)
nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(num_features=256),
nn.ReLU(True),
# State (256x16x16)
nn.ConvTranspose2d(in_channels=256, out_channels=channels, kernel_size=4, stride=2, padding=1))
# output of main module --> Image (Cx32x32)
self.output = nn.Tanh()
def forward(self, x):
x = self.main_module(x)
return self.output(x)
class Discriminator(torch.nn.Module):
def __init__(self, channels):
super().__init__()
# Filters [256, 512, 1024]
# Input_dim = channels (Cx64x64)
# Output_dim = 1
self.main_module = nn.Sequential(
# Image (Cx32x32)
nn.Conv2d(in_channels=channels, out_channels=256, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True),
# State (256x16x16)
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
# State (512x8x8)
nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(1024),
nn.LeakyReLU(0.2, inplace=True))
        # output of main module --> State (1024x4x4)
self.output = nn.Sequential(
nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0),
# Output 1
nn.Sigmoid())
def forward(self, x):
x = self.main_module(x)
return self.output(x)
def feature_extraction(self, x):
# Use discriminator for feature extraction then flatten to vector of 16384 features
x = self.main_module(x)
return x.view(-1, 1024*4*4)
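# A quick shape sanity check (illustrative sketch, not part of the original
# module): the generator maps a (N, 100, 1, 1) latent vector to an image of
# shape (N, C, 32, 32), and the discriminator maps that image back to a
# (N, 1, 1, 1) probability.
def _check_dcgan_shapes(channels=3, batch=4):
    g, d = Generator(channels), Discriminator(channels)
    z = torch.randn(batch, 100, 1, 1)
    fake = g(z)
    assert fake.shape == (batch, channels, 32, 32)
    score = d(fake)
    assert score.shape == (batch, 1, 1, 1)
    assert d.feature_extraction(fake).shape == (batch, 1024 * 4 * 4)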
class DCGAN_MODEL(object):
def __init__(self, args):
print("DCGAN model initalization.")
self.G = Generator(args.channels)
self.D = Discriminator(args.channels)
self.C = args.channels
# binary cross entropy loss and optimizer
self.loss = nn.BCELoss()
self.cuda = False
self.cuda_index = 0
# check if cuda is available
self.check_cuda(args.cuda)
        # Use a lower learning rate than suggested by the Adam authors: lr=0.0002, and beta_1 = 0.5 instead of 0.9 works better [Radford2015]
self.d_optimizer = torch.optim.Adam(self.D.parameters(), lr=0.0002, betas=(0.5, 0.999))
self.g_optimizer = torch.optim.Adam(self.G.parameters(), lr=0.0002, betas=(0.5, 0.999))
self.epochs = args.epochs
self.batch_size = args.batch_size
# Set the logger
self.logger = Logger('./logs')
self.number_of_images = 10
# cuda support
def check_cuda(self, cuda_flag=False):
if cuda_flag:
self.cuda = True
self.D.cuda(self.cuda_index)
self.G.cuda(self.cuda_index)
self.loss = nn.BCELoss().cuda(self.cuda_index)
print("Cuda enabled flag: ")
print(self.cuda)
def train(self, train_loader):
self.t_begin = t.time()
generator_iter = 0
#self.file = open("inception_score_graph.txt", "w")
for epoch in range(self.epochs):
self.epoch_start_time = t.time()
for i, (images, _) in enumerate(train_loader):
                # Stop before an incomplete final batch (dataset size may not divide batch_size)
if i == train_loader.dataset.__len__() // self.batch_size:
break
z = torch.rand((self.batch_size, 100, 1, 1))
real_labels = torch.ones(self.batch_size)
fake_labels = torch.zeros(self.batch_size)
if self.cuda:
images, z = Variable(images).cuda(self.cuda_index), Variable(z).cuda(self.cuda_index)
real_labels, fake_labels = Variable(real_labels).cuda(self.cuda_index), Variable(fake_labels).cuda(self.cuda_index)
else:
images, z = Variable(images), Variable(z)
real_labels, fake_labels = Variable(real_labels), Variable(fake_labels)
# Train discriminator
# Compute BCE_Loss using real images
outputs = self.D(images)
d_loss_real = self.loss(outputs.flatten(), real_labels)
real_score = outputs
# Compute BCE Loss using fake images
if self.cuda:
z = Variable(torch.randn(self.batch_size, 100, 1, 1)).cuda(self.cuda_index)
else:
z = Variable(torch.randn(self.batch_size, 100, 1, 1))
fake_images = self.G(z)
outputs = self.D(fake_images)
d_loss_fake = self.loss(outputs.flatten(), fake_labels)
fake_score = outputs
# Optimize discriminator
d_loss = d_loss_real + d_loss_fake
self.D.zero_grad()
d_loss.backward()
self.d_optimizer.step()
# Train generator
# Compute loss with fake images
if self.cuda:
z = Variable(torch.randn(self.batch_size, 100, 1, 1)).cuda(self.cuda_index)
else:
z = Variable(torch.randn(self.batch_size, 100, 1, 1))
fake_images = self.G(z)
outputs = self.D(fake_images)
g_loss = self.loss(outputs.flatten(), real_labels)
# Optimize generator
self.D.zero_grad()
self.G.zero_grad()
g_loss.backward()
self.g_optimizer.step()
generator_iter += 1
if generator_iter % 1000 == 0:
                    # Workaround: the GPU can't hold much more than 800 examples in memory at once,
                    # so loop 10 times, generate 800 examples per pass, and stack them into one list
                    # of 8000 samples. The Inception score is then more reliable, since the generated
                    # examples cover more classes of the Inception model.
# sample_list = []
# for i in range(10):
# z = Variable(torch.randn(800, 100, 1, 1)).cuda(self.cuda_index)
# samples = self.G(z)
# sample_list.append(samples.data.cpu().numpy())
#
# # Flattening list of lists into one list of numpy arrays
# new_sample_list = list(chain.from_iterable(sample_list))
# print("Calculating Inception Score over 8k generated images")
# # Feeding list of numpy arrays
# inception_score = get_inception_score(new_sample_list, cuda=True, batch_size=32,
# resize=True, splits=10)
print('Epoch-{}'.format(epoch + 1))
self.save_model()
if not os.path.exists('training_result_images/'):
os.makedirs('training_result_images/')
# Denormalize images and save them in grid 8x8
                    # Guard CUDA usage so the periodic image dump also works on CPU-only runs
                    z = Variable(torch.randn(800, 100, 1, 1))
                    if self.cuda:
                        z = z.cuda(self.cuda_index)
samples = self.G(z)
samples = samples.mul(0.5).add(0.5)
samples = samples.data.cpu()[:64]
grid = utils.make_grid(samples)
utils.save_image(grid, 'training_result_images/img_generatori_iter_{}.png'.format(str(generator_iter).zfill(3)))
time = t.time() - self.t_begin
#print("Inception score: {}".format(inception_score))
print("Generator iter: {}".format(generator_iter))
print("Time {}".format(time))
# Write to file inception_score, gen_iters, time
#output = str(generator_iter) + " " + str(time) + " " + str(inception_score[0]) + "\n"
#self.file.write(output)
if ((i + 1) % 100) == 0:
print("Epoch: [%2d] [%4d/%4d] D_loss: %.8f, G_loss: %.8f" %
((epoch + 1), (i + 1), train_loader.dataset.__len__() // self.batch_size, d_loss.data, g_loss.data))
                    z = Variable(torch.randn(self.batch_size, 100, 1, 1))
                    if self.cuda:
                        z = z.cuda(self.cuda_index)
# TensorBoard logging
# Log the scalar values
info = {
'd_loss': d_loss.data,
'g_loss': g_loss.data
}
for tag, value in info.items():
self.logger.scalar_summary(tag, value, generator_iter)
# Log values and gradients of the parameters
for tag, value in self.D.named_parameters():
tag = tag.replace('.', '/')
self.logger.histo_summary(tag, self.to_np(value), generator_iter)
self.logger.histo_summary(tag + '/grad', self.to_np(value.grad), generator_iter)
# Log the images while training
info = {
'real_images': self.real_images(images, self.number_of_images),
'generated_images': self.generate_img(z, self.number_of_images)
}
for tag, images in info.items():
self.logger.image_summary(tag, images, generator_iter)
self.t_end = t.time()
print('Time of training-{}'.format((self.t_end - self.t_begin)))
#self.file.close()
# Save the trained parameters
self.save_model()
def evaluate(self, test_loader, D_model_path, G_model_path):
self.load_model(D_model_path, G_model_path)
z = Variable(torch.randn(self.batch_size, 100, 1, 1)).cuda(self.cuda_index)
samples = self.G(z)
samples = samples.mul(0.5).add(0.5)
samples = samples.data.cpu()
grid = utils.make_grid(samples)
print("Grid of 8x8 images saved to 'dgan_model_image.png'.")
utils.save_image(grid, 'dgan_model_image.png')
def real_images(self, images, number_of_images):
if (self.C == 3):
return self.to_np(images.view(-1, self.C, 32, 32)[:self.number_of_images])
else:
return self.to_np(images.view(-1, 32, 32)[:self.number_of_images])
def generate_img(self, z, number_of_images):
samples = self.G(z).data.cpu().numpy()[:number_of_images]
generated_images = []
for sample in samples:
if self.C == 3:
generated_images.append(sample.reshape(self.C, 32, 32))
else:
generated_images.append(sample.reshape(32, 32))
return generated_images
def to_np(self, x):
return x.data.cpu().numpy()
def save_model(self):
torch.save(self.G.state_dict(), './generator.pkl')
torch.save(self.D.state_dict(), './discriminator.pkl')
        print('Models saved to ./generator.pkl & ./discriminator.pkl')
def load_model(self, D_model_filename, G_model_filename):
D_model_path = os.path.join(os.getcwd(), D_model_filename)
G_model_path = os.path.join(os.getcwd(), G_model_filename)
self.D.load_state_dict(torch.load(D_model_path))
self.G.load_state_dict(torch.load(G_model_path))
print('Generator model loaded from {}.'.format(G_model_path))
        print('Discriminator model loaded from {}.'.format(D_model_path))
def generate_latent_walk(self, number):
if not os.path.exists('interpolated_images/'):
os.makedirs('interpolated_images/')
        # Interpolate between two noise vectors (z1, z2) with number_int steps in between
number_int = 10
z_intp = torch.FloatTensor(1, 100, 1, 1)
z1 = torch.randn(1, 100, 1, 1)
z2 = torch.randn(1, 100, 1, 1)
if self.cuda:
z_intp = z_intp.cuda()
z1 = z1.cuda()
z2 = z2.cuda()
z_intp = Variable(z_intp)
images = []
        alpha_step = 1.0 / float(number_int + 1)
        alpha = alpha_step
        print(alpha)
        for i in range(1, number_int + 1):
            z_intp.data = z1*alpha + z2*(1.0 - alpha)
            # Advance alpha linearly; the original `alpha += alpha` doubled it each iteration
            alpha += alpha_step
fake_im = self.G(z_intp)
fake_im = fake_im.mul(0.5).add(0.5) #denormalize
images.append(fake_im.view(self.C,32,32).data.cpu())
        grid = utils.make_grid(images, nrow=number_int)
utils.save_image(grid, 'interpolated_images/interpolated_{}.png'.format(str(number).zfill(3)))
print("Saved interpolated images to interpolated_images/interpolated_{}.".format(str(number).zfill(3)))
|
5e3a66f39a4a76a4e131b403d4b14fcabb4a513f
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/ShiftManagement/Scripts/AssignAnalystToIncidentOOO/AssignAnalystToIncidentOOO_test.py
|
e98e9fd82f45006beda5ceca9b4e708ed23a5fce
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
AssignAnalystToIncidentOOO_test.py
|
import io
import json
import pytest
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
user_data = util_load_json('test_data/user_data.json')
away_user_data = util_load_json('test_data/away_user_data.json')
ooo_user_data = util_load_json('test_data/ooo_user_data.json')
def execute_command_mock(command, args):
if command == 'getUsers':
return [{'Type': 6, 'Contents': user_data}]
if command == 'GetAwayUsers':
return [{'Type': 6, 'EntryContext': {'AwayUsers': away_user_data}}]
if command == 'GetUsersOOO':
return [{'Type': 6, 'EntryContext': {'ShiftManagment.OOOUsers': ooo_user_data}}]
if command == 'setOwner':
assert 'admin' in args['owner']
return [{'Type': 6}]
if command == 'AssignAnalystToIncident':
assert 'admin' in args['username']
return [{'Type': 6}]
raise Exception(f'Unexpected command: {command}')
@pytest.mark.parametrize('args', [({'assignAll': False}), ({'assignAll': True})])
def test_script_flow(mocker, args):
"""
Given:
- Cortex XSOAR args.
When:
- Calling AssignAnalystToIncidentOOO.
Then:
- Ensure expected behaviour.
    Expected behaviour for the given args is verified per command inside the `execute_command_mock` function.
"""
from AssignAnalystToIncidentOOO import main
import demistomock as demisto
mocker.patch.object(demisto, 'executeCommand', side_effect=execute_command_mock)
mocker.patch.object(demisto, 'args', return_value=args)
main()
|
e8baa531b5c5ef9298113218ddf165b56fcc129c
|
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
|
/govern/data-quality/soda-core/soda/core/tests/data_source/test_attributes.py
|
4f2d34a7afd78dc6963dea8b8c8aa255f5d37c19
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"BSD-3-Clause",
"MIT"
] |
permissive
|
alldatacenter/alldata
|
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
|
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
|
refs/heads/master
| 2023-08-05T07:32:25.442740
| 2023-08-03T13:17:24
| 2023-08-03T13:17:24
| 213,321,771
| 774
| 250
|
Apache-2.0
| 2023-09-06T17:35:32
| 2019-10-07T07:36:18
| null |
UTF-8
|
Python
| false
| false
| 5,942
|
py
|
test_attributes.py
|
from helpers.common_test_tables import customers_test_table
from helpers.data_source_fixture import DataSourceFixture
mock_schema = [
{"type": "number", "name": "priority"},
{"type": "singleSelect", "allowedValues": ["sales", "marketing"], "name": "department"},
{"type": "multiSelect", "allowedValues": ["generated", "user-created"], "name": "tags"},
{"type": "text", "name": "sales_owner"},
{"type": "datetime", "name": "arrival_date"},
{"type": "datetime", "name": "arrival_datetime"},
]
mock_variables = {"DEPT": "sales"}
def test_check_attributes_valid(data_source_fixture: DataSourceFixture):
table_name = data_source_fixture.ensure_test_table(customers_test_table)
scan = data_source_fixture.create_test_scan()
scan.mock_check_attributes_schema(mock_schema)
scan.add_variables(mock_variables)
scan.add_sodacl_yaml_str(
f"""
checks for {table_name}:
- row_count > 0:
attributes:
priority: 1
department: ${{DEPT}}
${{DEPT}}_owner: John Doe
tags: ["user-created"]
arrival_date: "2022-12-12"
arrival_datetime: "2022-12-12T12:00:00"
"""
)
scan.execute()
scan.assert_all_checks_pass()
scan_result = scan.build_scan_results()
assert scan_result["checks"][0]["resourceAttributes"] == [
{"name": "priority", "value": "1"},
{"name": "department", "value": "sales"},
{"name": "sales_owner", "value": "John Doe"},
{"name": "tags", "value": ["user-created"]},
{"name": "arrival_date", "value": "2022-12-12"},
{"name": "arrival_datetime", "value": "2022-12-12T12:00:00"},
]
def test_check_attributes_invalid(data_source_fixture: DataSourceFixture):
table_name = data_source_fixture.ensure_test_table(customers_test_table)
scan = data_source_fixture.create_test_scan()
scan.mock_check_attributes_schema(mock_schema)
scan.add_variables(mock_variables)
scan.add_sodacl_yaml_str(
f"""
checks for {table_name}:
- row_count > 0:
attributes:
priority: "high"
something-invalid: some-value
tags: ["unknown"]
arrival_date: 2022/01/01
arrival_datetime: 2022/01/01T01:01:01
"""
)
scan.execute_unchecked()
scan_result = scan.build_scan_results()
assert scan_result["checks"] == []
scan.assert_has_error(
"Soda Cloud does not recognize 'tags': '['unknown']' attribute value. Valid attribute value(s): ['generated', 'user-created']"
)
scan.assert_has_error(
"Soda Cloud does not recognize 'DoubleQuotedScalarString' type of attribute 'priority'. It expects the following type(s): ['int', 'float']"
)
scan.assert_has_error("Soda Cloud does not recognize 'something-invalid' attribute name.")
scan.assert_has_error(
"Soda Cloud expects an ISO formatted date or datetime value for the 'arrival_date' attribute."
)
scan.assert_has_error(
"Soda Cloud expects an ISO formatted date or datetime value for the 'arrival_datetime' attribute."
)
def test_foreach_attributes(data_source_fixture: DataSourceFixture):
table_name = data_source_fixture.ensure_test_table(customers_test_table)
scan = data_source_fixture.create_test_scan()
scan.mock_check_attributes_schema(mock_schema)
scan.add_variables(mock_variables)
scan.add_sodacl_yaml_str(
f"""
for each dataset D:
datasets:
- {table_name}
checks:
- row_count > 0:
attributes:
priority: 1.333
tags: ["generated"]
department: ${{DEPT}}
${{DEPT}}_owner: John Doe
"""
)
scan.execute()
scan.assert_all_checks_pass()
scan_result = scan.build_scan_results()
assert scan_result["checks"][0]["resourceAttributes"] == [
{"name": "priority", "value": "1.333"},
{"name": "tags", "value": ["generated"]},
{"name": "department", "value": "sales"},
{"name": "sales_owner", "value": "John Doe"},
]
def test_check_attributes_skip_invalid(data_source_fixture: DataSourceFixture):
table_name = data_source_fixture.ensure_test_table(customers_test_table)
scan = data_source_fixture.create_test_scan()
scan.mock_check_attributes_schema(mock_schema)
scan.add_variables(mock_variables)
scan.add_sodacl_yaml_str(
f"""
checks for {table_name}:
- row_count > 0:
name: count
attributes:
priority: 1
- missing_count(id) = 0:
attributes:
does-not-exist: 1
"""
)
scan.execute_unchecked()
scan_result = scan.build_scan_results()
assert len(scan_result["checks"]) == 0
def test_all_supported_check_types(data_source_fixture: DataSourceFixture):
table_name = data_source_fixture.ensure_test_table(customers_test_table)
scan = data_source_fixture.create_test_scan()
scan.mock_check_attributes_schema(mock_schema)
scan.add_variables(mock_variables)
scan.add_sodacl_yaml_str(
f"""
checks for {table_name}:
- row_count > 0:
name: count
attributes:
priority: 1
- schema:
fail:
when forbidden column present: [xxx]
attributes:
priority: 1
- failed rows:
fail condition: cat = 'xxx'
attributes:
priority: 1
- values in (cst_size) must exist in {table_name} (cst_size):
attributes:
priority: 1
- freshness(ts) < 10000d:
attributes:
priority: 1
"""
)
scan.execute()
scan.assert_all_checks_pass()
scan.assert_no_error_nor_warning_logs()
|
ebe292547f6ce78ca6601c0dc50388d5961ec103
|
01891a781b63c0ca4413e1bd9bbe1e4dee9d081f
|
/dedupe/serializer.py
|
7de0177e906053d001cf2b73fed9dd2d72d1b1e5
|
[
"MIT"
] |
permissive
|
dedupeio/dedupe
|
f933fa07cadb62c19c478cd801c8a3ff22f7563e
|
f72d4a161bfc66c9e1de9b39e2bd7e01bcad3c49
|
refs/heads/main
| 2023-08-23T15:18:36.902429
| 2023-02-17T16:34:52
| 2023-02-17T16:34:52
| 4,087,724
| 2,702
| 407
|
MIT
| 2023-05-29T02:57:35
| 2012-04-20T14:57:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,463
|
py
|
serializer.py
|
import json
from typing import Any, Iterator, TextIO
from dedupe._typing import TrainingData
def _from_json(json_object: Any) -> Any:
if "__class__" in json_object:
if json_object["__class__"] == "frozenset":
return frozenset(json_object["__value__"])
if json_object["__class__"] == "tuple":
return tuple(json_object["__value__"])
return json_object
def hint_tuples(item: Any) -> Any:
    if isinstance(item, tuple):
        return {"__class__": "tuple", "__value__": [hint_tuples(e) for e in item]}
    elif isinstance(item, list):
        return [hint_tuples(e) for e in item]
    elif isinstance(item, dict):
        return {key: hint_tuples(value) for key, value in item.items()}
    else:
        return item
class TupleEncoder(json.JSONEncoder):
def encode(self, obj: Any) -> Any:
return super().encode(hint_tuples(obj))
def iterencode(self, obj: Any, _one_shot: bool = False) -> Iterator[str]:
return super().iterencode(hint_tuples(obj))
def default(self, python_object: Any) -> Any:
if isinstance(python_object, frozenset):
return {"__class__": "frozenset", "__value__": list(python_object)}
return super().default(python_object)
def read_training(training_file: TextIO) -> Any:
"""
Read training from previously built training data file object
Args:
training_file: file object containing the training data
Returns:
A dictionary with two keys, `match` and `distinct`. See the inverse,
:func:`write_training`.
"""
return json.load(training_file, object_hook=_from_json)
def write_training(labeled_pairs: TrainingData, file_obj: TextIO) -> None:
"""
Write a JSON file that contains labeled examples
Args:
labeled_pairs: A dictionary with two keys, `match` and `distinct`.
The values are lists that can contain pairs of records
file_obj: file object to write training data to
.. code:: python
examples = {
"match": [
({'name' : 'Georgie Porgie'}, {'name' : 'George Porgie'}),
],
"distinct": [
({'name' : 'Georgie Porgie'}, {'name' : 'Georgette Porgette'}),
],
}
with open('training.json', 'w') as f:
dedupe.write_training(examples, f)
"""
json.dump(labeled_pairs, file_obj, cls=TupleEncoder, ensure_ascii=True)
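# Illustrative round-trip sketch (not part of the original module): checks
# that TupleEncoder plus the _from_json hook preserve tuples through JSON.
# The example records are made up.
if __name__ == "__main__":
    import io
    examples = {
        "match": [({"name": "Georgie Porgie"}, {"name": "George Porgie"})],
        "distinct": [],
    }
    buf = io.StringIO()
    write_training(examples, buf)  # tuples become {"__class__": "tuple", ...}
    buf.seek(0)
    assert read_training(buf) == examples  # tuples come back as tuples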
|
dd0049f3f86bf02ced9311a9a59e758cf02631b5
|
def11205c653744bce1e48aac6105ad460f15b6c
|
/reddit.py
|
6344de6b7d5d959e5f344387164761ed308353d4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
snarfed/bridgy
|
1b37aa77c22a62f3d5c0f6fc9d271ed5db30ba69
|
1d1fc440a504acb53333121215f00da0ce9af466
|
refs/heads/main
| 2023-08-31T04:56:05.500157
| 2023-08-22T22:14:10
| 2023-08-22T22:18:33
| 2,968,000
| 580
| 66
| null | 2023-09-11T22:50:13
| 2011-12-12T22:33:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,311
|
py
|
reddit.py
|
"""Reddit source code and datastore model classes."""
from granary import reddit as gr_reddit
from granary import source as gr_source
from oauth_dropins import reddit as oauth_reddit
from oauth_dropins.webutil.util import json_dumps, json_loads
from prawcore.exceptions import NotFound
from flask_app import app
import models
import util
class Reddit(models.Source):
"""A Reddit account.
The key name is the username.
"""
GR_CLASS = gr_reddit.Reddit
OAUTH_START = oauth_reddit.Start
SHORT_NAME = 'reddit'
TYPE_LABELS = {
'post': 'submission',
'comment': 'comment',
}
CAN_PUBLISH = False
DISABLE_HTTP_CODES = ('401', '403')
USERNAME_KEY_ID = True
URL_CANONICALIZER = util.UrlCanonicalizer(domain=GR_CLASS.DOMAIN)
@staticmethod
def new(auth_entity=None, **kwargs):
"""Creates and returns a :class:`Reddit` entity.
Args:
auth_entity: :class:`oauth_dropins.reddit.RedditAuth`
kwargs: property values
"""
assert 'username' not in kwargs
assert 'id' not in kwargs
user = json_loads(auth_entity.user_json)
gr_source = gr_reddit.Reddit(auth_entity.refresh_token)
return Reddit(username=user.get('name'),
auth_entity=auth_entity.key,
url=gr_source.user_url(user.get('name')),
name=user.get('name'),
picture=user.get('icon_img'),
**kwargs)
def silo_url(self):
"""Returns the Reddit account URL, e.g. https://reddit.com/user/foo."""
return self.gr_source.user_url(self.username)
def label_name(self):
"""Returns the username."""
return self.username
def get_activities_response(self, *args, **kwargs):
"""Set user_id manually.
...since Reddit sometimes (always?) 400s our calls to
https://oauth.reddit.com/api/v1/me (via PRAW's Reddit.user.me() ).
"""
kwargs.setdefault('user_id', self.username)
if kwargs.get('count'):
kwargs['count'] = min(kwargs['count'], 10)
try:
return super().get_activities_response(*args, **kwargs)
except NotFound:
# this user was deleted or banned
raise models.DisableSource()
def search_for_links(self):
"""Searches for activities with links to any of this source's web sites.
Returns:
sequence of ActivityStreams activity dicts
"""
urls = {util.schemeless(util.fragmentless(url), slashes=False)
for url in self.domain_urls
if not util.in_webmention_blocklist(util.domain_from_link(url))}
if not urls:
return []
# Search syntax: https://www.reddit.com/wiki/search
url_query = ' OR '.join(f'site:"{u}" OR selftext:"{u}"' for u in urls)
return self.get_activities(
search_query=url_query, group_id=gr_source.SEARCH, etag=self.last_activities_etag,
fetch_replies=False, fetch_likes=False, fetch_shares=False, count=50)
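# Illustrative sketch (not part of the original module): for domain_urls
# containing only 'example.com', the url_query built in search_for_links
# above becomes
#   site:"example.com" OR selftext:"example.com"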
class Callback(oauth_reddit.Callback):
def finish(self, auth_entity, state=None):
util.maybe_add_or_delete_source(Reddit, auth_entity, state)
app.add_url_rule('/reddit/start',
view_func=util.oauth_starter(oauth_reddit.Start).as_view('reddit_start', '/reddit/callback'), methods=['POST'])
app.add_url_rule('/reddit/callback',
view_func=Callback.as_view('reddit_callback', 'unused to_path'))
|
01d64963295741313317af0b8c71662199aa617d
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/tools/python3/src/Lib/email/contentmanager.py
|
b4f5830beada4a2bb7353a4d6672d2397a7fb499
|
[
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"BSD-3-Clause",
"0BSD",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 10,588
|
py
|
contentmanager.py
|
import binascii
import email.charset
import email.message
import email.errors
from email import quoprimime
class ContentManager:
def __init__(self):
self.get_handlers = {}
self.set_handlers = {}
def add_get_handler(self, key, handler):
self.get_handlers[key] = handler
def get_content(self, msg, *args, **kw):
content_type = msg.get_content_type()
if content_type in self.get_handlers:
return self.get_handlers[content_type](msg, *args, **kw)
maintype = msg.get_content_maintype()
if maintype in self.get_handlers:
return self.get_handlers[maintype](msg, *args, **kw)
if '' in self.get_handlers:
return self.get_handlers[''](msg, *args, **kw)
raise KeyError(content_type)
def add_set_handler(self, typekey, handler):
self.set_handlers[typekey] = handler
def set_content(self, msg, obj, *args, **kw):
if msg.get_content_maintype() == 'multipart':
# XXX: is this error a good idea or not? We can remove it later,
# but we can't add it later, so do it for now.
raise TypeError("set_content not valid on multipart")
handler = self._find_set_handler(msg, obj)
msg.clear_content()
handler(msg, obj, *args, **kw)
def _find_set_handler(self, msg, obj):
full_path_for_error = None
for typ in type(obj).__mro__:
if typ in self.set_handlers:
return self.set_handlers[typ]
qname = typ.__qualname__
modname = getattr(typ, '__module__', '')
full_path = '.'.join((modname, qname)) if modname else qname
if full_path_for_error is None:
full_path_for_error = full_path
if full_path in self.set_handlers:
return self.set_handlers[full_path]
if qname in self.set_handlers:
return self.set_handlers[qname]
name = typ.__name__
if name in self.set_handlers:
return self.set_handlers[name]
if None in self.set_handlers:
return self.set_handlers[None]
raise KeyError(full_path_for_error)
raw_data_manager = ContentManager()
def get_text_content(msg, errors='replace'):
content = msg.get_payload(decode=True)
charset = msg.get_param('charset', 'ASCII')
return content.decode(charset, errors=errors)
raw_data_manager.add_get_handler('text', get_text_content)
def get_non_text_content(msg):
return msg.get_payload(decode=True)
for maintype in 'audio image video application'.split():
raw_data_manager.add_get_handler(maintype, get_non_text_content)
del maintype
def get_message_content(msg):
return msg.get_payload(0)
for subtype in 'rfc822 external-body'.split():
raw_data_manager.add_get_handler('message/'+subtype, get_message_content)
del subtype
def get_and_fixup_unknown_message_content(msg):
# If we don't understand a message subtype, we are supposed to treat it as
# if it were application/octet-stream, per
# tools.ietf.org/html/rfc2046#section-5.2.4. Feedparser doesn't do that,
# so do our best to fix things up. Note that it is *not* appropriate to
# model message/partial content as Message objects, so they are handled
# here as well. (How to reassemble them is out of scope for this comment :)
return bytes(msg.get_payload(0))
raw_data_manager.add_get_handler('message',
get_and_fixup_unknown_message_content)
def _prepare_set(msg, maintype, subtype, headers):
msg['Content-Type'] = '/'.join((maintype, subtype))
if headers:
if not hasattr(headers[0], 'name'):
mp = msg.policy
headers = [mp.header_factory(*mp.header_source_parse([header]))
for header in headers]
try:
for header in headers:
if header.defects:
raise header.defects[0]
msg[header.name] = header
except email.errors.HeaderDefect as exc:
raise ValueError("Invalid header: {}".format(
header.fold(policy=msg.policy))) from exc
def _finalize_set(msg, disposition, filename, cid, params):
if disposition is None and filename is not None:
disposition = 'attachment'
if disposition is not None:
msg['Content-Disposition'] = disposition
if filename is not None:
msg.set_param('filename',
filename,
header='Content-Disposition',
replace=True)
if cid is not None:
msg['Content-ID'] = cid
if params is not None:
for key, value in params.items():
msg.set_param(key, value)
# XXX: This is a cleaned-up version of base64mime.body_encode (including a bug
# fix in the calculation of unencoded_bytes_per_line). It would be nice to
# drop both this and quoprimime.body_encode in favor of enhanced binascii
# routines that accepted a max_line_length parameter.
def _encode_base64(data, max_line_length):
encoded_lines = []
unencoded_bytes_per_line = max_line_length // 4 * 3
for i in range(0, len(data), unencoded_bytes_per_line):
thisline = data[i:i+unencoded_bytes_per_line]
encoded_lines.append(binascii.b2a_base64(thisline).decode('ascii'))
return ''.join(encoded_lines)
def _encode_text(string, charset, cte, policy):
lines = string.encode(charset).splitlines()
linesep = policy.linesep.encode('ascii')
def embedded_body(lines): return linesep.join(lines) + linesep
def normal_body(lines): return b'\n'.join(lines) + b'\n'
if cte is None:
# Use heuristics to decide on the "best" encoding.
if max((len(x) for x in lines), default=0) <= policy.max_line_length:
try:
return '7bit', normal_body(lines).decode('ascii')
except UnicodeDecodeError:
pass
if policy.cte_type == '8bit':
return '8bit', normal_body(lines).decode('ascii', 'surrogateescape')
sniff = embedded_body(lines[:10])
sniff_qp = quoprimime.body_encode(sniff.decode('latin-1'),
policy.max_line_length)
sniff_base64 = binascii.b2a_base64(sniff)
# This is a little unfair to qp; it includes lineseps, base64 doesn't.
if len(sniff_qp) > len(sniff_base64):
cte = 'base64'
else:
cte = 'quoted-printable'
if len(lines) <= 10:
return cte, sniff_qp
if cte == '7bit':
data = normal_body(lines).decode('ascii')
elif cte == '8bit':
data = normal_body(lines).decode('ascii', 'surrogateescape')
elif cte == 'quoted-printable':
data = quoprimime.body_encode(normal_body(lines).decode('latin-1'),
policy.max_line_length)
elif cte == 'base64':
data = _encode_base64(embedded_body(lines), policy.max_line_length)
else:
raise ValueError("Unknown content transfer encoding {}".format(cte))
return cte, data
def set_text_content(msg, string, subtype="plain", charset='utf-8', cte=None,
disposition=None, filename=None, cid=None,
params=None, headers=None):
_prepare_set(msg, 'text', subtype, headers)
cte, payload = _encode_text(string, charset, cte, msg.policy)
msg.set_payload(payload)
msg.set_param('charset',
email.charset.ALIASES.get(charset, charset),
replace=True)
msg['Content-Transfer-Encoding'] = cte
_finalize_set(msg, disposition, filename, cid, params)
raw_data_manager.add_set_handler(str, set_text_content)
def set_message_content(msg, message, subtype="rfc822", cte=None,
disposition=None, filename=None, cid=None,
params=None, headers=None):
if subtype == 'partial':
raise ValueError("message/partial is not supported for Message objects")
if subtype == 'rfc822':
if cte not in (None, '7bit', '8bit', 'binary'):
# http://tools.ietf.org/html/rfc2046#section-5.2.1 mandate.
raise ValueError(
"message/rfc822 parts do not support cte={}".format(cte))
# 8bit will get coerced on serialization if policy.cte_type='7bit'. We
# may end up claiming 8bit when it isn't needed, but the only negative
# result of that should be a gateway that needs to coerce to 7bit
# having to look through the whole embedded message to discover whether
# or not it actually has to do anything.
cte = '8bit' if cte is None else cte
elif subtype == 'external-body':
if cte not in (None, '7bit'):
# http://tools.ietf.org/html/rfc2046#section-5.2.3 mandate.
raise ValueError(
"message/external-body parts do not support cte={}".format(cte))
cte = '7bit'
elif cte is None:
# http://tools.ietf.org/html/rfc2046#section-5.2.4 says all future
# subtypes should be restricted to 7bit, so assume that.
cte = '7bit'
_prepare_set(msg, 'message', subtype, headers)
msg.set_payload([message])
msg['Content-Transfer-Encoding'] = cte
_finalize_set(msg, disposition, filename, cid, params)
raw_data_manager.add_set_handler(email.message.Message, set_message_content)
def set_bytes_content(msg, data, maintype, subtype, cte='base64',
disposition=None, filename=None, cid=None,
params=None, headers=None):
_prepare_set(msg, maintype, subtype, headers)
if cte == 'base64':
data = _encode_base64(data, max_line_length=msg.policy.max_line_length)
elif cte == 'quoted-printable':
# XXX: quoprimime.body_encode won't encode newline characters in data,
# so we can't use it. This means max_line_length is ignored. Another
# bug to fix later. (Note: encoders.quopri is broken on line ends.)
data = binascii.b2a_qp(data, istext=False, header=False, quotetabs=True)
data = data.decode('ascii')
elif cte == '7bit':
data = data.decode('ascii')
elif cte in ('8bit', 'binary'):
data = data.decode('ascii', 'surrogateescape')
msg.set_payload(data)
msg['Content-Transfer-Encoding'] = cte
_finalize_set(msg, disposition, filename, cid, params)
for typ in (bytes, bytearray, memoryview):
raw_data_manager.add_set_handler(typ, set_bytes_content)
del typ
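# Illustrative usage sketch (not part of the original module): set and read
# back plain text through the raw_data_manager defined above.
if __name__ == "__main__":
    from email.message import EmailMessage
    msg = EmailMessage()
    raw_data_manager.set_content(msg, "hello world", subtype="plain")
    print(msg["Content-Transfer-Encoding"])   # '7bit' for pure-ASCII text
    print(raw_data_manager.get_content(msg))  # 'hello world\n'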
|
8793136b1ddd8b7d6f05afe80a9341a707628773
|
bf1d4cf83e663e102cbaf77681aeee27f0fd57b1
|
/nlpaug/model/spectrogram/frequency_masking.py
|
2f703ddb2017945b220bb7460d45add22784fb94
|
[
"MIT"
] |
permissive
|
makcedward/nlpaug
|
1616ed30de030d7ec8ef6baf0d3dcc91cc30fed7
|
23800cbb9632c7fc8c4a88d46f9c4ecf68a96299
|
refs/heads/master
| 2023-06-25T01:07:59.107131
| 2022-07-07T05:16:43
| 2022-07-07T05:16:43
| 176,858,880
| 4,128
| 476
|
MIT
| 2023-03-04T04:13:01
| 2019-03-21T03:00:17
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 769
|
py
|
frequency_masking.py
|
import numpy as np
from nlpaug.model.spectrogram import Spectrogram
class FrequencyMasking(Spectrogram):
def __init__(self):
super().__init__()
def manipulate(self, data, f, f0, time_start, time_end):
"""
https://arxiv.org/pdf/1904.08779.pdf, https://arxiv.org/pdf/2001.01401.pdf
Frequency masking is applied so that f consecutive mel
frequency channels [f0, f0 + f) are masked, where f is
first chosen from a uniform distribution from 0 to the
frequency mask parameter F, and f0 is chosen from
[0, v - f). v is the number of mel frequency channels.
"""
aug_data = data.copy()
aug_data[f0:f0+f, time_start:time_end] = 0
return aug_data
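# Illustrative usage sketch (not part of the original module): zero out 8
# consecutive mel channels starting at channel 10 across the full time axis
# of a synthetic 80x100 spectrogram.
if __name__ == "__main__":
    spec = np.random.rand(80, 100)  # (mel channels, time steps)
    aug = FrequencyMasking().manipulate(
        spec, f=8, f0=10, time_start=0, time_end=spec.shape[1])
    assert (aug[10:18, :] == 0).all()  # masked band is zeroed; spec is untouched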
|
7eb2d2a89d55635cf4cbc6416bb4598e640b8607
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py
|
1bafecc4ccb1b59ec804af11d7374a7f4e93f773
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 16,858
|
py
|
test_zdist.py
|
# Generated by pypy/tool/import_cffi.py
import sys, os, py
import subprocess
import cffi
from pypy.module.test_lib_pypy.cffi_tests.udir import udir
def chdir_to_tmp(f):
f.chdir_to_tmp = True
return f
def from_outside(f):
f.chdir_to_tmp = False
return f
class TestDist(object):
def setup_method(self, meth):
self.executable = os.path.abspath(sys.executable)
self.rootdir = os.path.abspath(os.path.dirname(os.path.dirname(
cffi.__file__)))
self.udir = udir.join(meth.__name__)
os.mkdir(str(self.udir))
if meth.chdir_to_tmp:
self.saved_cwd = os.getcwd()
os.chdir(str(self.udir))
def teardown_method(self, meth):
if hasattr(self, 'saved_cwd'):
os.chdir(self.saved_cwd)
def run(self, args, cwd=None):
env = os.environ.copy()
# a horrible hack to prevent distutils from finding ~/.pydistutils.cfg
# (there is the --no-user-cfg option, but not in Python 2.6...)
env['HOME'] = '/this/path/does/not/exist'
if cwd is None:
newpath = self.rootdir
if 'PYTHONPATH' in env:
newpath += os.pathsep + env['PYTHONPATH']
env['PYTHONPATH'] = newpath
subprocess.check_call([self.executable] + args, cwd=cwd, env=env)
def _prepare_setuptools(self):
if hasattr(TestDist, '_setuptools_ready'):
return
try:
import setuptools
except ImportError:
py.test.skip("setuptools not found")
if os.path.exists(os.path.join(self.rootdir, 'setup.py')):
self.run(['setup.py', 'egg_info'], cwd=self.rootdir)
TestDist._setuptools_ready = True
def check_produced_files(self, content, curdir=None):
if curdir is None:
curdir = str(self.udir)
found_so = None
for name in os.listdir(curdir):
if (name.endswith('.so') or name.endswith('.pyd') or
name.endswith('.dylib') or name.endswith('.dll')):
found_so = os.path.join(curdir, name)
# foo.so => foo
parts = name.split('.')
del parts[-1]
if len(parts) > 1 and parts[-1] != 'bar':
# foo.cpython-34m.so => foo, but foo.bar.so => foo.bar
del parts[-1]
name = '.'.join(parts)
# foo_d => foo (Python 2 debug builds)
if name.endswith('_d') and hasattr(sys, 'gettotalrefcount'):
name = name[:-2]
name += '.SO'
if name.startswith('pycparser') and name.endswith('.egg'):
continue # no clue why this shows up sometimes and not others
if name == '.eggs':
continue # seems new in 3.5, ignore it
assert name in content, "found unexpected file %r" % (
os.path.join(curdir, name),)
value = content.pop(name)
if value is None:
assert name.endswith('.SO') or (
os.path.isfile(os.path.join(curdir, name)))
else:
subdir = os.path.join(curdir, name)
assert os.path.isdir(subdir)
if value == '?':
continue
found_so = self.check_produced_files(value, subdir) or found_so
assert content == {}, "files or dirs not produced in %r: %r" % (
curdir, content.keys())
return found_so
@chdir_to_tmp
def test_empty(self):
self.check_produced_files({})
@chdir_to_tmp
def test_abi_emit_python_code_1(self):
ffi = cffi.FFI()
ffi.set_source("package_name_1.mymod", None)
ffi.emit_python_code('xyz.py')
self.check_produced_files({'xyz.py': None})
@chdir_to_tmp
def test_abi_emit_python_code_2(self):
ffi = cffi.FFI()
ffi.set_source("package_name_1.mymod", None)
py.test.raises(IOError, ffi.emit_python_code, 'unexisting/xyz.py')
@from_outside
def test_abi_emit_python_code_3(self):
ffi = cffi.FFI()
ffi.set_source("package_name_1.mymod", None)
ffi.emit_python_code(str(self.udir.join('xyt.py')))
self.check_produced_files({'xyt.py': None})
@chdir_to_tmp
def test_abi_compile_1(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", None)
x = ffi.compile()
self.check_produced_files({'mod_name_in_package': {'mymod.py': None}})
assert x == os.path.join('.', 'mod_name_in_package', 'mymod.py')
@chdir_to_tmp
def test_abi_compile_2(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", None)
x = ffi.compile('build2')
self.check_produced_files({'build2': {
'mod_name_in_package': {'mymod.py': None}}})
assert x == os.path.join('build2', 'mod_name_in_package', 'mymod.py')
@from_outside
def test_abi_compile_3(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", None)
tmpdir = str(self.udir.join('build3'))
x = ffi.compile(tmpdir)
self.check_produced_files({'build3': {
'mod_name_in_package': {'mymod.py': None}}})
assert x == os.path.join(tmpdir, 'mod_name_in_package', 'mymod.py')
@chdir_to_tmp
def test_api_emit_c_code_1(self):
ffi = cffi.FFI()
ffi.set_source("package_name_1.mymod", "/*code would be here*/")
ffi.emit_c_code('xyz.c')
self.check_produced_files({'xyz.c': None})
@chdir_to_tmp
def test_api_emit_c_code_2(self):
ffi = cffi.FFI()
ffi.set_source("package_name_1.mymod", "/*code would be here*/")
py.test.raises(IOError, ffi.emit_c_code, 'unexisting/xyz.c')
@from_outside
def test_api_emit_c_code_3(self):
ffi = cffi.FFI()
ffi.set_source("package_name_1.mymod", "/*code would be here*/")
ffi.emit_c_code(str(self.udir.join('xyu.c')))
self.check_produced_files({'xyu.c': None})
@chdir_to_tmp
def test_api_compile_1(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/")
x = ffi.compile()
if sys.platform != 'win32':
sofile = self.check_produced_files({
'mod_name_in_package': {'mymod.SO': None,
'mymod.c': None,
'mymod.o': None}})
assert os.path.isabs(x) and os.path.samefile(x, sofile)
else:
self.check_produced_files({
'mod_name_in_package': {'mymod.SO': None,
'mymod.c': None},
'Release': '?'})
@chdir_to_tmp
def test_api_compile_2(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/")
x = ffi.compile('output')
if sys.platform != 'win32':
sofile = self.check_produced_files({
'output': {'mod_name_in_package': {'mymod.SO': None,
'mymod.c': None,
'mymod.o': None}}})
assert os.path.isabs(x) and os.path.samefile(x, sofile)
else:
self.check_produced_files({
'output': {'mod_name_in_package': {'mymod.SO': None,
'mymod.c': None},
'Release': '?'}})
@from_outside
def test_api_compile_3(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/")
x = ffi.compile(str(self.udir.join('foo')))
if sys.platform != 'win32':
sofile = self.check_produced_files({
'foo': {'mod_name_in_package': {'mymod.SO': None,
'mymod.c': None,
'mymod.o': None}}})
assert os.path.isabs(x) and os.path.samefile(x, sofile)
else:
self.check_produced_files({
'foo': {'mod_name_in_package': {'mymod.SO': None,
'mymod.c': None},
'Release': '?'}})
@chdir_to_tmp
def test_api_compile_explicit_target_1(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/")
x = ffi.compile(target="foo.bar.*")
if sys.platform != 'win32':
sofile = self.check_produced_files({
'mod_name_in_package': {'foo.bar.SO': None,
'mymod.c': None,
'mymod.o': None}})
assert os.path.isabs(x) and os.path.samefile(x, sofile)
else:
self.check_produced_files({
'mod_name_in_package': {'foo.bar.SO': None,
'mymod.c': None},
'Release': '?'})
@chdir_to_tmp
def test_api_compile_explicit_target_3(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/")
x = ffi.compile(target="foo.bar.baz")
if sys.platform != 'win32':
self.check_produced_files({
'mod_name_in_package': {'foo.bar.baz': None,
'mymod.c': None,
'mymod.o': None}})
sofile = os.path.join(str(self.udir),
'mod_name_in_package', 'foo.bar.baz')
assert os.path.isabs(x) and os.path.samefile(x, sofile)
else:
self.check_produced_files({
'mod_name_in_package': {'foo.bar.baz': None,
'mymod.c': None},
'Release': '?'})
@chdir_to_tmp
def test_api_distutils_extension_1(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/")
ext = ffi.distutils_extension()
self.check_produced_files({'build': {
'mod_name_in_package': {'mymod.c': None}}})
if hasattr(os.path, 'samefile'):
assert os.path.samefile(ext.sources[0],
'build/mod_name_in_package/mymod.c')
@from_outside
def test_api_distutils_extension_2(self):
ffi = cffi.FFI()
ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/")
ext = ffi.distutils_extension(str(self.udir.join('foo')))
self.check_produced_files({'foo': {
'mod_name_in_package': {'mymod.c': None}}})
if hasattr(os.path, 'samefile'):
assert os.path.samefile(ext.sources[0],
str(self.udir.join('foo/mod_name_in_package/mymod.c')))
def _make_distutils_api(self):
os.mkdir("src")
os.mkdir(os.path.join("src", "pack1"))
with open(os.path.join("src", "pack1", "__init__.py"), "w") as f:
pass
with open("setup.py", "w") as f:
f.write("""if 1:
# https://bugs.python.org/issue23246
import sys
if sys.platform == 'win32':
try:
import setuptools
except ImportError:
pass
import cffi
ffi = cffi.FFI()
ffi.set_source("pack1.mymod", "/*code would be here*/")
from distutils.core import setup
setup(name='example1',
version='0.1',
packages=['pack1'],
package_dir={'': 'src'},
ext_modules=[ffi.distutils_extension()])
""")
@chdir_to_tmp
def test_distutils_api_1(self):
self._make_distutils_api()
self.run(["setup.py", "build"])
self.check_produced_files({'setup.py': None,
'build': '?',
'src': {'pack1': {'__init__.py': None}}})
@chdir_to_tmp
def test_distutils_api_2(self):
self._make_distutils_api()
self.run(["setup.py", "build_ext", "-i"])
self.check_produced_files({'setup.py': None,
'build': '?',
'src': {'pack1': {'__init__.py': None,
'mymod.SO': None}}})
def _make_setuptools_abi(self):
self._prepare_setuptools()
os.mkdir("src0")
os.mkdir(os.path.join("src0", "pack2"))
with open(os.path.join("src0", "pack2", "__init__.py"), "w") as f:
pass
with open(os.path.join("src0", "pack2", "_build.py"), "w") as f:
f.write("""if 1:
import cffi
ffi = cffi.FFI()
ffi.set_source("pack2.mymod", None)
""")
with open("setup.py", "w") as f:
f.write("""if 1:
from setuptools import setup
setup(name='example1',
version='0.1',
packages=['pack2'],
package_dir={'': 'src0'},
cffi_modules=["src0/pack2/_build.py:ffi"])
""")
@chdir_to_tmp
def test_setuptools_abi_1(self):
self._make_setuptools_abi()
self.run(["setup.py", "build"])
self.check_produced_files({'setup.py': None,
'build': '?',
'src0': {'pack2': {'__init__.py': None,
'_build.py': None}}})
@chdir_to_tmp
def test_setuptools_abi_2(self):
self._make_setuptools_abi()
self.run(["setup.py", "build_ext", "-i"])
self.check_produced_files({'setup.py': None,
'src0': {'pack2': {'__init__.py': None,
'_build.py': None,
'mymod.py': None}}})
def _make_setuptools_api(self):
self._prepare_setuptools()
os.mkdir("src1")
os.mkdir(os.path.join("src1", "pack3"))
with open(os.path.join("src1", "pack3", "__init__.py"), "w") as f:
pass
with open(os.path.join("src1", "pack3", "_build.py"), "w") as f:
f.write("""if 1:
import cffi
ffi = cffi.FFI()
ffi.set_source("pack3.mymod", "/*code would be here*/")
ffi._hi_there = 42
""")
with open("setup.py", "w") as f:
f.write("from __future__ import print_function\n"
"""if 1:
from setuptools import setup
from distutils.command.build_ext import build_ext
import os
class TestBuildExt(build_ext):
def pre_run(self, ext, ffi):
print('_make_setuptools_api: in pre_run:', end=" ")
assert ffi._hi_there == 42
assert ext.name == "pack3.mymod"
fn = os.path.join(os.path.dirname(self.build_lib),
'..', 'see_me')
print('creating %r' % (fn,))
open(fn, 'w').close()
setup(name='example1',
version='0.1',
packages=['pack3'],
package_dir={'': 'src1'},
cffi_modules=["src1/pack3/_build.py:ffi"],
cmdclass={'build_ext': TestBuildExt},
)
""")
@chdir_to_tmp
def test_setuptools_api_1(self):
self._make_setuptools_api()
self.run(["setup.py", "build"])
self.check_produced_files({'setup.py': None,
'build': '?',
'see_me': None,
'src1': {'pack3': {'__init__.py': None,
'_build.py': None}}})
@chdir_to_tmp
def test_setuptools_api_2(self):
self._make_setuptools_api()
self.run(["setup.py", "build_ext", "-i"])
self.check_produced_files({'setup.py': None,
'build': '?',
'see_me': None,
'src1': {'pack3': {'__init__.py': None,
'_build.py': None,
'mymod.SO': None}}})
|
fe44bbe6a98264149567597810641a473aaa5a61
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/espnet2/spk/loss/aamsoftmax.py
|
4005fa03e8511b7d83d57111195a2ef1a2921a76
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,540
|
py
|
aamsoftmax.py
|
#! /usr/bin/python
# -*- encoding: utf-8 -*-
# code from https://github.com/clovaai/voxceleb_trainer/blob/master/loss/aamsoftmax.py
# Adapted from https://github.com/wujiyang/Face_Pytorch (Apache License)
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet2.spk.loss.abs_loss import AbsLoss
class AAMSoftmax(AbsLoss):
"""
Additive angular margin softmax.
Paper: Deng, Jiankang, et al. "Arcface: Additive angular margin loss for
deep face recognition." Proceedings of the IEEE/CVF conference on computer
vision and pattern recognition. 2019.
args:
nout : dimensionality of speaker embedding
        nclasses: number of speakers in the training set
margin : margin value of AAMSoftmax
scale : scale value of AAMSoftmax
"""
def __init__(
self, nout, nclasses, margin=0.3, scale=15, easy_margin=False, **kwargs
):
super().__init__(nout)
self.test_normalize = True
self.m = margin
self.s = scale
self.in_feats = nout
self.weight = torch.nn.Parameter(
torch.FloatTensor(nclasses, nout), requires_grad=True
)
self.ce = nn.CrossEntropyLoss()
nn.init.xavier_normal_(self.weight, gain=1)
self.easy_margin = easy_margin
self.cos_m = math.cos(self.m)
self.sin_m = math.sin(self.m)
# make the function cos(theta+m) monotonic decreasing while theta in [0°,180°]
self.th = math.cos(math.pi - self.m)
self.mm = math.sin(math.pi - self.m) * self.m
print("Initialised AAMSoftmax margin %.3f scale %.3f" % (self.m, self.s))
def forward(self, x, label=None):
if len(label.size()) == 2:
label = label.squeeze(1)
assert x.size()[0] == label.size()[0]
assert x.size()[1] == self.in_feats
# cos(theta)
cosine = F.linear(F.normalize(x), F.normalize(self.weight))
# cos(theta + m)
sine = torch.sqrt((1.0 - torch.mul(cosine, cosine)).clamp(0, 1))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm)
one_hot = torch.zeros_like(cosine)
one_hot.scatter_(1, label.view(-1, 1), 1)
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
output = output * self.s
loss = self.ce(output, label)
return loss
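# Illustrative usage sketch (not part of the original module): a toy setup
# with 10 speakers and 192-dim embeddings; all sizes are made up.
if __name__ == "__main__":
    criterion = AAMSoftmax(nout=192, nclasses=10, margin=0.3, scale=15)
    emb = torch.randn(4, 192)           # (batch, embedding dim)
    label = torch.randint(0, 10, (4,))  # one speaker id per utterance
    print(criterion(emb, label))        # scalar loss tensor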
|
657051a614740e33e1994069e289d23920aa6c0f
|
08ea46c0a9fb71ef222cf6afa2e9094f5663dcfb
|
/tests/test_output_parser.py
|
f593981442883cfd40c0dec45051a22089414a84
|
[
"BSD-3-Clause-Clear"
] |
permissive
|
griffithlab/pVACtools
|
e358919eee76100f79dbe8d40d02b3fce8b227ac
|
3317d2c18e82edb5ea183ae09820beb68c39d256
|
refs/heads/master
| 2023-08-09T15:42:06.725426
| 2023-08-09T14:28:44
| 2023-08-09T14:28:44
| 102,625,109
| 124
| 64
|
BSD-3-Clause-Clear
| 2023-09-08T14:17:22
| 2017-09-06T15:23:04
|
Python
|
UTF-8
|
Python
| false
| false
| 23,567
|
py
|
test_output_parser.py
|
import unittest
import os
import sys
import tempfile
import py_compile
from pvactools.lib.output_parser import DefaultOutputParser, UnmatchedSequencesOutputParser
from tests.utils import *
class OutputParserTests(unittest.TestCase):
@classmethod
def setUp(cls):
executable_dir = os.path.join(pvactools_directory(), 'pvactools', 'lib')
cls.executable = os.path.join(executable_dir, 'output_parser.py')
cls.test_data_dir = os.path.join(pvactools_directory(), 'tests', 'test_data', 'output_parser')
def test_source_compiles(self):
self.assertTrue(py_compile.compile(self.executable))
def test_parse_output_runs_and_produces_expected_output(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_peptide_sequence_length_21.ann.HLA-A*29:02.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_peptide_sequence_length_21.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_peptide_sequence_length_21.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_peptide_sequence_length_21',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_peptide_sequence_length_21.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_and_produces_expected_output_with_multiple_iedb_files(self):
parse_output_input_iedb_files = [
os.path.join(self.test_data_dir, "input.ann.HLA-A*29:02.9.tsv"),
os.path.join(self.test_data_dir, "input.smm.HLA-A*29:02.9.tsv"),
os.path.join(self.test_data_dir, "input.smmpmbec.HLA-A*29:02.9.tsv"),
]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "Test.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "Test_21.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_files,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_Test_21.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_and_produces_expected_output_for_repetitive_deletion_at_beginning_of_sequence(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "pat27_4.ann.HLA-A*02:01.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "pat27_4.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "pat27_4_18.fa.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'pat27_4',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_pat27_4_18.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_and_produces_expected_output_for_repetitive_insertion_at_beginning_of_sequence(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "pat126.ann.HLA-A*01:01.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "pat126.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "pat126_17.fa.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'pat126',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_pat126_17.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_input_frameshift_variant_feature_elongation_gets_parsed_correctly(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_frameshift_variant_feature_elongation.ann.HLA-A*29:02.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_frameshift_variant_feature_elongation.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_frameshift_variant_feature_elongation.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_frameshift_variant_feature_elongation',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_frameshift_variant_feature_elongation.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_input_frameshift_variant_feature_truncation_gets_parsed_correctly(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_frameshift_variant_feature_truncation.ann.HLA-A*29:02.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_frameshift_variant_feature_truncation.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_frameshift_variant_feature_truncation.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_frameshift_variant_feature_truncation',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_frameshift_variant_feature_truncation.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_input_frameshift_variant_feature_truncation2_gets_parsed_correctly(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_frameshift_variant_feature_truncation2.ann.HLA-E*01:01.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_frameshift_variant_feature_truncation2.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_frameshift_variant_feature_truncation2.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_frameshift_variant_feature_truncation2',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_frameshift_variant_feature_truncation2.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_input_frameshift_variant_position_1_gets_parsed_correctly(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_frameshift_variant_position_1.MHCnuggetsI.HLA-A*02:01.8.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_frameshift_variant_position_1.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_frameshift_variant_position_1.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_frameshift_variant_position_1',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_frameshift_variant_position_1.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_input_inframe_deletion_aa_deletion_gets_parsed_correctly(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_inframe_deletion_aa_deletion.ann.HLA-A*29:02.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_inframe_deletion_aa_deletion.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_inframe_deletion_aa_deletion.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_inframe_deletion_aa_deletion',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_inframe_deletion_aa_deletion.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_input_inframe_deletion_aa_replacement_gets_parsed_correctly(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_inframe_deletion_aa_replacement.ann.HLA-A*29:02.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_inframe_deletion_aa_replacement.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_inframe_deletion_aa_replacement.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_inframe_deletion_aa_replacement',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_inframe_deletion_aa_replacement.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_input_inframe_insertion_aa_insertion_gets_parsed_correctly(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_inframe_insertion_aa_insertion.ann.HLA-A*29:02.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_inframe_insertion_aa_insertion.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_inframe_insertion_aa_insertion.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_inframe_insertion_aa_insertion',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_inframe_insertion_aa_insertion.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_input_inframe_insertion_aa_replacement_gets_parsed_correctly(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_inframe_insertion_aa_replacement.ann.HLA-A*29:02.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_inframe_insertion_aa_replacement.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_inframe_insertion_aa_replacement.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_inframe_insertion_aa_replacement',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_inframe_insertion_aa_replacement.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_and_produces_expected_output_for_class_ii(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input.nn_align.H2-IAb.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_peptide_sequence_length_31.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_peptide_sequence_length_31.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_nn_align.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_and_produces_expected_output_for_duplicate_transcripts(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_multiple_transcripts_per_alt.ann.HLA-A*29:02.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_multiple_transcripts_per_alt.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_multiple_transcripts_per_alt.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_multiple_transcripts_per_alt',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_multiple_transcripts_per_alt.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_and_produces_expected_output_for_mnps(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_mnp.ann.HLA-A*01:01.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_mnp.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_mnp.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_mnp',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_mnp.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_and_produces_expected_output_for_mnp_at_beginning_of_sequence(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_mnp2.ann.HLA-A*01:01.10.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "input_mnp2.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "input_mnp2.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_mnp2',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_mnp2.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_with_iedb_dna_warning(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_iedb_dna_warning.ann.HLA-A*29:02.9.tsv")]
parse_output_input_tsv_file = os.path.join(self.test_data_dir, "Test.tsv")
parse_output_key_file = os.path.join(self.test_data_dir, "Test_21.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : parse_output_input_tsv_file,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_iedb_dna_warning',
}
parser = DefaultOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
def test_parse_output_runs_and_produces_expected_output_for_pvacvector(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_pvacvector.ann.H-2-Kb.8.tsv")]
parse_output_key_file = os.path.join(self.test_data_dir, "input_pvacvector.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : None,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_pvacvector',
}
parser = UnmatchedSequencesOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_pvacvector.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_and_produces_expected_output_for_none_percentile(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_percentile_none.netmhcpan.HLA-C*03:03.9.tsv_1-2")]
parse_output_key_file = os.path.join(self.test_data_dir, "input_pvacvector.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : None,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_percentile_none',
}
parser = UnmatchedSequencesOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_percentile_none.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
def test_parse_output_runs_and_produces_expected_output_for_empty_percentile(self):
parse_output_input_iedb_file = [os.path.join(self.test_data_dir, "input_percentile_empty.MHCflurry.HLA-C*15:05.8.tsv_1-78")]
parse_output_key_file = os.path.join(self.test_data_dir, "input_percentile_empty.key")
parse_output_output_file = tempfile.NamedTemporaryFile()
parse_output_params = {
'input_iedb_files' : parse_output_input_iedb_file,
'input_tsv_file' : None,
'key_file' : parse_output_key_file,
'output_file' : parse_output_output_file.name,
'sample_name' : 'input_percentile_empty',
}
parser = UnmatchedSequencesOutputParser(**parse_output_params)
self.assertFalse(parser.execute())
expected_output_file = os.path.join(self.test_data_dir, "output_percentile_empty.iedb.parsed.tsv")
self.assertTrue(compare(parse_output_output_file.name, expected_output_file))
|
1ccc2f440ba95e62e33e6e43527359e1ccc0fc51
|
be77982d0795d3b8517dcc9f87aea794ebb3b881
|
/easyVmaf.py
|
c406a0bb63408d796f2e006c9b253af0b70c1a77
|
[
"MIT"
] |
permissive
|
gdavila/easyVmaf
|
a8df3f95e4bcdddd29374938a4f5b47a77bda9ba
|
6885079484633d54738506aa86383c1d2bdee429
|
refs/heads/master
| 2023-05-27T10:14:53.841217
| 2023-05-11T18:44:46
| 2023-05-11T18:44:46
| 243,396,674
| 139
| 30
|
MIT
| 2023-05-11T18:44:48
| 2020-02-27T00:39:32
|
Python
|
UTF-8
|
Python
| false
| false
| 9,824
|
py
|
easyVmaf.py
|
"""
MIT License
Copyright (c) 2020 Gabriel Davila - gdavila.revelo@gmail.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import json
import sys
import os.path
import glob
import xml.etree.ElementTree as ET
from FFmpeg import HD_MODEL_NAME, HD_NEG_MODEL_NAME, HD_PHONE_MODEL_NAME, _4K_MODEL_NAME, HD_PHONE_MODEL_VERSION
from statistics import mean, harmonic_mean
from Vmaf import vmaf
from signal import signal, SIGINT
def handler(signal_received, frame):
print('SIGINT or CTRL-C detected. Exiting gracefully')
sys.exit(0)
def get_args():
    '''This function parses and returns the arguments passed in'''
parser = MyParser(prog='easyVmaf',
                      description="Script to easily compute VMAF using FFmpeg. It allows deinterlacing, scaling and syncing of Ref and Distorted video samples automatically: \
                    \n\n \t Autodeinterlace: If the Reference or Distorted samples are interlaced, deinterlacing is applied\
                    \n\n \t Autoscale: Reference and Distorted samples are scaled automatically to 1920x1080 or 3840x2160 depending on the VMAF model to use\
                    \n\n \t Autosync: The first frames of the distorted video are used as reference for a sync lookup with the Reference video. \
                    \n \t \t The sync is done by a frame-by-frame lookup of the best PSNR\
                    \n \t \t See [-reverse] for more options of syncing\
                    \n\n As output, a json file with VMAF score is created",
formatter_class=argparse.RawTextHelpFormatter)
requiredgroup = parser.add_argument_group('required arguments')
requiredgroup.add_argument(
'-d', dest='d', type=str, help='Distorted video', required=True)
requiredgroup.add_argument(
'-r', dest='r', type=str, help='Reference video ', required=True)
parser.add_argument('-sw', dest='sw', type=float, default=0,
help='Sync Window: window size in seconds of a subsample of the Reference video. The sync lookup will be done between the first frames of the Distorted input and this Subsample of the Reference. (default=0. No sync).')
parser.add_argument('-ss', dest='ss', type=float, default=0,
help="Sync Start Time. Time in seconds from the beginning of the Reference video to which the Sync Window will be applied from. (default=0).")
parser.add_argument('-fps', dest='fps', type=float, default=0,
help='Video Frame Rate: force frame rate conversion to <fps> value. Autodeinterlace is disabled when setting this')
parser.add_argument('-subsample', dest='n', type=int, default=1,
help="Specifies the subsampling of frames to speed up calculation. (default=1, None).")
    parser.add_argument('-reverse', help="If enabled, it changes the default Autosync behaviour: The first frames of the Reference video are used as reference to sync with the Distorted one. (Default = Disable).", action='store_true')
parser.add_argument('-model', dest='model', type=str, default="HD",
help="Vmaf Model. Options: HD, 4K. (Default: HD).")
parser.add_argument('-threads', dest='threads', type=int,
default=0, help='number of threads')
parser.add_argument(
'-verbose', help='Activate verbose loglevel. (Default: info).', action='store_true')
parser.add_argument(
'-progress', help='Activate progress indicator for vmaf computation. (Default: false).', action='store_true')
parser.add_argument(
'-endsync', help='Activate end sync. This ends the computation when the shortest video ends. (Default: false).', action='store_true')
parser.add_argument('-output_fmt', dest='output_fmt', type=str, default='json',
help='Output vmaf file format. Options: json or xml (Default: json)')
parser.add_argument(
'-cambi_heatmap', help='Activate cambi heatmap. (Default: false).', action='store_true')
parser.add_argument(
'-sync_only', action='store_true', default=False, help='For sync measurement only. No Vmaf processing')
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
return parser.parse_args()
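# Illustrative invocation (a hedged sketch; the file names are hypothetical):
#   python easyVmaf.py -r reference.mp4 -d distorted.mp4 -sw 3 -model HD
# This syncs the first frames of distorted.mp4 against a 3-second window at
# the start of reference.mp4, then computes VMAF with the HD models.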
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
if __name__ == '__main__':
signal(SIGINT, handler)
'''reading values from cmdParser'''
cmdParser = get_args()
main_pattern = cmdParser.d
reference = cmdParser.r
    ''' absolute values are used because negative numbers are not allowed '''
syncWin = abs(cmdParser.sw)
ss = abs(cmdParser.ss)
fps = abs(cmdParser.fps)
n_subsample = abs(cmdParser.n)
reverse = cmdParser.reverse
model = cmdParser.model
verbose = cmdParser.verbose
output_fmt = cmdParser.output_fmt
threads = cmdParser.threads
print_progress = cmdParser.progress
end_sync = cmdParser.endsync
cambi_heatmap = cmdParser.cambi_heatmap
sync_only = cmdParser.sync_only
# Setting verbosity
if verbose:
loglevel = "verbose"
else:
loglevel = "info"
# check output format
    if output_fmt not in ("json", "xml"):
print("output_fmt: ", output_fmt,
" Not supported. JSON output used instead", flush=True)
output_fmt = "json"
'''
Distorted video path could be loaded as patterns i.e., "myFolder/video-sample-*.mp4"
In this way, many computations could be done with just one command line.
'''
main_pattern = os.path.expanduser(main_pattern)
mainFiles = glob.glob(main_pattern)
    if not os.path.isfile(reference):
print("Reference Video file not found: ", reference, flush=True)
sys.exit(1)
if len(mainFiles) == 0:
print("Distorted Video files not found with the given pattern/name: ",
main_pattern, flush=True)
sys.exit(1)
for main in mainFiles:
myVmaf = vmaf(main, reference, loglevel=loglevel, subsample=n_subsample, model=model,
                      output_fmt=output_fmt, threads=threads, print_progress=print_progress, end_sync=end_sync, manual_fps=fps, cambi_heatmap=cambi_heatmap)
        '''check if syncWin was set. If true, the offset is computed automatically; otherwise the manual value is used'''
if syncWin > 0:
offset, psnr = myVmaf.syncOffset(syncWin, ss, reverse)
            if sync_only:
                print("offset: ", offset, flush=True)
                sys.exit(0)
else:
offset = ss
psnr = None
if reverse:
myVmaf.offset = -offset
else:
myVmaf.offset = offset
vmafProcess = myVmaf.getVmaf()
vmafpath = myVmaf.ffmpegQos.vmafpath
vmafScore = []
vmafNegScore = []
vmafPhoneScore = []
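        # Expected libvmaf JSON layout (an assumption inferred from the parsing
        # below, not verified against every libvmaf version):
        #   {"frames": [{"metrics": {"vmaf": 93.2, ...}}, ...]}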
if output_fmt == 'json':
with open(vmafpath) as jsonFile:
jsonData = json.load(jsonFile)
for frame in jsonData['frames']:
if model == 'HD':
vmafScore.append(frame["metrics"][HD_MODEL_NAME])
vmafNegScore.append(frame["metrics"][HD_NEG_MODEL_NAME])
vmafPhoneScore.append(frame["metrics"][HD_PHONE_MODEL_NAME])
if model == '4K':
vmafScore.append(frame["metrics"][_4K_MODEL_NAME])
elif output_fmt == 'xml':
tree = ET.parse(vmafpath)
root = tree.getroot()
for frame in root.findall('frames/frame'):
if model == 'HD':
vmafScore.append(frame["metrics"][HD_MODEL_NAME])
vmafNegScore.append(frame["metrics"][HD_NEG_MODEL_NAME])
vmafPhoneScore.append(frame["metrics"][HD_PHONE_MODEL_NAME])
if model == '4K':
vmafScore.append(frame["metrics"][_4K_MODEL_NAME])
print("\n \n \n \n \n ")
print("=======================================", flush=True)
print("VMAF computed", flush=True)
print("=======================================", flush=True)
print("offset: ", offset, " | psnr: ", psnr)
if model == 'HD':
print("VMAF HD: ", mean(vmafScore))
print("VMAF Neg: ", mean(vmafNegScore))
print("VMAF Phone: ", mean(vmafPhoneScore))
if model == '4K':
print("VMAF 4K: ", mean(vmafScore))
print("VMAF output file path: ", myVmaf.ffmpegQos.vmafpath)
if cambi_heatmap:
print("CAMBI Heatmap output path: ", myVmaf.ffmpegQos.vmaf_cambi_heatmap_path)
print("\n \n \n \n \n ")
|
4eda7536a0b5c56f3d6b24fdc8538faa7be8d5f1
|
8d44e796eaf0c8e11bbc2a27ef093e97a25b6f4a
|
/haystack/nodes/prompt/shapers.py
|
265f5712722ee847551975ca964b4ad836523a8b
|
[
"Apache-2.0"
] |
permissive
|
deepset-ai/haystack
|
caa5287051d1771395ea624b58097000825bad81
|
5f1256ac7e5734c2ea481e72cb7e02c34baf8c43
|
refs/heads/main
| 2023-09-01T02:41:23.490526
| 2023-08-31T15:33:12
| 2023-08-31T15:33:12
| 221,654,678
| 10,599
| 1,558
|
Apache-2.0
| 2023-09-14T17:09:42
| 2019-11-14T09:05:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,992
|
py
|
shapers.py
|
from typing import Optional, List, Union
from haystack.schema import Answer, Document
from haystack.nodes.other.shaper import ( # pylint: disable=unused-import
Shaper,
current_datetime, # used as shaping function
join_documents_to_string as join, # used as shaping function
format_document,
format_answer,
format_string,
)
def to_strings(items: List[Union[str, Document, Answer]], pattern=None, str_replace=None) -> List[str]:
results = []
for idx, item in enumerate(items, start=1):
if isinstance(item, str):
results.append(format_string(item, str_replace=str_replace))
elif isinstance(item, Document):
results.append(format_document(document=item, pattern=pattern, str_replace=str_replace, idx=idx))
elif isinstance(item, Answer):
results.append(format_answer(answer=item, pattern=pattern, str_replace=str_replace, idx=idx))
else:
raise ValueError(f"Unsupported item type: {type(item)}")
return results
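# Minimal usage sketch (hedged; the `pattern` placeholders such as "$idx" and
# "$content" are assumed to follow the format_document conventions imported
# above):
#   to_strings(["raw text"]) == ["raw text"]
#   to_strings([Document(content="some text")], pattern="[$idx] $content")
#   # -> ["[1] some text"]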
class BaseOutputParser(Shaper):
"""
An output parser in `PromptTemplate` defines how to parse the model output and convert it into Haystack primitives (answers, documents, or labels).
BaseOutputParser is the base class for output parser implementations.
"""
@property
def output_variable(self) -> Optional[str]:
return self.outputs[0]
class AnswerParser(BaseOutputParser):
"""
Parses the model output to extract the answer into a proper `Answer` object using regex patterns.
AnswerParser adds the `document_ids` of the documents used to generate the answer and the prompts used to the `Answer` object.
You can pass a `reference_pattern` to extract the document_ids of the answer from the model output.
"""
def __init__(self, pattern: Optional[str] = None, reference_pattern: Optional[str] = None):
"""
:param pattern: The regex pattern to use for parsing the answer.
Examples:
`[^\\n]+$` finds "this is an answer" in string "this is an argument.\nthis is an answer".
`Answer: (.*)` finds "this is an answer" in string "this is an argument. Answer: this is an answer".
If not specified, the whole string is used as the answer. If specified, the first group of the regex is used as the answer. If there is no group, the whole match is used as the answer.
:param reference_pattern: The regex pattern to use for parsing the document references.
Example: `\\[(\\d+)\\]` finds "1" in string "this is an answer[1]".
If None, no parsing is done and all documents are referenced.
"""
self.pattern = pattern
self.reference_pattern = reference_pattern
super().__init__(
func="strings_to_answers",
inputs={"strings": "results"},
outputs=["answers"],
params={"pattern": pattern, "reference_pattern": reference_pattern},
)
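# Hedged construction sketch, reusing the regexes from the docstring above:
# extract the text after "Answer:" and map bracketed numbers such as "[2]"
# back to document references.
#   parser = AnswerParser(pattern=r"Answer: (.*)", reference_pattern=r"\[(\d+)\]")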
|
a493c1613ccc983a6b55d488b4acdb9120f3c01c
|
503bfe863ae9e92bf940a5e8baa57c0de44f4da6
|
/src/silx/gui/plot/PlotInteraction.py
|
fe139b48ce98a91d9014ae55682efaebe609174f
|
[
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
silx-kit/silx
|
58105c0ed9cd02c75543c0c67a027471ca87922b
|
5e33cb69afd2a8b1cfe3183282acdd8b34c1a74f
|
refs/heads/main
| 2023-08-24T14:33:49.732794
| 2023-07-25T07:44:02
| 2023-07-25T07:44:02
| 43,291,718
| 120
| 78
|
MIT
| 2023-09-14T13:07:11
| 2015-09-28T09:23:13
|
Python
|
UTF-8
|
Python
| false
| false
| 64,970
|
py
|
PlotInteraction.py
|
# /*##########################################################################
#
# Copyright (c) 2014-2023 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Implementation of the interaction for the :class:`Plot`."""
from __future__ import annotations
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "15/02/2019"
import math
import numpy
import time
import weakref
from typing import NamedTuple, Optional
from silx.gui import qt
from .. import colors
from . import items
from .Interaction import (ClickOrDrag, LEFT_BTN, RIGHT_BTN, MIDDLE_BTN,
State, StateMachine)
from .PlotEvents import (prepareCurveSignal, prepareDrawingSignal,
prepareHoverSignal, prepareImageSignal,
prepareMarkerSignal, prepareMouseSignal)
from .backends.BackendBase import (CURSOR_POINTING, CURSOR_SIZE_HOR,
CURSOR_SIZE_VER, CURSOR_SIZE_ALL)
from ._utils import (FLOAT32_SAFE_MIN, FLOAT32_MINPOS, FLOAT32_SAFE_MAX,
applyZoomToPlot, EnabledAxes)
# Base class ##################################################################
class _PlotInteraction(object):
"""Base class for interaction handler.
It provides a weakref to the plot and methods to set/reset overlay.
"""
def __init__(self, plot):
"""Init.
:param plot: The plot to apply modifications to.
"""
self._needReplot = False
self._selectionAreas = set()
self._plot = weakref.ref(plot) # Avoid cyclic-ref
@property
def plot(self):
plot = self._plot()
assert plot is not None
return plot
def setSelectionArea(self, points, fill, color, name='', shape='polygon'):
"""Set a polygon selection area overlaid on the plot.
Multiple simultaneous areas are supported through the name parameter.
:param points: The 2D coordinates of the points of the polygon
:type points: An iterable of (x, y) coordinates
:param str fill: The fill mode: 'hatch', 'solid' or 'none'
:param color: RGBA color to use or None to disable display
:type color: list or tuple of 4 float in the range [0, 1]
:param name: The key associated with this selection area
:param str shape: Shape of the area in 'polygon', 'polylines'
"""
assert shape in ('polygon', 'polylines')
if color is None:
return
points = numpy.asarray(points)
# TODO Not very nice, but as is for now
legend = '__SELECTION_AREA__' + name
fill = fill != 'none' # TODO not very nice either
greyed = colors.greyed(color)[0]
if greyed < 0.5:
color2 = "white"
else:
color2 = "black"
self.plot.addShape(points[:, 0], points[:, 1], legend=legend,
replace=False,
shape=shape, fill=fill,
color=color, linebgcolor=color2, linestyle="--",
overlay=True)
self._selectionAreas.add(legend)
def resetSelectionArea(self):
"""Remove all selection areas set by setSelectionArea."""
for legend in self._selectionAreas:
self.plot.remove(legend, kind='item')
self._selectionAreas = set()
# Zoom/Pan ####################################################################
class _PlotInteractionWithClickEvents(ClickOrDrag, _PlotInteraction):
""":class:`ClickOrDrag` state machine emitting click and double click events.
Base class for :class:`Pan` and :class:`Zoom`
"""
_DOUBLE_CLICK_TIMEOUT = 0.4
def click(self, x, y, btn):
"""Handle clicks by sending events
:param int x: Mouse X position in pixels
:param int y: Mouse Y position in pixels
:param btn: Clicked mouse button
"""
if btn == LEFT_BTN:
lastClickTime, lastClickPos = self._lastClick
# Signal mouse double clicked event first
if (time.time() - lastClickTime) <= self._DOUBLE_CLICK_TIMEOUT:
# Use position of first click
eventDict = prepareMouseSignal('mouseDoubleClicked', 'left',
*lastClickPos)
self.plot.notify(**eventDict)
self._lastClick = 0., None
else:
# Signal mouse clicked event
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
eventDict = prepareMouseSignal('mouseClicked', 'left',
dataPos[0], dataPos[1],
x, y)
self.plot.notify(**eventDict)
self._lastClick = time.time(), (dataPos[0], dataPos[1], x, y)
elif btn == RIGHT_BTN:
# Signal mouse clicked event
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
eventDict = prepareMouseSignal('mouseClicked', 'right',
dataPos[0], dataPos[1],
x, y)
self.plot.notify(**eventDict)
def __init__(self, plot, **kwargs):
"""Init.
:param plot: The plot to apply modifications to.
"""
self._lastClick = 0., None
_PlotInteraction.__init__(self, plot)
ClickOrDrag.__init__(self, **kwargs)
# Pan #########################################################################
class Pan(_PlotInteractionWithClickEvents):
"""Pan plot content and zoom on wheel state machine."""
def _pixelToData(self, x, y):
xData, yData = self.plot.pixelToData(x, y)
_, y2Data = self.plot.pixelToData(x, y, axis='right')
return xData, yData, y2Data
def beginDrag(self, x, y, btn):
self._previousDataPos = self._pixelToData(x, y)
def drag(self, x, y, btn):
xData, yData, y2Data = self._pixelToData(x, y)
lastX, lastY, lastY2 = self._previousDataPos
xMin, xMax = self.plot.getXAxis().getLimits()
yMin, yMax = self.plot.getYAxis().getLimits()
y2Min, y2Max = self.plot.getYAxis(axis='right').getLimits()
if self.plot.getXAxis()._isLogarithmic():
try:
dx = math.log10(xData) - math.log10(lastX)
newXMin = pow(10., (math.log10(xMin) - dx))
newXMax = pow(10., (math.log10(xMax) - dx))
except (ValueError, OverflowError):
newXMin, newXMax = xMin, xMax
# Makes sure both values stays in positive float32 range
if newXMin < FLOAT32_MINPOS or newXMax > FLOAT32_SAFE_MAX:
newXMin, newXMax = xMin, xMax
else:
dx = xData - lastX
newXMin, newXMax = xMin - dx, xMax - dx
# Makes sure both values stays in float32 range
if newXMin < FLOAT32_SAFE_MIN or newXMax > FLOAT32_SAFE_MAX:
newXMin, newXMax = xMin, xMax
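        # Worked sketch of the log-axis branch above (assumes base-10 axes):
        # panning by dx == 1 in log10 space shifts the limits one decade,
        # e.g. limits [1, 100] become [0.1, 10].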
if self.plot.getYAxis()._isLogarithmic():
try:
dy = math.log10(yData) - math.log10(lastY)
newYMin = pow(10., math.log10(yMin) - dy)
newYMax = pow(10., math.log10(yMax) - dy)
dy2 = math.log10(y2Data) - math.log10(lastY2)
newY2Min = pow(10., math.log10(y2Min) - dy2)
newY2Max = pow(10., math.log10(y2Max) - dy2)
except (ValueError, OverflowError):
newYMin, newYMax = yMin, yMax
newY2Min, newY2Max = y2Min, y2Max
# Makes sure y and y2 stays in positive float32 range
if (newYMin < FLOAT32_MINPOS or newYMax > FLOAT32_SAFE_MAX or
newY2Min < FLOAT32_MINPOS or newY2Max > FLOAT32_SAFE_MAX):
newYMin, newYMax = yMin, yMax
newY2Min, newY2Max = y2Min, y2Max
else:
dy = yData - lastY
dy2 = y2Data - lastY2
newYMin, newYMax = yMin - dy, yMax - dy
newY2Min, newY2Max = y2Min - dy2, y2Max - dy2
# Makes sure y and y2 stays in float32 range
if (newYMin < FLOAT32_SAFE_MIN or
newYMax > FLOAT32_SAFE_MAX or
newY2Min < FLOAT32_SAFE_MIN or
newY2Max > FLOAT32_SAFE_MAX):
newYMin, newYMax = yMin, yMax
newY2Min, newY2Max = y2Min, y2Max
self.plot.setLimits(newXMin, newXMax,
newYMin, newYMax,
newY2Min, newY2Max)
self._previousDataPos = self._pixelToData(x, y)
def endDrag(self, startPos, endPos, btn):
del self._previousDataPos
def cancel(self):
pass
# Zoom ########################################################################
class AxesExtent(NamedTuple):
xmin: float
xmax: float
ymin: float
ymax: float
y2min: float
y2max: float
class Zoom(_PlotInteractionWithClickEvents):
"""Zoom-in/out state machine.
Zoom-in on selected area, zoom-out on right click,
and zoom on mouse wheel.
"""
SURFACE_THRESHOLD = 5
def __init__(self, plot, color):
self.color = color
self.enabledAxes = EnabledAxes()
super(Zoom, self).__init__(plot)
self.plot.getLimitsHistory().clear()
def _getAxesExtent(
self,
x0: float,
y0: float,
x1: float,
y1: float,
enabledAxes: Optional[EnabledAxes] = None,
) -> AxesExtent:
"""Convert selection coordinates (pixels) to axes coordinates (data)
This takes into account axes selected for zoom and aspect ratio.
"""
if enabledAxes is None:
enabledAxes = self.enabledAxes
y2_0, y2_1 = y0, y1
left, top, width, height = self.plot.getPlotBoundsInPixels()
if not all(enabledAxes) and not self.plot.isKeepDataAspectRatio():
            # Handle axes disabled for zoom if plot is not keeping aspect ratio
if not enabledAxes.xaxis:
x0, x1 = left, left + width
if not enabledAxes.yaxis:
y0, y1 = top, top + height
if not enabledAxes.y2axis:
y2_0, y2_1 = top, top + height
if self.plot.isKeepDataAspectRatio() and height != 0 and width != 0:
ratio = width / height
xextent, yextent = math.fabs(x1 - x0), math.fabs(y1 - y0)
if xextent != 0 and yextent != 0:
if xextent / yextent > ratio:
areaHeight = xextent / ratio
center = 0.5 * (y0 + y1)
y0 = center - numpy.sign(y1 - y0) * 0.5 * areaHeight
y1 = center + numpy.sign(y1 - y0) * 0.5 * areaHeight
else:
areaWidth = yextent * ratio
center = 0.5 * (x0 + x1)
x0 = center - numpy.sign(x1 - x0) * 0.5 * areaWidth
x1 = center + numpy.sign(x1 - x0) * 0.5 * areaWidth
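            # e.g. with a 2:1 plot (ratio == 2), a 10x10 px selection is
            # widened to 20x10 px around its center so the zoomed view
            # keeps the data aspect ratio.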
# Convert to data space
x0, y0 = self.plot.pixelToData(x0, y0, check=False)
x1, y1 = self.plot.pixelToData(x1, y1, check=False)
y2_0 = self.plot.pixelToData(None, y2_0, axis="right", check=False)[1]
y2_1 = self.plot.pixelToData(None, y2_1, axis="right", check=False)[1]
return AxesExtent(
min(x0, x1),
max(x0, x1),
min(y0, y1),
max(y0, y1),
min(y2_0, y2_1),
max(y2_0, y2_1),
)
def beginDrag(self, x, y, btn):
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
self.x0, self.y0 = x, y
def drag(self, x1, y1, btn):
if self.color is None:
return # Do not draw zoom area
dataPos = self.plot.pixelToData(x1, y1)
assert dataPos is not None
if self.plot.isKeepDataAspectRatio() or not all(self.enabledAxes):
# Patch enabledAxes to display the right Y axis area on the left Y axis
# since the selection area is always displayed on the left Y axis
isY2Visible = self.plot.getYAxis("right").isVisible()
areaZoomEnabledAxes = EnabledAxes(
self.enabledAxes.xaxis,
self.enabledAxes.yaxis and (not isY2Visible or self.enabledAxes.y2axis),
self.enabledAxes.y2axis,
)
extents = self._getAxesExtent(self.x0, self.y0, x1, y1, areaZoomEnabledAxes)
areaCorners = (
(extents.xmin, extents.ymin),
(extents.xmax, extents.ymin),
(extents.xmax, extents.ymax),
(extents.xmin, extents.ymax),
)
if self.color != 'video inverted':
areaColor = list(self.color)
areaColor[3] *= 0.25
else:
areaColor = [1., 1., 1., 1.]
self.setSelectionArea(areaCorners,
fill='none',
color=areaColor,
name="zoomedArea")
corners = ((self.x0, self.y0),
(self.x0, y1),
(x1, y1),
(x1, self.y0))
corners = numpy.array([self.plot.pixelToData(x, y, check=False)
for (x, y) in corners])
self.setSelectionArea(corners, fill='none', color=self.color)
def _zoom(self, x0, y0, x1, y1):
"""Zoom to the rectangle view x0,y0 x1,y1.
"""
# Store current zoom state in stack
self.plot.getLimitsHistory().push()
extents = self._getAxesExtent(x0, y0, x1, y1)
self.plot.setLimits(
extents.xmin,
extents.xmax,
extents.ymin,
extents.ymax,
extents.y2min,
extents.y2max,
)
def endDrag(self, startPos, endPos, btn):
x0, y0 = startPos
x1, y1 = endPos
if abs(x0 - x1) * abs(y0 - y1) >= self.SURFACE_THRESHOLD:
# Avoid empty zoom area
self._zoom(x0, y0, x1, y1)
self.resetSelectionArea()
def cancel(self):
if isinstance(self.state, self.states['drag']):
self.resetSelectionArea()
# Select ######################################################################
class Select(StateMachine, _PlotInteraction):
"""Base class for drawing selection areas."""
def __init__(self, plot, parameters, states, state):
"""Init a state machine.
:param plot: The plot to apply changes to.
:param dict parameters: A dict of parameters such as color.
:param dict states: The states of the state machine.
:param str state: The name of the initial state.
"""
_PlotInteraction.__init__(self, plot)
self.parameters = parameters
StateMachine.__init__(self, states, state)
@property
def color(self):
return self.parameters.get('color', None)
class SelectPolygon(Select):
"""Drawing selection polygon area state machine."""
DRAG_THRESHOLD_DIST = 4
class Idle(State):
def onPress(self, x, y, btn):
if btn == LEFT_BTN:
self.goto('select', x, y)
return True
class Select(State):
def enterState(self, x, y):
dataPos = self.machine.plot.pixelToData(x, y)
assert dataPos is not None
self._firstPos = dataPos
self.points = [dataPos, dataPos]
self.updateFirstPoint()
def updateFirstPoint(self):
"""Update drawing first point, using self._firstPos"""
x, y = self.machine.plot.dataToPixel(*self._firstPos, check=False)
offset = self.machine.getDragThreshold()
points = [(x - offset, y - offset),
(x - offset, y + offset),
(x + offset, y + offset),
(x + offset, y - offset)]
points = [self.machine.plot.pixelToData(xpix, ypix, check=False)
for xpix, ypix in points]
self.machine.setSelectionArea(points, fill=None,
color=self.machine.color,
name='first_point')
def updateSelectionArea(self):
"""Update drawing selection area using self.points"""
self.machine.setSelectionArea(self.points,
fill='hatch',
color=self.machine.color)
eventDict = prepareDrawingSignal('drawingProgress',
'polygon',
self.points,
self.machine.parameters)
self.machine.plot.notify(**eventDict)
def validate(self):
if len(self.points) > 2:
self.closePolygon()
else:
# It would be nice to have a cancel event.
# The plot is not aware that the interaction was cancelled
self.machine.cancel()
def closePolygon(self):
self.machine.resetSelectionArea()
self.points[-1] = self.points[0]
eventDict = prepareDrawingSignal('drawingFinished',
'polygon',
self.points,
self.machine.parameters)
self.machine.plot.notify(**eventDict)
self.goto('idle')
def onWheel(self, x, y, angle):
self.machine.onWheel(x, y, angle)
self.updateFirstPoint()
def onRelease(self, x, y, btn):
if btn == LEFT_BTN:
# checking if the position is close to the first point
# if yes : closing the "loop"
firstPos = self.machine.plot.dataToPixel(*self._firstPos,
check=False)
dx, dy = abs(firstPos[0] - x), abs(firstPos[1] - y)
threshold = self.machine.getDragThreshold()
# Only allow to close polygon after first point
if len(self.points) > 2 and dx <= threshold and dy <= threshold:
self.closePolygon()
return False
# Update polygon last point not too close to previous one
dataPos = self.machine.plot.pixelToData(x, y)
assert dataPos is not None
self.updateSelectionArea()
                # Check that the new point isn't the same (within the
                # threshold) as the previous one.
                # This has to be done because sometimes the mouse release
                # event is caught right after entering the Select state
                # (i.e., a press in Idle state but at a slightly different
                # position than the mouse press), which would make the first
                # two vertices almost identical.
previousPos = self.machine.plot.dataToPixel(*self.points[-2],
check=False)
dx, dy = abs(previousPos[0] - x), abs(previousPos[1] - y)
if dx >= threshold or dy >= threshold:
self.points.append(dataPos)
else:
self.points[-1] = dataPos
return True
return False
def onMove(self, x, y):
firstPos = self.machine.plot.dataToPixel(*self._firstPos,
check=False)
dx, dy = abs(firstPos[0] - x), abs(firstPos[1] - y)
threshold = self.machine.getDragThreshold()
if dx <= threshold and dy <= threshold:
x, y = firstPos # Snap to first point
dataPos = self.machine.plot.pixelToData(x, y)
assert dataPos is not None
self.points[-1] = dataPos
self.updateSelectionArea()
def __init__(self, plot, parameters):
states = {
'idle': SelectPolygon.Idle,
'select': SelectPolygon.Select
}
super(SelectPolygon, self).__init__(plot, parameters,
states, 'idle')
def cancel(self):
if isinstance(self.state, self.states['select']):
self.resetSelectionArea()
def getDragThreshold(self):
"""Return dragging ratio with device to pixel ratio applied.
:rtype: float
"""
ratio = self.plot.window().windowHandle().devicePixelRatio()
return self.DRAG_THRESHOLD_DIST * ratio
class Select2Points(Select):
"""Base class for drawing selection based on 2 input points."""
class Idle(State):
def onPress(self, x, y, btn):
if btn == LEFT_BTN:
self.goto('start', x, y)
return True
class Start(State):
def enterState(self, x, y):
self.machine.beginSelect(x, y)
def onMove(self, x, y):
self.goto('select', x, y)
def onRelease(self, x, y, btn):
if btn == LEFT_BTN:
self.goto('select', x, y)
return True
class Select(State):
def enterState(self, x, y):
self.onMove(x, y)
def onMove(self, x, y):
self.machine.select(x, y)
def onRelease(self, x, y, btn):
if btn == LEFT_BTN:
self.machine.endSelect(x, y)
self.goto('idle')
def __init__(self, plot, parameters):
states = {
'idle': Select2Points.Idle,
'start': Select2Points.Start,
'select': Select2Points.Select
}
super(Select2Points, self).__init__(plot, parameters,
states, 'idle')
def beginSelect(self, x, y):
pass
def select(self, x, y):
pass
def endSelect(self, x, y):
pass
def cancelSelect(self):
pass
def cancel(self):
if isinstance(self.state, self.states['select']):
self.cancelSelect()
class SelectEllipse(Select2Points):
"""Drawing ellipse selection area state machine."""
def beginSelect(self, x, y):
self.center = self.plot.pixelToData(x, y)
assert self.center is not None
def _getEllipseSize(self, pointInEllipse):
"""
Returns the size from the center to the bounding box of the ellipse.
:param Tuple[float,float] pointInEllipse: A point of the ellipse
:rtype: Tuple[float,float]
"""
x = abs(self.center[0] - pointInEllipse[0])
y = abs(self.center[1] - pointInEllipse[1])
if x == 0 or y == 0:
return x, y
# Ellipse definitions
# e: eccentricity
        # a: length from center to bounding box width
        # b: length from center to bounding box height
# Equations
# (1) b < a
# (2) For x,y a point in the ellipse: x^2/a^2 + y^2/b^2 = 1
# (3) b = a * sqrt(1-e^2)
# (4) e = sqrt(a^2 - b^2) / a
# The eccentricity of the ellipse defined by a,b=x,y is the same
# as the one we are searching for.
swap = x < y
if swap:
x, y = y, x
e = math.sqrt(x**2 - y**2) / x
# From (2) using (3) to replace b
# a^2 = x^2 + y^2 / (1-e^2)
a = math.sqrt(x**2 + y**2 / (1.0 - e**2))
b = a * math.sqrt(1 - e**2)
if swap:
a, b = b, a
return a, b
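    # Numeric sanity sketch for _getEllipseSize (illustrative values only):
    # for a point 4 units right and 3 units above the center, after the swap
    # x, y = 4.0, 3.0, so e = sqrt(16 - 9) / 4 ~= 0.661, a = sqrt(32) ~= 5.66
    # and b = a * 3 / 4 ~= 4.24; indeed 16/a**2 + 9/b**2 == 1, so the point
    # lies on the ellipse with those bounding half-axes.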
def select(self, x, y):
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
width, height = self._getEllipseSize(dataPos)
        # Polygon approximating the ellipse, used for the preview
nbpoints = 27.
angles = numpy.arange(nbpoints) * numpy.pi * 2.0 / nbpoints
circleShape = numpy.array((numpy.cos(angles) * width,
numpy.sin(angles) * height)).T
circleShape += numpy.array(self.center)
self.setSelectionArea(circleShape,
shape="polygon",
fill='hatch',
color=self.color)
eventDict = prepareDrawingSignal('drawingProgress',
'ellipse',
(self.center, (width, height)),
self.parameters)
self.plot.notify(**eventDict)
def endSelect(self, x, y):
self.resetSelectionArea()
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
width, height = self._getEllipseSize(dataPos)
eventDict = prepareDrawingSignal('drawingFinished',
'ellipse',
(self.center, (width, height)),
self.parameters)
self.plot.notify(**eventDict)
def cancelSelect(self):
self.resetSelectionArea()
class SelectRectangle(Select2Points):
"""Drawing rectangle selection area state machine."""
def beginSelect(self, x, y):
self.startPt = self.plot.pixelToData(x, y)
assert self.startPt is not None
def select(self, x, y):
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
self.setSelectionArea((self.startPt,
(self.startPt[0], dataPos[1]),
dataPos,
(dataPos[0], self.startPt[1])),
fill='hatch',
color=self.color)
eventDict = prepareDrawingSignal('drawingProgress',
'rectangle',
(self.startPt, dataPos),
self.parameters)
self.plot.notify(**eventDict)
def endSelect(self, x, y):
self.resetSelectionArea()
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
eventDict = prepareDrawingSignal('drawingFinished',
'rectangle',
(self.startPt, dataPos),
self.parameters)
self.plot.notify(**eventDict)
def cancelSelect(self):
self.resetSelectionArea()
class SelectLine(Select2Points):
"""Drawing line selection area state machine."""
def beginSelect(self, x, y):
self.startPt = self.plot.pixelToData(x, y)
assert self.startPt is not None
def select(self, x, y):
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
self.setSelectionArea((self.startPt, dataPos),
fill='hatch',
color=self.color)
eventDict = prepareDrawingSignal('drawingProgress',
'line',
(self.startPt, dataPos),
self.parameters)
self.plot.notify(**eventDict)
def endSelect(self, x, y):
self.resetSelectionArea()
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
eventDict = prepareDrawingSignal('drawingFinished',
'line',
(self.startPt, dataPos),
self.parameters)
self.plot.notify(**eventDict)
def cancelSelect(self):
self.resetSelectionArea()
class Select1Point(Select):
"""Base class for drawing selection area based on one input point."""
class Idle(State):
def onPress(self, x, y, btn):
if btn == LEFT_BTN:
self.goto('select', x, y)
return True
class Select(State):
def enterState(self, x, y):
self.onMove(x, y)
def onMove(self, x, y):
self.machine.select(x, y)
def onRelease(self, x, y, btn):
if btn == LEFT_BTN:
self.machine.endSelect(x, y)
self.goto('idle')
def onWheel(self, x, y, angle):
self.machine.onWheel(x, y, angle) # Call select default wheel
self.machine.select(x, y)
def __init__(self, plot, parameters):
states = {
'idle': Select1Point.Idle,
'select': Select1Point.Select
}
super(Select1Point, self).__init__(plot, parameters, states, 'idle')
def select(self, x, y):
pass
def endSelect(self, x, y):
pass
def cancelSelect(self):
pass
def cancel(self):
if isinstance(self.state, self.states['select']):
self.cancelSelect()
class SelectHLine(Select1Point):
"""Drawing a horizontal line selection area state machine."""
def _hLine(self, y):
"""Return points in data coords of the segment visible in the plot.
Supports non-orthogonal axes.
"""
left, _top, width, _height = self.plot.getPlotBoundsInPixels()
dataPos1 = self.plot.pixelToData(left, y, check=False)
dataPos2 = self.plot.pixelToData(left + width, y, check=False)
return dataPos1, dataPos2
def select(self, x, y):
points = self._hLine(y)
self.setSelectionArea(points, fill='hatch', color=self.color)
eventDict = prepareDrawingSignal('drawingProgress',
'hline',
points,
self.parameters)
self.plot.notify(**eventDict)
def endSelect(self, x, y):
self.resetSelectionArea()
eventDict = prepareDrawingSignal('drawingFinished',
'hline',
self._hLine(y),
self.parameters)
self.plot.notify(**eventDict)
def cancelSelect(self):
self.resetSelectionArea()
class SelectVLine(Select1Point):
"""Drawing a vertical line selection area state machine."""
def _vLine(self, x):
"""Return points in data coords of the segment visible in the plot.
Supports non-orthogonal axes.
"""
_left, top, _width, height = self.plot.getPlotBoundsInPixels()
dataPos1 = self.plot.pixelToData(x, top, check=False)
dataPos2 = self.plot.pixelToData(x, top + height, check=False)
return dataPos1, dataPos2
def select(self, x, y):
points = self._vLine(x)
self.setSelectionArea(points, fill='hatch', color=self.color)
eventDict = prepareDrawingSignal('drawingProgress',
'vline',
points,
self.parameters)
self.plot.notify(**eventDict)
def endSelect(self, x, y):
self.resetSelectionArea()
eventDict = prepareDrawingSignal('drawingFinished',
'vline',
self._vLine(x),
self.parameters)
self.plot.notify(**eventDict)
def cancelSelect(self):
self.resetSelectionArea()
class DrawFreeHand(Select):
"""Interaction for drawing pencil. It display the preview of the pencil
before pressing the mouse.
"""
class Idle(State):
def onPress(self, x, y, btn):
if btn == LEFT_BTN:
self.goto('select', x, y)
return True
def onMove(self, x, y):
self.machine.updatePencilShape(x, y)
def onLeave(self):
self.machine.cancel()
class Select(State):
def enterState(self, x, y):
self.__isOut = False
self.machine.setFirstPoint(x, y)
def onMove(self, x, y):
self.machine.updatePencilShape(x, y)
self.machine.select(x, y)
def onRelease(self, x, y, btn):
if btn == LEFT_BTN:
if self.__isOut:
self.machine.resetSelectionArea()
self.machine.endSelect(x, y)
self.goto('idle')
def onEnter(self):
self.__isOut = False
def onLeave(self):
self.__isOut = True
def __init__(self, plot, parameters):
# Circle used for pencil preview
angle = numpy.arange(13.) * numpy.pi * 2.0 / 13.
size = parameters.get('width', 1.) * 0.5
self._circle = size * numpy.array((numpy.cos(angle),
numpy.sin(angle))).T
states = {
'idle': DrawFreeHand.Idle,
'select': DrawFreeHand.Select
}
super(DrawFreeHand, self).__init__(plot, parameters, states, 'idle')
@property
def width(self):
return self.parameters.get('width', None)
def setFirstPoint(self, x, y):
self._points = []
self.select(x, y)
def updatePencilShape(self, x, y):
center = self.plot.pixelToData(x, y, check=False)
assert center is not None
polygon = center + self._circle
self.setSelectionArea(polygon, fill='none', color=self.color)
def select(self, x, y):
pos = self.plot.pixelToData(x, y, check=False)
if len(self._points) > 0:
if self._points[-1] == pos:
# Skip same points
return
self._points.append(pos)
eventDict = prepareDrawingSignal('drawingProgress',
'polylines',
self._points,
self.parameters)
self.plot.notify(**eventDict)
def endSelect(self, x, y):
pos = self.plot.pixelToData(x, y, check=False)
if len(self._points) > 0:
if self._points[-1] != pos:
# Append if different
self._points.append(pos)
eventDict = prepareDrawingSignal('drawingFinished',
'polylines',
self._points,
self.parameters)
self.plot.notify(**eventDict)
self._points = None
def cancelSelect(self):
self.resetSelectionArea()
def cancel(self):
self.resetSelectionArea()
class SelectFreeLine(ClickOrDrag, _PlotInteraction):
"""Base class for drawing free lines with tools such as pencil."""
def __init__(self, plot, parameters):
"""Init a state machine.
:param plot: The plot to apply changes to.
:param dict parameters: A dict of parameters such as color.
"""
# self.DRAG_THRESHOLD_SQUARE_DIST = 1 # Disable first move threshold
self._points = []
ClickOrDrag.__init__(self)
_PlotInteraction.__init__(self, plot)
self.parameters = parameters
@property
def color(self):
return self.parameters.get('color', None)
def click(self, x, y, btn):
if btn == LEFT_BTN:
self._processEvent(x, y, isLast=True)
def beginDrag(self, x, y, btn):
self._processEvent(x, y, isLast=False)
def drag(self, x, y, btn):
self._processEvent(x, y, isLast=False)
def endDrag(self, startPos, endPos, btn):
x, y = endPos
self._processEvent(x, y, isLast=True)
def cancel(self):
self.resetSelectionArea()
self._points = []
def _processEvent(self, x, y, isLast):
dataPos = self.plot.pixelToData(x, y, check=False)
isNewPoint = not self._points or dataPos != self._points[-1]
if isNewPoint:
self._points.append(dataPos)
if isNewPoint or isLast:
eventDict = prepareDrawingSignal(
'drawingFinished' if isLast else 'drawingProgress',
'polylines',
self._points,
self.parameters)
self.plot.notify(**eventDict)
if not isLast:
self.setSelectionArea(self._points, fill='none', color=self.color,
shape='polylines')
else:
self.cancel()
# ItemInteraction #############################################################
class ItemsInteraction(ClickOrDrag, _PlotInteraction):
"""Interaction with items (markers, curves and images).
This class provides selection and dragging of plot primitives
that support those interaction.
It is also meant to be combined with the zoom interaction.
"""
class Idle(ClickOrDrag.Idle):
def __init__(self, *args, **kw):
super(ItemsInteraction.Idle, self).__init__(*args, **kw)
self._hoverMarker = None
def enterState(self):
widget = self.machine.plot.getWidgetHandle()
if widget is None or not widget.isVisible():
return
position = widget.mapFromGlobal(qt.QCursor.pos())
self.onMove(position.x(), position.y())
def onMove(self, x, y):
marker = self.machine.plot._getMarkerAt(x, y)
if marker is not None:
dataPos = self.machine.plot.pixelToData(x, y)
assert dataPos is not None
eventDict = prepareHoverSignal(
marker.getName(), 'marker',
dataPos, (x, y),
marker.isDraggable(),
marker.isSelectable())
self.machine.plot.notify(**eventDict)
if marker != self._hoverMarker:
self._hoverMarker = marker
self.machine._setCursorForMarker(marker)
return True
def __init__(self, plot):
self._pan = Pan(plot)
_PlotInteraction.__init__(self, plot)
ClickOrDrag.__init__(self,
clickButtons=(LEFT_BTN, RIGHT_BTN),
dragButtons=(LEFT_BTN, MIDDLE_BTN))
def _setCursorForMarker(self, marker: Optional[items.MarkerBase] = None):
"""Set mouse cursor for given marker"""
if marker is None:
cursor = None
elif marker.isDraggable():
if isinstance(marker, items.YMarker):
cursor = CURSOR_SIZE_VER
elif isinstance(marker, items.XMarker):
cursor = CURSOR_SIZE_HOR
else:
cursor = CURSOR_SIZE_ALL
elif marker.isSelectable():
cursor = CURSOR_POINTING
else:
cursor = None
self.plot.setGraphCursorShape(cursor)
def click(self, x, y, btn):
"""Handle mouse click
:param x: X position of the mouse in pixels
:param y: Y position of the mouse in pixels
:param btn: Pressed button id
        :return: True if the click is caught by an item, False otherwise
"""
# Signal mouse clicked event
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
eventDict = prepareMouseSignal('mouseClicked', btn,
dataPos[0], dataPos[1],
x, y)
self.plot.notify(**eventDict)
eventDict = self._handleClick(x, y, btn)
if eventDict is not None:
self.plot.notify(**eventDict)
def _handleClick(self, x, y, btn):
"""Perform picking and prepare event if click is handled here
:param x: X position of the mouse in pixels
:param y: Y position of the mouse in pixels
:param btn: Pressed button id
:return: event description to send of None if not handling event.
:rtype: dict or None
"""
if btn == LEFT_BTN:
result = self.plot._pickTopMost(x, y, lambda i: i.isSelectable())
if result is None:
return None
item = result.getItem()
if isinstance(item, items.MarkerBase):
xData, yData = item.getPosition()
if xData is None:
xData = [0, 1]
if yData is None:
yData = [0, 1]
eventDict = prepareMarkerSignal('markerClicked',
'left',
item.getName(),
'marker',
item.isDraggable(),
item.isSelectable(),
(xData, yData),
(x, y), None)
return eventDict
elif isinstance(item, items.Curve):
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
xData = item.getXData(copy=False)
yData = item.getYData(copy=False)
indices = result.getIndices(copy=False)
eventDict = prepareCurveSignal('left',
item.getName(),
'curve',
xData[indices],
yData[indices],
dataPos[0], dataPos[1],
x, y)
return eventDict
elif isinstance(item, items.ImageBase):
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
indices = result.getIndices(copy=False)
row, column = indices[0][0], indices[1][0]
eventDict = prepareImageSignal('left',
item.getName(),
'image',
column, row,
dataPos[0], dataPos[1],
x, y)
return eventDict
return None
def _signalMarkerMovingEvent(self, eventType, marker, x, y):
assert marker is not None
xData, yData = marker.getPosition()
if xData is None:
xData = [0, 1]
if yData is None:
yData = [0, 1]
posDataCursor = self.plot.pixelToData(x, y)
assert posDataCursor is not None
eventDict = prepareMarkerSignal(eventType,
'left',
marker.getName(),
'marker',
marker.isDraggable(),
marker.isSelectable(),
(xData, yData),
(x, y),
posDataCursor)
self.plot.notify(**eventDict)
@staticmethod
def __isDraggableItem(item):
return isinstance(item, items.DraggableMixIn) and item.isDraggable()
def __terminateDrag(self, x, y):
"""Finalize a drag operation by reseting to initial state"""
self._setCursorForMarker(self.plot._getMarkerAt(x, y))
self.draggedItemRef = None
def beginDrag(self, x, y, btn):
"""Handle begining of drag interaction
:param x: X position of the mouse in pixels
:param y: Y position of the mouse in pixels
:param str btn: The mouse button for which a drag is starting.
        :return: True if the drag is caught by an item, False otherwise
"""
if btn == LEFT_BTN:
self._lastPos = self.plot.pixelToData(x, y)
assert self._lastPos is not None
result = self.plot._pickTopMost(x, y, self.__isDraggableItem)
item = result.getItem() if result is not None else None
self.draggedItemRef = None if item is None else weakref.ref(item)
if item is None:
self.__terminateDrag(x, y)
return False
if isinstance(item, items.MarkerBase):
self._signalMarkerMovingEvent('markerMoving', item, x, y)
item._startDrag()
return True
elif btn == MIDDLE_BTN:
self._pan.beginDrag(x, y, btn)
return True
def drag(self, x, y, btn):
if btn == LEFT_BTN:
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
item = None if self.draggedItemRef is None else self.draggedItemRef()
if item is not None:
item.drag(self._lastPos, dataPos)
if isinstance(item, items.MarkerBase):
self._signalMarkerMovingEvent('markerMoving', item, x, y)
self._lastPos = dataPos
elif btn == MIDDLE_BTN:
self._pan.drag(x, y, btn)
def endDrag(self, startPos, endPos, btn):
if btn == LEFT_BTN:
item = None if self.draggedItemRef is None else self.draggedItemRef()
if isinstance(item, items.MarkerBase):
posData = list(item.getPosition())
if posData[0] is None:
posData[0] = 1.
if posData[1] is None:
posData[1] = 1.
eventDict = prepareMarkerSignal(
'markerMoved',
'left',
item.getLegend(),
'marker',
item.isDraggable(),
item.isSelectable(),
posData)
self.plot.notify(**eventDict)
item._endDrag()
self.__terminateDrag(*endPos)
elif btn == MIDDLE_BTN:
self._pan.endDrag(startPos, endPos, btn)
def cancel(self):
self._pan.cancel()
widget = self.plot.getWidgetHandle()
if widget is None or not widget.isVisible():
return
position = widget.mapFromGlobal(qt.QCursor.pos())
self.__terminateDrag(position.x(), position.y())
class ItemsInteractionForCombo(ItemsInteraction):
"""Interaction with items to combine through :class:`FocusManager`.
"""
class Idle(ItemsInteraction.Idle):
@staticmethod
def __isItemSelectableOrDraggable(item):
return (item.isSelectable() or (
isinstance(item, items.DraggableMixIn) and item.isDraggable()))
def onPress(self, x, y, btn):
if btn == LEFT_BTN:
result = self.machine.plot._pickTopMost(
x, y, self.__isItemSelectableOrDraggable)
if result is not None: # Request focus and handle interaction
self.goto('clickOrDrag', x, y, btn)
return True
else: # Do not request focus
return False
else:
return super().onPress(x, y, btn)
# FocusManager ################################################################
class FocusManager(StateMachine):
"""Manages focus across multiple event handlers
On press an event handler can acquire focus.
    By default it loses focus when all buttons are released.
"""
class Idle(State):
def onPress(self, x, y, btn):
if btn == LEFT_BTN:
for eventHandler in self.machine.eventHandlers:
requestFocus = eventHandler.handleEvent('press', x, y, btn)
if requestFocus:
self.goto('focus', eventHandler, btn)
break
def _processEvent(self, *args):
for eventHandler in self.machine.eventHandlers:
consumeEvent = eventHandler.handleEvent(*args)
if consumeEvent:
break
def onMove(self, x, y):
self._processEvent('move', x, y)
def onRelease(self, x, y, btn):
if btn == LEFT_BTN:
self._processEvent('release', x, y, btn)
def onWheel(self, x, y, angle):
self._processEvent('wheel', x, y, angle)
class Focus(State):
def enterState(self, eventHandler, btn):
self.eventHandler = eventHandler
self.focusBtns = {btn}
def validate(self):
self.eventHandler.validate()
self.goto('idle')
def onPress(self, x, y, btn):
if btn == LEFT_BTN:
self.focusBtns.add(btn)
self.eventHandler.handleEvent('press', x, y, btn)
def onMove(self, x, y):
self.eventHandler.handleEvent('move', x, y)
def onRelease(self, x, y, btn):
if btn == LEFT_BTN:
self.focusBtns.discard(btn)
requestFocus = self.eventHandler.handleEvent('release', x, y, btn)
if len(self.focusBtns) == 0 and not requestFocus:
self.goto('idle')
def onWheel(self, x, y, angleInDegrees):
self.eventHandler.handleEvent('wheel', x, y, angleInDegrees)
def __init__(self, eventHandlers=()):
self.eventHandlers = list(eventHandlers)
states = {
'idle': FocusManager.Idle,
'focus': FocusManager.Focus
}
super(FocusManager, self).__init__(states, 'idle')
def cancel(self):
for handler in self.eventHandlers:
handler.cancel()
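# Hedged combination sketch: handlers are tried in order and the first one
# whose 'press' handler returns True acquires focus, e.g.:
#   manager = FocusManager((ItemsInteractionForCombo(plot), Zoom(plot, 'black')))
#   manager.handleEvent('press', x, y, LEFT_BTN)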
class ZoomAndSelect(ItemsInteraction):
"""Combine Zoom and ItemInteraction state machine.
:param plot: The Plot to which this interaction is attached
:param color: The color to use for the zoom area bounding box
"""
def __init__(self, plot, color):
super(ZoomAndSelect, self).__init__(plot)
self._zoom = Zoom(plot, color)
self._doZoom = False
@property
def color(self):
"""Color of the zoom area"""
return self._zoom.color
@property
def zoomEnabledAxes(self) -> EnabledAxes:
"""Whether or not to apply zoom for each axis"""
return self._zoom.enabledAxes
@zoomEnabledAxes.setter
def zoomEnabledAxes(self, enabledAxes: EnabledAxes):
self._zoom.enabledAxes = enabledAxes
def click(self, x, y, btn):
"""Handle mouse click
:param x: X position of the mouse in pixels
:param y: Y position of the mouse in pixels
:param btn: Pressed button id
        :return: True if the click is caught by an item, False otherwise
"""
eventDict = self._handleClick(x, y, btn)
if eventDict is not None:
# Signal mouse clicked event
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
clickedEventDict = prepareMouseSignal('mouseClicked', btn,
dataPos[0], dataPos[1],
x, y)
self.plot.notify(**clickedEventDict)
self.plot.notify(**eventDict)
else:
self._zoom.click(x, y, btn)
def beginDrag(self, x, y, btn):
"""Handle start drag and switching between zoom and item drag.
:param x: X position in pixels
:param y: Y position in pixels
:param str btn: The mouse button for which a drag is starting.
"""
self._doZoom = not super(ZoomAndSelect, self).beginDrag(x, y, btn)
if self._doZoom:
self._zoom.beginDrag(x, y, btn)
def drag(self, x, y, btn):
"""Handle drag, eventually forwarding to zoom.
:param x: X position in pixels
:param y: Y position in pixels
:param str btn: The mouse button for which a drag is in progress.
"""
if self._doZoom:
return self._zoom.drag(x, y, btn)
else:
return super(ZoomAndSelect, self).drag(x, y, btn)
def endDrag(self, startPos, endPos, btn):
"""Handle end of drag, eventually forwarding to zoom.
:param startPos: (x, y) position at the beginning of the drag
:param endPos: (x, y) position at the end of the drag
:param str btn: The mouse button for which a drag is done.
"""
if self._doZoom:
return self._zoom.endDrag(startPos, endPos, btn)
else:
return super(ZoomAndSelect, self).endDrag(startPos, endPos, btn)
class PanAndSelect(ItemsInteraction):
"""Combine Pan and ItemInteraction state machine.
:param plot: The Plot to which this interaction is attached
"""
def __init__(self, plot):
super(PanAndSelect, self).__init__(plot)
self._pan = Pan(plot)
self._doPan = False
def click(self, x, y, btn):
"""Handle mouse click
:param x: X position of the mouse in pixels
:param y: Y position of the mouse in pixels
:param btn: Pressed button id
        :return: True if the click is caught by an item, False otherwise
"""
eventDict = self._handleClick(x, y, btn)
if eventDict is not None:
# Signal mouse clicked event
dataPos = self.plot.pixelToData(x, y)
assert dataPos is not None
clickedEventDict = prepareMouseSignal('mouseClicked', btn,
dataPos[0], dataPos[1],
x, y)
self.plot.notify(**clickedEventDict)
self.plot.notify(**eventDict)
else:
self._pan.click(x, y, btn)
def beginDrag(self, x, y, btn):
"""Handle start drag and switching between zoom and item drag.
:param x: X position in pixels
:param y: Y position in pixels
:param str btn: The mouse button for which a drag is starting.
"""
self._doPan = not super(PanAndSelect, self).beginDrag(x, y, btn)
if self._doPan:
self._pan.beginDrag(x, y, btn)
def drag(self, x, y, btn):
"""Handle drag, eventually forwarding to zoom.
:param x: X position in pixels
:param y: Y position in pixels
:param str btn: The mouse button for which a drag is in progress.
"""
if self._doPan:
return self._pan.drag(x, y, btn)
else:
return super(PanAndSelect, self).drag(x, y, btn)
def endDrag(self, startPos, endPos, btn):
"""Handle end of drag, eventually forwarding to zoom.
:param startPos: (x, y) position at the beginning of the drag
:param endPos: (x, y) position at the end of the drag
:param str btn: The mouse button for which a drag is done.
"""
if self._doPan:
return self._pan.endDrag(startPos, endPos, btn)
else:
return super(PanAndSelect, self).endDrag(startPos, endPos, btn)
# Interaction mode control ####################################################
# Mapping of draw modes: event handler
_DRAW_MODES = {
'polygon': SelectPolygon,
'rectangle': SelectRectangle,
'ellipse': SelectEllipse,
'line': SelectLine,
'vline': SelectVLine,
'hline': SelectHLine,
'polylines': SelectFreeLine,
'pencil': DrawFreeHand,
}
class DrawMode(FocusManager):
"""Interactive mode for draw and select"""
def __init__(self, plot, shape, label, color, width):
eventHandlerClass = _DRAW_MODES[shape]
parameters = {
'shape': shape,
'label': label,
'color': color,
'width': width,
}
super().__init__((
Pan(plot, clickButtons=(), dragButtons=(MIDDLE_BTN,)),
eventHandlerClass(plot, parameters)))
def getDescription(self):
"""Returns the dict describing this interactive mode"""
params = self.eventHandlers[1].parameters.copy()
params['mode'] = 'draw'
return params
class DrawSelectMode(FocusManager):
"""Interactive mode for draw and select"""
def __init__(self, plot, shape, label, color, width):
eventHandlerClass = _DRAW_MODES[shape]
self._pan = Pan(plot)
self._panStart = None
parameters = {
'shape': shape,
'label': label,
'color': color,
'width': width,
}
super().__init__((
ItemsInteractionForCombo(plot),
eventHandlerClass(plot, parameters)))
def handleEvent(self, eventName, *args, **kwargs):
# Hack to add pan interaction to select-draw
# See issue Refactor PlotWidget interaction #3292
if eventName == 'press' and args[2] == MIDDLE_BTN:
self._panStart = args[:2]
self._pan.beginDrag(*args)
return # Consume middle click events
        elif eventName == 'release' and args[2] == MIDDLE_BTN:
            panStart, self._panStart = self._panStart, None
            self._pan.endDrag(panStart, args[:2], MIDDLE_BTN)
return # Consume middle click events
elif self._panStart is not None and eventName == 'move':
x, y = args[:2]
self._pan.drag(x, y, MIDDLE_BTN)
super().handleEvent(eventName, *args, **kwargs)
def getDescription(self):
"""Returns the dict describing this interactive mode"""
params = self.eventHandlers[1].parameters.copy()
params['mode'] = 'select-draw'
return params
class PlotInteraction(qt.QObject):
"""PlotWidget user interaction handler.
:param plot: The :class:`PlotWidget` to apply interaction to
"""
sigChanged = qt.Signal()
"""Signal emitted when the interaction configuration has changed"""
_DRAW_MODES = {
'polygon': SelectPolygon,
'rectangle': SelectRectangle,
'ellipse': SelectEllipse,
'line': SelectLine,
'vline': SelectVLine,
'hline': SelectHLine,
'polylines': SelectFreeLine,
'pencil': DrawFreeHand,
}
def __init__(self, parent):
super().__init__(parent)
self.__zoomOnWheel = True
self.__zoomEnabledAxes = EnabledAxes()
# Default event handler
self._eventHandler = ItemsInteraction(parent)
def isZoomOnWheelEnabled(self) -> bool:
"""Returns whether or not wheel interaction triggers zoom"""
return self.__zoomOnWheel
def setZoomOnWheelEnabled(self, enabled: bool):
"""Toggle zoom on wheel interaction"""
if enabled != self.__zoomOnWheel:
self.__zoomOnWheel = enabled
self.sigChanged.emit()
def setZoomEnabledAxes(self, xaxis: bool, yaxis: bool, y2axis: bool):
"""Toggle zoom interaction for each axis
This is taken into account only if the plot does not keep aspect ratio.
"""
zoomEnabledAxes = EnabledAxes(xaxis, yaxis, y2axis)
if zoomEnabledAxes != self.__zoomEnabledAxes:
self.__zoomEnabledAxes = zoomEnabledAxes
if isinstance(self._eventHandler, ZoomAndSelect):
self._eventHandler.zoomEnabledAxes = zoomEnabledAxes
self.sigChanged.emit()
def getZoomEnabledAxes(self) -> EnabledAxes:
"""Returns axes for which zoom is enabled"""
return self.__zoomEnabledAxes
def _getInteractiveMode(self):
"""Returns the current interactive mode as a dict.
The returned dict contains at least the key 'mode'.
Mode can be: 'draw', 'pan', 'select', 'select-draw', 'zoom'.
        It can also contain extra keys (e.g., 'color') specific to a mode
as provided to :meth:`_setInteractiveMode`.
"""
if isinstance(self._eventHandler, ZoomAndSelect):
return {'mode': 'zoom', 'color': self._eventHandler.color}
elif isinstance(self._eventHandler, (DrawMode, DrawSelectMode)):
return self._eventHandler.getDescription()
elif isinstance(self._eventHandler, PanAndSelect):
return {'mode': 'pan'}
else:
return {'mode': 'select'}
def _validate(self):
"""Validate the current interaction if possible
        It was designed to close the polygon interaction.
"""
self._eventHandler.validate()
def _setInteractiveMode(self, mode, color='black',
shape='polygon', label=None, width=None):
"""Switch the interactive mode.
:param str mode: The name of the interactive mode.
In 'draw', 'pan', 'select', 'select-draw', 'zoom'.
:param color: Only for 'draw' and 'zoom' modes.
Color to use for drawing selection area. Default black.
If None, selection area is not drawn.
:type color: Color description: The name as a str or
a tuple of 4 floats or None.
:param str shape: Only for 'draw' mode. The kind of shape to draw.
                          In 'polygon', 'rectangle', 'ellipse', 'line',
                          'vline', 'hline', 'polylines', 'pencil'.
Default is 'polygon'.
:param str label: Only for 'draw' mode.
:param float width: Width of the pencil. Only for draw pencil mode.
"""
assert mode in ('draw', 'pan', 'select', 'select-draw', 'zoom')
plotWidget = self.parent()
assert plotWidget is not None
if isinstance(color, numpy.ndarray) or color not in (None, 'video inverted'):
color = colors.rgba(color)
if mode in ('draw', 'select-draw'):
self._eventHandler.cancel()
handlerClass = DrawMode if mode == 'draw' else DrawSelectMode
self._eventHandler = handlerClass(plotWidget, shape, label, color, width)
elif mode == 'pan':
# Ignores color, shape and label
self._eventHandler.cancel()
self._eventHandler = PanAndSelect(plotWidget)
elif mode == 'zoom':
# Ignores shape and label
self._eventHandler.cancel()
self._eventHandler = ZoomAndSelect(plotWidget, color)
self._eventHandler.zoomEnabledAxes = self.getZoomEnabledAxes()
else: # Default mode: interaction with plot objects
# Ignores color, shape and label
self._eventHandler.cancel()
self._eventHandler = ItemsInteraction(plotWidget)
self.sigChanged.emit()
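    # Hedged usage sketch (the PlotWidget parent wiring is assumed):
    #   interaction = PlotInteraction(plotWidget)
    #   interaction._setInteractiveMode('zoom', color='black')
    #   interaction._setInteractiveMode('draw', shape='rectangle',
    #                                   color=(1., 0., 0., 1.))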
def handleEvent(self, event, *args, **kwargs):
"""Forward event to current interactive mode state machine."""
if event == 'wheel': # Handle wheel events directly
self._onWheel(*args, **kwargs)
return
self._eventHandler.handleEvent(event, *args, **kwargs)
def _onWheel(self, x: float, y: float, angle: float):
"""Handle wheel events"""
if not self.isZoomOnWheelEnabled():
return
plotWidget = self.parent()
if plotWidget is None:
return
# All axes are enabled if keep aspect ratio is on
enabledAxes = EnabledAxes() if plotWidget.isKeepDataAspectRatio() else self.getZoomEnabledAxes()
if enabledAxes.isDisabled():
return
scale = 1.1 if angle > 0 else 1. / 1.1
applyZoomToPlot(plotWidget, scale, (x, y), enabledAxes)
|
9adcabdda26e771f2b8c1826a1824f1a5ba076ff
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/ut/python/graph_syntax/operators/test_mod.py
|
1da0b385cc9e977fb77258a55395c20199583929
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 2,566
|
py
|
test_mod.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test mod"""
import mindspore.nn as nn
from mindspore import context
context.set_context(mode=context.GRAPH_MODE)
def test_positive_mod_positive():
class Mod(nn.Cell):
def __init__(self, x, y):
super(Mod, self).__init__()
self.x = x
self.y = y
def construct(self):
return self.x % self.y
x = 3.0
y = 1.3
mod_net = Mod(x, y)
expect = x % y
assert abs(mod_net() - expect) < 0.000001
def test_positive_mod_negative():
class Mod(nn.Cell):
def __init__(self, x, y):
super(Mod, self).__init__()
self.x = x
self.y = y
def construct(self):
return self.x % self.y
x = 3.0
y = -1.3
mod_net = Mod(x, y)
expect = x % y
assert abs(mod_net() - expect) < 0.000001
def test_negative_mod_positive():
class Mod(nn.Cell):
def __init__(self, x, y):
super(Mod, self).__init__()
self.x = x
self.y = y
def construct(self):
return self.x % self.y
x = -3.0
y = 1.3
mod_net = Mod(x, y)
expect = x % y
assert abs(mod_net() - expect) < 0.000001
def test_negative_mod_negative():
class Mod(nn.Cell):
def __init__(self, x, y):
super(Mod, self).__init__()
self.x = x
self.y = y
def construct(self):
return self.x % self.y
x = -3.0
y = -1.3
mod_net = Mod(x, y)
expect = x % y
assert abs(mod_net() - expect) < 0.000001
def test_int_mod_int():
class Mod(nn.Cell):
def __init__(self, x, y):
super(Mod, self).__init__()
self.x = x
self.y = y
def construct(self):
return self.x % self.y
x = 3
y = 2
mod_net = Mod(x, y)
expect = x % y
assert abs(mod_net() - expect) < 0.000001
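# --- Illustrative sketch (a minimal sketch, not part of the original file):
# the five tests above share one structure; a parametrized equivalent is
# sketched below. It assumes pytest is available in the test environment
# (the original file does not import it).
import pytest
@pytest.mark.parametrize("x, y", [(3.0, 1.3), (3.0, -1.3),
                                  (-3.0, 1.3), (-3.0, -1.3), (3, 2)])
def test_mod_parametrized(x, y):
    class Mod(nn.Cell):
        def __init__(self, x, y):
            super(Mod, self).__init__()
            self.x = x
            self.y = y
        def construct(self):
            return self.x % self.y
    mod_net = Mod(x, y)
    assert abs(mod_net() - x % y) < 0.000001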
|
6c8957e855e8d401144550732085389a4163bab8
|
479a9c76b19b84d6cde69305828031cd2531aa56
|
/testing/MLDB-1012_nested_function_calls.py
|
79ae87f0789dd9c21875a7dcbbe4e9142ef44a39
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mldbai/mldb
|
d36801bd99dd3f82d7557cd0f438b0121f63f22c
|
19bc4bc92a41ee8ad4eab0979dffd9c985d95758
|
refs/heads/master
| 2023-09-03T22:59:11.621839
| 2022-12-30T18:42:24
| 2022-12-30T18:42:24
| 47,634,692
| 701
| 107
|
Apache-2.0
| 2023-02-10T23:08:05
| 2015-12-08T16:34:16
|
C++
|
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
MLDB-1012_nested_function_calls.py
|
#
# MLDB-1012_nested_function_calls.py
# mldb.ai inc, 2015
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
from mldb import mldb, ResponseException
conf = {
"type": "sql.expression",
"params": {
"expression": "input.x*2 as x2, input.y*2 as y2"
}
}
mldb.put("/v1/functions/f1", conf)
conf2 = {
"type": "sql.expression",
"params": {
"expression": "input.x3*2 as x4, input.y3*2 as y4"
}
}
mldb.put("/v1/functions/f2", conf2)
rez = mldb.get("/v1/query", q="select f1( {input: {x: 1, y: 2}} ) as *")
js_rez = rez.json()
mldb.log(js_rez)
assert js_rez[0]['columns'][0][1] == 2
assert js_rez[0]['columns'][1][1] == 4
rez = mldb.get("/v1/query",
q="""select f2( {input: f1( {input: {x: 1, y: 2}} )
[{x3: x2, y3: y2}] }) as * """)
js_rez = rez.json()
mldb.log(js_rez)
assert js_rez[0]['columns'][0][1] == 4
assert js_rez[0]['columns'][1][1] == 8
# Test for 3-deep nested arguments
conf3 = {
"type": "sql.expression",
"params": {
"expression": "input.nested.x as foo"
}
}
rez = mldb.put("/v1/functions/f3", conf3)
mldb.log(rez)
rez = mldb.get("/v1/query",
q="select f3( { {{ 42 as x } as nested} as input } ) as *")
js_rez = rez.json()
mldb.log(js_rez)
assert js_rez[0]['columns'][0][1] == 42
mldb.put("/v1/functions/a", {
"type": "sql.expression",
"params": {"expression": "abs(input) as output"}
})
mldb.put("/v1/functions/b", {
"type": "sql.expression",
"params": {"expression": "a({input})[output] as output"}
})
mldb.put("/v1/functions/c", {
"type": "sql.expression",
"params": {"expression": "b({input})[output] as output"}
})
rez = mldb.get("/v1/query", q="select c({input: -1})")
js_rez = rez.json()
assert js_rez[0]['columns'][0][1] == 1
# MLDB-1251
mldb.log("MLDB-1251")
try:
mldb.put("/v1/functions/recurse", {
"type": "sql.expression",
"params": {"expression": "recurse({input})[output] as output"}
})
mldb.get("/v1/query", q="select recurse({input: -1})")
except ResponseException as exc:
pass
else:
assert False, 'Should have failed with a 400'
request.set_return("success")
|
cd072675eb540206ac1a3fa7d50b360c978470ac
|
8112d133bcb70b03a35b792caf8d682ba4ab22cd
|
/doc/book/fstar_pygments.py
|
d299ac6d1439420d736d9affed19a896d418b97a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
FStarLang/FStar
|
0d006096d33040c407ca747c5262abf91f77df1a
|
fc4726d1e04868a6a12bfc6927c0b87039e0ff8c
|
refs/heads/master
| 2023-08-18T18:24:53.271431
| 2023-08-18T16:23:24
| 2023-08-18T16:23:24
| 18,411,972
| 2,535
| 320
|
Apache-2.0
| 2023-09-03T17:11:50
| 2014-04-03T17:32:49
|
F*
|
UTF-8
|
Python
| false
| false
| 2,492
|
py
|
fstar_pygments.py
|
from pygments.lexer import RegexLexer, words
from pygments.token import *
# very rough lexer; not 100% precise
class CustomLexer(RegexLexer):
name = 'FStar'
aliases = ['fstar']
filenames = ['*.fst', '*.fsti']
keywords = (
'attributes' ,
'noeq' ,
'unopteq' ,
'and' ,
'assert' ,
'assume' ,
'begin' ,
'by' ,
'calc' ,
'class' ,
'decreases' ,
'Dv' ,
'effect' ,
'eliminate' ,
'else' ,
'end' ,
'ensures' ,
'exception' ,
'exists' ,
'false' ,
'friend' ,
'forall' ,
'fun' ,
'function' ,
'GTot' ,
'if' ,
'in' ,
'include' ,
'inline' ,
'inline_for_extraction' ,
'instance' ,
'introduce' ,
'irreducible',
'let' ,
'logic' ,
'match' ,
'module' ,
'new' ,
'new_effect' ,
'layered_effect' ,
'polymonadic_bind' ,
'polymonadic_subcomp' ,
'SMTPat' ,
'noextract',
'of' ,
'open' ,
'opaque' ,
'private' ,
'range_of' ,
'rec' ,
'reifiable' ,
'reify' ,
'reflectable',
'requires' ,
'returns' ,
'set_range_of',
'sub_effect' ,
'synth' ,
'then' ,
'total' ,
'Tot' ,
'true' ,
'try' ,
'type' ,
'unfold' ,
'unfoldable' ,
'val' ,
'when' ,
'with' ,
'_' ,
'Lemma' ,
)
tokens = {
'root': [
(r' ', Text),
(r'\n', Text),
(r'\r', Text),
(r'//.*\n', Comment),
(r'\([*]([^*]|[*]+[^)])*[*]+\)', Comment),
(words(keywords, suffix=r'\b'), Keyword),
(r'0x[0-9a-fA-F_]+', Literal.Number),
(r'[0-9_]+', Literal.Number),
(r'[a-zA-Z_]+', Text),
(r'.', Text),
]
}
#class CustomFormatter:
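# --- Illustrative sketch (a minimal sketch, not part of the original file):
# using the lexer above directly with pygments, outside of any Sphinx or
# plugin registration. highlight() and TerminalFormatter are standard
# pygments APIs; the helper name is hypothetical.
def _demo_highlight(source):
    """Render F* source as ANSI-colored text."""
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    return highlight(source, CustomLexer(), TerminalFormatter())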
|
dce6ef15cfe42176d52cf6add19b33175cdfcb62
|
420e7db695f82c7cf9d29735df956fa86bc0f14f
|
/layers/raw_layer.py
|
e27910b8998d114933239249dcc0bda05f3fd587
|
[
"BSD-3-Clause"
] |
permissive
|
Kkevsterrr/geneva
|
bf929e3056dc6215bca079f1fd587866907a1cd5
|
6b091060ed0946b98a2ff9196dfbf93d85cbb28a
|
refs/heads/master
| 2023-08-23T22:30:49.750259
| 2023-05-18T21:24:14
| 2023-05-18T21:24:14
| 221,001,148
| 1,771
| 168
|
BSD-3-Clause
| 2023-05-26T10:04:58
| 2019-11-11T14:37:39
|
Python
|
UTF-8
|
Python
| false
| false
| 189
|
py
|
raw_layer.py
|
from layers.layer import Layer
from scapy.all import Raw
class RawLayer(Layer):
"""
Defines an interface for the scapy Raw layer.
"""
name = "Raw"
protocol = Raw
_fields = []
fields = []
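# --- Illustrative sketch (a minimal sketch, not part of the original file):
# what this adapter wraps is scapy's Raw payload layer; the helper below
# only exercises the underlying scapy object and its name is hypothetical.
def _demo_raw_payload():
    """Construct a Raw payload and return its on-the-wire bytes."""
    pkt = Raw(load=b"hello")
    return bytes(pkt)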
|
f5d10ae9894d3b9a0028af9992d0170e975c5524
|
a06812b0e10830c75abb05d16c1993750d75a403
|
/qgis/examples/quick_api_interactive_proper_testing/quick_api/gui/quick_api_dialog.py
|
261c49cea7a42f3f37a54f820f119d34a02f3866
|
[
"LicenseRef-scancode-zeusbench",
"LicenseRef-scancode-rsa-1990",
"RSA-MD",
"Spencer-94",
"BSD-3-Clause",
"LicenseRef-scancode-rsa-md4",
"HPND-sell-variant",
"metamail",
"Apache-2.0",
"Beerware",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
gis-ops/tutorials
|
dfa8130d9f733422f85ca0b03beddca5541f2a52
|
92cd7c064040ba2febde36d5c2defb96d10bae21
|
refs/heads/master
| 2023-05-10T20:01:36.657762
| 2023-04-25T11:40:27
| 2023-04-25T11:40:27
| 159,300,990
| 138
| 67
|
Apache-2.0
| 2023-05-01T21:34:35
| 2018-11-27T08:25:51
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,388
|
py
|
quick_api_dialog.py
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QuickApiDialog
A QGIS plugin
 Query the Nominatim reverse geocoding API
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2021-03-12
git sha : $Format:%H$
copyright : (C) 2021 by GIS-OPS UG
email : info@gis-ops.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from qgis.PyQt.QtWidgets import QMessageBox, QApplication
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt import uic
from qgis.PyQt import QtWidgets
from qgis.gui import QgisInterface, QgsMapTool
from qgis.core import (
QgsCoordinateReferenceSystem,
QgsProject,
QgsPointXY,
QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsRectangle,
QgsCoordinateTransform,
)
from ..core.utils import maybe_transform_wgs84
from ..core.query import Nominatim
from ..core.maptool import PointTool
from ..ui.quick_api_dialog_base import Ui_QuickApiDialogBase
# This loads your .ui file so that PyQt can populate your plugin with the elements from Qt Designer
FORM_CLASS, _ = uic.loadUiType(
os.path.join(os.path.dirname(__file__), "../ui/quick_api_dialog_base.ui")
)
class QuickApiDialog(QtWidgets.QDialog, Ui_QuickApiDialogBase):
def __init__(self, iface: QgisInterface, parent=None):
"""Constructor."""
super(QuickApiDialog, self).__init__(parent)
self.setupUi(self)
self.iface = iface
# Instantiate a network access manager
self.nominatim = Nominatim()
self.point_tool = PointTool(iface.mapCanvas())
self.last_map_tool: QgsMapTool = None
# Set up widgets
self.map_button.setIcon(
QIcon(":images/themes/default/cursors/mCapturePoint.svg")
)
self.crs_input.setCrs(QgsCoordinateReferenceSystem("EPSG:4326"))
# Set up callbacks
self.finished.connect(self.request_geocoder)
self.map_button.clicked.connect(self._on_map_click)
self.point_tool.canvasClicked.connect(self._write_line_widget)
self.point_tool.deactivated.connect(
lambda: QApplication.restoreOverrideCursor()
)
def request_geocoder(self, result: int):
# See if OK was pressed
if result:
project = QgsProject.instance()
# First get all the values of the GUI items
crs_input = self.crs_input.crs()
lineedit_text = self.lineedit_xy.value()
# Protect the free text field for coordinates from generic user failure
try:
lineedit_yx = [
float(coord.strip()) for coord in lineedit_text.split(",")
]
            except ValueError:
QMessageBox.critical(
self.iface.mainWindow(),
"QuickAPI error",
"Did you really specify a coordinate in comma-separated Lat/Long?\nExiting...",
)
return
# Create the input point and transform if necessary
point = maybe_transform_wgs84(
QgsPointXY(*reversed(lineedit_yx)),
crs_input,
QgsCoordinateTransform.ForwardTransform,
)
# Do the request and set the attributes
self.nominatim.do_request(point)
# Only process if HTTP status code is 200
if self.nominatim.status_code == 200:
# Get the content of the response and process it
if self.nominatim.error_string:
QMessageBox.critical(
self.iface.mainWindow(),
"Quick API error",
"The request was not processed succesfully!\n\n"
"Message:\n"
f"{self.nominatim.error_string}",
)
return
# Create the output memory layer
layer_out = QgsVectorLayer(
"Point?crs=EPSG:4326&field=address:string&field=license:string",
"Nominatim Reverse Geocoding",
"memory",
)
# Create the output feature (only one here)
feature = QgsFeature()
feature.setGeometry(
QgsGeometry.fromPointXY(self.nominatim.get_point())
)
feature.setAttributes(list(self.nominatim.get_attributes()))
# Add feature to layer and layer to map
layer_out.dataProvider().addFeature(feature)
layer_out.updateExtents()
project.addMapLayer(layer_out)
# build bbox for auto-zoom feature
bbox_temp = list()
for p in self.nominatim.get_bbox_points():
p = maybe_transform_wgs84(
p,
project.crs(),
QgsCoordinateTransform.ReverseTransform,
)
bbox_temp.append(p)
self.iface.mapCanvas().zoomToFeatureExtent(
QgsRectangle(*bbox_temp)
)
def _on_map_click(self):
self.hide()
self.last_map_tool = self.iface.mapCanvas().mapTool()
self.iface.mapCanvas().setMapTool(self.point_tool)
def _write_line_widget(self, point: QgsPointXY):
self.lineedit_xy.setText(f"{point.y():.6f}, {point.x():.6f}")
self.iface.mapCanvas().setMapTool(self.last_map_tool)
self.show()
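# --- Illustrative sketch (a minimal sketch, not part of the original file):
# the free-text coordinate handling inside request_geocoder() extracted as
# a standalone helper so it can be unit-tested without Qt; the helper name
# is hypothetical.
def _parse_lat_lon(text):
    """Return (lat, lon) parsed from 'lat, lon' text, or None if invalid."""
    try:
        lat, lon = [float(part.strip()) for part in text.split(",")]
    except ValueError:
        return None
    return lat, lon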
|
29102259305b5d3a5981e00ca711cbcb46a04858
|
ea902879e3755c84b1a4961f07fe060d98374a57
|
/dingding/main.py
|
f58906d6f62f7c51b509d4ed9f2bad12f6af0f6c
|
[] |
no_license
|
zhouwei713/data_analysis
|
7a197319cd4175df3646e53201d05551edce3f75
|
5e3d865ee74379665633e8e80a98ae4e193d3a9f
|
refs/heads/master
| 2022-02-12T03:03:22.601292
| 2022-01-26T14:32:30
| 2022-01-26T14:32:30
| 168,663,096
| 392
| 247
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,418
|
py
|
main.py
|
# encoding: utf-8
"""
@version: ??
@author: Andy
@file: main.py
@time: 20/2/19 12:58
"""
from selenium import webdriver
from selenium.webdriver import ChromeOptions
import time
import os
options = ChromeOptions()
# options.add_argument(r"--user-data-dir=C:\Users\wei.zhou\AppData\Local\Google\Chrome\User Data\Default")
url = 'https://apps.apple.com/cn/app/%E9%92%89%E9%92%89/id930368978#see-all/reviews'
def get_data():
Chrome_driver = webdriver.Chrome(options=options)
try:
Chrome_driver.get(url)
time.sleep(5)
print('start scroll')
        for step in range(501):
            print('step:', step)
            Chrome_driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
            time.sleep(5)
print('end scroll')
print('start to save data!')
for i in range(1, 20000):
comment_dict = {}
message_div = Chrome_driver.find_element_by_xpath('/html/body/div[4]/div/main/div/div/div/section/div[2]/div[%s]' % i)
inner_message = message_div.find_element_by_css_selector('.we-customer-review.lockup.ember-view')
score = inner_message.find_element_by_tag_name('figure').get_attribute('aria-label')
user_info = inner_message.find_element_by_css_selector('.we-customer-review__header.we-customer-review__header--user')
username = user_info.find_element_by_css_selector('.we-truncate.we-truncate--single-line.ember-view.we-customer-review__user').text
uptime = user_info.find_element_by_tag_name('time').text
title = inner_message.find_element_by_tag_name('h3').text
content = inner_message.find_element_by_tag_name('blockquote').find_element_by_tag_name('div').find_element_by_tag_name('p').text
comment_dict['score'] = score
comment_dict['username'] = username
comment_dict['time'] = uptime
comment_dict['title'] = title
comment_dict['content'] = content
print('score', score)
print('userName', username)
print('time', uptime)
print('title', title)
print('content', content)
save_data_to_csv(comment_dict)
Chrome_driver.close()
except Exception as e:
print(e)
Chrome_driver.close()
def save_data_to_csv(d):
    # Write the header only when the file is first created
    need_header = not os.path.exists('appstore_data1.csv')
    with open('appstore_data1.csv', 'a+', encoding='utf-8') as f:
        if need_header:
            f.write('score|username|time|title|content\n')
        try:
            row = '{}|{}|{}|{}|{}'.format(
                d['score'],
                d['username'],
                d['time'],
                d['title'],
                d['content'])
            f.write(row)
            f.write('\n')
        except KeyError:
            # Skip records with missing fields instead of crashing
            pass
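# --- Illustrative sketch (a minimal sketch, not part of the original
# script): the standard-library csv module would also escape embedded '|'
# and newlines, which the hand-built rows above do not; same file name and
# columns, hypothetical function name.
import csv
def save_data_to_csv_stdlib(d):
    need_header = not os.path.exists('appstore_data1.csv')
    with open('appstore_data1.csv', 'a+', encoding='utf-8', newline='') as f:
        writer = csv.writer(f, delimiter='|')
        if need_header:
            writer.writerow(['score', 'username', 'time', 'title', 'content'])
        writer.writerow([d.get(k, '') for k in
                         ('score', 'username', 'time', 'title', 'content')])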
if __name__ == '__main__':
get_data()
|
9a648e975819ea36fab7b9dd6a269736a4e7c588
|
16fcf54e753704a08888a235419ef19fcb49b793
|
/Utils/GitThread.py
|
0088b11243749de0de57c09a9278ec277e932d50
|
[] |
no_license
|
PyQt5/PyQtClient
|
08ca93f7af79c201fbb3d232a6063741f900a019
|
f86e4e5038f9d9b1626c0a25f3aa59d33c3e4393
|
refs/heads/master
| 2022-09-03T21:43:42.371070
| 2022-09-01T19:20:44
| 2022-09-01T19:20:44
| 120,252,826
| 285
| 97
| null | 2019-02-02T09:53:59
| 2018-02-05T04:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 16,066
|
py
|
GitThread.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2019-01-05
@author: Irony
@site: https://pyqt.site https://github.com/PyQt5
@email: 892768447@qq.com
@file: Utils.GitThread
@description: Git operation threads
"""
import os
import re
import shutil
import stat
from contextlib import closing
from pathlib import Path
from time import time
from zipfile import ZipFile
import pygit2
import requests
from PyQt5.QtCore import QCoreApplication, QObject, Qt, QThread
from PyQt5.QtGui import QImage
from requests.auth import HTTPBasicAuth
from requests.exceptions import ConnectTimeout
from Utils import Constants, Version
from Utils.CommonUtil import AppLog, Signals, get_avatar_path
class LoginThread(QObject):
"""登录Github,获取头像
"""
Url = 'https://github.com/{0}.png?size=130'
def __init__(self, account, password, *args, **kwargs):
super(LoginThread, self).__init__(*args, **kwargs)
self.account = account
self.password = password
self.status = ''
self.emoji = ''
@classmethod
def quit(cls):
"""退出线程
:param cls:
"""
if hasattr(cls, '_thread'):
cls._thread.quit()
AppLog.info('login thread quit')
@classmethod
def start(cls, account, password='', parent=None):
"""启动登录线程
:param cls:
:param account: 账号
:param password: 密码
"""
cls._thread = QThread(parent)
cls._worker = LoginThread(account, password)
cls._worker.moveToThread(cls._thread)
cls._thread.started.connect(cls._worker.run)
cls._thread.finished.connect(cls._worker.deleteLater)
cls._thread.start()
AppLog.info('login thread started')
def save_avatar(self, data):
"""保存头像
:param data: 头像数据
"""
Constants.ImageAvatar = get_avatar_path(self.account)
image = QImage()
if image.loadFromData(data):
            # Scale the image
if not image.isNull():
if image.width() != 130 or image.height() != 130:
AppLog.warn('scaled avatar image size to 130x130')
image = image.scaled(130, 130, Qt.IgnoreAspectRatio,
Qt.SmoothTransformation)
AppLog.debug('save to: {}'.format(Constants.ImageAvatar))
return image.save(Constants.ImageAvatar)
else:
AppLog.warn('avatar image is null')
else:
AppLog.warn('can not load from image data')
return False
def get_avatar(self, url):
"""获取头像
:param url: 头像url
"""
try:
req = requests.get(url)
if req.status_code == 200 and req.headers.get(
'Content-Type').startswith('image/'):
imgformat = req.headers.get('Content-Type', '').split('/')[-1]
AppLog.debug('image type: {}'.format(imgformat))
AppLog.debug('content length: {}'.format(len(req.content)))
return self.save_avatar(req.content)
except Exception as e:
AppLog.warning(str(e))
return False
def run(self):
AppLog.info('start login github')
        # Method 1: fetch the avatar directly from the URL
av_ok = self.get_avatar(self.Url.format(self.account))
        # Method 2: extract it from the profile page
try:
req = requests.get(
self.Url.format(self.account).split('.png?size')[0])
if req.status_code == 200:
                # Extract the avatar URL
aurls = re.findall(
r'<meta property="og:image"\s*content="(.*?)"\s*/>'.encode(
), req.content)
                # Extract the user status
status = re.findall(
r'<div class="user-status-message-wrapper.*?"\s*>\s*<div>\s*(.*?)\s*</div>'
.encode(), req.content)
if status:
self.status = status[0].decode()
                # Extract the status emoji
emoji = re.findall(
r'<g-emoji.*?fallback-src="(.*?)"\s*>(.*?)</g-emoji>'.
encode(), req.content)
if emoji:
self.emoji = emoji[0][-1].decode()
if self.emoji.startswith('http'):
self.emoji = ''
                # Download the avatar
if not av_ok and len(aurls) > 0:
av_ok = self.get_avatar(aurls[0])
except Exception as e:
AppLog.warning(str(e))
if av_ok:
Signals.loginSuccessed.emit(self.account, self.status, self.emoji)
else:
Signals.loginErrored.emit(
QCoreApplication.translate('Repository',
'Login failed, Unknown reason'))
AppLog.info('login thread end')
LoginThread.quit()
class ProgressCallback(pygit2.RemoteCallbacks):
"""clone过程中的进度条
"""
def transfer_progress(self, stats):
Signals.progressUpdated.emit(stats.received_objects,
stats.total_objects)
AppLog.debug('total: {}, received: {}'.format(stats.total_objects,
stats.received_objects))
class CloneThread(QObject):
"""获取项目源码
"""
UrlGithub = 'https://github.com/PyQt5/PyQt.git'
UrlGitee = 'https://gitee.com/PyQt5/PyQt.git'
@classmethod
def quit(cls):
"""退出线程
:param cls:
"""
if hasattr(cls, '_thread'):
cls._thread.quit()
AppLog.info('clone thread quit')
@classmethod
def start(cls, parent=None):
"""启动Clone线程
:param cls:
"""
cls._thread = QThread(parent)
cls._worker = CloneThread()
cls._worker.moveToThread(cls._thread)
cls._thread.started.connect(cls._worker.run)
cls._thread.finished.connect(cls._worker.deleteLater)
cls._thread.start()
AppLog.info('clone thread started')
def pull(self, repo, remote_name='github,gitee', branch='master'):
""" pull changes for the specified remote (defaults to origin).
Code from MichaelBoselowitz at:
https://github.com/MichaelBoselowitz/pygit2-examples/blob/
68e889e50a592d30ab4105a2e7b9f28fac7324c8/examples.py#L58
licensed under the MIT license.
"""
repo.remotes.set_url('gitee', self.UrlGitee)
repo.remotes.set_url('github', self.UrlGithub)
for remote in repo.remotes:
if remote.name in remote_name:
AppLog.info('update from: {}'.format(remote.name))
remote.fetch()
remote_master_id = repo.lookup_reference(
'refs/remotes/origin/%s' % (branch)).target
merge_result, _ = repo.merge_analysis(remote_master_id)
# Up to date, do nothing
if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
return
# We can just fastforward
elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
repo.checkout_tree(repo.get(remote_master_id))
try:
master_ref = repo.lookup_reference('refs/heads/%s' %
(branch))
master_ref.set_target(remote_master_id)
except KeyError:
repo.create_branch(branch, repo.get(remote_master_id))
repo.head.set_target(remote_master_id)
return
elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
repo.merge(remote_master_id)
if repo.index.conflicts is not None:
for conflict in repo.index.conflicts:
for c in conflict:
if not c:
continue
AppLog.error('Conflicts found in: %s', c.path)
raise AssertionError('Conflicts, ahhhhh!!')
user = repo.default_signature
tree = repo.index.write_tree()
repo.create_commit('HEAD', user, user, 'Merge!', tree,
[repo.head.target, remote_master_id])
# We need to do this or git CLI will think we are still
# merging.
repo.state_cleanup()
return
else:
raise AssertionError('Unknown merge analysis result')
def remove(self):
"""删除未clone完成的目录"""
for path in Path(Constants.DirProjects).rglob('*'):
path.chmod(stat.S_IWRITE)
shutil.rmtree(Constants.DirProjects, ignore_errors=True)
def clone(self, url):
"""克隆项目"""
AppLog.info('clone from: {}'.format(url))
pygit2.clone_repository(url,
Constants.DirProjects,
callbacks=ProgressCallback())
def _clone(self):
ok = False
for url in (self.UrlGithub, self.UrlGitee):
try:
                # Clone fresh: drop any existing local copy first
                if os.path.exists(Constants.DirProjects):
                    # Remove the folder if it already exists
AppLog.info('remove dir: {}'.format(Constants.DirProjects))
self.remove()
AppLog.info('clone into dir: {}'.format(Constants.DirProjects))
Signals.progressUpdated.emit(5, 100)
self.clone(url)
ok = True
break
except Exception as e:
AppLog.error(str(e))
if not ok:
raise Exception('clone failed')
def run(self):
try:
path = pygit2.discover_repository(Constants.DirProjects)
if not path:
self._clone()
else:
repo = pygit2.Repository(path)
                if repo.is_empty:  # if the project is empty
self._clone()
else:
                    # Reset and pull
AppLog.info('reset dir: {}'.format(Constants.DirProjects))
AppLog.info('reset target: {}'.format(repo.head.target))
repo.state_cleanup()
repo.reset(repo.head.target, pygit2.GIT_RESET_HARD)
Signals.progressUpdated.emit(5, 100)
AppLog.info('pull into dir: {}'.format(
Constants.DirProjects))
self.pull(repo)
Signals.progressStoped.emit()
except Exception as e:
AppLog.exception(e)
AppLog.info('clone thread end')
Signals.progressStoped.emit()
Signals.cloneFinished.emit('')
CloneThread.quit()
class UpgradeThread(QObject):
"""自动更新
"""
UpdateUrl = [
('https://github.com/PyQt5/PyQtClient/raw/master/.Update/Upgrade.json',
'https://github.com/PyQt5/PyQtClient/raw/master/.Update/Upgrade.{}.zip'
),
('https://gitee.com/PyQt5/PyQtClient/raw/master/.Update/Upgrade.json',
'https://gitee.com/PyQt5/PyQtClient/raw/master/.Update/Upgrade.{}.zip'
),
('https://pyqt.site/PyQt5/PyQtClient/raw/master/.Update/Upgrade.json',
'https://pyqt.site/PyQt5/PyQtClient/raw/master/.Update/Upgrade.{}.zip'
),
('https://pyqt5.com/PyQt5/PyQtClient/raw/master/.Update/Upgrade.json',
'https://pyqt5.com/PyQt5/PyQtClient/raw/master/.Update/Upgrade.{}.zip')
]
@classmethod
def quit(cls):
"""退出线程
:param cls:
"""
if hasattr(cls, '_thread'):
cls._thread.quit()
AppLog.info('upgrade thread quit')
@classmethod
def start(cls, parent=None):
"""启动自动更新线程
:param cls:
"""
cls._thread = QThread(parent)
cls._worker = UpgradeThread()
cls._worker.moveToThread(cls._thread)
cls._thread.started.connect(cls._worker.run)
cls._thread.finished.connect(cls._worker.deleteLater)
cls._thread.start()
AppLog.info('update thread started')
def unzip(self, file):
        # Unpack the archive
zipfile = ZipFile(file)
path = os.path.abspath('.')
members = zipfile.namelist()
for zipinfo in members:
_name = zipinfo.lower()
if _name.endswith('.exe') or \
_name.endswith('.dll') or \
_name.endswith('.ttf') or \
_name.endswith('.so') or \
_name.endswith('.dylib'):
tpath = os.path.abspath(os.path.join(path, zipinfo))
                # A file currently in use must be renamed before extracting over it
if os.path.isfile(tpath):
os.rename(tpath, tpath + str(time()) + '.old')
zipfile.extract(zipinfo, path)
# zipfile.extractall(os.path.abspath('.'))
zipfile.close()
def download(self, file, url):
AppLog.debug('start download {}'.format(url))
with closing(requests.get(url, stream=True)) as response:
            # Maximum size of a single read
chunk_size = 1024
            # Total size of the response body
content_size = int(response.headers['content-length'])
data_count = 0
Signals.updateProgressChanged.emit(0, 0, content_size)
AppLog.debug('content_size: {}'.format(content_size))
with open(file, 'wb') as fp:
for data in response.iter_content(chunk_size=chunk_size):
fp.write(data)
data_count = data_count + len(data)
if content_size > 0:
Signals.updateProgressChanged.emit(
data_count, 0, content_size)
        # Unpack the downloaded archive
self.unzip(file)
AppLog.debug('download {} end'.format(file))
def run(self):
for url_ver, url_zip in self.UpdateUrl:
try:
show = True
req = requests.get(url_ver)
AppLog.info(req.text)
if req.status_code != 200:
AppLog.info('update thread end')
UpgradeThread.quit()
return
content = req.json()
for version, text in content:
if Version.version < version:
if show:
Signals.updateDialogShowed.emit()
QThread.msleep(1000)
show = False
Signals.updateTextChanged.emit(str(Version.version),
str(version), text)
self.download(Constants.UpgradeFile.format(version),
url_zip.format(version))
Signals.updateFinished.emit(self.tr('update completed'))
break
except Exception as e:
Signals.updateFinished.emit(
self.tr('update failed: {}').format(str(e)))
AppLog.exception(e)
AppLog.info('update thread end')
UpgradeThread.quit()
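# --- Illustrative sketch (a minimal sketch, not part of the original
# module): the threads-free core of CloneThread.run(), i.e. the pygit2
# pattern of cloning with a progress callback or hard-resetting an existing
# repository; the helper name is hypothetical.
def _demo_clone_or_reset(url, path):
    existing = pygit2.discover_repository(path)
    if not existing:
        # Fresh clone, reporting progress through the callback above
        pygit2.clone_repository(url, path, callbacks=ProgressCallback())
    else:
        # Discard local changes so a subsequent pull applies cleanly
        repo = pygit2.Repository(existing)
        repo.reset(repo.head.target, pygit2.GIT_RESET_HARD)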
|
da619caa3edf5658070ebc1e663d5f7f84db166b
|
6416b746ee71d897789eab1e450000831674dbd0
|
/src/otx/algorithms/common/adapters/mmcv/pipelines/transforms/augments.py
|
f8b2ed09e67cfa0f6611eadf1bde7318acdcf992
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/training_extensions
|
c921f83ad52311af96ff45ae0b88d0aecddd855b
|
80454808b38727e358e8b880043eeac0f18152fb
|
refs/heads/develop
| 2023-08-31T06:29:07.229339
| 2023-08-31T01:57:26
| 2023-08-31T01:57:26
| 154,843,614
| 397
| 230
|
Apache-2.0
| 2023-09-14T06:17:01
| 2018-10-26T14:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 8,725
|
py
|
augments.py
|
"""Module for defining Augments and CythonArguments class used for classification task."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import random
from typing import Union
from numpy import ndarray as CvImage
from PIL import Image, ImageEnhance, ImageOps
from PIL.Image import Image as PILImage
from PIL.Image import Resampling
# type: ignore[attr-defined]
# pylint: disable = no-name-in-module
import otx.algorithms.common.adapters.mmcv.pipelines.transforms.cython_augments.pil_augment as pil_aug
ImgTypes = Union[PILImage, CvImage]
class Augments: # pylint: disable=unused-argument
"""Augments class that implements various augmentations via plain PIL."""
@staticmethod
def _check_args_tf(kwargs):
def _interpolation(kwargs):
interpolation = kwargs.pop("resample", Resampling.BILINEAR)
if isinstance(interpolation, (list, tuple)):
# disable B311 random - used for the random sampling not for security/crypto
return random.choice(interpolation) # nosec B311
return interpolation
new_kwargs = {**kwargs, "resample": _interpolation(kwargs)}
return new_kwargs
@staticmethod
def autocontrast(img: PILImage, *args, **kwargs) -> PILImage:
"""Apply autocontrast for an given image."""
return ImageOps.autocontrast(img)
@staticmethod
def equalize(img: PILImage, *args, **kwargs) -> PILImage:
"""Apply equalize for an given image."""
return ImageOps.equalize(img)
@staticmethod
def solarize(img: PILImage, threshold: int, *args, **kwargs) -> PILImage:
"""Apply solarize for an given image."""
return ImageOps.solarize(img, threshold)
@staticmethod
def posterize(img: PILImage, bits_to_keep: int, *args, **kwargs) -> PILImage:
"""Apply posterize for an given image."""
if bits_to_keep >= 8:
return img
return ImageOps.posterize(img, bits_to_keep)
@staticmethod
def color(img: PILImage, factor: float, *args, **kwargs) -> PILImage:
"""Apply color for an given image."""
return ImageEnhance.Color(img).enhance(factor)
@staticmethod
def contrast(img: PILImage, factor: float, *args, **kwargs) -> PILImage:
"""Apply contrast for an given image."""
return ImageEnhance.Contrast(img).enhance(factor)
@staticmethod
def brightness(img: PILImage, factor: float, *args, **kwargs) -> PILImage:
"""Apply brightness for an given image."""
return ImageEnhance.Brightness(img).enhance(factor)
@staticmethod
def sharpness(img: PILImage, factor: float, *args, **kwargs) -> PILImage:
"""Apply sharpness for an given image."""
return ImageEnhance.Sharpness(img).enhance(factor)
@staticmethod
def rotate(img: PILImage, degree: float, *args, **kwargs) -> PILImage:
"""Apply rotate for an given image."""
kwargs = Augments._check_args_tf(kwargs)
return img.rotate(degree, **kwargs)
@staticmethod
def shear_x(img: PILImage, factor: float, *args, **kwargs) -> PILImage:
"""Apply shear_x for an given image."""
kwargs = Augments._check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)
@staticmethod
def shear_y(img: PILImage, factor: float, *args, **kwargs) -> PILImage:
"""Apply shear_y for an given image."""
kwargs = Augments._check_args_tf(kwargs)
return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)
@staticmethod
def translate_x_rel(img: PILImage, pct: float, *args, **kwargs) -> PILImage:
"""Apply translate_x_rel for an given image."""
kwargs = Augments._check_args_tf(kwargs)
pixels = pct * img.size[0]
return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
@staticmethod
def translate_y_rel(img: PILImage, pct: float, *args, **kwargs) -> PILImage:
"""Apply translate_y_rel for an given image."""
kwargs = Augments._check_args_tf(kwargs)
pixels = pct * img.size[1]
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
class CythonAugments(Augments):
"""CythonAugments class that support faster augmentation with cythonizing."""
@staticmethod
def autocontrast(img: ImgTypes, *args, **kwargs) -> ImgTypes:
"""Apply autocontrast for an given image."""
if Image.isImageType(img):
return pil_aug.autocontrast(img)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def equalize(img: ImgTypes, *args, **kwargs) -> ImgTypes:
"""Apply equalize for an given image."""
if Image.isImageType(img):
return pil_aug.equalize(img)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def solarize(img: ImgTypes, threshold: int, *args, **kwargs) -> ImgTypes:
"""Apply solarize for an given image."""
if Image.isImageType(img):
return pil_aug.solarize(img, threshold)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def posterize(img: ImgTypes, bits_to_keep: int, *args, **kwargs) -> ImgTypes:
"""Apply posterize for an given image."""
if Image.isImageType(img):
if bits_to_keep >= 8:
return img
return pil_aug.posterize(img, bits_to_keep)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def color(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes:
"""Apply color for an given image."""
if Image.isImageType(img):
return pil_aug.color(img, factor)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def contrast(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes:
"""Apply contrast for an given image."""
if Image.isImageType(img):
return pil_aug.contrast(img, factor)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def brightness(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes:
"""Apply brightness for an given image."""
if Image.isImageType(img):
return pil_aug.brightness(img, factor)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def sharpness(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes:
"""Apply sharpness for an given image."""
if Image.isImageType(img):
return pil_aug.sharpness(img, factor)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def rotate(img: ImgTypes, degree: float, *args, **kwargs) -> ImgTypes:
"""Apply rotate for an given image."""
Augments._check_args_tf(kwargs)
if Image.isImageType(img):
return pil_aug.rotate(img, degree)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def shear_x(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes:
"""Apply shear_x for an given image."""
Augments._check_args_tf(kwargs)
if Image.isImageType(img):
return pil_aug.shear_x(img, factor)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def shear_y(img: ImgTypes, factor: float, *args, **kwargs) -> ImgTypes:
"""Apply shear_y for an given image."""
if Image.isImageType(img):
return pil_aug.shear_y(img, factor)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def translate_x_rel(img: ImgTypes, pct: float, *args, **kwargs) -> ImgTypes:
"""Apply translate_x_rel for an given image."""
if Image.isImageType(img):
return pil_aug.translate_x_rel(img, pct)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def translate_y_rel(img: ImgTypes, pct: float, *args, **kwargs) -> ImgTypes:
"""Apply translate_y_rel for an given image."""
if Image.isImageType(img):
return pil_aug.translate_y_rel(img, pct)
raise NotImplementedError(f"Unknown type: {type(img)}")
@staticmethod
def blend(src: ImgTypes, dst: CvImage, weight: float = 0.0):
"""Apply blend for an given image."""
assert isinstance(dst, CvImage), f"Type of dst should be numpy array, but type(dst)={type(dst)}."
if Image.isImageType(src):
return pil_aug.blend(src, dst, weight)
raise NotImplementedError(f"Unknown type: {type(src)}")
|
a26d61b85737015cdc14feb3930060e1ae1065be
|
34364ec19543d70cf357e0675fd3593e7e95b7e4
|
/xarm/x3/xarm.py
|
9c1b5be91e4405f1a41c2b1e016eabfaa6058cc0
|
[
"BSD-3-Clause"
] |
permissive
|
xArm-Developer/xArm-Python-SDK
|
d4775a72d57376989ec04dffbbf08ca1a9b30c1b
|
8fd5fb8df1476ba210edca4e5563b7cdb1741bcf
|
refs/heads/master
| 2023-07-20T04:05:22.307666
| 2023-07-06T03:14:19
| 2023-07-06T03:14:19
| 158,533,616
| 125
| 76
|
BSD-3-Clause
| 2022-08-17T02:48:56
| 2018-11-21T10:51:39
|
Python
|
UTF-8
|
Python
| false
| false
| 94,067
|
py
|
xarm.py
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2020, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
import os
import math
import time
import uuid
import warnings
from collections.abc import Iterable
from ..core.config.x_config import XCONF
from ..core.utils.log import logger
from .base import Base
from .gripper import Gripper
from .track import Track
from .base_board import BaseBoard
from .servo import Servo
from .record import Record
from .robotiq import RobotIQ
from .ft_sensor import FtSensor
from .modbus_tcp import ModbusTcp
from .parse import GcodeParser
from .code import APIState
from .decorator import xarm_is_connected, xarm_is_ready, xarm_wait_until_not_pause, xarm_wait_until_cmdnum_lt_max
from .utils import to_radian
try:
# from ..tools.blockly_tool import BlocklyTool
from ..tools.blockly import BlocklyTool
except Exception:
print('import BlocklyTool module failed')
BlocklyTool = None
gcode_p = GcodeParser()
class XArm(Gripper, Servo, Record, RobotIQ, BaseBoard, Track, FtSensor, ModbusTcp):
def __init__(self, port=None, is_radian=False, do_not_open=False, instance=None, **kwargs):
super(XArm, self).__init__()
kwargs['init'] = True
self._api_instance = instance
Base.__init__(self, port, is_radian, do_not_open, **kwargs)
def _is_out_of_tcp_range(self, value, i):
if not self._check_tcp_limit or self._stream_type != 'socket' or not self._enable_report:
return False
tcp_range = XCONF.Robot.TCP_LIMITS.get(self.axis).get(self.device_type, [])
if 2 < i < len(tcp_range): # only limit rotate
limit = list(tcp_range[i])
limit[0] += self._position_offset[i]
limit[1] += self._position_offset[i]
limit[0] += self._world_offset[i]
limit[1] += self._world_offset[i]
if limit[0] == limit[1]:
return False
if value < limit[0] - math.radians(0.1) or value > limit[1] + math.radians(0.1):
self.log_api_info('API -> set_position -> out_of_tcp_range -> code={}, i={} value={}'.format(APIState.OUT_OF_RANGE, i, value), code=APIState.OUT_OF_RANGE)
return True
return False
def _is_out_of_joint_range(self, angle, i):
if not self._check_joint_limit or self._stream_type != 'socket' or not self._enable_report:
return False
joint_limit = XCONF.Robot.JOINT_LIMITS.get(self.axis).get(self.device_type, [])
if i < len(joint_limit):
angle_range = joint_limit[i]
if angle < angle_range[0] - math.radians(0.1) or angle > angle_range[1] + math.radians(0.1):
self.log_api_info('API -> set_servo_angle -> out_of_joint_range -> code={}, i={} value={}'.format(APIState.OUT_OF_RANGE, i, angle), code=APIState.OUT_OF_RANGE)
return True
return False
def __wait_sync(self):
while not self._is_sync or self._need_sync:
if not self.connected:
return APIState.NOT_CONNECTED
elif self.has_error:
return APIState.HAS_ERROR
elif self.is_stop:
return APIState.NOT_READY
time.sleep(0.05)
return 0
def __update_tcp_motion_params(self, speed, acc, mvtime, pose=None):
self._last_tcp_speed = speed
self._last_tcp_acc = acc
self._mvtime = mvtime
if pose is not None:
self._last_position = pose.copy()
def __update_joint_motion_params(self, speed, acc, mvtime, angles=None):
self._last_joint_speed = speed
self._last_joint_acc = acc
self._mvtime = mvtime
if angles is not None:
self._last_angles = angles.copy()
def __get_tcp_motion_params(self, speed=None, mvacc=None, mvtime=None, **kwargs):
speed = speed if speed is not None else kwargs.get('mvvelo', self._last_tcp_speed)
# spd = self._last_tcp_speed if speed is None else min(max(float(speed), self._min_tcp_speed), self._max_tcp_speed)
# acc = self._last_tcp_acc if mvacc is None else min(max(float(mvacc), self._min_tcp_acc), self._max_tcp_acc)
spd = self._last_tcp_speed if speed is None else min(max(float(speed), self._min_tcp_speed), 1000)
acc = self._last_tcp_acc if mvacc is None else min(max(float(mvacc), self._min_tcp_acc), 50000)
mvt = self._mvtime if mvtime is None else mvtime
return spd, acc, mvt
def __get_joint_motion_params(self, speed=None, mvacc=None, mvtime=None, is_radian=None, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
speed = speed if speed is not None else kwargs.get('mvvelo', None)
speed = self._last_joint_speed if speed is None else to_radian(speed, is_radian)
mvacc = self._last_joint_acc if mvacc is None else to_radian(mvacc, is_radian)
# spd = min(max(float(speed), self._min_joint_speed), self._max_joint_speed)
# acc = min(max(float(mvacc), self._min_joint_acc), self._max_joint_acc)
spd = min(max(float(speed), self._min_joint_speed), math.pi)
acc = min(max(float(mvacc), self._min_joint_acc), 20)
mvt = self._mvtime if mvtime is None else mvtime
return spd, acc, mvt
def _set_position_absolute(self, x=None, y=None, z=None, roll=None, pitch=None, yaw=None, radius=None,
speed=None, mvacc=None, mvtime=None, is_radian=None, wait=False, timeout=None, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
only_check_type = kwargs.get('only_check_type', self._only_check_type)
tcp_pos = [
self._last_position[0] if x is None else float(x),
self._last_position[1] if y is None else float(y),
self._last_position[2] if z is None else float(z),
self._last_position[3] if roll is None else to_radian(roll, is_radian),
self._last_position[4] if pitch is None else to_radian(pitch, is_radian),
self._last_position[5] if yaw is None else to_radian(yaw, is_radian),
]
motion_type = kwargs.get('motion_type', False)
for i in range(3):
if self._is_out_of_tcp_range(tcp_pos[i+3], i + 3):
return APIState.OUT_OF_RANGE
if kwargs.get('check', False):
_, limit = self.is_tcp_limit(tcp_pos, True)
if _ == 0 and limit is True:
return APIState.TCP_LIMIT
self._has_motion_cmd = True
spd, acc, mvt = self.__get_tcp_motion_params(speed, mvacc, mvtime, **kwargs)
radius = radius if radius is not None else -1
feedback_key, studio_wait = self._gen_feedback_key(wait, **kwargs)
if self.version_is_ge(1, 11, 100) or kwargs.get('debug', False):
ret = self.arm_cmd.move_line_common(tcp_pos, spd, acc, mvt, radius, coord=0, is_axis_angle=False, only_check_type=only_check_type, motion_type=motion_type, feedback_key=feedback_key)
else:
if radius >= 0:
ret = self.arm_cmd.move_lineb(tcp_pos, spd, acc, mvt, radius, only_check_type, motion_type=motion_type)
else:
ret = self.arm_cmd.move_line(tcp_pos, spd, acc, mvt, only_check_type, motion_type=motion_type)
trans_id = self._get_feedback_transid(feedback_key, studio_wait)
ret[0] = self._check_code(ret[0], is_move_cmd=True)
self.log_api_info('API -> set_position -> code={}, pos={}, radius={}, velo={}, acc={}'.format(
ret[0], tcp_pos, radius, spd, acc), code=ret[0])
self._is_set_move = True
self._only_check_result = 0
if only_check_type > 0 and ret[0] == 0:
self._only_check_result = ret[3]
return APIState.HAS_ERROR if ret[3] != 0 else ret[0]
if only_check_type <= 0 and wait and ret[0] == 0:
code = self.wait_move(timeout, trans_id=trans_id)
self.__update_tcp_motion_params(spd, acc, mvt)
self._sync()
return code
if only_check_type <= 0 and (ret[0] >= 0 or self.get_is_moving()):
self.__update_tcp_motion_params(spd, acc, mvt, tcp_pos)
return ret[0]
def _set_position_relative(self, x=None, y=None, z=None, roll=None, pitch=None, yaw=None, radius=None,
speed=None, mvacc=None, mvtime=None, is_radian=None, wait=False, timeout=None, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
only_check_type = kwargs.get('only_check_type', self._only_check_type)
motion_type = kwargs.get('motion_type', False)
if self.version_is_ge(1, 8, 100):
# use relative api
tcp_pos = [
0 if x is None else float(x),
0 if y is None else float(y),
0 if z is None else float(z),
0 if roll is None else to_radian(roll, is_radian),
0 if pitch is None else to_radian(pitch, is_radian),
0 if yaw is None else to_radian(yaw, is_radian),
]
self._has_motion_cmd = True
spd, acc, mvt = self.__get_tcp_motion_params(speed, mvacc, mvtime, **kwargs)
radius = radius if radius is not None else -1
feedback_key, studio_wait = self._gen_feedback_key(wait, **kwargs)
ret = self.arm_cmd.move_relative(tcp_pos, spd, acc, mvt, radius, False, False, only_check_type, motion_type=motion_type, feedback_key=feedback_key)
trans_id = self._get_feedback_transid(feedback_key, studio_wait)
ret[0] = self._check_code(ret[0], is_move_cmd=True)
self.log_api_info('API -> set_relative_position -> code={}, pos={}, radius={}, velo={}, acc={}'.format(
ret[0], tcp_pos, radius, spd, acc), code=ret[0])
self._is_set_move = True
self._only_check_result = 0
if only_check_type > 0 and ret[0] == 0:
self._only_check_result = ret[3]
return APIState.HAS_ERROR if ret[3] != 0 else ret[0]
if only_check_type <= 0 and wait and ret[0] == 0:
code = self.wait_move(timeout, trans_id=trans_id)
self.__update_tcp_motion_params(spd, acc, mvt)
self._sync()
return code
if only_check_type <= 0 and (ret[0] >= 0 or self.get_is_moving()):
self.__update_tcp_motion_params(spd, acc, mvt)
return ret[0]
else:
# use absolute api
tcp_pos = [
self._last_position[0] if x is None else (self._last_position[0] + float(x)),
self._last_position[1] if y is None else (self._last_position[1] + float(y)),
self._last_position[2] if z is None else (self._last_position[2] + float(z)),
self._last_position[3] if roll is None else (self._last_position[3] + to_radian(roll, is_radian)),
self._last_position[4] if pitch is None else (self._last_position[4] + to_radian(pitch, is_radian)),
self._last_position[5] if yaw is None else (self._last_position[5] + to_radian(yaw, is_radian)),
]
return self._set_position_absolute(*tcp_pos, radius=radius, speed=speed, mvacc=mvacc, mvtime=mvtime,
is_radian=True, wait=wait, timeout=timeout, **kwargs)
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_position(self, x=None, y=None, z=None, roll=None, pitch=None, yaw=None, radius=None,
speed=None, mvacc=None, mvtime=None, relative=False, is_radian=None,
wait=False, timeout=None, **kwargs):
only_check_type = kwargs.get('only_check_type', self._only_check_type)
if only_check_type > 0 and wait:
code = self.wait_move(timeout=timeout)
if code != 0:
return code
code = self.__wait_sync()
if code != 0:
return code
if relative:
return self._set_position_relative(x=x, y=y, z=z, roll=roll, pitch=pitch, yaw=yaw, radius=radius,
speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian,
wait=wait, timeout=timeout, **kwargs)
else:
return self._set_position_absolute(x=x, y=y, z=z, roll=roll, pitch=pitch, yaw=yaw, radius=radius,
speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian,
wait=wait, timeout=timeout, **kwargs)
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_tool_position(self, x=0, y=0, z=0, roll=0, pitch=0, yaw=0,
speed=None, mvacc=None, mvtime=None, is_radian=None,
wait=False, timeout=None, radius=None, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
only_check_type = kwargs.get('only_check_type', self._only_check_type)
if only_check_type > 0 and wait:
code = self.wait_move(timeout=timeout)
if code != 0:
return code
tcp_pos = [
x, y, z,
to_radian(roll, is_radian),
to_radian(pitch, is_radian),
to_radian(yaw, is_radian)
]
spd, acc, mvt = self.__get_tcp_motion_params(speed, mvacc, mvtime, **kwargs)
self._has_motion_cmd = True
motion_type = kwargs.get('motion_type', False)
radius = radius if radius is not None else -1
feedback_key, studio_wait = self._gen_feedback_key(wait, **kwargs)
if self.version_is_ge(1, 11, 100) or kwargs.get('debug', False):
ret = self.arm_cmd.move_line_common(tcp_pos, spd, acc, mvt, radius, coord=1, is_axis_angle=False, only_check_type=only_check_type, motion_type=motion_type, feedback_key=feedback_key)
else:
ret = self.arm_cmd.move_line_tool(tcp_pos, spd, acc, mvt, only_check_type, motion_type=motion_type)
trans_id = self._get_feedback_transid(feedback_key, studio_wait)
ret[0] = self._check_code(ret[0], is_move_cmd=True)
self.log_api_info('API -> set_tool_position -> code={}, pos={}, velo={}, acc={}'.format(
ret[0], tcp_pos, spd, acc), code=ret[0])
self._is_set_move = True
self._only_check_result = 0
if only_check_type > 0 and ret[0] == 0:
self._only_check_result = ret[3]
return APIState.HAS_ERROR if ret[3] != 0 else ret[0]
if only_check_type <= 0 and wait and ret[0] == 0:
code = self.wait_move(timeout, trans_id=trans_id)
self.__update_tcp_motion_params(spd, acc, mvt)
self._sync()
return code
if only_check_type <= 0 and (ret[0] >= 0 or self.get_is_moving()):
self.__update_tcp_motion_params(spd, acc, mvt)
return ret[0]
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_position_aa(self, mvpose, speed=None, mvacc=None, mvtime=None,
is_radian=None, is_tool_coord=False, relative=False,
wait=False, timeout=None, radius=None, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
only_check_type = kwargs.get('only_check_type', self._only_check_type)
if only_check_type > 0 and wait:
code = self.wait_move(timeout=timeout)
if code != 0:
return code
tcp_pos = [to_radian(mvpose[i], is_radian or i <= 2) for i in range(6)]
spd, acc, mvt = self.__get_tcp_motion_params(speed, mvacc, mvtime, **kwargs)
mvcoord = kwargs.get('mvcoord', int(is_tool_coord))
self._has_motion_cmd = True
motion_type = kwargs.get('motion_type', False)
radius = radius if radius is not None else -1
feedback_key, studio_wait = self._gen_feedback_key(wait, **kwargs)
if self.version_is_ge(1, 11, 100) or kwargs.get('debug', False):
if not is_tool_coord and relative:
ret = self.arm_cmd.move_relative(tcp_pos, spd, acc, mvt, radius, False, True, only_check_type, motion_type=motion_type, feedback_key=feedback_key)
else:
ret = self.arm_cmd.move_line_common(tcp_pos, spd, acc, mvt, radius, coord=1 if is_tool_coord else 0, is_axis_angle=True, only_check_type=only_check_type, motion_type=motion_type, feedback_key=feedback_key)
else:
ret = self.arm_cmd.move_line_aa(tcp_pos, spd, acc, mvt, mvcoord, int(relative), only_check_type, motion_type=motion_type)
trans_id = self._get_feedback_transid(feedback_key, studio_wait)
ret[0] = self._check_code(ret[0], is_move_cmd=True)
self.log_api_info('API -> set_position_aa -> code={}, pos={}, velo={}, acc={}'.format(
ret[0], tcp_pos, spd, acc), code=ret[0])
self._is_set_move = True
self._only_check_result = 0
if only_check_type > 0 and ret[0] == 0:
self._only_check_result = ret[3]
return APIState.HAS_ERROR if ret[3] != 0 else ret[0]
if only_check_type <= 0 and wait and ret[0] == 0:
code = self.wait_move(timeout, trans_id=trans_id)
self.__update_tcp_motion_params(spd, acc, mvt)
self._sync()
return code
if only_check_type <= 0 and (ret[0] >= 0 or self.get_is_moving()):
self.__update_tcp_motion_params(spd, acc, mvt)
return ret[0]
@xarm_wait_until_not_pause
@xarm_is_ready(_type='set')
def set_servo_cartesian_aa(self, mvpose, speed=None, mvacc=None, is_radian=None, is_tool_coord=False, relative=False, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
assert len(mvpose) >= 6
tcp_pos = [to_radian(mvpose[i], is_radian or i <= 2) for i in range(6)]
spd, acc, mvt = self.__get_tcp_motion_params(speed, mvacc, self._mvtime, **kwargs)
tool_coord = kwargs.get('tool_coord', int(is_tool_coord))
self._has_motion_cmd = True
ret = self.arm_cmd.move_servo_cart_aa(mvpose=tcp_pos, mvvelo=spd, mvacc=acc, tool_coord=tool_coord, relative=int(relative))
ret[0] = self._check_code(ret[0], is_move_cmd=True, mode=1)
self.log_api_info('API -> set_servo_cartesian_aa -> code={}, pose={}, velo={}, acc={}'.format(
ret[0], tcp_pos, spd, acc
), code=ret[0])
self._is_set_move = True
return ret[0]
def _set_servo_angle_absolute(self, angles, speed=None, mvacc=None, mvtime=None,
is_radian=None, wait=False, timeout=None, radius=None, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
only_check_type = kwargs.get('only_check_type', self._only_check_type)
joints = self._last_angles.copy()
for i in range(min(len(self._last_angles), len(angles))):
if i >= self.axis or angles[i] is None:
continue
joints[i] = to_radian(angles[i], is_radian)
if self._is_out_of_joint_range(joints[i], i):
return APIState.OUT_OF_RANGE
if kwargs.get('check', False):
_, limit = self.is_joint_limit(joints, True)
if _ == 0 and limit is True:
return APIState.JOINT_LIMIT
spd, acc, mvt = self.__get_joint_motion_params(speed, mvacc, mvtime, is_radian=is_radian, **kwargs)
self._has_motion_cmd = True
feedback_key, studio_wait = self._gen_feedback_key(wait, **kwargs)
if self.version_is_ge(1, 5, 20) and radius is not None and radius >= 0:
ret = self.arm_cmd.move_jointb(joints, spd, acc, radius, only_check_type, feedback_key=feedback_key)
else:
ret = self.arm_cmd.move_joint(joints, spd, acc, mvt, only_check_type, feedback_key=feedback_key)
trans_id = self._get_feedback_transid(feedback_key, studio_wait)
ret[0] = self._check_code(ret[0], is_move_cmd=True)
self.log_api_info('API -> set_servo_angle -> code={}, angles={}, velo={}, acc={}, radius={}'.format(
ret[0], joints, spd, acc, radius
), code=ret[0])
self._is_set_move = True
self._only_check_result = 0
if only_check_type > 0 and ret[0] == 0:
self._only_check_result = ret[3]
return APIState.HAS_ERROR if ret[3] != 0 else ret[0]
if only_check_type <= 0 and wait and ret[0] == 0:
code = self.wait_move(timeout, trans_id=trans_id)
self.__update_joint_motion_params(spd, acc, mvt)
self._sync()
return code
if only_check_type <= 0 and (ret[0] >= 0 or self.get_is_moving()):
self.__update_joint_motion_params(spd, acc, mvt, joints)
return ret[0]
def _set_servo_angle_relative(self, angles, speed=None, mvacc=None, mvtime=None,
is_radian=None, wait=False, timeout=None, radius=None, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
only_check_type = kwargs.get('only_check_type', self._only_check_type)
if self.version_is_ge(1, 8, 100):
# use relative api
joints = [0] * 7
for i in range(min(7, len(angles))):
if i >= self.axis or angles[i] is None:
continue
joints[i] = to_radian(angles[i], is_radian)
self._has_motion_cmd = True
spd, acc, mvt = self.__get_joint_motion_params(speed, mvacc, mvtime, is_radian=is_radian, **kwargs)
radius = radius if radius is not None else -1
feedback_key, studio_wait = self._gen_feedback_key(wait, **kwargs)
ret = self.arm_cmd.move_relative(joints, spd, acc, mvt, radius, True, False, only_check_type, feedback_key=feedback_key)
trans_id = self._get_feedback_transid(feedback_key, studio_wait)
ret[0] = self._check_code(ret[0], is_move_cmd=True)
self.log_api_info('API -> set_relative_servo_angle -> code={}, angles={}, velo={}, acc={}, radius={}'.format(
ret[0], joints, spd, acc, radius
), code=ret[0])
self._is_set_move = True
self._only_check_result = 0
if only_check_type > 0 and ret[0] == 0:
self._only_check_result = ret[3]
return APIState.HAS_ERROR if ret[3] != 0 else ret[0]
if only_check_type <= 0 and wait and ret[0] == 0:
code = self.wait_move(timeout, trans_id=trans_id)
self.__update_joint_motion_params(spd, acc, mvt)
self._sync()
return code
if only_check_type <= 0 and (ret[0] >= 0 or self.get_is_moving()):
self.__update_joint_motion_params(spd, acc, mvt)
return ret[0]
else:
# use absolute api
joints = self._last_angles.copy()
for i in range(min(len(self._last_angles), len(angles))):
if i >= self.axis or angles[i] is None:
continue
joints[i] = to_radian(angles[i], is_radian)
if self._is_out_of_joint_range(joints[i], i):
return APIState.OUT_OF_RANGE
return self._set_servo_angle_absolute(joints, speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=True,
wait=wait, timeout=timeout, radius=radius, **kwargs)
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_servo_angle(self, servo_id=None, angle=None, speed=None, mvacc=None, mvtime=None,
relative=False, is_radian=None, wait=False, timeout=None, radius=None, **kwargs):
assert ((servo_id is None or servo_id == 8) and isinstance(angle, Iterable)) \
or (1 <= servo_id <= 7 and angle is not None and not isinstance(angle, Iterable)), \
'param servo_id or angle error'
if servo_id is not None and servo_id != 8:
if servo_id > self.axis or servo_id <= 0:
return APIState.SERVO_NOT_EXIST
angles = [None] * 7
angles[servo_id - 1] = angle
else:
angles = angle
only_check_type = kwargs.get('only_check_type', self._only_check_type)
if only_check_type > 0 and wait:
code = self.wait_move(timeout=timeout)
if code != 0:
return code
code = self.__wait_sync()
if code != 0:
return code
if relative:
return self._set_servo_angle_relative(angles, speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian,
wait=wait, timeout=timeout, radius=radius, **kwargs)
else:
return self._set_servo_angle_absolute(angles, speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian,
wait=wait, timeout=timeout, radius=radius, **kwargs)
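# Usage sketch (illustrative; XArmAPI, the import path and the IP address are
# assumptions, not part of this file):
#   from xarm.wrapper import XArmAPI
#   arm = XArmAPI('192.168.1.100')
#   arm.motion_enable(True)
#   arm.set_mode(0)
#   arm.set_state(0)
#   # absolute motion of all joints, blocking until done
#   code = arm.set_servo_angle(angle=[0, -30, 0, 30, 0, 60, 0], speed=25, is_radian=False, wait=True)
#   # relative motion of a single joint
#   code = arm.set_servo_angle(servo_id=1, angle=10, relative=True, is_radian=False, wait=True)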
@xarm_is_ready(_type='set')
def set_servo_angle_j(self, angles, speed=None, mvacc=None, mvtime=None, is_radian=None, **kwargs):
# if not self._check_mode_is_correct(1):
# return APIState.MODE_IS_NOT_CORRECT
is_radian = self._default_is_radian if is_radian is None else is_radian
angs = [to_radian(angle, is_radian) for angle in angles]
for i in range(self.axis):
if self._is_out_of_joint_range(angs[i], i):
return APIState.OUT_OF_RANGE
while len(angs) < 7:
angs.append(0)
spd, acc, mvt = self.__get_joint_motion_params(speed, mvacc, mvtime, is_radian=is_radian, **kwargs)
self._has_motion_cmd = True
ret = self.arm_cmd.move_servoj(angs, spd, acc, mvt)
ret[0] = self._check_code(ret[0], is_move_cmd=True, mode=1)
self.log_api_info('API -> set_servo_angle_j -> code={}, angles={}, velo={}, acc={}'.format(
ret[0], angs, spd, acc
), code=ret[0])
self._is_set_move = True
return ret[0]
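# Streaming sketch (illustrative trajectory): per the commented mode check above,
# set_servo_angle_j is meant for mode 1 (servo motion mode) and a high command rate.
#   arm.set_mode(1)
#   arm.set_state(0)
#   for t in range(100):
#       angles = [t * 0.001, 0, 0, 0, 0, 0, 0]   # radians
#       arm.set_servo_angle_j(angles, is_radian=True)
#       time.sleep(0.01)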
@xarm_is_ready(_type='set')
def set_servo_cartesian(self, mvpose, speed=None, mvacc=None, mvtime=None, is_radian=None, is_tool_coord=False, **kwargs):
# if not self._check_mode_is_correct(1):
# return APIState.MODE_IS_NOT_CORRECT
assert len(mvpose) >= 6
is_radian = self._default_is_radian if is_radian is None else is_radian
tcp_pos = [to_radian(mvpose[i], is_radian or i <= 2) for i in range(6)]
spd, acc, mvt = self.__get_tcp_motion_params(speed, mvacc, mvtime, **kwargs)
self._has_motion_cmd = True
ret = self.arm_cmd.move_servo_cartesian(tcp_pos, spd, acc, int(is_tool_coord))
ret[0] = self._check_code(ret[0], is_move_cmd=True, mode=1)
self.log_api_info('API -> set_servo_cartesian -> code={}, pose={}, velo={}, acc={}, is_tool_coord={}'.format(
ret[0], tcp_pos, spd, acc, is_tool_coord
), code=ret[0])
self._is_set_move = True
return ret[0]
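# Streaming sketch (illustrative; mode 1 assumed, as for set_servo_angle_j):
#   arm.set_mode(1)
#   arm.set_state(0)
#   x = 300
#   for _ in range(100):
#       x += 0.5
#       arm.set_servo_cartesian([x, 0, 200, 180, 0, 0], is_radian=False)
#       time.sleep(0.01)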
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def move_circle(self, pose1, pose2, percent, speed=None, mvacc=None, mvtime=None, is_radian=None,
wait=False, timeout=None, is_tool_coord=False, is_axis_angle=False, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
only_check_type = kwargs.get('only_check_type', self._only_check_type)
if only_check_type > 0 and wait:
code = self.wait_move(timeout=timeout)
if code != 0:
return code
pose_1 = []
pose_2 = []
for i in range(6):
pose_1.append(to_radian(pose1[i], is_radian or i <= 2))
pose_2.append(to_radian(pose2[i], is_radian or i <= 2))
spd, acc, mvt = self.__get_tcp_motion_params(speed, mvacc, mvtime, **kwargs)
self._has_motion_cmd = True
feedback_key, studio_wait = self._gen_feedback_key(wait, **kwargs)
if self.version_is_ge(1, 11, 100) or kwargs.get('debug', False):
ret = self.arm_cmd.move_circle_common(pose_1, pose_2, spd, acc, mvt, percent, coord=1 if is_tool_coord else 0, is_axis_angle=is_axis_angle, only_check_type=only_check_type, feedback_key=feedback_key)
else:
ret = self.arm_cmd.move_circle(pose_1, pose_2, spd, acc, mvt, percent, only_check_type)
trans_id = self._get_feedback_transid(feedback_key, studio_wait)
ret[0] = self._check_code(ret[0], is_move_cmd=True)
self.log_api_info('API -> move_circle -> code={}, pos1={}, pos2={}, percent={}%, velo={}, acc={}'.format(
ret[0], pose_1, pose_2, percent, spd, acc), code=ret[0])
self._is_set_move = True
self._only_check_result = 0
if only_check_type > 0 and ret[0] == 0:
self._only_check_result = ret[3]
return APIState.HAS_ERROR if ret[3] != 0 else ret[0]
if only_check_type <= 0 and wait and ret[0] == 0:
code = self.wait_move(timeout, trans_id=trans_id)
self.__update_tcp_motion_params(spd, acc, mvt)
self._sync()
return code
if only_check_type <= 0 and (ret[0] >= 0 or self.get_is_moving()):
self.__update_tcp_motion_params(spd, acc, mvt)
return ret[0]
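# Usage sketch (illustrative poses): the arc through pose1 and pose2 starts from the
# current position; percent=50 traces half the circle, percent=100 a full circle.
#   code = arm.move_circle([300, 100, 200, 180, 0, 0], [300, -100, 200, 180, 0, 0],
#                          percent=50, speed=100, is_radian=False, wait=True)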
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def move_gohome(self, speed=None, mvacc=None, mvtime=None, is_radian=None, wait=False, timeout=None, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
only_check_type = kwargs.get('only_check_type', self._only_check_type)
if only_check_type > 0 and wait:
code = self.wait_move(timeout=timeout)
if code != 0:
return code
spd, acc, mvt = self.__get_joint_motion_params(speed, mvacc, mvtime, is_radian=is_radian, **kwargs)
self._has_motion_cmd = True
feedback_key, studio_wait = self._gen_feedback_key(wait, **kwargs)
ret = self.arm_cmd.move_gohome(spd, acc, mvt, only_check_type, feedback_key=feedback_key)
trans_id = self._get_feedback_transid(feedback_key, studio_wait)
ret[0] = self._check_code(ret[0], is_move_cmd=True)
self.log_api_info('API -> move_gohome -> code={}, velo={}, acc={}'.format(
ret[0], spd, acc
), code=ret[0])
self._is_set_move = True
self._only_check_result = 0
if only_check_type > 0 and ret[0] == 0:
self._only_check_result = ret[3]
return APIState.HAS_ERROR if ret[3] != 0 else ret[0]
if only_check_type <= 0 and wait and ret[0] == 0:
code = self.wait_move(timeout, trans_id=trans_id)
self._sync()
return code
return ret[0]
@xarm_is_ready(_type='set')
def move_arc_lines(self, paths, is_radian=None, times=1, first_pause_time=0.1, repeat_pause_time=0,
automatic_calibration=True, speed=None, mvacc=None, mvtime=None, wait=False):
assert len(paths) > 0, 'parameter paths error'
is_radian = self._default_is_radian if is_radian is None else is_radian
spd, acc, mvt = self.__get_tcp_motion_params(speed, mvacc, mvtime)
logger.info('move_arc_lines--begin')
if automatic_calibration:
_ = self.set_position(*paths[0], is_radian=is_radian, speed=spd, mvacc=acc, mvtime=mvt, wait=True)
if _ < 0:
logger.error('quit, api failed, code={}'.format(_))
return
_, angles = self.get_servo_angle(is_radian=True)
if first_pause_time > 0:
self.set_pause_time(first_pause_time)
last_used_joint_speed = self._last_joint_speed
def _move():
if automatic_calibration:
ret = self.set_servo_angle(angle=angles, is_radian=True, speed=0.8726646259971648, wait=False)  # ~50 °/s in rad/s
if ret < 0:
return -1
self._last_joint_speed = last_used_joint_speed
for path in paths:
if len(path) > 6 and path[6] >= 0:
radius = path[6]
else:
radius = 0
if self.has_error or self.is_stop:
return -2
ret = self.set_position(*path[:6], radius=radius, is_radian=is_radian, wait=False, speed=spd, mvacc=acc, mvtime=mvt)
if ret < 0:
return -1
return 0
count = 1
api_failed = False
try:
if times == 0:
while not self.has_error and not self.is_stop:
_ = _move()
if _ == -1:
api_failed = True
break
elif _ == -2:
break
count += 1
if repeat_pause_time > 0:
self.set_pause_time(repeat_pause_time)
if api_failed:
logger.error('quit, api error')
elif self._error_code != 0:
logger.error('quit, controller error')
elif self.is_stop:
logger.error('quit, emergency_stop')
else:
for i in range(times):
if self.has_error or self.is_stop:
break
_ = _move()
if _ == -1:
api_failed = True
break
elif _ == -2:
break
count += 1
if repeat_pause_time > 0:
self.set_pause_time(repeat_pause_time)
if api_failed:
logger.error('quit, api error')
elif self._error_code != 0:
logger.error('quit, controller error')
elif self.is_stop:
logger.error('quit, emergency_stop')
except Exception as e:
logger.error('move_arc_lines exception: {}'.format(e))
logger.info('move_arc_lines--end')
if wait:
self.wait_move()
self._sync()
@xarm_is_connected(_type='set')
def set_servo_attach(self, servo_id=None):
# assert isinstance(servo_id, int) and 1 <= servo_id <= 8
# ret = self.arm_cmd.set_brake(servo_id, 0)
logger.info('set_servo_attach--begin')
ret = self.motion_enable(servo_id=servo_id, enable=True)
self.set_state(0)
self._sync()
logger.info('set_servo_attach--end')
return ret
@xarm_is_connected(_type='set')
def set_servo_detach(self, servo_id=None):
"""
:param servo_id: 1-7, 8
:return:
"""
assert isinstance(servo_id, int) and 1 <= servo_id <= 8, 'The value of parameter servo_id can only be 1-8.'
ret = self.arm_cmd.set_brake(servo_id, 1)
self.log_api_info('API -> set_servo_detach -> code={}'.format(ret[0]), code=ret[0])
self._sync()
return ret[0]
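# Manual-mode sketch (illustrative): detaching releases the joint brake(s) so the arm
# can be moved by hand; set_servo_attach re-enables motion and resets the state.
#   arm.set_servo_detach(servo_id=8)   # 8 = all joints
#   # ... move the arm by hand ...
#   arm.set_servo_attach(servo_id=8)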
@xarm_is_connected(_type='set')
def system_control(self, value=1):
ret = self.arm_cmd.system_control(value)
self.log_api_info('API -> system_control({}) -> code={}'.format(value, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_reduced_mode(self, on_off):
ret = self.arm_cmd.set_reduced_mode(int(on_off))
self.log_api_info('API -> set_reduced_mode -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_reduced_max_tcp_speed(self, speed):
ret = self.arm_cmd.set_reduced_linespeed(speed)
self.log_api_info('API -> set_reduced_linespeed -> code={}, speed={}'.format(ret[0], speed), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_reduced_max_joint_speed(self, speed, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
speed = to_radian(speed, is_radian)
ret = self.arm_cmd.set_reduced_jointspeed(speed)
self.log_api_info('API -> set_reduced_jointspeed -> code={}, speed={}'.format(ret[0], speed), code=ret[0])
return ret[0]
@xarm_is_connected(_type='get')
def get_reduced_mode(self):
ret = self.arm_cmd.get_reduced_mode()
ret[0] = self._check_code(ret[0])
return ret[0], ret[1]
@xarm_is_connected(_type='get')
def get_reduced_states(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_reduced_states(79 if self.version_is_ge(1, 2, 11) else 21)
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
if not is_radian:
ret[4] = round(math.degrees(ret[4]), 1)
if self.version_is_ge(1, 2, 11):
# ret[5] = list(map(math.degrees, ret[5]))
ret[5] = list(map(lambda x: round(math.degrees(x), 2), ret[5]))
return ret[0], ret[1:]
@xarm_is_connected(_type='set')
def set_reduced_tcp_boundary(self, boundary):
assert len(boundary) >= 6
boundary = list(map(int, boundary))
limits = [0] * 6
limits[0:2] = boundary[0:2] if boundary[0] >= boundary[1] else boundary[0:2][::-1]
limits[2:4] = boundary[2:4] if boundary[2] >= boundary[3] else boundary[2:4][::-1]
limits[4:6] = boundary[4:6] if boundary[4] >= boundary[5] else boundary[4:6][::-1]
ret = self.arm_cmd.set_xyz_limits(limits)
self.log_api_info('API -> set_reduced_tcp_boundary -> code={}, boundary={}'.format(ret[0], limits), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_reduced_joint_range(self, joint_range, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
assert len(joint_range) >= self.axis * 2
joint_range = list(map(float, joint_range))
limits = [0] * 14
for i in range(7):
if i < self.axis:
limits[i*2:i*2+2] = joint_range[i*2:i*2+2] if joint_range[i*2] <= joint_range[i*2+1] else joint_range[i*2:i*2+2][::-1]
if not is_radian:
limits = list(map(math.radians, limits))
# limits = list(map(lambda x: round(math.radians(x), 3), limits))
for i in range(self.axis):
joint_limit = XCONF.Robot.JOINT_LIMITS.get(self.axis).get(self.device_type, [])
if i < len(joint_limit):
angle_range = joint_limit[i]
# angle_range = list(map(lambda x: round(x, 3), joint_limit[i]))
if limits[i * 2] < angle_range[0]:
limits[i * 2] = angle_range[0]
if limits[i * 2 + 1] > angle_range[1]:
limits[i * 2 + 1] = angle_range[1]
if limits[i * 2] >= angle_range[1]:
return APIState.OUT_OF_RANGE
if limits[i * 2 + 1] <= angle_range[0]:
return APIState.OUT_OF_RANGE
ret = self.arm_cmd.set_reduced_jrange(limits)
self.log_api_info('API -> set_reduced_joint_range -> code={}, boundary={}'.format(ret[0], limits), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_fense_mode(self, on_off):
ret = self.arm_cmd.set_fense_on(int(on_off))
self.log_api_info('API -> set_fense_mode -> code={}, on={}'.format(ret[0], on_off), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_collision_rebound(self, on_off):
ret = self.arm_cmd.set_collis_reb(int(on_off))
self.log_api_info('API -> set_collision_rebound -> code={}, on={}'.format(ret[0], on_off), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_timer(self, secs_later, tid, fun_code, param1=0, param2=0):
ret = self.arm_cmd.set_timer(secs_later, tid, fun_code, param1, param2)
return ret[0]
@xarm_is_connected(_type='set')
def cancel_timer(self, tid):
ret = self.arm_cmd.cancel_timer(tid)
return ret[0]
@xarm_wait_until_not_pause
@xarm_is_connected(_type='set')
def set_world_offset(self, offset, is_radian=None, wait=True):
is_radian = self._default_is_radian if is_radian is None else is_radian
assert isinstance(offset, Iterable) and len(offset) >= 6
world_offset = [0] * 6
for i in range(min(len(offset), 6)):
world_offset[i] = to_radian(offset[i], is_radian or i <= 2)
if wait:
if self._support_feedback:
self.wait_all_task_finish()
else:
self.wait_move()
ret = self.arm_cmd.set_world_offset(world_offset)
self.log_api_info('API -> set_world_offset -> code={}, offset={}'.format(ret[0], world_offset), code=ret[0])
return ret[0]
def reset(self, speed=None, mvacc=None, mvtime=None, is_radian=None, wait=False, timeout=None):
logger.info('reset--begin')
is_radian = self._default_is_radian if is_radian is None else is_radian
if not self._enable_report or self._stream_type != 'socket':
self.get_err_warn_code()
self.get_state()
if self._warn_code != 0:
self.clean_warn()
if self._error_code != 0:
self.clean_error()
self.motion_enable(enable=True, servo_id=8)
self.set_state(0)
if not self._is_ready:
self.motion_enable(enable=True, servo_id=8)
self.set_state(state=0)
self.move_gohome(speed=speed, mvacc=mvacc, mvtime=mvtime, is_radian=is_radian, wait=wait, timeout=timeout)
logger.info('reset--end')
# # This interface is no longer supported
# @xarm_is_ready(_type='set')
# def set_joints_torque(self, joints_torque):
# ret = self.arm_cmd.set_servot(joints_torque)
# self.log_api_info('API -> set_joints_torque -> code={}, joints_torque={}'.format(ret[0], joints_torque), code=ret[0])
# return ret[0]
@xarm_is_connected(_type='get')
def get_joints_torque(self, servo_id=None):
ret = self.arm_cmd.get_joint_tau()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 7:
self._joints_torque = [float('{:.6f}'.format(ret[i])) for i in range(1, 8)]
if servo_id is None or servo_id == 8 or len(self._joints_torque) < servo_id:
return ret[0], list(self._joints_torque)
else:
return ret[0], self._joints_torque[servo_id - 1]
@xarm_is_connected(_type='get')
def get_safe_level(self):
ret = self.arm_cmd.get_safe_level()
return ret[0], ret[1]
@xarm_is_connected(_type='set')
def set_safe_level(self, level=4):
ret = self.arm_cmd.set_safe_level(level)
self.log_api_info('API -> set_safe_level -> code={}, level={}'.format(ret[0], level), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_pause_time(self, sltime, wait=False):
assert isinstance(sltime, (int, float))
ret = self.arm_cmd.sleep_instruction(sltime)
if wait:
time.sleep(sltime)
else:
if time.monotonic() >= self._sleep_finish_time:
self._sleep_finish_time = time.monotonic() + sltime
else:
self._sleep_finish_time += sltime
self.log_api_info('API -> set_pause_time -> code={}, sltime={}'.format(ret[0], sltime), code=ret[0])
return ret[0]
def set_sleep_time(self, sltime, wait=False):
return self.set_pause_time(sltime, wait)
@xarm_wait_until_not_pause
@xarm_is_connected(_type='set')
def set_tcp_offset(self, offset, is_radian=None, wait=True, **kwargs):
is_radian = self._default_is_radian if is_radian is None else is_radian
assert isinstance(offset, Iterable) and len(offset) >= 6
tcp_offset = [0] * 6
for i in range(min(len(offset), 6)):
tcp_offset[i] = to_radian(offset[i], is_radian or i <= 2)
if wait:
if self._support_feedback:
self.wait_all_task_finish()
else:
self.wait_move()
ret = self.arm_cmd.set_tcp_offset(tcp_offset)
self.log_api_info('API -> set_tcp_offset -> code={}, offset={}'.format(ret[0], tcp_offset), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_tcp_jerk(self, jerk):
ret = self.arm_cmd.set_tcp_jerk(jerk)
self.log_api_info('API -> set_tcp_jerk -> code={}, jerk={}'.format(ret[0], jerk), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_tcp_maxacc(self, acc):
ret = self.arm_cmd.set_tcp_maxacc(acc)
self.log_api_info('API -> set_tcp_maxacc -> code={}, maxacc={}'.format(ret[0], acc), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_joint_jerk(self, jerk, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
jerk = to_radian(jerk, is_radian)
ret = self.arm_cmd.set_joint_jerk(jerk)
self.log_api_info('API -> set_joint_jerk -> code={}, jerk={}'.format(ret[0], jerk), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_joint_maxacc(self, maxacc, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
maxacc = to_radian(maxacc, is_radian)
ret = self.arm_cmd.set_joint_maxacc(maxacc)
self.log_api_info('API -> set_joint_maxacc -> code={}, maxacc={}'.format(ret[0], maxacc), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_is_connected(_type='set')
def set_collision_sensitivity(self, value, wait=True):
assert isinstance(value, int) and 0 <= value <= 5
if wait:
if self._support_feedback:
self.wait_all_task_finish()
else:
self.wait_move()
ret = self.arm_cmd.set_collis_sens(value)
self.set_state(0)
self.log_api_info('API -> set_collision_sensitivity -> code={}, sensitivity={}'.format(ret[0], value), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_is_connected(_type='set')
def set_teach_sensitivity(self, value, wait=True):
assert isinstance(value, int) and 1 <= value <= 5
if wait:
if self._support_feedback:
self.wait_all_task_finish()
else:
self.wait_move()
ret = self.arm_cmd.set_teach_sens(value)
self.log_api_info('API -> set_teach_sensitivity -> code={}, sensitivity={}'.format(ret[0], value), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_is_connected(_type='set')
def set_gravity_direction(self, direction, wait=True):
if wait:
if self._support_feedback:
self.wait_all_task_finish()
else:
self.wait_move()
ret = self.arm_cmd.set_gravity_dir(direction[:3])
self.log_api_info('API -> set_gravity_direction -> code={}, direction={}'.format(ret[0], direction), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_is_connected(_type='set')
def set_mount_direction(self, base_tilt_deg, rotation_deg, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
t1 = base_tilt_deg
t2 = rotation_deg
if not is_radian:
t1 = math.radians(t1)
t2 = math.radians(t2)
# original G vect mounted on flat surface
G_normal = [0, 0, -1]
# rotation matrix introduced by 2 mounting angles
R2 = [math.cos(-t2), -math.sin(-t2), 0, math.sin(-t2), math.cos(-t2), 0, 0, 0, 1]
R1 = [math.cos(-t1), 0, math.sin(-t1), 0, 1, 0, -math.sin(-t1), 0, math.cos(-t1)]
Rot = [0] * 9
g_new = [0] * 3
# Mat(Rot) = Mat(R2)*Mat(R1)
# vect(g_new) = Mat(Rot)*vect(G_normal)
for i in range(3):
for j in range(3):
Rot[i * 3 + j] += (
R2[i * 3 + 0] * R1[0 * 3 + j] + R2[i * 3 + 1] * R1[1 * 3 + j] + R2[i * 3 + 2] * R1[2 * 3 + j])
g_new[i] = Rot[i * 3 + 0] * G_normal[0] + Rot[i * 3 + 1] * G_normal[1] + Rot[i * 3 + 2] * G_normal[2]
ret = self.arm_cmd.set_gravity_dir(g_new)
self.log_api_info('API -> set_mount_direction -> code={}, tilt={}, rotation={}, direction={}'.format(ret[0], base_tilt_deg, rotation_deg, g_new), code=ret[0])
return ret[0]
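# Worked example (illustrative): with base_tilt_deg=90 and rotation_deg=0, R2 is the
# identity and R1 rotates G_normal = [0, 0, -1] to g_new = [1, 0, 0], i.e. a wall mount
# where gravity acts along the base +X axis.
#   arm.set_mount_direction(base_tilt_deg=90, rotation_deg=0, is_radian=False)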
@xarm_is_connected(_type='set')
def clean_conf(self):
ret = self.arm_cmd.clean_conf()
self.log_api_info('API -> clean_conf -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def save_conf(self):
ret = self.arm_cmd.save_conf()
self.log_api_info('API -> save_conf -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='get')
def get_inverse_kinematics(self, pose, input_is_radian=None, return_is_radian=None):
input_is_radian = self._default_is_radian if input_is_radian is None else input_is_radian
return_is_radian = self._default_is_radian if return_is_radian is None else return_is_radian
assert len(pose) >= 6
tcp_pose = [to_radian(pose[i], input_is_radian or i <= 2) for i in range(6)]
ret = self.arm_cmd.get_ik(tcp_pose)
angles = []
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
# angles = [ret[i][0] for i in range(1, 8)]
angles = [ret[i] for i in range(1, 8)]
if not return_is_radian:
angles = [math.degrees(angle) for angle in angles]
return ret[0], angles
@xarm_is_connected(_type='get')
def get_forward_kinematics(self, angles, input_is_radian=None, return_is_radian=None):
input_is_radian = self._default_is_radian if input_is_radian is None else input_is_radian
return_is_radian = self._default_is_radian if return_is_radian is None else return_is_radian
# assert len(angles) >= 7
joints = [0] * 7
for i in range(min(len(angles), 7)):
joints[i] = to_radian(angles[i], input_is_radian)
ret = self.arm_cmd.get_fk(joints)
pose = []
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
# pose = [ret[i][0] for i in range(1, 7)]
pose = [ret[i] for i in range(1, 7)]
if not return_is_radian:
pose = [pose[i] if i < 3 else math.degrees(pose[i]) for i in range(len(pose))]
return ret[0], pose
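# Round-trip sketch (illustrative angles): forward kinematics of a joint vector yields a
# pose whose inverse kinematics should reproduce an equivalent joint configuration.
#   code, pose = arm.get_forward_kinematics([0, -30, 0, 30, 0, 60, 0],
#                                           input_is_radian=False, return_is_radian=False)
#   code, angles = arm.get_inverse_kinematics(pose, input_is_radian=False, return_is_radian=False)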
@xarm_is_connected(_type='get')
def is_tcp_limit(self, pose, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
assert len(pose) >= 6
tcp_pose = [to_radian(pose[i], is_radian or i <= 2, self._last_position[i]) for i in range(6)]
ret = self.arm_cmd.is_tcp_limit(tcp_pose)
self.log_api_info('API -> is_tcp_limit -> code={}, limit={}'.format(ret[0], ret[1]), code=ret[0])
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
return ret[0], bool(ret[1])
else:
return ret[0], None
@xarm_is_connected(_type='get')
def is_joint_limit(self, joint, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
# assert len(joint) >= 7
joints = [0] * 7
for i in range(min(len(joint), 7)):
joints[i] = to_radian(joint[i], is_radian, self._last_angles[i])
ret = self.arm_cmd.is_joint_limit(joints)
self.log_api_info('API -> is_joint_limit -> code={}, limit={}'.format(ret[0], ret[1]), code=ret[0])
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
return ret[0], bool(ret[1])
else:
return ret[0], None
def emergency_stop(self):
logger.info('emergency_stop--begin')
self.set_state(4)
expired = time.monotonic() + 3
while self.state not in [4] and time.monotonic() < expired:
self.set_state(4)
time.sleep(0.1)
self._sleep_finish_time = 0
self._sync()
logger.info('emergency_stop--end')
def send_cmd_async(self, command, timeout=None):
pass
def send_cmd_sync(self, command=None):
if command is None:
return 0
command = command.upper()
return self._handle_gcode(command)
def _handle_gcode(self, command):
def __handle_gcode_g(num):
if num == 1: # G1 move_line, ex: G1 X{} Y{} Z{} A{roll} B{pitch} C{yaw} F{speed} Q{acc} T{}
mvvelo = gcode_p.get_mvvelo(command)
mvacc = gcode_p.get_mvacc(command)
mvtime = gcode_p.get_mvtime(command)
mvpose = gcode_p.get_poses(command)
ret = self.set_position(*mvpose, radius=-1, speed=mvvelo, mvacc=mvacc, mvtime=mvtime)
elif num == 2: # G2 move_circle, ex: G2 X{} Y{} Z{} A{} B{} C{} I{} J{} K{} L{} M{} N{} F{speed} Q{acc} T{}
mvvelo = gcode_p.get_mvvelo(command)
mvacc = gcode_p.get_mvacc(command)
mvtime = gcode_p.get_mvtime(command)
pos1 = gcode_p.get_poses(command, default=0)
pos2 = gcode_p.get_joints(command, default=0)[:6]
percent = gcode_p.get_mvradius(command, default=0)
ret = self.move_circle(pos1, pos2, percent=percent, speed=mvvelo, mvacc=mvacc, mvtime=mvtime)
elif num == 4: # G4 set_pause_time, ex: G4 T{}
sltime = gcode_p.get_mvtime(command, default=0)
ret = self.set_pause_time(sltime)
elif num == 7: # G7 move_joint, ex: G7 I{} J{} K{} L{} M{} N{} O{} F{} Q{} T{}
mvvelo = gcode_p.get_mvvelo(command)
mvacc = gcode_p.get_mvacc(command)
mvtime = gcode_p.get_mvtime(command)
mvjoint = gcode_p.get_joints(command)
ret = self.set_servo_angle(angle=mvjoint, speed=mvvelo, mvacc=mvacc, mvtime=mvtime)
elif num == 8: # G8 move_gohome, ex: G8 F{} Q{} T{}
mvvelo = gcode_p.get_mvvelo(command)
mvacc = gcode_p.get_mvacc(command)
mvtime = gcode_p.get_mvtime(command)
ret = self.move_gohome(speed=mvvelo, mvacc=mvacc, mvtime=mvtime)
elif num == 9: # G9 move_arc_line, ex: G9 X{} Y{} Z{} A{roll} B{pitch} C{yaw} R{radius} F{speed} Q{acc} T{}
mvvelo = gcode_p.get_mvvelo(command)
mvacc = gcode_p.get_mvacc(command)
mvtime = gcode_p.get_mvtime(command)
mvpose = gcode_p.get_poses(command)
mvradii = gcode_p.get_mvradius(command, default=0)
ret = self.set_position(*mvpose, speed=mvvelo, mvacc=mvacc, mvtime=mvtime, radius=mvradii)
elif num == 11: # G11 set_servo_angle_j, ex: G11 I{} J{} K{} L{} M{} N{} O{} F{} Q{} T{}
mvvelo = gcode_p.get_mvvelo(command)
mvacc = gcode_p.get_mvacc(command)
mvtime = gcode_p.get_mvtime(command)
mvjoint = gcode_p.get_joints(command, default=0)
ret = self.set_servo_angle_j(mvjoint, speed=mvvelo, mvacc=mvacc, mvtime=mvtime)
elif num == 12: # G12 sleep, ex: G12 T{}
mvtime = gcode_p.get_mvtime(command, default=0)
time.sleep(mvtime)
ret = 0
else:
logger.debug('command {} does not exist'.format(command))
ret = APIState.CMD_NOT_EXIST, 'command {} does not exist'.format(command)
return ret
def __handle_gcode_h(num):
if num == 1: # H1 get_version, ex: H1
ret = self.get_version()
elif num == 10: # H10 system_control, ex: H10 V{}
value = gcode_p.get_int_value(command, default=0)
ret = self.system_control(value)
elif num == 11: # H11 motion_enable, ex: H11 I{id} V{enable}
value = gcode_p.get_int_value(command)
servo_id = gcode_p.get_id_num(command, default=0)
ret = self.motion_enable(enable=value, servo_id=servo_id)
elif num == 12: # H12 set_state, ex: H12 V{state}
value = gcode_p.get_int_value(command, default=0)
ret = self.set_state(value)
elif num == 13: # H13 get_state, ex: H13
ret = self.get_state()
elif num == 14: # H14 get_cmd_num, ex: H14
ret = self.get_cmdnum()
elif num == 15: # H15 get_error_warn_code, ex: H15
ret = self.get_err_warn_code()
elif num == 16: # H16 clean_error, ex: H16
ret = self.clean_error()
elif num == 17: # H17 clean_warn, ex: H17
ret = self.clean_warn()
elif num == 18: # H18 set_brake, ex: H18 I{id} V{open}
value = gcode_p.get_int_value(command)
servo_id = gcode_p.get_id_num(command, default=0)
ret = self.arm_cmd.set_brake(servo_id, value)[0]
elif num == 19: # H19 set_mode, ex: H19 V{mode}
value = gcode_p.get_int_value(command, default=0)
ret = self.set_mode(value)
elif num == 31: # H31 set_tcp_jerk, ex: H31 V{jerk}
value = gcode_p.get_float_value(command, default=-1)
ret = self.set_tcp_jerk(value)
elif num == 32: # H32 set_tcp_maxacc, ex: H32 V{maxacc}
value = gcode_p.get_float_value(command, default=-1)
ret = self.set_tcp_maxacc(value)
elif num == 33: # H33 set_joint_jerk, ex: H33 V{jerk}
value = gcode_p.get_float_value(command, default=-1)
ret = self.set_joint_jerk(value)
elif num == 34: # H34 set_joint_maxacc, ex: H34 V{maxacc}
value = gcode_p.get_float_value(command, default=-1)
ret = self.set_joint_maxacc(value)
elif num == 35: # H35 set_tcp_offset, ex: H35 X{x} Y{y} Z{z} A{roll} B{pitch} C{yaw}
pose = gcode_p.get_poses(command)
ret = self.set_tcp_offset(pose)
elif num == 36: # H36 set_tcp_load, ex: H36 I{weight} J{center_x} K{center_y} L{center_z}
values = gcode_p.get_joints(command, default=0)
ret = self.set_tcp_load(values[0], values[1:4])
elif num == 37: # H37 set_collision_sensitivity, ex: H37 V{sensitivity}
value = gcode_p.get_int_value(command, default=0)
ret = self.set_collision_sensitivity(value)
elif num == 38: # H38 set_teach_sensitivity, ex: H38 V{sensitivity}
value = gcode_p.get_int_value(command, default=0)
ret = self.set_teach_sensitivity(value)
elif num == 39: # H39 clean_conf, ex: H39
ret = self.clean_conf()
elif num == 40: # H40 save_conf, ex: H40
ret = self.save_conf()
elif num == 41: # H41 get_position, ex: H41
ret = self.get_position()
elif num == 42: # H42 get_servo_angle, ex: H42
ret = self.get_servo_angle()
elif num == 43: # H43 get_ik, ex: H43 X{} Y{} Z{} A{roll} B{pitch} C{yaw}
pose = gcode_p.get_poses(command, default=0)
ret = self.get_inverse_kinematics(pose, input_is_radian=False, return_is_radian=False)
elif num == 44: # H44 get_fk, ex: H44 I{} J{} K{} L{} M{} N{} O{}
joint = gcode_p.get_joints(command, default=0)
ret = self.get_forward_kinematics(joint, input_is_radian=False, return_is_radian=False)
elif num == 45: # H45 is_joint_limit, ex: H45 I{} J{} K{} L{} M{} N{} O{}
joint = gcode_p.get_joints(command)
ret = self.is_joint_limit(joint, is_radian=False)
elif num == 46: # H46 is_tcp_limit, ex: H46 X{} Y{} Z{} A{roll} B{pitch} C{yaw}
pose = gcode_p.get_poses(command)
ret = self.is_tcp_limit(pose, is_radian=False)
elif num == 51: # H51 set_gravity_direction, ex: H51 X{} Y{} Z{} A{roll} B{pitch} C{yaw}
pose = gcode_p.get_poses(command, default=0)
ret = self.set_gravity_direction(pose)
elif num == 101: # H101 set_servo_addr_16, ex: H101 I{id} D{addr} V{value}
value = gcode_p.get_int_value(command)
servo_id = gcode_p.get_id_num(command, default=0)
addr = gcode_p.get_addr(command)
ret = self.set_servo_addr_16(servo_id=servo_id, addr=addr, value=value)
elif num == 102: # H102 get_servo_addr_16, ex: H102 I{id} D{addr}
servo_id = gcode_p.get_id_num(command, default=0)
addr = gcode_p.get_addr(command)
ret = self.get_servo_addr_16(servo_id=servo_id, addr=addr)
elif num == 103: # H103 set_servo_addr_32, ex: H103 I{id} D{addr} V{value}
servo_id = gcode_p.get_id_num(command, default=0)
addr = gcode_p.get_addr(command)
value = gcode_p.get_int_value(command)
ret = self.set_servo_addr_32(servo_id=servo_id, addr=addr, value=value)
elif num == 104: # H104 get_servo_addr_32, ex: H104 I{id} D{addr}
servo_id = gcode_p.get_id_num(command, default=0)
addr = gcode_p.get_addr(command)
ret = self.get_servo_addr_32(servo_id=servo_id, addr=addr)
elif num == 105: # H105 set_servo_zero, ex: H105 I{id}
servo_id = gcode_p.get_id_num(command, default=0)
ret = self.set_servo_zero(servo_id=servo_id)
elif num == 106: # H106 get_servo_debug_msg, ex: H106
ret = self.get_servo_debug_msg()
else:
logger.debug('command {} does not exist'.format(command))
ret = APIState.CMD_NOT_EXIST, 'command {} does not exist'.format(command)
return ret
def __handle_gcode_m(num):
if num == 116: # M116 set_gripper_enable, ex: M116 V{enable}
value = gcode_p.get_int_value(command)
ret = self.set_gripper_enable(value)
elif num == 117: # M117 set_gripper_mode, ex: M117 V{mode}
value = gcode_p.get_int_value(command)
ret = self.set_gripper_mode(value)
elif num == 118: # M118 set_gripper_zero, ex: M118
ret = self.set_gripper_zero()
elif num == 119: # M119 get_gripper_position, ex: M119
ret = self.get_gripper_position()
elif num == 120: # M120 set_gripper_position, ex: M120 V{pos}
value = gcode_p.get_int_value(command)
ret = self.set_gripper_position(value)
elif num == 121: # M121 set_gripper_speed, ex: M121 V{speed}
value = gcode_p.get_int_value(command)
ret = self.set_gripper_speed(value)
elif num == 125: # M125 get_gripper_err_code, ex: M125
ret = self.get_gripper_err_code()
elif num == 126: # M126 clean_gripper_error, ex: M126
ret = self.clean_gripper_error()
elif num == 127:
ret = self.get_gripper_version()
elif num == 131: # M131 get_tgpio_digital, ex: M131
ret = self.get_tgpio_digital()
elif num == 132: # M132 set_tgpio_digital, ex: M132 I{ionum} V{}
ionum = gcode_p.get_id_num(command, default=0)
value = gcode_p.get_int_value(command)
ret = self.set_tgpio_digital(ionum, value)
elif num == 133: # M133 get_tgpio_analog(0), ex: M133 I{ionum=0}
ionum = gcode_p.get_id_num(command, default=0)
ret = self.get_tgpio_analog(ionum=ionum)
elif num == 134: # M134 get_tgpio_analog(1), ex: M134 I{ionum=1}
ionum = gcode_p.get_id_num(command, default=0)
ret = self.get_tgpio_analog(ionum=ionum)
elif num == 135:
return self.get_tgpio_version()
else:
logger.debug('command {} does not exist'.format(command))
ret = APIState.CMD_NOT_EXIST, 'command {} does not exist'.format(command)
return ret
def __handle_gcode_d(num):
if num == 11: # D11 I{id}
id_num = gcode_p.get_id_num(command, default=None)
ret = self.get_servo_error_code(id_num)
elif num == 12: # D12 I{id}
id_num = gcode_p.get_id_num(command, default=None)
if id_num == 0:
id_num = 8
self.clean_error()
self.clean_warn()
self.motion_enable(enable=False, servo_id=id_num)
ret = self.set_servo_detach(id_num)
elif num == 13: # D13 I{id}
id_num = gcode_p.get_id_num(command, default=None)
if id_num == 0:
id_num = 8
self.set_servo_zero(id_num)
ret = self.motion_enable(enable=True, servo_id=id_num)
elif num == 21: # D21 I{id}
id_num = gcode_p.get_id_num(command, default=None)
self.clean_servo_pvl_err(id_num)
ret = self.get_servo_error_code(id_num)
else:
logger.debug('command {} does not exist'.format(command))
ret = APIState.CMD_NOT_EXIST, 'command {} does not exist'.format(command)
return ret
def __handle_gcode_s(num):
if num == 44: # S44 I{id}
id_num = gcode_p.get_id_num(command, default=None)
ret = self.get_servo_all_pids(id_num)
elif num == 45:
id_num = gcode_p.get_id_num(command, default=1)
ret = self.get_servo_version(servo_id=id_num)
else:
logger.debug('command {} does not exist'.format(command))
ret = APIState.CMD_NOT_EXIST, 'command {} does not exist'.format(command)
return ret
def __handle_gcode_c(num):
if num == 131: # C131 get_cgpio_digital, ex: C131
ret = self.get_cgpio_digital()
elif num == 132: # C132 get_cgpio_analog(0), ex: C132 I{ionum=0}
ionum = gcode_p.get_id_num(command, default=0)
ret = self.get_cgpio_analog(ionum)
elif num == 133: # C133 get_cgpio_analog(1), ex: C133 I{ionum=1}
ionum = gcode_p.get_id_num(command, default=1)
ret = self.get_cgpio_analog(ionum)
elif num == 134: # C134 set_cgpio_digital, ex: C134 I{ionum} V{value}
ionum = gcode_p.get_id_num(command, default=0)
value = gcode_p.get_int_value(command)
ret = self.set_cgpio_digital(ionum, value)
elif num == 135: # C135 set_cgpio_analog(0, v), ex: C135 I{ionum=0} V{value}
ionum = gcode_p.get_id_num(command, default=0)
value = gcode_p.get_float_value(command)
ret = self.set_cgpio_analog(ionum, value)
elif num == 136: # C136 set_cgpio_analog(1, v), ex: C136 I{ionum=1} V{value}
ionum = gcode_p.get_id_num(command, default=1)
value = gcode_p.get_float_value(command)
ret = self.set_cgpio_analog(ionum, value)
elif num == 137: # C137 set_cgpio_digital_input_function, ex: C137 I{ionum} V{fun}
ionum = gcode_p.get_id_num(command, default=0)
value = gcode_p.get_int_value(command)
ret = self.set_cgpio_digital_input_function(ionum, value)
elif num == 138: # C138 set_cgpio_digital_output_function, ex: C138 I{ionum} V{fun}
ionum = gcode_p.get_id_num(command, default=0)
value = gcode_p.get_int_value(command)
ret = self.set_cgpio_digital_output_function(ionum, value)
elif num == 139: # C139 get_cgpio_state, ex: C139
ret = self.get_cgpio_state()
else:
logger.debug('command {} does not exist'.format(command))
ret = APIState.CMD_NOT_EXIST, 'command {} does not exist'.format(command)
return ret
for code_type, handler in (('G', __handle_gcode_g), ('H', __handle_gcode_h), ('M', __handle_gcode_m),
('D', __handle_gcode_d), ('S', __handle_gcode_s), ('C', __handle_gcode_c)):
cmd_num = gcode_p.get_gcode_cmd_num(command, code_type)
if cmd_num >= 0:
return handler(cmd_num)
logger.debug('command {} does not exist'.format(command))
return APIState.CMD_NOT_EXIST, 'command {} does not exist'.format(command)
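# G-code sketch (commands taken from the handlers above; the values are illustrative):
#   arm.send_cmd_sync('G1 X300 Y0 Z200 A180 B0 C0 F100')   # move_line
#   arm.send_cmd_sync('G4 T2')                             # pause 2 seconds
#   arm.send_cmd_sync('H16')                               # clean_error
#   arm.send_cmd_sync('M120 V500')                         # set_gripper_position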
@xarm_is_connected(_type='set')
def run_gcode_file(self, path, **kwargs):
times = kwargs.get('times', 1)
init = kwargs.get('init', False)
mode = kwargs.get('mode', 0)
state = kwargs.get('state', 0)
wait_seconds = kwargs.get('wait_seconds', 0)
try:
abs_path = os.path.abspath(path)
if not os.path.exists(abs_path):
raise FileNotFoundError(abs_path)
with open(abs_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
if init:
self.clean_error()
self.clean_warn()
self.motion_enable(True)
self.set_mode(mode)
self.set_state(state)
if wait_seconds > 0:
time.sleep(wait_seconds)
for i in range(times):
for line in lines:
line = line.strip()
if not line:
continue
if not self.connected:
logger.error('xArm is disconnected')
return APIState.NOT_CONNECTED
ret = self.send_cmd_sync(line)
if isinstance(ret, int) and ret < 0:
return ret
return APIState.NORMAL
except Exception as e:
logger.error(e)
return APIState.API_EXCEPTION
@xarm_is_connected(_type='set')
def run_blockly_app(self, path, **kwargs):
"""
Run the app generated by xArmStudio software
:param path: app path
"""
try:
if not os.path.exists(path):
path = os.path.join(os.path.expanduser('~'), '.UFACTORY', 'projects', 'test', 'xarm{}'.format(self.axis), 'app', 'myapp', path)
if os.path.isdir(path):
path = os.path.join(path, 'app.xml')
if not os.path.exists(path):
raise FileNotFoundError(path)
blockly_tool = BlocklyTool(path)
succeed = blockly_tool.to_python(arm=self._api_instance, is_exec=True, **kwargs)
if succeed:
times = kwargs.get('times', 1)
highlight_callback = kwargs.get('highlight_callback', None)
blockly_print = kwargs.get('blockly_print', print)
connect_changed_callbacks = self._report_callbacks[self.REPORT_CONNECT_CHANGED_ID].copy()
state_changed_callbacks = self._report_callbacks[self.REPORT_STATE_CHANGED_ID].copy()
error_warn_changed_callbacks = self._report_callbacks[self.REPORT_ERROR_WARN_CHANGED_ID].copy()
count_changed_callbacks = self._report_callbacks[self.REPORT_COUNT_CHANGED_ID].copy()
code = APIState.NORMAL
try:
for _ in range(times):
exec(blockly_tool.codes, {'arm': self._api_instance, 'highlight_callback': highlight_callback, 'print': blockly_print})
except Exception as e:
code = APIState.RUN_BLOCKLY_EXCEPTION
blockly_print('run blockly app error: {}'.format(e))
self._report_callbacks[self.REPORT_CONNECT_CHANGED_ID] = connect_changed_callbacks
self._report_callbacks[self.REPORT_STATE_CHANGED_ID] = state_changed_callbacks
self._report_callbacks[self.REPORT_ERROR_WARN_CHANGED_ID] = error_warn_changed_callbacks
self._report_callbacks[self.REPORT_COUNT_CHANGED_ID] = count_changed_callbacks
return code
else:
logger.error('The conversion is incomplete and some blocks are not yet supported.')
return APIState.CONVERT_FAILED
except Exception as e:
logger.error(e)
return APIState.API_EXCEPTION
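# Usage sketch (illustrative path): converts and executes an xArmStudio blockly project;
# 'times' repeats the generated code.
#   code = arm.run_blockly_app('/path/to/app.xml', times=2)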
@xarm_is_connected(_type='get')
def get_hd_types(self):
ret = self.arm_cmd.get_hd_types()
return ret[0], ret[1:]
@xarm_is_connected(_type='set')
def reload_dynamics(self):
ret = self.arm_cmd.reload_dynamics()
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> reload_dynamics -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_counter_reset(self):
ret = self.arm_cmd.cnter_reset()
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_counter_reset -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def set_counter_increase(self, val=1):
# note: 'val' is currently unused; cnter_plus() always increments the counter by 1
ret = self.arm_cmd.cnter_plus()
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_counter_increase -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_report_tau_or_i(self, tau_or_i=0):
ret = self.arm_cmd.set_report_tau_or_i(int(tau_or_i))
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_report_tau_or_i({}) -> code={}'.format(tau_or_i, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='get')
def get_report_tau_or_i(self):
ret = self.arm_cmd.get_report_tau_or_i()
return ret[0], ret[1]
@xarm_is_connected(_type='set')
def set_self_collision_detection(self, on_off):
ret = self.arm_cmd.set_self_collision_detection(int(on_off))
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_self_collision_detection({}) -> code={}'.format(on_off, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_collision_tool_model(self, tool_type, *args, **kwargs):
if tool_type == XCONF.CollisionToolType.BOX:
assert ('z' in kwargs or len(args) >= 3) \
and ('y' in kwargs or len(args) >= 2) \
and ('x' in kwargs or len(args) >= 1), 'params error, must specify x,y,z parameter'
x = kwargs.get('x') if 'x' in kwargs else args[0]
y = kwargs.get('y') if 'y' in kwargs else args[1]
z = kwargs.get('z') if 'z' in kwargs else args[2]
params = [x, y, z]
elif tool_type == XCONF.CollisionToolType.CYLINDER:
assert ('radius' in kwargs or len(args) >= 2) \
and ('height' in kwargs or len(args) >= 1), 'params error, must specify radius,height parameter'
radius = kwargs.get('radius') if 'radius' in kwargs else args[0]
height = kwargs.get('height') if 'height' in kwargs else args[1]
params = [radius, height]
else:
params = [] if tool_type < XCONF.CollisionToolType.USE_PRIMITIVES else list(args)
ret = self.arm_cmd.set_collision_tool_model(tool_type, params)
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_collision_tool_model({}, {}) -> code={}'.format(tool_type, params, ret[0]), code=ret[0])
return ret[0]
def get_firmware_config(self):
cgpio_code, cgpio_states = self.get_cgpio_state()
reduced_code, reduced_states = self.get_reduced_states()
tau_code, tau_flag = self.get_report_tau_or_i()
# keep the first non-zero error code; 0 only if all three queries succeeded
code = cgpio_code or reduced_code or tau_code
return code, {
'COLL_SENS': self.collision_sensitivity, # collision sensitivity
'TEACH_SENS': self.teach_sensitivity, # teach sensitivity
'GRAV_DIR': self.gravity_direction, # gravity direction
'TCP_LOAD': self.tcp_load, # TCP payload
'TCP_OFFSET': self.position_offset, # TCP offset
'TCP_MAXACC': self.tcp_acc_limit[1], # maximum TCP acceleration
'TCP_JERK': self.tcp_jerk, # TCP jerk
'JOINT_MAXACC': self.joint_acc_limit[1], # maximum joint acceleration
'JOINT_JERK': self.joint_jerk, # joint jerk
'WORLD_OFFSET': self.world_offset, # base coordinate (world) offset
'REPORT_TAU_OR_I': tau_flag, # report torque or current
'CGPIO_INPUT_FUNC_CONFIG': cgpio_states[10], # controller digital input IO function configuration
'CGPIO_OUTPUT_FUNC_CONFIG': cgpio_states[11], # controller digital output IO function configuration
'REDUCED_STATES': reduced_states, # reduced mode states
'GPIO_RESET_CONFIG': self.gpio_reset_config, # GPIO auto-reset configuration
'COLL_PARAMS': self.self_collision_params, # self-collision model parameters
}
def set_firmware_config(self, config):
code, old_config = self.get_firmware_config()
if 'COLL_SENS' in config and config['COLL_SENS'] != old_config['COLL_SENS']:
self.set_collision_sensitivity(config['COLL_SENS'])
if 'TEACH_SENS' in config and config['TEACH_SENS'] != old_config['TEACH_SENS']:
self.set_teach_sensitivity(config['TEACH_SENS'])
if 'GRAV_DIR' in config and config['GRAV_DIR'] != old_config['GRAV_DIR']:
self.set_gravity_direction(config['GRAV_DIR'])
if 'TCP_LOAD' in config and config['TCP_LOAD'] != old_config['TCP_LOAD']:
self.set_tcp_load(*config['TCP_LOAD'])
if 'TCP_OFFSET' in config and config['TCP_OFFSET'] != old_config['TCP_OFFSET']:
self.set_tcp_offset(config['TCP_OFFSET'])
if 'TCP_MAXACC' in config and config['TCP_MAXACC'] != old_config['TCP_MAXACC']:
self.set_tcp_maxacc(config['TCP_MAXACC'])
if 'TCP_JERK' in config and config['TCP_JERK'] != old_config['TCP_JERK']:
self.set_tcp_jerk(config['TCP_JERK'])
if 'JOINT_MAXACC' in config and config['JOINT_MAXACC'] != old_config['JOINT_MAXACC']:
self.set_joint_maxacc(config['JOINT_MAXACC'])
if 'JOINT_JERK' in config and config['JOINT_JERK'] != old_config['JOINT_JERK']:
self.set_joint_jerk(config['JOINT_JERK'])
if 'WORLD_OFFSET' in config and config['WORLD_OFFSET'] != old_config['WORLD_OFFSET']:
self.set_world_offset(config['WORLD_OFFSET'])
if 'REPORT_TAU_OR_I' in config and config['REPORT_TAU_OR_I'] != old_config['REPORT_TAU_OR_I']:
self.set_report_tau_or_i(config['REPORT_TAU_OR_I'])
if 'GPIO_RESET_CONFIG' in config and config['GPIO_RESET_CONFIG'] != old_config['GPIO_RESET_CONFIG']:
self.config_io_reset_when_stop(0, config['GPIO_RESET_CONFIG'][0])
self.config_io_reset_when_stop(1, config['GPIO_RESET_CONFIG'][1])
if 'REDUCED_STATES' in config:
states = config['REDUCED_STATES']
old_states = old_config['REDUCED_STATES']
if states[1] != old_states[1]:
self.set_reduced_tcp_boundary(states[1])
if states[2] != old_states[2]:
self.set_reduced_max_tcp_speed(states[2])
if states[3] != old_states[3]:
self.set_reduced_max_joint_speed(states[3])
if len(states) > 4 and len(old_states) > 4:
if states[4] != old_states[4]:
self.set_reduced_joint_range(states[4])
if len(states) > 5 and len(old_states) > 5:
if states[5] != old_states[5]:
self.set_fense_mode(states[5])
if len(states) > 6 and len(old_states) > 6:
if states[6] != old_states[6]:
self.set_collision_rebound(states[6])
self.set_reduced_mode(states[0])
if 'CGPIO_INPUT_FUNC_CONFIG' in config and config['CGPIO_INPUT_FUNC_CONFIG'] != old_config['CGPIO_INPUT_FUNC_CONFIG']:
for i in range(len(config['CGPIO_INPUT_FUNC_CONFIG'])):
if config['CGPIO_INPUT_FUNC_CONFIG'][i] != old_config['CGPIO_INPUT_FUNC_CONFIG'][i]:
self.set_cgpio_digital_input_function(i, config['CGPIO_INPUT_FUNC_CONFIG'][i])
if 'CGPIO_OUTPUT_FUNC_CONFIG' in config and config['CGPIO_OUTPUT_FUNC_CONFIG'] != old_config['CGPIO_OUTPUT_FUNC_CONFIG']:
for i in range(len(config['CGPIO_OUTPUT_FUNC_CONFIG'])):
if config['CGPIO_OUTPUT_FUNC_CONFIG'][i] != old_config['CGPIO_OUTPUT_FUNC_CONFIG'][i]:
self.set_cgpio_digital_output_function(i, config['CGPIO_OUTPUT_FUNC_CONFIG'][i])
if 'COLL_PARAMS' in config and config['COLL_PARAMS'] != old_config['COLL_PARAMS']:
if config['COLL_PARAMS'][0] != old_config['COLL_PARAMS'][0]:
self.set_self_collision_detection(config['COLL_PARAMS'][0])
if config['COLL_PARAMS'][1] != old_config['COLL_PARAMS'][1] or config['COLL_PARAMS'][2] != old_config['COLL_PARAMS'][2]:
self.set_collision_tool_model(config['COLL_PARAMS'][1], *config['COLL_PARAMS'][2])
self.save_conf()
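# Round-trip sketch (illustrative value): read the current configuration, change one
# field and write it back; set_firmware_config only re-sends fields that differ.
#   code, config = arm.get_firmware_config()
#   config['COLL_SENS'] = 3
#   arm.set_firmware_config(config)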
@xarm_is_connected(_type='get')
def get_power_board_version(self):
ret = self.arm_cmd.get_power_board_version()
ret[0] = self._check_code(ret[0])
return ret[0], ret[1:]
@xarm_is_connected(_type='get')
def get_movement(self):
ret = self.arm_cmd.get_movement()
ret[0] = self._check_code(ret[0])
return ret[0], ret[1]
@xarm_is_connected(_type='set')
def vc_set_joint_velocity(self, speeds, is_radian=None, is_sync=True, check_mode=True, duration=-1):
# if check_mode and not self._check_mode_is_correct(4):
# return APIState.MODE_IS_NOT_CORRECT
is_radian = self._default_is_radian if is_radian is None else is_radian
jnt_v = [0] * 7
for i, spd in enumerate(speeds):
if i >= 7:
break
jnt_v[i] = to_radian(spd, is_radian)
ret = self.arm_cmd.vc_set_jointv(jnt_v, 1 if is_sync else 0, duration if self.version_is_ge(1, 8, 0) else -1)
ret[0] = self._check_code(ret[0], is_move_cmd=True, mode=4)
self.log_api_info('API -> vc_set_joint_velocity -> code={}, speeds={}, is_sync={}'.format(
ret[0], jnt_v, is_sync
), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def vc_set_cartesian_velocity(self, speeds, is_radian=None, is_tool_coord=False, check_mode=True, duration=-1):
# if check_mode and not self._check_mode_is_correct(5):
# return APIState.MODE_IS_NOT_CORRECT
is_radian = self._default_is_radian if is_radian is None else is_radian
line_v = [0] * 6
for i, spd in enumerate(speeds):
if i >= 6:
break
line_v[i] = spd if i <= 2 else to_radian(spd, is_radian)
ret = self.arm_cmd.vc_set_linev(line_v, 1 if is_tool_coord else 0, duration if self.version_is_ge(1, 8, 0) else -1)
ret[0] = self._check_code(ret[0], is_move_cmd=True, mode=5)
self.log_api_info('API -> vc_set_cartesian_velocity -> code={}, speeds={}, is_tool_coord={}'.format(
ret[0], line_v, is_tool_coord
), code=ret[0])
return ret[0]
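# Velocity-control sketch (illustrative; per the commented mode checks above, mode 5 is
# cartesian velocity mode and mode 4 the joint counterpart for vc_set_joint_velocity):
#   arm.set_mode(5)
#   arm.set_state(0)
#   arm.vc_set_cartesian_velocity([20, 0, 0, 0, 0, 0])  # move along X
#   time.sleep(2)
#   arm.vc_set_cartesian_velocity([0, 0, 0, 0, 0, 0])   # stop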
@xarm_is_connected(_type='get')
def calibrate_tcp_coordinate_offset(self, four_points, is_radian=None):
assert len(four_points) >= 4, 'The parameter four_points must contain 4 TCP points'
is_radian = self._default_is_radian if is_radian is None else is_radian
points = []
for i in range(4):
assert len(four_points[i]) >= 6, 'Each TCP point in the parameter four_points must contain x/y/z/roll/pitch/yaw'
points.append([four_points[i][j] if j <= 2 else to_radian(four_points[i][j], is_radian) for j in range(6)])
ret = self.arm_cmd.cali_tcp_pose(points)
ret[0] = self._check_code(ret[0])
return ret[0], ret[1:]
@xarm_is_connected(_type='get')
def calibrate_tcp_orientation_offset(self, rpy_be, rpy_bt, input_is_radian=None, return_is_radian=None):
input_is_radian = self._default_is_radian if input_is_radian is None else input_is_radian
return_is_radian = self._default_is_radian if return_is_radian is None else return_is_radian
rpy_be_ = [to_radian(rpy_be[i], input_is_radian) for i in range(3)]
rpy_bt_ = [to_radian(rpy_bt[i], input_is_radian) for i in range(3)]
ret = self.arm_cmd.cali_tcp_orient(rpy_be_, rpy_bt_)
ret[0] = self._check_code(ret[0])
return ret[0], [ret[i+1] if return_is_radian else math.degrees(ret[i+1]) for i in range(3)]
@xarm_is_connected(_type='get')
def calibrate_user_orientation_offset(self, three_points, mode=0, trust_ind=0, input_is_radian=None, return_is_radian=None):
assert len(three_points) >= 3, 'The parameter three_points must contain 3 TCP points'
input_is_radian = self._default_is_radian if input_is_radian is None else input_is_radian
return_is_radian = self._default_is_radian if return_is_radian is None else return_is_radian
points = []
for i in range(3):
assert len(three_points[i]) >= 6, 'Each TCP point in the parameter three_points must contain x/y/z/roll/pitch/yaw'
points.append([three_points[i][j] if j <= 2 else to_radian(three_points[i][j], input_is_radian) for j in range(6)])
ret = self.arm_cmd.cali_user_orient(points, mode=mode, trust_ind=trust_ind)
ret[0] = self._check_code(ret[0])
return ret[0], [ret[i+1] if return_is_radian else math.degrees(ret[i+1]) for i in range(3)]
@xarm_is_connected(_type='get')
def calibrate_user_coordinate_offset(self, rpy_ub, pos_b_uorg, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
rpy_ub_ = [rpy_ub[i] if is_radian else math.radians(rpy_ub[i]) for i in range(3)]
ret = self.arm_cmd.cali_user_pos(rpy_ub_, pos_b_uorg)
ret[0] = self._check_code(ret[0])
return ret[0], ret[1:4]
@xarm_is_connected(_type='set')
def get_tcp_rotation_radius(self, value=6):
ret = self.arm_cmd.get_tcp_rotation_radius(value)
self.log_api_info('API -> get_tcp_rotation_radius -> code={}'.format(ret[0]), code=ret[0])
ret[0] = self._check_code(ret[0])
return ret[0], ret[1][0]
@xarm_is_connected(_type='set')
def get_max_joint_velocity(self, eveloc, joint_pos, is_radian=None):
"""
Obtain the maximum joint angular velocity for a given TCP speed
:param eveloc: maximum TCP speed
:param joint_pos: joint angle list (unit: rad if is_radian is True else °), one value per joint,
like [axis-1, axis-2, axis-3, axis-4, axis-5, axis-6, axis-7]
:param is_radian: whether the values in joint_pos are in radians, default is self.default_is_radian
"""
is_radian = self._default_is_radian if is_radian is None else is_radian
joints = [0] * 7
for i in range(min(len(joint_pos), 7)):
joints[i] = to_radian(joint_pos[i], is_radian)
return self.arm_cmd.get_max_joint_velocity(eveloc, joints)
@xarm_is_connected(_type='get')
def iden_tcp_load(self, estimated_mass=0):
protocol_identifier = self.arm_cmd.get_protocol_identifier()
self.arm_cmd.set_protocol_identifier(2)
self._keep_heart = False
if self.version_is_ge(1, 9, 100) and estimated_mass <= 0:
estimated_mass = 0.5
ret = self.arm_cmd.iden_tcp_load(estimated_mass)
self.arm_cmd.set_protocol_identifier(protocol_identifier)
self._keep_heart = True
self.log_api_info('API -> iden_tcp_load -> code={}'.format(ret[0]), code=ret[0])
return self._check_code(ret[0]), ret[1:5]
@xarm_is_connected(_type='set')
def set_cartesian_velo_continuous(self, on_off):
ret = self.arm_cmd.set_cartesian_velo_continuous(int(on_off))
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_cartesian_velo_continuous({}) -> code={}'.format(on_off, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_allow_approx_motion(self, on_off):
ret = self.arm_cmd.set_allow_approx_motion(int(on_off))
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_allow_approx_motion({}) -> code={}'.format(on_off, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='get')
def get_allow_approx_motion(self):
ret = self.arm_cmd.get_allow_approx_motion()
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> get_allow_approx_motion() -> code={}'.format(ret[0]), code=ret[0])
return ret[0], ret[-1]
@xarm_is_connected(_type='get')
def iden_joint_friction(self, sn=None):
if sn is None:
code, sn = self.get_robot_sn()
if code != 0:
self.log_api_info('iden_joint_friction -> get_robot_sn failed, code={}'.format(code), code=code)
return APIState.API_EXCEPTION, -1
if len(sn) != 14:
self.log_api_info('iden_joint_friction, sn is not correct, sn={}'.format(sn), code=APIState.API_EXCEPTION)
return APIState.API_EXCEPTION, -1
sn = sn.upper()
axis_map = {5: 'F', 6: 'I', 7: 'S'}
valid_850 = self.is_850 and sn[0] == 'F' and sn[1] == 'X'
valid_lite = self.is_lite6 and sn[0] == 'L' and sn[1] == 'I'
valid_xarm = not self.is_850 and not self.is_lite6 and sn[0] == 'X' and sn[1] == axis_map.get(self.axis, '')
if not (valid_850 or valid_lite or valid_xarm):
self.log_api_info('iden_joint_friction, sn is not correct, axis={}, type={}, sn={}'.format(self.axis, self.device_type, sn), code=APIState.API_EXCEPTION)
return APIState.API_EXCEPTION, -1
protocol_identifier = self.arm_cmd.get_protocol_identifier()
self.arm_cmd.set_protocol_identifier(2)
self._keep_heart = False
ret = self.arm_cmd.iden_joint_friction(sn)
self.arm_cmd.set_protocol_identifier(protocol_identifier)
self._keep_heart = True
self.log_api_info('API -> iden_joint_friction -> code={}'.format(ret[0]), code=ret[0])
return self._check_code(ret[0]), 0 if int(ret[1]) == 0 else -1
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def wait_all_task_finish(self, timeout=None, **kwargs):
if not self._support_feedback:
return APIState.CMD_NOT_EXIST
wait = kwargs.pop('wait', True)
feedback_key, studio_wait = self._gen_feedback_key(wait, **kwargs)
ret = self.arm_cmd.check_feedback(feedback_key=feedback_key)
trans_id = self._get_feedback_transid(feedback_key, studio_wait)
ret[0] = self._check_code(ret[0])
if wait and ret[0] == 0:
ret[0] = self._wait_feedback(timeout, trans_id=trans_id, ignore_log=True)[0]
if ret[0] == 0:
time.sleep(0.5)
return ret[0]
@xarm_wait_until_not_pause
@xarm_wait_until_cmdnum_lt_max
@xarm_is_ready(_type='set')
def send_hex_cmd(self, datas, timeout=10):
ret = self.arm_cmd.send_hex_cmd(datas, timeout)
return ret[1:]
# ret = self.arm_cmd.send_hex_request(datas)
# if ret == -1:
# return [XCONF.UxbusState.ERR_NOTTCP]
# ret = self.arm_cmd.recv_hex_request(ret, timeout)
# return ret
|
e8b9e44c51ebf0d968dd705aca453b0c9e600791
|
f3dfbfb9c128ac5bc7c0098f7eff91a2119d6183
|
/src/biotite/application/localapp.py
|
acfd1bd8bdc39339b216f24ec963280492599a49
|
[
"BSD-3-Clause"
] |
permissive
|
biotite-dev/biotite
|
2c2afafc6c4dad51af023c50c156c8f19a20154d
|
67d801683bfe79087a8e67e82de7333e79c827bb
|
refs/heads/master
| 2023-09-06T00:03:24.761607
| 2023-09-03T14:28:27
| 2023-09-03T14:28:27
| 98,795,444
| 463
| 80
|
BSD-3-Clause
| 2023-09-09T16:47:12
| 2017-07-30T12:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 9,450
|
py
|
localapp.py
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.application"
__author__ = "Patrick Kunzmann"
__all__ = ["LocalApp"]
import abc
import copy
from os import chdir, getcwd, remove
from .application import Application, AppState, AppStateError, requires_state
from subprocess import Popen, PIPE, SubprocessError, TimeoutExpired
class LocalApp(Application, metaclass=abc.ABCMeta):
"""
The base class for all locally installed applications that are
used via the command line.
Internally this creates a :class:`Popen` instance, which handles
the execution.
Parameters
----------
bin_path : str
Path of the application represented by this class.
"""
def __init__(self, bin_path):
super().__init__()
self._bin_path = bin_path
self._arguments = []
self._options = []
self._exec_dir = getcwd()
self._process = None
self._command = None
self._stdin_file = None
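# Minimal subclass sketch (illustrative; 'echo' stands in for a real CLI tool, and the
# run() override assumed here follows the pattern of biotite's own LocalApp subclasses):
#   class EchoApp(LocalApp):
#       def __init__(self):
#           super().__init__("echo")
#       def run(self):
#           self.set_arguments(["Hello,", "World!"])
#           super().run()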
@requires_state(AppState.CREATED)
def set_arguments(self, arguments):
"""
Set command line arguments for the application run.
PROTECTED: Do not call from outside.
Parameters
----------
arguments : list of str
            A list of strings representing the command line arguments.
"""
self._arguments = copy.copy(arguments)
@requires_state(AppState.CREATED)
def set_stdin(self, file):
"""
Set a file as standard input for the application run.
PROTECTED: Do not call from outside.
Parameters
----------
file : file object
The file for the standard input.
Must have a valid file descriptor, e.g. file-like objects
such as `StringIO` are invalid.
"""
self._stdin_file = file
@requires_state(AppState.CREATED)
def add_additional_options(self, options):
"""
Add additional options for the command line program.
These options are put before the arguments automatically
determined by the respective :class:`LocalApp` subclass.
        This method is intended for advanced users who know both the
        available options of the command line program and the options
        already used by the :class:`LocalApp` subclasses.
        Ignoring the options that are already in use may result in
        conflicting CLI arguments and unexpected results.
        It is recommended to use this method only when the respective
        :class:`LocalApp` subclass does not provide a method to set the
        desired option.
Parameters
----------
options : list of str
A list of strings representing the command line options.
Notes
-----
In order to see which options the command line execution used,
try the :meth:`get_command()` method.
Examples
--------
>>> seq1 = ProteinSequence("BIQTITE")
>>> seq2 = ProteinSequence("TITANITE")
>>> seq3 = ProteinSequence("BISMITE")
>>> seq4 = ProteinSequence("IQLITE")
>>> # Run application without additional arguments
>>> app = ClustalOmegaApp([seq1, seq2, seq3, seq4])
>>> app.start()
>>> app.join()
>>> print(app.get_command())
clustalo --in ...fa --out ...fa --force --output-order=tree-order --seqtype Protein --guidetree-out ...tree
>>> # Run application with additional argument
>>> app = ClustalOmegaApp([seq1, seq2, seq3, seq4])
>>> app.add_additional_options(["--full"])
>>> app.start()
>>> app.join()
>>> print(app.get_command())
clustalo --full --in ...fa --out ...fa --force --output-order=tree-order --seqtype Protein --guidetree-out ...tree
"""
self._options += options
    @requires_state(
        AppState.RUNNING |
        AppState.CANCELLED |
        AppState.FINISHED |
        AppState.JOINED
    )
def get_command(self):
"""
Get the executed command.
Cannot be called until the application has been started.
Returns
-------
command : str
The executed command.
Examples
--------
>>> seq1 = ProteinSequence("BIQTITE")
>>> seq2 = ProteinSequence("TITANITE")
>>> seq3 = ProteinSequence("BISMITE")
>>> seq4 = ProteinSequence("IQLITE")
>>> app = ClustalOmegaApp([seq1, seq2, seq3, seq4])
>>> app.start()
>>> print(app.get_command())
clustalo --in ...fa --out ...fa --force --output-order=tree-order --seqtype Protein --guidetree-out ...tree
"""
return " ".join(self._command)
@requires_state(AppState.CREATED)
def set_exec_dir(self, exec_dir):
"""
Set the directory where the application should be executed.
If not set, it will be executed in the working directory at the
time the application was created.
PROTECTED: Do not call from outside.
Parameters
----------
exec_dir : str
The execution directory.
"""
self._exec_dir = exec_dir
@requires_state(AppState.RUNNING | AppState.FINISHED)
def get_process(self):
"""
Get the `Popen` instance.
PROTECTED: Do not call from outside.
Returns
-------
process : Popen
            The `Popen` instance.
"""
return self._process
@requires_state(AppState.FINISHED | AppState.JOINED)
def get_exit_code(self):
"""
Get the exit code of the process.
PROTECTED: Do not call from outside.
Returns
-------
code : int
The exit code.
"""
return self._process.returncode
@requires_state(AppState.FINISHED | AppState.JOINED)
def get_stdout(self):
"""
Get the STDOUT pipe content of the process.
PROTECTED: Do not call from outside.
Returns
-------
stdout : str
The standard output.
"""
return self._stdout
@requires_state(AppState.FINISHED | AppState.JOINED)
def get_stderr(self):
"""
Get the STDERR pipe content of the process.
PROTECTED: Do not call from outside.
Returns
-------
        stderr : str
The standard error.
"""
return self._stderr
    def run(self):
        # Temporarily switch to the execution directory, spawn the process
        # with piped STDOUT/STDERR, then restore the previous working
        # directory.
        cwd = getcwd()
        chdir(self._exec_dir)
        self._command = [self._bin_path] + self._options + self._arguments
        self._process = Popen(
            self._command, stdin=self._stdin_file, stdout=PIPE, stderr=PIPE,
            encoding="UTF-8"
        )
        chdir(cwd)
    def is_finished(self):
        code = self._process.poll()
        if code is None:
            return False
        self._stdout, self._stderr = self._process.communicate()
        return True
@requires_state(AppState.RUNNING | AppState.FINISHED)
def join(self, timeout=None):
        # Overrides the base method: repeated polling via 'is_finished()'
        # is unnecessary, because 'communicate()' already blocks until the
        # application has finished.
try:
self._stdout, self._stderr = self._process.communicate(
timeout=timeout
)
except TimeoutExpired:
self.cancel()
            raise TimeoutError(
                f"The application did not finish within the given "
                f"timeout ({timeout:.1f} s)"
            )
self._state = AppState.FINISHED
try:
self.evaluate()
except AppStateError:
raise
except:
self._state = AppState.CANCELLED
raise
else:
self._state = AppState.JOINED
self.clean_up()
def wait_interval(self):
# Not used in this implementation of 'join()'
raise NotImplementedError()
def evaluate(self):
super().evaluate()
        # Check if the application terminated correctly
exit_code = self.get_exit_code()
if exit_code != 0:
err_msg = self.get_stderr().replace("\n", " ")
raise SubprocessError(
f"'{self._bin_path}' returned with exit code {exit_code}: "
f"{err_msg}"
)
def clean_up(self):
if self.get_app_state() == AppState.CANCELLED:
self._process.kill()
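

# --- Illustration (not part of Biotite) ------------------------------------
# A minimal sketch of a 'LocalApp' subclass, assuming an external 'wc'
# binary is on PATH; the class and method names below are invented for
# this example and do not exist in the Biotite package.
class _WordCountApp(LocalApp):

    def __init__(self, file_name, bin_path="wc"):
        super().__init__(bin_path)
        self._file_name = file_name
        self._count = None

    def run(self):
        # 'start()' invokes 'run()' before switching the state to RUNNING,
        # so the protected setters may still be used here
        self.set_arguments(["-w", self._file_name])
        super().run()

    def evaluate(self):
        # Called after termination; parse the captured STDOUT
        super().evaluate()
        self._count = int(self.get_stdout().split()[0])

    @requires_state(AppState.JOINED)
    def get_word_count(self):
        return self._count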
def cleanup_tempfile(temp_file):
"""
Close a :class:`NamedTemporaryFile` and delete it manually,
if `delete` is set to ``False``.
This function is a small helper function intended for usage in
`LocalApp` subclasses.
The manual deletion is necessary, as Windows does not allow to open
a :class:`NamedTemporaryFile` as second time
(e.g. by the file name), if `delete` is set to ``True``.
Parameters
----------
temp_file : NamedTemporaryFile
The temporary file to be closed and deleted.
"""
temp_file.close()
if not temp_file.delete:
remove(temp_file.name)
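

# Usage sketch for 'cleanup_tempfile' (illustration only; the surrounding
# file-handling logic is an assumption about a typical LocalApp subclass):
def _demo_cleanup_tempfile():
    from tempfile import NamedTemporaryFile
    # 'delete=False' lets the external program reopen the file on Windows
    temp_file = NamedTemporaryFile("w", suffix=".fa", delete=False)
    temp_file.write(">seq\nBIQTITE\n")
    temp_file.flush()
    # ... hand 'temp_file.name' to the wrapped CLI program here ...
    cleanup_tempfile(temp_file)  # closes the handle and removes the file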