| id (int64, 0-300k) | label (string, lengths 1-74, nullable) | text (string, lengths 4k-8k) |
|---|---|---|
4,900 | enum val add documentation | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: reflection
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class EnumVal(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
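# The root table's location is stored as a uoffset at the start of the buffer; Init positions the table accessor there.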
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = EnumVal()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsEnumVal(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def EnumValBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
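# b"\x42\x46\x42\x53" is ASCII "BFBS", the file identifier of binary FlatBuffers schemas.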
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x42\x46\x42\x53", size_prefixed=size_prefixed)
# EnumVal
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# EnumVal
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# EnumVal
def Value(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# EnumVal
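# Field slots map to vtable offsets 4 + 2 * field id; offset 8 is skipped here, presumably belonging to a deprecated field in reflection.fbs.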
def UnionType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from reflection.Type import Type
obj = Type()
obj.Init(self._tab.Bytes, x)
return obj
return None
# EnumVal
def Documentation(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# EnumVal
def DocumentationLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.VectorLen(o)
return 0
# EnumVal
def DocumentationIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
return o == 0
# EnumVal
def Attributes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from reflection.KeyValue import KeyValue
obj = KeyValue()
obj.Init(self._tab.Bytes, x)
return obj
return None
# EnumVal
def AttributesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# EnumVal
def AttributesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
return o == 0
def EnumValStart(builder):
builder.StartObject(6)
def Start(builder):
EnumValStart(builder)
def EnumValAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
EnumValAddName(builder, name)
def EnumValAddValue(builder, value):
builder.PrependInt64Slot(1, value, 0)
def AddValue(builder, value):
EnumValAddValue(builder, value)
def EnumValAddUnionType(builder, unionType):
builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(unionType), 0)
def AddUnionType(builder, unionType):
EnumValAddUnionType(builder, unionType)
def METHOD_NAME(builder, documentation):
builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(documentation), 0)
def AddDocumentation(builder, documentation):
METHOD_NAME(builder, documentation)
def EnumValStartDocumentationVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartDocumentationVector(builder, numElems: int) -> int:
return EnumValStartDocumentationVector(builder, numElems)
def EnumValAddAttributes(builder, attributes):
builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(attributes), 0)
def AddAttributes(builder, attributes):
EnumValAddAttributes(builder, attributes)
def EnumValStartAttributesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartAttributesVector(builder, numElems: int) -> int:
return EnumValStartAttributesVector(builder, numElems)
def EnumValEnd(builder):
return builder.EndObject()
def End(builder):
return EnumValEnd(builder) |
4,901 | tear down | # Copyright (c) 2021, Frappe and Contributors
# See license.txt
import frappe
import unittest
from unittest.mock import patch
from press.press.doctype.app.test_app import create_test_app
from press.press.doctype.marketplace_app.test_marketplace_app import (
create_test_marketplace_app,
)
from press.marketplace.doctype.marketplace_app_plan.test_marketplace_app_plan import (
create_test_marketplace_app_plan,
)
from press.press.doctype.site.test_site import create_test_site
from press.press.doctype.team.test_team import create_test_team
def create_test_marketplace_app_subscription(
site: str = None, app: str = None, plan: str = None, team: str = None
):
app = (
app if app and frappe.db.exists("Marketplace App", app) else create_test_app().name
)
create_test_marketplace_app(app)
plan = plan if plan else create_test_marketplace_app_plan().name
team = team if team else create_test_team().name
site = site if site else create_test_site(team=team).name
subscription = frappe.get_doc(
{
"doctype": "Marketplace App Subscription",
"app": app,
"marketplace_app_plan": plan,
"site": site,
"team": team,
}
).insert(ignore_if_duplicate=True)
return subscription
class TestMarketplaceAppSubscription(unittest.TestCase):
def setUp(self) -> None:
self.marketplace_subscription = create_test_marketplace_app_subscription()
self.subscription = frappe.get_doc(
"Subscription",
{
"marketplace_app_subscription": self.marketplace_subscription.name,
"document_type": "Marketplace App",
"document_name": self.marketplace_subscription.app,
"enabled": 1,
},
)
self.plan = frappe.get_doc("Plan", self.subscription.plan)
def METHOD_NAME(self) -> None:
frappe.db.rollback()
def test_subscription_creation(self):
"""
Check if subscription doc is created and linked after_insert of Marketplace App Subscription
"""
self.assertEqual(
self.subscription.marketplace_app_subscription, self.marketplace_subscription.name
)
def test_subscription_daily(self):
"""
Check if usage records are created for a chargeable document
Check if only one subscription is created and invoice total is correct
"""
today = frappe.utils.getdate()
tomorrow = frappe.utils.add_days(today, 1)
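# One usage record is expected for today and one for tomorrow, both landing on the same draft invoice, hence two days' price.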
desired_value = self.plan.get_price_per_day("INR") * 2
is_last_day_of_month = frappe.utils.data.get_last_day(today) == today
yesterday = frappe.utils.add_days(today, -1)
# Consider yesterday's and today's records instead of today's and tomorrow's:
# the test became flaky on the last day of the month because "tomorrow"
# fell outside the current month's invoice period.
if is_last_day_of_month:
tomorrow = today
today = yesterday
with patch.object(frappe.utils, "today", return_value=today):
self.subscription.create_usage_record()
self.assertTrue(
frappe.db.exists("Usage Record", {"subscription": self.subscription.name})
)
# this should not create duplicate record
self.subscription.create_usage_record()
# time travel to tomorrow
with patch.object(frappe.utils, "today", return_value=tomorrow):
self.subscription.create_usage_record()
invoice = frappe.get_doc(
"Invoice", {"team": self.subscription.team, "status": "Draft"}
)
self.assertEqual(invoice.total, desired_value)
def test_subscription_for_non_chargable_document(self):
def method():
return False
# subscription calls this method when checking if it should create a usage record
self.subscription.can_charge_for_subscription = method
with patch.object(
self.subscription,
"get_subscribed_document",
return_value=self.marketplace_subscription,
):
# shouldn't create a usage record
usage_record = self.subscription.create_usage_record()
self.assertTrue(usage_record is None)
def test_subscription_on_trial_plan(self):
self.plan.price_usd = 0
self.plan.price_inr = 0
self.plan.save()
today = frappe.utils.getdate()
tomorrow = frappe.utils.add_days(today, 1)
with patch.object(frappe.utils, "today", return_value=today):
# shouldn't create a usage record as site is in trial
self.subscription.create_usage_record()
# time travel to tomorrow
with patch.object(frappe.utils, "today", return_value=tomorrow):
# shouldn't create a usage record as site is in trial
self.subscription.create_usage_record()
invoice = frappe.get_doc(
"Invoice", {"team": self.marketplace_subscription.team, "status": "Draft"}
)
self.assertEqual(invoice.total, 0) |
4,902 | assert issue status | """Unit tests for the Jira issue status collector."""
from model.issue_status import IssueStatusCategory
from .base import JiraTestCase
class JiraIssuesTest(JiraTestCase):
"""Unit tests for the Jira issue status collector."""
METRIC_TYPE = "issue_status"
ISSUE_NAME = "Issue name"
CREATED = "1970-01-01T00:00:00.000+0000"
RELEASE_NAME = "1.0"
RELEASE_RELEASED = False
RELEASE_DATE = "3000-01-02"
SPRINT_NAME = "Sprint 1"
SPRINT_STATE = "active"
SPRINT_ENDDATE = "3000-01-01"
def setUp(self):
"""Extend to add an issue tracker to the metric."""
super().setUp()
self.metric["issue_tracker"] = {"type": "jira", "parameters": {"url": "https://jira"}}
self.metric["issue_ids"] = ["FOO-42"]
def METHOD_NAME( # noqa: PLR0913
self,
response,
summary: str | None = None,
connection_error: str | None = None,
parse_error: str | None = None,
status_category: IssueStatusCategory = "todo",
release: bool = False,
sprint: bool = False,
) -> None:
"""Assert that the issue has the expected attributes."""
issue_status = response.as_dict()["issue_status"][0]
self.assertEqual("FOO-42", issue_status["issue_id"])
if summary:
self.assertEqual(summary, issue_status["summary"])
if connection_error or parse_error:
self.assertNotIn("name", issue_status)
self.assertNotIn("status_category", issue_status)
if connection_error:
self.assertIn(connection_error, issue_status["connection_error"])
self.assertNotIn("parse_error", issue_status)
if parse_error:
self.assertIn(parse_error, issue_status["parse_error"])
self.assertNotIn("connection_error", issue_status)
else:
self.assertEqual(self.ISSUE_NAME, issue_status["name"])
self.assertEqual(status_category, issue_status["status_category"])
self.assertEqual(self.CREATED, issue_status["created"])
if sprint:
self.assertEqual(self.SPRINT_NAME, issue_status["sprint_name"])
self.assertEqual(self.SPRINT_STATE, issue_status["sprint_state"])
self.assertEqual(self.SPRINT_ENDDATE, issue_status["sprint_enddate"])
else:
self.assertNotIn("sprint_name", issue_status)
self.assertNotIn("sprint_state", issue_status)
self.assertNotIn("sprint_enddate", issue_status)
if release:
self.assertEqual(self.RELEASE_NAME, issue_status["release_name"])
self.assertEqual(self.RELEASE_RELEASED, issue_status["release_released"])
self.assertEqual(self.RELEASE_DATE, issue_status["release_date"])
else:
self.assertNotIn("release_name", issue_status)
self.assertNotIn("release_released", issue_status)
self.assertNotIn("release_date", issue_status)
self.assertNotIn("connection_error", issue_status)
self.assertNotIn("parse_error", issue_status)
self.assertEqual(
"https://jira/rest/agile/1.0/issue/FOO-42?fields=created,status,summary,updated,duedate,fixVersions,sprint",
issue_status["api_url"],
)
self.assertEqual("https://jira/browse/FOO-42", issue_status["landing_url"])
async def test_issue_status(self):
"""Test that the issue status is returned."""
issue_status_json = {
"fields": {"status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "new"}}, "created": self.CREATED},
}
response = await self.collect(get_request_json_return_value=issue_status_json)
self.METHOD_NAME(response)
async def test_issue_status_doing(self):
"""Test that the issue status is returned."""
issue_status_json = {
"fields": {
"status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "indeterminate"}},
"created": self.CREATED,
},
}
response = await self.collect(get_request_json_return_value=issue_status_json)
self.METHOD_NAME(response, status_category="doing")
async def test_issue_status_done(self):
"""Test that the issue status is returned."""
issue_status_json = {
"fields": {"status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "done"}}, "created": self.CREATED},
}
response = await self.collect(get_request_json_return_value=issue_status_json)
self.METHOD_NAME(response, status_category="done")
async def test_issue_summary(self):
"""Test that the issue summary is returned."""
issue_status_json = {
"fields": {
"status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "new"}},
"summary": "Issue summary",
"created": self.CREATED,
},
}
response = await self.collect(get_request_json_return_value=issue_status_json)
self.METHOD_NAME(response, summary="Issue summary")
async def test_issue_release(self):
"""Test that the issue release is returned."""
issue_status_json = {
"fields": {
"created": self.CREATED,
"status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "done"}},
"fixVersions": [
{"name": self.RELEASE_NAME, "released": self.RELEASE_RELEASED, "releaseDate": self.RELEASE_DATE},
],
},
}
response = await self.collect(get_request_json_return_value=issue_status_json)
self.METHOD_NAME(response, status_category="done", release=True)
async def test_issue_sprint(self):
"""Test that the issue sprint is returned."""
issue_status_json = {
"fields": {
"created": self.CREATED,
"status": {"name": self.ISSUE_NAME, "statusCategory": {"key": "done"}},
"sprint": {"name": self.SPRINT_NAME, "state": self.SPRINT_STATE, "endDate": self.SPRINT_ENDDATE},
},
}
response = await self.collect(get_request_json_return_value=issue_status_json)
self.METHOD_NAME(response, status_category="done", sprint=True)
async def test_connection_error(self):
"""Test that the issue status is returned, even when there is a connection error."""
response = await self.collect(get_request_side_effect=BrokenPipeError)
self.METHOD_NAME(response, connection_error="BrokenPipeError")
async def test_parse_error(self):
"""Test that the issue status is returned, even when there is a parse error."""
issue_status_json = {"fields": {"status": None}}
response = await self.collect(get_request_json_return_value=issue_status_json)
self.METHOD_NAME(response, parse_error="TypeError") |
4,903 | request host | # BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import urllib.parse
from elasticapm.conf import constants
from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
from elasticapm.traces import DroppedSpan, capture_span, execution_context
from elasticapm.utils import default_ports, sanitize_url
from elasticapm.utils.disttracing import TracingOptions
# copied and adapted from urllib.request
def METHOD_NAME(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
parse_result = urllib.parse.urlparse(url)
scheme, host, port = parse_result.scheme, parse_result.hostname, parse_result.port
try:
port = int(port)
except (ValueError, TypeError):
pass
if host == "":
host = request.get_header("Host", "")
if port and port != default_ports.get(scheme):
host = "%s:%s" % (host, port)
return host
class UrllibInstrumentation(AbstractInstrumentedModule):
name = "urllib"
instrument_list = [("urllib.request", "AbstractHTTPHandler.do_open")]
def call(self, module, method, wrapped, instance, args, kwargs):
request_object = args[1] if len(args) > 1 else kwargs["req"]
method = request_object.get_method()
host = METHOD_NAME(request_object)
url = sanitize_url(request_object.get_full_url())
signature = method.upper() + " " + host
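# The span name combines HTTP method and host, e.g. "GET example.com:8080".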
transaction = execution_context.get_transaction()
with capture_span(
signature,
span_type="external",
span_subtype="http",
extra={"http": {"url": url}},
leaf=True,
) as span:
# if urllib has been called in a leaf span, this span might be a DroppedSpan.
leaf_span = span
while isinstance(leaf_span, DroppedSpan):
leaf_span = leaf_span.parent
parent_id = leaf_span.id if leaf_span else transaction.id
trace_parent = transaction.trace_parent.copy_from(
span_id=parent_id, trace_options=TracingOptions(recorded=True)
)
self._set_disttracing_headers(request_object, trace_parent, transaction)
if leaf_span:
leaf_span.dist_tracing_propagated = True
response = wrapped(*args, **kwargs)
if response:
status = getattr(response, "status", None) or response.getcode() # Python 2 compat
if span.context:
span.context["http"]["status_code"] = status
span.set_success() if status < 400 else span.set_failure()
return response
def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):
request_object = args[1] if len(args) > 1 else kwargs["req"]
# since we don't have a span, we set the span id to the transaction id
trace_parent = transaction.trace_parent.copy_from(
span_id=transaction.id, trace_options=TracingOptions(recorded=False)
)
self._set_disttracing_headers(request_object, trace_parent, transaction)
return args, kwargs
def _set_disttracing_headers(self, request_object, trace_parent, transaction):
trace_parent_str = trace_parent.to_string()
request_object.add_header(constants.TRACEPARENT_HEADER_NAME, trace_parent_str)
if transaction.tracer.config.use_elastic_traceparent_header:
request_object.add_header(constants.TRACEPARENT_LEGACY_HEADER_NAME, trace_parent_str)
if trace_parent.tracestate:
request_object.add_header(constants.TRACESTATE_HEADER_NAME, trace_parent.tracestate) |
4,904 | dispatch | import hmac
import logging
from hashlib import sha256
from django.http import HttpRequest, HttpResponse
from django.utils.crypto import constant_time_compare
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from sentry.api import client
from sentry.exceptions import HookValidationError
from sentry.models import ApiKey, Project, ProjectOption
from sentry.plugins.base import plugins
from sentry.utils import json
logger = logging.getLogger("sentry.webhooks")
class ReleaseWebhookView(View):
def verify(self, plugin_id, project_id, token, signature):
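# The signature is expected to be the hex HMAC-SHA256 of "<plugin_id>-<project_id>" keyed with the project's release token; constant_time_compare guards against timing attacks.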
return constant_time_compare(
signature,
hmac.new(
key=token.encode("utf-8"),
msg=(f"{plugin_id}-{project_id}").encode(),
digestmod=sha256,
).hexdigest(),
)
@method_decorator(csrf_exempt)
def METHOD_NAME(self, *args, **kwargs):
return super().METHOD_NAME(*args, **kwargs)
def _handle_builtin(self, request: HttpRequest, project):
endpoint = f"/projects/{project.organization.slug}/{project.slug}/releases/"
try:
data = json.loads(request.body)
except json.JSONDecodeError as exc:
return HttpResponse(
status=400,
content=json.dumps({"error": str(exc)}),
content_type="application/json",
)
try:
# Ideally the API client would support some kind of god-mode here
# as we've already confirmed credentials and simply want to execute
# the view code. Instead we hack around it with an ApiKey instance
god = ApiKey(organization_id=project.organization_id, scope_list=["project:write"])
resp = client.post(endpoint, data=data, auth=god)
except client.ApiError as exc:
return HttpResponse(
status=exc.status_code,
content=json.dumps(exc.body),
content_type="application/json",
)
return HttpResponse(
status=resp.status_code, content=json.dumps(resp.data), content_type="application/json"
)
def post(self, request: HttpRequest, plugin_id, project_id, signature) -> HttpResponse:
try:
project = Project.objects.get_from_cache(id=project_id)
except Project.DoesNotExist:
logger.warning(
"release-webhook.invalid-project",
extra={"project_id": project_id, "plugin_id": plugin_id},
)
return HttpResponse(status=404)
logger.info(
"release-webhook.incoming", extra={"project_id": project_id, "plugin_id": plugin_id}
)
token = ProjectOption.objects.get_value(project, "sentry:release-token")
if token is None:
logger.warning(
"release-webhook.missing-token",
extra={"project_id": project_id, "plugin_id": plugin_id},
)
return HttpResponse(status=403)
if not self.verify(plugin_id, project_id, token, signature):
logger.warning(
"release-webhook.invalid-signature",
extra={"project_id": project_id, "plugin_id": plugin_id},
)
return HttpResponse(status=403)
if plugin_id == "builtin":
return self._handle_builtin(request, project)
plugin = plugins.get(plugin_id)
if not plugin.is_enabled(project):
logger.warning(
"release-webhook.plugin-disabled",
extra={"project_id": project_id, "plugin_id": plugin_id},
)
return HttpResponse(status=403)
cls = plugin.get_release_hook()
hook = cls(project)
try:
hook.handle(request)
except HookValidationError as exc:
return HttpResponse(
status=400,
content=json.dumps({"error": str(exc)}),
content_type="application/json",
)
return HttpResponse(status=204) |
4,905 | generate script | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import io
import sys
import os
import argparse
from subprocess import PIPE, Popen
from pathlib import Path
from psychopy import __version__
# DO NOT IMPORT ANY OTHER PSYCHOPY SUB-PACKAGES OR THEY WON'T SWITCH VERSIONS
parser = argparse.ArgumentParser(description='Compile your python file from here')
parser.add_argument('infile', help='The input (psyexp) file to be compiled')
parser.add_argument('--version', '-v', help='The PsychoPy version to use for compiling the script. e.g. 1.84.1')
parser.add_argument('--outfile', '-o', help='The output (py) file to be generated (defaults to the input file with a .py extension)')
class LegacyScriptError(ChildProcessError):
pass
def METHOD_NAME(experimentPath, exp, target="PsychoPy"):
"""
Generate python script from the current builder experiment.
Parameters
----------
experimentPath: str
Experiment path and filename
exp: experiment.Experiment object
The current PsychoPy experiment object generated using Builder
target: str
PsychoPy or PsychoJS - determines whether Python or JS script is generated.
Returns
-------
"""
import logging # import here not at top of script (or useVersion fails)
print("Generating {} script...\n".format(target))
exp.expPath = os.path.abspath(experimentPath)
if sys.platform == 'win32': # get name of executable
pythonExec = sys.executable
else:
pythonExec = sys.executable.replace(' ', r'\ ')
filename = experimentPath
# compile script from command line using version
compiler = 'psychopy.scripts.psyexpCompile'
# if version is not specified then don't touch useVersion at all
version = exp.settings.params['Use version'].val
# if useVersion is different to installed version...
if version not in [None, 'None', '', __version__]:
# make sure we have a legacy save file
if not Path(exp.legacyFilename).is_file():
exp.saveToXML(filename=exp.filename)
# generate command to run compile from requested version
cmd = [
pythonExec, '-m', compiler, str(exp.legacyFilename), '-o', experimentPath
]
# run command
cmd.extend(['-v', version])
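# Resulting call: <python> -m psychopy.scripts.psyexpCompile <legacy .psyexp> -o <outfile> -v <version>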
logging.info(' '.join(cmd))
output = Popen(cmd,
stdout=PIPE,
stderr=PIPE,
universal_newlines=True)
stdout, stderr = output.communicate()
sys.stdout.write(stdout)
sys.stderr.write(stderr)
# we got a non-zero error code, raise an error
if output.returncode != 0:
raise LegacyScriptError(
'Error: Script compile exited with code {}. Traceback:\n'
'{}'.format(output.returncode, stderr))
else:
compileScript(infile=exp, version=None, outfile=filename)
def compileScript(infile=None, version=None, outfile=None):
"""
Compile either Python or JS PsychoPy script from .psyexp file.
Parameters
----------
infile: string, experiment.Experiment object
The input (psyexp) file to be compiled
version: str
The PsychoPy version to use for compiling the script. e.g. 1.84.1.
Warning: Cannot set version if module imported. Set version from
command line interface only.
outfile: string
The output file to be generated (defaults to Python script).
"""
def _setVersion(version):
"""
Sets the version to be used for compiling using the useVersion function
Parameters
----------
version: string
The version requested
"""
# Set version
if version:
from psychopy import useVersion
useVersion(version)
# import logging here not at top of script (or useVersion fails)
global logging
from psychopy import logging
if __name__ != '__main__' and version not in [None, 'None', 'none', '']:
version = None
msg = "You cannot set version by calling compileScript() manually. Setting 'version' to None."
logging.warning(msg)
return version
def _getExperiment(infile, version):
"""
Get experiment if infile is not type experiment.Experiment.
Parameters
----------
infile: string, experiment.Experiment object
The input (psyexp) file to be compiled
version: string
The version requested
Returns
-------
experiment.Experiment
The experiment object used for generating the experiment script
"""
# import PsychoPy experiment and write script with useVersion active
from psychopy.app.builder import experiment
# Check infile type
if isinstance(infile, experiment.Experiment):
thisExp = infile
else:
thisExp = experiment.Experiment()
thisExp.loadFromXML(infile)
thisExp.psychopyVersion = version
return thisExp
def _setTarget(outfile):
"""
Set target for compiling i.e., Python or JavaScript.
Parameters
----------
outfile : string
The output file to be generated (defaults to Python script).
Returns
-------
string
The Python or JavaScript target type
"""
# Set output type, either JS or Python
if outfile.endswith(".js"):
targetOutput = "PsychoJS"
else:
targetOutput = "PsychoPy"
return targetOutput
def _makeTarget(thisExp, outfile, targetOutput):
"""
Generate the actual scripts for Python and/or JS.
Parameters
----------
thisExp : experiment.Experiment object
The current experiment created under requested version
outfile : string
The output file to be generated (defaults to Python script).
targetOutput : string
The Python or JavaScript target type
"""
# Write script
if targetOutput == "PsychoJS":
# Write module JS code
script = thisExp.writeScript(outfile, target=targetOutput, modular=True)
# Write no module JS code
outfileNoModule = outfile.replace('.js', '-legacy-browsers.js') # For no JS module script
scriptNoModule = thisExp.writeScript(outfileNoModule, target=targetOutput, modular=False)
# Store scripts in list
scriptDict = {'outfile': script, 'outfileNoModule': scriptNoModule}
else:
script = thisExp.writeScript(outfile, target=targetOutput)
scriptDict = {'outfile': script}
# Output script to file
for scripts in scriptDict:
if not type(scriptDict[scripts]) in (str, type(u'')):
# We have a stringBuffer not plain string/text
scriptText = scriptDict[scripts].getvalue()
else:
# We already have the text
scriptText = scriptDict[scripts]
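# eval() turns the dict key ("outfile" or "outfileNoModule") back into the matching local filename variable.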
with io.open(eval(scripts), 'w', encoding='utf-8-sig') as f:
f.write(scriptText)
return 1
###### Write script #####
version = _setVersion(version)
thisExp = _getExperiment(infile, version)
targetOutput = _setTarget(outfile)
_makeTarget(thisExp, outfile, targetOutput)
if __name__ == "__main__":
# define args
args = parser.parse_args()
if args.outfile is None:
args.outfile = args.infile.replace(".psyexp", ".py")
compileScript(args.infile, args.version, args.outfile) |
4,906 | auto | """
Support for Alternatives system
:codeauthor: Radek Rada <radek.rada@gmail.com>
"""
import logging
import os
import salt.utils.files
import salt.utils.path
__outputter__ = {
"display": "txt",
"install": "txt",
"remove": "txt",
}
log = logging.getLogger(__name__)
# Don't shadow built-in's.
__func_alias__ = {"set_": "set"}
def __virtual__():
"""
Only if alternatives dir is available
"""
if os.path.isdir("/etc/alternatives"):
return True
return (False, "Cannot load alternatives module: /etc/alternatives dir not found")
def _get_cmd():
"""
The alternatives command name differs across distributions
"""
if __grains__["os_family"] == "RedHat":
return "alternatives"
return "update-alternatives"
def display(name):
"""
Display alternatives settings for defined command name
CLI Example:
.. code-block:: bash
salt '*' alternatives.display editor
"""
cmd = [_get_cmd(), "--display", name]
out = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
if out["retcode"] > 0 and out["stderr"] != "":
return out["stderr"]
return out["stdout"]
def _read_alternative_link_directly(name, path):
try:
with salt.utils.files.fopen(os.path.join(path, name), "rb") as r_file:
contents = salt.utils.stringutils.to_unicode(r_file.read())
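# In the alternatives admin file, the first line records the mode (auto/manual) and the second line the master link path.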
return contents.splitlines(True)[1].rstrip("\n")
except OSError:
log.error("alternatives: %s does not exist", name)
except (OSError, IndexError) as exc: # pylint: disable=duplicate-except
log.error(
"alternatives: unable to get master link for %s. Exception: %s",
name,
exc,
)
return False
def _read_alternative_link_with_command(name):
cmd = [_get_cmd(), "--query", name]
out = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
if out["retcode"] > 0 and out["stderr"] != "":
return False
first_block = out["stdout"].split("\n\n", 1)[0]
for line in first_block.split("\n"):
if line.startswith("Link:"):
return line.split(":", 1)[1].strip()
return False
def show_link(name):
"""
Display master link for the alternative
.. versionadded:: 2015.8.13,2016.3.4,2016.11.0
CLI Example:
.. code-block:: bash
salt '*' alternatives.show_link editor
"""
if __grains__["os_family"] == "RedHat":
return _read_alternative_link_directly(name, "/var/lib/alternatives")
elif __grains__["os_family"] == "Suse":
return _read_alternative_link_directly(name, "/var/lib/rpm/alternatives")
else:
# Debian based systems
return _read_alternative_link_with_command(name)
def show_current(name):
"""
Display the current highest-priority alternative for a given alternatives
link
CLI Example:
.. code-block:: bash
salt '*' alternatives.show_current editor
"""
try:
return _read_link(name)
except OSError:
log.error("alternative: %s does not exist", name)
return False
def check_exists(name, path):
"""
Check if the given path is an alternative for a name.
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' alternatives.check_exists name path
"""
cmd = [_get_cmd(), "--display", name]
out = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
if out["retcode"] > 0 and out["stderr"] != "":
return False
return any(line.startswith(path) for line in out["stdout"].splitlines())
def check_installed(name, path):
"""
Check if the current highest-priority match for a given alternatives link
is set to the desired path
CLI Example:
.. code-block:: bash
salt '*' alternatives.check_installed name path
"""
try:
return _read_link(name) == path
except OSError:
return False
def install(name, link, path, priority):
"""
Install symbolic links determining default commands
CLI Example:
.. code-block:: bash
salt '*' alternatives.install editor /usr/bin/editor /usr/bin/emacs23 50
"""
cmd = [_get_cmd(), "--install", link, name, path, str(priority)]
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if out["retcode"] > 0 and out["stderr"] != "":
return out["stderr"]
return out["stdout"]
def remove(name, path):
"""
Remove symbolic links determining the default commands.
CLI Example:
.. code-block:: bash
salt '*' alternatives.remove name path
"""
cmd = [_get_cmd(), "--remove", name, path]
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if out["retcode"] > 0:
return out["stderr"]
return out["stdout"]
def METHOD_NAME(name):
"""
Trigger alternatives to set the path for <name> as
specified by priority.
CLI Example:
.. code-block:: bash
salt '*' alternatives.auto name
"""
cmd = [_get_cmd(), "--auto", name]
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if out["retcode"] > 0:
return out["stderr"]
return out["stdout"]
def set_(name, path):
"""
Manually set the alternative <path> for <name>.
CLI Example:
.. code-block:: bash
salt '*' alternatives.set name path
"""
cmd = [_get_cmd(), "--set", name, path]
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if out["retcode"] > 0:
return out["stderr"]
return out["stdout"]
def _read_link(name):
"""
Read the link from /etc/alternatives
Throws an OSError if the link does not exist
"""
alt_link_path = "/etc/alternatives/{}".format(name)
return salt.utils.path.readlink(alt_link_path) |
4,907 | get sliding window org sample rate | from calendar import IllegalMonthError, monthrange
from datetime import datetime, timezone
from typing import Optional
import sentry_sdk
from sentry import options
from sentry.dynamic_sampling.rules.utils import get_redis_client_for_ds
# In case a misconfiguration happens on the server side which makes the option invalid, we want to define a fallback
# sliding window size, which in this case will be 24 hours.
FALLBACK_SLIDING_WINDOW_SIZE = 24
# Sentinel value used to mark that an error happened when computing the sliding window sample rate for a specific
# project.
SLIDING_WINDOW_CALCULATION_ERROR = "sliding_window_error"
# We want to keep the entry for 1 hour, so that in case an org is not considered for 1 hour, the system will fall back
# to the blended sample rate.
# Important: this TTL should be a factor of the cron schedule for dynamic-sampling-sliding-window/-org located in
# sentry.conf.server.py.
EXECUTED_CACHE_KEY_TTL = 60 * 60 * 1000
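# redis pexpire takes milliseconds, so 60 * 60 * 1000 is one hour.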
def generate_sliding_window_executed_cache_key() -> str:
return "ds::sliding_window_executed"
def mark_sliding_window_executed() -> None:
redis_client = get_redis_client_for_ds()
cache_key = generate_sliding_window_executed_cache_key()
redis_client.set(cache_key, 1)
redis_client.pexpire(cache_key, EXECUTED_CACHE_KEY_TTL)
def was_sliding_window_executed() -> bool:
redis_client = get_redis_client_for_ds()
cache_key = generate_sliding_window_executed_cache_key()
return bool(redis_client.exists(cache_key))
def generate_sliding_window_cache_key(org_id: int) -> str:
return f"ds::o:{org_id}:sliding_window"
def get_sliding_window_sample_rate(
org_id: int, project_id: int, error_sample_rate_fallback: float
) -> Optional[float]:
redis_client = get_redis_client_for_ds()
cache_key = generate_sliding_window_cache_key(org_id=org_id)
try:
value = redis_client.hget(cache_key, project_id)
# In case we had an explicit error or the sliding window was not run, we want to return the error fallback
# sample rate.
if value == SLIDING_WINDOW_CALCULATION_ERROR:
sentry_sdk.capture_message("Sliding window calculation error stored in cache")
return error_sample_rate_fallback
return float(value)
# Thrown if the input is not a string or a float (e.g., None).
except TypeError:
# In case we couldn't convert the value to float, that is, it is a string or the value is not there, we want
# to fall back to 100% in case we know that the sliding window was executed. We track whether the task was
# executed and completed successfully under the assumption that, if that is the case, all orgs and projects
# with at least 1 metric were considered, thus if they are not in cache, we assume they had 0 metrics.
if was_sliding_window_executed():
return 1.0
# In the other case were the sliding window was not run, maybe because of an issue, we will just fallback to
# blended sample rate, to avoid oversampling.
sentry_sdk.capture_message(
"Sliding window value not stored in cache and sliding window not executed"
)
return error_sample_rate_fallback
# Thrown if the input is not a valid float.
except ValueError:
sentry_sdk.capture_message("Invalid sliding window value stored in cache")
return error_sample_rate_fallback
def generate_sliding_window_org_executed_cache_key() -> str:
return "ds::sliding_window_org_executed"
def mark_sliding_window_org_executed() -> None:
redis_client = get_redis_client_for_ds()
cache_key = generate_sliding_window_org_executed_cache_key()
redis_client.set(cache_key, 1)
redis_client.pexpire(cache_key, EXECUTED_CACHE_KEY_TTL)
def was_sliding_window_org_executed() -> bool:
redis_client = get_redis_client_for_ds()
cache_key = generate_sliding_window_org_executed_cache_key()
return bool(redis_client.exists(cache_key))
def generate_sliding_window_org_cache_key(org_id: int) -> str:
return f"ds::o:{org_id}:sliding_window_org_sample_rate"
def METHOD_NAME(
org_id: int, default_sample_rate: Optional[float] = None
) -> Optional[float]:
redis_client = get_redis_client_for_ds()
cache_key = generate_sliding_window_org_cache_key(org_id)
try:
return float(redis_client.get(cache_key))
except (TypeError, ValueError):
return default_sample_rate
def get_sliding_window_size() -> Optional[int]:
try:
size = options.get("dynamic-sampling:sliding_window.size")
# We want to explicitly handle the None case, which will signal that the system should be stopped.
return None if size is None else int(size)
except ValueError:
# In case the value set is invalid, we will fallback to a default value, to keep the system up and running.
return FALLBACK_SLIDING_WINDOW_SIZE
def extrapolate_monthly_volume(volume: int, hours: int) -> Optional[int]:
# We don't support a lower granularity than 1 hour.
if hours < 1:
return None
# Get current year and month
year = datetime.now(tz=timezone.utc).year
month = datetime.now(tz=timezone.utc).month
try:
# Get number of days in the month.
_, days_in_month = monthrange(year=year, month=month)
# We compute the number of hours in a month.
hours_in_month = days_in_month * 24
# We compute how many groups of hours can fit in a month.
groups_of_hours = hours_in_month / hours
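# Example: a volume of 1000 over a 24-hour window in a 30-day month extrapolates to 1000 * (720 / 24) = 30000.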
# Given n groups we just multiply the volume per group of hours.
return int(volume * groups_of_hours)
except IllegalMonthError:
return None |
4,908 | test bandpass | import unittest
import numpy as np
from tests.utils_testing import get_path_for_data_file
from urh.signalprocessing.Filter import Filter
from urh.signalprocessing.Modulator import Modulator
from urh.signalprocessing.Signal import Signal
import array
from matplotlib import pyplot as plt
from urh.cythonext import signal_functions
from urh.signalprocessing.Spectrogram import Spectrogram
class SpectrogramTest(unittest.TestCase):
""" short time fourier transform of audio signal """
def stft(self, samples, window_size, overlap_factor=0.5, window_function=np.hanning):
"""
Perform Short-time Fourier transform to get the spectrogram for the given samples
:param samples: Complex samples
:param window_size: Size of DFT window
:param overlap_factor: Value between 0 (= No Overlapping) and 1 (= Full overlapping) of windows
:param window_function: Function for DFT window
:return: short-time Fourier transform of the given signal
"""
window = window_function(window_size)
# hop size determines by how many samples the window is advanced
hop_size = window_size - int(overlap_factor * window_size)
# pad with zeros to ensure last window fits signal
padded_samples = np.append(samples, np.zeros((hop_size - (len(samples) - window_size) % hop_size) % hop_size))
num_frames = ((len(padded_samples) - window_size) // hop_size) + 1
frames = [padded_samples[i*hop_size:i*hop_size+window_size] * window for i in range(num_frames)]
return np.fft.fft(frames)
def setUp(self):
self.signal = Signal(get_path_for_data_file("two_participants.complex16s"), "test")
def test_numpy_impl(self):
sample_rate = 1e6
spectrogram = np.fft.fftshift(self.stft(self.signal.iq_array.data, 2**10, overlap_factor=0.5)) / 1024
ims = 10 * np.log10(spectrogram.real ** 2 + spectrogram.imag ** 2) # convert amplitudes to decibel
num_time_bins, num_freq_bins = np.shape(ims)
plt.imshow(np.transpose(ims), aspect="auto", cmap="magma")
plt.colorbar()
plt.xlabel("time in seconds")
plt.ylabel("frequency in Hz")
plt.ylim(0, num_freq_bins)
x_tick_pos = np.linspace(0, num_time_bins - 1, 5, dtype=np.float32)
plt.xticks(x_tick_pos, ["%.02f" % l for l in (x_tick_pos * len(self.signal.iq_array.data) / num_time_bins) / sample_rate])
y_tick_pos = np.linspace(0, num_freq_bins - 1, 10, dtype=np.int16)
frequencies = np.fft.fftshift(np.fft.fftfreq(num_freq_bins, 1/sample_rate))
plt.yticks(y_tick_pos, ["%.02f" % frequencies[i] for i in y_tick_pos])
plt.show()
def narrowband_iir(self, fc, bw, fs):
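# Second-order narrow band-pass IIR recipe (cf. Smith, "The Scientist and Engineer's Guide to DSP", ch. 19): R sets the pole radius from the bandwidth, K normalizes the gain at fc.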
fc /= fs
bw /= fs
R = 1 - 3 * bw
K = (1 - 2 * R * np.cos(2 * np.pi * fc) + R ** 2) / (2 - 2*np.cos(2 * np.pi * fc))
a = np.array([K, -2*K*np.cos(2 * np.pi * fc), K], dtype=np.float64)
b = np.array([2 * R * np.cos(2 * np.pi * fc), -R**2], dtype=np.float64)
return a, b
def METHOD_NAME(self):
# Generate a noisy signal
fs = 2000
T = 0.1
nsamples = int(T * fs)
t = np.linspace(0, T, nsamples, endpoint=False)
a = 0.02
f0 = 600
x = 0.25 * np.sin(2 * np.pi * 0.25*f0 * t)
x += 0.25 * np.sin(2 * np.pi * f0 * t)
x += 0.25 * np.sin(2 * np.pi * 2*f0 * t)
x += 0.25 * np.sin(2 * np.pi * 3*f0 * t)
import time
lowcut = f0 - 200
highcut = f0 + 200
# Define the parameters
fc = f0 / fs
b = 0.05
data = x
y = Filter.apply_bandpass_filter(data, lowcut / fs, highcut / fs, filter_bw=b)
plt.plot(y, label='Filtered signal (%g Hz)' % f0)
plt.plot(data, label='Noisy signal')
plt.legend(loc='upper left')
plt.show()
def test_iir_bandpass(self):
# Generate a noisy signal
fs = 2400
T = 6
nsamples = int(T * fs)
t = np.linspace(0, T, nsamples, endpoint=False)
a = 0.02
f0 = 300
x = 0.5 * np.sin(2 * np.pi * f0 * t)
x += 0.25 * np.sin(2 * np.pi * 2 * f0 * t)
x += 0.25 * np.sin(2 * np.pi * 3 * f0 * t)
#data = x.astype(np.complex64)
data = np.sin(2 * np.pi * f0 * t).astype(np.complex64)
print("Len data", len(data))
a, b = self.narrowband_iir(f0, 100, fs)
s = a.sum() + b.sum()
#a /= s
#b /= s
print(a, b)
filtered_data = signal_functions.iir_filter(a, b, data)
#plt.plot(data, label='Noisy signal')
plt.plot(np.fft.fft(filtered_data), label='Filtered signal (%g Hz)' % f0)
plt.legend(loc='upper left')
plt.show()
def test_channels(self):
sample_rate = 10 ** 6
channel1_freq = 40 * 10 ** 3
channel2_freq = 240 * 10 ** 3
channel1_data = array.array("B", [1, 0, 1, 0, 1, 0, 0, 1])
channel2_data = array.array("B", [1, 1, 0, 0, 1, 1, 0, 1])
channel3_data = array.array("B", [1, 0, 0, 1, 0, 1, 1, 1])
filter_bw = 0.1
filter_freq1_high = 1.5 * channel1_freq
filter_freq1_low = 0.5 * channel1_freq
filter_freq2_high = 1.5*channel2_freq
filter_freq2_low = 0.5 * channel2_freq
modulator1, modulator2, modulator3 = Modulator("test"), Modulator("test2"), Modulator("test3")
modulator1.carrier_freq_hz = channel1_freq
modulator2.carrier_freq_hz = channel2_freq
modulator3.carrier_freq_hz = -channel2_freq
modulator1.sample_rate = modulator2.sample_rate = modulator3.sample_rate = sample_rate
data1 = modulator1.modulate(channel1_data)
data2 = modulator2.modulate(channel2_data)
data3 = modulator3.modulate(channel3_data)
mixed_signal = data1 + data2 + data3
mixed_signal.tofile("/tmp/three_channels.complex")
plt.subplot("221")
plt.title("Signal")
plt.plot(mixed_signal)
spectrogram = Spectrogram(mixed_signal)
plt.subplot("222")
plt.title("Spectrogram")
plt.imshow(np.transpose(spectrogram.data), aspect="auto", cmap="magma")
plt.ylim(0, spectrogram.freq_bins)
chann1_filtered = Filter.apply_bandpass_filter(mixed_signal, filter_freq1_low / sample_rate, filter_freq1_high / sample_rate, filter_bw)
plt.subplot("223")
plt.title("Channel 1 Filtered ({})".format("".join(map(str, channel1_data))))
plt.plot(chann1_filtered)
chann2_filtered = Filter.apply_bandpass_filter(mixed_signal, filter_freq2_low / sample_rate, filter_freq2_high / sample_rate, filter_bw)
plt.subplot("224")
plt.title("Channel 2 Filtered ({})".format("".join(map(str, channel2_data))))
plt.plot(chann2_filtered)
plt.show()
def test_bandpass_h(self):
f_low = -0.4
f_high = -0.3
bw = 0.01
f_shift = (f_low + f_high) / 2
f_c = (f_high - f_low) / 2
N = Filter.get_filter_length_from_bandwidth(bw)
h = Filter.design_windowed_sinc_lpf(f_c, bw=bw) * np.exp(1j * np.pi * 2 * f_shift * np.arange(0, N, dtype=complex))
#h = Filter.design_windowed_sinc_bandpass(f_low=f_low, f_high=f_high, bw=bw)
#h = Filter.design_windowed_sinc_lpf(0.42, bw=0.08)
impulse = np.exp(1j * np.linspace(0, 1, 50))
plt.subplot("221")
plt.title("f_low={} f_high={} bw={}".format(f_low, f_high, bw))
plt.plot(np.fft.fftfreq(1024), np.fft.fft(h, 1024))
plt.subplot("222")
plt.plot(h)
plt.show()
# h = cls.design_windowed_sinc_bandpass(f_low, f_high, filter_bw) |
4,909 | test from euler | import logging
from unittest import TestCase
import numpy as np
import pytest
from scipy.spatial.transform import Rotation as sp_rot
from aspire.utils import Rotation, utest_tolerance
logger = logging.getLogger(__name__)
class UtilsTestCase(TestCase):
def setUp(self):
self.dtype = np.float32
self.num_rots = 32
self.rot_obj = Rotation.generate_random_rotations(
self.num_rots, seed=0, dtype=self.dtype
)
self.angles = self.rot_obj.angles
self.matrices = self.rot_obj.matrices
def testRotMatrices(self):
rot_ref = sp_rot.from_matrix(self.matrices)
matrices = rot_ref.as_matrix().astype(self.dtype)
self.assertTrue(
np.allclose(self.matrices, matrices, atol=utest_tolerance(self.dtype))
)
def testRotAngles(self):
rot_ref = sp_rot.from_euler("ZYZ", self.angles, degrees=False)
angles = rot_ref.as_euler("ZYZ", degrees=False).astype(self.dtype)
self.assertTrue(np.allclose(self.angles, angles))
def testFromMatrix(self):
rot_ref = sp_rot.from_matrix(self.matrices)
angles = rot_ref.as_euler("ZYZ", degrees=False).astype(self.dtype)
rot = Rotation.from_matrix(self.matrices, dtype=self.dtype)
self.assertTrue(np.allclose(rot.angles, angles))
def METHOD_NAME(self):
rot_ref = sp_rot.from_euler("ZYZ", self.angles, degrees=False)
matrices = rot_ref.as_matrix().astype(self.dtype)
rot = Rotation.from_euler(self.angles, dtype=self.dtype)
self.assertTrue(np.allclose(rot._matrices, matrices))
def testInvert(self):
rot_mat = self.rot_obj.matrices
rot_mat_t = self.rot_obj.invert()
self.assertTrue(np.allclose(rot_mat_t, np.transpose(rot_mat, (0, 2, 1))))
def testMultiplication(self):
result = (self.rot_obj * self.rot_obj.invert()).matrices
for i in range(len(self.rot_obj)):
self.assertTrue(
np.allclose(np.eye(3), result[i], atol=utest_tolerance(self.dtype))
)
def testRegisterRots(self):
q_mat = Rotation.generate_random_rotations(1, dtype=self.dtype)[0]
for flag in [0, 1]:
regrots_ref = self.rot_obj.apply_registration(q_mat, flag)
q_mat_est, flag_est = self.rot_obj.find_registration(regrots_ref)
self.assertTrue(
np.allclose(flag_est, flag)
and np.allclose(q_mat_est, q_mat, atol=utest_tolerance(self.dtype))
)
def testRegister(self):
# These will yield two more distinct sets of random rotations wrt self.rot_obj
set1 = Rotation.generate_random_rotations(self.num_rots, dtype=self.dtype)
set2 = Rotation.generate_random_rotations(
self.num_rots, dtype=self.dtype, seed=7
)
# Align both sets of random rotations to rot_obj
aligned_rots1 = self.rot_obj.register(set1)
aligned_rots2 = self.rot_obj.register(set2)
self.assertTrue(aligned_rots1.mse(aligned_rots2) < utest_tolerance(self.dtype))
self.assertTrue(aligned_rots2.mse(aligned_rots1) < utest_tolerance(self.dtype))
def testMSE(self):
q_ang = [np.random.random(3)]
q_mat = sp_rot.from_euler("ZYZ", q_ang, degrees=False).as_matrix()[0]
for flag in [0, 1]:
regrots_ref = self.rot_obj.apply_registration(q_mat, flag)
mse = self.rot_obj.mse(regrots_ref)
self.assertTrue(mse < utest_tolerance(self.dtype))
def testCommonLines(self):
ell_ij, ell_ji = self.rot_obj.common_lines(8, 11, 360)
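# The rotations were generated with seed=0 in setUp, so the common-line indices between rotations 8 and 11 are deterministic.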
self.assertTrue(ell_ij == 235 and ell_ji == 284)
def testString(self):
logger.debug(str(self.rot_obj))
def testRepr(self):
logger.debug(repr(self.rot_obj))
def testLen(self):
self.assertTrue(len(self.rot_obj) == self.num_rots)
def testSetterGetter(self):
# Excute set
tmp = np.arange(9).reshape((3, 3))
self.rot_obj[13] = tmp
# Execute get
self.assertTrue(np.all(self.rot_obj[13] == tmp))
def testDtype(self):
self.assertTrue(self.dtype == self.rot_obj.dtype)
def testFromRotvec(self):
# Build random rotation vectors.
axis = np.array([1, 0, 0], dtype=self.dtype)
angles = np.random.uniform(0, 2 * np.pi, 10)
rot_vecs = np.array([angle * axis for angle in angles], dtype=self.dtype)
# Build rotations using from_rotvec and about_axis (as reference).
rotations = Rotation.from_rotvec(rot_vecs, dtype=self.dtype)
ref_rots = Rotation.about_axis("x", angles, dtype=self.dtype)
self.assertTrue(isinstance(rotations, Rotation))
self.assertTrue(rotations.matrices.dtype == self.dtype)
self.assertTrue(np.allclose(rotations.matrices, ref_rots.matrices))
def test_angle_dist():
dtype = np.float32
angles = np.array([i * np.pi / 360 for i in range(360)], dtype=dtype)
rots = Rotation.about_axis("x", angles, dtype=dtype)
# Calculate the angular distance between the identity, rots[0],
# and rotations by multiples of pi/360 about the x-axis.
# These should be equal to `angles`.
angular_dist = Rotation.angle_dist(rots[0], rots, dtype)
assert np.allclose(angles, angular_dist, atol=utest_tolerance(dtype))
# Test incompatible shape error.
with pytest.raises(ValueError, match=r"r1 and r2 are not broadcastable*"):
_ = Rotation.angle_dist(rots[:3], rots[:5])
def test_mean_angular_distance():
rots_z = Rotation.about_axis(
"z", [0, np.pi / 4, np.pi / 2], dtype=np.float32
).matrices
rots_id = Rotation.about_axis("z", [0, 0, 0], dtype=np.float32).matrices
mean_ang_dist = Rotation.mean_angular_distance(rots_z, rots_id)
assert np.allclose(mean_ang_dist, np.pi / 4) |
4,910 | test configure duo behavior | from unittest import TestCase
import requests_mock
from parameterized import parameterized
from hvac.adapters import JSONAdapter
from hvac.api.auth_methods import Mfa
from hvac.api.auth_methods.github import DEFAULT_MOUNT_POINT
class TestMfa(TestCase):
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "cathub"),
]
)
@requests_mock.Mocker()
def test_configure(self, test_label, mount_point, requests_mocker):
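# requests_mock.Mocker() injects the mocker as the last positional argument, after the parameterized ones.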
expected_status_code = 204
mock_url = "http://localhost:8200/v1/auth/{mount_point}/mfa_config".format(
mount_point=mount_point,
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
)
mfa = Mfa(adapter=JSONAdapter())
response = mfa.configure(
mount_point=mount_point,
)
self.assertEqual(
first=expected_status_code,
second=response.status_code,
)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "cathub"),
]
)
@requests_mock.Mocker()
def test_read_configuration(self, test_label, mount_point, requests_mocker):
expected_status_code = 200
mock_response = {
"lease_id": "",
"warnings": None,
"wrap_info": None,
"auth": None,
"lease_duration": 0,
"request_id": "18ecf194-aba2-ba99-ebb5-1b90e5e231c7",
"data": {"type": "duo"},
"renewable": False,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/mfa_config".format(
mount_point=mount_point,
)
requests_mocker.register_uri(
method="GET",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
mfa = Mfa(adapter=JSONAdapter())
response = mfa.read_configuration(
mount_point=mount_point,
)
self.assertEqual(
first=mock_response,
second=response,
)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "cathub"),
]
)
@requests_mock.Mocker()
def test_configure_duo_access(self, test_label, mount_point, requests_mocker):
expected_status_code = 204
mock_url = "http://localhost:8200/v1/auth/{mount_point}/duo/access".format(
mount_point=mount_point,
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
)
mfa = Mfa(adapter=JSONAdapter())
response = mfa.configure_duo_access(
mount_point=mount_point,
host="someapisubdomain.python-hvac.org",
integration_key="ikey",
secret_key="supersecret",
)
self.assertEqual(
first=expected_status_code,
second=response.status_code,
)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "cathub"),
]
)
@requests_mock.Mocker()
def METHOD_NAME(self, test_label, mount_point, requests_mocker):
expected_status_code = 204
mock_url = "http://localhost:8200/v1/auth/{mount_point}/duo/config".format(
mount_point=mount_point,
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
)
mfa = Mfa(adapter=JSONAdapter())
response = mfa.configure_duo_behavior(
mount_point=mount_point, push_info="howdy"
)
self.assertEqual(
first=expected_status_code,
second=response.status_code,
)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "cathub"),
]
)
@requests_mock.Mocker()
def test_read_duo_behavior_configuration(
self, test_label, mount_point, requests_mocker
):
expected_status_code = 200
mock_response = {
"lease_id": "",
"warnings": None,
"wrap_info": None,
"auth": None,
"lease_duration": 0,
"request_id": "7ea734e8-bbc4-e2de-2769-d052d6a320c6",
"data": {"username_format": "%s", "push_info": "", "user_agent": ""},
"renewable": False,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/duo/config".format(
mount_point=mount_point,
)
requests_mocker.register_uri(
method="GET",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
mfa = Mfa(adapter=JSONAdapter())
response = mfa.read_duo_behavior_configuration(
mount_point=mount_point,
)
self.assertEqual(
first=mock_response,
second=response,
) |
4,911 | recupera parlamentares | from django.db.models import Count
from sapl.base.models import Autor
from sapl.comissoes.models import Participacao
from sapl.materia.models import Relatoria, UnidadeTramitacao, Autoria, Proposicao
from sapl.norma.models import AutoriaNorma
from sapl.parlamentares.models import Parlamentar, ComposicaoMesa, Dependente, Filiacao, Mandato, Frente, Votante
from sapl.protocoloadm.models import Protocolo, DocumentoAdministrativo
from sapl.sessao.models import IntegranteMesa, JustificativaAusencia, OradorExpediente, PresencaOrdemDia, \
RetiradaPauta, SessaoPlenariaPresenca, VotoParlamentar, OradorOrdemDia
models = [ComposicaoMesa, Dependente, Filiacao, IntegranteMesa, JustificativaAusencia, Mandato, OradorOrdemDia,
OradorExpediente, Participacao, PresencaOrdemDia, Relatoria, RetiradaPauta, SessaoPlenariaPresenca,
UnidadeTramitacao, VotoParlamentar, Votante]
# Handle FRENTE separately because it is 1-to-many (parlamentares field) with Parlamentar
models_autor = [AutoriaNorma, Autoria, Frente, Proposicao, Protocolo, DocumentoAdministrativo]
## Check whether TipoAutor is always 1 for parlamentar and ContentType is always 26 for parlamentar.
TIPO_PARLAMENTAR = 1
CONTENT_TYPE_PARLAMENTAR = 26
def METHOD_NAME():
return [[parlamentar for parlamentar in Parlamentar.objects.filter(nome_parlamentar=nome_parlamentar).order_by('id')]
for nome_parlamentar in Parlamentar.objects.values_list('nome_parlamentar', flat=True)
.annotate(qntd=Count('nome_parlamentar')).filter(qntd__gt=1)]
def deduplica_parlamentares(parlamentares):
for parlamentar in parlamentares:
parlamentar_principal = parlamentar[0]
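# Keep the record with the lowest id as canonical and merge every clone into it.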
print('Fixing parlamentar {}'.format(parlamentar_principal))
for clone in parlamentar[1:]:
if parlamentar_principal.biografia and clone.biografia:
parlamentar_principal.biografia += f'\n\n------------------------\n\n{clone.biografia}'
elif clone.biografia:
parlamentar_principal.biografia = clone.biografia
parlamentar_principal.save()
autor_principal = Autor.objects.filter(tipo_id=TIPO_PARLAMENTAR,
content_type_id=CONTENT_TYPE_PARLAMENTAR,
object_id=parlamentar_principal.id)
for a in Autor.objects.filter(tipo_id=TIPO_PARLAMENTAR, content_type_id=CONTENT_TYPE_PARLAMENTAR, object_id=clone.id):
if not autor_principal:
print('Adjusting autor of %s' % parlamentar)
a.object_id = parlamentar_principal.id
try:
a.save()
except Exception as e:
print(f"Erro ao mover referencia de autor do model {ma} para {autor_principal[0]}")
print(e)
else:
print('Moving autor references')
for ma in models_autor:
for ra in ma.objects.filter(autor=a):
ra.autor = autor_principal[0]
try:
ra.save()
except Exception as e:
print(f"Erro ao mover referencia de autor do model {ma} para {autor_principal[0]}")
print(e)
a.delete()
        # Repoint models that reference the parlamentar to the principal record
for model in models:
print(f"Mudando apontamento de model {model}...")
for obj in model.objects.filter(parlamentar_id=clone.id):
obj.parlamentar = parlamentar_principal
try:
obj.save()
except Exception as e:
print(f"Erro ao alterar parlamentar do model {model} para a instancia {obj}")
print(e)
clone.delete()
def estatisticas(parlamentares):
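    """Print per-model reference counts for every Parlamentar in the
    duplicated groups."""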
stats = []
for ps in parlamentares:
for p in ps:
d = {
'id': p.id,
'nome': p.nome_parlamentar,
'stats': {m.__name__: m.objects.filter(parlamentar=p).count() for m in models}
}
for m in models_autor:
d['stats'].update({m.__name__:m.objects.filter(autor__object_id=p.id,
autor__content_type=CONTENT_TYPE_PARLAMENTAR,
autor__tipo_id=TIPO_PARLAMENTAR).count()})
stats.append(d)
for s in stats:
print('---------------------------------------------------')
print(s['id'], s['nome'])
print(s['stats'])
def main():
parlamentares = METHOD_NAME()
estatisticas(parlamentares)
deduplica_parlamentares(parlamentares)
estatisticas(parlamentares)
if __name__ == '__main__':
main() |
4,912 | reset gui | import json
import ast
import cv2
import base64
import threading
import time
from datetime import datetime
from websocket_server import WebsocketServer
import logging
import numpy as np
from interfaces.pose3d import ListenerPose3d
import re
import os
from map import MAP
# Graphical User Interface Class
class GUI:
    # Initialization function
def __init__(self, host, hal):
t = threading.Thread(target=self.run_server)
self.payload = {'image': '', 'map': '', 'array': ''}
self.server = None
self.client = None
self.host = host
self.image_to_be_shown = None
self.image_to_be_shown_updated = False
self.image_show_lock = threading.Lock()
self.array_lock = threading.Lock()
self.array = None
self.acknowledge = False
self.acknowledge_lock = threading.Lock()
self.mapXY = None
self.worldXY = None
        # Keep a reference to the HAL object so it shares the same websocket and client
self.hal = hal
t.start()
# Create the lap object
self.pose3d_object = ListenerPose3d("/taxi_holo/odom")
self.map = MAP(self.pose3d_object)
# Function to get the client
# Called when a new client is received
def get_client(self, client, server):
self.client = client
# Function to get value of Acknowledge
def get_acknowledge(self):
self.acknowledge_lock.acquire()
acknowledge = self.acknowledge
self.acknowledge_lock.release()
return acknowledge
    # Function to set value of Acknowledge
def set_acknowledge(self, value):
self.acknowledge_lock.acquire()
self.acknowledge = value
self.acknowledge_lock.release()
# encode the image data to be sent to websocket
def payloadImage(self):
self.image_show_lock.acquire()
image_to_be_shown_updated = self.image_to_be_shown_updated
image_to_be_shown = self.image_to_be_shown
self.image_show_lock.release()
image = image_to_be_shown
payload = {'image': '', 'shape': ''}
        if not image_to_be_shown_updated:
return payload
shape = image.shape
frame = cv2.imencode('.PNG', image)[1]
encoded_image = base64.b64encode(frame)
payload['image'] = encoded_image.decode('utf-8')
payload['shape'] = shape
self.image_show_lock.acquire()
self.image_to_be_shown_updated = False
self.image_show_lock.release()
return payload
# load the image data
def showNumpy(self, image):
self.image_show_lock.acquire()
self.image_to_be_shown = image
self.image_to_be_shown_updated = True
self.image_show_lock.release()
# Process the array(ideal path) to be sent to websocket
def showPath(self, array):
self.array_lock.acquire()
strArray = ''.join(str(e) for e in array)
        # Remove unnecessary spaces in the array to avoid JSON syntax error in javascript
strArray = re.sub(r"\[[ ]+", "[", strArray)
strArray = re.sub(r"[ ]+", ", ", strArray)
strArray = re.sub(r",[ ]+]", "]", strArray)
strArray = re.sub(r",,", ",", strArray)
strArray = re.sub(r"]\[", "],[", strArray)
strArray = "[" + strArray + "]"
self.array = strArray
self.array_lock.release()
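    # Return the last picked point as [y, x] in world coordinates (or None)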
def getTargetPose(self):
        if self.worldXY is not None:
return [self.worldXY[1], self.worldXY[0]]
else:
return None
# Update the gui
def update_gui(self):
# Payload Image Message
payload = self.payloadImage()
self.payload["image"] = json.dumps(payload)
self.payload["array"] = self.array
# Payload Map Message
pos_message1 = self.map.getTaxiCoordinates()
# print(self.pose3d_object.getPose3d())
ang_message = self.map.getTaxiAngle()
pos_message = str(pos_message1 + ang_message)
# print("pos2 : {} , ang : {}".format(pos_message,ang_message))
self.payload["map"] = pos_message
message = "#gui" + json.dumps(self.payload)
self.server.send_message(self.client, message)
return list(pos_message1)
# Function to read the message from websocket
# Gets called when there is an incoming message from the client
def get_message(self, client, server, message):
# Acknowledge Message for GUI Thread
if (message[:4] == "#ack"):
self.set_acknowledge(True)
# Check for mouse click data on the map
elif (message[:5] == "#pick"):
            data = ast.literal_eval(message[5:])  # safer than eval on untrusted client input
self.mapXY = data
x, y = self.mapXY
worldx, worldy = self.map.gridToWorld(x, y)
self.worldXY = [worldx, worldy]
print("World : {}".format(self.worldXY))
# Activate the server
def run_server(self):
self.server = WebsocketServer(port=2303, host=self.host)
self.server.set_fn_new_client(self.get_client)
self.server.set_fn_message_received(self.get_message)
home_dir = os.path.expanduser('~')
logged = False
while not logged:
try:
f = open(f"{home_dir}/ws_gui.log", "w")
f.write("websocket_gui=ready")
f.close()
logged = True
            except OSError:
time.sleep(0.1)
self.server.run_forever()
# Function to reset
def METHOD_NAME(self):
self.map.reset()
# This class decouples the user thread
# and the GUI update thread
class ThreadGUI:
def __init__(self, gui):
self.gui = gui
# Time variables
self.ideal_cycle = 80
self.measured_cycle = 80
self.iteration_counter = 0
# Function to start the execution of threads
def start(self):
        self.measure_thread_obj = threading.Thread(target=self.measure_thread)
        self.thread = threading.Thread(target=self.run)
        self.measure_thread_obj.start()
        self.thread.start()
print("GUI Thread Started!")
# The measuring thread to measure frequency
def measure_thread(self):
        while self.gui.client is None:
            time.sleep(0.1)
previous_time = datetime.now()
while (True):
# Sleep for 2 seconds
time.sleep(2)
# Measure the current time and subtract from previous time to get real time interval
current_time = datetime.now()
dt = current_time - previous_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
previous_time = current_time
# Get the time period
            try:
                self.measured_cycle = ms / self.iteration_counter
            except ZeroDivisionError:
                # No iterations completed in this interval
                self.measured_cycle = 0
# Reset the counter
self.iteration_counter = 0
def run(self):
        while self.gui.client is None:
            time.sleep(0.1)
while (True):
start_time = datetime.now()
self.gui.update_gui()
acknowledge_message = self.gui.get_acknowledge()
            while not acknowledge_message:
acknowledge_message = self.gui.get_acknowledge()
self.gui.set_acknowledge(False)
finish_time = datetime.now()
self.iteration_counter = self.iteration_counter + 1
dt = finish_time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
if (ms < self.ideal_cycle):
time.sleep((self.ideal_cycle - ms) / 1000.0) |
4,913 | hisat2 general stats table | """ MultiQC module to parse output from HISAT2 """
import logging
import re
from collections import OrderedDict
from multiqc.modules.base_module import BaseMultiqcModule
from multiqc.plots import bargraph
# Initialise the logger
log = logging.getLogger(__name__)
class MultiqcModule(BaseMultiqcModule):
"""HISAT2 module, parses stderr logs."""
def __init__(self):
# Initialise the parent object
super(MultiqcModule, self).__init__(
name="HISAT2",
anchor="hisat2",
href="https://ccb.jhu.edu/software/hisat2/",
info="is a fast and sensitive alignment program for mapping "
"NGS reads (both DNA and RNA) against a reference genome or "
"population of reference genomes.",
doi=["10.1038/nmeth.3317", "10.1038/s41587-019-0201-4"],
)
# Find and load any HISAT2 reports
self.hisat2_data = dict()
for f in self.find_log_files("hisat2", filehandles=True):
self.parse_hisat2_logs(f)
# Filter to strip out ignored sample names
self.hisat2_data = self.ignore_samples(self.hisat2_data)
if len(self.hisat2_data) == 0:
raise UserWarning
log.info("Found {} reports".format(len(self.hisat2_data)))
# Write parsed report data to a file
self.write_data_file(self.hisat2_data, "multiqc_hisat2")
# Basic Stats Table
# Report table is immutable, so just updating it works
self.METHOD_NAME()
# Alignment Rate Plot
self.hisat2_alignment_plot()
def parse_hisat2_logs(self, f):
"""
Parse statistics generated by HISAT2 >= v2.1.0 that has been run
with the --new-summary option. Older versions or logs from runs without
that option are identical to that from bowtie2 and will be parsed
by that module.
"""
# Regexes
regexes = {
"unpaired_total": r"Total(?: unpaired)? reads: (\d+)",
"unpaired_aligned_none": r"Aligned 0 times?: (\d+) \([\d\.]+%\)",
"unpaired_aligned_one": r"Aligned 1 time: (\d+) \([\d\.]+%\)",
"unpaired_aligned_multi": r"Aligned >1 times: (\d+) \([\d\.]+%\)",
"paired_total": r"Total pairs: (\d+)",
"paired_aligned_none": r"Aligned concordantly or discordantly 0 time: (\d+) \([\d\.]+%\)",
"paired_aligned_one": r"Aligned concordantly 1 time: (\d+) \([\d\.]+%\)",
"paired_aligned_multi": r"Aligned concordantly >1 times: (\d+) \([\d\.]+%\)",
"paired_aligned_discord_one": r"Aligned discordantly 1 time: (\d+) \([\d\.]+%\)",
}
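        # Example of a --new-summary log line matched above:
        #   "Aligned 1 time: 4339 (87.64%)"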
# Go through log file line by line
s_name = f["s_name"]
parsed_data = {}
for l in f["f"]:
# Attempt in vain to find original hisat2 command, logged by another program
hscmd = re.search(r"hisat2 .+ -[1U] ([^\s,]+)", l)
if hscmd:
s_name = self.clean_s_name(hscmd.group(1), f)
log.debug("Found a HISAT2 command, updating sample name to '{}'".format(s_name))
# Run through all regexes
for k, r in regexes.items():
match = re.search(r, l)
if match:
parsed_data[k] = int(match.group(1))
# Overall alignment rate
overall = re.search(r"Overall alignment rate: ([\d\.]+)%", l)
if overall:
parsed_data["overall_alignment_rate"] = float(overall.group(1))
# Save parsed data
if s_name in self.hisat2_data:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.add_data_source(f, s_name)
self.hisat2_data[s_name] = parsed_data
# Reset in case we find more in this log file
s_name = f["s_name"]
parsed_data = {}
def METHOD_NAME(self):
"""Take the parsed stats from the HISAT2 report and add it to the
basic stats table at the top of the report"""
headers = OrderedDict()
headers["overall_alignment_rate"] = {
"title": "% Aligned",
"description": "overall alignment rate",
"max": 100,
"min": 0,
"suffix": "%",
"scale": "YlGn",
}
self.general_stats_addcols(self.hisat2_data, headers)
def hisat2_alignment_plot(self):
"""Make the HighCharts HTML to plot the alignment rates"""
# Split the data into SE and PE
sedata = {}
pedata = {}
for s_name, data in self.hisat2_data.items():
if "paired_total" in data:
# Save half 'pairs' of mate counts
m_keys = ["unpaired_total", "unpaired_aligned_none", "unpaired_aligned_one", "unpaired_aligned_multi"]
for k in m_keys:
if k in data:
data[k] = float(data[k]) / 2.0
pedata[s_name] = data
else:
sedata[s_name] = data
# Two plots, don't mix SE with PE
if len(sedata) > 0:
sekeys = OrderedDict()
sekeys["unpaired_aligned_one"] = {"color": "#20568f", "name": "SE mapped uniquely"}
sekeys["unpaired_aligned_multi"] = {"color": "#f7a35c", "name": "SE multimapped"}
sekeys["unpaired_aligned_none"] = {"color": "#981919", "name": "SE not aligned"}
pconfig = {
"id": "hisat2_se_plot",
"title": "HISAT2: SE Alignment Scores",
"ylab": "# Reads",
"cpswitch_counts_label": "Number of Reads",
}
self.add_section(plot=bargraph.plot(sedata, sekeys, pconfig))
if len(pedata) > 0:
pekeys = OrderedDict()
pekeys["paired_aligned_one"] = {"color": "#20568f", "name": "PE mapped uniquely"}
pekeys["paired_aligned_discord_one"] = {"color": "#5c94ca", "name": "PE mapped discordantly uniquely"}
pekeys["unpaired_aligned_one"] = {"color": "#95ceff", "name": "PE one mate mapped uniquely"}
pekeys["paired_aligned_multi"] = {"color": "#f7a35c", "name": "PE multimapped"}
pekeys["unpaired_aligned_multi"] = {"color": "#ffeb75", "name": "PE one mate multimapped"}
pekeys["unpaired_aligned_none"] = {"color": "#981919", "name": "PE neither mate aligned"}
pconfig = {
"id": "hisat2_pe_plot",
"title": "HISAT2: PE Alignment Scores",
"ylab": "# Reads",
"cpswitch_counts_label": "Number of Reads",
}
self.add_section(
description="<em>Please note that single mate alignment counts are halved to tally with pair counts properly.</em>",
plot=bargraph.plot(pedata, pekeys, pconfig),
) |
4,914 | index | from datetime import datetime, timedelta
from dateutil import parser
from django.contrib.auth.decorators import login_required
from django.forms.models import modelformset_factory
from django.shortcuts import redirect
from django.contrib.auth.models import User
from tendenci.apps.theme.shortcuts import themed_response as render_to_resp
from tendenci.apps.dashboard.models import DashboardStatType
from tendenci.apps.event_logs.models import EventLog
from tendenci.apps.perms.decorators import superuser_required
from tendenci.apps.site_settings.models import Setting
from tendenci.apps.site_settings.utils import get_setting
@login_required
def METHOD_NAME(request, template_name="dashboard/index.html"):
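    """Render the dashboard index, honoring the profile_redirect setting for
    non-superusers."""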
profile_redirect = get_setting('site', 'global', 'profile_redirect')
if profile_redirect and profile_redirect != '/dashboard' and not request.user.profile.is_superuser:
return redirect(profile_redirect)
# self signup free trial version
has_paid = True
activate_url = ''
expired = False
expiration_dt = ''
if get_setting('site', 'developer', 'partner') == 'Self-Signup' \
and get_setting('site', 'developer', 'freepaid') == 'free':
has_paid = False
activate_url = get_setting('site', 'developer', 'siteactivatepaymenturl')
site_create_dt = get_setting('site', 'developer', 'sitecreatedt')
if site_create_dt:
site_create_dt = parser.parse(site_create_dt)
else:
# find the site create date in user's table
u = User.objects.get(pk=1)
site_create_dt = u.date_joined
expiration_dt = site_create_dt + timedelta(days=30)
now = datetime.now()
if now >= expiration_dt:
expired = True
statistics = DashboardStatType.objects.filter(displayed=True)
EventLog.objects.log()
return render_to_resp(request=request, template_name=template_name, context={
'has_paid': has_paid,
'activate_url': activate_url,
'expired': expired,
'expiration_dt': expiration_dt,
'statistics': statistics,
})
@login_required
def new(request, template_name="dashboard/new.html"):
if get_setting('module', 'dashboard', 'themeredirect'):
redirect_setting = Setting.objects.get(scope_category='dashboard',
name='themeredirect')
redirect_setting.set_value(False)
redirect_setting.save()
return redirect('theme_editor.picker')
# Redirect to Group dashboard url if any
if get_setting('site', 'global', 'groupdashboard') and not request.user.profile.is_superuser:
group_dashboard_urls = [m for m in request.user.group_member
.values_list('group__dashboard_url', flat=True) if m]
if group_dashboard_urls:
url = group_dashboard_urls[0]
return redirect(url)
    # Redirect to the url specified in the Profile Redirect setting if any
profile_redirect = get_setting('site', 'global', 'profile_redirect')
if profile_redirect and profile_redirect != '/dashboard' and not request.user.profile.is_superuser:
if "<username>" in profile_redirect:
profile_redirect = profile_redirect.replace("<username>", request.user.username)
return redirect(profile_redirect)
# self signup free trial version
has_paid = True
activate_url = ''
expired = False
expiration_dt = ''
if get_setting('site', 'developer', 'partner') == 'Self-Signup' \
and get_setting('site', 'developer', 'freepaid') == 'free':
has_paid = False
activate_url = get_setting('site', 'developer', 'siteactivatepaymenturl')
site_create_dt = get_setting('site', 'developer', 'sitecreatedt')
if site_create_dt:
site_create_dt = parser.parse(site_create_dt)
else:
# find the site create date in user's table
u = User.objects.get(pk=1)
site_create_dt = u.date_joined
expiration_dt = site_create_dt + timedelta(days=30)
now = datetime.now()
if now >= expiration_dt:
expired = True
statistics = DashboardStatType.objects.filter(displayed=True)
EventLog.objects.log()
return render_to_resp(request=request, template_name=template_name, context={
'has_paid': has_paid,
'activate_url': activate_url,
'expired': expired,
'expiration_dt': expiration_dt,
'statistics': statistics,
})
@superuser_required
def customize(request, template_name="dashboard/customize.html"):
DashboardStatFormSet = modelformset_factory(
DashboardStatType,
exclude=('name',),
extra=0
)
if request.method == "POST":
formset = DashboardStatFormSet(request.POST)
if formset.is_valid():
formset.save()
return redirect('dashboard')
else:
formset = DashboardStatFormSet(queryset=DashboardStatType.objects.all())
return render_to_resp(request=request, template_name=template_name, context={
'formset': formset,
}) |
4,915 | test instantiated version is empty | import unittest
from brewpiVersion import AvrInfo
from distutils.version import LooseVersion
class VersionTestCase(unittest.TestCase):
def assertVersionEqual(self, v, versionString):
self.assertEqual(v.version, LooseVersion(versionString))
self.assertEqual(versionString, v.toString())
def assertEmptyVersion(self, v):
self.assertEqual(v.toString(), "0.0.0")
self.assertEqual(v.version, None)
def newVersion(self):
return AvrInfo()
def METHOD_NAME(self):
v = self.newVersion()
self.assertEmptyVersion(v)
def test_parseEmptyStringIsEmptyVersion(self):
v = self.newVersion()
v.parse("")
self.assertEmptyVersion(v)
def test_parseNoStringIsEmptyVersion(self):
v = self.newVersion()
s = None
v.parse(s)
self.assertEmptyVersion(v)
def test_canParseStringVersion(self):
v = self.newVersion()
v.parse("0.1.0")
self.assertVersionEqual(v, "0.1.0")
def test_canParseJsonVersion(self):
v = self.newVersion()
v.parse('{"v":"0.1.0"}')
self.assertVersionEqual(v, "0.1.0")
def test_doesNotCrashOnInvalidJsonVersion(self):
v = self.newVersion()
v.parse('{"v":"0.2.8","n":""0.2.8"","s":2,"y":0,"b":"l","l":"1"}')
def test_canParseJsonSimulatorEnabled(self):
v = self.newVersion()
v.parse('{"y":1}')
self.assertEqual(v.simulator, True)
def test_canParseJsonSimulatorDisabled(self):
v = self.newVersion()
v.parse('{"y":0}')
self.assertEqual(v.simulator, False)
def test_canParseShieldRevC(self):
v = self.newVersion()
v.parse('{"s":2}')
self.assertEqual(v.shield, AvrInfo.shield_revC)
def test_canParseBoardLeonardo(self):
v = self.newVersion()
v.parse('{"b":"l"}')
self.assertEqual(v.board, AvrInfo.board_leonardo)
def test_canParseBoardStandard(self):
v = self.newVersion()
v.parse('{"b":"s"}')
self.assertEqual(v.board, AvrInfo.board_standard)
def test_canParseAll(self):
v = AvrInfo('{"v":"1.2.3","n":"99","c":"12345678", "b":"l", "y":0, "s":2 }')
self.assertVersionEqual(v, "1.2.3")
self.assertEqual(v.build, "99")
self.assertEqual(v.commit, "12345678")
self.assertEqual(v.board, AvrInfo.board_leonardo)
self.assertEqual(v.simulator, False)
self.assertEqual(v.shield, AvrInfo.shield_revC)
def test_canPrintExtendedVersionEmpty(self):
v = AvrInfo("")
self.assertEqual("BrewPi v0.0.0", v.toExtendedString());
def test_canPrintExtendedVersionFull(self):
v = AvrInfo('{"v":"1.2.3","c":"12345678", "b":"l", "y":1, "s":2 }')
self.assertEqual('BrewPi v1.2.3, running commit 12345678, running on an Arduino Leonardo with a revC shield, running as simulator', v.toExtendedString())
def test_isNewer(self):
v = AvrInfo('{"v":"1.2.3","c":"12345678", "b":"l", "y":1, "s":2 }')
self.assertFalse(v.isNewer("1.0.0"))
self.assertFalse(v.isNewer("1.2.3"))
self.assertTrue(v.isNewer("1.2.4"))
self.assertTrue(v.isNewer("2.0.0"))
def test_fullName(self):
v = AvrInfo('{"v":"1.2.3","c":"12345678", "b":"l", "y":1, "s":2 }')
self.assertEqual("Arduino Leonardo",v.fullName())
v = AvrInfo('{"v":"1.2.3","c":"12345678", "b":"x", "y":1, "s":2 }')
self.assertEqual("Spark Core",v.fullName())
v = AvrInfo('{"v":"1.2.3","c":"12345678", "b":"?", "y":1, "s":2 }')
self.assertEqual("???? ????",v.fullName())
def test_articleFullName(self):
v = AvrInfo('{"v":"1.2.3","c":"12345678", "b":"l", "y":1, "s":2 }')
self.assertEqual("an Arduino Leonardo",v.articleFullName())
v = AvrInfo('{"v":"1.2.3","c":"12345678", "b":"s", "y":1, "s":2 }')
self.assertEqual("an Arduino Uno",v.articleFullName())
v = AvrInfo('{"v":"1.2.3","c":"12345678", "b":"x", "y":1, "s":2 }')
self.assertEqual("a Spark Core",v.articleFullName())
if __name__ == '__main__':
unittest.main() |
4,916 | generate | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout
from conan.tools.files import get, copy, rmdir
from conan.tools.scm import Version
import os
required_conan_version = ">=1.53.0"
class SevenBitConfConan(ConanFile):
name = "7bitconf"
homepage = "https://github.com/7bitCoder/7bitConf"
description = "7bitConf is a simple C++17 centralized configuration provider library."
topics = ("cpp17", "configuration", "provider", "configuration-files")
url = "https://github.com/conan-io/conan-center-index"
license = "MIT"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"header_only": [True, False],
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"header_only": False,
"shared": False,
"fPIC": True,
}
@property
def _min_cppstd(self):
return 17
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "14",
"msvc": "192",
"gcc": "8",
"clang": "7",
"apple-clang": "10",
}
def config_options(self):
if self.settings.os == "Windows":
self.options.rm_safe("fPIC")
def configure(self):
if self.options.get_safe("shared") or self.options.header_only:
self.options.rm_safe("fPIC")
if self.options.header_only:
self.options.rm_safe("shared")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("taocpp-json/1.0.0-beta.13", transitive_headers=True)
def package_id(self):
if self.info.options.header_only:
self.info.clear()
def validate(self):
compiler = self.settings.compiler
compiler_name = str(compiler)
if compiler.get_safe("cppstd"):
check_min_cppstd(self, self._min_cppstd)
minimum_version = self._minimum_compilers_version.get(compiler_name, False)
if minimum_version and Version(compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"Requires compiler {compiler_name} minimum version: {minimum_version} with C++17 support."
)
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def METHOD_NAME(self):
if not self.options.header_only:
tc = CMakeToolchain(self)
tc.variables["_7BIT_CONF_BUILD_EXAMPLES"] = False
tc.variables["_7BIT_CONF_BUILD_TESTS"] = False
tc.variables["_7BIT_CONF_BUILD_SINGLE_HEADER"] = False
tc.variables["_7BIT_CONF_INSTALL"] = True
tc.variables["_7BIT_CONF_LIBRARY_TYPE"] = self.getSevenBitConfLibraryType()
tc.METHOD_NAME()
cmake_deps = CMakeDeps(self)
cmake_deps.METHOD_NAME()
def getSevenBitConfLibraryType(self):
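        """Map the package options onto the CMake _7BIT_CONF_LIBRARY_TYPE value."""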
if self.options.header_only:
return "HeaderOnly"
if self.options.shared:
return "Shared"
return "Static"
def build(self):
if not self.options.header_only:
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(
self,
"LICENSE",
dst=os.path.join(self.package_folder, "licenses"),
src=self.source_folder,
)
if self.options.header_only:
copy(
self,
src=os.path.join(self.source_folder, "Include"),
pattern="*.hpp",
dst=os.path.join(self.package_folder, "include"),
)
else:
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "7bitConf")
self.cpp_info.set_property("cmake_target_name", "7bitConf::7bitConf")
self.cpp_info.requires = ["taocpp-json::taocpp-json"]
if self.options.header_only:
self.cpp_info.libs = []
self.cpp_info.bindirs = []
else:
suffix = "d" if self.settings.build_type == "Debug" else ""
self.cpp_info.libs = ["7bitConf" + suffix]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("m")
|
4,917 | test login no idp | import json
import os
import re
import sys
import unittest
from os import path
from unittest.mock import patch
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
import requests
from httpretty import HTTPretty
try:
from onelogin.saml2.utils import OneLogin_Saml2_Utils
except ImportError:
# Only available for python 2.7 at the moment, so don't worry if this fails
pass
from ...exceptions import AuthMissingParameter
from .base import BaseBackendTest
DATA_DIR = path.join(path.dirname(__file__), "data")
@unittest.skipIf(
"TRAVIS" in os.environ,
"Travis-ci segfaults probably due to a bad " "dependencies build",
)
@unittest.skipIf(
"__pypy__" in sys.builtin_module_names, "dm.xmlsec not compatible with pypy"
)
class SAMLTest(BaseBackendTest):
backend_path = "social_core.backends.saml.SAMLAuth"
expected_username = "myself"
def extra_settings(self):
name = path.join(DATA_DIR, "saml_config.json")
with open(name) as config_file:
config_str = config_file.read()
return json.loads(config_str)
def setUp(self):
"""Patch the time so that we can replay canned
request/response pairs"""
super().setUp()
@staticmethod
def fixed_time():
return OneLogin_Saml2_Utils.parse_SAML_to_time("2015-05-09T03:57:22Z")
now_patch = patch.object(OneLogin_Saml2_Utils, "now", fixed_time)
now_patch.start()
self.addCleanup(now_patch.stop)
def install_http_intercepts(self, start_url, return_url):
# When we request start_url
# (https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO...)
# we will eventually get a redirect back, with SAML assertion
# data in the query string. A pre-recorded correct response
# is kept in this .txt file:
name = path.join(DATA_DIR, "saml_response.txt")
with open(name) as response_file:
response_url = response_file.read()
HTTPretty.register_uri(
HTTPretty.GET, start_url, status=301, location=response_url
)
HTTPretty.register_uri(HTTPretty.GET, return_url, status=200, body="foobar")
def do_start(self):
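        """Start the SAML flow, replay the canned IdP response, and return
        the result of the backend's complete() step."""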
start_url = self.backend.start().url
# Modify the start URL to make the SAML request consistent
# from test to test:
start_url = self.modify_start_url(start_url)
# If the SAML Identity Provider recognizes the user, we will
# be redirected back to:
return_url = self.backend.redirect_uri
self.install_http_intercepts(start_url, return_url)
response = requests.get(start_url)
self.assertTrue(response.url.startswith(return_url))
self.assertEqual(response.text, "foobar")
query_values = {
k: v[0] for k, v in parse_qs(urlparse(response.url).query).items()
}
self.assertNotIn(" ", query_values["SAMLResponse"])
self.strategy.set_request_data(query_values, self.backend)
return self.backend.complete()
def test_metadata_generation(self):
"""Test that we can generate the metadata without error"""
xml, errors = self.backend.generate_metadata_xml()
self.assertEqual(len(errors), 0)
self.assertEqual(xml.decode()[0], "<")
def test_login(self):
"""Test that we can authenticate with a SAML IdP (TestShib)"""
# pretend we've started with a URL like /login/saml/?idp=testshib:
self.strategy.set_request_data({"idp": "testshib"}, self.backend)
self.do_login()
def METHOD_NAME(self):
"""Logging in without an idp param should raise AuthMissingParameter"""
with self.assertRaises(AuthMissingParameter):
self.do_start()
def modify_start_url(self, start_url):
"""
Given a SAML redirect URL, parse it and change the ID to
a consistent value, so the request is always identical.
"""
# Parse the SAML Request URL to get the XML being sent to TestShib
url_parts = urlparse(start_url)
query = {k: v[0] for (k, v) in parse_qs(url_parts.query).items()}
xml = OneLogin_Saml2_Utils.decode_base64_and_inflate(query["SAMLRequest"])
# Modify the XML:
xml = xml.decode()
xml, changed = re.subn(r'ID="[^"]+"', 'ID="TEST_ID"', xml)
self.assertEqual(changed, 1)
# Update the URL to use the modified query string:
query["SAMLRequest"] = OneLogin_Saml2_Utils.deflate_and_base64_encode(xml)
url_parts = list(url_parts)
url_parts[4] = urlencode(query)
return urlunparse(url_parts) |
4,918 | get voltage low threshold | #############################################################################
#
# Module contains an implementation of SONiC Platform Base API and
# provides the status of the PSUs that are available in the platform
#
#############################################################################
try:
from sonic_platform_base.psu_base import PsuBase
from sonic_py_common.logger import Logger
from sonic_platform.fan import Fan
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
SYSLOG_IDENTIFIER = 'thermalctld'
logger = Logger(SYSLOG_IDENTIFIER)
# To do: should be defined in platDev
PSU_MAX_VOUT = 12.0 # voltage
PSU_MIN_VOUT = 3.3 # voltage
PSU_MAX_TEMP = 50.0 # C
class Psu(PsuBase):
"""Platform-specific Psu class"""
def __init__(self, index, info_list, is_bmc):
PsuBase.__init__(self)
self.index = index
self.is_bmc = is_bmc
self.attr_path = info_list[0]
self.status_path = info_list[1]
if is_bmc:
speed_file = self.attr_path + 'psu{}_fan_speed'.format(index+1)
else:
speed_file = self.attr_path + 'psu_fan_speed_1'
fan = Fan(index, 0, [self.status_path, speed_file], True)
self._fan_list.append(fan)
self.psu_name = "PSU{}".format(self.index+1)
def __read_attr_file(self, filepath, line=0xFF):
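        """Read a sysfs attribute file; return the whole file when line is
        left at the 0xFF sentinel, otherwise the given 0-based line, or
        None on error."""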
try:
with open(filepath, 'r') as fd:
if line == 0xFF:
data = fd.read()
return data.rstrip('\r\n')
else:
data = fd.readlines()
return data[line].rstrip('\r\n')
except FileNotFoundError:
logger.log_error(f"File {filepath} not found. Aborting")
except OSError as ex:
logger.log_error("Cannot open - {}: {}".format(filepath, repr(ex)))
return None
def get_name(self):
return self.psu_name
def get_presence(self):
"""
Retrieves the presence status of power supply unit (PSU) defined
Returns:
bool: True if PSU is present, False if not
"""
data = self.__read_attr_file(
self.status_path + 'psu{}_prnt'.format(self.index+1))
if data == '1':
return True
else:
return False
def get_powergood_status(self):
"""
Retrieves the powergood status of PSU
Returns:
A boolean, True if PSU has stablized its output voltages and passed all
its internal self-tests, False if not.
"""
data = self.__read_attr_file(
self.status_path + 'psu{}_good'.format(self.index+1))
if data == '1':
return True
else:
return False
def get_voltage(self):
"""
Retrieves current PSU voltage output
Returns:
A float number, the output voltage in volts,
e.g. 12.1
"""
if self.is_bmc:
path = self.attr_path + 'psu{}_vout'.format(self.index+1)
else:
path = self.attr_path + "/psu_vout"
vout = self.__read_attr_file(path, 0)
if vout is not None:
return float(vout) / 1000
return False
def get_current(self):
"""
Retrieves present electric current supplied by PSU
Returns:
A float number, the electric current in amperes, e.g 15.4
"""
if self.is_bmc:
path = self.attr_path + 'psu{}_iout'.format(self.index+1)
else:
path = self.attr_path + "/psu_iout"
iout = self.__read_attr_file(path, 0)
if iout is not None:
return float(iout) / 1000
return False
def get_power(self):
"""
Retrieves current energy supplied by PSU
Returns:
A float number, the power in watts, e.g. 302.6
"""
if self.is_bmc:
path = self.attr_path + 'psu{}_pout'.format(self.index+1)
else:
path = self.attr_path + "/psu_pout"
pout = self.__read_attr_file(path, 0)
if pout is not None:
return float(pout) / 1000000
return False
def set_status_led(self, color):
"""
Sets the state of the PSU status LED
Args:
color: A string representing the color with which to set the
PSU status LED
Returns:
bool: True if status LED state is set successfully, False if not
"""
raise NotImplementedError
def get_status_led(self):
"""
Gets the state of the PSU status LED
Returns:
A string, one of the predefined STATUS_LED_COLOR_* strings above
"""
raise NotImplementedError
def get_temperature(self):
"""
Retrieves current temperature reading from PSU
Returns:
A float number of current temperature in Celsius up to nearest thousandth
of one degree Celsius, e.g. 30.125
"""
if self.is_bmc:
path = self.attr_path+'psu{}_temp'.format(self.index+1)
else:
path = self.attr_path + "/psu_temp_1"
temperature = self.__read_attr_file(path, 0)
if temperature is not None:
return float(temperature) / 1000
return False
def get_temperature_high_threshold(self):
"""
Retrieves the high threshold temperature of PSU
Returns:
A float number, the high threshold temperature of PSU in Celsius
up to nearest thousandth of one degree Celsius, e.g. 30.125
"""
return PSU_MAX_TEMP
def get_voltage_high_threshold(self):
"""
Retrieves the high threshold PSU voltage output
Returns:
A float number, the high threshold output voltage in volts,
e.g. 12.1
"""
return PSU_MAX_VOUT
def METHOD_NAME(self):
"""
Retrieves the low threshold PSU voltage output
Returns:
A float number, the low threshold output voltage in volts,
e.g. 12.1
"""
return PSU_MIN_VOUT |
4,919 | put | """
SCP Module
==========
.. versionadded:: 2019.2.0
Module to copy files via `SCP <https://man.openbsd.org/scp>`_
"""
import logging
# Import salt modules
try:
import paramiko
import scp
HAS_SCP = True
except ImportError:
HAS_SCP = False
__proxyenabled__ = ["*"]
__virtualname__ = "scp"
log = logging.getLogger(__name__)
def __virtual__():
if not HAS_SCP:
return False, "Please install SCP for this modules: pip install scp"
return __virtualname__
def _select_kwargs(**kwargs):
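    """Split kwargs between paramiko.SSHClient.connect and scp.SCPClient
    based on each callable's argument spec."""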
paramiko_kwargs = {}
scp_kwargs = {}
paramiko_args = __utils__["args.get_function_argspec"](paramiko.SSHClient.connect)[
0
]
paramiko_args.append("auto_add_policy")
scp_args = __utils__["args.get_function_argspec"](scp.SCPClient.__init__)[0]
scp_args.pop(0) # strip transport arg (it is passed in _prepare_connection)
for key, val in kwargs.items():
if key in paramiko_args and val is not None:
paramiko_kwargs[key] = val
if key in scp_args and val is not None:
scp_kwargs[key] = val
return paramiko_kwargs, scp_kwargs
def _prepare_connection(**kwargs):
"""
Prepare the underlying SSH connection with the remote target.
"""
paramiko_kwargs, scp_kwargs = _select_kwargs(**kwargs)
ssh = paramiko.SSHClient()
if paramiko_kwargs.pop("auto_add_policy", False):
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(**paramiko_kwargs)
scp_client = scp.SCPClient(ssh.get_transport(), **scp_kwargs)
return scp_client
def get(remote_path, local_path="", recursive=False, preserve_times=False, **kwargs):
"""
Transfer files and directories from remote host to the localhost of the
Minion.
remote_path
Path to retrieve from remote host. Since this is evaluated by scp on the
remote host, shell wildcards and environment variables may be used.
recursive: ``False``
Transfer files and directories recursively.
preserve_times: ``False``
Preserve ``mtime`` and ``atime`` of transferred files and directories.
hostname
The hostname of the remote device.
port: ``22``
The port of the remote device.
username
The username required for SSH authentication on the device.
password
Used for password authentication. It is also used for private key
decryption if ``passphrase`` is not given.
passphrase
Used for decrypting private keys.
pkey
An optional private key to use for authentication.
key_filename
The filename, or list of filenames, of optional private key(s) and/or
certificates to try for authentication.
timeout
An optional timeout (in seconds) for the TCP connect.
socket_timeout: ``10``
The channel socket timeout in seconds.
buff_size: ``16384``
The size of the SCP send buffer.
allow_agent: ``True``
Set to ``False`` to disable connecting to the SSH agent.
look_for_keys: ``True``
Set to ``False`` to disable searching for discoverable private key
files in ``~/.ssh/``
banner_timeout
An optional timeout (in seconds) to wait for the SSH banner to be
presented.
auth_timeout
An optional timeout (in seconds) to wait for an authentication
response.
auto_add_policy: ``False``
Automatically add the host to the ``known_hosts``.
CLI Example:
.. code-block:: bash
salt '*' scp.get /var/tmp/file /tmp/file hostname=10.10.10.1 auto_add_policy=True
"""
scp_client = _prepare_connection(**kwargs)
get_kwargs = {"recursive": recursive, "preserve_times": preserve_times}
if local_path:
get_kwargs["local_path"] = local_path
return scp_client.get(remote_path, **get_kwargs)
def METHOD_NAME(
files,
remote_path=None,
recursive=False,
preserve_times=False,
saltenv="base",
**kwargs
):
"""
Transfer files and directories to remote host.
files
A single path or a list of paths to be transferred.
remote_path
The path on the remote device where to store the files.
    recursive: ``False``
Transfer files and directories recursively.
preserve_times: ``False``
Preserve ``mtime`` and ``atime`` of transferred files and directories.
hostname
The hostname of the remote device.
port: ``22``
The port of the remote device.
username
The username required for SSH authentication on the device.
password
Used for password authentication. It is also used for private key
decryption if ``passphrase`` is not given.
passphrase
Used for decrypting private keys.
pkey
An optional private key to use for authentication.
key_filename
The filename, or list of filenames, of optional private key(s) and/or
certificates to try for authentication.
timeout
An optional timeout (in seconds) for the TCP connect.
socket_timeout: ``10``
The channel socket timeout in seconds.
buff_size: ``16384``
The size of the SCP send buffer.
allow_agent: ``True``
Set to ``False`` to disable connecting to the SSH agent.
look_for_keys: ``True``
Set to ``False`` to disable searching for discoverable private key
files in ``~/.ssh/``
banner_timeout
An optional timeout (in seconds) to wait for the SSH banner to be
presented.
auth_timeout
An optional timeout (in seconds) to wait for an authentication
response.
auto_add_policy: ``False``
Automatically add the host to the ``known_hosts``.
CLI Example:
.. code-block:: bash
salt '*' scp.put /path/to/file /var/tmp/file hostname=server1 auto_add_policy=True
"""
scp_client = _prepare_connection(**kwargs)
put_kwargs = {"recursive": recursive, "preserve_times": preserve_times}
if remote_path:
put_kwargs["remote_path"] = remote_path
cached_files = []
if not isinstance(files, (list, tuple)):
files = [files]
for file_ in files:
cached_file = __salt__["cp.cache_file"](file_, saltenv=saltenv)
cached_files.append(cached_file)
return scp_client.METHOD_NAME(cached_files, **put_kwargs) |
4,920 | is sentence boundary | #!/usr/bin/python2
# -*- coding: utf-8; mode: Python; indent-tabs-mode: t; tab-width: 4; python-indent: 4 -*-
# Copyright (C) 2012, 2014, 2015 Olga Yakovleva <yakovleva.o.v@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os.path
import argparse
import re
import codecs
import collections
import json
import regex
tag_regex=re.compile("<[^<]+>")
class base(object):
def __init__(self,conf):
self.replacements=conf["replace"]
self.alphabet=set(conf["alphabet"])
def replace(self,text):
res=text
for k,v in self.replacements.iteritems():
res=res.replace(k,v)
return res
class word_extractor(base):
def __init__(self,conf):
base.__init__(self,conf)
alphabet_string=u"".join(sorted(self.alphabet))
self.word_regex=re.compile(ur"(?ui)\b[{a}]+(-[{a}]+)*\b".format(a=alphabet_string))
self.words=collections.Counter()
def __call__(self,arg,dirname,filenames):
for filename in filenames:
path=os.path.join(dirname,filename)
if os.path.isfile(path):
with codecs.open(path,"r","utf-8") as f:
for word in (m.group(0).lower() for m in self.word_regex.finditer(tag_regex.sub(" ",f.read()))):
# letters=set(c for c in word if c.isalpha())
# if letters.issubset(self.alphabet):
self.words[self.replace(word)]+=1
def words(conf):
ext=word_extractor(conf)
os.path.walk(conf["source"],ext,None)
with codecs.open("words","w","utf-8") as f:
for w,c in ext.words.most_common():
f.write(u"{} {}\n".format(w,c))
class sentence_selector(base):
def __init__(self,conf):
base.__init__(self,conf)
self.count=0
self.sentences=dict()
self.words=set()
with codecs.open("words","r","utf-8") as f:
for line in f:
w,n=line.split()
if int(n)<conf["min_word_frequency"]:
break
self.words.add(w)
self.min_length=conf["min_length"]
self.max_length=conf["max_length"]
self.vowels=set(conf["vowels"])
self.ignore_case=conf["ignore_case"]
self.allow_initialisms=conf["allow_initialisms"]
self.exclude_capitalized=conf["exclude_capitalized_words"]
self.plain=conf["plain"]
self.wp_delim_regex=re.compile(u"(?u)\s*(?:</?doc(?:\s+[^<>]+)?>\s*)+")
self.tok_delim_regex=re.compile(r"\s+")
self.par_delim_regex=re.compile(r"(\r?\n){2,}")
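        # tok_parse_regex groups: (1) leading punctuation, (2) the word,
        # (3) trailing punctuation; group (4) matches a lone dash token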
self.tok_parse_regex=regex.compile(u"(?V1i)^(?:(\\p{{P}}*)([{a}]+(?:-[{a}]+)*)(\\p{{P}}*)|(\\p{{Pd}}))$".format(a=u"".join(self.alphabet)))
def add_paragraph(self,paragraph):
remaining_tokens=collections.deque(self.tok_delim_regex.split(self.replace(paragraph.strip())))
if not remaining_tokens:
return
sentence_tokens=list()
while remaining_tokens:
sentence_tokens.append(remaining_tokens.popleft())
if self.METHOD_NAME(sentence_tokens,remaining_tokens):
if self.is_nice_sentence(sentence_tokens):
sentence=u" ".join(sentence_tokens)
if sentence not in self.sentences:
self.sentences[sentence]=self.count
self.count+=1
sentence_tokens=list()
def METHOD_NAME(self,sentence_tokens,remaining_tokens):
if not sentence_tokens:
return False
if not remaining_tokens:
return True
if not self.ignore_case:
for c in remaining_tokens[0]:
if c.isalpha():
if not c.isupper():
return False
break
last_token=sentence_tokens[-1]
if (last_token[-1]==".") and (len(last_token)>1) and last_token[-2].isalpha() and ((len(last_token)==2) or (not last_token[-3].isalpha())):
return False
for c in reversed(last_token):
if c in [".","?","!"]:
return True
elif c.isalpha() or c.isdigit():
return False
return False
def check_token(self,token,first,last):
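        # Returns 0 to reject the whole sentence, 1 for an acceptable dash
        # token, and 2 for an acceptable word (counted towards length limits)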
match=self.tok_parse_regex.match(token)
if not match:
return 0
if match.group(4):
return 1
word=match.group(2)
lword=word.lower()
if (not lword in self.words):
return 0
punc=match.group(3)
if "!" in punc:
return 0
if last and ("." not in punc) and ("?" not in punc):
return 0
if (not self.ignore_case) and first and (not word[0].isupper()):
return 0
if self.exclude_capitalized and (not first) and word[0].isupper():
return 0
if (not last) and (punc=="."):
return 0
if (not self.allow_initialisms) and (len(lword)>1) and (not self.vowels.intersection(lword)):
return 0
return 2
def is_nice_sentence(self,tokens):
num_tokens=len(tokens)
num_words=0
for i,token in enumerate(tokens):
n=self.check_token(token,i==0,i==(num_tokens-1))
if n==0:
return False
if n==2:
num_words+=1
if num_words < self.min_length:
return False
if num_words > self.max_length:
return False
return True
def process_wikipedia_text(self,text):
for article_text in self.wp_delim_regex.split(text):
if article_text:
clean_article_text=tag_regex.sub(u" ",article_text).replace("()","")
for paragraph in self.par_delim_regex.split(clean_article_text):
self.add_paragraph(paragraph)
def process_plain_text(self,text):
for paragraph in self.par_delim_regex.split(text):
self.add_paragraph(paragraph)
def __call__(self,arg,dirname,filenames):
for filename in sorted(filenames):
path=os.path.join(dirname,filename)
if os.path.isfile(path):
with codecs.open(path,"r","utf-8") as f:
contents=f.read()
if self.plain:
self.process_plain_text(contents)
else:
self.process_wikipedia_text(contents)
def sentences(conf):
sel=sentence_selector(conf)
os.path.walk(conf["source"],sel,None)
with codecs.open("sentences","w","utf-8") as f:
for sentence,id in sorted(sel.sentences.iteritems(),key=lambda p: p[1]):
f.write(sentence)
f.write("\n")
if __name__=="__main__":
parser=argparse.ArgumentParser(description="Select nice sentences for recording")
parser.add_argument("--config",required=True,help="the path to the configuration file")
subparsers=parser.add_subparsers()
words_parser=subparsers.add_parser("words")
words_parser.set_defaults(func=words)
sentences_parser=subparsers.add_parser("sentences")
sentences_parser.set_defaults(func=sentences)
args=parser.parse_args()
with open(args.config,"r") as f:
conf=json.load(f)
args.func(conf) |
4,921 | test output | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import opengate as gate
import opengate_core as g4
from scipy.spatial.transform import Rotation
import matplotlib.pyplot as plt
from opengate.user_hooks import check_production_cuts
paths = gate.get_default_test_paths(__file__, "gate_test036_adder_depth")
def create_simulation(geom):
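    """Build the test036 SPECT simulation; geom selects 'repeat' (repeat
    array) or 'param' (parameterised repeater) for the crystal pixels."""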
# create the simulation
sim = gate.Simulation()
# main options
ui = sim.user_info
ui.g4_verbose = False
ui.visu = False
ui.number_of_threads = 1
ui.random_seed = 123456
# units
m = gate.g4_units("m")
cm = gate.g4_units("cm")
nm = gate.g4_units("nm")
keV = gate.g4_units("keV")
mm = gate.g4_units("mm")
Bq = gate.g4_units("Bq")
kBq = 1000 * Bq
# world size
world = sim.world
world.size = [2 * m, 2 * m, 2 * m]
# material
sim.add_material_database(paths.data / "GateMaterials.db")
# fake spect head
head = sim.add_volume("Box", "SPECThead")
head.size = [55 * cm, 42 * cm, 18 * cm]
    head.translation = [0, 0, 15 * cm]  # not used when the head is repeated into an array of 2
head.material = "G4_AIR"
# crystal
crystal = sim.add_volume("Box", "crystal")
crystal.mother = "SPECThead"
crystal.size = [55 * cm, 42 * cm, 2 * cm]
crystal.translation = [0, 0, 4 * cm]
crystal.material = "Plastic"
crystal.color = [1, 0, 0, 1]
# pixel crystal
crystal_pixel = sim.add_volume("Box", "crystal_pixel")
crystal_pixel.mother = crystal.name
crystal_pixel.size = [0.5 * cm, 0.5 * cm, 2 * cm]
crystal_pixel.material = "NaITl"
crystal_pixel.color = [1, 1, 0, 1]
# geom
size = [110, 84, 1]
tr = [0.5 * cm, 0.5 * cm, 0]
if geom == "repeat":
le = gate.repeat_array(crystal_pixel.name, size, tr)
crystal_pixel.translation = None
crystal_pixel.rotation = None
crystal_pixel.repeat = le
if geom == "param":
crystal_repeater = gate.build_param_repeater(
sim, crystal.name, crystal_pixel.name, size, tr
)
# FIXME add a second head
head.translation = None
head.rotation = None
tr = 30 * cm
le = gate.repeat_array(head.name, [1, 1, 2], [0, 0, tr])
le[0]["rotation"] = Rotation.from_euler("X", 180, degrees=True).as_matrix()
head.repeat = le
# physic list
sim.physics_manager.physics_list_name = "G4EmStandardPhysics_option4"
sim.physics_manager.enable_decay = False
sim.physics_manager.global_production_cuts.gamma = 0.01 * mm
sim.physics_manager.global_production_cuts.electron = 0.01 * mm
sim.physics_manager.global_production_cuts.positron = 1 * mm
sim.physics_manager.global_production_cuts.proton = 1 * mm
# cuts = p.production_cuts
# cuts.world.gamma = 0.01 * mm
# cuts.world.electron = 0.01 * mm
# cuts.world.positron = 1 * mm
# cuts.world.proton = 1 * mm
# default source for tests
activity = 40 * kBq / ui.number_of_threads
# activity = 5 * Bq / ui.number_of_threads
source = sim.add_source("GenericSource", "src1")
source.particle = "gamma"
source.energy.mono = 333 * keV
source.position.type = "sphere"
source.position.radius = 5 * cm
source.direction.type = "momentum"
source.direction.momentum = [0, 0, 1]
source.activity = activity
# default source for tests
source = sim.add_source("GenericSource", "src2")
source.particle = "gamma"
source.energy.mono = 222 * keV
source.position.type = "sphere"
source.position.radius = 5 * cm
source.direction.type = "momentum"
source.direction.momentum = [0, 0, -1]
source.activity = activity
# add stat actor
sim.add_actor("SimulationStatisticsActor", "Stats")
# hits collection
hc = sim.add_actor("DigitizerHitsCollectionActor", "Hits")
hc.mother = crystal.name
hc.output = paths.output / "test036.root"
hc.attributes = [
"KineticEnergy",
"PostPosition",
# 'HitPosition', 'PrePosition',
"TotalEnergyDeposit",
"GlobalTime", # 'EventID',
# 'TrackVolumeName', 'TrackID', # 'Test',
# 'ProcessDefinedStep',
"PreStepUniqueVolumeID",
# 'TrackVolumeCopyNo', 'TrackVolumeInstanceID'
]
# singles collection
sc = sim.add_actor("DigitizerAdderActor", "Singles")
sc.mother = crystal.name
sc.input_digi_collection = "Hits"
# sc.policy = 'EnergyWinnerPosition'
sc.policy = "EnergyWeightedCentroidPosition"
# same filename, there will be two branches in the file
sc.output = hc.output
sec = gate.g4_units("second")
ui.running_verbose_level = 2
# sim.run_timing_intervals = [[0, 0.33 * sec], [0.33 * sec, 0.66 * sec], [0.66 * sec, 1 * sec]]
sim.run_timing_intervals = [[0, 1 * sec]]
# print cuts
print(sim.physics_manager.dump_production_cuts())
# add a user hook function to dump production cuts frmo Geant4
sim.user_fct_after_init = check_production_cuts
return sim
def METHOD_NAME(output):
# retrieve the information about the touched volumes
man = g4.GateUniqueVolumeIDManager.GetInstance()
vols = man.GetAllVolumeIDs()
print(f"There are {len(vols)} volumes used in the adder")
"""for v in vols:
vid = v.GetVolumeDepthID()
print(f'Volume {v.fID}: ', end='')
for x in vid:
print(f' {x.fDepth} {x.fVolumeName} {x.fCopyNb} / ', end='')
print()"""
# stat
gate.warning("Compare stats")
stats = output.get_actor("Stats")
print(stats)
print(f"Number of runs was {stats.counts.run_count}. Set to 1 before comparison")
stats.counts.run_count = 1 # force to 1
stats_ref = gate.read_stat_file(paths.gate_output / "stats.txt")
is_ok = gate.assert_stats(stats, stats_ref, tolerance=0.07)
# root compare HITS
print()
hc = output.get_actor("Hits").user_info
gate.warning("Compare HITS")
gate_file = paths.gate_output / "spect.root"
checked_keys = ["posX", "posY", "posZ", "edep", "time", "trackId"]
keys1, keys2, scalings2, tols = gate.get_keys_correspondence(checked_keys)
scalings = [1.0] * len(scalings2)
tols[2] = 2 # Z
# tols[4] = 0.01 # energy
    is_ok = (
        gate.compare_root3(
            gate_file,
            hc.output,
            "Hits",
            "Hits",
            keys1,
            keys2,
            tols,
            scalings,
            scalings2,
            paths.output / "test036_hits.png",
        )
        and is_ok
    )
# Root compare SINGLES
print()
sc = output.get_actor("Singles").user_info
gate.warning("Compare SINGLES")
gate_file = paths.gate_output / "spect.root"
checked_keys = ["time", "globalPosX", "globalPosY", "globalPosZ", "energy"]
keys1, keys2, scalings2, tols = gate.get_keys_correspondence(checked_keys)
scalings = [1.0] * len(scalings2)
tols[3] = 0.9 # Z
# tols[1] = 1.0 # X
# tols[2] = 1.0 # Y
# tols[4] = 0.02 # energy
is_ok = (
gate.compare_root3(
gate_file,
sc.output,
"Singles",
"Singles",
keys1,
keys2,
tols,
scalings,
scalings2,
paths.output / "test036_singles.png",
)
and is_ok
)
# this is the end, my friend
return is_ok |
4,922 | resolve | # Released under the MIT License. See LICENSE for details.
#
"""Provides the AppConfig class."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
import _babase
if TYPE_CHECKING:
from typing import Any
_g_pending_apply = False # pylint: disable=invalid-name
class AppConfig(dict):
"""A special dict that holds the game's persistent configuration values.
Category: **App Classes**
It also provides methods for fetching values with app-defined fallback
defaults, applying contained values to the game, and committing the
config to storage.
Call babase.appconfig() to get the single shared instance of this class.
    AppConfig data is stored as json on disk, so make sure to only place
json-friendly values in it (dict, list, str, float, int, bool).
Be aware that tuples will be quietly converted to lists when stored.
"""
def METHOD_NAME(self, key: str) -> Any:
"""Given a string key, return a config value (type varies).
This will substitute application defaults for values not present in
the config dict, filter some invalid values, etc. Note that these
values do not represent the state of the app; simply the state of its
config. Use babase.App to access actual live state.
Raises an Exception for unrecognized key names. To get the list of keys
supported by this method, use babase.AppConfig.builtin_keys(). Note
that it is perfectly legal to store other data in the config; it just
needs to be accessed through standard dict methods and missing values
handled manually.
"""
return _babase.resolve_appconfig_value(key)
def default_value(self, key: str) -> Any:
"""Given a string key, return its predefined default value.
This is the value that will be returned by babase.AppConfig.resolve()
if the key is not present in the config dict or of an incompatible
type.
Raises an Exception for unrecognized key names. To get the list of keys
supported by this method, use babase.AppConfig.builtin_keys(). Note
that it is perfectly legal to store other data in the config; it just
needs to be accessed through standard dict methods and missing values
handled manually.
"""
return _babase.get_appconfig_default_value(key)
def builtin_keys(self) -> list[str]:
"""Return the list of valid key names recognized by babase.AppConfig.
This set of keys can be used with resolve(), default_value(), etc.
It does not vary across platforms and may include keys that are
        obsolete or not relevant on the current running version (for instance,
        VR related keys on non-VR platforms). This is to minimize the amount
        of platform checking necessary.
Note that it is perfectly legal to store arbitrary named data in the
config, but in that case it is up to the user to test for the existence
of the key in the config dict, fall back to consistent defaults, etc.
"""
return _babase.get_appconfig_builtin_keys()
def apply(self) -> None:
"""Apply config values to the running app.
This call is thread-safe and asynchronous; changes will happen
in the next logic event loop cycle.
"""
_babase.app.push_apply_app_config()
def commit(self) -> None:
"""Commits the config to local storage.
Note that this call is asynchronous so the actual write to disk may not
occur immediately.
"""
commit_app_config()
def apply_and_commit(self) -> None:
"""Run apply() followed by commit(); for convenience.
(This way the commit() will not occur if apply() hits invalid data)
"""
self.apply()
self.commit()
def read_app_config() -> tuple[AppConfig, bool]:
"""Read the app config."""
import os
import json
config_file_healthy = False
# NOTE: it is assumed that this only gets called once and the
# config object will not change from here on out
config_file_path = _babase.app.env.config_file_path
config_contents = ''
try:
if os.path.exists(config_file_path):
with open(config_file_path, encoding='utf-8') as infile:
config_contents = infile.read()
config = AppConfig(json.loads(config_contents))
else:
config = AppConfig()
config_file_healthy = True
except Exception:
logging.exception(
"Error reading config file at time %.3f: '%s'.",
_babase.apptime(),
config_file_path,
)
# Whenever this happens lets back up the broken one just in case it
# gets overwritten accidentally.
logging.info(
"Backing up current config file to '%s.broken'", config_file_path
)
try:
import shutil
shutil.copyfile(config_file_path, config_file_path + '.broken')
except Exception:
logging.exception('Error copying broken config.')
config = AppConfig()
# Now attempt to read one of our 'prev' backup copies.
prev_path = config_file_path + '.prev'
try:
if os.path.exists(prev_path):
with open(prev_path, encoding='utf-8') as infile:
config_contents = infile.read()
config = AppConfig(json.loads(config_contents))
else:
config = AppConfig()
config_file_healthy = True
logging.info('Successfully read backup config.')
except Exception:
logging.exception('Error reading prev backup config.')
return config, config_file_healthy
def commit_app_config(force: bool = False) -> None:
"""Commit the config to persistent storage.
Category: **General Utility Functions**
(internal)
"""
plus = _babase.app.plus
assert plus is not None
if not _babase.app.config_file_healthy and not force:
logging.warning(
'Current config file is broken; '
'skipping write to avoid losing settings.'
)
return
plus.mark_config_dirty() |
4,923 | test exec step | #!/usr/bin/env python3
# Copyright 2016 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or https://www.bfgroup.xyz/b2/LICENSE.txt)
# Test the mi interface for the debugger
import BoostBuild
import TestCmd
import re
def split_stdin_stdout(text):
"""stdin is all text after the prompt up to and including
the next newline. Everything else is stdout. stdout
may contain regular expressions enclosed in {{}}."""
prompt = re.escape('(gdb) \n')
pattern = re.compile(r'(?<=%s)((?:\d*-.*)\n)' % prompt)
stdin = ''.join(re.findall(pattern, text))
stdout = re.sub(pattern, '', text)
outside_pattern = re.compile(r'(?:\A|(?<=\}\}))(?:[^\{]|(?:\{(?!\{)))*(?:(?=\{\{)|\Z)')
def escape_line(line):
line = re.sub(outside_pattern, lambda m: re.escape(m.group(0)), line)
return re.sub(r'\{\{|\}\}', '', line)
stdout = '\n'.join([escape_line(line) for line in stdout.split('\n')])
return (stdin, stdout)
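# Illustrative split (assumed transcript, not part of the suite below): for
# text = '=thread-group-added,id="i1"\n(gdb) \n72-exec-run\n72^running\n',
# split_stdin_stdout(text) yields stdin == '72-exec-run\n' and stdout equal to
# the remaining lines, regex-escaped except for {{...}} spans.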
def run(tester, io):
(input,output) = split_stdin_stdout(io)
tester.run_build_system(stdin=input, stdout=output, match=TestCmd.match_re)
def make_tester():
return BoostBuild.Tester(["-dmi"], pass_toolset=False, pass_d0=False,
use_test_config=False, ignore_toolset_requirements=False, match=TestCmd.match_re)
def test_exec_run():
t = make_tester()
t.write("test.jam", """\
UPDATE ;
""")
run(t, """\
=thread-group-added,id="i1"
(gdb)
72-exec-run -ftest.jam
=thread-created,id="1",group-id="i1"
72^running
(gdb)
*stopped,reason="exited-normally"
(gdb)
73-gdb-exit
73^exit
""")
t.cleanup()
def test_exit_status():
t = make_tester()
t.write("test.jam", """\
EXIT : 1 ;
""")
run(t, """\
=thread-group-added,id="i1"
(gdb)
72-exec-run -ftest.jam
=thread-created,id="1",group-id="i1"
72^running
(gdb)
*stopped,reason="exited",exit-code="1"
(gdb)
73-gdb-exit
73^exit
""")
t.cleanup()
def METHOD_NAME():
t = make_tester()
t.write("test.jam", """\
rule g ( )
{
a = 1 ;
b = 2 ;
}
rule f ( )
{
g ;
c = 3 ;
}
f ;
""")
run(t, """\
=thread-group-added,id="i1"
(gdb)
-break-insert f
^done,bkpt={number="1",type="breakpoint",disp="keep",enabled="y",func="f"}
(gdb)
72-exec-run -ftest.jam
=thread-created,id="1",group-id="i1"
72^running
(gdb)
*stopped,reason="breakpoint-hit",bkptno="1",disp="keep",frame={func="f",args=[],file="test.jam",fullname="{{.*}}test.jam",line="8"},thread-id="1",stopped-threads="all"
(gdb)
1-exec-step
1^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="g",args=[],file="test.jam",fullname="{{.*}}test.jam",line="3"},thread-id="1"
(gdb)
2-exec-step
2^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="g",args=[],file="test.jam",fullname="{{.*}}test.jam",line="4"},thread-id="1"
(gdb)
3-exec-step
3^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="f",args=[],file="test.jam",fullname="{{.*}}test.jam",line="9"},thread-id="1"
(gdb)
73-gdb-exit
73^exit
""")
t.cleanup()
def test_exec_next():
t = make_tester()
t.write("test.jam", """\
rule g ( )
{
a = 1 ;
}
rule f ( )
{
g ;
b = 2 ;
c = 3 ;
}
rule h ( )
{
f ;
g ;
}
h ;
d = 4 ;
""")
run(t, """\
=thread-group-added,id="i1"
(gdb)
-break-insert f
^done,bkpt={number="1",type="breakpoint",disp="keep",enabled="y",func="f"}
(gdb)
72-exec-run -ftest.jam
=thread-created,id="1",group-id="i1"
72^running
(gdb)
*stopped,reason="breakpoint-hit",bkptno="1",disp="keep",frame={func="f",args=[],file="test.jam",fullname="{{.*}}test.jam",line="7"},thread-id="1",stopped-threads="all"
(gdb)
1-exec-next
1^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="f",args=[],file="test.jam",fullname="{{.*}}test.jam",line="8"},thread-id="1"
(gdb)
2-exec-next
2^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="f",args=[],file="test.jam",fullname="{{.*}}test.jam",line="9"},thread-id="1"
(gdb)
3-exec-next
3^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="h",args=[],file="test.jam",fullname="{{.*}}test.jam",line="14"},thread-id="1"
(gdb)
4-exec-next
4^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="module scope",args=[],file="test.jam",fullname="{{.*}}test.jam",line="17"},thread-id="1"
(gdb)
73-gdb-exit
73^exit
""")
t.cleanup()
def test_exec_finish():
t = make_tester()
t.write("test.jam", """\
rule f ( )
{
a = 1 ;
}
rule g ( )
{
f ;
b = 2 ;
i ;
}
rule h ( )
{
g ;
i ;
}
rule i ( )
{
c = 3 ;
}
h ;
d = 4 ;
""")
run(t, """\
=thread-group-added,id="i1"
(gdb)
-break-insert f
^done,bkpt={number="1",type="breakpoint",disp="keep",enabled="y",func="f"}
(gdb)
72-exec-run -ftest.jam
=thread-created,id="1",group-id="i1"
72^running
(gdb)
*stopped,reason="breakpoint-hit",bkptno="1",disp="keep",frame={func="f",args=[],file="test.jam",fullname="{{.*}}test.jam",line="3"},thread-id="1",stopped-threads="all"
(gdb)
1-exec-finish
1^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="g",args=[],file="test.jam",fullname="{{.*}}test.jam",line="8"},thread-id="1"
(gdb)
2-exec-finish
2^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="h",args=[],file="test.jam",fullname="{{.*}}test.jam",line="14"},thread-id="1"
(gdb)
3-exec-finish
3^running
(gdb)
*stopped,reason="end-stepping-range",frame={func="module scope",args=[],file="test.jam",fullname="{{.*}}test.jam",line="21"},thread-id="1"
(gdb)
73-gdb-exit
73^exit
""")
t.cleanup()
def test_breakpoints():
"""Tests the interaction between the following commands:
break, clear, delete, disable, enable"""
t = make_tester()
t.write("test.jam", """\
rule f ( )
{
a = 1 ;
}
rule g ( )
{
b = 2 ;
}
rule h ( )
{
c = 3 ;
d = 4 ;
}
f ;
g ;
h ;
UPDATE ;
""")
run(t, """\
=thread-group-added,id="i1"
(gdb)
-break-insert f
^done,bkpt={number="1",type="breakpoint",disp="keep",enabled="y",func="f"}
(gdb)
72-exec-run -ftest.jam
=thread-created,id="1",group-id="i1"
72^running
(gdb)
*stopped,reason="breakpoint-hit",bkptno="1",disp="keep",frame={func="f",args=[],file="test.jam",fullname="{{.*}}test.jam",line="3"},thread-id="1",stopped-threads="all"
(gdb)
-interpreter-exec console kill
^done
(gdb)
-break-insert g
^done,bkpt={number="2",type="breakpoint",disp="keep",enabled="y",func="g"}
(gdb)
-break-disable 1
^done
(gdb)
73-exec-run -ftest.jam
=thread-created,id="1",group-id="i1"
73^running
(gdb)
*stopped,reason="breakpoint-hit",bkptno="2",disp="keep",frame={func="g",args=[],file="test.jam",fullname="{{.*}}test.jam",line="7"},thread-id="1",stopped-threads="all"
(gdb)
-interpreter-exec console kill
^done
(gdb)
-break-enable 1
^done
(gdb)
74-exec-run -ftest.jam
=thread-created,id="1",group-id="i1"
74^running
(gdb)
*stopped,reason="breakpoint-hit",bkptno="1",disp="keep",frame={func="f",args=[],file="test.jam",fullname="{{.*}}test.jam",line="3"},thread-id="1",stopped-threads="all"
(gdb)
-interpreter-exec console kill
^done
(gdb)
-break-delete 1
^done
(gdb)
75-exec-run -ftest.jam
=thread-created,id="1",group-id="i1"
75^running
(gdb)
*stopped,reason="breakpoint-hit",bkptno="2",disp="keep",frame={func="g",args=[],file="test.jam",fullname="{{.*}}test.jam",line="7"},thread-id="1",stopped-threads="all"
(gdb)
76-gdb-exit
76^exit
""")
t.cleanup()
test_exec_run()
test_exit_status()
METHOD_NAME()
test_exec_next()
test_exec_finish()
test_breakpoints() |
4,924 | pop |
class CircularBuffer:
"""
Fixed capacity circular buffer that can be used as
a queue, stack or array
"""
def __init__(self, capacity: int, defaultValue=None) -> None:
if capacity <= 0:
raise ValueError("capacity must be greater than zero")
self.defaultValue = defaultValue
self.buffer: list = [None] * capacity
self.capacity: int = capacity
self.count: int = 0
self.tailIndex: int = 0
def head(self):
"""
Non-destructive get of the entry at the head (index count-1)
return: if list is not empty, the head (most recently pushed/enqueued) entry.
if list is empty, the default value provided in the constructor
"""
if self.count > 0:
return self.buffer[(self.tailIndex + self.count - 1) % self.capacity]
return self.defaultValue
def tail(self):
"""
Non-destructive get of the entry at the tail (index 0)
return: if list is not empty, the tail (least recently pushed/enqueued) entry.
if list is empty, the default value provided in the constructor
"""
if self.count > 0:
return self.buffer[self.tailIndex]
return self.defaultValue
def enqueue(self, value):
"""
Push a value onto the head of the buffer.
If the buffer is full, then the value
at the tail is dropped to make room.
"""
if self.count < self.capacity:
self.count += 1
else:
# drop entry at the tail
self.tailIndex = (self.tailIndex + 1) % self.capacity
# write value at head
self.buffer[(self.tailIndex + self.count - 1) % self.capacity] = value
def dequeue(self):
"""
Remove value at tail of list and return it
return: if list not empty, value at tail
if list empty, the default value
"""
theValue = self.tail()
if self.count > 0:
self.count -= 1
self.tailIndex = (self.tailIndex + 1) % self.capacity
return theValue
def push(self, value):
"""
Push a value onto the head of the buffer.
If the buffer is full, then an IndexError
is raised.
"""
if self.count >= self.capacity:
raise IndexError("Attempt to push to a full buffer")
self.enqueue(value)
def METHOD_NAME(self):
"""
Remove value at head of list and return it
return: if list not empty, the value at head
if list empty, the default value
"""
theValue = self.head()
if self.count > 0:
self.count -= 1
return theValue
def append(self, value):
"""
append a value to the tail (index count-1) of the buffer.
If the buffer is at capacity, then this
will raise an IndexError.
"""
if self.count >= self.capacity:
raise IndexError("Attempt to append to a full buffer")
# make space at the tail for the value
self.count += 1
self.tailIndex = (self.tailIndex - 1) % self.capacity
self.buffer[self.tailIndex] = value
def get(self, i: int):
"""
Get value at given index where
head is index 0 and tail is index count-1;
i: index from 0 to count-1 (head is zero)
return: value at index
or the default value if index is out of range
"""
if (i >= 0) and (i < self.count):
# head lives at offset count-1 from the tail; index i counts back toward the tail
return self.buffer[(self.tailIndex + (self.count - 1 - i)) % self.capacity]
return self.defaultValue
def set(self, i: int, value):
"""
Set value at given index where
head is index 0 and tail is index count-1;
"""
if (i >= 0) and (i < self.count):
self.buffer[(self.tailIndex + (self.count - 1 - i)) % self.capacity] = value
return
raise IndexError("buffer index is out of range")
def truncateTo(self, count):
"""
Truncate the list to the given number of values.
If the given number is greater than or equal to
the current count(), then nothing is changed.
If the given number is less than the
current count(), then elements are dropped
from the tail to resize to the given number.
The capacity of the queue is not changed.
So to drop all entries except the head:
truncateTo(1)
count: the desired number of elements to
leave in the queue (maximum)
"""
if count < 0 or count > self.capacity:
raise ValueError("count is out of range")
if count < self.count:
# drop entries from the tail so that only `count` remain
self.tailIndex = (self.tailIndex + (self.count - count)) % self.capacity
self.count = count
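# A minimal usage sketch (assumes the class above; METHOD_NAME is the masked
# pop): the same fixed-capacity storage acts as a FIFO queue via
# enqueue/dequeue and as a LIFO stack via push/METHOD_NAME.
if __name__ == "__main__":
buf = CircularBuffer(3)
buf.enqueue(1)
buf.enqueue(2)
buf.enqueue(3)
buf.enqueue(4)  # full: the tail entry (1) is dropped to make room
assert buf.dequeue() == 2  # FIFO: oldest surviving entry
buf.push(5)
assert buf.METHOD_NAME() == 5  # LIFO: most recently pushed entry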
|
4,925 | test creation | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh.core.serialization import Buffer
from bokeh.core.types import ID
from bokeh.protocol import Protocol
from bokeh.protocol.exceptions import ValidationError
# Module under test
from bokeh.protocol import receiver # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
proto = Protocol()
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def METHOD_NAME() -> None:
receiver.Receiver(None)
async def test_validation_success() -> None:
msg = proto.create('ACK')
r = receiver.Receiver(proto)
partial = await r.consume(msg.header_json)
assert partial is None
partial = await r.consume(msg.metadata_json)
assert partial is None
partial = await r.consume(msg.content_json)
assert partial is not None
assert partial.msgtype == msg.msgtype
assert partial.header == msg.header
assert partial.content == msg.content
assert partial.metadata == msg.metadata
async def test_validation_success_with_one_buffer() -> None:
r = receiver.Receiver(proto)
partial = await r.consume('{"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":1}')
assert partial is None
partial = await r.consume('{}')
assert partial is None
partial = await r.consume('{"bar": 10}')
assert partial is None
partial = await r.consume('{"id": "buf_header"}')
assert partial is None
partial = await r.consume(b'payload')
assert partial is not None
assert partial.msgtype == "PATCH-DOC"
assert partial.header == {"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":1}
assert partial.content == {"bar":10}
assert partial.metadata == {}
assert partial.buffers == [Buffer(ID("buf_header"), b"payload")]
async def test_multiple_validation_success_with_multiple_buffers() -> None:
r = receiver.Receiver(proto)
for N in range(10):
partial = await r.consume(f'{{"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":{N}}}')
partial = await r.consume('{}')
partial = await r.consume('{"bar": 10}')
for i in range(N):
partial = await r.consume(f'{{"id": "header{i}"}}')
partial = await r.consume(f'payload{i}'.encode())
assert partial is not None
assert partial.msgtype == "PATCH-DOC"
assert partial.header == {"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers": N}
assert partial.content == {"bar":10}
assert partial.metadata == {}
for i in range(N):
assert partial.buffers[i] == Buffer(ID(f"header{i}"), f"payload{i}".encode())
async def test_binary_header_raises_error() -> None:
r = receiver.Receiver(proto)
with pytest.raises(ValidationError):
await r.consume(b'{"msgtype": "PATCH-DOC", "msgid": "10"}')
async def test_binary_metadata_raises_error() -> None:
r = receiver.Receiver(proto)
await r.consume('{"msgtype": "PATCH-DOC", "msgid": "10"}')
with pytest.raises(ValidationError):
await r.consume(b'metadata')
async def test_binary_content_raises_error() -> None:
r = receiver.Receiver(proto)
await r.consume('{"msgtype": "PATCH-DOC", "msgid": "10"}')
await r.consume('metadata')
with pytest.raises(ValidationError):
await r.consume(b'content')
async def test_binary_payload_header_raises_error() -> None:
r = receiver.Receiver(proto)
await r.consume('{"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":1}')
await r.consume('{}')
await r.consume('{}')
with pytest.raises(ValidationError):
await r.consume(b'{"id": "buf_header"}')
async def test_text_payload_buffer_raises_error() -> None:
r = receiver.Receiver(proto)
await r.consume('{"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":1}')
await r.consume('{}')
await r.consume('{}')
await r.consume('{"id": "buf_header"}')
with pytest.raises(ValidationError):
await r.consume('buf_payload')
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------- |
4,926 | test override file | import os
from pathlib import Path
import pytest
from sunpy.data.data_manager.tests.mocks import MOCK_HASH, write_to_test_file
from sunpy.util.exceptions import SunpyUserWarning
def test_basic(storage, downloader, data_function):
data_function()
assert downloader.times_called == 1
assert len(storage._store) == 1
assert Path(storage._store[0]['file_path']).name == ('sunpy.test_file')
def test_download_cache(manager, storage, downloader, data_function):
"""
Test calling function multiple times does not redownload.
"""
data_function()
data_function()
assert downloader.times_called == 1
assert len(storage._store) == 1
assert Path(storage._store[0]['file_path']).name == ('sunpy.test_file')
def test_file_tampered(manager, storage, downloader, data_function):
"""
Test that a locally modified file triggers a warning and a redownload.
"""
data_function()
write_to_test_file(manager._tempdir + '/sunpy.test_file', 'b')
with pytest.warns(SunpyUserWarning):
data_function()
assert downloader.times_called == 2
assert len(storage._store) == 1
assert Path(storage._store[0]['file_path']).name == ('sunpy.test_file')
def test_wrong_hash_provided(manager):
@manager.require('test_file', ['url1'], 'wrong_hash')
def test_foo():
pass
with pytest.raises(RuntimeError):
test_foo()
def test_skip_all(manager, storage, downloader, data_function):
"""
Test skip_hash_check redownloads data.
"""
data_function()
with manager.skip_hash_check():
data_function()
assert downloader.times_called == 2
assert len(storage._store) == 1
assert Path(storage._store[0]['file_path']).name == ('sunpy.test_file')
def METHOD_NAME(manager, storage, downloader, data_function, tmpdir):
"""
Test the override_file functionality.
"""
def default_tester(manager):
"""
Function to test whether the file name is test_file.
"""
assert manager.get('test_file').name == ('sunpy.test_file')
def override_file_tester(manager):
"""
Function to test whether the file is /tmp/another_file.
"""
assert manager.get('test_file') == Path(f'{folder}/another_file')
# Outside the context manager file is default
folder = tmpdir.strpath
data_function(default_tester)
write_to_test_file(str(Path(folder+'/another_file')), 'a')
with manager.override_file('test_file', f'file://{folder}/another_file'):
# Inside the file is replaced
data_function(override_file_tester)
# TODO: this combined with the check above fails on windows
# with manager.override_file('test_file', f'{folder}/another_file'):
# # Inside the file is replaced
# data_function(override_file_tester)
# check the function works with hash provided
with manager.override_file('test_file', f'file://{folder}/another_file', MOCK_HASH):
data_function(override_file_tester)
with pytest.raises(ValueError): # noqa: PT012
# check if functions errors with the wrong hash
with manager.override_file('test_file', f'file://{folder}/another_file', 'wrong_hash'):
# Inside the file is replaced
data_function(override_file_tester)
# Even after context manager call outside the file is default
data_function(default_tester)
def test_override_file_remote(manager, downloader, data_function):
replace_url = 'http://example.com/another_file'
data_function()
assert downloader.times_called == 1
with manager.override_file('test_file', replace_url):
data_function()
assert downloader.times_called == 2
assert downloader.last_called_url == replace_url
def test_wrong_hash_error(manager, storage):
storage._store.append({
'file_path': '/tmp/test_file',
'file_hash': 'aa',
'url': 'url1'
})
@manager.require('test_file', ['url1', 'url2'], 'asdf')
def foo():
pass
with pytest.raises(ValueError):
foo()
def test_file_changed(data_function, storage):
# Download the file first
data_function()
file = storage._store[0]['file_path']
# The file was then locally changed
write_to_test_file(file, "asd")
# Now it should warn
with pytest.warns(SunpyUserWarning):
data_function()
def test_delete_db(sqlmanager, sqlstorage):
# Download the file
@sqlmanager.require('test_file', ['http://example.com/test_file'], MOCK_HASH)
def test_function():
pass
test_function()
# The DB file was then deleted
os.remove(str(sqlstorage._db_path))
# SQLite should not throw an error
test_function()
def test_same_file_id_different_module(downloader, storage,
data_function, data_function_from_fake_module):
# Uses name 'test_file' to refer to the file
data_function()
# Change hash of the above file to allow MockDownloader to download another file
# Otherwise it will skip the download because a file with the same hash already exists
storage._store[0]['file_hash'] = 'abc'
# This function from a different module uses same name 'test_file' to refer to a different file
data_function_from_fake_module()
assert len(storage._store) == 2
assert downloader.times_called == 2
# Check if the files are namespaced correctly
assert Path(storage._store[0]['file_path']).name == 'sunpy.test_file'
assert Path(storage._store[1]['file_path']).name == 'fake_module.test_file'
def test_namespacing_with_manager_override_file(module_patched_manager, downloader,
storage, data_function_from_fake_module):
# Download a file using manager.require()
data_function_from_fake_module()
assert len(storage._store) == 1
assert downloader.times_called == 1
assert Path(storage._store[0]['file_path']).name == 'fake_module.test_file'
# Override the file name with a different URI
with module_patched_manager.override_file(
'test_file', 'http://www.different_uri.com/new_file', MOCK_HASH):
data_function_from_fake_module()
assert downloader.times_called == 2
# New file entry is stored in manager._file_cache only
# It's not stored in InMemStorage or SqlStorage
assert len(storage._store) == 1
assert Path(
module_patched_manager._file_cache['test_file']['fake_module.']
).name == 'fake_module.new_file'
# Storage still contains original test_file
assert Path(storage._store[0]['file_path']).name == 'fake_module.test_file'
# Request the original file again
data_function_from_fake_module()
# File doesn't get redownloaded, instead it is retrieved using the file hash
assert downloader.times_called == 2
# new_file entry in manager._file_cache is replaced with the original test_file
assert Path(
module_patched_manager._file_cache['test_file']['fake_module.']
).name == 'fake_module.test_file'
# Storage still contains original test_file
assert Path(storage._store[0]['file_path']).name == 'fake_module.test_file' |
4,927 | add to register | import pytest
from DIRAC import gLogger
from DIRAC.MonitoringSystem.Client.DataOperationSender import DataOperationSender
gLogger.setLevel("DEBUG")
dataOpSender = DataOperationSender()
dataOpSender.monitoringOptions = ["Accounting", "Monitoring"]  # send to both backends
dataOpMonitoringData = [
{
"OperationType": "se.getFile",
"User": "rpozzi",
"ExecutionSite": "",
"Source": "CertificationSandboxSE",
"Destination": "LCG.PIC.es",
"Protocol": "dips",
"FinalStatus": "Successful",
"TransferSize": 3,
"TransferTime": 1458226213,
"RegistrationTime": 1458226213,
"TransferOK": 20,
"TransferTotal": 50,
"RegistrationOK": 10,
"RegistrationTotal": 40,
},
{
"OperationType": "se.getFile",
"User": "fstagni",
"ExecutionSite": "",
"Source": "Failed",
"Destination": "LCG.PIC.es",
"Protocol": "dips",
"FinalStatus": "Failed",
"TransferSize": 343,
"TransferTime": 1458226213,
"RegistrationTime": 1458226213,
"TransferOK": 6,
"TransferTotal": 26,
"RegistrationOK": 3,
"RegistrationTotal": 35,
},
{
"OperationType": "se.getFile",
"User": "fstagni",
"ExecutionSite": "",
"Source": "Failed",
"Destination": "LCG.PIC.es",
"Protocol": "dips",
"FinalStatus": "Failed",
"TransferSize": 35555,
"TransferTime": 1458226213,
"RegistrationTime": 1458226213,
"TransferOK": 1345,
"TransferTotal": 2614,
"RegistrationOK": 31245,
"RegistrationTotal": 351255,
},
{
"OperationType": "se.getFile",
"User": "rpozzi",
"ExecutionSite": "",
"Source": "Failed",
"Destination": "LCG.CNAF.it",
"Protocol": "dips",
"FinalStatus": "Failed",
"TransferSize": 1000,
"TransferTime": 1458222546,
"RegistrationTime": 1458226000,
"TransferOK": 109,
"TransferTotal": 1204,
"RegistrationOK": 321,
"RegistrationTotal": 5000,
},
]
delayedDataOpData = [
{
"OperationType": "se.getFile",
"User": "fstagni",
"ExecutionSite": "",
"Source": "Failed",
"Destination": "LCG.PIC.es",
"Protocol": "dips",
"FinalStatus": "Failed",
"TransferSize": 3,
"TransferTime": 1458226213,
"RegistrationTime": 1458226213,
"TransferOK": 6,
"TransferTotal": 26,
"RegistrationOK": 3,
"RegistrationTotal": 35,
},
{
"OperationType": "se.getFile",
"User": "rpozzi",
"ExecutionSite": "",
"Source": "Failed",
"Destination": "LCG.CNAF.it",
"Protocol": "dips",
"FinalStatus": "Successfull",
"TransferSize": 10,
"TransferTime": 1458226300,
"RegistrationTime": 1458226300,
"TransferOK": 23,
"TransferTotal": 113,
"RegistrationOK": 11,
"RegistrationTotal": 403,
},
{
"OperationType": "se.getFile",
"User": "rpozzi",
"ExecutionSite": "",
"Source": "Failed",
"Destination": "LCG.CNAF.it",
"Protocol": "dips",
"FinalStatus": "Successfull",
"TransferSize": 10,
"TransferTime": 1458226300,
"RegistrationTime": 1458226300,
"TransferOK": 23,
"TransferTotal": 113,
"RegistrationOK": 11,
"RegistrationTotal": 403,
},
]
# fixture to have before the test methods
@pytest.fixture
def METHOD_NAME():
# Add the first set
for record in dataOpMonitoringData:
add_result = dataOpSender.sendData(record, False, False)
assert add_result["OK"]
# Add the second set
for record in delayedDataOpData:
add_result = dataOpSender.sendData(record, False, False)
assert add_result["OK"]
yield METHOD_NAME
# Test all possible options for the class
@pytest.mark.parametrize(("commitFlag, delayedCommit"), [(False, False), (True, False), (True, True), (False, True)])
def test_DataOperationSender(commitFlag, delayedCommit):
for record in dataOpMonitoringData:
result = dataOpSender.sendData(record, commitFlag, delayedCommit)
if not commitFlag and not delayedCommit:
commit = dataOpSender.concludeSending()
assert commit["OK"]
assert result["OK"], result["Message"]
def test_delayed_DataOpSender(METHOD_NAME):
# Try to conclude sending of data added to the register by the fixture method addToRegister
result = dataOpSender.concludeSending()
assert result["OK"], result["Message"] |
4,928 | fit | """Transformer that can automatically featurize text columns using featuretools' nlp_primitives."""
import string
import featuretools as ft
from featuretools.primitives import (
DiversityScore,
MeanCharactersPerWord,
NumCharacters,
NumWords,
PolarityScore,
)
from evalml.pipelines.components.transformers.preprocessing import LSA, TextTransformer
from evalml.utils import infer_feature_types
class NaturalLanguageFeaturizer(TextTransformer):
"""Transformer that can automatically featurize text columns using featuretools' nlp_primitives.
Since models cannot handle non-numeric data, any text must be broken down into features that
provide useful information about that text. This component splits each text column into
several informative features: Diversity Score, Mean Characters per Word, Polarity Score,
LSA (Latent Semantic Analysis), Number of Characters, and Number of Words.
Calling transform on this component will replace any text columns in the given dataset with these numeric columns.
Args:
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Natural Language Featurizer"
hyperparameter_ranges = {}
"""{}"""
def __init__(self, random_seed=0, **kwargs):
self._trans = [
NumWords,
NumCharacters,
DiversityScore,
MeanCharactersPerWord,
PolarityScore,
]
self._features = None
self._lsa = LSA(random_seed=random_seed)
self._primitives_provenance = {}
super().__init__(random_seed=random_seed, **kwargs)
def _clean_text(self, X):
"""Remove all non-alphanum chars other than spaces, and make lowercase."""
def normalize(text):
text = text.translate(str.maketrans("", "", string.punctuation))
return text.lower()
for col_name in X.columns:
# we assume non-str values will have been filtered out prior to calling NaturalLanguageFeaturizer. casting to str is a safeguard.
X[col_name].fillna("", inplace=True)
col = X[col_name].astype(str)
X[col_name] = col.apply(normalize)
return X
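# Illustrative behavior (assumed input): normalize("Hello, World!") returns
# "hello world"; punctuation is stripped, text lowercased, NaNs become "" first.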
def _make_entity_set(self, X, text_columns):
X_text = X[text_columns].copy()
X_text = self._clean_text(X_text)
# featuretools expects str-type column names
X_text.rename(columns=str, inplace=True)
all_text_logical_types = {
col_name: "natural_language" for col_name in X_text.columns
}
es = ft.EntitySet()
es.add_dataframe(
dataframe_name="X",
dataframe=X_text,
index="index",
make_index=True,
logical_types=all_text_logical_types,
)
return es
def METHOD_NAME(self, X, y=None):
"""Fits component to data.
Args:
X (pd.DataFrame or np.ndarray): The input training data of shape [n_samples, n_features]
y (pd.Series): The target training data of length [n_samples]
Returns:
self
"""
X = infer_feature_types(X)
self._text_columns = self._get_text_columns(X)
if len(self._text_columns) == 0:
return self
self._lsa.METHOD_NAME(X)
es = self._make_entity_set(X, self._text_columns)
self._features = ft.dfs(
entityset=es,
target_dataframe_name="X",
trans_primitives=self._trans,
max_depth=1,
features_only=True,
)
return self
@staticmethod
def _get_primitives_provenance(features):
provenance = {}
for feature in features:
input_col = feature.base_features[0].get_name()
# Return a copy because `get_feature_names` returns a reference to the names
output_features = [name for name in feature.get_feature_names()]
if input_col not in provenance:
provenance[input_col] = output_features
else:
provenance[input_col] += output_features
return provenance
def transform(self, X, y=None):
"""Transforms data X by creating new features using existing text columns.
Args:
X (pd.DataFrame): The data to transform.
y (pd.Series, optional): Ignored.
Returns:
pd.DataFrame: Transformed X
"""
X_ww = infer_feature_types(X)
if self._features is None or len(self._features) == 0:
return X_ww
es = self._make_entity_set(X_ww, self._text_columns)
nan_mask = X[self._text_columns].isna()
any_nans = nan_mask.any().any()
X_nlp_primitives = ft.calculate_feature_matrix(
features=self._features,
entityset=es,
)
if X_nlp_primitives.isnull().any().any():
X_nlp_primitives.fillna(0, inplace=True)
X_ww_altered = infer_feature_types(
X_ww.ww[self._text_columns].fillna(""),
{s: "NaturalLanguage" for s in self._text_columns},
)
X_lsa = self._lsa.transform(X_ww_altered)
X_nlp_primitives.set_index(X_ww.index, inplace=True)
if any_nans:
primitive_features = self._get_primitives_provenance(self._features)
for column, derived_features in primitive_features.items():
X_nlp_primitives.loc[nan_mask[column], derived_features] = None
lsa_features = self._lsa._get_feature_provenance()
for column, derived_features in lsa_features.items():
X_lsa.loc[nan_mask[column], derived_features] = None
X_lsa.ww.init(logical_types={col: "Double" for col in X_lsa.columns})
X_nlp_primitives.ww.init(
logical_types={col: "Double" for col in X_nlp_primitives.columns},
)
X_ww = X_ww.ww.drop(self._text_columns)
for col in X_nlp_primitives:
X_ww.ww[col] = X_nlp_primitives[col]
for col in X_lsa:
X_ww.ww[col] = X_lsa[col]
return X_ww
def _get_feature_provenance(self):
if not self._text_columns:
return {}
provenance = self._get_primitives_provenance(self._features)
for col, lsa_features in self._lsa._get_feature_provenance().items():
if col in provenance:
provenance[col] += lsa_features
return provenance |
4,929 | get queryset | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from rest_framework import permissions, status, mixins
from rest_framework.response import Response
from rest_framework.exceptions import ErrorDetail
from gcloud.contrib.collection.models import Collection
from gcloud.core.apis.drf.serilaziers.collection import CollectionSerializer
from gcloud.core.apis.drf.viewsets import GcloudReadOnlyViewSet
from gcloud import err_code
from gcloud.iam_auth import IAMMeta, utils as iam_auth_utils
from django.utils.translation import ugettext_lazy as _
import logging
logger = logging.getLogger("root")
class CollectionPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
action = view.action
if action in ["retrieve", "destroy"]:
return request.user.username == obj.username
class CollectionViewSet(GcloudReadOnlyViewSet, mixins.CreateModelMixin, mixins.DestroyModelMixin):
queryset = Collection.objects.all()
serializer_class = CollectionSerializer
pagination_class = None
permission_classes = [permissions.IsAuthenticated, CollectionPermission]
filter_fields = ["id", "category"]
append_resource_actions = {
IAMMeta.FLOW_RESOURCE: [
IAMMeta.FLOW_VIEW_ACTION,
IAMMeta.FLOW_CREATE_TASK_ACTION,
IAMMeta.FLOW_CREATE_PERIODIC_TASK_ACTION,
],
IAMMeta.COMMON_FLOW_RESOURCE: [IAMMeta.COMMON_FLOW_VIEW_ACTION],
IAMMeta.MINI_APP_RESOURCE: [IAMMeta.MINI_APP_VIEW_ACTION],
}
def METHOD_NAME(self):
query_set = super().METHOD_NAME()
return query_set.filter(username=self.request.user.username)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data, many=True)
serializer.is_valid(raise_exception=True)
for item in serializer.validated_data:
username = item["username"]
category = item["category"]
instance_id = item["instance_id"]
if Collection.objects.filter(username=username, category=category, instance_id=instance_id).exists():
message = _(f"重复收藏: {username}实例ID: {category}, 类别: {instance_id}, 已经收藏过了, 无需再次收藏")
logger.error(message)
return Response({"detail": ErrorDetail(message, err_code.REQUEST_PARAM_INVALID.code)}, exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def injection_auth_actions(self, request, serializer_data, queryset_data):
resource_id_list_map = {r_type: [] for r_type in self.append_resource_actions}
resource_allowed_actions_map = {}
if not isinstance(serializer_data, list):
serializer_data = [serializer_data]
for item in serializer_data:
if item["category"] in resource_id_list_map:
resource_id_list_map[item["category"]].append(item["extra_info"]["id"])
for r_type, id_list in resource_id_list_map.items():
resource_allowed_actions_map[r_type] = getattr(
iam_auth_utils, "get_{}_allowed_actions_for_user".format(r_type)
)(request.user.username, self.append_resource_actions[r_type], id_list)
for item in serializer_data:
if item["category"] not in resource_allowed_actions_map:
item["auth_actions"] = []
continue
resource_allowed_actions = resource_allowed_actions_map[item["category"]]
item["auth_actions"] = [
act for act, allow in resource_allowed_actions.get(str(item["extra_info"]["id"]), {}).items() if allow
]
return serializer_data |
4,930 | test lookups | # !/usr/bin/env python
# encoding: utf-8
"""
SEED Platform (TM), Copyright (c) Alliance for Sustainable Energy, LLC, and other contributors.
See also https://github.com/seed-platform/seed/main/LICENSE.md
"""
from django.test import TestCase
from seed.landing.models import SEEDUser as User
from seed.models.measures import Measure, _snake_case
from seed.models.property_measures import PropertyMeasure
from seed.utils.organizations import create_organization
class TestMeasures(TestCase):
def setUp(self):
self.user = User.objects.create_superuser('test_user@demo.com', 'test_user@demo.com', 'test_pass')
self.org, _, _ = create_organization(self.user)
Measure.populate_measures(self.org.id)
def test_populate_measures(self):
# BuildingSync v1.0.0 has 222 enums
self.assertEqual(Measure.objects.count(), 222)
# if we run it again, it shouldn't add anything new
Measure.populate_measures(self.org.id)
self.assertEqual(Measure.objects.count(), 222)
def test_snake_case(self):
self.assertEqual(_snake_case("AbCdEf"), "ab_cd_ef")
self.assertEqual(_snake_case("Clean and/or repair"), "clean_and_or_repair")
self.assertEqual(
_snake_case("Upgrade operating protocols, calibration, and/or sequencing"),
"upgrade_operating_protocols_calibration_and_or_sequencing"
)
self.assertEqual(_snake_case("AdvancedMeteringSystems"), "advanced_metering_systems")
def test_validate_measures(self):
measures = [
("renewable_energy_systems", "install_photovoltaic_system"),
("other_hvac", "add_or_repair_economizer"),
("chiller_plant_improvements", "clean_and_or_repair")
]
objs = []
for m in measures:
objs.append(Measure.objects.get(category=m[0], name=m[1]))
obj_ids = [m.id for m in objs]
obj_names = ["{}.{}".format(m.category, m.name) for m in objs]
results = Measure.validate_measures(obj_ids)
self.assertEqual(obj_ids, results)
results = Measure.validate_measures(obj_names)
self.assertEqual(obj_ids, results)
results = Measure.validate_measures(['.'])
self.assertEqual([], results)
extra_blank = list(obj_ids)
extra_blank.append("")
results = Measure.validate_measures(extra_blank)
self.assertEqual(obj_ids, results)
extra_malformed = list(obj_ids)
extra_malformed.append("abcdef")
results = Measure.validate_measures(extra_malformed)
self.assertEqual(obj_ids, results)
extra_missing = list(obj_ids)
extra_missing.append("a.b")
results = Measure.validate_measures(extra_missing)
self.assertEqual(obj_ids, results)
results = Measure.validate_measures([])
self.assertEqual(results, [])
class TestPropertyMeasures(TestCase):
def setUp(self):
self.user = User.objects.create_superuser('test_user@demo.com', 'test_user@demo.com', 'test_pass')
self.org, _, _ = create_organization(self.user)
Measure.populate_measures(self.org.id)
# get some property instances
def METHOD_NAME(self):
self.assertEqual(PropertyMeasure.str_to_impl_status(PropertyMeasure.MEASURE_DISCARDED), 5)
self.assertEqual(PropertyMeasure.str_to_impl_status('measure discarded'), None)
self.assertEqual(PropertyMeasure.str_to_impl_status('Discarded'), 5)
self.assertEqual(PropertyMeasure.str_to_impl_status(None), None)
self.assertEqual(
PropertyMeasure.str_to_category_affected(PropertyMeasure.CATEGORY_DOMESTIC_HOT_WATER), 5
)
self.assertEqual(PropertyMeasure.str_to_category_affected('domestic nothing'), None)
self.assertEqual(PropertyMeasure.str_to_category_affected('Domestic Hot Water'), 5)
self.assertEqual(PropertyMeasure.str_to_category_affected(None), None)
self.assertEqual(
PropertyMeasure.str_to_application_scale(PropertyMeasure.SCALE_ENTIRE_FACILITY), 5
)
self.assertEqual(PropertyMeasure.str_to_application_scale('Nothing entirely'), None)
self.assertEqual(PropertyMeasure.str_to_application_scale('Entire facility'), 5)
self.assertEqual(PropertyMeasure.str_to_application_scale(None), None)
def test_populate_measures(self):
self.assertEqual(Measure.objects.count(), 222)
# if we run it again, it shouldn't add anything new
Measure.populate_measures(self.org.id)
self.assertEqual(Measure.objects.count(), 222) |
4,931 | update train kd loss section | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from functools import partial
from functools import reduce
from typing import Any, Dict, List, NamedTuple
import pytest
import torch
from torch.optim import SGD
from nncf import NNCFConfig
from nncf.config.structures import BNAdaptationInitArgs
from nncf.experimental.torch.nas.bootstrapNAS import EpochBasedTrainingAlgorithm
from nncf.torch.model_creation import create_nncf_network
from nncf.torch.utils import get_model_device
from tests.torch.helpers import create_ones_mock_dataloader
from tests.torch.nas.helpers import move_model_to_cuda_if_available
from tests.torch.nas.models.synthetic import ThreeConvModel
from tests.torch.nas.models.synthetic import ThreeConvModelMode
from tests.torch.nas.test_scheduler import fixture_schedule_params # pylint: disable=unused-import
class PSControllerTestDesc(NamedTuple):
model_creator: Any
blocks_to_skip: List[List[str]] = None
input_sizes: List[int] = [1, 3, 32, 32]
algo_params: Dict = {}
name: str = None
mode: str = "auto"
def __str__(self):
if hasattr(self.model_creator, "__name__"):
name = self.model_creator.__name__
elif self.name is not None:
name = self.name
else:
name = "NOT_DEFINED"
return name
def prepare_test_model(ps_ctrl_desc, bn_adapt_section_is_called, knowledge_distillation_loss_is_called: bool = False):
config = {
"input_info": {"sample_size": ps_ctrl_desc.input_sizes},
"bootstrapNAS": {
"training": {
"batchnorm_adaptation": {"num_bn_adaptation_samples": 2},
},
},
}
nncf_config = NNCFConfig.from_dict(config)
update_train_bn_adapt_section(nncf_config, bn_adapt_section_is_called)
METHOD_NAME(nncf_config, knowledge_distillation_loss_is_called)
bn_adapt_args = BNAdaptationInitArgs(data_loader=create_ones_mock_dataloader(nncf_config))
nncf_config.register_extra_structs([bn_adapt_args])
model = ps_ctrl_desc.model_creator()
move_model_to_cuda_if_available(model)
return model, bn_adapt_args, nncf_config
def update_train_bn_adapt_section(nncf_config, bn_adapt_section_is_called):
if not bn_adapt_section_is_called:
nncf_config["bootstrapNAS"]["training"]["batchnorm_adaptation"]["num_bn_adaptation_samples"] = 0
def METHOD_NAME(nncf_config, knowledge_distillation_loss_is_called):
if knowledge_distillation_loss_is_called:
nncf_config["bootstrapNAS"]["training"].update(
{"compression": [{"algorithm": "knowledge_distillation", "type": "mse"}]}
)
def cal_loss_actual(output, input_, training_ctrl):
return training_ctrl.loss()
def calc_loss_reference(output, input_, kd_model):
mse = torch.nn.MSELoss().to(get_model_device(kd_model))
kd_output = kd_model(input_)
return mse(output, kd_output)
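# Reference KD loss for the test below (assumed to mirror NNCF's mse
# knowledge_distillation): run a frozen copy of the original model as the
# teacher and take the MSE between student and teacher outputs on the batch.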
def run_train(training_ctrl, model, mock_dataloader, calc_loss_fn):
optimizer = SGD(model.parameters(), lr=1e-02, weight_decay=1e-02)
training_ctrl.set_training_lr_scheduler_args(optimizer, len(mock_dataloader))
training_ctrl.scheduler.epoch_step()
training_ctrl.multi_elasticity_handler.activate_minimum_subnet()
model.train()
output_storage = []
for _, (input_, __) in enumerate(mock_dataloader):
input_ = input_.to(get_model_device(model))
output = model(input_)
output_storage.append(output)
loss = calc_loss_fn(output, input_)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return output_storage
# pylint: disable=protected-access
class TestProgressiveTrainingController:
@pytest.mark.parametrize(
"bn_adapt_section_is_called",
[False, True],
ids=["section_with_zero_num_samples", "section_with_non_zero_num_samples"],
)
def test_bn_adapt(self, mocker, bn_adapt_section_is_called, schedule_params):
test_desc = PSControllerTestDesc(
model_creator=ThreeConvModel,
algo_params={"width": {"min_width": 1, "width_step": 1}},
input_sizes=ThreeConvModel.INPUT_SIZE,
)
bn_adapt_run_patch = mocker.patch(
"nncf.common.initialization.batchnorm_adaptation.BatchnormAdaptationAlgorithm.run"
)
model, _, nncf_config = prepare_test_model(test_desc, bn_adapt_section_is_called)
model = create_nncf_network(model, nncf_config)
training_algorithm = EpochBasedTrainingAlgorithm.from_config(model, nncf_config)
training_algorithm._training_ctrl.prepare_for_validation()
if bn_adapt_section_is_called:
bn_adapt_run_patch.assert_called()
else:
bn_adapt_run_patch.assert_not_called()
def test_knowledge_distillation_training_process(self):
test_desc = PSControllerTestDesc(
model_creator=ThreeConvModel,
algo_params={"width": {"min_width": 1, "width_step": 1}},
input_sizes=ThreeConvModel.INPUT_SIZE,
)
model, _, nncf_config = prepare_test_model(test_desc, False, True)
model = create_nncf_network(model, nncf_config)
torch.manual_seed(2)
number_of_iters = 2
batch_size = 1
mock_dataloader = create_ones_mock_dataloader(
nncf_config, num_samples=batch_size * number_of_iters, batch_size=batch_size
)
model.mode = ThreeConvModelMode.SUPERNET
training_algorithm = EpochBasedTrainingAlgorithm.from_config(deepcopy(model), nncf_config)
actual_outputs = run_train(
training_algorithm._training_ctrl,
training_algorithm._model,
mock_dataloader,
partial(cal_loss_actual, training_ctrl=training_algorithm._training_ctrl),
)
training_algorithm = EpochBasedTrainingAlgorithm.from_config(deepcopy(model), nncf_config)
reference_outputs = run_train(
training_algorithm._training_ctrl,
training_algorithm._model,
mock_dataloader,
partial(calc_loss_reference, kd_model=deepcopy(model)),
)
assert reduce(lambda a, b: a and torch.allclose(b[0], b[1]), zip(actual_outputs, reference_outputs), True), (
"Outputs of model with actual KD implementation doesn't match outputs from model with reference "
"Knowledge Distillation implementation"
) |
4,932 | set | #!/usr/bin/python
# SPDX-License-Identifier: LGPL-2.1-or-later
from __future__ import print_function
import os
import sys
import dbus
import dbus.service
import dbus.mainloop.glib
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
import bluezutils
class Player(dbus.service.Object):
properties = None
metadata = None
def set_object(self, obj = None):
if obj != None:
bus = dbus.SystemBus()
mp = dbus.Interface(bus.get_object("org.bluez", obj),
"org.bluez.MediaPlayer1")
prop = dbus.Interface(bus.get_object("org.bluez", obj),
"org.freedesktop.DBus.Properties")
self.properties = prop.GetAll("org.bluez.MediaPlayer1")
bus.add_signal_receiver(self.properties_changed,
path = obj,
dbus_interface = "org.freedesktop.DBus.Properties",
signal_name = "PropertiesChanged")
else:
track = dbus.Dictionary({
"xesam:title" : "Title",
"xesam:artist" : ["Artist"],
"xesam:album" : "Album",
"xesam:genre" : ["Genre"],
"xesam:trackNumber" : dbus.Int32(1),
"mpris:length" : dbus.Int64(10000) },
signature="sv")
self.properties = dbus.Dictionary({
"PlaybackStatus" : "playing",
"Identity" : "SimplePlayer",
"LoopStatus" : "None",
"Rate" : dbus.Double(1.0),
"Shuffle" : dbus.Boolean(False),
"Metadata" : track,
"Volume" : dbus.Double(1.0),
"Position" : dbus.Int64(0),
"MinimumRate" : dbus.Double(1.0),
"MaximumRate" : dbus.Double(1.0),
"CanGoNext" : dbus.Boolean(False),
"CanGoPrevious" : dbus.Boolean(False),
"CanPlay" : dbus.Boolean(False),
"CanSeek" : dbus.Boolean(False),
"CanControl" : dbus.Boolean(False),
},
signature="sv")
handler = InputHandler(self)
GObject.io_add_watch(sys.stdin, GObject.IO_IN,
handler.handle)
@dbus.service.method("org.freedesktop.DBus.Properties",
in_signature="ssv", out_signature="")
def METHOD_NAME(self, interface, key, value):
print("Set (%s, %s)" % (key, value), file=sys.stderr)
return
@dbus.service.signal("org.freedesktop.DBus.Properties",
signature="sa{sv}as")
def PropertiesChanged(self, interface, properties,
invalidated = dbus.Array()):
"""PropertiesChanged(interface, properties, invalidated)
Send a PropertiesChanged signal. 'properties' is a dictionary
containing string parameters as specified in doc/media-api.txt.
"""
pass
def help(self, func):
help(self.__class__.__dict__[func])
def properties_changed(self, interface, properties, invalidated):
print("properties_changed(%s, %s)" % (properties, invalidated))
self.PropertiesChanged(interface, properties, invalidated)
class InputHandler:
commands = { 'PropertiesChanged': '(interface, properties)',
'help': '(cmd)' }
def __init__(self, player):
self.player = player
print('\n\nAvailable commands:')
for cmd in self.commands:
print('\t', cmd, self.commands[cmd], sep='')
print("\nUse python syntax to pass arguments to available methods.\n" \
"E.g.: PropertiesChanged({'Metadata' : {'Title': 'My title', \
'Album': 'my album' }})")
self.prompt()
def prompt(self):
print('\n>>> ', end='')
sys.stdout.flush()
def handle(self, fd, condition):
s = os.read(fd.fileno(), 1024).strip().decode('utf-8')  # os.read returns bytes on Python 3
try:
cmd = s[:s.find('(')]
if cmd not in self.commands:
print("Unknown command ", cmd)
except ValueError:
print("Malformed command")
return True
try:
exec "self.player.%s" % s
except Exception as e:
print(e)
pass
self.prompt()
return True
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
if len(sys.argv) > 1:
path = bluezutils.find_adapter(sys.argv[1]).object_path
else:
path = bluezutils.find_adapter().object_path
media = dbus.Interface(bus.get_object("org.bluez", path),
"org.bluez.Media1")
path = "/test/player"
player = Player(bus, path)
mainloop = GObject.MainLoop()
if len(sys.argv) > 2:
player.set_object(sys.argv[2])
else:
player.set_object()
print('Register media player with:\n\tProperties: %s' \
% (player.properties))
media.RegisterPlayer(dbus.ObjectPath(path), player.properties)
mainloop.run() |
4,933 | azure resource name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'HostNameResponse',
'NameIdentifierResponse',
'TldLegalAgreementResponse',
]
@pulumi.output_type
class HostNameResponse(dict):
"""
Details of a hostname derived from a domain.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "azureResourceName":
suggest = "azure_resource_name"
elif key == "azureResourceType":
suggest = "azure_resource_type"
elif key == "customHostNameDnsRecordType":
suggest = "custom_host_name_dns_record_type"
elif key == "hostNameType":
suggest = "host_name_type"
elif key == "siteNames":
suggest = "site_names"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in HostNameResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
HostNameResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
HostNameResponse.__key_warning(key)
return super().get(key, default)
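# Illustrative (assumed usage): resp["azureResourceName"] or
# resp.get("azureResourceName") first logs a warning suggesting the
# `azure_resource_name` property getter, then falls through to the plain
# dict lookup.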
def __init__(__self__, *,
METHOD_NAME: Optional[str] = None,
azure_resource_type: Optional[str] = None,
custom_host_name_dns_record_type: Optional[str] = None,
host_name_type: Optional[str] = None,
name: Optional[str] = None,
site_names: Optional[Sequence[str]] = None):
"""
Details of a hostname derived from a domain.
:param str azure_resource_name: Name of the Azure resource the hostname is assigned to. If it is assigned to a Traffic Manager then it will be the Traffic Manager name otherwise it will be the app name.
:param str azure_resource_type: Type of the Azure resource the hostname is assigned to.
:param str custom_host_name_dns_record_type: Type of the DNS record.
:param str host_name_type: Type of the hostname.
:param str name: Name of the hostname.
:param Sequence[str] site_names: List of apps the hostname is assigned to. This list will have more than one app only if the hostname is pointing to a Traffic Manager.
"""
if METHOD_NAME is not None:
pulumi.set(__self__, "azure_resource_name", METHOD_NAME)
if azure_resource_type is not None:
pulumi.set(__self__, "azure_resource_type", azure_resource_type)
if custom_host_name_dns_record_type is not None:
pulumi.set(__self__, "custom_host_name_dns_record_type", custom_host_name_dns_record_type)
if host_name_type is not None:
pulumi.set(__self__, "host_name_type", host_name_type)
if name is not None:
pulumi.set(__self__, "name", name)
if site_names is not None:
pulumi.set(__self__, "site_names", site_names)
@property
@pulumi.getter(name="azureResourceName")
def METHOD_NAME(self) -> Optional[str]:
"""
Name of the Azure resource the hostname is assigned to. If it is assigned to a Traffic Manager then it will be the Traffic Manager name otherwise it will be the app name.
"""
return pulumi.get(self, "azure_resource_name")
@property
@pulumi.getter(name="azureResourceType")
def azure_resource_type(self) -> Optional[str]:
"""
Type of the Azure resource the hostname is assigned to.
"""
return pulumi.get(self, "azure_resource_type")
@property
@pulumi.getter(name="customHostNameDnsRecordType")
def custom_host_name_dns_record_type(self) -> Optional[str]:
"""
Type of the DNS record.
"""
return pulumi.get(self, "custom_host_name_dns_record_type")
@property
@pulumi.getter(name="hostNameType")
def host_name_type(self) -> Optional[str]:
"""
Type of the hostname.
"""
return pulumi.get(self, "host_name_type")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the hostname.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="siteNames")
def site_names(self) -> Optional[Sequence[str]]:
"""
List of apps the hostname is assigned to. This list will have more than one app only if the hostname is pointing to a Traffic Manager.
"""
return pulumi.get(self, "site_names")
@pulumi.output_type
class NameIdentifierResponse(dict):
"""
Identifies an object.
"""
def __init__(__self__, *,
name: Optional[str] = None):
"""
Identifies an object.
:param str name: Name of the object.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the object.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class TldLegalAgreementResponse(dict):
"""
Legal agreement for a top level domain.
"""
def __init__(__self__, *,
agreement_key: str,
content: str,
title: str,
url: Optional[str] = None):
"""
Legal agreement for a top level domain.
:param str agreement_key: Unique identifier for the agreement.
:param str content: Agreement details.
:param str title: Agreement title.
:param str url: URL where a copy of the agreement details is hosted.
"""
pulumi.set(__self__, "agreement_key", agreement_key)
pulumi.set(__self__, "content", content)
pulumi.set(__self__, "title", title)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter(name="agreementKey")
def agreement_key(self) -> str:
"""
Unique identifier for the agreement.
"""
return pulumi.get(self, "agreement_key")
@property
@pulumi.getter
def content(self) -> str:
"""
Agreement details.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter
def title(self) -> str:
"""
Agreement title.
"""
return pulumi.get(self, "title")
@property
@pulumi.getter
def url(self) -> Optional[str]:
"""
URL where a copy of the agreement details is hosted.
"""
return pulumi.get(self, "url")
|
4,934 | forward | from typing import Any, Callable, Optional, Tuple, Type
from ..config import registry
from ..model import Model
from ..shims import MXNetShim
from ..types import ArgsKwargs
from ..util import convert_recursive, is_mxnet_array, is_xp_array, mxnet2xp, xp2mxnet
@registry.layers("MXNetWrapper.v1")
def MXNetWrapper(
mxnet_model,
convert_inputs: Optional[Callable] = None,
convert_outputs: Optional[Callable] = None,
model_class: Type[Model] = Model,
model_name: str = "mxnet",
) -> Model[Any, Any]:
"""Wrap a MXNet model, so that it has the same API as Thinc models.
To optimize the model, you'll need to create a MXNet optimizer and call
optimizer.step() after each batch.
Your MXNet model's forward method can take arbitrary args and kwargs,
but must return either a single tensor as output or a tuple. You may find the
MXNet register_forward_hook helpful if you need to adapt the output.
The convert functions are used to map inputs and outputs to and from your
MXNet model. Each function should return the converted output, and a callback
to use during the backward pass. So:
Xmxnet, get_dX = convert_inputs(X)
Ymxnet, mxnet_backprop = model.shims[0](Xmxnet, is_train)
Y, get_dYmxnet = convert_outputs(Ymxnet)
To allow maximum flexibility, the MXNetShim expects ArgsKwargs objects
on the way into the forward and backward passes. The ArgsKwargs objects
will be passed straight into the model in the forward pass, and straight
into `mxnet.autograd.backward` during the backward pass.
"""
if convert_inputs is None:
convert_inputs = convert_mxnet_default_inputs
if convert_outputs is None:
convert_outputs = convert_mxnet_default_outputs
return model_class(
model_name,
METHOD_NAME,
attrs={"convert_inputs": convert_inputs, "convert_outputs": convert_outputs},
shims=[MXNetShim(mxnet_model)],
)
def METHOD_NAME(model: Model, X: Any, is_train: bool) -> Tuple[Any, Callable]:
"""Return the output of the wrapped MXNet model for the given input,
along with a callback to handle the backward pass.
"""
convert_inputs = model.attrs["convert_inputs"]
convert_outputs = model.attrs["convert_outputs"]
Xmxnet, get_dX = convert_inputs(model, X, is_train)
Ymxnet, mxnet_backprop = model.shims[0](Xmxnet, is_train)
Y, get_dYmxnet = convert_outputs(model, (X, Ymxnet), is_train)
def backprop(dY: Any) -> Any:
dYmxnet = get_dYmxnet(dY)
dXmxnet = mxnet_backprop(dYmxnet)
dX = get_dX(dXmxnet)
return dX
return Y, backprop
# Default conversion functions
def convert_mxnet_default_inputs(
model: Model, X: Any, is_train: bool
) -> Tuple[ArgsKwargs, Callable[[ArgsKwargs], Any]]:
xp2mxnet_ = lambda x: xp2mxnet(x, requires_grad=is_train)
converted = convert_recursive(is_xp_array, xp2mxnet_, X)
if isinstance(converted, ArgsKwargs):
def reverse_conversion(dXmxnet):
return convert_recursive(is_mxnet_array, mxnet2xp, dXmxnet)
return converted, reverse_conversion
elif isinstance(converted, dict):
def reverse_conversion(dXmxnet):
dX = convert_recursive(is_mxnet_array, mxnet2xp, dXmxnet)
return dX.kwargs
return ArgsKwargs(args=tuple(), kwargs=converted), reverse_conversion
elif isinstance(converted, (tuple, list)):
def reverse_conversion(dXmxnet):
dX = convert_recursive(is_mxnet_array, mxnet2xp, dXmxnet)
return dX.args
return ArgsKwargs(args=tuple(converted), kwargs={}), reverse_conversion
else:
def reverse_conversion(dXmxnet):
dX = convert_recursive(is_mxnet_array, mxnet2xp, dXmxnet)
return dX.args[0]
return ArgsKwargs(args=(converted,), kwargs={}), reverse_conversion
def convert_mxnet_default_outputs(model: Model, X_Ymxnet: Any, is_train: bool):
X, Ymxnet = X_Ymxnet
Y = convert_recursive(is_mxnet_array, mxnet2xp, Ymxnet)
def reverse_conversion(dY: Any) -> ArgsKwargs:
dYmxnet = convert_recursive(is_xp_array, xp2mxnet, dY)
return ArgsKwargs(args=((Ymxnet,),), kwargs={"head_grads": dYmxnet})
return Y, reverse_conversion |
4,935 | test t output | """Testing models module."""
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from nilearn.glm import OLSModel
N = 10
X = np.c_[np.linspace(-1, 1, N), np.ones((N,))]
Y = np.r_[range(5), range(1, 6)]
MODEL = OLSModel(X)
RESULTS = MODEL.fit(Y)
""" R script
::
X = cbind(0:9 * 2/9 -1, 1)
Y = as.matrix(c(0:4, 1:5))
results = lm(Y ~ X-1)
print(results)
print(summary(results))
gives::
Call:
lm(formula = Y ~ X - 1)
Coefficients:
X1 X2
1.773 2.500
Residuals:
Min 1Q Median 3Q Max
-1.6970 -0.6667 0.0000 0.6667 1.6970
Coefficients:
Estimate Std. Error t value Pr(>|t|)
X1 1.7727 0.5455 3.250 0.0117 *
X2 2.5000 0.3482 7.181 9.42e-05 ***
---
Residual standard error: 1.101 on 8 degrees of freedom
Multiple R-squared: 0.8859, Adjusted R-squared: 0.8574
F-statistic: 31.06 on 2 and 8 DF, p-value: 0.0001694
"""
def test_model():
# Check basics about the model fit
# Check we fit the mean
assert_array_almost_equal(RESULTS.theta[1], np.mean(Y))
# Check we get the same as R
assert_array_almost_equal(RESULTS.theta, [1.773, 2.5], 3)
percentile = np.percentile
pcts = percentile(RESULTS.residuals, [0, 25, 50, 75, 100])
assert_array_almost_equal(pcts, [-1.6970, -0.6667, 0, 0.6667, 1.6970], 4)
def test_t_contrast():
# Test individual t against R
assert_array_almost_equal(RESULTS.t(0), 3.25)
assert_array_almost_equal(RESULTS.t(1), 7.181, 3)
# And contrast
assert_array_almost_equal(RESULTS.Tcontrast([1, 0]).t, 3.25)
assert_array_almost_equal(RESULTS.Tcontrast([0, 1]).t, 7.181, 3)
# Input matrix checked for size
with pytest.raises(ValueError):
RESULTS.Tcontrast([1])
with pytest.raises(ValueError):
RESULTS.Tcontrast([1, 0, 0])
# And shape
with pytest.raises(ValueError):
RESULTS.Tcontrast(np.array([1, 0])[:, None])
def METHOD_NAME():
# Check we get required outputs
exp_t = RESULTS.t(0)
exp_effect = RESULTS.theta[0]
exp_sd = exp_effect / exp_t
res = RESULTS.Tcontrast([1, 0])
assert_array_almost_equal(res.t, exp_t)
assert_array_almost_equal(res.effect, exp_effect)
assert_array_almost_equal(res.sd, exp_sd)
res = RESULTS.Tcontrast([1, 0], store=("effect",))
assert res.t is None
assert_array_almost_equal(res.effect, exp_effect)
assert res.sd is None
res = RESULTS.Tcontrast([1, 0], store=("t",))
assert_array_almost_equal(res.t, exp_t)
assert res.effect is None
assert res.sd is None
res = RESULTS.Tcontrast([1, 0], store=("sd",))
assert res.t is None
assert res.effect is None
assert_array_almost_equal(res.sd, exp_sd)
res = RESULTS.Tcontrast([1, 0], store=("effect", "sd"))
assert res.t is None
assert_array_almost_equal(res.effect, exp_effect)
assert_array_almost_equal(res.sd, exp_sd)
def test_f_output():
# Test f_output
res = RESULTS.Fcontrast([1, 0])
exp_f = RESULTS.t(0) ** 2
assert_array_almost_equal(exp_f, res.F)
# Test arrays work as well as lists
res = RESULTS.Fcontrast(np.array([1, 0]))
assert_array_almost_equal(exp_f, res.F)
# Test with matrix against R
res = RESULTS.Fcontrast(np.eye(2))
assert_array_almost_equal(31.06, res.F, 2)
# Input matrix checked for size
with pytest.raises(ValueError):
RESULTS.Fcontrast([1])
with pytest.raises(ValueError):
RESULTS.Fcontrast([1, 0, 0])
# And shape
with pytest.raises(ValueError):
RESULTS.Fcontrast(np.array([1, 0])[:, None])
def test_f_output_new_api():
res = RESULTS.Fcontrast([1, 0])
assert_array_almost_equal(res.effect, RESULTS.theta[0])
assert_array_almost_equal(res.covariance, RESULTS.vcov()[0][0])
def test_conf_int():
lower_, upper_ = RESULTS.conf_int()
assert (lower_ < upper_).all()
assert (lower_ > upper_ - 10).all()
lower_, upper_ = RESULTS.conf_int(cols=[1]).T
assert lower_ < upper_
assert lower_ > upper_ - 10 |
4,936 | test field definition is non string iterable | from types import ModuleType
from typing import Any, Callable, Iterable, List, Optional, Sequence, Union
from unittest.mock import MagicMock
import pytest
from typing_extensions import Annotated
from litestar import get
from litestar._signature import SignatureModel
from litestar.params import Body, Parameter
from litestar.status_codes import HTTP_200_OK, HTTP_204_NO_CONTENT
from litestar.testing import TestClient, create_test_client
from litestar.types import Empty
from litestar.types.helper_types import OptionalSequence
from litestar.utils.signature import ParsedSignature
def test_create_function_signature_model_parameter_parsing() -> None:
@get()
def my_fn(a: int, b: str, c: Optional[bytes], d: bytes = b"123", e: Optional[dict] = None) -> None:
pass
model = SignatureModel.create(
dependency_name_set=set(),
fn=my_fn.fn.value,
data_dto=None,
parsed_signature=ParsedSignature.from_fn(my_fn.fn.value, {}),
type_decoders=[],
)
fields = model._fields
assert fields["a"].annotation is int
assert not fields["a"].is_optional
assert fields["b"].annotation is str
assert not fields["b"].is_optional
assert fields["c"].annotation is Optional[bytes]
assert fields["c"].is_optional
assert fields["c"].default is Empty
assert fields["d"].annotation is bytes
assert fields["d"].default == b"123"
assert fields["e"].annotation == Optional[dict]
assert fields["e"].is_optional
assert fields["e"].default is Empty
def test_create_function_signature_model_ignore_return_annotation() -> None:
@get(path="/health", status_code=HTTP_204_NO_CONTENT)
async def health_check() -> None:
return None
signature_model_type = SignatureModel.create(
dependency_name_set=set(),
fn=health_check.fn.value,
data_dto=None,
parsed_signature=ParsedSignature.from_fn(health_check.fn.value, {}),
type_decoders=[],
)
assert signature_model_type().to_dict() == {}
def test_signature_model_resolves_forward_ref_annotations(create_module: Callable[[str], ModuleType]) -> None:
module = create_module(
"""
from __future__ import annotations
from pydantic import BaseModel
from litestar import Litestar, get
from litestar.di import Provide
class Test(BaseModel):
hello: str
async def get_dep() -> Test:
return Test(hello="world")
@get("/", dependencies={"test": Provide(get_dep)})
def hello_world(test: Test) -> Test:
return test
app = Litestar(route_handlers=[hello_world], openapi_config=None)
"""
)
with TestClient(app=module.app) as client:
response = client.get("/")
assert response.status_code == 200
assert response.json() == {"hello": "world"}
@pytest.mark.parametrize(("query", "exp"), [("?a=1&a=2&a=3", [1, 2, 3]), ("", None)])
def test_parse_optional_sequence_from_connection_kwargs(query: str, exp: Any) -> None:
@get("/")
def test(a: Optional[List[int]] = Parameter(query="a", default=None, required=False)) -> Optional[List[int]]:
return a
with create_test_client(route_handlers=[test]) as client:
response = client.get(f"/{query}")
assert response.status_code == HTTP_200_OK, response.json()
assert response.json() == exp
def METHOD_NAME() -> None:
def fn(a: Iterable[int], b: Optional[Iterable[int]]) -> None:
pass
model = SignatureModel.create(
dependency_name_set=set(),
fn=fn,
data_dto=None,
parsed_signature=ParsedSignature.from_fn(fn, {}),
type_decoders=[],
)
assert model._fields["a"].is_non_string_iterable
assert model._fields["b"].is_non_string_iterable
def test_field_definition_is_non_string_sequence() -> None:
def fn(a: Sequence[int], b: OptionalSequence[int]) -> None:
pass
model = SignatureModel.create(
dependency_name_set=set(),
fn=fn,
data_dto=None,
parsed_signature=ParsedSignature.from_fn(fn, signature_namespace={}),
type_decoders=[],
)
assert model._fields["a"].is_non_string_sequence
assert model._fields["b"].is_non_string_sequence
@pytest.mark.parametrize("query,expected", [("1", True), ("true", True), ("0", False), ("false", False)])
def test_query_param_bool(query: str, expected: bool) -> None:
mock = MagicMock()
@get("/")
def handler(param: bool) -> None:
mock(param)
with create_test_client(route_handlers=[handler]) as client:
response = client.get(f"/?param={query}")
assert response.status_code == HTTP_200_OK, response.json()
mock.assert_called_once_with(expected)
def test_union_constraint_handling() -> None:
mock = MagicMock()
@get("/")
def handler(param: Annotated[Union[str, List[str]], Body(max_length=3, max_items=3)]) -> None:
mock(param)
with create_test_client([handler]) as client:
response = client.get("/?param=foo")
assert response.status_code == 200
mock.assert_called_once_with("foo") |
4,937 | test standardize shape with non iterable | import numpy as np
from keras_core import backend
from keras_core import initializers
from keras_core.backend.common.variables import AutocastScope
from keras_core.backend.common.variables import KerasVariable
from keras_core.backend.common.variables import standardize_shape
from keras_core.testing import test_case
class VariablesTest(test_case.TestCase):
def test_deferred_initialization(self):
with backend.StatelessScope():
v = backend.Variable(
initializer=initializers.RandomNormal(), shape=(2, 2)
)
self.assertEqual(v._value, None)
# Variables can nevertheless be accessed
_ = v + 1
self.assertEqual(v._value.shape, (2, 2))
with self.assertRaisesRegex(ValueError, "while in a stateless scope"):
with backend.StatelessScope():
v = backend.Variable(initializer=0)
def test_deferred_assignment(self):
with backend.StatelessScope() as scope:
v = backend.Variable(
initializer=initializers.RandomNormal(), shape=(2, 2)
)
self.assertEqual(v._value, None)
v.assign(np.zeros((2, 2)))
v.assign_add(2 * np.ones((2, 2)))
v.assign_sub(np.ones((2, 2)))
out = scope.get_current_value(v)
self.assertAllClose(out, np.ones((2, 2)))
def test_autocasting(self):
v = backend.Variable(
initializer=initializers.RandomNormal(),
shape=(2, 2),
dtype="float32",
)
self.assertEqual(v.dtype, "float32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
print("open scope")
with AutocastScope("float16"):
self.assertEqual(
backend.standardize_dtype(v.value.dtype), "float16"
)
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
# Test non-float variables are not affected
v = backend.Variable(
initializer=initializers.Ones(),
shape=(2, 2),
dtype="int32",
trainable=False,
)
self.assertEqual(v.dtype, "int32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")
with AutocastScope("float16"):
self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")
def test_standardize_dtype_with_torch_dtype(self):
import torch
x = torch.randn(4, 4)
backend.standardize_dtype(x.dtype)
def test_name_validation(self):
# Test when name is not a string
with self.assertRaisesRegex(
ValueError, "Argument `name` must be a string"
):
KerasVariable(initializer=initializers.RandomNormal(), name=12345)
# Test when name contains a '/'
with self.assertRaisesRegex(ValueError, "cannot contain character `/`"):
KerasVariable(
initializer=initializers.RandomNormal(), name="invalid/name"
)
def test_standardize_shape_with_none(self):
with self.assertRaisesRegex(
ValueError, "Undefined shapes are not supported."
):
standardize_shape(None)
def METHOD_NAME(self):
with self.assertRaisesRegex(
ValueError, "Cannot convert '42' to a shape."
):
standardize_shape(42)
def test_standardize_shape_with_valid_input(self):
shape = [3, 4, 5]
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, (3, 4, 5))
# TODO
# (3.9,torch) FAILED keras_core/backend/common/variables_test.py
# ::VariablesTest::test_standardize_shape_with_non_integer_entry:
# - AssertionError "Cannot convert '\(3, 4, 'a'\)' to a shape.
# " does not match "invalid literal for int() with base 10: 'a'"
# def test_standardize_shape_with_non_integer_entry(self):
# with self.assertRaisesRegex(
# ValueError,
# "Cannot convert '\\(3, 4, 'a'\\)' to a shape. Found invalid",
# ):
# standardize_shape([3, 4, "a"])
def test_standardize_shape_with_negative_entry(self):
with self.assertRaisesRegex(
ValueError,
"Cannot convert '\\(3, 4, -5\\)' to a shape. Negative dimensions",
):
standardize_shape([3, 4, -5])
def test_autocast_scope_with_non_float_dtype(self):
with self.assertRaisesRegex(
ValueError,
"`AutocastScope` can only be used with a floating-point",
):
_ = AutocastScope("int32") |
4,938 | skip test if missing module | #!/usr/bin/env python3
# Copyright (c) 2017-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the wallet resends transactions periodically."""
import time
from test_framework.blocktools import (
create_block,
create_coinbase,
)
from test_framework.messages import DEFAULT_MEMPOOL_EXPIRY_HOURS
from test_framework.p2p import P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class ResendWalletTransactionsTest(BitcoinTestFramework):
def add_options(self, parser):
self.add_wallet_options(parser)
def set_test_params(self):
self.num_nodes = 1
def METHOD_NAME(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0] # alias
peer_first = node.add_p2p_connection(P2PTxInvStore())
self.log.info("Create a new transaction and wait until it's broadcast")
parent_utxo, indep_utxo = node.listunspent()[:2]
addr = node.getnewaddress()
txid = node.send(outputs=[{addr: 1}], inputs=[parent_utxo])["txid"]
# Can take a few seconds due to transaction trickling
peer_first.wait_for_broadcast([txid])
# Add a second peer since txs aren't rebroadcast to the same peer (see m_tx_inventory_known_filter)
peer_second = node.add_p2p_connection(P2PTxInvStore())
self.log.info("Create a block")
# Create and submit a block without the transaction.
# Transactions are only rebroadcast if there has been a block at least five minutes
# after the last time we tried to broadcast. Use mocktime and give an extra minute to be sure.
block_time = int(time.time()) + 6 * 60
node.setmocktime(block_time)
block = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockcount() + 1), block_time)
block.solve()
node.submitblock(block.serialize().hex())
# Set correct m_best_block_time, which is used in ResubmitWalletTransactions
node.syncwithvalidationinterfacequeue()
now = int(time.time())
# Transaction should not be rebroadcast within first 12 hours
# Leave 2 mins for buffer
twelve_hrs = 12 * 60 * 60
two_min = 2 * 60
node.setmocktime(now + twelve_hrs - two_min)
node.mockscheduler(60) # Tell scheduler to call MaybeResendWalletTxs now
assert_equal(int(txid, 16) in peer_second.get_invs(), False)
self.log.info("Bump time & check that transaction is rebroadcast")
# Transaction should be rebroadcast approximately 24 hours in the future,
# but can range from 12-36. So bump 36 hours to be sure.
with node.assert_debug_log(['resubmit 1 unconfirmed transactions']):
node.setmocktime(now + 36 * 60 * 60)
# Tell scheduler to call MaybeResendWalletTxs now.
node.mockscheduler(60)
# Give some time for trickle to occur
node.setmocktime(now + 36 * 60 * 60 + 600)
peer_second.wait_for_broadcast([txid])
self.log.info("Chain of unconfirmed not-in-mempool txs are rebroadcast")
# This tests that the node broadcasts the parent transaction before the child transaction.
        # To test that scenario, we need a way to reliably get a child transaction
        # positioned before its parent in mapWallet. We cannot predict the position in mapWallet,
# but we can observe it using listreceivedbyaddress and other related RPCs.
#
# So we will create the child transaction, use listreceivedbyaddress to see what the
# ordering of mapWallet is, if the child is not before the parent, we will create a new
# child (via bumpfee) and remove the old child (via removeprunedfunds) until we get the
# ordering of child before parent.
child_txid = node.send(outputs=[{addr: 0.5}], inputs=[{"txid":txid, "vout":0}])["txid"]
while True:
txids = node.listreceivedbyaddress(minconf=0, address_filter=addr)[0]["txids"]
if txids == [child_txid, txid]:
break
bumped = node.bumpfee(child_txid)
# The scheduler queue creates a copy of the added tx after
# send/bumpfee and re-adds it to the wallet (undoing the next
# removeprunedfunds). So empty the scheduler queue:
node.syncwithvalidationinterfacequeue()
node.removeprunedfunds(child_txid)
child_txid = bumped["txid"]
entry_time = node.getmempoolentry(child_txid)["time"]
block_time = entry_time + 6 * 60
node.setmocktime(block_time)
block = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockcount() + 1), block_time)
block.solve()
node.submitblock(block.serialize().hex())
# Set correct m_best_block_time, which is used in ResubmitWalletTransactions
node.syncwithvalidationinterfacequeue()
evict_time = block_time + 60 * 60 * DEFAULT_MEMPOOL_EXPIRY_HOURS + 5
# Flush out currently scheduled resubmit attempt now so that there can't be one right between eviction and check.
with node.assert_debug_log(['resubmit 2 unconfirmed transactions']):
node.setmocktime(evict_time)
node.mockscheduler(60)
# Evict these txs from the mempool
indep_send = node.send(outputs=[{node.getnewaddress(): 1}], inputs=[indep_utxo])
node.getmempoolentry(indep_send["txid"])
assert_raises_rpc_error(-5, "Transaction not in mempool", node.getmempoolentry, txid)
assert_raises_rpc_error(-5, "Transaction not in mempool", node.getmempoolentry, child_txid)
# Rebroadcast and check that parent and child are both in the mempool
with node.assert_debug_log(['resubmit 2 unconfirmed transactions']):
node.setmocktime(evict_time + 36 * 60 * 60) # 36 hrs is the upper limit of the resend timer
node.mockscheduler(60)
node.getmempoolentry(txid)
node.getmempoolentry(child_txid)
if __name__ == '__main__':
ResendWalletTransactionsTest().main() |
4,939 | get start hour | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A cron handler that builds data_types.CrashStatistic.
Given `end_hour`, we build the aggregated row for the end hour.
The rows are aggregated by crash_signature, fuzzer_name, job_type, platform,
revision, and project.
Here's how it works:
1. Get the latest end_hour, so that we can run the next one.
2. Send query to BigQuery and tell BigQuery to insert the result to a specific
table.
3. Poll the result.
4. Store the end_hour.
"""
import datetime
import json
import logging
import time
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.google_cloud_utils import big_query
from clusterfuzz._internal.metrics import crash_stats
from handlers import base_handler
from libs import handler
# After insertion, it takes a few seconds for a record to show up.
# We give it a few minutes.
#
# We add one hour because our hour spans from 0 minutes to 59 minutes.
BIGQUERY_INSERTION_DELAY = datetime.timedelta(hours=1, minutes=2)
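# For example, the row for end_hour H covers H:00-H:59, so it is only built
# once utcnow() is past H:00 + 1h02m, i.e. a couple of minutes after the
# hour has fully elapsed.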
# The template for job ids. Running on an already-run hour is fine to a certain
# degree. We de-dup when reading.
JOB_ID_TEMPLATE = 'build_crash_stats_test_{unique_number}'
TIMEOUT = 2 * 60
SQL = """
SELECT
COUNT(*) as count,
crash_type, crash_state, security_flag, parent_fuzzer_name, fuzzer_name,
job_type, revision, platform, project, reproducible_flag,
IF(STARTS_WITH(platform, 'android'), 'android', '') AS parent_platform,
CAST(FLOOR(UNIX_SECONDS(created_at) / 3600) AS INT64) as hour,
MIN(crash_time_in_ms) AS min_crash_time_in_ms,
MAX(crash_time_in_ms) AS max_crash_time_in_ms,
SUM(crash_time_in_ms) AS sum_crash_time_in_ms,
CAST(SUM(POW(crash_time_in_ms, 2)) AS INT64) AS sum_square_crash_time_in_ms,
ANY_VALUE(new_flag=True) AS new_flag
FROM main.crashes
WHERE
CAST(FLOOR(UNIX_SECONDS(created_at) / 3600) AS INT64) = {end_hour} AND
_PARTITIONTIME = TIMESTAMP_TRUNC('{end_date}', DAY)
GROUP BY
crash_type, crash_state, security_flag, parent_fuzzer_name, fuzzer_name,
job_type, revision, parent_platform, platform, project, hour,
reproducible_flag
"""
class TooEarlyError(Exception):
"""The end hour is too early according to BIGQUERY_INSERTION_DELAY."""
def METHOD_NAME():
"""Get the start hour from the first crash."""
client = big_query.Client()
sql = """
SELECT min(CAST(FLOOR(UNIX_SECONDS(created_at) / 3600) AS INT64)) as min_hour
FROM main.crashes
"""
result = client.query(query=sql)
if result and result.rows:
return result.rows[0]['min_hour']
return 0
def get_last_successful_hour_or_start_hour():
"""Get the last hour that ran successfully or the start hour."""
last_hour = crash_stats.get_last_successful_hour()
if last_hour:
return last_hour
return METHOD_NAME()
def get_next_end_hour():
"""Get the next end hour. If it's too early to compute data for the next end
hour, return None."""
last_successful_hour = get_last_successful_hour_or_start_hour()
if not last_successful_hour:
# No crashes seen, too early to start building stats.
raise TooEarlyError()
next_end_hour = last_successful_hour + 1
next_datetime = crash_stats.get_datetime(next_end_hour)
if (utils.utcnow() - next_datetime) <= BIGQUERY_INSERTION_DELAY:
raise TooEarlyError()
return next_end_hour
def make_request(client, job_id, end_hour):
"""Make a request to BigQuery to build crash stats."""
table_id = (
'crash_stats$%s' % crash_stats.get_datetime(end_hour).strftime('%Y%m%d'))
sql = SQL.format(
end_hour=end_hour,
end_date=(crash_stats.get_datetime(end_hour).strftime('%Y-%m-%d')))
logging.info('TableID: %s\nJobID: %s\nSQL: %s', table_id, job_id, sql)
client.insert_from_query(
dataset_id='main', table_id=table_id, job_id=job_id, query=sql)
def build(end_hour):
"""Build crash stats for the end hour."""
logging.info('Started building crash stats for %s.',
crash_stats.get_datetime(end_hour))
job_id = JOB_ID_TEMPLATE.format(unique_number=int(time.time()))
client = big_query.Client()
make_request(client, job_id, end_hour)
start_time = time.time()
while (time.time() - start_time) < TIMEOUT:
time.sleep(10)
result = client.get_job(job_id)
logging.info('Checking %s', json.dumps(result))
if result['status']['state'] == 'DONE':
if result['status'].get('errors'):
raise Exception(json.dumps(result)) # pylint: disable=broad-exception-raised
return
raise Exception('Building crash stats exceeded %d seconds.' % TIMEOUT) # pylint: disable=broad-exception-raised
def build_if_needed():
"""Get the next end hour and decide whether to execute build(). If build()
succeeds, then record the next end hour."""
try:
end_hour = get_next_end_hour()
build(end_hour)
job_history = data_types.BuildCrashStatsJobHistory()
job_history.end_time_in_hours = end_hour
job_history.put()
logging.info('CrashStatistics for end_hour=%s is built successfully',
crash_stats.get_datetime(end_hour))
return end_hour
except TooEarlyError:
logging.info("Skip building crash stats because it's too early.")
return None
class Handler(base_handler.Handler):
"""Handler for building data_types.CrashsStats2."""
@handler.cron()
def get(self):
"""Process a GET request from a cronjob."""
end_hour = build_if_needed()
return 'OK (end_hour=%s)' % end_hour |
4,940 | is expired | # -*- coding: utf-8 -*-
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from rest_framework.authtoken.models import Token
from apps.core.models import UnitScheme
from apps.user import countries
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
"""
Create an auth token for every newly created user.
"""
if created:
Token.objects.create(user=instance)
class ItsiUserManager(models.Manager):
def create_itsi_user(self, user, itsi_id):
itsi_user = self.create(user=user, itsi_id=itsi_id)
return itsi_user
class ItsiUser(models.Model):
user = models.OneToOneField(User,
on_delete=models.CASCADE,
primary_key=True)
itsi_id = models.IntegerField()
objects = ItsiUserManager()
def __unicode__(self):
return str(self.user.username)
class ConcordUser(models.Model):
user = models.OneToOneField(User,
on_delete=models.CASCADE,
primary_key=True)
concord_id = models.IntegerField()
def __unicode__(self):
return str(self.user.username)
class UserProfile(models.Model):
UNSPECIFIED = 'Unspecified'
UNI_FACULTY = 'University Faculty'
UNI_PROFESSIONAL = 'University Professional or Research Staff'
POST_DOC = 'Post-Doctoral Fellow'
UNI_GRAD_STU = 'University Graduate Student'
UNI_UGRAD_STU = 'University Undergraduate Student'
COMMERCIAL = 'Commercial/Professional'
GOVERNMENT = 'Government Official'
K_12_STU = 'School Student Kindergarten to 12th Grade'
K_12_TEACHER = 'School Teacher Kindergarten to 12th Grade'
OTHER = 'Other'
USER_TYPE_CHOICES = (
(UNSPECIFIED, UNSPECIFIED),
(UNI_FACULTY, UNI_FACULTY),
(UNI_PROFESSIONAL, UNI_PROFESSIONAL),
(POST_DOC, POST_DOC),
(UNI_GRAD_STU, UNI_GRAD_STU),
(UNI_UGRAD_STU, UNI_UGRAD_STU),
(COMMERCIAL, COMMERCIAL),
(GOVERNMENT, GOVERNMENT),
(K_12_STU, K_12_STU),
(K_12_TEACHER, K_12_TEACHER),
(OTHER, OTHER),
)
UNIT_SCHEME_CHOICES = (
(UnitScheme.METRIC, 'Metric'),
(UnitScheme.USCUSTOMARY, 'US Customary'),
)
user = models.OneToOneField(User,
on_delete=models.CASCADE,
primary_key=True)
was_skipped = models.BooleanField(default=False)
is_complete = models.BooleanField(default=False)
has_seen_hotspot_info = models.BooleanField(default=False)
organization = models.TextField(blank=True)
user_type = models.TextField(choices=USER_TYPE_CHOICES,
default=UNSPECIFIED)
country = models.TextField(choices=countries.COUNTRY_CHOICES,
default=countries.US)
postal_code = models.TextField(blank=True)
unit_scheme = models.TextField(choices=UNIT_SCHEME_CHOICES,
default=UnitScheme.METRIC)
class HydroShareToken(models.Model):
"""
HydroShare Token details for a given user
    The following field names are the same as in the HydroShare OAuth2
    authorization interface and cannot be changed:
access_token, token_type, expires_in, refresh_token, scope
"""
user = models.OneToOneField(User,
on_delete=models.CASCADE,
primary_key=True)
access_token = models.CharField(max_length=255)
token_type = models.CharField(max_length=255, default='Bearer')
expires_in = models.IntegerField(default=0)
refresh_token = models.CharField(max_length=255)
scope = models.CharField(max_length=255, default='read write')
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.access_token if not self.is_expired else 'Expired'
def METHOD_NAME(self):
now = timezone.now()
refreshed = self.modified_at
expiry = timedelta(seconds=self.expires_in)
return now > refreshed + expiry
is_expired = property(METHOD_NAME)
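    # Example: a token refreshed at 12:00 with expires_in=3600 reads as expired
    # once 13:00 has passed (modified_at is auto-updated on every save, so a
    # refresh resets the clock).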
def get_oauth_dict(self):
return {i: getattr(self, i)
for i in ['access_token', 'token_type', 'expires_in',
'refresh_token', 'scope']} |
4,941 | get metrics | """
This example shows how to use an existing PyTorch model with Determined.
The flags and configurations can be found under const.yaml. For more information
regarding the optional flags, see the original script linked below.
This implementation is based on:
https://github.com/huggingface/transformers/blob/v2.2.0/examples/run_glue.py
"""
from typing import Dict, Sequence, Union
import constants
import data
import numpy as np
import torch
from torch import nn
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_processors as processors
from determined.pytorch import DataLoader, LRScheduler, PyTorchTrial, PyTorchTrialContext
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
class BertPyTorch(PyTorchTrial):
def __init__(self, context: PyTorchTrialContext) -> None:
self.context = context
# Create a unique download directory for each rank so they don't overwrite each
# other when doing distributed training.
self.download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
self.data_downloaded = False
config_class, model_class, tokenizer_class = constants.MODEL_CLASSES[
self.context.get_hparam("model_type")
]
processor = processors[f"{self.context.get_data_config().get('task').lower()}"]()
label_list = processor.get_labels()
num_labels = len(label_list)
cache_dir_per_rank = f"/tmp/{self.context.distributed.get_rank()}"
config = config_class.from_pretrained(
self.context.get_data_config().get("model_name_or_path"),
num_labels=num_labels,
finetuning_task=self.context.get_data_config().get("task").lower(),
cache_dir=cache_dir_per_rank,
)
self.model = self.context.wrap_model(
model_class.from_pretrained(
self.context.get_data_config().get("model_name_or_path"),
from_tf=(".ckpt" in self.context.get_data_config().get("model_name_or_path")),
config=config,
cache_dir=cache_dir_per_rank,
)
)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in self.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": self.context.get_hparam("weight_decay"),
},
{
"params": [
p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
self.optimizer = self.context.wrap_optimizer(
AdamW(
optimizer_grouped_parameters,
lr=self.context.get_hparam("learning_rate"),
eps=self.context.get_hparam("adam_epsilon"),
)
)
self.lr_scheduler = self.context.wrap_lr_scheduler(
get_linear_schedule_with_warmup(
self.optimizer,
num_warmup_steps=self.context.get_hparam("num_warmup_steps"),
num_training_steps=self.context.get_hparam("num_training_steps"),
),
LRScheduler.StepMode.STEP_EVERY_BATCH,
)
def download_dataset(self) -> None:
task = self.context.get_data_config().get("task")
path_to_mrpc = self.context.get_data_config().get("path_to_mrpc")
if not self.context.get_data_config().get("download_data"):
# Exit if you do not want to download data at all
return
data.download_data(task, self.download_directory, path_to_mrpc)
self.data_downloaded = True
def build_training_data_loader(self) -> DataLoader:
if not self.data_downloaded:
self.download_dataset()
train_dataset = data.load_and_cache_examples(
base_data_dir=self.download_directory,
config=self.context.get_data_config(),
model_type=self.context.get_hparam("model_type"),
max_seq_length=self.context.get_hparam("max_seq_length"),
evaluate=False,
)
return DataLoader(train_dataset, batch_size=self.context.get_per_slot_batch_size())
def build_validation_data_loader(self) -> DataLoader:
if not self.data_downloaded:
self.download_dataset()
test_dataset = data.load_and_cache_examples(
base_data_dir=self.download_directory,
config=self.context.get_data_config(),
model_type=self.context.get_hparam("model_type"),
max_seq_length=self.context.get_hparam("max_seq_length"),
evaluate=True,
)
return DataLoader(test_dataset, batch_size=self.context.get_per_slot_batch_size())
def METHOD_NAME(self, outputs, inputs):
"""
        Calculate the metrics based on the model outputs.
"""
loss, logits = outputs[:2]
preds = logits.detach().cpu().numpy()
out_labels_ids = inputs["labels"].detach().cpu().numpy()
if self.context.get_data_config()["output_mode"] == "classification":
preds = np.argmax(preds, axis=1)
elif self.context.get_data_config()["output_mode"] == "regression":
preds = np.squeeze(preds)
results = compute_metrics(
self.context.get_data_config().get("task").lower(), preds, out_labels_ids
)
results["loss"] = loss
return results
def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int):
"""
Trains the provided batch.
        Returns: Dictionary of the calculated metrics
"""
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.context.get_hparam("model_type") != "distilbert":
inputs["token_type_ids"] = (
batch[2] if self.context.get_hparam("model_type") in ["bert", "xlnet"] else None
)
outputs = self.model(**inputs)
results = self.METHOD_NAME(outputs, inputs)
self.context.backward(results["loss"])
self.context.step_optimizer(self.optimizer)
return results
def evaluate_batch(self, batch: TorchData):
"""
Evaluates the provided batch.
        Returns: Dictionary of the calculated metrics
"""
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.context.get_hparam("model_type") != "distilbert":
inputs["token_type_ids"] = (
batch[2] if self.context.get_hparam("model_type") in ["bert", "xlnet"] else None
)
outputs = self.model(**inputs)
results = self.METHOD_NAME(outputs, inputs)
return results |
4,942 | system info | import json
import logging
import os
from .env import DVC_NO_ANALYTICS
logger = logging.getLogger(__name__)
def collect_and_send_report(args=None, return_code=None):
"""
Collect information from the runtime/environment and the command
being executed into a report and send it over the network.
To prevent analytics from blocking the execution of the main thread,
sending the report is done in a separate process.
The inter-process communication happens through a file containing the
    report as JSON, where the _collector_ generates it and the _sender_
removes it after sending it.
"""
import tempfile
from dvc.daemon import daemon
report = {}
# Include command execution information on the report only when available.
if args and hasattr(args, "func"):
report.update({"cmd_class": args.func.__name__})
if return_code is not None:
report.update({"cmd_return_code": return_code})
with tempfile.NamedTemporaryFile(delete=False, mode="w") as fobj:
json.dump(report, fobj)
daemon(["analytics", fobj.name])
def is_enabled():
from dvc.config import Config, to_bool
from dvc.utils import env2bool
if env2bool("DVC_TEST"):
return False
enabled = not os.getenv(DVC_NO_ANALYTICS)
if enabled:
enabled = to_bool(
Config.from_cwd(validate=False).get("core", {}).get("analytics", "true")
)
logger.debug("Analytics is %sabled.", "en" if enabled else "dis")
return enabled
def send(path):
"""
Side effect: Removes the report after sending it.
The report is generated and stored in a temporary file, see:
`collect_and_send_report`. Sending happens on another process,
thus, the need of removing such file afterwards.
"""
import requests
url = "https://analytics.dvc.org"
headers = {"content-type": "application/json"}
with open(path, encoding="utf-8") as fobj:
report = json.load(fobj)
report.update(_runtime_info())
try:
requests.post(url, json=report, headers=headers, timeout=5)
except requests.exceptions.RequestException:
logger.debug("failed to send analytics report", exc_info=True)
os.remove(path)
def _scm_in_use():
from dvc.exceptions import NotDvcRepoError
from dvc.repo import Repo
from dvc.scm import NoSCM
from .scm import SCM, SCMError
try:
scm = SCM(root_dir=Repo.find_root())
return type(scm).__name__
except SCMError:
return NoSCM.__name__
except NotDvcRepoError:
pass
def _runtime_info():
"""
Gather information from the environment where DVC runs to fill a report.
"""
from iterative_telemetry import _generate_ci_id, find_or_create_user_id
from dvc import __version__
from dvc.utils import is_binary
ci_id = _generate_ci_id()
if ci_id:
group_id, user_id = ci_id
else:
group_id, user_id = None, find_or_create_user_id()
return {
"dvc_version": __version__,
"is_binary": is_binary(),
"scm_class": _scm_in_use(),
"system_info": METHOD_NAME(),
"user_id": user_id,
"group_id": group_id,
}
def METHOD_NAME():
import platform
import sys
import distro
system = platform.system()
if system == "Windows":
version = sys.getwindowsversion() # type: ignore[attr-defined]
return {
"os": "windows",
"windows_version_build": version.build,
"windows_version_major": version.major,
"windows_version_minor": version.minor,
"windows_version_service_pack": version.service_pack,
}
if system == "Darwin":
return {"os": "mac", "mac_version": platform.mac_ver()[0]}
if system == "Linux":
return {
"os": "linux",
"linux_distro": distro.id(),
"linux_distro_like": distro.like(),
"linux_distro_version": distro.version(),
}
# We don't collect data for any other system.
raise NotImplementedError |
4,943 | test term negative matches | """
    Typographic Number Theory tests
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pytest
from pygments.lexers.tnt import TNTLexer
from pygments.token import Text, Operator, Keyword, Name, Number, \
Punctuation, Error
@pytest.fixture(autouse=True)
def lexer():
yield TNTLexer()
# whitespace
@pytest.mark.parametrize('text', (' a', ' \t0', '\n\n 3'))
def test_whitespace_positive_matches(lexer, text):
"""Test fragments that should be tokenized as whitespace text."""
assert lexer.whitespace(0, text) == len(text) - 1
assert lexer.whitespace(0, text, True) == len(text) - 1
assert lexer.cur[-1] == (0, Text, text[:-1])
@pytest.mark.parametrize('text', ('0 a=b premise', 'b=a symmetry'))
def test_whitespace_negative_matches(lexer, text):
"""Test statements that do not start with whitespace text."""
assert lexer.whitespace(0, text) == 0
with pytest.raises(AssertionError):
lexer.whitespace(0, text, True)
assert not lexer.cur
# terms that can go on either side of an = sign
@pytest.mark.parametrize('text', ('a ', "a' ", 'b ', "c' "))
def test_variable_positive_matches(lexer, text):
"""Test fragments that should be tokenized as variables."""
assert lexer.variable(0, text) == len(text) - 1
assert lexer.cur[-1] == (0, Name.Variable, text[:-1])
@pytest.mark.parametrize('text', ("' ", 'f ', "f' "))
def test_variable_negative_matches(lexer, text):
"""Test fragments that should **not** be tokenized as variables."""
with pytest.raises(AssertionError):
lexer.variable(0, text)
assert not lexer.cur
@pytest.mark.parametrize('text', ('0', 'S0', 'SSSSS0'))
def test_numeral_positive_matches(lexer, text):
"""Test fragments that should be tokenized as (unary) numerals."""
assert lexer.term(0, text) == len(text)
assert lexer.cur[-1] == (len(text) - 1, Number.Integer, text[-1])
if text != '0':
assert lexer.cur[-2] == (0, Number.Integer, text[:-1])
@pytest.mark.parametrize('text', (
'(a+b)', '(b.a)', '(c+d)'
))
def test_multiterm_positive_matches(lexer, text):
"""Test fragments that should be tokenized as a compound term."""
assert lexer.term(0, text) == len(text)
assert [t[1] for t in lexer.cur] == [
Punctuation, Name.Variable, Operator,
Name.Variable, Punctuation
]
@pytest.mark.parametrize('text', ('1', '=', 'A'))
def METHOD_NAME(lexer, text):
"""Test fragments that should not be tokenized as terms at all."""
with pytest.raises(AssertionError):
lexer.term(0, text)
assert not lexer.cur
# full statements, minus rule
@pytest.mark.parametrize('text', ('~a=b ', '~~~~a=b '))
def test_negator_positive_matches(lexer, text):
"""Test statements that start with a negation."""
assert lexer.formula(0, text) == len(text) - 1
assert lexer.cur[0] == (0, Operator, text[:-4])
@pytest.mark.parametrize('text', ('Aa:a=b ', 'Eb:a=b '))
def test_quantifier_positive_matches(lexer, text):
"""Test statements that start with a quantifier."""
assert lexer.formula(0, text) == len(text) - 1
assert lexer.cur[0][1] == Keyword.Declaration
assert lexer.cur[1][1] == Name.Variable
assert lexer.cur[2] == (2, Punctuation, ':')
@pytest.mark.parametrize('text', ('Aaa=b', 'Eba=b'))
def test_quantifier_negative_matches(lexer, text):
"""Test quantifiers that are only partially valid."""
with pytest.raises(AssertionError):
lexer.formula(0, text)
# leftovers should still be valid
assert lexer.cur[0][1] == Keyword.Declaration
assert lexer.cur[1][1] == Name.Variable
@pytest.mark.parametrize('text', ('<a=b&b=a>', '<a=b|b=a>', '<a=b]b=a>'))
def test_compound_positive_matches(lexer, text):
"""Test statements that consist of multiple formulas compounded."""
assert lexer.formula(0, text) == len(text)
assert lexer.cur[0] == (0, Punctuation, '<')
assert lexer.cur[4][1] == Operator
assert lexer.cur[-1] == (len(text)-1, Punctuation, '>')
@pytest.mark.parametrize('text', ('<a=b/b=a>', '<a=b&b=a '))
def test_compound_negative_matches(lexer, text):
"""Test statements that look like compounds but are invalid."""
with pytest.raises(AssertionError):
lexer.formula(0, text)
assert lexer.cur[0] == (0, Punctuation, '<')
@pytest.mark.parametrize('text', ('a=b ', 'a=0 ', '0=b '))
def test_formula_positive_matches(lexer, text):
"""Test the normal singular formula."""
assert lexer.formula(0, text) == len(text) - 1
assert lexer.cur[0][2] == text[0]
assert lexer.cur[1] == (1, Operator, '=')
assert lexer.cur[2][2] == text[2]
@pytest.mark.parametrize('text', ('a/b', '0+0 '))
def test_formula_negative_matches(lexer, text):
"""Test anything but an equals sign."""
with pytest.raises(AssertionError):
lexer.formula(0, text)
# rules themselves
@pytest.mark.parametrize('text', (
'fantasy rule', 'carry over line 5', 'premise', 'joining',
'double-tilde', 'switcheroo', 'De Morgan', 'specification'
))
def test_rule_positive_matches(lexer, text):
"""Test some valid rules of TNT."""
assert lexer.rule(0, text) == len(text)
assert lexer.cur[0][:2] == (0, Keyword)
if text[-1].isdigit():
assert lexer.cur[1][1] == Number.Integer
@pytest.mark.parametrize('text', (
'fantasy', 'carry over', 'premse', 'unjoining',
'triple-tilde', 'switcheru', 'De-Morgan', 'despecification'
))
def test_rule_negative_matches(lexer, text):
"""Test some invalid rules of TNT."""
with pytest.raises(AssertionError):
lexer.rule(0, text)
# referrals
@pytest.mark.parametrize('text', ('(lines 1, 2, and 4)', '(line 3,5,6)',
'(lines 1, 6 and 0)'))
def test_lineno_positive_matches(lexer, text):
"""Test line referrals."""
assert lexer.lineno(0, text) == len(text)
assert lexer.cur[0] == (0, Punctuation, '(')
assert lexer.cur[1][:2] == (1, Text)
assert lexer.cur[2][1] == Number.Integer
assert lexer.cur[3] == (len(text)-1, Punctuation, ')')
@pytest.mark.parametrize('text', (
'(lines one, two, and four)1 ', # to avoid IndexError
'(lines 1 2 and 3)', '(lines 1 2 3)'
))
def test_lineno_negative_matches(lexer, text):
"""Test invalid line referrals."""
with pytest.raises(AssertionError):
lexer.lineno(0, text)
# worst-case: error text
@pytest.mark.parametrize('text', ('asdf', 'fdsa\nasdf', 'asdf\n '))
def test_error_till_line_end(lexer, text):
try:
nl = text.index('\n')
except ValueError:
nl = len(text)
try:
end = text.find(text.split(None, 2)[1])
except IndexError: # split failed
end = len(text)
assert lexer.error_till_line_end(0, text) == end
assert lexer.cur[0] == (0, Error, text[:nl])
# full statement, including rule (because this can't be tested any other way)
@pytest.mark.parametrize('text', ('[ push', '] pop'))
def test_fantasy_positive_matches(lexer, text):
"""Test statements that should be tokenized as push/pop statements."""
assert lexer.get_tokens_unprocessed(text)[0] == (0, Keyword, text[0])
# full text is already done by examplefiles, but here's some exceptions
@pytest.mark.parametrize('text', (
'0', 'a=b', 'premise',
'0 a=b premise', '1 b=a symmetry (line 0)'
))
def test_no_crashing(lexer, text):
"""Test incomplete text fragments that shouldn't crash the whole lexer."""
assert lexer.get_tokens(text) |
4,944 | test bom | from .. import util
machinery = util.import_importlib('importlib.machinery')
import codecs
import importlib.util
import re
import types
# Because sys.path gets essentially blanked, unicodedata needs to be imported
# already so the parser can use it.
import unicodedata
import unittest
import warnings
CODING_RE = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
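# Matches PEP 263 declarations such as "# -*- coding: latin-1 -*-" or
# "# coding=utf-8"; only the encoding name itself is captured.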
class EncodingTest:
"""PEP 3120 makes UTF-8 the default encoding for source code
[default encoding].
PEP 263 specifies how that can change on a per-file basis. Either the first
    or second line can contain the encoding line [encoding first line]
    [encoding second line]. If the file has the BOM marker it is considered UTF-8
implicitly [BOM]. If any encoding is specified it must be UTF-8, else it is
an error [BOM and utf-8][BOM conflict].
"""
variable = '\u00fc'
character = '\u00c9'
source_line = "{0} = '{1}'\n".format(variable, character)
module_name = '_temp'
def run_test(self, source):
with util.create_modules(self.module_name) as mapping:
with open(mapping[self.module_name], 'wb') as file:
file.write(source)
loader = self.machinery.SourceFileLoader(self.module_name,
mapping[self.module_name])
return self.load(loader)
def create_source(self, encoding):
encoding_line = "# coding={0}".format(encoding)
assert CODING_RE.match(encoding_line)
source_lines = [encoding_line.encode('utf-8')]
source_lines.append(self.source_line.encode(encoding))
return b'\n'.join(source_lines)
def test_non_obvious_encoding(self):
# Make sure that an encoding that has never been a standard one for
# Python works.
encoding_line = "# coding=koi8-r"
assert CODING_RE.match(encoding_line)
source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
self.run_test(source)
# [default encoding]
def test_default_encoding(self):
self.run_test(self.source_line.encode('utf-8'))
# [encoding first line]
def test_encoding_on_first_line(self):
encoding = 'Latin-1'
source = self.create_source(encoding)
self.run_test(source)
# [encoding second line]
def test_encoding_on_second_line(self):
source = b"#/usr/bin/python\n" + self.create_source('Latin-1')
self.run_test(source)
# [BOM]
def METHOD_NAME(self):
self.run_test(codecs.BOM_UTF8 + self.source_line.encode('utf-8'))
# [BOM and utf-8]
def test_bom_and_utf_8(self):
source = codecs.BOM_UTF8 + self.create_source('utf-8')
self.run_test(source)
# [BOM conflict]
def test_bom_conflict(self):
source = codecs.BOM_UTF8 + self.create_source('latin-1')
with self.assertRaises(SyntaxError):
self.run_test(source)
class EncodingTestPEP451(EncodingTest):
def load(self, loader):
module = types.ModuleType(self.module_name)
module.__spec__ = importlib.util.spec_from_loader(self.module_name, loader)
loader.exec_module(module)
return module
(Frozen_EncodingTestPEP451,
Source_EncodingTestPEP451
) = util.test_both(EncodingTestPEP451, machinery=machinery)
class EncodingTestPEP302(EncodingTest):
def load(self, loader):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return loader.load_module(self.module_name)
(Frozen_EncodingTestPEP302,
Source_EncodingTestPEP302
) = util.test_both(EncodingTestPEP302, machinery=machinery)
class LineEndingTest:
r"""Source written with the three types of line endings (\n, \r\n, \r)
need to be readable [cr][crlf][lf]."""
def run_test(self, line_ending):
module_name = '_temp'
source_lines = [b"a = 42", b"b = -13", b'']
source = line_ending.join(source_lines)
with util.create_modules(module_name) as mapping:
with open(mapping[module_name], 'wb') as file:
file.write(source)
loader = self.machinery.SourceFileLoader(module_name,
mapping[module_name])
return self.load(loader, module_name)
# [cr]
def test_cr(self):
self.run_test(b'\r')
# [crlf]
def test_crlf(self):
self.run_test(b'\r\n')
# [lf]
def test_lf(self):
self.run_test(b'\n')
class LineEndingTestPEP451(LineEndingTest):
def load(self, loader, module_name):
module = types.ModuleType(module_name)
module.__spec__ = importlib.util.spec_from_loader(module_name, loader)
loader.exec_module(module)
return module
(Frozen_LineEndingTestPEP451,
Source_LineEndingTestPEP451
) = util.test_both(LineEndingTestPEP451, machinery=machinery)
class LineEndingTestPEP302(LineEndingTest):
def load(self, loader, module_name):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return loader.load_module(module_name)
(Frozen_LineEndingTestPEP302,
Source_LineEndingTestPEP302
) = util.test_both(LineEndingTestPEP302, machinery=machinery)
if __name__ == '__main__':
unittest.main() |
4,945 | test select with setting none | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from unittest import TestCase
from lisa import LisaException, constants
from selftests.test_testsuite import cleanup_cases_metadata, select_and_check
class SelectorTestCase(TestCase):
def setUp(self) -> None:
cleanup_cases_metadata()
def test_no_case_selected(self) -> None:
runbook = [{constants.TESTCASE_CRITERIA: {"area": "demo"}}]
select_and_check(self, runbook, [])
def test_select_by_priority(self) -> None:
runbook = [{constants.TESTCASE_CRITERIA: {"priority": 0}}]
select_and_check(self, runbook, ["ut1"])
def test_select_by_tag(self) -> None:
runbook = [{constants.TESTCASE_CRITERIA: {"tags": "t1"}}]
select_and_check(self, runbook, ["ut1", "ut2"])
def test_select_by_one_of_tag(self) -> None:
runbook = [{constants.TESTCASE_CRITERIA: {"tags": ["t1", "t3"]}}]
select_and_check(self, runbook, ["ut1", "ut2", "ut3"])
def test_select_by_two_rules(self) -> None:
runbook = [{constants.TESTCASE_CRITERIA: {"tags": ["t1", "t3"], "area": "a1"}}]
select_and_check(self, runbook, ["ut1", "ut2"])
def test_select_by_two_criteria(self) -> None:
runbook = [
{constants.TESTCASE_CRITERIA: {"name": "mock_ut1"}},
{constants.TESTCASE_CRITERIA: {"name": "mock_ut2"}},
]
select_and_check(self, runbook, ["ut1", "ut2"])
def test_select_then_drop(self) -> None:
runbook = [
{constants.TESTCASE_CRITERIA: {"tags": "t1"}},
{
constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
constants.TESTCASE_SELECT_ACTION: "exclude",
},
]
select_and_check(self, runbook, ["ut1"])
def test_select_drop_select(self) -> None:
runbook = [
{constants.TESTCASE_CRITERIA: {"tags": "t1"}},
{
constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
constants.TESTCASE_SELECT_ACTION: "exclude",
},
{constants.TESTCASE_CRITERIA: {"tags": "t1"}},
]
select_and_check(self, runbook, ["ut1", "ut2"])
def test_select_force_include(self) -> None:
runbook = [
{
constants.TESTCASE_CRITERIA: {"tags": "t1"},
constants.TESTCASE_SELECT_ACTION: "forceInclude",
},
{
constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
constants.TESTCASE_SELECT_ACTION: "exclude",
},
]
select_and_check(self, runbook, ["ut1", "ut2"])
def test_select_force_conflict(self) -> None:
runbook = [
{
constants.TESTCASE_CRITERIA: {"tags": "t1"},
constants.TESTCASE_SELECT_ACTION: "forceInclude",
},
{
constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
constants.TESTCASE_SELECT_ACTION: "forceExclude",
},
]
with self.assertRaises(LisaException) as cm:
select_and_check(self, runbook, ["ut1", "ut2"])
self.assertIsInstance(cm.exception, LisaException)
self.assertIn("force", str(cm.exception))
def test_select_force_conflict_exclude(self) -> None:
runbook = [
{
constants.TESTCASE_CRITERIA: {"tags": "t1"},
constants.TESTCASE_SELECT_ACTION: "include",
},
{
constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
constants.TESTCASE_SELECT_ACTION: "forceExclude",
},
{
constants.TESTCASE_CRITERIA: {"tags": "t1"},
constants.TESTCASE_SELECT_ACTION: "forceInclude",
},
]
with self.assertRaises(LisaException) as cm:
select_and_check(self, runbook, [])
self.assertIsInstance(cm.exception, LisaException)
self.assertIn("force", str(cm.exception))
def test_select_with_setting(self) -> None:
runbook = [
{constants.TESTCASE_CRITERIA: {"tags": "t1"}, "retry": 2},
]
selected = select_and_check(self, runbook, ["ut1", "ut2"])
self.assertListEqual([2, 2], [case.retry for case in selected])
def test_select_with_times(self) -> None:
runbook = [
{constants.TESTCASE_CRITERIA: {"tags": "t1"}},
{
constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
"times": 2,
constants.TESTCASE_SELECT_ACTION: "none",
},
]
selected = select_and_check(self, runbook, ["ut1", "ut2", "ut2"])
self.assertListEqual([1, 2, 2], [case.times for case in selected])
self.assertListEqual([0, 0, 0], [case.retry for case in selected])
def METHOD_NAME(self) -> None:
runbook = [
{constants.TESTCASE_CRITERIA: {"tags": "t1"}},
{
constants.TESTCASE_CRITERIA: {"name": "mock_ut2"},
"retry": 2,
constants.TESTCASE_SELECT_ACTION: "none",
},
]
selected = select_and_check(self, runbook, ["ut1", "ut2"])
self.assertListEqual([0, 2], [case.retry for case in selected])
def test_select_with_diff_setting(self) -> None:
runbook = [
{constants.TESTCASE_CRITERIA: {"tags": "t1"}, "retry": 2},
{constants.TESTCASE_CRITERIA: {"name": "mock_ut2"}, "retry": 3},
]
selected = select_and_check(self, runbook, ["ut1", "ut2"])
self.assertListEqual([2, 3], [case.retry for case in selected]) |
4,946 | test fs | #!/usr/bin/env python
from unicorn import *
from unicorn.x86_const import *
from struct import pack
import regress
CODE_ADDR = 0x40000
CODE_SIZE = 0x1000
SCRATCH_ADDR = 0x80000
SCRATCH_SIZE = 0x1000
SEGMENT_ADDR = 0x5000
SEGMENT_SIZE = 0x1000
FSMSR = 0xC0000100
GSMSR = 0xC0000101
def set_msr(uc, msr, value, scratch=SCRATCH_ADDR):
'''
set the given model-specific register (MSR) to the given value.
this will clobber some memory at the given scratch address, as it emits some code.
'''
# save clobbered registers
orax = uc.reg_read(UC_X86_REG_RAX)
ordx = uc.reg_read(UC_X86_REG_RDX)
orcx = uc.reg_read(UC_X86_REG_RCX)
orip = uc.reg_read(UC_X86_REG_RIP)
# x86: wrmsr
buf = '\x0f\x30'
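    # wrmsr expects the MSR index in ECX and the 64-bit value in EDX:EAX,
    # which is why the value is split into low/high halves below.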
uc.mem_write(scratch, buf)
uc.reg_write(UC_X86_REG_RAX, value & 0xFFFFFFFF)
uc.reg_write(UC_X86_REG_RDX, (value >> 32) & 0xFFFFFFFF)
uc.reg_write(UC_X86_REG_RCX, msr & 0xFFFFFFFF)
uc.emu_start(scratch, scratch+len(buf), count=1)
# restore clobbered registers
uc.reg_write(UC_X86_REG_RAX, orax)
uc.reg_write(UC_X86_REG_RDX, ordx)
uc.reg_write(UC_X86_REG_RCX, orcx)
uc.reg_write(UC_X86_REG_RIP, orip)
def get_msr(uc, msr, scratch=SCRATCH_ADDR):
'''
fetch the contents of the given model-specific register (MSR).
this will clobber some memory at the given scratch address, as it emits some code.
'''
# save clobbered registers
orax = uc.reg_read(UC_X86_REG_RAX)
ordx = uc.reg_read(UC_X86_REG_RDX)
orcx = uc.reg_read(UC_X86_REG_RCX)
orip = uc.reg_read(UC_X86_REG_RIP)
# x86: rdmsr
buf = '\x0f\x32'
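    # rdmsr expects the MSR index in ECX and returns the 64-bit value in
    # EDX:EAX, reassembled below.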
uc.mem_write(scratch, buf)
uc.reg_write(UC_X86_REG_RCX, msr & 0xFFFFFFFF)
uc.emu_start(scratch, scratch+len(buf), count=1)
eax = uc.reg_read(UC_X86_REG_EAX)
edx = uc.reg_read(UC_X86_REG_EDX)
# restore clobbered registers
uc.reg_write(UC_X86_REG_RAX, orax)
uc.reg_write(UC_X86_REG_RDX, ordx)
uc.reg_write(UC_X86_REG_RCX, orcx)
uc.reg_write(UC_X86_REG_RIP, orip)
return (edx << 32) | (eax & 0xFFFFFFFF)
def set_gs(uc, addr):
'''
set the GS.base hidden descriptor-register field to the given address.
this enables referencing the gs segment on x86-64.
'''
return set_msr(uc, GSMSR, addr)
def get_gs(uc):
'''
fetch the GS.base hidden descriptor-register field.
'''
return get_msr(uc, GSMSR)
def set_fs(uc, addr):
'''
set the FS.base hidden descriptor-register field to the given address.
this enables referencing the fs segment on x86-64.
'''
return set_msr(uc, FSMSR, addr)
def get_fs(uc):
'''
fetch the FS.base hidden descriptor-register field.
'''
return get_msr(uc, FSMSR)
class TestGetSetMSR(regress.RegressTest):
def test_msr(self):
uc = Uc(UC_ARCH_X86, UC_MODE_64)
uc.mem_map(SCRATCH_ADDR, SCRATCH_SIZE)
set_msr(uc, FSMSR, 0x1000)
self.assertEqual(0x1000, get_msr(uc, FSMSR))
set_msr(uc, GSMSR, 0x2000)
self.assertEqual(0x2000, get_msr(uc, GSMSR))
def test_gs(self):
uc = Uc(UC_ARCH_X86, UC_MODE_64)
uc.mem_map(SEGMENT_ADDR, SEGMENT_SIZE)
uc.mem_map(CODE_ADDR, CODE_SIZE)
uc.mem_map(SCRATCH_ADDR, SCRATCH_SIZE)
code = '6548330C2518000000'.decode('hex') # x86-64: xor rcx, qword ptr gs:[0x18]
uc.mem_write(CODE_ADDR, code)
uc.mem_write(SEGMENT_ADDR+0x18, 'AAAAAAAA')
set_gs(uc, SEGMENT_ADDR)
self.assertEqual(SEGMENT_ADDR, get_gs(uc))
uc.emu_start(CODE_ADDR, CODE_ADDR+len(code))
self.assertEqual(uc.reg_read(UC_X86_REG_RCX), 0x4141414141414141)
def METHOD_NAME(self):
uc = Uc(UC_ARCH_X86, UC_MODE_64)
uc.mem_map(SEGMENT_ADDR, SEGMENT_SIZE)
uc.mem_map(CODE_ADDR, CODE_SIZE)
uc.mem_map(SCRATCH_ADDR, SCRATCH_SIZE)
        code = bytes.fromhex('6448330C2518000000')  # x86-64: xor rcx, qword ptr fs:[0x18]
uc.mem_write(CODE_ADDR, code)
        uc.mem_write(SEGMENT_ADDR+0x18, b'AAAAAAAA')
set_fs(uc, SEGMENT_ADDR)
self.assertEqual(SEGMENT_ADDR, get_fs(uc))
uc.emu_start(CODE_ADDR, CODE_ADDR+len(code))
self.assertEqual(uc.reg_read(UC_X86_REG_RCX), 0x4141414141414141)
if __name__ == '__main__':
regress.main() |
4,947 | map aggregate | from queue import SimpleQueue
from torch.fx.node import map_arg
def _parent_name(target):
"""Get the qualified name of the parent module of submodule 'target'
Args:
target (str): target attribute of a call_module node (which is a str)
"""
*parent, name = target.rsplit(".", 1)
return parent[0] if parent else "", name
def _replace_node_module(node, modules, replacement_module):
"""Replace the node's original target module by replacement_module.
Args:
node (torch.fx.node.Node): a graph call_module node whose target
is to be replaced by replacement_module.
        modules (dict[str, torch.nn.Module]): a dict mapping qualified names
            to modules, covering all named submodules of the root module.
replacement_module (torch.nn.Module): the module to replace the node's
original target.
"""
parent_name, name = _parent_name(node.target)
modules[node.target] = replacement_module
setattr(modules[parent_name], name, replacement_module)
def _replace_input_node(input_node, new_input_node, user_node):
"""Replace the user_node's input_node by new_input_node.
Args:
input_node: the input_node to be replaced.
new_input_node: the new input node.
user_node: the user node whose input_node is to be
replaced by new_input_node
"""
def maybe_replace_node(n):
if n == input_node:
return new_input_node
else:
return n
new_args = map_arg(user_node.args, maybe_replace_node)
new_kwargs = map_arg(user_node.kwargs, maybe_replace_node)
assert isinstance(new_args, tuple)
assert isinstance(new_kwargs, dict)
user_node.args = new_args
user_node.kwargs = new_kwargs
new_input_node.args = (input_node,)
def topo_sort(nodes):
"""Topologically sort the nodes. Backported from torch.fx.passes.utils.fuser_utils (1.13)
Args:
nodes: a list of graph node
Return:
sorted_nodes: a list of node, in topological order.
"""
# sort nodes according to the topological order
indegree_map = {node: 0 for node in nodes}
candidates = SimpleQueue()
for node in nodes:
for n in node.all_input_nodes:
if n in indegree_map:
indegree_map[node] += 1
if indegree_map[node] == 0:
candidates.put(node)
sorted_nodes = list()
while not candidates.empty():
node = candidates.get()
sorted_nodes.append(node)
for n in node.users:
if n in indegree_map:
indegree_map[n] -= 1
if indegree_map[n] == 0:
candidates.put(n)
    assert len(nodes) == len(sorted_nodes), "topologically sorted nodes don't have the same length as the input nodes"
return sorted_nodes
def METHOD_NAME(input_, fn):
"""
Apply fn to each element in input_. input_ may be a list, tuple, slice, or dict with string keys.
Args:
input_: the input whose elements are to be mapped by fn
fn: the Callable to transform the elements
Returns:
The input_ whose elements are transformed by fn
"""
if isinstance(input_, tuple):
t = tuple(METHOD_NAME(elem, fn) for elem in input_)
# Support NamedTuple (if it has `_fields`) by repacking into original type.
return t if not hasattr(input_, "_fields") else type(input_)(*t)
elif isinstance(input_, list):
return list(METHOD_NAME(elem, fn) for elem in input_)
elif isinstance(input_, dict):
return dict((k, METHOD_NAME(v, fn)) for k, v in input_.items())
elif isinstance(input_, slice):
return slice(METHOD_NAME(input_.start, fn), METHOD_NAME(input_.stop, fn), METHOD_NAME(input_.step, fn))
else:
return fn(input_)
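# e.g. METHOD_NAME({"a": (1, 2)}, lambda x: x * 10) -> {"a": (10, 20)}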
def combine_map_aggregate(input_0, input_1, fn):
"""
Traverse input_0 and input_1 simultaneously and apply fn to each pair of elements.
Args:
input_0, input_1: the inputs to be traversed
fn: the Callable to transform the elements
Returns:
The transformed input_0
"""
if isinstance(input_0, tuple) and isinstance(input_1, tuple):
return tuple(combine_map_aggregate(a, b, fn) for a, b in zip(input_0, input_1))
elif isinstance(input_0, list) and isinstance(input_1, list):
for i in range(len(input_0)):
input_0[i] = combine_map_aggregate(input_0[i], input_1[i], fn)
elif isinstance(input_0, dict) and isinstance(input_1, dict):
for k in input_0:
input_0[k] = combine_map_aggregate(input_0[k], input_1[k], fn)
elif isinstance(input_0, slice) and isinstance(input_1, slice):
return slice(
combine_map_aggregate(input_0.start, input_1.start, fn),
combine_map_aggregate(input_0.stop, input_1.stop, fn),
combine_map_aggregate(input_0.step, input_1.step, fn),
)
else:
fn(input_0, input_1)
return input_0
return input_0
# _pack and _unpack methods are backported from torch
def _pack_kwargs(*args, **kwargs):
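    """Flatten positional args and kwargs into a single positional tuple.

    Returns (flat_args, kwarg_keys): the last len(kwarg_keys) entries of
    flat_args are the kwarg values, in the order of kwarg_keys, so that
    _unpack_kwargs can reverse the transformation.
    """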
kwarg_keys = []
flat_args = list(args)
for k, v in kwargs.items():
kwarg_keys.append(k)
flat_args.append(v)
return tuple(flat_args), tuple(kwarg_keys)
def _unpack_kwargs(flat_args, kwarg_keys):
"""See _pack_kwargs."""
assert len(kwarg_keys) <= len(flat_args), f"too many keys {len(kwarg_keys)} vs. {len(flat_args)}"
if len(kwarg_keys) == 0:
return flat_args, {}
args = flat_args[: -len(kwarg_keys)]
kwargs = dict(zip(kwarg_keys, flat_args[-len(kwarg_keys) :])) # noqa: E203
return args, kwargs |
4,948 | get archive from bytes | from collections import namedtuple
from difflib import SequenceMatcher
import io
import logging
import os
import re
import tempfile
from typing import Iterable, Union
import zipfile
from guessit import guessit
import pysubs2
import rarfile
from subliminal.subtitle import fix_line_ending
from subliminal_patch.core import Episode
from subliminal_patch.subtitle import guess_matches
from ._agent_list import FIRST_THOUSAND_OR_SO_USER_AGENTS
logger = logging.getLogger(__name__)
_MatchingSub = namedtuple("_MatchingSub", ("file", "priority", "context"))
def _get_matching_sub(
sub_names, forced=False, episode=None, episode_title=None, **kwargs
):
guess_options = {"single_value": True}
if episode is not None:
guess_options["type"] = "episode" # type: ignore
matching_subs = []
for sub_name in sub_names:
if not forced and os.path.splitext(sub_name.lower())[0].endswith("forced"):
logger.debug("Ignoring forced subtitle: %s", sub_name)
continue
# If it's a movie then get the first subtitle
if episode is None and episode_title is None:
logger.debug("Movie subtitle found: %s", sub_name)
matching_subs.append(_MatchingSub(sub_name, 2, "Movie subtitle"))
break
guess = guessit(sub_name, options=guess_options)
matched_episode_num = guess.get("episode")
        if not matched_episode_num:
logger.debug("No episode number found in file: %s", sub_name)
if episode_title is not None:
from_name = _analize_sub_name(sub_name, episode_title)
if from_name is not None:
matching_subs.append(from_name)
if episode == matched_episode_num:
logger.debug("Episode matched from number: %s", sub_name)
matching_subs.append(_MatchingSub(sub_name, 2, "Episode number matched"))
if matching_subs:
matching_subs.sort(key=lambda x: x.priority, reverse=True)
logger.debug("Matches: %s", matching_subs)
return matching_subs[0].file
else:
logger.debug("Nothing matched")
return None
def _analize_sub_name(sub_name: str, title_):
titles = re.split(r"[.-]", os.path.splitext(sub_name)[0])
for title in titles:
title = title.strip()
ratio = SequenceMatcher(None, title, title_).ratio()
if ratio > 0.85:
logger.debug(
"Episode title matched: '%s' -> '%s' [%s]", title, sub_name, ratio
)
# Avoid false positives with short titles
if len(title_) > 4 and ratio >= 0.98:
return _MatchingSub(sub_name, 3, "Perfect title ratio")
return _MatchingSub(sub_name, 1, "Normal title ratio")
logger.debug("No episode title matched from file: %s", sub_name)
return None
def get_subtitle_from_archive(
archive, forced=False, episode=None, get_first_subtitle=False, **kwargs
):
"Get subtitle from Rarfile/Zipfile object. Return None if nothing is found."
subs_in_archive = [
name
for name in archive.namelist()
if name.endswith((".srt", ".sub", ".ssa", ".ass"))
]
if not subs_in_archive:
logger.info("No subtitles found in archive")
return None
logger.debug("Subtitles in archive: %s", subs_in_archive)
if len(subs_in_archive) == 1 or get_first_subtitle:
logger.debug("Getting first subtitle in archive: %s", subs_in_archive)
return fix_line_ending(archive.read(subs_in_archive[0]))
matching_sub = _get_matching_sub(subs_in_archive, forced, episode, **kwargs)
if matching_sub is not None:
logger.info("Using %s from archive", matching_sub)
return fix_line_ending(archive.read(matching_sub))
logger.debug("No subtitle found in archive")
return None
def is_episode(content):
return "episode" in guessit(content, {"type": "episode"})
_ENCS = ("utf-8", "ascii", "iso-8859-1", "iso-8859-2", "iso-8859-5", "cp1252")
def _zip_from_subtitle_file(content):
with tempfile.NamedTemporaryFile(prefix="spsub", suffix=".srt") as tmp_f:
tmp_f.write(content)
sub = None
for enc in _ENCS:
try:
logger.debug("Trying %s encoding", enc)
sub = pysubs2.load(tmp_f.name, encoding=enc)
except Exception as error:
logger.debug("%s: %s", type(error).__name__, error)
continue
else:
break
if sub is not None:
logger.debug("Identified subtitle file: %s", sub)
zip_obj = zipfile.ZipFile(io.BytesIO(), mode="x")
zip_obj.write(tmp_f.name, os.path.basename(tmp_f.name))
return zip_obj
logger.debug("Couldn't load subtitle file")
return None
def METHOD_NAME(content: bytes):
"""Get RarFile/ZipFile object from bytes. A ZipFile instance will be returned
if a subtitle-like stream is found. Return None if something else is found."""
archive_stream = io.BytesIO(content)
if rarfile.is_rarfile(archive_stream):
logger.debug("Identified rar archive")
return rarfile.RarFile(archive_stream)
elif zipfile.is_zipfile(archive_stream):
logger.debug("Identified zip archive")
return zipfile.ZipFile(archive_stream)
logger.debug("No compression format found. Trying with subtitle-like files")
return _zip_from_subtitle_file(content)
def update_matches(
matches,
video,
release_info: Union[str, Iterable[str]],
split="\n",
**guessit_options
):
"""Update matches set from release info string or Iterable.
Use the split parameter to iterate over the set delimiter; set None to avoid split."""
guessit_options["type"] = "episode" if isinstance(video, Episode) else "movie"
logger.debug("Guessit options to update matches: %s", guessit_options)
if isinstance(release_info, str):
release_info = release_info.split(split)
for release in release_info:
for release_split in release.split(split):
logger.debug("Updating matches from release info: %s", release)
matches |= guess_matches(
video, guessit(release_split.strip(), guessit_options)
)
logger.debug("New matches: %s", matches)
return matches |
4,949 | crop module name | # Copyright 2015-2020, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Commandline argument parsing for our test runner.
"""
import getopt
import stem.util.conf
import stem.util.log
import test
from typing import Any, Dict, List, NamedTuple, Optional, Sequence
LOG_TYPE_ERROR = """\
'%s' isn't a logging runlevel, use one of the following instead:
TRACE, DEBUG, INFO, NOTICE, WARN, ERROR
"""
CONFIG = stem.util.conf.config_dict('test', {
'msg.help': '',
'target.description': {},
'target.torrc': {},
})
OPT = 'auit:l:qvh'
OPT_EXPANDED = ['all', 'unit', 'integ', 'targets=', 'test=', 'exclude-test=', 'log=', 'log-file=', 'tor=', 'quiet', 'verbose', 'help']
class Arguments(NamedTuple):
run_unit: bool = False
run_integ: bool = False
specific_test: List[str] = []
exclude_test: List[str] = []
logging_runlevel: Optional[str] = None
logging_path: Optional[str] = None
tor_path: str = 'tor'
run_targets: List['test.Target'] = [test.Target.RUN_OPEN]
attribute_targets: List['test.Target'] = []
quiet: bool = False
verbose: bool = False
print_help: bool = False
@staticmethod
def parse(argv: Sequence[str]) -> 'test.arguments.Arguments':
"""
Parses our commandline arguments into this class.
:param list argv: input arguments to be parsed
:returns: :class:`test.arguments.Arguments` for this commandline input
:raises: **ValueError** if we got an invalid argument
"""
args = {} # type: Dict[str, Any]
try:
recognized_args, unrecognized_args = getopt.getopt(argv, OPT, OPT_EXPANDED) # type: ignore
if unrecognized_args:
error_msg = "aren't recognized arguments" if len(unrecognized_args) > 1 else "isn't a recognized argument"
raise getopt.GetoptError("'%s' %s" % ("', '".join(unrecognized_args), error_msg))
except Exception as exc:
raise ValueError('%s (for usage provide --help)' % exc)
for opt, arg in recognized_args:
if opt in ('-a', '--all'):
args['run_unit'] = True
args['run_integ'] = True
elif opt in ('-u', '--unit'):
args['run_unit'] = True
elif opt in ('-i', '--integ'):
args['run_integ'] = True
elif opt in ('-t', '--targets'):
run_targets, attribute_targets = [], []
integ_targets = arg.split(',')
all_run_targets = [t for t in test.Target if CONFIG['target.torrc'].get(t) is not None]
# validates the targets and split them into run and attribute targets
if not integ_targets:
raise ValueError('No targets provided')
for target in integ_targets:
if target not in test.Target:
raise ValueError('Invalid integration target: %s' % target)
elif target in all_run_targets:
run_targets.append(target)
else:
attribute_targets.append(target)
# check if we were told to use all run targets
if test.Target.RUN_ALL in attribute_targets:
attribute_targets.remove(test.Target.RUN_ALL)
run_targets = all_run_targets
# if no RUN_* targets are provided then keep the default (otherwise we
# won't have any tests to run)
if run_targets:
args['run_targets'] = run_targets
args['attribute_targets'] = attribute_targets
elif opt == '--test':
args.setdefault('specific_test', []).append(METHOD_NAME(arg))
elif opt == '--exclude-test':
args.setdefault('exclude_test', []).append(METHOD_NAME(arg))
elif opt in ('-l', '--log'):
arg = arg.upper()
if arg not in stem.util.log.LOG_VALUES:
raise ValueError(LOG_TYPE_ERROR % arg)
args['logging_runlevel'] = arg
elif opt == '--log-file':
args['logging_path'] = arg
elif opt in ('--tor'):
args['tor_path'] = arg
elif opt in ('-q', '--quiet'):
args['quiet'] = True
elif opt in ('-v', '--verbose'):
args['verbose'] = True
elif opt in ('-h', '--help'):
args['print_help'] = True
return Arguments(**args)
@staticmethod
def get_help() -> str:
"""
Provides usage information, as provided by the '--help' argument. This
includes a listing of the valid integration targets.
:returns: **str** with our usage information
"""
help_msg = CONFIG['msg.help']
# gets the longest target length so we can show the entries in columns
target_name_length = max(map(len, test.Target))
description_format = '\n %%-%is - %%s' % target_name_length
for target in test.Target:
help_msg += description_format % (target, CONFIG['target.description'].get(target, ''))
help_msg += '\n'
return help_msg
def METHOD_NAME(name: str) -> str:
"""
Test modules have a 'test.unit.' or 'test.integ.' prefix which can
be omitted from our '--test' argument. Cropping this so we can do
normalized comparisons.
:param str name: module name to crop
:returns: **str** with the cropped module name
"""
if name.startswith('test.unit.'):
return name[10:]
elif name.startswith('test.integ.'):
return name[11:]
else:
return name |
4,950 | test locale qualification in | # -*- coding: utf-8 -*-
from tests.unit import AWSMockServiceTestCase
from boto.mturk.connection import MTurkConnection
from boto.mturk.question import ExternalQuestion
from boto.mturk.qualification import \
Qualifications, LocaleRequirement
MOCK_SERVER_RESPONSE = b"""
<MockServerResponse>
<Request>
<IsValid>True</IsValid>
</Request>
</MockServerResponse>"""
class TestMTurkPostingWithQualificationsIn(AWSMockServiceTestCase):
connection_class = MTurkConnection
def setUp(self):
super(TestMTurkPostingWithQualificationsIn, self).setUp()
def METHOD_NAME(self):
self.set_http_response(
status_code=200,
body=MOCK_SERVER_RESPONSE)
q = ExternalQuestion(
external_url="http://samplesite",
frame_height=800)
keywords = ['boto', 'test', 'doctest']
title = "Boto External Question Test"
annotation = 'An annotation from boto external question test'
qualifications = Qualifications()
test_requirement = LocaleRequirement(
comparator='In',
locale=[('US', 'WA'), 'CA'])
qualifications.add(test_requirement)
create_hit_rs = self.service_connection.create_hit(
question=q,
lifetime=60*65,
max_assignments=2,
title=title,
keywords=keywords,
reward=0.05,
duration=60*6,
approval_delay=60*60,
annotation=annotation,
qualifications=qualifications)
self.assert_request_parameters({
'QualificationRequirement.1.Comparator': 'In',
'QualificationRequirement.1.LocaleValue.1.Country': 'US',
'QualificationRequirement.1.LocaleValue.1.Subdivision': 'WA',
'QualificationRequirement.1.LocaleValue.2.Country': 'CA',
'QualificationRequirement.1.QualificationTypeId':
'00000000000000000071'},
ignore_params_values=['AWSAccessKeyId',
'SignatureVersion',
'Timestamp',
'Title',
'Question',
'AssignmentDurationInSeconds',
'RequesterAnnotation',
'Version',
'LifetimeInSeconds',
'AutoApprovalDelayInSeconds',
'Reward.1.Amount',
'Description',
'MaxAssignments',
'Reward.1.CurrencyCode',
'Keywords',
'Operation'])
        self.assertEqual(create_hit_rs.status, True)
def test_locale_qualification_notin_in(self):
self.set_http_response(
status_code=200,
body=MOCK_SERVER_RESPONSE)
q = ExternalQuestion(
external_url="http://samplesite",
frame_height=800)
keywords = ['boto', 'test', 'doctest']
title = "Boto External Question Test"
annotation = 'An annotation from boto external question test'
qualifications = Qualifications()
test_requirement1 = LocaleRequirement(
comparator='NotIn',
locale=[('US', 'WA'), 'CA'])
test_requirement2 = LocaleRequirement(
comparator='In',
locale=[('US', 'CA')])
qualifications.add(test_requirement1)
qualifications.add(test_requirement2)
create_hit_rs = self.service_connection.create_hit(
question=q,
lifetime=60*65,
max_assignments=2,
title=title,
keywords=keywords,
reward=0.05,
duration=60*6,
approval_delay=60*60,
annotation=annotation,
qualifications=qualifications)
self.assert_request_parameters({
'QualificationRequirement.1.Comparator': 'NotIn',
'QualificationRequirement.1.LocaleValue.1.Country': 'US',
'QualificationRequirement.1.LocaleValue.1.Subdivision': 'WA',
'QualificationRequirement.1.LocaleValue.2.Country': 'CA',
'QualificationRequirement.1.QualificationTypeId':
'00000000000000000071',
'QualificationRequirement.2.Comparator': 'In',
'QualificationRequirement.2.LocaleValue.1.Country': 'US',
'QualificationRequirement.2.LocaleValue.1.Subdivision': 'CA',
'QualificationRequirement.2.QualificationTypeId':
'00000000000000000071'},
ignore_params_values=['AWSAccessKeyId',
'SignatureVersion',
'Timestamp',
'Title',
'Question',
'AssignmentDurationInSeconds',
'RequesterAnnotation',
'Version',
'LifetimeInSeconds',
'AutoApprovalDelayInSeconds',
'Reward.1.Amount',
'Description',
'MaxAssignments',
'Reward.1.CurrencyCode',
'Keywords',
'Operation'])
        self.assertEqual(create_hit_rs.status, True) |
4,951 | test simple | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
from astropy import units as u
from ... import splatalogue
SPLAT_DATA = 'CO_colons.csv'
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def METHOD_NAME(patch_post):
splatalogue.Splatalogue.query_lines(min_frequency=114 * u.GHz,
max_frequency=116 * u.GHz,
chemical_name=' CO ')
@pytest.mark.remote_data
def test_init(patch_post):
x = splatalogue.Splatalogue.query_lines(min_frequency=114 * u.GHz,
max_frequency=116 * u.GHz,
chemical_name=' CO ')
S = splatalogue.Splatalogue(chemical_name=' CO ')
y = S.query_lines(min_frequency=114 * u.GHz, max_frequency=116 * u.GHz)
# it is not currently possible to test equality between tables:
# masked arrays fail
# assert y == x
assert len(x) == len(y)
assert all(y['Species'] == x['Species'])
assert all(x['Chemical Name'] == y['Chemical Name'])
def test_load_species_table():
tbl = splatalogue.load_species_table.species_lookuptable()
CO = tbl.find(' CO ')
assert len(CO) == 4
# regression test: get_query_payload should work (#308)
def test_get_payload():
q = splatalogue.core.Splatalogue.query_lines_async(min_frequency=1 * u.GHz,
max_frequency=10 * u.GHz,
get_query_payload=True)
assert '__utma' in q
# regression test: line lists should ask for only one line list, not all
def test_line_lists():
q = splatalogue.core.Splatalogue.query_lines_async(min_frequency=1 * u.GHz,
max_frequency=10 * u.GHz,
line_lists=['JPL'],
get_query_payload=True)
assert q['displayJPL'] == 'displayJPL'
assert q['displaySLAIM'] == ''
# regression test: raise an exception if a string is passed to line_lists
# uses get_query_payload to avoid having to monkeypatch
def test_linelist_type():
with pytest.raises(TypeError) as exc:
splatalogue.core.Splatalogue.query_lines_async(min_frequency=1 * u.GHz,
max_frequency=10 * u.GHz,
line_lists='JPL',
get_query_payload=True)
assert exc.value.args[0] == ("Line lists should be a list of linelist "
"names. See Splatalogue.ALL_LINE_LISTS")
def test_top20_crashorno():
splatalogue.core.Splatalogue.query_lines_async(min_frequency=114 * u.GHz,
max_frequency=116 * u.GHz,
top20='top20',
get_query_payload=True)
with pytest.raises(ValueError) as exc:
splatalogue.core.Splatalogue.query_lines_async(
min_frequency=114 * u.GHz, max_frequency=116 * u.GHz,
top20='invalid', get_query_payload=True)
assert exc.value.args[0] == "Top20 is not one of the allowed values"
def test_band_crashorno():
splatalogue.core.Splatalogue.query_lines_async(band='alma3',
get_query_payload=True)
with pytest.raises(ValueError) as exc:
splatalogue.core.Splatalogue.query_lines_async(band='invalid',
get_query_payload=True)
assert exc.value.args[0] == "Invalid frequency band."
# Upstream changed: there is no distinction between versions for this molecule
# # regression test : version selection should work
# # Unfortunately, it looks like version1 = version2 on the web page now, so this
# # may no longer be a valid test
# @pytest.mark.remote_data
# def test_version_selection():
# results = splatalogue.Splatalogue.query_lines(
# min_frequency= 703*u.GHz,
# max_frequency=706*u.GHz,
# chemical_name='Acetaldehyde',
# version='v1.0'
# )
# assert len(results)==1
def test_exclude(patch_post):
# regression test for issue 616
d = splatalogue.Splatalogue.query_lines_async(min_frequency=114 * u.GHz,
max_frequency=116 * u.GHz,
chemical_name=' CO ',
exclude=None,
get_query_payload=True)
exclusions = {'no_atmospheric': 'no_atmospheric',
'no_potential': 'no_potential',
'no_probable': 'no_probable', }
for k, v in exclusions.items():
assert d[k] == v
d = splatalogue.Splatalogue.query_lines_async(min_frequency=114 * u.GHz,
max_frequency=116 * u.GHz,
chemical_name=' CO ',
exclude='none',
get_query_payload=True)
for k, v in exclusions.items():
assert k not in d
for k in d:
assert k[:3] != 'no_'
@pytest.mark.remote_data
def test_exclude_remote():
# regression test for issue 616
# only entry should be "D213CO Formaldehyde 351.96064 3.9e-06 ...."
results = splatalogue.Splatalogue.query_lines(
min_frequency=351.9*u.GHz,
max_frequency=352.*u.GHz,
chemical_name='Formaldehyde',
exclude='none')
assert len(results) >= 1 |
4,952 | subscribe to list |
from django.core.management.base import BaseCommand
from django.conf import settings
class Command(BaseCommand):
"""
This script is to sync the groups and group subscribers with the campaign monitor
To run the command: python manage.py sync_campaign_monitor --verbosity 2
"""
def handle(self, *args, **options):
from tendenci.apps.user_groups.models import Group
from tendenci.apps.profiles.models import Profile
from tendenci.apps.campaign_monitor.models import (ListMap, setup_custom_fields)
from tendenci.apps.campaign_monitor.utils import sync_campaigns, sync_templates
from createsend import (Client, List, Subscriber,
BadRequest, Unauthorized)
verbosity = 1
if 'verbosity' in options:
verbosity = options['verbosity']
def METHOD_NAME(subscriber_obj, list_id, name, email, custom_data):
# check if this user has already subscribed, if not, subscribe it
try:
subscriber = subscriber_obj.get(list_id, email)
if str(subscriber.State).lower() == 'active':
print(name, email, ' - UPDATED')
subscriber = subscriber_obj.update(email, name, custom_data, True)
except BadRequest as br:
print(br)
try:
subscriber_obj.add(list_id, email, name, custom_data, True) # Returns email_address
if verbosity >=2:
print("%s (%s)" % (name, email))
except BadRequest as br:
print(name, email, ' - NOT ADDED: %s' % br)
api_key = getattr(settings, 'CAMPAIGNMONITOR_API_KEY', None)
client_id = getattr(settings, 'CAMPAIGNMONITOR_API_CLIENT_ID', None)
#CreateSend.api_key = api_key
auth = {'api_key': api_key}
cl = Client(auth, client_id)
lists = cl.lists()
list_ids = [list.ListID for list in lists]
list_names = [list.Name for list in lists]
list_ids_d = dict(zip(list_names, list_ids))
groups = Group.objects.filter(status=1, status_detail='active', sync_newsletters=1)
listmaps = ListMap.objects.filter(group__sync_newsletters=1)
syncd_groups = [listmap.group for listmap in listmaps]
cm_list = List(auth)
print("Starting to sync groups with campaign monitor...")
print()
for group in groups:
if group not in syncd_groups:
# get the list id or create a list if not exists
                # campaign monitor requires the list title
if group.name in list_names:
list_id = list_ids_d[group.name]
else:
# add group to the campaign monitor
list_id = cm_list.create(client_id, group.name, "", False, "")
print("Added group '%s' to the C.M. list." % group.name)
print()
# insert to the listmap
list_map = ListMap(group=group,
list_id=list_id)
list_map.save()
else:
list_map = ListMap.objects.filter(group=group)[0]
list_id = list_map.list_id
                # if a previously added list is deleted on campaign monitor, add it back
# TODO: we might need a setting to decide whether we want to add it back or not.
a_list = List(auth, list_id)
try:
#list_stats = a_list.stats()
# set up custom fields
print("Setting up custom fields...")
setup_custom_fields(a_list)
#num_unsubscribed = list_stats.TotalUnsubscribes
#if num_unsubscribed > 0:
# # a list of all unsubscribed
# unsubscribed_obj = a_list.unsubscribed('2011-5-1')
# unsubscribed_emails = [res.EmailAddress for res in unsubscribed_obj.Results]
# unsubscribed_names = [res.Name for res in unsubscribed_obj.Results]
# unsubscribed_list = zip(unsubscribed_emails, unsubscribed_names)
except Unauthorized as e:
if 'Invalid ListID' in e:
# this list might be deleted on campaign monitor, add it back
list_id = cm_list.create(client_id, group.name, "", False, "")
# update the list_map
list_map.list_id = list_id
list_map.save()
# sync subscribers in this group
print("Subscribing users to the C.M. list '%s'..." % group.name)
members = group.members.all()
for i, member in enumerate(members, 1):
# Append custom fields from the profile
try:
profile = member.profile
except Profile.DoesNotExist:
profile = None
custom_data = []
if profile:
fields = ['city', 'state', 'zipcode', 'country', 'sex', 'member_number']
for field in fields:
data = {}
data['Key'] = field
data['Value'] = getattr(profile, field)
if not data['Value']:
data['Clear'] = True
custom_data.append(data)
email = member.email
name = member.get_full_name()
subscriber_obj = Subscriber(auth, list_id, email)
METHOD_NAME(subscriber_obj, list_id, name, email, custom_data)
print('Done')
print('Starting to sync campaigns with campaign monitor...')
sync_campaigns()
print("Done")
print('Syncing templates...')
sync_templates()
print("Done") |
4,953 | show extra columns | from django.utils.translation import get_language
from django.utils.translation import gettext as _
from django.utils.translation import gettext_noop
from memoized import memoized
from corehq.apps.es import filters as es_filters
from corehq.apps.es import forms as form_es
from corehq.apps.locations.permissions import location_safe
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.display import FormDisplay
from corehq.apps.reports.filters.forms import FormsByApplicationFilter
from corehq.apps.reports.filters.users import \
ExpandedMobileWorkerFilter as EMWF
from corehq.apps.reports.generic import (
ElasticProjectInspectionReport,
GenericTabularReport,
ProjectInspectionReportParamsMixin,
)
from corehq.apps.reports.models import HQUserType
from corehq.apps.reports.standard import (
DatespanMixin,
ProjectReport,
ProjectReportParametersMixin,
)
from corehq.apps.reports.standard.monitoring import (
CompletionOrSubmissionTimeMixin,
MultiFormDrilldownMixin,
)
from corehq.apps.reports.util import datespan_from_beginning
from corehq.apps.users.util import SYSTEM_USER_ID
from corehq.const import MISSING_APP_ID
from corehq.toggles import SUPPORT
class ProjectInspectionReport(
ProjectInspectionReportParamsMixin,
GenericTabularReport,
ProjectReport,
ProjectReportParametersMixin
):
"""
Base class for this reporting section
"""
exportable = False
asynchronous = False
ajax_pagination = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.users.SelectMobileWorkerFilter']
def get_user_link(self, user):
user_link = self.get_raw_user_link(user)
return self.table_cell(user.raw_username, user_link)
def get_raw_user_link(self, user):
raise NotImplementedError
class SubmitHistoryMixin(ElasticProjectInspectionReport,
ProjectReportParametersMixin,
CompletionOrSubmissionTimeMixin, MultiFormDrilldownMixin,
DatespanMixin):
name = gettext_noop('Submit History')
slug = 'submit_history'
fields = [
'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
'corehq.apps.reports.filters.forms.FormsByApplicationFilter',
'corehq.apps.reports.filters.forms.CompletionOrSubmissionTimeFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
]
ajax_pagination = True
include_inactive = True
@property
def default_datespan(self):
return datespan_from_beginning(self.domain_object, self.timezone)
def _get_users_filter(self, mobile_user_and_group_slugs):
if (
EMWF.no_filters_selected(mobile_user_and_group_slugs)
and self.request.couch_user.has_permission(self.domain, 'access_all_locations')
):
return None
user_ids = (EMWF.user_es_query(self.domain,
mobile_user_and_group_slugs,
self.request.couch_user)
.values_list('_id', flat=True))
if HQUserType.UNKNOWN in EMWF.selected_user_types(mobile_user_and_group_slugs):
user_ids.append(SYSTEM_USER_ID)
return form_es.user_id(user_ids)
@staticmethod
def _form_filter(form):
app_id = form.get('app_id', None)
if app_id and app_id != MISSING_APP_ID:
return es_filters.AND(
form_es.app(app_id),
form_es.xmlns(form['xmlns'])
)
return form_es.xmlns(form['xmlns'])
@property
def es_query(self):
time_filter = form_es.submitted if self.by_submission_time else form_es.completed
mobile_user_and_group_slugs = self.request.GET.getlist(EMWF.slug)
query = (form_es.FormES()
.domain(self.domain)
.filter(time_filter(gte=self.datespan.startdate,
lt=self.datespan.enddate_adjusted)))
users_filter = self._get_users_filter(mobile_user_and_group_slugs)
if users_filter:
query = query.filter(users_filter)
# filter results by app and xmlns if applicable
if FormsByApplicationFilter.has_selections(self.request):
form_values = list(self.all_relevant_forms.values())
if form_values:
query = query.OR(*[self._form_filter(f) for f in form_values])
else:
query = query.NOT(es_filters.missing("app_id"))
return query
@property
@memoized
def es_query_result(self):
return (self.es_query
.set_sorting_block(self.get_sorting_block())
.start(self.pagination.start)
.size(self.pagination.count)
.run())
def get_sorting_block(self):
sorting_block = super(SubmitHistoryMixin, self).get_sorting_block()
if sorting_block:
return sorting_block
else:
return [{self.time_field: {'order': 'desc'}}]
@property
def time_field(self):
return 'received_on' if self.by_submission_time else 'form.meta.timeEnd'
@property
def total_records(self):
return int(self.es_query_result.total)
@location_safe
class SubmitHistory(SubmitHistoryMixin, ProjectReport):
@property
def METHOD_NAME(self):
return self.request.user and SUPPORT.enabled(self.request.user.username)
@classmethod
def display_in_dropdown(cls, domain=None, project=None, user=None):
if project and project.commtrack_enabled:
return False
else:
return True
@classmethod
def get_subpages(cls):
def _get_form_name(request=None, **context):
if 'instance' in context:
try:
return context['instance'].form_data['@name']
except KeyError:
pass
return _('View Form')
from corehq.apps.reports.views import FormDataView
return [
{
'title': _get_form_name,
'urlname': FormDataView.urlname,
},
]
@property
def headers(self):
h = [
DataTablesColumn(_("View Form"), css_class="view-form-link"),
DataTablesColumn(_("Username"), prop_name='form.meta.username'),
DataTablesColumn(
_("Submission Time") if self.by_submission_time
else _("Completion Time"),
prop_name=self.time_field
),
DataTablesColumn(_("Form"), prop_name='form.@name'),
]
if self.METHOD_NAME:
h.append(DataTablesColumn(_("Sync Log")))
return DataTablesHeader(*h)
@property
def rows(self):
for form in self.es_query_result.hits:
display = FormDisplay(form, self, lang=get_language())
row = [
display.form_data_link,
display.username,
display.submission_or_completion_time,
display.readable_form_name,
]
if self.METHOD_NAME:
row.append(form.get('last_sync_token', ''))
yield row |
4,954 | test negative exponent | import math
import unittest
class PowTest(unittest.TestCase):
def powtest(self, type):
if type != float:
for i in range(-1000, 1000):
self.assertEqual(pow(type(i), 0), 1)
self.assertEqual(pow(type(i), 1), type(i))
self.assertEqual(pow(type(0), 1), type(0))
self.assertEqual(pow(type(1), 1), type(1))
for i in range(-100, 100):
self.assertEqual(pow(type(i), 3), i*i*i)
pow2 = 1
for i in range(0, 31):
self.assertEqual(pow(2, i), pow2)
if i != 30 : pow2 = pow2*2
for othertype in (int,):
for i in list(range(-10, 0)) + list(range(1, 10)):
ii = type(i)
for j in range(1, 11):
jj = -othertype(j)
pow(ii, jj)
for othertype in int, float:
for i in range(1, 100):
zero = type(0)
exp = -othertype(i/10.0)
if exp == 0:
continue
self.assertRaises(ZeroDivisionError, pow, zero, exp)
il, ih = -20, 20
jl, jh = -5, 5
kl, kh = -10, 10
asseq = self.assertEqual
if type == float:
il = 1
asseq = self.assertAlmostEqual
        elif type == int:
            # under Python 3 the old int/long split collapses into int, so the
            # former long branch (jl, jh = 0, 15) was unreachable and is dropped
            jl = 0
for i in range(il, ih+1):
for j in range(jl, jh+1):
for k in range(kl, kh+1):
if k != 0:
if type == float or j < 0:
self.assertRaises(TypeError, pow, type(i), j, k)
continue
asseq(
pow(type(i),j,k),
pow(type(i),j)% type(k)
)
def test_powint(self):
self.powtest(int)
def test_powfloat(self):
self.powtest(float)
def test_other(self):
# Other tests-- not very systematic
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
for i in range(-10, 11):
for j in range(0, 6):
for k in range(-7, 11):
if j >= 0 and k != 0:
self.assertEqual(
pow(i,j) % k,
pow(i,j,k)
)
if j >= 0 and k != 0:
self.assertEqual(
pow(int(i),j) % k,
pow(int(i),j,k)
)
def test_bug643260(self):
class TestRpow:
def __rpow__(self, other):
return None
None ** TestRpow() # Won't fail when __rpow__ invoked. SF bug #643260.
def test_bug705231(self):
# -1.0 raised to an integer should never blow up. It did if the
# platform pow() was buggy, and Python didn't worm around it.
eq = self.assertEqual
a = -1.0
# The next two tests can still fail if the platform floor()
# function doesn't treat all large inputs as integers
# test_math should also fail if that is happening
eq(pow(a, 1.23e167), 1.0)
eq(pow(a, -1.23e167), 1.0)
for b in range(-10, 11):
eq(pow(a, float(b)), b & 1 and -1.0 or 1.0)
for n in range(0, 100):
fiveto = float(5 ** n)
# For small n, fiveto will be odd. Eventually we run out of
            # mantissa bits, though, and thereafter fiveto will be even.
expected = fiveto % 2.0 and -1.0 or 1.0
eq(pow(a, fiveto), expected)
eq(pow(a, -fiveto), expected)
eq(expected, 1.0) # else we didn't push fiveto to evenness
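    # pow(a, -1, m) (Python 3.8+) computes the modular inverse of a mod m,
    # e.g. pow(3, -1, 7) == 5 because (3 * 5) % 7 == 1.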
def METHOD_NAME(self):
for a in range(-50, 50):
for m in range(-50, 50):
with self.subTest(a=a, m=m):
if m != 0 and math.gcd(a, m) == 1:
# Exponent -1 should give an inverse, with the
# same sign as m.
inv = pow(a, -1, m)
self.assertEqual(inv, inv % m)
self.assertEqual((inv * a - 1) % m, 0)
# Larger exponents
self.assertEqual(pow(a, -2, m), pow(inv, 2, m))
self.assertEqual(pow(a, -3, m), pow(inv, 3, m))
self.assertEqual(pow(a, -1001, m), pow(inv, 1001, m))
else:
with self.assertRaises(ValueError):
pow(a, -1, m)
with self.assertRaises(ValueError):
pow(a, -2, m)
with self.assertRaises(ValueError):
pow(a, -1001, m)
if __name__ == "__main__":
unittest.main() |
4,955 | split binary anon | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# @starlark-rust: allow_string_literals_in_type_expr
load("@prelude//cxx:cxx_toolchain_types.bzl", "CxxToolchainInfo")
load("//antlir/buck2/bzl:ensure_single_output.bzl", "ensure_single_output")
SplitBinaryInfo = provider(fields = [
"stripped",
"debuginfo",
"metadata",
])
def _split_binary_impl(ctx: AnalysisContext) -> list[Provider]:
objcopy = ctx.attrs.objcopy[RunInfo] if ctx.attrs.objcopy else ctx.attrs.cxx_toolchain[CxxToolchainInfo].binary_utilities_info.objcopy
src = ensure_single_output(ctx.attrs.src)
stripped = ctx.actions.declare_output("stripped")
debuginfo = ctx.actions.declare_output("debuginfo")
metadata = ctx.actions.declare_output("metadata.json")
# objcopy needs a temporary file that it can write to. use a buck2 output
# artifact so that it doesn't try to put it somewhere it doesn't have access
# to write
objcopy_tmp = ctx.actions.declare_output("objcopy_tmp")
split = ctx.actions.write("split.py", """#!/usr/bin/env python3
import argparse
import json
import os
import shutil
import subprocess
import sys
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument("--objcopy", required=True)
parser.add_argument("--binary", required=True, type=Path)
parser.add_argument("--stripped", required=True, type=Path)
parser.add_argument("--debuginfo", required=True, type=Path)
parser.add_argument("--metadata", required=True, type=Path)
parser.add_argument("--objcopy-tmp", required=True, type=Path)
args = parser.parse_args()
# ensure this exists or buck2 will get mad
args.objcopy_tmp.touch()
with open(args.binary, mode="rb") as src_f:
first_4 = src_f.read(4)
is_elf = first_4 == b"\x7fELF"
# If this is not an ELF binary, it can't be stripped so just copy the original
if not is_elf:
shutil.copyfile(args.binary, args.stripped)
with open(args.debuginfo, "w") as _f:
pass
with open(args.metadata, "w") as f:
json.dump({"elf": False}, f)
sys.exit(0)
# Save debug symbols to a separate debuginfo file
subprocess.run(
[
args.objcopy,
"--only-keep-debug",
args.binary,
args.debuginfo,
],
check=True,
)
# Remove the debug symbols from the stripped binary
subprocess.run(
[
args.objcopy,
"--strip-debug",
"--remove-section=.pseudo_probe",
"--remove-section=.pseudo_probe_desc",
args.binary,
args.stripped,
],
check=True,
)
# Find the BuildID of the binary. This determines where it should go for gdb to
# look it up under /usr/lib/debug
# https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html
buildid = subprocess.run(
[
args.objcopy,
"--dump-section",
".note.gnu.build-id=/dev/stdout",
args.binary,
args.objcopy_tmp,
],
capture_output=True,
check=True,
).stdout
# Prefer to install the debug info by BuildID since it does not require another
# objcopy invocation and is more standard
if buildid:
buildid = buildid[len(buildid) - 20 :].hex()
with open(args.metadata, "w") as f:
json.dump({"elf": True, "buildid": buildid}, f)
else:
# We might not be able to get the BuildID if this is a dev-mode binary, so
# fallback to setting the debuglink section in the stripped binary.
# This tells the debugger that there is a debug file available, and records
# the hash of the debuginfo file so that it can be loaded correctly.
subprocess.run(
[
os.path.abspath(args.objcopy),
f"--add-gnu-debuglink={args.debuginfo.name}",
args.stripped.resolve(),
],
cwd=args.stripped.parent,
check=True,
)
with open(args.metadata, "w") as f:
json.dump({"elf": True, "buildid": None}, f)
""", is_executable = True)
ctx.actions.run(
cmd_args(
split,
cmd_args(objcopy, format = "--objcopy={}"),
cmd_args(src, format = "--binary={}"),
cmd_args(stripped.as_output(), format = "--stripped={}"),
cmd_args(debuginfo.as_output(), format = "--debuginfo={}"),
cmd_args(metadata.as_output(), format = "--metadata={}"),
cmd_args(objcopy_tmp.as_output(), format = "--objcopy-tmp={}"),
),
category = "split",
)
return [
DefaultInfo(sub_targets = {
"debuginfo": [DefaultInfo(debuginfo)],
"metadata": [DefaultInfo(metadata)],
"stripped": [DefaultInfo(stripped)],
}),
SplitBinaryInfo(
stripped = stripped,
debuginfo = debuginfo,
metadata = metadata,
),
]
split_binary = rule(
impl = _split_binary_impl,
attrs = {
"cxx_toolchain": attrs.option(attrs.dep(default = "toolchains//:cxx", providers = [CxxToolchainInfo]), default = None),
"objcopy": attrs.option(attrs.exec_dep(), default = None),
"src": attrs.dep(providers = [RunInfo]),
},
)
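# Usage sketch (target names below are hypothetical):
#
#   split_binary(
#       name = "mybin--split",
#       src = "//some/cell:mybin",
#   )
#
# The stripped binary, debuginfo and metadata are then addressable via the
# [stripped], [debuginfo] and [metadata] sub-targets declared above.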
def METHOD_NAME(
*,
ctx: AnalysisContext | "AnalyzeFeatureContext",
src: Dependency,
objcopy: Dependency) -> "promise":
if RunInfo not in src:
fail("{} does not have a RunInfo provider".format(src.label))
return ctx.actions.anon_target(split_binary, {
"name": "debuginfo//" + src.label.package + ":" + src.label.name + ("[{}]".format(src.label.sub_target) if src.label.sub_target else ""),
"objcopy": objcopy,
"src": src,
}) |
4,956 | options | #!/usr/bin/env python
# encoding: utf-8
# daniel.svensson at purplescout.se 2008
# Thomas Nagy 2016-2018 (ita)
"""
Support for Ruby extensions. A C/C++ compiler is required::
def options(opt):
opt.load('compiler_c ruby')
def configure(conf):
conf.load('compiler_c ruby')
conf.check_ruby_version((1,8,0))
conf.check_ruby_ext_devel()
conf.check_ruby_module('libxml')
def build(bld):
bld(
features = 'c cshlib rubyext',
source = 'rb_mytest.c',
target = 'mytest_ext',
install_path = '${ARCHDIR_RUBY}')
bld.install_files('${LIBDIR_RUBY}', 'Mytest.rb')
"""
import os
from waflib import Errors, Options, Task, Utils
from waflib.TaskGen import before_method, feature, extension
from waflib.Configure import conf
@feature('rubyext')
@before_method('apply_incpaths', 'process_source', 'apply_bundle', 'apply_link')
def init_rubyext(self):
"""
Add required variables for ruby extensions
"""
self.install_path = '${ARCHDIR_RUBY}'
self.uselib = self.to_list(getattr(self, 'uselib', ''))
if not 'RUBY' in self.uselib:
self.uselib.append('RUBY')
if not 'RUBYEXT' in self.uselib:
self.uselib.append('RUBYEXT')
@feature('rubyext')
@before_method('apply_link', 'propagate_uselib_vars')
def apply_ruby_so_name(self):
"""
Strip the *lib* prefix from ruby extensions
"""
self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.rubyext_PATTERN
@conf
def check_ruby_version(self, minver=()):
"""
Checks if ruby is installed.
If installed the variable RUBY will be set in environment.
The ruby binary can be overridden by ``--with-ruby-binary`` command-line option.
"""
ruby = self.find_program('ruby', var='RUBY', value=Options.METHOD_NAME.rubybinary)
try:
version = self.cmd_and_log(ruby + ['-e', 'puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip()
except Errors.WafError:
self.fatal('could not determine ruby version')
self.env.RUBY_VERSION = version
try:
ver = tuple(map(int, version.split('.')))
	except ValueError:
self.fatal('unsupported ruby version %r' % version)
cver = ''
if minver:
cver = '> ' + '.'.join(str(x) for x in minver)
if ver < minver:
self.fatal('ruby is too old %r' % ver)
self.msg('Checking for ruby version %s' % cver, version)
@conf
def check_ruby_ext_devel(self):
"""
Check if a ruby extension can be created
"""
if not self.env.RUBY:
self.fatal('ruby detection is required first')
if not self.env.CC_NAME and not self.env.CXX_NAME:
self.fatal('load a c/c++ compiler first')
version = tuple(map(int, self.env.RUBY_VERSION.split(".")))
def read_out(cmd):
return Utils.to_list(self.cmd_and_log(self.env.RUBY + ['-rrbconfig', '-e', cmd]))
def read_config(key):
return read_out('puts RbConfig::CONFIG[%r]' % key)
cpppath = archdir = read_config('archdir')
if version >= (1, 9, 0):
ruby_hdrdir = read_config('rubyhdrdir')
cpppath += ruby_hdrdir
if version >= (2, 0, 0):
cpppath += read_config('rubyarchhdrdir')
cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])]
self.check(header_name='ruby.h', includes=cpppath, errmsg='could not find ruby header file', link_header_test=False)
self.env.LIBPATH_RUBYEXT = read_config('libdir')
self.env.LIBPATH_RUBYEXT += archdir
self.env.INCLUDES_RUBYEXT = cpppath
self.env.CFLAGS_RUBYEXT = read_config('CCDLFLAGS')
self.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0]
# ok this is really stupid, but the command and flags are combined.
# so we try to find the first argument...
flags = read_config('LDSHARED')
while flags and flags[0][0] != '-':
flags = flags[1:]
# we also want to strip out the deprecated ppc flags
if len(flags) > 1 and flags[1] == "ppc":
flags = flags[2:]
self.env.LINKFLAGS_RUBYEXT = flags
self.env.LINKFLAGS_RUBYEXT += read_config('LIBS')
self.env.LINKFLAGS_RUBYEXT += read_config('LIBRUBYARG_SHARED')
if Options.METHOD_NAME.rubyarchdir:
self.env.ARCHDIR_RUBY = Options.METHOD_NAME.rubyarchdir
else:
self.env.ARCHDIR_RUBY = read_config('sitearchdir')[0]
if Options.METHOD_NAME.rubylibdir:
self.env.LIBDIR_RUBY = Options.METHOD_NAME.rubylibdir
else:
self.env.LIBDIR_RUBY = read_config('sitelibdir')[0]
@conf
def check_ruby_module(self, module_name):
"""
Check if the selected ruby interpreter can require the given ruby module::
def configure(conf):
conf.check_ruby_module('libxml')
:param module_name: module
:type module_name: string
"""
self.start_msg('Ruby module %s' % module_name)
try:
self.cmd_and_log(self.env.RUBY + ['-e', 'require \'%s\';puts 1' % module_name])
except Errors.WafError:
self.end_msg(False)
self.fatal('Could not find the ruby module %r' % module_name)
self.end_msg(True)
@extension('.rb')
def process(self, node):
return self.create_task('run_ruby', node)
class run_ruby(Task.Task):
"""
Task to run ruby files detected by file extension .rb::
def options(opt):
opt.load('ruby')
def configure(ctx):
ctx.check_ruby_version()
def build(bld):
bld.env.RBFLAGS = '-e puts "hello world"'
bld(source='a_ruby_file.rb')
"""
run_str = '${RUBY} ${RBFLAGS} -I ${SRC[0].parent.abspath()} ${SRC}'
def METHOD_NAME(opt):
"""
Add the ``--with-ruby-archdir``, ``--with-ruby-libdir`` and ``--with-ruby-binary`` options
"""
opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files')
opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path')
opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate ruby binary')
|
4,957 | exe | import enum
from _typeshed import Incomplete
from typing import Any, NamedTuple
from psutil._common import (
NIC_DUPLEX_FULL as NIC_DUPLEX_FULL,
NIC_DUPLEX_HALF as NIC_DUPLEX_HALF,
NIC_DUPLEX_UNKNOWN as NIC_DUPLEX_UNKNOWN,
AccessDenied as AccessDenied,
NoSuchProcess as NoSuchProcess,
ZombieProcess as ZombieProcess,
isfile_strict as isfile_strict,
parse_environ_block as parse_environ_block,
path_exists_strict as path_exists_strict,
supports_ipv6 as supports_ipv6,
usage_percent as usage_percent,
)
from psutil._compat import PY3 as PY3
__extra__all__: Any
POWER_SUPPLY_PATH: str
HAS_PROC_SMAPS: bool
HAS_PROC_SMAPS_ROLLUP: bool
HAS_PROC_IO_PRIORITY: Any
HAS_CPU_AFFINITY: Any
CLOCK_TICKS: Any
PAGESIZE: Any
BOOT_TIME: Any
LITTLE_ENDIAN: Any
DISK_SECTOR_SIZE: int
AF_LINK: Any
AddressFamily: Any
IOPRIO_CLASS_NONE: int
IOPRIO_CLASS_RT: int
IOPRIO_CLASS_BE: int
IOPRIO_CLASS_IDLE: int
class IOPriority(enum.IntEnum):
IOPRIO_CLASS_NONE: int
IOPRIO_CLASS_RT: int
IOPRIO_CLASS_BE: int
IOPRIO_CLASS_IDLE: int
PROC_STATUSES: Any
TCP_STATUSES: Any
class svmem(NamedTuple):
total: int
available: int
percent: float
used: int
free: int
active: int
inactive: int
buffers: int
cached: int
shared: int
slab: int
class sdiskio(NamedTuple):
read_count: Any
write_count: Any
read_bytes: Any
write_bytes: Any
read_time: Any
write_time: Any
read_merged_count: Any
write_merged_count: Any
busy_time: Any
class popenfile(NamedTuple):
path: Any
fd: Any
position: Any
mode: Any
flags: Any
class pmem(NamedTuple):
rss: Any
vms: Any
shared: Any
text: Any
lib: Any
data: Any
dirty: Any
class pfullmem(NamedTuple):
rss: Incomplete
vms: Incomplete
shared: Incomplete
text: Incomplete
lib: Incomplete
data: Incomplete
dirty: Incomplete
uss: Incomplete
pss: Incomplete
swap: Incomplete
class pmmap_grouped(NamedTuple):
path: Any
rss: Any
size: Any
pss: Any
shared_clean: Any
shared_dirty: Any
private_clean: Any
private_dirty: Any
referenced: Any
anonymous: Any
swap: Any
pmmap_ext: Any
class pio(NamedTuple):
read_count: Any
write_count: Any
read_bytes: Any
write_bytes: Any
read_chars: Any
write_chars: Any
class pcputimes(NamedTuple):
user: Any
system: Any
children_user: Any
children_system: Any
iowait: Any
def readlink(path): ...
def file_flags_to_mode(flags): ...
def is_storage_device(name): ...
def set_scputimes_ntuple(procfs_path) -> None: ...
scputimes: Any
prlimit: Any
def calculate_avail_vmem(mems): ...
def virtual_memory() -> svmem: ...
def swap_memory(): ...
def cpu_times(): ...
def per_cpu_times(): ...
def cpu_count_logical(): ...
def cpu_count_cores() -> int | None: ...
def cpu_stats(): ...
def cpu_freq(): ...
net_if_addrs: Any
class _Ipv6UnsupportedError(Exception): ...
class Connections:
tmap: Any
def __init__(self) -> None: ...
def get_proc_inodes(self, pid): ...
def get_all_inodes(self): ...
@staticmethod
def decode_address(addr, family): ...
@staticmethod
def process_inet(file, family, type_, inodes, filter_pid: Incomplete | None = ...) -> None: ...
@staticmethod
def process_unix(file, family, inodes, filter_pid: Incomplete | None = ...) -> None: ...
def retrieve(self, kind, pid: Incomplete | None = ...): ...
def net_connections(kind: str = ...): ...
def net_io_counters(): ...
def net_if_stats(): ...
disk_usage: Any
def disk_io_counters(perdisk: bool = ...): ...
class RootFsDeviceFinder:
major: Incomplete
minor: Incomplete
def __init__(self) -> None: ...
def ask_proc_partitions(self): ...
def ask_sys_dev_block(self): ...
def ask_sys_class_block(self): ...
def find(self): ...
def disk_partitions(all: bool = ...): ...
def sensors_temperatures(): ...
def sensors_fans(): ...
def sensors_battery(): ...
def users(): ...
def boot_time(): ...
def pids(): ...
def pid_exists(pid): ...
def ppid_map(): ...
def wrap_exceptions(fun): ...
class Process:
pid: Any
def __init__(self, pid) -> None: ...
def oneshot_enter(self) -> None: ...
def oneshot_exit(self) -> None: ...
def name(self): ...
def METHOD_NAME(self): ...
def cmdline(self): ...
def environ(self): ...
def terminal(self): ...
def io_counters(self) -> pio: ...
def cpu_times(self): ...
def cpu_num(self): ...
def wait(self, timeout: Incomplete | None = ...): ...
def create_time(self): ...
def memory_info(self): ...
def memory_full_info(self): ...
def memory_maps(self): ...
def cwd(self): ...
def num_ctx_switches(self, _ctxsw_re=...): ...
def num_threads(self, _num_threads_re=...): ...
def threads(self): ...
def nice_get(self): ...
def nice_set(self, value): ...
def cpu_affinity_get(self): ...
def cpu_affinity_set(self, cpus) -> None: ...
def ionice_get(self): ...
def ionice_set(self, ioclass, value): ...
def rlimit(self, resource_, limits: Incomplete | None = ...): ...
def status(self): ...
def open_files(self): ...
def connections(self, kind: str = ...): ...
def num_fds(self): ...
def ppid(self): ...
def uids(self, _uids_re=...): ...
def gids(self, _gids_re=...): ... |
4,958 | plug class filter | import logging
import os
from functools import wraps
from importlib import import_module
from inspect import getmembers, isclass
from os import chdir, getcwd, walk
from os.path import join
from typing import Any
from dongtai_conf.settings import BASE_DIR
logger = logging.getLogger("dongtai.openapi")
PLUGIN_DICT = {}
class DongTaiPlugin:
appname = ""
target_class_name = ""
target_func_name = ""
target_module_name = ""
plugin_type = 1
def before_patch_function(self, func_args, func_kwargs) -> Any:
raise NotImplementedError("To be implemented")
def after_patch_function(self, func_args, func_kwargs, func_res) -> Any:
raise NotImplementedError("To be implemented")
def _monkey_patch(self):
module = import_module(self.target_module_name)
if self.plugin_type == 2:
origin_func = getattr(module, self.target_func_name)
setattr(module, f"_origin_{self.target_func_name}", origin_func)
else:
target_class = getattr(module, self.target_class_name)
origin_func = getattr(target_class, self.target_func_name)
setattr(target_class, f"_origin_{self.target_func_name}", origin_func)
self.target_class = target_class
@wraps(origin_func)
def patched_function(*args, **kwargs):
logger.debug(
f"{self.target_module_name} {self.target_class_name} {self.target_func_name} args:{args} kwargs:{kwargs}"
)
try:
self.before_patch_function(args, kwargs)
except Exception as e:
logger.info(f"plugin error:{e} args: {args} kwargs: {kwargs}", exc_info=True)
res = origin_func(*args, **kwargs)
try:
final_res = self.after_patch_function(args, kwargs, res)
except Exception as e:
logger.info(f"plugin error:{e} args: {args} kwargs: {kwargs}", exc_info=True)
return res
return final_res
if self.plugin_type == 2:
setattr(module, self.target_func_name, patched_function)
else:
setattr(target_class, self.target_func_name, patched_function)
def monkey_patch(self, appname: str) -> None:
if self.appname == appname:
try:
self._monkey_patch()
logger.info(
f"app: {appname} module: {self.target_module_name} class: {self.target_class_name} func : {self.target_func_name} is patched by {type(self).__name__}"
)
except Exception as e:
logger.error(f"monkey_patch failed: {e}", exc_info=True)
def monkey_patch(appname: str) -> None:
plugin_dict = get_plugin_dict()
for plugin in plugin_dict.get(appname, []):
plugin().monkey_patch(appname)
def get_plugin_dict():
if PLUGIN_DICT:
return PLUGIN_DICT
previous_path = getcwd()
PLUGIN_ROOT_PATH = join(BASE_DIR, "dongtai_conf" + os.sep + "plugin")
for root, _directories, files in walk(top=PLUGIN_ROOT_PATH, topdown=False):
for file_ in files:
if file_.startswith("plug_") and (file_.endswith((".py", ".so"))):
if file_.endswith(".py"):
packname = ".".join(
[
root.replace(BASE_DIR + os.sep, "").replace(os.sep, "."),
file_.replace(".py", ""),
]
)
else:
packname = ".".join(
[
root.replace(BASE_DIR + os.sep, "").replace(os.sep, "."),
file_.split(".")[0],
]
)
mod = import_module(packname)
plugin_classes = filter(lambda x: METHOD_NAME(x), getmembers(mod))
for _name, plug_class in plugin_classes:
if PLUGIN_DICT.get(plug_class.appname):
PLUGIN_DICT[plug_class.appname] += [plug_class]
else:
PLUGIN_DICT[plug_class.appname] = [plug_class]
chdir(previous_path)
return PLUGIN_DICT
def METHOD_NAME(tup: tuple) -> bool:
return tup[0].startswith("Plug") and isclass(tup[1]) and issubclass(tup[1], DongTaiPlugin) |
4,959 | test construction |
from os import path
import shutil
from tempfile import mkdtemp
from GangaCore.GPIDev.Lib.File.FileBuffer import FileBuffer
from GangaCore.testlib.GangaUnitTest import GangaUnitTest
from GangaCore.testlib.mark import external
from GangaCore.testlib.monitoring import run_until_completed
from GangaCore.GPIDev.Base.Proxy import stripProxy
def latestLbDevVersion(app):
import subprocess
pipe = subprocess.Popen('lb-dev %s -l' % app, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.DEVNULL)
stdout, stderr = pipe.communicate()
return stdout.split()[0]
def latestDaVinci():
return latestLbDevVersion('DaVinci')
def LFNstr():
return "/not/a/LFN"
def inputOptsFile():
return """
from Gaudi.Configuration import *
EventSelector().Input = [
"DATAFILE='LFN:%s' TYP='POOL_ROOTTREE' OPT='READ'",
]
""" % LFNstr
@external
class TestExternalGaudiExec(GangaUnitTest):
tmpdir_release = mkdtemp()
def setUp(self):
"""Make sure that the Job object isn't destroyed between tests"""
extra_opts = [('TestingFramework', 'AutoCleanup', 'False')]
super(TestExternalGaudiExec, self).setUp(extra_opts=extra_opts)
def METHOD_NAME(self):
"""
This tests that we can construct a GaudiExec object in a simple way
"""
from GangaCore.GPI import Job, prepareGaudiExec
j = Job(application=prepareGaudiExec('DaVinci', latestDaVinci(), TestExternalGaudiExec.tmpdir_release))
assert j.application.directory == path.join(TestExternalGaudiExec.tmpdir_release,
'DaVinciDev_%s' % latestDaVinci())
assert path.isfile(path.join(TestExternalGaudiExec.tmpdir_release, 'DaVinciDev_%s' % latestDaVinci(),
'run'))
assert path.isfile(path.join(TestExternalGaudiExec.tmpdir_release, 'DaVinciDev_%s' % latestDaVinci(),
'Makefile'))
def testParseInputFile(self):
"""
Test that we can parse a fake opts file and get the inputdata from it
"""
from GangaCore.GPI import jobs
j = TestExternalGaudiExec._constructJob()
myOptsFile = path.join(TestExternalGaudiExec.tmpdir_release, 'myOpts.py')
FileBuffer('myOpts.py', inputOptsFile()).create(myOptsFile)
assert path.isfile(myOptsFile)
j.application.readInputData(myOptsFile)
assert len(j.inputdata) == 1
def testPrepareJob(self):
from GangaCore.GPI import Job, LocalFile, prepareGaudiExec
import os
if os.path.exists(TestExternalGaudiExec.tmpdir_release):
os.system("rm -rf %s/*" % TestExternalGaudiExec.tmpdir_release)
j = Job(application=prepareGaudiExec('DaVinci', latestDaVinci(), TestExternalGaudiExec.tmpdir_release))
myHelloOpts = path.join(TestExternalGaudiExec.tmpdir_release, 'hello.py')
FileBuffer('hello.py', 'print("Hello")').create(myHelloOpts)
assert path.isfile(myHelloOpts)
j.application.options = [LocalFile(myHelloOpts)]
j.prepare()
assert j.application.is_prepared.name
assert path.isdir(j.application.is_prepared.path())
def testSubmitJob(self):
from GangaCore.GPI import jobs
j = TestExternalGaudiExec._constructJob()
j.submit()
def testSubmitJobComplete(self):
"""
Test that the job completes successfully
"""
j = TestExternalGaudiExec._constructJob()
j.submit()
run_until_completed(j)
assert j.status == 'completed'
outputfile = path.join(j.outputdir, 'stdout')
assert path.isfile(outputfile)
for this_string in ('testfile.py', 'data.py', 'ThisIsATest', j.application.platform):
assert this_string in open(outputfile).read()
@staticmethod
def _constructJob():
"""
This is a helper method to construct a new GaudiExec job object for submission testing
This just helps reduce repeat code between tests
"""
import os
if os.path.exists(TestExternalGaudiExec.tmpdir_release):
os.system("rm -fr %s/" % TestExternalGaudiExec.tmpdir_release)
from GangaCore.GPI import Job, LocalFile, prepareGaudiExec
j = Job(application=prepareGaudiExec('DaVinci', latestDaVinci(), TestExternalGaudiExec.tmpdir_release))
myOpts = path.join(TestExternalGaudiExec.tmpdir_release, 'testfile.py')
FileBuffer('testfile.py', 'print("ThisIsATest")').create(myOpts)
j.application.options = [LocalFile(myOpts)]
return j
def testSubmitJobDirac(self):
"""
Test that a job can be submitted to the Dirac backend
"""
from GangaCore.GPI import Dirac, DiracProxy
j = TestExternalGaudiExec._constructJob()
j.backend = Dirac(credential_requirements=DiracProxy(group='lhcb_user',
encodeDefaultProxyFileName=False))
j.submit()
assert j.status == "submitted"
def testSubmitJobWithInputFile(self):
"""
This test adds a dummy inputfile into the job and tests that it is returned when the job is completed
"""
from GangaCore.GPI import LocalFile
tempName = 'testGaudiExecFile.txt'
tempName2 = 'testGaudiExecFile2.txt'
tempContent = '12345'
tempContent2 = '67890'
j = TestExternalGaudiExec._constructJob()
tempFile = path.join(TestExternalGaudiExec.tmpdir_release, tempName)
tempFile2 = path.join(TestExternalGaudiExec.tmpdir_release, tempName2)
FileBuffer(tempName, tempContent).create(tempFile)
FileBuffer(tempName2, tempContent2).create(tempFile2)
j.inputfiles = [tempFile, LocalFile(tempFile2)]
j.outputfiles = [LocalFile(tempName), LocalFile(tempName2)]
j.submit()
run_until_completed(j)
assert j.status == 'completed'
outputDir = stripProxy(j).getOutputWorkspace(create=False).getPath()
assert path.isfile(tempFile)
assert path.isfile(tempFile2)
assert tempContent in open(tempFile).read()
assert tempContent2 in open(tempFile2).read()
def testSubmitJobDiracWithInput(self):
j = TestExternalGaudiExec._constructJob()
from GangaCore.GPI import LocalFile, Dirac, DiracProxy
j.backend = Dirac(credential_requirements=DiracProxy(group='lhcb_user',
encodeDefaultProxyFileName=False))
tempName = 'testGaudiExecFile.txt'
tempContent = '12345'
tempFile = path.join(TestExternalGaudiExec.tmpdir_release, tempName)
FileBuffer(tempName, tempContent).create(tempFile)
j.inputfiles = [tempFile]
j.outputfiles = [LocalFile(tempName)]
j.submit()
assert j.status == "submitted"
@classmethod
def tearDownClass(cls):
"""
Remove the 'release area'
"""
shutil.rmtree(cls.tmpdir_release, ignore_errors=True) |
4,960 | grep for endianness fun | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2016-2018 (ita)
"""
Various configuration tests.
"""
from waflib import Task
from waflib.Configure import conf
from waflib.TaskGen import feature, before_method, after_method
LIB_CODE = '''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllexport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void) { return 9; }
'''
MAIN_CODE = '''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllimport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void);
int main(int argc, char **argv) {
(void)argc; (void)argv;
return !(lib_func() == 9);
}
'''
@feature('link_lib_test')
@before_method('process_source')
def link_lib_test_fun(self):
"""
The configuration test :py:func:`waflib.Configure.run_build` declares a unique task generator,
so we need to create other task generators from here to check if the linker is able to link libraries.
"""
def write_test_file(task):
task.outputs[0].write(task.generator.code)
rpath = []
if getattr(self, 'add_rpath', False):
rpath = [self.bld.path.get_bld().abspath()]
mode = self.mode
m = '%s %s' % (mode, mode)
ex = self.test_exec and 'test_exec' or ''
bld = self.bld
bld(rule=write_test_file, target='test.' + mode, code=LIB_CODE)
bld(rule=write_test_file, target='main.' + mode, code=MAIN_CODE)
bld(features='%sshlib' % m, source='test.' + mode, target='test')
bld(features='%sprogram %s' % (m, ex), source='main.' + mode, target='app', use='test', rpath=rpath)
@conf
def check_library(self, mode=None, test_exec=True):
"""
Checks if libraries can be linked with the current linker. Uses :py:func:`waflib.Tools.c_tests.link_lib_test_fun`.
:param mode: c or cxx or d
:type mode: string
"""
if not mode:
mode = 'c'
if self.env.CXX:
mode = 'cxx'
self.check(
compile_filename = [],
features = 'link_lib_test',
msg = 'Checking for libraries',
mode = mode,
test_exec = test_exec)
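# Usage sketch (assuming a conventional waf wscript):
#
#   def configure(conf):
#       conf.load('compiler_c')
#       conf.check_library(test_exec=True)  # builds a shlib plus a program linked against it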
########################################################################################
INLINE_CODE = '''
typedef int foo_t;
static %s foo_t static_foo () {return 0; }
%s foo_t foo () {
return 0;
}
'''
INLINE_VALUES = ['inline', '__inline__', '__inline']
@conf
def check_inline(self, **kw):
"""
Checks for the right value for inline macro.
Define INLINE_MACRO to 1 if the define is found.
If the inline macro is not 'inline', add a define to the ``config.h`` (#define inline __inline__)
:param define_name: define INLINE_MACRO by default to 1 if the macro is defined
:type define_name: string
:param features: by default *c* or *cxx* depending on the compiler present
:type features: list of string
"""
self.start_msg('Checking for inline')
if not 'define_name' in kw:
kw['define_name'] = 'INLINE_MACRO'
if not 'features' in kw:
if self.env.CXX:
kw['features'] = ['cxx']
else:
kw['features'] = ['c']
for x in INLINE_VALUES:
kw['fragment'] = INLINE_CODE % (x, x)
try:
self.check(**kw)
except self.errors.ConfigurationError:
continue
else:
self.end_msg(x)
if x != 'inline':
self.define('inline', x, quote=False)
return x
self.fatal('could not use inline functions')
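# Usage sketch: conf.check_inline() returns the first keyword that compiles,
# defines INLINE_MACRO, and for a non-standard keyword adds e.g.
# '#define inline __inline__' to config.h -- assuming conf.write_config_header()
# is called later in configure().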
########################################################################################
LARGE_FRAGMENT = '''#include <unistd.h>
int main(int argc, char **argv) {
(void)argc; (void)argv;
return !(sizeof(off_t) >= 8);
}
'''
@conf
def check_large_file(self, **kw):
"""
Checks for large file support and define the macro HAVE_LARGEFILE
The test is skipped on win32 systems (DEST_BINFMT == pe).
:param define_name: define to set, by default *HAVE_LARGEFILE*
:type define_name: string
:param execute: execute the test (yes by default)
:type execute: bool
"""
if not 'define_name' in kw:
kw['define_name'] = 'HAVE_LARGEFILE'
if not 'execute' in kw:
kw['execute'] = True
if not 'features' in kw:
if self.env.CXX:
kw['features'] = ['cxx', 'cxxprogram']
else:
kw['features'] = ['c', 'cprogram']
kw['fragment'] = LARGE_FRAGMENT
kw['msg'] = 'Checking for large file support'
ret = True
try:
if self.env.DEST_BINFMT != 'pe':
ret = self.check(**kw)
except self.errors.ConfigurationError:
pass
else:
if ret:
return True
kw['msg'] = 'Checking for -D_FILE_OFFSET_BITS=64'
kw['defines'] = ['_FILE_OFFSET_BITS=64']
try:
ret = self.check(**kw)
except self.errors.ConfigurationError:
pass
else:
self.define('_FILE_OFFSET_BITS', 64)
return ret
self.fatal('There is no support for large files')
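# Usage sketch: conf.check_large_file() defines HAVE_LARGEFILE when off_t is
# already 64-bit and otherwise retries with (and defines) _FILE_OFFSET_BITS=64.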
########################################################################################
ENDIAN_FRAGMENT = '''
short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
int use_ascii (int i) {
return ascii_mm[i] + ascii_ii[i];
}
short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
int use_ebcdic (int i) {
return ebcdic_mm[i] + ebcdic_ii[i];
}
extern int foo;
'''
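# How the fragment works: each 16-bit initializer packs two ASCII characters,
# so the compiled object file contains the literal bytes "BIGenDianSyS" on a
# big-endian target and "LiTTleEnDian" on a little-endian one. The
# grep_for_endianness task below searches for those markers, so no test
# program has to be executed -- this also works when cross-compiling.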
class grep_for_endianness(Task.Task):
"""
Task that reads a binary and tries to determine the endianness
"""
color = 'PINK'
def run(self):
txt = self.inputs[0].read(flags='rb').decode('latin-1')
if txt.find('LiTTleEnDian') > -1:
self.generator.tmp.append('little')
elif txt.find('BIGenDianSyS') > -1:
self.generator.tmp.append('big')
else:
return -1
@feature('grep_for_endianness')
@after_method('process_source')
def METHOD_NAME(self):
"""
Used by the endianness configuration test
"""
self.create_task('grep_for_endianness', self.compiled_tasks[0].outputs[0])
@conf
def check_endianness(self):
"""
Executes a configuration test to determine the endianness
"""
tmp = []
def check_msg(self):
return tmp[0]
self.check(fragment=ENDIAN_FRAGMENT, features='c grep_for_endianness',
msg='Checking for endianness', define='ENDIANNESS', tmp=tmp, okmsg=check_msg)
return tmp[0]
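# Usage sketch (assuming a conventional waf wscript):
#
#   def configure(conf):
#       conf.load('compiler_c')
#       conf.check_endianness()  # defines ENDIANNESS to 'little' or 'big'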
|
4,961 | stop all | import re
import shutil
import subprocess
import time
import pytest
from unit.applications.lang.python import ApplicationPython
from unit.option import option
prerequisites = {'modules': {'python': 'any'}}
client = ApplicationPython()
@pytest.fixture(autouse=True)
def setup_method_fixture(temp_dir):
client.app_name = f'app-{temp_dir.split("/")[-1]}'
client.app_proc = f'applications/{client.app_name}/processes'
client.load('empty', client.app_name)
def pids_for_process():
time.sleep(0.2)
output = subprocess.check_output(['ps', 'ax'])
pids = set()
for m in re.findall(
fr'.*unit: "{client.app_name}" application', output.decode()
):
pids.add(re.search(r'^\s*(\d+)', m).group(1))
return pids
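# Unit retitles application workers to 'unit: "<app>" application', which is
# what the regex above matches in the `ps ax` output; the leading PID is then
# captured from each matching line.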
def conf_proc(conf, path=None):
if path is None:
path = client.app_proc
assert 'success' in client.conf(conf, path), 'configure processes'
def METHOD_NAME():
assert 'success' in client.conf({"listeners": {}, "applications": {}})
assert len(pids_for_process()) == 0, 'stop all'
@pytest.mark.skip('not yet')
def test_python_processes_idle_timeout_zero():
conf_proc({"spare": 0, "max": 2, "idle_timeout": 0})
client.get()
assert len(pids_for_process()) == 0, 'idle timeout 0'
def test_python_prefork():
conf_proc('2')
pids = pids_for_process()
assert len(pids) == 2, 'prefork 2'
client.get()
assert pids_for_process() == pids, 'prefork still 2'
conf_proc('4')
pids = pids_for_process()
assert len(pids) == 4, 'prefork 4'
client.get()
assert pids_for_process() == pids, 'prefork still 4'
METHOD_NAME()
@pytest.mark.skip('not yet')
def test_python_prefork_same_processes():
conf_proc('2')
pids = pids_for_process()
conf_proc('4')
pids_new = pids_for_process()
assert pids.issubset(pids_new), 'prefork same processes'
def test_python_ondemand():
conf_proc({"spare": 0, "max": 8, "idle_timeout": 1})
assert len(pids_for_process()) == 0, 'on-demand 0'
client.get()
pids = pids_for_process()
assert len(pids) == 1, 'on-demand 1'
client.get()
assert pids_for_process() == pids, 'on-demand still 1'
time.sleep(1)
assert len(pids_for_process()) == 0, 'on-demand stop idle'
METHOD_NAME()
def test_python_scale_updown():
conf_proc({"spare": 2, "max": 8, "idle_timeout": 1})
pids = pids_for_process()
assert len(pids) == 2, 'updown 2'
client.get()
pids_new = pids_for_process()
assert len(pids_new) == 3, 'updown 3'
assert pids.issubset(pids_new), 'updown 3 only 1 new'
client.get()
assert pids_for_process() == pids_new, 'updown still 3'
time.sleep(1)
pids = pids_for_process()
assert len(pids) == 2, 'updown stop idle'
client.get()
pids_new = pids_for_process()
assert len(pids_new) == 3, 'updown again 3'
assert pids.issubset(pids_new), 'updown again 3 only 1 new'
METHOD_NAME()
def test_python_reconfigure():
conf_proc({"spare": 2, "max": 6, "idle_timeout": 1})
pids = pids_for_process()
assert len(pids) == 2, 'reconf 2'
client.get()
pids_new = pids_for_process()
assert len(pids_new) == 3, 'reconf 3'
assert pids.issubset(pids_new), 'reconf 3 only 1 new'
conf_proc('6', f'{client.app_proc}/spare')
pids = pids_for_process()
assert len(pids) == 6, 'reconf 6'
client.get()
assert pids_for_process() == pids, 'reconf still 6'
METHOD_NAME()
def test_python_idle_timeout():
conf_proc({"spare": 0, "max": 6, "idle_timeout": 2})
client.get()
pids = pids_for_process()
assert len(pids) == 1, 'idle timeout 1'
time.sleep(1)
client.get()
time.sleep(1)
pids_new = pids_for_process()
assert len(pids_new) == 1, 'idle timeout still 1'
assert pids_for_process() == pids, 'idle timeout still 1 same pid'
time.sleep(1)
assert len(pids_for_process()) == 0, 'idle timed out'
def test_python_processes_connection_keepalive():
conf_proc({"spare": 0, "max": 6, "idle_timeout": 2})
(_, sock) = client.get(
headers={'Host': 'localhost', 'Connection': 'keep-alive'},
start=True,
read_timeout=1,
)
assert len(pids_for_process()) == 1, 'keepalive connection 1'
time.sleep(2)
assert len(pids_for_process()) == 0, 'keepalive connection 0'
sock.close()
def test_python_processes_access():
conf_proc('1')
path = f'/{client.app_proc}'
assert 'error' in client.conf_get(f'{path}/max')
assert 'error' in client.conf_get(f'{path}/spare')
assert 'error' in client.conf_get(f'{path}/idle_timeout')
def test_python_processes_invalid():
assert 'error' in client.conf(
{"spare": -1}, client.app_proc
), 'negative spare'
assert 'error' in client.conf({"max": -1}, client.app_proc), 'negative max'
assert 'error' in client.conf(
{"idle_timeout": -1}, client.app_proc
), 'negative idle_timeout'
assert 'error' in client.conf(
{"spare": 2}, client.app_proc
), 'spare gt max default'
assert 'error' in client.conf(
{"spare": 2, "max": 1}, client.app_proc
), 'spare gt max'
assert 'error' in client.conf(
{"spare": 0, "max": 0}, client.app_proc
), 'max zero'
def test_python_restart(temp_dir):
shutil.copyfile(
f'{option.test_dir}/python/restart/v1.py', f'{temp_dir}/wsgi.py'
)
client.load(
temp_dir,
name=client.app_name,
processes=1,
environment={'PYTHONDONTWRITEBYTECODE': '1'},
)
b = client.get()['body']
assert b == "v1", 'process started'
shutil.copyfile(
f'{option.test_dir}/python/restart/v2.py', f'{temp_dir}/wsgi.py'
)
b = client.get()['body']
assert b == "v1", 'still old process'
assert 'success' in client.conf_get(
f'/control/applications/{client.app_name}/restart'
), 'restart processes'
b = client.get()['body']
assert b == "v2", 'new process started'
assert 'error' in client.conf_get(
'/control/applications/blah/restart'
), 'application incorrect'
assert 'error' in client.conf_delete(
f'/control/applications/{client.app_name}/restart'
), 'method incorrect'
def test_python_restart_multi():
conf_proc('2')
pids = pids_for_process()
assert len(pids) == 2, 'restart 2 started'
assert 'success' in client.conf_get(
f'/control/applications/{client.app_name}/restart'
), 'restart processes'
new_pids = pids_for_process()
assert len(new_pids) == 2, 'restart still 2'
assert len(new_pids.intersection(pids)) == 0, 'restart all new'
def test_python_restart_longstart():
client.load(
'restart',
name=client.app_name,
module="longstart",
processes={"spare": 1, "max": 2, "idle_timeout": 5},
)
assert len(pids_for_process()) == 1, 'longstarts == 1'
client.get()
pids = pids_for_process()
assert len(pids) == 2, 'longstarts == 2'
assert 'success' in client.conf_get(
f'/control/applications/{client.app_name}/restart'
), 'restart processes'
# wait for longstarted app
time.sleep(2)
new_pids = pids_for_process()
assert len(new_pids) == 1, 'restart 1'
assert len(new_pids.intersection(pids)) == 0, 'restart all new' |
4,962 | fixwikilinks | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Render the user / install guide in pdf and wiki formats
# Outputs: userguide.wiki, UserGuide.pdf, install.wiki, INSTALL.pdf
# Author: Alex Dumitrache <broscutamaker@gmail.com>
# License: GPL
import os, re, time, string
import urllib
from mkdoc_utils import system_or_exit
from mkdoc_utils import include
from mkdoc_utils import sed_sub_tex_spec_chars
rst2latex = os.getenv("RST2LATEX", "rst2latex.py")
def include_indent(o, filename, start=0):
f = open(filename).readlines();
for l in f[start:]:
o.write(l.replace("~", "`").replace("--", "~~").replace("~-", "~~").replace("==", "--").replace("-=", "--"))
o.write("\n");
def sub(file, fr, to):
txt = open(file).read()
txt = re.sub(fr, to, txt);
f = open(file,"w")
f.write(txt)
f.close()
def METHOD_NAME(file):
txt = open(file).read()
while 1:
m = re.search(r"\[\[([^]|]+)([^]]*)\]\]", txt, re.MULTILINE)
if not m: break
origstr = "[[" + m.groups()[0] + m.groups()[1] + "]]"
print origstr
x = m.groups()[0]
if 'Image:' in origstr:
txt = txt.replace(origstr, "")
else:
txt = txt.replace(origstr, "`%s <http://magiclantern.wikia.com/wiki/%s>`_" % (x, urllib.quote(x)))
#~ sub("INSTALL.rst", , ")
f = open(file,"w")
f.write(txt)
f.close()
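# Transformation example (hypothetical page name):
#   "[[Unified Menu]]"  ->  `Unified Menu <http://magiclantern.wikia.com/wiki/Unified%20Menu>`_
# while any "[[Image:...]]" link is removed outright.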
def labelhack(file): # bug in rst2latex? it forgets to place labels in tex source
txt = ""
for l in open(file).readlines():
txt += l
m = re.match(".. _(.*):", l)
if m:
label = m.groups()[0]
txt += r""".. raw:: latex
\subsubsection*{}\label{%s}%%
""" % label.lower().replace("/"," ").replace(" ", " ").replace(" ", " ").replace(" ", "-").replace(".", "-")
f = open(file,"w")
f.write(txt)
f.close()
def add_menu_items_to_contents(file):
txt = ""
for l in open(file).readlines():
txt += l
m = re.match("^\*\*(.*)\*\*\ *$", l)
if m:
item = m.groups()[0]
txt += r"""
.. raw:: latex
\addcontentsline{toc}{subsubsection}{%s}
""" % item.replace("**","").replace("_", r"\_")
f = open(file,"w")
f.write(txt)
f.close()
system_or_exit("montage ../data/cropmks/CineSco2.bmp ../data/cropmks/CrssMtr2.bmp ../data/cropmks/Passport.bmp ../data/cropmks/PhiPhoto.bmp -tile 4x1 -geometry 300x200+5+5 Cropmarks550D.png")
f = open("FEATURES.txt").readlines();
m = open("MANUAL.txt").readlines();
c = open("CONFIG.txt").readlines();
o = open("userguide.rst", "w")
print >> o, """Magic Lantern v2.3 -- User's Guide
====================================================
"""
include(o, "FEATURES.txt");
include(o, "MANUAL.txt", 1);
include(o, "MENUSTART.txt");
include(o, "MN-AUDIO.txt");
include(o, "MN-EXPO.txt");
include(o, "MN-OVERLAY.txt");
include(o, "MN-MOVIE.txt");
include(o, "MN-SHOOT.txt");
include(o, "MN-FOCUS.txt");
include(o, "MN-DISPLAY.txt");
include(o, "MN-PREFS.txt");
include(o, "MN-DEBUG.txt");
include(o, "MENUEND.txt");
include_indent(o, "FAQ.txt");
o.close()
system_or_exit(r"sed -i -e s/.*{{.*}}.*//g userguide.rst")
system_or_exit("pandoc -f rst -t latex -o credits.tex CREDITS.txt")
METHOD_NAME("userguide.rst")
labelhack("userguide.rst")
add_menu_items_to_contents("userguide.rst")
system_or_exit(r"sed -i -e 's/^#//g' userguide.rst")
#system_or_exit("pandoc -f rst -t latex -o userguide-body.tex userguide.rst")
system_or_exit("%s userguide.rst --output-encoding=utf8 --template=ug-template.tex --table-style booktabs > UserGuide.tex" % (rst2latex,))
sed_sub_tex_spec_chars("UserGuide.tex")
#~ system_or_exit(r"sed -i -e 's/\\addcontentsline{toc}{section}{Features}//g' UserGuide.tex")
system_or_exit("pdflatex -interaction=batchmode UserGuide.tex")
system_or_exit("pdflatex -interaction=batchmode UserGuide.tex")
#system_or_exit(r"sed -i 's/\\{\\{clr\\}\\}//g' userguide-body.tex")
#os.system("pdflatex UserGuide.tex")
#os.system("pdflatex UserGuide.tex")
system_or_exit("cp INSTALL.txt INSTALL.rst")
system_or_exit("pandoc -f rst -t mediawiki -s -o install.wiki INSTALL.rst")
#sub("INSTALL.rst", r"\[\[Video:[^]]+\]\]", "`Video installation tutorial <http://vimeo.com/18035870>`_ by saw0media")
METHOD_NAME("INSTALL.rst")
system_or_exit("pandoc -f rst -t latex -o install-body.tex INSTALL.rst")
system_or_exit("%s INSTALL.rst --output-encoding=utf8 --template=ins-template.tex > INSTALL.tex" % (rst2latex,))
system_or_exit(r"sed -i -e 's/\\{\\{clr\\}\\}//g' INSTALL.tex")
system_or_exit("pdflatex -interaction=batchmode INSTALL.tex")
system_or_exit("pdflatex -interaction=batchmode INSTALL.tex") |
4,963 | get guild webhooks | from typing import TYPE_CHECKING, Any, List, Optional
import discord_typings
from interactions.client.utils.serializer import dict_filter_none
from ..route import Route
__all__ = ("WebhookRequests",)
if TYPE_CHECKING:
from interactions.models.discord.snowflake import Snowflake_Type
from interactions import UPLOADABLE_TYPE
class WebhookRequests:
request: Any
async def create_webhook(
self, channel_id: "Snowflake_Type", name: str, avatar: Any = None
) -> discord_typings.WebhookData:
"""
Create a new webhook.
Args:
channel_id: The id of the channel to add this webhook to
name: name of the webhook (1-80 characters)
avatar: The image for the default webhook avatar
"""
return await self.request(
Route("POST", "/channels/{channel_id}/webhooks", channel_id=channel_id),
payload={"name": name, "avatar": avatar},
)
async def get_channel_webhooks(self, channel_id: "Snowflake_Type") -> List[discord_typings.WebhookData]:
"""
Return a list of channel webhook objects.
Args:
channel_id: The id of the channel to query
Returns:
List of webhook objects
"""
return await self.request(Route("GET", "/channels/{channel_id}/webhooks", channel_id=channel_id))
async def METHOD_NAME(self, guild_id: "Snowflake_Type") -> List[discord_typings.WebhookData]:
"""
Return a list of guild webhook objects.
Args:
guild_id: The id of the guild to query
Returns:
List of webhook objects
"""
return await self.request(Route("GET", "/guilds/{guild_id}/webhooks", guild_id=guild_id))
async def get_webhook(
self, webhook_id: "Snowflake_Type", webhook_token: str | None = None
) -> discord_typings.WebhookData:
"""
Return the new webhook object for the given id.
Args:
webhook_id: The ID of the webhook to get
webhook_token: The token for the webhook
Returns:
Webhook object
"""
endpoint = "/webhooks/{webhook_id}" + f"/{webhook_token}" if webhook_token else ""
return await self.request(Route("GET", endpoint, webhook_id=webhook_id, webhook_token=webhook_token))
async def modify_webhook(
self,
webhook_id: "Snowflake_Type",
name: str,
avatar: Any,
channel_id: "Snowflake_Type",
webhook_token: str | None = None,
) -> discord_typings.WebhookData:
"""
Modify a webhook.
Args:
name: the default name of the webhook
avatar: image for the default webhook avatar
channel_id: the new channel id this webhook should be moved to
webhook_id: The ID of the webhook to modify
webhook_token: The token for the webhook
"""
endpoint = "/webhooks/{webhook_id}" + f"/{webhook_token}" if webhook_token else ""
return await self.request(
Route("PATCH", endpoint, webhook_id=webhook_id, webhook_token=webhook_token),
payload={"name": name, "avatar": avatar, "channel_id": channel_id},
)
async def delete_webhook(self, webhook_id: "Snowflake_Type", webhook_token: str | None = None) -> None:
"""
Delete a webhook.
Args:
webhook_id: The ID of the webhook to delete
webhook_token: The token for the webhook
Returns:
Webhook object
"""
endpoint = "/webhooks/{webhook_id}" + f"/{webhook_token}" if webhook_token else ""
return await self.request(Route("DELETE", endpoint, webhook_id=webhook_id, webhook_token=webhook_token))
async def execute_webhook(
self,
webhook_id: "Snowflake_Type",
webhook_token: str,
payload: dict,
wait: bool = False,
thread_id: "Snowflake_Type" = None,
files: list["UPLOADABLE_TYPE"] | None = None,
) -> Optional[discord_typings.MessageData]:
"""
Execute a webhook. Basically send a message as the webhook.
Args:
webhook_id: The ID of the webhook to delete
webhook_token: The token for the webhook
payload: The JSON payload for the message
wait: Waits for server confirmation of message send before response
thread_id: Send a message to the specified thread
files: The files to send with this message
Returns:
The sent `message`, if `wait` is True else None
"""
return await self.request(
Route("POST", "/webhooks/{webhook_id}/{webhook_token}", webhook_id=webhook_id, webhook_token=webhook_token),
params=dict_filter_none({"wait": "true" if wait else "false", "thread_id": thread_id}),
payload=payload,
files=files,
)
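# Usage sketch (client name assumed): on a low-level client that mixes in
# WebhookRequests, a message could be sent with
#   await http.execute_webhook(hook_id, hook_token,
#                              payload={"content": "hello"}, wait=True)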
async def get_webhook_message(
self, webhook_id: "Snowflake_Type", webhook_token: str, message_id: "Snowflake_Type"
) -> discord_typings.MessageData:
"""
Returns a previously-sent webhook message from the same token. Returns a message object on success.
Args:
webhook_id: The ID of the webhook to delete
webhook_token: The token for the webhook
message_id: The ID of a message sent by this webhook
Returns:
A message object on success
"""
return await self.request(
Route(
"GET",
"/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}",
webhook_id=webhook_id,
webhook_token=webhook_token,
message_id=message_id,
)
)
async def edit_webhook_message(
self,
webhook_id: "Snowflake_Type",
webhook_token: str,
message_id: "Snowflake_Type",
payload: dict,
files: None | list["UPLOADABLE_TYPE"] = None,
) -> discord_typings.MessageData:
"""
Edits a previously-sent webhook message from the same token.
Args:
webhook_id: The ID of the webhook to delete
webhook_token: The token for the webhook
message_id: The ID of a message sent by this webhook
payload: The JSON payload for the message
files: The files to send in this message
Returns:
The updated message on success
"""
return await self.request(
Route(
"PATCH",
"/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}",
webhook_id=webhook_id,
webhook_token=webhook_token,
message_id=message_id,
),
payload=payload,
files=files,
)
async def delete_webhook_message(
self, webhook_id: "Snowflake_Type", webhook_token: str, message_id: "Snowflake_Type"
) -> None:
"""
Delete a message that was created by the same token.
Args:
webhook_id: The ID of the webhook to delete
webhook_token: The token for the webhook
message_id: The ID of a message sent by this webhook
"""
return await self.request(
Route(
"DELETE",
"/webhooks/{webhook_id}/{webhook_token}/messages/{message_id}",
webhook_id=webhook_id,
webhook_token=webhook_token,
message_id=message_id,
)
) |
4,964 | test function dump restore | import pytest
from redis.exceptions import ResponseError
from .conftest import assert_resp_response, skip_if_server_version_lt
engine = "lua"
lib = "mylib"
lib2 = "mylib2"
function = "redis.register_function{function_name='myfunc', callback=function(keys, \
args) return args[1] end, flags={ 'no-writes' }}"
function2 = "redis.register_function('hello', function() return 'Hello World' end)"
set_function = "redis.register_function('set', function(keys, args) return \
redis.call('SET', keys[1], args[1]) end)"
get_function = "redis.register_function('get', function(keys, args) return \
redis.call('GET', keys[1]) end)"
@skip_if_server_version_lt("7.0.0")
class TestFunction:
@pytest.fixture(autouse=True)
def reset_functions(self, r):
r.function_flush()
@pytest.mark.onlynoncluster
def test_function_load(self, r):
assert b"mylib" == r.function_load(f"#!{engine} name={lib} \n {function}")
assert b"mylib" == r.function_load(
f"#!{engine} name={lib} \n {function}", replace=True
)
with pytest.raises(ResponseError):
r.function_load(f"#!{engine} name={lib} \n {function}")
with pytest.raises(ResponseError):
r.function_load(f"#!{engine} name={lib2} \n {function}")
def test_function_delete(self, r):
r.function_load(f"#!{engine} name={lib} \n {set_function}")
with pytest.raises(ResponseError):
r.function_load(f"#!{engine} name={lib} \n {set_function}")
assert r.fcall("set", 1, "foo", "bar") == b"OK"
assert r.function_delete("mylib")
with pytest.raises(ResponseError):
r.fcall("set", 1, "foo", "bar")
def test_function_flush(self, r):
r.function_load(f"#!{engine} name={lib} \n {function}")
assert r.fcall("myfunc", 0, "hello") == b"hello"
assert r.function_flush()
with pytest.raises(ResponseError):
r.fcall("myfunc", 0, "hello")
with pytest.raises(ResponseError):
r.function_flush("ABC")
@pytest.mark.onlynoncluster
def test_function_list(self, r):
r.function_load(f"#!{engine} name={lib} \n {function}")
res = [
[
b"library_name",
b"mylib",
b"engine",
b"LUA",
b"functions",
[[b"name", b"myfunc", b"description", None, b"flags", [b"no-writes"]]],
]
]
resp3_res = [
{
b"library_name": b"mylib",
b"engine": b"LUA",
b"functions": [
{b"name": b"myfunc", b"description": None, b"flags": {b"no-writes"}}
],
}
]
assert_resp_response(r, r.function_list(), res, resp3_res)
assert_resp_response(r, r.function_list(library="*lib"), res, resp3_res)
res[0].extend(
[b"library_code", f"#!{engine} name={lib} \n {function}".encode()]
)
resp3_res[0][b"library_code"] = f"#!{engine} name={lib} \n {function}".encode()
assert_resp_response(r, r.function_list(withcode=True), res, resp3_res)
@pytest.mark.onlycluster
def test_function_list_on_cluster(self, r):
r.function_load(f"#!{engine} name={lib} \n {function}")
function_list = [
[
b"library_name",
b"mylib",
b"engine",
b"LUA",
b"functions",
[[b"name", b"myfunc", b"description", None, b"flags", [b"no-writes"]]],
]
]
resp3_function_list = [
{
b"library_name": b"mylib",
b"engine": b"LUA",
b"functions": [
{b"name": b"myfunc", b"description": None, b"flags": {b"no-writes"}}
],
}
]
primaries = r.get_primaries()
res = {}
resp3_res = {}
for node in primaries:
res[node.name] = function_list
resp3_res[node.name] = resp3_function_list
assert_resp_response(r, r.function_list(), res, resp3_res)
assert_resp_response(r, r.function_list(library="*lib"), res, resp3_res)
node = primaries[0].name
code = f"#!{engine} name={lib} \n {function}".encode()
res[node][0].extend([b"library_code", code])
resp3_res[node][0][b"library_code"] = code
assert_resp_response(r, r.function_list(withcode=True), res, resp3_res)
def test_fcall(self, r):
r.function_load(f"#!{engine} name={lib} \n {set_function}")
r.function_load(f"#!{engine} name={lib2} \n {get_function}")
assert r.fcall("set", 1, "foo", "bar") == b"OK"
assert r.fcall("get", 1, "foo") == b"bar"
with pytest.raises(ResponseError):
r.fcall("myfunc", 0, "hello")
def test_fcall_ro(self, r):
r.function_load(f"#!{engine} name={lib} \n {function}")
assert r.fcall_ro("myfunc", 0, "hello") == b"hello"
r.function_load(f"#!{engine} name={lib2} \n {set_function}")
with pytest.raises(ResponseError):
r.fcall_ro("set", 1, "foo", "bar")
def METHOD_NAME(self, r):
r.function_load(f"#!{engine} name={lib} \n {set_function}")
payload = r.function_dump()
assert r.fcall("set", 1, "foo", "bar") == b"OK"
r.function_delete("mylib")
with pytest.raises(ResponseError):
r.fcall("set", 1, "foo", "bar")
assert r.function_restore(payload)
assert r.fcall("set", 1, "foo", "bar") == b"OK"
r.function_load(f"#!{engine} name={lib2} \n {get_function}")
assert r.fcall("get", 1, "foo") == b"bar"
r.function_delete("mylib")
assert r.function_restore(payload, "FLUSH")
with pytest.raises(ResponseError):
r.fcall("get", 1, "foo") |
4,965 | test job logger warning and error messages | import logging
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
from evalml.automl.automl_search import AutoMLSearch
from evalml.automl.engine import evaluate_pipeline, train_pipeline
from evalml.automl.engine.engine_base import JobLogger
from evalml.automl.utils import AutoMLConfig
from evalml.objectives import F1, LogLossBinary
from evalml.preprocessing import split_data
def test_train_and_score_pipelines(
AutoMLTestEnv,
dummy_classifier_estimator_class,
dummy_binary_pipeline,
X_y_binary,
):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
max_time=1,
max_batches=1,
allowed_component_graphs={
"Mock Binary Classification Pipeline": [dummy_classifier_estimator_class],
},
optimize_thresholds=False,
)
env = AutoMLTestEnv("binary")
with env.test_context(score_return_value={automl.objective.name: 0.42}):
evaluation_result = evaluate_pipeline(
dummy_binary_pipeline,
automl.automl_config,
automl.X_train,
automl.y_train,
logger=MagicMock(),
).get("scores")
assert env.mock_fit.call_count == automl.data_splitter.get_n_splits()
assert env.mock_score.call_count == automl.data_splitter.get_n_splits()
assert evaluation_result.get("training_time") is not None
assert evaluation_result.get("cv_score_mean") == 0.42
pd.testing.assert_series_equal(
evaluation_result.get("cv_scores"),
pd.Series([0.42] * 3),
)
for i in range(automl.data_splitter.get_n_splits()):
assert (
evaluation_result["cv_data"][i]["all_objective_scores"]["Log Loss Binary"]
== 0.42
)
def test_train_and_score_pipelines_error(
AutoMLTestEnv,
dummy_classifier_estimator_class,
dummy_binary_pipeline,
X_y_binary,
caplog,
):
X, y = X_y_binary
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
max_time=1,
max_batches=1,
allowed_component_graphs={
"Mock Binary Classification Pipeline": [dummy_classifier_estimator_class],
},
optimize_thresholds=False,
)
env = AutoMLTestEnv("binary")
job_log = JobLogger()
with env.test_context(mock_score_side_effect=Exception("yeet")):
result = evaluate_pipeline(
dummy_binary_pipeline,
automl.automl_config,
automl.X_train,
automl.y_train,
logger=job_log,
)
evaluation_result, job_log = result.get("scores"), result.get("logger")
logger = logging.getLogger(__name__)
job_log.write_to_logger(logger)
assert env.mock_fit.call_count == automl.data_splitter.get_n_splits()
assert env.mock_score.call_count == automl.data_splitter.get_n_splits()
assert evaluation_result.get("training_time") is not None
assert np.isnan(evaluation_result.get("cv_score_mean"))
pd.testing.assert_series_equal(
evaluation_result.get("cv_scores"),
pd.Series([np.nan] * 3),
)
for i in range(automl.data_splitter.get_n_splits()):
assert np.isnan(
evaluation_result["cv_data"][i]["all_objective_scores"]["Log Loss Binary"],
)
assert "yeet" in caplog.text
@patch("evalml.automl.utils.split_data")
def test_train_pipeline_trains_and_tunes_threshold(
mock_split_data,
X_y_binary,
AutoMLTestEnv,
dummy_binary_pipeline,
):
X, y = X_y_binary
mock_split_data.return_value = split_data(
X,
y,
"binary",
test_size=0.2,
random_seed=0,
)
automl_config = AutoMLConfig(
None,
"binary",
LogLossBinary(),
[],
None,
True,
None,
0,
None,
None,
{},
)
env = AutoMLTestEnv("binary")
with env.test_context():
_ = train_pipeline(dummy_binary_pipeline, X, y, automl_config=automl_config)
env.mock_fit.assert_called_once()
env.mock_optimize_threshold.assert_not_called()
mock_split_data.assert_not_called()
automl_config = AutoMLConfig(
None,
"binary",
LogLossBinary(),
[],
F1(),
True,
None,
0,
None,
None,
{},
)
with env.test_context():
_ = train_pipeline(dummy_binary_pipeline, X, y, automl_config=automl_config)
env.mock_fit.assert_called_once()
env.mock_optimize_threshold.assert_called_once()
mock_split_data.assert_called_once()
def test_train_pipeline_trains_and_tunes_threshold_ts(
ts_data,
dummy_ts_binary_tree_classifier_pipeline_class,
):
X, _, y = ts_data(
train_features_index_dt=False,
train_target_index_dt=False,
no_features=True,
problem_type="time series binary",
)
params = {"gap": 1, "max_delay": 1, "forecast_horizon": 1, "time_index": "date"}
ts_binary = dummy_ts_binary_tree_classifier_pipeline_class(
parameters={"pipeline": params},
)
assert ts_binary.threshold is None
automl_config = AutoMLConfig(
None,
"time series binary",
LogLossBinary(),
[],
F1(),
True,
None,
0,
None,
None,
{},
)
cv_pipeline, _ = train_pipeline(ts_binary, X, y, automl_config=automl_config)
assert cv_pipeline.threshold is not None
def METHOD_NAME(caplog):
job_log = JobLogger()
job_log.warning("This is a warning!")
job_log.error("This is an error!")
logger = logging.getLogger(__name__)
job_log.write_to_logger(logger)
assert "This is a warning!" in caplog.text
assert "This is an error!" in caplog.text
def test_train_pipelines_cache(
AutoMLTestEnv,
dummy_classifier_estimator_class,
dummy_binary_pipeline,
X_y_binary,
caplog,
):
X, y = X_y_binary
X = pd.DataFrame(X)
automl_config = AutoMLConfig(
None,
"binary",
LogLossBinary(),
[],
None,
True,
None,
0,
None,
None,
{},
)
env = AutoMLTestEnv("binary")
with env.test_context():
res = train_pipeline(
dummy_binary_pipeline,
X,
y,
automl_config=automl_config,
get_hashes=False,
)
assert isinstance(res, tuple)
assert res[1] is None
with env.test_context():
res = train_pipeline(
dummy_binary_pipeline,
X,
y,
automl_config=automl_config,
get_hashes=True,
)
assert isinstance(res, tuple)
assert res[1] == hash(tuple(X.index))
def test_train_and_score_pipelines_cache(
AutoMLTestEnv,
dummy_classifier_estimator_class,
dummy_binary_pipeline,
X_y_binary,
):
X, y = X_y_binary
X = pd.DataFrame(X)
automl = AutoMLSearch(
X_train=X,
y_train=y,
problem_type="binary",
max_time=1,
max_batches=1,
allowed_component_graphs={
"Mock Binary Classification Pipeline": [dummy_classifier_estimator_class],
},
optimize_thresholds=False,
)
env = AutoMLTestEnv("binary")
with env.test_context(score_return_value={automl.objective.name: 0.42}):
evaluation_result = evaluate_pipeline(
dummy_binary_pipeline,
automl.automl_config,
automl.X_train,
automl.y_train,
logger=MagicMock(),
).get("cached_data")
assert evaluation_result
assert len(evaluation_result) == automl.data_splitter.n_splits |
4,966 | test bad args | import imghdr
import io
import os
import pathlib
import unittest
import warnings
from test.support import findfile, TESTFN, unlink
TEST_FILES = (
('python.png', 'png'),
('python.gif', 'gif'),
('python.bmp', 'bmp'),
('python.ppm', 'ppm'),
('python.pgm', 'pgm'),
('python.pbm', 'pbm'),
('python.jpg', 'jpeg'),
('python.ras', 'rast'),
('python.sgi', 'rgb'),
('python.tiff', 'tiff'),
('python.xbm', 'xbm'),
('python.webp', 'webp'),
('python.exr', 'exr'),
)
class UnseekableIO(io.FileIO):
def tell(self):
raise io.UnsupportedOperation
def seek(self, *args, **kwargs):
raise io.UnsupportedOperation
class TestImghdr(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.testfile = findfile('python.png', subdir='imghdrdata')
with open(cls.testfile, 'rb') as stream:
cls.testdata = stream.read()
def tearDown(self):
unlink(TESTFN)
def test_data(self):
for filename, expected in TEST_FILES:
filename = findfile(filename, subdir='imghdrdata')
self.assertEqual(imghdr.what(filename), expected)
with open(filename, 'rb') as stream:
self.assertEqual(imghdr.what(stream), expected)
with open(filename, 'rb') as stream:
data = stream.read()
self.assertEqual(imghdr.what(None, data), expected)
self.assertEqual(imghdr.what(None, bytearray(data)), expected)
def test_pathlike_filename(self):
for filename, expected in TEST_FILES:
with self.subTest(filename=filename):
filename = findfile(filename, subdir='imghdrdata')
self.assertEqual(imghdr.what(pathlib.Path(filename)), expected)
def test_register_test(self):
def test_jumbo(h, file):
if h.startswith(b'eggs'):
return 'ham'
imghdr.tests.append(test_jumbo)
self.addCleanup(imghdr.tests.pop)
self.assertEqual(imghdr.what(None, b'eggs'), 'ham')
def test_file_pos(self):
with open(TESTFN, 'wb') as stream:
stream.write(b'ababagalamaga')
pos = stream.tell()
stream.write(self.testdata)
with open(TESTFN, 'rb') as stream:
stream.seek(pos)
self.assertEqual(imghdr.what(stream), 'png')
self.assertEqual(stream.tell(), pos)
def METHOD_NAME(self):
with self.assertRaises(TypeError):
imghdr.what()
with self.assertRaises(AttributeError):
imghdr.what(None)
with self.assertRaises(TypeError):
imghdr.what(self.testfile, 1)
with self.assertRaises(AttributeError):
imghdr.what(os.fsencode(self.testfile))
with open(self.testfile, 'rb') as f:
with self.assertRaises(AttributeError):
imghdr.what(f.fileno())
def test_invalid_headers(self):
for header in (b'\211PN\r\n',
b'\001\331',
b'\x59\xA6',
b'cutecat',
b'000000JFI',
b'GIF80'):
self.assertIsNone(imghdr.what(None, header))
def test_string_data(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
for filename, _ in TEST_FILES:
filename = findfile(filename, subdir='imghdrdata')
with open(filename, 'rb') as stream:
data = stream.read().decode('latin1')
with self.assertRaises(TypeError):
imghdr.what(io.StringIO(data))
with self.assertRaises(TypeError):
imghdr.what(None, data)
def test_missing_file(self):
with self.assertRaises(FileNotFoundError):
imghdr.what('missing')
def test_closed_file(self):
stream = open(self.testfile, 'rb')
stream.close()
with self.assertRaises(ValueError) as cm:
imghdr.what(stream)
stream = io.BytesIO(self.testdata)
stream.close()
with self.assertRaises(ValueError) as cm:
imghdr.what(stream)
def test_unseekable(self):
with open(TESTFN, 'wb') as stream:
stream.write(self.testdata)
with UnseekableIO(TESTFN, 'rb') as stream:
with self.assertRaises(io.UnsupportedOperation):
imghdr.what(stream)
def test_output_stream(self):
with open(TESTFN, 'wb') as stream:
stream.write(self.testdata)
stream.seek(0)
with self.assertRaises(OSError) as cm:
imghdr.what(stream)
if __name__ == '__main__':
unittest.main() |
4,967 | run fn | # Copyright 2020 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import functools
import queue
from horovod.common.exceptions import HorovodInternalError, HostsUpdatedInterrupt
from horovod.runner.elastic.worker import HostUpdateResult, WorkerNotificationManager
notification_manager = WorkerNotificationManager()
class State(object):
"""State representation used for tracking in memory state across workers.
Args:
bcast_object: Function used to broadcast a variable from rank 0 to the other workers.
get_rank: Function that returns the current rank of this worker.
"""
def __init__(self, bcast_object, get_rank):
self._bcast_object = bcast_object
self._rank = get_rank
self._host_messages = queue.Queue()
self._last_updated_timestamp = 0
self._reset_callbacks = []
def register_reset_callbacks(self, callbacks):
"""Register callbacks that will be invoked following a reset event (worker added or removed).
For example, a common use of a reset callback would be to update the learning rate scale with the
new number of workers.
Args:
callbacks: list of functions to execute.
"""
self._reset_callbacks.extend(callbacks)
def on_reset(self):
self._host_messages = queue.Queue()
self.reset()
for callback in self._reset_callbacks:
callback()
def on_hosts_updated(self, timestamp, update_res):
self._host_messages.put((timestamp, update_res))
def commit(self):
"""Commits all modifications to state tracked by this object to host memory.
This call will also check for any changes to known hosts, and raise a `HostsUpdatedInterrupt`
if any were detected.
Because commits are a heavy operation involving data copy (potentially from GPU to host), it is
recommended to consider committing less frequently than once per batch. This allows users to tradeoff
between per-batch execution time and lost training steps in the event of a worker failure.
"""
self.save()
self.check_host_updates()
def check_host_updates(self):
"""Checks that a notification has been sent indicating that hosts can be added or will be removed.
Raises a `HostsUpdatedInterrupt` if such a notification has been received.
"""
# Iterate through the update messages sent from the server. If the update timestamp
# is greater than the last update timestamp, then trigger a HostsUpdatedException.
last_updated_timestamp = prev_timestamp = self._last_updated_timestamp
all_update = HostUpdateResult.no_update
while not self._host_messages.empty():
timestamp, update = self._host_messages.get()
if timestamp > last_updated_timestamp:
last_updated_timestamp = timestamp
all_update |= update
# In order to ensure all workers raise the exception at the same time, we need to sync
# the updated state across all the workers.
# TODO(travis): this should be a max allreduce to account for changes in rank 0
prev_timestamp, self._last_updated_timestamp, all_update = \
self._bcast_object((prev_timestamp, last_updated_timestamp, all_update))
# At this point, updated state is globally consistent across all ranks.
if self._last_updated_timestamp > prev_timestamp:
raise HostsUpdatedInterrupt(all_update == HostUpdateResult.removed)
def save(self):
"""Saves state to host memory."""
raise NotImplementedError()
def restore(self):
"""Restores the last committed state, undoing any uncommitted modifications."""
raise NotImplementedError()
def sync(self):
"""Synchronize state across workers."""
raise NotImplementedError()
def reset(self):
"""Reset objects and variables following a reset event (before synchronization)."""
pass
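# Typical shape of an elastic training loop built on State (sketch only; `hvd`
# stands for a framework binding such as horovod.torch, whose elastic.run is
# the wrapper defined at the bottom of this module):
#
#   @hvd.elastic.run
#   def train(state):
#       for state.epoch in range(state.epoch, num_epochs):
#           train_one_epoch(state)
#           state.commit()  # save() + check_host_updates()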
class ObjectState(State):
"""State for simple Python objects.
Every object is specified as a keyword argument, and will be assigned as an attribute.
Args:
bcast_object: Horovod broadcast object function used to sync state dictionary.
get_rank: Horovod rank function used to identify is this process is the coordinator.
kwargs: Properties to sync, will be exposed as attributes of the object.
"""
def __init__(self, bcast_object, get_rank, **kwargs):
self._bcast_object = bcast_object
self._saved_state = kwargs
self._set_attrs()
super(ObjectState, self).__init__(bcast_object=bcast_object, get_rank=get_rank)
def save(self):
new_state = {}
for attr in self._saved_state.keys():
new_state[attr] = getattr(self, attr)
self._saved_state = new_state
def restore(self):
self._set_attrs()
def sync(self):
if self._saved_state:
self._saved_state = self._bcast_object(self._saved_state)
self._set_attrs()
def _set_attrs(self):
for attr, value in self._saved_state.items():
setattr(self, attr, value)
def METHOD_NAME(func, reset):
@functools.wraps(func)
def wrapper(state, *args, **kwargs):
notification_manager.init()
notification_manager.register_listener(state)
skip_sync = False
try:
while True:
try:
if not skip_sync:
state.sync()
return func(state, *args, **kwargs)
except HorovodInternalError:
state.restore()
skip_sync = False
except HostsUpdatedInterrupt as e:
skip_sync = e.skip_sync
reset()
state.on_reset()
finally:
notification_manager.remove_listener(state)
return wrapper |
4,968 | test load errors cant read | """Tests for certbot.plugins.storage.PluginStorage"""
import json
import sys
from typing import Iterable
from typing import List
from typing import Optional
import unittest
from unittest import mock
import pytest
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
from certbot.tests import util as test_util
class PluginStorageTest(test_util.ConfigTestCase):
"""Test for certbot.plugins.storage.PluginStorage"""
def setUp(self):
super().setUp()
self.plugin_cls = test_util.DummyInstaller
filesystem.mkdir(self.config.config_dir)
with mock.patch("certbot.reverter.util"):
self.plugin = self.plugin_cls(config=self.config, name="mockplugin")
def METHOD_NAME(self):
with open(os.path.join(self.config.config_dir,
".pluginstorage.json"), "w") as fh:
fh.write("dummy")
# When unable to read file that exists
mock_open = mock.mock_open()
mock_open.side_effect = IOError
self.plugin.storage._storagepath = os.path.join(self.config.config_dir,
".pluginstorage.json")
with mock.patch("builtins.open", mock_open):
with mock.patch('certbot.compat.os.path.isfile', return_value=True):
with mock.patch("certbot.reverter.util"):
with pytest.raises(errors.PluginStorageError):
self.plugin.storage._load() # pylint: disable=protected-access
def test_load_errors_empty(self):
with open(os.path.join(self.config.config_dir, ".pluginstorage.json"), "w") as fh:
fh.write('')
with mock.patch("certbot.plugins.storage.logger.debug") as mock_log:
# Should not error out but write a debug log line instead
with mock.patch("certbot.reverter.util"):
nocontent = self.plugin_cls(self.config, "mockplugin")
with pytest.raises(KeyError):
nocontent.storage.fetch("value")
assert mock_log.called
assert "no values loaded" in mock_log.call_args[0][0]
def test_load_errors_corrupted(self):
with open(os.path.join(self.config.config_dir,
".pluginstorage.json"), "w") as fh:
fh.write('invalid json')
with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
with mock.patch("certbot.reverter.util"):
corrupted = self.plugin_cls(self.config, "mockplugin")
with pytest.raises(errors.PluginError):
corrupted.storage.fetch("value")
assert "is corrupted" in mock_log.call_args[0][0]
def test_save_errors_cant_serialize(self):
with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
# Set data as something that can't be serialized
self.plugin.storage._initialized = True # pylint: disable=protected-access
self.plugin.storage._storagepath = "/tmp/whatever"
self.plugin.storage._data = self.plugin_cls # pylint: disable=protected-access
with pytest.raises(errors.PluginStorageError):
self.plugin.storage.save()
assert "Could not serialize" in mock_log.call_args[0][0]
def test_save_errors_unable_to_write_file(self):
mock_open = mock.mock_open()
mock_open.side_effect = IOError
with mock.patch("certbot.compat.filesystem.open", mock_open):
with mock.patch("certbot.plugins.storage.logger.error") as mock_log:
self.plugin.storage._data = {"valid": "data"} # pylint: disable=protected-access
self.plugin.storage._initialized = True # pylint: disable=protected-access
self.plugin.storage._storagepath = "/tmp/whatever"
with pytest.raises(errors.PluginStorageError):
self.plugin.storage.save()
assert "Could not write" in mock_log.call_args[0][0]
def test_save_uninitialized(self):
with mock.patch("certbot.reverter.util"):
with pytest.raises(errors.PluginStorageError):
self.plugin_cls(self.config, "x").storage.save()
def test_namespace_isolation(self):
with mock.patch("certbot.reverter.util"):
plugin1 = self.plugin_cls(self.config, "first")
plugin2 = self.plugin_cls(self.config, "second")
plugin1.storage.put("first_key", "first_value")
with pytest.raises(KeyError):
plugin2.storage.fetch("first_key")
with pytest.raises(KeyError):
plugin2.storage.fetch("first")
assert plugin1.storage.fetch("first_key") == "first_value"
def test_saved_state(self):
self.plugin.storage.put("testkey", "testvalue")
# Write to disk
self.plugin.storage.save()
with mock.patch("certbot.reverter.util"):
another = self.plugin_cls(self.config, "mockplugin")
assert another.storage.fetch("testkey") == "testvalue"
with open(os.path.join(self.config.config_dir,
".pluginstorage.json"), 'r') as fh:
psdata = fh.read()
psjson = json.loads(psdata)
assert "mockplugin" in psjson.keys()
assert len(psjson) == 1
assert psjson["mockplugin"]["testkey"] == "testvalue"
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv[1:] + [__file__])) # pragma: no cover |
4,969 | test select labels inactive reasons | from django.urls import reverse
from pyquery import PyQuery as pq
from olympia import activity, amo, core
from olympia.activity.models import ActivityLog, ReviewActionReasonLog
from olympia.amo.tests import TestCase, addon_factory, user_factory
from olympia.reviewers.models import ReviewActionReason
class TestActivityLogAdmin(TestCase):
def setUp(self):
self.list_url = reverse('admin:activity_activitylog_changelist')
def test_list(self):
author = user_factory()
addon1 = addon_factory()
activity.log_create(
amo.LOG.ADD_VERSION, addon1.current_version, addon1, user=author
)
addon2 = addon_factory()
activity.log_create(
amo.LOG.ADD_VERSION, addon2.current_version, addon2, user=author
)
addon3 = addon_factory()
activity.log_create(
amo.LOG.ADD_VERSION, addon3.current_version, addon3, user=author
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, '*:*')
self.client.force_login(user)
with self.assertNumQueries(11):
# - 2 savepoints/release
# - 2 user and groups
# - 1 count for pagination
# - 1 activities
# - 1 all users from activities
# - 1 all versions from activities
# - 1 all translations from those versions
# - 1 all add-ons from activities
# - 1 all translations for those add-ons
response = self.client.get(self.list_url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('#result_list tbody tr')) == 4 # 3 add versions, 1 log in.
def test_search_for_single_ip(self):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, '*:*')
self.client.force_login(user)
user2 = user_factory()
user3 = user_factory()
addon = addon_factory(users=[user3])
with core.override_remote_addr('127.0.0.2'):
user2.update(email='foo@bar.com')
# That will make user2 match our query.
ActivityLog.create(amo.LOG.LOG_IN, user=user2)
with core.override_remote_addr('127.0.0.2'):
# That will make user3 match our query.
ActivityLog.create(
amo.LOG.ADD_VERSION, addon.current_version, addon, user=user3
)
with core.override_remote_addr('127.0.0.1'):
extra_user = user_factory() # Extra user that shouldn't match
ActivityLog.create(amo.LOG.LOG_IN, user=extra_user)
with self.assertNumQueries(11):
# - 2 savepoints/release
# - 2 user and groups
# - 1 count for pagination
# - 1 activities
# - 1 all users from activities
# - 1 all versions from activities
# - 1 all translations from those versions
# - 1 all add-ons from activities
# - 1 all translations for those add-ons
response = self.client.get(self.list_url, {'q': '127.0.0.2'}, follow=True)
assert response.status_code == 200
doc = pq(response.content.decode('utf-8'))
assert len(doc('#result_list tbody tr')) == 2
# Make sure it's the right records.
assert set(
(
doc('.field-user_link')[0].text_content(),
doc('.field-user_link')[1].text_content(),
)
) == {str(user2), str(user3)}
# Make sure login ip is now displayed, and has the right value.
# (twice since 2 rows are matching)
assert doc('.field-known_ip_adresses').text() == '127.0.0.2 127.0.0.2'
def test_escaping_and_links(self):
user = user_factory(
email='someone@mozilla.com', display_name='<script>alert(52)</script>'
)
addon = addon_factory(name='<script>alert(41)</script>')
activity.log_create(
amo.LOG.ADD_VERSION, addon.current_version, addon, user=user
)
self.grant_permission(user, '*:*')
self.client.force_login(user)
response = self.client.get(self.list_url)
assert response.status_code == 200
content = response.content.decode('utf-8)')
assert (
'<a href="http://testserver/en-US/admin/models/users/userprofile/'
f'{user.pk}/change/"><script>alert(52)</script></a> '
'logged in.'
) in content
assert (
'Version <a href="http://testserver/en-US/admin/models/versions/version/'
f'{addon.current_version.pk}/change/">{addon.current_version.version}</a>'
f' added to <a href="http://testserver/en-US/admin/models/addons/addon/'
f'{addon.pk}/change/"><script>alert(41)</script></a>'
) in content
class TestReviewActionReasonLogAdmin(TestCase):
def setUp(self):
self.admin_home_url = reverse('admin:index')
self.list_url = reverse('admin:activity_reviewactionreasonlog_changelist')
def test_can_see_module_in_admin_with_super_access(self):
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, '*:*')
self.client.force_login(user)
response = self.client.get(self.admin_home_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert doc('.model-reviewactionreasonlog')
def test_can_not_see_module_in_admin_without_permissions(self):
user = user_factory(email='someone@mozilla.com')
self.client.force_login(user)
response = self.client.get(self.admin_home_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('.model-reviewactionreasonlog')
def METHOD_NAME(self):
reason_1 = ReviewActionReason.objects.create(
name='reason 1',
is_active=True,
)
inactive_reason = ReviewActionReason.objects.create(
name='inactive reason',
is_active=False,
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, '*:*')
self.client.force_login(user)
activity_log = ActivityLog.objects.create(
action=amo.LOG.APPROVE_VERSION.id, user=user
)
reason_log = ReviewActionReasonLog.objects.create(
activity_log=activity_log,
reason=reason_1,
)
detail_url = reverse(
'admin:activity_reviewactionreasonlog_change', args=(reason_log.pk,)
)
response = self.client.get(detail_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
reason_options = doc('#id_reason option')
assert len(reason_options) == 2
assert reason_options.eq(0).text() == '(** inactive **) ' + inactive_reason.name
assert reason_options.eq(1).text() == reason_1.name |
4,970 | isfile cached | #! /usr/bin/env python
# encoding: utf-8
"""
Windows-specific optimizations
This module can help reducing the overhead of listing files on windows
(more than 10000 files). Python 3.5 already provides the listdir
optimization though.
"""
import os
from waflib import Utils, Build, Node, Logs
try:
TP = '%s\\*'.decode('ascii')
except AttributeError:
TP = '%s\\*'
if Utils.is_win32:
from waflib.Tools import md5_tstamp
import ctypes, ctypes.wintypes
FindFirstFile = ctypes.windll.kernel32.FindFirstFileW
FindNextFile = ctypes.windll.kernel32.FindNextFileW
FindClose = ctypes.windll.kernel32.FindClose
FILE_ATTRIBUTE_DIRECTORY = 0x10
INVALID_HANDLE_VALUE = -1
UPPER_FOLDERS = ('.', '..')
try:
UPPER_FOLDERS = [unicode(x) for x in UPPER_FOLDERS]
except NameError:
pass
def cached_hash_file(self):
try:
cache = self.ctx.cache_listdir_cache_hash_file
except AttributeError:
cache = self.ctx.cache_listdir_cache_hash_file = {}
if id(self.parent) in cache:
try:
t = cache[id(self.parent)][self.name]
except KeyError:
raise IOError('Not a file')
else:
# an opportunity to list the files and the timestamps at once
findData = ctypes.wintypes.WIN32_FIND_DATAW()
find = FindFirstFile(TP % self.parent.abspath(), ctypes.byref(findData))
if find == INVALID_HANDLE_VALUE:
cache[id(self.parent)] = {}
raise IOError('Not a file')
cache[id(self.parent)] = lst_files = {}
try:
while True:
if findData.cFileName not in UPPER_FOLDERS:
thatsadir = findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY
if not thatsadir:
ts = findData.ftLastWriteTime
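# pack the two 32-bit FILETIME halves into a single integer; the value is
# only compared for equality as a change marker below, so the word order
# of the halves is harmless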
d = (ts.dwLowDateTime << 32) | ts.dwHighDateTime
lst_files[str(findData.cFileName)] = d
if not FindNextFile(find, ctypes.byref(findData)):
break
except Exception:
cache[id(self.parent)] = {}
raise IOError('Not a file')
finally:
FindClose(find)
t = lst_files[self.name]
fname = self.abspath()
if fname in Build.hashes_md5_tstamp:
if Build.hashes_md5_tstamp[fname][0] == t:
return Build.hashes_md5_tstamp[fname][1]
try:
fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT)
except OSError:
raise IOError('Cannot read from %r' % fname)
f = os.fdopen(fd, 'rb')
m = Utils.md5()
rb = 1
try:
while rb:
rb = f.read(200000)
m.update(rb)
finally:
f.close()
# ensure that the cache is overwritten
Build.hashes_md5_tstamp[fname] = (t, m.digest())
return m.digest()
Node.Node.cached_hash_file = cached_hash_file
def get_bld_sig_win32(self):
try:
return self.ctx.hash_cache[id(self)]
except KeyError:
pass
except AttributeError:
self.ctx.hash_cache = {}
self.ctx.hash_cache[id(self)] = ret = Utils.h_file(self.abspath())
return ret
Node.Node.get_bld_sig = get_bld_sig_win32
def METHOD_NAME(self):
# optimize for nt.stat calls, assuming there are many files for few folders
try:
cache = self.__class__.cache_isfile_cache
except AttributeError:
cache = self.__class__.cache_isfile_cache = {}
try:
c1 = cache[id(self.parent)]
except KeyError:
c1 = cache[id(self.parent)] = []
curpath = self.parent.abspath()
findData = ctypes.wintypes.WIN32_FIND_DATAW()
find = FindFirstFile(TP % curpath, ctypes.byref(findData))
if find == INVALID_HANDLE_VALUE:
Logs.error("invalid win32 handle isfile_cached %r", self.abspath())
return os.path.isfile(self.abspath())
try:
while True:
if findData.cFileName not in UPPER_FOLDERS:
thatsadir = findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY
if not thatsadir:
c1.append(str(findData.cFileName))
if not FindNextFile(find, ctypes.byref(findData)):
break
except Exception as e:
Logs.error('exception while listing a folder %r %r', self.abspath(), e)
return os.path.isfile(self.abspath())
finally:
FindClose(find)
return self.name in c1
Node.Node.METHOD_NAME = METHOD_NAME
def find_or_declare_win32(self, lst):
# assuming that "find_or_declare" is called before the build starts, remove the calls to os.path.isfile
if isinstance(lst, str):
lst = [x for x in Utils.split_path(lst) if x and x != '.']
node = self.get_bld().search_node(lst)
if node:
if not node.METHOD_NAME():
try:
node.parent.mkdir()
except OSError:
pass
return node
self = self.get_src()
node = self.find_node(lst)
if node:
if not node.METHOD_NAME():
try:
node.parent.mkdir()
except OSError:
pass
return node
node = self.get_bld().make_node(lst)
node.parent.mkdir()
return node
Node.Node.find_or_declare = find_or_declare_win32
|
4,971 | get rate response | import json
from django.utils.translation import gettext_lazy as _
from corehq.apps.accounting.utils import fmt_dollar_amount
from corehq.apps.hqwebapp.async_handler import BaseAsyncHandler
from corehq.apps.hqwebapp.encoders import LazyEncoder
from corehq.apps.sms.models import INCOMING, OUTGOING, SQLMobileBackend
from corehq.apps.sms.phonenumbers_helper import country_name_for_country_code
from corehq.apps.smsbillables.exceptions import SMSRateCalculatorError
from corehq.apps.smsbillables.models import (
SmsGatewayFee,
SmsGatewayFeeCriteria,
SmsUsageFee,
)
from corehq.apps.smsbillables.utils import log_smsbillables_error
from corehq.util.quickcache import quickcache
NONMATCHING_COUNTRY = 'nonmatching'
class SMSRatesAsyncHandler(BaseAsyncHandler):
slug = 'sms_get_rate'
allowed_actions = [
'get_rate'
]
@property
def METHOD_NAME(self):
gateway = self.data.get('gateway')
try:
backend_api_id = SQLMobileBackend.get_backend_api_id(gateway, is_couch_id=True)
except Exception as e:
log_smsbillables_error(
"Failed to get backend for calculating an sms rate due to: %s"
% e
)
raise SMSRateCalculatorError("Could not obtain connection information.")
country_code = self.data.get('country_code')
if country_code == NONMATCHING_COUNTRY:
country_code = None
direction = self.data.get('direction')
gateway_fee = SmsGatewayFee.get_by_criteria(
backend_api_id, direction, backend_instance=gateway,
country_code=country_code,
)
usage_fee = SmsUsageFee.get_by_criteria(direction, self.request.domain)
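# gateway fees may be stored in other currencies, so convert to the default
# currency first; the usage fee is presumed to already be in the default (USD)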
usd_gateway_fee = gateway_fee.amount / gateway_fee.currency.rate_to_default
usd_total = usage_fee.amount + usd_gateway_fee
return {
'rate': _("%s per 160 character SMS") % fmt_dollar_amount(usd_total),
}
class SMSRatesSelect2AsyncHandler(BaseAsyncHandler):
slug = 'sms_rate_calc'
allowed_actions = [
'country_code',
]
@property
def country_code_response(self):
gateway = self.data.get('gateway')
try:
backend_api_id = SQLMobileBackend.get_backend_api_id(gateway, is_couch_id=True)
except Exception:
return []
direction = self.data.get('direction')
criteria_query = SmsGatewayFeeCriteria.objects.filter(
direction=direction, backend_api_id=backend_api_id
)
country_codes = criteria_query.exclude(
country_code__exact=None
).values_list('country_code', flat=True).distinct()
final_codes = []
for code in country_codes:
country_name = country_name_for_country_code(code)
final_codes.append((code, country_name))
search_term = self.data.get('searchString')
if search_term:
search_term = search_term.lower().replace('+', '')
final_codes = [
x for x in final_codes
if str(x[0]).startswith(search_term) or x[1].lower().startswith(search_term)
]
final_codes = [(c[0], "+%s%s" % (c[0], " (%s)" % c[1] if c[1] else '')) for c in final_codes]
if criteria_query.filter(country_code__exact=None).exists():
final_codes.append((
NONMATCHING_COUNTRY,
_('Any Country (Delivery not guaranteed via connection)')
))
return final_codes
def _fmt_success(self, response):
success = json.dumps({
'results': [{
'id': r[0],
'text': r[1],
} for r in response]
}, cls=LazyEncoder)
return success
class PublicSMSRatesAsyncHandler(BaseAsyncHandler):
slug = 'public_sms_rate_calc'
allowed_actions = [
'public_rate',
]
@property
def public_rate_response(self):
return self.get_rate_table(self.data.get('country_code'))
@quickcache(['country_code'], timeout=24 * 60 * 60)
def get_rate_table(self, country_code):
backends = SQLMobileBackend.get_global_backends(SQLMobileBackend.SMS)
def _directed_fee(direction, backend_api_id, backend_instance_id):
gateway_fee = SmsGatewayFee.get_by_criteria(
backend_api_id,
direction,
backend_instance=backend_instance_id,
country_code=country_code
)
if not gateway_fee or gateway_fee.amount is None:
return None
usd_gateway_fee = gateway_fee.amount / gateway_fee.currency.rate_to_default
usage_fee = SmsUsageFee.get_by_criteria(direction)
return fmt_dollar_amount(usage_fee.amount + usd_gateway_fee)
rate_table = []
from corehq.messaging.smsbackends.test.models import SQLTestSMSBackend
for backend_instance in backends:
# Skip Testing backends
if isinstance(backend_instance, SQLTestSMSBackend):
continue
# skip if country is not in supported countries
if backend_instance.supported_countries:
if ('*' not in backend_instance.supported_countries and
str(country_code) not in backend_instance.supported_countries):
continue
gateway_fee_incoming = _directed_fee(
INCOMING,
backend_instance.hq_api_id,
backend_instance.couch_id
)
gateway_fee_outgoing = _directed_fee(
OUTGOING,
backend_instance.hq_api_id,
backend_instance.couch_id
)
if gateway_fee_outgoing or gateway_fee_incoming:
rate_table.append({
'gateway': backend_instance.display_name,
'inn': gateway_fee_incoming or 'NA', # 'in' is reserved
'out': gateway_fee_outgoing or 'NA'
})
return rate_table |
4,972 | int2byte | # Partial copy of https://bitbucket.org/gutworth/six/src/8e634686c53a35092dd705172440a9231c90ddd1/six.py?at=default
# With some differences to account for the fact that the iterXXX versions may be defined in user code.
# Original __author__ = "Benjamin Peterson <benjamin@python.org>"
# Base __version__ = "1.7.3"
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import types
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
if PY3:
xrange = range
unicode = str
bytes = bytes
def iterkeys(d, **kw):
if hasattr(d, 'iterkeys'):
return iter(d.iterkeys(**kw))
return iter(d.keys(**kw))
def itervalues(d, **kw):
if hasattr(d, 'itervalues'):
return iter(d.itervalues(**kw))
return iter(d.values(**kw))
def iteritems(d, **kw):
if hasattr(d, 'iteritems'):
return iter(d.iteritems(**kw))
return iter(d.items(**kw))
def iterlists(d, **kw):
if hasattr(d, 'iterlists'):
return iter(d.iterlists(**kw))
return iter(d.lists(**kw))
def keys(d, **kw):
return list(iterkeys(d, **kw))
else:
unicode = unicode
xrange = xrange
bytes = str
def keys(d, **kw):
return d.keys(**kw)
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if PY3:
import operator
def b(s):
if isinstance(s, str):
return s.encode("latin-1")
assert isinstance(s, bytes)
return s
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def METHOD_NAME(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
METHOD_NAME = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
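# sanity examples, valid on both branches: METHOD_NAME(65) == b'A',
# byte2int(b'A') == 65, indexbytes(b'AB', 1) == 66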
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
METHOD_NAME = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO |
4,973 | ftpparse | # Copyright (C) 2009-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Python implementation of a part of Dan Bernstein's ftpparse library.
See also http://cr.yp.to/ftpparse.html
"""
months = (
"jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec",
)
def ismonth(txt):
"""Check if given text is a month name."""
return txt.lower() in months
def METHOD_NAME(line):
"""Parse a FTP list line into a dictionary with attributes:
name - name of file (string)
trycwd - False if cwd is definitely pointless, True otherwise
tryretr - False if retr is definitely pointless, True otherwise
If the line has no file information, None is returned
"""
if len(line) < 2:
# an empty name in EPLF, with no info, could be 2 chars
return None
info = dict(name=None, trycwd=False, tryretr=False)
# EPLF format
# http://pobox.com/~djb/proto/eplf.html
# "+i8388621.29609,m824255902,/,\tdev"
# "+i8388621.44468,m839956783,r,s10376,\tRFCEPLF"
if line[0] == '+':
if '\t' in line:
flags, name = line.split('\t', 1)
info['name'] = name
flags = flags.split(',')
info['trycwd'] = '/' in flags
info['tryretr'] = 'r' in flags
return info
# UNIX-style listing, without inum and without blocks
# "-rw-r--r-- 1 root other 531 Jan 29 03:26 README"
# "dr-xr-xr-x 2 root other 512 Apr 8 1994 etc"
# "dr-xr-xr-x 2 root 512 Apr 8 1994 etc"
# "lrwxrwxrwx 1 root other 7 Jan 25 00:17 bin -> usr/bin"
# Also produced by Microsoft's FTP servers for Windows:
# "---------- 1 owner group 1803128 Jul 10 10:18 ls-lR.Z"
# "d--------- 1 owner group 0 May 9 19:45 Softlib"
# Also WFTPD for MS-DOS:
# "-rwxrwxrwx 1 noone nogroup 322 Aug 19 1996 message.ftp"
# Also NetWare:
# "d [R----F--] supervisor 512 Jan 16 18:53 login"
# "- [R----F--] rhesus 214059 Oct 20 15:27 cx.exe"
# Also NetPresenz for the Mac:
# "-------r-- 326 1391972 1392298 Nov 22 1995 MegaPhone.sit"
# "drwxrwxr-x folder 2 May 10 1996 network"
if line[0] in 'bcdlps-':
if line[0] == 'd':
info['trycwd'] = True
if line[0] == '-':
info['tryretr'] = True
if line[0] == 'l':
info['trycwd'] = info['tryretr'] = True
parts = line.split()
if len(parts) < 7:
return None
del parts[0] # skip permissions
if parts[0] != 'folder':
del parts[0] # skip nlink
del parts[0] # skip uid
del parts[0] # skip gid or size
if not ismonth(parts[0]):
del parts[0] # skip size
if not ismonth(parts[0]):
return None
del parts[0] # skip month
del parts[0] # skip day
if not parts:
return None
del parts[0] # skip year or time
name = " ".join(parts)
# resolve links
if line[0] == 'l' and ' -> ' in name:
name = name.split(' -> ', 1)[1]
# eliminate extra NetWare spaces
if line[1] in ' [' and name.startswith(' '):
name = name[3:]
info["name"] = name
return info
# MultiNet (some spaces removed from examples)
# "00README.TXT;1 2 30-DEC-1996 17:44 [SYSTEM] (RWED,RWED,RE,RE)"
# "CORE.DIR;1 1 8-SEP-1996 16:09 [SYSTEM] (RWE,RWE,RE,RE)"
# and non-MutliNet VMS:
# "CII-MANUAL.TEX;1 213/216 29-JAN-1996 03:33:12 [ANONYMOU,ANONYMOUS] (RWED,RWED,,)"
i = line.find(';')
if i != -1:
name = line[:i]
if name.endswith(".DIR"):
name = name[:-4]
info["trycwd"] = True
else:
info["tryretr"] = True
info["name"] = name
return info
# MS-DOS format
# 04-27-00 09:09PM <DIR> licensed
# 07-18-00 10:16AM <DIR> pub
# 04-14-00 03:47PM 589 readme.htm
if line[0].isdigit():
parts = line.split()
if len(parts) != 4:
return None
info['name'] = parts[3]
if parts[2][0] == '<':
info['trycwd'] = True
else:
info['tryretr'] = True
return info
# Some useless lines, safely ignored:
# "Total of 11 Files, 10966 Blocks." (VMS)
# "total 14786" (UNIX)
# "DISK$ANONFTP:[ANONYMOUS]" (VMS)
# "Directory DISK$PCSA:[ANONYM]" (VMS)
return None |
4,974 | load name | # coding: utf-8
"""
This module provides a Loader class for locating and reading templates.
"""
import os
import sys
from pystache import common
from pystache import defaults
from pystache.locator import Locator
# We make a function so that the current defaults take effect.
# TODO: revisit whether this is necessary.
def _make_to_unicode():
def to_unicode(s, encoding=None):
"""
Raises a TypeError exception if the given string is already unicode.
"""
if encoding is None:
encoding = defaults.STRING_ENCODING
return unicode(s, encoding, defaults.DECODE_ERRORS)
return to_unicode
class Loader(object):
"""
Loads the template associated to a name or user-defined object.
All load_*() methods return the template as a unicode string.
"""
def __init__(self, file_encoding=None, extension=None, to_unicode=None,
search_dirs=None):
"""
Construct a template loader instance.
Arguments:
extension: the template file extension, without the leading dot.
Pass False for no extension (e.g. to use extensionless template
files). Defaults to the package default.
file_encoding: the name of the encoding to use when converting file
contents to unicode. Defaults to the package default.
search_dirs: the list of directories in which to search when loading
a template by name or file name. Defaults to the package default.
to_unicode: the function to use when converting strings of type
str to unicode. The function should have the signature:
to_unicode(s, encoding=None)
It should accept a string of type str and an optional encoding
name and return a string of type unicode. Defaults to calling
Python's built-in function unicode() using the package string
encoding and decode errors defaults.
"""
if extension is None:
extension = defaults.TEMPLATE_EXTENSION
if file_encoding is None:
file_encoding = defaults.FILE_ENCODING
if search_dirs is None:
search_dirs = defaults.SEARCH_DIRS
if to_unicode is None:
to_unicode = _make_to_unicode()
self.extension = extension
self.file_encoding = file_encoding
# TODO: unit test setting this attribute.
self.search_dirs = search_dirs
self.to_unicode = to_unicode
def _make_locator(self):
return Locator(extension=self.extension)
def unicode(self, s, encoding=None):
"""
Convert a string to unicode using the given encoding, and return it.
This function uses the underlying to_unicode attribute.
Arguments:
s: a basestring instance to convert to unicode. Unlike Python's
built-in unicode() function, it is okay to pass unicode strings
to this function. (Passing a unicode string to Python's unicode()
with the encoding argument throws the error, "TypeError: decoding
Unicode is not supported.")
encoding: the encoding to pass to the to_unicode attribute.
Defaults to None.
"""
if isinstance(s, unicode):
return unicode(s)
return self.to_unicode(s, encoding)
def read(self, path, encoding=None):
"""
Read the template at the given path, and return it as a unicode string.
"""
b = common.read(path)
if encoding is None:
encoding = self.file_encoding
return self.unicode(b, encoding)
def load_file(self, file_name):
"""
Find and return the template with the given file name.
Arguments:
file_name: the file name of the template.
"""
locator = self._make_locator()
path = locator.find_file(file_name, self.search_dirs)
return self.read(path)
def METHOD_NAME(self, name):
"""
Find and return the template with the given template name.
Arguments:
name: the name of the template.
"""
locator = self._make_locator()
path = locator.find_name(name, self.search_dirs)
return self.read(path)
# TODO: unit-test this method.
def load_object(self, obj):
"""
Find and return the template associated to the given object.
Arguments:
obj: an instance of a user-defined class.
search_dirs: the list of directories in which to search.
"""
locator = self._make_locator()
path = locator.find_object(obj, self.search_dirs)
return self.read(path) |
4,975 | load from pandas | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Here we reuse the datasets used by LSTNet, since processed copies of the
datasets are available on GitHub.
"""
from pathlib import Path
from typing import List, NamedTuple, Optional, cast
import pandas as pd
from gluonts.dataset import DatasetWriter
from gluonts.dataset.common import MetaData, TrainDatasets
from gluonts.dataset.repository._util import metadata
def METHOD_NAME(
df: pd.DataFrame,
time_index: pd.PeriodIndex,
agg_freq: Optional[str] = None,
) -> List[pd.Series]:
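# each column of the raw frame is one series and each row one time step,
# so transpose before iterating over the individual series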
df: pd.DataFrame = df.set_index(time_index)
pivot_df = df.transpose()
pivot_df.head()
timeseries = []
for row in pivot_df.iterrows():
ts = pd.Series(row[1].values, index=time_index)
if agg_freq is not None:
ts = ts.resample(agg_freq).sum()
first_valid = ts[ts.notnull()].index[0]
last_valid = ts[ts.notnull()].index[-1]
ts = ts[first_valid:last_valid]
timeseries.append(ts)
return timeseries
class LstnetDataset(NamedTuple):
name: str
url: str
num_series: int
num_time_steps: int
prediction_length: int
rolling_evaluations: int
freq: str
start_date: str
agg_freq: Optional[str] = None
root = (
"https://raw.githubusercontent.com/laiguokun/"
"multivariate-time-series-data/master/"
)
datasets_info = {
"exchange_rate": LstnetDataset(
name="exchange_rate",
url=root + "exchange_rate/exchange_rate.txt.gz",
num_series=8,
num_time_steps=7588,
prediction_length=30,
rolling_evaluations=5,
start_date="1990-01-01",
freq="1B",
agg_freq=None,
),
"electricity": LstnetDataset(
name="electricity",
url=root + "electricity/electricity.txt.gz",
# original dataset can be found at
# https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014#
# the aggregated dataset used by LSTNet filters out, from the initial
# 370 series, the ones with no data in 2011
num_series=321,
num_time_steps=26304,
prediction_length=24,
rolling_evaluations=7,
start_date="2012-01-01",
freq="1H",
agg_freq=None,
),
"traffic": LstnetDataset(
name="traffic",
url=root + "traffic/traffic.txt.gz",
# note there are 963 in the original dataset from
# https://archive.ics.uci.edu/ml/datasets/PEMS-SF but only 862 in
# LSTNet
num_series=862,
num_time_steps=17544,
prediction_length=24,
rolling_evaluations=7,
start_date="2015-01-01",
freq="H",
agg_freq=None,
),
"solar-energy": LstnetDataset(
name="solar-energy",
url=root + "solar-energy/solar_AL.txt.gz",
num_series=137,
num_time_steps=52560,
prediction_length=24,
rolling_evaluations=7,
start_date="2006-01-01",
freq="10min",
agg_freq="1H",
),
}
def generate_lstnet_dataset(
dataset_path: Path,
dataset_name: str,
dataset_writer: DatasetWriter,
prediction_length: Optional[int] = None,
):
ds_info = datasets_info[dataset_name]
time_index = pd.period_range(
start=ds_info.start_date,
freq=ds_info.freq,
periods=ds_info.num_time_steps,
)
df = cast(
pd.DataFrame,
pd.read_csv(ds_info.url, header=None), # type: ignore
)
assert df.shape == (
ds_info.num_time_steps,
ds_info.num_series,
), (
"expected num_time_steps/num_series"
f" {(ds_info.num_time_steps, ds_info.num_series)} but got {df.shape}"
)
timeseries = METHOD_NAME(
df=df, time_index=time_index, agg_freq=ds_info.agg_freq
)
# the last date seen during training
ts_index = cast(pd.PeriodIndex, timeseries[0].index)
training_end = ts_index[int(len(ts_index) * (8 / 10))]
train_ts = []
for cat, ts in enumerate(timeseries):
sliced_ts = ts[:training_end]
if len(sliced_ts) > 0:
train_ts.append(
{
"target": sliced_ts.values,
"start": sliced_ts.index[0],
"feat_static_cat": [cat],
"item_id": cat,
}
)
assert len(train_ts) == ds_info.num_series
# time of the first prediction
prediction_dates = [
training_end + i * ds_info.prediction_length
for i in range(ds_info.rolling_evaluations)
]
test_ts = []
for prediction_start_date in prediction_dates:
for cat, ts in enumerate(timeseries):
# print(prediction_start_date)
prediction_end_date = (
prediction_start_date + ds_info.prediction_length
)
sliced_ts = ts[:prediction_end_date]
test_ts.append(
{
"target": sliced_ts.values,
"start": sliced_ts.index[0],
"feat_static_cat": [cat],
"item_id": cat,
}
)
assert len(test_ts) == ds_info.num_series * ds_info.rolling_evaluations
meta = MetaData(
**metadata(
cardinality=ds_info.num_series,
freq=ds_info.freq
if ds_info.agg_freq is None
else ds_info.agg_freq,
prediction_length=prediction_length or ds_info.prediction_length,
)
)
dataset = TrainDatasets(metadata=meta, train=train_ts, test=test_ts)
dataset.save(
path_str=str(dataset_path), writer=dataset_writer, overwrite=True
) |
4,976 | multiple link or merge | from __future__ import annotations
from typing import Any, Generator, List, Optional, Set, Tuple, Union
from astroid import Break, Continue, NodeNG, Raise, Return
class ControlFlowGraph:
"""A graph representing the control flow of a Python program."""
start: CFGBlock
end: CFGBlock
# The unique id of this cfg. Defaults to 0 if not initialized in a CFGVisitor instance.
cfg_id: int
# block_count is used as an "autoincrement" to ensure the block ids are unique.
block_count: int
# blocks (with at least one statement) that will never be executed at runtime.
unreachable_blocks: Set[CFGBlock]
def __init__(self, cfg_id: int = 0) -> None:
self.block_count = 0
self.cfg_id = cfg_id
self.unreachable_blocks = set()
self.start = self.create_block()
self.end = self.create_block()
def create_block(
self, pred: Optional[CFGBlock] = None, edge_label: Optional[Any] = None
) -> CFGBlock:
"""Create a new CFGBlock for this graph.
If pred is specified, set that block as a predecessor of the new block.
If edge_label is specified, set the corresponding edge in the CFG with that label.
"""
new_block = CFGBlock(self.block_count)
self.unreachable_blocks.add(new_block)
self.block_count += 1
if pred:
self.link_or_merge(pred, new_block, edge_label)
return new_block
def link(self, source: CFGBlock, target: CFGBlock) -> None:
"""Link source to target."""
if not source.is_jump():
CFGEdge(source, target)
def link_or_merge(
self, source: CFGBlock, target: CFGBlock, edge_label: Optional[Any] = None
) -> None:
"""Link source to target, or merge source into target if source is empty.
An "empty" node for this purpose is when source has no statements.
source with a jump statement cannot be further linked or merged to
another target.
If edge_label is specified, set the corresponding edge in the CFG with that label.
"""
if source.is_jump():
return
if source.statements == []:
if source is self.start:
self.start = target
else:
for edge in source.predecessors:
edge.target = target
target.predecessors.append(edge)
# source is a utility block that only helps build the cfg; it does not
# represent any part of the program, so it is redundant.
self.unreachable_blocks.remove(source)
else:
CFGEdge(source, target, edge_label)
def METHOD_NAME(self, source: CFGBlock, targets: List[CFGBlock]) -> None:
"""Link source to multiple target, or merge source into targets if source is empty.
An "empty" node for this purpose is when source has no statements.
source with a jump statement cannot be further linked or merged to
another target.
Precondition:
- source != cfg.start
"""
if source.statements == []:
for edge in source.predecessors:
for t in targets:
CFGEdge(edge.source, t)
edge.source.successors.remove(edge)
source.predecessors = []
self.unreachable_blocks.remove(source)
else:
for target in targets:
self.link(source, target)
def get_blocks(self) -> Generator[CFGBlock, None, None]:
"""Generate a sequence of all blocks in this graph."""
yield from self._get_blocks(self.start, set())
def _get_blocks(self, block: CFGBlock, visited: Set[int]) -> Generator[CFGBlock, None, None]:
if block.id in visited:
return
yield block
visited.add(block.id)
for edge in block.successors:
yield from self._get_blocks(edge.target, visited)
def get_blocks_postorder(self) -> Generator[CFGBlock, None, None]:
"""Return the sequence of all blocks in this graph in the order of
a post-order traversal."""
yield from self._get_blocks_postorder(self.start, set())
def _get_blocks_postorder(self, block: CFGBlock, visited) -> Generator[CFGBlock, None, None]:
if block.id in visited:
return
visited.add(block.id)
for succ in block.successors:
yield from self._get_blocks_postorder(succ.target, visited)
yield block
def get_edges(self) -> Generator[CFGEdge, None, None]:
"""Generate a sequence of all edges in this graph."""
yield from self._get_edges(self.start, set())
def _get_edges(self, block: CFGBlock, visited: Set[int]) -> Generator[CFGEdge, None, None]:
if block.id in visited:
return
visited.add(block.id)
for edge in block.successors:
yield edge
yield from self._get_edges(edge.target, visited)
def update_block_reachability(self) -> None:
for block in self.get_blocks():
block.reachable = True
if block in self.unreachable_blocks:
self.unreachable_blocks.remove(block)
class CFGBlock:
"""A node in a control flow graph.
Represents a maximal block of code whose statements are guaranteed to execute in sequence.
"""
# A unique identifier
id: int
# The statements in this block.
statements: List[NodeNG]
# This block's in-edges (from blocks that can execute immediately before this one).
predecessors: List[CFGEdge]
# This block's out-edges (to blocks that can execute immediately after this one).
successors: List[CFGEdge]
# Whether there exists a path from the start block to this block.
reachable: bool
def __init__(self, id_: int) -> None:
"""Initialize a new CFGBlock."""
self.id = id_
self.statements = []
self.predecessors = []
self.successors = []
self.reachable = False
def add_statement(self, statement: NodeNG) -> None:
if not self.is_jump():
self.statements.append(statement)
statement.cfg_block = self
@property
def jump(self) -> Optional[NodeNG]:
if len(self.statements) > 0:
return self.statements[-1]
def is_jump(self) -> bool:
"""Returns True if the block has a statement that branches
the control flow (ex: `break`)"""
return isinstance(self.jump, (Break, Continue, Return, Raise))
class CFGEdge:
"""An edge in a control flow graph.
Edges are directed, and in the future may be augmented with auxiliary metadata about the control flow.
"""
source: CFGBlock
target: CFGBlock
label: Optional[Any]
def __init__(
self, source: CFGBlock, target: CFGBlock, edge_label: Optional[Any] = None
) -> None:
self.source = source
self.target = target
self.label = edge_label
self.source.successors.append(self)
self.target.predecessors.append(self) |
4,977 | postfix to infix | #!/usr/bin/env python
"""Expression Interconversion
This script provides a menu-driven interface to demonstrate interconversion
between different notations for arithmetic expressions i.e., Prefix, Infix
and Postfix.
Python: 3.10.8
External packages: None
Author: Siddhant Tiwari (github.com/stiwari-ds)
"""
import sys
def priority_level(operator: str) -> int:
match operator:
case "+":
return 1
case "-":
return 1
case "*":
return 2
case "/":
return 2
case "^":
return 3
case _:
return -1 # invalid operator
def prefix_to_infix(prefix: str) -> str:
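# example: prefix_to_infix("*+abc") returns "((a + b) * c)"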
infix = []
for char in reversed(prefix): # parsing right to left
if char.isalpha(): # operand
infix.append(char)
else: # operator
left_operand = infix.pop()
right_operand = infix.pop()
infix.append(f"({left_operand} {char} {right_operand})")
return infix.pop()
def infix_to_prefix(infix: str) -> str:
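# example: infix_to_prefix("(a+b)*c") returns "*+abc"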
prefix = []
operator_stack = []
for char in infix:
if char == "(":
operator_stack.append(char)
elif char == ")":
# pop triplets until '(' is found
while operator_stack and operator_stack[-1] != "(":
right_operand = prefix.pop()
left_operand = prefix.pop()
operator = operator_stack.pop()
prefix.append(operator + left_operand + right_operand)
operator_stack.pop() # remove pending '('
elif char.isalpha(): # operand directly added to output
prefix.append(char)
else: # operator
# pop operators until lower precedence operator is found
while operator_stack and (
priority_level(char) <= priority_level(operator_stack[-1])
):
right_operand = prefix.pop()
left_operand = prefix.pop()
operator = operator_stack.pop()
prefix.append(operator + left_operand + right_operand)
operator_stack.append(char)
while operator_stack: # pop remaining triplets
right_operand = prefix.pop()
left_operand = prefix.pop()
operator = operator_stack.pop()
prefix.append(operator + left_operand + right_operand)
return prefix.pop()
def infix_to_postfix(infix: str) -> str:
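# example: infix_to_postfix("(a+b)*c") returns "ab+c*"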
postfix = []
operator_stack = []
for char in infix:
if char.isalpha(): # operand directly added to output
postfix.append(char)
elif char == "(": # pushed into stack
operator_stack.append(char)
elif char == ")": # pop operators until '(' is encountered
while operator_stack and operator_stack[-1] != "(":
postfix.append(operator_stack.pop()) # popped char added to output
operator_stack.pop() # remove pending '('
else: # operator
# pop operators until lower precedence operator is found
while operator_stack and (
priority_level(char) <= priority_level(operator_stack[-1])
):
postfix.append(operator_stack.pop())
operator_stack.append(char)
while operator_stack: # pop remaining operators
postfix.append(operator_stack.pop())
return "".join(postfix)
def METHOD_NAME(postfix: str) -> str:
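# example: METHOD_NAME("ab+c*") returns "((a + b) * c)"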
infix = []
for char in postfix:
if char.isalpha(): # operand
infix.insert(0, char)
else: # operator
right_operand = infix.pop(0)
left_operand = infix.pop(0)
infix.insert(0, f"({left_operand} {char} {right_operand})")
return infix.pop()
def postfix_to_prefix(postfix: str) -> str:
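# example: postfix_to_prefix("ab+c*") returns "*+abc"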
prefix = []
for char in postfix:
if char.isalpha(): # operand
prefix.append(char)
else: # operator
right_operand = prefix.pop()
left_operand = prefix.pop()
prefix.append(char + left_operand + right_operand)
return "".join(prefix)
def prefix_to_postfix(prefix: str) -> str:
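# example: prefix_to_postfix("*+abc") returns "ab+c*"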
postfix = []
for char in reversed(prefix): # parsing right to left
if char.isalpha(): # operand
postfix.append(char)
else:
left_operand = postfix.pop()
right_operand = postfix.pop()
postfix.append(left_operand + right_operand + char)
return "".join(postfix)
def menu_interface():
menu = (
"\n---- Expression Interconversion ----\n"
"ID OPERATION\n"
"1. Prefix to Infix\n"
"2. Infix to Prefix\n"
"3. Infix to Postfix\n"
"4. Postfix to Infix\n"
"5. Postfix to Prefix\n"
"6. Prefix to Postfix\n"
"0. Exit\n"
)
while True:
print(menu)
try:
choice = int(input("Enter operation ID: ").strip())
except ValueError:
print("Invalid ID entered. Try again.")
continue
match choice:
case 1:
prefix = input("Enter prefix expression: ").strip()
infix = prefix_to_infix(prefix)
print(f"Infix conversion: {infix}")
case 2:
infix = input("Enter infix expression: ").strip()
prefix = infix_to_prefix(infix)
print(f"Prefix conversion: {prefix}")
case 3:
infix = input("Enter infix expression: ").strip()
postfix = infix_to_postfix(infix)
print(f"Postfix conversion: {postfix}")
case 4:
postfix = input("Enter postfix expression: ").strip()
infix = METHOD_NAME(postfix)
print(f"Infix conversion: {infix}")
case 5:
postfix = input("Enter postfix expression: ").strip()
prefix = postfix_to_prefix(postfix)
print(f"Prefix conversion: {prefix}")
case 6:
prefix = input("Enter prefix expression: ").strip()
postfix = prefix_to_postfix(prefix)
print(f"Postfix conversion: {postfix}")
case 0: # Exit
sys.exit("\nExiting.")
case _:
print("Invalid ID entered. Try again.")
_ = input("\nPress enter to continue...")
if __name__ == "__main__":
menu_interface() |
4,978 | set title | #!/usr/bin/env python3
# License: GPL v3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
# Replay the log from --dump-commands. To use first run
# kitty --dump-commands > file.txt
# then run
# kitty --replay-commands file.txt
# will replay the commands and pause at the end waiting for user to press enter
import sys
from contextlib import suppress
from typing import Any
CSI = '\x1b['
OSC = '\x1b]'
def write(x: str) -> None:
sys.stdout.write(x)
sys.stdout.flush()
def METHOD_NAME(*args: Any) -> None:
pass
def set_icon(*args: Any) -> None:
pass
def screen_bell() -> None:
pass
def screen_normal_keypad_mode() -> None:
write('\x1b>')
def screen_alternate_keypad_mode() -> None:
write('\x1b=')
def screen_cursor_position(y: int, x: int) -> None:
write(f'{CSI}{y};{x}H')
def screen_cursor_forward(amt: int) -> None:
write(f'{CSI}{amt}C')
def screen_save_cursor() -> None:
write('\x1b7')
def screen_restore_cursor() -> None:
write('\x1b8')
def screen_cursor_back1(amt: int) -> None:
write(f'{CSI}{amt}D')
def screen_save_modes() -> None:
write(f'{CSI}?s')
def screen_restore_modes() -> None:
write(f'{CSI}?r')
def screen_designate_charset(which: int, to: int) -> None:
w = '()'[int(which)]
t = chr(int(to))
write(f'\x1b{w}{t}')
def select_graphic_rendition(*a: int) -> None:
write(f'{CSI}{";".join(map(str, a))}m')
def screen_cursor_to_column(c: int) -> None:
write(f'{CSI}{c}G')
def screen_cursor_to_line(ln: int) -> None:
write(f'{CSI}{ln}d')
def screen_set_mode(x: int, private: bool) -> None:
write(f'{CSI}{"?" if private else ""}{x}h')
def screen_save_mode(x: int, private: bool) -> None:
write(f'{CSI}{"?" if private else ""}{x}s')
def screen_reset_mode(x: int, private: bool) -> None:
write(f'{CSI}{"?" if private else ""}{x}l')
def screen_restore_mode(x: int, private: bool) -> None:
write(f'{CSI}{"?" if private else ""}{x}r')
def screen_set_margins(t: int, b: int) -> None:
write(f'{CSI}{t};{b}r')
def screen_indexn(n: int) -> None:
write(f'{CSI}{n}S')
def screen_delete_characters(count: int) -> None:
write(f'{CSI}{count}P')
def screen_push_colors(which: int) -> None:
write(f'{CSI}{which}#P')
def screen_pop_colors(which: int) -> None:
write(f'{CSI}{which}#Q')
def screen_report_colors() -> None:
write(f'{CSI}#R')
def screen_repeat_character(num: int) -> None:
write(f'{CSI}{num}b')
def screen_insert_characters(count: int) -> None:
write(f'{CSI}{count}@')
def screen_scroll(count: int) -> None:
write(f'{CSI}{count}S')
def screen_erase_in_display(how: int, private: bool) -> None:
write(f'{CSI}{"?" if private else ""}{how}J')
def screen_erase_in_line(how: int, private: bool) -> None:
write(f'{CSI}{"?" if private else ""}{how}K')
def screen_delete_lines(num: int) -> None:
write(f'{CSI}{num}M')
def screen_cursor_up2(count: int) -> None:
write(f'{CSI}{count}A')
def screen_cursor_down(count: int) -> None:
write(f'{CSI}{count}B')
def screen_carriage_return() -> None:
write('\r')
def screen_linefeed() -> None:
write('\n')
def screen_tab() -> None:
write('\t')
def screen_backspace() -> None:
write('\x08')
def screen_set_cursor(mode: int, secondary: int) -> None:
write(f'{CSI}{secondary} q')
def screen_insert_lines(num: int) -> None:
write(f'{CSI}{num}L')
def draw(*a: str) -> None:
write(' '.join(a))
def screen_manipulate_title_stack(op: int, which: int) -> None:
write(f'{CSI}{op};{which}t')
def report_device_attributes(mode: int, char: int) -> None:
x = CSI
if char:
x += chr(char)
if mode:
x += str(mode)
write(f'{x}c')  # x already begins with CSI
def screen_decsace(mode: int) -> None:
write(f'{CSI}{mode}*x')
def screen_set_8bit_controls(mode: int) -> None:
write(f'\x1b {"G" if mode else "F"}')
def write_osc(code: int, string: str = '') -> None:
if string:
write(f'{OSC}{code};{string}\x07')
else:
write(f'{OSC}{code}\x07')
set_dynamic_color = set_color_table_color = process_cwd_notification = write_osc
clipboard_control_pending: str = ''
def shell_prompt_marking(payload: str) -> None:
write_osc(133, payload)
def clipboard_control(payload: str) -> None:
global clipboard_control_pending
code, data = payload.split(';', 1)
if code == '-52':
if clipboard_control_pending:
clipboard_control_pending += data.lstrip(';')
else:
clipboard_control_pending = payload
return
if clipboard_control_pending:
clipboard_control_pending += data.lstrip(';')
payload = clipboard_control_pending
clipboard_control_pending = ''
write(f'{OSC}{payload}\x07')
def replay(raw: str) -> None:
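# each meaningful line of the dump is a command name followed by its
# arguments, e.g. "screen_cursor_position 1 1" or "draw hello"; blank
# lines and '#' comments are skipped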
specials = {
'draw', 'set_title', 'set_icon', 'set_dynamic_color', 'set_color_table_color',
'process_cwd_notification', 'clipboard_control', 'shell_prompt_marking'
}
for line in raw.splitlines():
if line.strip() and not line.startswith('#'):
cmd, rest = line.partition(' ')[::2]
if cmd in specials:
globals()[cmd](rest)
else:
r = map(int, rest.split()) if rest else ()
globals()[cmd](*r)
def main(path: str) -> None:
with open(path) as f:
raw = f.read()
replay(raw)
with suppress(EOFError, KeyboardInterrupt):
input() |
4,979 | exercise functions | from __future__ import absolute_import, division, print_function
from boost_adaptbx.boost import rational
from libtbx.test_utils import Exception_expected, approx_equal, show_diff
from six.moves import range
from six.moves import zip
try:
from six.moves import cPickle as pickle
except ImportError:
import pickle
def exercise_int():
ri = rational.int
r = ri()
assert r.numerator() == 0
assert r.denominator() == 1
assert r.as_tuple() == (0,1)
assert int(r) == 0
assert float(r) == 0
assert rational.int(rational.int(3)).as_tuple() == (3,1)
assert rational.int(2).as_tuple() == (2,1)
assert rational.int(2,3).as_tuple() == (2,3)
assert str(rational.int()) == "0"
assert str(rational.int(2)) == "2"
assert str(rational.int(-2,3)) == "-2/3"
assert (-rational.int(2,3)).as_tuple() == (-2,3)
assert (rational.int(2,3) + rational.int(3,4)).as_tuple() == (17,12)
assert (rational.int(2,3) - rational.int(3,4)).as_tuple() == (-1,12)
assert (rational.int(2,3) * rational.int(3,4)).as_tuple() == (1,2)
assert (rational.int(2,3) / rational.int(3,4)).as_tuple() == (8,9)
assert (rational.int(2,3) // rational.int(3,4)) == 0
assert (rational.int(2,3) % rational.int(1,2)).as_tuple() == (1,6)
assert (rational.int(2,3) + 4).as_tuple() == (14,3)
assert (rational.int(2,3) - 4).as_tuple() == (-10,3)
assert (rational.int(2,3) * 4).as_tuple() == (8,3)
assert (rational.int(2,3) / 4).as_tuple() == (1,6)
assert (rational.int(2,3) // 4) == 0
assert (rational.int(7,3) % 2).as_tuple() == (1,3)
assert (5 + rational.int(2,3)).as_tuple() == (17,3)
assert (5 - rational.int(2,3)).as_tuple() == (13,3)
assert (5 * rational.int(2,3)).as_tuple() == (10,3)
assert (5 / rational.int(2,3)).as_tuple() == (15,2)
assert (5 // rational.int(2,3)) == 7
assert (5 % rational.int(2,3)).as_tuple() == (1,3)
assert rational.int(2,3) == rational.int(2,3)
assert not rational.int(2,3) == rational.int(2,5)
assert rational.int(2,3) != rational.int(2,5)
assert not rational.int(2,3) != rational.int(2,3)
assert rational.int(2,3) < rational.int(3,4)
assert not rational.int(2,3) < rational.int(2,3)
assert rational.int(2,3) > rational.int(1,2)
assert not rational.int(2,3) > rational.int(2,3)
assert rational.int(2,3) <= rational.int(3,4)
assert not rational.int(2,3) <= rational.int(1,2)
assert rational.int(2,3) >= rational.int(1,2)
assert not rational.int(2,3) >= rational.int(3,4)
assert rational.int(4,2) == 2
assert not rational.int(4,2) == 3
assert rational.int(4,2) != 3
assert not rational.int(4,2) != 2
assert rational.int(4,2) < 3
assert not rational.int(4,2) < 2
assert rational.int(4,2) > 1
assert not rational.int(4,2) > 2
assert rational.int(4,2) <= 3
assert not rational.int(4,2) <= 1
assert rational.int(4,2) >= 1
assert not rational.int(4,2) >= 3
assert 2 == rational.int(4,2)
assert not 3 == rational.int(4,2)
assert 3 != rational.int(4,2)
assert not 2 != rational.int(4,2)
assert 3 > rational.int(4,2)
assert not 2 > rational.int(4,2)
assert 1 < rational.int(4,2)
assert not 2 < rational.int(4,2)
assert 3 >= rational.int(4,2)
assert not 1 >= rational.int(4,2)
assert 1 <= rational.int(4,2)
assert not 3 <= rational.int(4,2)
r = rational.int(4,3)
r += 1
assert r.as_tuple() == (7,3)
assert approx_equal(float(r), 7./3)
s = rational.int(4,3)
assert hash(s) == hash(rational.int(4,3))
assert hash(s) != hash(r)
for n in range(-100,100):
assert hash(n) == hash(rational.int(n))
for d in range(1,8):
assert hash(rational.int(n,d)) == hash(rational.int(n,d))
assert hash(rational.int(n,d)) == hash(rational.int(3*n,3*d))
assert hash(rational.int(n,d)) == hash(rational.int(-3*n,-3*d))
try: int(r)
except RuntimeError as e:
assert str(e) == "boost.rational: as_int() conversion error:" \
" denominator is different from one."
else: raise Exception_expected
for n in range(-5,6):
for d in range(1,10):
r = rational.int(n, d)
p = pickle.dumps(r)
l = pickle.loads(p)
assert l == r
assert str(l) == str(r)
#
ee = "bad rational: zero denominator"
lhs = ri(1)
for rhs in [ri(0), 0]:
try: lhs / rhs
except RuntimeError as e: assert not show_diff(str(e), ee)
else: raise Exception_expected
try: lhs % rhs
except RuntimeError as e: assert not show_diff(str(e), ee)
else: raise Exception_expected
#
try:
import fractions
except ImportError:
fractions = None
def check(nd1, nd2, expected=None):
r1, r2 = ri(*nd1), ri(*nd2)
rm = r1 % r2
assert (r1 // r2) * r2 + rm == r1
if (fractions is not None):
ff = fractions.Fraction
f1, f2 = ff(*nd1), ff(*nd2)
fm = f1 % f2
assert (fm.numerator, fm.denominator) == rm.as_tuple()
if (expected is not None):
assert rm.as_tuple() == expected
check((2,3), (1,2), (1,6))
check((2,3), (-1,2), (-1,3))
check((-2,3), (1,2), (1,3))
check((-2,3), (-1,2), (-1,6))
for ln in range(-7,7+1):
for rn in range(-9,9+1):
if (rn == 0): continue
check((ln,3), (rn,4))
#
ri = rational.int
def check(r, e):
assert isinstance(r, ri)
assert r == e
check(ri(3,2) + ri(4,5), ri(23,10))
check(ri(3,2) + 4, ri(11,2))
check(2 + ri(4,5), ri(14,5))
check(ri(3,2) - ri(4,5), ri(7,10))
check(ri(3,2) - 4, ri(-5,2))
check(2 - ri(4,5), ri(6,5))
check(ri(3,2) * ri(4,5), ri(6,5))
check(ri(3,2) * 4, ri(6,1))
check(2 * ri(4,5), ri(8,5))
check(ri(3,2) / ri(4,5), ri(15,8))
check(ri(3,2) / 4, ri(3,8))
check(2 / ri(4,5), ri(5,2))
#
def check(r, e):
assert isinstance(r, int)
assert r == e
check(ri(3,2) // ri(4,5), 1)
check(ri(3,2) // 4, 0)
check(2 // ri(4,5), 2)
#
def check(r, e):
assert isinstance(r, float)
assert approx_equal(r, e)
check(ri(3,2) + 4., 5.5)
check(2. + ri(4,5), 2.8)
check(ri(3,2) - 4., -2.5)
check(2. - ri(4,5), 1.2)
check(ri(3,2) * 4., 6.0)
check(2. * ri(4,5), 1.6)
check(ri(3,2) / 4., 0.375)
check(2. / ri(4,5), 2.5)
#
try: ri(3,2) // 4.
except TypeError as e:
assert str(e).startswith("unsupported operand type(s)")
else: raise Exception_expected
try: 2. // ri(4,5)
except TypeError as e:
assert str(e).startswith("unsupported operand type(s)")
else: raise Exception_expected
#
try: ri(3,2) % 4.
except TypeError as e:
assert str(e).startswith("unsupported operand type(s)")
else: raise Exception_expected
try: 2. % ri(4,5)
except TypeError as e:
assert str(e).startswith("unsupported operand type(s)")
else: raise Exception_expected
#
try: ri(1) / 0.
except ZeroDivisionError as e:
assert not show_diff(str(e), "float division by zero")
else: raise Exception_expected
try: 1. / ri(0)
except ZeroDivisionError as e:
assert not show_diff(str(e), "float division by zero")
else: raise Exception_expected
def METHOD_NAME():
assert rational.gcd(8,6) == 2
assert rational.lcm(8,6) == 24
def exercise_python_code():
r = rational.int
assert rational.from_string("1") == 1
assert rational.from_string("2/4").as_tuple() == (1,2)
assert rational.vector((2,3,4), 3) == [r(d,3) for d in (2,3,4)]
assert rational.vector((2,3,4), (3,4,5)) == [
r(d,n) for d,n in zip((2,3,4), (3,4,5))]
assert rational.lcm_denominators(array=[]) == 1
assert rational.lcm_denominators(array=[r(3,4)]) == 4
assert rational.lcm_denominators(array=[r(3,4), r(5,6)]) == 12
def run():
exercise_int()
METHOD_NAME()
exercise_python_code()
print("OK")
if (__name__ == "__main__"):
run() |
4,980 | is elliptical | #! /usr/bin/env python
import openturns as ot
import math as m
import os
class UniformNdPy(ot.PythonDistribution):
def __init__(self, a=[0.0], b=[1.0]):
super(UniformNdPy, self).__init__(len(a))
if len(a) != len(b):
raise ValueError("Invalid bounds")
for i in range(len(a)):
if a[i] > b[i]:
raise ValueError("Invalid bounds")
self.a = a
self.b = b
self.factor = 1.0
for i in range(len(a)):
self.factor *= b[i] - a[i]
def getRange(self):
return ot.Interval(self.a, self.b, [True] * len(self.a), [True] * len(self.a))
def getRealization(self):
X = []
for i in range(len(self.a)):
X.append(
self.a[i] + (self.b[i] - self.a[i]) * ot.RandomGenerator.Generate()
)
return X
def getSample(self, size):
X = []
for i in range(size):
X.append(self.getRealization())
return X
def computeCDF(self, X):
prod = 1.0
for i in range(len(self.a)):
if X[i] < self.a[i]:
return 0.0
prod *= min(self.b[i], X[i]) - self.a[i]
return prod / self.factor
def computePDF(self, X):
for i in range(len(self.a)):
if X[i] < self.a[i]:
return 0.0
if X[i] > self.b[i]:
return 0.0
return 1.0 / self.factor
def getRoughness(self):
return 42.0
def getMean(self):
mu = []
for i in range(len(self.a)):
mu.append(0.5 * (self.a[i] + self.b[i]))
return mu
def getStandardDeviation(self):
stdev = []
for i in range(len(self.a)):
stdev.append((self.b[i] - self.a[i]) / m.sqrt(12.0))
return stdev
def getSkewness(self):
return [0.0] * len(self.a)
def getKurtosis(self):
return [1.8] * len(self.a)
def getMoment(self, n):
return [-0.1 * n] * len(self.a)
def getCentralMoment(self, n):
return [0.0] * len(self.a)
def computeCharacteristicFunction(self, x):
if len(self.a) > 1:
raise ValueError("dim>1")
ax = self.a[0] * x
bx = self.b[0] * x
return (m.sin(bx) - m.sin(ax) + 1j * (m.cos(ax) - m.cos(bx))) / (bx - ax)
def METHOD_NAME(self):
return (len(self.a) == 1) and (self.a[0] == -self.b[0])
def isCopula(self):
for i in range(len(self.a)):
if self.a[i] != 0.0:
return False
if self.b[i] != 1.0:
return False
return True
def getMarginal(self, indices):
subA = []
subB = []
for i in indices:
subA.append(self.a[i])
subB.append(self.b[i])
py_dist = UniformNdPy(subA, subB)
return ot.Distribution(py_dist)
def computeQuantile(self, prob, tail=False):
p = 1.0 - prob if tail else prob
quantile = list(self.a)  # copy, so the bounds themselves are not mutated
for i in range(len(self.a)):
quantile[i] += p * (self.b[i] - self.a[i])
return quantile
def getParameter(self):
param = list(self.a)
param.extend(self.b)
return param
def getParameterDescription(self):
paramDesc = ["a_" + str(i) for i in range(len(self.a))]
paramDesc.extend(["b_" + str(i) for i in range(len(self.a))])
return paramDesc
def setParameter(self, parameter):
dim = len(self.a)
for i in range(dim):
self.a[i] = parameter[i]
self.b[i] = parameter[dim + i]
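# exercise persistence: save the Python-based distribution into an XML study
# file, reload it into a fresh ot.Distribution, and print both for comparison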
myDist = ot.Distribution(UniformNdPy([0.0] * 2, [2.0] * 2))
st = ot.Study()
fileName = "PyDIST.xml"
st.setStorageManager(ot.XMLStorageManager(fileName))
st.add("myDist", myDist)
st.save()
print("saved dist=", myDist)
dist = ot.Distribution()
st = ot.Study()
st.setStorageManager(ot.XMLStorageManager(fileName))
st.load()
st.fillObject("myDist", dist)
print("loaded dist=", dist)
os.remove(fileName) |
4,981 | cholesky block diag | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registrations for LinearOperator.cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_block_diag
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_kronecker
from tensorflow.python.ops.linalg import linear_operator_lower_triangular
# By default, compute the Cholesky of the dense matrix, and return a
# LowerTriangular operator. Methods below specialize this registration.
@linear_operator_algebra.RegisterCholesky(linear_operator.LinearOperator)
def _cholesky_linear_operator(linop):
return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
linalg_ops.cholesky(linop.to_dense()),
is_non_singular=True,
is_self_adjoint=False,
is_square=True)
@linear_operator_algebra.RegisterCholesky(
linear_operator_diag.LinearOperatorDiag)
def _cholesky_diag(diag_operator):
return linear_operator_diag.LinearOperatorDiag(
math_ops.sqrt(diag_operator.diag),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
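# For a diagonal operator the Cholesky factor is the elementwise square root,
# e.g. LinearOperatorDiag([4., 9.]) factors as LinearOperatorDiag([2., 3.]).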
@linear_operator_algebra.RegisterCholesky(
linear_operator_identity.LinearOperatorIdentity)
def _cholesky_identity(identity_operator):
return linear_operator_identity.LinearOperatorIdentity(
num_rows=identity_operator._num_rows, # pylint: disable=protected-access
batch_shape=identity_operator.batch_shape,
dtype=identity_operator.dtype,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
@linear_operator_algebra.RegisterCholesky(
linear_operator_identity.LinearOperatorScaledIdentity)
def _cholesky_scaled_identity(identity_operator):
return linear_operator_identity.LinearOperatorScaledIdentity(
num_rows=identity_operator._num_rows, # pylint: disable=protected-access
multiplier=math_ops.sqrt(identity_operator.multiplier),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
@linear_operator_algebra.RegisterCholesky(
linear_operator_block_diag.LinearOperatorBlockDiag)
def METHOD_NAME(block_diag_operator):
# We take the cholesky of each block on the diagonal.
return linear_operator_block_diag.LinearOperatorBlockDiag(
operators=[
operator.cholesky() for operator in block_diag_operator.operators],
is_non_singular=True,
is_self_adjoint=False,
is_square=True)
@linear_operator_algebra.RegisterCholesky(
linear_operator_kronecker.LinearOperatorKronecker)
def _cholesky_kronecker(kronecker_operator):
  # The Cholesky decomposition of a Kronecker product is the Kronecker
  # product of the Cholesky decompositions.
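  # Concretely, chol(kron(A, B)) = kron(chol(A), chol(B)): the Kronecker
  # product of lower-triangular factors is lower triangular and multiplies
  # back out to kron(A, B), so each factor below can be taken independently
  # (possibly hitting a more specialized registration above).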
return linear_operator_kronecker.LinearOperatorKronecker(
operators=[
operator.cholesky() for operator in kronecker_operator.operators],
is_non_singular=True,
is_self_adjoint=False,
is_square=True) |
4,982 | build list request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-12-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-12-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.LoadTestService/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
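# Illustrative only (derived from the constants above): with no overrides the
# builder returns a GET HttpRequest for
# "/providers/Microsoft.LoadTestService/operations" carrying the
# "api-version=2022-12-01" query parameter and an "Accept: application/json"
# header.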
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.loadtesting.LoadTestMgmtClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists all the available API operations for Load Test Resource.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.loadtesting.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-12-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = METHOD_NAME(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
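    # Illustrative consumption (the client variable is hypothetical):
    #   for op in client.operations.list():
    #       print(op.name)
    # ItemPaged is lazy: the first iteration issues the initial GET built by
    # prepare_request(), and each exhausted page follows its nextLink via
    # get_next()/extract_data().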
list.metadata = {"url": "/providers/Microsoft.LoadTestService/operations"} |
4,983 | get local root | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import fnmatch
import os
from awscli.customizations.s3.utils import split_s3_bucket_key
LOG = logging.getLogger(__name__)
def create_filter(parameters):
"""Given the CLI parameters dict, create a Filter object."""
# We need to evaluate all the filters based on the source
# directory.
if parameters['filters']:
cli_filters = parameters['filters']
real_filters = []
for filter_type, filter_pattern in cli_filters:
real_filters.append((filter_type.lstrip('-'),
filter_pattern))
source_location = parameters['src']
if source_location.startswith('s3://'):
# This gives us (bucket, keyname) and we want
# the bucket to be the root dir.
src_rootdir = _get_s3_root(source_location,
parameters['dir_op'])
else:
src_rootdir = METHOD_NAME(parameters['src'], parameters['dir_op'])
destination_location = parameters['dest']
if destination_location.startswith('s3://'):
dst_rootdir = _get_s3_root(parameters['dest'],
parameters['dir_op'])
else:
dst_rootdir = METHOD_NAME(parameters['dest'],
parameters['dir_op'])
return Filter(real_filters, src_rootdir, dst_rootdir)
else:
return Filter({}, None, None)
def _get_s3_root(source_location, dir_op):
# Obtain the bucket and the key.
bucket, key = split_s3_bucket_key(source_location)
if not dir_op and not key.endswith('/'):
# If we are not performing an operation on a directory and the key
# is of the form: ``prefix/key``. We only want ``prefix`` included in
        # the s3 root and not ``key``.
key = '/'.join(key.split('/')[:-1])
# Rejoin the bucket and key back together.
s3_path = '/'.join([bucket, key])
return s3_path
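# Illustrative example: for source_location "s3://bucket/logs/2020/file.txt"
# with dir_op=False, the key is trimmed to its prefix and this returns
# "bucket/logs/2020"; with dir_op=True the full "bucket/logs/2020/file.txt"
# is kept as the root.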
def METHOD_NAME(source_location, dir_op):
if dir_op:
rootdir = os.path.abspath(source_location)
else:
rootdir = os.path.abspath(os.path.dirname(source_location))
return rootdir
class Filter(object):
"""
This is a universal exclude/include filter.
"""
def __init__(self, patterns, rootdir, dst_rootdir):
"""
:var patterns: A list of patterns. A pattern consists of a list
whose first member is a string 'exclude' or 'include'.
The second member is the actual rule.
:var rootdir: The root directory where the patterns are evaluated.
This will generally be the directory of the source location.
:var dst_rootdir: The destination root directory where the patterns are
evaluated. This is only useful when the --delete option is
also specified.
"""
self._original_patterns = patterns
self.patterns = self._full_path_patterns(patterns, rootdir)
self.dst_patterns = self._full_path_patterns(patterns, dst_rootdir)
def _full_path_patterns(self, original_patterns, rootdir):
# We need to transform the patterns into patterns that have
# the root dir prefixed, so things like ``--exclude "*"``
# will actually be ['exclude', '/path/to/root/*']
full_patterns = []
for pattern in original_patterns:
full_patterns.append(
(pattern[0], os.path.join(rootdir, pattern[1])))
return full_patterns
def call(self, file_infos):
"""
        This function iterates through the yielded file_info objects. It
        determines the type of each file and applies pattern matching to
        decide whether a rule applies. While iterating through the patterns,
        the file is assigned a boolean flag that determines whether it should
        be yielded on past the filter. Anything matched by an exclude rule
        has its flag set to False; anything matched by an include rule has
        its flag set to True. All files begin with the flag set to True, and
        rules listed later overwrite flags set by rules listed before them.
"""
for file_info in file_infos:
file_path = file_info.src
file_status = (file_info, True)
for pattern, dst_pattern in zip(self.patterns, self.dst_patterns):
current_file_status = self._match_pattern(pattern, file_info)
if current_file_status is not None:
file_status = current_file_status
dst_current_file_status = self._match_pattern(dst_pattern, file_info)
if dst_current_file_status is not None:
file_status = dst_current_file_status
LOG.debug("=%s final filtered status, should_include: %s",
file_path, file_status[1])
if file_status[1]:
yield file_info
def _match_pattern(self, pattern, file_info):
file_status = None
file_path = file_info.src
pattern_type = pattern[0]
if file_info.src_type == 'local':
path_pattern = pattern[1].replace('/', os.sep)
else:
path_pattern = pattern[1].replace(os.sep, '/')
is_match = fnmatch.fnmatch(file_path, path_pattern)
if is_match and pattern_type == 'include':
file_status = (file_info, True)
LOG.debug("%s matched include filter: %s",
file_path, path_pattern)
elif is_match and pattern_type == 'exclude':
file_status = (file_info, False)
LOG.debug("%s matched exclude filter: %s",
file_path, path_pattern)
else:
LOG.debug("%s did not match %s filter: %s",
file_path, pattern_type, path_pattern)
return file_status |
4,984 | app wo domain | import os
import shutil
from typing import Generator, Iterator, Optional, Union
from unittest.mock import patch
import pytest
from flask import current_app
from fittrackee import create_app, db, limiter
from fittrackee.application.models import AppConfig
from fittrackee.application.utils import update_app_config_from_database
from fittrackee.workouts.utils.gpx import weather_service
@pytest.fixture(autouse=True)
def default_weather_service(monkeypatch: pytest.MonkeyPatch) -> Iterator[None]:
with patch.object(weather_service, 'get_weather', return_value=None):
yield
def get_app_config(
with_config: Optional[bool] = False,
max_workouts: Optional[int] = None,
max_single_file_size: Optional[Union[int, float]] = None,
max_zip_file_size: Optional[Union[int, float]] = None,
max_users: Optional[int] = None,
) -> Optional[AppConfig]:
if with_config:
config = AppConfig.query.one_or_none()
if not config:
config = AppConfig()
db.session.add(config)
db.session.flush()
config.gpx_limit_import = 10 if max_workouts is None else max_workouts
config.max_single_file_size = (
(1 if max_single_file_size is None else max_single_file_size)
* 1024
* 1024
)
config.max_zip_file_size = (
(10 if max_zip_file_size is None else max_zip_file_size)
* 1024
* 1024
)
config.max_users = 100 if max_users is None else max_users
db.session.commit()
return config
return None
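# Note (derived from the defaults above): file sizes are supplied in
# megabytes and stored in bytes, e.g. the default max_single_file_size of 1
# becomes 1 * 1024 * 1024 = 1048576, matching the app_config fixture at the
# bottom of this module.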
def get_app(
with_config: Optional[bool] = False,
max_workouts: Optional[int] = None,
max_single_file_size: Optional[Union[int, float]] = None,
max_zip_file_size: Optional[Union[int, float]] = None,
max_users: Optional[int] = None,
) -> Generator:
app = create_app()
limiter.enabled = False
with app.app_context():
try:
db.create_all()
app_db_config = get_app_config(
with_config,
max_workouts,
max_single_file_size,
max_zip_file_size,
max_users,
)
if app_db_config:
update_app_config_from_database(app, app_db_config)
yield app
except Exception as e:
print(f'Error with app configuration: {e}')
finally:
db.session.remove()
db.drop_all()
# close unused idle connections => avoid the following error:
# FATAL: remaining connection slots are reserved for
# non-replication superuser connections
db.engine.dispose()
# remove all temp files like gpx files
shutil.rmtree(
current_app.config['UPLOAD_FOLDER'],
ignore_errors=True,
)
return app
@pytest.fixture
def app(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv('EMAIL_URL', 'smtp://none:none@0.0.0.0:1025')
if os.getenv('TILE_SERVER_URL'):
monkeypatch.delenv('TILE_SERVER_URL')
if os.getenv('STATICMAP_SUBDOMAINS'):
monkeypatch.delenv('STATICMAP_SUBDOMAINS')
if os.getenv('MAP_ATTRIBUTION'):
monkeypatch.delenv('MAP_ATTRIBUTION')
if os.getenv('DEFAULT_STATICMAP'):
monkeypatch.delenv('DEFAULT_STATICMAP')
yield from get_app(with_config=True)
@pytest.fixture
def app_default_static_map(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv('DEFAULT_STATICMAP', 'True')
yield from get_app(with_config=True)
@pytest.fixture
def app_with_max_workouts(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv('EMAIL_URL', 'smtp://none:none@0.0.0.0:1025')
yield from get_app(with_config=True, max_workouts=2)
@pytest.fixture
def app_with_max_file_size_equals_0(
monkeypatch: pytest.MonkeyPatch,
) -> Generator:
monkeypatch.setenv('EMAIL_URL', 'smtp://none:none@0.0.0.0:1025')
yield from get_app(with_config=True, max_single_file_size=0)
@pytest.fixture
def app_with_max_file_size(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv('EMAIL_URL', 'smtp://none:none@0.0.0.0:1025')
yield from get_app(with_config=True, max_single_file_size=0.001)
@pytest.fixture
def app_with_max_zip_file_size(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv('EMAIL_URL', 'smtp://none:none@0.0.0.0:1025')
yield from get_app(with_config=True, max_zip_file_size=0.001)
@pytest.fixture
def app_with_3_users_max(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv('EMAIL_URL', 'smtp://none:none@0.0.0.0:1025')
yield from get_app(with_config=True, max_users=3)
@pytest.fixture
def app_no_config() -> Generator:
yield from get_app(with_config=False)
@pytest.fixture
def app_ssl(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv(
'EMAIL_URL', 'smtp://username:password@0.0.0.0:1025?ssl=True'
)
yield from get_app(with_config=True)
@pytest.fixture
def app_tls(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv(
'EMAIL_URL', 'smtp://username:password@0.0.0.0:1025?tls=True'
)
yield from get_app(with_config=True)
@pytest.fixture
def app_wo_email_auth(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv('EMAIL_URL', 'smtp://0.0.0.0:1025')
yield from get_app(with_config=True)
@pytest.fixture
def app_wo_email_activation(monkeypatch: pytest.MonkeyPatch) -> Generator:
monkeypatch.setenv('EMAIL_URL', '')
yield from get_app(with_config=True)
@pytest.fixture
def METHOD_NAME() -> Generator:
yield from get_app(with_config=True)
@pytest.fixture()
def app_config() -> AppConfig:
config = AppConfig()
config.gpx_limit_import = 10
config.max_single_file_size = 1048576
config.max_zip_file_size = 10485760
config.max_users = 0
db.session.add(config)
db.session.commit()
return config |
4,985 | to html | from pathlib import Path
import random
import string
import tempfile
import mlflow
import pandas as pd
from mlflow import MlflowClient
from giskard.utils.analytics_collector import analytics, anonymize
class ScanReport:
def __init__(self, issues, as_html: bool = True):
self.issues = issues
self.as_html = as_html
def has_issues(self):
return len(self.issues) > 0
def __repr__(self):
if not self.has_issues():
return "<ScanReport (no issues)>"
return f"<ScanReport ({len(self.issues)} issue{'s' if len(self.issues) > 1 else ''})>"
def _ipython_display_(self):
if self.as_html:
from IPython.core.display import display_html
html = self._repr_html_()
display_html(html, raw=True)
else:
from IPython.core.display import display_markdown
markdown = self._repr_markdown_()
display_markdown(markdown, raw=True)
def _repr_html_(self):
return self.METHOD_NAME(embed=True)
def _repr_markdown_(self):
return self.to_markdown()
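    # Note: Jupyter invokes _ipython_display_ via IPython's rich display
    # protocol, so evaluating a ScanReport at the end of a notebook cell
    # renders the HTML report (or markdown when as_html is False) without
    # any explicit call.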
def METHOD_NAME(self, filename=None, embed=False):
from ..visualization.widget import ScanReportWidget
widget = ScanReportWidget(self)
html = widget.render_html(embed=embed)
if filename is not None:
with open(filename, "w") as f:
f.write(html)
return
return html
def to_markdown(self, filename=None, template="summary"):
from ..visualization.widget import ScanReportWidget
widget = ScanReportWidget(self)
markdown = widget.render_markdown(template=template)
if filename is not None:
with open(filename, "w") as f:
f.write(markdown)
return
return markdown
def to_dataframe(self):
df = pd.DataFrame(
[
{
"domain": issue.meta.get("domain"),
"slicing_fn": str(issue.slicing_fn) if issue.slicing_fn else None,
"transformation_fn": str(issue.transformation_fn) if issue.transformation_fn else None,
"metric": issue.meta.get("metric"),
"deviation": issue.meta.get("deviation"),
"description": issue.description,
}
for issue in self.issues
]
)
return df
def generate_tests(self, with_names=False):
tests = sum([issue.generate_tests(with_names=with_names) for issue in self.issues], [])
return tests
def generate_test_suite(self, name=None):
from giskard import Suite
suite = Suite(name=name or "Test suite (generated by automatic scan)")
for test, test_name in self.generate_tests(with_names=True):
suite.add_test(test, test_name)
self._track_suite(suite, name)
return suite
def _track_suite(self, suite, name):
tests_cnt = {}
if suite.tests:
            for t in suite.tests:
                try:
                    # use a local name so the `name` argument used for
                    # analytics below is not shadowed
                    test_name = t.giskard_test.meta.full_name
                    tests_cnt[test_name] = tests_cnt.get(test_name, 0) + 1
                except:  # noqa
                    pass
analytics.track(
"scan:generate_test_suite",
{"suite_name": anonymize(name), "tests_cnt": len(suite.tests), **tests_cnt},
)
@staticmethod
def get_scan_summary_for_mlflow(scan_results):
results_df = scan_results.to_dataframe()
results_df.metric = results_df.metric.replace("=.*", "", regex=True)
return results_df
def to_mlflow(
self,
mlflow_client: MlflowClient = None,
mlflow_run_id: str = None,
summary: bool = True,
model_artifact_path: str = "",
):
results_df = self.get_scan_summary_for_mlflow(self)
if model_artifact_path != "":
model_artifact_path = "-for-" + model_artifact_path
with tempfile.NamedTemporaryFile(
prefix="giskard-scan-results" + model_artifact_path + "-", suffix=".html", delete=False
) as f:
# Get file path
scan_results_local_path = f.name
# Get name from file
scan_results_artifact_name = Path(f.name).name
scan_summary_artifact_name = (
"scan-summary" + model_artifact_path + ".json" if summary else None
)
# Write the file on disk
self.METHOD_NAME(scan_results_local_path)
try:
if mlflow_client is None and mlflow_run_id is None:
mlflow.log_artifact(scan_results_local_path)
if summary:
mlflow.log_table(results_df, artifact_file=scan_summary_artifact_name)
elif mlflow_client and mlflow_run_id:
mlflow_client.log_artifact(mlflow_run_id, scan_results_local_path)
if summary:
mlflow_client.log_table(mlflow_run_id, results_df, artifact_file=scan_summary_artifact_name)
finally:
# Force deletion of the temps file
Path(f.name).unlink(missing_ok=True)
return scan_results_artifact_name, scan_summary_artifact_name
def to_wandb(self, **kwargs):
"""Log the scan results to the WandB run.
Log the current scan results in an HTML format to the active WandB run.
Parameters
----------
**kwargs :
Additional keyword arguments
(see https://docs.wandb.ai/ref/python/init) to be added to the active WandB run.
"""
import wandb # noqa library import already checked in wandb_run
from giskard.integrations.wandb.wandb_utils import wandb_run
from ..utils.analytics_collector import analytics
with wandb_run(**kwargs) as run:
try:
html = self.METHOD_NAME()
suffix = "".join(
random.choices(string.ascii_lowercase + string.digits, k=8)
)
wandb_artifact_name = (
f"Vulnerability scan results/giskard-scan-results-{suffix}"
)
analytics.track(
"wandb_integration:scan_result",
{
"wandb_run_id": run.id,
"has_issues": self.has_issues(),
"issues_cnt": len(self.issues),
},
)
except Exception as e:
analytics.track(
"wandb_integration:scan_result:error:unknown",
{
"wandb_run_id": run.id,
"error": str(e),
},
)
raise ValueError(
"An error occurred while logging the scan results into wandb. "
"Please submit the traceback as a GitHub issue in the following "
"repository for further assistance: https://github.com/Giskard-AI/giskard."
) from e
run.log({wandb_artifact_name: wandb.Html(html, inject=False)}) |
4,986 | test get new url from user | from typing import Optional
import pytest
from demisto_sdk.commands.common.hook_validations.readme import (
ReadmeUrl,
ReadMeValidator,
)
from demisto_sdk.commands.common.legacy_git_tools import git_path
from demisto_sdk.commands.common.markdown_lint import run_markdownlint
from demisto_sdk.commands.format.update_readme import ReadmeFormat
INVALID_MD = f"{git_path()}/demisto_sdk/tests/test_files/README-invalid.md"
INVALID_MD_IN_PACK = f"{git_path()}/demisto_sdk/tests/test_files/Packs/DummyPack2"
def test_readme_markdown_fixes():
"""
Given: Some markdown file with lint errors
When: Calling format on the file
Then: The errors are fixed
"""
with ReadMeValidator.start_mdx_server():
readme_formatter = ReadmeFormat(INVALID_MD, assume_answer=True)
old_content = readme_formatter.readme_content
readme_formatter.fix_lint_markdown()
assert old_content != readme_formatter.readme_content
assert (
run_markdownlint(open(INVALID_MD).read(), fix=True).fixed_text
== readme_formatter.readme_content
)
def test_format_with_update_docker_flag(mocker, monkeypatch):
"""
Check when run demisto-sdk format execute with -ud (update docker) from repo which does not have a mdx server,
(but has a node), that the run ends without any exception.
"""
monkeypatch.setenv("COLUMNS", "1000")
from demisto_sdk.commands.common.git_util import GitUtil
from demisto_sdk.commands.common.hook_validations.readme import ReadMeValidator
from demisto_sdk.commands.format.format_module import format_manager
from demisto_sdk.commands.validate.validate_manager import ValidateManager
mocker.patch.object(
ReadMeValidator, "are_modules_installed_for_verify", return_value=False
)
mocker.patch.object(ReadMeValidator, "is_docker_available", return_value=False)
mocker.patch.object(
ValidateManager,
"get_changed_files_from_git",
return_value=(set(), set(), set(), set(), True),
)
mocker.patch.object(GitUtil, "deleted_files", return_value=set())
assert format_manager(input=f"{git_path()}/Packs/TestPack", update_docker=True) == 0
def get_new_url_from_user_assume_yes(readme_url: ReadmeUrl) -> Optional[str]:
    """Check if new url is as expected when using assume_yes flag"""
    readme_formatter = ReadmeFormat(INVALID_MD, assume_answer=True)
    return readme_formatter.get_new_url_from_user(readme_url)
def get_new_url_from_user_add_prefix(mocker, readme_url: ReadmeUrl) -> Optional[str]:
    """Check if new url is as expected when user selects adding https:// prefix"""
    mocker.patch("builtins.input", side_effect=["y"])
    readme_formatter = ReadmeFormat(INVALID_MD)
    return readme_formatter.get_new_url_from_user(readme_url)
def get_new_url_from_user_change_url(mocker, readme_url: ReadmeUrl) -> Optional[str]:
    """Check if new url is as expected when user inserts new url"""
    mocker.patch("builtins.input", side_effect=["n", "https://goodurl.com"])
    readme_formatter = ReadmeFormat(INVALID_MD)
    return readme_formatter.get_new_url_from_user(readme_url)
def get_new_url_from_user_skip(mocker, readme_url: ReadmeUrl) -> Optional[str]:
    """Check if new url is as expected when user asks to skip"""
    mocker.patch("builtins.input", side_effect=["n", ""])
    readme_formatter = ReadmeFormat(INVALID_MD)
    return readme_formatter.get_new_url_from_user(readme_url)
class TestReadmeFormat:
@pytest.mark.parametrize(
"regex_relative_url,new_url,expected_link",
(
(
["[invalid relative 2]", "www.relative2.com", True],
"https://new.com",
"[invalid relative 2](https://new.com)",
),
(
['<a href="www.hreftesting.com"', "www.hreftesting.com", False],
"https://new.com",
'<a href="https://new.com"',
),
),
)
def test_replace_url_in_content(
self, regex_relative_url: list, new_url: str, expected_link: str
):
"""
Given
- A README file , and a relative url link found in it.
When
- Run replace_url_in_content on it
Then
- Ensure the url changes to the expected output.
"""
readme_formatter = ReadmeFormat(INVALID_MD)
readme_url = ReadmeUrl(
regex_relative_url[0], regex_relative_url[1], regex_relative_url[2]
)
readme_formatter.replace_url_in_content(readme_url, new_url)
assert expected_link in readme_formatter.readme_content
@pytest.mark.parametrize(
"relative_url",
(
(["[invalid relative 1]", " relative1.com", True]),
(["[invalid relative 2]", "www.relative2.com", True]),
(['<a href="www.hreftesting.com"', "www.hreftesting.com", False]),
(['<a href="www.hreftesting.com "', "www.hreftesting.com ", False]),
),
)
def METHOD_NAME(self, mocker, relative_url: list):
"""
Given
- A relative url, sometimes with trailing spaces.
check the following scenarios-
(A) - assume-yes flag is on.
(B) - request to add prefix.
(C) - request to change url.
(D) - request to skip.
When
- Run get_new_url_from_user.
Then
- Ensure the new url is as expected.
(A) - https:// is added to address.
(B) - https:// is added to address.
(C) - New url is returned.
(D) - None is returned.
"""
stripped_url = str.strip(relative_url[1])
readme_url = ReadmeUrl(relative_url[0], relative_url[1], relative_url[2])
assert get_new_url_from_user_assume_yes(readme_url) == f"https://{stripped_url}"
assert (
get_new_url_from_user_add_prefix(mocker, readme_url)
== f"https://{stripped_url}"
)
assert (
get_new_url_from_user_change_url(mocker, readme_url)
== "https://goodurl.com"
)
assert get_new_url_from_user_skip(mocker, readme_url) is None |
4,987 | test sparse tfidf retriever regexp | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.agents import create_agent_from_model_file
import parlai.utils.testing as testing_utils
import os
import unittest
SKIP_TESTS = False
try:
from parlai.agents.tfidf_retriever.tfidf_retriever import ( # noqa: F401
TfidfRetrieverAgent,
)
except ImportError:
SKIP_TESTS = True
class TestTfidfRetriever(unittest.TestCase):
"""
Basic tests on the display_data.py example.
"""
@unittest.skipIf(SKIP_TESTS, "Missing Tfidf dependencies.")
def test_sparse_tfidf_multiworkers(self):
with testing_utils.tempdir() as tmpdir:
MODEL_FILE = os.path.join(tmpdir, 'tmp_test_babi')
testing_utils.train_model(
dict(
model='tfidf_retriever',
task='babi:task1k:1',
model_file=MODEL_FILE,
retriever_numworkers=4,
retriever_hashsize=2**8,
retriever_tokenizer='simple',
datatype='train:ordered',
batchsize=1,
num_epochs=1,
)
)
agent = create_agent_from_model_file(MODEL_FILE)
obs = {
'text': (
'Mary moved to the bathroom. John went to the hallway. '
'Where is Mary?'
),
'episode_done': True,
}
agent.observe(obs)
reply = agent.act()
assert reply['text'] == 'bathroom'
ANS = 'The one true label.'
new_example = {
'text': 'A bunch of new words that are not in the other task, '
'which the model should be able to use to identify '
'this label.',
'labels': [ANS],
'episode_done': True,
}
agent.observe(new_example)
reply = agent.act()
assert 'text' in reply and reply['text'] == ANS
new_example.pop('labels')
agent.observe(new_example)
reply = agent.act()
assert reply['text'] == ANS
@unittest.skipIf(SKIP_TESTS, "Missing Tfidf dependencies.")
def test_sparse_tfidf_retriever_singlethread(self):
with testing_utils.tempdir() as tmpdir:
MODEL_FILE = os.path.join(tmpdir, 'tmp_test_babi')
testing_utils.train_model(
dict(
model='tfidf_retriever',
task='babi:task1k:1',
model_file=MODEL_FILE,
retriever_numworkers=1,
retriever_hashsize=2**8,
retriever_tokenizer='simple',
datatype='train:ordered',
batchsize=1,
num_epochs=1,
)
)
agent = create_agent_from_model_file(MODEL_FILE)
obs = {
'text': (
'Mary moved to the bathroom. John went to the hallway. '
'Where is Mary?'
),
'episode_done': True,
}
agent.observe(obs)
reply = agent.act()
assert reply['text'] == 'bathroom'
ANS = 'The one true label.'
new_example = {
'text': 'A bunch of new words that are not in the other task, '
'which the model should be able to use to identify '
'this label.',
'labels': [ANS],
'episode_done': True,
}
agent.observe(new_example)
reply = agent.act()
assert 'text' in reply and reply['text'] == ANS
new_example.pop('labels')
agent.observe(new_example)
reply = agent.act()
assert reply['text'] == ANS
@unittest.skipIf(SKIP_TESTS, "Missing Tfidf dependencies.")
def METHOD_NAME(self):
with testing_utils.tempdir() as tmpdir:
MODEL_FILE = os.path.join(tmpdir, 'tmp_test_babi')
testing_utils.train_model(
dict(
model='tfidf_retriever',
task='babi:task1k:1',
model_file=MODEL_FILE,
retriever_tokenizer='regexp',
retriever_numworkers=4,
retriever_hashsize=2**8,
datatype='train:ordered',
batchsize=1,
num_epochs=1,
)
)
agent = create_agent_from_model_file(MODEL_FILE)
obs = {
'text': (
'Mary moved to the bathroom. John went to the hallway. '
'Where is Mary?'
),
'episode_done': True,
}
agent.observe(obs)
reply = agent.act()
assert reply['text'] == 'bathroom'
ANS = 'The one true label.'
new_example = {
'text': 'A bunch of new words that are not in the other task, '
'which the model should be able to use to identify '
'this label.',
'labels': [ANS],
'episode_done': True,
}
agent.observe(new_example)
reply = agent.act()
assert 'text' in reply and reply['text'] == ANS
new_example.pop('labels')
agent.observe(new_example)
reply = agent.act()
assert reply['text'] == ANS
if __name__ == '__main__':
unittest.main() |
4,988 | has file | #!/usr/bin/env python3
# ==============================================================================
#
# Copyright (C) 2022 Sophgo Technologies Inc. All rights reserved.
#
# TPU-MLIR is licensed under the 2-Clause BSD License except for the
# third-party components.
#
# ==============================================================================
from xmlrpc.server import SimpleXMLRPCServer
import subprocess
import hashlib
import os
import importlib
import pprint
import atexit
import shutil
import datetime
import argparse
def check_health(md5, buffer):
return md5 == hashlib.md5(buffer.data).hexdigest()
def now():
return str(datetime.datetime.now())
def time_obj(time_str):
return datetime.datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S.%f")
class FileRecorder:
_folder = "./.cache/soc_rpc/"
_recorder = os.path.join(_folder, "recorder.py")
_max_capacity = 2**31
_age = 2.0**1023
__slots__ = ("record", "size")
    # {md5: {name, date, size, query, age}}
def __init__(self) -> None:
if os.path.exists(self._recorder):
loader = importlib.machinery.SourceFileLoader("record", self._recorder) # type: ignore
spec = importlib.util.spec_from_loader("record", loader) # type: ignore
recorder = importlib.util.module_from_spec(spec) # type: ignore
loader.exec_module(recorder)
self.record = recorder.record
else:
os.makedirs(os.path.dirname(self._recorder), exist_ok=True)
self.record = {}
self.size = sum((x["size"] for x in self.record.values()))
self.eviction()
def __contains__(self, key):
if key in self.record:
value = self.record[key]
value["query"] += 1
value["date"] = now()
self.__aging()
value["age"] += self._age
return True
return False
def __getitem__(self, md5):
return os.path.join(self._folder, md5)
def __setitem__(self, md5, file):
name, buffer = file
        assert check_health(md5, buffer), "md5 does not match; file is corrupt."
name = os.path.basename(name)
size = len(buffer.data)
self.size += size
with open(os.path.join(self._folder, md5), "wb") as handle:
handle.write(buffer.data)
self.record[md5] = {
"name": name,
"date": now(), # last use time
"size": size,
"query": 0,
"age": self._age,
}
self.__save()
def __save(self):
with open(self._recorder, "w") as fb:
fb.write(f"# {now()}\n\n")
fb.write("record = ")
pprint.pprint(self.record, width=80, stream=fb)
def __aging(self):
# https://en.wikipedia.org/wiki/Page_replacement_algorithm#Aging
for v in self.record.values():
v["age"] /= 2
def is_full(self):
return self.size > self._max_capacity
def eviction(self):
# maintain file recorder
# remove folder
print("Maintain files.")
file_alive = set()
for f in os.listdir(self._folder):
_f = os.path.join(self._folder, f)
if os.path.isdir(_f):
try:
shutil.rmtree(_f)
except:
pass
continue
if f not in self.record:
os.remove(_f)
continue
file_alive.add(f)
invalid_record = self.record.keys() - file_alive
for k in invalid_record:
self.size -= self.record[k]["size"]
del self.record[k]
if not self.is_full():
self.__save()
return
# remove some files if storage shortage
for k, _ in sorted(self.record.items(), key=lambda x: x[1]["age"]):
os.remove(os.path.join(self._folder, k))
self.size -= self.record[k]["size"]
del self.record[k]
if self.size < self._max_capacity:
break
self.__save()
files = FileRecorder()
@atexit.register
def save_cache():
files.eviction()
print("record saved. Goodbye")
def receive_file(md5, name, buffer):
if files.is_full():
files.eviction()
files[md5] = (name, buffer)
print(f"received file '{name}'")
def METHOD_NAME(md5):
return md5 in files
def xor_md5(md5s):
xormd5 = int(md5s[0], base=16)
for m in md5s[1:]:
xormd5 ^= int(m, base=16)
return f"{xormd5:x}"
def build_dir(md5):
# use XOR MD5 as the folder name to keep it stable.
folder_name = xor_md5(list(md5.values()))
folder = os.path.join(FileRecorder._folder, folder_name)
if os.path.exists(folder):
print(f"folder {folder} exists.")
return folder_name
os.makedirs(folder)
for name, md5 in md5.items():
os.symlink(f"../{md5}", os.path.join(folder, name))
return folder_name
def run_command(cmd_fmt: str, *md5):
if all(md5):
cmd_fmt = cmd_fmt.format(*(files[x] for x in md5))
out = subprocess.run(
cmd_fmt,
shell=True,
capture_output=True,
)
return out
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--port",
type=int,
default=8000,
dest="port",
help="server port number, default:8000",
)
args = parser.parse_args()
server = SimpleXMLRPCServer(("0.0.0.0", args.port), allow_none=True)
print(f"Listening on port {args.port}...")
    server.register_function(receive_file, "send_file")
server.register_function(build_dir, "build_dir")
server.register_function(run_command, "run") # type: ignore
server.register_function(METHOD_NAME, "has_file")
server.serve_forever() |
4,989 | preprocess | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Dict
import numpy
import torch
from torch import device as device_class
from ultralytics.yolo.utils import LOGGER
__all__ = ["export_sample_inputs_outputs"]
# define the priority order for the execution providers
# prefer CUDA Execution Provider over CPU Execution Provider
EP_list = ["CUDAExecutionProvider", "CPUExecutionProvider"]
def METHOD_NAME(
batch: Dict[str, Any], device: device_class, half: bool = False
) -> Dict[str, Any]:
"""
Ported from
https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/v8/detect/val.py
"""
batch["img"] = batch["img"].to(device, non_blocking=True)
batch["img"] = (batch["img"].half() if half else batch["img"].float()) / 255
for k in ["batch_idx", "cls", "bboxes"]:
batch[k] = batch[k].to(device)
return batch
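# Illustrative effect: a uint8 image batch of shape (N, 3, H, W) comes back
# as float (or half) scaled to [0, 1] on `device`, with the "batch_idx",
# "cls" and "bboxes" tensors moved alongside it so output generation stays
# on a single device.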
@torch.no_grad()
def export_sample_inputs_outputs(
data_loader: torch.utils.data.DataLoader,
model: torch.nn.Module,
save_dir: str,
device: device_class,
number_export_samples: int,
onnx_path: str,
):
"""
Export sample model input and output for testing with the DeepSparse Engine
:param data_loader: path to data loader to take samples from
:param model: model to be exported. Used to generate torch outputs
:param save_dir: directory to save samples to
:param device: device to run the inference (output generation) on
:param number_export_samples: number of samples to export
:param onnx_path: path to onnx model. Used to generate ORT outputs
"""
try:
import onnxruntime
except (ImportError, ModuleNotFoundError) as exception:
raise ValueError(
"onnxruntime is needed to export samples for validation, but the "
"module was not found, try `pip install sparseml[onnxruntime]`"
) from exception
LOGGER.info(
f"Exporting {number_export_samples} sample model inputs and outputs for "
"testing with the DeepSparse Engine"
)
exported_samples = 0
# Sample export directories
sample_in_dir = os.path.join(save_dir, "sample_inputs")
sample_out_dir_torch = os.path.join(save_dir, "sample_outputs_torch")
sample_out_dir_ort = os.path.join(save_dir, "sample_outputs_onnxruntime")
os.makedirs(sample_in_dir, exist_ok=True)
os.makedirs(sample_out_dir_torch, exist_ok=True)
os.makedirs(sample_out_dir_ort, exist_ok=True)
save_inputs_as_uint8 = _graph_has_uint8_inputs(onnx_path) if onnx_path else False
# Prepare model for inference
model = model.to(device)
model.eval()
# Prepare onnxruntime engine for inference
ort_session = onnxruntime.InferenceSession(onnx_path, providers=EP_list)
LOGGER.info(f"Exporting sample inputs to directory {sample_in_dir}")
LOGGER.info(f"Exporting sample torch outputs to directory {sample_out_dir_torch}")
LOGGER.info(
f"Exporting sample onnxruntime outputs to directory {sample_out_dir_ort}"
)
for batch in data_loader:
file_idx = f"{exported_samples}".zfill(4)
preprocessed_batch = METHOD_NAME(batch=batch, device=device)
image = preprocessed_batch["img"]
# Save torch outputs as numpy array
_export_torch_outputs(image, model, sample_out_dir_torch, file_idx)
# Convert input data type if needed
if save_inputs_as_uint8:
image = (255 * image).to(dtype=torch.uint8)
# Save inputs as numpy array
_export_inputs(image, sample_in_dir, file_idx)
# Save onnxruntime outputs as numpy array
_export_ort_outputs(
image.cpu().numpy(), ort_session, sample_out_dir_ort, file_idx
)
exported_samples += 1
if exported_samples >= number_export_samples:
break
    if exported_samples < number_export_samples:
        LOGGER.warning(
            f"Could not export {number_export_samples} samples. Exhausted dataloader "
            f"and exported {exported_samples} samples"
        )
LOGGER.info(
f"Completed the export of {number_export_samples} "
f"input/output samples to {save_dir}"
)
def _export_torch_outputs(
image: torch.Tensor, model: torch.nn.Module, sample_out_dir: str, file_idx: str
):
# Run model to get torch outputs
model_out = model(image)
preds = model_out
sample_output_filename = os.path.join(sample_out_dir, f"out-{file_idx}.npz")
seg_prediction = None
# Move to cpu for exporting
# Segmentation currently supports two outputs
if isinstance(preds, tuple):
preds_out = preds[0].detach().to("cpu")
seg_prediction = preds[1].detach().to("cpu")
else:
preds_out = preds.detach().to("cpu")
numpy.savez(sample_output_filename, preds_out, seg_prediction=seg_prediction)
def _export_ort_outputs(
image: numpy.ndarray,
session: "onnxruntime.InferenceSession", # noqa: F821
sample_out_dir: str,
file_idx: str,
):
# Run model to get onnxruntime outputs
ort_inputs = {session.get_inputs()[0].name: image}
ort_outs = session.run(None, ort_inputs)
preds = ort_outs
seg_prediction = None
if len(preds) > 1:
preds_out = preds[0]
seg_prediction = preds[1]
else:
preds_out = preds[0]
preds_out = numpy.squeeze(preds_out, axis=0)
sample_output_filename = os.path.join(sample_out_dir, f"out-{file_idx}.npz")
numpy.savez(sample_output_filename, preds_out, seg_prediction=seg_prediction)
def _export_inputs(image: torch.Tensor, sample_in_dir: str, file_idx: str):
sample_in = image.detach().to("cpu").squeeze(0)
sample_input_filename = os.path.join(sample_in_dir, f"inp-{file_idx}.npz")
numpy.savez(sample_input_filename, sample_in)
def _graph_has_uint8_inputs(onnx_path: str) -> bool:
"""
Load onnx model and check if it's input is type 2 (unit8)
"""
import onnx
onnx_model = onnx.load(str(onnx_path))
return onnx_model.graph.input[0].type.tensor_type.elem_type == 2 |
4,990 | show | from castle.cms.interfaces import IReferenceNamedImage
from castle.cms.interfaces import IVersionViewLayer
from DateTime import DateTime
from lxml.html import fromstring
from lxml.html import tostring
from plone import api
from plone.app.layout.viewlets.content import ContentHistoryView as BaseContentHistoryView
from plone.app.layout.viewlets.content import HistoryByLineView
from plone.memoize.view import memoize
from plone.protect.interfaces import IDisableCSRFProtection
from Products.CMFEditions.browser.diff import DiffView
from Products.CMFPlone.browser.syndication.adapters import SearchFeed
from Products.CMFPlone.interfaces.syndication import IFeedItem
from Products.CMFPlone.resources import add_resource_on_request
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.component import queryMultiAdapter
from zope.interface import alsoProvides
from zope.interface import noLongerProvides
import json
class HistoryView(HistoryByLineView):
def __call__(self):
# utility function to add resource to rendered page
add_resource_on_request(self.request, 'castle-components-history')
return super(HistoryView, self).__call__()
index = ViewPageTemplateFile('templates/history_view.pt')
def METHOD_NAME(self):
"""
just use permission checking
"""
return True
def show_history(self):
"""
        see comment for previous method
"""
return True
class ContentHistoryView(BaseContentHistoryView):
index = ViewPageTemplateFile("templates/content_history.pt")
class HistoryVersionView(DiffView):
template = ViewPageTemplateFile("templates/history_version.pt")
def getContent(self, version):
alsoProvides(self.request, IVersionViewLayer)
feed = SearchFeed(api.portal.get())
adapter = queryMultiAdapter((version, feed), IFeedItem)
content = adapter.render_content_core().strip()
noLongerProvides(self.request, IVersionViewLayer)
if not content:
# try old fashioned way... bah!
content = version.restrictedTraverse(version.defaultView())()
dom = fromstring(content)
return ''.join([tostring(el) for el in dom.cssselect('#content-core > *')])
else:
dom = fromstring(content)
return ''.join([tostring(el) for el in dom.cssselect('[data-panel] > *')])
@property
@memoize
def versions(self):
version_history = []
rt = api.portal.get_tool("portal_repository")
history = rt.getHistoryMetadata(self.context)
retrieve = history.retrieve
getId = history.getVersionId
# Count backwards from most recent to least recent
for i in xrange(history.getLength(countPurged=False) - 1, -1, -1):
version_id = getId(i, countPurged=False)
data = retrieve(i, countPurged=False)
meta = data["metadata"]["sys_metadata"]
version_history.append({
'dt': DateTime(meta['timestamp']),
'comments': meta['comment'],
'version_id': version_id
})
return version_history
def __call__(self):
# utility function to add resource to rendered page
add_resource_on_request(self.request, 'castle-components-history')
alsoProvides(self.request, IDisableCSRFProtection)
self.version_info = None
for version in self.versions:
if str(version['version_id']) == self.request.form.get('version'):
self.version_info = version
return self.template()
def get_referenced_image(self, obj):
if IReferenceNamedImage.providedBy(obj.image):
catalog = api.portal.get_tool('portal_catalog')
brains = catalog.unrestrictedSearchResults(UID=self.context.image.reference)
if len(brains) > 0:
return brains[0].getObject()
class UpdateComment(BrowserView):
def __call__(self):
self.request.response.setHeader('Content-type', 'application/json')
rt = api.portal.get_tool("portal_repository")
history = rt.getHistoryMetadata(self.context)
version_id = self.request.form.get('version_id')
comments = self.request.form.get('comments', '')
if not version_id or not version_id.isdigit():
return json.dumps({
'success': False,
'error': 'Must provide a valid version id'
})
version_id = int(version_id)
version = history.retrieve(version_id)
if version is None:
return json.dumps({
'success': False,
'error': 'Can not find version'
})
if version_id not in history._full:
return json.dumps({
'success': False,
'error': 'Can not find version metadata'
})
try:
history._full[version_id]['metadata']['sys_metadata']['comment'] = comments
history._full._p_changed = True
except KeyError:
return json.dumps({
'success': False,
'error': 'Can not find version metadata'
})
return json.dumps({
'success': True,
'comment': comments
}) |
4,991 | final commit id | # Copyright 2010-2023 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
# Import from pygit2
from .ffi import ffi, C
from .utils import GenericIterator
from ._pygit2 import Signature, Oid
def wrap_signature(csig):
if not csig:
return None
return Signature(ffi.string(csig.name).decode('utf-8'),
ffi.string(csig.email).decode('utf-8'),
csig.when.time, csig.when.offset, 'utf-8')
class BlameHunk:
@classmethod
def _from_c(cls, blame, ptr):
hunk = cls.__new__(cls)
hunk._blame = blame
hunk._hunk = ptr
return hunk
@property
def lines_in_hunk(self):
"""Number of lines"""
return self._hunk.lines_in_hunk
@property
def boundary(self):
"""Tracked to a boundary commit"""
# Casting directly to bool via cffi does not seem to work
return int(ffi.cast('int', self._hunk.boundary)) != 0
@property
def final_start_line_number(self):
"""Final start line number"""
return self._hunk.final_start_line_number
@property
def final_committer(self):
"""Final committer"""
return wrap_signature(self._hunk.final_signature)
@property
    def METHOD_NAME(self):
        """Final commit id"""
        return Oid(raw=bytes(ffi.buffer(ffi.addressof(self._hunk, 'final_commit_id'))[:]))
@property
def orig_start_line_number(self):
"""Origin start line number"""
return self._hunk.orig_start_line_number
@property
def orig_committer(self):
"""Original committer"""
return wrap_signature(self._hunk.orig_signature)
@property
    def orig_commit_id(self):
        """Original commit id"""
        return Oid(raw=bytes(ffi.buffer(ffi.addressof(self._hunk, 'orig_commit_id'))[:]))
@property
def orig_path(self):
"""Original path"""
path = self._hunk.orig_path
if not path:
return None
return ffi.string(path).decode('utf-8')
class Blame:
@classmethod
def _from_c(cls, repo, ptr):
blame = cls.__new__(cls)
blame._repo = repo
blame._blame = ptr
return blame
def __del__(self):
C.git_blame_free(self._blame)
def __len__(self):
return C.git_blame_get_hunk_count(self._blame)
def __getitem__(self, index):
chunk = C.git_blame_get_hunk_byindex(self._blame, index)
if not chunk:
raise IndexError
return BlameHunk._from_c(self, chunk)
def for_line(self, line_no):
"""
Returns the <BlameHunk> object for a given line given its number in the
current Blame.
Parameters:
line_no
Line number, starts at 1.
"""
if line_no < 0:
raise IndexError
chunk = C.git_blame_get_hunk_byline(self._blame, line_no)
if not chunk:
raise IndexError
return BlameHunk._from_c(self, chunk)
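    # Illustrative usage (repository and path are hypothetical):
    #   blame = repo.blame('src/module.py')
    #   hunk = blame.for_line(10)
    #   print(hunk.final_committer, hunk.orig_path)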
def __iter__(self):
return GenericIterator(self) |
4,992 | get last page | import logging
import multiprocessing
import os
import re
import shutil
import tempfile
import zipfile
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from dimagi.utils.parsing import string_to_utc_datetime
from corehq.apps.export.const import FORM_EXPORT
from corehq.apps.export.dbaccessors import get_properly_wrapped_export_instance
from corehq.apps.export.export import get_export_size
from corehq.apps.export.filters import NOT, TermFilter
from corehq.apps.export.forms import FormExportFilterBuilder
from corehq.apps.export.models import MAIN_TABLE, PathNode
from corehq.apps.export.multiprocess import (
MultiprocessExporter,
OutputPaginator,
run_multiprocess_exporter,
)
from corehq.util.files import safe_filename
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Rebuild a saved export using multiple processes"
def add_arguments(self, parser):
parser.add_argument('export_id')
parser.add_argument('-d', '--download_path', help="Path to download export to.")
parser.add_argument(
'--processes',
type=int,
dest='processes',
default=multiprocessing.cpu_count() - 1,
help='Number of parallel processes to run.'
)
def handle(self, export_id, **options):
export_instance = get_properly_wrapped_export_instance(export_id)
if export_instance.type != FORM_EXPORT:
raise CommandError("Unsupported export type: %s" % export_instance.type)
filters = export_instance.get_filters()
if any(isinstance(filter_, FormExportFilterBuilder.date_filter_class) for filter_ in filters):
raise CommandError("Export already has a date filter and so must be fully rebuilt.")
export_archive_path = download_export(export_instance, download_path=options.get('download_path'))
last_run_meta = get_last_run_meta(export_instance, export_archive_path)
last_form_id, last_form_received_on, last_page_number = last_run_meta
print("Exporting data since '%s'" % last_form_received_on)
filters.append(FormExportFilterBuilder.date_filter_class(gt=last_form_received_on))
if last_form_id:
filters.append(NOT(TermFilter('_id', last_form_id)))
total_docs = get_export_size(export_instance, filters)
exporter = MultiprocessExporter(
export_instance, total_docs, options['processes'],
existing_archive_path=options['download_path'], keep_file=True
)
paginator = OutputPaginator(export_id, last_page_number + 1)
logger.info('Starting data dump of {} docs'.format(total_docs))
run_multiprocess_exporter(exporter, filters, paginator, 1000000)
def get_last_run_meta(export_instance, export_archive_path):
main_table = export_instance.get_table(MAIN_TABLE)
received_on_column_index, received_on_column = main_table.get_column([PathNode(name="received_on")],
'ExportItem', None)
if not received_on_column:
raise CommandError("Export does not contain a field appropriate for finding the last exported date.")
form_id_column_index, form_id_column = main_table.get_column(
[
PathNode(name='form'),
PathNode(name='meta'),
PathNode(name='instanceID')
], 'ExportItem', None
)
if form_id_column_index is None:
print("WARNING: unable to get last form ID. Export may contain a duplicate form")
last_page_path = METHOD_NAME(export_archive_path, main_table.label)
if last_page_path:
folder, filename = last_page_path.rsplit('/', 1)
matcher = re.match(r'(\d+)_.*', filename)
last_page_number = int(matcher.group(1))
else:
last_page_number = 0
last_form_id, date_col_string = _get_column_value_from_last_line(
last_page_path, form_id_column_index, received_on_column_index
)
last_form_received_on = string_to_utc_datetime(date_col_string)
return (
last_form_id,
last_form_received_on,
last_page_number
)
def _get_column_value_from_last_line(page_path, form_id_index, date_index):
last_line = _get_last_line(page_path)
fields = last_line.strip().split(',')
return fields[form_id_index] if form_id_index is not None else None, fields[date_index]
def METHOD_NAME(export_archive_path, table_label):
extract_to = tempfile.mkdtemp()
with zipfile.ZipFile(export_archive_path, 'r') as zipref:
# skip first since that will be page 0
for member in sorted(zipref.namelist(), reverse=True)[1:5]:
folder, filename = member.rsplit('/', 1)
if re.match(r'(\d+_)?%s.csv' % table_label, filename):
zipref.extract(member, extract_to)
return os.path.join(extract_to, member)
def _get_last_line(file_path):
"""
https://stackoverflow.com/a/18603065/632517
"""
with open(file_path, "rb") as f:
f.seek(-2, os.SEEK_END) # Jump to the second last byte.
while f.read(1) != b"\n": # Until EOL is found...
f.seek(-2, os.SEEK_CUR) # ...jump back the read byte plus one more.
        # Decode so callers can split on ',' (the file was opened in binary mode).
        return f.readline().decode('utf-8')
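# Illustrative trace: for a page ending "a,b\nc,d\n", the loop seeks backward
# from EOF until it steps past the newline that terminates "a,b", after which
# readline() yields the final "c,d" row without loading a potentially
# multi-gigabyte export page into memory.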
def download_export(export_instance, download_path=None):
if not download_path:
export_archive_path = '{}_{}.zip'.format(
safe_filename(export_instance.name.encode('ascii', 'replace') or 'Export'),
datetime.utcnow().isoformat()
)
download_path = os.path.join(settings.SHARED_DRIVE_ROOT, export_archive_path)
if not os.path.exists(download_path):
payload = export_instance.get_payload(stream=True)
with open(download_path, 'wb') as download:
shutil.copyfileobj(payload, download)
return download_path |
4,993 | raises type err | # Copyright (c) 2021, 2021, Oracle and/or its affiliates.
# Copyright (C) 1996-2020 Python Software Foundation
#
# Licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
def test_class_attr_change():
class A(object):
counter = 0
for i in range(10):
A.counter += 1
assert A.counter == 10
def test_class_attr_deleted():
class A(object):
counter = 0
class B(A):
counter = 1
for i in range(10):
B.counter += 1
assert B.counter == 11
assert A.counter == 0
del B.counter
assert B.counter == 0
for i in range(10):
A.counter += 1
assert A.counter == 10
def test_class_attr_added():
class A(object):
counter = 0
class B(A):
pass
for i in range(10):
B.counter += 1
assert B.counter == 10
assert A.counter == 0
B.counter = 1
assert B.counter == 1
for i in range(10):
A.counter += 1
assert A.counter == 10
def test_class_attr_add_del():
class A:
foo = 1
class B(A):
foo = 2
class C(B):
foo = 3
C.foo += 1
C.foo += 1
C.foo += 1
C.foo += 1
C.foo += 1
C.foo += 1
C.foo += 1
assert C.foo == 10
del C.foo
assert C.foo == 2
del B.foo
assert C.foo == 1
B.foo = 5
assert C.foo == 5
C.foo = 10
assert C.foo == 10
def test_class_assignment():
class A:
foo = 1
class B(A):
foo = 2
a = A()
assert a.foo == 1
a.__class__ = B
assert a.foo == 2
b = B()
assert b.foo == 2
b.__class__ = A
assert b.foo == 1
assert type(a) == B
assert type(b) == A
try:
a.__class__ = 1
except TypeError:
assert True
else:
assert False
try:
a.__class__ = object
except TypeError:
assert True
else:
assert False
try:
object().__class__ = object
except TypeError:
assert True
else:
assert False
def test_class_slots():
class X():
__slots__ = "_local__impl", "__dict__"
def __init__(self):
self._local__impl = 1
self.foo = 12
self.__dict__ = {"bar": 42}
assert X().bar == 42
assert X()._local__impl == 1
try:
X().foo
except AttributeError:
assert True
else:
assert False
x = X()
x.foo = 1
assert x.foo == 1
assert x.__dict__["foo"] == 1
x.__dict__["_local__impl"] = 22
assert x._local__impl == 1
assert X.__dict__["_local__impl"].__get__(x, type(x)) == 1
def test_class_with_slots_assignment():
class X():
__slots__ = "a", "b"
class Y():
__slots__ = "a", "b"
class Z():
__slots__ = "b", "c"
x = X()
x.__class__ = Y
assert type(x) == Y
try:
x.__class__ = Z
except TypeError as e:
assert True
else:
assert False
def test_mro_change_on_attr_access():
eq_called = []
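# The dict keys below abuse __hash__/__eq__ so that the key comparison done
# during an attribute lookup mutates X.__bases__ mid-lookup; the asserts then
# check which MRO the lookup ends up observing.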
class MyKey(object):
def __hash__(self):
return hash('mykey')
def __eq__(self, other):
eq_called.append(1)
X.__bases__ = (Base2,)
class Base(object):
mykey = 'base 42'
class Base2(object):
mykey = 'base2 42'
X = type('X', (Base,), {MyKey(): 5})
assert X.mykey == 'base 42'
assert eq_called == [1]
# ----------------------------------
class MyKey(object):
def __hash__(self):
return hash('mykey')
def __eq__(self, other):
X.__bases__ = (Base,)
class Base(object):
pass
class Base2(object):
mykey = '42'
X = type('X', (Base,Base2,), {MyKey(): 5})
mk = X.mykey
assert mk == '42'
X = type('X', (Base2,), {MyKey(): 5})
assert X.mykey == '42'
# ----------------------------------
class Base(object):
mykey = 'from Base2'
class Base2(object):
pass
X = type('X', (Base2,), {MyKey(): 5})
try:
assert X.mykey == '42'
except AttributeError as e:
assert True
else:
assert False
def test_subclass_propagation():
# Test taken from CPython's test_descr, but modified to use non-slot attributes,
# which are also interesting on GraalPython in combination with MRO shapes.
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(B, C):
pass
def assert_hash_raises_type_error(x):
try:
call_hash(x)
except TypeError as e:
pass
else:
assert False
# This will make the call monomorphic
def call_hash(x):
return x.myhash()
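# range(1, 3) runs the sequence twice, presumably so the second pass hits
# whatever call-site caches the first pass populated (the test itself does
# not say, so treat this rationale as an assumption)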
for i in range(1,3):
d = D()
A.myhash = lambda self: 42
assert call_hash(d) == 42
C.myhash = lambda self: 314
assert call_hash(d) == 314
B.myhash = lambda self: 144
assert call_hash(d) == 144
D.myhash = lambda self: 100
assert call_hash(d) == 100
D.myhash = None
assert_hash_raises_type_error(d)
del D.myhash
assert call_hash(d) == 144
B.myhash = None
assert_hash_raises_type_error(d)
del B.myhash
assert call_hash(d) == 314
C.myhash = None
assert_hash_raises_type_error(d)
del C.myhash
assert call_hash(d) == 42
A.myhash = None
assert_hash_raises_type_error(d)
def test_slots_mismatch():
# NOTE: this is less a test of well-defined Python behavior that we want to support
# and more a stress test checking that, in some weird corner cases, we do not fail
# with internal errors or produce clearly incorrect results.
def METHOD_NAME(code):
try:
code()
except TypeError:
pass
else:
assert False
class Klass(float):
pass
x = Klass(14)
Klass.__getattribute__ = Klass.__pow__
# Attribute access actually calls __pow__ now, with 2 arguments,
# which should be fine: it will just return NotImplemented
assert x.bar == NotImplemented
Klass.__getattribute__ = float.__setattr__
# __setattr__ requires 3 arguments, calling it via attribute read
# should give argument validation error (TypeError)
METHOD_NAME(lambda: x.bar)
# The same for unary slot __hash__:
# __round__ accepts single argument, but it is a binary builtin
Klass.__hash__ = float.__round__
try:
assert hash(x) == 14
except AssertionError:
raise
except:
# On MacOS & GraalPython this test gives TypeError: 'NoneType' object cannot be interpreted as an int.
# We ignore this for now; what matters is that we do not give a wrong result and do not fail with some internal error.
pass
# __getattribute__ needs both its arguments
Klass.__hash__ = float.__getattribute__
METHOD_NAME(lambda: hash(x))
def test_no_value_and_mro_shape():
class A:
def foo(self):
return 42
class B(A):
pass
B.foo = lambda self: 1
del B.foo
class C(B):
pass
c = C()
assert c.foo() == 42 |
4,994 | outputs | """
cartesian_to_spherical_fc
=========================
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.METHOD_NAME import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class cartesian_to_spherical_fc(Operator):
"""Converts 3D field from cartesian coordinates to spherical coordinates.
Parameters
----------
fields_container : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.geo.cartesian_to_spherical_fc()
>>> # Make input connections
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.geo.cartesian_to_spherical_fc(
... fields_container=my_fields_container,
... )
>>> # Get output data
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(self, fields_container=None, config=None, server=None):
super().__init__(name="cartesian_to_spherical_fc", config=config, server=server)
self._inputs = InputsCartesianToSphericalFc(self)
self._outputs = OutputsCartesianToSphericalFc(self)
if fields_container is not None:
self.inputs.fields_container.connect(fields_container)
@staticmethod
def _spec():
description = (
"""Converts 3D field from cartesian coordinates to spherical coordinates."""
)
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="fields_container",
type_names=["fields_container"],
optional=False,
document="""""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="fields_container",
type_names=["fields_container"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to suit the user's needs and used to
instantiate the operator. The Configuration allows customizing
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="cartesian_to_spherical_fc", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsCartesianToSphericalFc
"""
return super().inputs
@property
def METHOD_NAME(self):
"""Enables to get outputs of the operator by evaluating it
Returns
--------
outputs : OutputsCartesianToSphericalFc
"""
return super().METHOD_NAME
class InputsCartesianToSphericalFc(_Inputs):
"""Intermediate class used to connect user inputs to
cartesian_to_spherical_fc operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.cartesian_to_spherical_fc()
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
"""
def __init__(self, op: Operator):
super().__init__(cartesian_to_spherical_fc._spec().inputs, op)
self._fields_container = Input(
cartesian_to_spherical_fc._spec().input_pin(0), 0, op, -1
)
self._inputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to connect fields_container input to the operator.
Parameters
----------
my_fields_container : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.cartesian_to_spherical_fc()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> # or
>>> op.inputs.fields_container(my_fields_container)
"""
return self._fields_container
class OutputsCartesianToSphericalFc(_Outputs):
"""Intermediate class used to get outputs from
cartesian_to_spherical_fc operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.cartesian_to_spherical_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(self, op: Operator):
super().__init__(cartesian_to_spherical_fc._spec().METHOD_NAME, op)
self._fields_container = Output(
cartesian_to_spherical_fc._spec().output_pin(0), 0, op
)
self._outputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to get fields_container output of the operator
Returns
----------
my_fields_container : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.cartesian_to_spherical_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
""" # noqa: E501
return self._fields_container |
4,995 | error format | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"mobile-network pcdp delete",
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete the specified packet core data plane.
:example: Delete Packet Core Data Plane
az mobile-network pcdp delete -g rg -n pcdp-name --pccp-name pccp-name -y
"""
_aaz_info = {
"version": "2022-11-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.mobilenetwork/packetcorecontrolplanes/{}/packetcoredataplanes/{}", "2022-11-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.pccp_name = AAZStrArg(
options=["--pccp-name"],
help="The name of the packet core control plane.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^[a-zA-Z0-9][a-zA-Z0-9_-]*$",
max_length=64,
),
)
_args_schema.pcdp_name = AAZStrArg(
options=["-n", "--name", "--pcdp-name"],
help="The name of the packet core data plane.",
required=True,
id_part="child_name_1",
fmt=AAZStrArgFormat(
pattern="^[a-zA-Z0-9][a-zA-Z0-9_-]*$",
max_length=64,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.PacketCoreDataPlanesDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class PacketCoreDataPlanesDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileNetwork/packetCoreControlPlanes/{packetCoreControlPlaneName}/packetCoreDataPlanes/{packetCoreDataPlaneName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def METHOD_NAME(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"packetCoreControlPlaneName", self.ctx.args.pccp_name,
required=True,
),
**self.serialize_url_param(
"packetCoreDataPlaneName", self.ctx.args.pcdp_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-11-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
4,996 | ini2po | #
# Copyright 2005, 2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Converts additional Mozilla files to properties files.
"""
from io import BytesIO
from translate.convert import prop2po
def inc2prop(lines):
"""convert a .inc file with #defines in it to a properties file"""
yield "# converted from #defines file\n"
for line in lines:
line = line.decode("utf-8")
if line.startswith("# "):
commented = True
line = line.replace("# ", "", 1)
else:
commented = False
if not line.strip():
yield line
elif line.startswith("#define"):
parts = line.replace("#define", "", 1).strip().split(None, 1)
if not parts:
continue
if len(parts) == 1:
key, value = parts[0], ""
else:
key, value = parts
# special case: uncomment MOZ_LANGPACK_CONTRIBUTORS
if key == "MOZ_LANGPACK_CONTRIBUTORS":
commented = False
if commented:
yield "# "
yield f"{key} = {value}\n"
else:
if commented:
yield "# "
yield line
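# A minimal illustration of the transformation (hypothetical input lines,
# not taken from a real .inc file):
#
#   "".join(inc2prop([b"#define SOME_KEY some value\n", b"# a comment\n"]))
#   # -> '# converted from #defines file\nSOME_KEY = some value\n# a comment\n'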
def it2prop(lines, encoding="cp1252"):
"""convert a pseudo-properties .it file to a conventional properties file"""
yield "# converted from pseudo-properties .it file\n"
# differences: ; instead of # for comments
# [section] titles that we replace with # section: comments
for line in lines:
line = line.decode(encoding)
if not line.strip():
yield line
elif line.lstrip().startswith(";"):
yield line.replace(";", "#", 1)
elif line.lstrip().startswith("[") and line.rstrip().endswith("]"):
yield "# section: " + line
else:
yield line
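# Likewise for it2prop (hypothetical input lines):
#
#   "".join(it2prop([b"[Section]\n", b"; note\n"]))
#   # -> '# converted from pseudo-properties .it file\n# section: [Section]\n# note\n'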
def funny2prop(lines, itencoding="cp1252"):
hashstarts = len([line for line in lines if line.startswith(b"#")])  # lines are bytes
if hashstarts:
yield from inc2prop(lines)
else:
yield from it2prop(lines, encoding=itencoding)
def inc2po(
inputfile,
outputfile,
templatefile,
encoding=None,
pot=False,
duplicatestyle="msgctxt",
):
"""wraps prop2po but converts input/template files to properties first"""
inputlines = inputfile.readlines()
inputproplines = list(inc2prop(inputlines))
inputpropfile = BytesIO("".join(inputproplines).encode())
if templatefile is not None:
templatelines = templatefile.readlines()
templateproplines = list(inc2prop(templatelines))
templatepropfile = BytesIO("".join(templateproplines).encode())
else:
templatepropfile = None
return prop2po.convertprop(
inputpropfile,
outputfile,
templatepropfile,
personality="mozilla",
pot=pot,
duplicatestyle=duplicatestyle,
)
def it2po(
inputfile,
outputfile,
templatefile,
encoding="cp1252",
pot=False,
duplicatestyle="msgctxt",
):
"""wraps prop2po but converts input/template files to properties first"""
inputlines = inputfile.readlines()
inputproplines = list(it2prop(inputlines, encoding=encoding))
inputpropfile = BytesIO("".join(inputproplines).encode())
if templatefile is not None:
templatelines = templatefile.readlines()
templateproplines = list(it2prop(templatelines, encoding=encoding))
templatepropfile = BytesIO("".join(templateproplines).encode())
else:
templatepropfile = None
return prop2po.convertprop(
inputpropfile,
outputfile,
templatepropfile,
personality="mozilla",
pot=pot,
duplicatestyle=duplicatestyle,
)
def METHOD_NAME(
inputfile,
outputfile,
templatefile,
encoding="UTF-8",
pot=False,
duplicatestyle="msgctxt",
):
return it2po(
inputfile=inputfile,
outputfile=outputfile,
templatefile=templatefile,
encoding=encoding,
pot=pot,
duplicatestyle=duplicatestyle,
)
def main(argv=None):
import sys
lines = sys.stdin.buffer.readlines()  # read bytes; inc2prop/it2prop decode them
for line in funny2prop(lines):
sys.stdout.write(line)
if __name__ == "__main__":
main() |
4,997 | test curie pattern | # -*- coding: utf-8 -*-
"""Tests for the bioregistry client."""
import unittest
from typing import Iterable, Tuple
import bioregistry
from bioregistry import manager
from bioregistry.resolve import get_external
class TestResolve(unittest.TestCase):
"""Tests for getting Bioregistry content."""
def test_resolve(self):
"""Test prefixes can be resolved properly."""
for expected, query in [
("ncbitaxon", "ncbitaxon"),
("ncbitaxon", "NCBITaxon"),
("ncbitaxon", "taxonomy"),
("scomp", "SCOMP"),
("sfam", "SFAM"),
("eccode", "ec-code"),
("eccode", "EC_CODE"),
("chembl.compound", "chembl.compound"),
("chembl.compound", "chemblcompound"),
("chembl", "chembl"),
]:
with self.subTest(query=query):
self.assertEqual(expected, bioregistry.normalize_prefix(query))
def test_get(self):
"""Test getting content from the bioregistry."""
ncbitaxon_entry = bioregistry.get_resource("ncbitaxon")
self.assertIn("NCBI_Taxon_ID", ncbitaxon_entry.synonyms)
self.assertIsNotNone(get_external("ncbitaxon", "miriam"))
self.assertIsNotNone(get_external("ncbitaxon", "obofoundry"))
self.assertIsNotNone(get_external("ncbitaxon", "ols"))
self.assertIsNotNone(get_external("ncbitaxon", "wikidata"))
def test_validate_true(self):
"""Test that validation returns true."""
tests = [
("eccode", "1"),
("eccode", "1.1"),
("eccode", "1.1.1"),
("eccode", "1.1.1.1"),
("eccode", "1.1.123.1"),
("eccode", "1.1.1.123"),
# Namespace in LUI: Standard rule for upper-casing
("chebi", "24867"),
("chebi", "CHEBI:1234"),
# BANANA (explicit)
(
"vario",
"0376",
), # this showcases the banana problem where the namespace in LUI is weird
("VariO", "0376"),
("did", "sov:WRfXPg8dantKVubE3HX8pw"),
("did", "did:sov:WRfXPg8dantKVubE3HX8pw"),
("go.ref", "0000041"),
("go.ref", "GO_REF:0000041"),
# bananas from OBO
("go", "0000001"),
("go", "GO:0000001"),
("go", "go:0000001"),
# banana are strange
("omim.ps", "PS214100"),
("omim.ps", "214100"),
("OMIMPS", "214100"),
("PS", "214100"),
("PS", "PS214100"),
("agrovoc", "1234"),
("agrovoc", "c_1234"),
]
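# "Banana" above refers to the redundant prefix embedded in the local
# identifier itself (the "CHEBI:" in "CHEBI:1234", the "PS" in "PS214100");
# the loop below uses get_banana()/get_banana_peel() to build both bare and
# banana-prefixed forms of each registry's example identifier.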
for prefix, resource in bioregistry.read_registry().items():
if bioregistry.is_deprecated(prefix):
continue
banana = bioregistry.get_banana(prefix)
if banana is None or bioregistry.has_no_terms(prefix):
continue
peel = resource.get_banana_peel()
example = bioregistry.get_example(prefix)
with self.subTest(prefix=prefix):
if example is None:
self.fail(msg=f"{prefix} has a banana {banana} but is missing an example")
else:
tests.append((prefix, example))
tests.append((prefix, f"{banana}{peel}{example}"))
self.assert_known_identifiers(tests)
def assert_known_identifiers(self, examples: Iterable[Tuple[str, str]]) -> None:
"""Validate the examples."""
for prefix, identifier in examples:
with self.subTest(prefix=prefix, identifier=identifier):
self.assertTrue(
bioregistry.is_standardizable_identifier(prefix, identifier),
msg=f"CURIE {prefix}:{identifier} does not loosely match {bioregistry.get_pattern(prefix)}",
)
def test_validate_false(self):
"""Test that validation returns false."""
for prefix, identifier in [
("chebi", "A1234"),
("chebi", "GO:A1234"),
]:
with self.subTest(prefix=prefix, identifier=identifier):
self.assertFalse(bioregistry.is_standardizable_identifier(prefix, identifier))
def test_lui(self):
"""Test the LUI makes sense (spoilers, they don't).
Discussion is ongoing at:
- https://github.com/identifiers-org/identifiers-org.github.io/issues/151
"""
for prefix in bioregistry.read_registry():
if not bioregistry.get_namespace_in_lui(prefix):
continue
if bioregistry.get_banana(prefix):
continue # rewrite rules are applied to prefixes with bananas
if prefix in {"ark", "obi"}:
continue # these patterns on identifiers.org are garbage
with self.subTest(prefix=prefix):
re_pattern = bioregistry.get_pattern(prefix)
miriam_prefix = bioregistry.get_identifiers_org_prefix(prefix)
self.assertTrue(
re_pattern.startswith(f"^{miriam_prefix.upper()}")
or re_pattern.startswith(miriam_prefix.upper()),
msg=f"{prefix} pattern: {re_pattern}",
)
def METHOD_NAME(self):
"""Test CURIE pattern.
.. seealso:: https://github.com/biopragmatics/bioregistry/issues/245
"""
self.assertEqual("^chebi:\\d+$", bioregistry.get_curie_pattern("chebi"))
self.assertEqual("^CHEBI:\\d+$", bioregistry.get_curie_pattern("chebi", use_preferred=True))
self.assertEqual(
"^chembl\\.compound:CHEMBL\\d+$", bioregistry.get_curie_pattern("chembl.compound")
)
pattern = bioregistry.get_curie_pattern("panther.pthcmp")
self.assertRegex("panther.pthcmp:P00266", pattern)
self.assertNotRegex("pantherXpthcmp:P00266", pattern)
def test_depends_on(self):
"""Test getting dependencies."""
test_prefix = "foodon"
test_target = "bfo"
resource = bioregistry.get_resource(test_prefix)
self.assertIsNotNone(resource)
obofoundry = resource.get_external("obofoundry")
self.assertIsNotNone(obofoundry)
self.assertIn("depends_on", obofoundry)
fobi_dependencies = manager.get_depends_on(test_prefix)
self.assertIsNotNone(fobi_dependencies)
self.assertIn(test_target, fobi_dependencies)
fobi_dependencies = bioregistry.get_depends_on(test_prefix)
self.assertIsNotNone(fobi_dependencies)
self.assertIn(test_target, fobi_dependencies) |
4,998 | on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network application-gateway start",
)
class Start(AAZCommand):
"""Start an application gateway.
:example: Start an application gateway.
az network application-gateway start -g MyResourceGroup -n MyAppGateway
"""
_aaz_info = {
"version": "2022-05-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/applicationgateways/{}/start", "2022-05-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the application gateway.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.ApplicationGatewaysStart(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class ApplicationGatewaysStart(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.METHOD_NAME,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.METHOD_NAME,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"applicationGatewayName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-05-01",
required=True,
),
}
return parameters
def METHOD_NAME(self, session):
pass
class _StartHelper:
"""Helper class for Start"""
__all__ = ["Start"] |
4,999 | test scalar train graph | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
def _test_scalar_graph(test_case, device):
x = flow.tensor(3.0, device=device)
class MyModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.weight = flow.nn.Parameter(flow.tensor(5.0, device=device))
def forward(self, x):
return x * self.weight + 1.0
my_module = MyModule()
of_eager_out = my_module(x)
class ScalarGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = my_module
def build(self, x):
return self.m(x)
scalar_g = ScalarGraph()
of_lazy_out = scalar_g(x)
test_case.assertTrue(np.array_equal(of_lazy_out.numpy(), of_eager_out.numpy()))
def METHOD_NAME(test_case, device):
class MyModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.weight = flow.nn.Parameter(flow.tensor(5.0, device=device))
def forward(self, x):
return x * self.weight + 1.0
my_module = MyModule()
of_sgd = flow.optim.SGD(my_module.parameters(), lr=0.001, momentum=0.9)
eager_out_list = []
for i in range(3):
x = flow.tensor(i * 1.0, device=device, requires_grad=False)
of_eager_out = my_module(x)
of_eager_out.backward()
of_sgd.step()
of_sgd.zero_grad()
eager_out_list.append(of_eager_out)
lazy_module = MyModule()
class ScalarTrainGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = lazy_module
of_sgd = flow.optim.SGD(lazy_module.parameters(), lr=0.001, momentum=0.9)
# self.m = MyModule()
# of_sgd = flow.optim.SGD(self.m.parameters(), lr=0.001, momentum=0.9)
self.add_optimizer(of_sgd)
def build(self, x):
loss = self.m(x)
loss.backward()
return loss
lazy_out_list = []
scalar_g = ScalarTrainGraph()
for i in range(3):
x = flow.tensor(i * 1.0, device=device)
of_lazy_out = scalar_g(x)
lazy_out_list.append(of_lazy_out)
for i in range(3):
test_case.assertTrue(
np.array_equal(lazy_out_list[i].numpy(), eager_out_list[i].numpy())
)
def _test_scalar_global_train_graph(test_case, placement):
sbp_b = flow.sbp.broadcast
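# broadcast SBP replicates the tensor across every rank in the placement, so
# the global lazy results can be compared 1:1 with the single-device eager run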
class MyModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.weight = flow.nn.Parameter(flow.tensor(5.0))
def forward(self, x):
return x * self.weight + 1.0
my_module = MyModule()
of_sgd = flow.optim.SGD(my_module.parameters(), lr=0.001, momentum=0.9)
eager_out_list = []
for i in range(3):
x = flow.tensor(i * 1.0, requires_grad=False)
of_eager_out = my_module(x)
of_eager_out.backward()
of_sgd.step()
of_sgd.zero_grad()
eager_out_list.append(of_eager_out)
lazy_module = MyModule()
lazy_module.to_global(placement=placement, sbp=sbp_b)
class ScalarTrainGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = lazy_module
of_sgd = flow.optim.SGD(lazy_module.parameters(), lr=0.001, momentum=0.9)
self.add_optimizer(of_sgd)
def build(self, x):
loss = self.m(x)
loss.backward()
return loss
lazy_out_list = []
scalar_g = ScalarTrainGraph()
for i in range(3):
x = flow.tensor(i * 1.0, requires_grad=False)
x = x.to_global(placement=placement, sbp=sbp_b)
of_lazy_out = scalar_g(x)
lazy_out_list.append(of_lazy_out)
for i in range(3):
test_case.assertTrue(
np.array_equal(
lazy_out_list[i].to_local().numpy(), eager_out_list[i].numpy()
)
)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestScalarGraph(oneflow.unittest.TestCase):
def test_scalar_graph_gpu(test_case):
_test_scalar_graph(test_case, flow.device("cuda"))
def test_scalar_graph_cpu(test_case):
_test_scalar_graph(test_case, flow.device("cpu"))
def test_scalar_train_graph_gpu(test_case):
METHOD_NAME(test_case, flow.device("cuda"))
def test_scalar_train_graph_cpu(test_case):
METHOD_NAME(test_case, flow.device("cpu"))
def test_scalar_global_train_graph_gpu(test_case):
_test_scalar_global_train_graph(test_case, flow.placement("cuda", ranks=[0]))
def test_scalar_global_train_graph_cpu(test_case):
_test_scalar_global_train_graph(test_case, flow.placement("cpu", ranks=[0]))
if __name__ == "__main__":
unittest.main() |