Columns (type and per-column value statistics):

| column | type | values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |

blob_id: 659973c9465b2faa19a09b8496388bb0b3209fb6 | directory_id: 5e2202f0d0087a2bc00f9916dad00fd9050d905c
path: /tests/test_statemachine.py | content_id: 900ae1eeb5f380048d0906ec850aead682c1b99c
detected_licenses: ["MIT"] | license_type: permissive | repo_name: fgmacedo/python-statemachine
snapshot_id: 1e34d18486a5c4bf0841c9152e51bc69ccee7569 | revision_id: be10b53b34b9d1b445b666d9aebe4d9b10417a1c
branch_name: refs/heads/develop | visit_date: 2023-08-31T14:00:01.563599
revision_date: 2023-08-19T08:11:13 | committer_date: 2023-08-19T08:11:13
github_id: 85,780,522 | star_events_count: 506 | fork_events_count: 70
gha_license_id: MIT | gha_event_created_at: 2023-08-19T10:17:21 | gha_created_at: 2017-03-22T03:24:42
gha_language: Python | src_encoding: UTF-8 | language: Python
is_vendor: false | is_generated: false | length_bytes: 11,900 | extension: py | filename: test_statemachine.py
content:
import pytest
from statemachine import State
from statemachine import StateMachine
from statemachine import exceptions
from tests.models import MyModel
def test_machine_repr(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert (
repr(machine) == "CampaignMachine(model=MyModel({'state': 'draft'}), "
"state_field='state', current_state='draft')"
)
def test_machine_should_be_at_start_state(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert [s.value for s in campaign_machine.states] == [
"closed",
"draft",
"producing",
]
assert [t.name for t in campaign_machine.events] == [
"add_job",
"deliver",
"produce",
]
assert model.state == "draft"
assert machine.current_state == machine.draft
def test_machine_should_only_allow_only_one_initial_state():
with pytest.raises(exceptions.InvalidDefinition):
class CampaignMachine(StateMachine):
"A workflow machine"
draft = State(initial=True)
producing = State()
closed = State(
"Closed", initial=True
) # Should raise an Exception right after the class is defined
add_job = draft.to(draft) | producing.to(producing)
produce = draft.to(producing)
deliver = producing.to(closed)
def test_machine_should_activate_initial_state():
class CampaignMachine(StateMachine):
"A workflow machine"
producing = State()
closed = State()
draft = State(initial=True)
add_job = draft.to(draft) | producing.to(producing)
produce = draft.to(producing)
deliver = producing.to(closed)
sm = CampaignMachine()
assert sm.current_state == sm.draft
assert sm.current_state.is_active
def test_machine_should_not_allow_transitions_from_final_state():
with pytest.raises(exceptions.InvalidDefinition):
class CampaignMachine(StateMachine):
"A workflow machine"
draft = State(initial=True)
producing = State()
closed = State(final=True)
add_job = draft.to(draft) | producing.to(producing) | closed.to(draft)
produce = draft.to(producing)
deliver = producing.to(closed)
def test_should_change_state(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert model.state == "draft"
assert machine.current_state == machine.draft
machine.produce()
assert model.state == "producing"
assert machine.current_state == machine.producing
def test_should_run_a_transition_that_keeps_the_state(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert model.state == "draft"
assert machine.current_state == machine.draft
machine.add_job()
assert model.state == "draft"
assert machine.current_state == machine.draft
machine.produce()
assert model.state == "producing"
assert machine.current_state == machine.producing
machine.add_job()
assert model.state == "producing"
assert machine.current_state == machine.producing
def test_should_change_state_with_multiple_machine_instances(campaign_machine):
model1 = MyModel()
model2 = MyModel()
machine1 = campaign_machine(model1)
machine2 = campaign_machine(model2)
assert machine1.current_state == campaign_machine.draft
assert machine2.current_state == campaign_machine.draft
p1 = machine1.produce
p2 = machine2.produce
p2()
assert machine1.current_state == campaign_machine.draft
assert machine2.current_state == campaign_machine.producing
p1()
assert machine1.current_state == campaign_machine.producing
assert machine2.current_state == campaign_machine.producing
@pytest.mark.parametrize(
("current_state", "transition"),
[
("draft", "deliver"),
("closed", "add_job"),
],
)
def test_call_to_transition_that_is_not_in_the_current_state_should_raise_exception(
campaign_machine, current_state, transition
):
model = MyModel(state=current_state)
machine = campaign_machine(model)
assert machine.current_state.value == current_state
with pytest.raises(exceptions.TransitionNotAllowed):
machine.send(transition)
def test_machine_should_list_allowed_events_in_the_current_state(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert model.state == "draft"
assert [t.name for t in machine.allowed_events] == ["add_job", "produce"]
machine.produce()
assert model.state == "producing"
assert [t.name for t in machine.allowed_events] == ["add_job", "deliver"]
deliver = machine.allowed_events[1]
deliver()
assert model.state == "closed"
assert machine.allowed_events == []
def test_machine_should_run_a_transition_by_his_key(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert model.state == "draft"
machine.send("add_job")
assert model.state == "draft"
assert machine.current_state == machine.draft
machine.send("produce")
assert model.state == "producing"
assert machine.current_state == machine.producing
def test_machine_should_raise_an_exception_if_a_transition_by_his_key_is_not_found(
campaign_machine,
):
model = MyModel()
machine = campaign_machine(model)
assert model.state == "draft"
with pytest.raises(exceptions.TransitionNotAllowed):
machine.send("go_horse")
def test_machine_should_use_and_model_attr_other_than_state(campaign_machine):
model = MyModel(status="producing")
machine = campaign_machine(model, state_field="status")
assert getattr(model, "state", None) is None
assert model.status == "producing"
assert machine.current_state == machine.producing
machine.deliver()
assert model.status == "closed"
assert machine.current_state == machine.closed
def test_cant_assign_an_invalid_state_directly(campaign_machine):
machine = campaign_machine()
with pytest.raises(exceptions.InvalidStateValue):
machine.current_state_value = "non existing state"
def test_should_allow_validate_data_for_transition(campaign_machine_with_validator):
model = MyModel()
machine = campaign_machine_with_validator(model)
with pytest.raises(LookupError):
machine.produce()
machine.produce(goods="something")
assert model.state == "producing"
def test_should_check_if_is_in_status(campaign_machine):
model = MyModel()
machine = campaign_machine(model)
assert machine.draft.is_active
assert not machine.producing.is_active
assert not machine.closed.is_active
machine.produce()
assert not machine.draft.is_active
assert machine.producing.is_active
assert not machine.closed.is_active
machine.deliver()
assert not machine.draft.is_active
assert not machine.producing.is_active
assert machine.closed.is_active
def test_defined_value_must_be_assigned_to_models(campaign_machine_with_values):
model = MyModel()
machine = campaign_machine_with_values(model)
assert model.state == 1
machine.produce()
assert model.state == 2
machine.deliver()
assert model.state == 3
def test_state_machine_without_model(campaign_machine):
machine = campaign_machine()
assert machine.draft.is_active
assert not machine.producing.is_active
assert not machine.closed.is_active
machine.produce()
assert not machine.draft.is_active
assert machine.producing.is_active
assert not machine.closed.is_active
@pytest.mark.parametrize(
("model", "machine_name", "start_value"),
[
(None, "campaign_machine", "producing"),
(None, "campaign_machine_with_values", 2),
(MyModel(), "campaign_machine", "producing"),
(MyModel(), "campaign_machine_with_values", 2),
],
)
def test_state_machine_with_a_start_value(request, model, machine_name, start_value):
machine_cls = request.getfixturevalue(machine_name)
machine = machine_cls(model, start_value=start_value)
assert not machine.draft.is_active
assert machine.producing.is_active
assert not model or model.state == start_value
@pytest.mark.parametrize(
("model", "machine_name", "start_value"),
[
(None, "campaign_machine", "tapioca"),
(None, "campaign_machine_with_values", 99),
(MyModel(), "campaign_machine", "tapioca"),
(MyModel(), "campaign_machine_with_values", 99),
],
)
def test_state_machine_with_a_invalid_start_value(
request, model, machine_name, start_value
):
machine_cls = request.getfixturevalue(machine_name)
with pytest.raises(exceptions.InvalidStateValue):
machine_cls(model, start_value=start_value)
def test_should_not_create_instance_of_abstract_machine():
class EmptyMachine(StateMachine):
"An empty machine"
pass
with pytest.raises(exceptions.InvalidDefinition):
EmptyMachine()
def test_should_not_create_instance_of_machine_without_states():
s1 = State()
with pytest.raises(exceptions.InvalidDefinition):
class OnlyTransitionMachine(StateMachine):
t1 = s1.to.itself()
def test_should_not_create_instance_of_machine_without_transitions():
with pytest.raises(exceptions.InvalidDefinition):
class NoTransitionsMachine(StateMachine):
"A machine without transitions"
initial = State(initial=True)
def test_should_not_create_disconnected_machine():
expected = (
r"There are unreachable states. The statemachine graph should have a single component. "
r"Disconnected states: \['blue'\]"
)
with pytest.raises(exceptions.InvalidDefinition, match=expected):
class BrokenTrafficLightMachine(StateMachine):
"A broken traffic light machine"
green = State(initial=True)
yellow = State()
blue = State() # This state is unreachable
cycle = green.to(yellow) | yellow.to(green)
def test_should_not_create_big_disconnected_machine():
expected = (
r"There are unreachable states. The statemachine graph should have a single component. "
r"Disconnected states: \[.*\]$"
)
with pytest.raises(exceptions.InvalidDefinition, match=expected):
class BrokenTrafficLightMachine(StateMachine):
"A broken traffic light machine"
green = State(initial=True)
yellow = State()
magenta = State() # This state is unreachable
red = State()
cyan = State()
blue = State() # This state is also unreachable
cycle = green.to(yellow)
diverge = green.to(cyan) | cyan.to(red)
validate = yellow.to(green)
def test_state_value_is_correct():
STATE_NEW = 0
STATE_DRAFT = 1
class ValueTestModel(StateMachine):
new = State(STATE_NEW, value=STATE_NEW, initial=True)
draft = State(STATE_DRAFT, value=STATE_DRAFT)
write = new.to(draft)
model = ValueTestModel()
assert model.new.value == STATE_NEW
assert model.draft.value == STATE_DRAFT
def test_final_states(campaign_machine_with_final_state):
model = MyModel()
machine = campaign_machine_with_final_state(model)
final_states = machine.final_states
assert len(final_states) == 1
assert final_states[0].name == "Closed"
def test_should_not_override_states_properties(campaign_machine):
machine = campaign_machine()
with pytest.raises(exceptions.StateMachineError) as e:
machine.draft = "something else"
assert (
"State overriding is not allowed. Trying to add 'something else' to draft"
in str(e)
)

blob_id: f5697408f3bec374b353fd678d1f1c7556095d5b | directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2
path: /SimG4CMS/Forward/python/test/runCastorWithShowerLibrary_cfg.py | content_id: 7d0befe7a9e31fd3cd8c06d196d6219983e917f6
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: cms-sw/cmssw
snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58 | revision_id: 19c178740257eb48367778593da55dcad08b7a4f
branch_name: refs/heads/master | visit_date: 2023-08-23T21:57:42.491143
revision_date: 2023-08-22T20:22:40 | committer_date: 2023-08-22T20:22:40
github_id: 10,969,551 | star_events_count: 1,006 | fork_events_count: 3,696
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T19:14:28 | gha_created_at: 2013-06-26T14:09:07
gha_language: C++ | src_encoding: UTF-8 | language: Python
is_vendor: false | is_generated: false | length_bytes: 4,380 | extension: py | filename: runCastorWithShowerLibrary_cfg.py
content:
import FWCore.ParameterSet.Config as cms
process = cms.Process("CastorTest")
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
# process.load("Configuration.StandardSequences.GeometryExtended_cff")
process.load("SimG4CMS.Forward.castorGeometryXML_cfi")
#process.load("Geometry.CMSCommonData.cmsAllGeometryXML_cfi")
#process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Configuration.StandardSequences.SimulationRandomNumberGeneratorSeeds_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 113456789
process.load("Configuration.EventContent.EventContent_cff")
process.load("SimG4Core.Application.g4SimHits_cfi")
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
DEBUG = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
ForwardSim = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
enable = cms.untracked.bool(True)
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(211),
MinEta = cms.double(-6.6),
MaxEta = cms.double(-5.2),
MinPhi = cms.double(-3.14),
MaxPhi = cms.double(3.14),
MinE = cms.double(50.00),
MaxE = cms.double(50.00)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(1)
)
process.o1 = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('/tmp/sim_pion_SL.root')
)
process.common_maximum_timex = cms.PSet( # need to be locally redefined
MaxTrackTime = cms.double(500.0), # need to be locally redefined
MaxTimeNames = cms.vstring(), # need to be locally redefined
MaxTrackTimes = cms.vdouble() # need to be locally redefined
)
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.g4SimHits)
process.outpath = cms.EndPath(process.o1)
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.DefaultCutValue = 10.
process.g4SimHits.Generator.ApplyEtaCuts = False
process.g4SimHits.CaloTrkProcessing.TestBeam = True
process.g4SimHits.CastorSD.useShowerLibrary = True
process.g4SimHits.CastorSD.minEnergyInGeVforUsingSLibrary = 1.0 # default = 1.0
process.g4SimHits.CastorShowerLibrary.FileName = '../../../../../../p/polme/scratch0/CMSSW_3_7_0/src/SL_em+had_E1-1.5-2-2.5-3-3.5-4-4.5-5-6-7-8-9-10-12.5-15-17.5-20-25-30-35-40-45-50-60-70-80-100-125-150-175-200-300-400-500GeV_7eta-6.6--5.2_5phi0-0.7854.root'
process.g4SimHits.CastorShowerLibrary.BranchEvt = 'hadShowerLibInfo.'
process.g4SimHits.CastorShowerLibrary.BranchEM = 'emParticles.'
process.g4SimHits.CastorShowerLibrary.BranchHAD = 'hadParticles.'
process.g4SimHits.StackingAction = cms.PSet(
process.common_heavy_suppression,
process.common_maximum_timex, # need to be locally redefined
TrackNeutrino = cms.bool(False),
KillDeltaRay = cms.bool(False),
KillHeavy = cms.bool(False),
SaveFirstLevelSecondary = cms.untracked.bool(True),
SavePrimaryDecayProductsAndConversionsInTracker = cms.untracked.bool(True),
SavePrimaryDecayProductsAndConversionsInCalo = cms.untracked.bool(True),
SavePrimaryDecayProductsAndConversionsInMuon = cms.untracked.bool(True)
)
process.g4SimHits.SteppingAction = cms.PSet(
process.common_maximum_timex, # need to be locally redefined
KillBeamPipe = cms.bool(True),
CriticalEnergyForVacuum = cms.double(2.0),
CriticalDensity = cms.double(1e-15),
EkinNames = cms.vstring(),
EkinThresholds = cms.vdouble(),
EkinParticles = cms.vstring(),
Verbosity = cms.untracked.int32(0)
)
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
type = cms.string('CastorTestAnalysis'),
CastorTestAnalysis = cms.PSet(
Verbosity = cms.int32(0),
EventNtupleFlag = cms.int32(1),
StepNtupleFlag = cms.int32(0),
EventNtupleFileName = cms.string('eventNtuple_pion_SL.root'),
StepNtupleFileName = cms.string('stepNtuple_pion_SL.root'),
)
))

blob_id: 499a6867a9b625edb64c9ab713f77f92f5b99c15 | directory_id: df4361db61d10a10c46ed5f18973d89e4efda82c
path: /armi/utils/mathematics.py | content_id: 7b4b3d0dc3bfe0212d66818e88415b054c951733
detected_licenses: ["Apache-2.0", "GPL-1.0-or-later", "BSD-3-Clause", "LicenseRef-scancode-free-unknown"] | license_type: permissive | repo_name: terrapower/armi
snapshot_id: 5524741c5e80781e136ea3422aed0db8398f76ae | revision_id: 360791847227df3f3a337a996ef561e00f846a09
branch_name: refs/heads/main | visit_date: 2023-09-04T05:16:29.080518
revision_date: 2023-09-01T16:10:29 | committer_date: 2023-09-01T16:10:29
github_id: 218,863,590 | star_events_count: 204 | fork_events_count: 75
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T20:42:24 | gha_created_at: 2019-10-31T21:18:34
gha_language: Python | src_encoding: UTF-8 | language: Python
is_vendor: false | is_generated: false | length_bytes: 19,692 | extension: py | filename: mathematics.py
content:
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various math utilities."""
import math
import operator # the python package, not the ARMI module
import re
import numpy as np
import scipy.optimize as sciopt
# special pattern to deal with FORTRAN-produced scipats without E, like 3.2234-234
SCIPAT_SPECIAL = re.compile(r"([+-]?\d*\.\d+)[eEdD]?([+-]\d+)")
def average1DWithinTolerance(vals, tolerance=0.2):
"""
Compute the average of a series of arrays with a tolerance.
Tuned for averaging assembly meshes or block heights.
Parameters
----------
vals : 2D np.array
could be assembly x axial mesh tops or heights
"""
vals = np.array(vals)
filterOut = np.array([False]) # this gets discarded
while not filterOut.all(): # 20% difference is the default tolerance
avg = vals.mean(axis=0) # average over all columns
diff = abs(vals - avg) / avg # no nans, because all vals are non-zero
# True = 1, sum across axis means any height in assem is off
filterOut = (diff > tolerance).sum(axis=1) == 0
vals = vals[filterOut] # filter anything that is skewing
if vals.size == 0:
raise ValueError("Nothing was near the mean, there are no acceptable values!")
if (avg <= 0.0).any():
raise ValueError(
"A non-physical value (<=0) was computed, but this is not possible.\n"
"Values: {}\navg: {}".format(vals, avg)
)
return avg
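# Illustrative usage (a sketch added for clarity, not part of the original module):
# rows that agree within the default 20% tolerance are kept and averaged column-wise.
#     >>> average1DWithinTolerance(np.array([[1.0, 2.0], [1.1, 2.1]]))
#     array([1.05, 2.05])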
def convertToSlice(x, increment=False):
"""
Convert an int, float, list of ints or floats, None, or slice
to a slice. Also optionally increments that slice to make it easy to line
up lists that don't start with 0.
Use this with np.array (np.ndarray) types to easily get selections of its elements.
Parameters
----------
x : multiple types allowed.
int: select one index.
list of int: select these index numbers.
None: select all indices.
slice: select this slice
Returns
-------
slice : slice
Returns a slice object that can be used in an array
like a[x] to select from its members.
Also, the slice has its index numbers decremented by 1.
It can also return a numpy array, which can be used
to slice other numpy arrays in the same way as a slice.
Examples
--------
a = np.array([10, 11, 12, 13])
>>> convertToSlice(2)
slice(2, 3, None)
>>> a[convertToSlice(2)]
array([12])
>>> convertToSlice(2, increment=-1)
slice(1, 2, None)
>>> a[convertToSlice(2, increment=-1)]
array([11])
>>> a[convertToSlice(None)]
array([10, 11, 12, 13])
>>> a[utils.convertToSlice([1, 3])]
array([11, 13])
>>> a[utils.convertToSlice([1, 3], increment=-1)]
array([10, 12])
>>> a[utils.convertToSlice(slice(2, 3, None), increment=-1)]
array([11])
"""
if increment is False:
increment = 0
if not isinstance(increment, int):
raise Exception("increment must be False or an integer in utils.convertToSlice")
if x is None:
x = np.s_[:]
if isinstance(x, list):
x = np.array(x)
if isinstance(x, (int, np.integer, float, np.floating)):
x = slice(int(x), int(x) + 1, None)
# Correct the slice indices to be group instead of index based.
# The energy groups are 1..x and the indices are 0..x-1.
if isinstance(x, slice):
if x.start is not None:
jstart = x.start + increment
else:
jstart = None
if x.stop is not None:
if isinstance(x.stop, list):
jstop = [x + increment for x in x.stop]
else:
jstop = x.stop + increment
else:
jstop = None
jstep = x.step
return np.s_[jstart:jstop:jstep]
elif isinstance(x, np.ndarray):
return np.array([i + increment for i in x])
else:
raise Exception(
(
"It is not known how to handle x type: " "{0} in utils.convertToSlice"
).format(type(x))
)
def efmt(a: str) -> str:
"""Converts string exponential number to another string with just 2 digits in the exponent."""
# this assumes that none of our numbers will be more than 1e100 or less than 1e-100...
if len(a.split("E")) != 2:
two = a.split("e")
else:
two = a.split("E")
# print two
exp = two[1] # this is '+002' or '+02' or something
if len(exp) == 4: # it has 3 digits of exponent
exp = exp[0] + exp[2:] # gets rid of the hundred's place digit
return two[0] + "E" + exp
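# Illustrative behaviour (sketch, not part of the original module): a three-digit
# exponent is trimmed to two digits, and a lower-case "e" input is normalized to "E".
#     >>> efmt("1.2345E+002")
#     '1.2345E+02'
#     >>> efmt("1.2345e+02")
#     '1.2345E+02'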
def expandRepeatedFloats(repeatedList):
"""
Return an expanded repeat list.
Notes
-----
R char is valid for showing the number of repeats in MCNP. For examples the list:
[150, 200, '9R']
indicates a 150 day cycle followed by 10 200 day cycles.
"""
nonRepeatList = []
for val in repeatedList:
isRepeat = False
if isinstance(val, str):
val = val.upper()
if val.count("R") > 1:
raise ValueError("List had strings that were not repeats")
elif "R" in val:
val = val.replace("R", "")
isRepeat = True
if isRepeat:
nonRepeatList += [nonRepeatList[-1]] * int(val)
else:
nonRepeatList.append(float(val))
return nonRepeatList
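# Worked example (sketch, not part of the original module): '9R' repeats the previous
# entry nine more times, giving one 150-day step followed by ten 200-day steps.
#     >>> expandRepeatedFloats([150, 200, "9R"])
#     [150.0, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0, 200.0]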
def findClosest(listToSearch, val, indx=False):
r"""
Find closest item in a list.
Parameters
----------
listToSearch : list
The list to search through
val : float
The target value that is being searched for in the list
indx : bool, optional
If true, returns minVal and minIndex, otherwise, just the value
Returns
-------
minVal : float
The item in the listToSearch that is closest to val
minI : int
The index of the item in listToSearch that is closest to val. Returned if indx=True.
"""
d = float("inf")
minVal = None
minI = None
for i, item in enumerate(listToSearch):
if abs(item - val) < d:
d = abs(item - val)
minVal = item
minI = i
if indx:
return minVal, minI
else:
# backwards compatibility
return minVal
def findNearestValue(searchList, searchValue):
"""Search a given list for the value that is closest to the given search value."""
return findNearestValueAndIndex(searchList, searchValue)[0]
def findNearestValueAndIndex(searchList, searchValue):
"""Search a given list for the value that is closest to the given search value. Return a tuple
containing the value and its index in the list.
"""
searchArray = np.array(searchList)
closestValueIndex = (np.abs(searchArray - searchValue)).argmin()
return searchArray[closestValueIndex], closestValueIndex
def fixThreeDigitExp(strToFloat: str) -> float:
"""
Convert FORTRAN numbers that cannot be converted into floats.
Notes
-----
Converts a number like "9.03231714805651-101" (no e or E) to "9.03231714805651e-101".
Some external depletion kernels currently need this fix. From contact with developer:
The notation like 1.0-101 is a FORTRAN thing, with history going back to the 60's.
They will only put E before an exponent 99 and below. Fortran will also read these guys
just fine, and they are valid floating point numbers. It would not be a useful effort,
in terms of time, trying to get FORTRAN to behave differently.
The approach has been to write a routine in the reading code which will interpret these.
This helps when the scientific number exponent does not fit.
"""
match = SCIPAT_SPECIAL.match(strToFloat)
return float("{}E{}".format(*match.groups()))
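# Illustrative behaviour (sketch, not part of the original module): the E-less
# FORTRAN exponent described above is rewritten so Python can parse it.
#     >>> fixThreeDigitExp("9.03231714805651-101")
#     9.03231714805651e-101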
def getFloat(val):
"""Returns float version of val, or None if it's impossible. Useful for converting
user-input into floats when '' might be possible.
"""
try:
newVal = float(val)
return newVal
except: # noqa: bare-except
return None
def getStepsFromValues(values, prevValue=0.0):
"""Convert list of floats to list of steps between each float."""
steps = []
for val in values:
currentVal = float(val)
steps.append(currentVal - prevValue)
prevValue = currentVal
return steps
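# Quick example (sketch, not part of the original module): cumulative points are
# turned into the step sizes between them.
#     >>> getStepsFromValues([1.0, 3.0, 6.0])
#     [1.0, 2.0, 3.0]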
def isMonotonic(inputIter, relation):
"""
Checks if an iterable contains elements that are monotonically increasing or
decreasing, whatever that might mean for the specific types of the elements.
Parameters
----------
inputIter : list
Some list to check. Values in the list should have a defined relation to
each other.
relation : {'<=', '<', '>=', '>'}
The relation between the elements to check, from left to right through
the iterable.
Returns
-------
bool
"""
operatorDict = {
"<=": operator.le,
"<": operator.lt,
">=": operator.ge,
">": operator.gt,
}
try:
op = operatorDict[relation]
except KeyError:
raise ValueError(f"Valid relation not specified: {relation}")
return all([op(x, y) for x, y in zip(inputIter, inputIter[1:])])
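# Usage sketch (not part of the original module): the relation is checked pairwise,
# left to right, across the whole iterable.
#     >>> isMonotonic([1, 2, 3], "<")
#     True
#     >>> isMonotonic([3, 3, 2], "<")
#     False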
def linearInterpolation(x0, y0, x1, y1, targetX=None, targetY=None):
r"""
Does a linear interpolation (or extrapolation) for y=f(x).
Parameters
----------
x0,y0,x1,y1 : float
Coordinates of two points to interpolate between
targetX : float, optional
X value to evaluate the line at
targetY : float, optional
Y value we want to find the x value for (inverse interpolation)
Returns
-------
interpY : float
The value of y(targetX), if targetX is not None
interpX : float
The value of x where y(x) = targetY (if targetY is not None)
y = m(x-x0) + b
x = (y-b)/m
"""
if x1 == x0:
raise ZeroDivisionError("The x-values are identical. Cannot interpolate.")
m = (y1 - y0) / (x1 - x0)
b = -m * x0 + y0
if targetX is not None:
return m * targetX + b
else:
return (targetY - b) / m
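# Usage sketch (not part of the original module): for the line through (0, 0) and
# (2, 4), targetX evaluates y(x) while targetY inverts it.
#     >>> linearInterpolation(0.0, 0.0, 2.0, 4.0, targetX=1.0)
#     2.0
#     >>> linearInterpolation(0.0, 0.0, 2.0, 4.0, targetY=3.0)
#     1.5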
def minimizeScalarFunc(
func,
goal,
guess,
maxIterations=None,
cs=None,
positiveGuesses=False,
method=None,
tol=1.0e-3,
):
r"""
Use scipy minimize with the given function, goal value, and first guess.
Parameters
----------
func : function
The function that guess will be changed to try to make it return the goal value.
goal : float
The function will be changed until its return value equals this value.
guess : float
The first guess value to do Newton's method on the func.
maxIterations : int
The maximum number of iterations that the Newton's method will be allowed to perform.
Returns
-------
ans : float
The guess that when input to the func returns the goal.
"""
def goalFunc(guess, func, positiveGuesses):
if positiveGuesses is True:
guess = abs(guess)
funcVal = func(guess)
val = abs(goal - funcVal)
return val
if (maxIterations is None) and (cs is not None):
maxIterations = cs["maxNewtonsIterations"]
X = sciopt.minimize(
goalFunc,
guess,
args=(func, positiveGuesses),
method=method,
tol=tol,
options={"maxiter": maxIterations},
)
ans = float(X["x"])
if positiveGuesses is True:
ans = abs(ans)
return ans
def newtonsMethod(
func, goal, guess, maxIterations=None, cs=None, positiveGuesses=False
):
r"""
Solves a Newton's method with the given function, goal value, and first guess.
Parameters
----------
func : function
The function that guess will be changed to try to make it return the goal value.
goal : float
The function will be changed until its return value equals this value.
guess : float
The first guess value to do Newton's method on the func.
maxIterations : int
The maximum number of iterations that the Newton's method will be allowed to perform.
Returns
-------
ans : float
The guess that when input to the func returns the goal.
"""
def goalFunc(guess, func, positiveGuesses):
if positiveGuesses is True:
guess = abs(guess)
funcVal = func(guess)
val = abs(goal - funcVal)
return val
if (maxIterations is None) and (cs is not None):
maxIterations = cs["maxNewtonsIterations"]
# try:
ans = float(
sciopt.newton(
goalFunc,
guess,
args=(func, positiveGuesses),
tol=1.0e-3,
maxiter=maxIterations,
)
)
if positiveGuesses is True:
ans = abs(ans)
return ans
def parabolaFromPoints(p1, p2, p3):
r"""
Find the parabola that passes through three points.
We solve a simultaneous equation with three points.
A = x1**2 x1 1
x2**2 x2 1
x3**2 x3 1
b = y1
y2
y3
find coefficients Ax=b
Parameters
----------
p1 : tuple
first point (x,y) coordinates
p2,p3: tuple, second and third points.
Returns
-------
a,b,c coefficients of y=ax^2+bx+c
"""
A = np.array(
[[p1[0] ** 2, p1[0], 1], [p2[0] ** 2, p2[0], 1], [p3[0] ** 2, p3[0], 1]]
)
b = np.array([[p1[1]], [p2[1]], [p3[1]]])
try:
x = np.linalg.solve(A, b)
except:
print("Error in parabola {} {}".format(A, b))
raise
return float(x[0]), float(x[1]), float(x[2])
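# Usage sketch (not part of the original module): the parabola through (0, 1),
# (1, 2) and (2, 5) is y = x**2 + 1, so the coefficients come back as approximately
# (1.0, 0.0, 1.0), up to floating-point round-off from the linear solve.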
def parabolicInterpolation(ap, bp, cp, targetY):
r"""
Given parabola coefficients, this interpolates the time
that would give k=targetK.
keff = at^2+bt+c
We want to solve a*t^2+bt+c-targetK = 0.0 for time.
if there are real roots, we should probably take the smallest one
because the larger one might be at very high burnup.
If there are no real roots, just take the point where the deriv ==0, or
2at+b=0, so t = -b/2a
The slope of the curve is the solution to 2at+b at whatever t has been determined
Parameters
----------
ap, bp,cp : floats
coefficients of a parabola y = ap*x^2 + bp*x + cp
targetY : float
The keff to find the cycle length of
Returns
-------
realRoots : list of tuples
(root, slope)
The best guess of the cycle length that will give k=targetK
If no positive root was found, this is the maximum of the curve. In that case,
it will be a negative number. If there are two positive roots, there will be two entries.
slope : float
The slope of the keff vs. time curve at t=newTime
"""
roots = np.roots([ap, bp, cp - targetY])
realRoots = []
for r in roots:
if r.imag == 0 and r.real > 0:
realRoots.append((r.real, 2.0 * ap * r.real + bp))
if not realRoots:
# no positive real roots. Take the maximum and give up for this cycle.
newTime = -bp / (2 * ap)
if newTime < 0:
raise RuntimeError("No positive roots or maxima.")
slope = 2.0 * ap * newTime + bp
newTime = (
-newTime
) # return a negative newTime to signal that it is not expected to be critical.
realRoots = [(newTime, slope)]
return realRoots
def relErr(v1: float, v2: float) -> float:
"""Find the relative error between to numbers."""
if v1:
return (v2 - v1) / v1
else:
return -1e99
def resampleStepwise(xin, yin, xout, avg=True):
"""
Resample a piecewise-defined step function from one set of mesh points
to another. This is useful for reallocating values along a given axial
mesh (or assembly of blocks).
Parameters
----------
xin : list
interval points / mesh points
yin : list
interval values / inter-mesh values
xout : list
new interval points / new mesh points
avg : bool
By default, this is set to True, forcing the resampling to be done
by averaging. But if this is False, the resampling will be done by
summation, to try and preserve the totals after resampling.
"""
# validation: there must be one more mesh point than inter-mesh values
assert (len(xin) - 1) == len(yin)
# find out in which xin bin each xout value lies
bins = np.digitize(xout, bins=xin)
# loop through xout / the xout bins
yout = []
for i in range(1, len(bins)):
start = bins[i - 1]
end = bins[i]
chunk = yin[start - 1 : end]
length = xin[start - 1 : end + 1]
length = [length[j] - length[j - 1] for j in range(1, len(length))]
# if the xout lies outside the xin range
if not len(chunk):
yout.append(0)
continue
# trim any partial right-side bins
if xout[i] < xin[min(end, len(xin) - 1)]:
fraction = (xout[i] - xin[end - 1]) / (xin[end] - xin[end - 1])
if fraction == 0:
chunk = chunk[:-1]
length = length[:-1]
elif avg:
length[-1] *= fraction
else:
chunk[-1] *= fraction
# trim any partial left-side bins
if xout[i - 1] > xin[start - 1]:
fraction = (xin[start] - xout[i - 1]) / (xin[start] - xin[start - 1])
if fraction == 0:
chunk = chunk[1:]
length = length[1:]
elif avg:
length[0] *= fraction
else:
chunk[0] *= fraction
# return the sum or the average
if [1 for c in chunk if (not hasattr(c, "__len__") and c is None)]:
yout.append(None)
elif avg:
weighted_sum = sum([ch * ln for ch, ln in zip(chunk, length)])
yout.append(weighted_sum / sum(length))
else:
yout.append(sum(chunk))
return yout
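# Worked example (sketch, not part of the original module): collapsing the two unit
# intervals of mesh [0, 1, 2] with values [10, 20] onto the single interval [0, 2]
# gives the length-weighted average, or the plain sum when avg=False.
#     >>> resampleStepwise([0, 1, 2], [10, 20], [0, 2])
#     [15.0]
#     >>> resampleStepwise([0, 1, 2], [10, 20], [0, 2], avg=False)
#     [30]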
def rotateXY(x, y, degreesCounterclockwise=None, radiansCounterclockwise=None):
"""
Rotates x, y coordinates.
Parameters
----------
x, y : array_like
coordinates
degreesCounterclockwise : float
Degrees to rotate in the CCW direction
radiansCounterclockwise : float
Radians to rotate in the CCW direction
Returns
-------
xr, yr : array_like
the rotated coordinates.
"""
if radiansCounterclockwise is None:
radiansCounterclockwise = degreesCounterclockwise * math.pi / 180.0
sinT = math.sin(radiansCounterclockwise)
cosT = math.cos(radiansCounterclockwise)
rotationMatrix = np.array([[cosT, -sinT], [sinT, cosT]])
xr, yr = rotationMatrix.dot(np.vstack((x, y)))
if len(xr) > 1:
# Convert to lists because everyone prefers lists for some reason
return xr.tolist(), yr.tolist()
else:
# Convert to scalar for consistency with old implementation
return xr[0], yr[0]
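# Usage sketch (not part of the original module): rotating the point (1, 0) by 90
# degrees counterclockwise lands on (0, 1), up to floating-point round-off:
#     rotateXY([1.0], [0.0], degreesCounterclockwise=90)  # ~ (0.0, 1.0)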

blob_id: c9cfaf8ec0422c7406248a434682ddc926d68edf | directory_id: 6f509fd95f182099f5447b6e597a03eedb9bb408
path: /awsume/configure/autocomplete.py | content_id: 93b40c93d9bc3a9365d3689a369e0c85bc955d90
detected_licenses: ["MIT"] | license_type: permissive | repo_name: trek10inc/awsume
snapshot_id: 596e15aa74f0e896430bb6e383ac12e87ae62aa7 | revision_id: c9062cb6bd0d3067ba61558c445a92db0fde6e3a
branch_name: refs/heads/master | visit_date: 2023-08-23T20:43:39.104563
revision_date: 2023-08-18T20:24:45 | committer_date: 2023-08-18T20:24:45
github_id: 55,160,220 | star_events_count: 790 | fork_events_count: 105
gha_license_id: MIT | gha_event_created_at: 2023-09-12T20:58:04 | gha_created_at: 2016-03-31T15:12:15
gha_language: Python | src_encoding: UTF-8 | language: Python
is_vendor: false | is_generated: false | length_bytes: 2,701 | extension: py | filename: autocomplete.py
content:
import os, pathlib
BASH_AUTOCOMPLETE_SCRIPT = """
_awsume() {
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts=$(awsume-autocomplete)
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
}
complete -F _awsume awsume
"""
ZSH_AUTOCOMPLETE_SCRIPT = """
#Auto-Complete function for AWSume
fpath=(~/.awsume/zsh-autocomplete/ $fpath)
"""
ZSH_AUTOCOMPLETE_FUNCTION = """#compdef awsume
_arguments "*: :($(awsume-autocomplete))"
"""
FISH_AUTOCOMPLETE_SCRIPT = """
complete --command awsume --arguments '(awsume-autocomplete)'
"""
POWERSHELL_AUTOCOMPLETE_SCRIPT = """
Register-ArgumentCompleter -Native -CommandName awsume -ScriptBlock {
param($wordToComplete, $commandAst, $cursorPosition)
$(awsume-autocomplete) |
Where-Object { $_ -like "$wordToComplete*" } |
Sort-Object |
ForEach-Object {
[System.Management.Automation.CompletionResult]::new($_, $_, 'ParameterValue', $_)
}
}
"""
SCRIPTS = {
'bash': BASH_AUTOCOMPLETE_SCRIPT,
'zsh': ZSH_AUTOCOMPLETE_SCRIPT,
'powershell': POWERSHELL_AUTOCOMPLETE_SCRIPT,
'fish': FISH_AUTOCOMPLETE_SCRIPT,
}
def main(shell: str, autocomplete_file: str):
autocomplete_file = str(pathlib.Path(autocomplete_file).expanduser())
autocomplete_script = SCRIPTS[shell]
basedir = os.path.dirname(autocomplete_file)
if basedir and not os.path.exists(basedir):
os.makedirs(basedir)
open(autocomplete_file, 'a').close()
if autocomplete_script in open(autocomplete_file, 'r').read():
print('Autocomplete script already in ' + autocomplete_file)
else:
with open(autocomplete_file, 'a') as f:
f.write('\n#Auto-Complete function for AWSume')
f.write(autocomplete_script)
print('Wrote autocomplete script to ' + autocomplete_file)
# install autocomplete function if zsh
if shell == 'zsh':
zsh_autocomplete_function_file = str(pathlib.Path('~/.awsume/zsh-autocomplete/_awsume').expanduser())
basedir = os.path.dirname(zsh_autocomplete_function_file)
if basedir and not os.path.exists(basedir):
os.makedirs(basedir)
if not os.path.isfile(zsh_autocomplete_function_file):
open(zsh_autocomplete_function_file, 'w').close()
if ZSH_AUTOCOMPLETE_FUNCTION in open(zsh_autocomplete_function_file, 'r').read():
print('Zsh function already in ' + zsh_autocomplete_function_file)
else:
with open(zsh_autocomplete_function_file, 'a') as f:
f.write(ZSH_AUTOCOMPLETE_FUNCTION)
print('Wrote zsh function to ' + zsh_autocomplete_function_file)
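# Illustrative invocation (sketch, not part of the original module; the rc-file path
# is only an example): appends the bash completion script to the given file if it is
# not already present.
#     main('bash', '~/.bash_profile')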

blob_id: 8c11d4d4e00a777cca336c2cee47201ec2de45b9 | directory_id: 6923f79f1eaaba0ab28b25337ba6cb56be97d32d
path: /A_Primer_on_Scientific_Programming_with_Python/random/uniform_numbers1.py | content_id: 30c757fed88675db24dea88d2072234bde8e18b5
detected_licenses: [] | license_type: no_license | repo_name: burakbayramli/books
snapshot_id: 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | revision_id: 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
branch_name: refs/heads/master | visit_date: 2023-08-17T05:31:08.885134
revision_date: 2023-08-14T10:05:37 | committer_date: 2023-08-14T10:05:37
github_id: 72,460,321 | star_events_count: 223 | fork_events_count: 174
gha_license_id: null | gha_event_created_at: 2022-10-24T12:15:06 | gha_created_at: 2016-10-31T17:24:00
gha_language: Jupyter Notebook | src_encoding: UTF-8 | language: Python
is_vendor: false | is_generated: false | length_bytes: 330 | extension: py | filename: uniform_numbers1.py
content:
import sys
N = int(sys.argv[1])
import numpy as np
np.random.seed(12)
# Vectorized generation of random numbers
samples = np.random.random(size=N)
# Plot histogram
import scitools.std as st
x, y = st.compute_histogram(samples, nbins=20)
st.plot(x, y, title='%d samples of uniform numbers on [0,1)' % N)
st.hardcopy('tmp.eps')

blob_id: 5f711b43f41f086e5dddcd30caa6f7b546a70f29 | directory_id: 03666e5f961946fc1a0ac67781ac1425562ef0d7
path: /src/visitpy/pyavt/py_src/templates/simple_expression.py | content_id: e996de69ecaf9b7f1c789c79003a8f3f3228ea26
detected_licenses: ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | repo_name: visit-dav/visit
snapshot_id: e9f81b4d4b9b9930a0db9d5282cd1bcabf465e2e | revision_id: 601ae46e0bef2e18425b482a755d03490ade0493
branch_name: refs/heads/develop | visit_date: 2023-09-06T08:19:38.397058
revision_date: 2023-09-05T21:29:32 | committer_date: 2023-09-05T21:29:32
github_id: 165,565,988 | star_events_count: 335 | fork_events_count: 120
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-09-14T00:53:37 | gha_created_at: 2019-01-13T23:27:26
gha_language: C | src_encoding: UTF-8 | language: Python
is_vendor: false | is_generated: false | length_bytes: 1,106 | extension: py | filename: simple_expression.py
content:
#
# Template for a Simple Python Expression.
#
class PyExpr(SimplePythonExpression):
def __init__(self):
"""
Constructor.
"""
SimplePythonExpression.__init__(self)
# set to provide a name & description for your expression.
self.name = "PyExpr"
self.description = "Custom Python Expression"
# output_is_point_var:
# true if output centering is nodal
# false if output centering is zonal
self.output_is_point_var = False
# output_dimension:
# set to indicate number of components in the output tuple.
self.output_dimension = 1
def derive_variable(self,ds_in,domain_id):
"""
Called to process each chunk.
Use self.input_var_names & self.arguments to access expression
variable names and const arguments.
Return a new vtkDataArray with expression result values.
"""
pass
def modify_contract(self,contract):
"""
Implement this method to modify the pipeline contract.
"""
pass
py_filter = PyExpr
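# Rough sketch of what a concrete derive_variable could look like, assuming the
# standard VTK Python API for zonal (cell-centered) data; this is an illustration
# added here, not part of the original template:
#
#     import vtk
#     def derive_variable(self, ds_in, domain_id):
#         cell_vals = ds_in.GetCellData().GetArray(self.input_var_names[0])
#         res = vtk.vtkFloatArray()
#         res.SetNumberOfTuples(cell_vals.GetNumberOfTuples())
#         for i in range(cell_vals.GetNumberOfTuples()):
#             res.SetTuple1(i, 2.0 * cell_vals.GetTuple1(i))  # e.g. scale the input by 2
#         return res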

blob_id: 173be3dc7c98b6297d05206bdceb985a9470d66e | directory_id: 3a6a211ea0d32405497fbd6486c490bb147e25f9
path: /third_party/gsutil/gslib/ui_controller.py | content_id: 4aa9f320c36ef828f5e9e9145057072062b33797
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"] | license_type: permissive | repo_name: catapult-project/catapult
snapshot_id: e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0 | revision_id: 53102de187a48ac2cfc241fef54dcbc29c453a8e
branch_name: refs/heads/main | visit_date: 2021-05-25T07:37:22.832505
revision_date: 2021-05-24T08:01:49 | committer_date: 2021-05-25T06:07:38
github_id: 33,947,548 | star_events_count: 2,032 | fork_events_count: 742
gha_license_id: BSD-3-Clause | gha_event_created_at: 2022-08-26T16:01:18 | gha_created_at: 2015-04-14T17:49:05
gha_language: HTML | src_encoding: UTF-8 | language: Python
is_vendor: false | is_generated: false | length_bytes: 48,563 | extension: py | filename: ui_controller.py
content:
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for gsutil UI controller, UIThread and MainThreadUIQueue."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from collections import deque
import sys
import threading
import time
from six.moves import queue as Queue
from gslib.metrics import LogPerformanceSummaryParams
from gslib.metrics import LogRetryableError
from gslib.thread_message import FileMessage
from gslib.thread_message import FinalMessage
from gslib.thread_message import MetadataMessage
from gslib.thread_message import PerformanceSummaryMessage
from gslib.thread_message import ProducerThreadMessage
from gslib.thread_message import ProgressMessage
from gslib.thread_message import RetryableErrorMessage
from gslib.thread_message import SeekAheadMessage
from gslib.thread_message import StatusMessage
from gslib.utils import parallelism_framework_util
from gslib.utils.unit_util import DecimalShort
from gslib.utils.unit_util import HumanReadableWithDecimalPlaces
from gslib.utils.unit_util import MakeHumanReadable
from gslib.utils.unit_util import PrettyTime
_ZERO_TASKS_TO_DO_ARGUMENT = (
parallelism_framework_util.ZERO_TASKS_TO_DO_ARGUMENT)
class EstimationSource(object):
"""enum for total size source."""
# Integer to indicate total size came from the final ProducerThreadMessage.
# It has priority over all other total_size sources.
PRODUCER_THREAD_FINAL = 1
# Integer to indicate total size came from SeekAheadThread.
# It has priority over self.SEEK_AHEAD_THREAD and over
# self.INDIVIDUAL_MESSAGES.
SEEK_AHEAD_THREAD = 2
# Integer to indicate total size came from a ProducerThread estimation.
# It has priority over self.INDIVIDUAL_MESSAGES.
PRODUCER_THREAD_ESTIMATE = 3
# Stores the actual source from total_size. We start from FileMessages or
# MetadataMessages.
INDIVIDUAL_MESSAGES = 4
# Note: this priority based model was used in case we add new sources for
# total_size in the future. It also allows us to search for smaller numbers
# (larger priorities) rather than having to list those with higher priority.
def BytesToFixedWidthString(num_bytes, decimal_places=1):
"""Adjusts proper width for printing num_bytes in readable format.
Args:
num_bytes: The number of bytes we must display.
decimal_places: The standard number of decimal places.
Returns:
String of fixed width representing num_bytes.
"""
human_readable = HumanReadableWithDecimalPlaces(num_bytes,
decimal_places=decimal_places)
number_format = human_readable.split()
if int(round(float(number_format[0]))) >= 1000:
# If we are in the [1000:1024) range for the whole part of the number,
# we must remove the decimal part.
last_character = len(number_format[0]) - decimal_places - 1
number_format[0] = number_format[0][:last_character]
return '%9s' % (' '.join(number_format))
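# Illustrative behaviour (sketch, not part of the original module): the human-readable
# size is right-justified into a fixed 9-character field, so 2048 bytes comes back as
# something like '  2.0 KiB'.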
class StatusMessageManager(object):
"""General manager for common functions shared by data and metadata managers.
This subclass has the responsibility of having a common constructor and the
same handler for SeekAheadMessages and ProducerThreadMessages.
"""
class _ThroughputInformation(object):
"""Class that contains all information needed for throughput calculation.
This _ThroughputInformation is used to track progress and time at several
points of our operation.
"""
def __init__(self, progress, report_time):
"""Constructor of _ThroughputInformation.
Args:
progress: The current progress, in bytes/second or objects/second.
report_time: Float representing when progress was reported (seconds
since Epoch).
"""
self.progress = progress
self.time = report_time
def __init__(self,
update_message_period=1,
update_spinner_period=0.6,
sliding_throughput_period=5,
first_throughput_latency=10,
quiet_mode=False,
custom_time=None,
verbose=False,
console_width=80):
"""Instantiates a StatusMessageManager.
Args:
update_message_period: Minimum period for refreshing and displaying
new information. A non-positive value will ignore
any time restrictions imposed by this field, but
it will affect throughput and time remaining
estimations.
update_spinner_period: Minimum period for refreshing and displaying the
spinner. A non-positive value will ignore
any time restrictions imposed by this field.
sliding_throughput_period: Sliding period for throughput calculation. A
non-positive value will make it impossible to
calculate the throughput.
first_throughput_latency: Minimum waiting time before actually displaying
throughput info. A non-positive value will
ignore any time restrictions imposed by this
field.
quiet_mode: If True, do not print status messages (but still process
them for analytics reporting as necessary).
custom_time: If a custom start_time is desired. Used for testing.
verbose: Tells whether or not the operation is in verbose mode.
console_width: Width to display on console. This should not adjust the
visual output, just the space padding. For proper
visualization, we recommend setting this field to at least
80.
"""
self.update_message_period = update_message_period
self.update_spinner_period = update_spinner_period
self.sliding_throughput_period = sliding_throughput_period
self.first_throughput_latency = first_throughput_latency
self.quiet_mode = quiet_mode
self.custom_time = custom_time
self.verbose = verbose
self.console_width = console_width
# Initial estimation source for number of objects and total size
# is through individual FileMessages or individual MetadataMessages,
# depending on the StatusMessageManager superclass.
self.num_objects_source = EstimationSource.INDIVIDUAL_MESSAGES
self.total_size_source = EstimationSource.INDIVIDUAL_MESSAGES
self.num_objects = 0
# Only used on data operations. Will remain 0 for metadata operations.
self.total_size = 0
# Time at last info update displayed.
self.refresh_message_time = (self.custom_time
if self.custom_time else time.time())
self.start_time = self.refresh_message_time
# Time at last spinner update.
self.refresh_spinner_time = self.refresh_message_time
# Measured in objects/second or bytes/second, depending on the superclass.
self.throughput = 0.0
# Deque of _ThroughputInformation to help with throughput calculation.
self.old_progress = deque()
self.last_progress_time = 0
self.spinner_char_list = ['/', '-', '\\', '|']
self.current_spinner_index = 0
self.objects_finished = 0
self.num_objects = 0 # Number of objects being processed
# This overrides time constraints for updating and displaying
# important information, such as having finished to process an object.
self.object_report_change = False
self.final_message = False
def GetSpinner(self):
"""Returns the current spinner character.
Returns:
char_to_print: Char to be printed as the spinner
"""
return self.spinner_char_list[self.current_spinner_index]
def UpdateSpinner(self):
"""Updates the current spinner character."""
self.current_spinner_index = ((self.current_spinner_index + 1) %
len(self.spinner_char_list))
def _HandleProducerThreadMessage(self, status_message):
"""Handles a ProducerThreadMessage.
Args:
status_message: The ProducerThreadMessage to be processed.
"""
if status_message.finished:
# This means this was a final ProducerThreadMessage.
if self.num_objects_source >= EstimationSource.PRODUCER_THREAD_FINAL:
self.num_objects_source = EstimationSource.PRODUCER_THREAD_FINAL
self.num_objects = status_message.num_objects
if (self.total_size_source >= EstimationSource.PRODUCER_THREAD_FINAL and
status_message.size):
self.total_size_source = EstimationSource.PRODUCER_THREAD_FINAL
self.total_size = status_message.size
return
if self.num_objects_source >= EstimationSource.PRODUCER_THREAD_ESTIMATE:
self.num_objects_source = EstimationSource.PRODUCER_THREAD_ESTIMATE
self.num_objects = status_message.num_objects
if (self.total_size_source >= EstimationSource.PRODUCER_THREAD_ESTIMATE and
status_message.size):
self.total_size_source = EstimationSource.PRODUCER_THREAD_ESTIMATE
self.total_size = status_message.size
def _HandleSeekAheadMessage(self, status_message, stream):
"""Handles a SeekAheadMessage.
Args:
status_message: The SeekAheadMessage to be processed.
stream: Stream to print messages.
"""
estimate_message = ('Estimated work for this command: objects: %s' %
status_message.num_objects)
if status_message.size:
estimate_message += (', total size: %s' %
MakeHumanReadable(status_message.size))
if self.total_size_source >= EstimationSource.SEEK_AHEAD_THREAD:
self.total_size_source = EstimationSource.SEEK_AHEAD_THREAD
self.total_size = status_message.size
if self.num_objects_source >= EstimationSource.SEEK_AHEAD_THREAD:
self.num_objects_source = EstimationSource.SEEK_AHEAD_THREAD
self.num_objects = status_message.num_objects
estimate_message += '\n'
if not self.quiet_mode:
stream.write(estimate_message)
def _HandlePerformanceSummaryMessage(self, status_message):
"""Handles a PerformanceSummaryMessage.
Args:
status_message: The PerformanceSummaryMessage to be processed.
"""
LogPerformanceSummaryParams(uses_slice=status_message.uses_slice)
def ShouldTrackThroughput(self, cur_time):
"""Decides whether enough time has passed to start tracking throughput.
Args:
cur_time: current time.
Returns:
Whether or not we should track the throughput.
"""
return cur_time - self.start_time >= self.first_throughput_latency
def ShouldPrintProgress(self, cur_time):
"""Decides whether or not it is time for printing a new progress.
Args:
cur_time: current time.
Returns:
Whether or not we should print the progress.
"""
sufficient_time_elapsed = (cur_time - self.refresh_message_time >=
self.update_message_period)
# Don't report if we aren't actually going to do anything (for example,
# an rsync that will sync 0 objects).
nonzero_report = self.num_objects
return (sufficient_time_elapsed or
self.object_report_change) and (nonzero_report)
def ShouldPrintSpinner(self, cur_time):
"""Decides whether or not it is time for updating the spinner character.
Args:
cur_time: Current time.
Returns:
Whether or not we should update and print the spinner.
"""
return (cur_time - self.refresh_spinner_time > self.update_spinner_period
and self.total_size)
def PrintSpinner(self, stream=sys.stderr):
"""Prints a spinner character.
Args:
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
"""
self.UpdateSpinner()
if not self.quiet_mode:
stream.write(self.GetSpinner() + '\r')
def UpdateThroughput(self, cur_time, cur_progress):
"""Updates throughput if the required period for calculation has passed.
The throughput is calculated by taking all the progress (objects or bytes)
processed within the last sliding_throughput_period seconds, and dividing
that by the time period between the oldest progress time within that range
and the last progress measurement, which are defined by oldest_progress[1]
and last_progress_time, respectively. Among the pros of this approach,
a connection break or a sudden change in throughput is quickly noticeable.
Furthermore, using the last throughput measurement rather than the current
time allows us to have a better estimation of the actual throughput.
Args:
cur_time: Current time to check whether or not it is time for a new
throughput measurement.
cur_progress: The current progress, in number of objects finished or in
bytes.
"""
while (
len(self.old_progress) > 1 and
cur_time - self.old_progress[0].time > self.sliding_throughput_period):
self.old_progress.popleft()
if not self.old_progress:
return
oldest_progress = self.old_progress[0]
if self.last_progress_time == oldest_progress.time:
self.throughput = 0
return
# If old-progress is not empty and the time of oldest_progress does not
# match the last_progress_time, we can safely calculate the throughput.
self.throughput = ((cur_progress - oldest_progress.progress) /
(self.last_progress_time - oldest_progress.time))
# Just to avoid -0.00 B/s.
self.throughput = max(0, self.throughput)
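# Worked example of the formula above (an illustration, not part of the original
# module): if the oldest sample in the sliding window recorded 30 MiB at t=100 s,
# the last progress update was at t=104 s, and cur_progress is 80 MiB, then
# throughput = (80 - 30) MiB / (104 - 100) s = 12.5 MiB/s.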
def PrintFinalSummaryMessage(self, stream=sys.stderr):
"""Prints a final message to indicate operation succeeded.
Args:
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
"""
string_to_print = ('Operation completed over %s objects' %
DecimalShort(self.num_objects))
if self.total_size:
string_to_print += ('/%s' %
HumanReadableWithDecimalPlaces(self.total_size))
remaining_width = self.console_width - len(string_to_print)
if not self.quiet_mode:
stream.write(('\n' + string_to_print + '.' +
(max(remaining_width, 0) * ' ') + '\n'))
class MetadataManager(StatusMessageManager):
"""Manages shared state for metadata operations.
This manager is specific for metadata operations. Among its main functions,
it receives incoming StatusMessages, storing all necessary data
about the current and past states of the system necessary to display to the
UI. It also provides methods for calculating metrics such as throughput and
estimated time remaining. Finally, it provides methods for displaying messages
to the UI.
"""
def __init__(self,
update_message_period=1,
update_spinner_period=0.6,
sliding_throughput_period=5,
first_throughput_latency=10,
quiet_mode=False,
custom_time=None,
verbose=False,
console_width=80):
# pylint: disable=g-doc-args
"""Instantiates a MetadataManager.
See argument documentation in StatusMessageManager base class.
"""
# pylint: enable=g-doc-args
super(MetadataManager,
self).__init__(update_message_period=update_message_period,
update_spinner_period=update_spinner_period,
sliding_throughput_period=sliding_throughput_period,
first_throughput_latency=first_throughput_latency,
quiet_mode=quiet_mode,
custom_time=custom_time,
verbose=verbose,
console_width=console_width)
def GetProgress(self):
"""Gets the progress for a MetadataManager.
Returns:
The number of finished objects.
"""
return self.objects_finished
def _HandleMetadataMessage(self, status_message):
"""Handles a MetadataMessage.
Args:
status_message: The MetadataMessage to be processed.
"""
self.objects_finished += 1
if self.num_objects_source >= EstimationSource.INDIVIDUAL_MESSAGES:
self.num_objects_source = EstimationSource.INDIVIDUAL_MESSAGES
self.num_objects += 1
# Ensures we print periodic progress, and that we send a final message.
self.object_report_change = True
self.last_progress_time = status_message.time
if (self.objects_finished == self.num_objects and
self.num_objects_source == EstimationSource.PRODUCER_THREAD_FINAL):
self.final_message = True
def ProcessMessage(self, status_message, stream):
"""Processes a message from _MainThreadUIQueue or _UIThread.
Args:
status_message: The StatusMessage item to be processed.
stream: Stream to print messages.
"""
self.object_report_change = False
if isinstance(status_message, SeekAheadMessage):
self._HandleSeekAheadMessage(status_message, stream)
elif isinstance(status_message, ProducerThreadMessage):
self._HandleProducerThreadMessage(status_message)
elif isinstance(status_message, MetadataMessage):
self._HandleMetadataMessage(status_message)
elif isinstance(status_message, RetryableErrorMessage):
LogRetryableError(status_message)
elif isinstance(status_message, PerformanceSummaryMessage):
self._HandlePerformanceSummaryMessage(status_message)
self.old_progress.append(
self._ThroughputInformation(self.objects_finished, status_message.time))
def PrintProgress(self, stream=sys.stderr):
"""Prints progress and throughput/time estimation.
Prints total number of objects and number of finished objects with the
percentage of work done, potentially including the throughput
(in objects/second) and estimated time remaining.
Args:
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
"""
# Time to update all information
total_remaining = self.num_objects - self.objects_finished
if self.throughput:
time_remaining = total_remaining / self.throughput
else:
time_remaining = None
char_to_print = self.GetSpinner()
if self.num_objects_source <= EstimationSource.SEEK_AHEAD_THREAD:
# An example of objects_completed here would be ' [2/3 objects]'.
objects_completed = ('[' + DecimalShort(self.objects_finished) + '/' +
DecimalShort(self.num_objects) + ' objects]')
if self.num_objects == self.objects_finished:
percentage = '100'
else:
percentage = (
'%3d' %
min(99, int(100 * float(self.objects_finished) / self.num_objects)))
percentage_completed = percentage + '% Done'
else:
# An example of objects_completed here would be ' [2 objects]'.
objects_completed = ('[' + DecimalShort(self.objects_finished) +
' objects]')
percentage_completed = ''
if (self.refresh_message_time - self.start_time >
self.first_throughput_latency):
# Should also include throughput.
# An example of throughput here would be '2 objects/s'
throughput = '%.2f objects/s' % self.throughput
if (self.num_objects_source <= EstimationSource.PRODUCER_THREAD_ESTIMATE
and self.throughput):
# Should also include time remaining.
# An example of time remaining would be ' ETA 00:00:11'.
time_remaining_str = 'ETA ' + PrettyTime(time_remaining)
else:
time_remaining_str = ''
else:
throughput = ''
time_remaining_str = ''
format_str = ('{char_to_print} {objects_completed} {percentage_completed}'
' {throughput} {time_remaining_str}')
string_to_print = format_str.format(
char_to_print=char_to_print,
objects_completed=objects_completed,
percentage_completed=percentage_completed,
throughput=throughput,
time_remaining_str=time_remaining_str)
remaining_width = self.console_width - len(string_to_print)
if not self.quiet_mode:
stream.write(string_to_print + (max(remaining_width, 0) * ' ') + '\r')
def CanHandleMessage(self, status_message):
"""Determines whether this manager is suitable for handling status_message.
Args:
status_message: The StatusMessage object to be analyzed.
Returns:
True if this message can be properly handled by this manager,
False otherwise.
"""
if isinstance(
status_message,
(SeekAheadMessage, ProducerThreadMessage, MetadataMessage, FinalMessage,
RetryableErrorMessage, PerformanceSummaryMessage)):
return True
return False
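# Hedged illustration (not part of gsutil): a minimal sketch of the ETA math that
# MetadataManager.PrintProgress relies on, where throughput is measured in
# objects/second. The helper name and arguments below are hypothetical.
def _example_metadata_eta(objects_finished, num_objects, throughput):
  """Returns the estimated seconds remaining, or None when throughput is unknown."""
  remaining = num_objects - objects_finished
  if not throughput:
    return None
  return remaining / float(throughput)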
class DataManager(StatusMessageManager):
"""Manages shared state for data operations.
This manager is specific to data operations. Among its main functions,
it receives incoming StatusMessages and stores the data about the current and
past states of the system that is needed to report progress to the UI. It also
provides methods for calculating metrics such as throughput and estimated time
remaining, and for displaying messages to the UI.
"""
class _ProgressInformation(object):
"""Class that contains all progress information needed for a given file.
This _ProgressInformation is used as the value associated with a file_name
in the dict that stores the information about all processed files.
"""
def __init__(self, size):
"""Constructor of _ProgressInformation.
Args:
size: The total size of the file.
"""
# Sum of all progress obtained in this operation.
self.new_progress_sum = 0
# Sum of all progress from previous operations (mainly for resuming
# uploads or resuming downloads).
self.existing_progress_sum = 0
# Dict for tracking the progress for each individual component. Key is
# of the form (component_num, dst_url) and correspondent element is a
# tuple which stores the current progress obtained from this operation,
# and the progress obtained from previous operations.
self.dict = {}
# The total size for the file
self.size = size
def __init__(self,
update_message_period=1,
update_spinner_period=0.6,
sliding_throughput_period=5,
first_throughput_latency=10,
quiet_mode=False,
custom_time=None,
verbose=False,
console_width=None):
# pylint: disable=g-doc-args
"""Instantiates a DataManager.
See argument documentation in StatusMessageManager base class.
"""
# pylint: enable=g-doc-args
super(DataManager,
self).__init__(update_message_period=update_message_period,
update_spinner_period=update_spinner_period,
sliding_throughput_period=sliding_throughput_period,
first_throughput_latency=first_throughput_latency,
quiet_mode=quiet_mode,
custom_time=custom_time,
verbose=verbose,
console_width=console_width)
self.first_item = True
self.total_progress = 0 # Sum of progress for all threads.
self.new_progress = 0
self.existing_progress = 0
# Dict containing individual progress for each file. Key is filename
# (from src_url). It maps to a _ProgressInformation object.
self.individual_file_progress = {}
self.component_total = 0
self.finished_components = 0
self.existing_components = 0
def GetProgress(self):
"""Gets the progress for a DataManager.
Returns:
The number of processed bytes in this operation.
"""
return self.new_progress
def _HandleFileDescription(self, status_message):
"""Handles a FileMessage that describes a file.
Args:
status_message: the FileMessage to be processed.
"""
if not status_message.finished:
# File started.
if self.first_item and not self.custom_time:
# Set initial time.
self.refresh_message_time = status_message.time
self.start_time = self.refresh_message_time
self.last_throughput_time = self.refresh_message_time
self.first_item = False
# Gets file name (from src_url).
file_name = status_message.src_url.url_string
status_message.size = status_message.size if status_message.size else 0
# Creates a new entry on individual_file_progress.
self.individual_file_progress[file_name] = self._ProgressInformation(
status_message.size)
if self.num_objects_source >= EstimationSource.INDIVIDUAL_MESSAGES:
# This ensures the file has not been counted on SeekAheadThread or
# in ProducerThread.
self.num_objects_source = EstimationSource.INDIVIDUAL_MESSAGES
self.num_objects += 1
if self.total_size_source >= EstimationSource.INDIVIDUAL_MESSAGES:
# This ensures the file size has not been counted on SeekAheadThread or
# in ProducerThread.
self.total_size_source = EstimationSource.INDIVIDUAL_MESSAGES
self.total_size += status_message.size
self.object_report_change = True
else:
# File finished.
self.objects_finished += 1
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
total_bytes_transferred = (file_progress.new_progress_sum +
file_progress.existing_progress_sum)
# Ensures total_progress has the right value.
self.total_progress += file_progress.size - total_bytes_transferred
self.new_progress += file_progress.size - total_bytes_transferred
self.last_progress_time = status_message.time
# Deleting _ProgressInformation object to save memory.
del self.individual_file_progress[file_name]
self.object_report_change = True
if (self.objects_finished == self.num_objects and
self.num_objects_source == EstimationSource.PRODUCER_THREAD_FINAL):
self.final_message = True
def _IsFile(self, file_message):
"""Tells whether or not this FileMessage represent a file.
This is needed because FileMessage is used by both files and components.
Args:
file_message: The FileMessage to be analyzed.
Returns:
Whether or not this represents a file.
"""
message_type = file_message.message_type
return (message_type == FileMessage.FILE_DOWNLOAD or
message_type == FileMessage.FILE_UPLOAD or
message_type == FileMessage.FILE_CLOUD_COPY or
message_type == FileMessage.FILE_DAISY_COPY or
message_type == FileMessage.FILE_LOCAL_COPY or
message_type == FileMessage.FILE_REWRITE or
message_type == FileMessage.FILE_HASH)
def _HandleComponentDescription(self, status_message):
"""Handles a FileMessage that describes a component.
Args:
status_message: The FileMessage to be processed.
"""
if (status_message.message_type == FileMessage.EXISTING_COMPONENT and
not status_message.finished):
# Existing component: have to ensure total_progress accounts for it.
self.existing_components += 1
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
key = (status_message.component_num, status_message.dst_url)
file_progress.dict[key] = (0, status_message.size)
file_progress.existing_progress_sum += status_message.size
self.total_progress += status_message.size
self.existing_progress += status_message.size
elif ((status_message.message_type == FileMessage.COMPONENT_TO_UPLOAD or
status_message.message_type == FileMessage.COMPONENT_TO_DOWNLOAD)):
if not status_message.finished:
# Component started.
self.component_total += 1
if status_message.message_type == FileMessage.COMPONENT_TO_DOWNLOAD:
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
file_progress.existing_progress_sum += (
status_message.bytes_already_downloaded)
key = (status_message.component_num, status_message.dst_url)
file_progress.dict[key] = (0, status_message.bytes_already_downloaded)
self.total_progress += status_message.bytes_already_downloaded
self.existing_progress += status_message.bytes_already_downloaded
else:
# Component finished.
self.finished_components += 1
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
key = (status_message.component_num, status_message.dst_url)
last_update = (file_progress.dict[key] if key in file_progress.dict else
(0, 0))
self.total_progress += status_message.size - sum(last_update)
self.new_progress += status_message.size - sum(last_update)
self.last_progress_time = status_message.time
file_progress.new_progress_sum += (status_message.size -
sum(last_update))
file_progress.dict[key] = (status_message.size - last_update[1],
last_update[1])
def _HandleProgressMessage(self, status_message):
"""Handles a ProgressMessage that tracks progress of a file or component.
Args:
status_message: The ProgressMessage to be processed.
"""
# Retrieving index and dict for this file.
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
# Retrieves last update ((0,0) if no previous update) for this file or
# component. To ensure uniqueness (among components),
# we use a (component_num, dst_url) tuple as our key.
key = (status_message.component_num, status_message.dst_url)
last_update = (file_progress.dict[key] if key in file_progress.dict else
(0, 0))
status_message.processed_bytes -= last_update[1]
file_progress.new_progress_sum += (status_message.processed_bytes -
last_update[0])
# Updates total progress with new update from component.
self.total_progress += status_message.processed_bytes - last_update[0]
self.new_progress += status_message.processed_bytes - last_update[0]
# Updates file_progress.dict on component's key.
file_progress.dict[key] = (status_message.processed_bytes, last_update[1])
self.last_progress_time = status_message.time
def ProcessMessage(self, status_message, stream):
"""Processes a message from _MainThreadUIQueue or _UIThread.
Args:
status_message: The StatusMessage item to be processed.
stream: Stream to print messages. Here only for SeekAheadThread
"""
self.object_report_change = False
if isinstance(status_message, ProducerThreadMessage):
# ProducerThread info.
self._HandleProducerThreadMessage(status_message)
elif isinstance(status_message, SeekAheadMessage):
# SeekAheadThread info.
self._HandleSeekAheadMessage(status_message, stream)
elif isinstance(status_message, FileMessage):
if self._IsFile(status_message):
# File info.
self._HandleFileDescription(status_message)
else:
# Component info.
self._HandleComponentDescription(status_message)
LogPerformanceSummaryParams(file_message=status_message)
elif isinstance(status_message, ProgressMessage):
# Progress info.
self._HandleProgressMessage(status_message)
elif isinstance(status_message, RetryableErrorMessage):
LogRetryableError(status_message)
elif isinstance(status_message, PerformanceSummaryMessage):
self._HandlePerformanceSummaryMessage(status_message)
self.old_progress.append(
self._ThroughputInformation(self.new_progress, status_message.time))
def PrintProgress(self, stream=sys.stderr):
"""Prints progress and throughput/time estimation.
If a ProducerThreadMessage or SeekAheadMessage has been provided,
it outputs the number of files completed, number of total files,
the current progress, the total size, and the percentage it
represents.
If none of those have been provided, it only includes the number of files
completed, the current progress and total size (which might be updated),
with no percentage as we do not know if more files are coming.
It may also include throughput and a time estimate (the latter is available
only when a ProducerThreadMessage or SeekAheadMessage has been provided).
For that to happen, at least first_throughput_latency seconds must have
passed since the UIController started, and either the ProducerThread or the
SeekAheadThread must have estimated the total number of files and the
total size.
Args:
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
"""
# Time to update all information.
total_remaining = self.total_size - self.total_progress
if self.throughput:
time_remaining = total_remaining / self.throughput
else:
time_remaining = None
char_to_print = self.GetSpinner()
if self.num_objects_source <= EstimationSource.SEEK_AHEAD_THREAD:
# An example of objects_completed here would be ' [2/3 files]'.
objects_completed = ('[' + DecimalShort(self.objects_finished) + '/' +
DecimalShort(self.num_objects) + ' files]')
else:
# An example of objects_completed here would be ' [2 files]'.
objects_completed = '[' + DecimalShort(self.objects_finished) + ' files]'
# An example of bytes_progress would be '[101.0 MiB/1.0 GiB]'.
bytes_progress = ('[%s/%s]' % (BytesToFixedWidthString(
self.total_progress), BytesToFixedWidthString(self.total_size)))
if self.total_size_source <= EstimationSource.SEEK_AHEAD_THREAD:
if self.num_objects == self.objects_finished:
percentage = '100'
else:
percentage = (
'%3d' %
min(99, int(100 * float(self.total_progress) / self.total_size)))
percentage_completed = percentage + '% Done'
else:
percentage_completed = ''
if (self.refresh_message_time - self.start_time >
self.first_throughput_latency):
# Should also include throughput.
# An example of throughput here would be ' 82.3 MiB/s'
throughput = BytesToFixedWidthString(self.throughput) + '/s'
if (self.total_size_source <= EstimationSource.PRODUCER_THREAD_ESTIMATE
and self.throughput):
# Should also include time remaining.
# An example of time remaining would be ' ETA 00:00:11'.
time_remaining_str = 'ETA ' + PrettyTime(time_remaining)
else:
time_remaining_str = ''
else:
throughput = ''
time_remaining_str = ''
format_str = ('{char_to_print} {objects_completed}{bytes_progress}'
' {percentage_completed} {throughput} {time_remaining_str}')
string_to_print = format_str.format(
char_to_print=char_to_print,
objects_completed=objects_completed,
bytes_progress=bytes_progress,
percentage_completed=percentage_completed,
throughput=throughput,
time_remaining_str=time_remaining_str)
remaining_width = self.console_width - len(string_to_print)
if not self.quiet_mode:
stream.write(string_to_print + (max(remaining_width, 0) * ' ') + '\r')
def CanHandleMessage(self, status_message):
"""Determines whether this manager is suitable for handling status_message.
Args:
status_message: The StatusMessage object to be analyzed.
Returns:
True if this message can be properly handled by this manager,
False otherwise.
"""
if isinstance(status_message, (
SeekAheadMessage,
ProducerThreadMessage,
FileMessage,
ProgressMessage,
FinalMessage,
RetryableErrorMessage,
PerformanceSummaryMessage,
)):
return True
return False
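# Hedged illustration (not part of gsutil): a minimal sketch of the percentage rule
# both managers apply in PrintProgress, where the display is capped at 99% until the
# operation has actually finished. The helper below is hypothetical.
def _example_percentage(progress, total, finished):
  """Formats a right-aligned percentage, only reporting 100 once finished is True."""
  if finished:
    return '100'
  return '%3d' % min(99, int(100 * float(progress) / total))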
class UIController(object):
"""Controller UI class to integrate _MainThreadUIQueue and _UIThread.
This class receives messages from _MainThreadUIQueue and _UIThread and sends
them to an appropriate manager, which then processes and stores data about
them.
"""
def __init__(self,
update_message_period=1,
update_spinner_period=0.6,
sliding_throughput_period=5,
first_throughput_latency=10,
quiet_mode=False,
custom_time=None,
verbose=False,
dump_status_messages_file=None):
"""Instantiates a UIController.
Args:
update_message_period: Minimum period for refreshing and displaying
new information. A non-positive value will ignore any time
restrictions imposed by this field.
update_spinner_period: Minimum period for refreshing and displaying the
spinner. A non-positive value will ignore any time restrictions
imposed by this field.
sliding_throughput_period: Sliding period for throughput calculation. A
non-positive value will make it impossible to calculate the
throughput.
first_throughput_latency: Minimum waiting time before actually displaying
throughput info. A non-positive value will ignore any time
restrictions imposed by this field.
quiet_mode: If True, do not print status messages (but still process
them for analytics reporting as necessary).
custom_time: If a custom start_time is desired. Used for testing.
verbose: Tells whether or not the operation is on verbose mode.
dump_status_messages_file: File path for logging all received status
messages, for debugging purposes.
"""
self.verbose = verbose
self.update_message_period = update_message_period
self.update_spinner_period = update_spinner_period
self.sliding_throughput_period = sliding_throughput_period
self.first_throughput_latency = first_throughput_latency
self.manager = None
self.quiet_mode = quiet_mode
self.custom_time = custom_time
self.console_width = 80 # Console width. Passed to manager.
# List storing all estimation messages from SeekAheadThread or
# ProducerThread. This is used when we still do not know which manager to
# use.
self.early_estimation_messages = []
self.printed_final_message = False
self.dump_status_message_fp = None
if dump_status_messages_file:
self.dump_status_message_fp = open(dump_status_messages_file, 'ab')
def _HandleMessage(self, status_message, stream, cur_time=None):
"""Processes a message, updates throughput and prints progress.
Args:
status_message: Message to be processed. Could be None if UIThread cannot
retrieve message from status_queue.
stream: stream to print messages. Usually sys.stderr, but customizable
for testing.
cur_time: Message time. Used to determine if it is time to refresh
output, or calculate throughput.
"""
self.manager.ProcessMessage(status_message, stream)
if self.manager.ShouldPrintProgress(cur_time):
if self.manager.ShouldTrackThroughput(cur_time):
self.manager.UpdateThroughput(cur_time, self.manager.GetProgress())
self.manager.PrintProgress(stream)
self.manager.refresh_message_time = cur_time
if self.manager.ShouldPrintSpinner(cur_time):
self.manager.PrintSpinner(stream)
self.manager.refresh_spinner_time = cur_time
if ((isinstance(status_message, FinalMessage) or self.manager.final_message)
and self.manager.num_objects and not self.printed_final_message):
self.printed_final_message = True
LogPerformanceSummaryParams(
num_objects_transferred=self.manager.num_objects)
self.manager.PrintFinalSummaryMessage(stream)
def Call(self, status_message, stream, cur_time=None):
"""Coordinates UI manager and calls appropriate function to handle message.
Args:
status_message: Message to be processed. Could be None if UIThread cannot
retrieve message from status_queue.
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
cur_time: Message time. Used to determine if it is time to refresh
output, or calculate throughput.
"""
if not isinstance(status_message, StatusMessage):
if status_message == _ZERO_TASKS_TO_DO_ARGUMENT and not self.manager:
# Create a manager to handle early estimation messages before returning.
self.manager = (DataManager(
update_message_period=self.update_message_period,
update_spinner_period=self.update_spinner_period,
sliding_throughput_period=self.sliding_throughput_period,
first_throughput_latency=self.first_throughput_latency,
quiet_mode=self.quiet_mode,
custom_time=self.custom_time,
verbose=self.verbose,
console_width=self.console_width))
for estimation_message in self.early_estimation_messages:
self._HandleMessage(estimation_message,
stream,
cur_time=estimation_message.time)
return
if self.dump_status_message_fp:
# TODO: Add Unicode support to string methods on message classes.
# Currently, dump will fail with a UnicodeEncodeError if the message
# class contains a Unicode attribute.
self.dump_status_message_fp.write(str(status_message))
self.dump_status_message_fp.write('\n')
if not cur_time:
cur_time = status_message.time
if not self.manager:
if (isinstance(status_message, SeekAheadMessage) or
isinstance(status_message, ProducerThreadMessage)):
self.early_estimation_messages.append(status_message)
return
elif isinstance(status_message, MetadataMessage):
self.manager = (MetadataManager(
update_message_period=self.update_message_period,
update_spinner_period=self.update_spinner_period,
sliding_throughput_period=self.sliding_throughput_period,
first_throughput_latency=self.first_throughput_latency,
quiet_mode=self.quiet_mode,
custom_time=self.custom_time,
verbose=self.verbose,
console_width=self.console_width))
for estimation_message in self.early_estimation_messages:
self._HandleMessage(estimation_message, stream, cur_time)
else:
self.manager = (DataManager(
update_message_period=self.update_message_period,
update_spinner_period=self.update_spinner_period,
sliding_throughput_period=self.sliding_throughput_period,
first_throughput_latency=self.first_throughput_latency,
quiet_mode=self.quiet_mode,
custom_time=self.custom_time,
verbose=self.verbose,
console_width=self.console_width))
for estimation_message in self.early_estimation_messages:
self._HandleMessage(estimation_message, stream, cur_time)
if not self.manager.CanHandleMessage(status_message):
if (isinstance(status_message, FileMessage) or
isinstance(status_message, ProgressMessage)):
# We have to create a DataManager to handle this data message. This is
# to avoid a possible race condition where MetadataMessages are sent
# before data messages. As such, this means that the DataManager has
# priority, and whenever a data message is received, we ignore the
# MetadataManager if one exists, and start a DataManager from scratch.
# This can be done because we do not need any MetadataMessages to
# properly handle a data operation. It could be useful to send the
# early estimation messages, if those are available.
self.manager = (DataManager(
update_message_period=self.update_message_period,
update_spinner_period=self.update_spinner_period,
sliding_throughput_period=self.sliding_throughput_period,
first_throughput_latency=self.first_throughput_latency,
custom_time=self.custom_time,
verbose=self.verbose,
console_width=self.console_width))
for estimation_message in self.early_estimation_messages:
self._HandleMessage(estimation_message, stream, cur_time)
else:
# No need to handle this message.
return
self._HandleMessage(status_message, stream, cur_time)
class MainThreadUIQueue(object):
"""Handles status display and processing in the main thread / master process.
This class emulates a queue to cover main-thread activity before or after
Apply, as well as for the single-threaded, single-process case, i.e.,
_SequentialApply. When multiple threads or processes are used during calls
to Apply, the main thread is waiting for work to complete, and this queue
must remain unused until Apply returns. Code producing arguments for
Apply (such as the NameExpansionIterator) must not post messages to this
queue to avoid race conditions with the UIThread.
This class sends the messages it receives to UIController, which
decides the correct course of action.
"""
def __init__(self, stream, ui_controller):
"""Instantiates a _MainThreadUIQueue.
Args:
stream: Stream for printing messages.
ui_controller: UIController to manage messages.
"""
super(MainThreadUIQueue, self).__init__()
self.ui_controller = ui_controller
self.stream = stream
# pylint: disable=invalid-name, unused-argument
def put(self, status_message, timeout=None):
self.ui_controller.Call(status_message, self.stream)
# pylint: enable=invalid-name, unused-argument
class UIThread(threading.Thread):
"""Responsible for centralized printing across multiple processes/threads.
This class pulls status messages that are posted to the centralized status
queue and coordinates displaying status and progress to the user. It is
used only during calls to _ParallelApply, which in turn is called only when
multiple threads and/or processes are used.
This class sends the messages it receives to UIController, which
decides the correct course of action.
"""
def __init__(self, status_queue, stream, ui_controller, timeout=1):
"""Instantiates a _UIThread.
Args:
status_queue: Queue for reporting status updates.
stream: Stream for printing messages.
ui_controller: UI controller to manage messages.
timeout: Timeout for getting a message.
"""
super(UIThread, self).__init__()
self.status_queue = status_queue
self.stream = stream
self.timeout = timeout
self.ui_controller = ui_controller
self.start()
def run(self):
try:
while True:
try:
status_message = self.status_queue.get(timeout=self.timeout)
except Queue.Empty:
status_message = None
continue
self.ui_controller.Call(status_message, self.stream)
if status_message == _ZERO_TASKS_TO_DO_ARGUMENT:
# Item from MainThread to indicate we are done.
break
except Exception as e: # pylint:disable=broad-except
self.stream.write('Exception in UIThread: %s\n' % e)
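# Hedged illustration (not part of gsutil): a rough sketch of how the classes above
# could be wired together for the single-process case; real gsutil constructs these
# objects elsewhere, so treat this only as an orientation aid.
def _example_wire_ui(stream):
  """Builds a UIController plus a MainThreadUIQueue writing to the given stream."""
  controller = UIController(quiet_mode=True)
  queue = MainThreadUIQueue(stream, controller)
  # Callers would then queue.put(status_message) as StatusMessages are produced.
  return queue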
home-assistant/core :: /homeassistant/components/hardware/websocket_api.py (Apache-2.0, Python, 4,323 bytes)
"""The Hardware websocket API."""
from __future__ import annotations
import contextlib
from dataclasses import asdict, dataclass
from datetime import datetime, timedelta
from typing import Any
import psutil_home_assistant as ha_psutil
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.event import async_track_time_interval
import homeassistant.util.dt as dt_util
from .const import DOMAIN
from .hardware import async_process_hardware_platforms
from .models import HardwareProtocol
@dataclass(slots=True)
class SystemStatus:
"""System status."""
ha_psutil: ha_psutil
remove_periodic_timer: CALLBACK_TYPE | None
subscribers: set[tuple[websocket_api.ActiveConnection, int]]
async def async_setup(hass: HomeAssistant) -> None:
"""Set up the hardware websocket API."""
websocket_api.async_register_command(hass, ws_info)
websocket_api.async_register_command(hass, ws_subscribe_system_status)
hass.data[DOMAIN]["system_status"] = SystemStatus(
ha_psutil=await hass.async_add_executor_job(ha_psutil.PsutilWrapper),
remove_periodic_timer=None,
subscribers=set(),
)
@websocket_api.websocket_command(
{
vol.Required("type"): "hardware/info",
}
)
@websocket_api.async_response
async def ws_info(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict[str, Any]
) -> None:
"""Return hardware info."""
hardware_info = []
if "hardware_platform" not in hass.data[DOMAIN]:
await async_process_hardware_platforms(hass)
hardware_platform: dict[str, HardwareProtocol] = hass.data[DOMAIN][
"hardware_platform"
]
for platform in hardware_platform.values():
if hasattr(platform, "async_info"):
with contextlib.suppress(HomeAssistantError):
hardware_info.extend([asdict(hw) for hw in platform.async_info(hass)])
connection.send_result(msg["id"], {"hardware": hardware_info})
@websocket_api.websocket_command(
{
vol.Required("type"): "hardware/subscribe_system_status",
}
)
@websocket_api.async_response
async def ws_subscribe_system_status(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict[str, Any]
) -> None:
"""Subscribe to system status updates."""
system_status: SystemStatus = hass.data[DOMAIN]["system_status"]
@callback
def async_update_status(now: datetime) -> None:
# Although cpu_percent and virtual_memory access files in the /proc vfs, those
# accesses do not block and we don't need to wrap the calls in an executor.
# https://elixir.bootlin.com/linux/v5.19.4/source/fs/proc/stat.c
# https://elixir.bootlin.com/linux/v5.19.4/source/fs/proc/meminfo.c#L32
cpu_percentage = round(
system_status.ha_psutil.psutil.cpu_percent(interval=None)
)
virtual_memory = system_status.ha_psutil.psutil.virtual_memory()
json_msg = {
"cpu_percent": cpu_percentage,
"memory_used_percent": virtual_memory.percent,
"memory_used_mb": round(
(virtual_memory.total - virtual_memory.available) / 1024**2, 1
),
"memory_free_mb": round(virtual_memory.available / 1024**2, 1),
"timestamp": dt_util.utcnow().isoformat(),
}
for connection, msg_id in system_status.subscribers:
connection.send_message(websocket_api.event_message(msg_id, json_msg))
if not system_status.subscribers:
system_status.remove_periodic_timer = async_track_time_interval(
hass, async_update_status, timedelta(seconds=5)
)
system_status.subscribers.add((connection, msg["id"]))
@callback
def cancel_subscription() -> None:
system_status.subscribers.remove((connection, msg["id"]))
if not system_status.subscribers and system_status.remove_periodic_timer:
system_status.remove_periodic_timer()
system_status.remove_periodic_timer = None
connection.subscriptions[msg["id"]] = cancel_subscription
connection.send_message(websocket_api.result_message(msg["id"]))
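# Hedged illustration (not part of Home Assistant): the approximate shape of the
# event payload a "hardware/subscribe_system_status" subscriber receives every five
# seconds, based on json_msg above. The values here are made up.
_EXAMPLE_SYSTEM_STATUS_EVENT = {
    "cpu_percent": 14,
    "memory_used_percent": 37.2,
    "memory_used_mb": 1490.3,
    "memory_free_mb": 2509.7,
    "timestamp": "2023-01-01T00:00:00+00:00",
}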
tomerfiliba-org/rpyc :: /rpyc/core/reactor.py (MIT, Python, 924 bytes)
import os
import select
import threading
class SelectReactor(object):
TIMEOUT = 0.5 if os.name == "nt" else None
def __init__(self):
self._active = False
self._readfds = set()
def register_read(self, fileobj):
# _readfds is a set, so use add() rather than append().
self._readfds.add(fileobj)
def run(self):
self._active = True
while self._active:
rlist, _, _ = select.select(self._readfds, (), (), self.TIMEOUT)
for fileobj in rlist:
data = fileobj.recv(16000)
if not data:
fileobj.close()
self._readfds.discard(fileobj)
_reactor = SelectReactor()
def _reactor_thread():
# Run the module-level reactor loop in the background thread.
_reactor.run()
_thd = None
def start_reactor():
global _thd
if _thd is not None:
raise ValueError("already started")
_thd = threading.Thread(name="rpyc reactor thread", target=_reactor_thread)
_thd.daemon = True
_thd.start()
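# Hedged illustration (not part of rpyc): a minimal sketch of driving the reactor by
# hand with a connected socket pair. Nothing in rpyc calls this helper.
def _example_reactor_usage():
    import socket
    a, b = socket.socketpair()
    reactor = SelectReactor()
    reactor.register_read(a)
    b.sendall(b"ping")
    b.close()
    # reactor.run() would now read the pending bytes, hit EOF and drop the socket;
    # it keeps looping until reactor._active is set to False.
    a.close()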
alldatacenter/alldata :: /govern/data-quality/soda-core/soda/core/soda/sodacl/freshness_check_cfg.py (Apache-2.0/BSD-3-Clause/MIT, Python, 981 bytes)
from __future__ import annotations
from datetime import timedelta
from soda.sodacl.check_cfg import CheckCfg
from soda.sodacl.location import Location
class FreshnessCheckCfg(CheckCfg):
def __init__(
self,
source_header: str,
source_line: str,
source_configurations: str | None,
location: Location,
name: str | None,
column_name: str,
variable_name: str | None,
fail_freshness_threshold: timedelta,
warn_freshness_threshold: timedelta,
):
super().__init__(source_header, source_line, source_configurations, location, name)
self.column_name: str = column_name
self.variable_name: str = "NOW" if variable_name is None else variable_name
self.fail_freshness_threshold: timedelta = fail_freshness_threshold
self.warn_freshness_threshold: timedelta = warn_freshness_threshold
def get_column_name(self) -> str | None:
return self.column_name
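# Hedged illustration (not part of soda-core): how fail/warn freshness thresholds of
# this kind are typically evaluated. The real evaluation lives elsewhere in
# soda-core, so the helper below is only a sketch with made-up names.
def _example_classify_freshness(staleness, fail_threshold, warn_threshold):
    """Classifies a staleness timedelta against fail/warn timedelta thresholds."""
    if staleness > fail_threshold:
        return "fail"
    if warn_threshold is not None and staleness > warn_threshold:
        return "warn"
    return "pass"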
plotly/plotly.py :: /packages/python/plotly/plotly/validators/layout/selection/__init__.py (MIT, Python, 1,169 bytes)
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._yref import YrefValidator
from ._y1 import Y1Validator
from ._y0 import Y0Validator
from ._xref import XrefValidator
from ._x1 import X1Validator
from ._x0 import X0Validator
from ._type import TypeValidator
from ._templateitemname import TemplateitemnameValidator
from ._path import PathValidator
from ._opacity import OpacityValidator
from ._name import NameValidator
from ._line import LineValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._yref.YrefValidator",
"._y1.Y1Validator",
"._y0.Y0Validator",
"._xref.XrefValidator",
"._x1.X1Validator",
"._x0.X0Validator",
"._type.TypeValidator",
"._templateitemname.TemplateitemnameValidator",
"._path.PathValidator",
"._opacity.OpacityValidator",
"._name.NameValidator",
"._line.LineValidator",
],
)
apache/impala :: /tests/metadata/test_recover_partitions.py (Apache-2.0 and other licenses, Python, 22,362 bytes)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Impala tests for ALTER TABLE RECOVER PARTITIONS statement
from __future__ import absolute_import, division, print_function
from builtins import range
import os
import shutil
from six.moves import urllib
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfLocal, SkipIfFS, SkipIfCatalogV2
from tests.common.test_dimensions import ALL_NODES_ONLY
from tests.common.test_dimensions import create_exec_option_dimension
from tests.util.filesystem_utils import WAREHOUSE, IS_S3
from tests.common.test_dimensions import create_uncompressed_text_dimension
# Validates ALTER TABLE RECOVER PARTITIONS statement
class TestRecoverPartitions(ImpalaTestSuite):
DEF_NULL_PART_KEY = "__HIVE_DEFAULT_PARTITION__"
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestRecoverPartitions, cls).add_test_dimensions()
sync_ddl_opts = [0, 1]
if cls.exploration_strategy() != 'exhaustive':
# Only run with sync_ddl on exhaustive since it increases test runtime.
sync_ddl_opts = [0]
cls.ImpalaTestMatrix.add_dimension(create_exec_option_dimension(
cluster_sizes=ALL_NODES_ONLY,
disable_codegen_options=[False],
batch_sizes=[0],
sync_ddl=sync_ddl_opts))
# There is no reason to run these tests using all dimensions.
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def __get_fs_location(self, db_name, table_name):
return '%s/%s.db/%s/' % (WAREHOUSE, db_name, table_name)
@SkipIfLocal.hdfs_client
def test_recover_partitions(self, vector, unique_database):
"""Test that RECOVER PARTITIONS correctly discovers new partitions added externally
by the hdfs client.
"""
TBL_NAME = "test_recover_partitions"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
TBL_LOCATION = self.__get_fs_location(unique_database, TBL_NAME)
PART_NAME = "p2"
LEAF_DIR = "i=0001/p=%s/" % PART_NAME
MALFORMED_DIR = "i=fish/p=%s/" % PART_NAME
FILE_PATH = "test"
INSERTED_VALUE = "2"
NULL_DIR = "i=1/p=%s/" % self.DEF_NULL_PART_KEY
NULL_INSERTED_VALUE = "4"
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % (FQ_TBL_NAME))
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % (FQ_TBL_NAME))
# Create a path for a new partition using hdfs client and add a file with some values.
# Test that the partition can be recovered and that the inserted data are accessible.
self.create_fs_partition(TBL_LOCATION, LEAF_DIR, FILE_PATH, INSERTED_VALUE)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert not self.has_value(PART_NAME, result.data)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert self.has_value(PART_NAME, result.data),\
"ALTER TABLE %s RECOVER PARTITIONS failed." % FQ_TBL_NAME
result = self.execute_query_expect_success(self.client,
"select c from %s" % FQ_TBL_NAME)
assert self.has_value(INSERTED_VALUE, result.data),\
"Failed to load tables after ALTER TABLE %s RECOVER PARTITIONS."\
% FQ_TBL_NAME
# Test that invalid partition values are ignored during partition recovery.
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
old_length = len(result.data)
self.create_fs_partition(TBL_LOCATION, MALFORMED_DIR, FILE_PATH, INSERTED_VALUE)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert len(result.data) == old_length,\
"ALTER TABLE %s RECOVER PARTITIONS failed to handle invalid partition values."\
% FQ_TBL_NAME
# Create a directory whose subdirectory names contain __HIVE_DEFAULT_PARTITION__
# and check that it is recovered as a NULL partition.
self.create_fs_partition(TBL_LOCATION, NULL_DIR, FILE_PATH, NULL_INSERTED_VALUE)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert not self.has_value(self.DEF_NULL_PART_KEY, result.data)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert self.has_value("NULL", result.data),\
"ALTER TABLE %s RECOVER PARTITIONS failed to handle null partition values."\
% FQ_TBL_NAME
result = self.execute_query_expect_success(self.client,
"select c from %s" % FQ_TBL_NAME)
assert self.has_value(NULL_INSERTED_VALUE, result.data)
@SkipIfLocal.hdfs_client
def test_nondefault_location_partitions(self, vector, unique_database):
"""If the location of data files in one partition is changed, test that data files
in the default location will not be loaded after partition recovery."""
TBL_NAME = "test_recover_partitions"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
TBL_LOCATION = self.__get_fs_location(unique_database, TBL_NAME)
FILE_PATH = "test"
LEAF_DIR = "i=1/p=p3/"
INSERTED_VALUE = "4"
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % FQ_TBL_NAME)
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % FQ_TBL_NAME)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s ADD PARTITION(i=1, p='p3')" % FQ_TBL_NAME)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s PARTITION (i=1, p='p3') SET LOCATION '%s/%s.db/tmp' "
% (FQ_TBL_NAME, WAREHOUSE, unique_database))
self.filesystem_client.delete_file_dir(TBL_LOCATION + LEAF_DIR, recursive=True)
self.create_fs_partition(TBL_LOCATION, LEAF_DIR, FILE_PATH, INSERTED_VALUE)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
# Ensure that no duplicate partitions are recovered.
result = self.execute_query_expect_success(self.client,
"select c from %s" % FQ_TBL_NAME)
assert not self.has_value(INSERTED_VALUE, result.data),\
"ALTER TABLE %s RECOVER PARTITIONS failed to handle "\
"non-default partition location." % FQ_TBL_NAME
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p3') VALUES(4)" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"select c from %s" % FQ_TBL_NAME)
assert self.has_value(INSERTED_VALUE, result.data)
@SkipIfLocal.hdfs_client
def test_recover_many_partitions(self, vector, unique_database):
"""Test that RECOVER PARTITIONS correctly discovers new partitions added externally
by the hdfs client, recovered in batches"""
TBL_NAME = "test_recover_partitions"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
DB_LOCATION = '%s/%s.db/' % (WAREHOUSE, unique_database)
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (s string)" % (FQ_TBL_NAME))
# Create 700 partitions externally
try:
SRC_DIR = os.path.join("/tmp", unique_database, TBL_NAME)
if os.path.exists(SRC_DIR):
shutil.rmtree(SRC_DIR)
os.makedirs(SRC_DIR)
for i in range(1, 700):
partition_dir = os.path.join(SRC_DIR, "s=part%d/" % i)
os.makedirs(partition_dir)
with open(os.path.join(partition_dir, "test"), 'w') as f:
f.write("666")
self.filesystem_client.copy_from_local(SRC_DIR, DB_LOCATION)
finally:
shutil.rmtree(SRC_DIR)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
for i in range(1, 700):
PART_DIR = "part%d\t" % i
assert not self.has_value(PART_DIR, result.data)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
for i in range(1, 700):
PART_DIR = "part%d\t" % i
assert self.has_value(PART_DIR, result.data)
@SkipIfLocal.hdfs_client
def test_duplicate_partitions(self, vector, unique_database):
"""Test that RECOVER PARTITIONS does not recover equivalent partitions. Two partitions
are considered equivalent if they correspond to distinct paths but can be converted
to the same partition key values (e.g. "i=0005/p=p2" and "i=05/p=p2")."""
TBL_NAME = "test_recover_partitions"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
TBL_LOCATION = self.__get_fs_location(unique_database, TBL_NAME)
SAME_VALUE_DIR1 = "i=0004/p=p2/"
SAME_VALUE_DIR2 = "i=000004/p=p2/"
FILE_PATH = "test"
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % FQ_TBL_NAME)
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % FQ_TBL_NAME)
# Create a partition with path "/i=1/p=p4".
# Create a path "/i=0001/p=p4" using hdfs client, and add a file with some values.
# Test that no new partition will be recovered and the inserted data are not
# accessible.
LEAF_DIR = "i=0001/p=p4/"
INSERTED_VALUE = "5"
self.execute_query_expect_success(self.client,
"ALTER TABLE %s ADD PARTITION(i=1, p='p4')" % FQ_TBL_NAME)
self.create_fs_partition(TBL_LOCATION, LEAF_DIR, FILE_PATH, INSERTED_VALUE)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"select c from %s" % FQ_TBL_NAME)
assert not self.has_value(INSERTED_VALUE, result.data),\
"ALTER TABLE %s RECOVER PARTITIONS failed to handle "\
"duplicate partition key values." % FQ_TBL_NAME
# Create two paths '/i=0004/p=p2/' and "i=000004/p=p2/" using hdfs client.
# Test that only one partition will be added.
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
old_length = len(result.data)
self.create_fs_partition(TBL_LOCATION, SAME_VALUE_DIR1, FILE_PATH, INSERTED_VALUE)
self.create_fs_partition(TBL_LOCATION, SAME_VALUE_DIR2, FILE_PATH, INSERTED_VALUE)
# Only one partition will be added.
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert old_length + 1 == len(result.data),\
"ALTER TABLE %s RECOVER PARTITIONS failed to handle "\
"duplicate partition key values." % FQ_TBL_NAME
@SkipIfLocal.hdfs_client
@SkipIfCatalogV2.impala_8489()
def test_post_invalidate(self, vector, unique_database):
"""Test that RECOVER PARTITIONS works correctly after invalidate."""
TBL_NAME = "test_recover_partitions"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
TBL_LOCATION = self.__get_fs_location(unique_database, TBL_NAME)
LEAF_DIR = "i=002/p=p2/"
FILE_PATH = "test"
INSERTED_VALUE = "2"
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (i int, p string)" % FQ_TBL_NAME)
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=1, p='p1') VALUES(1)" % FQ_TBL_NAME)
# Test that the recovered partitions are properly stored in Hive MetaStore.
# Invalidate the table metadata and then check if the recovered partitions
# are accessible.
self.create_fs_partition(TBL_LOCATION, LEAF_DIR, FILE_PATH, INSERTED_VALUE)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"select c from %s" % FQ_TBL_NAME)
assert self.has_value(INSERTED_VALUE, result.data)
self.client.execute("INVALIDATE METADATA %s" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"select c from %s" % FQ_TBL_NAME)
assert self.has_value(INSERTED_VALUE, result.data),\
"INVALIDATE can't work on partitions recovered by "\
"ALTER TABLE %s RECOVER PARTITIONS." % FQ_TBL_NAME
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(i=002, p='p2') VALUES(4)" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"select c from %s" % FQ_TBL_NAME)
assert self.has_value('4', result.data)
@SkipIfLocal.hdfs_client
def test_support_all_types(self, vector, unique_database):
"""Test that RECOVER PARTITIONS works correctly on all supported data types."""
TBL_NAME = "test_recover_partitions"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
TBL_LOCATION = self.__get_fs_location(unique_database, TBL_NAME)
normal_values = ["a=1", "b=128", "c=32768", "d=2147483648", "e=11.11",
"f=22.22", "g=33.33", "j=tchar", "k=tvchar", "s=recover"]
malformed_values = ["a=a", "b=b", "c=c", "d=d", "e=e", "f=f", "g=g"]
overflow_values = ["a=128", "b=-32769", "c=-2147483649", "d=9223372036854775808",
"e=11.11111111111111111111111111111111111111111111111111111",
"f=3.40282346638528860e+39", "g=1.79769313486231570e+309"]
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (i INT) PARTITIONED BY (a TINYINT, b SMALLINT, c INT, d BIGINT,"
" e DECIMAL(4,2), f FLOAT, g DOUBLE, j CHAR(5), k VARCHAR(6), s STRING)"
% FQ_TBL_NAME)
self.execute_query_expect_success(self.client,
"INSERT INTO TABLE %s PARTITION(a=1, b=2, c=3, d=4, e=55.55, f=6.6, g=7.7, "
"j=cast('j' as CHAR(5)), k=cast('k' as VARCHAR(6)), s='s') VALUES(1)"
% FQ_TBL_NAME)
# Test valid partition values.
normal_dir = ""
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
old_length = len(result.data)
normal_dir = '/'.join(normal_values)
self.create_fs_partition(TBL_LOCATION, normal_dir, "test", "5")
# One partition will be added.
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert len(result.data) == (old_length + 1),\
"ALTER TABLE %s RECOVER PARTITIONS failed to handle some data types."\
% FQ_TBL_NAME
# Test malformed partition values.
self.check_invalid_partition_values(FQ_TBL_NAME, TBL_LOCATION,
normal_values, malformed_values)
# Test overflow partition values.
self.check_invalid_partition_values(FQ_TBL_NAME, TBL_LOCATION,
normal_values, overflow_values)
@SkipIfLocal.hdfs_client
def test_encoded_partition(self, vector, unique_database):
"""IMPALA-6619: Test that RECOVER PARTITIONS does not create unnecessary partitions
when dealing with URL encoded partition value."""
TBL_NAME = "test_encoded_partition"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
self.execute_query_expect_success(
self.client, "CREATE TABLE %s (s string) PARTITIONED BY (p string)" % FQ_TBL_NAME)
self.execute_query_expect_success(
self.client, "ALTER TABLE %s ADD PARTITION (p='100%%')" % FQ_TBL_NAME)
# Running ALTER TABLE RECOVER PARTITIONS multiple times should only produce
# a single partition when adding a single partition.
for i in range(3):
self.execute_query_expect_success(
self.client, "ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(
self.client, "SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert self.count_partition(result.data) == 1, \
"ALTER TABLE %s RECOVER PARTITIONS produced more than 1 partitions" % FQ_TBL_NAME
assert self.count_value('p=100%25', result.data) == 1, \
"ALTER TABLE %s RECOVER PARTITIONS failed to handle encoded partitioned value" % \
FQ_TBL_NAME
@SkipIfLocal.hdfs_client
def test_unescaped_string_partition(self, vector, unique_database):
"""IMPALA-7784: Test that RECOVER PARTITIONS correctly parses unescaped string
values"""
tbl_name = "test_unescaped_string_partition"
fq_tbl_name = unique_database + "." + tbl_name
tbl_location = self.__get_fs_location(unique_database, tbl_name)
self.execute_query_expect_success(
self.client, "CREATE TABLE %s (i int) PARTITIONED BY (p string)" % fq_tbl_name)
parts = ["\'", "\"", "\\\'", "\\\"", "\\\\\'", "\\\\\""]
for i in range(len(parts)):
# When creating partition directories, Hive replaces special characters in
# partition value string using the %xx escape. e.g. p=' will become p=%27.
hex_part = urllib.parse.quote(parts[i])
self.create_fs_partition(tbl_location, "p=%s" % hex_part, "file_%d" % i, str(i))
self.execute_query_expect_success(
self.client, "ALTER TABLE %s RECOVER PARTITIONS" % fq_tbl_name)
result = self.execute_query_expect_success(
self.client, "SHOW PARTITIONS %s" % fq_tbl_name)
self.verify_partitions(parts, result.data)
@SkipIfLocal.hdfs_client
@SkipIfFS.empty_directory
def test_empty_directory(self, vector, unique_database):
"""Explicitly test how empty directories are handled when partitions are recovered."""
TBL_NAME = "test_recover_partitions"
FQ_TBL_NAME = unique_database + "." + TBL_NAME
TBL_LOCATION = self.__get_fs_location(unique_database, TBL_NAME)
self.execute_query_expect_success(self.client,
"CREATE TABLE %s (c int) PARTITIONED BY (i int, s string)" % (FQ_TBL_NAME))
# Adds partition directories.
num_partitions = 10
for i in range(1, num_partitions):
PART_DIR = "i=%d/s=part%d" % (i,i)
self.filesystem_client.make_dir(TBL_LOCATION + PART_DIR)
# Adds a duplicate directory name.
self.filesystem_client.make_dir(TBL_LOCATION + "i=001/s=part1")
# Adds a malformed directory name.
self.filesystem_client.make_dir(TBL_LOCATION + "i=wrong_type/s=part1")
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert 0 == self.count_partition(result.data)
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % FQ_TBL_NAME)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % FQ_TBL_NAME)
assert num_partitions - 1 == self.count_partition(result.data)
for i in range(1, num_partitions):
PART_DIR = "part%d\t" % i
assert self.has_value(PART_DIR, result.data)
def create_fs_partition(self, root_path, new_dir, new_file, value):
"""Creates an fs directory and writes a file to it. Empty directories are no-op's
for S3, so enforcing a non-empty directory is less error prone across
filesystems."""
partition_dir = os.path.join(root_path, new_dir)
self.filesystem_client.make_dir(partition_dir)
self.filesystem_client.create_file(os.path.join(partition_dir, new_file),
value)
def check_invalid_partition_values(self, fq_tbl_name, tbl_location,
normal_values, invalid_values):
""""Check that RECOVER PARTITIONS ignores partitions with invalid partition values."""
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % fq_tbl_name)
old_length = len(result.data)
for i in range(len(invalid_values)):
invalid_dir = ""
for j in range(len(normal_values)):
if i != j:
invalid_dir += (normal_values[j] + "/")
else:
invalid_dir += (invalid_values[j] + "/")
self.filesystem_client.make_dir(tbl_location + invalid_dir)
# No partition will be added.
self.execute_query_expect_success(self.client,
"ALTER TABLE %s RECOVER PARTITIONS" % fq_tbl_name)
result = self.execute_query_expect_success(self.client,
"SHOW PARTITIONS %s" % fq_tbl_name)
assert len(result.data) == old_length,\
"ALTER TABLE %s RECOVER PARTITIONS failed to handle "\
"invalid partition key values." % fq_tbl_name
def has_value(self, value, lines):
"""Check if lines contain value."""
return any([line.find(value) != -1 for line in lines])
def count_partition(self, lines):
"""Count the number of partitions in the lines."""
return self.count_value(WAREHOUSE, lines)
def count_value(self, value, lines):
"""Count the number of lines that contain value."""
return len([line for line in lines if line.find(value) != -1])
def verify_partitions(self, expected_parts, lines):
"""Check if all partition values are expected"""
values = [line.split('\t')[0] for line in lines]
assert len(values) == len(expected_parts) + 1
for p in expected_parts:
assert p in values
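# Hedged illustration (not part of the Impala test suite): the kind of directory
# layout these tests create under the table location, which
# ALTER TABLE ... RECOVER PARTITIONS then turns into partitions. Paths are made up.
_EXAMPLE_PARTITION_LAYOUT = [
    "warehouse/mydb.db/t/i=1/p=p1/",        # created by INSERT, already a known partition
    "warehouse/mydb.db/t/i=0001/p=p2/test", # created via the filesystem client, recovered
    "warehouse/mydb.db/t/i=fish/p=p2/test", # malformed value for an INT column, ignored
]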
NVIDIA/NVFlare :: /nvflare/security/logging.py (Apache-2.0, Python, 4,133 bytes)
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import traceback
SECURE_LOGGING_VAR_NAME = "NVFLARE_SECURE_LOGGING"
def is_secure() -> bool:
"""Checks if logging is set to secure mode.
This is controlled by the system environment variable NVFLARE_SECURE_LOGGING.
To set secure mode, set this var to 'true' or '1'.
Returns:
A boolean indicating whether logging is set to secure mode.
"""
secure_logging = os.environ.get(SECURE_LOGGING_VAR_NAME, False)
if isinstance(secure_logging, str):
secure_logging = secure_logging.lower()
return secure_logging == "1" or secure_logging == "true"
else:
return False
class _Frame(object):
def __init__(self, line_text):
self.line_text = line_text
self.count = 1
def _format_exc_securely() -> str:
"""Mimics traceback.format_exc() but exclude detailed call info and exception detail since
they might contain sensitive info.
Returns:
A formatted string of current exception and call stack.
"""
exc_type, exc_obj, tb = sys.exc_info()
result = ["Traceback (most recent call last):"]
frames = []
last_frame = None
# traceback (tb) stack is a linked list of frames
while tb:
file_name = tb.tb_frame.f_code.co_filename
func_name = tb.tb_frame.f_code.co_name
line = tb.tb_lineno
line_text = f'File "{file_name}", line {line}, in {func_name}'
if not last_frame or last_frame.line_text != line_text:
last_frame = _Frame(line_text)
frames.append(last_frame)
else:
# same text as last frame
last_frame.count += 1
tb = tb.tb_next
for f in frames:
result.append(f.line_text)
if f.count > 1:
result.append(f"[Previous line repeated {f.count-1} more times]")
text = "\r\n ".join(result)
return "{}\r\n{}".format(text, f"Exception Type: {exc_type}")
def secure_format_traceback() -> str:
"""Formats the traceback of the current exception and returns a string without sensitive info.
    If secure mode is set, only file names, line numbers and function names are included.
Exception info only includes the type of the exception.
If secure mode is not set, return the result of traceback.format_exc().
Returns:
A formatted string
"""
if is_secure():
return _format_exc_securely()
else:
return traceback.format_exc()
def secure_log_traceback(logger: logging.Logger = None):
"""Logs the traceback.
    If secure mode is set, the traceback only includes file names, line numbers and function names,
    and the exception info only includes the exception type.
If secure mode is not set, the traceback will be logged normally as traceback.print_exc().
Args:
logger: if not None, this logger is used to log the traceback detail. If None, the root logger will be used.
"""
exc_detail = secure_format_traceback()
if not logger:
logger = logging.getLogger()
logger.error(exc_detail)
def secure_format_exception(e: Exception) -> str:
"""Formats the specified exception and return a string without sensitive info.
If secure mode is set, only return the type of the exception;
If secure mode is not set, return the result of str(e).
Args:
e: the exception to be formatted
Returns:
A formatted exception string.
"""
if is_secure():
return str(type(e))
else:
return f"{type(e).__name__}: {str(e)}"
|
a7b4a05b19bb83bafa2bd1acddf00e42cab671af
|
c26483bc1399e7879471a9e53d0288cb2c756088
|
/onnxmltools/convert/sparkml/operator_converters/element_wise_product.py
|
068111fb234fb32b85dbaa11a2c5f3f661640373
|
[
"Apache-2.0"
] |
permissive
|
onnx/onnxmltools
|
6782d9e1d2c75be7618b1378405d31198a310027
|
024a62f6915e6c3b9e040befaf058c7e60c271de
|
refs/heads/main
| 2023-09-04T04:57:10.943548
| 2023-08-28T16:43:37
| 2023-08-28T16:43:37
| 121,798,175
| 827
| 189
|
Apache-2.0
| 2023-09-13T16:07:20
| 2018-02-16T20:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
element_wise_product.py
|
# SPDX-License-Identifier: Apache-2.0
from onnx import onnx_pb as onnx_proto
from ...common.data_types import FloatTensorType
from ...common.utils import check_input_and_output_numbers, check_input_and_output_types
from ...common._apply_operation import apply_mul
from ...common._registration import register_converter, register_shape_calculator
def convert_element_wise_product(scope, operator, container):
op = operator.raw_operator
scaling_vector = scope.get_unique_variable_name("scaling_vector")
container.add_initializer(
scaling_vector,
onnx_proto.TensorProto.FLOAT,
[1, len(op.getScalingVec())],
op.getScalingVec(),
)
apply_mul(
scope,
[operator.inputs[0].full_name, scaling_vector],
operator.output_full_names,
container,
)
register_converter(
"pyspark.ml.feature.ElementwiseProduct", convert_element_wise_product
)
def calculate_element_wise_product_output_shapes(operator):
check_input_and_output_numbers(operator, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
N = operator.inputs[0].type.shape[0]
operator.outputs[0].type = FloatTensorType([N, operator.inputs[0].type.shape[1]])
register_shape_calculator(
"pyspark.ml.feature.ElementwiseProduct",
calculate_element_wise_product_output_shapes,
)
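# --- Illustrative note (not part of the original converter) ---
# The converter above emits a single Mul node: each input row is multiplied
# elementwise by the fixed scaling vector stored as an initializer, e.g.
#     [1.0, 2.0, 3.0] * [0.5, 2.0, 1.0] -> [0.5, 4.0, 3.0]
# which mirrors what pyspark.ml.feature.ElementwiseProduct does at transform time.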
|
dfd647b6c14692b6d2ba56285367f2b753594117
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/DS_and_ML_Mathematical_and_Statistical_Methods_Kroese_Taimre/Chapter2/polyregCV.py
|
f02460f0d9451cd428c553cd016e75826df16cbe
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,447
|
py
|
polyregCV.py
|
""" polyregCV.py """
from polyreg3 import *
K_vals = [5, 10, 100] # number of folds
cv = np.zeros((len(K_vals), max_p))
X = np.ones((n, 1))
for p in p_range:
if p > 1:
X = np.hstack((X, u**(p-1)))
j = 0
for K in K_vals:
loss = []
for k in range(1, K+1):
# integer indices of test samples
test_ind = ((n/K)*(k-1) + np.arange(1, n/K + 1) - 1).astype('int')
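            # e.g. if n = 100 and K = 5, fold k = 2 gives test indices 20..39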
train_ind = np.setdiff1d(np.arange(n), test_ind)
X_train, y_train = X[train_ind, :], y[train_ind, :]
X_test, y_test = X[test_ind, :], y[test_ind]
# fit model and evaluate test loss
betahat = solve(X_train.T @ X_train, X_train.T @ y_train)
loss.append(norm(y_test - X_test @ betahat) ** 2)
cv[j, p-1] = sum(loss) / n
j += 1
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
p1 = plt.plot(p_range, cv[0, :], 'k-.', p_range, cv[0, :], 'k.', markersize=10)[0]
p2 = plt.plot(p_range, cv[1, :], 'r', p_range, cv[1, :], 'r.', markersize=10)[0]
p3 = plt.plot(p_range, cv[2, :], 'b--', p_range, cv[2, :], 'b.', markersize=10)[0]
plt.xticks(range(2, 19, 2))
plt.xlabel('Number of parameters $p$')
plt.ylabel('$K$-fold cross-validation loss')
plt.legend((p1,p2,p3),('$K$=5','$K$=10','$K$=100'))
plt.tight_layout()
plt.savefig('crossvalpy.pdf',format='pdf')
plt.show()
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
1b033b1bbfd0bef056ab42ec5f021401f1b1d19d
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/pcb/src/eval_callback.py
|
2b2fbd0fa20fa16040be1045dac14ab0848c28f1
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,968
|
py
|
eval_callback.py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Evaluation callback when training"""
import os
from mindspore import save_checkpoint
from mindspore.train.callback import Callback
from src.model_utils.config import config
class EvalCallBack(Callback):
"""
Evaluation callback when training.
Args:
eval_function (function): evaluation function.
eval_param_dict (dict): evaluation parameters' configure dict.
interval (int): run evaluation interval, default is 1.
eval_start_epoch (int): evaluation start epoch, default is 1.
save_best_ckpt (bool): Whether to save best checkpoint, default is True.
best_ckpt_name (str): best checkpoint name, default is `best.ckpt`.
        metrics_name (tuple): evaluation metrics names, default is (`mAP`, `CMC`).
Returns:
None
Examples:
>>> EvalCallBack(eval_function, eval_param_dict)
"""
def __init__(self, eval_function, eval_param_dict, interval=1, eval_start_epoch=1, save_best_ckpt=True,
ckpt_directory="./", best_ckpt_name="best.ckpt", metrics_name=("mAP", "CMC"), cmc_topk=(1, 5, 10)):
super(EvalCallBack, self).__init__()
self.eval_param_dict = eval_param_dict
self.eval_function = eval_function
self.eval_start_epoch = eval_start_epoch
if interval < 1:
raise ValueError("interval should >= 1.")
self.interval = interval
self.save_best_ckpt = save_best_ckpt
self.best_mAP = 0
self.best_cmc_scores = None
self.best_epoch = 0
if not os.path.isdir(ckpt_directory):
os.makedirs(ckpt_directory)
self.best_ckpt_path = os.path.join(ckpt_directory, best_ckpt_name)
self.metrics_name = metrics_name
self.cmc_topk = cmc_topk
def epoch_end(self, run_context):
"""Callback when epoch end."""
cb_params = run_context.original_args()
cur_epoch = cb_params.cur_epoch_num
if cur_epoch >= self.eval_start_epoch and (cur_epoch - self.eval_start_epoch) % self.interval == 0:
mAP, cmc_scores = self.eval_function(self.eval_param_dict)
print('Mean AP: {:4.1%}'.format(mAP), flush=True)
print('CMC Scores{:>12}'.format(config.dataset_name), flush=True)
for k in self.cmc_topk:
print(' top-{:<4}{:12.1%}'.format(k, cmc_scores[config.dataset_name][k - 1]), flush=True)
if mAP >= self.best_mAP:
self.best_mAP = mAP
self.best_cmc_scores = cmc_scores
self.best_epoch = cur_epoch
print("update best mAP: {}".format(mAP), flush=True)
if self.save_best_ckpt:
save_checkpoint(cb_params.train_network, self.best_ckpt_path)
print("update best checkpoint at: {}".format(self.best_ckpt_path), flush=True)
def end(self, run_context):
print("End training, the best epoch is {}".format(self.best_epoch), flush=True)
print("Best result:", flush=True)
print('Mean AP: {:4.1%}'.format(self.best_mAP), flush=True)
print('CMC Scores{:>12}'.format(config.dataset_name), flush=True)
for k in self.cmc_topk:
print(' top-{:<4}{:12.1%}'.format(k, self.best_cmc_scores[config.dataset_name][k - 1]), flush=True)
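# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of plugging the callback into a training loop. The model,
# datasets and eval function names are hypothetical, and the exact
# mindspore.Model.train signature should be checked against the version in use.
#
#     eval_cb = EvalCallBack(my_eval_function,
#                            {"net": net, "dataset": eval_dataset},
#                            interval=1, eval_start_epoch=1,
#                            ckpt_directory="./ckpt", best_ckpt_name="best.ckpt")
#     model.train(num_epochs, train_dataset, callbacks=[eval_cb])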
|
99221d1dccdcde3795a49404d2417fc59dc287fa
|
a4f9164f5bd4405312afb5073342766a40fa3356
|
/efficientdet/aug/mosaic_test.py
|
65e6d572f30dec2663daa72dbb34ed91fc5a75fb
|
[
"Apache-2.0"
] |
permissive
|
google/automl
|
3dd46487265241cebe7040b22556c9b259a5fb33
|
c7392f2bab3165244d1c565b66409fa11fa82367
|
refs/heads/master
| 2023-08-19T10:11:45.269690
| 2023-08-09T22:59:26
| 2023-08-09T22:59:26
| 246,738,676
| 6,415
| 1,692
|
Apache-2.0
| 2023-08-09T22:59:27
| 2020-03-12T03:52:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,866
|
py
|
mosaic_test.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mosaic Augmentation simple test."""
from absl import logging
import tensorflow.compat.v1 as tf
from aug import mosaic
class MosaicTest(tf.test.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output_size = (512, 512)
self.mosaic = mosaic.Mosaic(out_size=self.output_size)
tf.random.set_random_seed(111111)
def test_mosaic_boxes(self):
"""Verify num of boxes are valid and syntax check random four images."""
images = tf.random.uniform(
shape=(4, 512, 512, 3), minval=0, maxval=255, dtype=tf.float32)
bboxes = tf.random.uniform(
shape=(4, 2, 4), minval=1, maxval=511, dtype=tf.int32)
_, mosaic_boxes = self.mosaic(images, bboxes)
self.assertEqual(bboxes.shape[0], len(mosaic_boxes))
def test_mosaic_tiny_images(self):
images = tf.zeros(shape=(4, 4, 4, 3))
bboxes = tf.random.uniform(
shape=(4, 2, 4), minval=1, maxval=511, dtype=tf.int32)
_, mosaic_boxes = self.mosaic(images, bboxes)
self.assertEqual(bboxes.shape[0], len(mosaic_boxes))
if __name__ == "__main__":
logging.set_verbosity(logging.WARNING)
tf.disable_eager_execution()
tf.test.main()
|
9b264b5c6682a2721e5378f2c8d2a6e3b02a1f53
|
40ca168bbb9c865a13c83ef479838981c5b7a1c0
|
/packages/hagrid/hagrid/parse_template.py
|
3e4c5b351be0e4cebac5fcb9db9e825a6383d454
|
[
"Apache-2.0",
"Python-2.0"
] |
permissive
|
OpenMined/PySyft
|
6907171bc35062d04c1b6320097c3bcafb65ae68
|
1833278212d89e66853f28a7ca365261550bbe4f
|
refs/heads/dev
| 2023-09-05T05:50:48.773703
| 2023-09-05T04:00:44
| 2023-09-05T04:00:44
| 97,641,933
| 9,473
| 2,530
|
Apache-2.0
| 2023-09-14T12:50:53
| 2017-07-18T20:41:16
|
Python
|
UTF-8
|
Python
| false
| false
| 10,890
|
py
|
parse_template.py
|
# stdlib
import hashlib
import os
import shutil
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from urllib.parse import urlparse
# third party
from jinja2 import Environment
from jinja2 import FileSystemLoader
from jinja2 import Template
import requests
from rich.progress import track
import yaml
# relative
from .cache import DEFAULT_REPO
from .cache import STABLE_BRANCH
from .lib import hagrid_cache_dir
from .lib import manifest_template_path
from .lib import repo_src_path
from .mode import EDITABLE_MODE
HAGRID_TEMPLATE_PATH = str(manifest_template_path())
def read_yml_file(filename: str) -> Tuple[Optional[Dict], str]:
template = None
with open(filename) as fp:
try:
text = fp.read()
template = yaml.safe_load(text)
template_hash = hashlib.sha256(text.encode("utf-8")).hexdigest()
except yaml.YAMLError as exc:
raise exc
return template, template_hash
def read_yml_url(yml_url: str) -> Tuple[Optional[Dict], str]:
template = None
try:
# download file
response = requests.get(yml_url) # nosec
if response.status_code != 200:
raise Exception(f"Failed to download: {yml_url}")
# Save file to the local destination
try:
template = yaml.safe_load(response.content)
template_hash = hashlib.sha256(response.content).hexdigest()
except yaml.YAMLError as exc:
raise exc
except Exception as e:
raise e
return template, template_hash
def git_url_for_file(file_path: str, base_url: str, hash: str) -> str:
# url must have unix style slashes
return os.path.join(base_url, hash, file_path).replace(os.sep, "/")
def get_local_abs_path(target_dir: str, file_path: str) -> str:
local_path = os.path.join(target_dir, file_path)
return os.path.expanduser(local_path)
def is_url(string: str) -> bool:
try:
result = urlparse(string)
return all([result.scheme, result.netloc])
except ValueError:
return False
def is_path(string: str) -> bool:
return os.path.exists(string)
def manifest_cache_path(template_hash: str) -> str:
return f"{hagrid_cache_dir()}/manifests/{template_hash}"
def url_from_repo(template_location: Optional[str]) -> Optional[str]:
if template_location is None:
return None
if ":" in template_location and "/" in template_location:
parts = template_location.split(":")
branch_or_hash = parts[1]
repo = parts[0]
elif ":" not in template_location and "/" in template_location:
branch_or_hash = STABLE_BRANCH
repo = template_location
else:
branch_or_hash = template_location
repo = DEFAULT_REPO
manifest_url = (
f"https://raw.githubusercontent.com/{repo}/{branch_or_hash}"
"/packages/hagrid/hagrid/manifest_template.yml"
)
if is_url(manifest_url):
return manifest_url
return None
def get_template_yml(template_location: Optional[str]) -> Tuple[Optional[Dict], str]:
if template_location:
if is_url(template_location):
template, template_hash = read_yml_url(template_location)
elif is_path(template_location):
template, template_hash = read_yml_file(template_location)
elif url_from_repo(template_location):
template, template_hash = read_yml_url(url_from_repo(template_location))
else:
raise Exception(f"{template_location} is not valid")
else:
template_location = HAGRID_TEMPLATE_PATH
template, template_hash = read_yml_file(template_location)
if EDITABLE_MODE and is_path(template_location):
# save it to the same folder for dev mode
template_hash = "dev"
return template, template_hash
def setup_from_manifest_template(
host_type: str,
deployment_type: str,
template_location: Optional[str] = None,
overwrite: bool = False,
verbose: bool = False,
) -> Dict:
template, template_hash = get_template_yml(template_location)
kwargs_to_parse = {}
if template is None:
raise ValueError(
f"Failed to read {template_location}. Please check the file name or path is correct."
)
git_hash = template["hash"]
git_base_url = template["baseUrl"]
target_dir = manifest_cache_path(template_hash)
all_template_files = template["files"]
docker_tag = template["dockerTag"]
files_to_download = []
for package_name in all_template_files:
# Get all files w.r.t that package e.g. grid, syft, hagrid
template_files = all_template_files[package_name]
package_path = template_files["path"]
# common files
files_to_download += [
os.path.join(package_path, f) for f in template_files["common"]
]
# worker
if deployment_type == "single_container" and host_type in ["docker"]:
files_to_download += [
os.path.join(package_path, f) for f in template_files["worker"]
]
# docker related files
elif host_type in ["docker"]:
files_to_download += [
os.path.join(package_path, f) for f in template_files["docker"]
]
# add k8s related files
# elif host_type in ["k8s"]:
# files_to_download += template_files["k8s"]
else:
raise Exception(f"Hagrid template does not currently support {host_type}.")
if EDITABLE_MODE and is_path(template_location):
# to test things in editable mode we can pass in a .yml file path and it will
        # copy the files instead of downloading them
for src_file_path in track(files_to_download, description="Copying files"):
full_src_dir = f"{repo_src_path()}/{src_file_path}"
full_target_path = f"{target_dir}/{src_file_path}"
full_target_dir = os.path.dirname(full_target_path)
os.makedirs(full_target_dir, exist_ok=True)
shutil.copyfile(
full_src_dir,
full_target_path,
)
else:
download_files(
files_to_download=files_to_download,
git_hash=git_hash,
git_base_url=git_base_url,
target_dir=target_dir,
overwrite=overwrite,
verbose=verbose,
)
kwargs_to_parse["tag"] = docker_tag
return kwargs_to_parse
def deployment_dir(node_name: str) -> str:
return f"{hagrid_cache_dir()}/deployments/{node_name}"
def download_files(
files_to_download: List[str],
git_hash: str,
git_base_url: str,
target_dir: str,
overwrite: bool = False,
verbose: bool = False,
) -> None:
for src_file_path in track(files_to_download, description="Downloading files"):
# For now target file path is same as source file path
trg_file_path = src_file_path
local_destination = get_local_abs_path(target_dir, trg_file_path)
link_to_file = git_url_for_file(src_file_path, git_base_url, git_hash)
download_file(
link_to_file=link_to_file,
local_destination=local_destination,
overwrite=overwrite,
verbose=verbose,
)
def render_templates(
node_name: str,
deployment_type: str,
template_location: Optional[str],
env_vars: dict,
host_type: str,
) -> None:
template, template_hash = get_template_yml(template_location)
if template is None:
raise ValueError("Failed to read hagrid template.")
src_dir = manifest_cache_path(template_hash)
target_dir = deployment_dir(node_name)
all_template_files = template["files"]
jinja_template = JinjaTemplate(src_dir)
files_to_render = []
for package_name in all_template_files:
template_files = all_template_files[package_name]
# Aggregate all the files to be rendered
# common files
files_to_render += template_files["common"]
# worker
if deployment_type == "single_container" and host_type in ["docker"]:
for template_file in template_files["worker"]:
if "default.env" not in template_file:
files_to_render.append(template_file)
elif host_type in ["docker"]:
# docker related files
for template_file in template_files["docker"]:
if "default.env" not in template_file:
files_to_render.append(template_file)
# Render the files
for file_path in files_to_render:
folder_path = template_files["path"]
# relative to src_dir
src_file_path = f"{folder_path}{file_path}"
target_file_path = f"{target_dir}/{file_path}"
os.makedirs(os.path.dirname(target_file_path), exist_ok=True)
jinja_template.substitute_vars(src_file_path, env_vars, target_file_path)
class JinjaTemplate:
def __init__(self, template_dir: Union[str, os.PathLike]) -> None:
self.directory = os.path.expanduser(template_dir)
self.environ = Environment(
loader=FileSystemLoader(self.directory), autoescape=True
)
def read_template_from_path(self, filepath: str) -> Template:
return self.environ.get_template(name=filepath)
def substitute_vars(
self, template_path: str, vars_to_substitute: dict, target_path: str
) -> None:
template = self.read_template_from_path(template_path)
rendered_template = template.render(vars_to_substitute)
self.save_to(rendered_template, target_path)
def save_to(self, message: str, filename: str) -> None:
base_dir = self.directory
filepath = os.path.abspath(os.path.join(base_dir, filename))
        # Create sub directories if they do not exist
os.makedirs(os.path.dirname(filepath), exist_ok=True)
# Save template to filepath
with open(filepath, "w") as fp:
fp.write(message)
def download_file(
link_to_file: str,
local_destination: str,
overwrite: bool = False,
verbose: bool = False,
) -> None:
file_dir = os.path.dirname(local_destination)
os.makedirs(file_dir, exist_ok=True)
if not os.path.exists(local_destination) or overwrite:
try:
# download file
response = requests.get(link_to_file) # nosec
if response.status_code != 200:
raise Exception(f"Failed to download: {link_to_file}")
# Save file to the local destination
            with open(local_destination, "wb") as fp:
                fp.write(response.content)
except Exception as e:
raise e
else:
if verbose:
print(f"Skipping download: {link_to_file} exists.")
|
0417de38b3afd4118b20dc527e62bd83e363e199
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/HLTrigger/Configuration/python/HLT_75e33/sequences/HLTEndSequence_cfi.py
|
e78fca75928111ff8d3eaf3fcb83a43dd9acc5bb
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 130
|
py
|
HLTEndSequence_cfi.py
|
import FWCore.ParameterSet.Config as cms
from ..modules.hltBoolEnd_cfi import *
HLTEndSequence = cms.Sequence(
hltBoolEnd
)
|
ef0232f68386e396af71c43792391b5f34c98df2
|
2b1bfccfada588b047b513e24db0c0f6bc41e154
|
/src/trajdata/dataset_specific/sdd_peds/sddpeds_dataset.py
|
a94bf888d98db44bda85825370bf458aa65c86f3
|
[
"Apache-2.0"
] |
permissive
|
NVlabs/trajdata
|
8363b79dc3e5f1ff61d240b49e68df52319d9fee
|
5bc8ab59dc1fc44b3e8f4f78546109b314da098e
|
refs/heads/main
| 2023-09-01T02:00:11.016946
| 2023-08-23T01:44:58
| 2023-08-23T01:44:58
| 488,789,438
| 110
| 19
|
Apache-2.0
| 2023-08-23T01:08:11
| 2022-05-05T01:04:05
|
Python
|
UTF-8
|
Python
| false
| false
| 12,254
|
py
|
sddpeds_dataset.py
|
from pathlib import Path
from random import Random
from typing import Any, Dict, Final, List, Optional, Tuple, Type
import numpy as np
import pandas as pd
from trajdata.caching import EnvCache, SceneCache
from trajdata.data_structures.agent import AgentMetadata, AgentType, FixedExtent
from trajdata.data_structures.environment import EnvMetadata
from trajdata.data_structures.scene_metadata import Scene, SceneMetadata
from trajdata.data_structures.scene_tag import SceneTag
from trajdata.dataset_specific.raw_dataset import RawDataset
from trajdata.dataset_specific.scene_records import SDDPedsRecord
from trajdata.utils import arr_utils
from .estimated_homography import SDD_HOMOGRAPHY_SCALES
# SDD was captured at 30 frames per second.
SDDPEDS_DT: Final[float] = 1.0 / 30.0
# There are 60 scenes in total.
SDDPEDS_SCENE_COUNTS: Final[Dict[str, int]] = {
"bookstore": 7,
"coupa": 4,
"deathCircle": 5,
"gates": 9,
"hyang": 15,
"little": 4,
"nexus": 12,
"quad": 4,
}
def sdd_type_to_unified_type(label: str) -> AgentType:
if label == "Pedestrian":
return AgentType.PEDESTRIAN
elif label == "Biker":
return AgentType.BICYCLE
elif label in {"Cart", "Car", "Bus"}:
return AgentType.VEHICLE
elif label == "Skater":
return AgentType.UNKNOWN
class SDDPedsDataset(RawDataset):
def compute_metadata(self, env_name: str, data_dir: str) -> EnvMetadata:
# Using seeded randomness to assign 42 scenes (70% of all scenes) to "train",
# 9 (15%) to "val", and 9 (15%) to "test".
rng = Random(0)
scene_split = ["train"] * 42 + ["val"] * 9 + ["test"] * 9
rng.shuffle(scene_split)
scene_list: List[str] = []
for scene_name, video_count in SDDPEDS_SCENE_COUNTS.items():
scene_list += [f"{scene_name}_{idx}" for idx in range(video_count)]
scene_split_map: Dict[str, str] = {
scene_list[idx]: scene_split[idx] for idx in range(len(scene_split))
}
# SDD possibilities are the Cartesian product of these,
dataset_parts: List[Tuple[str, ...]] = [
("train", "val", "test"),
("stanford",),
]
env_metadata = EnvMetadata(
name=env_name,
data_dir=data_dir,
dt=SDDPEDS_DT,
parts=dataset_parts,
scene_split_map=scene_split_map,
)
return env_metadata
def load_dataset_obj(self, verbose: bool = False) -> None:
if verbose:
print(f"Loading {self.name} dataset...", flush=True)
# Just storing the filepath and scene length (number of frames).
# One could load the entire dataset here, but there's no need
# since it's ~500 MB in size and we can parallel process it later easily.
self.dataset_obj: Dict[str, Tuple[Path, int]] = dict()
for scene_name, video_count in SDDPEDS_SCENE_COUNTS.items():
for video_num in range(video_count):
data_filepath: Path = (
Path(self.metadata.data_dir)
/ scene_name
/ f"video{video_num}"
/ "annotations.txt"
)
csv_columns = [
"agent_id",
"x_min",
"y_min",
"x_max",
"y_max",
"frame_id",
"lost",
"occluded",
"generated",
"label",
]
data = pd.read_csv(
data_filepath,
sep=" ",
index_col=False,
header=None,
names=csv_columns,
usecols=["frame_id", "generated"],
dtype={"frame_id": int, "generated": bool},
)
# Ignoring generated frames in the count here since
# we will remove them later (we'll do our own interpolation).
data = data[~data["generated"]]
data["frame_id"] -= data["frame_id"].min()
self.dataset_obj[f"{scene_name}_{video_num}"] = (
data_filepath,
data["frame_id"].max().item() + 1,
)
def _get_matching_scenes_from_obj(
self,
scene_tag: SceneTag,
scene_desc_contains: Optional[List[str]],
env_cache: EnvCache,
) -> List[SceneMetadata]:
all_scenes_list: List[SDDPedsRecord] = list()
scenes_list: List[SceneMetadata] = list()
for idx, (scene_name, (scene_filepath, scene_length)) in enumerate(
self.dataset_obj.items()
):
if scene_name not in self.metadata.scene_split_map:
raise ValueError()
scene_split: str = self.metadata.scene_split_map[scene_name]
# Saving all scene records for later caching.
all_scenes_list.append(SDDPedsRecord(scene_name, scene_length, idx))
if (
"stanford" in scene_tag
and scene_split in scene_tag
and scene_desc_contains is None
):
scene_metadata = SceneMetadata(
env_name=self.metadata.name,
name=scene_name,
dt=self.metadata.dt,
raw_data_idx=idx,
)
scenes_list.append(scene_metadata)
self.cache_all_scenes_list(env_cache, all_scenes_list)
return scenes_list
def _get_matching_scenes_from_cache(
self,
scene_tag: SceneTag,
scene_desc_contains: Optional[List[str]],
env_cache: EnvCache,
) -> List[Scene]:
all_scenes_list: List[SDDPedsRecord] = env_cache.load_env_scenes_list(self.name)
scenes_list: List[Scene] = list()
for scene_record in all_scenes_list:
scene_name, scene_length, data_idx = scene_record
scene_split: str = self.metadata.scene_split_map[scene_name]
if (
"stanford" in scene_tag
and scene_split in scene_tag
and scene_desc_contains is None
):
scene_metadata = Scene(
self.metadata,
scene_name,
"stanford",
scene_split,
scene_length,
data_idx,
None, # This isn't used if everything is already cached.
)
scenes_list.append(scene_metadata)
return scenes_list
def get_scene(self, scene_info: SceneMetadata) -> Scene:
_, scene_name, _, data_idx = scene_info
_, scene_length = self.dataset_obj[scene_name]
scene_split: str = self.metadata.scene_split_map[scene_name]
return Scene(
self.metadata,
scene_name,
"stanford",
scene_split,
scene_length,
data_idx,
            None,  # No data access info necessary for the SDD dataset.
)
def get_agent_info(
self, scene: Scene, cache_path: Path, cache_class: Type[SceneCache]
) -> Tuple[List[AgentMetadata], List[List[AgentMetadata]]]:
scene_filepath, _ = self.dataset_obj[scene.name]
csv_columns = [
"agent_id",
"x_min",
"y_min",
"x_max",
"y_max",
"frame_id",
"lost",
"occluded",
"generated",
"label",
]
data_df: pd.DataFrame = pd.read_csv(
scene_filepath,
sep=" ",
index_col=False,
header=None,
names=csv_columns,
dtype={"generated": bool},
)
# Setting generated frames to NaN, we'll do our own interpolation later.
data_df.loc[data_df["generated"], ["x_min", "y_min"]] = np.nan
data_df["frame_id"] -= data_df["frame_id"].min()
scale: float = SDD_HOMOGRAPHY_SCALES[scene.name]["scale"]
data_df["x"] = scale * (data_df["x_min"] + data_df["x_max"]) / 2.0
data_df["y"] = scale * (data_df["y_min"] + data_df["y_max"]) / 2.0
# Don't need these columns anymore.
data_df.drop(
columns=[
"x_min",
"y_min",
"x_max",
"y_max",
"lost",
"occluded",
"generated",
],
inplace=True,
)
# Renaming columns to match our usual names.
data_df.rename(
columns={"frame_id": "scene_ts", "label": "agent_type"},
inplace=True,
)
# Ensuring data is sorted by agent ID and scene timestep.
data_df.set_index(["agent_id", "scene_ts"], inplace=True)
data_df.sort_index(inplace=True)
# Re-interpolating because the original SDD interpolation yielded discrete position steps,
# which is not very natural. Also, the data is already sorted by agent and time so
# we can safely do this without worrying about contaminating position data across agents.
data_df.interpolate(
method="linear", axis="index", inplace=True, limit_area="inside"
)
data_df.reset_index(level=1, inplace=True)
agent_ids: np.ndarray = data_df.index.get_level_values(0).to_numpy()
# Add in zero for z value
data_df["z"] = np.zeros_like(data_df["x"])
### Calculating agent classes
agent_class: Dict[int, str] = (
data_df.groupby("agent_id")["agent_type"].first().to_dict()
)
### Calculating agent velocities
data_df[["vx", "vy"]] = (
arr_utils.agent_aware_diff(data_df[["x", "y"]].to_numpy(), agent_ids)
/ SDDPEDS_DT
)
### Calculating agent accelerations
data_df[["ax", "ay"]] = (
arr_utils.agent_aware_diff(data_df[["vx", "vy"]].to_numpy(), agent_ids)
/ SDDPEDS_DT
)
# This is likely to be very noisy... Unfortunately, SDD only
# provides center of mass data.
data_df["heading"] = np.arctan2(data_df["vy"], data_df["vx"])
agent_list: List[AgentMetadata] = list()
agent_presence: List[List[AgentMetadata]] = [
[] for _ in range(scene.length_timesteps)
]
for agent_id, frames in data_df.groupby("agent_id")["scene_ts"]:
start_frame: int = frames.iat[0].item()
last_frame: int = frames.iat[-1].item()
agent_type: AgentType = sdd_type_to_unified_type(agent_class[agent_id])
agent_metadata = AgentMetadata(
name=str(agent_id),
agent_type=agent_type,
first_timestep=start_frame,
last_timestep=last_frame,
# These values are as ballpark as it gets... It's not super reliable to use
# the pixel extents in the annotations since they are all always axis-aligned.
extent=FixedExtent(0.75, 0.75, 1.5),
)
agent_list.append(agent_metadata)
for frame in frames:
agent_presence[frame].append(agent_metadata)
# Changing the agent_id dtype to str
data_df.reset_index(inplace=True)
data_df["agent_id"] = data_df["agent_id"].astype(str)
data_df.set_index(["agent_id", "scene_ts"], inplace=True)
cache_class.save_agent_data(
data_df,
cache_path,
scene,
)
return agent_list, agent_presence
def cache_map(
self,
map_name: str,
layer_names: List[str],
cache_path: Path,
map_cache_class: Type[SceneCache],
resolution: float,
) -> None:
"""
No maps in this dataset!
"""
pass
def cache_maps(
self,
cache_path: Path,
map_cache_class: Type[SceneCache],
map_params: Dict[str, Any],
) -> None:
"""
No maps in this dataset!
"""
pass
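# --- Illustrative note (not part of the original module) ---
# The vx/vy and ax/ay columns built in get_agent_info are plain finite
# differences divided by the 1/30 s timestep, computed per agent so values do
# not mix across agent boundaries. A minimal sketch of the idea for one agent:
#
#     x = np.array([0.0, 0.1, 0.3])                 # positions at 3 consecutive frames
#     vx = np.diff(x, prepend=x[0]) / SDDPEDS_DT    # -> [0.0, 3.0, 6.0] m/s at 30 Hz
#
# (arr_utils.agent_aware_diff additionally handles the first timestep of each
# agent; its exact padding behavior is an assumption here.)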
|
b137b645df1fd2f6b611a8ae0324ebafbfdd8ee7
|
08e1045024dcea6c5e2ac9c9b5aeac2e4580a710
|
/scrapers/test/test_district_data.py
|
4fdf8c9d3f62ff875d7771b9a7e1213d779dc01e
|
[
"CC-BY-4.0"
] |
permissive
|
openZH/covid_19
|
c298feb48f47b041699bbee1d88eac54ed7e9088
|
1a2b8c3a600928a122a801ffddf8225416063fc9
|
refs/heads/master
| 2023-08-31T10:51:25.951503
| 2023-08-31T07:50:49
| 2023-08-31T07:50:49
| 246,001,728
| 491
| 227
|
CC-BY-4.0
| 2023-05-12T10:53:02
| 2020-03-09T10:07:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
test_district_data.py
|
from scrapers.scrape_common import DistrictData
def test_district_data():
dd = DistrictData()
dd.date = '1'
dd.week = 2
dd.year = 3
dd.canton = '4'
dd.district = '5'
dd.district_id = 6
dd.population = 7
dd.total_cases = 8
dd.new_cases = 9
dd.total_deceased = 10
dd.new_deceased = 11
dd.url = '12'
string = str(dd)
dd_parsed = DistrictData()
assert dd_parsed.parse(string)
assert dd.date == dd_parsed.date
assert dd.week == dd_parsed.week
assert dd.year == dd_parsed.year
assert dd.canton == dd_parsed.canton
assert dd.district == dd_parsed.district
assert dd.district_id == dd_parsed.district_id
assert dd.population == dd_parsed.population
assert dd.total_cases == dd_parsed.total_cases
assert dd.new_cases == dd_parsed.new_cases
assert dd.total_deceased == dd_parsed.total_deceased
assert dd.new_deceased == dd_parsed.new_deceased
assert dd.url == dd_parsed.url
if __name__ == "__main__":
test_district_data()
|
afdc0efaf802dfb30bdce8370c12b7f719091ce3
|
86f3973554eb61b12528835851cbdc96aba9ccc0
|
/tests/cases/test_ogf_import.py
|
efd090cf02bed4bd059b46774267ec4668b64924
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
PavelBlend/blender-xray
|
02d68e424ae9088221bafc1d0d9019690323d9da
|
a3abb9eb805182eec8ed8de4058dd744aee0e291
|
refs/heads/develop
| 2023-09-03T15:10:56.022070
| 2023-08-22T17:50:23
| 2023-08-22T17:50:23
| 20,459,902
| 150
| 40
|
BSD-2-Clause
| 2023-08-10T15:01:24
| 2014-06-03T21:12:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
test_ogf_import.py
|
from tests import utils
import re
import bpy
class TestOgfImport(utils.XRayTestCase):
def test_import_general(self):
bpy.ops.xray_import.ogf(
directory=self.binpath(),
files=[{'name': 'test_fmt_ogf_st.ogf'}],
)
bpy.ops.xray_import.ogf(
directory=self.binpath(),
files=[{'name': 'test_fmt_ogf_pm_1_link.ogf'}],
)
bpy.ops.xray_import.ogf(
directory=self.binpath(),
files=[{'name': 'test_fmt_ogf_pm_act.ogf'}],
)
bpy.ops.xray_import.ogf(
directory=self.binpath(),
files=[
{'name': 'test_fmt_ogf_pm_act.ogf'},
{'name': 'test_fmt_ogf_pm_1_link.ogf'},
{'name': 'test_fmt_ogf_st.ogf'}
],
)
bpy.ops.xray_import.ogf(
directory=self.binpath(),
files=[{'name': 'test_fmt_ogf_v3.ogf'}],
)
def test_import_gunslinger(self):
bpy.ops.xray_import.ogf(
directory=self.binpath(),
files=[{'name': 'test_fmt_ogf_gl.ogf'}],
)
self.assertReportsContains(
'WARNING',
re.compile('Description isn\'t properly read')
)
|
eef1a92cae8e91e83bdfc7aadb1ce3dc91b7a110
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/contrib/yajl/template.py
|
0a0ddd77f7a5a1de17aa7358d499bc56475a0b68
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 902
|
py
|
template.py
|
pkgname = "yajl"
pkgver = "2.1.0"
pkgrel = 0
build_style = "cmake"
hostmakedepends = [
"cmake",
"ninja",
"pkgconf",
]
pkgdesc = "Yet Another JSON Library"
maintainer = "psykose <alice@ayaya.dev>"
license = "ISC"
url = "https://github.com/lloyd/yajl"
source = f"{url}/archive/refs/tags/{pkgver}.tar.gz"
sha256 = "3fb73364a5a30efe615046d07e6db9d09fd2b41c763c5f7d3bfb121cd5c5ac5a"
# FIXME: cfi crashes in test-api
hardening = ["vis"]
# one of the few with no ctest but manual test target
def do_check(self):
self.do(self.make_cmd, "-C", self.make_dir, "test", "test-api")
def post_install(self):
self.install_license("COPYING")
self.mv(
self.destdir / "usr/lib/libyajl_s.a", self.destdir / "usr/lib/libyajl.a"
)
@subpackage("yajl-devel")
def _devel(self):
return self.default_devel()
@subpackage("yajl-libs")
def _libs(self):
return self.default_libs()
|
8e61715696db780059641170d6d915713edb9436
|
c475cd8531a94ffae69cc92371d41531dbbddb6c
|
/Projects/bullet3-2.89/examples/pybullet/examples/createMultiBodyBatch.py
|
dd53d99b084eac789f6a6943eaebfe567dc6cd25
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"Zlib"
] |
permissive
|
WolfireGames/overgrowth
|
72d3dd29cbd7254337265c29f8de3e5c32400114
|
594a2a4f9da0855304ee8cd5335d042f8e954ce1
|
refs/heads/main
| 2023-08-15T19:36:56.156578
| 2023-05-17T08:17:53
| 2023-05-17T08:20:36
| 467,448,492
| 2,264
| 245
|
Apache-2.0
| 2023-05-09T07:29:58
| 2022-03-08T09:38:54
|
C++
|
UTF-8
|
Python
| false
| false
| 5,504
|
py
|
createMultiBodyBatch.py
|
import pybullet as p
import time
import math
cid = p.connect(p.SHARED_MEMORY)
if (cid < 0):
p.connect(p.GUI, options="--minGraphicsUpdateTimeMs=16000")
p.setPhysicsEngineParameter(numSolverIterations=4, minimumSolverIslandSize=1024)
p.setTimeStep(1. / 120.)
logId = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "createMultiBodyBatch.json")
#useMaximalCoordinates is much faster than the default reduced coordinates (Featherstone)
p.loadURDF("plane100.urdf", useMaximalCoordinates=True)
#disable rendering during creation.
p.setPhysicsEngineParameter(contactBreakingThreshold=0.04)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
#disable tinyrenderer, software (CPU) renderer, we don't use it here
p.configureDebugVisualizer(p.COV_ENABLE_TINY_RENDERER, 0)
shift = [0, -0.02, 0]
meshScale = [0.1, 0.1, 0.1]
vertices = [[-1.000000, -1.000000, 1.000000], [1.000000, -1.000000, 1.000000],
[1.000000, 1.000000, 1.000000], [-1.000000, 1.000000, 1.000000],
[-1.000000, -1.000000, -1.000000], [1.000000, -1.000000, -1.000000],
[1.000000, 1.000000, -1.000000], [-1.000000, 1.000000, -1.000000],
[-1.000000, -1.000000, -1.000000], [-1.000000, 1.000000, -1.000000],
[-1.000000, 1.000000, 1.000000], [-1.000000, -1.000000, 1.000000],
[1.000000, -1.000000, -1.000000], [1.000000, 1.000000, -1.000000],
[1.000000, 1.000000, 1.000000], [1.000000, -1.000000, 1.000000],
[-1.000000, -1.000000, -1.000000], [-1.000000, -1.000000, 1.000000],
[1.000000, -1.000000, 1.000000], [1.000000, -1.000000, -1.000000],
[-1.000000, 1.000000, -1.000000], [-1.000000, 1.000000, 1.000000],
[1.000000, 1.000000, 1.000000], [1.000000, 1.000000, -1.000000]]
normals = [[0.000000, 0.000000, 1.000000], [0.000000, 0.000000, 1.000000],
[0.000000, 0.000000, 1.000000], [0.000000, 0.000000, 1.000000],
[0.000000, 0.000000, -1.000000], [0.000000, 0.000000, -1.000000],
[0.000000, 0.000000, -1.000000], [0.000000, 0.000000, -1.000000],
[-1.000000, 0.000000, 0.000000], [-1.000000, 0.000000, 0.000000],
[-1.000000, 0.000000, 0.000000], [-1.000000, 0.000000, 0.000000],
[1.000000, 0.000000, 0.000000], [1.000000, 0.000000, 0.000000],
[1.000000, 0.000000, 0.000000], [1.000000, 0.000000, 0.000000],
[0.000000, -1.000000, 0.000000], [0.000000, -1.000000, 0.000000],
[0.000000, -1.000000, 0.000000], [0.000000, -1.000000, 0.000000],
[0.000000, 1.000000, 0.000000], [0.000000, 1.000000, 0.000000],
[0.000000, 1.000000, 0.000000], [0.000000, 1.000000, 0.000000]]
uvs = [[0.750000, 0.250000], [1.000000, 0.250000], [1.000000, 0.000000], [0.750000, 0.000000],
[0.500000, 0.250000], [0.250000, 0.250000], [0.250000, 0.000000], [0.500000, 0.000000],
[0.500000, 0.000000], [0.750000, 0.000000], [0.750000, 0.250000], [0.500000, 0.250000],
[0.250000, 0.500000], [0.250000, 0.250000], [0.000000, 0.250000], [0.000000, 0.500000],
[0.250000, 0.500000], [0.250000, 0.250000], [0.500000, 0.250000], [0.500000, 0.500000],
[0.000000, 0.000000], [0.000000, 0.250000], [0.250000, 0.250000], [0.250000, 0.000000]]
indices = [
    0, 1, 2, 0, 2, 3,        # ground face
    6, 5, 4, 7, 6, 4,        # top face
    10, 9, 8, 11, 10, 8,
    12, 13, 14, 12, 14, 15,
    18, 17, 16, 19, 18, 16,
    20, 21, 22, 20, 22, 23
]
#p.configureDebugVisualizer(p.COV_ENABLE_TINY_RENDERER,0)
#the visual shape and collision shape can be re-used by all createMultiBody instances (instancing)
visualShapeId = p.createVisualShape(shapeType=p.GEOM_MESH,
rgbaColor=[1, 1, 1, 1],
specularColor=[0.4, .4, 0],
visualFramePosition=shift,
meshScale=meshScale,
vertices=vertices,
indices=indices,
uvs=uvs,
normals=normals)
collisionShapeId = p.createCollisionShape(
shapeType=p.GEOM_BOX, halfExtents=meshScale
) #MESH, vertices=vertices, collisionFramePosition=shift,meshScale=meshScale)
texUid = p.loadTexture("tex256.png")
batchPositions = []
for x in range(32):
for y in range(32):
for z in range(10):
batchPositions.append(
[x * meshScale[0] * 5.5, y * meshScale[1] * 5.5, (0.5 + z) * meshScale[2] * 2.5])
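# 32 * 32 * 10 = 10240 base positions; createMultiBody below instances the same
# collision/visual shape at every position in a single batched call.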
bodyUids = p.createMultiBody(baseMass=0,
baseInertialFramePosition=[0, 0, 0],
baseCollisionShapeIndex=collisionShapeId,
baseVisualShapeIndex=visualShapeId,
basePosition=[0, 0, 2],
batchPositions=batchPositions,
useMaximalCoordinates=True)
p.changeVisualShape(bodyUids[0], -1, textureUniqueId=texUid)
p.syncBodyInfo()
print("numBodies=", p.getNumBodies())
p.stopStateLogging(logId)
p.setGravity(0, 0, -10)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
colors = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [1, 1, 1, 1]]
currentColor = 0
while (1):
p.stepSimulation()
#time.sleep(1./120.)
#p.getCameraImage(320,200)
|
3e450795f59c782c733f714b04e5b28c2e202d65
|
374b3f27fe3cf032e88eccac5992c83eba0ad1b2
|
/tutorials/W1D3_GeneralizedLinearModels/solutions/W1D3_Tutorial2_Solution_89590c8d.py
|
49a3af5a9831d374d463bc2e4d602155b2a56fd0
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] |
permissive
|
NeuromatchAcademy/course-content
|
e2fdca96bcbdc78afaa209e4e77438f44a56c82d
|
3d638d00f02d9fd269fa2aff7d062558afdcb126
|
refs/heads/main
| 2023-08-16T16:09:09.314153
| 2023-08-02T06:21:49
| 2023-08-02T06:21:49
| 262,856,980
| 2,678
| 1,079
|
CC-BY-4.0
| 2023-08-17T00:32:24
| 2020-05-10T19:09:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 193
|
py
|
W1D3_Tutorial2_Solution_89590c8d.py
|
def sigmoid(z):
"""Return the logistic transform of z."""
sigmoid = 1 / (1 + np.exp(-z))
return sigmoid
# Visualize
with plt.xkcd():
  plot_function(sigmoid, r"\sigma", "z", (-10, 10))
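# Quick sanity check (illustrative, not part of the original solution):
# sigmoid(0) = 1 / (1 + e^0) = 0.5, sigmoid(10) ~ 0.99995 and sigmoid(-10) ~ 0.00005.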
|
f4dc113eda1c12bcdd66ec452e4a0d326cfe39ee
|
fcc3fcd8da44b7d6bd46098df9693d6fb01cef73
|
/jans-cli-tui/cli_tui/plugins/030_scim/main.py
|
87bd52ec28fb8c41dbaa75a976fbc1fa18c0fa64
|
[
"Apache-2.0"
] |
permissive
|
JanssenProject/jans
|
cb633472825787b68ecfba7db97b5b7e5c87e7a5
|
66c4ef766a62788437cce88974357a9a2b20de21
|
refs/heads/main
| 2023-09-01T07:04:48.645163
| 2023-08-31T10:57:05
| 2023-08-31T10:57:05
| 309,721,058
| 400
| 68
|
Apache-2.0
| 2023-09-14T17:42:33
| 2020-11-03T15:00:37
|
Java
|
UTF-8
|
Python
| false
| false
| 9,819
|
py
|
main.py
|
import asyncio
from prompt_toolkit.application import Application
from prompt_toolkit.layout.containers import HSplit, VSplit, Window
from prompt_toolkit.layout.dimension import D
from prompt_toolkit.widgets import Button, Frame
from wui_components.jans_drop_down import DropDownWidget
from utils.utils import DialogUtils
from utils.static import cli_style
from utils.multi_lang import _
class Plugin(DialogUtils):
"""This is a general class for plugins
"""
def __init__(
self,
app: Application
) -> None:
"""init for Plugin class "scim"
Args:
app (Generic): The main Application class
"""
self.app = app
self.pid = 'scim'
self.name = 'S[C]IM'
self.server_side_plugin = True
self.app_config = {}
self.widgets_ready = False
self.container = Frame(
body=HSplit([Button(text=_("Get Scim Configuration"), handler=self.get_app_config)], width=D()),
height=D())
def process(self) -> None:
pass
def set_center_frame(self) -> None:
"""center frame content
"""
self.app.center_container = self.container
def create_widgets(self) -> None:
"""SCIM Application configuration widgets are created in this fonction
"""
self.save_button = Button(_("Save"), handler=self.save_app_config)
schema = self.app.cli_object.get_schema_from_reference('SCIM', '#/components/schemas/AppConfiguration')
self.container = HSplit([
self.app.getTitledText(_("Base DN"), name='baseDN', value=self.app_config.get('baseDN',''), jans_help=self.app.get_help_from_schema(schema, 'baseDN'), read_only=True, style=cli_style.edit_text, widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("Application Url"), name='applicationUrl', value=self.app_config.get('applicationUrl',''), jans_help=self.app.get_help_from_schema(schema, 'applicationUrl'), style=cli_style.edit_text, widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("Base Endpoint"), name='baseEndpoint', value=self.app_config.get('baseEndpoint',''), jans_help=self.app.get_help_from_schema(schema, 'baseEndpoint'), style=cli_style.edit_text,widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("Person Custom Object Class"), name='personCustomObjectClass', value=self.app_config.get('personCustomObjectClass',''), jans_help=self.app.get_help_from_schema(schema, 'personCustomObjectClass'), style=cli_style.edit_text,widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("Auth Issuer"), name='oxAuthIssuer', value=self.app_config.get('oxAuthIssuer',''), jans_help=self.app.get_help_from_schema(schema, 'oxAuthIssuer'), style=cli_style.edit_text,widget_style=cli_style.black_bg_widget),
self.app.getTitledRadioButton(_("Protection Mode"), name='protectionMode', values=[('OAUTH', 'OAUTH'),('BYPASS', 'BYPASS')], current_value=self.app_config.get('protectionMode'), jans_help=self.app.get_help_from_schema(schema, 'protectionMode'), style='class:outh-client-radiobutton',widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("Max Count"), name='maxCount', value=self.app_config.get('maxCount',''), jans_help=self.app.get_help_from_schema(schema, 'maxCount'), text_type='integer', style=cli_style.edit_text,widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("Bulk Max Operations"), name='bulkMaxOperations', value=self.app_config.get('bulkMaxOperations',''), jans_help=self.app.get_help_from_schema(schema, 'bulkMaxOperations'), text_type='integer', style=cli_style.edit_text,widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("Bulk Max Payload Size"), name='bulkMaxPayloadSize', value=self.app_config.get('bulkMaxPayloadSize',''), jans_help=self.app.get_help_from_schema(schema, 'bulkMaxPayloadSize'), text_type='integer', style=cli_style.edit_text,widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("User Extension Schema URI"), name='userExtensionSchemaURI', value=self.app_config.get('userExtensionSchemaURI',''), jans_help=self.app.get_help_from_schema(schema, 'userExtensionSchemaURI'), style=cli_style.edit_text,widget_style=cli_style.black_bg_widget),
self.app.getTitledWidget(
_("Logging Level"),
name='loggingLevel',
widget=DropDownWidget(
values=[('TRACE', 'TRACE'), ('DEBUG', 'DEBUG'), ('INFO', 'INFO'), ('WARN', 'WARN'),('ERROR', 'ERROR'),('FATAL', 'FATAL'),('OFF', 'OFF')],
value=self.app_config.get('loggingLevel')
),
jans_help=self.app.get_help_from_schema(schema, 'loggingLevel'),
style='class:outh-client-dropdown'
),
self.app.getTitledText(_("Logging Layout"), name='loggingLayout', value=self.app_config.get('loggingLayout',''), jans_help=self.app.get_help_from_schema(schema, 'loggingLayout'), style=cli_style.edit_text,widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("External Logger Configuration"), name='externalLoggerConfiguration', value=self.app_config.get('externalLoggerConfiguration',''), jans_help=self.app.get_help_from_schema(schema, 'externalLoggerConfiguration'), style=cli_style.edit_text,widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("Metric Reporter Interval"), name='metricReporterInterval', value=self.app_config.get('metricReporterInterval',''), jans_help=self.app.get_help_from_schema(schema, 'metricReporterInterval'), style=cli_style.edit_text, text_type='integer',widget_style=cli_style.black_bg_widget),
self.app.getTitledText(_("Metric Reporter Keep Data Days"), name='metricReporterKeepDataDays', value=self.app_config.get('metricReporterKeepDataDays',''), jans_help=self.app.get_help_from_schema(schema, 'metricReporterKeepDataDays'), style=cli_style.edit_text, text_type='integer',widget_style=cli_style.black_bg_widget),
self.app.getTitledCheckBox(_("Metric Reporter Enabled"), name='metricReporterEnabled', checked=self.app_config.get('metricReporterEnabled'), jans_help=self.app.get_help_from_schema(schema, 'metricReporterEnabled'), style=cli_style.check_box, widget_style=cli_style.black_bg_widget),
self.app.getTitledCheckBox(_("Disable Jdk Logger"), name='disableJdkLogger', checked=self.app_config.get('disableJdkLogger'), jans_help=self.app.get_help_from_schema(schema, 'disableJdkLogger'), style=cli_style.check_box, widget_style=cli_style.black_bg_widget),
self.app.getTitledCheckBox(_("Use Local Cache"), name='useLocalCache', checked=self.app_config.get('useLocalCache'), jans_help=self.app.get_help_from_schema(schema, 'useLocalCache'), style=cli_style.check_box, widget_style=cli_style.black_bg_widget),
VSplit([Window(), self.save_button, Window()])
],
width=D()
)
self.app.center_container = HSplit([ self.container],style='bg:black', height=D())
def get_app_config(self) -> None:
"""Gets SCIM application configurations from server.
"""
async def coroutine():
cli_args = {'operation_id': 'get-scim-config'}
self.app.start_progressing()
response = await self.app.loop.run_in_executor(self.app.executor, self.app.cli_requests, cli_args)
self.app.stop_progressing()
self.app_config = response.json()
self.create_widgets()
self.app.invalidate()
self.app.layout.focus(self.app.center_container)
asyncio.ensure_future(coroutine())
def save_app_config(self) -> None:
"""Save button handler for saving SCIM application configurations.
        Once configuration data is obtained from the form, patch operations are prepared and sent to the server.
"""
data = self.make_data_from_dialog({'scim': self.container})
self.app.logger.debug("SCIM APP CONFIG {}".format(data))
        patch_list = []
        for key in self.app_config:
            if self.app_config[key] != data[key]:
                patch_list.append({'op':'replace', 'path': key, 'value': data[key]})
        for key in data:
            if data[key] and key not in self.app_config:
                patch_list.append({'op':'add', 'path': key, 'value': data[key]})
        if not patch_list:
            self.app.show_message(_("Warning"), _("No changes were made to the SCIM application configuration. Nothing to save."), tobefocused=self.app.center_container)
            return
        async def coroutine():
            cli_args = {'operation_id': 'patch-scim-config', 'data': patch_list}
self.app.start_progressing()
response = await self.app.loop.run_in_executor(self.app.executor, self.app.cli_requests, cli_args)
self.app.stop_progressing()
asyncio.ensure_future(coroutine())
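# --- Illustrative note (not part of the original plugin) ---
# save_app_config() turns the form diff into a JSON-Patch style list. For
# example, if only the logging level changed, the request body would be:
#     [{'op': 'replace', 'path': 'loggingLevel', 'value': 'DEBUG'}]
# Keys present in the form but absent from the current configuration are sent
# as 'add' operations instead.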
|
c3398d59b4db2fee25a45240c418dcf2c38dd2ac
|
97d7455fbaa56813e97cf601e4a23786d47c2e2c
|
/general_itests/steps/tron_steps.py
|
164d99f1de9f29e65480a6c3abe158ce25a9e100
|
[
"Apache-2.0"
] |
permissive
|
Yelp/paasta
|
9138fbb0beaaa6146520c1483144679f9d5d4941
|
6fafc7c86073f136e64b959b963994be3d6160ab
|
refs/heads/master
| 2023-08-17T00:00:47.610727
| 2023-08-10T21:40:26
| 2023-08-10T21:40:26
| 44,998,824
| 1,805
| 291
|
Apache-2.0
| 2023-09-13T20:40:04
| 2015-10-26T21:35:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
tron_steps.py
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from behave import given
from behave import when
from paasta_tools.utils import _run
@given("some tronfig")
def step_some_tronfig(context):
context.soa_dir = "fake_soa_configs_tron"
@when("we run paasta_setup_tron_namespace in dry-run mode")
def step_run_paasta_setup_tron_namespace_dry_run(context):
cmd = (
f"paasta_setup_tron_namespace --dry-run -a --soa-dir {context.soa_dir}"
f" --cluster test-cluster"
)
context.return_code, context.output = _run(command=cmd)
|
1824e5fa6bc47e5bf270e2715430bb2cdcc27249
|
c5311176cd07f267fb1ca4f9cd71b308ed0778c5
|
/dygie/predictors/__init__.py
|
f74c49dc60c99db3a2cd56ddac964da75b317686
|
[
"MIT"
] |
permissive
|
dwadden/dygiepp
|
1a71885b0588bb5f0997dec13b27ebfd30169e7c
|
ab764cd0d48b7c430a78a1edddf5acaeec13c109
|
refs/heads/master
| 2023-07-27T19:30:00.399646
| 2023-07-19T20:52:06
| 2023-07-19T20:52:06
| 171,385,430
| 534
| 129
|
MIT
| 2023-07-19T20:52:08
| 2019-02-19T01:48:41
|
Python
|
UTF-8
|
Python
| false
| false
| 50
|
py
|
__init__.py
|
from dygie.predictors.dygie import DyGIEPredictor
|
bf04c27cfff4193e28c9066a5ce51f1c2e6711b7
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/helpers.py
|
cc14e7d32549804dc01bcd623b603390c5bf1401
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,555
|
py
|
helpers.py
|
# coding=utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
class QnaAuthoringHelper:
def create_test_project(
client,
project_name = "IssacNewton",
is_deployable = False,
add_sources = False,
get_export_url = False,
delete_old_project = False,
add_qnas = False,
**kwargs
):
# create project
client.create_project(
project_name=project_name,
options={
"description": "biography of Sir Issac Newton",
"language": "en",
"multilingualResource": True,
"settings": {
"defaultAnswer": "no answer"
}
})
# add sources
if is_deployable or add_sources:
QnaAuthoringHelper.add_sources(client, project_name, **kwargs)
if get_export_url:
return QnaAuthoringHelper.export_project(client, project_name, delete_project=delete_old_project, **kwargs)
def add_sources(client, project_name, **kwargs):
update_sources_poller = client.begin_update_sources(
project_name=project_name,
sources=[
{
"op": "add",
"value": {
"displayName": "Issac Newton Bio",
"sourceUri": "https://wikipedia.org/wiki/Isaac_Newton",
"sourceKind": "url"
}
}
],
**kwargs
)
update_sources_poller.result()
def export_project(client, project_name, delete_project=True, **kwargs):
# export project
export_poller = client.begin_export(
project_name=project_name,
file_format="json",
**kwargs
)
result = export_poller.result()
# delete old project
if delete_project:
delete_poller = client.begin_delete_project(
project_name=project_name,
**kwargs
)
delete_poller.result()
return result["resultUrl"]
class QnaAuthoringAsyncHelper:
async def create_test_project(
client,
project_name = "IssacNewton",
is_deployable = False,
add_sources = False,
get_export_url = False,
delete_old_project = False,
add_qnas = False,
**kwargs
):
# create project
await client.create_project(
project_name=project_name,
options={
"description": "biography of Sir Issac Newton",
"language": "en",
"multilingualResource": True,
"settings": {
"defaultAnswer": "no answer"
}
})
# add sources
if is_deployable or add_sources:
await QnaAuthoringAsyncHelper.add_sources(client, project_name, **kwargs)
if get_export_url:
return await QnaAuthoringAsyncHelper.export_project(client, project_name, delete_project=delete_old_project, **kwargs)
async def add_sources(client, project_name, **kwargs):
update_sources_poller = await client.begin_update_sources(
project_name=project_name,
sources=[
{
"op": "add",
"value": {
"displayName": "Issac Newton Bio",
"sourceUri": "https://wikipedia.org/wiki/Isaac_Newton",
"sourceKind": "url"
}
}
],
**kwargs
)
await update_sources_poller.result()
async def export_project(client, project_name, delete_project=True, **kwargs):
# export project
export_poller = await client.begin_export(
project_name=project_name,
file_format="json",
**kwargs
)
result = await export_poller.result()
# delete old project
if delete_project:
delete_poller = await client.begin_delete_project(
project_name=project_name,
**kwargs
)
await delete_poller.result()
return result["resultUrl"]
----------------------------------------------------------------
blob_id: ac76d254373e75927f4907edbf6412532121ac2f
directory_id: 2853845c003d03db22f67c3303fa1ec333180ae7
path: /web_console_v2/api/fedlearner_webconsole/utils/system_envs.py
content_id: b75f607a6a1a7563eb6bf85d7d120271979d5ea6
detected_licenses: ["LicenseRef-scancode-proprietary-license", "Apache-2.0"]
license_type: permissive
repo_name: bytedance/fedlearner
snapshot_id: fc1dd2ba2ec88092e83a32732eccea52451ce552
revision_id: 436e4959952c970917ee8f47b920f0a76cd4dd05
branch_name: refs/heads/master
visit_date: 2023-08-14T23:01:02.875453
revision_date: 2023-05-23T03:44:03
committer_date: 2023-05-23T03:44:03
github_id: 235,348,659
star_events_count: 893
fork_events_count: 243
gha_license_id: Apache-2.0
gha_event_created_at: 2023-06-08T07:37:18
gha_created_at: 2020-01-21T13:26:35
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,348
extension: py
filename: system_envs.py
content:
# Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import json
import os
def _is_valid_env(env: dict) -> bool:
return env.get('valueFrom', None) is not None or \
env.get('value', None) is not None
def get_system_envs():
    """Gets a comma-joined string of JSON objects describing the system envs."""
# Most envs should be from pod's env
envs = [
{
'name': 'POD_IP',
'valueFrom': {
'fieldRef': {
'fieldPath': 'status.podIP'
}
}
},
{
'name': 'POD_NAME',
'valueFrom': {
'fieldRef': {
'fieldPath': 'metadata.name'
}
}
},
{
'name': 'CPU_REQUEST',
'valueFrom': {
'resourceFieldRef': {
'resource': 'requests.cpu'
}
}
},
{
'name': 'MEM_REQUEST',
'valueFrom': {
'resourceFieldRef': {
'resource': 'requests.memory'
}
}
},
{
'name': 'CPU_LIMIT',
'valueFrom': {
'resourceFieldRef': {
'resource': 'limits.cpu'
}
}
},
{
'name': 'MEM_LIMIT',
'valueFrom': {
'resourceFieldRef': {
'resource': 'limits.memory'
}
}
},
{
'name': 'ES_HOST',
'value': os.getenv('ES_HOST')
},
{
'name': 'ES_PORT',
'value': os.getenv('ES_PORT')
},
{
'name': 'DB_HOST',
'value': os.getenv('DB_HOST')
},
{
'name': 'DB_PORT',
'value': os.getenv('DB_PORT')
},
{
'name': 'DB_DATABASE',
'value': os.getenv('DB_DATABASE')
},
{
'name': 'DB_USERNAME',
'value': os.getenv('DB_USERNAME')
},
{
'name': 'DB_PASSWORD',
'value': os.getenv('DB_PASSWORD')
},
{
'name': 'KVSTORE_TYPE',
'value': os.getenv('KVSTORE_TYPE')
},
{
'name': 'ETCD_NAME',
'value': os.getenv('ETCD_NAME')
},
{
'name': 'ETCD_ADDR',
'value': os.getenv('ETCD_ADDR')
},
{
'name': 'ETCD_BASE_DIR',
'value': os.getenv('ETCD_BASE_DIR')
}
]
return ','.join([json.dumps(env)
for env in envs if _is_valid_env(env)])
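# A small sketch of how the value built above can be consumed. get_system_envs()
# returns a comma-joined sequence of JSON objects rather than a single JSON
# document, so a consumer has to wrap it in brackets before parsing. The helper
# name below is illustrative only.
def _parse_system_envs(serialized):
    """Rebuild the list of env definitions produced by get_system_envs()."""
    return json.loads('[' + serialized + ']')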
if __name__ == '__main__':
print(get_system_envs())
----------------------------------------------------------------
blob_id: 984ff605ee87b1cfaa4c7bc54558cd8d985754ca
directory_id: a41e1498e3c080f47abd8e8e57157548df3ebbf1
path: /pandas/tests/reshape/merge/test_merge_asof.py
content_id: d4cb89dadde9bc6cc756485a1cdc03405d3431be
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: pandas-dev/pandas
snapshot_id: e7e639454a298bebc272622e66faa9829ea393bb
revision_id: c7325d7e7e77ecb4a4e57b48bc25265277c75712
branch_name: refs/heads/main
visit_date: 2023-09-01T12:42:07.927176
revision_date: 2023-09-01T11:14:10
committer_date: 2023-09-01T11:14:10
github_id: 858,127
star_events_count: 36,166
fork_events_count: 18,728
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-14T21:18:41
gha_created_at: 2010-08-24T01:37:33
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 56,701
extension: py
filename: test_merge_asof.py
content:
import datetime
import numpy as np
import pytest
import pytz
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Timedelta,
merge_asof,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
@pytest.fixture(params=["s", "ms", "us", "ns"])
def unit(request):
"""
Resolution for datetimelike dtypes.
"""
return request.param
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture
def trades(self, datapath):
return self.read_data(datapath, "trades.csv")
@pytest.fixture
def quotes(self, datapath):
return self.read_data(datapath, "quotes.csv", dedupe=True)
@pytest.fixture
def asof(self, datapath):
return self.read_data(datapath, "asof.csv")
@pytest.fixture
def tolerance(self, datapath):
return self.read_data(datapath, "tolerance.csv")
@pytest.fixture
def allow_exact_matches(self, datapath):
return self.read_data(datapath, "allow_exact_matches.csv")
@pytest.fixture
def allow_exact_matches_and_tolerance(self, datapath):
return self.read_data(datapath, "allow_exact_matches_and_tolerance.csv")
def test_examples1(self):
"""doc-string examples"""
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self, unit):
"""doc-string examples"""
if unit == "s":
            pytest.skip(
                "This test is invalid for unit='s' because that would "
                "round the trades['time']"
            )
trades = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
).astype(f"M8[{unit}]"),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
).astype(f"M8[{unit}]"),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
merge_asof(trades, quotes, on="time", by="ticker")
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("2ms"))
expected = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
).astype(f"M8[{unit}]"),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
"""doc-string examples"""
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
"""doc-string examples"""
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self, trades, asof, quotes):
expected = asof
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self, trades, asof, quotes):
expected = asof
trades.ticker = trades.ticker.astype("category")
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self, trades, asof, quotes):
# GH14253
expected = asof
trades = trades.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
        # left-only index uses right's index, oddly
expected.index = result.index
        # time column appears after left's columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self, trades, asof, quotes):
expected = asof
quotes = quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self, trades, asof, quotes):
expected = asof.set_index("time")
trades = trades.set_index("time")
quotes = quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_left(self, trades, quotes):
# MultiIndex is prohibited
trades = trades.set_index(["time", "price"])
quotes = quotes.set_index("time")
with pytest.raises(MergeError, match="left can only have one index"):
merge_asof(trades, quotes, left_index=True, right_index=True)
def test_multi_index_right(self, trades, quotes):
# MultiIndex is prohibited
trades = trades.set_index("time")
quotes = quotes.set_index(["time", "bid"])
with pytest.raises(MergeError, match="right can only have one index"):
merge_asof(trades, quotes, left_index=True, right_index=True)
def test_on_and_index_left_on(self, trades, quotes):
# "on" parameter and index together is prohibited
trades = trades.set_index("time")
quotes = quotes.set_index("time")
msg = 'Can only pass argument "left_on" OR "left_index" not both.'
with pytest.raises(MergeError, match=msg):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
def test_on_and_index_right_on(self, trades, quotes):
trades = trades.set_index("time")
quotes = quotes.set_index("time")
msg = 'Can only pass argument "right_on" OR "right_index" not both.'
with pytest.raises(MergeError, match=msg):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self, trades, asof, quotes):
# GH14253
expected = asof
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self, trades, asof, quotes):
expected = asof
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_mismatched_index_dtype(self):
# similar to test_multiby_indexed, but we change the dtype on left.index
left = pd.DataFrame(
[
[to_datetime("20160602"), 1, "a"],
[to_datetime("20160602"), 2, "a"],
[to_datetime("20160603"), 1, "b"],
[to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
# different dtype for the index
left.index = left.index - pd.Timestamp(0)
right = pd.DataFrame(
[
[to_datetime("20160502"), 1, "a", 1.0],
[to_datetime("20160502"), 2, "a", 2.0],
[to_datetime("20160503"), 1, "b", 3.0],
[to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
msg = "incompatible merge keys"
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, left_index=True, right_index=True, by=["k1", "k2"])
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[to_datetime("20160602"), 1, "a"],
[to_datetime("20160602"), 2, "a"],
[to_datetime("20160603"), 1, "b"],
[to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[to_datetime("20160502"), 1, "a", 1.0],
[to_datetime("20160502"), 2, "a", 2.0],
[to_datetime("20160503"), 1, "b", 3.0],
[to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[to_datetime("20160602"), 1, "a", 1.0],
[to_datetime("20160602"), 2, "a", 2.0],
[to_datetime("20160603"), 1, "b", 3.0],
[to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(
MergeError, match="left_by and right_by must be the same length"
):
merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self, trades, asof, quotes):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(asof)
trades = f(trades)
quotes = f(quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self, trades, quotes):
msg = r"incompatible merge keys \[1\] .* must be the same type"
with pytest.raises(MergeError, match=msg):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError, match="can only asof on a key for left"):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError, match="can only asof on a key for left"):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath, trades, quotes):
q = (
pd.concat([quotes, quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self, trades, quotes):
msg = "allow_exact_matches must be boolean, passed foo"
with pytest.raises(MergeError, match=msg):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self, trades, quotes):
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
msg = r"incompatible tolerance .*, must be compat with type .*"
# incompat
with pytest.raises(MergeError, match=msg):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError, match=msg):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
msg = "tolerance must be positive"
# invalid negative
with pytest.raises(MergeError, match=msg):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError, match=msg):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self, trades, quotes):
trades = trades.sort_values("time", ascending=False)
quotes = quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic_increasing
assert not quotes.time.is_monotonic_increasing
with pytest.raises(ValueError, match="left keys must be sorted"):
merge_asof(trades, quotes, on="time", by="ticker")
trades = trades.sort_values("time")
assert trades.time.is_monotonic_increasing
assert not quotes.time.is_monotonic_increasing
with pytest.raises(ValueError, match="right keys must be sorted"):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = quotes.sort_values("time")
assert trades.time.is_monotonic_increasing
assert quotes.time.is_monotonic_increasing
# ok, though has dupes
merge_asof(trades, quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance_ts",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance_ts, trades, quotes, tolerance):
result = merge_asof(
trades, quotes, on="time", by="ticker", tolerance=tolerance_ts
)
expected = tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self, unit):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
unit=unit,
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
unit=unit,
),
"value2": list("ABCDE"),
}
)
result = merge_asof(left, right, on="date", tolerance=Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
unit=unit,
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
def test_index_tolerance(self, trades, quotes, tolerance):
# GH 15135
expected = tolerance.set_index("time")
trades = trades.set_index("time")
quotes = quotes.set_index("time")
result = merge_asof(
trades,
quotes,
left_index=True,
right_index=True,
by="ticker",
tolerance=Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches(self, trades, quotes, allow_exact_matches):
result = merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches=False
)
expected = allow_exact_matches
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(
self, trades, quotes, allow_exact_matches_and_tolerance
):
result = merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=Timedelta("100ms"),
allow_exact_matches=False,
)
expected = allow_exact_matches_and_tolerance
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time": to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [1],
}
)
tm.assert_frame_equal(result, expected)
result = merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
}
)
df2 = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
"version": [np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
)
result = merge_asof(
left,
right,
on="a",
direction="forward",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
)
result = merge_asof(
left,
right,
on="a",
direction="nearest",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Y", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, np.nan, 11, 15, 16],
}
)
result = merge_asof(left, right, on="a", by="b", direction="forward")
tm.assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Z", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, 1, 11, 11, 16],
}
)
result = merge_asof(left, right, on="a", by="b", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
},
columns=["time", "key", "value1"],
)
df2 = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.015",
"20160525 13:30:00.020",
"20160525 13:30:00.025",
"20160525 13:30:00.035",
"20160525 13:30:00.040",
"20160525 13:30:00.055",
"20160525 13:30:00.060",
"20160525 13:30:00.065",
]
),
"key": [2, 1, 1, 3, 2, 1, 2, 3],
"value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
},
columns=["time", "key", "value2"],
)
result = merge_asof(df1, df2, on="time", by="key")
expected = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
"value2": [2.2, 2.1, 2.3, 2.4, 2.7],
},
columns=["time", "key", "value1", "value2"],
)
tm.assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame(
{
"price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "price"],
)
df2 = pd.DataFrame(
{"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
columns=["price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
result = merge_asof(df1, df2, on="price")
expected = pd.DataFrame(
{
"symbol": list("BGACEDF"),
"price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
"mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
},
columns=["symbol", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_numpy_dtype):
# see gh-13936
dtype = np.dtype(any_real_numpy_dtype).type
df1 = pd.DataFrame(
{"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
columns=["symbol", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "result": list("xyzw")},
columns=["value", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz"),
},
columns=["symbol", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self, any_real_numpy_dtype):
# see gh-13936
dtype = np.dtype(any_real_numpy_dtype).type
df1 = pd.DataFrame(
{
"value": [5, 2, 25, 100, 78, 120, 79],
"key": [1, 2, 3, 2, 3, 1, 2],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "key", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
columns=["value", "key", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = merge_asof(df1, df2, on="value", by="key")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"key": [2, 1, 3, 3, 2, 2, 1],
"value": [2, 5, 25, 78, 79, 100, 120],
"result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
},
columns=["symbol", "key", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"price": [
3.26,
3.2599,
3.2598,
12.58,
12.59,
12.5,
378.15,
378.2,
378.25,
],
},
columns=["symbol", "exch", "price"],
)
df2 = pd.DataFrame(
{
"exch": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
"mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0],
},
columns=["exch", "price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
df2 = df2.sort_values("price").reset_index(drop=True)
result = merge_asof(df1, df2, on="price", by="exch")
expected = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [3, 2, 1, 3, 1, 2, 1, 2, 3],
"price": [
3.2598,
3.2599,
3.26,
12.5,
12.58,
12.59,
378.15,
378.2,
378.25,
],
"mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25],
},
columns=["symbol", "exch", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_merge_datatype_error_raises(self):
msg = r"Incompatible merge dtype, .*, both sides must have numeric dtype"
left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]})
right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]})
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_datatype_categorical_error_raises(self):
msg = (
r"incompatible merge keys \[0\] .* both sides category, "
"but not equal ones"
)
left = pd.DataFrame(
{"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])}
)
right = pd.DataFrame(
{
"right_val": [1, 2, 3, 6, 7],
"a": pd.Categorical(["a", "X", "c", "X", "b"]),
}
)
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_groupby_multiple_column_with_categorical_column(self):
# GH 16454
df = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
result = merge_asof(df, df, on="x", by=["y", "z"])
expected = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"func", [lambda x: x, lambda x: to_datetime(x)], ids=["numeric", "datetime"]
)
@pytest.mark.parametrize("side", ["left", "right"])
def test_merge_on_nans(self, func, side):
# GH 23189
msg = f"Merge keys contain null values on {side} side"
nulls = func([1.0, 5.0, np.nan])
non_nulls = func([1.0, 5.0, 10.0])
df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]})
df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]})
with pytest.raises(ValueError, match=msg):
if side == "left":
merge_asof(df_null, df, on="a")
else:
merge_asof(df, df_null, on="a")
def test_by_nullable(self, any_numeric_ea_dtype):
# Note: this test passes if instead of using pd.array we use
# np.array([np.nan, 1]). Other than that, I (@jbrockmendel)
# have NO IDEA what the expected behavior is.
# TODO(GH#32306): may be relevant to the expected behavior here.
arr = pd.array([pd.NA, 0, 1], dtype=any_numeric_ea_dtype)
if arr.dtype.kind in ["i", "u"]:
max_val = np.iinfo(arr.dtype.numpy_dtype).max
else:
max_val = np.finfo(arr.dtype.numpy_dtype).max
# set value s.t. (at least for integer dtypes) arr._values_for_argsort
# is not an injection
arr[2] = max_val
left = pd.DataFrame(
{
"by_col1": arr,
"by_col2": ["HELLO", "To", "You"],
"on_col": [2, 4, 6],
"value": ["a", "c", "e"],
}
)
right = pd.DataFrame(
{
"by_col1": arr,
"by_col2": ["WORLD", "Wide", "Web"],
"on_col": [1, 2, 6],
"value": ["b", "d", "f"],
}
)
result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
expected = pd.DataFrame(
{
"by_col1": arr,
"by_col2": ["HELLO", "To", "You"],
"on_col": [2, 4, 6],
"value_x": ["a", "c", "e"],
}
)
expected["value_y"] = np.array([np.nan, np.nan, np.nan], dtype=object)
tm.assert_frame_equal(result, expected)
def test_merge_by_col_tz_aware(self):
# GH 21184
left = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [2],
"values": ["a"],
}
)
right = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [1],
"values": ["b"],
}
)
result = merge_asof(left, right, by="by_col", on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]],
columns=["by_col", "on_col", "values_x", "values_y"],
)
tm.assert_frame_equal(result, expected)
def test_by_mixed_tz_aware(self):
# GH 26649
left = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["HELLO"],
"on_col": [2],
"value": ["a"],
}
)
right = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["WORLD"],
"on_col": [1],
"value": ["b"],
}
)
result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]],
columns=["by_col1", "by_col2", "on_col", "value_x"],
)
expected["value_y"] = np.array([np.nan], dtype=object)
tm.assert_frame_equal(result, expected)
def test_timedelta_tolerance_nearest(self, unit):
# GH 27642
if unit == "s":
pytest.skip(
"This test is invalid with unit='s' because that would "
"round left['time']"
)
left = pd.DataFrame(
list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])),
columns=["time", "left"],
)
left["time"] = pd.to_timedelta(left["time"], "ms").astype(f"m8[{unit}]")
right = pd.DataFrame(
list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])),
columns=["time", "right"],
)
right["time"] = pd.to_timedelta(right["time"], "ms").astype(f"m8[{unit}]")
expected = pd.DataFrame(
list(
zip(
[0, 5, 10, 15, 20, 25],
[0, 1, 2, 3, 4, 5],
[0, np.nan, 2, 4, np.nan, np.nan],
)
),
columns=["time", "left", "right"],
)
expected["time"] = pd.to_timedelta(expected["time"], "ms").astype(f"m8[{unit}]")
result = merge_asof(
left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest"
)
tm.assert_frame_equal(result, expected)
def test_int_type_tolerance(self, any_int_dtype):
# GH #28870
left = pd.DataFrame({"a": [0, 10, 20], "left_val": [1, 2, 3]})
right = pd.DataFrame({"a": [5, 15, 25], "right_val": [1, 2, 3]})
left["a"] = left["a"].astype(any_int_dtype)
right["a"] = right["a"].astype(any_int_dtype)
expected = pd.DataFrame(
{"a": [0, 10, 20], "left_val": [1, 2, 3], "right_val": [np.nan, 1.0, 2.0]}
)
expected["a"] = expected["a"].astype(any_int_dtype)
result = merge_asof(left, right, on="a", tolerance=10)
tm.assert_frame_equal(result, expected)
def test_merge_index_column_tz(self):
# GH 29864
index = pd.date_range("2019-10-01", freq="30min", periods=5, tz="UTC")
left = pd.DataFrame([0.9, 0.8, 0.7, 0.6], columns=["xyz"], index=index[1:])
right = pd.DataFrame({"from_date": index, "abc": [2.46] * 4 + [2.19]})
result = merge_asof(
left=left, right=right, left_index=True, right_on=["from_date"]
)
expected = pd.DataFrame(
{
"xyz": [0.9, 0.8, 0.7, 0.6],
"from_date": index[1:],
"abc": [2.46] * 3 + [2.19],
},
index=pd.date_range(
"2019-10-01 00:30:00", freq="30min", periods=4, tz="UTC"
),
)
tm.assert_frame_equal(result, expected)
result = merge_asof(
left=right, right=left, right_index=True, left_on=["from_date"]
)
expected = pd.DataFrame(
{
"from_date": index,
"abc": [2.46] * 4 + [2.19],
"xyz": [np.nan, 0.9, 0.8, 0.7, 0.6],
},
index=Index([0, 1, 2, 3, 4]),
)
tm.assert_frame_equal(result, expected)
def test_left_index_right_index_tolerance(self, unit):
# https://github.com/pandas-dev/pandas/issues/35558
if unit == "s":
pytest.skip(
"This test is invalid with unit='s' because that would round dr1"
)
dr1 = pd.date_range(
start="1/1/2020", end="1/20/2020", freq="2D", unit=unit
) + Timedelta(seconds=0.4).as_unit(unit)
dr2 = pd.date_range(start="1/1/2020", end="2/1/2020", unit=unit)
df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1))
df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2))
expected = pd.DataFrame(
{"val1": "foo", "val2": "bar"}, index=pd.DatetimeIndex(dr1)
)
result = merge_asof(
df1,
df2,
left_index=True,
right_index=True,
tolerance=Timedelta(seconds=0.5),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs", [{"on": "x"}, {"left_index": True, "right_index": True}]
)
@pytest.mark.parametrize(
"data",
[["2019-06-01 00:09:12", "2019-06-01 00:10:29"], [1.0, "2019-06-01 00:10:29"]],
)
def test_merge_asof_non_numerical_dtype(kwargs, data):
# GH#29130
left = pd.DataFrame({"x": data}, index=data)
right = pd.DataFrame({"x": data}, index=data)
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, **kwargs)
def test_merge_asof_non_numerical_dtype_object():
# GH#29130
left = pd.DataFrame({"a": ["12", "13", "15"], "left_val1": ["a", "b", "c"]})
right = pd.DataFrame({"a": ["a", "b", "c"], "left_val": ["d", "e", "f"]})
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(
left,
right,
left_on="left_val1",
right_on="a",
left_by="a",
right_by="left_val",
)
@pytest.mark.parametrize(
"kwargs",
[
{"right_index": True, "left_index": True},
{"left_on": "left_time", "right_index": True},
{"left_index": True, "right_on": "right"},
],
)
def test_merge_asof_index_behavior(kwargs):
# GH 33463
index = Index([1, 5, 10], name="test")
left = pd.DataFrame({"left": ["a", "b", "c"], "left_time": [1, 4, 10]}, index=index)
right = pd.DataFrame({"right": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
result = merge_asof(left, right, **kwargs)
expected = pd.DataFrame(
{"left": ["a", "b", "c"], "left_time": [1, 4, 10], "right": [1, 3, 7]},
index=index,
)
tm.assert_frame_equal(result, expected)
def test_merge_asof_numeric_column_in_index():
# GH#34488
left = pd.DataFrame({"b": [10, 11, 12]}, index=Index([1, 2, 3], name="a"))
right = pd.DataFrame({"c": [20, 21, 22]}, index=Index([0, 2, 3], name="a"))
result = merge_asof(left, right, left_on="a", right_on="a")
expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]})
tm.assert_frame_equal(result, expected)
def test_merge_asof_numeric_column_in_multiindex():
# GH#34488
left = pd.DataFrame(
{"b": [10, 11, 12]},
index=pd.MultiIndex.from_arrays([[1, 2, 3], ["a", "b", "c"]], names=["a", "z"]),
)
right = pd.DataFrame(
{"c": [20, 21, 22]},
index=pd.MultiIndex.from_arrays([[1, 2, 3], ["x", "y", "z"]], names=["a", "y"]),
)
result = merge_asof(left, right, left_on="a", right_on="a")
expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]})
tm.assert_frame_equal(result, expected)
def test_merge_asof_numeric_column_in_index_object_dtype():
# GH#34488
left = pd.DataFrame({"b": [10, 11, 12]}, index=Index(["1", "2", "3"], name="a"))
right = pd.DataFrame({"c": [20, 21, 22]}, index=Index(["m", "n", "o"], name="a"))
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, left_on="a", right_on="a")
left = left.reset_index().set_index(["a", "b"])
right = right.reset_index().set_index(["a", "c"])
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, left_on="a", right_on="a")
def test_merge_asof_array_as_on():
# GH#42844
right = pd.DataFrame(
{
"a": [2, 6],
"ts": [pd.Timestamp("2021/01/01 00:37"), pd.Timestamp("2021/01/01 01:40")],
}
)
ts_merge = pd.date_range(
start=pd.Timestamp("2021/01/01 00:00"), periods=3, freq="1h"
)
left = pd.DataFrame({"b": [4, 8, 7]})
result = merge_asof(
left,
right,
left_on=ts_merge,
right_on="ts",
allow_exact_matches=False,
direction="backward",
)
expected = pd.DataFrame({"b": [4, 8, 7], "a": [np.nan, 2, 6], "ts": ts_merge})
tm.assert_frame_equal(result, expected)
result = merge_asof(
right,
left,
left_on="ts",
right_on=ts_merge,
allow_exact_matches=False,
direction="backward",
)
expected = pd.DataFrame(
{
"a": [2, 6],
"ts": [pd.Timestamp("2021/01/01 00:37"), pd.Timestamp("2021/01/01 01:40")],
"b": [4, 8],
}
)
tm.assert_frame_equal(result, expected)
def test_merge_asof_raise_for_duplicate_columns():
# GH#50102
left = pd.DataFrame([[1, 2, "a"]], columns=["a", "a", "left_val"])
right = pd.DataFrame([[1, 1, 1]], columns=["a", "a", "right_val"])
with pytest.raises(ValueError, match="column label 'a'"):
merge_asof(left, right, on="a")
with pytest.raises(ValueError, match="column label 'a'"):
merge_asof(left, right, left_on="a", right_on="right_val")
with pytest.raises(ValueError, match="column label 'a'"):
merge_asof(left, right, left_on="left_val", right_on="a")
@pytest.mark.parametrize(
"dtype",
[
"Int64",
pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow")),
pytest.param("timestamp[s][pyarrow]", marks=td.skip_if_no("pyarrow")),
],
)
def test_merge_asof_extension_dtype(dtype):
# GH 52904
left = pd.DataFrame(
{
"join_col": [1, 3, 5],
"left_val": [1, 2, 3],
}
)
right = pd.DataFrame(
{
"join_col": [2, 3, 4],
"right_val": [1, 2, 3],
}
)
left = left.astype({"join_col": dtype})
right = right.astype({"join_col": dtype})
result = merge_asof(left, right, on="join_col")
expected = pd.DataFrame(
{
"join_col": [1, 3, 5],
"left_val": [1, 2, 3],
"right_val": [np.nan, 2.0, 3.0],
}
)
expected = expected.astype({"join_col": dtype})
tm.assert_frame_equal(result, expected)
def test_merge_asof_read_only_ndarray():
# GH 53513
left = pd.Series([2], index=[2], name="left")
right = pd.Series([1], index=[1], name="right")
# set to read-only
left.index.values.flags.writeable = False
right.index.values.flags.writeable = False
result = merge_asof(left, right, left_index=True, right_index=True)
expected = pd.DataFrame({"left": [2], "right": [1]}, index=[2])
tm.assert_frame_equal(result, expected)
----------------------------------------------------------------
blob_id: 177fe577129058a1c93f1b34d25566ce76708bc1
directory_id: 47d69d21f53333d93d5ba9973840ef192808a090
path: /src/tox/config/cli/ini.py
content_id: adcee2cb75707d4dd4bc82fbde204e6457edb714
detected_licenses: ["MIT"]
license_type: permissive
repo_name: tox-dev/tox
snapshot_id: 27ce3072e7faf5c88ed5305bbd66359369bba13d
revision_id: da0885cd162fb02de866831a75eca9dcfe87eb36
branch_name: refs/heads/main
visit_date: 2023-09-01T11:45:18.097559
revision_date: 2023-08-31T14:51:57
committer_date: 2023-08-31T14:51:57
github_id: 68,465,360
star_events_count: 3,512
fork_events_count: 624
gha_license_id: MIT
gha_event_created_at: 2023-09-11T20:58:32
gha_created_at: 2016-09-17T16:54:22
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,166
extension: py
filename: ini.py
content:
"""Provides configuration values from tox.ini files."""
from __future__ import annotations
import logging
import os
from configparser import ConfigParser
from pathlib import Path
from typing import Any, ClassVar
from platformdirs import user_config_dir
from tox.config.loader.api import ConfigLoadArgs
from tox.config.loader.ini import IniLoader
from tox.config.source.ini_section import CORE
DEFAULT_CONFIG_FILE = Path(user_config_dir("tox")) / "config.ini"
class IniConfig:
TOX_CONFIG_FILE_ENV_VAR = "TOX_USER_CONFIG_FILE"
STATE: ClassVar[dict[bool | None, str]] = {None: "failed to parse", True: "active", False: "missing"}
def __init__(self) -> None:
config_file = os.environ.get(self.TOX_CONFIG_FILE_ENV_VAR, None)
self.is_env_var = config_file is not None
self.config_file = Path(config_file if config_file is not None else DEFAULT_CONFIG_FILE)
self._cache: dict[tuple[str, type[Any]], Any] = {}
self.has_config_file: bool | None = self.config_file.exists()
self.ini: IniLoader | None = None
if self.has_config_file:
self.config_file = self.config_file.absolute()
try:
parser = ConfigParser(interpolation=None)
with self.config_file.open() as file_handler:
parser.read_file(file_handler)
self.has_tox_section = parser.has_section(CORE.key)
if self.has_tox_section:
self.ini = IniLoader(CORE, parser, overrides=[], core_section=CORE)
except Exception as exception: # noqa: BLE001
logging.error("failed to read config file %s because %r", config_file, exception) # noqa: TRY400
self.has_config_file = None
def get(self, key: str, of_type: type[Any]) -> Any:
cache_key = key, of_type
if cache_key in self._cache:
result = self._cache[cache_key]
else:
try:
                if self.ini is None: # pragma: no cover # this can only happen if we don't call __bool__ first
result = None
else:
source = "file"
args = ConfigLoadArgs(chain=[key], name=CORE.prefix, env_name=None)
value = self.ini.load(key, of_type=of_type, conf=None, factory=None, args=args)
result = value, source
except KeyError: # just not found
result = None
except Exception as exception: # noqa: BLE001
logging.warning("%s key %s as type %r failed with %r", self.config_file, key, of_type, exception)
result = None
self._cache[cache_key] = result
return result
def __bool__(self) -> bool:
return bool(self.has_config_file) and bool(self.has_tox_section)
@property
def epilog(self) -> str:
# text to show within the parsers epilog
return (
f"{os.linesep}config file {str(self.config_file)!r} {self.STATE[self.has_config_file]} "
f"(change{'d' if self.is_env_var else ''} via env var {self.TOX_CONFIG_FILE_ENV_VAR})"
)
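# Illustration of the file this loader consumes (the key below is just an
# example of a stored CLI default, not an authoritative list). With the
# defaults above, a user-level config -- typically ~/.config/tox/config.ini on
# Linux, or whatever TOX_USER_CONFIG_FILE points at -- is parsed, and only its
# [tox] section is used:
#
#     [tox]
#     colored = yes
#
# IniConfig() is then truthy, and get("colored", of_type=str) returns the
# parsed value together with the "file" source marker set above.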
----------------------------------------------------------------
blob_id: 419d9ca7a27be52f3d249250cbeb088d0ba7e765
directory_id: c83ea832a92bc6622dd851c46871fa6e5c9ad2f5
path: /_scripts/dependency_update.py
content_id: af634b286d8f58b3b6eb503db74e314e486e15bb
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: drycc/workflow
snapshot_id: c57add44e765df372f632ae03f21e4bad100f15a
revision_id: 621c6039b54d135dc14e3fe6d31ab8724bc13f09
branch_name: refs/heads/main
visit_date: 2023-08-29T06:58:25.784297
revision_date: 2023-08-28T01:21:20
committer_date: 2023-08-28T01:21:20
github_id: 166,907,435
star_events_count: 471
fork_events_count: 107
gha_license_id: Apache-2.0
gha_event_created_at: 2023-06-21T07:11:45
gha_created_at: 2019-01-22T01:43:07
gha_language: SCSS
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 817
extension: py
filename: dependency_update.py
content:
import sys
import yaml
import requests
from contextlib import closing
def load_index(url):
with closing(requests.get(url)) as response:
return yaml.load(response.text, Loader=yaml.Loader)
def load_requirements(requirements_file):
with open(requirements_file) as f:
return yaml.load(f, Loader=yaml.Loader)
def dump_requirements(requirements_file, requirements):
with open(requirements_file, "w") as f:
return yaml.dump(requirements, stream=f, Dumper=yaml.Dumper)
def update_dependencies(requirements, url):
for dependency in requirements["dependencies"]:
dependency["repository"] = url
if __name__ == "__main__":
requirements = load_requirements(sys.argv[2])
update_dependencies(requirements, sys.argv[1])
dump_requirements(sys.argv[2], requirements)
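# Invocation sketch, following the argument order in the __main__ block above:
# sys.argv[1] is the repository URL every dependency should point at, and
# sys.argv[2] is the requirements YAML that gets rewritten in place. Both
# values below are illustrative placeholders.
#
#     python dependency_update.py https://charts.example.org/stable charts/workflow/requirements.yaml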
----------------------------------------------------------------
blob_id: af2f0bec5f1f539c314335282728abdb473d1594
directory_id: 3cd9fc36f4abba93bffb11dc43f145db6c6f5408
path: /azure-iot-device/azure/iot/device/common/auth/__init__.py
content_id: f588624512ebe0229b1437642c406e66eedfabba
detected_licenses: ["LicenseRef-scancode-generic-cla", "MIT"]
license_type: permissive
repo_name: Azure/azure-iot-sdk-python
snapshot_id: 457eb035e772268559ee8fa3310c210c84e52aa6
revision_id: 5d343d5904aaa98c6a88101e0dc40263acff4db2
branch_name: refs/heads/main
visit_date: 2023-09-01T05:19:57.710222
revision_date: 2023-08-28T16:52:26
committer_date: 2023-08-28T16:52:26
github_id: 70,936,068
star_events_count: 441
fork_events_count: 438
gha_license_id: MIT
gha_event_created_at: 2023-08-28T16:52:28
gha_created_at: 2016-10-14T18:17:15
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 317
extension: py
filename: __init__.py
content:
from .signing_mechanism import SymmetricKeySigningMechanism # noqa: F401
# NOTE: Please import the connection_string and sastoken modules directly
# rather than through the package interface, as the modules contain many
# related items for their respective domains, which we do not wish to expose
# at length here.
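# For example, the direct-import style the note above recommends (the module
# names come from the note itself; the members they expose are not listed here
# and would need to be checked in those modules):
#
#     from azure.iot.device.common.auth import connection_string, sastoken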
----------------------------------------------------------------
blob_id: be3ce58c6eaa893c18111fbb63b3c50b9cfddaac
directory_id: 94c0d1574ad8ba81a1ef0d48020b92ba681a5c6a
path: /bin/snakePipes
content_id: 9930daecf5ed7781a6b18684de02300566946d88
detected_licenses: ["MIT"]
license_type: permissive
repo_name: maxplanck-ie/snakepipes
snapshot_id: 650de654c8bb6b197743d5bb59628df2d91d3a79
revision_id: 6144e3fdc1bdaa26e05b1cb234df7414c61e283a
branch_name: refs/heads/master
visit_date: 2023-09-05T09:25:33.130890
revision_date: 2023-06-05T13:38:57
committer_date: 2023-06-05T13:38:57
github_id: 54,579,435
star_events_count: 318
fork_events_count: 91
gha_license_id: MIT
gha_event_created_at: 2023-08-22T12:07:49
gha_created_at: 2016-03-23T17:23:31
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 15,442
filename: snakePipes
content:
#!/usr/bin/env python
import sys
import argparse
import subprocess
import snakePipes
import os
import yaml
import glob
import hashlib
import shutil
import snakePipes.common_functions as cof
from snakePipes import __version__
def parse_arguments():
parser = argparse.ArgumentParser(
description="Setup and information script for snakePipes",
usage="$ snakePipes info",
)
subparsers = parser.add_subparsers(title="Commands", dest="command")
infoParser = subparsers.add_parser(
"info", help="Print the location of the various yaml files"
)
createEnvsParser = subparsers.add_parser(
"createEnvs",
        help="Create or update conda environments according to the "
"workflow-specific yaml files. Note that changing the snakemakeOptions: "
"option will result in ALL conda environments being recreated.",
)
envInfoParser = subparsers.add_parser(
"envInfo",
help="Prints the location in which each conda environment is actually stored.",
)
mex = createEnvsParser.add_mutually_exclusive_group()
mex.add_argument(
"--keepCondaDir",
action="store_true",
help="If specified, the `snakemakeOptions:` setting in "
"the workflow `defaults.yaml` files will NOT be overwritten. "
"This is typically unwise and only expert users should specify this.",
)
mex.add_argument(
"--condaDir",
help="If specified, use this as the base directory for the "
"created conda environments. This will ignore what is already "
"in the workflow-specific yaml files and where conda is installed.",
)
createEnvsParser.add_argument(
"--only",
nargs="+",
help="If specified, a space-separated list of environments to create. "
"This should typically only be done for testing purposes. The "
"possible environments are: {}".format(cof.set_env_yamls().keys()),
)
createEnvsParser.add_argument(
"--force",
action="store_true",
help="Force creation of conda environments, even if they apparently exist.",
)
createEnvsParser.add_argument(
"--info",
"-i",
action="store_true",
help="Only print the environments that would be created, don't actually create them.",
)
createEnvsParser.add_argument(
"--noSitePackages",
action="store_true",
        help="Prevent conda from looking at anything installed in a user's home "
"directory. While this is convenient to ensure that ONLY packages "
"installed by snakePipes are used, it means that you will "
"occasionally get scary-looking warning messages when you try to "
"create new conda environments.",
)
subparsers.add_parser(
"flushOrganisms",
help="Flush all installed organism YAML files. This is only advisable when performing a clean installation.",
)
subparsers.add_parser("version", help="Print the snakePipes version")
baseDir = os.path.dirname(snakePipes.__file__)
defaults = cof.load_configfile(
os.path.join(baseDir, "shared", "defaults.yaml"), False, "defaults"
)
configParser = subparsers.add_parser(
"config", help="Update snakePipes-wide values in defaults.yaml"
)
configParser.add_argument(
"--configMode",
        help="Choose between manual argument setup and recycling a config file from a previous installation (Default: %(default)s)",
choices=["manual", "recycle"],
default=defaults["configMode"],
)
configParser.add_argument(
"--oldConfig",
help="Provide an existing config file with the setup used in the previous installation. (Default: %(default)s)",
default=defaults["oldConfig"],
)
configParser.add_argument(
"--snakemakeOptions",
help="Update the options given to snakeMake. You MUST include --use-conda "
"and an appropriate --conda-prefix if you change this! (Default: %(default)s)",
default=defaults["snakemakeOptions"],
)
configParser.add_argument(
"--organismsDir",
help="The directory where global organism YAML files are to be stored. Both "
"absolute and relative paths are supported. In the latter case the "
"path is then relative to the snakePipes installation directory. (Default: %(default)s)",
default=defaults["organismsDir"],
)
configParser.add_argument(
"--clusterConfig",
help="The YAML file containing the snakeMake cluster command and global "
"memory settings. Both absolute and relative paths are supported. "
"In the latter case the path is then relative to the snakePipes "
"installation directory. (Default: %(default)s)",
default=defaults["clusterConfig"],
)
configParser.add_argument(
"--tempDir",
help="A custom directory where temporary files should be written. This "
"is ideally locally attached to your cluster nodes. "
"(Default: %(default)s)",
default=defaults["tempDir"],
)
configParser.add_argument(
"--noToolsVersion",
dest="toolsVersion",
help="By default, tool versions are printed to a workflow-specific file. Specifying this disables that behavior.",
action="store_false",
)
email = configParser.add_argument_group(
title="Email/SMTP options",
description="These options are only used if/when --emailAddress is used.",
)
email.add_argument(
"--smtpServer",
help="SMTP server address. (Default: %(default)s)",
default=defaults["smtpServer"],
)
email.add_argument(
"--smtpPort",
type=int,
help="The port on the SMTP server to use. A value of 0 will use the default SMTP port. (Default: %(default)s)",
default=defaults["smtpPort"],
)
email.add_argument(
"--onlySSL",
action="store_true",
help="If specified, only use SSL-enabled connections.",
)
email.add_argument(
"--emailSender",
help="The email address used to send emails. (Default: %(default)s)",
default=defaults["emailSender"],
)
email.add_argument(
"--smtpUsername",
help="For SMTP servers requiring a login, the username to use. (Default: %(default)s)",
default=defaults["smtpUsername"],
)
email.add_argument(
"--smtpPassword",
help="For SMTP servers requiring a login, the password to use. Note that this is stored in clear text! (Default: %(default)s)",
default=defaults["smtpPassword"],
)
return parser
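# A few example invocations of the parser defined above (every subcommand and
# flag shown is registered above; the path argument is illustrative only):
#
#     snakePipes info
#     snakePipes createEnvs --info
#     snakePipes config --tempDir /scratch/local
#     snakePipes version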
def flushOrganisms():
"""
Remove all organism YAML files.
"""
baseDir = os.path.dirname(snakePipes.__file__)
for f in glob.glob(os.path.join(baseDir, "shared/organisms/*.yaml")):
os.remove(f)
def info():
"""
Print the locations of EVERY yaml file. Break these up a bit so it's clear what they actually belong to. Print path to tempDir and check that it exists.
"""
baseDir = os.path.dirname(snakePipes.__file__)
cfg = cof.load_configfile(
os.path.join(baseDir, "shared", "defaults.yaml"), False, "defaults"
)
# Organism yaml files
print("Organism YAML files:")
orgDir = cfg["organismsDir"]
if not os.path.exists(orgDir):
orgDir = os.path.join(baseDir, orgDir)
for f in glob.glob(os.path.join(orgDir, "*.yaml")):
print(" {}".format(f))
# defaults.yaml under shared
print(
"\nThe workflow-generic defaults.yaml file is:\n {}".format(
os.path.join(baseDir, "shared/defaults.yaml")
)
)
# cluster.yaml
clusterConfig = cfg["clusterConfig"]
if not os.path.isfile(clusterConfig):
clusterConfig = os.path.join(baseDir, clusterConfig)
print(
"\nThe default cluster.yaml file. Its defaults are overridden by the per-workflow cluster.yaml files:\n {}".format(
clusterConfig
)
)
print("\nWorkflow-specific cluster.yaml and defaults.yaml files are in:")
for f in glob.glob(os.path.join(baseDir, "workflows/*/cluster.yaml")):
print(" {}".format(os.path.dirname(f)))
# tempDir
tempDir = cfg["tempDir"]
msg = ["\nTemp dir under {} ".format(tempDir)]
if os.path.isdir(tempDir):
msg.append("exists and will be used.")
else:
msg.append("does not exist and /tmp will be used instead.")
print("".join(msg))
def envInfo():
"""
    For each environment YAML file, print where its conda env is actually located.
"""
baseDir = os.path.dirname(snakePipes.__file__)
condaDir = os.environ.get("CONDA_PREFIX")
if "envs" in condaDir:
condaDir = os.path.dirname(condaDir)
else:
condaDir = os.path.join(condaDir, "envs")
f = open(os.path.join(baseDir, "shared/defaults.yaml"))
cf = yaml.load(f, Loader=yaml.FullLoader)
f.close()
_ = cf["snakemakeOptions"].split(" ")
idx = _.index("--conda-prefix")
condaEnvDir = _[idx + 1]
for env in cof.set_env_yamls().values():
# Hash the file ala snakemake
md5hash = hashlib.md5()
md5hash.update(condaEnvDir.encode())
f = open(os.path.join(baseDir, "shared/rules", env), "rb")
md5hash.update(f.read())
f.close()
h = md5hash.hexdigest()
print("{} is in:\n {}\n".format(env, os.path.join(condaEnvDir, h)))
def fixSitePy(envPath):
"""
    Prevent a snakePipes conda environment from using the user's site-packages
    by forcing ENABLE_USER_SITE = False in its site.py.
"""
for fname in glob.glob("{}/lib/python*/site.py".format(envPath)):
f = open(fname).read()
lines = f.split("\n")
lines = [
line
if not line.startswith("ENABLE_USER_SITE")
else "ENABLE_USER_SITE = False"
for line in lines
]
f = open(fname, "w")
f.write("\n".join(lines))
f.close()
cmd = [os.path.join(envPath, "bin", "python"), "-m", "compileall", fname]
subprocess.check_call(cmd)
def createCondaEnvs(args):
"""
Create all of the conda environments
"""
baseDir = os.path.dirname(snakePipes.__file__)
condaDir = os.environ.get("CONDA_PREFIX")
rootDir = condaDir
if "envs" in condaDir:
condaDir = os.path.dirname(condaDir)
else:
condaDir = os.path.join(condaDir, "envs")
f = open(os.path.join(baseDir, "shared/defaults.yaml"))
cf = yaml.load(f, Loader=yaml.FullLoader)
f.close()
_ = cf["snakemakeOptions"].split(" ")
try:
idx = _.index("--conda-prefix")
    except ValueError:  # "--conda-prefix" is not yet among the options
idx = len(_)
_.extend(["--conda-prefix", condaDir])
condaEnvDir = _[idx + 1]
if args.condaDir:
condaDirUse = args.condaDir
_[idx + 1] = condaDirUse
elif args.keepCondaDir:
condaDirUse = _[idx + 1]
else:
condaDirUse = condaDir
_[idx + 1] = condaDirUse
cf["snakemakeOptions"] = " ".join(_)
# rewrite defaults.yaml
cof.write_configfile(os.path.join(baseDir, "shared/defaults.yaml"), cf)
for envName, env in cof.set_env_yamls().items():
if args.only is not None and envName not in args.only:
continue
# Hash the file ala snakemake
md5hash = hashlib.md5()
md5hash.update(condaDirUse.encode())
f = open(os.path.join(baseDir, "shared/rules", env), "rb")
md5hash.update(f.read())
f.close()
h = md5hash.hexdigest()
sys.stderr.write(
"Creating environment from {} in {}\n".format(
os.path.join(baseDir, "shared/rules", env), condaDirUse
)
)
cmd = [
"mamba",
"env",
"create",
"--force",
"--file",
os.path.join(baseDir, "shared/rules", env),
]
if "--conda-prefix" in cf["snakemakeOptions"] and (
args.condaDir or args.keepCondaDir
):
cmd += ["--prefix", os.path.join(condaDirUse, h)]
else:
cmd += ["--name", h]
# Don't actually create the env if either --info is set or it already exists and --force is NOT set
if not args.info:
if not os.path.exists(os.path.join(condaDirUse, h)) or args.force:
try:
os.makedirs(os.path.join(condaDirUse, h), exist_ok=True)
subprocess.check_call(cmd)
                except Exception:
# Ensure an environment is fully removed on error
shutil.rmtree(os.path.join(condaDirUse, h), ignore_errors=False)
sys.exit("There was an error when creating the environments!\n")
# Ignore site-packages
if args.noSitePackages and not args.info:
fixSitePy(os.path.join(condaDirUse, h))
    # Also disable user site-packages in the root (currently active) environment
if args.noSitePackages and not args.info:
fixSitePy(rootDir)
def updateConfig(args):
"""Update the global defaults"""
baseDir = os.path.dirname(snakePipes.__file__)
# Load, update and rewrite the default dictionary
currentDict = cof.load_configfile(
os.path.join(baseDir, "shared", "defaults.yaml"), False, "Default Config"
)
if args.configMode == "manual":
d = {
"snakemakeOptions": args.snakemakeOptions,
"organismsDir": args.organismsDir,
"clusterConfig": args.clusterConfig,
"tempDir": args.tempDir,
"smtpServer": args.smtpServer,
"smtpPort": args.smtpPort,
"onlySSL": args.onlySSL,
"emailSender": args.emailSender,
"smtpUsername": args.smtpUsername,
"smtpPassword": args.smtpPassword,
"toolsVersion": args.toolsVersion,
"oldConfig": None,
"configMode": "manual",
}
elif args.configMode == "recycle":
oldConfig = args.oldConfig
if os.path.isfile(oldConfig):
d = cof.load_configfile(oldConfig, False, "Old Config")
if args.organismsDir:
od = {"organismsDir": args.organismsDir}
d.update(od)
if args.clusterConfig:
od = {"clusterConfig": args.clusterConfig}
d.update(od)
if not currentDict.keys() & d.keys():
sys.exit("The old and the new config have no matching keys!!!\n")
else:
sys.exit("Config file not found\n")
updatedDict = cof.merge_dicts(currentDict, d)
cof.write_configfile(os.path.join(baseDir, "shared", "defaults.yaml"), updatedDict)
    # Reload purely to report the final configuration; the return value is not needed.
    cof.load_configfile(
        os.path.join(baseDir, "shared", "defaults.yaml"), True, "Final Updated Config"
    )
def version():
print("version {}".format(__version__))
def main(args):
args = parse_arguments().parse_args(args)
if args.command == "info":
info()
elif args.command == "envInfo":
envInfo()
elif args.command == "flushOrganisms":
flushOrganisms()
elif args.command == "config":
updateConfig(args)
elif args.command == "version":
version()
else:
createCondaEnvs(args)
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.argv.append("--help")
main(sys.argv[1:])
|
|
1eda102ad5c6a1fd859247c3bfd6a482875751d2
|
c2c212ba42ebfa35f3b6122344978bc94ec8fa67
|
/tests/test_eatingwell.py
|
58d76cd39f9b0960fe292af02c03a3de01381634
|
[
"MIT"
] |
permissive
|
hhursev/recipe-scrapers
|
0cd6b7db4ef23ca825f2354f5d1ba76076a14813
|
8ced0227b3b16c532fc5ebf3060c99ee0452adab
|
refs/heads/main
| 2023-09-03T07:33:29.684121
| 2023-09-01T21:15:50
| 2023-09-01T21:15:50
| 42,446,168
| 1,276
| 443
|
MIT
| 2023-09-14T16:34:09
| 2015-09-14T12:05:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,760
|
py
|
test_eatingwell.py
|
from recipe_scrapers.eatingwell import EatingWell
from tests import ScraperTest
class TestEatingWell(ScraperTest):
scraper_class = EatingWell
def test_host(self):
self.assertEqual("eatingwell.com", self.harvester_class.host())
def test_title(self):
self.assertEqual(
self.harvester_class.title(), "Cheesy Ground Beef & Cauliflower Casserole"
)
def test_author(self):
self.assertEqual(self.harvester_class.author(), "Carolyn Casner")
def test_yields(self):
self.assertEqual("6 servings", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://www.eatingwell.com/thmb/32d8L6W6cwt652tjjXAHosP3ViE=/1500x0/filters:no_upscale():max_bytes(150000):strip_icc()/cheesy-ground-beef-and-cauliflower-casserole-8791b22c92404d958e2ac5aa92af8aa7.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertEqual(
[
"1 tablespoon extra-virgin olive oil",
"0.5 cup chopped onion",
"1 medium green bell pepper, chopped",
"1 pound lean ground beef",
"3 cups bite-size cauliflower florets",
"3 cloves garlic, minced",
"2 tablespoons chili powder",
"2 teaspoons ground cumin",
"1 teaspoon dried oregano",
"0.5 teaspoon salt",
"0.25 teaspoon ground chipotle",
"1 (15 ounce) can no-salt-added petite-diced tomatoes",
"2 cups shredded extra-sharp Cheddar cheese",
"0.333 cup sliced pickled jalapeños",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
return self.assertEqual(
"""Position rack in upper third of oven. Preheat broiler to high.
Heat oil in a large oven-safe skillet over medium heat. Add onion and bell pepper; cook, stirring, until softened, about 5 minutes. Add beef and cauliflower; cook, stirring and breaking the beef up into smaller pieces, until it is no longer pink, 5 to 7 minutes. Stir in garlic, chili powder, cumin, oregano, salt and chipotle; cook until fragrant, about 1 minute. Add tomatoes and their juices; bring to a simmer and cook, stirring occasionally, until liquid is reduced and the cauliflower is tender, about 3 minutes more. Remove from heat.
Sprinkle cheese over the beef mixture and top with sliced jalapeños. Broil until the cheese is melted and browned in spots, 2 to 3 minutes.""",
self.harvester_class.instructions(),
)
def test_total_time(self):
return self.assertEqual(30, self.harvester_class.total_time())
|
21c4ca6fc0dd624f08e592810042e3be97a2128f
|
5e255ad1360c90478393744586663741a9569c21
|
/tests/models/serialize_test_case.py
|
13de0bcdc71f3fcd911340216a1b137e0bf0cdd5
|
[
"Apache-2.0"
] |
permissive
|
line/line-bot-sdk-python
|
d76268e8b542060d6eccbacc5dbfab16960ecc35
|
cffd35948238ae24982173e30b1ea1e595bbefd9
|
refs/heads/master
| 2023-08-31T22:12:31.698183
| 2023-08-28T01:10:09
| 2023-08-28T01:10:09
| 70,553,423
| 1,898
| 1,181
|
Apache-2.0
| 2023-09-11T05:14:07
| 2016-10-11T03:42:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,551
|
py
|
serialize_test_case.py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals, absolute_import
import sys
import unittest
from numbers import Number
from linebot.models import (
Base,
)
from linebot.utils import to_camel_case
PY3 = sys.version_info[0] == 3
class SerializeTestCase(unittest.TestCase):
MESSAGE = 'message'
STICKER = 'sticker'
POSTBACK = 'postback'
CAMERA = 'camera'
CAMERA_ROLL = 'cameraRoll'
DATETIMEPICKER = 'datetimepicker'
URI = 'uri'
LOCATION = 'location'
FLEX = 'flex'
GENDER = "gender"
APP_TYPE = "appType"
AGE = "age"
AREA = "area"
SUBSCRIPTION_PERIOD = "subscriptionPeriod"
SPAN = 'span'
BUBBLE = 'bubble'
CAROUSEL = 'carousel'
BOX = 'box'
BUTTON = 'button'
FILLER = 'filler'
ICON = 'icon'
TEXT = 'text'
IMAGE = 'image'
VIDEO = 'video'
AUDIO = 'audio'
SEPARATOR = 'separator'
IMAGEMAP = 'imagemap'
ACTION = 'action'
TEMPLATE = 'template'
BUTTONS = 'buttons'
CONFIRM = 'confirm'
IMAGE_CAROUSEL = 'image_carousel'
LINEAR_GRADIENT = 'linearGradient'
def serialize_as_dict(self, obj, type=None):
if isinstance(obj, Base):
return obj.as_json_dict()
elif isinstance(obj, dict):
ret = {to_camel_case(k): self.serialize_as_dict(v) for k, v in obj.items()}
if type is not None:
ret['type'] = type
return ret
elif isinstance(obj, list):
return [self.serialize_as_dict(elem) for elem in obj]
else:
if PY3:
self.assertIsInstance(obj, (str, bool, Number))
else:
self.assertIsInstance(obj, (basestring, bool, Number)) # noqa
return obj
class ConstError(TypeError):
pass
def __setattr__(self, name, value):
if name in SerializeTestCase.__dict__:
raise self.ConstError("Can't rebind const (%s)" % name)
self.__dict__[name] = value
|
dbec761712613088eeb631e0e473acaf71f39dd5
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoPPS/Local/test/re_alignment/reco_align_corr_cfg.py
|
d98a81602518c0561c2dca0255da062515bfc074
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
reco_align_corr_cfg.py
|
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('ReAlignment', eras.Run2_2018)
# minimum of logs
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
threshold = cms.untracked.string('WARNING')
)
)
# raw data source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("file://output_base.root"),
)
# load alignment correction
process.load("CalibPPS.ESProducers.ctppsRPAlignmentCorrectionsDataESSourceXML_cfi")
process.ctppsRPAlignmentCorrectionsDataESSourceXML.RealFiles = cms.vstring(
"RecoPPS/Local/test/re_alignment/align_corr.xml"
)
process.esPreferLocalAlignment = cms.ESPrefer("CTPPSRPAlignmentCorrectionsDataESSourceXML", "ctppsRPAlignmentCorrectionsDataESSourceXML")
# track re-alignment module
process.load("RecoPPS.Local.ppsLocalTrackLiteReAligner_cfi")
# track plotter
process.ctppsTrackDistributionPlotter = cms.EDAnalyzer("CTPPSTrackDistributionPlotter",
tagTracks = cms.InputTag("ppsLocalTrackLiteReAligner"),
outputFile = cms.string("output_tracks_corr.root")
)
# processing sequences
process.path = cms.Path(
process.ppsLocalTrackLiteReAligner
* process.ctppsTrackDistributionPlotter
)
|
3087d0c6213721a255ef0bf71d9b4cdb49bc1ea0
|
0d543b6f877114fc7ff7f5c2485230f606f6d98d
|
/2020/7.py
|
2c0751a494ad589dec912a6a33716a40f8980063
|
[] |
no_license
|
jonathanpaulson/AdventOfCode
|
eca9d1732ec80dd640d6eed01b3a18d3b3ee455b
|
215f18d7d5b9761ec181954d2e62b6fed3bd12f5
|
refs/heads/master
| 2023-01-08T00:25:09.651009
| 2022-12-25T05:39:11
| 2022-12-25T05:39:11
| 321,228,487
| 227
| 103
| null | 2022-12-01T09:31:36
| 2020-12-14T04:03:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,910
|
py
|
7.py
|
import fileinput
import re
from collections import deque, defaultdict
# For part1, we need to keep track of which bags have other bags as containers
# For part2, we need to keep track of which bags (and how many!) each bag contains
# We can think of both of these as graphs where the vertices are the bags and the edges
# are which bags might contain us (part1) and which bags and how many of each we contain (part2)
PARENTS = defaultdict(list) # PARENTS[x] are the bags that contain x
CONTENTS = defaultdict(list) # CONTENTS[x] are which bags x contains and how many of each
target = 'shinygoldbag'
lines = list(fileinput.input())
lines.append('')
for line in lines:
line = line.strip()
if line:
words = line.split()
container = words[0]+words[1]+words[2]
container = container[:-1] # remove trailing 's' in 'bags'
if words[-3] == 'no': # doesn't contain any other bags
continue
else:
idx = 4
while idx < len(words):
bag = words[idx]+words[idx+1]+words[idx+2]+words[idx+3]
if bag.endswith('.'):
bag = bag[:-1]
if bag.endswith(','):
bag = bag[:-1]
if bag.endswith('s'):
bag = bag[:-1]
n = int(bag[0])
assert bag[1] not in '0123456789'
while any([bag.startswith(d) for d in '0123456789']):
bag = bag[1:]
PARENTS[bag].append(container)
CONTENTS[container].append((n, bag))
idx += 4
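# Illustrative example (not from the puzzle input): the rule
#   "light red bags contain 1 bright white bag, 2 muted yellow bags."
# is parsed into
#   CONTENTS['lightredbag'] == [(1, 'brightwhitebag'), (2, 'mutedyellowbag')]
# and 'lightredbag' is appended to PARENTS of both contained bags.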
SEEN = set()
Q = deque([target])
while Q:
x = Q.popleft()
if x in SEEN:
continue
SEEN.add(x)
for y in PARENTS[x]:
Q.append(y)
print(len(SEEN)-1)
def size(bag):
ans = 1
for (n,y) in CONTENTS[bag]:
ans += n*size(y)
return ans
print(size(target)-1)
|
498cb1dafd839530071432e0d9770c1e42427109
|
26bbcfdb811f7df13f7b5a95ba551da7adac4e9b
|
/src/certfuzz/bff/errors.py
|
b9af9dbb3cf541cb8899e7c8e8e4064d5f02cf97
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
CERTCC/certfuzz
|
080c3a5448a39d02049253fad96498ba50191586
|
892dae8676535b0ae5b77eea95ffbc21e9e1c959
|
refs/heads/develop
| 2022-11-11T06:12:09.032184
| 2020-06-10T19:57:26
| 2020-06-10T19:57:26
| 20,684,363
| 161
| 25
|
NOASSERTION
| 2023-05-10T14:27:00
| 2014-06-10T12:29:53
|
Python
|
UTF-8
|
Python
| false
| false
| 129
|
py
|
errors.py
|
'''
Created on Apr 4, 2014
@author: adh
'''
from certfuzz.errors import CERTFuzzError
class BFFerror(CERTFuzzError):
pass
|
09d065ea854cef3f32f52bae68fd344ba593531f
|
ddddaa700e4642f46a2c1e1e0271a7c8ea62ba0f
|
/harness/determined/deploy/errors.py
|
45e5557443e65bf7d28524c3031f9b4f05c0fc17
|
[
"Apache-2.0"
] |
permissive
|
determined-ai/determined
|
9d563cb5ffd074c88ee5edc9bf22ab9c3cb78c7e
|
8239b1993f4f44390f4e88901ffaf3b12429b83c
|
refs/heads/main
| 2023-08-21T12:13:36.651298
| 2023-08-21T08:34:16
| 2023-08-21T08:34:16
| 253,846,879
| 2,531
| 330
|
Apache-2.0
| 2023-09-14T21:54:17
| 2020-04-07T16:12:29
|
Go
|
UTF-8
|
Python
| false
| false
| 94
|
py
|
errors.py
|
class PreflightFailure(Exception):
pass
class MasterTimeoutExpired(Exception):
pass
|
e019a436868e17cf4b8b66f7f82e83662584a8f8
|
aa793c2b787ff591f69147e2cc5e23d6c7b4d77e
|
/proxyclient/m1n1/asm.py
|
0ada7b5c8d6f7e62f3fb8b56b548e500c287a2d1
|
[
"MIT",
"BSD-3-Clause",
"OFL-1.1",
"GPL-2.0-only",
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
AsahiLinux/m1n1
|
8280a8342c407936beabda0f08a700759a636b05
|
6d0979e71e83f47c5da5fdb8c5e21eb1268d54e8
|
refs/heads/main
| 2023-08-22T20:52:30.090704
| 2023-08-21T14:16:08
| 2023-08-21T14:16:08
| 329,707,886
| 2,966
| 200
|
MIT
| 2023-09-07T10:19:39
| 2021-01-14T18:59:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,999
|
py
|
asm.py
|
# SPDX-License-Identifier: MIT
import os, tempfile, shutil, subprocess, re
from . import sysreg
__all__ = ["AsmException", "ARMAsm"]
uname = os.uname()
if uname.sysname == "Darwin":
DEFAULT_ARCH = "aarch64-linux-gnu-"
if uname.machine == "arm64":
TOOLCHAIN = "/opt/homebrew/opt/llvm/bin/"
else:
TOOLCHAIN = "/usr/local/opt/llvm/bin/"
USE_CLANG = "1"
else:
if uname.machine == "aarch64":
DEFAULT_ARCH = ""
else:
DEFAULT_ARCH = "aarch64-linux-gnu-"
USE_CLANG = "0"
TOOLCHAIN = ""
use_clang = os.environ.get("USE_CLANG", USE_CLANG).strip() == "1"
toolchain = os.environ.get("TOOLCHAIN", TOOLCHAIN)
if use_clang:
CC = toolchain + "clang --target=%ARCH"
LD = toolchain + "ld.lld"
OBJCOPY = toolchain + "llvm-objcopy"
OBJDUMP = toolchain + "llvm-objdump"
NM = toolchain + "llvm-nm"
else:
CC = toolchain + "%ARCHgcc"
LD = toolchain + "%ARCHld"
OBJCOPY = toolchain + "%ARCHobjcopy"
OBJDUMP = toolchain + "%ARCHobjdump"
NM = toolchain + "%ARCHnm"
class AsmException(Exception):
pass
class BaseAsm(object):
def __init__(self, source, addr = 0):
self.source = source
self._tmp = tempfile.mkdtemp() + os.sep
self.addr = addr
self.compile(source)
def _call(self, program, args):
subprocess.check_call(program.replace("%ARCH", self.ARCH) + " " + args, shell=True)
def _get(self, program, args):
return subprocess.check_output(program.replace("%ARCH", self.ARCH) + " " + args, shell=True).decode("ascii")
def compile(self, source):
for name, enc in sysreg.sysreg_fwd.items():
source = re.sub("\\b" + name + "\\b", f"s{enc[0]}_{enc[1]}_c{enc[2]}_c{enc[3]}_{enc[4]}", source)
self.sfile = self._tmp + "b.S"
with open(self.sfile, "w") as fd:
fd.write(self.HEADER + "\n")
fd.write(source + "\n")
fd.write(self.FOOTER + "\n")
self.ofile = self._tmp + "b.o"
self.elffile = self._tmp + "b.elf"
self.bfile = self._tmp + "b.b"
self.nfile = self._tmp + "b.n"
self._call(CC, f"{self.CFLAGS} -c -o {self.ofile} {self.sfile}")
self._call(LD, f"{self.LDFLAGS} --Ttext={self.addr:#x} -o {self.elffile} {self.ofile}")
self._call(OBJCOPY, f"-j.text -O binary {self.elffile} {self.bfile}")
self._call(NM, f"{self.elffile} > {self.nfile}")
with open(self.bfile, "rb") as fd:
self.data = fd.read()
with open(self.nfile) as fd:
for line in fd:
line = line.replace("\n", "")
addr, type, name = line.split()
addr = int(addr, 16)
setattr(self, name, addr)
self.start = self._start
self.len = len(self.data)
self.end = self.start + self.len
def objdump(self):
self._call(OBJDUMP, f"-rd {self.elffile}")
def disassemble(self):
output = self._get(OBJDUMP, f"-zd {self.elffile}")
for line in output.split("\n"):
if not line or line.startswith("/"):
continue
sl = line.split()
if not sl or sl[0][-1] != ":":
continue
yield line
def __del__(self):
if self._tmp:
shutil.rmtree(self._tmp)
self._tmp = None
class ARMAsm(BaseAsm):
ARCH = os.path.join(os.environ.get("ARCH", DEFAULT_ARCH))
CFLAGS = "-pipe -Wall -march=armv8.4-a"
if use_clang:
LDFLAGS = "-maarch64elf"
else:
LDFLAGS = "-maarch64linux"
HEADER = """
.text
.globl _start
_start:
"""
FOOTER = """
.pool
"""
if __name__ == "__main__":
import sys
code = """
ldr x0, =0xDEADBEEF
b test
mrs x0, spsel
svc 1
%s
test:
b test
ret
""" % (" ".join(sys.argv[1:]))
c = ARMAsm(code, 0x1238)
c.objdump()
assert c.start == 0x1238
if not sys.argv[1:]:
assert c.test == 0x1248
|
3157f1d3266d4351bf519b1275c842853ea6094f
|
76fb0a3cfc9d9362ab29174bd1d55e888ea4d7f6
|
/tfx/orchestration/portable/input_resolution/partition_utils_test.py
|
d54d22168fe429131054b37a1cdc5e7981366fd4
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/tfx
|
0cfc9c55171352ecc98c9dfa8ffe976c689d7073
|
1b328504fa08a70388691e4072df76f143631325
|
refs/heads/master
| 2023-08-30T11:56:50.894497
| 2023-08-29T22:47:19
| 2023-08-29T22:48:26
| 169,116,405
| 2,116
| 899
|
Apache-2.0
| 2023-09-14T21:51:42
| 2019-02-04T17:14:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,795
|
py
|
partition_utils_test.py
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.portable.input_resolution.partition_utils."""
import tensorflow as tf
from tfx.orchestration.portable.input_resolution import partition_utils
def partition(**kwargs):
return partition_utils.Partition(kwargs)
class PartitionUtilsTest(tf.test.TestCase):
def testCompositeKey(self):
k = partition(x=1, y=2, z=3)
self.assertEqual(k.dimensions, ('x', 'y', 'z'))
self.assertEqual(k.partial(['y', 'x']), (2, 1))
self.assertEqual(k.partial([]), ())
self.assertTrue(k)
self.assertEqual(partition(x=1, y=2), partition(y=2, x=1))
self.assertEqual(
partition(x=1, y=2) | partition(y=2, z=3),
partition(x=1, y=2, z=3))
self.assertEqual(
partition(x=1) | partition(y=2, z=3),
partition(x=1, y=2, z=3))
with self.assertRaises(ValueError):
partition(x=1, y=2) | partition(y=1, z=3) # pylint: disable=expression-not-assigned
def testNoPartition(self):
empty = partition_utils.NO_PARTITION
self.assertEqual(empty.dimensions, ())
self.assertEqual(empty.partial([]), ())
self.assertFalse(empty)
def testJoin(self):
def check(lhs, rhs, expected, merge_fn=lambda x, y: x + y):
with self.subTest(lhs=lhs, rhs=rhs, expected=expected):
result = partition_utils.join(
lhs, rhs, merge_fn=merge_fn)
self.assertEqual(result, expected)
check(
lhs=[(partition(), 'a'), (partition(), 'b')],
rhs=[(partition(), '1'), (partition(), '2')],
expected=[
(partition(), 'a1'),
(partition(), 'a2'),
(partition(), 'b1'),
(partition(), 'b2'),
]
)
check(
lhs=[(partition(), 'a'), (partition(), 'b')],
rhs=[],
expected=[]
)
check(
lhs=[],
rhs=[(partition(), '1'), (partition(), '2')],
expected=[]
)
check(
lhs=[(partition(x=1), 'x1'), (partition(x=2), 'x2')],
rhs=[(partition(y=1), 'y1'), (partition(y=2), 'y2')],
expected=[
(partition(x=1, y=1), 'x1y1'),
(partition(x=1, y=2), 'x1y2'),
(partition(x=2, y=1), 'x2y1'),
(partition(x=2, y=2), 'x2y2'),
]
)
check(
lhs=[(partition(x=1), 'a'), (partition(x=2), 'b')],
rhs=[(partition(x=1), 'pple'), (partition(x=2), 'anana')],
expected=[
(partition(x=1), 'apple'),
(partition(x=2), 'banana'),
]
)
check(
lhs=[(partition(x=1, z=1), 'x1'), (partition(x=2, z=2), 'x2')],
rhs=[(partition(y=1, z=1), 'y1'), (partition(y=2, z=2), 'y2')],
expected=[
(partition(x=1, y=1, z=1), 'x1y1'),
(partition(x=2, y=2, z=2), 'x2y2'),
]
)
check(
lhs=[
(partition(x=1, y=1), 'x1y1'),
(partition(x=1, y=2), 'x1y2'),
(partition(x=2, y=1), 'x2y1'),
(partition(x=2, y=2), 'x2y2'),
],
rhs=[
(partition(x=1, z=1), 'z1'),
(partition(x=2, z=2), 'z2'),
],
expected=[
(partition(x=1, y=1, z=1), 'x1y1z1'),
(partition(x=1, y=2, z=1), 'x1y2z1'),
(partition(x=2, y=1, z=2), 'x2y1z2'),
(partition(x=2, y=2, z=2), 'x2y2z2'),
]
)
check(
lhs=[
(partition(x=1, y=1), 'x1y1'),
(partition(x=1, y=2), 'x1y2'),
(partition(x=2, y=1), 'x2y1'),
(partition(x=2, y=2), 'x2y2'),
],
rhs=[
(partition(x=1, z=1), 'z1'),
(partition(x=1, z=2), 'z2'),
(partition(x=2, z=3), 'z3'),
(partition(x=2, z=4), 'z4'),
],
expected=[
(partition(x=1, y=1, z=1), 'x1y1z1'),
(partition(x=1, y=1, z=2), 'x1y1z2'),
(partition(x=1, y=2, z=1), 'x1y2z1'),
(partition(x=1, y=2, z=2), 'x1y2z2'),
(partition(x=2, y=1, z=3), 'x2y1z3'),
(partition(x=2, y=1, z=4), 'x2y1z4'),
(partition(x=2, y=2, z=3), 'x2y2z3'),
(partition(x=2, y=2, z=4), 'x2y2z4'),
]
)
if __name__ == '__main__':
tf.test.main()
|
6d2dd1c19d7b7185c6b169a82ec0b39ffa55f3e9
|
5c363c50c54175a982330ec888401b3e394373ab
|
/syne_tune/backend/simulator_backend/events.py
|
743d632b57349a5d187392d90f956b0ebb21eff3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
awslabs/syne-tune
|
b14fb008f63def6a172bea6cc451f4e1906647f5
|
c35686e1b5947d45384fd1d41a44e013da53ef43
|
refs/heads/main
| 2023-08-14T14:21:48.995716
| 2023-08-03T12:57:13
| 2023-08-03T12:57:13
| 417,499,108
| 313
| 47
|
Apache-2.0
| 2023-09-14T14:06:54
| 2021-10-15T12:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,404
|
py
|
events.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from dataclasses import dataclass
from typing import List, Tuple, Optional, Dict, Any
import heapq
@dataclass
class Event:
"""
Base class for events dealt with in the simulator.
"""
trial_id: int
@dataclass
class StartEvent(Event):
"""
Start training evaluation function for ``trial_id``. In fact, the function
is run completely, and ``OnTrialResultEvent`` events and one ``CompleteEvent``
are generated.
"""
@dataclass
class CompleteEvent(Event):
"""
Job for trial ``trial_id`` completes with status ``status``. This is registered
at the backend.
"""
status: str
@dataclass
class StopEvent(Event):
"""
Job for trial ``trial_id`` is stopped. This leads to all later events for
``trial_id`` to be deleted, and a new ``CompleteEvent``.
"""
@dataclass
class OnTrialResultEvent(Event):
"""
Result reported by some worker arrives at the backend and is registered
there.
"""
result: Dict[str, Any]
EventHeapType = List[Tuple[float, int, Event]]
class SimulatorState:
"""
Maintains the state of the simulator, in particular the event heap.
``event_heap`` is the priority queue for events, the key being ``(time, cnt)``,
where ``time`` is the event time, and ``cnt`` is a non-negative int used to
break ties. When an event is added, the ``cnt`` value is taken from
    ``events_added``. This means that ties are broken first-in, first-out.
"""
def __init__(
self, event_heap: Optional[EventHeapType] = None, events_added: int = 0
):
if event_heap is None:
event_heap = []
self.event_heap = event_heap
self.events_added = events_added
def push(self, event: Event, event_time: float):
"""
Push new event onto heap
:param event:
:param event_time:
"""
heapq.heappush(self.event_heap, (event_time, self.events_added, event))
self.events_added += 1
def remove_events(self, trial_id: int):
"""
Remove all events with trial_id equal to ``trial_id``.
:param trial_id:
"""
self.event_heap = [
elem for elem in self.event_heap if elem[2].trial_id != trial_id
]
heapq.heapify(self.event_heap)
def next_until(self, time_until: float) -> Optional[Tuple[float, Event]]:
"""
Returns (and pops) event on top of heap, if event time is <=
``time_until``. Otherwise, returns None.
:param time_until:
:return:
"""
result = None
if self.event_heap:
top_time, _, top_event = self.event_heap[0]
if top_time <= time_until:
heapq.heappop(self.event_heap)
result = (top_time, top_event)
return result
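# Minimal usage sketch (editorial addition, not part of the original module):
# two events pushed with the same timestamp come back in insertion order,
# because the heap key is (time, events_added).
if __name__ == "__main__":
    state = SimulatorState()
    state.push(StartEvent(trial_id=0), event_time=1.0)
    state.push(CompleteEvent(trial_id=0, status="Completed"), event_time=1.0)
    print(state.next_until(2.0))  # (1.0, StartEvent(trial_id=0))
    print(state.next_until(2.0))  # (1.0, CompleteEvent(trial_id=0, status='Completed'))
    print(state.next_until(2.0))  # None -- the heap is now empty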
|
1fd9ea05529b56a7cc34f2cf334b7ba4c2f15ca8
|
013e32a2789cf8dceb3a3dd142baa8cab0b421c5
|
/src/pydicom/encoders/pylibjpeg.py
|
5e3083b76e5aa14bda1e4d171d9ae8b79cae0e76
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pydicom/pydicom
|
59c7c1328a5fbfc3fe489c7bdde42d519f57d80c
|
f2add283f3f58296b67c4b73b53a10caa8609da8
|
refs/heads/main
| 2023-08-31T17:39:25.539975
| 2023-08-28T19:03:14
| 2023-08-28T19:28:53
| 14,006,067
| 1,539
| 406
|
NOASSERTION
| 2023-09-11T19:50:51
| 2013-10-31T02:58:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
pylibjpeg.py
|
# Copyright 2008-2021 pydicom authors. See LICENSE file for details.
"""Interface for *Pixel Data* encoding, not intended to be used directly."""
from typing import Any, cast
from pydicom.uid import RLELossless
try:
from pylibjpeg.utils import get_pixel_data_encoders
HAVE_PYLJ = True
except ImportError:
HAVE_PYLJ = False
ENCODER_DEPENDENCIES = {
RLELossless: ("numpy", "pylibjpeg", "pylibjpeg-rle"),
}
def encode_pixel_data(src: bytes, **kwargs: Any) -> bytes:
"""Return the encoded image data in `src`.
Parameters
----------
src : bytes
The raw image frame data to be encoded.
**kwargs
Parameters to pass to the encoder function.
Returns
-------
bytes
The encoded image data.
"""
encoder = get_pixel_data_encoders()[kwargs["transfer_syntax_uid"]]
return cast(bytes, encoder(src, **kwargs))
def is_available(uid: str) -> bool:
"""Return ``True`` if a pixel data encoder for `uid` is available for use,
``False`` otherwise.
"""
if not HAVE_PYLJ:
return False
return uid in get_pixel_data_encoders()
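# Hedged usage sketch (editorial; variable names are hypothetical). This module
# is normally driven by pydicom's encoder framework, which supplies the frame
# bytes and the keyword arguments:
#
#     if is_available(RLELossless):
#         encoded = encode_pixel_data(frame_bytes, transfer_syntax_uid=RLELossless, **frame_kwargs)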
|
59b760181c53f3e955d68d3bd9b9820d5664d575
|
389b75e672fde1f282bb80c3da798a59b90b41cd
|
/peartree/plot.py
|
c45e3f719e6adfc8dbf9e04f410a1c15ea22d021
|
[
"MIT"
] |
permissive
|
kuanb/peartree
|
ee18dd72c542a42e4761b6cb00b3cec2265c3eb2
|
494acf962b5d76f0759b400999619641a1f13301
|
refs/heads/master
| 2023-05-13T19:43:24.040544
| 2021-01-18T00:45:28
| 2021-01-18T00:45:28
| 110,452,875
| 192
| 23
|
MIT
| 2023-05-05T14:06:11
| 2017-11-12T17:22:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
plot.py
|
import networkx as nx
from .utilities import log
def generate_plot(G: nx.MultiDiGraph, use_agg=False):
# Load matplotlib only when plot requested
import matplotlib # noqa
if use_agg:
# Force matplotlib to not use any Xwindows backend
matplotlib.use('Agg')
# OSMnx is not a dependency anymore, so we should only allow the plot
# function to work as a convenience, if the user has already installed
# OSMnx
try:
import osmnx as ox # noqa
except ModuleNotFoundError:
log(('Optional dependency: OSMnx must be installed to use the '
'plot method in peartree'))
# TODO: Build out custom plotting configurations but,
# in the meantime, use OSMnx's plotting configurations
# since they work well for the current use case and I
# also plan on incorporating OSMnx into this library
# down the road so it isn't too extraneous an import.
fig, ax = ox.plot_graph(G,
figsize=(12,12),
show=False,
close=False,
node_color='#8aedfc',
node_size=5,
edge_color='#e2dede',
edge_alpha=0.25,
bgcolor='black')
return (fig, ax)
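# Hedged usage sketch (editorial; assumes a peartree-derived graph G exists):
#
#     fig, ax = generate_plot(G, use_agg=True)   # Agg backend for headless runs
#     fig.savefig("transit_network.png", dpi=150)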
|
3334621f0d4d84f7ade6a7f19417bfd8ca930a1d
|
59359e4821554f559c9ffc5bf1a7f52fff0c6051
|
/descarteslabs/core/catalog/tests/base.py
|
368fe5d4d29a31d385b25badcfc337352d93b3b1
|
[
"Apache-2.0"
] |
permissive
|
descarteslabs/descarteslabs-python
|
706acfc594721a1087872744c9cb72fe2b3d2e5b
|
a8a3859b8ced6d4478b93ff205caad06d508501d
|
refs/heads/master
| 2023-08-23T12:01:36.802085
| 2023-08-21T14:57:22
| 2023-08-21T15:20:01
| 84,609,153
| 176
| 49
|
NOASSERTION
| 2023-05-02T15:54:37
| 2017-03-10T23:27:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,298
|
py
|
base.py
|
# Copyright 2018-2023 Descartes Labs.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import re
import time
import unittest
import responses
from descarteslabs.auth import Auth
from ..catalog_client import CatalogClient
class ClientTestCase(unittest.TestCase):
not_found_json = {
"errors": [
{
"detail": "Object not found: foo",
"status": "404",
"title": "Object not found",
}
],
"jsonapi": {"version": "1.0"},
}
def setUp(self):
payload = (
base64.b64encode(
json.dumps(
{
"aud": "ZOBAi4UROl5gKZIpxxlwOEfx8KpqXf2c",
"exp": time.time() + 3600,
}
).encode()
)
.decode()
.strip("=")
)
public_token = f"header.{payload}.signature"
self.url = "https://example.com/catalog/v2"
self.client = CatalogClient(
url=self.url, auth=Auth(jwt_token=public_token, token_info_path=None)
)
self.match_url = re.compile(self.url)
def mock_response(self, method, json, status=200, **kwargs):
responses.add(method, self.match_url, json=json, status=status, **kwargs)
def get_request(self, index):
r = responses.calls[index].request
if r.body is None:
r.body = ""
elif isinstance(r.body, bytes):
r.body = r.body.decode()
return r
def get_request_body(self, index):
body = responses.calls[index].request.body
if body is None:
body = ""
elif isinstance(body, bytes):
body = body.decode()
return json.loads(body)
|
79b0a14dbb8daed9460268fb10e12b965b923873
|
6564f42640e11689c2ddb6b92325afe6fddc6a6f
|
/cumulusci/utils/git.py
|
2e822bf7d6edf902a510dc5ff03a4273a2b04043
|
[
"LicenseRef-scancode-free-unknown"
] |
permissive
|
SFDO-Tooling/CumulusCI
|
32d4509fa8a36905cfc84fd6283403fd7f4b78c4
|
9ccf3c9566f78c6e9102ac214db30470cef660c1
|
refs/heads/main
| 2023-08-18T04:53:55.733027
| 2023-08-11T20:52:08
| 2023-08-11T20:52:08
| 15,592,459
| 226
| 134
|
BSD-3-Clause
| 2023-09-14T05:09:26
| 2014-01-02T20:01:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,707
|
py
|
git.py
|
import pathlib
import re
from typing import Any, Optional, Tuple
def git_path(repo_root: str, tail: Any = None) -> Optional[pathlib.Path]:
"""Returns a Path to the .git directory in repo_root
with tail appended (if present) or None if repo_root is not set.
"""
path = None
if repo_root:
path = pathlib.Path(repo_root) / ".git"
if tail is not None:
path = path / str(tail)
return path
def current_branch(repo_root: str) -> Optional[str]:
if repo_root:
head_path = git_path(repo_root, "HEAD")
if head_path.exists():
branch_ref = head_path.read_text().strip()
if branch_ref.startswith("ref: "):
return "/".join(branch_ref[5:].split("/")[2:])
def is_release_branch(branch_name: str, prefix: str) -> bool:
"""A release branch begins with the given prefix"""
if not branch_name.startswith(prefix):
return False
parts = branch_name[len(prefix) :].split("__")
return len(parts) == 1 and parts[0].isdigit()
def is_release_branch_or_child(branch_name: str, prefix: str) -> bool:
if not branch_name.startswith(prefix):
return False
parts = branch_name[len(prefix) :].split("__")
return len(parts) >= 1 and parts[0].isdigit()
def get_feature_branch_name(branch_name: str, prefix: str) -> Optional[str]:
if branch_name.startswith(prefix):
return branch_name[len(prefix) :]
def get_release_identifier(branch_name: str, prefix: str) -> Optional[str]:
if is_release_branch_or_child(branch_name, prefix):
return get_feature_branch_name(branch_name, prefix).split("__")[0]
def construct_release_branch_name(prefix: str, release_identifier: str) -> str:
return f"{prefix}{release_identifier}"
def split_repo_url(url: str) -> Tuple[str, str]:
owner, name, _ = parse_repo_url(url)
return (owner, name)
def parse_repo_url(url: str) -> Tuple[str, str, str]:
"""Parses a given Github URI into Owner, Repo Name, and Host
Parameters
----------
url: str
A github URI. Examples: ["https://github.com/owner/repo/","https://github.com/owner/repo.git","git@github.com:owner/repo.git", "https://api.github.com/repos/owner/repo_name/"]
Returns
-------
Tuple: (str, str, str)
Returns (owner, name, host)
"""
url_parts = re.split("/|@|:", url.rstrip("/"))
name = url_parts[-1]
if name.endswith(".git"):
name = name[:-4]
owner = url_parts[-2]
host = url_parts[-3]
# Need to consider "https://api.github.com/repos/owner/repo/" pattern
if "http" in url_parts[0] and len(url_parts) > 6:
host = url_parts[-4]
return (owner, name, host)
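# Editorial usage sketch of the parsing behaviour documented above; the URLs
# are hypothetical examples, and the guard keeps module imports unaffected.
if __name__ == "__main__":
    assert parse_repo_url("git@github.com:owner/repo.git") == ("owner", "repo", "github.com")
    assert parse_repo_url("https://api.github.com/repos/owner/repo/") == ("owner", "repo", "api.github.com")
    assert split_repo_url("https://github.com/owner/repo/") == ("owner", "repo")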
|
7e3842e0471a910626abf39803656734ea5989d7
|
b21e53a2d47d2bc8ad943de5dfd001535f347a7e
|
/PyFiSync/main.py
|
9b05f8e510fce600fad6bca70bcc4112ba2da1cc
|
[
"MIT"
] |
permissive
|
Jwink3101/PyFiSync
|
1b8b08b6c15f6952f7cb1ef8ff9c0923d4fb71d0
|
086fa6e90fd647f020d570671ee1a300e7f9b722
|
refs/heads/master
| 2021-10-28T10:50:00.944630
| 2021-10-17T13:45:50
| 2021-10-17T13:46:09
| 139,068,551
| 113
| 15
|
MIT
| 2018-07-07T02:34:37
| 2018-06-28T21:09:40
|
Python
|
UTF-8
|
Python
| false
| false
| 36,581
|
py
|
main.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from io import open
__version__ = '20211017.0'
__author__ = 'Justin Winokur'
__license__ = 'MIT'
import os
import sys
import fnmatch
import subprocess
import time
import shutil
import datetime
import argparse
import itertools
import re
import copy
import json
if sys.version_info[0]<3:
range = xrange
else:
unicode = str
raw_input = input
# This will be fixed when it is installed
self_path = os.path.dirname(__file__)
if self_path not in sys.path:
sys.path.append(self_path)
from . import utils
from . import PFSwalk
from .dicttable import DictTable
from . import dry_run
from . import remote_interfaces
def init(path,remote='rsync'):
"""
    Initialize PyFiSync
"""
path = os.path.join(os.path.abspath(path),'.PyFiSync')
try:
os.makedirs(path)
except OSError:
pass # created by logger already?
cpath = os.path.join(path,'config')
if os.path.exists(cpath):
print("ERROR: Already a PyFiSync directory. Must remove '.PyFiSync' folder")
sys.exit(2)
with open(cpath,'w') as F:
F.write(utils.configparser.config_example(remote=remote))
txt = '-='*30 + '\n'
txt += '\n'
txt += ' Initialized new PyFiSync Directory. You must first\n'
txt += ' * Modify the config file (self-commented)\n'
txt += ' {cpath:s}\n'.format(cpath=cpath)
txt += ' * Perform a `reset --force` once configured\n'
txt += '=-'*30
log.add(txt)
def reset_tracking(backup=True,empty='reset',set_time=False):
""" Reset the tracking"""
global log,config,remote_interface
if getattr(config,'_DRYRUN',False):
log.add('(DRY-RUN) -- Reset Tracking')
return
remote = True
if len(getattr(config,'userhost','')) == 0 and config.remote =='rsync':
log.add('(local) B: {:s}'.format(config.pathB))
remote = False
else:
log.add('(remote) B: {:s}{:s}'.format(config.remote,config.pathB))
log.add('Parsing files for A')
attribA = config.prev_attributesA + config.move_attributesA + [a[0] for a in config.mod_attributes]
attribB = config.move_attributesB + config.prev_attributesB + [a[1] for a in config.mod_attributes]
PFSwalker = PFSwalk.file_list(config.pathA,config,log,
attributes=attribA,empty=empty,
use_hash_db=config.use_hash_db)
if remote:
# Walk local in background thread
loc_walk_thread = utils.ReturnThread(target=PFSwalker.files)
loc_walk_thread.daemon = True
loc_walk_thread.start()
log.add(' Parsing files for B (remote)')
log.prepend = ' '
filesB = remote_interface.file_list(attribB,empty=empty)
filesA = loc_walk_thread.join()
if filesB is None:
sys.stderr.write('Error on remote call. See logged warnings\n')
sys.exit(2)
log.prepend = ''
else:
filesA = PFSwalker.files()
log.add(' Parsing files for B (local)')
_tmp = PFSwalk.file_list(config.pathB,config,log,attributes=attribB,empty=empty,
use_hash_db=config.use_hash_db)
filesB = _tmp.files()
filesA_old = os.path.join(config.pathA,'.PyFiSync','filesA.old')
filesB_old = os.path.join(config.pathA,'.PyFiSync','filesB.old')
if backup:
try:
now = datetime.datetime.now().strftime('.%Y-%m-%d_%H%M%S')
shutil.move(filesA_old,filesA_old + now)
shutil.move(filesB_old,filesB_old + now)
txt = 'Moved:\n'
txt += ' {:s} --> {:s}\n'.format(filesA_old,filesA_old + now)
txt += ' {:s} --> {:s}\n'.format(filesB_old,filesB_old + now)
log.add(txt)
except:
pass # Not already there
# Dump the json (see http://stackoverflow.com/a/28032808/3633154)
with open(filesA_old,'w',encoding='utf8') as F:
data = json.dumps(filesA,ensure_ascii=False)
F.write(utils.to_unicode(data))
txt = 'saved ' + filesA_old
with open(filesB_old,'w',encoding='utf8') as F:
data = json.dumps(filesB,ensure_ascii=False)
F.write(utils.to_unicode(data))
txt = 'saved ' + filesB_old
# This is really *not* needed and slows things down but I will keep it
# for now
if config.exclude_if_present:
filesA = DictTable(filesA)
filesB = DictTable(filesB)
PFSwalk.exclude_if_present(filesA,filesB,config.exclude_if_present) # in place
log.space = 0
log.add('')
log.add(' Local: {}'.format(utils.file_summary(filesA)))
log.add(' Remote: {}'.format(utils.file_summary(filesB)))
if set_time:
timepath = os.path.join(config.pathA,'.PyFiSync','last_run.time')
with open(timepath,'w') as F:
F.write('{:0.8f}'.format(time.time()))
def main(mode):
"""
Main sync function
* Setup
* Get file lists
* Compare to old to determine moved and deleted
    * (the old version also determined modified files here, but this one does not)
* Determine deletions on both sides (with conflict resolution)
* Determine moves on both sides (with conflict resolution)
* Apply them theoretically so as to save a transfer. Everything will
be done in order later
* Determine transfers based on mtime on both sides ~~modified or new~~
* Apply moves/deletions/backups for real
* Apply transfers with rsync as the mechanism
* Get updated lists
"""
global log,config,remote_interface
txt = '\n'.join(
(r""" _____ ______ _ _____ """,
r""" | __ \ | ____(_)/ ____| """,
r""" | |__) | _| |__ _| (___ _ _ _ __ ___ """,
r""" | ___/ | | | __| | |\___ \| | | | '_ \ / __| """,
r""" | | | |_| | | | |____) | |_| | | | | (__ """,
r""" |_| \__, |_| |_|_____/ \__, |_| |_|\___| """,
r""" __/ | __/ | """,
r""" |___/ |___/ """))
log.line()
log.add(txt,end='\n')
log.line()
## Setup
T0 = time.time()
log.add('Start Time: ' +_unix_time(T0))
log.add('Mode: {:s}{:s}'.format(mode,' (DRY-RUN)' if config._DRYRUN else ''))
log.add('Version: ' + __version__)
timepath = os.path.join(config.pathA,'.PyFiSync','last_run.time')
config.last_run = float(open(timepath).read())
log.add('Last Run: ' + _unix_time(config.last_run))
log.add('\nPaths:')
log.add(' (local) A: {:s}'.format(config.pathA))
remote = True
if remote_interface is None and len(config.userhost) == 0:
log.add('(local) B: {:s}'.format(config.pathB))
remote = False
else:
log.add('(remote) B: {:s} ({:s})'.format(config.pathB,config.remote))
run_bash(pre=True)
log.line()
    log.add('Parsing current file lists')
log.add(' Parsing files for A (local)')
attribA = config.prev_attributesA + config.move_attributesA + [a[0] for a in config.mod_attributes]
attribB = config.move_attributesB + config.prev_attributesB + [a[1] for a in config.mod_attributes]
PFSwalker = PFSwalk.file_list(config.pathA,config,log,
attributes=attribA,empty='store',
use_hash_db=config.use_hash_db)
if remote:
# Multithread it
loc_walk_thread = utils.ReturnThread(target=PFSwalker.files)
loc_walk_thread.daemon = True
loc_walk_thread.start()
log.add(' Parsing files for B (remote)')
log.prepend = ' '
filesB = remote_interface.file_list(attribB,empty='store')
filesA = loc_walk_thread.join()
if filesB is None:
sys.stderr.write('Error on remote call. See logged warnings\n')
sys.exit(2)
log.prepend = ''
else:
filesA = PFSwalker.files()
log.add(' Parsing files for B (local)')
_tmp = PFSwalk.file_list(config.pathB,config,log,attributes=attribB,empty='store',
use_hash_db=config.use_hash_db)
filesB = _tmp.files()
## Get file lists
log.line()
log.add('Loading older file list (and applying exclusions if they have changed)')
filesA_old = os.path.join(config.pathA,'.PyFiSync','filesA.old')
filesB_old = os.path.join(config.pathA,'.PyFiSync','filesB.old')
with open(filesA_old,encoding='utf8') as F:
filesA_old = json.loads(F.read())
with open(filesB_old,encoding='utf8') as F:
filesB_old = json.loads(F.read())
filesA_old = PFSwalker.filter_old_list(filesA_old)
filesB_old = PFSwalker.filter_old_list(filesB_old)
log.line()
log.add('Creating DB objects')
filesA = DictTable(filesA )
filesB = DictTable(filesB )
filesA_old = DictTable(filesA_old)
filesB_old = DictTable(filesB_old)
if config.exclude_if_present:
PFSwalk.exclude_if_present(filesA,filesB,config.exclude_if_present) # in place
log.add('')
log.add(' Local: {}'.format(utils.file_summary(filesA)))
log.add(' Remote: {}'.format(utils.file_summary(filesB)))
## Compare to old to determine new, modified, deleted
log.line()
log.add('Using old file lists to determine moves and deletions\n')
log.prepend = ' '
file_track(filesA_old,filesA,config.prev_attributesA,config.move_attributesA)
file_track(filesB_old,filesB,config.prev_attributesB,config.move_attributesB)
## Determine deletions on both sides (with conflict resolution)
## Determine moves on both sides (with conflict resolution)
# Resolve Conflicts
move_queueA,move_queueB = compare_queue_moves(filesA,filesB,filesA_old,filesB_old)
log.prepend = ''
log.space = 0
log.line()
log.add('Apply file moves theoretically. Actual moves to be processed later')
## Apply them theoretically so as to save a transfer. Everything will
# be done in order later
move_queueA = apply_move_queues_theoretical(filesA,move_queueA,AB='A')
move_queueB = apply_move_queues_theoretical(filesB,move_queueB,AB='B')
## Determine transfers based on modified or new
log.line()
log.space = 0
log.add('Determining, resolving conflicts, and queueing file transfers\nbased on modification times\n')
log.space = 2
action_queueA,action_queueB,tqA2B,tqB2A = determine_file_transfers(
filesA,filesB)
## Apply moves/deletions/backups for real
log.space = 0
log.line()
log.add('Applying queues')
log.space = 2
if config._DRYRUN:
dry_run.apply_action_queue(move_queueA + action_queueA,log,config.nameA,config)
dry_run.apply_action_queue(move_queueB + action_queueB,log,config.nameB,config)
else:
apply_action_queue(config.pathA,move_queueA + action_queueA)
if remote:
remote_interface.apply_queue(move_queueB + action_queueB)
else:
apply_action_queue(config.pathB,move_queueB + action_queueB)
# We will use the rsync (via the ssh_rsync) interface.
if not remote:
config.persistant = False # Make sure this is off
remote_interface = remote_interfaces.ssh_rsync(config,log)
    log.space = 0
    log.prepend = ''
log.line()
log.add('Final Transfer')
# Show the numbers to be transfered
filesA2B = [filesA.query_one(path=f) for f in tqA2B]
filesB2A = [filesB.query_one(path=f) for f in tqB2A]
log.add(' Queued A >>> B: {}'.format(utils.file_summary(filesA2B)))
log.add(' Queued A <<< B: {}'.format(utils.file_summary(filesB2A)))
log.add('')
log.space=2
if config._DRYRUN:
dry_run.transfer(tqA2B,tqB2A,log,filesA,filesB)
else:
remote_interface.transfer(tqA2B,tqB2A)
## Get updated lists
log.space = 0
log.line()
log.add('Retrieving and saving updated file lists')
log.space = 2
reset_tracking(backup=False,empty='remove',set_time=True)
run_bash(pre=False)
log.space = 0
log.add_close()
def file_track(files_old,files_new,prev_attributes,move_attributes):
# Add certain fields to the DBs. Do it this way so that they get set with defaults
for file in files_new:
for attrib,val in zip(['newmod','new','untouched','moved','prev_path'],
[False, False,False ,False ,None,]):
file[attrib] = val
for file in files_old:
file['deleted'] = True
for attrib in ['newmod','new','untouched','moved','prev_path']:
files_new.add_fixed_attribute(attrib)
files_old.add_fixed_attribute('deleted')
files_new.reindex()
files_old.reindex()
# Main loop
for file in files_new.items():
# is it untouched
query_dict = {a:file[a] for a in prev_attributes + ['mtime']}
if query_dict in files_old:
file['prev_path'] = file['path']
file['untouched'] = True
files_old.query_one(query_dict)['deleted'] = False
continue
# is it the same exact file but modified?
# We do this as a separate check from the mtime of a moved file to
# account for cases when the file is marked as new via some attribute
# but was just modified (e.g. size,sha1)
query_dict = {a:file[a] for a in prev_attributes}
if query_dict in files_old:
# The mtime MUST have changed since it didn't match the past check
file['prev_path'] = file['path']
file['newmod'] = True
files_old.query_one(query_dict)['deleted'] = False
continue
# has it been moved?
query_dict = {a:file[a] for a in move_attributes}
if query_dict in files_old:
# file was moved
file_old = files_old.query_one(query_dict)
file['prev_path'] = file_old['path']
file['moved'] = True
file_old['deleted'] = False
# Was it also modified?
if not file_old['mtime'] == file['mtime']:
file['newmod'] = True
continue
# It must be new
# Note that the paths may remain the same, but a file could have been
# moved/deleted and a new one created there
file['newmod'] = True
file['new'] = True
# Reindex the DBs
files_old.reindex()
files_new.reindex()
def compare_queue_moves(filesA,filesB,filesA_old,filesB_old):
"""
Compare the moves and generate a move queue
action queues look like a list of dictionaries:
        {'backup': file_path}    # Make a copy in the backup directory
        {'move': [src, dest]}    # Move the file
        {'delete': file_path}    # Move the file into the backup. Essentially a backup
queues are always performed in the following order:
* move
* backup -- if a path is moved it can later be backed up
* delete
transfer queue
"""
global log,tqA2B,tqB2A
tqA2B = []
tqB2A = []
queueA = []
queueB = []
log.space = 0
log.add('Comparing, resolving, and queuing file DELETIONS.\n')
log.space = 2
    txt = 'WARNING: File deleted on {AB:s} but moved there, new,\n'
txt += ' or modified on {BA:s}. Ignore delete and add to\n'
txt += ' transfers\n'
txt += ' File: {path:s}'
# Process deletions on A.
for fileA_old in filesA_old.query(deleted=True):
path = fileA_old['path']
fileB = filesB.query_one(path=path) # new
if fileB is None:
continue # Already deleted or moved
if fileB['newmod'] or fileB['moved'] or fileB['new']:
log.add(txt.format(path=path,AB='A',BA='B'))
tqB2A.append(fileB['path'])
continue
# Some programs write a new file on save
if config.check_new_on_delete and ({'path':path,'new':True} in filesA):
continue
# Delete file B and apply it before comparing moves later
queueB.append({'delete':path} )
filesB.remove(path=path)
# Process deletions on B
for fileB_old in filesB_old.query(deleted=True):
path = fileB_old['path']
fileA = filesA.query_one(path=path) # new
if fileA is None:
continue # Already deleted or moved
if fileA['newmod'] or fileA['moved'] or fileA['new']:
log.add(txt.format(path=path,AB='B',BA='A'))
            tqA2B.append(fileA['path'])  # A's surviving copy is sent back to B
continue
# Some programs write a new file on save
if config.check_new_on_delete and ({'path':path,'new':True} in filesB):
continue
# Delete file B and apply it before comparing moves later
queueA.append({'delete':path} )
filesA.remove(path=path)
# We loop through all possible prev_paths and handle it that way
# Every file that is marked as moved also has a prev path
prev_paths = set(fileA['prev_path'] for fileA in filesA.query(moved=True))
prev_paths.update(fileB['prev_path'] for fileB in filesB.query(moved=True))
log.space = 0
    log.add('\nComparing, resolving, and queueing file MOVES')
log.space = 2
for prev_path in prev_paths:
fileA = filesA.query_one(prev_path=prev_path)
fileB = filesB.query_one(prev_path=prev_path)
# Check if one was deleted. Both can't be.
# If deleted, make sure to set it as mod
# set it as new to make sure it gets transfered
txt = 'WARNING: file moved on {AB:s} not found on {BA:s}\n'
txt += ' {AB:s}: {prev_path:s} --> {path:s}\n'
txt += " It may have been deleted or {AB:s}'s is new"
if fileA is None:
log.add(txt.format(AB='B',BA='A',**fileB))
filesB.update({'new':True},prev_path=prev_path)
continue
if fileB is None:
log.add(txt.format(AB='A',BA='B',**fileA))
filesA.update({'new':True},prev_path=prev_path)
continue
if fileA['path'] == fileB['path']: # Both moved to the same path
continue
if fileA['moved'] and fileB['moved']:
txt = 'CONFLICT: Same file moved on A and B\n'
txt += ' original: {prev_path:s}\n'
txt += ' on A: {A:s}\n'
txt += ' on B: {B:s}\n'
txt += ' Resolve to {AB:s} as per config'
txt = txt.format(AB=config.move_conflict,A=fileA['path'],B=fileB['path'],**fileA)
# Reset one to False and change prev_path to the other
if config.move_conflict == 'A':
queueB.append( {'move':[fileB['path'],fileA['path'] ]})
log.add(txt)
else: # config.move_conflict == 'B':
queueA.append( {'move':[fileA['path'],fileB['path'] ]})
log.add(txt)
continue
# Apply moves. Only one can be true after the above
if fileA['moved']:
queueB.append( {'move':[prev_path,fileA['path'] ]})
if fileB['moved']:
queueA.append( {'move':[prev_path,fileB['path'] ]})
# Note: we do not reindex since the only changes are the moved status
# and we don't care about them anymore
return queueA,queueB
def apply_move_queues_theoretical(files,queue,AB='AB'):
"""
Apply the move queues to the file lists as if they were performed
to make sure they do not overwrite and to reset the names
"""
global log
txt = 'CONFLICT: Move scheduled over another\n'
txt += ' {src:s} --> {dest:s}\n'
txt += ' {result:s}'
outqueue = []
for action_dict in queue:
action,path = list(action_dict.items())[0]
if action == 'move':
src,dest = path
if ( {'path':dest} not in files ):
                files.update({'path':dest},{'path':src}) # Update the path as if the move already happened
else:
# If you can't do the move, you need to update BOTH files that there is a conflict of sorts
files.update({'newmod':True},{'path':src})
files.update({'newmod':True},{'path':dest})
log.add(txt.format(src=src,dest=dest,result='Skipping'))
continue # so it doesn't get added to the queue
outqueue.append(action_dict)
return outqueue
def determine_file_transfers(filesA,filesB):
"""
Determine transfers
Note: we only look at new or modified files as per tracking
"""
global log
txt1 = ('CONFLICT: File modified on both sides\n'
' {path:s}\n'
' A: {mtimeA:s}\n'
' B: {mtimeB:s}\n'
" resolving with '{res:s}' as per config\n")
txt2 = ('WARNING: Untracked file on {AB} and exists on {BA}. Transfer\n'
' File: {path:s}\n')
txt3 = "WARNING: Tag for conflict file '{path}' already exists on {AB}. Adding '.{ii}'"
action_queueA = [] # Actions to be performed ON A
action_queueB = [] # " " ON B
global tqA2B,tqB2A
paths = set(fileA['path'] for fileA in filesA.items())
paths.update(fileB['path'] for fileB in filesB.items())
for path in paths:
fileA = filesA.query_one(path=path)
fileB = filesB.query_one(path=path)
# Recall that deleted files are already handled
# Check if the other path doesn't exist. Means file was new or
# deleted on one and modified on the other. Transfer to missing side
if fileA is None:
if not fileB.get('new',False): # Somehow missing on A. Maybe deleted?
log.add(txt2.format(AB='A',BA='B',path=path))
tqB2A.append(path)
continue
if fileB is None:
            if not fileA.get('new',False): # same logic as above, with A and B swapped
log.add(txt2.format(AB='B',BA='A',path=path))
tqA2B.append(path)
continue
mtimeA = _unix_time(fileA['mtime'])
mtimeB = _unix_time(fileB['mtime'])
#########################
transfer = False
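        # Editorial note: the for/else below marks the file for transfer only
        # when no mod_attribute pair matched, i.e. the loop ran to completion
        # without hitting a break.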
for mod_attribute in config.mod_attributes:
mod_attribute = tuple(mod_attribute)
if mod_attribute == ('mtime','mtime'):
if abs(fileA['mtime'] - fileB['mtime']) <= config.mod_resolution:
break
elif fileA[mod_attribute[0]] == fileB[mod_attribute[1]]:
break
else:
transfer = True
if not transfer:
continue
if fileA['mtime'] <= config.last_run and fileB['mtime'] >= config.last_run:
# Modified on B
tqB2A.append(path)
action_queueA.append( {'backup':path} )
continue
if fileA['mtime'] >= config.last_run and fileB['mtime'] <= config.last_run:
# Modified on A
tqA2B.append(path)
action_queueB.append( {'backup':path} )
continue
# If they are both modified before the last run, then something strange
# happened and we want to proceed as a conflict
################### conflict
def get_newpath(path,AB):
"""
Will return a newpath from path and name but making sure that it
doesn't exist in files
"""
if AB == 'A':
name = config.nameA
files = filesA
else:
name = config.nameB
files = filesB
root,ext = os.path.splitext(path)
newpath = '{}.{}{}'.format(root,name,ext)
newpath0 = newpath
ii = 0
while files.query_one(path=newpath) is not None:
ii += 1
                newpath = '{}.{}.{}{}'.format(root,name,ii,ext)
if ii > 0:
log.add(txt3.format(AB=AB,path=path,ii=ii))
return newpath
res = config.mod_conflict
log.add(txt1.format(path=path,mtimeA=mtimeA,mtimeB=mtimeB,res=res))
if res == 'A':
tqA2B.append(path)
action_queueB.append( {'backup':path} )
elif res == 'B':
tqB2A.append(path)
action_queueA.append( {'backup':path} )
elif res == 'newer':
if fileA['mtime']>=fileB['mtime']:
tqA2B.append(path)
action_queueB.append( {'backup':path} )
else:
tqB2A.append(path)
action_queueA.append( {'backup':path} )
elif res == 'newer_tag':
if fileA['mtime']>=fileB['mtime']:
tqA2B.append(path)
newpath = get_newpath(path,'B') #path + '.' + config.nameB
action_queueB.append({'move':[path,newpath]})
tqB2A.append(newpath)
else:
tqB2A.append(path)
newpath = get_newpath(path,'A') #path + '.' + config.nameA
action_queueA.append({'move':[path,newpath]})
tqA2B.append(newpath)
elif res == 'both':
newpathA = get_newpath(path,'A')
action_queueA.append({'move':[path,newpathA]})
tqA2B.append(newpathA)
newpathB = get_newpath(path,'B')
action_queueB.append({'move':[path,newpathB]})
tqB2A.append(newpathB)
else:
raise ValueError('Unrecognized mod_conflict resolution')
# Unset all backup options
if not config.backup:
action_queueA = [a for a in action_queueA if 'backup' not in a]
action_queueB = [b for b in action_queueB if 'backup' not in b]
return action_queueA,action_queueB,tqA2B,tqB2A
def apply_action_queue(dirpath,queue):
"""
* queue is the action queue that takes the following form
* {'backup':[file_path]} # Make a copy to the backup
* {'move': [src,dest]} # Move the file
* {'delete': [file_path]} # Move the file into the backup. Essentially a backup
Notes:
* conflicting/overwriting moves have already been removed at this point
* Delete should backup first if set config.backup == True
* Backup should NOT happen if config.backup == False
"""
log.space=2
log.add('Applying queues on: {:s}'.format(dirpath))
log.space = 4
backup_path = os.path.join(dirpath,'.PyFiSync','backups',
datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S'))
if config.backup:
try:
os.makedirs(backup_path)
except OSError:
pass
for action_dict in queue:
action,path = list(action_dict.items())[0]
if action == 'move':
src = os.path.join(dirpath,path[0])
dest = os.path.join(dirpath,path[1])
dest_dir = os.path.split(dest)[0]
try:
os.makedirs(dest_dir)
except OSError:
pass
shutil.move(src,dest)
log.add('move: ' + utils.move_txt(path[0],path[1]))
if action in ['backup','delete']:
src = os.path.join(dirpath,path)
dest = os.path.join(backup_path,path)
dest_dir = os.path.split(dest)[0]
try:
os.makedirs(dest_dir)
except OSError:
pass
if action == 'backup' and config.backup:
shutil.copy2(src,dest)
log.add('backup: ' + path)
elif action=='delete' and config.backup:
shutil.move(src,dest)
log.add('delete (w/ backup): ' + path)
elif action=='delete' and not config.backup:
os.remove(src)
log.add('delete (w/o backup): ' + path)
else:
pass # Do nothing for now
# Remove the backup directory if it was never used
try:
os.rmdir(backup_path)
except OSError: # Will error out if not empty or does not exist
if config.backup:
log.add('\nBackups saved in: {}'.format(backup_path))
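# Illustrative action queue (hypothetical paths, not from a real run), matching the format
# documented in apply_action_queue above: back up one file, move another, delete a third.
# example_queue = [{'backup': 'notes/todo.txt'},
# {'move': ['img/a.jpg', 'img/b.jpg']},
# {'delete': 'old/tmp.log'}]
# apply_action_queue('/path/to/sync_dir', example_queue)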
def search_up_PyFiSync(path):
path = os.path.abspath(path) # nothing relative
# Don't allow above the user directory
if path == '/':
print('ERROR: Not in a PyFiSync directory. Did you run `init`?')
sys.exit(2)
if path.endswith('/'): # Strip trailing "/". Shouldn't be there though
path = path[:-1]
# Make sure the path is a folder
if not os.path.isdir(path):
print("ERROR: Must specify a *directory* path or none for '.'")
sys.exit(2)
# Check it
if any(os.path.exists(os.path.join(path,'.PyFiSync','config'+ext)) for ext in ['','.py']):
return path
else:
path = os.path.split(path)[0]
return search_up_PyFiSync(path)
def run_bash(pre):
""" Run the pre and post bash scripts """
cmd = 'cd {} # Automatically set by PyFiSync\n\n'.format(config.pathA)
if pre:
if len(config.pre_sync_bash.strip()) == 0:
return
cmd += config.pre_sync_bash.strip()
else:
if len(config.post_sync_bash.strip()) == 0:
return
cmd += config.post_sync_bash.strip()
log.add('{}Calling pre/post_sync_bash scripts '.format('(DRY-RUN) ' if config._DRYRUN else ''))
log.add('\n'.join(' $ {}'.format(c) for c in cmd.split('\n')))
if config._DRYRUN:
log.add('(DRY-RUN): ** Not Called **')
return
proc = subprocess.Popen(cmd,shell=True,stderr=subprocess.PIPE,stdout=subprocess.PIPE)
out,err = proc.communicate()
out = utils.to_unicode(out)
err = utils.to_unicode(err)
log.add('STDOUT:')
log.add('\n'.join(' > {}'.format(c.rstrip()) for c in out.split('\n')))
log.add('STDERR:')
log.add('\n'.join(' > {}'.format(c.rstrip()) for c in err.split('\n')))
def _unix_time(val):
return datetime.datetime.fromtimestamp(float(val)).strftime('%Y-%m-%d %H:%M:%S')
desc = """\
Python (+ rsync & rclone) based intelligent file sync with automatic backups and file move/delete tracking.
"""
epi = ""
def cli(argv=None):
if argv is None:
argv = sys.argv[1:]
global log,config,remote_interface
# Set up main parser and subparsers. Then, (hackily) generate a list
# of modes. If the first argument is not a mode, then insert "sync". The
# edge case of this is if the directory is named one of the modes. Oh well!
parser_main = argparse.ArgumentParser(\
description=desc,
epilog=epi,
formatter_class=utils.RawSortingHelpFormatter)
parser_main.add_argument('-v', '--version', action='version',
version='%(prog)s-' + __version__,help='Display version and exit')
parser_main.add_argument('--debug',action='store_true',help=argparse.SUPPRESS)
# Parent parser for ALL modes/subparsers
parser_all_opts = argparse.ArgumentParser(add_help=False) # Notice this is NOT a subparser of parser_main
parser_all_opts.add_argument('path',default='.',nargs='?',
help="['%(default)s'] path to the PyFiSync directory")
subparsers = parser_main.add_subparsers(dest='mode',title='modes',
help="[sync]. Enter `{mode} -h` for individual help")
## Sync
# Options for all
parser_syncops = argparse.ArgumentParser(add_help=False) # Notice this is NOT a subparser of parser_main
parser_syncops.add_argument('-s','--silent',
action='store_true',help='Do not print the log to screen')
parser_syncops.add_argument('--no-backup',
action='store_true',
help='Override config and do not back up files')
parser_syncops.add_argument('--dry-run',action='store_true',
help='do a dry-run. Results should be reasonably accurate')
# Make the parser with the right combination of options
parser_sync = subparsers.add_parser('sync',
help='Synchronize to the server',
parents=[parser_syncops,parser_all_opts],
formatter_class=utils.RawSortingHelpFormatter)
## Init
parser_init = subparsers.add_parser('init',
help='Initialize in the specified directory',
parents=[parser_all_opts],
formatter_class=utils.RawSortingHelpFormatter)
parser_init.add_argument('--remote',
choices=remote_interfaces.REMOTES,
default='rsync',
help='[%(default)s] Specify the remote type of those supported')
## Reset
parser_reset = subparsers.add_parser('reset',
help=('Completely reset file tracking. No changes pre-reset '
'will be propagated until you do a `push/pull --all`. '
'Existing database files will be backed up'),
parents=[parser_all_opts],
formatter_class=utils.RawSortingHelpFormatter)
parser_reset.add_argument('--force',action='store_true',help='Do not prompt for confirmation')
# get a list of the modes. Inspired by https://stackoverflow.com/a/20096044/3633154
modes = []
for _s in parser_main._actions:
if not isinstance(_s, argparse._SubParsersAction):
continue
for choice in _s.choices.items():
modes.append(choice[0])
main_actions = list(itertools.chain.from_iterable(a.option_strings for a in parser_main._actions))
if len(argv) == 0:
argv = ['sync']
if argv[0].lower() == 'help': # add help mode
argv[0] = '--help'
if argv[0] not in modes + main_actions + ['_api']:
argv.insert(0,'sync')
if argv[0] == '_api':
# NOTE: this is hard coded as ssh_rsync since rclone doesn't need it
# and this was easier. But this should be fixed if more remotes
# are ever added and they need to communicate
remote_interfaces.ssh_rsync.cli(argv[1:])
sys.exit()
args = parser_main.parse_args(argv)
#############
if args.mode == 'init':
log = utils.logger(path=args.path,silent=False)
init(args.path,remote=args.remote)
elif args.mode in ['sync']:
path = search_up_PyFiSync(args.path)
config = utils.configparser(sync_dir=path)
log = utils.logger(path=path,silent=False)
config._DRYRUN = args.dry_run
config._debug = args.debug
_remote = remote_interfaces.get_remote_interface(config)
if _remote is None:
remote_interface = None
else:
remote_interface = _remote(config,log)
if args.no_backup:
config.backup = False
if args.silent:
log.silent = True
main(args.mode)
if remote_interface is not None and hasattr(remote_interface,'close')\
and hasattr(remote_interface.close,'__call__'):
remote_interface.close()
elif args.mode == 'reset':
path = search_up_PyFiSync(args.path)
config = utils.configparser(sync_dir=path)
log = utils.logger(path=path,silent=False)
_remote = remote_interfaces.get_remote_interface(config)
if _remote is None:
remote_interface = None
else:
remote_interface = _remote(config,log)
if args.debug:
remote_interface._debug = True
if not args.force:
print('Are you sure you want to reset? (Y/[N]): ')
if not raw_input().lower().startswith('y'):
sys.exit()
reset_tracking(set_time=True,empty='reset')
if remote_interface is not None and hasattr(remote_interface,'close')\
and hasattr(remote_interface.close,'__call__'):
remote_interface.close()
if __name__ == '__main__':
argv = sys.argv[1:] # Argument besides function name
cli(argv)
else:
log = utils.logger(path=None,silent=False)
--- file: /examples/quantum_random_numbers.py | repo: ProjectQ-Framework/ProjectQ | license: Apache-2.0 | Python ---
# pylint: skip-file
"""Example of a simple quantum random number generator."""
from projectq import MainEngine
from projectq.ops import H, Measure
# create a main compiler engine
eng = MainEngine()
# allocate one qubit
q1 = eng.allocate_qubit()
# put it in superposition
H | q1
# measure
Measure | q1
eng.flush()
# print the result:
print(f"Measured: {int(q1)}")
--- file: /_content/talks/2013/go4python/deco.py | repo: golang/website | license: BSD-3-Clause | Python ---
#!/usr/bin/python
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from urlparse import urlparse,parse_qs
PORT_NUMBER = 8080
def auth_required(myfunc):
def checkuser(self):
user = parse_qs(urlparse(self.path).query).get('user')
if user:
self.user = user[0]
myfunc(self)
else:
self.wfile.write('unknown user')
return checkuser
class myHandler(BaseHTTPRequestHandler):
@auth_required
def do_GET(self):
self.wfile.write('Hello, %s!' % self.user)
try:
server = HTTPServer(('', PORT_NUMBER), myHandler)
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
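# Illustrative usage (not part of the original talk snippet; note the snippet targets
# Python 2, hence BaseHTTPServer/urlparse): with the server running, a request to
# http://localhost:8080/?user=gopher returns "Hello, gopher!", while a request without
# the ?user= query parameter returns "unknown user".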
--- file: /ibm_db2/tests/test_integration_e2e.py | repo: DataDog/integrations-core | license: BSD-3-Clause | Python ---
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.base import AgentCheck
from datadog_checks.ibm_db2 import IbmDb2Check
from . import metrics
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_standard(aggregator, instance):
check = IbmDb2Check('ibm_db2', {}, [instance])
check.check(instance)
_assert_standard(aggregator)
@pytest.mark.e2e
def test_e2e(dd_agent_check, instance):
aggregator = dd_agent_check(instance, rate=True)
_assert_standard(aggregator)
def _assert_standard(aggregator):
aggregator.assert_service_check('ibm_db2.can_connect', AgentCheck.OK)
for metric in metrics.STANDARD:
aggregator.assert_metric_has_tag(metric, 'db:datadog')
aggregator.assert_metric_has_tag(metric, 'foo:bar')
aggregator.assert_all_metrics_covered()
--- file: /dali/test/python/operator_1/test_noise_gaussian.py | repo: NVIDIA/DALI | license: Apache-2.0 | Python ---
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import os
from test_utils import get_dali_extra_path, check_batch
test_data_root = get_dali_extra_path()
images_dir = os.path.join(test_data_root, 'db', 'single', 'jpeg')
@pipeline_def
def pipe_gaussian_noise(mean, stddev, variable_dist_params, device=None):
encoded, _ = fn.readers.file(file_root=images_dir)
in_data = fn.cast(
fn.decoders.image(encoded, device="cpu", output_type=types.RGB),
dtype=types.FLOAT
)
if device == 'gpu':
in_data = in_data.gpu()
mean_arg = mean
stddev_arg = stddev
if variable_dist_params:
mean_arg = fn.random.uniform(range=(-50.0, 50.0))
stddev_arg = fn.random.uniform(range=(1.0, 10.0))
seed = 12345
out_data1 = fn.noise.gaussian(in_data, mean=mean_arg, stddev=stddev_arg, seed=seed)
out_data2 = in_data + fn.random.normal(in_data, mean=mean_arg, stddev=stddev_arg, seed=seed)
return out_data1, out_data2
def _testimpl_operator_noise_gaussian_vs_add_normal_dist(device, mean, stddev, variable_dist_params,
batch_size, niter):
pipe = pipe_gaussian_noise(mean, stddev, variable_dist_params,
device=device, batch_size=batch_size, num_threads=3, device_id=0)
pipe.build()
for _ in range(niter):
out0, out1 = pipe.run()
check_batch(out0, out1, batch_size=batch_size, eps=0.1)
def test_operator_noise_gaussian_vs_add_normal_dist():
niter = 3
for device in ("cpu", "gpu"):
for batch_size in (1, 3):
for mean, stddev, variable_dist_params in [(10.0, 57.0, False), (0.0, 0.0, True)]:
yield _testimpl_operator_noise_gaussian_vs_add_normal_dist, \
device, mean, stddev, variable_dist_params, batch_size, niter
--- file: /llvm_cbuilder/tests/test_translate.py | repo: llvmpy/llvmpy | license: NCSA / BSD-3-Clause | Python ---
from llvm.core import Module
from llvm_cbuilder import *
from llvm_cbuilder.translator import translate
import llvm_cbuilder.shortnames as C
import unittest, logging
#logging.basicConfig(level=logging.DEBUG)
class FooIf(CDefinition):
_name_ = 'foo_if'
_retty_ = C.int
_argtys_ = [('x', C.int),
('y', C.int),]
def body(self, x, y):
@translate
def _():
if x > y:
return x - y
else:
return y - x
class FooWhile(CDefinition):
_name_ = 'foo_while'
_retty_ = C.int
_argtys_ = [('x', C.int)]
def body(self, x):
y = self.var_copy(x)
@translate
def _():
while x > 0:
x -= 1
y += x
return y
class FooForRange(CDefinition):
_name_ = 'foo_for_range'
_retty_ = C.int
_argtys_ = [('x', C.int)]
def body(self, x):
y = self.var(x.type, 0)
@translate
def _():
for i in range(x + 1):
y += i
return y
class TestTranslate(unittest.TestCase):
def test_if(self):
mod = Module.new(__name__)
lfoo = FooIf()(mod)
print(mod)
mod.verify()
exe = CExecutor(mod)
foo = exe.get_ctype_function(lfoo, 'int, int')
self.assertEqual(foo(10, 20), 20 - 10)
self.assertEqual(foo(23, 17), 23 - 17)
def test_whileloop(self):
mod = Module.new(__name__)
lfoo = FooWhile()(mod)
print(mod)
mod.verify()
exe = CExecutor(mod)
foo = exe.get_ctype_function(lfoo, 'int')
self.assertEqual(foo(10), sum(range(10+1)))
self.assertEqual(foo(1324), sum(range(1324+1)))
def test_forloop(self):
mod = Module.new(__name__)
lfoo = FooForRange()(mod)
print(mod)
mod.verify()
exe = CExecutor(mod)
foo = exe.get_ctype_function(lfoo, 'int')
self.assertEqual(foo(10), sum(range(10+1)))
self.assertEqual(foo(1324), sum(range(1324+1)))
if __name__ == '__main__':
unittest.main()
--- file: /src/feature_align.py | repo: Thinklab-SJTU/ThinkMatch | license: MulanPSL-2.0 | Python ---
import torch
from torch import Tensor
def feature_align(raw_feature: Tensor, P: Tensor, ns_t: Tensor, ori_size: tuple, device=None) -> Tensor:
r"""
Perform feature align on the image feature map.
Feature align performs bi-linear interpolation on the image feature map. This operation is inspired by "ROIAlign"
in `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
:param raw_feature: :math:`(b\times c \times w \times h)` raw feature map. :math:`b`: batch size, :math:`c`: number
of feature channels, :math:`w`: feature map width, :math:`h`: feature map height
:param P: :math:`(b\times n \times 2)` point set containing point coordinates. The coordinates are at the scale of
the original image size. :math:`n`: number of points
:param ns_t: :math:`(b)` exact number of points. We support batched instances with different numbers of nodes, and
``ns_t`` is required to specify the exact number of nodes of each instance in the batch.
:param ori_size: size of the original image. Since the point coordinates are in the scale of the original image
size, this parameter is required.
:param device: output device. If not specified, it will be the same as the input
:return: :math:`(b\times c \times n)` extracted feature vectors
"""
if device is None:
device = raw_feature.device
batch_num = raw_feature.shape[0]
channel_num = raw_feature.shape[1]
n_max = P.shape[1]
ori_size = torch.tensor(ori_size, dtype=torch.float32, device=device)
F = torch.zeros(batch_num, channel_num, n_max, dtype=torch.float32, device=device)
for idx, feature in enumerate(raw_feature):
n = ns_t[idx]
feat_size = torch.as_tensor(feature.shape[1:3], dtype=torch.float32, device=device)
_P = P[idx, 0:n]
interp_2d(feature, _P, ori_size, feat_size, out=F[idx, :, 0:n])
return F
def interp_2d(z: Tensor, P: Tensor, ori_size: Tensor, feat_size: Tensor, out=None, device=None) -> Tensor:
r"""
Interpolate in 2d grid space. z can be 3-dimensional where the first dimension is feature dimension.
:param z: :math:`(c\times w\times h)` feature map. :math:`c`: number of feature channels, :math:`w`: feature map
width, :math:`h`: feature map height
:param P: :math:`(n\times 2)` point set containing point coordinates. The coordinates are at the scale of
the original image size. :math:`n`: number of points
:param ori_size: :math:`(2)` size of the original image
:param feat_size: :math:`(2)` size of the feature map
:param out: optional output tensor
:param device: output device. If not specified, it will be the same as the input
:return: :math:`(c \times n)` extracted feature vectors
"""
if device is None:
device = z.device
step = ori_size / feat_size
if out is None:
out = torch.zeros(z.shape[0], P.shape[0], dtype=torch.float32, device=device)
for i, p in enumerate(P):
p = (p - step / 2) / ori_size * feat_size
out[:, i] = bilinear_interpolate(z, p[0], p[1])
return out
def bilinear_interpolate(im: Tensor, x: Tensor, y: Tensor, device=None):
r"""
Bi-linear interpolate 3d feature map to 2d coordinate (x, y).
The coordinates are at the same scale of :math:`w\times h`.
:param im: :math:`(c\times w\times h)` feature map
:param x: :math:`(1)` x coordinate
:param y: :math:`(1)` y coordinate
:param device: output device. If not specified, it will be the same as the input
:return: :math:`(c)` interpolated feature vector
"""
if device is None:
device = im.device
x = x.to(torch.float32).to(device)
y = y.to(torch.float32).to(device)
x0 = torch.floor(x)
x1 = x0 + 1
y0 = torch.floor(y)
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[2] - 1)
x1 = torch.clamp(x1, 0, im.shape[2] - 1)
y0 = torch.clamp(y0, 0, im.shape[1] - 1)
y1 = torch.clamp(y1, 0, im.shape[1] - 1)
x0 = x0.to(torch.int32).to(device)
x1 = x1.to(torch.int32).to(device)
y0 = y0.to(torch.int32).to(device)
y1 = y1.to(torch.int32).to(device)
Ia = im[:, y0, x0]
Ib = im[:, y1, x0]
Ic = im[:, y0, x1]
Id = im[:, y1, x1]
# to perform nearest neighbor interpolation if out of bounds
if x0 == x1:
if x0 == 0:
x0 -= 1
else:
x1 += 1
if y0 == y1:
if y0 == 0:
y0 -= 1
else:
y1 += 1
x0 = x0.to(torch.float32).to(device)
x1 = x1.to(torch.float32).to(device)
y0 = y0.to(torch.float32).to(device)
y1 = y1.to(torch.float32).to(device)
wa = (x1 - x) * (y1 - y)
wb = (x1 - x) * (y - y0)
wc = (x - x0) * (y1 - y)
wd = (x - x0) * (y - y0)
out = Ia * wa + Ib * wb + Ic * wc + Id * wd
return out
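# Illustrative usage (not part of the original module): a minimal sketch with hypothetical
# shapes, assuming a (b x c x w x h) feature map and keypoints given in original-image pixels.
if __name__ == '__main__':
    feat = torch.randn(2, 64, 32, 32)      # batch of 2 feature maps, 64 channels, 32x32 cells
    pts = torch.rand(2, 10, 2) * 256.0     # up to 10 keypoints per instance, in a 256x256 image
    ns = torch.tensor([10, 7])             # exact number of valid keypoints per instance
    vecs = feature_align(feat, pts, ns, (256, 256))
    print(vecs.shape)                      # expected: torch.Size([2, 64, 10])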
--- file: /clickhouse/datadog_checks/clickhouse/clickhouse.py | repo: DataDog/integrations-core | license: BSD-3-Clause | Python ---
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import clickhouse_driver
from six import raise_from
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.base.utils.db import QueryManager
from . import queries
from .utils import ErrorSanitizer
class ClickhouseCheck(AgentCheck):
__NAMESPACE__ = 'clickhouse'
SERVICE_CHECK_CONNECT = 'can_connect'
def __init__(self, name, init_config, instances):
super(ClickhouseCheck, self).__init__(name, init_config, instances)
self._server = self.instance.get('server', '')
self._port = self.instance.get('port')
self._db = self.instance.get('db', 'default')
self._user = self.instance.get('username', self.instance.get('user', 'default'))
self._password = self.instance.get('password', '')
self._connect_timeout = float(self.instance.get('connect_timeout', 10))
self._read_timeout = float(self.instance.get('read_timeout', 10))
self._compression = self.instance.get('compression', False)
self._tls_verify = is_affirmative(self.instance.get('tls_verify', False))
self._tags = self.instance.get('tags', [])
# Add global tags
self._tags.append('server:{}'.format(self._server))
self._tags.append('port:{}'.format(self._port))
self._tags.append('db:{}'.format(self._db))
self._error_sanitizer = ErrorSanitizer(self._password)
self.check_initializations.append(self.validate_config)
# We'll connect on the first check run
self._client = None
self._query_manager = QueryManager(
self,
self.execute_query_raw,
queries=[
queries.SystemMetrics,
queries.SystemEvents,
queries.SystemAsynchronousMetrics,
queries.SystemParts,
queries.SystemReplicas,
queries.SystemDictionaries,
],
tags=self._tags,
error_handler=self._error_sanitizer.clean,
)
self.check_initializations.append(self._query_manager.compile_queries)
def check(self, _):
self.connect()
self._query_manager.execute()
self.collect_version()
@AgentCheck.metadata_entrypoint
def collect_version(self):
version = list(self.execute_query_raw('SELECT version()'))[0][0]
# The version comes in like `19.15.2.2` though sometimes there is no patch part
version_parts = {name: part for name, part in zip(('year', 'major', 'minor', 'patch'), version.split('.'))}
self.set_metadata('version', version, scheme='parts', final_scheme='calver', part_map=version_parts)
def execute_query_raw(self, query):
return self._client.execute_iter(query)
def validate_config(self):
if not self._server:
raise ConfigurationError('the `server` setting is required')
def ping_clickhouse(self):
return self._client.connection.ping()
def connect(self):
if self.instance.get('user'):
self._log_deprecation('_config_renamed', 'user', 'username')
if self._client is not None:
self.log.debug('Clickhouse client already exists. Pinging Clickhouse Server.')
try:
if self.ping_clickhouse():
self.service_check(self.SERVICE_CHECK_CONNECT, self.OK, tags=self._tags)
return
else:
self.log.debug('Clickhouse connection ping failed. Attempting to reconnect')
self._client = None
except Exception as e:
self.log.debug('Unexpected ping response from Clickhouse', exc_info=e)
self.log.debug('Attempting to reconnect')
self._client = None
try:
client = clickhouse_driver.Client(
host=self._server,
port=self._port,
user=self._user,
password=self._password,
database=self._db,
connect_timeout=self._connect_timeout,
send_receive_timeout=self._read_timeout,
sync_request_timeout=self._connect_timeout,
compression=self._compression,
secure=self._tls_verify,
settings={},
# Make every client unique for server logs
client_name='datadog-{}'.format(self.check_id),
)
client.connection.connect()
except Exception as e:
error = 'Unable to connect to ClickHouse: {}'.format(
self._error_sanitizer.clean(self._error_sanitizer.scrub(str(e)))
)
self.service_check(self.SERVICE_CHECK_CONNECT, self.CRITICAL, message=error, tags=self._tags)
# When an exception is raised in the context of another one, both will be printed. To avoid
# this we set the context to None. https://www.python.org/dev/peps/pep-0409/
raise_from(type(e)(error), None)
else:
self.service_check(self.SERVICE_CHECK_CONNECT, self.OK, tags=self._tags)
self._client = client
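# Illustrative minimal instance configuration (hypothetical values, not shipped with the
# check), showing the keys read in __init__ above for a local server with default credentials:
# instance = {'server': 'localhost', 'port': 9000, 'username': 'default', 'password': ''}
# check = ClickhouseCheck('clickhouse', {}, [instance])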
--- file: /grr/server/grr_response_server/blob_stores/encrypted_blob_store.py | repo: google/grr | license: Apache-2.0 | Python ---
#!/usr/bin/env python
"""An module with implementation of the encrypted blobstore."""
import logging
from typing import Iterable
from typing import Optional
from grr_response_server import blob_store
from grr_response_server.databases import db as abstract_db
from grr_response_server.keystore import abstract as abstract_ks
from grr_response_server.rdfvalues import objects as rdf_objects
class EncryptedBlobStore(blob_store.BlobStore):
"""An implementation of blobstore that adds an encryption layer to blobs."""
def __init__(
self,
bs: blob_store.BlobStore,
db: abstract_db.Database,
ks: abstract_ks.Keystore,
key_name: str,
) -> None:
"""Initializes the encryption-aware blobstore implementation.
Args:
bs: A blobstore instance to which encrypted blobs are to be written.
db: A database used to store encryption key and related metadata.
ks: A keystore to fetch the keys from.
key_name: A name of the currently active key to encrypt new blobs with.
Returns:
Nothing.
"""
super().__init__()
self._bs = bs
self._db = db
self._ks = ks
self._key_name = key_name
def WriteBlobs(
self,
blobs: dict[rdf_objects.BlobID, bytes],
) -> None:
"""Writes blobs to the blobstore."""
crypter = self._ks.Crypter(self._key_name)
encrypted_blobs = dict()
key_names = dict()
for blob_id, blob in blobs.items():
blob_id_bytes = blob_id.AsBytes()
encrypted_blobs[blob_id] = crypter.Encrypt(blob, blob_id_bytes)
key_names[blob_id] = self._key_name
logging.info("Writing %s encrypted blobs using key '%s' (%s)", len(blobs),
self._key_name, ", ".join(map(str, blobs)))
self._bs.WriteBlobs(encrypted_blobs)
self._db.WriteBlobEncryptionKeys(key_names)
logging.info("%s encrypted blobs written", len(blobs))
def ReadBlobs(
self,
blob_ids: Iterable[rdf_objects.BlobID],
) -> dict[rdf_objects.BlobID, Optional[bytes]]:
"""Reads specified blobs from the blobstore."""
blobs = dict()
key_names = self._db.ReadBlobEncryptionKeys(list(blob_ids))
encrypted_blobs = self._bs.ReadBlobs(blob_ids)
for blob_id, encrypted_blob in encrypted_blobs.items():
if encrypted_blob is None:
blobs[blob_id] = None
continue
blob_id_bytes = blob_id.AsBytes()
key_name = key_names[blob_id]
if key_name is None:
# There is no associated key. It is possible that the blob is just not
# encrypted: we can verify by computing its blob identifier and compare
# it with the identifier we wanted to read.
if rdf_objects.BlobID.FromBlobData(encrypted_blob) == blob_id:
# The blob identifier of "encrypted" blob matches to blob identifier
# of the original blob, which means it is not encrypted, and we can
# just return it.
blobs[blob_id] = encrypted_blob
else:
# This case is more difficult: the blob is encrypted (because the
# identifiers do not match) but we don't have associated key in the
# database. This can happen because of a bug or some data loss. But
# it can also happen because writing blobs and encryption keys is not
# atomic: they are two separate stores and blobs can be written faster
# than associated keys in the database.
#
# But in this case it means that the write must have happened very,
# very recently and must have been done with the current key. Thus, we
# can attempt to decrypt the data with the current key.
#
# Note that even with this approach there is a tiny chance of race in
# case we switch the key between writes to blobstore and database. But
# this is no worse than server shutting down between the two (not very
# likely but technically possible) in which case we would end up in
# inconsistent state anyway.
crypter = self._ks.Crypter(self._key_name)
try:
blob = crypter.Decrypt(encrypted_blob, blob_id_bytes)
except abstract_ks.DecryptionError:
raise EncryptedBlobWithoutKeysError(blob_id) # pylint: disable=raise-missing-from
blobs[blob_id] = blob
continue
# AES GCM that we use guarantees that the data we decrypt was not tampered
# with (or that we don't try to decrypt some garbage bytes). We use blob
# identifiers for confirming data authenticity.
crypter = self._ks.Crypter(key_name)
blobs[blob_id] = crypter.Decrypt(encrypted_blob, blob_id_bytes)
return blobs
def CheckBlobsExist(
self,
blob_ids: Iterable[rdf_objects.BlobID],
) -> dict[rdf_objects.BlobID, bool]:
"""Checks whether the specified blobs exist in the blobstore."""
return self._bs.CheckBlobsExist(blob_ids)
class EncryptedBlobWithoutKeysError(Exception):
"""An error for cases when we encounter an encrypted blob without keys.
This can happen in cases when blob data is written into the blobstore but
writing the encryption keys to the database fails.
"""
def __init__(self, blob_id: rdf_objects.BlobID) -> None:
"""Initializes the error.
Args:
blob_id: An identifier of a blob that has no associated encryption keys.
"""
super().__init__(f"Encrypted blob '{blob_id}' with no encryption keys")
self.blob_id = blob_id
--- file: /nuitka/nodes/BuiltinAllNodes.py | repo: Nuitka/Nuitka | license: Apache-2.0 | Python ---
# Copyright 2023, Batakrishna Sahu, mailto:<Batakrishna.Sahu@suiit.ac.in>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Node for the calls to the 'all' built-in.
"""
from nuitka.specs import BuiltinParameterSpecs
from .ExpressionBases import ExpressionBuiltinSingleArgBase
from .ExpressionShapeMixins import ExpressionBoolShapeExactMixin
from .NodeMakingHelpers import (
makeConstantReplacementNode,
makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue,
wrapExpressionWithNodeSideEffects,
)
from .shapes.BuiltinTypeShapes import tshape_str, tshape_unicode
class ExpressionBuiltinAll(
ExpressionBoolShapeExactMixin, ExpressionBuiltinSingleArgBase
):
"""Builtin All Node class.
Args:
ExpressionBase: 'all - expression'
Returns:
Node that represents built-in 'all' call.
"""
kind = "EXPRESSION_BUILTIN_ALL"
builtin_spec = BuiltinParameterSpecs.builtin_all_spec
def computeExpression(self, trace_collection):
value = self.subnode_value
shape = value.getTypeShape()
if shape.hasShapeSlotIter() is False:
# An exception is raised.
trace_collection.onExceptionRaiseExit(BaseException)
return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue(
template="'%s' object is not iterable",
operation="all",
original_node=value,
value_node=value,
)
if shape in (tshape_str, tshape_unicode):
return (
wrapExpressionWithNodeSideEffects(
new_node=makeConstantReplacementNode(
constant=True, node=self, user_provided=False
),
old_node=value,
),
"new_constant",
"Predicted truth value of built-in 'all' string type argument",
)
iteration_handle = value.getIterationHandle()
if iteration_handle is not None:
all_true = iteration_handle.getAllElementTruthValue()
if all_true is not None:
result = wrapExpressionWithNodeSideEffects(
new_node=makeConstantReplacementNode(
constant=all_true, node=self, user_provided=False
),
old_node=value,
)
return (
result,
"new_constant",
"Predicted truth value of built-in 'all' argument",
)
self.onContentEscapes(trace_collection)
# All code could be run, note that.
trace_collection.onControlFlowEscape(self)
# All exception may be raised.
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
def mayRaiseException(self, exception_type):
"""returns boolean True if try/except/finally is needed else False"""
value = self.subnode_value
if value.mayRaiseException(exception_type):
return True
return not value.getTypeShape().hasShapeSlotIter()
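# Illustrative effect (hypothetical input program, not part of this module): for an expression
# such as all("hello"), the argument has a string type shape, so computeExpression() above
# replaces the call with the constant True (every character of a str is truthy, and all() of an
# empty iterable is also True) while preserving any side effects of the argument expression.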
--- file: /transit-network-analysis-tools/parallel_cpap.py | repo: Esri/public-transit-tools | license: Apache-2.0 | Python ---
############################################################################
## Tool name: Transit Network Analysis Tools
## Created by: Melinda Morang, Esri
## Last updated: 30 August 2023
############################################################################
"""Do the core logic for the Create Percent Access Polygons tool in parallel
for maximum efficiency.
This version of the tool is for ArcGIS Pro only.
Copyright 2023 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=logging-fstring-interpolation
import os
import time
import uuid
import shutil
import traceback
import argparse
import logging
import arcpy
import AnalysisHelpers
from AnalysisHelpers import FACILITY_ID_FIELD, FROM_BREAK_FIELD, TO_BREAK_FIELD, TIME_FIELD, FIELDS_TO_PRESERVE
DELETE_INTERMEDIATE_OUTPUTS = True # Set to False for debugging purposes
# Change logging.INFO to logging.DEBUG to see verbose debug messages
LOGGER = AnalysisHelpers.configure_global_logger(logging.INFO)
class ParallelCounter(AnalysisHelpers.JobFolderMixin, AnalysisHelpers.LoggingMixin):
"""Calculate percent access polygons for the designated facility, from break, to break combo."""
def __init__(self, time_lapse_polygons, raster_template, facility_id, from_break, to_break, scratch_folder):
"""Initialize the parallel counter for the given inputs.
Args:
time_lapse_polygons (feature class catalog path): Time lapse polygons
raster_template (feature class catalog path): Raster-like polygons template
facility_id (int): ID of the Service Area facility to select for processing this chunk
from_break (float): Service Area FromBreak field value to select for processing this chunk
to_break (float): Service Area ToBreak field value to select for processing this chunk
scratch_folder (folder): Folder location to write intermediate outputs
"""
self.time_lapse_polygons = time_lapse_polygons
self.raster_template = raster_template
self.facility_id = facility_id
self.from_break = from_break
self.to_break = to_break
self.scratch_folder = scratch_folder
# Create a job ID and a folder for this job
self._create_job_folder()
self.scratch_gdb = None # Set later
# Setup the class logger. Logs for each parallel process are not written to the console but instead to a
# process-specific log file.
self.setup_logger("PercAccPoly")
# Prepare a dictionary to store info about the analysis results
self.job_result = {
"jobId": self.job_id,
"jobFolder": self.job_folder,
"logFile": self.log_file,
"polygons": None # Set later
}
def make_percent_access_polygons(self):
"""Calculate percent access polygons for the designated facility, from break, to break combo."""
self.logger.info(
f"Processing FacilityID {self.facility_id}, FromBreak {self.from_break}, ToBreak {self.to_break}...")
self.scratch_gdb = self._create_output_gdb()
selected_polygons = self._select_polygons()
joined_polygons = self._join_polygons(selected_polygons)
dissolved_polygons = self._dissolve_cells(joined_polygons)
self.job_result["polygons"] = dissolved_polygons
def _select_polygons(self):
"""Select the subset of polygons for this FacilityID/FromBreak/ToBreak combo and return the layer."""
selected_polys_layer = "SelectedPolys_" + self.job_id
if self.facility_id is None:
facility_query = arcpy.AddFieldDelimiters(self.time_lapse_polygons, FACILITY_ID_FIELD) + " IS NULL"
else:
facility_query = arcpy.AddFieldDelimiters(self.time_lapse_polygons, FACILITY_ID_FIELD) + " = " + \
str(self.facility_id)
query = facility_query + " AND " + \
arcpy.AddFieldDelimiters(self.time_lapse_polygons, FROM_BREAK_FIELD) + " = " + str(self.from_break) + \
" AND " + \
arcpy.AddFieldDelimiters(self.time_lapse_polygons, TO_BREAK_FIELD) + " = " + str(self.to_break)
arcpy.management.MakeFeatureLayer(self.time_lapse_polygons, selected_polys_layer, where_clause=query)
self.logger.info(
f"{int(arcpy.management.GetCount(selected_polys_layer).getOutput(0))} time lapse polygons selected.")
return selected_polys_layer
def _join_polygons(self, selected_polygons):
"""Spatially join polygons and return the path to the output feature class."""
# Do a spatial join in order to count the number of time lapse polygons that intersect each "cell" in the raster-like
# polygon template. We are effectively applying the template to a specific set of time lapse polygons, doing the
# count, and creating the raw output. The result is a polygon feature class of raster-like cells with a field
# called Join_Count that shows the number of input time lapse polygons that intersect the cell using the specified
# match_option.
# Create a FieldMappings object for Spatial Join to preserve informational input fields
field_mappings = arcpy.FieldMappings()
for field in FIELDS_TO_PRESERVE:
fmap = arcpy.FieldMap()
fmap.addInputField(self.time_lapse_polygons, field)
fmap.mergeRule = "First"
field_mappings.addFieldMap(fmap)
# Do the spatial join
temp_spatial_join_fc = os.path.join(self.scratch_gdb, "SpatialJoin")
t0 = time.time()
arcpy.analysis.SpatialJoin(
self.raster_template,
selected_polygons,
temp_spatial_join_fc,
"JOIN_ONE_TO_ONE", # Output keeps only one copy of each "cell" when multiple time lapse polys intersect it
"KEEP_COMMON", # Delete any "cells" that don't overlap the time lapse polys being considered
field_mapping=field_mappings, # Preserve some fields from the original data
match_option="HAVE_THEIR_CENTER_IN"
)
self.logger.info(f"Finished spatial join in {time.time() - t0} seconds.")
return temp_spatial_join_fc
def _dissolve_cells(self, joined_polygons):
"""Dissolve percent access cells with the same values and return the path to the output feature class."""
# Dissolve all the little cells that were reached the same number of times to make the output more manageable
# Currently, the feature class contains a large number of little square polygons representing raster cells. The
# Join_Count field added by Spatial Join says how many of the input time lapse polygons overlapped the cell. We
# don't need all the little squares. We can dissolve them so that we have one polygon per unique value of
# Join_Count.
dissolved_polygons = os.path.join(self.scratch_gdb, "DissolvedPolys")
t0 = time.time()
arcpy.management.Dissolve(joined_polygons, dissolved_polygons, FIELDS_TO_PRESERVE + ["Join_Count"])
self.logger.info(f"Finished dissolve in {time.time() - t0} seconds.")
return dissolved_polygons
def parallel_calculate_access(combo, time_lapse_polygons, raster_template, scratch_folder):
"""Calculate the percent access polygons for this chunk.
Args:
combo (list): facility_id, from_break, to_break
time_lapse_polygons (feature class catalog path): Time lapse polygons
raster_template (feature class catalog path): Raster-like polygons template
scratch_folder (folder): Folder location to write intermediate outputs
Returns:
dict: job result parameters
"""
facility_id, from_break, to_break = combo
cpap_counter = ParallelCounter(
time_lapse_polygons, raster_template, facility_id, from_break, to_break, scratch_folder)
cpap_counter.make_percent_access_polygons()
cpap_counter.teardown_logger()
return cpap_counter.job_result
def count_percent_access_polygons(time_lapse_polygons, raster_template, output_fc, max_processes):
"""Add counts to percent access polygons using parallel processing.
Args:
time_lapse_polygons (feature class catalog path): Time lapse polygons
raster_template (feature class catalog path): Raster template
output_fc (catalog path): Path to final output feature class
max_processes (int): Number of allowed parallel processes.
"""
# Scratch folder to store intermediate outputs from the parallel processes
scratch_folder = os.path.join(
arcpy.env.scratchFolder, "PercAccPoly_" + uuid.uuid4().hex) # pylint: disable=no-member
LOGGER.info(f"Intermediate outputs for parallel processes will be written to {scratch_folder}.")
os.mkdir(scratch_folder)
# Figure out the unique combinations of FacilityID, FromBreak, and ToBreak in the input data. Each of these
# will be processed separately and get a separate output. Also count the number of unique times of day that
# were used in the original analysis so we can calculate % later.
unique_output_combos = []
unique_times = []
fields = [
FACILITY_ID_FIELD,
FROM_BREAK_FIELD,
TO_BREAK_FIELD,
TIME_FIELD
]
for row in arcpy.da.SearchCursor(time_lapse_polygons, fields): # pylint: disable=no-member
unique_output_combos.append((row[0], row[1], row[2]))
unique_times.append(row[3])
unique_output_combos = sorted(set(unique_output_combos))
total_jobs = len(unique_output_combos)
num_time_steps = len(set(unique_times))
# For each set of time lapse polygons, generate the cell-like counts. Do this in parallel for maximum efficiency.
job_results = AnalysisHelpers.run_parallel_processes(
LOGGER, parallel_calculate_access, [time_lapse_polygons, raster_template, scratch_folder], unique_output_combos,
total_jobs, max_processes,
"Counting polygons overlapping each cell", "polygon cell calculation"
)
# Retrieve and store results
all_polygons = []
for result in job_results:
if not result["polygons"]:
# Log failed analysis
LOGGER.warning(f"No output polygons generated for job id {result['jobId']}")
else:
all_polygons.append(result["polygons"])
# Merge all individual output feature classes into one feature class.
LOGGER.info("Parallel processing complete. Merging results to output feature class...")
arcpy.management.Merge(all_polygons, output_fc)
# Calculate a field showing the Percent of times each polygon was reached.
percent_field = "Percent"
arcpy.management.AddField(output_fc, percent_field, "DOUBLE")
expression = f"float(!Join_Count!) * 100.0 / float({num_time_steps})"
arcpy.management.CalculateField(output_fc, percent_field, expression)
LOGGER.info(f"Output feature class successfully created at {output_fc}")
# Cleanup
# Delete the job folders if the job succeeded
if DELETE_INTERMEDIATE_OUTPUTS:
LOGGER.info("Deleting intermediate outputs...")
try:
shutil.rmtree(scratch_folder, ignore_errors=True)
except Exception: # pylint: disable=broad-except
# If deletion doesn't work, just throw a warning and move on. This does not need to kill the tool.
LOGGER.warning(f"Unable to delete intermediate output folder {scratch_folder}.")
if __name__ == "__main__":
# This script should always be launched via subprocess as if it were being called from the command line.
# Create the parser
parser = argparse.ArgumentParser(description=globals().get("__doc__", ""), fromfile_prefix_chars='@')
# Define Arguments supported by the command line utility
# --time-lapse-polygons parameter
help_string = "The full catalog path to the feature class containing input time lapse polygons."
parser.add_argument(
"-p", "--time-lapse-polygons", action="store", dest="time_lapse_polygons", help=help_string, required=True)
# --raster-template parameter
help_string = "The full catalog path to the polygon raster template created in earlier steps."
parser.add_argument(
"-r", "--raster-template", action="store", dest="raster_template", help=help_string, required=True)
# --output-fc parameter
help_string = "The full catalog path to the output feature class."
parser.add_argument(
"-o", "--output-fc", action="store", dest="output_fc", help=help_string, required=True)
# --max-processes parameter
help_string = "Maximum number parallel processes to use."
parser.add_argument(
"-mp", "--max-processes", action="store", dest="max_processes", type=int, help=help_string, required=True)
# Get arguments as dictionary.
args = vars(parser.parse_args())
# Count intersecting percent access polygon cells in parallel
try:
start_time = time.time()
count_percent_access_polygons(**args)
run_time = round((time.time() - start_time) / 60, 2)
LOGGER.info(f"Parallel percent access polygon cell calculation completed in {run_time} minutes")
except Exception: # pylint: disable=broad-except
errs = traceback.format_exc().splitlines()
for err in errs:
LOGGER.error(err)
raise
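# Illustrative invocation (hypothetical catalog paths), matching the arguments defined above:
# python parallel_cpap.py -p C:\Data\Transit.gdb\TimeLapsePolys -r C:\Data\Transit.gdb\RasterTemplate -o C:\Data\Transit.gdb\PercentAccess -mp 4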
--- file: /generator/modules/chainer.py | repo: ufoym/deepo | license: MIT | Python ---
# -*- coding: utf-8 -*-
from .__module__ import Module, dependency, source
from .python import Python
@dependency(Python)
@source('pip')
class Chainer(Module):
def build(self):
return r'''
$PIP_INSTALL \
'''.rstrip() + (
'' if self.composer.cuda_ver is None else \
r'''
cupy \
'''.rstrip()
) + r'''
chainer \
&& \
'''
--- file: /litex/soc/cores/timer.py | repo: enjoy-digital/litex | license: BSD-2-Clause | Python ---
#
# This file is part of LiteX.
#
# Copyright (c) 2013-2015 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2019 Sean Cross <sean@xobs.io>
# Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from litex.gen import *
from litex.soc.interconnect.csr import *
from litex.soc.interconnect.csr_eventmanager import *
from litex.soc.integration.doc import AutoDoc, ModuleDoc
# Timer --------------------------------------------------------------------------------------------
class Timer(LiteXModule):
with_uptime = False
def __init__(self, width=32):
self.intro = ModuleDoc("""Timer
Provides a generic Timer core.
The Timer is implemented as a countdown timer that can be used in various modes:
- Polling : Returns current countdown value to software
- One-Shot: Loads itself and stops when value reaches ``0``
- Periodic: (Re-)Loads itself when value reaches ``0``
``en`` register allows the user to enable/disable the Timer. When the Timer is enabled, it is
automatically loaded with the value of `load` register.
When the Timer reaches ``0``, it is automatically reloaded with value of `reload` register.
The user can latch the current countdown value by writing to ``update_value`` register, it will
update ``value`` register with current countdown value.
To use the Timer in One-Shot mode, the user needs to:
- Disable the timer
- Set the ``load`` register to the expected duration
- (Re-)Enable the Timer
To use the Timer in Periodic mode, the user needs to:
- Disable the Timer
- Set the ``load`` register to 0
- Set the ``reload`` register to the expected period
- Enable the Timer
For both modes, the CPU can be notified by an IRQ that the duration/period has elapsed. (The
CPU can also do software polling with ``update_value`` and ``value`` to know the elapsed duration)
""")
self._load = CSRStorage(width, description="""Load value when Timer is (re-)enabled.
In One-Shot mode, the value written to this register specifies the Timer's duration in
clock cycles.""")
self._reload = CSRStorage(width, description="""Reload value when Timer reaches ``0``.
In Periodic mode, the value written to this register specifies the Timer's period in
clock cycles.""")
self._en = CSRStorage(1, description="""Enable flag of the Timer.
Set this flag to ``1`` to enable/start the Timer. Set to ``0`` to disable the Timer.""")
self._update_value = CSRStorage(1, description="""Update trigger for the current countdown value.
A write to this register latches the current countdown value to ``value`` register.""")
self._value = CSRStatus(width, description="""Latched countdown value.
This value is updated by writing to ``update_value``.""")
self.ev = EventManager()
self.ev.zero = EventSourceProcess(edge="rising")
self.ev.finalize()
# # #
value = Signal(width)
self.sync += [
If(self._en.storage,
If(value == 0,
# set reload to 0 to disable reloading
value.eq(self._reload.storage)
).Else(
value.eq(value - 1)
)
).Else(
value.eq(self._load.storage)
),
If(self._update_value.re, self._value.status.eq(value))
]
self.comb += self.ev.zero.trigger.eq(value == 0)
def add_uptime(self, width=64):
if self.with_uptime: return
self.with_uptime = True
self._uptime_latch = CSRStorage(description="Write a ``1`` to latch current Uptime cycles to ``uptime_cycles`` register.")
self._uptime_cycles = CSRStatus(width, description="Latched Uptime since power-up (in ``sys_clk`` cycles).")
# # #
self.uptime_cycles = uptime_cycles = Signal(width, reset_less=True)
self.sync += uptime_cycles.eq(uptime_cycles + 1)
self.sync += If(self._uptime_latch.re, self._uptime_cycles.status.eq(uptime_cycles))
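# Illustrative register sequence for Periodic mode (pseudo-accesses following the docstring
# above; the actual accessor names depend on the generated CSR API and are hypothetical here):
# en <= 0 # disable the timer
# load <= 0 # no one-shot duration
# reload <= period_in_sys_clk_cycles # value reloaded on each zero crossing
# en <= 1 # start; the `zero` event fires every time the countdown reaches 0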
--- file: /comment/views/comments.py | repo: Radi85/Comment | license: MIT | Python ---
from django.shortcuts import render, redirect, get_object_or_404
from django.template.loader import render_to_string
from django.utils import timezone
from django.contrib import messages
from comment.models import Comment
from comment.forms import CommentForm
from comment.utils import get_comment_from_key, get_user_for_request, CommentFailReason
from comment.mixins import CanCreateMixin, CanEditMixin, CanDeleteMixin
from comment.responses import UTF8JsonResponse
from comment.messages import EmailError
from comment.views import CommentCreateMixin, BaseCommentView
class CreateComment(CanCreateMixin, CommentCreateMixin):
comment = None
email_service = None
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['comment'] = self.comment
return context
def get_template_names(self):
if self.request.user.is_anonymous or self.comment.is_parent:
return 'comment/comments/base.html'
else:
return 'comment/comments/child_comment.html'
def form_valid(self, form):
user = get_user_for_request(self.request)
comment_content = form.cleaned_data['content']
email = form.cleaned_data.get('email', None)
time_posted = timezone.now()
temp_comment = Comment(
content_object=self.model_obj,
content=comment_content,
user=user,
parent=self.parent_comment,
email=email or user.email,
posted=time_posted
)
self.comment = self.perform_create(temp_comment, self.request)
self.data = render_to_string(self.get_template_names(), self.get_context_data(), request=self.request)
return UTF8JsonResponse(self.json())
def form_invalid(self, form):
self.error = EmailError.EMAIL_INVALID
self.status = 400
return UTF8JsonResponse(self.json(), status=self.status)
class UpdateComment(CanEditMixin, BaseCommentView):
comment = None
def get_object(self):
self.comment = get_object_or_404(
Comment.objects.select_related('user', 'flag', 'reaction'),
pk=self.kwargs.get('pk')
)
return self.comment
def get(self, request, *args, **kwargs):
context = self.get_context_data()
context['comment_form'] = CommentForm(instance=self.comment, request=self.request)
context['comment'] = self.comment
self.data = render_to_string('comment/comments/update_comment.html', context, request=self.request)
return UTF8JsonResponse(self.json())
def post(self, request, *args, **kwargs):
form = CommentForm(request.POST, instance=self.comment, request=self.request)
context = self.get_context_data()
if form.is_valid():
form.save()
context['comment'] = self.comment
self.data = render_to_string('comment/comments/comment_content.html', context, request=self.request)
return UTF8JsonResponse(self.json())
class DeleteComment(CanDeleteMixin, BaseCommentView):
comment = None
def get_object(self):
self.comment = get_object_or_404(
Comment.objects.select_related('user', 'flag', 'reaction'),
pk=self.kwargs.get('pk')
)
return self.comment
def get(self, request, *args, **kwargs):
context = self.get_context_data()
context["comment"] = self.comment
context['has_parent'] = not self.comment.is_parent
self.data = render_to_string('comment/comments/comment_modal.html', context, request=request)
return UTF8JsonResponse(self.json())
def post(self, request, *args, **kwargs):
self.comment.delete()
context = self.get_context_data()
self.data = render_to_string('comment/comments/base.html', context, request=self.request)
return UTF8JsonResponse(self.json())
class ConfirmComment(CommentCreateMixin):
@staticmethod
def _handle_invalid_comment(comment, request):
if comment.why_invalid == CommentFailReason.BAD:
messages.error(request, EmailError.BROKEN_VERIFICATION_LINK)
elif comment.why_invalid == CommentFailReason.EXISTS:
messages.warning(request, EmailError.USED_VERIFICATION_LINK)
def get(self, request, *args, **kwargs):
key = kwargs.get('key', None)
temp_comment = get_comment_from_key(key)
self._handle_invalid_comment(temp_comment, request)
if not temp_comment.is_valid:
return render(request, template_name='comment/anonymous/discarded.html')
comment = self.perform_save(temp_comment.obj, request)
return redirect(comment.get_url(request))
|
9c8f5327ea4553f3a3c1111a4363a2e073552638
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/zerver/webhooks/alertmanager/tests.py
|
e0bb206545589163d1f27c1d3e2b292534a4dac4
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
tests.py
|
from zerver.lib.test_classes import WebhookTestCase
class AlertmanagerHookTests(WebhookTestCase):
STREAM_NAME = "alertmanager"
URL_TEMPLATE = "/api/v1/external/alertmanager?&api_key={api_key}&stream={stream}&name=topic&desc=description"
WEBHOOK_DIR_NAME = "alertmanager"
def test_error_issue_message(self) -> None:
expected_topic = "andromeda"
expected_message = """
:alert: **FIRING**
* CPU core temperature is 34.75C ([graph](http://cobalt:9090/graph?g0.expr=avg+by%28host%29+%28sensors_temp_input%7Bfeature%3D~%22core_%5B0-9%5D%2B%22%7D%29+%3E+15&g0.tab=0))
* CPU core temperature is 17.625C ([graph](http://cobalt:9090/graph?g0.expr=avg+by%28host%29+%28sensors_temp_input%7Bfeature%3D~%22core_%5B0-9%5D%2B%22%7D%29+%3E+15&g0.tab=0))
""".strip()
self.check_webhook(
"alert",
expected_topic,
expected_message,
"application/json",
)
def test_single_error_issue_message(self) -> None:
expected_topic = "andromeda"
expected_message = """
:squared_ok: **Resolved** CPU core temperature is 34.75C ([graph](http://cobalt:9090/graph?g0.expr=avg+by%28host%29+%28sensors_temp_input%7Bfeature%3D~%22core_%5B0-9%5D%2B%22%7D%29+%3E+15&g0.tab=0))
""".strip()
self.check_webhook(
"single_alert",
expected_topic,
expected_message,
"application/json",
)
|
e9833f47715e7f832e9a35626541ce2d802d3b3b
|
7a6aca7d300c0752f2a73730b743a1a7361e941b
|
/tensorflow_graphics/projects/points_to_3Dobjects/train_multi_objects/train.py
|
a248bb10a7ab07b96e635d776ee46fcb3ed00870
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/graphics
|
ef0abe102398a58eb7c41b709393df3d0b0a2811
|
1b0203eb538f2b6a1013ec7736d0d548416f059a
|
refs/heads/master
| 2023-09-03T20:41:25.992578
| 2023-08-08T21:16:36
| 2023-08-08T21:17:31
| 164,626,274
| 2,920
| 413
|
Apache-2.0
| 2023-08-27T14:26:47
| 2019-01-08T10:39:44
|
Python
|
UTF-8
|
Python
| false
| false
| 43,750
|
py
|
train.py
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training procedure for occluded parts."""
import os
import pickle
import time
from absl import app
from absl import flags
from absl import logging
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_graphics.projects.points_to_3Dobjects.data_preparation.extract_protos as extract_protos
from tensorflow_graphics.projects.points_to_3Dobjects.losses import collision_loss
from tensorflow_graphics.projects.points_to_3Dobjects.losses import cross_entropy_loss
from tensorflow_graphics.projects.points_to_3Dobjects.losses import focal_loss
from tensorflow_graphics.projects.points_to_3Dobjects.losses import focal_loss_sparse
from tensorflow_graphics.projects.points_to_3Dobjects.losses import learning_rate_schedule
from tensorflow_graphics.projects.points_to_3Dobjects.losses import regression_huber_loss
from tensorflow_graphics.projects.points_to_3Dobjects.losses import regression_l1_loss
import tensorflow_graphics.projects.points_to_3Dobjects.models.centernet_vid as centernet_vid
from tensorflow_graphics.projects.points_to_3Dobjects.transforms import transforms_factory
from tensorflow_graphics.projects.points_to_3Dobjects.utils import evaluator as evaluator_util
import tensorflow_graphics.projects.points_to_3Dobjects.utils.io as io
import tensorflow_graphics.projects.points_to_3Dobjects.utils.logger as logger_util
import tensorflow_graphics.projects.points_to_3Dobjects.utils.plot as plot
import tensorflow_graphics.projects.points_to_3Dobjects.utils.tf_utils as tf_utils
from google3.pyglib import gfile
LOG_DIR = '/occluded_primitives/logs/'
TFRECORDS_DIR = '/occluded_primitives/data_stefan/'
SHAPENET_DIR = '/occluded_primitives/shapenet/'
FLAGS = flags.FLAGS
flags.DEFINE_string('logdir', LOG_DIR, 'Path to log directory.')
flags.DEFINE_string('tfrecords_dir', TFRECORDS_DIR, 'Path to tfrecord files.')
flags.DEFINE_string('shapenet_dir', SHAPENET_DIR, 'Path to shapenet data.')
flags.DEFINE_float('learning_rate', 1e-3, 'Initial learning rate.')
flags.DEFINE_string('model', 'hourglass', 'Feature backbone model.')
flags.DEFINE_integer('num_epochs', 500, 'Number of training epochs.')
flags.DEFINE_integer('n_tfrecords', 100, 'Number of sharded tf records.')
flags.DEFINE_integer('max_num_objects', 3,
'Maximum number of objects in the test scene.')
flags.DEFINE_integer('batch_size', 32, 'Batch size for training.')
flags.DEFINE_integer('num_classes', 6, 'Number of classes to detect.')
flags.DEFINE_integer('image_width', 256, 'Width of the input image in pixels.')
flags.DEFINE_integer('image_height', 256, 'Height of the input image in pixels.')
flags.DEFINE_integer('num_overfitting_samples', 10, 'Overfitting samples.')
flags.DEFINE_string('master', 'local', 'Location of the session.')
flags.DEFINE_boolean('replication', False, 'Store checkpoint with replication.')
flags.DEFINE_string('xmanager_metric', 'metrics/3D_IoU',
'Name of the metric to report to XManager')
flags.DEFINE_boolean('run_graph', False, 'Run in Graph mode.')
flags.DEFINE_integer('num_workers', 20, 'Number of parallel preprocessors.')
flags.DEFINE_boolean('train', False, 'Run script in train mode.')
flags.DEFINE_boolean('val', False, 'Run script in validation mode.')
flags.DEFINE_boolean('francis', False, 'Run script on the francis split.')
flags.DEFINE_boolean('plot', True, 'Plot predictions in matplotlib.')
flags.DEFINE_boolean('debug', False, 'Do only 5 iterations per epoch.')
flags.DEFINE_boolean('eval_only', False, 'Run eval at least once.')
flags.DEFINE_boolean('record_val_losses', True, 'Record validation losses.')
flags.DEFINE_boolean('local_plot_3d', False, 'Plot 3D results locally for visual debugging?')
flags.DEFINE_boolean('qualitative', False, 'plot qualitative results?')
flags.DEFINE_boolean('gaussian_augmentation', False,
'Apply random Gaussian blur augmentation?')
flags.DEFINE_boolean('translation_augmentation', False,
'Apply random translation augmentation?')
flags.DEFINE_boolean('rotation_svd', True, 'Regularize rotation using SVD.')
flags.DEFINE_boolean('soft_shape_labels', False, 'Soft shape labels.')
flags.DEFINE_float('soft_shape_labels_a', 0.03, 'Soft shape labels.')
flags.DEFINE_boolean('predict_2d_box', True, 'Predict 2D bounding box?')
flags.DEFINE_float('gradient_clipping_norm', 100.0, 'Clip gradients.')
flags.DEFINE_float('score_threshold', 0.0,
'Minimum threshold for valid detection.')
flags.DEFINE_float('polynomial_degree', 3, 'Degree of the polynomial.')
flags.DEFINE_float('label_smoothing', 0.0, 'How much label smoothing?')
flags.DEFINE_float('sizes_3d_weight', 1.0, 'Weight of the 3D sizes loss.')
flags.DEFINE_float('shapes_weight', 1.0, 'Weight of the shape classification loss.')
flags.DEFINE_boolean('shape_focal_loss', False, 'Use focal loss for shape?')
flags.DEFINE_float('rotations_3d_weight', -1.0, 'Weight of the 3D rotations loss.')
flags.DEFINE_float('translations_3d_weight', -1.0,
'Weight of the 3D translations loss.')
flags.DEFINE_float('shape_pc_sdf_weight', -1.0,
'Weight of the shape pointcloud-SDF loss.')
flags.DEFINE_float('pose_pc_pc_weight', -1.0,
'Weight of the pose pointcloud-pointcloud loss.')
flags.DEFINE_float('projected_pose_pc_pc_weight', -1.0,
'Weight of the projected pose pointcloud-pointcloud loss.')
flags.DEFINE_float('collision_weight', -1.0, 'Weight of the collision loss.')
flags.DEFINE_float('shape_sdf_sdf_weight', -1.0, 'Weight of the shape SDF loss.')
flags.DEFINE_float('beta', 10, 'Beta in softargmax formulation.')
flags.DEFINE_float('tol', 0.04, 'sdf tolerance')
flags.DEFINE_string('split', 'val', 'Evaluation split {val|test}.')
flags.DEFINE_integer('part_id', -2, 'Which part of the split?')
flags.DEFINE_integer('number_hourglasses', 1, 'Number of hourglasses.')
flags.DEFINE_string('kernel_regularization', '', 'Kernel regularization {l1|l2}.')
flags.DEFINE_string('continue_from_checkpoint', '', 'Starting checkpoint.')
flags.DEFINE_string('metrics_dir', 'metrics', 'dir with metrics.')
flags.DEFINE_string('qualidir', '/usr/local/google/home/engelmann', 'Path for qualitative results output.')
def get_shapes(scannet=False):
"""Get the shapes."""
cluster_filepath = os.path.join(FLAGS.tfrecords_dir,
'dict_clusterCenter_class_nearestModel.pkl')
with gfile.Open(cluster_filepath, 'rb') as file:
dict_clusters = pickle.load(file)
shape_centers = []
shape_sdfs = []
shape_pointclouds = []
for _, cluster in sorted(dict_clusters.items()):
center, class_id, model_name = cluster
path_prefix = os.path.join(FLAGS.shapenet_dir, class_id, model_name)
file_sdf = os.path.join(path_prefix, 'model_normalized_sdf.npy')
file_pointcloud = os.path.join(path_prefix, 'model_normalized_points.npy')
with gfile.Open(file_sdf, 'rb') as f:
sdf = np.load(f).astype(np.float32)
with gfile.Open(file_pointcloud, 'rb') as f:
pointcloud = np.load(f).astype(np.float32)
if scannet:
rot = np.reshape(np.array([0, 0, 1,
0, 1, 0,
-1, 0, 0],
dtype=np.float32), [3, 3])
# rot = np.reshape(np.array([0, 1, 0, -1, 0, 0, 0, 0, 1],
# dtype=np.float32), [3, 3])
# rot = np.reshape(np.array([0, -1, 0, -1, 0, 0, 0, 0, 1],
# dtype=np.float32), [3, 3])
pointcloud = np.transpose(rot @ np.transpose(pointcloud))
shape_centers.append(np.reshape(center, [1, 32, 32, 32]))
shape_sdfs.append(np.reshape(sdf, [1, 32, 32, 32]))
shape_pointclouds.append(np.reshape(pointcloud, [1, -1, 3]))
shape_centers = np.concatenate(shape_centers, axis=0)
shape_sdfs = np.concatenate(shape_sdfs, axis=0)
shape_pointclouds = np.concatenate(shape_pointclouds, axis=0)
return shape_centers, shape_sdfs, shape_pointclouds, dict_clusters
def get_soft_shape_labels(sdfs):
"""Get soft shape labels."""
num_shapes = sdfs.shape[0]
pointcloud_distances = np.zeros([num_shapes, num_shapes])
for i in range(num_shapes):
for j in range(i + 1, num_shapes):
dist = np.mean((sdfs[i] - sdfs[j])**2)
pointcloud_distances[i, j] = dist
pc_distances = pointcloud_distances + pointcloud_distances.T
soft_shape_labels = tf.cast(
tf.math.less(pc_distances, FLAGS.soft_shape_labels_a + 0.00001),
dtype=tf.float32)
soft_shape_labels = \
tf.maximum(1 - (pc_distances*FLAGS.soft_shape_labels_a)**2, 0.0)
return soft_shape_labels
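# Worked example of the soft-label formula above (a sketch, not original code):
# with FLAGS.soft_shape_labels_a = 0.03 and a pairwise SDF distance d = 10.0,
# the label is max(1 - (10.0 * 0.03) ** 2, 0.0) = max(1 - 0.09, 0.0) = 0.91,
# and any distance d >= 1 / 0.03 (about 33.3) clamps the label to 0.0.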
def get_model(shape_centers, shape_sdfs, shape_pointclouds, dict_clusters):
"""Get model."""
kernel_regularization = None
if FLAGS.kernel_regularization == 'l2' or FLAGS.kernel_regularization == 'l1':
kernel_regularization = FLAGS.kernel_regularization
head_losses = {}
head_losses['centers'] = {'loss': focal_loss.FocalLoss(), 'weight': 1.0}
if FLAGS.predict_2d_box:
head_losses['offset'] = {'loss': regression_l1_loss.RegL1Loss(),
'weight': 1.0}
head_losses['width_height'] = {'loss': regression_l1_loss.RegL1Loss(),
'weight': 0.1}
head_losses['sizes_3d'] = {'loss': regression_l1_loss.RegL1Loss(sparse=True),
'weight': FLAGS.sizes_3d_weight}
if FLAGS.shapes_weight >= 0.0:
if FLAGS.shape_focal_loss:
head_losses['shapes'] = {'loss': focal_loss_sparse.SparseFocalLoss(),
'weight': FLAGS.shapes_weight}
else:
head_losses['shapes'] = \
{'loss': cross_entropy_loss.CrossEntropyLoss(
label_smoothing=FLAGS.label_smoothing,
soft_shape_labels=FLAGS.soft_shape_labels),
'weight': FLAGS.shapes_weight}
if FLAGS.translations_3d_weight >= 0.0:
head_losses['translations_3d'] = \
{'loss': regression_l1_loss.RegL1Loss(sparse=True),
'weight': FLAGS.translations_3d_weight}
if FLAGS.rotations_3d_weight >= 0.0:
head_losses['rotations_3d'] = \
{'loss': regression_l1_loss.RegL1Loss(sparse=True),
'weight': FLAGS.rotations_3d_weight}
if FLAGS.pose_pc_pc_weight >= 0.0:
head_losses['pose_groundtruth_pointclouds'] = \
{'loss': regression_huber_loss.HuberLoss(),
'weight': FLAGS.pose_pc_pc_weight}
if FLAGS.projected_pose_pc_pc_weight >= 0.0:
head_losses['projected_gt_shapes'] = \
{'loss': regression_huber_loss.HuberLoss(),
'weight': FLAGS.projected_pose_pc_pc_weight}
if FLAGS.collision_weight >= 0.0:
head_losses['collisions_gt_shapes'] = \
{'loss': collision_loss.CollisionLoss(tol=-FLAGS.tol),
'weight': FLAGS.collision_weight}
if FLAGS.shape_sdf_sdf_weight >= 0.0:
head_losses['sdfs'] = {'loss': regression_huber_loss.HuberLoss(),
'weight': FLAGS.shape_sdf_sdf_weight}
num_shapes = 300
if 'scannet' in FLAGS.tfrecords_dir:
num_shapes = 50 * 8
model = centernet_vid.CenterNetVID(
heads_losses=head_losses,
heads={'centers': {'dim': FLAGS.num_classes},
'offset': {'dim': 2},
'width_height': {'dim': 2},
'sizes_offset_3d': {'dim': 3},
'translations_offset_3d': {'dim': 3},
'rotations_offset_3d': {'dim': 9},
'shapes': {'dim': num_shapes}
},
input_shape=(None, FLAGS.image_height, FLAGS.image_width, 3),
get_k_predictions_test=FLAGS.max_num_objects,
score_threshold=FLAGS.score_threshold,
number_hourglasses=FLAGS.number_hourglasses,
kernel_regularization=kernel_regularization,
clip_norm=FLAGS.gradient_clipping_norm,
shape_centers=shape_centers,
shape_sdfs=shape_sdfs,
shape_pointclouds=shape_pointclouds,
dict_clusters=dict_clusters,
beta=FLAGS.beta,
rotation_svd=FLAGS.rotation_svd,
)
model.batch_size = FLAGS.batch_size
model.window_size = 1
return model
def get_dataset(split, shape_soft_labels, shape_pointclouds=None):
"""Get dataset."""
if shape_pointclouds:
print(shape_pointclouds)
tfrecord_path = os.path.join(FLAGS.tfrecords_dir, split)
buffer_size, shuffle, cycles = 10000, True, 10000
if FLAGS.debug:
buffer_size, shuffle, cycles = 1, False, 1
if FLAGS.val:
buffer_size, shuffle, cycles = 100, False, 1
tfrecords_pattern = io.expand_rio_pattern(tfrecord_path)
dataset = tf.data.Dataset.list_files(tfrecords_pattern, shuffle=shuffle)
dataset = dataset.interleave(tf.data.TFRecordDataset, cycle_length=cycles)
if shuffle:
dataset = dataset.shuffle(buffer_size=buffer_size)
if 'scannet' in tfrecord_path:
dataset = dataset.map(extract_protos.decode_bytes_multiple_scannet)
dataset = dataset.filter(lambda sample: sample['num_boxes'] == 3)
else:
dataset = dataset.map(extract_protos.decode_bytes_multiple)
dataset = \
dataset.filter(lambda sample: tf.reduce_min(sample['shapes']) > -1)
def augment(sample):
image = sample['image']
if tf.random.uniform([1], 0, 1.0) < 0.8:
image = tf.image.random_saturation(image, 1.0, 10.0)
image = tf.image.random_contrast(image, 0.05, 5.0)
image = tf.image.random_hue(image, 0.5)
image = tf.image.random_brightness(image, 0.8)
sample['image'] = image
if tf.random.uniform([1], 0, 1.0) < 0.5:
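      # Horizontal flip: mirror the image, negate the x component of the 3D
      # translations, transpose each 3x3 rotation matrix, and mirror the
      # normalized 2D box x-coordinates (x -> 1 - x) so the groundtruth stays
      # consistent with the flipped image.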
sample['image'] = tf.image.flip_left_right(sample['image'])
sample['translations_3d'] *= [[-1.0, 1.0, 1.0]]
sample['rotations_3d'] = tf.reshape(sample['rotations_3d'], [-1, 3, 3])
sample['rotations_3d'] = tf.transpose(sample['rotations_3d'],
perm=[0, 2, 1])
sample['rotations_3d'] = tf.reshape(sample['rotations_3d'], [-1, 9])
bbox = sample['groundtruth_boxes']
bbox = tf.stack([bbox[:, 0], 1 - bbox[:, 3], bbox[:, 2], 1 - bbox[:, 1]],
axis=-1)
sample['groundtruth_boxes'] = bbox
if FLAGS.gaussian_augmentation:
if tf.random.uniform([1], 0, 1.0) < 0.15:
sample['image'] = tf_utils.gaussian_blur(sample['image'], sigma=1)
elif tf.random.uniform([1], 0, 1.0) < 0.30:
sample['image'] = tf_utils.gaussian_blur(sample['image'], sigma=2)
elif tf.random.uniform([1], 0, 1.0) < 0.45:
sample['image'] = tf_utils.gaussian_blur(sample['image'], sigma=3)
return sample
if FLAGS.train: # and not FLAGS.debug:
dataset = dataset.map(augment, num_parallel_calls=FLAGS.num_workers)
def add_soft_shape_labels(sample):
sample['shapes_soft'] = tf.map_fn(
fn=lambda t: tf.cast(shape_soft_labels[t], tf.float32),
elems=tf.cast(sample['shapes'], tf.int32),
fn_output_signature=tf.float32)
return sample
if FLAGS.soft_shape_labels:
dataset = dataset.map(add_soft_shape_labels,
num_parallel_calls=FLAGS.num_workers)
# Create dataset for overfitting when debugging
if FLAGS.debug:
t = FLAGS.num_overfitting_samples
dataset = dataset.take(t)
mult = 1 if FLAGS.val else 1000
dataset = dataset.repeat(mult)
if t > 1:
dataset = dataset.shuffle(buffer_size)
return dataset
def get_learning_rate_fn():
decay_steps = int(8e6)
start_decay_step = int(30.0e6)
warmup_steps = int(150000)
if FLAGS.debug:
decay_steps = 30000
start_decay_step = 20000
warmup_steps = 0
return learning_rate_schedule.WarmupDelayedCosineDecay(
initial_learning_rate=tf.cast(0.0001, dtype=tf.float32),
constant_learning_rate=tf.cast(FLAGS.learning_rate, dtype=tf.float32),
end_learning_rate=FLAGS.learning_rate / 1000000.0,
warmup_steps=warmup_steps,
start_decay_step=start_decay_step,
decay_steps=decay_steps,
)
def get_evaluator():
"""Evaluator."""
evaluator = evaluator_util.Evaluator({
# 'shape_accuracy_topk1': evaluator_util.ShapeAccuracyMetric(k=1),
# 'shape_accuracy_topk10': evaluator_util.ShapeAccuracyMetric(k=10),
# 'shape_accuracy_topk100': evaluator_util.ShapeAccuracyMetric(k=100),
'2D_mAP_25': evaluator_util.BoxIoUMetric(t=0.25, threed=False),
'2D_mAP_50': evaluator_util.BoxIoUMetric(t=0.50, threed=False),
# '2D_mAP_60': evaluator_util.BoxIoUMetric(t=0.60, threed=False),
# '2D_mAP_75': evaluator_util.BoxIoUMetric(t=0.75, threed=False),
# '2D_mAP_80': evaluator_util.BoxIoUMetric(t=0.80, threed=False),
# '2D_mAP_90': evaluator_util.BoxIoUMetric(t=0.90, threed=False),
'3D_mAP_25': evaluator_util.BoxIoUMetric(t=0.25, threed=True),
'3D_mAP_50': evaluator_util.BoxIoUMetric(t=0.50, threed=True),
'3D_IoU': evaluator_util.IoUMetric(max_num_classes=FLAGS.num_classes,
tol=FLAGS.tol, resolution=64),
# 'Collision': evaluator_util.CollisionMetric(tol=-FLAGS.tol),
# '3D_mAP_60': evaluator_util.BoxIoUMetric(t=0.60, threed=True),
# '3D_mAP_75': evaluator_util.BoxIoUMetric(t=0.75, threed=True),
# '3D_mAP_80': evaluator_util.BoxIoUMetric(t=0.80, threed=True),
# '3D_mAP_90': evaluator_util.BoxIoUMetric(t=0.90, threed=True)
}, split=FLAGS.split, shapenet_dir=FLAGS.shapenet_dir)
slave = (FLAGS.part_id > -1)
if slave:
iou_path = os.path.join(FLAGS.logdir,
FLAGS.metrics_dir, 'iou',
'iou_'+str(FLAGS.part_id).zfill(4)+'.pkl')
if not tf.io.gfile.exists(os.path.dirname(iou_path)):
tf.io.gfile.makedirs(os.path.dirname(iou_path))
collision_path = os.path.join(
FLAGS.logdir,
FLAGS.metrics_dir,
'collision',
'collision_'+str(FLAGS.part_id).zfill(4)+'.pkl')
if not tf.io.gfile.exists(os.path.dirname(collision_path)):
tf.io.gfile.makedirs(os.path.dirname(collision_path))
evaluator = evaluator_util.Evaluator(
{'3D_IoU': evaluator_util.IoUMetric(max_num_classes=FLAGS.num_classes,
slave=slave, path=iou_path,
tol=FLAGS.tol),
}, split=FLAGS.split, shapenet_dir=FLAGS.shapenet_dir)
if FLAGS.francis:
evaluator = evaluator_util.Evaluator({},
split=FLAGS.split,
shapenet_dir=FLAGS.shapenet_dir)
return evaluator
def _train_epoch(epoch, model: centernet_vid.CenterNetVID, dataset, logger,
number_of_steps_previous_epochs, max_num_steps_epoch,
input_image_size):
"""Train one epoch."""
strategy = tf.distribute.get_strategy()
if input_image_size:
print(input_image_size)
def distributed_step(sample):
outputs = {}
logging.info(sample['name'])
per_replica_outputs = strategy.run(model.train_sample, args=(sample,))
losses_value = {}
for key, value in per_replica_outputs.items():
losses_value[key] = strategy.reduce(
tf.distribute.ReduceOp.SUM, value, axis=None)
losses_val_value = {'total_loss': tf.constant(0, dtype=tf.float32)}
total_loss_diff = tf.abs(losses_value['total_loss'] -
losses_val_value['total_loss'])
return {**losses_value, 'total_loss_diff': total_loss_diff}, outputs
if FLAGS.run_graph:
distributed_step = tf.function(distributed_step)
logger.reset_losses()
dataset_iterator = iter(dataset)
n_steps = 0
while True:
start_time = time.time()
sample = tf_utils.get_next_sample_dataset(dataset_iterator)
if (sample is None or
max_num_steps_epoch is not None and n_steps > max_num_steps_epoch):
break
n_steps += FLAGS.batch_size
total_num_steps = n_steps + number_of_steps_previous_epochs
logger.record_scalar('meta/time_read', time.time() - start_time,
total_num_steps)
logger.record_scalar('meta/learning_rate',
model.optimizer.learning_rate(total_num_steps),
total_num_steps)
start_time = time.time()
losses_value, _ = distributed_step(sample)
logging.info('Loss: %f/%f \t LR: %f GD: %f',
losses_value['total_loss'].numpy(),
losses_value['total_loss_diff'].numpy(),
model.optimizer.learning_rate(total_num_steps),
losses_value['gradients_norm'].numpy())
logger.record_scalar('meta/forward_pass',
time.time() - start_time,
total_num_steps)
logger.record_losses('iterations/', losses_value, total_num_steps)
logger.record_losses_epoch('epoch/', epoch)
return n_steps
def train(max_num_steps_epoch=None,
save_initial_checkpoint=False,
gpu_ids=None):
"""Train function."""
strategy = tf.distribute.MirroredStrategy(tf_utils.get_devices(gpu_ids))
logging.info('Number of devices: %d', strategy.num_replicas_in_sync)
shape_centers, shape_sdfs, shape_pointclouds, dict_clusters = \
get_shapes('scannet' in FLAGS.tfrecords_dir)
soft_shape_labels = get_soft_shape_labels(shape_sdfs)
dataset = get_dataset('train*.tfrecord', soft_shape_labels, shape_pointclouds)
for sample in dataset.take(1):
plt.imshow(sample['image'])
if FLAGS.debug:
FLAGS.num_epochs = 50
if FLAGS.continue_from_checkpoint:
FLAGS.num_epochs *= 2
latest_epoch = tf.Variable(0, trainable=False)
num_epochs_var = tf.Variable(FLAGS.num_epochs, trainable=False)
number_of_steps_previous_epochs = tf.Variable(0, trainable=False,
dtype=tf.int64)
with strategy.scope():
work_unit = None
logging_dir = os.path.join(FLAGS.logdir, 'logging')
logger = logger_util.Logger(logging_dir, 'train', work_unit, '',
save_loss_tensorboard_frequency=100,
print_loss_frequency=1000)
optimizer = tf.keras.optimizers.Adam(learning_rate=get_learning_rate_fn())
model = get_model(shape_centers,
shape_sdfs,
shape_pointclouds,
dict_clusters)
model.optimizer = optimizer
transforms = {'name': 'centernet_preprocessing',
'params': {'image_size': (FLAGS.image_height,
FLAGS.image_width),
'transform_gt_annotations': True,
'random': False}}
train_targets = {'name': 'centernet_train_targets',
'params': {'num_classes': FLAGS.num_classes,
'image_size': (FLAGS.image_height,
FLAGS.image_width),
'stride': model.output_stride}}
transform_fn = transforms_factory.TransformsFactory.get_transform_group(
**transforms)
train_targets_fn = transforms_factory.TransformsFactory.get_transform_group(
**train_targets)
input_image_size = transforms['params']['image_size']
dataset = dataset.map(transform_fn, num_parallel_calls=FLAGS.num_workers)
dataset = dataset.batch(FLAGS.batch_size, drop_remainder=True)
dataset = dataset.map(train_targets_fn,
num_parallel_calls=FLAGS.num_workers)
if FLAGS.batch_size > 1:
dataset.prefetch(int(FLAGS.batch_size * 1.5))
# for sample in dataset:
# print(sample['name'])
dataset = strategy.experimental_distribute_dataset(dataset)
checkpoint_dir = os.path.join(FLAGS.logdir, 'training_ckpts')
if FLAGS.replication:
checkpoint_dir = os.path.join(checkpoint_dir, 'r=30')
checkpoint = tf.train.Checkpoint(
epoch=latest_epoch,
model=model.network,
optimizer=optimizer,
number_of_steps_previous_epochs=number_of_steps_previous_epochs,
num_epochs=num_epochs_var)
manager = tf.train.CheckpointManager(checkpoint,
checkpoint_dir,
max_to_keep=5)
# Restore latest checkpoint
if manager.latest_checkpoint:
logging.info('Restoring from %s', manager.latest_checkpoint)
checkpoint.restore(manager.latest_checkpoint)
elif FLAGS.continue_from_checkpoint:
init_checkpoint_dir = os.path.join(
FLAGS.continue_from_checkpoint, 'training_ckpts')
init_manager = tf.train.CheckpointManager(checkpoint,
init_checkpoint_dir,
None)
logging.info('Restoring from pretrained %s',
init_manager.latest_checkpoint)
checkpoint.restore(init_manager.latest_checkpoint)
else:
logging.info('Not restoring any previous training checkpoint.')
if save_initial_checkpoint and not manager.latest_checkpoint:
# Create a new checkpoint to avoid internal ckpt counter to increment
tmp_ckpt = tf.train.Checkpoint(epoch=latest_epoch, model=model.network)
tmp_manager = tf.train.CheckpointManager(tmp_ckpt, checkpoint_dir, None)
save_path = tmp_manager.save(0)
logging.info('Saved checkpoint for epoch %d: %s',
int(latest_epoch.numpy()), save_path)
latest_epoch.assign_add(1)
with logger.summary_writer.as_default():
for epoch in range(int(latest_epoch.numpy()), FLAGS.num_epochs + 1):
latest_epoch.assign(epoch)
n_steps = _train_epoch(epoch, model, dataset, logger,
number_of_steps_previous_epochs,
max_num_steps_epoch, input_image_size)
number_of_steps_previous_epochs.assign_add(n_steps)
save_path = manager.save()
logging.info('Saved checkpoint for epoch %d: %s',
int(latest_epoch.numpy()), save_path)
def _val_epoch(
name,
model,
dataset,
input_image_size,
epoch,
logger,
number_of_steps_previous_epochs,
evaluator: evaluator_util.Evaluator,
record_loss=False):
"""Validation epoch."""
if name:
print(name)
if FLAGS.part_id > -2:
record_loss = False
strategy = tf.distribute.get_strategy()
def distributed_step(sample):
training = False
output, loss = strategy.run(model.test_sample,
args=(sample, record_loss, training))
losses_value = {}
if record_loss:
for key, value in loss.items():
losses_value[key] = strategy.reduce(
tf.distribute.ReduceOp.SUM, value, axis=None)
return output, losses_value
if FLAGS.run_graph:
distributed_step = tf.function(distributed_step)
logger.reset_losses()
evaluator.reset_metrics()
dataset_iterator = iter(dataset)
n_steps = tf.constant(0, dtype=tf.int64)
while True:
logging.info('val %d', int(n_steps.numpy()))
start_time = time.time()
sample = tf_utils.get_next_sample_dataset(dataset_iterator)
if sample is None or tf_utils.compute_batch_size(sample) == 0:
break
n_steps += tf.cast(tf_utils.compute_batch_size(sample), tf.int64)
logger.record_scalar('meta/time_read',
time.time() - start_time,
n_steps + number_of_steps_previous_epochs)
start_time = time.time()
outputs, losses = distributed_step(sample)
logger.record_scalar('meta/forward_pass', time.time() - start_time,
n_steps + number_of_steps_previous_epochs)
status = False
if status:
model_path = '/usr/local/google/home/engelmann/saved_model'
model.network.save(model_path, save_format='tf')
new_model = tf.keras.models.load_model(model_path)
new_model.summary()
start_time = time.time()
if record_loss:
logger.record_losses('iterations/', losses,
n_steps + number_of_steps_previous_epochs)
outputs = outputs[-1] # only take outputs from last hourglass
batch_id = 0
# We assume batch_size=1 here.
detections = model.postprocess_sample2(input_image_size, sample, outputs)
logger.record_scalar('meta/post_processing',
time.time() - start_time,
n_steps + number_of_steps_previous_epochs)
tmp_sample = {k: v[0] for k, v in sample.items()}
result_dict = evaluator.add_detections(tmp_sample, detections)
iou_mean, iou_min = result_dict['iou_mean'], result_dict['iou_min']
if (FLAGS.master == 'local' or FLAGS.plot) and not FLAGS.francis and \
n_steps < tf.constant(13, dtype=tf.int64) and FLAGS.part_id < -1:
# Plot 3D
if FLAGS.local_plot_3d:
logdir = os.path.join(
'..', os.path.join(*(FLAGS.logdir.split(os.path.sep)[5:])),
'plots3d', str(sample['scene_filename'][batch_id].numpy())[2:-1])
logging.info(logdir)
plot.plot_detections_3d(detections, sample, logdir, model.dict_clusters)
# Plot 2D
image = tf.io.decode_image(sample['image_data'][batch_id]).numpy()
figure_heatmaps = plot.plot_to_image(plot.plot_heatmaps(
image, detections))
figure_boxes_2d = plot.plot_to_image(plot.plot_boxes_2d(
image, sample, detections))
figure_boxes_3d = plot.plot_to_image(plot.plot_boxes_3d(
image, sample, detections))
total_steps = n_steps + number_of_steps_previous_epochs
tf.summary.image('Heatmaps', figure_heatmaps, total_steps)
tf.summary.image('Boxes 2D', figure_boxes_2d, total_steps)
tf.summary.image('Boxes 3D', figure_boxes_3d, total_steps)
if (FLAGS.part_id > -1 and FLAGS.qualitative) or FLAGS.francis or True:
logdir = FLAGS.logdir
if FLAGS.francis:
logdir = os.path.join(FLAGS.qualidir, 'francis')
path_input = os.path.join(logdir, 'qualitative', 'img')
path_blender = os.path.join(logdir, 'qualitative', 'blender2')
path_2d_min = os.path.join(logdir, 'qualitative', 'img_2d_min')
path_2d_mean = os.path.join(logdir, 'qualitative', 'img_2d_mean')
path_3d_min = os.path.join(logdir, 'qualitative', 'img_3d_min')
path_3d_mean = os.path.join(logdir, 'qualitative', 'img_3d_mean')
tf.io.gfile.makedirs(path_input)
tf.io.gfile.makedirs(path_blender)
tf.io.gfile.makedirs(path_2d_min)
tf.io.gfile.makedirs(path_2d_mean)
tf.io.gfile.makedirs(path_3d_min)
tf.io.gfile.makedirs(path_3d_mean)
scene_name = \
str(sample['scene_filename'][0].numpy(), 'utf-8').split('.')[0]
iou_min_str = f'{iou_min:.5f}' if iou_min >= 0 else '0'
iou_mean_str = f'{iou_mean:.5f}' if iou_mean >= 0 else '0'
image = tf.io.decode_image(sample['image_data'][batch_id]).numpy()
# Plot original image
_ = plt.figure(figsize=(5, 5))
plt.clf()
plt.imshow(image)
filepath_input = os.path.join(path_input, scene_name+'.png')
with tf.io.gfile.GFile(filepath_input, 'wb') as f:
plt.savefig(f)
# Plot image 2D bounding boxes
plot.plot_boxes_2d(image, sample, detections,
groundtruth=(not FLAGS.francis))
filepath_2d_min = \
os.path.join(path_2d_min, iou_min_str+'_'+scene_name+'.png')
filepath_2d_mean = \
os.path.join(path_2d_mean, iou_mean_str+'_'+scene_name+'.png')
for path in [filepath_2d_min, filepath_2d_mean]:
with tf.io.gfile.GFile(path, 'wb') as f:
plt.savefig(f)
# Plot image 3D bounding boxes
plot.plot_boxes_3d(image,
sample,
detections,
groundtruth=(not FLAGS.francis))
filepath_3d_min = \
os.path.join(path_3d_min, iou_min_str+'_'+scene_name+'.png')
filepath_3d_mean = \
os.path.join(path_3d_mean, iou_mean_str+'_'+scene_name+'.png')
for path in [filepath_3d_min, filepath_3d_mean]:
with tf.io.gfile.GFile(path, 'wb') as f:
plt.savefig(f)
if FLAGS.local_plot_3d:
# Plot 3D visualizer
path = os.path.join(
'..', os.path.join(*(logdir.split(os.path.sep)[6:])),
'qualitative', 'web_3d_min', iou_min_str+'_'+scene_name)
plot.plot_detections_3d(detections,
sample,
path,
model.dict_clusters,
local=FLAGS.francis)
path = os.path.join(
'..', os.path.join(*(logdir.split(os.path.sep)[6:])),
'qualitative', 'web_3d_mean', iou_mean_str+'_'+scene_name)
plot.plot_detections_3d(detections,
sample,
path,
model.dict_clusters,
local=FLAGS.francis)
# Save pickels for plotting in blender
path_blender_file = os.path.join(path_blender, scene_name)
plot.save_for_blender(detections, sample, path_blender_file,
model.dict_clusters, model.shape_pointclouds,
local=FLAGS.francis)
if record_loss:
logger.record_losses_epoch('epoch/', epoch)
metrics = evaluator.evaluate()
if record_loss:
logger.record_dictionary_scalars('metrics/', metrics, epoch)
# mAP3Ds = ['3D_mAP_50', '3D_mAP_60', '3D_mAP_70', '3D_mAP_80', '3D_mAP_90']
# mAP3D = np.mean(np.array([metrics[v] for v in mAP3Ds]))
# logger.record_scalar('metrics/3D_mAP', mAP3D, epoch)
# mAP2Ds = ['2D_mAP_50', '2D_mAP_60', '2D_mAP_70', '2D_mAP_80', '2D_mAP_90']
# mAP2D = np.mean(np.array([metrics[v] for v in mAP2Ds]))
# logger.record_scalar('metrics/2D_mAP', mAP2D, epoch)
# else:
# stats = dataset.evaluate_evaluator()
# logger.record_dictionary_scalars(f'{name}_', stats, epoch)
return n_steps
def val(gpu_ids=None, record_losses=False, split='val', part_id=-2):
"""Val function."""
FLAGS.batch_size = 1
strategy = tf.distribute.MirroredStrategy(tf_utils.get_devices(gpu_ids))
logging.info('Number of devices: %d', strategy.num_replicas_in_sync)
shape_centers, shape_sdfs, shape_pointclouds, dict_clusters = \
get_shapes('scannet' in FLAGS.tfrecords_dir)
soft_shape_labels = get_soft_shape_labels(shape_sdfs)
part = '*.tfrecord' if part_id == -2 else \
'-'+str(part_id).zfill(5)+'-of-00100.tfrecord'
dataset = get_dataset(split+part, soft_shape_labels, shape_pointclouds)
# for sample in dataset:
# plt.imshow(sample['image'])
# plt.savefig('/usr/local/google/home/engelmann/res/'+sample['scene_filename'].numpy().decode()+'.png')
val_evaluator = get_evaluator()
with strategy.scope():
name = 'eval_'+str(split)
work_unit = None
logging_dir = os.path.join(FLAGS.logdir, 'logging')
logger = logger_util.Logger(logging_dir, name, work_unit,
FLAGS.xmanager_metric,
save_loss_tensorboard_frequency=10,
print_loss_frequency=1000)
epoch = tf.Variable(0, trainable=False)
latest_epoch = tf.Variable(-1, trainable=False)
num_epochs = tf.Variable(-1, trainable=False)
number_of_steps_previous_epochs = \
tf.Variable(0, trainable=False, dtype=tf.int64)
model = get_model(shape_centers,
shape_sdfs,
shape_pointclouds,
dict_clusters)
transforms = {'name': 'centernet_preprocessing',
'params': {'image_size': (FLAGS.image_height,
FLAGS.image_width),
'transform_gt_annotations': True,
'random': False}}
train_targets = {'name': 'centernet_train_targets',
'params': {'num_classes': FLAGS.num_classes,
'image_size': (FLAGS.image_height,
FLAGS.image_width),
'stride': model.output_stride}}
transform_fn = transforms_factory.TransformsFactory.get_transform_group(
**transforms)
train_targets_fn = transforms_factory.TransformsFactory.get_transform_group(
**train_targets)
input_image_size = transforms['params']['image_size']
dataset = dataset.map(transform_fn, num_parallel_calls=FLAGS.num_workers)
dataset = dataset.batch(FLAGS.batch_size)
# for k in ['name', 'scene_filename', 'mesh_names', 'classes', 'image',
# 'image_data', 'original_image_spatial_shape', 'num_boxes',
# 'center2d', 'groundtruth_boxes', 'dot', 'sizes_3d',
# 'translations_3d', 'rotations_3d', 'rt', 'k',
# 'groundtruth_valid_classes', 'shapes']:
# print('---', k)
# for i, sample in enumerate(dataset.take(7)):
# print(sample[k].shape)
# train_targets_fn(sample)
# for i, sample in enumerate(dataset):
# print(i)
# train_targets_fn(sample)
if train_targets_fn is not None:
dataset = dataset.map(train_targets_fn,
num_parallel_calls=FLAGS.num_workers)
if FLAGS.debug and False:
for d in dataset.take(1):
image = tf.io.decode_image(d['image_data'][0]).numpy()
heatmaps = d['centers'][0]
plot.plot_gt_heatmaps(image, heatmaps)
if tf.distribute.has_strategy():
strategy = tf.distribute.get_strategy()
dataset = strategy.experimental_distribute_dataset(dataset)
if transforms is not None and input_image_size is None:
if FLAGS.run_graph:
FLAGS.run_graph = False
logging.info('Graph mode has been disabled because the input does '
'not have constant size.')
if FLAGS.batch_size > strategy.num_replicas_in_sync:
raise ValueError('Batch size cannot be bigger than the number of GPUs'
' when the input does not have constant size')
val_checkpoint_dir = os.path.join(FLAGS.logdir, f'{name}_ckpts')
val_checkpoint = tf.train.Checkpoint(
epoch=latest_epoch,
number_of_steps_previous_epochs=number_of_steps_previous_epochs)
val_manager = tf.train.CheckpointManager(
val_checkpoint, val_checkpoint_dir, max_to_keep=1)
if val_manager.latest_checkpoint:
val_checkpoint.restore(val_manager.latest_checkpoint)
train_checkpoint_dir = os.path.join(FLAGS.logdir, 'training_ckpts')
if FLAGS.replication:
train_checkpoint_dir = os.path.join(train_checkpoint_dir, 'r=30')
train_checkpoint = tf.train.Checkpoint(epoch=epoch, model=model.network,
num_epochs=num_epochs)
latest_checkpoint = ''
if FLAGS.master == 'local' or FLAGS.plot:
local_dump = os.path.join(FLAGS.logdir, 'images')
if not tf.io.gfile.exists(local_dump):
tf.io.gfile.makedirs(local_dump)
with logger.summary_writer.as_default():
while True:
curr_latest_checkpoint = \
tf.train.latest_checkpoint(train_checkpoint_dir)
if (curr_latest_checkpoint is not None and
latest_checkpoint != curr_latest_checkpoint):
latest_checkpoint = curr_latest_checkpoint
train_checkpoint.restore(curr_latest_checkpoint)
if epoch != latest_epoch or FLAGS.eval_only:
FLAGS.eval_only = False
logging.info('Evaluating checkpoint in %s: %s.',
name, latest_checkpoint)
n_steps = _val_epoch(name, model, dataset, input_image_size,
epoch.numpy(), logger,
number_of_steps_previous_epochs,
val_evaluator, record_losses)
number_of_steps_previous_epochs.assign_add(n_steps)
latest_epoch.assign(epoch.numpy())
if part_id < -1:
val_manager.save()
else:
return
if epoch == num_epochs:
break
time.sleep(1)
def main(_):
if FLAGS.debug:
FLAGS.split = 'train'
if FLAGS.francis:
FLAGS.split = 'francis'
if 'scannet' in FLAGS.tfrecords_dir:
FLAGS.num_classes = 8
FLAGS.image_width = 640
FLAGS.image_height = 640
if not tf.io.gfile.exists(FLAGS.logdir):
tf.io.gfile.makedirs(FLAGS.logdir)
if FLAGS.train:
train()
elif FLAGS.val:
if FLAGS.part_id == -1:
def eval_iou():
metrics_dir = os.path.join(FLAGS.logdir, FLAGS.metrics_dir, 'iou')
if not tf.io.gfile.exists(metrics_dir):
tf.io.gfile.makedirs(metrics_dir)
while len(tf.io.gfile.listdir(metrics_dir)) < 100:
print('waiting...',
len(tf.io.gfile.listdir(metrics_dir)), 'out of 100')
time.sleep(5)
all_iou_per_class = {}
for i, iou_file in enumerate(tf.io.gfile.listdir(metrics_dir)):
logging.info(i)
iou_file_path = os.path.join(metrics_dir, iou_file)
with gfile.Open(iou_file_path, 'rb') as filename:
print(iou_file_path)
iou_per_class = pickle.load(filename)
for k, v in iou_per_class.items():
if k not in all_iou_per_class:
all_iou_per_class[k] = []
all_iou_per_class[k] = \
all_iou_per_class[k] + [n.numpy() for n in v]
with gfile.Open(metrics_dir + '.txt', 'wb') as file:
mean_iou_per_class = {}
all_iou = []
class_id_to_name = ['chair', 'sofa', 'table', 'bottle', 'bowl', 'mug']
for k, v in all_iou_per_class.items():
mean_iou_per_class[k] = np.mean(v)
file.write(class_id_to_name[k]+':\t'+
str(np.mean(v))+' ('+str(np.std(v))+')\n')
all_iou = all_iou + v
per_class_mean = np.mean(list(mean_iou_per_class.values()))
global_mean = np.mean(all_iou)
file.write('\nmIoU:\t'+str(per_class_mean))
file.write('\nglobal IoU:\t'+str(global_mean))
def eval_collision():
metrics_dir = os.path.join(FLAGS.logdir, FLAGS.metrics_dir, 'collision')
if not tf.io.gfile.exists(metrics_dir):
tf.io.gfile.makedirs(metrics_dir)
while len(tf.io.gfile.listdir(metrics_dir)) < FLAGS.n_tfrecords:
time.sleep(5)
total_collisions = 0
total_intersections = []
total_ious = []
for i, file in enumerate(tf.io.gfile.listdir(metrics_dir)):
logging.info(i)
file_path = os.path.join(metrics_dir, file)
with gfile.Open(file_path, 'rb') as filename:
collision_data = pickle.load(filename)
total_collisions += np.sum(collision_data['collisions'])
total_intersections = \
total_intersections + collision_data['intersections']
total_ious = total_ious + collision_data['ious']
with gfile.Open(metrics_dir+'.txt', 'wb') as file:
file.write('\ncollisions:\t'+str(total_collisions))
file.write('\nintersect.:\t'+str(np.mean(total_intersections)))
file.write('\niou:\t'+str(np.mean(total_ious)))
eval_iou()
eval_collision()
return
else:
val(record_losses=FLAGS.record_val_losses,
split=FLAGS.split,
part_id=FLAGS.part_id)
if __name__ == '__main__':
app.run(main)
|
a24960a7579f2af2df0c220cd6b59a65d379cc3c
|
2b8d9d7ac0e3836ef978ad10a424dcacacbb93bc
|
/sample models/Demo animate 2_yieldless.py
|
32885d570c50408c6ff831a149820fe743c04677
|
[
"MIT"
] |
permissive
|
salabim/salabim
|
64854780c45a886b29888ec18f376b7beecd1e83
|
1fc66bcf903ebbdc1570757043ee9075975796c9
|
refs/heads/master
| 2023-09-05T02:26:08.942708
| 2023-08-22T14:11:33
| 2023-08-22T14:11:33
| 98,032,067
| 219
| 74
|
MIT
| 2023-08-25T12:35:50
| 2017-07-22T13:10:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
Demo animate 2_yieldless.py
|
# Demo animate 2.py
import salabim as sim
sim.yieldless(True)
class AnimateWaitSquare(sim.Animate):
def __init__(self, i):
self.i = i
sim.Animate.__init__(
self, rectangle0=(-12, -10, 12, 10), x0=300 - 30 * i, y0=100, fillcolor0="red", linewidth0=0
)
def visible(self, t):
return q[self.i] is not None
class AnimateWaitText(sim.Animate):
def __init__(self, i):
self.i = i
sim.Animate.__init__(self, text="", x0=300 - 30 * i, y0=100, textcolor0="white")
def text(self, t):
component_i = q[self.i]
if component_i is None:
return ""
else:
return component_i.name()
def do_animation():
env.animate(True)
for i in range(10):
AnimateWaitSquare(i)
AnimateWaitText(i)
show_length = sim.Animate(text="", x0=330, y0=100, textcolor0="black", anchor="w")
show_length.text = lambda t: "Length= " + str(len(q))
class Person(sim.Component):
def process(self):
self.enter(q)
self.hold(15)
self.leave(q)
env = sim.Environment(trace=True)
q = sim.Queue("q")
for i in range(15):
Person(name="{:02d}".format(i), at=i)
do_animation()
env.run()
|
eb60457c5da1090d0693769bb021ed7d46ae33ba
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/nxos/tests/ShowMacAddressTable/cli/equal/golden_output_2_expected.py
|
0fa0627f0d90d3ba39cfecf19ce5960daa0804f4
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 5,083
|
py
|
golden_output_2_expected.py
|
expected_output = {
"mac_table": {
"vlans": {
"390": {
"mac_addresses": {
"000f.53ff.e5a5": {
"entry": "*",
"interfaces": {
"Port-channel113": {
"age": "0",
"interface": "Port-channel113",
"mac_type": "dynamic",
}
},
"mac_address": "000f.53ff.e5a5",
"ntfy": "F",
"secure": "F",
},
"000f.53ff.1446": {
"entry": "+",
"interfaces": {
"Port-channel103": {
"age": "0",
"interface": "Port-channel103",
"mac_type": "dynamic",
}
},
"mac_address": "000f.53ff.1446",
"ntfy": "F",
"secure": "F",
},
"000f.53ff.d708": {
"entry": "+",
"interfaces": {
"Port-channel115": {
"age": "0",
"interface": "Port-channel115",
"mac_type": "dynamic",
}
},
"mac_address": "000f.53ff.d708",
"ntfy": "F",
"secure": "F",
},
"000f.53ff.fd77": {
"entry": "*",
"interfaces": {
"Port-channel116": {
"age": "0",
"interface": "Port-channel116",
"mac_type": "dynamic",
}
},
"mac_address": "000f.53ff.fd77",
"ntfy": "F",
"secure": "F",
},
"000f.53ff.d0fc": {
"entry": "*",
"interfaces": {
"Port-channel127": {
"age": "0",
"interface": "Port-channel127",
"mac_type": "dynamic",
}
},
"mac_address": "000f.53ff.d0fc",
"ntfy": "F",
"secure": "F",
},
"000f.53ff.037d": {
"entry": "*",
"interfaces": {
"Port-channel133": {
"age": "0",
"interface": "Port-channel133",
"mac_type": "dynamic",
}
},
"mac_address": "000f.53ff.037d",
"ntfy": "F",
"secure": "F",
},
"000f.53ff.061e": {
"entry": "+",
"interfaces": {
"Port-channel132": {
"age": "0",
"interface": "Port-channel132",
"mac_type": "dynamic",
}
},
"mac_address": "000f.53ff.061e",
"ntfy": "F",
"secure": "F",
},
"000f.53ff.1e9c": {
"entry": "*",
"interfaces": {
"Port-channel124": {
"age": "0",
"interface": "Port-channel124",
"mac_type": "dynamic",
}
},
"mac_address": "000f.53ff.1e9c",
"ntfy": "F",
"secure": "F",
},
"000f.53ff.1f1d": {
"entry": "+",
"interfaces": {
"Port-channel125": {
"age": "0",
"interface": "Port-channel125",
"mac_type": "dynamic",
}
},
"mac_address": "000f.53ff.1f1d",
"ntfy": "F",
"secure": "F",
},
},
"vlan": "390",
}
}
}
}
|
3c4792d7f7fd6fffc93c79e8c3825fba4eb00b7b
|
a838c711a218bbdb661132eaf252fa417ca8f273
|
/influxdb_client/service/remote_connections_service.py
|
7c591078a32d0591f3e138ea1061ca8e4f2321d7
|
[
"MIT"
] |
permissive
|
influxdata/influxdb-client-python
|
9ae84038c1145466dd40c3a9096a74983f29bedb
|
1ec64b7e1039c891ac3a667ee6697731c61ddbaf
|
refs/heads/master
| 2023-08-23T09:14:38.727662
| 2023-08-09T03:59:54
| 2023-08-09T03:59:54
| 192,689,401
| 623
| 215
|
MIT
| 2023-09-11T05:46:26
| 2019-06-19T08:17:20
|
Python
|
UTF-8
|
Python
| false
| false
| 29,037
|
py
|
remote_connections_service.py
|
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
from influxdb_client.service._base_service import _BaseService
class RemoteConnectionsService(_BaseService):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None): # noqa: E501,D401,D403
"""RemoteConnectionsService - a operation defined in OpenAPI."""
super().__init__(api_client)
def delete_remote_connection_by_id(self, remote_id, **kwargs): # noqa: E501,D401,D403
"""Delete a remote connection.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_remote_connection_by_id(remote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str remote_id: (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_remote_connection_by_id_with_http_info(remote_id, **kwargs) # noqa: E501
else:
(data) = self.delete_remote_connection_by_id_with_http_info(remote_id, **kwargs) # noqa: E501
return data
def delete_remote_connection_by_id_with_http_info(self, remote_id, **kwargs): # noqa: E501,D401,D403
"""Delete a remote connection.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_remote_connection_by_id_with_http_info(remote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str remote_id: (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._delete_remote_connection_by_id_prepare(remote_id, **kwargs) # noqa: E501
return self.api_client.call_api(
'/api/v2/remotes/{remoteID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type=None, # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
async def delete_remote_connection_by_id_async(self, remote_id, **kwargs): # noqa: E501,D401,D403
"""Delete a remote connection.
This method makes an asynchronous HTTP request.
:param async_req bool
:param str remote_id: (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._delete_remote_connection_by_id_prepare(remote_id, **kwargs) # noqa: E501
return await self.api_client.call_api(
'/api/v2/remotes/{remoteID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type=None, # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
def _delete_remote_connection_by_id_prepare(self, remote_id, **kwargs): # noqa: E501,D401,D403
local_var_params = locals()
all_params = ['remote_id', 'zap_trace_span'] # noqa: E501
self._check_operation_params('delete_remote_connection_by_id', all_params, local_var_params)
# verify the required parameter 'remote_id' is set
if ('remote_id' not in local_var_params or
local_var_params['remote_id'] is None):
raise ValueError("Missing the required parameter `remote_id` when calling `delete_remote_connection_by_id`") # noqa: E501
path_params = {}
if 'remote_id' in local_var_params:
path_params['remoteID'] = local_var_params['remote_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
return local_var_params, path_params, query_params, header_params, body_params
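    # A minimal usage sketch (not generated code; identifiers other than the
    # service methods and ``async_req`` are placeholders): the three calling
    # styles offered by this operation.
    #
    #   service = RemoteConnectionsService(api_client)
    #   service.delete_remote_connection_by_id("<remote_id>")                     # synchronous
    #   thread = service.delete_remote_connection_by_id("<remote_id>", async_req=True)
    #   thread.get()                                                              # thread-based
    #   await service.delete_remote_connection_by_id_async("<remote_id>")         # coroutine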
def get_remote_connection_by_id(self, remote_id, **kwargs): # noqa: E501,D401,D403
"""Retrieve a remote connection.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_remote_connection_by_id(remote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str remote_id: (required)
:param str zap_trace_span: OpenTracing span context
:return: RemoteConnection
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_remote_connection_by_id_with_http_info(remote_id, **kwargs) # noqa: E501
else:
(data) = self.get_remote_connection_by_id_with_http_info(remote_id, **kwargs) # noqa: E501
return data
def get_remote_connection_by_id_with_http_info(self, remote_id, **kwargs): # noqa: E501,D401,D403
"""Retrieve a remote connection.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_remote_connection_by_id_with_http_info(remote_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str remote_id: (required)
:param str zap_trace_span: OpenTracing span context
:return: RemoteConnection
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._get_remote_connection_by_id_prepare(remote_id, **kwargs) # noqa: E501
return self.api_client.call_api(
'/api/v2/remotes/{remoteID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='RemoteConnection', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
async def get_remote_connection_by_id_async(self, remote_id, **kwargs): # noqa: E501,D401,D403
"""Retrieve a remote connection.
This method makes an asynchronous HTTP request.
:param async_req bool
:param str remote_id: (required)
:param str zap_trace_span: OpenTracing span context
:return: RemoteConnection
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._get_remote_connection_by_id_prepare(remote_id, **kwargs) # noqa: E501
return await self.api_client.call_api(
'/api/v2/remotes/{remoteID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='RemoteConnection', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
def _get_remote_connection_by_id_prepare(self, remote_id, **kwargs): # noqa: E501,D401,D403
local_var_params = locals()
all_params = ['remote_id', 'zap_trace_span'] # noqa: E501
self._check_operation_params('get_remote_connection_by_id', all_params, local_var_params)
# verify the required parameter 'remote_id' is set
if ('remote_id' not in local_var_params or
local_var_params['remote_id'] is None):
raise ValueError("Missing the required parameter `remote_id` when calling `get_remote_connection_by_id`") # noqa: E501
path_params = {}
if 'remote_id' in local_var_params:
path_params['remoteID'] = local_var_params['remote_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
return local_var_params, path_params, query_params, header_params, body_params
def get_remote_connections(self, org_id, **kwargs): # noqa: E501,D401,D403
"""List all remote connections.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_remote_connections(org_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str org_id: The organization ID. (required)
:param str zap_trace_span: OpenTracing span context
:param str name:
:param str remote_url:
:return: RemoteConnections
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_remote_connections_with_http_info(org_id, **kwargs) # noqa: E501
else:
(data) = self.get_remote_connections_with_http_info(org_id, **kwargs) # noqa: E501
return data
def get_remote_connections_with_http_info(self, org_id, **kwargs): # noqa: E501,D401,D403
"""List all remote connections.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_remote_connections_with_http_info(org_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str org_id: The organization ID. (required)
:param str zap_trace_span: OpenTracing span context
:param str name:
:param str remote_url:
:return: RemoteConnections
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._get_remote_connections_prepare(org_id, **kwargs) # noqa: E501
return self.api_client.call_api(
'/api/v2/remotes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='RemoteConnections', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
async def get_remote_connections_async(self, org_id, **kwargs): # noqa: E501,D401,D403
"""List all remote connections.
This method makes an asynchronous HTTP request.
:param async_req bool
:param str org_id: The organization ID. (required)
:param str zap_trace_span: OpenTracing span context
:param str name:
:param str remote_url:
:return: RemoteConnections
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._get_remote_connections_prepare(org_id, **kwargs) # noqa: E501
return await self.api_client.call_api(
'/api/v2/remotes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='RemoteConnections', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
def _get_remote_connections_prepare(self, org_id, **kwargs): # noqa: E501,D401,D403
local_var_params = locals()
all_params = ['org_id', 'zap_trace_span', 'name', 'remote_url'] # noqa: E501
self._check_operation_params('get_remote_connections', all_params, local_var_params)
# verify the required parameter 'org_id' is set
if ('org_id' not in local_var_params or
local_var_params['org_id'] is None):
raise ValueError("Missing the required parameter `org_id` when calling `get_remote_connections`") # noqa: E501
path_params = {}
query_params = []
if 'org_id' in local_var_params:
query_params.append(('orgID', local_var_params['org_id'])) # noqa: E501
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name'])) # noqa: E501
if 'remote_url' in local_var_params:
query_params.append(('remoteURL', local_var_params['remote_url'])) # noqa: E501
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
return local_var_params, path_params, query_params, header_params, body_params
def patch_remote_connection_by_id(self, remote_id, remote_connection_update_request, **kwargs): # noqa: E501,D401,D403
"""Update a remote connection.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_remote_connection_by_id(remote_id, remote_connection_update_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str remote_id: (required)
:param RemoteConnectionUpdateRequest remote_connection_update_request: (required)
:param str zap_trace_span: OpenTracing span context
:return: RemoteConnection
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_remote_connection_by_id_with_http_info(remote_id, remote_connection_update_request, **kwargs) # noqa: E501
else:
(data) = self.patch_remote_connection_by_id_with_http_info(remote_id, remote_connection_update_request, **kwargs) # noqa: E501
return data
def patch_remote_connection_by_id_with_http_info(self, remote_id, remote_connection_update_request, **kwargs): # noqa: E501,D401,D403
"""Update a remote connection.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_remote_connection_by_id_with_http_info(remote_id, remote_connection_update_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str remote_id: (required)
:param RemoteConnectionUpdateRequest remote_connection_update_request: (required)
:param str zap_trace_span: OpenTracing span context
:return: RemoteConnection
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._patch_remote_connection_by_id_prepare(remote_id, remote_connection_update_request, **kwargs) # noqa: E501
return self.api_client.call_api(
'/api/v2/remotes/{remoteID}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='RemoteConnection', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
async def patch_remote_connection_by_id_async(self, remote_id, remote_connection_update_request, **kwargs): # noqa: E501,D401,D403
"""Update a remote connection.
This method makes an asynchronous HTTP request.
:param async_req bool
:param str remote_id: (required)
:param RemoteConnectionUpdateRequest remote_connection_update_request: (required)
:param str zap_trace_span: OpenTracing span context
:return: RemoteConnection
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._patch_remote_connection_by_id_prepare(remote_id, remote_connection_update_request, **kwargs) # noqa: E501
return await self.api_client.call_api(
'/api/v2/remotes/{remoteID}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='RemoteConnection', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
def _patch_remote_connection_by_id_prepare(self, remote_id, remote_connection_update_request, **kwargs): # noqa: E501,D401,D403
local_var_params = locals()
all_params = ['remote_id', 'remote_connection_update_request', 'zap_trace_span'] # noqa: E501
self._check_operation_params('patch_remote_connection_by_id', all_params, local_var_params)
# verify the required parameter 'remote_id' is set
if ('remote_id' not in local_var_params or
local_var_params['remote_id'] is None):
raise ValueError("Missing the required parameter `remote_id` when calling `patch_remote_connection_by_id`") # noqa: E501
# verify the required parameter 'remote_connection_update_request' is set
if ('remote_connection_update_request' not in local_var_params or
local_var_params['remote_connection_update_request'] is None):
raise ValueError("Missing the required parameter `remote_connection_update_request` when calling `patch_remote_connection_by_id`") # noqa: E501
path_params = {}
if 'remote_id' in local_var_params:
path_params['remoteID'] = local_var_params['remote_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
body_params = None
if 'remote_connection_update_request' in local_var_params:
body_params = local_var_params['remote_connection_update_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
return local_var_params, path_params, query_params, header_params, body_params
def post_remote_connection(self, remote_connection_creation_request, **kwargs): # noqa: E501,D401,D403
"""Register a new remote connection.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_remote_connection(remote_connection_creation_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RemoteConnectionCreationRequest remote_connection_creation_request: (required)
:return: RemoteConnection
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_remote_connection_with_http_info(remote_connection_creation_request, **kwargs) # noqa: E501
else:
(data) = self.post_remote_connection_with_http_info(remote_connection_creation_request, **kwargs) # noqa: E501
return data
def post_remote_connection_with_http_info(self, remote_connection_creation_request, **kwargs): # noqa: E501,D401,D403
"""Register a new remote connection.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_remote_connection_with_http_info(remote_connection_creation_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RemoteConnectionCreationRequest remote_connection_creation_request: (required)
:return: RemoteConnection
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._post_remote_connection_prepare(remote_connection_creation_request, **kwargs) # noqa: E501
return self.api_client.call_api(
'/api/v2/remotes', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='RemoteConnection', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
async def post_remote_connection_async(self, remote_connection_creation_request, **kwargs): # noqa: E501,D401,D403
"""Register a new remote connection.
This method makes an asynchronous HTTP request.
:param async_req bool
:param RemoteConnectionCreationRequest remote_connection_creation_request: (required)
:return: RemoteConnection
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params, path_params, query_params, header_params, body_params = \
self._post_remote_connection_prepare(remote_connection_creation_request, **kwargs) # noqa: E501
return await self.api_client.call_api(
'/api/v2/remotes', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=[],
files={},
response_type='RemoteConnection', # noqa: E501
auth_settings=[],
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats={},
urlopen_kw=kwargs.get('urlopen_kw', None))
def _post_remote_connection_prepare(self, remote_connection_creation_request, **kwargs): # noqa: E501,D401,D403
local_var_params = locals()
all_params = ['remote_connection_creation_request'] # noqa: E501
self._check_operation_params('post_remote_connection', all_params, local_var_params)
# verify the required parameter 'remote_connection_creation_request' is set
if ('remote_connection_creation_request' not in local_var_params or
local_var_params['remote_connection_creation_request'] is None):
raise ValueError("Missing the required parameter `remote_connection_creation_request` when calling `post_remote_connection`") # noqa: E501
path_params = {}
query_params = []
header_params = {}
body_params = None
if 'remote_connection_creation_request' in local_var_params:
body_params = local_var_params['remote_connection_creation_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
return local_var_params, path_params, query_params, header_params, body_params
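# Usage sketch (illustrative; not part of the generated client). It assumes the
# methods above belong to the RemoteConnectionsService of the influxdb-client
# package and that an InfluxDB server is reachable; the url, token, org and
# org_id values below are placeholders, not real credentials.
if __name__ == "__main__":
from influxdb_client import InfluxDBClient
from influxdb_client.service.remote_connections_service import RemoteConnectionsService
with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
service = RemoteConnectionsService(client.api_client)
# List remote connections registered for the organization (synchronous call).
remotes = service.get_remote_connections(org_id="0000000000000000")
print(remotes)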
----- openstack/tempest :: /tempest/tests/lib/services/compute/test_certificates_client.py (Apache-2.0) -----
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.services.compute import certificates_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestCertificatesClient(base.BaseServiceTest):
FAKE_CERTIFICATE = {
"certificate": {
"data": "-----BEGIN----MIICyzCCAjSgAwI----END CERTIFICATE-----\n",
"private_key": None
}
}
def setUp(self):
super(TestCertificatesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = certificates_client.CertificatesClient(
fake_auth, 'compute', 'regionOne')
def _test_show_certificate(self, bytes_body=False):
self.check_service_client_function(
self.client.show_certificate,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_CERTIFICATE,
bytes_body,
certificate_id="fake-id")
def test_show_certificate_with_str_body(self):
self._test_show_certificate()
def test_show_certificate_with_bytes_body(self):
self._test_show_certificate(bytes_body=True)
def _test_create_certificate(self, bytes_body=False):
cert = copy.deepcopy(self.FAKE_CERTIFICATE)
cert['certificate']['private_key'] = "my_private_key"
self.check_service_client_function(
self.client.create_certificate,
'tempest.lib.common.rest_client.RestClient.post',
cert,
bytes_body)
def test_create_certificate_with_str_body(self):
self._test_create_certificate()
def test_create_certificate_with_bytes_body(self):
self._test_create_certificate(bytes_body=True)
----- kadalu/kadalu :: /server/glusterfsd.py (Apache-2.0) -----
"""
Starts Gluster Brick(fsd) process
"""
import logging
import os
import sys
import uuid
import json
import xattr
from kadalulib import (CommandException, Proc, execute, logf,
send_analytics_tracker)
from serverutils import (generate_brick_volfile,
generate_client_volfile)
# noqa # pylint: disable=I1101
VOLUME_ID_XATTR_NAME = "trusted.glusterfs.volume-id"
VOLFILES_DIR = "/var/lib/kadalu/volfiles"
VOLINFO_DIR = "/var/lib/gluster"
MKFS_XFS_CMD = "/sbin/mkfs.xfs"
def create_brickdir(brick_path):
"""Create Brick directory and other directories required"""
os.makedirs(os.path.join(brick_path, ".glusterfs"),
mode=0o755,
exist_ok=True)
def verify_brickdir_xattr_support(brick_path):
"""Verify Brick dir supports xattrs"""
test_xattr_name = "user.testattr"
test_xattr_value = b"testvalue"
try:
xattr.set(brick_path, test_xattr_name, test_xattr_value)
val = xattr.get(brick_path, test_xattr_name)
if val != test_xattr_value:
logging.error(logf("Xattr value mismatch.",
actual=val,
expected=test_xattr_value))
sys.exit(1)
except OSError as err:
logging.error(logf("Extended attributes are not "
"supported",
error=err))
sys.exit(1)
def set_volume_id_xattr(brick_path, volume_id):
"""Set Volume ID xattr"""
volume_id_bytes = uuid.UUID(volume_id).bytes
try:
xattr.set(brick_path, VOLUME_ID_XATTR_NAME,
volume_id_bytes, xattr.XATTR_CREATE)
except FileExistsError:
pass
except OSError as err:
logging.error(logf("Unable to set volume-id on "
"brick root",
error=err))
sys.exit(1)
def create_brick_volfile(storage_unit_volfile_path, volname, volume_id, brick_path, data):
"""
    Create the brick/storage-unit volfile based on the volinfo stored in the ConfigMap.
    For now, the generated volfile is used in the ConfigMap.
"""
storage_unit = {}
options = {}
# Set every third brick/storage_unit as arbiter brick.
# Arbiter brick will hold only file & directory structure but not the content.
if data["type"] == "Arbiter" and (int(os.environ.get("BRICK_INDEX")) + 1) % 3 == 0:
storage_unit["type"] = "arbiter"
# If Storage Pool options are configured, pass to Kadalu Volgen
if data["options"]:
options = data["options"]
storage_unit["path"] = brick_path
storage_unit["port"] = 24007
storage_unit["volume"] = {}
storage_unit["volume"]["name"] = volname
storage_unit["volume"]["id"] = volume_id
generate_brick_volfile(storage_unit, storage_unit_volfile_path, options)
def create_client_volfile(client_volfile_path, data):
"""
Create client volfile based on Volinfo stored in Config map using
Kadalu Volgen library.
"""
generate_client_volfile(data, client_volfile_path)
def create_and_mount_brick(brick_device, brick_path, brickfs):
"""
Create brick filesystem and mount the brick. Currently
only xfs is supported
"""
    # If the brick device path does not start with /dev, use the
    # /brickdev prefix. The brick device directory passed by the user
    # is mounted as /brickdev to avoid mixing with any other
    # dirs inside the container.
if not brick_device.startswith("/dev/"):
brick_device = "/brickdev/" + os.path.basename(brick_device)
mountdir = os.path.dirname(brick_path)
os.makedirs(mountdir,
mode=0o755,
exist_ok=True)
try:
execute("mount", brick_device, mountdir)
logging.info(logf(
"Successfully mounted device on path",
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
))
except CommandException as err:
logging.info(logf(
"Failed to mount device, continuing with mkfs",
err=err,
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
))
if 'wrong fs type' in err.err:
# This error pops up when we do mount on an empty device or wrong fs
# Try doing a mkfs and try mount
try:
execute(MKFS_XFS_CMD, brick_device)
logging.info(logf(
"Successfully created xfs file system on device",
fstype=brickfs,
device=brick_device,
))
except CommandException as err:
if "appears to contain an existing filesystem" not in err.err:
logging.error(logf(
"Failed to create file system",
fstype=brickfs,
device=brick_device,
error=err,
))
sys.exit(1)
else:
logging.info(logf(
"Failed to perform mkfs on device. continuing with mount",
err=err,
device=brick_device,
mountdir=mountdir,
))
try:
execute("mount", brick_device, mountdir)
logging.info(logf(
"Successfully mounted device on path",
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
))
except CommandException as err:
logging.error(logf(
"Failed to mount export brick (after mkfs)",
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
error=err,
))
sys.exit(1)
elif 'already mounted' not in err.err:
logging.error(logf(
"Failed to mount export brick",
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
error=err,
))
sys.exit(1)
else:
pass
def start_args():
"""
Prepare the things required for Brick Start and Returns the Proc
object required to start Brick Process.
"""
brick_device = os.environ.get("BRICK_DEVICE", None)
brick_path = os.environ["BRICK_PATH"]
if brick_device is not None and brick_device != "":
brickfs = os.environ.get("BRICK_FS", "xfs")
create_and_mount_brick(brick_device, brick_path, brickfs)
volume_id = os.environ["VOLUME_ID"]
brick_path_name = brick_path.strip("/").replace("/", "-")
volname = os.environ["VOLUME"]
nodename = os.environ["HOSTNAME"]
create_brickdir(brick_path)
verify_brickdir_xattr_support(brick_path)
set_volume_id_xattr(brick_path, volume_id)
volfile_id = "%s.%s.%s" % (volname, nodename, brick_path_name)
storage_unit_volfile_path = os.path.join(VOLFILES_DIR, "%s.vol" % volfile_id)
client_volfile_path = os.path.join(VOLFILES_DIR, "%s.vol" % volname)
info_file_path = os.path.join(VOLINFO_DIR, "%s.info" % volname)
data = {}
with open(info_file_path) as info_file:
data = json.load(info_file)
create_brick_volfile(storage_unit_volfile_path, volname, volume_id, brick_path, data)
create_client_volfile(client_volfile_path, data)
# UID is stored at the time of installation in configmap.
uid = None
with open(os.path.join(VOLINFO_DIR, "uid")) as uid_file:
uid = uid_file.read()
# Send Analytics Tracker
    # The information from these analytics helps developers understand
    # usage and improve the project.
send_analytics_tracker("server", uid)
return Proc(
"glusterfsd",
"/opt/sbin/glusterfsd",
[
"-N",
"--volfile-id", volfile_id,
"-p", "/var/run/gluster/glusterfsd-%s.pid" % brick_path_name,
"-S", "/var/run/gluster/brick.socket",
"--brick-name", brick_path,
"-l", "-", # Log to stderr
"--xlator-option",
"*-posix.glusterd-uuid=%s" % os.environ["NODEID"],
"--process-name", "brick",
"--brick-port", "24007",
"--xlator-option",
"%s-server.listen-port=24007" % volname,
"-f", storage_unit_volfile_path
]
)
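# Worked example (hypothetical values, for illustration only) showing how
# start_args() derives the volfile id from its environment variables:
#   VOLUME=storage-pool-1, HOSTNAME=server-0, BRICK_PATH=/bricks/storage-pool-1/data
#   brick_path_name -> "bricks-storage-pool-1-data"
#   volfile_id      -> "storage-pool-1.server-0.bricks-storage-pool-1-data"
#   volfile path    -> "/var/lib/kadalu/volfiles/storage-pool-1.server-0.bricks-storage-pool-1-data.vol"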
----- open-mmlab/mmaction2 :: /mmaction/models/backbones/x3d.py (Apache-2.0) -----
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, Swish, build_activation_layer
from mmengine.logging import MMLogger
from mmengine.model.weight_init import constant_init, kaiming_init
from mmengine.runner import load_checkpoint
from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm
from mmaction.registry import MODELS
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.bottleneck = self._round_width(channels, reduction)
self.fc1 = nn.Conv3d(
channels, self.bottleneck, kernel_size=1, padding=0)
self.relu = nn.ReLU()
self.fc2 = nn.Conv3d(
self.bottleneck, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
@staticmethod
def _round_width(width, multiplier, min_width=8, divisor=8):
"""Round width of filters based on width multiplier."""
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width,
int(width + divisor / 2) // divisor * divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (Tensor): The input data.
Returns:
Tensor: The output of the module.
"""
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class BlockX3D(nn.Module):
"""BlockX3D 3d building block for X3D.
Args:
        inplanes (int): Number of channels for the input of the first conv3d layer.
        planes (int): Number of channels produced by some norm/conv3d layers.
        outplanes (int): Number of channels produced by the final conv3d layer.
spatial_stride (int): Spatial stride in the conv3d layer. Default: 1.
downsample (nn.Module | None): Downsample layer. Default: None.
se_ratio (float | None): The reduction ratio of squeeze and excitation
unit. If set as None, it means not using SE unit. Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict): Config dict for convolution layer.
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type``,
Default: ``dict(type='BN3d')``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU')``.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
def __init__(self,
inplanes,
planes,
outplanes,
spatial_stride=1,
downsample=None,
se_ratio=None,
use_swish=True,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super().__init__()
self.inplanes = inplanes
self.planes = planes
self.outplanes = outplanes
self.spatial_stride = spatial_stride
self.downsample = downsample
self.se_ratio = se_ratio
self.use_swish = use_swish
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.act_cfg_swish = dict(type='Swish')
self.with_cp = with_cp
self.conv1 = ConvModule(
in_channels=inplanes,
out_channels=planes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
# Here we use the channel-wise conv
self.conv2 = ConvModule(
in_channels=planes,
out_channels=planes,
kernel_size=3,
stride=(1, self.spatial_stride, self.spatial_stride),
padding=1,
groups=planes,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.swish = Swish()
self.conv3 = ConvModule(
in_channels=planes,
out_channels=outplanes,
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
if self.se_ratio is not None:
self.se_module = SEModule(planes, self.se_ratio)
self.relu = build_activation_layer(self.act_cfg)
def forward(self, x):
"""Defines the computation performed at every call."""
def _inner_forward(x):
"""Forward wrapper for utilizing checkpoint."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.se_ratio is not None:
out = self.se_module(out)
out = self.swish(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
# We do not support initialize with 2D pretrain weight for X3D
@MODELS.register_module()
class X3D(nn.Module):
"""X3D backbone. https://arxiv.org/pdf/2004.04730.pdf.
Args:
gamma_w (float): Global channel width expansion factor. Default: 1.
gamma_b (float): Bottleneck channel width expansion factor. Default: 1.
gamma_d (float): Network depth expansion factor. Default: 1.
pretrained (str | None): Name of pretrained model. Default: None.
in_channels (int): Channel num of input features. Default: 3.
num_stages (int): Resnet stages. Default: 4.
spatial_strides (Sequence[int]):
Spatial strides of residual blocks of each stage.
Default: ``(1, 2, 2, 2)``.
frozen_stages (int): Stages to be frozen (all param fixed). If set to
-1, it means not freezing any parameters. Default: -1.
se_style (str): The style of inserting SE modules into BlockX3D, 'half'
denotes insert into half of the blocks, while 'all' denotes insert
into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and excitation
unit. If set as None, it means not using SE unit. Default: 1 / 16.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict): Config for conv layers. required keys are ``type``
Default: ``dict(type='Conv3d')``.
norm_cfg (dict): Config for norm layers. required keys are ``type`` and
``requires_grad``.
Default: ``dict(type='BN3d', requires_grad=True)``.
act_cfg (dict): Config dict for activation layer.
Default: ``dict(type='ReLU', inplace=True)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool):
Whether to use zero initialization for residual block,
Default: True.
kwargs (dict, optional): Key arguments for "make_res_layer".
"""
def __init__(self,
gamma_w=1.0,
gamma_b=1.0,
gamma_d=1.0,
pretrained=None,
in_channels=3,
num_stages=4,
spatial_strides=(2, 2, 2, 2),
frozen_stages=-1,
se_style='half',
se_ratio=1 / 16,
use_swish=True,
conv_cfg=dict(type='Conv3d'),
norm_cfg=dict(type='BN3d', requires_grad=True),
act_cfg=dict(type='ReLU', inplace=True),
norm_eval=False,
with_cp=False,
zero_init_residual=True,
**kwargs):
super().__init__()
self.gamma_w = gamma_w
self.gamma_b = gamma_b
self.gamma_d = gamma_d
self.pretrained = pretrained
self.in_channels = in_channels
# Hard coded, can be changed by gamma_w
self.base_channels = 24
self.stage_blocks = [1, 2, 5, 3]
# apply parameters gamma_w and gamma_d
self.base_channels = self._round_width(self.base_channels,
self.gamma_w)
self.stage_blocks = [
self._round_repeats(x, self.gamma_d) for x in self.stage_blocks
]
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.spatial_strides = spatial_strides
assert len(spatial_strides) == num_stages
self.frozen_stages = frozen_stages
self.se_style = se_style
assert self.se_style in ['all', 'half']
self.se_ratio = se_ratio
assert (self.se_ratio is None) or (self.se_ratio > 0)
self.use_swish = use_swish
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
self.block = BlockX3D
self.stage_blocks = self.stage_blocks[:num_stages]
self.layer_inplanes = self.base_channels
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
spatial_stride = spatial_strides[i]
inplanes = self.base_channels * 2**i
planes = int(inplanes * self.gamma_b)
res_layer = self.make_res_layer(
self.block,
self.layer_inplanes,
inplanes,
planes,
num_blocks,
spatial_stride=spatial_stride,
se_style=self.se_style,
se_ratio=self.se_ratio,
use_swish=self.use_swish,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
act_cfg=self.act_cfg,
with_cp=with_cp,
**kwargs)
self.layer_inplanes = inplanes
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.base_channels * 2**(len(self.stage_blocks) - 1)
self.conv5 = ConvModule(
self.feat_dim,
int(self.feat_dim * self.gamma_b),
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.feat_dim = int(self.feat_dim * self.gamma_b)
@staticmethod
def _round_width(width, multiplier, min_depth=8, divisor=8):
"""Round width of filters based on width multiplier."""
if not multiplier:
return width
width *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth,
int(width + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * width:
new_filters += divisor
return int(new_filters)
@staticmethod
def _round_repeats(repeats, multiplier):
"""Round number of layers based on depth multiplier."""
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
# the module is parameterized with gamma_b
# no temporal_stride
def make_res_layer(self,
block,
layer_inplanes,
inplanes,
planes,
blocks,
spatial_stride=1,
se_style='half',
se_ratio=None,
use_swish=True,
norm_cfg=None,
act_cfg=None,
conv_cfg=None,
with_cp=False,
**kwargs):
"""Build residual layer for ResNet3D.
Args:
block (nn.Module): Residual module to be built.
layer_inplanes (int): Number of channels for the input feature
of the res layer.
inplanes (int): Number of channels for the input feature in each
block, which equals to base_channels * gamma_w.
planes (int): Number of channels for the output feature in each
block, which equals to base_channel * gamma_w * gamma_b.
blocks (int): Number of residual blocks.
spatial_stride (int): Spatial strides in residual and conv layers.
Default: 1.
se_style (str): The style of inserting SE modules into BlockX3D,
'half' denotes insert into half of the blocks, while 'all'
denotes insert into all blocks. Default: 'half'.
se_ratio (float | None): The reduction ratio of squeeze and
excitation unit. If set as None, it means not using SE unit.
Default: None.
use_swish (bool): Whether to use swish as the activation function
before and after the 3x3x3 conv. Default: True.
conv_cfg (dict | None): Config for norm layers. Default: None.
norm_cfg (dict | None): Config for norm layers. Default: None.
act_cfg (dict | None): Config for activate layers. Default: None.
with_cp (bool | None): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
Returns:
nn.Module: A residual layer for the given config.
"""
downsample = None
if spatial_stride != 1 or layer_inplanes != inplanes:
downsample = ConvModule(
layer_inplanes,
inplanes,
kernel_size=1,
stride=(1, spatial_stride, spatial_stride),
padding=0,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
use_se = [False] * blocks
if self.se_style == 'all':
use_se = [True] * blocks
elif self.se_style == 'half':
use_se = [i % 2 == 0 for i in range(blocks)]
else:
raise NotImplementedError
layers = []
layers.append(
block(
layer_inplanes,
planes,
inplanes,
spatial_stride=spatial_stride,
downsample=downsample,
se_ratio=se_ratio if use_se[0] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
inplanes,
spatial_stride=1,
se_ratio=se_ratio if use_se[i] else None,
use_swish=use_swish,
norm_cfg=norm_cfg,
conv_cfg=conv_cfg,
act_cfg=act_cfg,
with_cp=with_cp,
**kwargs))
return nn.Sequential(*layers)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer."""
self.conv1_s = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=(1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1),
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None)
self.conv1_t = ConvModule(
self.base_channels,
self.base_channels,
kernel_size=(5, 1, 1),
stride=(1, 1, 1),
padding=(2, 0, 0),
groups=self.base_channels,
bias=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def _freeze_stages(self):
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1_s.eval()
self.conv1_t.eval()
for param in self.conv1_s.parameters():
param.requires_grad = False
for param in self.conv1_t.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = MMLogger.get_current_instance()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, BlockX3D):
constant_init(m.conv3.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input
samples extracted by the backbone.
"""
x = self.conv1_s(x)
x = self.conv1_t(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
x = res_layer(x)
x = self.conv5(x)
return x
def train(self, mode=True):
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
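# Usage sketch (illustrative; not part of the file): build the backbone directly
# and push a dummy clip through it. The gamma values below are example settings
# chosen for illustration, not values prescribed by this file; a working
# mmaction2 / mmcv / PyTorch install is assumed.
if __name__ == "__main__":
import torch
model = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2)
model.init_weights()
model.eval()
clip = torch.randn(1, 3, 16, 224, 224)  # (batch, channels, frames, height, width)
with torch.no_grad():
feat = model(clip)
print(feat.shape)  # 5D feature map produced by the final conv5 stage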
----- Opentrons/opentrons :: /api/tests/opentrons/protocol_engine/execution/test_movement_handler.py (Apache-2.0) -----
"""MovementHandler command subject."""
import pytest
from decoy import Decoy
from typing import NamedTuple
from opentrons.types import MountType, Point, DeckSlotName
from opentrons.hardware_control import API as HardwareAPI
from opentrons.hardware_control.types import CriticalPoint
from opentrons.motion_planning import Waypoint
from opentrons.protocol_engine.types import (
DeckPoint,
MovementAxis,
WellLocation,
WellOrigin,
WellOffset,
DeckSlotLocation,
CurrentWell,
MotorAxis,
)
from opentrons.protocol_engine.state import (
StateStore,
PipetteLocationData,
)
from opentrons.protocol_engine.execution.movement import MovementHandler
from opentrons.protocol_engine.execution.thermocycler_movement_flagger import (
ThermocyclerMovementFlagger,
)
from opentrons.protocol_engine.execution.heater_shaker_movement_flagger import (
HeaterShakerMovementFlagger,
)
from opentrons.protocol_engine.execution.gantry_mover import GantryMover
@pytest.fixture
def hardware_api(decoy: Decoy) -> HardwareAPI:
"""Get a mock in the shape of a HardwareAPI."""
return decoy.mock(cls=HardwareAPI)
@pytest.fixture
def state_store(decoy: Decoy) -> StateStore:
"""Get a mock in the shape of a StateStore."""
return decoy.mock(cls=StateStore)
@pytest.fixture
def thermocycler_movement_flagger(decoy: Decoy) -> ThermocyclerMovementFlagger:
"""Get a mock in the shape of a ThermocyclerMovementFlagger."""
return decoy.mock(cls=ThermocyclerMovementFlagger)
@pytest.fixture
def heater_shaker_movement_flagger(decoy: Decoy) -> HeaterShakerMovementFlagger:
"""Get a mock in the shape of a HeaterShakerMovementFlagger."""
return decoy.mock(cls=HeaterShakerMovementFlagger)
@pytest.fixture
def mock_gantry_mover(decoy: Decoy) -> GantryMover:
"""Get a mock in the shape of a GantryMover."""
return decoy.mock(cls=GantryMover)
@pytest.fixture
def subject(
state_store: StateStore,
hardware_api: HardwareAPI,
thermocycler_movement_flagger: ThermocyclerMovementFlagger,
heater_shaker_movement_flagger: HeaterShakerMovementFlagger,
mock_gantry_mover: GantryMover,
) -> MovementHandler:
"""Create a MovementHandler with its dependencies mocked out."""
return MovementHandler(
state_store=state_store,
hardware_api=hardware_api,
thermocycler_movement_flagger=thermocycler_movement_flagger,
heater_shaker_movement_flagger=heater_shaker_movement_flagger,
gantry_mover=mock_gantry_mover,
)
async def test_move_to_well(
decoy: Decoy,
state_store: StateStore,
thermocycler_movement_flagger: ThermocyclerMovementFlagger,
heater_shaker_movement_flagger: HeaterShakerMovementFlagger,
mock_gantry_mover: GantryMover,
subject: MovementHandler,
) -> None:
"""Move requests should call hardware controller with movement data."""
well_location = WellLocation(
origin=WellOrigin.BOTTOM,
offset=WellOffset(x=0, y=0, z=1),
)
decoy.when(state_store.labware.get_location(labware_id="labware-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
)
decoy.when(
state_store.modules.get_heater_shaker_movement_restrictors()
).then_return([])
decoy.when(state_store.geometry.get_ancestor_slot_name("labware-id")).then_return(
DeckSlotName.SLOT_1
)
decoy.when(state_store.tips.get_pipette_channels("pipette-id")).then_return(1)
decoy.when(state_store.labware.is_tiprack("labware-id")).then_return(False)
decoy.when(
state_store.motion.get_pipette_location(
pipette_id="pipette-id",
current_well=None,
)
).then_return(
PipetteLocationData(
mount=MountType.LEFT,
critical_point=CriticalPoint.FRONT_NOZZLE,
)
)
decoy.when(
await mock_gantry_mover.get_position(
pipette_id="pipette-id",
)
).then_return(Point(1, 1, 1))
decoy.when(mock_gantry_mover.get_max_travel_z(pipette_id="pipette-id")).then_return(
42.0
)
decoy.when(
state_store.pipettes.get_movement_speed(
pipette_id="pipette-id", requested_speed=45.6
)
).then_return(39339.5)
decoy.when(
state_store.motion.get_movement_waypoints_to_well(
origin=Point(1, 1, 1),
origin_cp=CriticalPoint.FRONT_NOZZLE,
max_travel_z=42.0,
pipette_id="pipette-id",
labware_id="labware-id",
well_name="B2",
well_location=well_location,
current_well=None,
force_direct=True,
minimum_z_height=12.3,
)
).then_return(
[Waypoint(Point(1, 2, 3), CriticalPoint.XY_CENTER), Waypoint(Point(4, 5, 6))]
)
decoy.when(
await mock_gantry_mover.move_to(
pipette_id="pipette-id",
waypoints=[
Waypoint(Point(1, 2, 3), CriticalPoint.XY_CENTER),
Waypoint(Point(4, 5, 6)),
],
speed=39339.5,
),
).then_return(Point(4, 5, 6))
result = await subject.move_to_well(
pipette_id="pipette-id",
labware_id="labware-id",
well_name="B2",
well_location=well_location,
force_direct=True,
minimum_z_height=12.3,
speed=45.6,
)
assert result == Point(x=4, y=5, z=6)
decoy.verify(
await thermocycler_movement_flagger.raise_if_labware_in_non_open_thermocycler(
labware_parent=DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
),
heater_shaker_movement_flagger.raise_if_movement_restricted(
hs_movement_restrictors=[],
destination_slot=1,
is_multi_channel=False,
destination_is_tip_rack=False,
),
)
async def test_move_to_well_from_starting_location(
decoy: Decoy,
state_store: StateStore,
thermocycler_movement_flagger: ThermocyclerMovementFlagger,
heater_shaker_movement_flagger: HeaterShakerMovementFlagger,
mock_gantry_mover: GantryMover,
subject: MovementHandler,
) -> None:
"""It should be able to move to a well from a start location."""
well_location = WellLocation(
origin=WellOrigin.BOTTOM,
offset=WellOffset(x=0, y=0, z=1),
)
current_well = CurrentWell(
pipette_id="pipette-id",
labware_id="labware-id",
well_name="B2",
)
decoy.when(state_store.labware.get_location(labware_id="labware-id")).then_return(
DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
)
decoy.when(
state_store.modules.get_heater_shaker_movement_restrictors()
).then_return([])
decoy.when(state_store.geometry.get_ancestor_slot_name("labware-id")).then_return(
DeckSlotName.SLOT_1
)
decoy.when(state_store.tips.get_pipette_channels("pipette-id")).then_return(1)
decoy.when(state_store.labware.is_tiprack("labware-id")).then_return(False)
decoy.when(
state_store.motion.get_pipette_location(
pipette_id="pipette-id",
current_well=current_well,
)
).then_return(
PipetteLocationData(
mount=MountType.RIGHT,
critical_point=CriticalPoint.XY_CENTER,
)
)
decoy.when(
await mock_gantry_mover.get_position(
pipette_id="pipette-id",
)
).then_return(Point(1, 2, 5))
decoy.when(mock_gantry_mover.get_max_travel_z(pipette_id="pipette-id")).then_return(
42.0
)
decoy.when(
state_store.motion.get_movement_waypoints_to_well(
current_well=current_well,
origin=Point(1, 2, 5),
origin_cp=CriticalPoint.XY_CENTER,
max_travel_z=42.0,
pipette_id="pipette-id",
labware_id="labware-id",
well_name="B2",
well_location=well_location,
force_direct=False,
minimum_z_height=None,
)
).then_return([Waypoint(Point(1, 2, 3), CriticalPoint.XY_CENTER)])
decoy.when(
state_store.pipettes.get_movement_speed(
pipette_id="pipette-id", requested_speed=None
)
).then_return(39339.5)
decoy.when(
await mock_gantry_mover.move_to(
pipette_id="pipette-id",
waypoints=[Waypoint(Point(1, 2, 3), CriticalPoint.XY_CENTER)],
speed=39339.5,
),
).then_return(Point(4, 5, 6))
result = await subject.move_to_well(
pipette_id="pipette-id",
labware_id="labware-id",
well_name="B2",
well_location=well_location,
current_well=current_well,
)
assert result == Point(4, 5, 6)
decoy.verify(
await thermocycler_movement_flagger.raise_if_labware_in_non_open_thermocycler(
labware_parent=DeckSlotLocation(slotName=DeckSlotName.SLOT_1)
),
heater_shaker_movement_flagger.raise_if_movement_restricted(
hs_movement_restrictors=[],
destination_slot=1,
is_multi_channel=False,
destination_is_tip_rack=False,
),
)
class MoveRelativeSpec(NamedTuple):
"""Test data for move_relative."""
axis: MovementAxis
expected_delta: Point
distance: float = 42.0
@pytest.mark.parametrize(
MoveRelativeSpec._fields,
[
MoveRelativeSpec(
axis=MovementAxis.X,
expected_delta=Point(x=42.0, y=0, z=0),
),
MoveRelativeSpec(
axis=MovementAxis.Y,
expected_delta=Point(x=0, y=42.0, z=0),
),
MoveRelativeSpec(
axis=MovementAxis.Z,
expected_delta=Point(x=0, y=0, z=42.0),
),
],
)
async def test_move_relative(
decoy: Decoy,
state_store: StateStore,
mock_gantry_mover: GantryMover,
subject: MovementHandler,
axis: MovementAxis,
expected_delta: Point,
distance: float,
) -> None:
"""Test that move_relative triggers a relative move with the HardwareAPI."""
decoy.when(
await mock_gantry_mover.move_relative(
pipette_id="pipette-id",
delta=expected_delta,
speed=39339.5,
)
).then_return(Point(x=1, y=2, z=3))
decoy.when(
state_store.pipettes.get_movement_speed(pipette_id="pipette-id")
).then_return(39339.5)
result = await subject.move_relative(
pipette_id="pipette-id",
axis=axis,
distance=distance,
)
assert result == Point(x=1, y=2, z=3)
async def test_move_to_coordinates(
decoy: Decoy,
state_store: StateStore,
mock_gantry_mover: GantryMover,
subject: MovementHandler,
) -> None:
"""Test that move_to_coordinates correctly calls api.move_to."""
current_position = Point(4.44, 5.55, 6.66)
destination_deck = DeckPoint(x=1.11, y=2.22, z=3.33)
destination_point = Point(1.11, 2.22, 3.33)
planned_waypoint_1 = Waypoint(position=Point(3, 1, 4), critical_point=None)
planned_waypoint_2 = Waypoint(
position=Point(1, 5, 9), critical_point=CriticalPoint.XY_CENTER
)
decoy.when(
state_store.motion.get_pipette_location(
pipette_id="pipette-id",
)
).then_return(
PipetteLocationData(
mount=MountType.RIGHT,
critical_point=CriticalPoint.XY_CENTER,
)
)
decoy.when(
await mock_gantry_mover.get_position(pipette_id="pipette-id")
).then_return(current_position)
decoy.when(mock_gantry_mover.get_max_travel_z(pipette_id="pipette-id")).then_return(
5678
)
decoy.when(
state_store.motion.get_movement_waypoints_to_coords(
origin=current_position,
dest=destination_point,
max_travel_z=5678,
direct=True,
additional_min_travel_z=1234,
)
).then_return([planned_waypoint_1, planned_waypoint_2])
decoy.when(
state_store.pipettes.get_movement_speed(
pipette_id="pipette-id", requested_speed=567
)
).then_return(39339.5)
decoy.when(
await mock_gantry_mover.move_to(
pipette_id="pipette-id",
waypoints=[planned_waypoint_1, planned_waypoint_2],
speed=39339.5,
)
).then_return(Point(x=1, y=5, z=9))
result = await subject.move_to_coordinates(
pipette_id="pipette-id",
deck_coordinates=destination_deck,
direct=True,
additional_min_travel_z=1234,
speed=567,
)
assert result == Point(x=1, y=5, z=9)
async def test_retract_axis(
decoy: Decoy,
state_store: StateStore,
mock_gantry_mover: GantryMover,
subject: MovementHandler,
) -> None:
"""It should delegate to gantry mover to retract the specified axis."""
await subject.retract_axis(axis=MotorAxis.RIGHT_Z)
decoy.verify(await mock_gantry_mover.retract_axis(MotorAxis.RIGHT_Z), times=1)
----- microsoftgraph/msgraph-sdk-python :: /msgraph/generated/models/security/event_propagation_status.py (MIT) -----
from enum import Enum
class EventPropagationStatus(str, Enum):
None_ = "none",
InProcessing = "inProcessing",
Failed = "failed",
Success = "success",
UnknownFutureValue = "unknownFutureValue",
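# Usage sketch (illustrative): because the enum mixes in str, members can be
# looked up from, and compared against, the serialized string values.
if __name__ == "__main__":
status = EventPropagationStatus("inProcessing")
assert status is EventPropagationStatus.InProcessing
assert status == "inProcessing"
print(status.value)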
----- online-ml/river :: /river/covariance/test_emp.py (BSD-3-Clause) -----
from __future__ import annotations
import math
import random
import numpy as np
import pandas as pd
import pytest
from river import covariance, stream
@pytest.mark.parametrize(
"ddof",
[
pytest.param(
ddof,
id=f"{ddof=}",
)
for ddof in (0, 1, 2)
],
)
def test_covariance_revert(ddof):
X = np.random.random((100, 5))
X1 = X[: len(X) // 2]
X2 = X[len(X) // 2 :]
C1 = covariance.EmpiricalCovariance(ddof=ddof)
for x, _ in stream.iter_array(X1):
C1.update(x)
C2 = covariance.EmpiricalCovariance(ddof=ddof)
for x, _ in stream.iter_array(X):
C2.update(x)
for x, _ in stream.iter_array(X2):
C2.revert(x)
for k in C1._cov:
assert math.isclose(C1._cov[k].get(), C2._cov[k].get())
@pytest.mark.parametrize(
"ddof",
[
pytest.param(
ddof,
id=f"{ddof=}",
)
for ddof in (0, 1, 2)
],
)
def test_covariance_update_shuffled(ddof):
C1 = covariance.EmpiricalCovariance(ddof=ddof)
C2 = covariance.EmpiricalCovariance(ddof=ddof)
X = np.random.random((100, 5))
for x, _ in stream.iter_array(X):
C1.update(x)
C2.update({i: x[i] for i in random.sample(list(x.keys()), k=len(x))})
for i, j in C1._cov:
assert math.isclose(C1[i, j].get(), C2[i, j].get())
def test_covariance_update_sampled():
# NOTE: this test only works with ddof=1 because pandas ignores it if there are missing values
ddof = 1
cov = covariance.EmpiricalCovariance(ddof=ddof)
X = np.random.random((100, 5))
samples = []
for x, _ in stream.iter_array(X):
sample = {i: x[i] for i in random.sample(list(x.keys()), k=len(x) - 1)}
cov.update(sample)
samples.append(sample)
pd_cov = pd.DataFrame(samples).cov(ddof=ddof)
for i, j in cov._cov:
assert math.isclose(cov[i, j].get(), pd_cov.loc[i, j])
@pytest.mark.parametrize(
"ddof",
[
pytest.param(
ddof,
id=f"{ddof=}",
)
for ddof in [0, 1]
],
)
def test_covariance_update_many(ddof):
cov = covariance.EmpiricalCovariance(ddof=ddof)
p = 5
X_all = pd.DataFrame(columns=range(p))
for _ in range(p):
n = np.random.randint(1, 31)
X = pd.DataFrame(np.random.random((n, p)))
cov.update_many(X)
X_all = pd.concat((X_all, X)).astype(float)
pd_cov = X_all.cov(ddof=ddof)
for i, j in cov._cov:
assert math.isclose(cov[i, j].get(), pd_cov.loc[i, j])
@pytest.mark.parametrize(
"ddof",
[
pytest.param(
ddof,
id=f"{ddof=}",
)
for ddof in [0, 1]
],
)
def test_covariance_update_many_shuffled(ddof):
cov = covariance.EmpiricalCovariance(ddof=ddof)
p = 5
X_all = pd.DataFrame(columns=range(p))
for _ in range(p):
n = np.random.randint(5, 31)
X = pd.DataFrame(np.random.random((n, p))).sample(p, axis="columns")
cov.update_many(X)
X_all = pd.concat((X_all, X)).astype(float)
pd_cov = X_all.cov(ddof=ddof)
for i, j in cov._cov:
assert math.isclose(cov[i, j].get(), pd_cov.loc[i, j])
def test_covariance_update_many_sampled():
# NOTE: this test only works with ddof=1 because pandas ignores it if there are missing values
ddof = 1
cov = covariance.EmpiricalCovariance(ddof=ddof)
p = 5
X_all = pd.DataFrame(columns=range(p))
for _ in range(p):
n = np.random.randint(5, 31)
X = pd.DataFrame(np.random.random((n, p))).sample(p - 1, axis="columns")
cov.update_many(X)
X_all = pd.concat((X_all, X)).astype(float)
pd_cov = X_all.cov(ddof=ddof)
for i, j in cov._cov:
assert math.isclose(cov[i, j].get(), pd_cov.loc[i, j])
def test_precision_update_shuffled():
C1 = covariance.EmpiricalPrecision()
C2 = covariance.EmpiricalPrecision()
X = np.random.random((100, 5))
for x, _ in stream.iter_array(X):
C1.update(x)
C2.update({i: x[i] for i in random.sample(list(x.keys()), k=len(x))})
for i, j in C1._inv_cov:
assert math.isclose(C1[i, j], C2[i, j])
def test_precision_update_many_mini_batches():
C1 = covariance.EmpiricalPrecision()
C2 = covariance.EmpiricalPrecision()
X = pd.DataFrame(np.random.random((100, 5)))
C1.update_many(X)
for Xb in np.split(X, 5):
C2.update_many(Xb)
for i, j in C1._inv_cov:
assert math.isclose(C1[i, j], C2[i, j])
def test_precision_one_many_same():
one = covariance.EmpiricalPrecision()
many = covariance.EmpiricalPrecision()
X = np.random.random((100, 5))
for x, _ in stream.iter_array(X):
one.update(x)
many.update_many(pd.DataFrame(X))
for i, j in one._inv_cov:
assert math.isclose(one[i, j], many[i, j])
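# Usage sketch (illustrative; mirrors the patterns exercised by the tests above):
# maintain a running covariance over a stream of dicts and read one entry.
if __name__ == "__main__":
import numpy as np
from river import covariance, stream
cov = covariance.EmpiricalCovariance(ddof=1)
for x, _ in stream.iter_array(np.random.random((50, 3))):
cov.update(x)
print(cov[0, 1].get())  # running covariance between features 0 and 1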
----- alipay/alipay-sdk-python-all :: /alipay/aop/api/domain/AlipayPcreditHuabeiFqqrcodeLogisticsQueryModel.py (Apache-2.0) -----
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.FqQrCodeExtendParams import FqQrCodeExtendParams
class AlipayPcreditHuabeiFqqrcodeLogisticsQueryModel(object):
def __init__(self):
self._fqqr_code_ext_info = None
self._merchant_id = None
self._out_request_no = None
self._process_id = None
self._source = None
@property
def fqqr_code_ext_info(self):
return self._fqqr_code_ext_info
@fqqr_code_ext_info.setter
def fqqr_code_ext_info(self, value):
if isinstance(value, FqQrCodeExtendParams):
self._fqqr_code_ext_info = value
else:
self._fqqr_code_ext_info = FqQrCodeExtendParams.from_alipay_dict(value)
@property
def merchant_id(self):
return self._merchant_id
@merchant_id.setter
def merchant_id(self, value):
self._merchant_id = value
@property
def out_request_no(self):
return self._out_request_no
@out_request_no.setter
def out_request_no(self, value):
self._out_request_no = value
@property
def process_id(self):
return self._process_id
@process_id.setter
def process_id(self, value):
self._process_id = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
def to_alipay_dict(self):
params = dict()
if self.fqqr_code_ext_info:
if hasattr(self.fqqr_code_ext_info, 'to_alipay_dict'):
params['fqqr_code_ext_info'] = self.fqqr_code_ext_info.to_alipay_dict()
else:
params['fqqr_code_ext_info'] = self.fqqr_code_ext_info
if self.merchant_id:
if hasattr(self.merchant_id, 'to_alipay_dict'):
params['merchant_id'] = self.merchant_id.to_alipay_dict()
else:
params['merchant_id'] = self.merchant_id
if self.out_request_no:
if hasattr(self.out_request_no, 'to_alipay_dict'):
params['out_request_no'] = self.out_request_no.to_alipay_dict()
else:
params['out_request_no'] = self.out_request_no
if self.process_id:
if hasattr(self.process_id, 'to_alipay_dict'):
params['process_id'] = self.process_id.to_alipay_dict()
else:
params['process_id'] = self.process_id
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayPcreditHuabeiFqqrcodeLogisticsQueryModel()
if 'fqqr_code_ext_info' in d:
o.fqqr_code_ext_info = d['fqqr_code_ext_info']
if 'merchant_id' in d:
o.merchant_id = d['merchant_id']
if 'out_request_no' in d:
o.out_request_no = d['out_request_no']
if 'process_id' in d:
o.process_id = d['process_id']
if 'source' in d:
o.source = d['source']
return o
|
7915d6428c85937525388aa679397e755c8d4190
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/st/ops/dynamic_shape/test_pad_dyn.py
|
373f806e3aef43d07f3ac0fc6f0c4bc14097227b
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
test_pad_dyn.py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from functools import reduce
import numpy as np
import pytest
import mindspore
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
class PadNet(nn.Cell):
def __init__(self, paddings):
super(PadNet, self).__init__()
self.paddings = paddings
def construct(self, x):
return ops.pad(x, self.paddings)
def run_case():
paddings = ((1, 0), (0, 2))
paddings_ms = (0, 2, 1, 0)
shape = (4, 4)
shape_dyn = (None, 4)
sz = reduce(lambda a, b: a * b, shape)
x = np.arange(sz).reshape(shape).astype(np.float32)
expect = np.pad(x, paddings, mode="constant", constant_values=0)
x_dyn = Tensor(shape=shape_dyn, dtype=mindspore.float32)
net = PadNet(paddings_ms)
net.set_inputs(x_dyn)
output = net(Tensor(x))
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_pad_dyn_cpu():
"""
Feature: test Pad dynamic shape on CPU.
Description: inputs is dynamic shape.
Expectation: the result match with expect
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
run_case()
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
run_case()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pad_dyn_gpu():
"""
Feature: test Pad dynamic shape on GPU.
Description: inputs is dynamic shape.
Expectation: the result match with expect
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
run_case()
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
run_case()
|
9a29755f64af2d5d38d6d892faafc1a0225229e3
|
856e9a8afcb81ae66dd998b0d2cc3556c9f315ea
|
/dexy/filters/easy.py
|
0ce3121d1d6b5453c326c02a980bbc453277027d
|
[
"MIT"
] |
permissive
|
dexy/dexy
|
1d5c999830de4663c05a09f4cd00b1628dfc8d46
|
323c1806e51f75435e11d2265703e68f46c8aef3
|
refs/heads/develop
| 2023-06-10T08:02:45.076551
| 2021-02-28T22:40:41
| 2021-02-28T22:40:41
| 1,506,989
| 141
| 34
|
MIT
| 2020-06-15T17:44:50
| 2011-03-21T14:48:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
easy.py
|
# The easyhtml filter is defined in dexy.filters.fluid_html
from dexy.filter import DexyFilter
from pygments.formatters import LatexFormatter
class EasyLatex(DexyFilter):
"""
Wraps your text in LaTeX article header/footer.
Easy way to generate a document which can be compiled using LaTeX (includes
Pygments syntax highlighting).
"""
aliases = ['easylatex']
_settings = {
'input-extensions' : ['.tex'],
'output-extensions' : ['.tex'],
'documentclass' : ("The document class to generate.", "article"),
'style' : ("The pygments style to use.", "default"),
'title' : ("Title of article.", ""),
'author' : ("Author of article.", ""),
'date' : ("Date of article.", ""),
'font' : ("The font size to use.", "11pt"),
'papersize' : ("The document class to generate.", "a4paper"),
"preamble" : ("Additional custom LaTeX content to include in header.", "")
}
def pygments_sty(self):
formatter = LatexFormatter(style=self.setting('style'))
return formatter.get_style_defs()
def process_text(self, input_text):
args = self.setting_values()
args['input'] = input_text
args['pygments'] = self.pygments_sty()
if self.setting('title'):
args['title'] = r"\title{%(title)s}" % args
args['maketitle'] = r"\maketitle"
else:
args['title'] = ""
args['maketitle'] = ""
if self.setting('date'):
args['date'] = r"\date{%(date)s}" % args
if self.setting('author'):
args['author'] = r"\author{%(author)s}" % args
return self.template % args
template = r"""\documentclass[%(font)s,%(papersize)s]{%(documentclass)s}
\usepackage{color}
\usepackage{fancyvrb}
%(pygments)s
%(preamble)s
%(title)s
%(author)s
%(date)s
\begin{document}
%(maketitle)s
%(input)s
\end{document}
"""
|
d9a73f6c379c29b90116cb899eeb98ae0715a5f3
|
4feb5744ab5a26aeeb04573e4944d2bf4d1a6a2a
|
/peeringdb_server/migrations/0098_alter_environmentsetting_setting.py
|
c9f0c12f2f7cdb53c712f4ec46fc2a23c9d4feee
|
[
"BSD-2-Clause"
] |
permissive
|
peeringdb/peeringdb
|
cb79f809c4bb8cc5192180366df1f05d8fc0111f
|
3f62b2d97c78ccf151fb1a5761637e28463b9541
|
refs/heads/master
| 2023-09-04T09:26:43.741086
| 2023-08-22T19:20:34
| 2023-08-22T19:20:34
| 60,563,174
| 311
| 121
|
BSD-2-Clause
| 2023-09-13T02:13:42
| 2016-06-06T21:49:25
|
Python
|
UTF-8
|
Python
| false
| false
| 4,970
|
py
|
0098_alter_environmentsetting_setting.py
|
# Generated by Django 3.2.16 on 2022-10-15 04:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("peeringdb_server", "0097_user_never_flag_for_deletion"),
]
operations = [
migrations.AlterField(
model_name="environmentsetting",
name="setting",
field=models.CharField(
choices=[
("API_THROTTLE_RATE_ANON", "API: Anonymous API throttle rate"),
("API_THROTTLE_RATE_USER", "API: Authenticated API throttle rate"),
(
"API_THROTTLE_MELISSA_RATE_ADMIN",
"API: Melissa request throttle rate for admin users",
),
(
"API_THROTTLE_MELISSA_ENABLED_ADMIN",
"API: Melissa request throttle enabled for admin users",
),
(
"API_THROTTLE_MELISSA_RATE_USER",
"API: Melissa request throttle rate for users",
),
(
"API_THROTTLE_MELISSA_ENABLED_USER",
"API: Melissa request throttle enabled for users",
),
(
"API_THROTTLE_MELISSA_RATE_ORG",
"API: Melissa request throttle rate for organizations",
),
(
"API_THROTTLE_MELISSA_ENABLED_ORG",
"API: Melissa request throttle enabled for organizations",
),
(
"API_THROTTLE_MELISSA_RATE_IP",
"API: Melissa request throttle rate for anonymous requests (ips)",
),
(
"API_THROTTLE_MELISSA_ENABLED_IP",
"API: Melissa request throttle enabled for anonymous requests (ips)",
),
(
"API_THROTTLE_REPEATED_REQUEST_THRESHOLD_CIDR",
"API: Repeated request throttle size threshold for ip blocks (bytes)",
),
(
"API_THROTTLE_REPEATED_REQUEST_RATE_CIDR",
"API: Repeated request throttle rate for ip blocks",
),
(
"API_THROTTLE_REPEATED_REQUEST_ENABLED_CIDR",
"API: Repeated request throttle enabled for ip blocks",
),
(
"API_THROTTLE_REPEATED_REQUEST_THRESHOLD_IP",
"API: Repeated request throttle size threshold for ip addresses (bytes)",
),
(
"API_THROTTLE_REPEATED_REQUEST_RATE_IP",
"API: Repeated request throttle rate for ip addresses",
),
(
"API_THROTTLE_REPEATED_REQUEST_ENABLED_IP",
"API: Repeated request throttle enabled for ip addresses",
),
(
"API_THROTTLE_REPEATED_REQUEST_THRESHOLD_USER",
"API: Repeated request throttle size threshold for authenticated users (bytes)",
),
(
"API_THROTTLE_REPEATED_REQUEST_RATE_USER",
"API: Repeated request throttle rate for authenticated users",
),
(
"API_THROTTLE_REPEATED_REQUEST_ENABLED_USER",
"API: Repeated request throttle enabled for authenticated users",
),
(
"API_THROTTLE_REPEATED_REQUEST_THRESHOLD_ORG",
"API: Repeated request throttle size threshold for organization api-keys (bytes)",
),
(
"API_THROTTLE_REPEATED_REQUEST_RATE_ORG",
"API: Repeated request throttle rate for organization api-keys",
),
(
"API_THROTTLE_REPEATED_REQUEST_ENABLED_ORG",
"API: Repeated request throttle enabled for organization api-keys",
),
(
"API_THROTTLE_RATE_ANON_MSG",
"API: Anonymous API throttle rate message",
),
(
"API_THROTTLE_RATE_USER_MSG",
"API: Authenticated API throttle rate message",
),
],
max_length=255,
unique=True,
),
),
]
|
97d1bffd97505f38c1f846911e0a320af36167e1
|
b4faab9b904d155ce6e781a675f972dcb810c008
|
/nunif/modules/fourier_unit.py
|
3eebf82fa9cc87a3ab3934b11ff252d19944d587
|
[
"MIT",
"CC-BY-NC-4.0",
"Apache-2.0"
] |
permissive
|
nagadomi/nunif
|
0c595d3e61f3c89082ce7481cfba139b85ac863d
|
6d4b92da09801572e984b05f6733d460b60250aa
|
refs/heads/master
| 2023-08-31T21:29:56.460275
| 2023-08-21T18:16:01
| 2023-08-21T18:16:01
| 202,088,108
| 486
| 59
|
MIT
| 2023-08-04T05:51:17
| 2019-08-13T07:23:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,319
|
py
|
fourier_unit.py
|
import torch
import torch.nn as nn
from torch.nn.utils.parametrizations import spectral_norm as _spectral_norm
# NOTE: This module does not support export to ONNX (at 2023-04, rfftn and irfftn)
class FourierUnit(nn.Module):
""" From LaMa: Resolution-robust Large Mask Inpainting with Fourier Convolutions
https://github.com/advimman/lama
Fast Fourier Convolution
https://github.com/pkumivision/FFC
"""
def __init__(self, in_channels, out_channels,
norm_layer=lambda dim: nn.BatchNorm2d(dim),
activation_layer=lambda dim: nn.ReLU(inplace=True),
spectral_norm=False, bias=False, residual=True):
super().__init__()
self.conv = torch.nn.Conv2d(in_channels * 2, out_channels * 2,
kernel_size=1, stride=1, padding=0, bias=bias)
if spectral_norm:
self.conv = _spectral_norm(self.conv)
self.act = activation_layer(out_channels * 2)
self.norm = norm_layer(out_channels * 2)
if residual:
if in_channels == out_channels:
self.identity = nn.Identity()
else:
self.identity = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
else:
self.identity = None
def forward(self, x):
B, C, H, W = x.shape
# (B, C, H, W/2+1, 2)
if x.dtype == torch.float16:
ffted = torch.fft.rfftn(x.to(torch.float32), dim=(-2, -1), norm="ortho")
ffted = torch.stack((ffted.real, ffted.imag), dim=-1).to(torch.float16)
else:
ffted = torch.fft.rfftn(x, dim=(-2, -1), norm="ortho")
ffted = torch.stack((ffted.real, ffted.imag), dim=-1)
# (B, C, 2, H, W/2+1)
ffted = ffted.permute(0, 1, 4, 2, 3).contiguous()
# (B, C*2, H, W/2+1)
ffted = ffted.view((B, -1) + ffted.shape[3:])
# (B, OUT_C*2, H, W/2+1)
ffted = self.act(self.norm(self.conv(ffted)))
# (B, OUT_C, H, W/2+1, 2)
ffted = ffted.view((B, -1, 2) + ffted.shape[2:]).permute(0, 1, 3, 4, 2).contiguous()
if x.dtype == torch.float16:
# (B, OUT_C, H, W/2+1)
ffted = ffted.to(torch.float32)
ffted = torch.complex(ffted[..., 0], ffted[..., 1])
# (B, OUT_C, H, W)
output = torch.fft.irfftn(ffted, s=(H, W), dim=(-2, -1), norm="ortho").to(torch.float16)
else:
# (B, OUT_C, H, W/2+1)
ffted = torch.complex(ffted[..., 0], ffted[..., 1])
# (B, OUT_C, H, W)
output = torch.fft.irfftn(ffted, s=(H, W), dim=(-2, -1), norm="ortho")
if self.identity is not None:
output = output + self.identity(x)
return output
def FourierUnitSNLReLU(in_channels, out_channels, residual=True):
return FourierUnit(in_channels, out_channels,
norm_layer=lambda dim: nn.Identity(),
activation_layer=lambda dim: nn.LeakyReLU(0.2, inplace=True),
spectral_norm=True, bias=True, residual=residual)
if __name__ == "__main__":
x = torch.zeros((4, 8, 32, 32)).cuda()
fourier_unit = FourierUnitSNLReLU(8, 64).cuda()
z = fourier_unit(x)
print(z.shape)
|
5f0387e0636801b684ed4daf4f3187ab51fd3355
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/FireEyeHelix/Integrations/FireEyeHelix/test_data/constants.py
|
d706095e8e7986eabac6cb42ade5b8dc67a10b13
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
constants.py
|
DICT_1to5 = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 5}
DICT_NESTED_123 = {'nested': {'1': 1, '2': 2, '3': 3}}
DICT_LST_AAB2B = {'aa_b': [{'2': 2}, {'2': 3}], 'b': 4}
DICT_LST_NESTED = {'master': {'id': 1, 'assets': [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]}}
TRANS_DICT_134 = {'1': 'one', '3': 'three', '4': 'four'}
TRANS_DICT_NESTED_12 = {'nested.1': 'one', 'nested.2': 'two'}
TRANS_DICT_NESTED_VAL_12 = {'1': 'one.1', '2': 'two'}
TRANS_DICT_LST_A2B = {'aa_b': {'2': 'two'}, 'b': 'four'}
TRANS_DICT_LST_NESTED = {'master.id': 'Master.ID', 'master.assets': {'id': 'ID', 'name': 'Name'}}
|
f9ce63fbcd5748bc7e8a12377b4aac84af3dd528
|
adf3ca8f0ce8b3333d70c83118932d0e46d90182
|
/tiledb/tests/conftest.py
|
9465eb1656c5fa54078a6e0e0322754f62ec91c8
|
[
"MIT"
] |
permissive
|
TileDB-Inc/TileDB-Py
|
77ccce93e1607ce4280d57d2f715cf490aa9f0d3
|
223ee9939e23ecb618bd98d89dc9e874ebd47a2d
|
refs/heads/dev
| 2023-08-28T14:13:52.112262
| 2023-08-25T20:03:54
| 2023-08-26T01:19:46
| 91,851,641
| 167
| 35
|
MIT
| 2023-09-14T19:07:10
| 2017-05-19T23:06:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,512
|
py
|
conftest.py
|
import ctypes
import sys
import pytest
import tiledb
from .common import DiskTestCase
# fixture wrapper to use with pytest:
# mark.parametrize does not work with DiskTestCase subclasses
# (unittest.TestCase methods cannot take arguments)
@pytest.fixture(scope="class")
def checked_path():
dtc = DiskTestCase()
dtc.setup_method()
yield dtc
dtc.teardown_method()
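# A hypothetical usage sketch, not part of this conftest: it shows how a
# module-level test could combine the class-scoped `checked_path` fixture with
# `mark.parametrize`, which plain DiskTestCase subclasses cannot do. The test
# name and the `DiskTestCase.path()` helper are assumptions for illustration.
#
#     @pytest.mark.parametrize("sparse", [True, False])
#     def test_roundtrip(checked_path, sparse):
#         uri = checked_path.path("roundtrip_array")
#         ...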
if sys.platform != "win32":
@pytest.fixture(scope="function", autouse=True)
def no_output(capfd):
yield
# flush stdout
libc = ctypes.CDLL(None)
libc.fflush(None)
out, err = capfd.readouterr()
if out or err:
pytest.fail(f"Output captured: {out + err}")
def pytest_addoption(parser):
parser.addoption("--vfs", default="file")
parser.addoption("--vfs-config", default=None)
def pytest_configure(config):
# we need to try importing here so that we don't potentially cause
# a slowdown in the DenseArray/SparseArray.__new__ path when
# running `tiledb.open`.
try:
import tiledb.cloud # noqa: F401
except ImportError:
pass
# default must be set here rather than globally
pytest.tiledb_vfs = "file"
vfs_config(config)
def vfs_config(pytestconfig):
vfs_config_override = {}
vfs = pytestconfig.getoption("vfs")
if vfs == "s3":
pytest.tiledb_vfs = "s3"
vfs_config_override.update(
{
"vfs.s3.endpoint_override": "localhost:9999",
"vfs.s3.aws_access_key_id": "minio",
"vfs.s3.aws_secret_access_key": "miniosecretkey",
"vfs.s3.scheme": "https",
"vfs.s3.verify_ssl": False,
"vfs.s3.use_virtual_addressing": False,
}
)
vfs_config_arg = pytestconfig.getoption("--vfs-config", None)
if vfs_config_arg:
pass
tiledb._orig_ctx = tiledb.Ctx
def get_config(config):
final_config = {}
if isinstance(config, tiledb.Config):
final_config = config.dict()
elif config:
final_config = config
final_config.update(vfs_config_override)
return final_config
class PatchedCtx(tiledb.Ctx):
def __init__(self, config=None):
super().__init__(get_config(config))
class PatchedConfig(tiledb.Config):
def __init__(self, params=None):
super().__init__(get_config(params))
tiledb.Ctx = PatchedCtx
tiledb.Config = PatchedConfig
|
0d6a6812da72af86793e8334af0c9b82501a2af2
|
8475af3849ca19094d3aa95c9108d7f8ef3e2536
|
/shub/deploy_egg.py
|
4f514ca58f17333b1d3cd3737632881ab02e9a9c
|
[
"BSD-3-Clause"
] |
permissive
|
scrapinghub/shub
|
9b1b9df4caad48861707aca3685fe8b2544e433b
|
519809d7eb16212f2644f6e2bf1672b1e86a78c2
|
refs/heads/master
| 2023-09-03T01:43:35.014440
| 2023-04-14T16:02:25
| 2023-04-14T16:02:25
| 20,890,335
| 124
| 84
|
BSD-3-Clause
| 2023-04-14T14:43:21
| 2014-06-16T15:38:11
|
Python
|
UTF-8
|
Python
| false
| false
| 4,164
|
py
|
deploy_egg.py
|
from __future__ import absolute_import
import os
import tempfile
import click
from shub import utils, DEPLOY_DOCS_LINK
from shub.config import get_target_conf
from shub.exceptions import (BadParameterException, NotFoundException,
SubcommandException)
from shub.utils import (decompress_egg_files, download_from_pypi,
find_executable, run_cmd)
HELP = """
Build a Python egg from source and deploy it to Scrapy Cloud.
You can either deploy to your default target (as defined in scrapinghub.yml),
or explicitly supply a numerical project ID or a target defined in
scrapinghub.yml (see shub deploy).
By default, shub will try to build the egg using the current folder's setup.py.
You can also build the egg from a remote (git/mercurial/bazaar) repository by
using the --from-url option:
shub deploy-egg --from-url https://github.com/scrapinghub/shub.git
For git repositories, you may additionally specify the branch to be checked
out:
shub deploy-egg --from-url https://xy.git --git-branch my-feature
Alternatively, you can build the egg from a PyPI package:
shub deploy-egg --from-pypi shub
"""
SHORT_HELP = "[DEPRECATED] Build and deploy egg from source"
@click.command(help=HELP, short_help=SHORT_HELP)
@click.argument("target", required=False, default='default')
@click.option("--from-url", help="Git, bazaar or mercurial repository URL")
@click.option("--git-branch", help="Git branch to checkout")
@click.option("--from-pypi", help="Name of package on pypi")
def cli(target, from_url=None, git_branch=None, from_pypi=None):
click.secho(
"deploy-egg was deprecated, define the eggs you would like to deploy "
"in your scrapinghub.yml instead. See {}".format(DEPLOY_DOCS_LINK),
err=True, fg='yellow',
)
main(target, from_url, git_branch, from_pypi)
def main(target, from_url=None, git_branch=None, from_pypi=None):
targetconf = get_target_conf(target)
if from_pypi:
_fetch_from_pypi(from_pypi)
decompress_egg_files()
utils.build_and_deploy_eggs(targetconf.project_id, targetconf.endpoint,
targetconf.apikey)
return
if from_url:
_checkout(from_url, git_branch)
if not os.path.isfile('setup.py'):
error = "No setup.py -- are you running from a valid Python project?"
raise NotFoundException(error)
utils.build_and_deploy_egg(targetconf.project_id, targetconf.endpoint,
targetconf.apikey)
def _checkout(repo, git_branch=None, target_dir='egg-tmp-clone'):
tmpdir = tempfile.mkdtemp(prefix='shub-deploy-egg-from-url')
click.echo("Cloning the repository to a tmp folder...")
os.chdir(tmpdir)
vcs_commands = [
['git', 'clone', repo, target_dir],
['hg', 'clone', repo, target_dir],
['bzr', 'branch', repo, target_dir],
]
missing_exes = []
for cmd in vcs_commands:
exe = find_executable(cmd[0])
if not exe:
missing_exes.append(cmd[0])
continue
try:
run_cmd([exe] + cmd[1:])
except SubcommandException:
pass
else:
break
else:
if missing_exes:
click.secho(
"shub was unable to find the following VCS executables and "
"could not try to check out your repository with these: %s"
"" % ', '.join(missing_exes), fg='yellow')
raise BadParameterException(
"\nERROR: The provided repository URL is not valid: %s\n" % repo)
os.chdir(target_dir)
if git_branch:
try:
run_cmd([find_executable('git'), 'checkout', git_branch])
except SubcommandException:
raise BadParameterException("Branch %s is not valid" % git_branch)
click.echo("%s branch was checked out" % git_branch)
def _fetch_from_pypi(pkg):
tmpdir = tempfile.mkdtemp(prefix='shub-deploy-egg-from-pypi')
click.echo('Fetching %s from pypi' % pkg)
download_from_pypi(tmpdir, pkg=pkg)
click.echo('Package fetched successfully')
os.chdir(tmpdir)
|
7b6409568e8cf757fc5279e3e59f1604117b6bfc
|
98810fbf90a42028915a88bfac9fb8cb8681008e
|
/azure-devops/azext_devops/dev/repos/_format.py
|
f7d2becd123eedf0b045565658fa6d8166b2da8d
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"PSF-2.0",
"PostgreSQL",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"CC-BY-4.0",
"Python-2.0",
"MPL-1.1",
"OpenSSL",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"MPL-1.0",
"ISC",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Azure/azure-devops-cli-extension
|
ba87357a8243e1318f100791fc32acbb59448d05
|
bd34a6fd0658a15dadf6c09c7f6217ca5ffa662b
|
refs/heads/master
| 2023-08-29T10:56:54.228674
| 2023-07-17T04:37:06
| 2023-07-17T04:37:06
| 107,708,057
| 419
| 208
|
MIT
| 2023-08-02T02:10:10
| 2017-10-20T17:39:11
|
Python
|
UTF-8
|
Python
| false
| false
| 11,398
|
py
|
_format.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import OrderedDict
import dateutil.parser
import dateutil.tz
_PR_TITLE_TRUNCATION_LENGTH = 50
_WORK_ITEM_TITLE_TRUNCATION_LENGTH = 70
def transform_repo_policies_table_output(result):
table_output = []
for item in result:
table_output.append(_transform_repo_policy_request_row(item))
return table_output
def transform_repo_policy_table_output(result):
table_output = [_transform_repo_policy_request_row(result)]
return table_output
def _transform_repo_policy_request_row(row):
table_row = OrderedDict()
table_row['ID'] = row['id']
table_row['Name'] = _get_policy_display_name(row)
table_row['Is Blocking'] = row['isBlocking']
table_row['Is Enabled'] = row['isEnabled']
# this will break if the policy is applied across repositories, but that is not currently possible via the UI
table_row['Repository Id'] = row['settings']['scope'][0]['repositoryId']
if 'refName' in row['settings']['scope'][0]:
table_row['Branch'] = row['settings']['scope'][0]['refName']
else:
table_row['Branch'] = "All Branches"
return table_row
def _get_policy_display_name(row):
if 'displayName' in row['settings']:
return row['settings']['displayName']
return row['type']['displayName']
def transform_pull_requests_table_output(result):
table_output = []
for item in result:
table_output.append(_transform_pull_request_row(item))
return table_output
def transform_pull_request_table_output(result):
table_output = [_transform_pull_request_row(result)]
return table_output
def _transform_pull_request_row(row):
table_row = OrderedDict()
table_row['ID'] = row['pullRequestId']
table_row['Created'] = dateutil.parser.parse(row['creationDate']).astimezone(dateutil.tz.tzlocal()).date()
table_row['Creator'] = row['createdBy']['uniqueName']
title = row['title']
if len(title) > _PR_TITLE_TRUNCATION_LENGTH:
title = title[0:_PR_TITLE_TRUNCATION_LENGTH - 3] + '...'
table_row['Title'] = title
table_row['Status'] = row['status'].capitalize()
table_row['IsDraft'] = str(row['isDraft']).capitalize()
table_row['Repository'] = row['repository']['name']
return table_row
def transform_reviewers_table_output(result):
table_output = []
for item in sorted(result, key=_get_reviewer_table_key):
table_output.append(_transform_reviewer_row(item))
return table_output
def transform_reviewer_table_output(result):
table_output = [_transform_reviewer_row(result)]
return table_output
def _get_reviewer_table_key(row):
if row['isRequired']:
key = '0'
else:
key = '1'
key += row['displayName'].lower()
return key
_UNIQUE_NAME_GROUP_PREFIX = 'vstfs:///'
def _transform_reviewer_row(row):
table_row = OrderedDict()
table_row['Name'] = row['displayName']
if row['uniqueName'][0:len(_UNIQUE_NAME_GROUP_PREFIX)] != _UNIQUE_NAME_GROUP_PREFIX:
table_row['Email'] = row['uniqueName']
else:
table_row['Email'] = ' '
table_row['ID'] = row['id']
table_row['Vote'] = _get_vote_from_vote_number(int(row['vote']))
if row['isRequired']:
table_row['Required'] = 'True'
else:
table_row['Required'] = 'False'
return table_row
def transform_work_items_table_output(result):
table_output = []
for item in result:
table_output.append(_transform_work_items_row(item))
return table_output
def transform_work_item_table_output(result):
table_output = [_transform_work_items_row(result)]
return table_output
def _transform_work_items_row(row):
table_row = OrderedDict()
table_row['ID'] = row['id']
if 'fields' in row:
if 'System.WorkItemType' in row['fields']:
table_row['Type'] = row['fields']['System.WorkItemType']
else:
table_row['Type'] = ' '
if 'System.AssignedTo' in row['fields']:
table_row['Assigned To'] = row['fields']['System.AssignedTo']
else:
table_row['Assigned To'] = ' '
if 'System.State' in row['fields']:
table_row['State'] = row['fields']['System.State']
else:
table_row['State'] = ' '
if 'System.Title' in row['fields']:
title = row['fields']['System.Title']
if len(title) > _WORK_ITEM_TITLE_TRUNCATION_LENGTH:
title = title[0:_WORK_ITEM_TITLE_TRUNCATION_LENGTH - 3] + '...'
table_row['Title'] = title
else:
table_row['Title'] = ' '
else:
table_row['Assigned To'] = ' '
table_row['State'] = ' '
table_row['Title'] = ' '
return table_row
def _get_vote_from_vote_number(number):
if number == 10:
return 'Approved'
if number == 5:
return 'Approved with suggestions'
if number == -5:
return 'Waiting for author'
if number == -10:
return 'Rejected'
return ' '
def transform_policies_table_output(result):
from azext_devops.dev.common.identities import (ensure_display_names_in_cache,
get_display_name_from_identity_id)
from azext_devops.dev.common.services import get_first_vss_instance_uri
table_output = []
reviewer_ids = []
for item in result:
reviewer_id = get_required_reviewer_from_evaluation_row(item)
if reviewer_id is not None:
reviewer_ids.append(get_required_reviewer_from_evaluation_row(item))
organization = get_first_vss_instance_uri()
ensure_display_names_in_cache(organization, reviewer_ids)
for item in result:
reviewer_id = get_required_reviewer_from_evaluation_row(item)
if reviewer_id is not None:
display_name = get_display_name_from_identity_id(organization, reviewer_id)
else:
display_name = None
if display_name is not None:
table_output.append(_transform_policy_row(item, display_name))
else:
table_output.append(_transform_policy_row(item))
return sorted(table_output, key=_get_policy_table_key)
def get_required_reviewer_from_evaluation_row(row):
if 'requiredReviewerIds' in row['configuration']['settings'] and len(
row['configuration']['settings']['requiredReviewerIds']) == 1:
return row['configuration']['settings']['requiredReviewerIds'][0]
return None
def transform_policy_table_output(result):
table_output = [_transform_policy_row(result)]
return table_output
def _get_policy_table_key(row):
if row['Blocking'] == 'True':
key = '0'
else:
key = '1'
key += row['Policy'].lower()
return key
def _transform_policy_row(row, identity_display_name=None):
table_row = OrderedDict()
table_row['Evaluation ID'] = row['evaluationId']
table_row['Policy'] = _build_policy_name(row, identity_display_name)
if row['configuration']['isBlocking']:
table_row['Blocking'] = 'True'
else:
table_row['Blocking'] = 'False'
table_row['Status'] = _convert_policy_status(row['status'])
if row['context'] and 'isExpired' in row['context']:
if row['context']['isExpired']:
table_row['Expired'] = 'True'
else:
table_row['Expired'] = 'False'
else:
# Not Applicable
table_row['Expired'] = ' '
if row['context'] and 'buildId' in row['context'] and row['context']['buildId'] is not None:
table_row['Build ID'] = row['context']['buildId']
else:
table_row['Build ID'] = ' '
return table_row
def _build_policy_name(row, identity_display_name=None):
policy = row['configuration']['type']['displayName']
if 'displayName' in row['configuration']['settings']\
and row['configuration']['settings']['displayName'] is not None:
policy += ' (' + row['configuration']['settings']['displayName'] + ')'
if 'minimumApproverCount' in row['configuration']['settings']\
and row['configuration']['settings']['minimumApproverCount'] is not None:
policy += ' (' + str(row['configuration']['settings']['minimumApproverCount']) + ')'
if identity_display_name is not None and 'requiredReviewerIds' in row['configuration']['settings']:
if len(row['configuration']['settings']['requiredReviewerIds']) > 1:
policy += ' (' + str(len(row['configuration']['settings']['requiredReviewerIds'])) + ')'
elif len(row['configuration']['settings']['requiredReviewerIds']) == 1:
policy += ' (' + identity_display_name + ')'
return policy
def _convert_policy_status(status):
if status == 'queued':
return ' '
return status.capitalize()
def transform_refs_table_output(result):
table_output = []
for item in sorted(result, key=_get_repo_key):
table_output.append(_transform_ref_row(item))
return table_output
def transform_ref_table_output(result):
table_output = [_transform_ref_row(result)]
return table_output
def _transform_ref_row(row):
from azext_devops.dev.common.git import get_ref_name_from_ref
table_row = OrderedDict()
if 'objectId' in row:
table_row['Object ID'] = row['objectId']
if ('oldObjectId' in row) and ('newObjectId' in row):
old_id = row['oldObjectId']
new_id = row['newObjectId']
if old_id == '0000000000000000000000000000000000000000':
table_row['Object ID'] = new_id
elif new_id == '0000000000000000000000000000000000000000':
table_row['Object ID'] = old_id
else:
table_row['Old Object ID'] = old_id
table_row['New Object ID'] = new_id
table_row['Name'] = get_ref_name_from_ref(row['name'])
table_row['Success'] = row['success'] if 'success' in row else None
table_row['Update Status'] = row['updateStatus'] if 'updateStatus' in row else None
return table_row
def transform_repos_table_output(result):
table_output = []
for item in sorted(result, key=_get_repo_key):
table_output.append(_transform_repo_row(item))
return table_output
def transform_repo_table_output(result):
table_output = [_transform_repo_row(result)]
return table_output
def transform_repo_import_table_output(result):
table_output = OrderedDict()
table_output['Name'] = result['repository']['name']
table_output['Project'] = result['repository']['project']['name']
table_output['Import Status'] = result['status']
return table_output
def _transform_repo_row(row):
from azext_devops.dev.common.git import get_branch_name_from_ref
table_row = OrderedDict()
table_row['ID'] = row['id']
table_row['Name'] = row['name']
if row['defaultBranch']:
table_row['Default Branch'] = get_branch_name_from_ref(row['defaultBranch'])
else:
table_row['Default Branch'] = ' '
table_row['Project'] = row['project']['name']
return table_row
def _get_repo_key(repo_row):
return repo_row['name']
|
76b2be583c80442f0dd04f0e21074527a3ab0bd4
|
9a2bbc25016326b3b1da275e3b9d9a3c5c5878a6
|
/project/utils/settings_handler.py
|
6159657c23d7eab8c6bc001278608a42d7f97b3a
|
[
"MIT"
] |
permissive
|
MahjongRepository/tenhou-python-bot
|
3daabf510d58dfe7525bccf0df1575f027b632d2
|
112b08faab08ee862813de06cb5acc5db1c4feb0
|
refs/heads/dev
| 2023-07-20T14:51:02.101557
| 2023-07-08T10:11:47
| 2023-07-08T10:11:47
| 56,445,019
| 217
| 75
|
MIT
| 2023-05-23T02:05:41
| 2016-04-17T15:54:58
|
Python
|
UTF-8
|
Python
| false
| false
| 804
|
py
|
settings_handler.py
|
import importlib
class SettingsSingleton:
"""
Load the settings into memory once when the app starts,
then override some of them with command-line arguments.
After this, the object should not be changed.
"""
instance = None
def __init__(self):
if not SettingsSingleton.instance:
SettingsSingleton.instance = Settings()
def __getattr__(self, name):
return getattr(self.instance, name)
def __setattr__(self, key, value):
return setattr(self.instance, key, value)
class Settings:
def __init__(self):
mod = importlib.import_module("settings.base")
for setting in dir(mod):
setting_value = getattr(mod, setting)
setattr(self, setting, setting_value)
settings = SettingsSingleton()
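# A minimal usage sketch, not part of the original module. The import path and
# the attribute name ("SOME_SETTING") are assumptions; any real attribute must
# be defined in settings/base.py.
#
#     from utils.settings_handler import settings
#
#     print(settings.SOME_SETTING)        # read a value loaded from settings.base
#     settings.SOME_SETTING = "override"  # assignment is forwarded to the shared instance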
|
cf23f4bbef7252fd696b159c156700d4e5ecf525
|
9e01e9cca5bae23d2345c944dc672068c1c15582
|
/code/exercises/ex_5_10/__init__.py
|
ea915ae237214aac8568bcfcace8e1dea58e9c4e
|
[
"MIT"
] |
permissive
|
brynhayder/reinforcement_learning_an_introduction
|
ecb1fc564a956492b3cea80eb01102025409c719
|
d8b1945f61a8397b684f8d8d800ed0d9308a9a35
|
refs/heads/master
| 2022-10-18T03:02:20.263347
| 2022-10-02T08:35:31
| 2022-10-02T08:35:31
| 128,968,562
| 315
| 79
|
MIT
| 2022-06-21T21:21:03
| 2018-04-10T17:08:35
|
TeX
|
UTF-8
|
Python
| false
| false
| 138
|
py
|
__init__.py
|
#!/usr/bin/env python
"""
--------------------------------
project: code
created: 06/06/2018 15:59
---------------------------------
"""
|
3f26488f616720c55adfe43320f596a283115338
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/teamcity/tests/test_e2e.py
|
449cbb017b4a5fab7e15b999567a744a5b21680f
|
[
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,206
|
py
|
test_e2e.py
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from six import PY2
from datadog_checks.base import AgentCheck
from datadog_checks.teamcity.constants import SERVICE_CHECK_OPENMETRICS
from .common import (
LEGACY_REST_INSTANCE,
OPENMETRICS_INSTANCE,
REST_INSTANCE,
REST_INSTANCE_ALL_PROJECTS,
REST_METRICS,
USE_OPENMETRICS,
)
@pytest.mark.skipif(USE_OPENMETRICS or not PY2, reason="Not available in OpenMetricsV2 check")
@pytest.mark.e2e
def test_e2e_legacy(aggregator, dd_agent_check):
dd_agent_check(LEGACY_REST_INSTANCE)
for metric in REST_METRICS:
aggregator.assert_metric(metric)
aggregator.assert_service_check('teamcity.build.status', status=AgentCheck.OK)
aggregator.assert_service_check('teamcity.build.problems', count=2)
aggregator.assert_service_check('teamcity.test.results', count=6)
@pytest.mark.skipif(USE_OPENMETRICS or PY2, reason="Not available in OpenMetricsV2 check")
@pytest.mark.e2e
def test_e2e(aggregator, dd_agent_check):
dd_agent_check(REST_INSTANCE)
for metric in REST_METRICS:
aggregator.assert_metric(metric)
aggregator.assert_service_check('teamcity.build.status', status=AgentCheck.OK)
aggregator.assert_service_check('teamcity.build.problems', count=2)
aggregator.assert_service_check('teamcity.test.results', count=6)
@pytest.mark.skipif(USE_OPENMETRICS or PY2, reason="Not available in OpenMetricsV2 check")
@pytest.mark.e2e
def test_e2e_all_projects(aggregator, dd_agent_check):
dd_agent_check(REST_INSTANCE_ALL_PROJECTS)
for metric in REST_METRICS:
aggregator.assert_metric(metric)
aggregator.assert_service_check('teamcity.build.status', status=AgentCheck.OK)
aggregator.assert_service_check('teamcity.build.problems', count=2)
aggregator.assert_service_check('teamcity.test.results', count=6)
@pytest.mark.skipif(not USE_OPENMETRICS, reason="Not available in REST check")
@pytest.mark.e2e
def test_e2e_openmetrics(aggregator, dd_agent_check):
dd_agent_check(OPENMETRICS_INSTANCE)
aggregator.assert_service_check(SERVICE_CHECK_OPENMETRICS, status=AgentCheck.OK)
|
ece79d9d3d5dac80f309d0a8ba5d698b3f69fa36
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/nvcloth/1.1.6/conanfile.py
|
c303576ad28c2a46f9e9028991ef5f4c3290ad3b
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 7,464
|
py
|
conanfile.py
|
import os
import shutil
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
from conan.tools.microsoft import msvc_runtime_flag, is_msvc_static_runtime, is_msvc
required_conan_version = ">=1.35.0"
class NvclothConan(ConanFile):
name = "nvcloth"
license = "Nvidia Source Code License (1-Way Commercial)"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/NVIDIAGameWorks/NvCloth"
description = "NvCloth is a library that provides low level access to a cloth solver designed for realtime interactive applications."
topics = ("physics", "physics-engine", "physics-simulation", "game-development", "cuda")
# Binary configuration
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
"use_cuda": [True, False],
"use_dx11": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"use_cuda": False,
"use_dx11": False
}
generators = "cmake"
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def validate(self):
if self.settings.os not in ["Windows", "Linux", "Macos", "Android", "iOS"]:
raise ConanInvalidConfiguration("Current os is not supported")
build_type = self.settings.build_type
if build_type not in ["Debug", "RelWithDebInfo", "Release"]:
raise ConanInvalidConfiguration("Current build_type is not supported")
if is_msvc(self) and tools.Version(self.settings.compiler.version) < 9:
raise ConanInvalidConfiguration("Visual Studio versions < 9 are not supported")
def _configure_cmake(self):
cmake = CMake(self)
if not self.options.shared:
cmake.definitions["PX_STATIC_LIBRARIES"] = 1
cmake.definitions["STATIC_WINCRT"] = is_msvc_static_runtime(self)
cmake.definitions["NV_CLOTH_ENABLE_CUDA"] = self.options.use_cuda
cmake.definitions["NV_CLOTH_ENABLE_DX11"] = self.options.use_dx11
cmake.definitions["TARGET_BUILD_PLATFORM"] = self._get_target_build_platform()
cmake.configure(
build_folder=os.path.join(self.build_folder, self._build_subfolder)
)
return cmake
def _remove_samples(self):
tools.rmdir(os.path.join(self._source_subfolder, "NvCloth", "samples"))
def _patch_sources(self):
# There is no reason to force consumer of PhysX public headers to use one of
# NDEBUG or _DEBUG, since none of them relies on NDEBUG or _DEBUG
tools.replace_in_file(os.path.join(self.build_folder, self._source_subfolder, "PxShared", "include", "foundation", "PxPreprocessor.h"),
"#error Exactly one of NDEBUG and _DEBUG needs to be defined!",
"// #error Exactly one of NDEBUG and _DEBUG needs to be defined!")
shutil.copy(
os.path.join(self.build_folder, self._source_subfolder, "NvCloth/include/NvCloth/Callbacks.h"),
os.path.join(self.build_folder, self._source_subfolder, "NvCloth/include/NvCloth/Callbacks.h.origin")
)
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
if self.settings.build_type == "Debug":
shutil.copy(
os.path.join(self.build_folder, self._source_subfolder, "NvCloth/include/NvCloth/Callbacks.h"),
os.path.join(self.build_folder, self._source_subfolder, "NvCloth/include/NvCloth/Callbacks.h.patched")
)
shutil.copy(
os.path.join(self.build_folder, self._source_subfolder, "NvCloth/include/NvCloth/Callbacks.h.origin"),
os.path.join(self.build_folder, self._source_subfolder, "NvCloth/include/NvCloth/Callbacks.h")
)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def build(self):
with tools.environment_append({"GW_DEPS_ROOT": os.path.abspath(self._source_subfolder)}):
self._patch_sources()
self._remove_samples()
cmake = self._configure_cmake()
cmake.build()
def _get_build_type(self):
if self.settings.build_type == "Debug":
return "debug"
elif self.settings.build_type == "RelWithDebInfo":
return "checked"
elif self.settings.build_type == "Release":
return "release"
def _get_target_build_platform(self):
return {
"Windows" : "windows",
"Linux" : "linux",
"Macos" : "mac",
"Android" : "android",
"iOS" : "ios"
}.get(str(self.settings.os))
def package(self):
if self.settings.build_type == "Debug":
shutil.copy(
os.path.join(self._source_subfolder, "NvCloth/include/NvCloth/Callbacks.h.patched"),
os.path.join(self._source_subfolder, "NvCloth/include/NvCloth/Callbacks.h")
)
nvcloth_source_subfolder = os.path.join(self.build_folder, self._source_subfolder)
nvcloth_build_subfolder = os.path.join(self.build_folder, self._build_subfolder)
self.copy(pattern="NvCloth/license.txt", dst="licenses", src=nvcloth_source_subfolder, keep_path=False)
self.copy("*.h", dst="include", src=os.path.join(nvcloth_source_subfolder, "NvCloth", "include"))
self.copy("*.h", dst="include", src=os.path.join(nvcloth_source_subfolder, "NvCloth", "extensions", "include"))
self.copy("*.h", dst="include", src=os.path.join(nvcloth_source_subfolder, "PxShared", "include"))
self.copy("*.a", dst="lib", src=nvcloth_build_subfolder, keep_path=False)
self.copy("*.lib", dst="lib", src=nvcloth_build_subfolder, keep_path=False)
self.copy("*.dylib*", dst="lib", src=nvcloth_build_subfolder, keep_path=False)
self.copy("*.dll", dst="bin", src=nvcloth_build_subfolder, keep_path=False)
self.copy("*.so", dst="lib", src=nvcloth_build_subfolder, keep_path=False)
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "nvcloth"
self.cpp_info.names["cmake_find_package_multi"] = "nvcloth"
if self.settings.build_type == "Debug":
debug_suffix = "DEBUG"
else:
debug_suffix = ""
if self.settings.os == "Windows":
if self.settings.arch == "x86_64":
arch_suffix = "x64"
else:
arch_suffix = ""
self.cpp_info.libs = ["NvCloth{}_{}".format(debug_suffix, arch_suffix)]
else:
self.cpp_info.libs = ["NvCloth{}".format(debug_suffix)]
if not self.options.shared:
if self.settings.os in ("FreeBSD", "Linux"):
self.cpp_info.system_libs.append("m")
|
537c5fac9a22d36140f69b868c289e34fee5642d
|
6babafccb70ec391db9fdaf7e388ccd00eb1aeed
|
/test/run_tests.py
|
28c722682c8988d7e1bb966e388a1e134dcfe9d1
|
[
"MIT"
] |
permissive
|
neherlab/treetime
|
31140f60ec1c2c9ca7df1afd3ddd648b155dede5
|
52e7cd106a9e0b9a8ea661f213e7e5338e1165cd
|
refs/heads/master
| 2023-08-18T18:38:57.211722
| 2023-07-31T09:33:26
| 2023-07-31T09:33:26
| 55,709,169
| 196
| 57
|
MIT
| 2023-09-07T15:20:32
| 2016-04-07T16:12:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 220
|
py
|
run_tests.py
|
from test_treetime import *
test_import_short()
test_assign_gamma()
test_GTR()
test_ancestral()
test_seq_joint_reconstruction_correct()
test_seq_joint_lh_is_max()
print('\n\n TESTS HAVE FINISHED SUCCESSFULLY\n\n')
|
93f7c09d5b43a7a31244d167b8f95a4085e6d4f1
|
cf8182ecc88888719cfaff79751834500800151a
|
/examples/undocumented/python/distribution_ppwm.py
|
444776b6b78a5924d42e8db018d290d4d2686a65
|
[
"BSD-3-Clause",
"DOC",
"GPL-3.0-only"
] |
permissive
|
shogun-toolbox/shogun
|
17beb82a04fbf1179d300c4fcd16ee68850ad994
|
9b8d856971af5a295dd6ad70623ae45647a6334c
|
refs/heads/develop
| 2023-03-11T04:46:36.167073
| 2020-12-08T16:56:38
| 2020-12-08T16:56:38
| 1,555,094
| 2,938
| 1,246
|
BSD-3-Clause
| 2022-08-12T11:12:34
| 2011-04-01T10:44:32
|
C++
|
UTF-8
|
Python
| false
| false
| 1,395
|
py
|
distribution_ppwm.py
|
#!/usr/bin/env python
import shogun as sg
import numpy as np
from tools.load import LoadMatrix
lm=LoadMatrix()
traindna = lm.load_dna('../data/fm_train_dna.dat')
parameter_list = [[traindna,3],[traindna,4]]
def distribution_ppwm (fm_dna=traindna, order=3):
charfeat=sg.create_string_features(fm_dna, sg.DNA)
feats=sg.create_string_features(charfeat, order-1, order, 0, False, sg.PT_UINT8)
L=20
k=3
sigma = 1
mu = 4
ppwm=sg.PositionalPWM()
ppwm.set_sigma(sigma)
ppwm.set_mean(mu)
pwm=np.array([[0.0, 0.5, 0.1, 1.0],
[0.0, 0.5, 0.5, 0.0],
[1.0, 0.0, 0.4, 0.0],
[0.0, 0.0, 0.0, 0.0]]);
pwm=np.array([[0.01,0.09,0.1],[0.09,0.01,0.1],[0.85,0.4,0.1],[0.05,0.5,0.7]])
ppwm.set_pwm(np.log(pwm))
#print(ppwm.get_pwm())
ppwm.compute_w(L)
w=ppwm.get_w()
#print(w)
#from pylab import *
#figure(1)
#pcolor(exp(w))
#pcolor(w)
#colorbar()
#figure(2)
ppwm.compute_scoring(1)
u=ppwm.get_scoring(0)
#pcolor(exp(u))
#show()
#ppwm=PositionalPWM(feats)
#ppwm.train()
#out_likelihood = histo.get_log_likelihood()
#out_sample = histo.get_log_likelihood_sample()
return w,u
###########################################################################
# call functions
###########################################################################
if __name__=='__main__':
print('PositionalPWM')
distribution_ppwm(*parameter_list[0])
|
52b6c748196f6b1474635927f80abac540409f37
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_algos/glm/pyunit_shuffling_glm_large.py
|
0912b50894d70ce5f647d85f2a8d7fdf81f96a08
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,370
|
py
|
pyunit_shuffling_glm_large.py
|
from builtins import zip
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def shuffling_large():
print("Reading in Arcene training data for binomial modeling.")
train_data = h2o.upload_file(path=pyunit_utils.locate("smalldata/arcene/shuffle_test_version/arcene.csv"))
train_data_shuffled = h2o.upload_file(path=pyunit_utils.locate("smalldata/arcene/shuffle_test_version/arcene_shuffled.csv"))
print("Create model on original Arcene dataset.")
h2o_model = H2OGeneralizedLinearEstimator(family="binomial", lambda_search=True, alpha=0.5)
h2o_model.train(x=list(range(1000)), y=1000, training_frame=train_data)
print("Create second model on original Arcene dataset.")
h2o_model_2 = H2OGeneralizedLinearEstimator(family="binomial", lambda_search=True, alpha=0.5)
h2o_model_2.train(x=list(range(1000)), y=1000, training_frame=train_data)
print("Create model on shuffled Arcene dataset.")
h2o_model_s = H2OGeneralizedLinearEstimator(family="binomial", lambda_search=True, alpha=0.5)
h2o_model_s.train(x=list(range(1000)), y=1000, training_frame=train_data_shuffled)
print("Assert that number of predictors remaining and their respective coefficients are equal.")
for x, y in zip(h2o_model._model_json['output']['coefficients_table'].cell_values,
                h2o_model_2._model_json['output']['coefficients_table'].cell_values):
assert (type(x[1]) == type(y[1])) and (type(x[2]) == type(y[2])), "coefficients should be the same type"
if isinstance(x[1],float):
assert abs(x[1] - y[1]) < 5e-10, "coefficients should be equal"
if isinstance(x[2],float):
assert abs(x[2] - y[2]) < 5e-10, "coefficients should be equal"
for x, y in zip(h2o_model._model_json['output']['coefficients_table'].cell_values,
                h2o_model_s._model_json['output']['coefficients_table'].cell_values):
assert (type(x[1]) == type(y[1])) and (type(x[2]) == type(y[2])), "coefficients should be the same type"
if isinstance(x[1],float):
assert abs(x[1] - y[1]) < 5e-10, "coefficients should be equal"
if isinstance(x[2],float):
assert abs(x[2] - y[2]) < 5e-10, "coefficients should be equal"
if __name__ == "__main__":
pyunit_utils.standalone_test(shuffling_large)
else:
shuffling_large()
|
3a77bf224fde2047de6e85af1603157d4d05dabd
|
c268dcf432f3b7171be6eb307aafbe1bd173285a
|
/reddit2telegram/channels/~inactive/catmemes_reddit/app.py
|
5a3bb95032aaf5a6856389cb2d81bbef0381ddf6
|
[
"MIT"
] |
permissive
|
Fillll/reddit2telegram
|
a7162da2cc08c81bcc8078ea4160d4ee07461fee
|
5d8ee3097e716734d55a72f5a16ce3d7467e2ed7
|
refs/heads/master
| 2023-08-09T10:34:16.163262
| 2023-07-30T18:36:19
| 2023-07-30T18:36:19
| 67,726,018
| 258
| 205
|
MIT
| 2023-09-07T02:36:36
| 2016-09-08T17:39:46
|
Python
|
UTF-8
|
Python
| false
| false
| 144
|
py
|
app.py
|
#encoding:utf-8
subreddit = 'Catmemes'
t_channel = '@catmemes_reddit'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
9b936a1ac9c6f5827c3f73a879858535250dd7f8
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/salt/cloud/clouds/hetzner.py
|
e666769ee6aa703d6138a409e814722747828dfc
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 16,880
|
py
|
hetzner.py
|
"""
Hetzner Cloud Module
====================
The Hetzner cloud module is used to control access to the hetzner cloud.
https://docs.hetzner.cloud/
:depends: hcloud >= 1.10
Use of this module requires the ``key`` parameter to be set.
.. code-block:: yaml
my-hetzner-cloud-config:
key: <your api key>
driver: hetzner
"""
# pylint: disable=invalid-name,function-redefined
import logging
import time
import salt.config as config
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
# hcloud module will be needed
# pylint: disable=import-error
try:
import hcloud
HAS_HCLOUD = True
except ImportError:
HAS_HCLOUD = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = "hetzner"
def __virtual__():
"""
Check for hetzner configurations
"""
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def _get_active_provider_name():
try:
return __active_provider_name__.value()
except AttributeError:
return __active_provider_name__
def get_configured_provider():
"""
Return the first configured instance.
"""
return config.is_provider_configured(
__opts__,
_get_active_provider_name() or __virtualname__,
("key",),
)
def get_dependencies():
"""
Warn if dependencies aren't met.
"""
return config.check_driver_dependencies(
_get_active_provider_name() or __virtualname__,
{"hcloud": HAS_HCLOUD},
)
def _object_to_dict(obj, attrs):
return {attr: getattr(obj, attr) for attr in attrs}
def _datacenter_to_dict(datacenter):
return {
"name": datacenter.name,
"location": datacenter.location.name,
}
def _public_network_to_dict(net):
return {
"ipv4": getattr(net.ipv4, "ip", None),
"ipv6": getattr(net.ipv6, "ip", None),
}
def _private_network_to_dict(net):
return {
"ip": getattr(net, "ip", None),
}
def _connect_client():
provider = get_configured_provider()
return hcloud.Client(provider["key"])
def avail_locations(call=None):
"""
Return a dictionary of available locations
"""
if call == "action":
raise SaltCloudSystemExit(
"The list_locations function must be called with -f or --function"
)
client = _connect_client()
locations = {}
for loc in client.locations.get_all():
locations[loc.name] = _object_to_dict(loc, loc.model.__slots__)
return locations
def avail_images(call=None):
"""
Return a dictionary of available images
"""
if call == "action":
raise SaltCloudSystemExit(
"The avail_images function must be called with -f or --function"
)
client = _connect_client()
images = {}
for image in client.images.get_all():
images[image.name] = _object_to_dict(image, image.model.__slots__)
return images
def avail_sizes(call=None):
"""
Return a dictionary of available VM sizes
"""
if call == "action":
raise SaltCloudSystemExit(
"The avail_sizes function must be called with -f or --function"
)
client = _connect_client()
sizes = {}
for size in client.server_types.get_all():
sizes[size.name] = _object_to_dict(size, size.model.__slots__)
return sizes
def list_ssh_keys(call=None):
"""
Return a dictionary of available SSH keys configured in the current project
"""
if call == "action":
raise SaltCloudSystemExit(
"The list_ssh_keys function must be called with -f or --function"
)
client = _connect_client()
ssh_keys = {}
for key in client.ssh_keys.get_all():
ssh_keys[key.name] = _object_to_dict(key, key.model.__slots__)
return ssh_keys
def list_nodes_full(call=None):
"""
Return a dictionary of existing VMs in the current project, containing full details per VM
"""
if call == "action":
raise SaltCloudSystemExit(
"The list_nodes_full function must be called with -f or --function"
)
client = _connect_client()
nodes = {}
for node in client.servers.get_all():
nodes[node.name] = {
"id": node.id,
"name": node.name,
"image": node.image.name,
"size": node.server_type.name,
"state": node.status,
"public_ips": _public_network_to_dict(node.public_net),
"private_ips": list(map(_private_network_to_dict, node.private_net)),
"labels": node.labels,
"created": str(node.created),
"datacenter": _datacenter_to_dict(node.datacenter),
"volumes": [vol.name for vol in node.volumes],
}
return nodes
def list_nodes(call=None):
"""
Return a dictionary of existing VMs in the current project, containing basic details of each VM
"""
if call == "action":
raise SaltCloudSystemExit(
"The list_nodes function must be called with -f or --function"
)
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {"name": node}
for prop in ("id", "image", "size", "state", "private_ips", "public_ips"):
ret[node][prop] = nodes[node].get(prop)
return ret
def wait_until(name, state, timeout=300):
"""
Wait until a specific state has been reached on a node
"""
start_time = time.time()
node = show_instance(name, call="action")
while True:
if node["state"] == state:
return True
time.sleep(1)
if time.time() - start_time > timeout:
return False
node = show_instance(name, call="action")
def show_instance(name, call=None):
"""
Return the details of a specific VM
"""
if call != "action":
raise SaltCloudSystemExit(
"The show_instance function must be called with -a or --action."
)
try:
node = list_nodes_full("function")[name]
except KeyError:
log.debug("Failed to get data for node '%s'", name)
node = {}
__utils__["cloud.cache_node"](
node,
_get_active_provider_name() or __virtualname__,
__opts__,
)
return node
def create(vm_):
"""
Create a single VM from a data dict
"""
try:
# Check for required profile parameters before sending any API calls.
if (
vm_.get("profile")
and config.is_profile_configured(
__opts__,
_get_active_provider_name() or __virtualname__,
vm_["profile"],
vm_=vm_,
)
is False
):
return False
except AttributeError:
pass
client = _connect_client()
name = config.get_cloud_config_value(
"name",
vm_,
__opts__,
search_global=False,
)
if not name:
raise SaltCloudException("Missing server name")
# Get the required configuration
server_type = client.server_types.get_by_name(
config.get_cloud_config_value(
"size",
vm_,
__opts__,
search_global=False,
)
)
if server_type is None:
raise SaltCloudException("The server size is not supported")
image = client.images.get_by_name(
config.get_cloud_config_value(
"image",
vm_,
__opts__,
search_global=False,
)
)
if image is None:
raise SaltCloudException("The server image is not supported")
__utils__["cloud.fire_event"](
"event",
"starting create",
"salt/cloud/{}/creating".format(vm_["name"]),
args=__utils__["cloud.filter_event"](
"creating",
vm_,
["name", "profile", "provider", "driver"],
),
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
# Get the ssh_keys
ssh_keys = config.get_cloud_config_value(
"ssh_keys", vm_, __opts__, search_global=False
)
if ssh_keys:
names, ssh_keys = ssh_keys[:], []
for n in names:
ssh_key = client.ssh_keys.get_by_name(n)
if ssh_key is None:
log.error("Invalid ssh key %s.", n)
else:
ssh_keys.append(ssh_key)
# Get the location
location = config.get_cloud_config_value(
"location",
vm_,
__opts__,
search_global=False,
)
if location:
location = client.locations.get_by_name(location)
if location is None:
raise SaltCloudException("The server location is not supported")
# Get the datacenter
datacenter = config.get_cloud_config_value(
"datacenter",
vm_,
__opts__,
search_global=False,
)
if datacenter:
datacenter = client.datacenters.get_by_name(datacenter)
if datacenter is None:
raise SaltCloudException("The server datacenter is not supported")
# Get the volumes
volumes = config.get_cloud_config_value(
"volumes",
vm_,
__opts__,
search_global=False,
)
if volumes:
        volumes = [vol for vol in client.volumes.get_all() if vol.name in volumes]
# Get the networks
networks = config.get_cloud_config_value(
"networks",
vm_,
__opts__,
search_global=False,
)
if networks:
        networks = [net for net in client.networks.get_all() if net.name in networks]
# Create the machine
response = client.servers.create(
name=name,
server_type=server_type,
image=image,
ssh_keys=ssh_keys,
volumes=volumes,
networks=networks,
location=location,
datacenter=datacenter,
user_data=config.get_cloud_config_value(
"user_data",
vm_,
__opts__,
search_global=False,
),
labels=config.get_cloud_config_value(
"labels",
vm_,
__opts__,
search_global=False,
),
automount=config.get_cloud_config_value(
"automount",
vm_,
__opts__,
search_global=False,
),
)
# Bootstrap if ssh keys are configured
server = response.server
vm_.update(
{
"ssh_host": server.public_net.ipv4.ip or server.public_net.ipv6.ip,
"ssh_password": response.root_password,
"key_filename": config.get_cloud_config_value(
"private_key", vm_, __opts__, search_global=False, default=None
),
}
)
ret = __utils__["cloud.bootstrap"](vm_, __opts__)
log.info("Created Cloud VM '%s'", vm_["name"])
ret["created"] = True
__utils__["cloud.fire_event"](
"event",
"created instance",
"salt/cloud/{}/created".format(vm_["name"]),
args=__utils__["cloud.filter_event"](
"created",
vm_,
["name", "profile", "provider", "driver"],
),
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
return ret
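# Illustrative only (not part of the upstream driver): a cloud profile of
# roughly the following shape would exercise the configuration keys read by
# create() above. All names and values below are made-up examples.
#
#   my-hetzner-server:
#     provider: my-hetzner-config
#     size: cx11
#     image: ubuntu-20.04
#     ssh_keys:
#       - my-uploaded-key
#     location: fsn1
#     labels:
#       role: web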
def start(name, call=None, wait=True):
"""
Start a node.
CLI Example:
.. code-block:: bash
salt-cloud -a start mymachine
"""
if call != "action":
raise SaltCloudSystemExit(
"The start action must be called with -a or --action."
)
client = _connect_client()
server = client.servers.get_by_name(name)
if server is None:
return "Instance {} doesn't exist.".format(name)
server.power_on()
if wait and not wait_until(name, "running"):
return "Instance {} doesn't start.".format(name)
__utils__["cloud.fire_event"](
"event",
"started instance",
"salt/cloud/{}/started".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
return {"Started": "{} was started.".format(name)}
def stop(name, call=None, wait=True):
"""
Stop a node.
CLI Example:
.. code-block:: bash
salt-cloud -a stop mymachine
"""
if call != "action":
raise SaltCloudSystemExit("The stop action must be called with -a or --action.")
client = _connect_client()
server = client.servers.get_by_name(name)
if server is None:
return "Instance {} doesn't exist.".format(name)
server.power_off()
if wait and not wait_until(name, "off"):
return "Instance {} doesn't stop.".format(name)
__utils__["cloud.fire_event"](
"event",
"stopped instance",
"salt/cloud/{}/stopped".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
return {"Stopped": "{} was stopped.".format(name)}
def reboot(name, call=None, wait=True):
"""
Reboot a node.
CLI Example:
.. code-block:: bash
salt-cloud -a reboot mymachine
"""
if call != "action":
raise SaltCloudSystemExit(
"The reboot action must be called with -a or --action."
)
client = _connect_client()
server = client.servers.get_by_name(name)
if server is None:
return "Instance {} doesn't exist.".format(name)
server.reboot()
if wait and not wait_until(name, "running"):
return "Instance {} doesn't start.".format(name)
return {"Rebooted": "{} was rebooted.".format(name)}
def destroy(name, call=None):
"""
Destroy a node.
CLI Example:
.. code-block:: bash
salt-cloud --destroy mymachine
"""
if call == "function":
raise SaltCloudSystemExit(
"The destroy action must be called with -d, --destroy, -a or --action."
)
client = _connect_client()
server = client.servers.get_by_name(name)
if server is None:
return "Instance {} doesn't exist.".format(name)
__utils__["cloud.fire_event"](
"event",
"destroying instance",
"salt/cloud/{}/destroying".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
node = show_instance(name, call="action")
if node["state"] == "running":
stop(name, call="action", wait=False)
if not wait_until(name, "off"):
return {"Error": "Unable to destroy {}, command timed out".format(name)}
server.delete()
__utils__["cloud.fire_event"](
"event",
"destroyed instance",
"salt/cloud/{}/destroyed".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
if __opts__.get("update_cachedir", False) is True:
__utils__["cloud.delete_minion_cachedir"](
name,
_get_active_provider_name().split(":")[0],
__opts__,
)
return {"Destroyed": "{} was destroyed.".format(name)}
def resize(name, kwargs, call=None):
"""
Resize a node.
CLI Example:
.. code-block:: bash
salt-cloud -a resize mymachine size=...
"""
if call != "action":
raise SaltCloudSystemExit(
"The resize action must be called with -a or --action."
)
client = _connect_client()
server = client.servers.get_by_name(name)
if server is None:
return "Instance {} doesn't exist.".format(name)
# Check the configuration
size = kwargs.get("size", None)
if size is None:
raise SaltCloudException("The new size is required")
server_type = client.server_types.get_by_name(size)
if server_type is None:
raise SaltCloudException("The server size is not supported")
__utils__["cloud.fire_event"](
"event",
"resizing instance",
"salt/cloud/{}/resizing".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
node = show_instance(name, call="action")
if node["state"] == "running":
stop(name, call="action", wait=False)
if not wait_until(name, "off"):
return {"Error": "Unable to resize {}, command timed out".format(name)}
server.change_type(server_type, kwargs.get("upgrade_disk", False))
__utils__["cloud.fire_event"](
"event",
"resizing instance",
"salt/cloud/{}/resized".format(name),
args={"name": name},
sock_dir=__opts__["sock_dir"],
transport=__opts__["transport"],
)
return {"Resized": "{} was resized.".format(name)}
|
7c34d6785142ba648f493f02c700198d357409f2
|
bb90ad20468f9fe2039b8c16858bd8eae8bbc050
|
/tests/infra/proposal.py
|
07ade979d5f1dcfe791fb07fba3536568f6db16b
|
[
"Apache-2.0"
] |
permissive
|
microsoft/CCF
|
0997fd81a924d36d775b219720b26b4ff196b18a
|
2fbf87840b9e8334c141f4a9c9b25aae979b0540
|
refs/heads/main
| 2023-09-05T15:39:37.265089
| 2023-09-05T15:27:25
| 2023-09-05T15:27:25
| 180,112,558
| 687
| 229
|
Apache-2.0
| 2023-09-14T14:28:39
| 2019-04-08T09:13:04
|
C++
|
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
proposal.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
from enum import Enum
class ProposalNotCreated(Exception):
def __init__(self, response):
super(ProposalNotCreated, self).__init__()
self.response = response
class ProposalNotAccepted(Exception):
def __init__(self, proposal, response):
super(ProposalNotAccepted, self).__init__()
self.proposal = proposal
self.response = response
# Values defined in include/ccf/service/tables/proposals.h
class ProposalState(Enum):
OPEN = "Open"
ACCEPTED = "Accepted"
WITHDRAWN = "Withdrawn"
REJECTED = "Rejected"
FAILED = "Failed"
class Proposal:
def __init__(
self,
proposer_id,
proposal_id,
state,
view=None,
seqno=None,
):
self.proposer_id = proposer_id
self.proposal_id = proposal_id
self.state = state
self.voters = []
self.view = view
self.seqno = seqno
self.completed_view = view if state == ProposalState.ACCEPTED else None
self.completed_seqno = seqno if state == ProposalState.ACCEPTED else None
def set_completed(self, seqno, view):
self.completed_seqno = seqno
self.completed_view = view
def increment_votes_for(self, member_id):
self.voters.append(member_id)
@property
def votes_for(self):
return len(self.voters)
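# Illustrative sketch (not part of the test infrastructure): how a caller might
# track a proposal's lifecycle with this class. The ids and values are made up.
#
#   p = Proposal(proposer_id="member0", proposal_id="abc123", state=ProposalState.OPEN)
#   p.increment_votes_for("member1")
#   p.increment_votes_for("member2")
#   assert p.votes_for == 2
#   p.state = ProposalState.ACCEPTED
#   p.set_completed(seqno=42, view=4)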
|
1e637b67368dbda49ad540574dbf1cd9ffb67406
|
75bee875a2d26ed71513f46a2acbb564dd9a1c44
|
/migrations/versions/81ce4ac01c45_migrate_static_roles.py
|
c1ae927bab50f5fa9e3ef799cbae1f592e715d8b
|
[
"MIT"
] |
permissive
|
frol/flask-restplus-server-example
|
d096aa1f4e3b6024ecb16af3d0769ccc20e7cff8
|
53a3a156cc9df414537860ed677bd0cc98dd2271
|
refs/heads/master
| 2023-08-28T14:27:34.047855
| 2023-06-21T14:30:54
| 2023-06-21T14:30:54
| 46,421,329
| 1,487
| 412
|
MIT
| 2023-06-21T14:30:55
| 2015-11-18T13:43:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
81ce4ac01c45_migrate_static_roles.py
|
"""Migrate static roles (new "internal" role type requires data migration)
Revision ID: 81ce4ac01c45
Revises: beb065460c24
Create Date: 2016-11-08 15:58:55.932297
"""
# revision identifiers, used by Alembic.
revision = '81ce4ac01c45'
down_revision = 'beb065460c24'
from alembic import op
import sqlalchemy as sa
UserHelper = sa.Table(
'user',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('static_roles', sa.Integer),
)
def upgrade():
connection = op.get_bind()
for user in connection.execute(UserHelper.select()):
if user.static_roles & 0x1000:
continue
new_static_roles = user.static_roles >> 1
connection.execute(
UserHelper.update().where(
UserHelper.c.id == user.id
).values(
static_roles=new_static_roles
)
)
def downgrade():
connection = op.get_bind()
for user in connection.execute(UserHelper.select()):
if not user.static_roles & 0x1000:
continue
new_static_roles = user.static_roles << 1
connection.execute(
UserHelper.update().where(
UserHelper.c.id == user.id
).values(
static_roles=new_static_roles
)
)
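# Worked example (illustrative of the mechanics above): upgrade() skips rows
# that already have the 0x1000 bit set and right-shifts every other mask by
# one (e.g. 0b100 becomes 0b010); downgrade() left-shifts the mask by one, but
# only for rows that do have the 0x1000 bit set.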
|
efa2c02c2635c40010e8a85f29780fcd8898ec0c
|
807438e6974bf68762208ec24cf824dd0e5fabd6
|
/docs/examples/compute/cloudsigma/tag_server.py
|
7115d347484844e67e4d08bb809b012d6f7516ee
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/libcloud
|
019c5bd839dedd2423f9604936886eaff252e04b
|
abba8c1719a8bda6db8efde2d46fd1b423ae4304
|
refs/heads/trunk
| 2023-08-31T20:14:22.369970
| 2023-08-21T20:17:57
| 2023-08-21T20:17:57
| 419,555
| 1,644
| 968
|
Apache-2.0
| 2023-09-13T19:34:44
| 2009-12-11T09:00:14
|
Python
|
UTF-8
|
Python
| false
| false
| 533
|
py
|
tag_server.py
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.CLOUDSIGMA)
driver = cls("username", "password", region="zrh", api_version="2.0")
node = driver.list_nodes()[0]
tag_names = ["zrh", "database-server", "monited"]
tags = []
# 1. Create necessary tags
for tag_name in tag_names:
tag = driver.ex_create_tag(name="database-servers")
tags.append(tag)
# 2. Tag node with the created tags
for tag in tags:
driver.ex_tag_resource(resource=node, tag=tag)
|
860f4ea5453419a50d910280f706c69c41b0c86d
|
4262804598ec9669d3537459591bf8e710436b5a
|
/src/imagetagger/images/migrations/0001_initial.py
|
4528301346e1531537a7a6cd34a681120995f382
|
[
"MIT"
] |
permissive
|
bit-bots/imagetagger
|
5a639c35bf57e28eec2f340f0786cc3c5c34c982
|
f8ac5f25bf7375baeef8b23b1be7da331246bde2
|
refs/heads/master
| 2022-11-28T22:57:05.551770
| 2021-11-26T10:59:18
| 2021-11-26T10:59:18
| 96,428,939
| 255
| 59
|
MIT
| 2022-11-22T09:21:10
| 2017-07-06T12:41:41
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-22 09:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('filename', models.CharField(max_length=100, unique=True)),
('time', models.DateTimeField(auto_now_add=True)),
('checksum', models.BinaryField()),
],
),
migrations.CreateModel(
name='ImageSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=100, null=True, unique=True)),
('name', models.CharField(max_length=100)),
('location', models.CharField(max_length=100)),
('description', models.TextField(max_length=1000, null=True)),
('time', models.DateTimeField(auto_now_add=True)),
('public', models.BooleanField(default=False)),
('image_lock', models.BooleanField(default=False)),
],
options={
'permissions': (('edit_set', 'Edit set'), ('delete_set', 'Delete set'), ('edit_annotation', 'Edit annotations in the set'), ('delete_annotation', 'Delete annotations in the set'), ('annotate', 'Create annotations in the set'), ('read', 'Read and download annotations and images'), ('create_export', 'Create export files of the set'), ('delete_export', 'Delete export files of the set')),
},
),
]
|
cc2e7071b60c9036a84d74f281a0baabf121067d
|
4aa2afa66d7e5778a8de2a0de5fbecffe536d382
|
/scripts/loadTimes.py
|
f489cdad78c01fd240883693d266b0beb16da072
|
[
"MIT"
] |
permissive
|
RunestoneInteractive/RunestoneServer
|
576f2dcb24f51535bfb5c25ba1e9d6b47e7c98eb
|
6d565838b7d2d61c06c5ed49e340083ce4008637
|
refs/heads/master
| 2023-08-28T22:00:59.888847
| 2023-06-07T16:50:45
| 2023-06-07T16:50:45
| 3,278,888
| 376
| 318
|
NOASSERTION
| 2023-06-07T16:49:06
| 2012-01-27T00:25:53
|
C++
|
UTF-8
|
Python
| false
| false
| 5,760
|
py
|
loadTimes.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
#
# {address space usage: 359067648 bytes/342MB} {rss usage: 107823104 bytes/102MB} [pid: 11266|app: 0|req: 99163/885977] 64.208.17.170 () {48 vars in 1249 bytes} [Thu Feb 15 16:28:43 2018] GET /runestone/ajax/getnumonline => generated 16 bytes in 2553 msecs (HTTP/1.1 200) 8 headers in 381 bytes (1 switches on core 0)
# 2601:2c3:8880:860:cc2b:7932:b6b5:50f9 - - - [22/Jan/2022:01:06:57 +0000] Request: "GET /ns/books/published/csawesome/_static/runtime.da9e52c73fc3d100.bundle.js?v=F2F2D427 HTTP/1.1" Status: 200 Bytes: 5199 Host: 10.136.0.8:80 ResponseTime: 0.008 Referrer: "https://runestone.academy/ns/books/published/csawesome/Unit8-2DArray/freeResponse.html" Agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"
# 207.163.116.25 - - - [22/Jan/2022:01:06:57 +0000] Request: "POST /assignments/autograde HTTP/1.1" Status: 200 Bytes: 103 Host: 10.136.0.2:80 ResponseTime: 0.208 Referrer: "https://runestone.academy/runestone/admin/grading" Agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:96.0) Gecko/20100101 Firefox/96.0"
# 2601:444:4100:3c00:756d:34fe:d1b0:8963 - - - [22/Jan/2022:01:06:57 +0000] Request: "GET /runestone/default/user/login?_next=/runestone/default/index HTTP/1.1" Status: 200 Bytes: 6537 Host: 10.136.0.10:80 ResponseTime: 0.052 Referrer: "-" Agent: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
# 76.219.135.229 - - - [22/Jan/2022:01:29:31 +0000] Request: "POST /ns/logger/bookevent HTTP/1.1" Status: 201 Bytes: 53 Host: 10.136.0.11:80 ResponseTime: 0.024 Referrer: "https://runestone.academy/ns/books/published/UCMST_APCSA_2022/Unit6-Arrays/topic-6-1-array-basics.html" Agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36 Edg/97.0.1072.62"
import re, sys, os
from dateutil.parser import parse
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
Integer,
String,
Date,
Float,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import datetime
if "-date" in sys.argv:
ix = sys.argv.index('-date') + 1
dday = parse(sys.argv[ix]).date()
ix += 1
else:
dday = None
ix = 1
i = 0
timepat = re.compile(r'.*/(dashboard|proxy|assignments|logger|assessment|books)/(\w+)(\s*|/.*?|\?.*?|\.html.*?) HTTP.*ResponseTime: (\d+\.\d+)')
datepat = re.compile(r'.*\[(\d+/(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)/\d\d\d\d):(.*)\].*')
statuspat = re.compile(r'.*Status:\s+(\d+)\s+.*')
hostpat = re.compile(r'.*Host:\s+(\d+\.\d+\.\d+\.\d+:\d+)\s+.*')
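# Illustrative reading of the patterns above, using the sample log lines from
# the header comment: for a request such as
#   ... Request: "POST /ns/logger/bookevent HTTP/1.1" ... ResponseTime: 0.024 ...
# timepat captures the blueprint ("logger") in group 1, the endpoint
# ("bookevent") in group 2 and the response time ("0.024") in group 4, while
# statuspat and hostpat pull out the HTTP status code and the backend
# host:port respectively.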
print(dday)
runtimes = {}
pagetimes = {}
status_counts = {}
host_counts = {}
for fname in sys.argv[ix:]:
logfile = open(fname, 'r')
line = logfile.readline()
while line:
currentday = None
if (gd := datepat.match(line)) and dday:
currentday = parse(gd.group(1))
if (currentday and dday == currentday.date()) or dday == None:
if gt := timepat.match(line):
epkey = gt.group(2)
if 'dashboard/index' in line:
epkey = 'db'+epkey
if epkey not in runtimes:
runtimes[epkey] = []
runtimes[epkey].append(float(gt.group(4)))
if st := statuspat.match(line):
status_counts[st.group(1)] = status_counts.get(st.group(1), 0) + 1
if ht := hostpat.match(line):
host_counts[ht.group(1)] = host_counts.get(ht.group(1), 0) + 1
try:
line = logfile.readline()
        except Exception:
continue
for k,v in sorted(status_counts.items()):
print(k, v)
for k,v in sorted(host_counts.items()):
print(k, v)
Base = declarative_base()
dburl = os.environ["LOGDBURL"]
engine = create_engine(
dburl,
echo=True
# "postgresql://bmiller:autocubanlobbyduck@localhost/bmiller", echo=True
)
meta = MetaData()
Session = sessionmaker(bind=engine)
class LogEntry(Base):
__tablename__ = "api_times"
id = Column(Integer, primary_key=True)
timestamp = Column(Date)
endpoint = Column(String)
calls = Column(Integer)
response_average = Column(Float)
max_response = Column(Integer)
class HostCounts(Base):
__tablename__ = "host_counts"
id = Column(Integer, primary_key=True)
timestamp = Column(Date)
host = Column(String)
requests = Column(Integer)
class StatusCounts(Base):
__tablename__ = "status_counts"
id = Column(Integer, primary_key=True)
timestamp = Column(Date)
status = Column(String)
requests = Column(Integer)
Base.metadata.create_all(engine)
db = Session()
today = datetime.datetime.now().date()-datetime.timedelta(days=1)
for k in sorted(runtimes,key=lambda x: sum(runtimes[x])/len(runtimes[x] )):
e = LogEntry(endpoint=k,
calls=len(runtimes[k]),
response_average=sum(runtimes[k])/len(runtimes[k]),
max_response=max(runtimes[k]),
timestamp=today)
db.add(e)
print("%20s\t%7d\t%6.3f\t%7d"%(k,len(runtimes[k]),sum(runtimes[k])/len(runtimes[k]),max(runtimes[k])))
for k,v in sorted(status_counts.items()):
e = StatusCounts(
status=k,
requests=v,
timestamp=today
)
db.add(e)
for k,v in sorted(host_counts.items()):
e = HostCounts(
host=k,
requests=v,
timestamp=today
)
db.add(e)
db.commit()
|
e4dae92f99349b8a2192ddd24aec91157d445202
|
160f08e768d7271f9522ad2597ac4ee79c04477a
|
/src/c3nav/mapdata/management/commands/importsvg.py
|
5ec55727c784846ca110076bcc31f7c7c75ed5df
|
[
"Apache-2.0"
] |
permissive
|
c3nav/c3nav
|
6254724dfc8589ee03c6028577befd7c65b05857
|
1a4ef5caa06ddacc8d9370b5adcee248fd4f55f7
|
refs/heads/main
| 2023-08-04T08:36:18.431458
| 2023-07-24T09:57:18
| 2023-07-24T09:57:18
| 56,852,994
| 140
| 47
|
Apache-2.0
| 2023-07-05T22:55:27
| 2016-04-22T12:13:51
|
Python
|
UTF-8
|
Python
| false
| false
| 8,461
|
py
|
importsvg.py
|
import argparse
import logging
import re
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import gettext_lazy as _
from shapely.affinity import scale, translate
from shapely.geometry import Polygon
from c3nav.mapdata.models import Area, MapUpdate, Obstacle, Space
from c3nav.mapdata.utils.cache.changes import changed_geometries
class Command(BaseCommand):
    help = 'import svg file'
@staticmethod
def space_value(value):
try:
space = Space.objects.get(pk=value)
except Space.DoesNotExist:
raise argparse.ArgumentTypeError(
_('unknown space')
)
return space
def add_arguments(self, parser):
parser.add_argument('svgfile', type=argparse.FileType('r'), help=_('svg file to import'))
parser.add_argument('name', type=str, help=_('name of the import'))
parser.add_argument('--type', type=str, required=True, choices=('areas', 'obstacles'),
help=_('type of objects to create'))
parser.add_argument('--space', type=self.space_value, required=True,
help=_('space to add the objects to'))
        parser.add_argument('--minx', type=float, required=True,
                            help=_('minimum x coordinate, everything left of it will be cropped'))
        parser.add_argument('--miny', type=float, required=True,
                            help=_('minimum y coordinate, everything below it will be cropped'))
        parser.add_argument('--maxx', type=float, required=True,
                            help=_('maximum x coordinate, everything right of it will be cropped'))
        parser.add_argument('--maxy', type=float, required=True,
                            help=_('maximum y coordinate, everything above it will be cropped'))
@staticmethod
def parse_svg_data(data):
        first = True
last_point = (0, 0)
last_end_point = None
done_subpaths = []
current_subpath = []
while data:
data = data.lstrip().replace(',', ' ')
command = data[0]
if first and command not in 'Mm':
raise ValueError('path data has to start with moveto command.')
data = data[1:].lstrip()
first = False
numbers = []
while True:
match = re.match(r'^-?[0-9]+(\.[0-9]+)?(e-?[0-9]+)?', data)
if match is None:
break
numbers.append(float(match.group(0)))
data = data[len(match.group(0)):].lstrip()
relative = command.islower()
if command in 'Mm':
if not len(numbers) or len(numbers) % 2:
raise ValueError('Invalid number of arguments for moveto command!')
numbers = iter(numbers)
first = True
for x, y in zip(numbers, numbers):
if relative:
x, y = last_point[0] + x, last_point[1] + y
if first:
first = False
if current_subpath:
done_subpaths.append(current_subpath)
last_end_point = current_subpath[-1]
current_subpath = []
current_subpath.append((x, y))
last_point = (x, y)
elif command in 'Ll':
if not len(numbers) or len(numbers) % 2:
raise ValueError('Invalid number of arguments for lineto command!')
numbers = iter(numbers)
for x, y in zip(numbers, numbers):
if relative:
x, y = last_point[0] + x, last_point[1] + y
if not current_subpath:
current_subpath.append(last_end_point)
current_subpath.append((x, y))
last_point = (x, y)
elif command in 'Hh':
if not len(numbers):
raise ValueError('Invalid number of arguments for horizontal lineto command!')
y = last_point[1]
for x in numbers:
if relative:
x = last_point[0] + x
if not current_subpath:
current_subpath.append(last_end_point)
current_subpath.append((x, y))
last_point = (x, y)
elif command in 'Vv':
if not len(numbers):
raise ValueError('Invalid number of arguments for vertical lineto command!')
x = last_point[0]
for y in numbers:
if relative:
y = last_point[1] + y
if not current_subpath:
current_subpath.append(last_end_point)
current_subpath.append((x, y))
last_point = (x, y)
elif command in 'Zz':
if numbers:
raise ValueError('Invalid number of arguments for closepath command!')
current_subpath.append(current_subpath[0])
done_subpaths.append(current_subpath)
last_end_point = current_subpath[-1]
current_subpath = []
else:
raise ValueError('unknown svg command: ' + command)
if current_subpath:
done_subpaths.append(current_subpath)
return done_subpaths
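    # Illustrative example (not exercised by the command itself): for the path
    # data "M 0 0 L 4 0 L 4 3 Z", parse_svg_data returns one closed subpath,
    # [(0.0, 0.0), (4.0, 0.0), (4.0, 3.0), (0.0, 0.0)], which handle() below
    # turns into a shapely Polygon.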
def handle(self, *args, **options):
minx = options['minx']
miny = options['miny']
maxx = options['maxx']
maxy = options['maxy']
if minx >= maxx:
raise CommandError(_('minx has to be lower than maxx'))
if miny >= maxy:
raise CommandError(_('miny has to be lower than maxy'))
width = maxx-minx
height = maxy-miny
model = {'areas': Area, 'obstacles': Obstacle}[options['type']]
namespaces = {'svg': 'http://www.w3.org/2000/svg'}
svg = ElementTree.fromstring(options['svgfile'].read())
svg_width = float(svg.attrib['width'])
svg_height = float(svg.attrib['height'])
svg_viewbox = svg.attrib.get('viewBox')
if svg_viewbox:
offset_x, offset_y, svg_width, svg_height = [float(i) for i in svg_viewbox.split(' ')]
else:
offset_x, offset_y = 0, 0
for element in svg.findall('.//svg:clipPath/..', namespaces):
for clippath in element.findall('./svg:clipPath', namespaces):
element.remove(clippath)
for element in svg.findall('.//svg:symbol/..', namespaces):
for clippath in element.findall('./svg:symbol', namespaces):
element.remove(clippath)
if svg.findall('.//*[@transform]'):
            raise CommandError(_('svg contains transform attributes. Use Inkscape to apply transforms first.'))
if model.objects.filter(space=options['space'], import_tag=options['name']).exists():
raise CommandError(_('objects with this import tag already exist in this space.'))
with MapUpdate.lock():
changed_geometries.reset()
for path in svg.findall('.//svg:path', namespaces):
for polygon in self.parse_svg_data(path.attrib['d']):
if len(polygon) < 3:
continue
polygon = Polygon(polygon)
polygon = translate(polygon, xoff=-offset_x, yoff=-offset_y)
polygon = scale(polygon, xfact=1, yfact=-1, origin=(0, svg_height/2))
polygon = scale(polygon, xfact=width / svg_width, yfact=height / svg_height, origin=(0, 0))
polygon = translate(polygon, xoff=minx, yoff=miny)
obj = model(geometry=polygon, space=options['space'], import_tag=options['name'])
obj.save()
MapUpdate.objects.create(type='importsvg')
logger = logging.getLogger('c3nav')
logger.info('Imported, map update created.')
logger.info('Next step: go into the shell and edit them using '
'%s.objects.filter(space_id=%r, import_tag=%r)' %
(model.__name__, options['space'].pk, options['name']))
|
247a1b9cfce61d5c9bc5dc604b19f51c83de4120
|
6946f9a3e9d57b00ea275b2303ced0dedcdba1d4
|
/qf_lib/plotting/decorators/scatter_decorator.py
|
bbd23e8d5c235090e5321919268ae9fb11f2fd10
|
[
"Apache-2.0"
] |
permissive
|
quarkfin/qf-lib
|
8eaf76e3db385295ff8845b3250ba64a6fcfc7a6
|
f707e51bc2ff45f6e46dcdd24d59d83ce7dc4f94
|
refs/heads/master
| 2023-08-31T17:41:57.213680
| 2023-08-29T10:01:49
| 2023-08-29T10:01:49
| 202,696,503
| 379
| 51
|
Apache-2.0
| 2023-09-05T06:11:35
| 2019-08-16T09:10:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,595
|
py
|
scatter_decorator.py
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Sequence
from typing import Any
from qf_lib.plotting.charts.chart import Chart
from qf_lib.plotting.decorators.chart_decorator import ChartDecorator
from qf_lib.plotting.decorators.simple_legend_item import SimpleLegendItem
class ScatterDecorator(ChartDecorator, SimpleLegendItem):
"""
Creates a scatter plot based on the data specified.
Parameters
----------
x_data: Sequence
values of x coordinate
y_data: Sequence
values of y coordinate
size: int
size in points^2; scalar or an array of the same length as x_data and y_data
color
*c* can be a single color format string, or a sequence of color specifications of length x_data and y_data,
or a sequence of x_data and y_data numbers to be mapped to colors using the *cmap* and *norm* specified via
kwargs (see below). Note that color should not be a single numeric RGB or RGBA sequence because that is
indistinguishable from an array of values to be colormapped. color can be a 2-D array in which the rows are
RGB or RGBA, however, including the case of a single row to specify the same color for all points.
plot_settings
other settings like for example: alpha, linewidths, verts, edgecolors
"""
def __init__(
self, x_data: Sequence, y_data: Sequence, size: int = 40, color=None, key: str = None, **plot_settings: Any):
ChartDecorator.__init__(self, key)
SimpleLegendItem.__init__(self)
self.x_data = x_data
self.y_data = y_data
self.size = size
if color is None:
self.color = Chart.get_axes_colors()[0]
else:
self.color = color
self.plot_settings = plot_settings
def decorate(self, chart: "Chart"):
self.legend_artist = chart.axes.scatter(
self.x_data, self.y_data, s=self.size, c=self.color, **self.plot_settings)
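# Illustrative sketch (not part of this module): typical usage, assuming a
# concrete Chart subclass such as LineChart and made-up sample data.
#
#   from qf_lib.plotting.charts.line_chart import LineChart
#
#   chart = LineChart()
#   chart.add_decorator(ScatterDecorator([1, 2, 3], [2.5, 3.1, 2.9], size=20, alpha=0.6))
#   chart.plot()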
|
5db43f8dffe740c4681a2dad76b2989c3987a1cc
|
7030c780db36c7d8efedb1152cf945a3cc248fdb
|
/python/cuml/tsa/batched_lbfgs.py
|
4089b04954a69c5b0d2038fc4a653ff734e42297
|
[
"Apache-2.0"
] |
permissive
|
rapidsai/cuml
|
546af8151fd2ee0f737cc4e62386d4b0ede74f3d
|
7d86042b8de06bc8acce632230fe5821bd36c17d
|
refs/heads/branch-23.10
| 2023-08-30T19:17:41.816373
| 2023-08-28T13:23:15
| 2023-08-28T13:23:15
| 152,616,802
| 3,615
| 569
|
Apache-2.0
| 2023-09-14T00:21:52
| 2018-10-11T15:45:35
|
C++
|
UTF-8
|
Python
| false
| false
| 7,502
|
py
|
batched_lbfgs.py
|
#
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common import has_scipy
import cuml.internals.logger as logger
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import_from,
null_decorator,
)
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
np = cpu_only_import("numpy")
def _fd_fprime(x, f, h):
"""(internal) Computes finite difference."""
g = np.zeros(len(x))
for i in range(len(x)):
xph = np.copy(x)
xmh = np.copy(x)
xph[i] += h
xmh[i] -= h
fph = f(xph)
fmh = f(xmh)
g[i] = (fph - fmh) / (2 * h)
return g
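# Illustrative check of the helper above (not part of the library): for a
# scalar-valued quadratic the central difference is exact up to rounding, e.g.
#   _fd_fprime(np.array([3.0]), lambda x: (x ** 2).sum(), 1e-8)
# returns a value very close to [6.0].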
@nvtx_annotate(message="LBFGS", domain="cuml_python")
def batched_fmin_lbfgs_b(
func,
x0,
num_batches,
fprime=None,
args=(),
bounds=None,
m=10,
factr=1e7,
pgtol=1e-5,
epsilon=1e-8,
iprint=-1,
maxiter=15000,
maxls=20,
):
"""A batch-aware L-BFGS-B implementation to minimize a loss function `f` given
an initial set of parameters `x0`.
Parameters
----------
func : function (x: array) -> array[M] (M = n_batches)
The function to minimize. The function should return an array of
size = `num_batches`
x0 : array
Starting parameters
fprime : function (x: array) -> array[M*n_params] (optional)
The gradient. Should return an array of derivatives for each
parameter over batches.
When omitted, uses Finite-differencing to estimate the gradient.
args : Tuple
Additional arguments to func and fprime
bounds : List[Tuple[float, float]]
Box-constrains on the parameters
m : int
L-BFGS parameter: number of previous arrays to store when
estimating inverse Hessian.
factr : float
Stopping criterion when function evaluation not progressing.
Stop when `|f(xk+1) - f(xk)| < factor*eps_mach`
where `eps_mach` is the machine precision
pgtol : float
Stopping criterion when gradient is sufficiently "flat".
Stop when |grad| < pgtol.
epsilon : float
Finite differencing step size when approximating `fprime`
iprint : int
-1 for no diagnostic info
n=1-100 for diagnostic info every n steps.
>100 for detailed diagnostic info
maxiter : int
Maximum number of L-BFGS iterations
maxls : int
Maximum number of line-search iterations.
"""
if has_scipy():
from scipy.optimize import _lbfgsb
else:
raise RuntimeError("Scipy is needed to run batched_fmin_lbfgs_b")
n = len(x0) // num_batches
if fprime is None:
def fprime_f(x):
return _fd_fprime(x, func, epsilon)
fprime = fprime_f
if bounds is None:
bounds = [(None, None)] * n
nbd = np.zeros(n, np.int32)
low_bnd = np.zeros(n, np.float64)
upper_bnd = np.zeros(n, np.float64)
bounds_map = {(None, None): 0, (1, None): 1, (1, 1): 2, (None, 1): 3}
for i in range(0, n):
lb, ub = bounds[i]
if lb is not None:
low_bnd[i] = lb
lb = 1
if ub is not None:
upper_bnd[i] = ub
ub = 1
nbd[i] = bounds_map[lb, ub]
# working arrays needed by L-BFGS-B implementation in SciPy.
# One for each series
x = [
np.copy(np.array(x0[ib * n : (ib + 1) * n], np.float64))
for ib in range(num_batches)
]
f = [np.copy(np.array(0.0, np.float64)) for ib in range(num_batches)]
g = [np.copy(np.zeros((n,), np.float64)) for ib in range(num_batches)]
wa = [
np.copy(np.zeros(2 * m * n + 5 * n + 11 * m * m + 8 * m, np.float64))
for ib in range(num_batches)
]
iwa = [np.copy(np.zeros(3 * n, np.int32)) for ib in range(num_batches)]
task = [np.copy(np.zeros(1, "S60")) for ib in range(num_batches)]
csave = [np.copy(np.zeros(1, "S60")) for ib in range(num_batches)]
lsave = [np.copy(np.zeros(4, np.int32)) for ib in range(num_batches)]
isave = [np.copy(np.zeros(44, np.int32)) for ib in range(num_batches)]
dsave = [np.copy(np.zeros(29, np.float64)) for ib in range(num_batches)]
for ib in range(num_batches):
task[ib][:] = "START"
n_iterations = np.zeros(num_batches, dtype=np.int32)
converged = num_batches * [False]
warn_flag = np.zeros(num_batches)
while not all(converged):
with nvtx_annotate("LBFGS-ITERATION", domain="cuml_python"):
for ib in range(num_batches):
if converged[ib]:
continue
_lbfgsb.setulb(
m,
x[ib],
low_bnd,
upper_bnd,
nbd,
f[ib],
g[ib],
factr,
pgtol,
wa[ib],
iwa[ib],
task[ib],
iprint,
csave[ib],
lsave[ib],
isave[ib],
dsave[ib],
maxls,
)
xk = np.concatenate(x)
fk = func(xk)
gk = fprime(xk)
for ib in range(num_batches):
if converged[ib]:
continue
task_str = task[ib].tobytes()
task_str_strip = task[ib].tobytes().strip(b"\x00").strip()
if task_str.startswith(b"FG"):
# needs function evaluation
f[ib] = fk[ib]
g[ib] = gk[ib * n : (ib + 1) * n]
elif task_str.startswith(b"NEW_X"):
n_iterations[ib] += 1
if n_iterations[ib] >= maxiter:
task[ib][
:
] = "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT"
elif task_str_strip.startswith(b"CONV"):
converged[ib] = True
warn_flag[ib] = 0
else:
converged[ib] = True
warn_flag[ib] = 2
continue
xk = np.concatenate(x)
if iprint > 0:
logger.info(
"CONVERGED in ({}-{}) iterations (|\\/f|={})".format(
np.min(n_iterations),
np.max(n_iterations),
np.linalg.norm(fprime(xk), np.inf),
)
)
if (warn_flag > 0).any():
for ib in range(num_batches):
if warn_flag[ib] > 0:
logger.info(
"WARNING: id={} convergence issue: {}".format(
ib, task[ib].tobytes()
)
)
return xk, n_iterations, warn_flag
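# Illustrative sketch (not part of the library): minimising two independent
# one-parameter quadratics with the batched solver. Shapes follow the
# docstring: x0 concatenates each batch's parameters and func returns one loss
# value per batch.
#
#   def loss(x):
#       # batch 0 is minimised at x = 2, batch 1 at x = -1
#       return np.array([(x[0] - 2.0) ** 2, (x[1] + 1.0) ** 2])
#
#   def loss_grad(x):
#       return np.array([2.0 * (x[0] - 2.0), 2.0 * (x[1] + 1.0)])
#
#   x_opt, n_iter, flags = batched_fmin_lbfgs_b(
#       loss, np.zeros(2), num_batches=2, fprime=loss_grad)
#   # x_opt ends up close to [2.0, -1.0]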
|
ff62a69eb884c139dea6fa3562d61e703e730b05
|
2b5ffa18e7198e45fa77674b96dac8d91159fed7
|
/djangae/tasks/tests/test_deferred.py
|
1c506512f479bd131fac0bfea5a3135e07eb3316
|
[
"BSD-3-Clause"
] |
permissive
|
potatolondon/djangae
|
73681d0c8302ac216f74bc00b980de368e8d4280
|
bef308632790bb6f87e71bb91183f57bad6bd149
|
refs/heads/master
| 2023-09-01T15:27:51.995232
| 2023-08-30T14:40:48
| 2023-08-30T14:40:48
| 10,217,788
| 474
| 155
|
BSD-3-Clause
| 2023-02-08T01:05:31
| 2013-05-22T10:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,755
|
py
|
test_deferred.py
|
import os
from django.db import models
from gcloudc.db import transaction
from djangae.contrib import sleuth
from djangae.tasks.deferred import defer, PermanentTaskFailure
from djangae.test import (
TaskFailedError,
TestCase,
)
def test_task(*args, **kwargs):
pass
def assert_cache_wiped(instance):
field = DeferModelA._meta.get_field("b")
assert(field.get_cached_value(instance, None) is None)
class DeferModelA(models.Model):
b = models.ForeignKey("DeferModelB", on_delete=models.CASCADE)
class Meta:
app_label = "djangae"
class DeferModelB(models.Model):
class Meta:
app_label = "djangae"
class DeferModelC(models.Model):
text = models.TextField()
class Meta:
app_label = "djangae"
def create_defer_model_b(key_value):
DeferModelB.objects.create(pk=key_value)
def process_argument(arg):
DeferModelC.objects.create(text=arg)
def permanent_task_failure():
raise PermanentTaskFailure
class DeferTests(TestCase):
def test_large_task(self):
random_file = os.path.join(os.path.dirname(__file__), "random_data")
with open(random_file, "r") as f:
big_string = f.read()
try:
defer(process_argument, big_string)
except Exception: # noqa
self.fail("A large task couldn't be deferred")
self.process_task_queues()
self.assertTrue(DeferModelC.objects.exists())
instance = DeferModelC.objects.get()
self.assertEqual(instance.text, big_string)
def test_wipe_related_caches(self):
b = DeferModelB.objects.create()
a = DeferModelA.objects.create(b=b)
a.b # Make sure we access it
cache_name = DeferModelA._meta.get_field("b").get_cache_name()
self.assertTrue(getattr(a, cache_name))
defer(assert_cache_wiped, a)
# Should raise an assertion error if the cache existed
try:
self.process_task_queues()
except TaskFailedError as e:
raise e.original_exception
# Should not have wiped the cache for us!
self.assertIsNotNone(getattr(a, cache_name, None))
def test_queues_task(self):
initial_count = self.get_task_count()
defer(test_task)
self.assertEqual(self.get_task_count(), initial_count + 1)
def test_task_default_routing(self):
gae_version = 'demo'
os.environ['GAE_VERSION'] = gae_version
with sleuth.watch('google.cloud.tasks_v2.CloudTasksClient.create_task') as _create_task:
defer(test_task)
self.assertTrue(_create_task.called)
routing = _create_task.calls[0].args[2]['app_engine_http_request']['app_engine_routing']
self.assertFalse('service' in routing)
self.assertFalse('instance' in routing)
self.assertEqual(routing['version'], gae_version)
del os.environ['GAE_VERSION']
def test_task_routing(self):
service = 'service123'
version = 'version456'
instance = 'instance789'
os.environ['GAE_VERSION'] = 'demo'
with sleuth.watch('google.cloud.tasks_v2.CloudTasksClient.create_task') as _create_task:
defer(test_task, _service=service, _version=version, _instance=instance)
self.assertTrue(_create_task.called)
routing = _create_task.calls[0].args[2]['app_engine_http_request']['app_engine_routing']
self.assertEqual(routing['service'], service)
self.assertEqual(routing['version'], version)
self.assertEqual(routing['instance'], instance)
del os.environ['GAE_VERSION']
def test_deprecated_target_parameter(self):
self.assertRaises(UserWarning, defer, test_task, _target='test')
def test_transactional_defer(self):
try:
with transaction.atomic():
defer(create_defer_model_b, 1, _transactional=True)
raise ValueError() # Rollback the transaction
except ValueError:
pass
self.process_task_queues()
# Shouldn't have created anything
self.assertEqual(0, DeferModelB.objects.count())
with transaction.atomic():
defer(create_defer_model_b, 1, _transactional=True)
self.process_task_queues()
self.assertEqual(0, DeferModelB.objects.count()) # Still nothing
# Now we should be good!
self.process_task_queues()
self.assertEqual(1, DeferModelB.objects.count())
def test_permanent_task_failure(self):
defer(permanent_task_failure)
# Complete task without exception raised externally.
self.process_task_queues()
self.assertEqual(self.get_task_count(), 0)
|