content stringlengths 0 1.55M |
|---|
<import_from_stmt>stronghold.views StrongholdPublicMixin<import_stmt>django<import_from_stmt>django.views.generic View<import_from_stmt>django.views.generic.base TemplateResponseMixin<if_stmt>django.VERSION[:2]<l>(1 9)<block_start><import_from_stmt>django.utils unittest<block_end><else_stmt><block_start><import_stmt>unittest<block_end><class_stmt>StrongholdMixinsTests(unittest.TestCase)<block_start><def_stmt>test_public_mixin_sets_attr self<block_start><class_stmt>TestView(StrongholdPublicMixin View)<block_start><pass><block_end>self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)<block_end><def_stmt>test_public_mixin_sets_attr_with_multiple_mixins self<block_start><class_stmt>TestView(StrongholdPublicMixin TemplateResponseMixin View)<block_start>template_name='dummy.html'<block_end>self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)<block_end><block_end> |
# Generated by Django 2.2.14 on 2020-07-15 07:37
<import_stmt>django.contrib.postgres.fields<import_stmt>django.core.validators<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('api' '0054_user_invalidate_unknown_role') ]<line_sep>operations=[migrations.AddField(model_name='reminder' name='mentions' field=django.contrib.postgres.fields.ArrayField(base_field=models.BigIntegerField(validators=[django.core.validators.MinValueValidator(limit_value=0 message='Mention IDs cannot be negative.')]) blank=<true> default=list help_text='IDs of roles or users to ping with the reminder.' size=<none>) ) ]<block_end> |
__all__=()<import_from_stmt>...backend.utils KeepType<import_from_stmt>...discord.client Client<import_from_stmt>.extension EXTENSIONS EXTENSION_STATE_LOADED<line_sep>@KeepType(Client)<class_stmt>Client<block_start>@property<def_stmt>extensions self<block_start>"""
Returns a list of extensions added to the client. Added by the `extension_loader` extension.
Returns
-------
extensions : `list` of ``Extension``
"""<line_sep>extensions=[]<for_stmt>extension EXTENSIONS.values()<block_start><if_stmt>extension._state<eq>EXTENSION_STATE_LOADED<block_start>snapshot_difference=extension._snapshot_difference<if_stmt>(snapshot_difference<is><not><none>)<block_start><for_stmt>client,client_snapshot_difference snapshot_difference<block_start><if_stmt>(self<is>client)<and>client_snapshot_difference<block_start>extensions.append(extension)<line_sep><break><block_end><block_end><block_end><block_end><block_end><return>extensions<block_end><block_end> |
<import_stmt>tensorflow<as>tf<def_stmt>cosine_similarity x y eps=1e-6<block_start>z=tf.batch_matmul(x tf.transpose(y perm=[0 2 1]))<line_sep>z<augdiv>tf.sqrt(tf.multiply(tf.expand_dims(tf.reduce_sum(tf.multiply(x x) 2) 2) tf.expand_dims(tf.reduce_sum(tf.multiply(y y) 2) 1))+eps)<line_sep><return>z<block_end> |
<import_stmt>gzip<import_stmt>pickle<import_stmt>os<def_stmt>analyze data_path<block_start>"""
Run the comparison on the given data file
:param data_path:
:return:
"""<if_stmt>data_path.endswith(".gz")<block_start><with_stmt>gzip.open(data_path 'r')<as>f<block_start>S,true_model=pickle.load(f)<block_end><block_end><else_stmt><block_start><with_stmt>open(data_path 'r')<as>f<block_start>S,true_model=pickle.load(f)<block_end><block_end>print("True model:")<line_sep>print(true_model)<line_sep>T=float(S.shape[0])<line_sep>N=S.sum(axis=0)<line_sep>print("lambda0: " true_model.bias_model.lambda0.mean())<line_sep>print("Average event count: " N.mean() " +- " N.std())<line_sep>print("Average event count: " (N/T).mean() " +- " (N/T).std())<block_end># seed = 2650533028
K=50<line_sep>C=5<line_sep>T=100000<line_sep>data_path=os.path.join("data" "synthetic" "synthetic_K%d_C%d_T%d.pkl.gz"%(K C T))<line_sep>analyze(data_path)<line_sep> |
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_from_stmt>typing List Union Tuple<import_from_stmt>rl_coach.base_parameters AgentParameters VisualizationParameters TaskParameters PresetValidationParameters<import_from_stmt>rl_coach.core_types EnvironmentSteps<import_from_stmt>rl_coach.environments.environment EnvironmentParameters Environment<import_from_stmt>rl_coach.graph_managers.graph_manager GraphManager ScheduleParameters<import_from_stmt>rl_coach.level_manager LevelManager<import_from_stmt>rl_coach.utils short_dynamic_import<class_stmt>HRLGraphManager(GraphManager)<block_start>"""
A simple HRL graph manager creates a deep hierarchy with a single composite agent per hierarchy level, and a single
environment which is interacted with.
"""<def_stmt>__init__ self agents_params:List[AgentParameters] env_params:EnvironmentParameters schedule_params:ScheduleParameters vis_params:VisualizationParameters consecutive_steps_to_run_each_level:Union[EnvironmentSteps List[EnvironmentSteps]] preset_validation_params:PresetValidationParameters=PresetValidationParameters()<block_start>"""
:param agents_params: the parameters of all the agents in the hierarchy starting from the top level of the
hierarchy to the bottom level
:param env_params: the parameters of the environment
:param schedule_params: the parameters for scheduling the graph
:param vis_params: the visualization parameters
:param consecutive_steps_to_run_each_level: the number of time steps that each level is ran.
for example, when the top level gives the bottom level a goal, the bottom level can act for
consecutive_steps_to_run_each_level steps and try to reach that goal. This is expected to be either
an EnvironmentSteps which will be used for all levels, or an EnvironmentSteps for each level as a list.
"""<line_sep>super().__init__('hrl_graph' schedule_params vis_params)<line_sep>self.agents_params=agents_params<line_sep>self.env_params=env_params<line_sep>self.preset_validation_params=preset_validation_params<if_stmt>isinstance(consecutive_steps_to_run_each_level list)<block_start><if_stmt>len(consecutive_steps_to_run_each_level)<ne>len(self.agents_params)<block_start><raise>ValueError("If the consecutive_steps_to_run_each_level is given as a list, it should match "<concat>"the number of levels in the hierarchy. Alternatively, it is possible to use a single "<concat>"value for all the levels, by passing an EnvironmentSteps")<block_end><block_end><elif_stmt>isinstance(consecutive_steps_to_run_each_level EnvironmentSteps)<block_start>self.consecutive_steps_to_run_each_level=[consecutive_steps_to_run_each_level]<times>len(self.agents_params)<block_end><for_stmt>agent_params agents_params<block_start>agent_params.visualization=self.visualization_parameters<if_stmt>agent_params.input_filter<is><none><block_start>agent_params.input_filter=self.env_params.default_input_filter()<block_end><if_stmt>agent_params.output_filter<is><none><block_start>agent_params.output_filter=self.env_params.default_output_filter()<block_end><block_end><if_stmt>len(self.agents_params)<l>2<block_start><raise>ValueError("The HRL graph manager must receive the agent parameters for at least two levels of the "<concat>"hierarchy. 
Otherwise, use the basic RL graph manager.")<block_end><block_end><def_stmt>_create_graph self task_parameters:TaskParameters<arrow>Tuple[List[LevelManager] List[Environment]]<block_start>self.env_params.seed=task_parameters.seed<line_sep>env=short_dynamic_import(self.env_params.path)(**self.env_params.__dict__ visualization_parameters=self.visualization_parameters)<for_stmt>agent_params self.agents_params<block_start>agent_params.task_parameters=task_parameters<block_end># we need to build the hierarchy in reverse order (from the bottom up) in order for the spaces of each level
# to be known
level_managers=[]<line_sep>current_env=env<line_sep># out_action_space = env.action_space
<for_stmt>level_idx,agent_params reversed(list(enumerate(self.agents_params)))# TODO: the code below is specific for HRL on observation scale
# in action space
# if level_idx == 0:
# # top level agents do not get directives
# in_action_space = None
# else:
# pass
# attention_size = (env.state_space['observation'].shape - 1)//4
# in_action_space = AttentionActionSpace(shape=2, low=0, high=env.state_space['observation'].shape - 1,
# forced_attention_size=attention_size)
# agent_params.output_filter.action_filters['masking'].set_masking(0, attention_size)
<block_start>agent_params.name="agent_{}".format(level_idx)<line_sep>agent_params.is_a_highest_level_agent=level_idx<eq>0<line_sep>agent=short_dynamic_import(agent_params.path)(agent_params)<line_sep>level_manager=LevelManager(agents=agent environment=current_env real_environment=env steps_limit=self.consecutive_steps_to_run_each_level[level_idx] should_reset_agent_state_after_time_limit_passes=level_idx<g>0 name="level_{}".format(level_idx))<line_sep>current_env=level_manager<line_sep>level_managers.insert(0 level_manager)<line_sep># out_action_space = in_action_space
<block_end><return>level_managers [env]<block_end><block_end> |
"""LiveSimulator: This class reads in various Bro IDS logs. The class utilizes
the BroLogReader and simply loops over the static bro log
file, replaying rows and changing any time stamps
Args:
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
"""<import_from_future_stmt> print_function<import_stmt>os<import_stmt>time<import_stmt>datetime<import_stmt>itertools<line_sep># Third party
<import_stmt>numpy<as>np<line_sep># Local Imports
<import_from_stmt>brothon bro_log_reader<import_from_stmt>brothon.utils file_utils<class_stmt>LiveSimulator(object)<block_start>"""LiveSimulator: This class reads in various Bro IDS logs. The class utilizes the
BroLogReader and simply loops over the static bro log file
replaying rows at the specified EPS and changing timestamps to 'now()'
"""<def_stmt>__init__ self filepath eps=10 max_rows=<none><block_start>"""Initialization for the LiveSimulator Class
Args:
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
"""<line_sep># Compute EPS timer
# Logic:
# - Normal distribution centered around 1.0/eps
# - Make sure never less than 0
# - Precompute 1000 deltas and then just cycle around
self.eps_timer=itertools.cycle([max(0 delta)<for>delta np.random.normal(1.0/float(eps) .5/float(eps) size=1000)])<line_sep># Initialize the Bro log reader
self.log_reader=bro_log_reader.BroLogReader(filepath tail=<false>)<line_sep># Store max_rows
self.max_rows=max_rows<block_end><def_stmt>readrows self<block_start>"""Using the BroLogReader this method yields each row of the log file
replacing timestamps, looping and emitting rows based on EPS rate
"""<line_sep># Loop forever or until max_rows is reached
num_rows=0<while_stmt><true># Yield the rows from the internal reader
<block_start><for_stmt>row self.log_reader.readrows()<block_start><yield>self.replace_timestamp(row)<line_sep># Sleep and count rows
time.sleep(next(self.eps_timer))<line_sep>num_rows<augadd>1<line_sep># Check for max_rows
<if_stmt>self.max_rows<and>(num_rows<ge>self.max_rows)<block_start><return><block_end><block_end><block_end><block_end>@staticmethod<def_stmt>replace_timestamp row<block_start>"""Replace the timestamp with now()"""<if_stmt>'ts'<in>row<block_start>row['ts']=datetime.datetime.utcnow()<block_end><return>row<block_end><block_end><def_stmt>test <block_start>"""Test for LiveSimulator Python Class"""<line_sep># Grab a test file
data_path=file_utils.relative_dir(__file__ '../data')<line_sep>test_path=os.path.join(data_path 'conn.log')<line_sep>print('Opening Data File: {:s}'.format(test_path))<line_sep># Create a LiveSimulator reader
reader=LiveSimulator(test_path max_rows=10)<for_stmt>line reader.readrows()<block_start>print(line)<block_end>print('Read with max_rows Test successful!')<block_end><if_stmt>__name__<eq>'__main__'# Run the test for easy testing/debugging
<block_start>test()<block_end> |
# Script to reproject a shapefile.
<import_from_stmt>osgeo ogr osr<line_sep># Create an output SRS.
sr=osr.SpatialReference()<line_sep>sr.ImportFromProj4('''+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23
+lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80
+datum=NAD83 +units=m +no_defs''')<line_sep># Don't forget to change your directory here.
ds=ogr.Open(r'D:\osgeopy-data\US' 1)<line_sep># Get the input layer.
in_lyr=ds.GetLayer('us_volcanos')<line_sep># Create the empty output layer.
out_lyr=ds.CreateLayer('us_volcanos_aea' sr ogr.wkbPoint)<line_sep>out_lyr.CreateFields(in_lyr.schema)<line_sep># Loop through the features in the input layer.
out_feat=ogr.Feature(out_lyr.GetLayerDefn())<for_stmt>in_feat in_lyr# Clone the geometry, project it, and add it to the feature.
<block_start>geom=in_feat.geometry().Clone()<line_sep>geom.TransformTo(sr)<line_sep>out_feat.SetGeometry(geom)<line_sep># Copy attributes.
<for_stmt>i range(in_feat.GetFieldCount())<block_start>out_feat.SetField(i in_feat.GetField(i))<block_end># Insert the feature
out_lyr.CreateFeature(out_feat)<block_end> |
<import_from_future_stmt> print_function<line_sep># This customise file provides an example (in the form of holes in
# BPix L1-L2 and L3-L3) on how to select a subset of generalTracks
# (e.g. by phi and eta) and setup various MTV instances for those
# (selected tracks, built tracks, and seeds in this case). The naming
# of DQM folders is consistent with an example in trackingCompare.py
<import_stmt>FWCore.ParameterSet.Config<as>cms<def_stmt>customiseMTVForBPix123Holes process<block_start><import_from_stmt>Validation.RecoTrack.cutsRecoTracks_cfi cutsRecoTracks<as>_cutsRecoTracks<import_stmt>math<line_sep>_minPhi=process.trackValidatorTrackingOnly.histoProducerAlgoBlock.minPhi.value()<line_sep>_maxPhi=process.trackValidatorTrackingOnly.histoProducerAlgoBlock.maxPhi.value()<line_sep>_nPhi=process.trackValidatorTrackingOnly.histoProducerAlgoBlock.nintPhi.value()<line_sep>_binPhi=(_maxPhi-_minPhi)/_nPhi<line_sep>process.generalTracksL1L2=_cutsRecoTracks.clone(minLayer=0 quality=[] minRapidity=-1.0 # also eta < -1 is affected, but let's start with this
minPhi=_minPhi+_binPhi<times>14 maxPhi=_minPhi+_binPhi<times>19)<line_sep># ~0.7 .. ~0.2
process.generalTracksL2L3=process.generalTracksL1L2.clone(minRapidity=-0.9 maxRapidity=2 minPhi=_minPhi+_binPhi<times>33 maxPhi=_minPhi+_binPhi+2<times>math.pi)<line_sep># ~2.6 .. ~3.3
print("L1L2 %f %f"%(process.generalTracksL1L2.minPhi.value() process.generalTracksL1L2.maxPhi.value()))<line_sep>print("L2L3 %f %f"%(process.generalTracksL2L3.minPhi.value() process.generalTracksL2L3.maxPhi.value()))<import_from_stmt>CommonTools.RecoAlgos.trackingParticleRefSelector_cfi trackingParticleRefSelector<as>_trackingParticleRefSelector<line_sep>process.trackingParticlesL1L2=_trackingParticleRefSelector.clone(signalOnly=<false> chargedOnly=<false> tip=1e5 lip=1e5 minRapidity=process.generalTracksL1L2.minRapidity.value() maxRapidity=process.generalTracksL1L2.maxRapidity.value() ptMin=0 minPhi=process.generalTracksL1L2.minPhi.value() maxPhi=process.generalTracksL1L2.maxPhi.value() )<line_sep>process.trackingParticlesL2L3=process.trackingParticlesL1L2.clone(minRapidity=process.generalTracksL2L3.minRapidity.value() maxRapidity=process.generalTracksL2L3.maxRapidity.value() minPhi=process.generalTracksL2L3.minPhi.value() maxPhi=process.generalTracksL2L3.maxPhi.value() )<line_sep>process.tracksPreValidationTrackingOnly<augadd>(process.trackingParticlesL1L2+process.trackingParticlesL2L3+process.generalTracksL1L2+process.generalTracksL2L3)<line_sep>process.trackValidatorTrackingOnlyL1L2=process.trackValidatorTrackingOnly.clone(dirName=process.trackValidatorTrackingOnly.dirName.value().replace("Track/" "TrackL1L2/") label_tp_effic="trackingParticlesL1L2" label_tp_effic_refvector=<true> label=["generalTracksL1L2"] )<line_sep>process.trackValidatorTrackingOnlyL2L3=process.trackValidatorTrackingOnlyL1L2.clone(dirName=process.trackValidatorTrackingOnlyL1L2.dirName.value().replace("L1L2" "L2L3") label_tp_effic="trackingParticlesL2L3" label=["generalTracksL2L3"] )<line_sep>process.trackValidatorsTrackingOnly<augadd>(process.trackValidatorTrackingOnlyL1L2+process.trackValidatorTrackingOnlyL2L3)<for_stmt>trkColl 
process.trackValidatorTrackingOnly.label<block_start><if_stmt>"ByAlgoMask"<in>trkColl<block_start><continue><block_end><if_stmt>"Pt09"<in>trkColl<and><not>trkColl<in>["generalTracksPt09" "cutsRecoTracksPt09Hp"]<block_start><continue><block_end><if_stmt>trkColl<ne>"generalTracks"<block_start>selL1L2=getattr(process trkColl).clone(src="generalTracksL1L2")<line_sep>selL2L3=getattr(process trkColl).clone(src="generalTracksL2L3")<if_stmt>"Pt09"<in>trkColl<block_start>selL1L2Name=trkColl.replace("Pt09" "Pt09L1L2")<line_sep>selL2L3Name=trkColl.replace("Pt09" "Pt09L2L3")<block_end><else_stmt><block_start>selL1L2Name=trkColl.replace("cutsRecoTracks" "cutsRecoTracksL1L2")<line_sep>selL2L3Name=trkColl.replace("cutsRecoTracks" "cutsRecoTracksL2L3")<block_end>setattr(process selL1L2Name selL1L2)<line_sep>setattr(process selL2L3Name selL2L3)<line_sep>process.tracksPreValidationTrackingOnly<augadd>(selL1L2+selL2L3)<line_sep>process.trackValidatorTrackingOnlyL1L2.label.append(selL1L2Name)<line_sep>process.trackValidatorTrackingOnlyL2L3.label.append(selL2L3Name)<block_end><block_end><for_stmt>midfix ["Building" "Seeding"]<block_start>label="trackValidator%sTrackingOnly"%midfix<line_sep>mtv=getattr(process label)<line_sep>mtvL1L2=mtv.clone(dirName=mtv.dirName.value()[:-1]+"L1L2/" label_tp_effic="trackingParticlesL1L2" label_tp_effic_refvector=<true> label=[] mvaLabels=cms.PSet() doMVAPlots=<false> )<line_sep>mtvL2L3=mtvL1L2.clone(dirName=mtvL1L2.dirName.value().replace("L1L2" "L2L3") label_tp_effic="trackingParticlesL2L3" )<line_sep>setattr(process label+"L1L2" mtvL1L2)<line_sep>setattr(process label+"L2L3" mtvL2L3)<line_sep>process.trackValidatorsTrackingOnly<augadd>(mtvL1L2+mtvL2L3)<for_stmt>trkColl mtv.label<block_start>selL1L2=process.generalTracksL1L2.clone(src=trkColl)<line_sep>selL2L3=process.generalTracksL2L3.clone(src=trkColl)<line_sep>selL1L2Name=trkColl+"L1L2"<line_sep>selL2L3Name=trkColl+"L2L3"<line_sep>setattr(process selL1L2Name selL1L2)<line_sep>setattr(process 
selL2L3Name selL2L3)<line_sep>process.tracksPreValidationTrackingOnly<augadd>(selL1L2+selL2L3)<line_sep>mtvL1L2.label.append(selL1L2Name)<line_sep>mtvL2L3.label.append(selL2L3Name)<block_end><block_end><return>process<block_end> |
"""
Example of 2D histogram
"""<import_stmt>geoplotlib<import_from_stmt>geoplotlib.utils read_csv BoundingBox<line_sep>data=read_csv('data/opencellid_dk.csv')<line_sep>geoplotlib.hist(data colorscale='sqrt' binsize=8)<line_sep>geoplotlib.set_bbox(BoundingBox.DK)<line_sep>geoplotlib.show()<line_sep> |
# (c) MIT License Copyright 2014 <NAME>
# Please reuse, modify or distribute freely.
<import_from_stmt>collections OrderedDict<import_stmt>tkinter<as>tk<class_stmt>StripChart(tk.Frame)<block_start><def_stmt>__init__ self parent scale historySize trackColors *args **opts# Initialize
<block_start>super().__init__(parent *args **opts)<line_sep>self._trackHist=OrderedDict()# Map: TrackName -> list of canvas objID
self._trackColor=trackColors# Map: Track Name -> color
self._chartHeight=scale+1<line_sep>self._chartLength=historySize<times>2# Stretch for readability
self._canvas=tk.Canvas(self height=self._chartHeight+17 width=self._chartLength background='black')<line_sep>self._canvas.grid(sticky=tk.N+tk.S+tk.E+tk.W)<line_sep># Draw horizontal to divide plot from tick labels
x,y=0 self._chartHeight+2<line_sep>x2,y2=self._chartLength y<line_sep>self._baseLine=self._canvas.create_line(x y x2 y2 fill='white')<line_sep># Init track def and histories lists
self._trackColor.update({'tick':'white' 'tickline':'white' 'ticklabel':'white'})<for_stmt>trackName self._trackColor.keys()<block_start>self._trackHist[trackName]=[<none><for>x range(historySize)]<block_end><block_end><def_stmt>plotValues self **vals<block_start><for_stmt>trackName,trackHistory self._trackHist.items()# Scroll left-wards
<block_start>self._canvas.delete(trackHistory.pop(0))<line_sep># Remove left-most canvas objs
self._canvas.move(trackName -2 0)<line_sep># Scroll canvas objs 2 pixels left
# Plot the new values
<try_stmt><block_start>val=vals[trackName]<line_sep>x=self._chartLength<line_sep>y=self._chartHeight-val<line_sep>color=self._trackColor[trackName]<line_sep>objId=self._canvas.create_line(x y x+1 y fill=color width=3 tags=trackName)<line_sep>trackHistory.append(objId)<block_end><except_stmt><block_start>trackHistory.append(<none>)<block_end><block_end><block_end><def_stmt>drawTick self text=<none> **lineOpts# draw vertical tick line
<block_start>x=self._chartLength<line_sep>y=1<line_sep>x2=x<line_sep>y2=self._chartHeight<line_sep>color=self._trackColor['tickline']<line_sep>objId=self._canvas.create_line(x y x2 y2 fill=color tags='tick' **lineOpts)<line_sep>self._trackHist['tickline'].append(objId)<line_sep># draw tick label
<if_stmt>text<is><not><none><block_start>x=self._chartLength<line_sep>y=self._chartHeight+10<line_sep>color=self._trackColor['ticklabel']<line_sep>objId=self._canvas.create_text(x y text=text fill=color tags='tick')<line_sep>self._trackHist['ticklabel'].append(objId)<block_end><block_end><def_stmt>configTrackColors self **trackColors# Change plotted data color
<block_start><for_stmt>trackName,colorName trackColors.items()<block_start>self._canvas.itemconfigure(trackName fill=colorName)<block_end># Change settings so future data has the new color
self._trackColor.update(trackColors)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>top=tk.Tk()<line_sep>graph=StripChart(top 100 300 {'A':'blue' 'B':'green' 'C':'red'})<line_sep>graph.grid()<line_sep>val_A=0<line_sep>val_B=0<line_sep>val_C=0<line_sep>delta=[-3 -2 -1 0 1 2 3]# randomly vary the values by one of these
tickCount=0<def_stmt>nextVal current lowerBound upperBound<block_start><import_from_stmt>random choice<line_sep>current<augadd>choice(delta)<if_stmt>current<l>lowerBound<block_start><return>lowerBound<block_end><elif_stmt>current<g>upperBound<block_start><return>upperBound<block_end><else_stmt><block_start><return>current<block_end><block_end><def_stmt>plotNextVals <block_start><global>val_A val_B val_C tickCount<if_stmt>tickCount%50<eq>0<block_start>graph.drawTick(text=str(tickCount) dash=(1 4))<block_end>tickCount<augadd>1<line_sep>val_A=nextVal(val_A 0 99)<line_sep>val_B=nextVal(val_B 0 99)<line_sep>val_C=nextVal(val_C 0 99)<line_sep>graph.plotValues(A=val_A B=val_B C=val_C)<line_sep>#changeColor = { 800: 'black',
#1200: 'yellow',
#1600: 'orange',
#2000: 'white',
#2400: 'brown',
#2800: 'blue' }
#if tickCount in changeColor:
#graph.configTrackColors( A=changeColor[tickCount] )
top.after(1 plotNextVals)<block_end>top.after(1 plotNextVals)<line_sep>top.mainloop()<block_end> |
<import_from_stmt>. Plugin<class_stmt>VmstatPlugin(Plugin)<block_start>targets=[{'match':'^servers\.(?P<server>[^\.]+)\.vmstat\.(?P<type>.*)$' 'target_type':'rate' 'tags':{'unit':'Page'}}]<def_stmt>sanitize self target<block_start>target['tags']['type']=target['tags']['type'].replace('pgpg' 'paging_')<line_sep>target['tags']['type']=target['tags']['type'].replace('pswp' 'swap_')<block_end><block_end># vim: ts=4 et sw=4:
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>simEcalDigis=cms.EDProducer("EcalSelectiveReadoutProducer" # Label of input EB and EE digi collections
digiProducer=cms.string('simEcalUnsuppressedDigis') # Instance name of input EB digi collections
EBdigiCollection=cms.string('') # Instance name of input EB digi collections
EEdigiCollection=cms.string('') # Instance name of output EB SR flags collection
EBSrFlagCollection=cms.string('ebSrFlags') # Instance name of output EE SR flags collection
EESrFlagCollection=cms.string('eeSrFlags') # Instance name of output EB digis collection
EBSRPdigiCollection=cms.string('ebDigis') # Instance name of output EE digis collection
EESRPdigiCollection=cms.string('eeDigis') # Label name of input ECAL trigger primitive collection
trigPrimProducer=cms.string('simEcalTriggerPrimitiveDigis') # Instance name of ECAL trigger primitive collection
trigPrimCollection=cms.string('') # Neighbour eta range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaEta=cms.int32(1) # Neighbouring eta range, neighborhood: (2*deltaEta+1)*(2*deltaPhi+1)
deltaPhi=cms.int32(1) # Index of time sample (staring from 1) the first DCC weights is implied
ecalDccZs1stSample=cms.int32(3) # ADC to GeV conversion factor used in ZS filter for EB
ebDccAdcToGeV=cms.double(0.035) # ADC to GeV conversion factor used in ZS filter for EE
eeDccAdcToGeV=cms.double(0.06) #DCC ZS FIR weights.
#d-efault value set of DCC firmware used in CRUZET and CRAFT
dccNormalizedWeights=cms.vdouble(-1.1865 0.0195 0.2900 0.3477 0.3008 0.2266) # Switch to use a symetric zero suppression (cut on absolute value). For
# studies only, for time being it is not supported by the hardware.
symetricZS=cms.bool(<false>) # ZS energy threshold in GeV to apply to low interest channels of barrel
srpBarrelLowInterestChannelZS=cms.double(3<times>.035) # ZS energy threshold in GeV to apply to low interest channels of endcap
srpEndcapLowInterestChannelZS=cms.double(3<times>0.06) # ZS energy threshold in GeV to apply to high interest channels of barrel
srpBarrelHighInterestChannelZS=cms.double(-1.e9) # ZS energy threshold in GeV to apply to high interest channels of endcap
srpEndcapHighInterestChannelZS=cms.double(-1.e9) #switch to run w/o trigger primitive. For debug use only
trigPrimBypass=cms.bool(<false>) #for debug mode only:
trigPrimBypassLTH=cms.double(1.0) #for debug mode only:
trigPrimBypassHTH=cms.double(1.0) #for debug mode only
trigPrimBypassWithPeakFinder=cms.bool(<true>) # Mode selection for "Trig bypass" mode
# 0: TT thresholds applied on sum of crystal Et's
# 1: TT thresholds applies on compressed Et from Trigger primitive
# @ee trigPrimByPass_ switch
trigPrimBypassMode=cms.int32(0) #number of events whose TT and SR flags must be dumped (for debug purpose):
dumpFlags=cms.untracked.int32(0) #logical flag to write out SrFlags
writeSrFlags=cms.untracked.bool(<true>) #switch to apply selective readout decision on the digis and produce
#the "suppressed" digis
produceDigis=cms.untracked.bool(<true>) #Trigger Tower Flag to use when a flag is not found from the input
#Trigger Primitive collection. Must be one of the following values:
# 0: low interest, 1: mid interest, 3: high interest
# 4: forced low interest, 5: forced mid interest, 7: forced high interest
defaultTtf_=cms.int32(4) # SR->action flag map
actions=cms.vint32(1 3 3 3 5 7 7 7))<line_sep> |
<import_from_stmt>django.http.response Http404<import_from_stmt>django.http HttpResponse<import_from_stmt>blogs.helpers unmark clean_text<import_from_stmt>blogs.views.blog resolve_address<import_from_stmt>feedgen.feed FeedGenerator<import_stmt>mistune<def_stmt>feed request<block_start>blog=resolve_address(request)<if_stmt><not>blog<block_start><raise>Http404("Blog does not exist")<block_end>all_posts=blog.post_set.filter(publish=<true> is_page=<false>).order_by('-published_date')<line_sep>fg=FeedGenerator()<line_sep>fg.id(blog.useful_domain())<line_sep>fg.author({'name':blog.subdomain 'email':'hidden'})<line_sep>fg.title(blog.title)<line_sep>fg.subtitle(blog.meta_description<or>clean_text(unmark(blog.content)[:160])<or>blog.title)<line_sep>fg.link(href=f"{blog.useful_domain()}/" rel='alternate')<for_stmt>post all_posts<block_start>fe=fg.add_entry()<line_sep>fe.id(f"{blog.useful_domain()}/{post.slug}/")<line_sep>fe.title(post.title)<line_sep>fe.author({'name':blog.subdomain 'email':'hidden'})<line_sep>fe.link(href=f"{blog.useful_domain()}/{post.slug}/")<line_sep>fe.content(clean_text(mistune.html(post.content)) type="html")<line_sep>fe.published(post.published_date)<line_sep>fe.updated(post.published_date)<block_end><if_stmt>request.GET.get('type')<eq>'rss'<block_start>fg.link(href=f"{blog.useful_domain()}/feed/?type=rss" rel='self')<line_sep>rssfeed=fg.rss_str(pretty=<true>)<line_sep><return>HttpResponse(rssfeed content_type='application/rss+xml')<block_end><else_stmt><block_start>fg.link(href=f"{blog.useful_domain()}/feed/" rel='self')<line_sep>atomfeed=fg.atom_str(pretty=<true>)<line_sep><return>HttpResponse(atomfeed content_type='application/atom+xml')<block_end><block_end> |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
<import_from_stmt>botocore.session Session<import_from_stmt>tests unittest<class_stmt>TestTaggedUnionsUnknown(unittest.TestCase)<block_start><def_stmt>test_tagged_union_member_name_does_not_coincide_with_unknown_key self# This test ensures that operation models do not use SDK_UNKNOWN_MEMBER
# as a member name. Thereby reserving SDK_UNKNOWN_MEMBER for the parser to
# set as a key on the reponse object. This is necessary when the client
# encounters a member that it is unaware of or not modeled.
<block_start>session=Session()<for_stmt>service_name session.get_available_services()<block_start>service_model=session.get_service_model(service_name)<for_stmt>shape_name service_model.shape_names<block_start>shape=service_model.shape_for(shape_name)<if_stmt>hasattr(shape 'is_tagged_union')<and>shape.is_tagged_union<block_start>self.assertNotIn('SDK_UNKNOWN_MEMBER' shape.members)<block_end><block_end><block_end><block_end><block_end> |
# MIT License
# Copyright (c) 2019 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
<import_from_stmt>jarvis.skills.skill AssistantSkill<import_from_stmt>jarvis.utils.mongoDB db<import_from_stmt>jarvis.utils input<line_sep>header="""
-----------------------------------------------------------------------------------------------
I would like to learn, tell me the right answer!
-----------------------------------------------------------------------------------------------
* Note: Create new skill! Write your question and the appropriate answer.
\n
"""<class_stmt>RememberSkills(AssistantSkill)<block_start>@classmethod<def_stmt>remember cls **kwargs<block_start>cls.console(header)<line_sep>continue_add=<true><while_stmt>continue_add<block_start>cls.console(text='Question: ')<line_sep>tags=cls.user_input()<line_sep>cls.console(text='Suggested Response: ')<line_sep>response=cls.user_input()<line_sep>new_skill={'name':'learned_skill' 'enable':<true> 'func':cls.tell_response.__name__ 'response':response 'tags':tags } <line_sep>cls.response('Add more? ' refresh_console=<false>)<line_sep>continue_add=input.check_input_to_continue()<line_sep>db.insert_many_documents(collection='learned_skills' documents=new_skill)<block_end><block_end>@classmethod<def_stmt>tell_response cls **kwargs<block_start>cls.response(kwargs.get('skill').get('response'))<block_end>@classmethod<def_stmt>clear_learned_skills cls **kwargs<block_start><if_stmt>db.is_collection_empty(collection='learned_skills')<block_start>cls.response("I can't find learned skills in my database")<block_end><else_stmt><block_start>cls.response('I found learned skills..')<line_sep>cls.response('Are you sure to remove learned skills? ' refresh_console=<false>)<line_sep>user_answer=input.check_input_to_continue()<if_stmt>user_answer<block_start>db.drop_collection(collection='learned_skills')<line_sep>cls.response("Perfect I have deleted them all")<block_end><block_end><block_end><block_end> |
import enum

import databases
import pydantic
import pytest
import sqlalchemy
from pydantic import ValidationError

import ormar
from tests.settings import DATABASE_URL

metadata = sqlalchemy.MetaData()
database = databases.Database(DATABASE_URL)


class BaseMeta(ormar.ModelMeta):
    database = database
    metadata = metadata


class EnumExample(str, enum.Enum):
    A = "A"
    B = "B"
    C = "C"


class ModelExample(ormar.Model):
    class Meta(ormar.ModelMeta):
        database = database
        metadata = metadata
        tablename = "examples"

    id: int = ormar.Integer(primary_key=True)
    str_field: str = ormar.String(min_length=5, max_length=10, nullable=False)
    enum_field: str = ormar.String(max_length=1, nullable=False, choices=list(EnumExample))

    @pydantic.validator("str_field")
    def validate_str_field(cls, v):
        # Custom pydantic-level constraint on top of ormar's length checks.
        if " " not in v:
            raise ValueError("must contain a space")
        return v


# Auxiliary pydantic-only model (no primary key) generated from the ormar model.
ModelExampleCreate = ModelExample.get_pydantic(exclude={"id"})


def test_ormar_validator():
    """The custom validator and the choices check both fire on the ormar model."""
    # A valid instance constructs without raising.
    ModelExample(str_field="a aaaaaa", enum_field="A")

    with pytest.raises(ValidationError) as exc_info:
        ModelExample(str_field="aaaaaaa", enum_field="A")
    assert "must contain a space" in str(exc_info)

    with pytest.raises(ValidationError) as exc_info:
        ModelExample(str_field="a aaaaaaa", enum_field="Z")
    assert "not in allowed choices" in str(exc_info)


def test_pydantic_validator():
    """The derived pydantic model inherits the same validation behaviour."""
    # A valid instance constructs without raising.
    ModelExampleCreate(str_field="a aaaaaa", enum_field="A")

    with pytest.raises(ValidationError) as exc_info:
        ModelExampleCreate(str_field="aaaaaaa", enum_field="A")
    assert "must contain a space" in str(exc_info)

    with pytest.raises(ValidationError) as exc_info:
        ModelExampleCreate(str_field="a aaaaaaa", enum_field="Z")
    assert "not in allowed choices" in str(exc_info)
import math

import torch

from .modules import *


def _kl_loss(mu_0, log_sigma_0, mu_1, log_sigma_1):
    """Closed-form KL divergence KL(N(mu_0, sigma_0) || N(mu_1, sigma_1)).

    Arguments:
        mu_0 (Tensor): mean of the posterior normal distribution.
        log_sigma_0 (Tensor): log(standard deviation) of the posterior.
        mu_1 (float): mean of the prior normal distribution.
        log_sigma_1 (float): log(standard deviation) of the prior
            (a plain scalar -- it goes through ``math.exp``).

    Returns:
        Tensor: scalar sum of the elementwise KL terms.
    """
    var_0 = torch.exp(log_sigma_0) ** 2
    var_1 = math.exp(log_sigma_1) ** 2
    kl = log_sigma_1 - log_sigma_0 + (var_0 + (mu_0 - mu_1) ** 2) / (2 * var_1) - 0.5
    return kl.sum()


def bayesian_kl_loss(model, reduction='mean', last_layer_only=False):
    """Accumulate the KL divergence of every Bayesian layer in ``model``.

    Arguments:
        model (nn.Module): model whose Bayesian modules are scanned.
        reduction (string, optional): ``'mean'`` divides the summed KL by the
            number of variational parameters; ``'sum'`` returns the raw sum.
        last_layer_only (bool): if True, return only the KL term of the last
            Bayesian parameter group that was visited.
    """
    device = torch.device("cuda" if next(model.parameters()).is_cuda else "cpu")
    kl = torch.Tensor([0]).to(device)       # KL of the most recent parameter group
    kl_sum = torch.Tensor([0]).to(device)   # running total over all groups
    n = torch.Tensor([0]).to(device)        # number of variational parameters seen

    for module in model.modules():
        if isinstance(module, (BayesLinear, BayesConv2d)):
            kl = _kl_loss(module.weight_mu, module.weight_log_sigma,
                          module.prior_mu, module.prior_log_sigma)
            kl_sum += kl
            n += len(module.weight_mu.view(-1))

            if module.bias:
                kl = _kl_loss(module.bias_mu, module.bias_log_sigma,
                              module.prior_mu, module.prior_log_sigma)
                kl_sum += kl
                n += len(module.bias_mu.view(-1))

        if isinstance(module, BayesBatchNorm2d):
            # Batch-norm only carries variational weight/bias when affine.
            if module.affine:
                kl = _kl_loss(module.weight_mu, module.weight_log_sigma,
                              module.prior_mu, module.prior_log_sigma)
                kl_sum += kl
                n += len(module.weight_mu.view(-1))

                kl = _kl_loss(module.bias_mu, module.bias_log_sigma,
                              module.prior_mu, module.prior_log_sigma)
                kl_sum += kl
                n += len(module.bias_mu.view(-1))

    # n == 0 means no Bayesian modules were found; fall back to the zero tensor.
    if last_layer_only or n == 0:
        return kl
    if reduction == 'mean':
        return kl_sum / n
    if reduction == 'sum':
        return kl_sum
    raise ValueError(reduction + " is not valid")
import numpy as np

from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move


class SawyerPushWallV2Policy(Policy):
    """Scripted policy that pushes a puck to a goal while detouring around a wall."""

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Split the raw observation vector into named components."""
        return {
            'hand_pos': obs[:3],
            'unused_1': obs[3],
            'obj_pos': obs[4:7],
            'unused_2': obs[7:-3],
            'goal_pos': obs[-3:],
        }

    def get_action(self, obs):
        """Return the action array: XYZ delta toward the target plus grab effort."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self.desired_pos(o_d), p=10.)
        action['grab_effort'] = self.grab_effort(o_d)
        return action.array

    @staticmethod
    def desired_pos(o_d):
        """Pick the next XYZ target for the end effector (staged heuristic)."""
        hand = o_d['hand_pos']
        puck = o_d['obj_pos'] + np.array([-0.005, 0, 0])

        # Stage 1: if XY error exceeds 0.02, hover above the puck first.
        if np.linalg.norm(hand[:2] - puck[:2]) > 0.02:
            return puck + np.array([0., 0., 0.2])
        # Stage 2: once aligned, drop the end effector down onto the puck.
        if abs(hand[2] - puck[2]) > 0.04:
            return puck + np.array([0., 0., 0.03])
        # Stage 3: push toward the goal, skirting the wall when it is in the way.
        if -0.1 <= puck[0] <= 0.3 and 0.65 <= puck[1] <= 0.75:
            return hand + np.array([-1, 0, 0])
        if (-0.15 < puck[0] < 0.05 or 0.15 < puck[0] < 0.35) \
                and 0.695 <= puck[1] <= 0.755:
            return hand + np.array([0, 1, 0])
        return o_d['goal_pos']

    @staticmethod
    def grab_effort(o_d):
        """Close the gripper only once the hand is nearly on top of the puck."""
        hand = o_d['hand_pos']
        puck = o_d['obj_pos']
        far_in_xy = np.linalg.norm(hand[:2] - puck[:2]) > 0.02
        far_in_z = abs(hand[2] - puck[2]) > 0.1
        return 0.0 if (far_in_xy or far_in_z) else 0.6
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
<try_stmt><block_start><import_from_stmt>._models_py3 AdmCredential<import_from_stmt>._models_py3 ApnsCredential<import_from_stmt>._models_py3 BaiduCredential<import_from_stmt>._models_py3 CheckAvailabilityParameters<import_from_stmt>._models_py3 CheckAvailabilityResult<import_from_stmt>._models_py3 DebugSendResponse<import_from_stmt>._models_py3 ErrorResponse<import_from_stmt>._models_py3 GcmCredential<import_from_stmt>._models_py3 MpnsCredential<import_from_stmt>._models_py3 NamespaceCreateOrUpdateParameters<import_from_stmt>._models_py3 NamespaceListResult<import_from_stmt>._models_py3 NamespacePatchParameters<import_from_stmt>._models_py3 NamespaceResource<import_from_stmt>._models_py3 NotificationHubCreateOrUpdateParameters<import_from_stmt>._models_py3 NotificationHubListResult<import_from_stmt>._models_py3 NotificationHubPatchParameters<import_from_stmt>._models_py3 NotificationHubResource<import_from_stmt>._models_py3 Operation<import_from_stmt>._models_py3 OperationDisplay<import_from_stmt>._models_py3 OperationListResult<import_from_stmt>._models_py3 PnsCredentialsResource<import_from_stmt>._models_py3 PolicykeyResource<import_from_stmt>._models_py3 Resource<import_from_stmt>._models_py3 ResourceListKeys<import_from_stmt>._models_py3 SharedAccessAuthorizationRuleCreateOrUpdateParameters<import_from_stmt>._models_py3 SharedAccessAuthorizationRuleListResult<import_from_stmt>._models_py3 SharedAccessAuthorizationRuleProperties<import_from_stmt>._models_py3 SharedAccessAuthorizationRuleResource<import_from_stmt>._models_py3 Sku<import_from_stmt>._models_py3 SubResource<import_from_stmt>._models_py3 WnsCredential<block_end><except_stmt>(SyntaxError ImportError)<block_start><import_from_stmt>._models AdmCredential# type: ignore
<import_from_stmt>._models ApnsCredential# type: ignore
<import_from_stmt>._models BaiduCredential# type: ignore
<import_from_stmt>._models CheckAvailabilityParameters# type: ignore
<import_from_stmt>._models CheckAvailabilityResult# type: ignore
<import_from_stmt>._models DebugSendResponse# type: ignore
<import_from_stmt>._models ErrorResponse# type: ignore
<import_from_stmt>._models GcmCredential# type: ignore
<import_from_stmt>._models MpnsCredential# type: ignore
<import_from_stmt>._models NamespaceCreateOrUpdateParameters# type: ignore
<import_from_stmt>._models NamespaceListResult# type: ignore
<import_from_stmt>._models NamespacePatchParameters# type: ignore
<import_from_stmt>._models NamespaceResource# type: ignore
<import_from_stmt>._models NotificationHubCreateOrUpdateParameters# type: ignore
<import_from_stmt>._models NotificationHubListResult# type: ignore
<import_from_stmt>._models NotificationHubPatchParameters# type: ignore
<import_from_stmt>._models NotificationHubResource# type: ignore
<import_from_stmt>._models Operation# type: ignore
<import_from_stmt>._models OperationDisplay# type: ignore
<import_from_stmt>._models OperationListResult# type: ignore
<import_from_stmt>._models PnsCredentialsResource# type: ignore
<import_from_stmt>._models PolicykeyResource# type: ignore
<import_from_stmt>._models Resource# type: ignore
<import_from_stmt>._models ResourceListKeys# type: ignore
<import_from_stmt>._models SharedAccessAuthorizationRuleCreateOrUpdateParameters# type: ignore
<import_from_stmt>._models SharedAccessAuthorizationRuleListResult# type: ignore
<import_from_stmt>._models SharedAccessAuthorizationRuleProperties# type: ignore
<import_from_stmt>._models SharedAccessAuthorizationRuleResource# type: ignore
<import_from_stmt>._models Sku# type: ignore
<import_from_stmt>._models SubResource# type: ignore
<import_from_stmt>._models WnsCredential<block_end># type: ignore
<import_from_stmt>._notification_hubs_management_client_enums AccessRights NamespaceType SkuName <line_sep>__all__=['AdmCredential' 'ApnsCredential' 'BaiduCredential' 'CheckAvailabilityParameters' 'CheckAvailabilityResult' 'DebugSendResponse' 'ErrorResponse' 'GcmCredential' 'MpnsCredential' 'NamespaceCreateOrUpdateParameters' 'NamespaceListResult' 'NamespacePatchParameters' 'NamespaceResource' 'NotificationHubCreateOrUpdateParameters' 'NotificationHubListResult' 'NotificationHubPatchParameters' 'NotificationHubResource' 'Operation' 'OperationDisplay' 'OperationListResult' 'PnsCredentialsResource' 'PolicykeyResource' 'Resource' 'ResourceListKeys' 'SharedAccessAuthorizationRuleCreateOrUpdateParameters' 'SharedAccessAuthorizationRuleListResult' 'SharedAccessAuthorizationRuleProperties' 'SharedAccessAuthorizationRuleResource' 'Sku' 'SubResource' 'WnsCredential' 'AccessRights' 'NamespaceType' 'SkuName' ]<line_sep> |
"""
Author: dreamkong
"""
from typing import List


def bsearch(nums: List[int], target: int) -> int:
    """Return the index of ``target`` in the sorted list ``nums``, or -1 if absent.

    Thin wrapper that searches the whole list via the recursive helper.
    """
    return bsearch_internally(nums, 0, len(nums) - 1, target)


def bsearch_internally(nums: List[int], low: int, high: int, target: int) -> int:
    """Recursive binary search over nums[low:high + 1].

    Returns the index of ``target`` within the range, or -1 when the range
    is empty (``low > high``).
    """
    if low > high:
        return -1
    # Midpoint of [low, high]. The original used ``(high - low) >> 2`` -- a
    # quarter split that still converges but wastes comparisons; ``>> 1``
    # restores the intended O(log2 n) binary search. (The redundant int()
    # wrapper is also dropped: a shift of ints is already an int.)
    mid = low + ((high - low) >> 1)
    if nums[mid] == target:
        return mid
    elif nums[mid] < target:
        return bsearch_internally(nums, mid + 1, high, target)
    else:
        return bsearch_internally(nums, low, mid - 1, target)
# -*- coding: utf-8 -*-
"""
Created on 2017-8-24
@author: cheng.li

Data-preparation utilities for alpha-model training: fetch factor /
return / risk / industry / benchmark panels from a SqlEngine, align them
on (trade_date, code), and slice them into rolling train / predict
buckets.
"""
import bisect
import datetime as dt
from typing import Iterable
from typing import Union

import numpy as np
import pandas as pd
from simpleutils.asserts import require
from PyFin.DateUtilities import Period
from PyFin.api import BizDayConventions
from PyFin.api import DateGeneration
from PyFin.api import advanceDateByCalendar
from PyFin.api import makeSchedule

from alphamind.data.engines.sqlengine import SqlEngine
from alphamind.data.engines.sqlengine import total_risk_factors
from alphamind.data.engines.universe import Universe
from alphamind.data.processing import factor_processing
from alphamind.data.transformer import Transformer
from alphamind.utilities import alpha_logger
from alphamind.utilities import map_freq


def _merge_df(engine, names, factor_df, target_df, universe, dates, risk_model, neutralized_risk):
    """Join factor and target frames with the risk-model exposures.

    Returns the tuple
    ``(target_df, dates, date_label, risk_exp, x_values, y_values,
    train_x, train_y, codes)`` where ``risk_exp`` is None when no
    neutralization is requested.
    """
    # fetch_risk_model_range returns a tuple; element [1] is the exposure
    # frame used here (element [0] is not used in this module).
    risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
    # NOTE(review): this keeps every risk factor not already among the
    # alpha factor names (total_risk_factors), whereas fetch_predict_phase
    # below filters by the requested `neutralized_risk` only — confirm the
    # asymmetry is intended.
    used_neutralized_risk = list(set(total_risk_factors).difference(names))
    risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
    # Inner-join so targets only survive where exposures exist.
    target_df = pd.merge(target_df, risk_df, on=['trade_date', 'code']).dropna()
    if neutralized_risk:
        train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
        train_y = target_df.copy()
        risk_exp = train_x[neutralized_risk].values.astype(float)
        x_values = train_x[names].values.astype(float)
        y_values = train_y[['dx']].values
    else:
        risk_exp = None
        train_x = factor_df.copy()
        train_y = target_df.copy()
        x_values = train_x[names].values.astype(float)
        y_values = train_y[['dx']].values
    codes = train_x['code'].values
    # date_label keeps one entry per row; dates is the de-duplicated axis.
    date_label = pd.DatetimeIndex(factor_df.trade_date).to_pydatetime()
    dates = np.unique(date_label)
    return target_df, dates, date_label, risk_exp, x_values, y_values, train_x, \
        train_y, codes


def prepare_data(engine: SqlEngine,
                 factors: Union[Transformer, Iterable[object]],
                 start_date: str,
                 end_date: str,
                 frequency: str,
                 universe: Universe,
                 benchmark: int,
                 warm_start: int = 0,
                 fit_target: Union[Transformer, object] = None):
    """Fetch and align factor, target, industry and benchmark data.

    Returns ``(dates, target_frame, factor_frame)`` where the two frames
    share the (trade_date, code) rows that survived all joins.
    """
    if warm_start > 0:
        # Shift the start back by `warm_start` periods so early windows
        # have history to train on.
        p = Period(frequency)
        p = Period(length=-warm_start * p.length(), units=p.units())
        start_date = advanceDateByCalendar('china.sse', start_date, p).strftime('%Y-%m-%d')

    dates = makeSchedule(start_date, end_date, frequency, calendar='china.sse',
                         dateRule=BizDayConventions.Following,
                         dateGenerationRule=DateGeneration.Forward)
    dates = [d.strftime('%Y-%m-%d') for d in dates]

    horizon = map_freq(frequency)

    if isinstance(factors, Transformer):
        transformer = factors
    else:
        transformer = Transformer(factors)

    factor_df = engine.fetch_factor_range(universe,
                                          factors=transformer,
                                          dates=dates).sort_values(['trade_date', 'code'])
    alpha_logger.info("factor data loading finished")

    if fit_target is None:
        # Default target: forward return (`dx`) over the rebalance horizon.
        target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
    else:
        # Custom fit target: fetch one extra period forward, then restrict
        # back to the schedule and forward-fill per code.
        one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
        target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
                                                      dates=dates + [one_more_date])
        target_df = target_df[target_df.trade_date.isin(dates)]
        target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))
    alpha_logger.info("fit target data loading finished")

    industry_df = engine.fetch_industry_range(universe, dates=dates)
    alpha_logger.info("industry data loading finished")
    benchmark_df = engine.fetch_benchmark_range(benchmark, dates=dates)
    alpha_logger.info("benchmark data loading finished")

    df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()
    # Left-join the benchmark so non-members survive with weight 0 below.
    df = pd.merge(df, benchmark_df, on=['trade_date', 'code'], how='left')
    df = pd.merge(df, industry_df, on=['trade_date', 'code'])
    df['weight'] = df['weight'].fillna(0.)
    df.dropna(inplace=True)

    return dates, df[['trade_date', 'code', 'dx']], df[
        ['trade_date', 'code', 'weight', 'industry_code', 'industry'] + transformer.names]


def batch_processing(names,
                     x_values,
                     y_values,
                     groups,
                     group_label,
                     batch,
                     risk_exp,
                     pre_process,
                     post_process,
                     codes):
    """Slice aligned arrays into rolling train / predict buckets.

    For each window ending at date ``end``: the train bucket covers the
    preceding ``batch`` periods (exclusive of ``end``, via bisect_left),
    and the predict bucket is the single date ``end`` (via bisect_right).
    All slices go through ``factor_processing`` before being stored.
    """
    train_x_buckets = {}
    train_y_buckets = {}
    train_risk_buckets = {}
    predict_x_buckets = {}
    predict_y_buckets = {}
    predict_risk_buckets = {}
    predict_codes_bucket = {}

    for i, start in enumerate(groups[:-batch]):
        end = groups[i + batch]

        # --- training slice: rows with start <= date < end ---
        left_index = bisect.bisect_left(group_label, start)
        right_index = bisect.bisect_left(group_label, end)

        this_raw_x = x_values[left_index:right_index]
        this_raw_y = y_values[left_index:right_index]
        if risk_exp is not None:
            this_risk_exp = risk_exp[left_index:right_index]
        else:
            this_risk_exp = None

        train_x_buckets[end] = pd.DataFrame(factor_processing(this_raw_x,
                                                              pre_process=pre_process,
                                                              risk_factors=this_risk_exp,
                                                              post_process=post_process),
                                            columns=names)
        train_y_buckets[end] = factor_processing(this_raw_y,
                                                 pre_process=pre_process,
                                                 risk_factors=this_risk_exp,
                                                 post_process=post_process)
        train_risk_buckets[end] = this_risk_exp

        # --- prediction slice: rows with start < date <= end ---
        left_index = bisect.bisect_right(group_label, start)
        right_index = bisect.bisect_right(group_label, end)

        sub_dates = group_label[left_index:right_index]
        this_raw_x = x_values[left_index:right_index]
        this_codes = codes[left_index:right_index]
        if risk_exp is not None:
            this_risk_exp = risk_exp[left_index:right_index]
        else:
            this_risk_exp = None
        ne_x = factor_processing(this_raw_x,
                                 pre_process=pre_process,
                                 risk_factors=this_risk_exp,
                                 post_process=post_process)
        # Keep only the rows exactly at `end` (inner window).
        inner_left_index = bisect.bisect_left(sub_dates, end)
        inner_right_index = bisect.bisect_right(sub_dates, end)
        predict_x_buckets[end] = pd.DataFrame(ne_x[inner_left_index:inner_right_index], columns=names)
        if risk_exp is not None:
            predict_risk_buckets[end] = this_risk_exp[inner_left_index:inner_right_index]
        else:
            # NOTE(review): this rebinds the whole dict to None inside the
            # loop (not `predict_risk_buckets[end] = None`).  Benign since
            # `risk_exp is None` is loop-invariant, and callers reset the
            # value anyway when no neutralization is requested — but it
            # changes the return type on this branch; confirm intended.
            predict_risk_buckets = None
        predict_codes_bucket[end] = this_codes[inner_left_index:inner_right_index]

        this_raw_y = y_values[left_index:right_index]
        if len(this_raw_y) > 0:
            ne_y = factor_processing(this_raw_y,
                                     pre_process=pre_process,
                                     risk_factors=this_risk_exp,
                                     post_process=post_process)
            predict_y_buckets[end] = ne_y[inner_left_index:inner_right_index]

    return train_x_buckets, train_y_buckets, train_risk_buckets, predict_x_buckets, \
        predict_y_buckets, predict_risk_buckets, predict_codes_bucket


def fetch_data_package(engine: SqlEngine,
                       alpha_factors: Iterable[object],
                       start_date: str,
                       end_date: str,
                       frequency: str,
                       universe: Universe,
                       benchmark: int,
                       warm_start: int = 0,
                       batch: int = 1,
                       neutralized_risk: Iterable[str] = None,
                       risk_model: str = 'short',
                       pre_process: Iterable[object] = None,
                       post_process: Iterable[object] = None,
                       fit_target: Union[Transformer, object] = None) -> dict:
    """End-to-end loader: returns a dict with keys ``x_names``,
    ``settlement``, ``train`` and ``predict``, each train/predict entry
    keyed by window-end date.  Buckets whose window-end precedes
    ``start_date`` (the warm-up) are dropped before returning.
    """
    alpha_logger.info("Starting data package fetching ...")
    transformer = Transformer(alpha_factors)
    names = transformer.names

    # warm_start + batch: extra history so the first window is complete.
    dates, target_df, factor_df = prepare_data(engine,
                                               transformer,
                                               start_date,
                                               end_date,
                                               frequency,
                                               universe,
                                               benchmark,
                                               warm_start + batch,
                                               fit_target=fit_target)

    target_df, dates, date_label, risk_exp, x_values, y_values, train_x, train_y, codes = \
        _merge_df(engine, names, factor_df, target_df, universe, dates, risk_model,
                  neutralized_risk)

    alpha_logger.info("data merging finished")

    target_df['weight'] = train_x['weight']
    target_df['industry'] = train_x['industry']
    target_df['industry_code'] = train_x['industry_code']

    if neutralized_risk:
        # Expose the neutralized risk columns on the settlement frame too.
        for i, name in enumerate(neutralized_risk):
            target_df.loc[:, name] = risk_exp[:, i]

    alpha_logger.info("Loading data is finished")

    train_x_buckets, train_y_buckets, train_risk_buckets, predict_x_buckets, \
        predict_y_buckets, predict_risk_buckets, predict_codes_bucket = \
        batch_processing(names,
                         x_values,
                         y_values,
                         dates,
                         date_label,
                         batch,
                         risk_exp,
                         pre_process,
                         post_process,
                         codes)

    alpha_logger.info("Data processing is finished")

    ret = dict()
    ret['x_names'] = names
    ret['settlement'] = target_df[target_df.trade_date >= start_date]

    # Drop warm-up windows: keep only buckets ending on/after start_date.
    train_x_buckets = {k: train_x_buckets[k] for k in train_x_buckets
                       if k.strftime('%Y-%m-%d') >= start_date}
    train_y_buckets = {k: train_y_buckets[k] for k in train_y_buckets
                       if k.strftime('%Y-%m-%d') >= start_date}
    train_risk_buckets = {k: train_risk_buckets[k] for k in train_risk_buckets
                          if k.strftime('%Y-%m-%d') >= start_date}

    predict_x_buckets = {k: predict_x_buckets[k] for k in predict_x_buckets
                         if k.strftime('%Y-%m-%d') >= start_date}
    predict_y_buckets = {k: predict_y_buckets[k] for k in predict_y_buckets
                         if k.strftime('%Y-%m-%d') >= start_date}
    if neutralized_risk:
        predict_risk_buckets = {k: predict_risk_buckets[k] for k in predict_risk_buckets
                                if k.strftime('%Y-%m-%d') >= start_date}
    else:
        predict_risk_buckets = None
    predict_codes_bucket = {k: predict_codes_bucket[k] for k in predict_codes_bucket
                            if k.strftime('%Y-%m-%d') >= start_date}

    ret['train'] = {'x': train_x_buckets, 'y': train_y_buckets, 'risk': train_risk_buckets}
    ret['predict'] = {'x': predict_x_buckets, 'y': predict_y_buckets,
                      'risk': predict_risk_buckets, 'code': predict_codes_bucket}
    return ret


def fetch_train_phase(engine,
                      alpha_factors: Union[Transformer, Iterable[object]],
                      ref_date,
                      frequency,
                      universe,
                      batch=1,
                      neutralized_risk: Iterable[str] = None,
                      risk_model: str = 'short',
                      pre_process: Iterable[object] = None,
                      post_process: Iterable[object] = None,
                      warm_start: int = 0,
                      fit_target: Union[Transformer, object] = None) -> dict:
    """Build the training slice for a single reference date.

    Returns ``{'x_names': ..., 'train': {'x': DataFrame, 'y': array,
    'code': array}}`` covering the ``batch`` periods preceding
    ``ref_date`` (strictly before it when ``ref_date`` itself is in the
    schedule, since its target would not be observable yet).
    """
    if isinstance(alpha_factors, Transformer):
        transformer = alpha_factors
    else:
        transformer = Transformer(alpha_factors)

    p = Period(frequency)
    p = Period(length=-(warm_start + batch) * p.length(), units=p.units())
    start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
    dates = makeSchedule(start_date,
                         ref_date,
                         frequency,
                         calendar='china.sse',
                         dateRule=BizDayConventions.Following,
                         dateGenerationRule=DateGeneration.Backward)

    horizon = map_freq(frequency)

    factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)
    if fit_target is None:
        target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
    else:
        one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
        target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
                                                      dates=dates + [one_more_date])
        target_df = target_df[target_df.trade_date.isin(dates)]
        target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))

    df = pd.merge(factor_df, target_df, on=['trade_date', 'code']).dropna()

    target_df, factor_df = df[['trade_date', 'code', 'dx']], \
        df[['trade_date', 'code'] + transformer.names]

    target_df, dates, date_label, risk_exp, x_values, y_values, _, _, codes = \
        _merge_df(engine, transformer.names, factor_df, target_df, universe, dates,
                  risk_model, neutralized_risk)

    if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
        # ref_date itself is present: train on the window ending one
        # period earlier (its own forward return is not observable yet).
        require(len(dates) >= 2, ValueError,
                "No previous data for training for the date {0}".format(ref_date))
        end = dates[-2]
        start = dates[-batch - 1] if batch <= len(dates) - 1 else dates[0]
    else:
        end = dates[-1]
        start = dates[-batch] if batch <= len(dates) else dates[0]

    index = (date_label >= start) & (date_label <= end)
    this_raw_x = x_values[index]
    this_raw_y = y_values[index]
    this_code = codes[index]
    if risk_exp is not None:
        this_risk_exp = risk_exp[index]
    else:
        this_risk_exp = None

    ne_x = factor_processing(this_raw_x,
                             pre_process=pre_process,
                             risk_factors=this_risk_exp,
                             post_process=post_process)
    ne_y = factor_processing(this_raw_y,
                             pre_process=pre_process,
                             risk_factors=this_risk_exp,
                             post_process=post_process)

    ret = dict()
    ret['x_names'] = transformer.names
    ret['train'] = {'x': pd.DataFrame(ne_x, columns=transformer.names), 'y': ne_y,
                    'code': this_code}
    return ret


def fetch_predict_phase(engine,
                        alpha_factors: Union[Transformer, Iterable[object]],
                        ref_date,
                        frequency,
                        universe,
                        batch=1,
                        neutralized_risk: Iterable[str] = None,
                        risk_model: str = 'short',
                        pre_process: Iterable[object] = None,
                        post_process: Iterable[object] = None,
                        warm_start: int = 0,
                        fillna: str = None,
                        fit_target: Union[Transformer, object] = None):
    """Build the prediction slice for ``ref_date``.

    Returns ``{'x_names': ..., 'predict': {'x': DataFrame, 'code': array,
    'y': array}}``.  Targets are left-joined (they may not exist yet on
    ``ref_date``), and factors can optionally be median-imputed per date
    via ``fillna``.
    """
    if isinstance(alpha_factors, Transformer):
        transformer = alpha_factors
    else:
        transformer = Transformer(alpha_factors)

    p = Period(frequency)
    # batch - 1 (not batch): the window ends at ref_date itself here.
    p = Period(length=-(warm_start + batch - 1) * p.length(), units=p.units())
    start_date = advanceDateByCalendar('china.sse', ref_date, p, BizDayConventions.Following)
    dates = makeSchedule(start_date,
                         ref_date,
                         frequency,
                         calendar='china.sse',
                         dateRule=BizDayConventions.Following,
                         dateGenerationRule=DateGeneration.Backward)

    horizon = map_freq(frequency)

    factor_df = engine.fetch_factor_range(universe, factors=transformer, dates=dates)

    if fillna:
        # Cross-sectional median imputation per trade date.
        factor_df = factor_df.groupby('trade_date').apply(
            lambda x: x.fillna(x.median())).reset_index(drop=True).dropna()
    else:
        factor_df = factor_df.dropna()

    if fit_target is None:
        target_df = engine.fetch_dx_return_range(universe, dates=dates, horizon=horizon)
    else:
        one_more_date = advanceDateByCalendar('china.sse', dates[-1], frequency)
        target_df = engine.fetch_factor_range_forward(universe, factors=fit_target,
                                                      dates=dates + [one_more_date])
        target_df = target_df[target_df.trade_date.isin(dates)]
        target_df = target_df.groupby('code').apply(lambda x: x.fillna(method='pad'))

    names = transformer.names

    if neutralized_risk:
        risk_df = engine.fetch_risk_model_range(universe, dates=dates, risk_model=risk_model)[1]
        used_neutralized_risk = list(set(neutralized_risk).difference(names))
        risk_df = risk_df[['trade_date', 'code'] + used_neutralized_risk].dropna()
        train_x = pd.merge(factor_df, risk_df, on=['trade_date', 'code'])
        # how='left': the target may be absent for ref_date itself.
        train_x = pd.merge(train_x, target_df, on=['trade_date', 'code'], how='left')
        risk_exp = train_x[neutralized_risk].values.astype(float)
    else:
        train_x = pd.merge(factor_df, target_df, on=['trade_date', 'code'], how='left')
        risk_exp = None

    # Drop rows missing anything except the last column (the target).
    train_x.dropna(inplace=True, subset=train_x.columns[:-1])
    x_values = train_x[names].values.astype(float)
    y_values = train_x[['dx']].values.astype(float)

    date_label = pd.DatetimeIndex(train_x.trade_date).to_pydatetime()
    dates = np.unique(date_label)

    if dates[-1] == dt.datetime.strptime(ref_date, '%Y-%m-%d'):
        end = dates[-1]
        start = dates[-batch] if batch <= len(dates) else dates[0]

        left_index = bisect.bisect_left(date_label, start)
        right_index = bisect.bisect_right(date_label, end)

        this_raw_x = x_values[left_index:right_index]
        this_raw_y = y_values[left_index:right_index]
        sub_dates = date_label[left_index:right_index]

        if risk_exp is not None:
            this_risk_exp = risk_exp[left_index:right_index]
        else:
            this_risk_exp = None

        ne_x = factor_processing(this_raw_x,
                                 pre_process=pre_process,
                                 risk_factors=this_risk_exp,
                                 post_process=post_process)
        ne_y = factor_processing(this_raw_y,
                                 pre_process=pre_process,
                                 risk_factors=this_risk_exp,
                                 post_process=post_process)

        # Restrict to the rows exactly at ref_date.
        inner_left_index = bisect.bisect_left(sub_dates, end)
        inner_right_index = bisect.bisect_right(sub_dates, end)

        ne_x = ne_x[inner_left_index:inner_right_index]
        ne_y = ne_y[inner_left_index:inner_right_index]

        left_index = bisect.bisect_left(date_label, end)
        right_index = bisect.bisect_right(date_label, end)

        codes = train_x.code.values[left_index:right_index]
    else:
        # NOTE(review): on this branch ne_y is None, so the
        # `ne_y.flatten()` below raises AttributeError — confirm whether
        # callers can ever reach here with ref_date absent from dates.
        ne_x = None
        ne_y = None
        codes = None

    ret = dict()
    ret['x_names'] = transformer.names
    ret['predict'] = {'x': pd.DataFrame(ne_x, columns=transformer.names, index=codes),
                      'code': codes, 'y': ne_y.flatten()}
    return ret
import torch
import torch.nn as nn
import numpy as np
from libcity.model.abstract_model import AbstractModel
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence


class EmbeddingMatrix(nn.Module):
    """Text embedding implemented as a bias-free linear projection.

    Maps a (soft) one-hot word distribution of width ``input_size``
    (the vocabulary size) onto an ``output_size``-dimensional embedding,
    initialized from a pre-trained word-vector matrix ``word_vec``.
    """

    def __init__(self, input_size, output_size, word_vec):
        super(EmbeddingMatrix, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.layer = nn.Linear(in_features=input_size, out_features=output_size, bias=False)
        self.init_weight(word_vec)

    def init_weight(self, word_vec):
        # word_vec arrives as (vocab_size, embedding_dim); nn.Linear keeps
        # its weight as (out_features, in_features), hence the transpose.
        self.layer.weight = nn.Parameter(torch.Tensor(word_vec).t())

    def forward(self, x):
        # x: (batch, seq, vocab_size) -> (batch, seq, embedding_dim)
        return self.layer(x)
class SERM(AbstractModel):
    """SERM next-location prediction model (location + time + text + user).

    Embeds location, time and bag-of-words text for each trajectory step,
    feeds the concatenation through an LSTM, then adds a per-user
    embedding of width ``loc_size`` to the dense output before a
    log-softmax over all locations.
    """

    def __init__(self, config, data_feature):
        super(SERM, self).__init__(config, data_feature)
        # ---- sizes / hyper-parameters ----
        self.loc_size = data_feature['loc_size']
        self.loc_emb_size = config['loc_emb_size']
        self.tim_size = data_feature['tim_size']
        self.tim_emb_size = config['tim_emb_size']
        self.user_size = data_feature['uid_size']
        # Per the paper: the user embedding lives in location-logit space
        # so it can be added directly to the dense layer's output.
        self.user_emb_size = data_feature['loc_size']
        self.text_size = data_feature['text_size']
        # Text embedding width is fixed by the pre-trained word vectors.
        self.text_emb_size = len(data_feature['word_vec'][0])
        self.hidden_size = config['hidden_size']
        # Identity matrix used to one-hot encode word indices in forward().
        self.word_one_hot_matrix = np.eye(self.text_size)
        self.device = config['device']
        # ---- embedding layers ----
        self.emb_loc = nn.Embedding(num_embeddings=self.loc_size, embedding_dim=self.loc_emb_size,
                                    padding_idx=data_feature['loc_pad'])
        self.emb_tim = nn.Embedding(num_embeddings=self.tim_size, embedding_dim=self.tim_emb_size,
                                    padding_idx=data_feature['tim_pad'])
        self.emb_user = nn.Embedding(num_embeddings=self.user_size, embedding_dim=self.user_emb_size)
        self.emb_text = EmbeddingMatrix(self.text_size, self.text_emb_size, data_feature['word_vec'])
        # ---- recurrent and output layers ----
        self.lstm = nn.LSTM(input_size=self.loc_emb_size + self.tim_emb_size + self.text_emb_size,
                            hidden_size=self.hidden_size)
        self.dense = nn.Linear(in_features=self.hidden_size, out_features=self.loc_size)
        # NOTE(review): apply() visits every sub-module, including
        # EmbeddingMatrix.layer (an nn.Linear) — so the pre-trained word
        # vectors set in its constructor are overwritten by Xavier init
        # here.  Confirm this is intended.
        self.apply(self._init_weight)

    def _init_weight(self, module):
        """Xavier / orthogonal initialization dispatched by module type."""
        if isinstance(module, nn.Embedding):
            nn.init.xavier_normal_(module.weight)
        elif isinstance(module, nn.Linear):
            nn.init.xavier_uniform_(module.weight)
        elif isinstance(module, nn.LSTM):
            for name, param in module.named_parameters():
                if 'weight_ih' in name:
                    nn.init.xavier_uniform_(param.data)
                elif 'weight_hh' in name:
                    nn.init.orthogonal_(param.data)
                elif 'bias' in name:
                    nn.init.constant_(param.data, 0)

    def forward(self, batch):
        """Return log-probabilities over locations, shape (batch, loc_size).

        ``batch`` is a project Batch-like mapping; presumably
        'current_loc'/'current_tim' are padded index tensors of shape
        (batch, seq) and 'text' is a per-step list of word-index lists —
        TODO confirm against the data loader.
        """
        loc = batch['current_loc']
        tim = batch['current_tim']
        user = batch['uid']
        text = batch['text']
        max_len = batch['current_loc'].shape[1]
        text_pad = np.zeros((self.text_size))
        # `text` holds word indices; convert each step to a normalized
        # bag-of-words vector (mean of one-hot rows), padding to max_len.
        one_hot_text = []
        for word_index in text:
            one_hot_text_a_slice = []
            for words in word_index:
                if len(words) == 0:
                    one_hot_text_a_slice.append(np.zeros((self.text_size)))
                else:
                    one_hot_text_a_slice.append(np.sum(self.word_one_hot_matrix[words], axis=0) / len(words))
            # Pad the sequence up to max_len with all-zero vectors.
            one_hot_text_a_slice += [text_pad] * (max_len - len(one_hot_text_a_slice))
            one_hot_text.append(np.array(one_hot_text_a_slice))
        # (batch, seq, text_size)
        one_hot_text = torch.FloatTensor(one_hot_text).to(self.device)
        loc_emb = self.emb_loc(loc)
        tim_emb = self.emb_tim(tim)
        user_emb = self.emb_user(user)
        text_emb = self.emb_text(one_hot_text)
        # Concatenate step features and go (batch, seq, emb) -> (seq, batch, emb)
        # for the (batch_first=False) LSTM.
        x = torch.cat([loc_emb, tim_emb, text_emb], dim=2).permute(1, 0, 2)
        # Pack with the true (unpadded) lengths.
        seq_len = batch.get_origin_len('current_loc')
        pack_x = pack_padded_sequence(x, lengths=seq_len, enforce_sorted=False)
        lstm_out, (h_n, c_n) = self.lstm(pack_x)
        # Unpack as (batch, seq, hidden).
        lstm_out, out_len = pad_packed_sequence(lstm_out, batch_first=True)
        # Gather each sequence's last valid hidden state into (batch, hidden).
        for i in range(lstm_out.shape[0]):
            if i == 0:
                out = lstm_out[0][seq_len[i] - 1].reshape(1, -1)  # reshape to one row
            else:
                out = torch.cat((out, lstm_out[i][seq_len[i] - 1].reshape(1, -1)), 0)
        dense = self.dense(out)                 # (batch, loc_size)
        out_vec = torch.add(dense, user_emb)    # add user logits, (batch, loc_size)
        pred = nn.LogSoftmax(dim=1)(out_vec)
        return pred  # (batch, loc_size) log-probabilities

    def predict(self, batch):
        """Inference entry point; identical to forward()."""
        return self.forward(batch)

    def calculate_loss(self, batch):
        """Negative log-likelihood of the true next location.

        NLLLoss pairs with the LogSoftmax output of forward().
        """
        criterion = nn.NLLLoss()
        scores = self.forward(batch)  # (batch, loc_size)
        return criterion(scores, batch['target'])
from mock import patch
from django.test import TestCase

from ..serializer import DataHubSerializer


class DataHubSerializerTests(TestCase):
    """Unit tests for DataHubSerializer construction behaviour."""

    def setUp(self):
        self.username = "delete_me_username"
        self.repo_base = "delete_me_repo_base"
        self.password = "<PASSWORD>"
        # Replace DataHubManager with a mock for the duration of each test.
        self.mock_manager = self.create_patch('api.serializer.DataHubManager')
        self.serializer = DataHubSerializer(
            username=self.username, repo_base=self.repo_base)

    def create_patch(self, name):
        """Start a patcher for *name*; it is undone automatically on teardown."""
        patcher = patch(name)
        mocked = patcher.start()
        self.addCleanup(patcher.stop)
        return mocked

    def test_initialization(self):
        serializer = DataHubSerializer(
            username=self.username, repo_base=self.repo_base)
        self.assertEqual(serializer.username, self.username)
        self.assertEqual(serializer.repo_base, self.repo_base)
        # The serializer should hand its identity through to DataHubManager.
        manager_kwargs = self.mock_manager.call_args[1]
        self.assertEqual(manager_kwargs['repo_base'], self.repo_base)
        self.assertEqual(manager_kwargs['user'], self.username)
<import_stmt>os<import_stmt>json<import_stmt>shutil<import_stmt>numpy<as>np<import_from_stmt>typing Any<import_from_stmt>typing Dict<import_from_stmt>typing List<import_from_stmt>typing Type<import_from_stmt>typing Tuple<import_from_stmt>typing Union<import_from_stmt>typing Callable<import_from_stmt>typing Optional<import_from_stmt>typing NamedTuple<import_from_stmt>tqdm.autonotebook tqdm<import_from_stmt>cfdata.tabular TabularData<import_from_stmt>cftool.ml ModelPattern<import_from_stmt>cftool.ml EnsemblePattern<import_from_stmt>cftool.dist Parallel<import_from_stmt>cftool.misc update_dict<import_from_stmt>cftool.misc shallow_copy_dict<import_from_stmt>cftool.ml.utils patterns_type<import_from_stmt>cftool.ml.utils Comparer<import_from_stmt>cftool.ml.utils Estimator<import_from_stmt>.pipeline SimplePipeline<import_from_stmt>.pipeline CarefreePipeline<import_from_stmt>...data MLData<import_from_stmt>...data MLInferenceData<import_from_stmt>...trainer get_sorted_checkpoints<import_from_stmt>...constants SCORES_FILE<import_from_stmt>...constants WARNING_PREFIX<import_from_stmt>...constants CHECKPOINTS_FOLDER<import_from_stmt>...constants ML_PIPELINE_SAVE_NAME<import_from_stmt>...dist.ml Experiment<import_from_stmt>...dist.ml ExperimentResults<import_from_stmt>...misc.toolkit to_2d<import_from_stmt>...misc.toolkit get_latest_workplace<import_from_stmt>...models.ml.protocol MLCoreProtocol<def_stmt>register_core name:str<arrow>Callable[[Type] Type]<block_start><return>MLCoreProtocol.register(name)<block_end>pipelines_type=Dict[str List[SimplePipeline]]<line_sep>various_pipelines_type=Union[SimplePipeline List[SimplePipeline] Dict[str SimplePipeline] pipelines_type ]<def_stmt>_to_pipelines pipelines:various_pipelines_type<arrow>pipelines_type<block_start><if_stmt>isinstance(pipelines dict)<block_start>pipeline_dict={}<for_stmt>key,value pipelines.items()<block_start><if_stmt>isinstance(value 
list)<block_start>pipeline_dict[key]=value<block_end><else_stmt><block_start>pipeline_dict[key]=[value]<block_end><block_end><block_end><else_stmt><block_start><if_stmt><not>isinstance(pipelines list)<block_start>pipelines=[pipelines]<block_end>pipeline_dict={}<for_stmt>pipeline pipelines<block_start><assert_stmt>pipeline.model<is><not><none><line_sep>key=pipeline.model.__identifier__<line_sep>pipeline_dict.setdefault(key []).append(pipeline)<block_end><block_end><return>pipeline_dict<block_end><def_stmt>evaluate data:Union[MLData MLInferenceData] * metrics:Union[str List[str]] metric_configs:Optional[Union[Dict[str Any] List[Dict[str Any]]]]=<none> contains_labels:bool=<true> pipelines:Optional[various_pipelines_type]=<none> predict_config:Optional[Dict[str Any]]=<none> other_patterns:Optional[Dict[str patterns_type]]=<none> comparer_verbose_level:Optional[int]=1 <arrow>Comparer<block_start><if_stmt><not>contains_labels<block_start>err_msg="`cflearn.evaluate` must be called with `contains_labels = True`"<line_sep><raise>ValueError(err_msg)<block_end><if_stmt>metric_configs<is><none><block_start>metric_configs=[{}<for>_ range(len(metrics))]<block_end>patterns={}<line_sep>x,y=data.x_train data.y_train<if_stmt>pipelines<is><none><block_start>msg=<none><if_stmt>y<is><none><block_start>msg="either `pipelines` or `y` should be provided"<block_end><if_stmt>other_patterns<is><none><block_start>msg="either `pipelines` or `other_patterns` should be provided"<block_end><if_stmt>msg<is><not><none><block_start><raise>ValueError(msg)<block_end><block_end><else_stmt><block_start>pipelines=_to_pipelines(pipelines)<line_sep># get data
# TODO : different pipelines may have different labels
<if_stmt>y<is><not><none><block_start>y=to_2d(y)<block_end><else_stmt><block_start><if_stmt><not>isinstance(x str)<block_start><raise>ValueError("`x` should be str when `y` is not provided")<block_end>data_pipeline=list(pipelines.values())[0][0]<if_stmt><not>isinstance(data_pipeline CarefreePipeline)<block_start><raise>ValueError("only `CarefreePipeline` can handle file inputs")<block_end>cf_data=data_pipeline.cf_data<assert_stmt>cf_data<is><not><none><line_sep>x,y=cf_data.read_file(x contains_labels=contains_labels)<line_sep>y=cf_data.transform(x y).y<block_end># get metrics
<if_stmt>predict_config<is><none><block_start>predict_config={}<block_end>predict_config.setdefault("contains_labels" contains_labels)<for_stmt>name,pipeline_list pipelines.items()<block_start>patterns[name]=[pipeline.to_pattern(**predict_config)<for>pipeline pipeline_list]<block_end><block_end><if_stmt>other_patterns<is><not><none><block_start><for_stmt>other_name other_patterns.keys()<block_start><if_stmt>other_name<in>patterns<block_start>print(f"{WARNING_PREFIX}'{other_name}' is found in "<concat>"`other_patterns`, it will be overwritten")<block_end><block_end>update_dict(other_patterns patterns)<block_end><if_stmt>isinstance(metrics list)<block_start>metrics_list=metrics<block_end><else_stmt><block_start><assert_stmt>isinstance(metrics str)<line_sep>metrics_list=[metrics]<block_end><if_stmt>isinstance(metric_configs list)<block_start>metric_configs_list=metric_configs<block_end><else_stmt><block_start><assert_stmt>isinstance(metric_configs dict)<line_sep>metric_configs_list=[metric_configs]<block_end>estimators=[Estimator(metric metric_config=metric_config)<for>metric,metric_config zip(metrics_list metric_configs_list)]<line_sep>comparer=Comparer(patterns estimators)<line_sep>comparer.compare(data y verbose_level=comparer_verbose_level)<line_sep><return>comparer<block_end><def_stmt>task_loader workplace:str pipeline_base:Type[SimplePipeline]=CarefreePipeline compress:bool=<true> <arrow>SimplePipeline<block_start>export_folder=os.path.join(workplace ML_PIPELINE_SAVE_NAME)<line_sep>m=pipeline_base.load(export_folder=export_folder compress=compress)<assert_stmt>isinstance(m SimplePipeline)<line_sep><return>m<block_end><def_stmt>load_experiment_results results:ExperimentResults pipeline_base:Type[SimplePipeline] <arrow>pipelines_type<block_start>pipelines_dict:Dict[str Dict[int SimplePipeline]]={}<line_sep>iterator=list(zip(results.workplaces results.workplace_keys))<for_stmt>workplace,workplace_key tqdm(iterator 
desc="load")<block_start>pipeline=task_loader(workplace pipeline_base)<line_sep>model,str_i=workplace_key<line_sep>pipelines_dict.setdefault(model {})[int(str_i)]=pipeline<block_end><return>{k:[v[i]<for>i sorted(v)]<for>k,v pipelines_dict.items()}<block_end><class_stmt>RepeatResult(NamedTuple)<block_start>data:Optional[TabularData]<line_sep>experiment:Optional[Experiment]<line_sep>pipelines:Optional[Dict[str List[SimplePipeline]]]<line_sep>patterns:Optional[Dict[str List[ModelPattern]]]<block_end><def_stmt>repeat_with data:MLData * pipeline_base:Type[SimplePipeline]=CarefreePipeline workplace:str="_repeat" models:Union[str List[str]]="fcnn" model_configs:Optional[Dict[str Dict[str Any]]]=<none> predict_config:Optional[Dict[str Any]]=<none> sequential:Optional[bool]=<none> num_jobs:int=1 num_repeat:int=5 return_patterns:bool=<true> compress:bool=<true> use_tqdm:bool=<true> available_cuda_list:Optional[List[int]]=<none> resource_config:Optional[Dict[str Any]]=<none> task_meta_kwargs:Optional[Dict[str Any]]=<none> is_fix:bool=<false> **kwargs:Any <arrow>RepeatResult<block_start><if_stmt>os.path.isdir(workplace)<and><not>is_fix<block_start>print(f"{WARNING_PREFIX}'{workplace}' already exists, it will be erased")<line_sep>shutil.rmtree(workplace)<block_end>kwargs=shallow_copy_dict(kwargs)<if_stmt>isinstance(models str)<block_start>models=[models]<block_end><if_stmt>sequential<is><none><block_start>sequential=num_jobs<le>1<block_end><if_stmt>model_configs<is><none><block_start>model_configs={}<block_end><def_stmt>is_buggy i_:int model_:str<arrow>bool<block_start>i_workplace=os.path.join(workplace model_ str(i_))<line_sep>i_latest_workplace=get_latest_workplace(i_workplace)<if_stmt>i_latest_workplace<is><none><block_start><return><true><block_end>checkpoint_folder=os.path.join(i_latest_workplace CHECKPOINTS_FOLDER)<if_stmt><not>os.path.isfile(os.path.join(checkpoint_folder 
SCORES_FILE))<block_start><return><true><block_end><if_stmt><not>get_sorted_checkpoints(checkpoint_folder)<block_start><return><true><block_end><return><false><block_end><def_stmt>fetch_config core_name:str<arrow>Dict[str Any]<block_start>local_kwargs=shallow_copy_dict(kwargs)<assert_stmt>model_configs<is><not><none><line_sep>local_core_config=model_configs.setdefault(core_name {})<line_sep>local_kwargs["core_name"]=core_name<line_sep>local_kwargs["core_config"]=shallow_copy_dict(local_core_config)<line_sep><return>shallow_copy_dict(local_kwargs)<block_end>pipelines_dict:Optional[Dict[str List[SimplePipeline]]]=<none><if_stmt>sequential<block_start>cuda=kwargs.pop("cuda" <none>)<line_sep>experiment=<none><line_sep>tqdm_settings=kwargs.setdefault("tqdm_settings" {})<line_sep>tqdm_settings["tqdm_position"]=2<if_stmt><not>return_patterns<block_start>print(f"{WARNING_PREFIX}`return_patterns` should be "<concat>"True when `sequential` is True, because patterns "<concat>"will always be generated")<line_sep>return_patterns=<true><block_end>pipelines_dict={}<if_stmt><not>use_tqdm<block_start>iterator=models<block_end><else_stmt><block_start>iterator=tqdm(models total=len(models) position=0)<block_end><for_stmt>model iterator<block_start>local_pipelines=[]<line_sep>sub_iterator=range(num_repeat)<if_stmt>use_tqdm<block_start>sub_iterator=tqdm(sub_iterator total=num_repeat position=1 leave=<false> )<block_end><for_stmt>i sub_iterator<block_start><if_stmt>is_fix<and><not>is_buggy(i model)<block_start><continue><block_end>local_config=fetch_config(model)<line_sep>local_workplace=os.path.join(workplace model str(i))<line_sep>local_config.setdefault("workplace" local_workplace)<line_sep>m=pipeline_base(**local_config)<line_sep>m.fit(data cuda=cuda)<line_sep>local_pipelines.append(m)<block_end>pipelines_dict[model]=local_pipelines<block_end><block_end><else_stmt><block_start><if_stmt>num_jobs<le>1<block_start>print(f"{WARNING_PREFIX}we suggest setting `sequential` "<concat>f"to 
True when `num_jobs` is {num_jobs}")<block_end># data
data_folder=Experiment.dump_data_bundle(data.x_train data.y_train data.x_valid data.y_valid workplace=workplace )<line_sep># experiment
experiment=Experiment(num_jobs=num_jobs available_cuda_list=available_cuda_list resource_config=resource_config )<for_stmt>model models<block_start><for_stmt>i range(num_repeat)<block_start><if_stmt>is_fix<and><not>is_buggy(i model)<block_start><continue><block_end>local_config=fetch_config(model)<line_sep>experiment.add_task(model=model compress=compress root_workplace=workplace workplace_key=(model str(i)) config=local_config data_folder=data_folder **(task_meta_kwargs<or>{}) )<block_end><block_end># finalize
results=experiment.run_tasks(use_tqdm=use_tqdm)<if_stmt>return_patterns<block_start>pipelines_dict=load_experiment_results(results pipeline_base)<block_end><block_end>patterns=<none><if_stmt>return_patterns<block_start><assert_stmt>pipelines_dict<is><not><none><if_stmt>predict_config<is><none><block_start>predict_config={}<block_end>patterns={model:[m.to_pattern(**predict_config)<for>m pipelines]<for>model,pipelines pipelines_dict.items()}<block_end>cf_data=<none><if_stmt>patterns<is><not><none><block_start>m=patterns[models[0]][0].model<if_stmt>isinstance(m CarefreePipeline)<block_start>cf_data=m.cf_data<block_end><block_end><return>RepeatResult(cf_data experiment pipelines_dict patterns)<block_end><def_stmt>pack_repeat workplace:str pipeline_base:Type[SimplePipeline] * num_jobs:int=1 <arrow>List[str]<block_start>sub_workplaces=[]<for_stmt>stuff sorted(os.listdir(workplace))<block_start>stuff_path=os.path.join(workplace stuff)<if_stmt><not>os.path.isdir(stuff_path)<block_start><continue><block_end>sub_workplaces.append(get_latest_workplace(stuff_path))<block_end>rs=Parallel(num_jobs).grouped(pipeline_base.pack sub_workplaces).ordered_results<line_sep><return>sum(rs [])<block_end><def_stmt>pick_from_repeat_and_pack workplace:str pipeline_base:Type[SimplePipeline] * num_pick:int num_jobs:int=1 <arrow>List[str]<block_start>score_workplace_pairs=[]<for_stmt>stuff sorted(os.listdir(workplace))<block_start>stuff_path=os.path.join(workplace stuff)<if_stmt><not>os.path.isdir(stuff_path)<block_start><continue><block_end>sub_workplace=get_latest_workplace(stuff_path)<assert_stmt>sub_workplace<is><not><none> "internal error occurred"<line_sep>score_path=os.path.join(sub_workplace CHECKPOINTS_FOLDER SCORES_FILE)<with_stmt>open(score_path "r")<as>f<block_start>score=float(max(json.load(f).values()))<line_sep>score_workplace_pairs.append((score 
sub_workplace))<block_end><block_end>score_workplace_pairs=sorted(score_workplace_pairs)[::-1]<line_sep>sub_workplaces=[pair[1]<for>pair score_workplace_pairs[:num_pick]]<line_sep>rs=Parallel(num_jobs).grouped(pipeline_base.pack sub_workplaces).ordered_results<line_sep><return>sum(rs [])<block_end><def_stmt>make_toy_model model:str="fcnn" config:Optional[Dict[str Any]]=<none> * pipeline_type:str="ml.carefree" is_classification:bool=<false> cf_data_config:Optional[Dict[str Any]]=<none> data_tuple:Optional[Tuple[np.ndarray np.ndarray]]=<none> cuda:Optional[str]=<none> <arrow>SimplePipeline<block_start><if_stmt>config<is><none><block_start>config={}<block_end><if_stmt>data_tuple<is><not><none><block_start>x_np,y_np=data_tuple<block_end><else_stmt><block_start><if_stmt><not>is_classification<block_start>x,y=[[0]] [[1.0]]<block_end><else_stmt><block_start>x,y=[[0] [1]] [[1] [0]]<block_end>x_np,y_np=map(np.array [x y])<block_end>model_config={}<if_stmt>model<in>("fcnn" "tree_dnn")<block_start>model_config={"hidden_units":[100] "batch_norm":<false> "dropout":0.0 }<block_end>base_config={"core_name":model "core_config":model_config "output_dim":1+int(is_classification) "num_epoch":2 "max_epoch":4 }<line_sep>updated=update_dict(config base_config)<line_sep>m=SimplePipeline.make(pipeline_type updated)<assert_stmt>isinstance(m SimplePipeline)<if_stmt>cf_data_config<is><none><block_start>cf_data_config={}<block_end>cf_data_config=update_dict(cf_data_config dict(valid_columns=list(range(x_np.shape[1])) label_process_method="identical" ) )<line_sep>data=MLData.with_cf_data(x_np y_np is_classification=is_classification cf_data_config=cf_data_config valid_split=0.0 )<line_sep>m.fit(data cuda=cuda)<line_sep><return>m<block_end>__all__=["register_core" "evaluate" "task_loader" "load_experiment_results" "repeat_with" "pack_repeat" "pick_from_repeat_and_pack" "make_toy_model" "ModelPattern" "EnsemblePattern" ]<line_sep> |
<import_stmt>unittest<import_stmt>pandas<as>pd<import_stmt>gokart<class_stmt>TestPandasAssert(unittest.TestCase)<block_start><def_stmt>test_assert_frame_contents_equal self<block_start>expected=pd.DataFrame(data=dict(f1=[1 2 3] f3=[111 222 333] f2=[4 5 6]) index=[0 1 2])<line_sep>resulted=pd.DataFrame(data=dict(f2=[5 4 6] f1=[2 1 3] f3=[222 111 333]) index=[1 0 2])<line_sep>gokart.testing.assert_frame_contents_equal(resulted expected)<block_end><def_stmt>test_assert_frame_contents_equal_with_small_error self<block_start>expected=pd.DataFrame(data=dict(f1=[1.0001 2.0001 3.0001] f3=[111 222 333] f2=[4 5 6]) index=[0 1 2])<line_sep>resulted=pd.DataFrame(data=dict(f2=[5 4 6] f1=[2.0002 1.0002 3.0002] f3=[222 111 333]) index=[1 0 2])<line_sep>gokart.testing.assert_frame_contents_equal(resulted expected atol=1e-1)<block_end><def_stmt>test_assert_frame_contents_equal_with_duplicated_columns self<block_start>expected=pd.DataFrame(data=dict(f1=[1 2 3] f3=[111 222 333] f2=[4 5 6]) index=[0 1 2])<line_sep>expected.columns=['f1' 'f1' 'f2']<line_sep>resulted=pd.DataFrame(data=dict(f2=[5 4 6] f1=[2 1 3] f3=[222 111 333]) index=[1 0 2])<line_sep>resulted.columns=['f2' 'f1' 'f1']<with_stmt>self.assertRaises(AssertionError)<block_start>gokart.testing.assert_frame_contents_equal(resulted expected)<block_end><block_end><def_stmt>test_assert_frame_contents_equal_with_duplicated_indexes self<block_start>expected=pd.DataFrame(data=dict(f1=[1 2 3] f3=[111 222 333] f2=[4 5 6]) index=[0 1 2])<line_sep>expected.index=[0 1 1]<line_sep>resulted=pd.DataFrame(data=dict(f2=[5 4 6] f1=[2 1 3] f3=[222 111 333]) index=[1 0 2])<line_sep>expected.index=[1 0 1]<with_stmt>self.assertRaises(AssertionError)<block_start>gokart.testing.assert_frame_contents_equal(resulted expected)<block_end><block_end><block_end> |
<if_stmt>__name__<eq>"__main__"<block_start><import_stmt>logSetup<line_sep>logSetup.initLogging()<block_end><import_stmt>pickle<import_from_stmt>common database<import_stmt>config<import_stmt>common.LogBase<import_stmt>WebMirror.rules<import_from_stmt>WebMirror.OutputFilters.util.MessageConstructors pack_message<import_stmt>WebMirror.TimedTriggers.TriggerBase<import_stmt>common.get_rpyc<line_sep># import WebMirror.OutputFilters.AmqpInterface
<class_stmt>MetaUpdater(WebMirror.TimedTriggers.TriggerBase.TriggerBaseClass)<block_start>pluginName="Meta Updater"<line_sep>loggerPath='MetaUpdater'<def_stmt>__init__ self<block_start>super().__init__()<line_sep># print()
self.rpc_interface=common.get_rpyc.RemoteJobInterface("FeedUpdater")<line_sep># if config.C_DO_RABBIT:
# print("No message queue! Doing independent RabbitMQ connection!")
# # traceback.print_stack()
# # print("Wat?")
# # print()
# self.msg_q = False
# amqp_settings = {
# "RABBIT_LOGIN" : config.C_RABBIT_LOGIN,
# "RABBIT_PASWD" : config.C_RABBIT_PASWD,
# "RABBIT_SRVER" : config.C_RABBIT_SRVER,
# "RABBIT_VHOST" : config.C_RABBIT_VHOST,
# 'taskq_task' : 'task.master.q',
# 'taskq_response' : 'response.master.q',
# }
# self._amqpint = WebMirror.OutputFilters.AmqpInterface.RabbitQueueHandler(amqp_settings)
<block_end><def_stmt>get_feed_count_message self<block_start>feeds=set()<for_stmt>ruleset WebMirror.rules.load_rules()<block_start>feeds<augor>set(ruleset['feedurls'])<block_end>data={"feed-count":len(feeds)}<line_sep><return>pack_message("system-feed-counts" data)<block_end><def_stmt>get_times self<block_start><with_stmt>common.database.session_context()<as>conn<block_start>aps=conn.execute("SELECT job_state FROM apscheduler_jobs;")<line_sep>update_times=[]<for_stmt>blob, aps<block_start>job_dict=pickle.loads(blob)<line_sep>update_times.append((job_dict['id'] job_dict['next_run_time'].isoformat()))<block_end>data={"update-times":update_times }<line_sep>database.delete_db_session()<line_sep><return>pack_message("system-update-times" data)<block_end><block_end><def_stmt>go self<block_start>feeds=self.get_feed_count_message()<line_sep>times=self.get_times()<line_sep>self.rpc_interface.put_feed_job(feeds)<line_sep>self.rpc_interface.put_feed_job(times)<line_sep># self._amqpint.put_item(feeds)
# self._amqpint.put_item(times)
<block_end><block_end><def_stmt>do_meta_update <block_start>updator=MetaUpdater()<line_sep>updator._go()<line_sep>updator=MetaUpdater()<line_sep>updator._go()<line_sep>updator=MetaUpdater()<line_sep>updator._go()<block_end><if_stmt>__name__<eq>'__main__'<block_start>do_meta_update()<block_end> |
"""
Heap in python using heapq library function
Note: by default, heapq creates a min-heap. To make it a
max-heap, add items after multiplying them by -1
"""<import_from_stmt>heapq heappop heappush heapify<line_sep>heap=[]<line_sep>heapify(heap)<line_sep>heappush(heap 10)<line_sep>heappush(heap 11)<line_sep>heappush(heap 2)<line_sep>heappush(heap 4)<line_sep>heappush(heap 14)<line_sep>heappush(heap 1)<line_sep>print('first element - ' heap[0])<line_sep>print('popping min element - ' heappop(heap))<line_sep>print('first element - ' heap[0])<line_sep># Heap prints as an array and can be access using indexes
print(heap)<line_sep>print(heap[2])<line_sep> |
# coding: utf8
"""
@Author : <NAME>
"""<import_stmt>os<import_stmt>torch.nn<as>nn<import_stmt>torch.utils.model_zoo<as>model_zoo<import_from_stmt>torch.autograd Variable<import_stmt>torch<import_from_stmt>basic.common rdict<import_stmt>numpy<as>np<import_from_stmt>easydict EasyDict<as>edict<import_from_stmt>collections OrderedDict<as>odict<import_from_stmt>itertools product<import_from_stmt>basic.common add_path env<line_sep>this_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>add_path(this_dir+'/../lib/')<import_from_stmt>helper *<import_from_stmt>model VGG16_Trunk<import_from_stmt>modelSE VGG16_Trunk<as>VGG16SE_Trunk<line_sep># net_arch2Trunk = dict(
# vgg16 = VGG16_Trunk,
# vgg16se = VGG16SE_Trunk,
# )
net_arch2Trunk=dict(vgg16=dict(Sflat=VGG16_Trunk Sexp=VGG16SE_Trunk ) )<import_from_stmt>pytorch_util.libtrain copy_weights init_weights_by_filling<import_from_stmt>pytorch_util.torch_v4_feature LocalResponseNorm# *
<import_from_stmt>pytorch_util.torch_3rd_layers Maskout<import_from_stmt>pytorch_util.torch_3rd_funcs norm2unit exp_Normalization<def_stmt>cls_pred output topk=(1 ) dim=1<block_start>maxk=max(topk)<line_sep>batch_size=output.size(0)<line_sep>_,pred=output.topk(maxk dim=dim largest=<true> sorted=<true>)<line_sep><return>pred<block_end><class_stmt>_regNormalNet(nn.Module)<block_start><def_stmt>__init__ self method net_arch='vgg16' init_weights=<true><block_start>super(_regNormalNet self).__init__()<line_sep>_Trunk=net_arch2Trunk[net_arch][method]<line_sep>self.trunk=_Trunk(init_weights=init_weights)<block_end><def_stmt>forword self x label<block_start><raise>NotImplementedError<block_end><block_end>#---------------------------------------------------------------------[regQuat]
<class_stmt>reg_Sflat_Net(_regNormalNet)<block_start><def_stmt>__init__ self net_arch='vgg16' init_weights=<true><block_start>_regNormalNet.__init__(self 'Sflat' net_arch=net_arch init_weights=init_weights)<line_sep># loss module
self.loss_handler=Cos_Proximity_Loss_Handler()<line_sep>self.targets=['norm']<block_end><def_stmt>forward self x<block_start>"""label shape (batchsize, ) """<line_sep>x=self.trunk(x)# Forward Conv and Fc6,Fc7
#
batchsize=x.size(0)# x of shape (40, 3, 240, 320)
#-- Normalize coordinate to a unit
x_norm=norm2unit(x dim=1)<line_sep>Prob=edict(norm=x_norm.permute(0 2 3 1).double())# transpose prediction from BxCxHxW to BxHxWxC order.
<return>Prob<block_end><def_stmt>compute_loss self Prob GT<block_start>Loss,Errs=self.loss_handler.compute_loss(self.targets Prob GT)<line_sep>_metric_=edict(norm=Errs['norm'])<line_sep><return>Loss _metric_<block_end><def_stmt>compute_pred self Prob encode_bit=8<block_start>x_norm=Prob['norm']<line_sep># Get cpu data.
norm=x_norm.data.cpu().numpy().copy()# B,H,W,C
<assert_stmt>encode_bit<in>[8 16]<if_stmt>encode_bit<eq>8<block_start>normImgs=((norm+1)<times>(2<power>7)).astype(np.uint8)# map [-1,1] to [0,256)
<block_end><else_stmt><block_start>normImgs=((norm+1)<times>(2<power>15)).astype(np.uint16)# map [-1,1] to [0,65535)
<block_end>Pred=edict(norm=normImgs)<line_sep><return>Pred<block_end><block_end>#---------------------------------------------------------------------[regQuat]
<class_stmt>reg_Sexp_Net(_regNormalNet)# Spherical exponential Problem + sign classification
<block_start><def_stmt>__init__ self net_arch='vgg16' init_weights=<true><block_start>_regNormalNet.__init__(self 'Sexp' net_arch=net_arch init_weights=init_weights)<line_sep>self.reg_n_D=3<line_sep># Note: for a surface normal (x,z,y) (Watch out the order)
# z should always satisfy z<=0 (Surface normal should from visible surfaces)
# Thus only x,y need sign prediction.
dim_need_sign=2<line_sep>_signs=list(product(*([(-1 1)]<times>dim_need_sign)))# [(-1, -1), (-1, 1), (1, -1), (1, 1)], with len=4
self.signs=[(x[0] -1 x[1])<for>x _signs]# y-z-x order: [(-1, -1, -1), (-1, -1, 1), (1, -1, -1), (1, -1, 1)], with len=4; z always -1
self.signs2label=odict(zip(self.signs range(len(self.signs))))<line_sep>self.label2signs=Variable(torch.DoubleTensor(self.signs)).cuda()# make it as a Variable
self.softmax=nn.Softmax(dim=1).cuda()<line_sep># loss module
self.loss_handler_abs_norm=Cos_Proximity_Loss_Handler()<line_sep>self.loss_handler_sgc_norm=Cross_Entropy_Loss_Handler()<line_sep>self.targets=['sgc_norm' 'abs_norm']<line_sep>self.gt_targets=['norm']<line_sep>self.cost,self.sint=torch.tensor(np.cos(np.pi/4)).double().cuda() torch.tensor(np.sin(np.pi/4)).double().cuda()<block_end><def_stmt>forward self x<block_start>"""label shape (batchsize, ) """<line_sep>x_abs,x_sgc=self.trunk(x)# Forward Conv and Fc6,Fc7
#
batchsize=x_abs.size(0)<line_sep>#-- Exp and Normalize coordinate to a unit
x_sqr_norm=self.softmax(x_abs)#, nr_cate=self.nr_cate)
# sign category head (totally 4 category)
x_sgc_norm=x_sgc<line_sep>Prob=edict(abs_norm=torch.sqrt(x_sqr_norm).permute(0 2 3 1).double() # B,H,W,3
sgc_norm=x_sgc_norm.permute(0 2 3 1))<line_sep># B,H,W,4
<return>Prob<block_end><def_stmt>compute_loss self Prob GT<block_start>B,H,W,_3_=GT.norm.size()<assert_stmt>_3_<eq>3 "Wrong dim: %s,%s,%s,%s"%(B H W _3_)<line_sep># First get sign label from GT
#== Formulate squared value of quaternion
GT_abs_norm=torch.abs(GT.norm)# B,H,W,3
#== Formulate signs label of quaternion
GT_sign_norm=torch.sign(GT.norm)# B,H,W,3
#-------------------------------------
# hard coded: sign to label
#-------------------------------------
# y x label
# [-1 -1] --> 0
# [-1 1] --> 1
# [ 1 -1] --> 2
# [ 1 1] --> 3
# GT_sign_norm (B,H,W,3) in y-z-x order
GT_sign_norm[GT_sign_norm<eq>0]=-1# make sign of '0' as -1 (use -1 instead of 1 just because z<=0)
y_sign,x_sign=GT_sign_norm[: : : 0] GT_sign_norm[: : : 2]<line_sep>y_sign<augadd>1# [y_sign==-1]
x_sign[x_sign<eq>-1]=0<line_sep>GT_sgc_norm=(y_sign+x_sign).long()# data with shape with (B,H,W) index of [0,1,2,3]
# here just because compute_loss need a same key from Prob and GT,
# so we just give a fake name to GT.sqr_quat as '_GT.logsqr_norm'.
_GT=edict(abs_norm=GT_abs_norm sgc_norm=GT_sgc_norm mask=GT.mask)# abs_norm: (B,H,W,3) sgc_norm: (B,H,W)
Loss_abs_norm,abs_Errs=self.loss_handler_abs_norm.compute_loss(['abs_norm'] Prob _GT)<line_sep>Loss_sgc_norm=self.loss_handler_sgc_norm.compute_loss(['sgc_norm'] Prob _GT)<line_sep># ----------------------------------------
# Compute the metric.
sign_ind=cls_pred(Prob['sgc_norm'] topk=(1 ) dim=3).data.squeeze(dim=3)# B,H,W
pr_sign_norm=self.label2signs[sign_ind]# magic here: Indexing label2signs (4x3) by sign_ind (B,H,W) becomes (B,H,W,3) (10, 240, 320, 3)
pr_abs_norm=Prob['abs_norm']<line_sep>_Prob=edict(norm=pr_abs_norm<times>pr_sign_norm)# current predicted final norm (applied sign prediction)
_Loss_norm,out_Errs=self.loss_handler_abs_norm.compute_loss(['norm'] _Prob GT)# just borrow loss_handler_abs_norm, nothing more.
# Compute acc of classification: sign_ind vs GT_sgc_norm
mask=GT['mask']<line_sep>acc=eval_cls(sign_ind[mask] GT_sgc_norm[mask])<line_sep>_metric_=edict(abs_norm=abs_Errs['abs_norm'] norm=out_Errs['norm'] sgc_norm_acc=acc )<line_sep># To add loss weights here.
Loss=edict(abs_norm=Loss_abs_norm['abs_norm']<times>10 # / 5.
sgc_norm=Loss_sgc_norm['sgc_norm'] )<line_sep><return>Loss _metric_<block_end># .update(abs_Errs)
<def_stmt>compute_pred self Prob encode_bit=8<block_start>x_abs_norm=Prob['abs_norm']# B,H,W,3
x_sgc_norm=Prob['sgc_norm']# B,H,W,4
batchsize=x_abs_norm.size(0)<line_sep>#
sign_ind=cls_pred(x_sgc_norm topk=(1 ) dim=3).data.squeeze(dim=3)# .view(-1,) # B,H,W
x_sign_norm=self.label2signs[sign_ind]# magic here: Indexing label2signs (4x3) by sign_ind (B,H,W) becomes (B,H,W,3)
#
x_norm=x_abs_norm<times>x_sign_norm# B,H,W,3
# --------------Recover rot45 trick --------------
# Note: since we applied rot45 trick, here we recover it back
_x_norm=x_norm.detach().clone()# return a copy of x_norm without grad
_y,_z,_x=_x_norm[: : : 0] _x_norm[: : : 1] _x_norm[: : : 2]<line_sep>y,z,x=x_norm[: : : 0] x_norm[: : : 1] x_norm[: : : 2]<line_sep>x[:]=self.cost<times>_x-self.sint<times>_y<line_sep>y[:]=self.sint<times>_x+self.cost<times>_y<line_sep># ------------------------------------------------
# Get cpu data.
norm=x_norm.data.cpu().numpy().copy()# B,H,W,C
<assert_stmt>encode_bit<in>[8 16]<if_stmt>encode_bit<eq>8<block_start>normImgs=((norm+1)<times>(2<power>7)).astype(np.uint8)# map [-1,1] to [0,256)
<block_end><else_stmt><block_start>normImgs=((norm+1)<times>(2<power>15)).astype(np.uint16)# map [-1,1] to [0,65535)
<block_end>Pred=edict(norm=normImgs)<line_sep><return>Pred<block_end><block_end> |
<import_from_stmt>timemachines.skaters.orbt.orbitinclusion using_orbit<if_stmt>using_orbit<block_start><import_from_stmt>timemachines.skaters.orbt.orbitwrappers orbit_lgt_iskater<import_from_stmt>timemachines.skatertools.utilities.conventions Y_TYPE A_TYPE R_TYPE E_TYPE T_TYPE<import_from_stmt>timemachines.skatertools.batch.batchskater batch_skater_factory<def_stmt>orbit_lgt_skater_factory y:Y_TYPE s k:int a:A_TYPE=<none> t:T_TYPE=<none> e:E_TYPE=<none> r:R_TYPE=<none> emp_mass=0.0 seasonality=<none><block_start><return>batch_skater_factory(y=y s=s k=k a=a t=t e=e r=r emp_mass=emp_mass iskater=orbit_lgt_iskater iskater_kwargs={'seasonality':seasonality} min_e=0 n_warm=20)<block_end><def_stmt>orbit_lgt_12 y s k a=<none> t=<none> e=<none><block_start><return>orbit_lgt_skater_factory(y=y s=s k=k a=a t=t e=e seasonality=12)<block_end><def_stmt>orbit_lgt_24 y s k a=<none> t=<none> e=<none><block_start><return>orbit_lgt_skater_factory(y s k a=a t=t e=e seasonality=24)<block_end><block_end> |
# Generated by Django 2.0.13 on 2021-08-27 12:23
<import_from_stmt>django.db migrations<def_stmt>populate_sponsorship_package_fk apps schema_editor<block_start>Sponsorship=apps.get_model('sponsors.Sponsorship')<line_sep>SponsorshipPackage=apps.get_model('sponsors.SponsorshipPackage')<for_stmt>sponsorship Sponsorship.objects.all().iterator()<block_start><try_stmt><block_start>package=SponsorshipPackage.objects.get(name=sponsorship.level_name)<line_sep>sponsorship.package=package<line_sep>sponsorship.save()<block_end><except_stmt>SponsorshipPackage.DoesNotExist<block_start><continue><block_end><block_end><block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('sponsors' '0037_sponsorship_package') ]<line_sep>operations=[migrations.RunPython(populate_sponsorship_package_fk migrations.RunPython.noop)]<block_end> |
"""Module defining DiagGGNPermute."""<import_from_stmt>backpack.core.derivatives.permute PermuteDerivatives<import_from_stmt>backpack.extensions.secondorder.diag_ggn.diag_ggn_base DiagGGNBaseModule<class_stmt>DiagGGNPermute(DiagGGNBaseModule)<block_start>"""DiagGGN extension of Permute."""<def_stmt>__init__ self<block_start>"""Initialize."""<line_sep>super().__init__(derivatives=PermuteDerivatives())<block_end><block_end> |
"""
Helper for loading a ``Trading History`` dataset
"""<import_stmt>json<import_stmt>zlib<import_stmt>pandas<as>pd<import_stmt>analysis_engine.consts<as>ae_consts<import_stmt>spylunking.log.setup_logging<as>log_utils<line_sep>log=log_utils.build_colorized_logger(name=__name__)<def_stmt>prepare_history_dataset data compress=<false> encoding='utf-8' convert_to_dict=<false> include_keys=<none> ignore_keys=<none> convert_to_dates=<none> verbose=<false><block_start>"""prepare_history_dataset
Load a ``Trading History`` dataset into a dictionary
with a ``pd.DataFrame`` for the trading history record
list
:param data: string holding contents of a ``Trading History``
from a file, s3 key or redis-key
:param compress: optional - boolean flag for decompressing
the contents of the ``data`` if necessary
(default is ``False`` and algorithms
use ``zlib`` for compression)
:param convert_to_dict: optional - bool for s3 use ``False``
and for files use ``True``
:param encoding: optional - string for data encoding
:param include_keys: optional - list of string keys
to include before from the dataset
.. note:: tickers are automatically included in the ``pd.DataFrame``
:param ignore_keys: optional - list of string keys
to remove before building the ``pd.DataFrame``
:param convert_to_dates: optional - list of string keys
to convert to datetime before building the ``pd.DataFrame``
:param verbose: optional - bool show the logs
(default is ``False``)
"""<if_stmt>verbose<block_start>log.debug('start')<block_end>use_data=<none><line_sep>parsed_data=<none><line_sep>data_as_dict=<none><if_stmt>compress<block_start><if_stmt>verbose<block_start>log.debug('decompressing')<block_end>parsed_data=zlib.decompress(data).decode(encoding)<block_end><else_stmt><block_start>parsed_data=data<block_end><if_stmt><not>parsed_data<block_start>log.error('failed parsing')<line_sep><return><none><block_end><if_stmt>verbose<block_start>log.debug('loading as dict')<block_end>use_data={}<if_stmt>convert_to_dict<block_start><try_stmt><block_start>data_as_dict=json.loads(parsed_data)<block_end><except_stmt>Exception<as>e<block_start><if_stmt>('the JSON object must be str, bytes or '<concat>'bytearray, not')<in>str(e)<block_start>log.critical(f'failed decoding json for string - double '<concat>f'compression for history dataset found ex={e}')<block_end>data_as_dict=parsed_data<block_end><block_end><else_stmt><block_start>data_as_dict=parsed_data<block_end><if_stmt>len(data_as_dict)<eq>0<block_start>log.error('empty trading history dictionary')<line_sep><return>use_data<block_end>convert_these_date_keys=['date' 'minute' 'exp_date']<line_sep>use_include_keys=['tickers' 'version' 'last_trade_data' 'algo_config_dict' 'algo_name' 'created']<if_stmt>include_keys<block_start>use_include_keys=include_keys<block_end>use_ignore_keys=[]<if_stmt>ignore_keys<block_start>use_ignore_keys=ignore_keys<block_end><for_stmt>k data_as_dict<block_start><if_stmt>k<in>use_include_keys<block_start>use_data[k]=data_as_dict[k]<block_end><block_end>all_records=[]<line_sep>num_records=0<for_stmt>ticker data_as_dict['tickers']<block_start><if_stmt>ticker<not><in>use_data<block_start>use_data[ticker]=[]<block_end><for_stmt>node data_as_dict[ticker]<block_start><for_stmt>ignore use_ignore_keys<block_start>node.pop(ignore <none>)<block_end>all_records.append(node)<block_end># end for all datasets on this date to load
num_records=len(all_records)<if_stmt>num_records<block_start><if_stmt>verbose<block_start>log.info(f'found records={num_records}')<block_end>history_df=pd.DataFrame(all_records)<for_stmt>dc convert_these_date_keys<block_start><if_stmt>dc<in>history_df<block_start>history_df[dc]=pd.to_datetime(history_df[dc] format=ae_consts.COMMON_TICK_DATE_FORMAT)<block_end><block_end># end of converting all date columns
use_data[ticker]=history_df<block_end><else_stmt><block_start>log.error(f'did not find any records={num_records} in history dataset')<block_end><block_end># end for all tickers in the dataset
<return>use_data<block_end># end of prepare_history_dataset
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
<import_stmt>unittest<import_from_stmt>telemetry.core.platform.power_monitor android_ds2784_power_monitor<class_stmt>DS2784PowerMonitorMonitorTest(unittest.TestCase)<block_start><def_stmt>testEnergyComsumption self<block_start>data=('0000 1000 -10 12\n'<concat>'1800 1000 -10 11\n'<concat>'3600 1000 -10 09\n'<concat>'5400 0000 -20 08\n'<concat>'7200 0000 -20 11\n'<concat>'9000 0000 -20 11\n')<line_sep>results=(android_ds2784_power_monitor.DS2784PowerMonitor.ParseSamplingOutput(data))<line_sep>self.assertEqual(results['power_samples_mw'] [1.2e-07 1.1e-07 9e-08 1.6e-07 2.2e-07 2.2e-07])<line_sep>self.assertEqual(results['energy_consumption_mwh'] 2.1e-07)<block_end><block_end> |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>sys<import_from_stmt>os.path join dirname<import_from_stmt>absl app<import_stmt>tensorflow<as>tf<line_sep>tf.compat.v1.enable_eager_execution()<line_sep>sys.path.append('../')<import_stmt>datasets<import_from_stmt>util io<as>ioutil<def_stmt>main _<block_start>config_ini=join(dirname(__file__) '..' 'config' 'dragon_specular.ini')<line_sep>config=ioutil.read_config(config_ini)<line_sep># Make training dataset
dataset_name=config.get('DEFAULT' 'dataset')<line_sep>Dataset=datasets.get_dataset_class(dataset_name)<line_sep>dataset=Dataset(config 'train')<line_sep>path=dataset.files[1]<line_sep>ret=dataset._load_data(path)<line_sep># Iterate
no_batch=config.getboolean('DEFAULT' 'no_batch')<line_sep>datapipe=dataset.build_pipeline(no_batch=no_batch)<for_stmt>batch_i,batch enumerate(datapipe)<block_start><import_from_stmt>IPython embed<line_sep>embed()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end> |
# Generated by Django 2.0 on 2018-08-25 14:19
<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("events" "0042_allow_team_without_country")]<line_sep>operations=[migrations.RemoveField(model_name="team" name="is_premium") migrations.RemoveField(model_name="team" name="premium_by") migrations.RemoveField(model_name="team" name="premium_expires") migrations.RemoveField(model_name="team" name="premium_started") ]<block_end> |
""" Example module """<def_stmt>java_maker *args **kwargs<block_start>""" Make you a java """<line_sep>java_library(*args **kwargs)<block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_stmt>struct<import_stmt>asyncio<import_from_stmt>io BytesIO<import_from_stmt>.base TAsyncTransportBase readall<import_from_stmt>.buffered TAsyncBufferedTransport<class_stmt>TAsyncFramedTransport(TAsyncTransportBase)<block_start>"""Class that wraps another transport and frames its I/O when writing."""<def_stmt>__init__ self trans<block_start>self._trans=trans<line_sep>self._rbuf=BytesIO()<line_sep>self._wbuf=BytesIO()<block_end><def_stmt>is_open self<block_start><return>self._trans.is_open()<block_end>@asyncio.coroutine<def_stmt>open self<block_start><return>(<yield><from>self._trans.open())<block_end><def_stmt>close self<block_start><return>self._trans.close()<block_end>@asyncio.coroutine<def_stmt>read self sz# Important: don't attempt to read the next frame if the caller
# doesn't actually need any data.
<block_start><if_stmt>sz<eq>0<block_start><return>b''<block_end>ret=self._rbuf.read(sz)<if_stmt>len(ret)<ne>0<block_start><return>ret<block_end><yield><from>self.read_frame()<line_sep><return>self._rbuf.read(sz)<block_end>@asyncio.coroutine<def_stmt>read_frame self<block_start>buff=<yield><from>readall(self._trans.read 4)<line_sep>sz,=struct.unpack('!i' buff)<line_sep>frame=<yield><from>readall(self._trans.read sz)<line_sep>self._rbuf=BytesIO(frame)<block_end><def_stmt>write self buf<block_start>self._wbuf.write(buf)<block_end>@asyncio.coroutine<def_stmt>flush self# reset wbuf before write/flush to preserve state on underlying failure
<block_start>out=self._wbuf.getvalue()<line_sep>self._wbuf=BytesIO()<line_sep># N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive
# copies
self._trans.write(struct.pack("!i" len(out))+out)<line_sep><yield><from>self._trans.flush()<block_end><def_stmt>getvalue self<block_start><return>self._trans.getvalue()<block_end><block_end><class_stmt>TAsyncFramedTransportFactory(object)<block_start><def_stmt>get_transport self trans<block_start><return>TAsyncBufferedTransport(TAsyncFramedTransport(trans))<block_end><block_end> |
<import_stmt>os<import_from_stmt>test test_support<line_sep># Skip this test if _tkinter does not exist.
test_support.import_module('_tkinter')<line_sep>this_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>lib_tk_test=os.path.abspath(os.path.join(this_dir '..' 'lib-tk' 'test'))<with_stmt>test_support.DirsOnSysPath(lib_tk_test)<block_start><import_stmt>runtktests<block_end><def_stmt>test_main <block_start><with_stmt>test_support.DirsOnSysPath(lib_tk_test)<block_start>test_support.run_unittest(*runtktests.get_tests(gui=<false> packages=['test_ttk']))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>test_main()<block_end> |
<import_from_stmt>django.db models<import_from_stmt>lbworkflow.models BaseWFObj<class_stmt>Purchase(BaseWFObj)<block_start>title=models.CharField("Title" max_length=255)<line_sep>reason=models.CharField("Reason" max_length=255)<def_stmt>__str__ self<block_start><return>self.reason<block_end><block_end><class_stmt>Item(models.Model)<block_start>purchase=models.ForeignKey(Purchase on_delete=models.CASCADE )<line_sep>name=models.CharField("Name" max_length=255)<line_sep>qty=models.IntegerField("Qty")<line_sep>note=models.CharField("Note" max_length=255)<class_stmt>Meta<block_start>verbose_name="Purchase Item"<block_end><def_stmt>__str__ self<block_start><return>self.name<block_end><block_end> |
"""Module for regex components."""<import_from_stmt>.regexconfig RegexConfig<line_sep>__all__=["RegexConfig"]<line_sep> |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-26 12:31
<import_from_future_stmt> unicode_literals<import_stmt>json<import_stmt>django.contrib.postgres.fields.jsonb<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('annotations' '0005_auto_20170826_1424') ]<def_stmt>forward_func apps schema_editor<block_start>Annotation=apps.get_model("annotations" "Annotation")<line_sep>db_alias=schema_editor.connection.alias<line_sep># Copy all valid annotations from raw_vector to vector
<for_stmt>annotation Annotation.objects.using(db_alias).all()<block_start><try_stmt><block_start>vector=json.loads(annotation.raw_vector)<for_stmt>key,value vector.items()<block_start><try_stmt># try to convert all numeric vector values to integer
<block_start>vector[key]=int(value)<block_end><except_stmt>ValueError<block_start><continue><block_end><block_end>annotation.vector=vector<line_sep>annotation.save()<block_end><except_stmt>ValueError# Annotation is invalid, delete it
<block_start>annotation.delete()<block_end><block_end><block_end><def_stmt>backward_func apps schema_editor<block_start>Annotation=apps.get_model("annotations" "Annotation")<line_sep>db_alias=schema_editor.connection.alias<line_sep># Copy all annotations from vector to raw_vector
<for_stmt>annotation Annotation.objects.using(db_alias).all()<block_start>annotation.raw_vector=json.dumps(annotation.vector)<line_sep>annotation.save()<block_end><block_end>operations=[migrations.RenameField(model_name='annotation' old_name='vector' new_name='raw_vector' ) migrations.AddField(model_name='annotation' name='vector' field=django.contrib.postgres.fields.jsonb.JSONField(null=<true>) ) migrations.RunPython(forward_func backward_func atomic=<true>) ]<block_end> |
__version__='0.159.0'<line_sep> |
<import_stmt>mock<import_stmt>os<import_stmt>pandas<as>pd<import_from_stmt>datetime datetime<import_from_stmt>flexmock flexmock<import_from_stmt>sportsipy utils<import_from_stmt>sportsipy.constants HOME<import_from_stmt>sportsipy.ncaab.constants BOXSCORES_URL SCHEDULE_URL<import_from_stmt>sportsipy.ncaab.boxscore Boxscore Boxscores<line_sep>MONTH=1<line_sep>YEAR=2020<line_sep>BOXSCORE='2020-01-22-19-louisville'<def_stmt>read_file filename<block_start>filepath=os.path.join(os.path.dirname(__file__) 'ncaab' filename)<line_sep><return>open('%s'%filepath 'r' encoding='utf8').read()<block_end><def_stmt>mock_pyquery url<block_start><class_stmt>MockPQ<block_start><def_stmt>__init__ self html_contents<block_start>self.status_code=200<line_sep>self.html_contents=html_contents<line_sep>self.text=html_contents<block_end><def_stmt>__call__ self div<block_start><return>read_file('table.html')<block_end><block_end><if_stmt>url<eq>BOXSCORES_URL%(MONTH 5 YEAR)<block_start><return>MockPQ(read_file('boxscores-1-5-2020.html'))<block_end><if_stmt>url<eq>BOXSCORES_URL%(MONTH 6 YEAR)<block_start><return>MockPQ(read_file('boxscores-1-6-2020.html'))<block_end>boxscore=read_file('%s.html'%BOXSCORE)<line_sep><return>MockPQ(boxscore)<block_end><class_stmt>MockDateTime<block_start><def_stmt>__init__ self year month<block_start>self.year=year<line_sep>self.month=month<block_end><block_end><class_stmt>TestNCAABBoxscore<block_start>@mock.patch('requests.get' side_effect=mock_pyquery)<def_stmt>setup_method self *args **kwargs<block_start>self.results={'date':'January 22, 2020' 'location':'KFC Yum! 
Center, Louisville, Kentucky' 'winner':HOME 'winning_name':'Louisville' 'winning_abbr':'LOUISVILLE' 'losing_name':'Georgia Tech' 'losing_abbr':'GEORGIA-TECH' 'pace':66.2 'away_ranking':<none> 'away_win_percentage':.421 'away_wins':8 'away_losses':11 'away_minutes_played':200 'away_field_goals':22 'away_field_goal_attempts':48 'away_field_goal_percentage':.458 'away_two_point_field_goals':17 'away_two_point_field_goal_attempts':31 'away_two_point_field_goal_percentage':.548 'away_three_point_field_goals':5 'away_three_point_field_goal_attempts':17 'away_three_point_field_goal_percentage':.294 'away_free_throws':15 'away_free_throw_attempts':20 'away_free_throw_percentage':.750 'away_offensive_rebounds':7 'away_defensive_rebounds':23 'away_total_rebounds':30 'away_assists':11 'away_steals':4 'away_blocks':4 'away_turnovers':16 'away_personal_fouls':18 'away_points':64 'away_true_shooting_percentage':.557 'away_effective_field_goal_percentage':.510 'away_three_point_attempt_rate':.354 'away_free_throw_attempt_rate':.417 'away_offensive_rebound_percentage':28.0 'away_defensive_rebound_percentage':63.9 'away_total_rebound_percentage':49.2 'away_assist_percentage':50.0 'away_steal_percentage':6.1 'away_block_percentage':10.5 'away_turnover_percentage':22.0 'away_offensive_rating':97.0 'away_defensive_rating':103.0 'home_ranking':6 'home_win_percentage':.842 'home_wins':16 'home_losses':3 'home_minutes_played':200 'home_field_goals':24 'home_field_goal_attempts':58 'home_field_goal_percentage':.414 'home_two_point_field_goals':18 'home_two_point_field_goal_attempts':38 'home_two_point_field_goal_percentage':.474 'home_three_point_field_goals':6 'home_three_point_field_goal_attempts':20 'home_three_point_field_goal_percentage':.300 'home_free_throws':14 'home_free_throw_attempts':23 'home_free_throw_percentage':.609 'home_offensive_rebounds':13 'home_defensive_rebounds':18 'home_total_rebounds':31 'home_assists':12 'home_steals':9 'home_blocks':3 'home_turnovers':10 
'home_personal_fouls':17 'home_points':68 'home_true_shooting_percentage':.493 'home_effective_field_goal_percentage':.466 'home_three_point_attempt_rate':.345 'home_free_throw_attempt_rate':.397 'home_offensive_rebound_percentage':36.1 'home_defensive_rebound_percentage':72.0 'home_total_rebound_percentage':50.8 'home_assist_percentage':50.0 'home_steal_percentage':13.6 'home_block_percentage':9.7 'home_turnover_percentage':12.8 'home_offensive_rating':103.0 'home_defensive_rating':97.0}<line_sep>flexmock(utils).should_receive('_todays_date').and_return(MockDateTime(YEAR MONTH))<line_sep>self.boxscore=Boxscore('2020-01-22-19-louisville')<block_end><def_stmt>test_ncaab_boxscore_returns_requested_boxscore self<block_start><for_stmt>attribute,value self.results.items()<block_start><assert_stmt>getattr(self.boxscore attribute)<eq>value<block_end><assert_stmt>getattr(self.boxscore 'summary')<eq>{# Box score is not parsed correctly
'away':[] 'home':[]}<block_end><def_stmt>test_invalid_url_yields_empty_class self<block_start>flexmock(Boxscore).should_receive('_retrieve_html_page').and_return(<none>)<line_sep>boxscore=Boxscore(BOXSCORE)<for_stmt>key,value boxscore.__dict__.items()<block_start><if_stmt>key<eq>'_uri'<block_start><continue><block_end><assert_stmt>value<is><none><block_end><block_end><def_stmt>test_ncaab_boxscore_dataframe_returns_dataframe_of_all_values self<block_start>df=pd.DataFrame([self.results] index=[BOXSCORE])<line_sep># Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames=[df self.boxscore.dataframe]<line_sep>df1=pd.concat(frames).drop_duplicates(keep=<false>)<assert_stmt>df1.empty<block_end><def_stmt>test_ncaab_boxscore_players self<block_start>boxscore=Boxscore(BOXSCORE)<assert_stmt>len(boxscore.home_players)<eq>10<assert_stmt>len(boxscore.away_players)<eq>7<for_stmt>player boxscore.home_players<block_start><assert_stmt><not>player.dataframe.empty<block_end><for_stmt>player boxscore.away_players<block_start><assert_stmt><not>player.dataframe.empty<block_end><block_end><def_stmt>test_ncaab_boxscore_string_representation self<block_start>expected=('Boxscore for Georgia Tech '<concat>'at Louisville (January 22, 2020)')<line_sep>boxscore=Boxscore(BOXSCORE)<assert_stmt>boxscore.__repr__()<eq>expected<block_end><block_end><class_stmt>TestNCAABBoxscores<block_start><def_stmt>setup_method self<block_start>self.expected={'1-5-2020':[{'boxscore':'2020-01-05-13-michigan-state' 'away_name':'Michigan' 'away_abbr':'michigan' 'away_score':69 'away_rank':12 'home_name':'Michigan State' 'home_abbr':'michigan-state' 'home_score':87 'home_rank':14 'non_di':<false> 'top_25':<true> 'winning_name':'Michigan State' 'winning_abbr':'michigan-state' 'losing_name':'Michigan' 'losing_abbr':'michigan'} {'boxscore':'2020-01-05-13-saint-josephs' 'away_name':'Dayton' 'away_abbr':'dayton' 'away_score':80 'away_rank':20 'home_name':"<NAME>" 'home_abbr':'saint-josephs' 'home_score':67 'home_rank':<none> 'non_di':<false> 'top_25':<true> 'winning_name':'Dayton' 'winning_abbr':'dayton' 'losing_name':"<NAME>" 'losing_abbr':'saint-josephs'} {'boxscore':'2020-01-05-15-american' 'away_name':'Boston University' 'away_abbr':'boston-university' 'away_score':63 'away_rank':<none> 'home_name':'American' 'home_abbr':'american' 'home_score':67 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'American' 'winning_abbr':'american' 'losing_name':'Boston University' 'losing_abbr':'boston-university'} {'boxscore':'2020-01-05-14-lafayette' 
'away_name':'Bucknell' 'away_abbr':'bucknell' 'away_score':78 'away_rank':<none> 'home_name':'Lafayette' 'home_abbr':'lafayette' 'home_score':66 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Bucknell' 'winning_abbr':'bucknell' 'losing_name':'Lafayette' 'losing_abbr':'lafayette'} {'boxscore':'2020-01-05-14-duquesne' 'away_name':'Davidson' 'away_abbr':'davidson' 'away_score':64 'away_rank':<none> 'home_name':'Duquesne' 'home_abbr':'duquesne' 'home_score':71 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Duquesne' 'winning_abbr':'duquesne' 'losing_name':'Davidson' 'losing_abbr':'davidson'} {'boxscore':'2020-01-05-16-south-dakota' 'away_name':'Denver' 'away_abbr':'denver' 'away_score':78 'away_rank':<none> 'home_name':'South Dakota' 'home_abbr':'south-dakota' 'home_score':80 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'South Dakota' 'winning_abbr':'south-dakota' 'losing_name':'Denver' 'losing_abbr':'denver'} {'boxscore':'2020-01-05-14-canisius' 'away_name':'Fairfield' 'away_abbr':'fairfield' 'away_score':46 'away_rank':<none> 'home_name':'Canisius' 'home_abbr':'canisius' 'home_score':42 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Fairfield' 'winning_abbr':'fairfield' 'losing_name':'Canisius' 'losing_abbr':'canisius'} {'boxscore':'2020-01-05-17-northwestern-state' 'away_name':'<NAME>' 'away_abbr':'houston-baptist' 'away_score':79 'away_rank':<none> 'home_name':'Northwestern State' 'home_abbr':'northwestern-state' 'home_score':106 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Northwestern State' 'winning_abbr':'northwestern-state' 'losing_name':'<NAME>' 'losing_abbr':'houston-baptist'} {'boxscore':'2020-01-05-14-milwaukee' 'away_name':'UIC' 'away_abbr':'illinois-chicago' 'away_score':62 'away_rank':<none> 'home_name':'Milwaukee' 'home_abbr':'milwaukee' 'home_score':64 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Milwaukee' 
'winning_abbr':'milwaukee' 'losing_name':'UIC' 'losing_abbr':'illinois-chicago'} {'boxscore':'2020-01-05-14-monmouth' 'away_name':'Iona' 'away_abbr':'iona' 'away_score':61 'away_rank':<none> 'home_name':'Monmouth' 'home_abbr':'monmouth' 'home_score':73 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Monmouth' 'winning_abbr':'monmouth' 'losing_name':'Iona' 'losing_abbr':'iona'} {'boxscore':'2020-01-05-17-north-dakota' 'away_name':"<NAME>" 'away_abbr':'ipfw' 'away_score':69 'away_rank':<none> 'home_name':'North Dakota' 'home_abbr':'north-dakota' 'home_score':83 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'North Dakota' 'winning_abbr':'north-dakota' 'losing_name':"<NAME>" 'losing_abbr':'ipfw'} {'boxscore':'2020-01-05-14-green-bay' 'away_name':'IUPUI' 'away_abbr':'iupui' 'away_score':93 'away_rank':<none> 'home_name':'<NAME>' 'home_abbr':'green-bay' 'home_score':78 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'IUPUI' 'winning_abbr':'iupui' 'losing_name':'<NAME>' 'losing_abbr':'green-bay'} {'boxscore':'2020-01-05-14-fordham' 'away_name':'<NAME>' 'away_abbr':'la-salle' 'away_score':66 'away_rank':<none> 'home_name':'Fordham' 'home_abbr':'fordham' 'home_score':60 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'la-salle' 'losing_name':'Fordham' 'losing_abbr':'fordham'} {'boxscore':'2020-01-05-14-lehigh' 'away_name':'Loyola (MD)' 'away_abbr':'loyola-md' 'away_score':71 'away_rank':<none> 'home_name':'Lehigh' 'home_abbr':'lehigh' 'home_score':78 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Lehigh' 'winning_abbr':'lehigh' 'losing_name':'Loyola (MD)' 'losing_abbr':'loyola-md'} {'boxscore':'2020-01-05-13-niagara' 'away_name':'Manhattan' 'away_abbr':'manhattan' 'away_score':67 'away_rank':<none> 'home_name':'Niagara' 'home_abbr':'niagara' 'home_score':62 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Manhattan' 
'winning_abbr':'manhattan' 'losing_name':'Niagara' 'losing_abbr':'niagara'} {'boxscore':'2020-01-05-14-saint-peters' 'away_name':'Marist' 'away_abbr':'marist' 'away_score':40 'away_rank':<none> 'home_name':"<NAME>" 'home_abbr':'saint-peters' 'home_score':66 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':"<NAME>" 'winning_abbr':'saint-peters' 'losing_name':'Marist' 'losing_abbr':'marist'} {'boxscore':'2020-01-05-16-saint-louis' 'away_name':'UMass' 'away_abbr':'massachusetts' 'away_score':80 'away_rank':<none> 'home_name':'Saint Louis' 'home_abbr':'saint-louis' 'home_score':83 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'saint-louis' 'losing_name':'UMass' 'losing_abbr':'massachusetts'} {'boxscore':'2020-01-05-12-holy-cross' 'away_name':'Navy' 'away_abbr':'navy' 'away_score':61 'away_rank':<none> 'home_name':'<NAME>' 'home_abbr':'holy-cross' 'home_score':63 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'holy-cross' 'losing_name':'Navy' 'losing_abbr':'navy'} {'boxscore':'2020-01-05-15-oakland' 'away_name':'Northern Kentucky' 'away_abbr':'northern-kentucky' 'away_score':75 'away_rank':<none> 'home_name':'Oakland' 'home_abbr':'oakland' 'home_score':64 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Northern Kentucky' 'winning_abbr':'northern-kentucky' 'losing_name':'Oakland' 'losing_abbr':'oakland'} {'boxscore':'2020-01-05-15-north-dakota-state' 'away_name':'Northland' 'away_abbr':'Northland' 'away_score':43 'away_rank':<none> 'home_name':'North Dakota State' 'home_abbr':'north-dakota-state' 'home_score':97 'home_rank':<none> 'non_di':<true> 'top_25':<false> 'winning_name':'North Dakota State' 'winning_abbr':'north-dakota-state' 'losing_name':'Northland' 'losing_abbr':'Northland'} {'boxscore':'2020-01-05-19-minnesota' 'away_name':'Northwestern' 'away_abbr':'northwestern' 'away_score':68 'away_rank':<none> 'home_name':'Minnesota' 
'home_abbr':'minnesota' 'home_score':77 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Minnesota' 'winning_abbr':'minnesota' 'losing_name':'Northwestern' 'losing_abbr':'northwestern'} {'boxscore':'2020-01-05-18-colorado' 'away_name':'Oregon State' 'away_abbr':'oregon-state' 'away_score':76 'away_rank':<none> 'home_name':'Colorado' 'home_abbr':'colorado' 'home_score':68 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Oregon State' 'winning_abbr':'oregon-state' 'losing_name':'Colorado' 'losing_abbr':'colorado'} {'boxscore':'2020-01-05-20-illinois' 'away_name':'Purdue' 'away_abbr':'purdue' 'away_score':37 'away_rank':<none> 'home_name':'Illinois' 'home_abbr':'illinois' 'home_score':63 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Illinois' 'winning_abbr':'illinois' 'losing_name':'Purdue' 'losing_abbr':'purdue'} {'boxscore':'2020-01-05-12-rhode-island' 'away_name':'Richmond' 'away_abbr':'richmond' 'away_score':69 'away_rank':<none> 'home_name':'<NAME>' 'home_abbr':'rhode-island' 'home_score':61 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Richmond' 'winning_abbr':'richmond' 'losing_name':'<NAME>' 'losing_abbr':'rhode-island'} {'boxscore':'2020-01-05-14-rider' 'away_name':'Siena' 'away_abbr':'siena' 'away_score':77 'away_rank':<none> 'home_name':'Rider' 'home_abbr':'rider' 'home_score':85 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Rider' 'winning_abbr':'rider' 'losing_name':'Siena' 'losing_abbr':'siena'} {'boxscore':'2020-01-05-22-washington' 'away_name':'USC' 'away_abbr':'southern-california' 'away_score':40 'away_rank':<none> 'home_name':'Washington' 'home_abbr':'washington' 'home_score':72 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Washington' 'winning_abbr':'washington' 'losing_name':'USC' 'losing_abbr':'southern-california'} {'boxscore':'2020-01-05-16-george-washington' 'away_name':'St. 
Bonaventure' 'away_abbr':'st-bonaventure' 'away_score':71 'away_rank':<none> 'home_name':'<NAME>' 'home_abbr':'george-washington' 'home_score':66 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'St. Bonaventure' 'winning_abbr':'st-bonaventure' 'losing_name':'<NAME>' 'losing_abbr':'george-washington'} {'boxscore':'2020-01-05-16-xavier' 'away_name':"<NAME> (NY)" 'away_abbr':'st-johns-ny' 'away_score':67 'away_rank':<none> 'home_name':'Xavier' 'home_abbr':'xavier' 'home_score':75 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Xavier' 'winning_abbr':'xavier' 'losing_name':"<NAME> (NY)" 'losing_abbr':'st-johns-ny'} {'boxscore':'2020-01-05-13-maine' 'away_name':'<NAME>' 'away_abbr':'stony-brook' 'away_score':73 'away_rank':<none> 'home_name':'Maine' 'home_abbr':'maine' 'home_score':52 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'stony-brook' 'losing_name':'Maine' 'losing_abbr':'maine'} {'boxscore':'2020-01-05-12-george-mason' 'away_name':'VCU' 'away_abbr':'virginia-commonwealth' 'away_score':72 'away_rank':<none> 'home_name':'<NAME>' 'home_abbr':'george-mason' 'home_score':59 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'VCU' 'winning_abbr':'virginia-commonwealth' 'losing_name':'<NAME>' 'losing_abbr':'george-mason'} {'boxscore':'2020-01-05-13-detroit-mercy' 'away_name':"Wright State" 'away_abbr':"wright-state" 'away_score':70 'away_rank':<none> 'home_name':'Detroit' 'home_abbr':'detroit-mercy' 'home_score':69 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Wright State' 'winning_abbr':'wright-state' 'losing_name':"Detroit" 'losing_abbr':"detroit-mercy"}]}<block_end>@mock.patch('requests.get' side_effect=mock_pyquery)<def_stmt>test_boxscores_search self *args **kwargs<block_start>result=Boxscores(datetime(2020 1 5)).games<assert_stmt>result<eq>self.expected<block_end>@mock.patch('requests.get' 
side_effect=mock_pyquery)<def_stmt>test_boxscores_search_invalid_end self *args **kwargs<block_start>result=Boxscores(datetime(2020 1 5) datetime(2020 1 4)).games<assert_stmt>result<eq>self.expected<block_end>@mock.patch('requests.get' side_effect=mock_pyquery)<def_stmt>test_boxscores_search_multiple_days self *args **kwargs<block_start>expected={'1-5-2020':[{'boxscore':'2020-01-05-13-michigan-state' 'away_name':'Michigan' 'away_abbr':'michigan' 'away_score':69 'away_rank':12 'home_name':'Michigan State' 'home_abbr':'michigan-state' 'home_score':87 'home_rank':14 'non_di':<false> 'top_25':<true> 'winning_name':'Michigan State' 'winning_abbr':'michigan-state' 'losing_name':'Michigan' 'losing_abbr':'michigan'} {'boxscore':'2020-01-05-13-saint-josephs' 'away_name':'Dayton' 'away_abbr':'dayton' 'away_score':80 'away_rank':20 'home_name':"<NAME>" 'home_abbr':'saint-josephs' 'home_score':67 'home_rank':<none> 'non_di':<false> 'top_25':<true> 'winning_name':'Dayton' 'winning_abbr':'dayton' 'losing_name':"<NAME>" 'losing_abbr':'saint-josephs'} {'boxscore':'2020-01-05-15-american' 'away_name':'Boston University' 'away_abbr':'boston-university' 'away_score':63 'away_rank':<none> 'home_name':'American' 'home_abbr':'american' 'home_score':67 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'American' 'winning_abbr':'american' 'losing_name':'Boston University' 'losing_abbr':'boston-university'} {'boxscore':'2020-01-05-14-lafayette' 'away_name':'Bucknell' 'away_abbr':'bucknell' 'away_score':78 'away_rank':<none> 'home_name':'Lafayette' 'home_abbr':'lafayette' 'home_score':66 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Bucknell' 'winning_abbr':'bucknell' 'losing_name':'Lafayette' 'losing_abbr':'lafayette'} {'boxscore':'2020-01-05-14-duquesne' 'away_name':'Davidson' 'away_abbr':'davidson' 'away_score':64 'away_rank':<none> 'home_name':'Duquesne' 'home_abbr':'duquesne' 'home_score':71 'home_rank':<none> 'non_di':<false> 'top_25':<false> 
'winning_name':'Duquesne' 'winning_abbr':'duquesne' 'losing_name':'Davidson' 'losing_abbr':'davidson'} {'boxscore':'2020-01-05-16-south-dakota' 'away_name':'Denver' 'away_abbr':'denver' 'away_score':78 'away_rank':<none> 'home_name':'South Dakota' 'home_abbr':'south-dakota' 'home_score':80 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'South Dakota' 'winning_abbr':'south-dakota' 'losing_name':'Denver' 'losing_abbr':'denver'} {'boxscore':'2020-01-05-14-canisius' 'away_name':'Fairfield' 'away_abbr':'fairfield' 'away_score':46 'away_rank':<none> 'home_name':'Canisius' 'home_abbr':'canisius' 'home_score':42 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Fairfield' 'winning_abbr':'fairfield' 'losing_name':'Canisius' 'losing_abbr':'canisius'} {'boxscore':'2020-01-05-17-northwestern-state' 'away_name':'<NAME>' 'away_abbr':'houston-baptist' 'away_score':79 'away_rank':<none> 'home_name':'Northwestern State' 'home_abbr':'northwestern-state' 'home_score':106 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Northwestern State' 'winning_abbr':'northwestern-state' 'losing_name':'Houston Baptist' 'losing_abbr':'houston-baptist'} {'boxscore':'2020-01-05-14-milwaukee' 'away_name':'UIC' 'away_abbr':'illinois-chicago' 'away_score':62 'away_rank':<none> 'home_name':'Milwaukee' 'home_abbr':'milwaukee' 'home_score':64 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Milwaukee' 'winning_abbr':'milwaukee' 'losing_name':'UIC' 'losing_abbr':'illinois-chicago'} {'boxscore':'2020-01-05-14-monmouth' 'away_name':'Iona' 'away_abbr':'iona' 'away_score':61 'away_rank':<none> 'home_name':'Monmouth' 'home_abbr':'monmouth' 'home_score':73 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Monmouth' 'winning_abbr':'monmouth' 'losing_name':'Iona' 'losing_abbr':'iona'} {'boxscore':'2020-01-05-17-north-dakota' 'away_name':"<NAME>" 'away_abbr':'ipfw' 'away_score':69 'away_rank':<none> 'home_name':'North 
Dakota' 'home_abbr':'north-dakota' 'home_score':83 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'North Dakota' 'winning_abbr':'north-dakota' 'losing_name':"<NAME>" 'losing_abbr':'ipfw'} {'boxscore':'2020-01-05-14-green-bay' 'away_name':'IUPUI' 'away_abbr':'iupui' 'away_score':93 'away_rank':<none> 'home_name':'Green Bay' 'home_abbr':'green-bay' 'home_score':78 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'IUPUI' 'winning_abbr':'iupui' 'losing_name':'<NAME>' 'losing_abbr':'green-bay'} {'boxscore':'2020-01-05-14-fordham' 'away_name':'<NAME>' 'away_abbr':'la-salle' 'away_score':66 'away_rank':<none> 'home_name':'Fordham' 'home_abbr':'fordham' 'home_score':60 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'la-salle' 'losing_name':'Fordham' 'losing_abbr':'fordham'} {'boxscore':'2020-01-05-14-lehigh' 'away_name':'Loyola (MD)' 'away_abbr':'loyola-md' 'away_score':71 'away_rank':<none> 'home_name':'Lehigh' 'home_abbr':'lehigh' 'home_score':78 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Lehigh' 'winning_abbr':'lehigh' 'losing_name':'Loyola (MD)' 'losing_abbr':'loyola-md'} {'boxscore':'2020-01-05-13-niagara' 'away_name':'Manhattan' 'away_abbr':'manhattan' 'away_score':67 'away_rank':<none> 'home_name':'Niagara' 'home_abbr':'niagara' 'home_score':62 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Manhattan' 'winning_abbr':'manhattan' 'losing_name':'Niagara' 'losing_abbr':'niagara'} {'boxscore':'2020-01-05-14-saint-peters' 'away_name':'Marist' 'away_abbr':'marist' 'away_score':40 'away_rank':<none> 'home_name':"<NAME>" 'home_abbr':'saint-peters' 'home_score':66 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':"<NAME>" 'winning_abbr':'saint-peters' 'losing_name':'Marist' 'losing_abbr':'marist'} {'boxscore':'2020-01-05-16-saint-louis' 'away_name':'UMass' 'away_abbr':'massachusetts' 'away_score':80 'away_rank':<none> 
'home_name':'<NAME>' 'home_abbr':'saint-louis' 'home_score':83 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Saint Louis' 'winning_abbr':'saint-louis' 'losing_name':'UMass' 'losing_abbr':'massachusetts'} {'boxscore':'2020-01-05-12-holy-cross' 'away_name':'Navy' 'away_abbr':'navy' 'away_score':61 'away_rank':<none> 'home_name':'<NAME>' 'home_abbr':'holy-cross' 'home_score':63 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'holy-cross' 'losing_name':'Navy' 'losing_abbr':'navy'} {'boxscore':'2020-01-05-15-oakland' 'away_name':'Northern Kentucky' 'away_abbr':'northern-kentucky' 'away_score':75 'away_rank':<none> 'home_name':'Oakland' 'home_abbr':'oakland' 'home_score':64 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>entucky' 'winning_abbr':'northern-kentucky' 'losing_name':'Oakland' 'losing_abbr':'oakland'} {'boxscore':'2020-01-05-15-north-dakota-state' 'away_name':'Northland' 'away_abbr':'Northland' 'away_score':43 'away_rank':<none> 'home_name':'North Dakota State' 'home_abbr':'north-dakota-state' 'home_score':97 'home_rank':<none> 'non_di':<true> 'top_25':<false> 'winning_name':'North Dakota State' 'winning_abbr':'north-dakota-state' 'losing_name':'Northland' 'losing_abbr':'Northland'} {'boxscore':'2020-01-05-19-minnesota' 'away_name':'Northwestern' 'away_abbr':'northwestern' 'away_score':68 'away_rank':<none> 'home_name':'Minnesota' 'home_abbr':'minnesota' 'home_score':77 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Minnesota' 'winning_abbr':'minnesota' 'losing_name':'Northwestern' 'losing_abbr':'northwestern'} {'boxscore':'2020-01-05-18-colorado' 'away_name':'Oregon State' 'away_abbr':'oregon-state' 'away_score':76 'away_rank':<none> 'home_name':'Colorado' 'home_abbr':'colorado' 'home_score':68 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Oregon State' 'winning_abbr':'oregon-state' 'losing_name':'Colorado' 
'losing_abbr':'colorado'} {'boxscore':'2020-01-05-20-illinois' 'away_name':'Purdue' 'away_abbr':'purdue' 'away_score':37 'away_rank':<none> 'home_name':'Illinois' 'home_abbr':'illinois' 'home_score':63 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Illinois' 'winning_abbr':'illinois' 'losing_name':'Purdue' 'losing_abbr':'purdue'} {'boxscore':'2020-01-05-12-rhode-island' 'away_name':'Richmond' 'away_abbr':'richmond' 'away_score':69 'away_rank':<none> 'home_name':'Rhode Island' 'home_abbr':'rhode-island' 'home_score':61 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Richmond' 'winning_abbr':'richmond' 'losing_name':'<NAME>' 'losing_abbr':'rhode-island'} {'boxscore':'2020-01-05-14-rider' 'away_name':'Siena' 'away_abbr':'siena' 'away_score':77 'away_rank':<none> 'home_name':'Rider' 'home_abbr':'rider' 'home_score':85 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Rider' 'winning_abbr':'rider' 'losing_name':'Siena' 'losing_abbr':'siena'} {'boxscore':'2020-01-05-22-washington' 'away_name':'USC' 'away_abbr':'southern-california' 'away_score':40 'away_rank':<none> 'home_name':'Washington' 'home_abbr':'washington' 'home_score':72 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Washington' 'winning_abbr':'washington' 'losing_name':'USC' 'losing_abbr':'southern-california'} {'boxscore':'2020-01-05-16-george-washington' 'away_name':'<NAME>' 'away_abbr':'st-bonaventure' 'away_score':71 'away_rank':<none> 'home_name':'<NAME>' 'home_abbr':'george-washington' 'home_score':66 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'st-bonaventure' 'losing_name':'<NAME>' 'losing_abbr':'george-washington'} {'boxscore':'2020-01-05-16-xavier' 'away_name':"<NAME> (NY)" 'away_abbr':'st-johns-ny' 'away_score':67 'away_rank':<none> 'home_name':'Xavier' 'home_abbr':'xavier' 'home_score':75 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Xavier' 
'winning_abbr':'xavier' 'losing_name':"<NAME> (NY)" 'losing_abbr':'st-johns-ny'} {'boxscore':'2020-01-05-13-maine' 'away_name':'<NAME>' 'away_abbr':'stony-brook' 'away_score':73 'away_rank':<none> 'home_name':'Maine' 'home_abbr':'maine' 'home_score':52 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'stony-brook' 'losing_name':'Maine' 'losing_abbr':'maine'} {'boxscore':'2020-01-05-12-george-mason' 'away_name':'VCU' 'away_abbr':'virginia-commonwealth' 'away_score':72 'away_rank':<none> 'home_name':'<NAME>' 'home_abbr':'george-mason' 'home_score':59 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'VCU' 'winning_abbr':'virginia-commonwealth' 'losing_name':'<NAME>' 'losing_abbr':'george-mason'} {'boxscore':'2020-01-05-13-detroit-mercy' 'away_name':"<NAME>" 'away_abbr':"wright-state" 'away_score':70 'away_rank':<none> 'home_name':'Detroit' 'home_abbr':'detroit-mercy' 'home_score':69 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'wright-state' 'losing_name':"Detroit" 'losing_abbr':"detroit-mercy"}] '1-6-2020':[{'boxscore':'2020-01-06-21-oklahoma-state' 'away_name':'West Virginia' 'away_abbr':'west-virginia' 'away_score':55 'away_rank':17 'home_name':'Oklahoma State' 'home_abbr':'oklahoma-state' 'home_score':41 'home_rank':<none> 'non_di':<false> 'top_25':<true> 'winning_name':'West Virginia' 'winning_abbr':'west-virginia' 'losing_name':'Oklahoma State' 'losing_abbr':'oklahoma-state'} {'boxscore':'2020-01-06-20-jackson-state' 'away_name':'Alabama A&M' 'away_abbr':'alabama-am' 'away_score':66 'away_rank':<none> 'home_name':'Jackson State' 'home_abbr':'jackson-state' 'home_score':57 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Alabama A&M' 'winning_abbr':'alabama-am' 'losing_name':'<NAME>' 'losing_abbr':'jackson-state'} {'boxscore':'2020-01-06-20-grambling' 'away_name':'Alabama State' 'away_abbr':'alabama-state' 'away_score':63 
'away_rank':<none> 'home_name':'Grambling' 'home_abbr':'grambling' 'home_score':68 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Grambling' 'winning_abbr':'grambling' 'losing_name':'Alabama State' 'losing_abbr':'alabama-state'} {'boxscore':'2020-01-06-20-texas-southern' 'away_name':'Alcorn State' 'away_abbr':'alcorn-state' 'away_score':95 'away_rank':<none> 'home_name':'Texas Southern' 'home_abbr':'texas-southern' 'home_score':80 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Al<NAME>' 'winning_abbr':'alcorn-state' 'losing_name':'Texas Southern' 'losing_abbr':'texas-southern'} {'boxscore':'2020-01-06-19-howard' 'away_name':'Bethune-Cookman' 'away_abbr':'bethune-cookman' 'away_score':102 'away_rank':<none> 'home_name':'Howard' 'home_abbr':'howard' 'home_score':73 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Bethune-Cookman' 'winning_abbr':'bethune-cookman' 'losing_name':'Howard' 'losing_abbr':'howard'} {'boxscore':'2020-01-06-19-army' 'away_name':'Colgate' 'away_abbr':'colgate' 'away_score':70 'away_rank':<none> 'home_name':'Army' 'home_abbr':'army' 'home_score':65 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Colgate' 'winning_abbr':'colgate' 'losing_name':'Army' 'losing_abbr':'army'} {'boxscore':'2020-01-06-19-north-carolina-at' 'away_name':'Florida A&M' 'away_abbr':'florida-am' 'away_score':90 'away_rank':<none> 'home_name':'North Carolina A&T' 'home_abbr':'north-carolina-at' 'home_score':97 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'North Carolina A&T' 'winning_abbr':'north-carolina-at' 'losing_name':'Florida A&M' 'losing_abbr':'florida-am'} {'boxscore':'2020-01-06-19-arkansas-little-rock' 'away_name':'Georgia Southern' 'away_abbr':'georgia-southern' 'away_score':73 'away_rank':<none> 'home_name':'<NAME>ock' 'home_abbr':'little-rock' 'home_score':79 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 
'winning_abbr':'little-rock' 'losing_name':'Georgia Southern' 'losing_abbr':'georgia-southern'} {'boxscore':'2020-01-06-20-arkansas-state' 'away_name':'Georgia State' 'away_abbr':'georgia-state' 'away_score':87 'away_rank':<none> 'home_name':'Arkansas State' 'home_abbr':'arkansas-state' 'home_score':90 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Arkansas State' 'winning_abbr':'arkansas-state' 'losing_name':'Georgia State' 'losing_abbr':'georgia-state'} {'boxscore':'2020-01-06-19-appalachian-state' 'away_name':'Louisiana' 'away_abbr':'louisiana' 'away_score':81 'away_rank':<none> 'home_name':'Appalachian State' 'home_abbr':'appalachian-state' 'home_score':73 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Louisiana' 'winning_abbr':'louisiana' 'losing_name':'Appalachian State' 'losing_abbr':'appalachian-state'} {'boxscore':'2020-01-06-19-coastal-carolina' 'away_name':'Louisiana-Monroe' 'away_abbr':'louisiana-monroe' 'away_score':64 'away_rank':<none> 'home_name':'Coastal Carolina' 'home_abbr':'coastal-carolnia' 'home_score':93 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Coastal Carolina' 'winning_abbr':'coastal-carolina' 'losing_name':'Louisiana-Monroe' 'losing_abbr':'louisiana-monroe'} {'boxscore':'2020-01-06-19-coppin-state' 'away_name':'Norfolk State' 'away_abbr':'norfolk-state' 'away_score':82 'away_rank':<none> 'home_name':'Coppin State' 'home_abbr':'coppin-state' 'home_score':59 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Norfolk State' 'winning_abbr':'norfolk-state' 'losing_name':'Coppin State' 'losing_abbr':'coppin-state'} {'boxscore':'2020-01-06-20-texas-arlington' 'away_name':'South Alabama' 'away_abbr':'south-alabama' 'away_score':66 'away_rank':<none> 'home_name':'Texas-Arlington' 'home_abbr':'texas-arlington' 'home_score':54 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'South Alabama' 'winning_abbr':'south-alabama' 
'losing_name':'Texas-Arlington' 'losing_abbr':'texas-arlington'} {'boxscore':'2020-01-06-19-morgan-state' 'away_name':'South Carolina State' 'away_abbr':'south-carolina-state' 'away_score':63 'away_rank':<none> 'home_name':'Morgan State' 'home_abbr':'morgan-state' 'home_score':77 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Morgan State' 'winning_abbr':'morgan-state' 'losing_name':'South Carolina State' 'losing_abbr':'south-carolina-state'} {'boxscore':'2020-01-06-21-prairie-view' 'away_name':'Southern' 'away_abbr':'southern' 'away_score':54 'away_rank':<none> 'home_name':'Prairie View' 'home_abbr':'prairie-view' 'home_score':64 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'<NAME>' 'winning_abbr':'prairie-view' 'losing_name':'Southern' 'losing_abbr':'southern'} {'boxscore':'2020-01-06-20-texas-state' 'away_name':'Troy' 'away_abbr':'troy' 'away_score':71 'away_rank':<none> 'home_name':'Texas State' 'home_abbr':'texas-state' 'home_score':63 'home_rank':<none> 'non_di':<false> 'top_25':<false> 'winning_name':'Troy' 'winning_abbr':'troy' 'losing_name':'Texas State' 'losing_abbr':'texas-state'}]}<block_end>@mock.patch('requests.get' side_effect=mock_pyquery)<def_stmt>test_boxscores_search_string_representation self *args **kwargs<block_start>result=Boxscores(datetime(2020 1 5))<assert_stmt>result.__repr__()<eq>'NCAAB games for 1-5-2020'<block_end><block_end> |
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.initializer as I

import numpy as np

np.random.seed(1)


class ResidualStack(object):
    """Stack of pre-activation residual blocks shared by the VQ-VAE
    encoder and decoder.

    Args:
        in_channels: number of channels of the input feature map.
        num_hidden: number of channels used inside each residual block.
        num_res_layers: number of residual blocks in the stack.
        rng: random source forwarded to the parametric functions.
    """

    def __init__(self, in_channels, num_hidden, num_res_layers, rng=313):
        self.in_channels = in_channels
        self.num_hidden = num_hidden
        self.num_res_layers = num_res_layers
        self.rng = rng

    def __call__(self, x, test):
        """Apply all residual blocks to `x`, followed by a final ReLU."""
        out = x
        for i in range(self.num_res_layers):
            # BUGFIX: `test` was previously not forwarded here, so the
            # batch normalization inside every block always used
            # batch_stat=True (batch statistics / training mode), even
            # when the stack was invoked with test=True for inference.
            out = self.res_block(out, scope_name='res_block_' + str(i),
                                 test=test)
        return F.relu(out)

    def res_block(self, x, scope_name='res_block', test=False):
        """One residual block: ReLU -> 3x3 conv -> BN -> ReLU -> 1x1 conv
        -> BN, with the result added back onto the input `x`."""
        with nn.parameter_scope(scope_name):
            out = F.relu(x)
            out = PF.convolution(out, self.num_hidden, (3, 3), stride=(1, 1),
                                 pad=(1, 1), with_bias=False, name='conv_1',
                                 rng=self.rng)
            out = PF.batch_normalization(out, name='bn_1',
                                         batch_stat=not test)
            out = F.relu(out)
            out = PF.convolution(out, self.num_hidden, (1, 1), stride=(1, 1),
                                 with_bias=False, name='conv_2', rng=self.rng)
            out = PF.batch_normalization(out, name='bn_2',
                                         batch_stat=not test)
        return x + out


class VectorQuantizer(object):
    """Nearest-neighbour vector quantization layer (van den Oord et al.,
    "Neural Discrete Representation Learning").

    Holds a codebook of `num_embedding` vectors of size `embedding_dim`
    and maps each spatial position of the input to its closest codeword.

    Args:
        embedding_dim: dimensionality of each codebook vector.
        num_embedding: number of codebook entries.
        commitment_cost: weight of the encoder commitment loss term.
        rng: numpy RandomState used to initialize the codebook.
        scope_name: parameter scope under which the codebook lives.
    """

    def __init__(self, embedding_dim, num_embedding, commitment_cost, rng,
                 scope_name='vector_quantizer'):
        self.embedding_dim = embedding_dim
        self.num_embedding = num_embedding
        self.commitment_cost = commitment_cost
        self.rng = rng
        self.scope_name = scope_name

        with nn.parameter_scope(scope_name):
            self.embedding_weight = nn.parameter.get_parameter_or_create(
                'W', shape=(self.num_embedding, self.embedding_dim),
                initializer=I.UniformInitializer(
                    (-1. / self.num_embedding, 1. / self.num_embedding),
                    rng=self.rng),
                need_grad=True)

    def __call__(self, x, return_encoding_indices=False):
        """Quantize `x` (NCHW) against the codebook.

        Returns either `(encoding_indices, quantized)` when
        `return_encoding_indices` is True, or
        `(loss, quantized, perplexity, encodings)` otherwise.
        """
        # Work in NHWC so the channel axis lines up with embedding_dim.
        x = F.transpose(x, (0, 2, 3, 1))
        x_flat = x.reshape((-1, self.embedding_dim))

        # Pairwise squared distances: |x|^2 + |w|^2 - 2 x.w
        x_flat_squared = F.broadcast(
            F.sum(x_flat**2, axis=1, keepdims=True),
            (x_flat.shape[0], self.num_embedding))
        emb_wt_squared = F.transpose(
            F.sum(self.embedding_weight**2, axis=1, keepdims=True), (1, 0))
        distances = x_flat_squared + emb_wt_squared - 2 * \
            F.affine(x_flat, F.transpose(self.embedding_weight, (1, 0)))

        encoding_indices = F.min(distances, only_index=True, axis=1,
                                 keepdims=True)
        encoding_indices.need_grad = False

        quantized = F.embed(
            encoding_indices.reshape(encoding_indices.shape[:-1]),
            self.embedding_weight).reshape(x.shape)

        if return_encoding_indices:
            return encoding_indices, F.transpose(quantized, (0, 3, 1, 2))

        encodings = F.one_hot(encoding_indices, (self.num_embedding,))

        # Codebook loss pulls embeddings towards encoder outputs;
        # commitment loss pulls encoder outputs towards the codebook.
        e_latent_loss = F.mean(F.squared_error(
            quantized.get_unlinked_variable(need_grad=False), x))
        q_latent_loss = F.mean(F.squared_error(
            quantized, x.get_unlinked_variable(need_grad=False)))
        loss = q_latent_loss + self.commitment_cost * e_latent_loss

        # Straight-through estimator: gradients flow through `x`.
        quantized = x + (quantized - x).get_unlinked_variable(need_grad=False)

        avg_probs = F.mean(encodings, axis=0)
        perplexity = F.exp(-F.sum(avg_probs * F.log(avg_probs + 1.0e-10)))

        return (loss, F.transpose(quantized, (0, 3, 1, 2)), perplexity,
                encodings)


class VQVAE(object):
    """VQ-VAE model: conv encoder -> vector quantizer -> deconv decoder.

    All hyper-parameters are read from `config['model']`.
    """

    def __init__(self, config, training=True):
        self.in_channels = config['model']['in_channels']
        self.num_hidden = config['model']['num_hidden']
        self.num_res_layers = config['model']['num_res_layers']
        self.rng = np.random.RandomState(config['model']['rng'])

        self.encoder_res_stack = ResidualStack(
            in_channels=self.num_hidden,
            num_hidden=self.num_hidden,
            num_res_layers=self.num_res_layers,
            rng=self.rng)
        self.decoder_res_stack = ResidualStack(
            in_channels=self.num_hidden,
            num_hidden=self.num_hidden,
            num_res_layers=self.num_res_layers,
            rng=self.rng)

        self.num_embedding = config['model']['num_embeddings']
        self.embedding_dim = config['model']['embedding_dim']
        self.commitment_cost = config['model']['commitment_cost']
        # NOTE(review): `decay` is stored but not used in this module --
        # presumably consumed by an EMA codebook update elsewhere; confirm.
        self.decay = config['model']['decay']
        self.training = training

        self.vq = VectorQuantizer(self.embedding_dim, self.num_embedding,
                                  self.commitment_cost, self.rng)

    def encoder(self, x, test):
        """Downsample `x` by 4x (two strided convs) into latent space."""
        with nn.parameter_scope('encoder'):
            out = PF.convolution(x, self.num_hidden, (4, 4), stride=(2, 2),
                                 pad=(1, 1), name='conv_1', rng=self.rng)
            out = PF.batch_normalization(out, batch_stat=not test)
            out = F.relu(out)
            out = PF.convolution(out, self.num_hidden, (4, 4), stride=(2, 2),
                                 pad=(1, 1), name='conv_2', rng=self.rng)
            out = self.encoder_res_stack(out, test=test)
        return out

    def decoder(self, x, test):
        """Upsample latents by 4x (two strided deconvs) back to image space."""
        with nn.parameter_scope('decoder'):
            out = self.decoder_res_stack(x, test=test)
            out = F.relu(out)
            out = PF.deconvolution(out, self.num_hidden, (4, 4), stride=(2, 2),
                                   pad=(1, 1), name='deconv_1', rng=self.rng)
            out = PF.batch_normalization(out, batch_stat=not test)
            out = F.relu(out)
            out = PF.deconvolution(out, self.in_channels, (4, 4),
                                   stride=(2, 2), pad=(1, 1), name='deconv_2',
                                   rng=self.rng)
            out = F.tanh(out)
        return out

    def __call__(self, img, return_encoding_indices=False,
                 quantized_as_input=False, test=False):
        """Run the full model on `img`.

        When `quantized_as_input` is True, `img` is treated as already
        quantized latents and only decoded. When
        `return_encoding_indices` is True, returns the quantizer's
        `(encoding_indices, quantized)` pair. Otherwise returns
        `(loss, img_recon, perplexity)`.
        """
        with nn.parameter_scope('vq_vae'):
            if quantized_as_input:
                return self.decoder(img, test)
            z = self.encoder(img, test)
            # Project encoder output channels to the embedding dimension.
            z = PF.convolution(z, self.embedding_dim, (1, 1), stride=(1, 1))
            if return_encoding_indices:
                return self.vq(z, return_encoding_indices=True)
            loss, quantized, perplexity, encodings = self.vq(z)
            img_recon = self.decoder(quantized, test)
        return loss, img_recon, perplexity
<import_from_stmt>abc ABC abstractmethod<import_from_stmt>copy copy<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>coreir<as>pycoreir<import_from_stmt>magma.digital Digital<import_from_stmt>magma.array Array<import_from_stmt>magma.bits Bits<import_from_stmt>magma.backend.check_wiring_context check_wiring_context<import_from_stmt>magma.backend.coreir.coreir_utils attach_debug_info check_magma_interface constant_to_value get_inst_args get_module_of_inst magma_interface_to_coreir_module_type magma_port_to_coreir_port make_cparams map_genarg magma_name_to_coreir_select Slice <import_from_stmt>magma.compile_exception UnconnectedPortException<import_from_stmt>magma.interface InterfaceKind<import_from_stmt>magma.is_definition isdefinition<import_from_stmt>magma.linking get_linked_modules has_default_linked_module get_default_linked_module <import_from_stmt>magma.logging root_logger<import_from_stmt>magma.passes dependencies<import_from_stmt>magma.tuple Tuple<import_from_stmt>magma.backend.util get_codegen_debug_info<import_from_stmt>magma.clock is_clock_or_nested_clock<import_from_stmt>magma.passes.clock drive_all_undriven_clocks_in_value get_all_output_clocks_in_defn <import_from_stmt>magma.config get_debug_mode<import_from_stmt>magma.protocol_type MagmaProtocol MagmaProtocolMeta<import_from_stmt>magma.ref PortViewRef ArrayRef<import_from_stmt>magma.symbol_table SYMBOL_TABLE_EMPTY<line_sep># NOTE(rsetaluri): We do not need to set the level of this logger since it has
# already been done in backend/coreir/coreir_backend.py.
_logger = root_logger().getChild("coreir_backend")

# Map from magma generator circuit -> callback invoked with the first
# CoreIR instance elaborated from that generator (symbol-table support).
_generator_callbacks = {}


def _is_generator(ckt_or_inst):
    """Return True if the circuit/instance is a CoreIR generator."""
    return ckt_or_inst.coreir_genargs is not None


def _coreir_longname(magma_defn_or_decl, coreir_module_or_generator):
    """Reconstruct the CoreIR 'longname' of a module or generator.

    NOTE(rsetaluri): This is a proxy to exposing a pycoreir/coreir-c API
    to get a module's longname. This logic should be identical right
    now. Another caveat is that we don't elaborate the CoreIR generator
    at the magma level, so its longname needs to be dynamically
    reconstructed anyway.
    """
    namespace = coreir_module_or_generator.namespace.name
    prefix = "" if namespace == "global" else f"{namespace}_"
    longname = prefix + coreir_module_or_generator.name
    if isinstance(coreir_module_or_generator, pycoreir.Module):
        return longname
    assert isinstance(coreir_module_or_generator, pycoreir.Generator)
    # Generators append each genarg as "__<key><value>" to the longname.
    param_keys = coreir_module_or_generator.params.keys()
    for k in param_keys:
        v = magma_defn_or_decl.coreir_genargs[k]
        longname += f"__{k}{v}"
    return longname


def _collect_drivers(value):
    """
    Iterate over value to collect the child drivers, packing slices together
    """
    drivers = []
    start_idx = 0
    for i in range(1, len(value)):
        # If the next value item is not a reference to an array of bits where
        # the array matches the previous item and the index is incremented by
        # one, append the current slice to drivers (may introduce slices of
        # length 1)
        if not (isinstance(value[i].name, ArrayRef) and
                issubclass(value[i].name.array.T, Digital) and
                isinstance(value[i - 1].name, ArrayRef) and
                value[i].name.array is value[i - 1].name.array and
                value[i].name.index == value[i - 1].name.index + 1):
            drivers.append(value[start_idx:i])
            start_idx = i
    drivers.append(value[start_idx:])
    return drivers


def _unwrap(x):
    """Unwrap MagmaProtocol values/types to the underlying magma object."""
    if isinstance(x, MagmaProtocol):
        return x._get_magma_value_()
    if isinstance(x, MagmaProtocolMeta):
        return x._to_magma_()
    return x


class TransformerBase(ABC):
    """Base class for the one-shot compile transformers.

    `run()` recursively runs all children, then `run_self()`. Each
    transformer may be run at most once.
    """

    __MISSING = object()  # sentinel so `None` is a valid opt value

    def __init__(self, backend, opts):
        self.backend = backend
        self.opts = opts
        self.ran = False
        self._children = None

    def run(self):
        """Run children first (post-order), then this transformer."""
        if self.ran:
            raise RuntimeError("Can only run transformer once")
        self._children = self.children()
        for child in self._children:
            child.run()
        self.run_self()
        self.ran = True

    @abstractmethod
    def children(self):
        """Return the list of child transformers to run before self."""
        raise NotImplementedError()

    def run_self(self):
        """Hook run after all children; default is a no-op."""
        pass

    def get_opt(self, key, default=__MISSING):
        """Like dict.get, but raises KeyError when no default is given."""
        if default is TransformerBase.__MISSING:
            return self.opts[key]
        return self.opts.get(key, default)


class LeafTransformer(TransformerBase):
    """Transformer with no children."""

    def children(self):
        return []


class DefnOrDeclTransformer(TransformerBase):
    """Dispatches a magma definition or declaration to the right
    transformer (generator / declaration / wrapped / definition) and
    records the resulting CoreIR module."""

    def __init__(self, backend, opts, defn_or_decl):
        super().__init__(backend, opts)
        self.defn_or_decl = defn_or_decl
        self.coreir_module = None

    def children(self):
        if _is_generator(self.defn_or_decl):
            return [GeneratorTransformer(self.backend, self.opts,
                                         self.defn_or_decl)]
        try:
            coreir_module = self.backend.get_module(self.defn_or_decl)
            _logger.debug(f"{self.defn_or_decl} already compiled, skipping")
            self.coreir_module = coreir_module
            return []
        except KeyError:
            pass
        if not isdefinition(self.defn_or_decl):
            return [DeclarationTransformer(self.backend, self.opts,
                                           self.defn_or_decl)]
        wrapped = getattr(self.defn_or_decl, "wrappedModule", None)
        if wrapped and wrapped.context is self.backend.context:
            return [WrappedTransformer(self.backend, self.opts,
                                       self.defn_or_decl)]
        return [DefinitionTransformer(self.backend, self.opts,
                                      self.defn_or_decl)]

    def run_self(self):
        self._run_self_impl()
        self._generate_symbols()
        self._link_default_module()
        self._link_modules()

    def _link_default_module(self):
        if not has_default_linked_module(self.defn_or_decl):
            return
        target = get_default_linked_module(self.defn_or_decl)
        target = self.backend.get_module(target)
        self.coreir_module.link_default_module(target)

    def _link_modules(self):
        targets = get_linked_modules(self.defn_or_decl)
        for key, target in targets.items():
            target = self.backend.get_module(target)
            self.coreir_module.link_module(key, target)

    def _generate_symbols(self):
        if not self.get_opt("generate_symbols", False):
            return
        out_module_name = _coreir_longname(self.defn_or_decl,
                                           self.coreir_module)
        self.opts.get("symbol_table").set_module_name(
            self.defn_or_decl.name, out_module_name)

    def _run_self_impl(self):
        if self.coreir_module:  # already compiled (cache hit in children())
            return
        self.coreir_module = self._children[0].coreir_module
        self.backend.add_module(self.defn_or_decl, self.coreir_module)
        if isdefinition(self.defn_or_decl):
            # Cache the compiled module (and the libs it needed) on the
            # magma definition so recompiles can reuse it.
            self.defn_or_decl.wrappedModule = self.coreir_module
            libs = self.backend.included_libs()
            self.defn_or_decl.coreir_wrapped_modules_libs_used = libs


class GeneratorTransformer(TransformerBase):
    """Compiles a magma circuit backed by a CoreIR generator."""

    def __init__(self, backend, opts, defn_or_decl):
        super().__init__(backend, opts)
        self.defn_or_decl = defn_or_decl
        self.coreir_module = None

    def children(self):
        try:
            coreir_module = self.backend.get_module(self.defn_or_decl)
            _logger.debug(f"{self.defn_or_decl} already compiled, skipping")
            self.coreir_module = coreir_module
            return []
        except KeyError:
            pass
        assert not isdefinition(self.defn_or_decl)
        return [DeclarationTransformer(self.backend, self.opts,
                                       self.defn_or_decl)]

    def run_self(self):
        self._generate_symbols()
        if self.coreir_module is not None:
            return
        self.coreir_module = self._children[0].coreir_module

    def _generate_symbols(self):
        if not self.get_opt("generate_symbols", False):
            return
        global _generator_callbacks

        # Port names are only known once the generator is elaborated to a
        # concrete instance, so defer symbol emission to a callback that
        # DefinitionTransformer invokes with the first such instance.
        def _callback(coreir_inst):
            magma_names = list(self.defn_or_decl.interface.ports.keys())
            coreir_names = list(k for k, _ in coreir_inst.module.type.items())
            assert len(magma_names) == len(coreir_names)
            for magma_name, coreir_name in zip(magma_names, coreir_names):
                self.opts.get("symbol_table").set_port_name(
                    self.defn_or_decl.name, magma_name, coreir_name)

        assert self.defn_or_decl not in _generator_callbacks
        _generator_callbacks[self.defn_or_decl] = _callback


class InstanceTransformer(LeafTransformer):
    """Prepares a deferred constructor (`coreir_inst_gen`) that adds this
    magma instance to a CoreIR module definition."""

    def __init__(self, backend, opts, inst, defn):
        super().__init__(backend, opts)
        self.inst = inst
        self.defn = defn
        self.coreir_inst_gen = None

    def run_self(self):
        self.coreir_inst_gen = self.run_self_impl()

    def run_self_impl(self):
        _logger.debug(
            f"Compiling instance {(self.inst.name, type(self.inst).name)}")
        defn = type(self.inst)
        if hasattr(self.inst, "namespace"):
            lib = self.backend.get_lib(self.inst.namespace)
        else:
            lib = self.backend.get_lib(self.inst.coreir_lib)
            if self.inst.coreir_lib == "global":
                lib = self.get_opt("user_namespace", lib)
        if not _is_generator(self.inst):
            module = get_module_of_inst(self.backend.context, self.inst, lib)
            args = get_inst_args(self.inst)
            args = self.backend.context.new_values(args)
            return lambda m: m.add_module_instance(self.inst.name, module,
                                                   args)
        generator = lib.generators[defn.coreir_name]
        config_args = {k: v for k, v in self.inst.coreir_configargs.items()}
        config_args = self.backend.context.new_values(config_args)
        gen_args = {k: map_genarg(self.backend.context, v)
                    for k, v in defn.coreir_genargs.items()}
        gen_args = self.backend.context.new_values(gen_args)
        return lambda m: m.add_generator_instance(self.inst.name, generator,
                                                  gen_args, config_args)


class WrappedTransformer(LeafTransformer):
    """Reuses a previously compiled (cached) CoreIR module."""

    def __init__(self, backend, opts, defn):
        super().__init__(backend, opts)
        self.defn = defn
        self.coreir_module = self.defn.wrappedModule
        self.backend.include_lib_or_libs(
            self.defn.coreir_wrapped_modules_libs_used)


class DefinitionTransformer(TransformerBase):
    """Compiles a magma definition: declares the module, compiles its
    dependencies and instances, then wires everything up."""

    def __init__(self, backend, opts, defn):
        super().__init__(backend, opts)
        self.defn = defn
        self.coreir_module = None
        self.decl_tx = DeclarationTransformer(self.backend, self.opts,
                                              self.defn)
        self.inst_txs = {
            inst: InstanceTransformer(self.backend, self.opts, inst,
                                      self.defn)
            for inst in self.defn.instances}
        self.clocks = get_all_output_clocks_in_defn(defn)
        self._constant_cache = {}

    def children(self):
        children = []
        if not self.get_opt("skip_instance_graph", False):
            deps = dependencies(self.defn, include_self=False)
            opts = self.opts.copy()
            # Dependencies are compiled flat; don't re-walk their graphs.
            opts.update({"skip_instance_graph": True})
            children += [DefnOrDeclTransformer(self.backend, opts, dep)
                         for dep in deps]
        children += [self.decl_tx]
        children += self.inst_txs.values()
        return children

    def run_self(self):
        _logger.debug(f"Compiling definition {self.defn}")
        self.coreir_module = self.decl_tx.coreir_module
        if self.defn.inline_verilog_strs:
            inline_verilog = "\n\n".join(
                x[0] for x in self.defn.inline_verilog_strs)
            connect_references = {}
            for _, inline_value_map in self.defn.inline_verilog_strs:
                for key, value in inline_value_map.items():
                    connect_references[key] = magma_port_to_coreir_port(value)
            self.coreir_module.add_metadata("inline_verilog", json.dumps(
                {"str": inline_verilog,
                 "connect_references": connect_references}))
        for name, module in self.defn.compiled_bind_modules.items():
            self.backend.bind_module(name, module)
        self.coreir_module.definition = self.get_coreir_defn()

    def _generate_symbols(self, coreir_insts):
        if not self.get_opt("generate_symbols", False):
            return
        for inst, coreir_inst in coreir_insts.items():
            self.get_opt("symbol_table").set_instance_name(
                self.defn.name, inst.name,
                (SYMBOL_TABLE_EMPTY, coreir_inst.name))
            self.get_opt("symbol_table").set_instance_type(
                self.defn.name, inst.name, type(inst).name)

    def get_coreir_defn(self):
        coreir_defn = self.coreir_module.new_definition()
        coreir_insts = {inst: self.inst_txs[inst].coreir_inst_gen(coreir_defn)
                        for inst in self.defn.instances}
        # Call generator callback if necessary.
        global _generator_callbacks
        for inst, coreir_inst in coreir_insts.items():
            try:
                callback = _generator_callbacks.pop(type(inst))
            except KeyError:
                continue
            callback(coreir_inst)
        self._generate_symbols(coreir_insts)
        # If this module was imported from verilog, do not go through the
        # general module construction flow. Instead just attach the verilog
        # source as metadata and return the module.
        if hasattr(self.defn, "verilogFile") and self.defn.verilogFile:
            metadata = json.dumps({"verilog_string": self.defn.verilogFile})
            self.coreir_module.add_metadata("verilog", metadata)
            return coreir_defn
        if hasattr(self.defn, "verilog") and self.defn.verilog:
            metadata = json.dumps({"verilog_body": self.defn.verilog})
            self.coreir_module.add_metadata("verilog", metadata)
            return coreir_defn
        if self.defn.coreir_lib is not None:
            self.backend.include_lib_or_libs(self.defn.coreir_lib)
        for name, port in self.defn.interface.ports.items():
            _logger.debug(f"{name}, {port}, {port.is_output()}")
        for inst, coreir_inst in coreir_insts.items():
            if get_codegen_debug_info() and getattr(inst, "debug_info",
                                                    False):
                attach_debug_info(coreir_inst, inst.debug_info)
            if getattr(inst, "coreir_metadata"):
                for k, v in inst.coreir_metadata.items():
                    coreir_inst.add_metadata(k, json.dumps(v))
        for inst in coreir_insts:
            for name, port in inst.interface.ports.items():
                self.connect_non_outputs(coreir_defn, port)
        for port in self.defn.interface.ports.values():
            self.connect_non_outputs(coreir_defn, port)
        return coreir_defn

    def connect_non_outputs(self, module_defn, port):
        # Recurse into non input types that may contain inout children.
        if isinstance(port, Tuple) and not port.is_input() or \
                isinstance(port, Array) and not port.T.is_input():
            for elem in port:
                self.connect_non_outputs(module_defn, elem)
        elif not port.is_output():
            self.connect(module_defn, port, port.trace())

    def get_source(self, port, value, module_defn):
        """Resolve `value` into a CoreIR source wireable for `port`.

        Returns None when the connection has already been made
        element-wise (anonymous arrays/tuples).
        """
        port = _unwrap(port)
        value = _unwrap(value)
        if isinstance(value, pycoreir.Wireable):
            return value
        if isinstance(value, Slice):
            return module_defn.select(value.get_coreir_select())
        if isinstance(value, Bits) and value.const():
            return self._const_instance(value, len(value), module_defn)
        if value.anon() and isinstance(value, Array):
            drivers = _collect_drivers(value)
            offset = 0
            for d in drivers:
                d = _unwrap(d)
                if len(d) == 1:
                    # _collect_drivers will introduce a slice of length 1 for
                    # non-slices, so we index them here with 0 to unpack the
                    # extra array dimension
                    self.connect(module_defn, port[offset], d[0])
                else:
                    self.connect(module_defn,
                                 Slice(port, offset, offset + len(d)),
                                 Slice(d[0].name.array, d[0].name.index,
                                       d[-1].name.index + 1))
                offset += len(d)
            return None
        if isinstance(value, Tuple) and value.anon():
            for p, v in zip(port, value):
                self.connect(module_defn, p, v)
            return None
        if value.const():
            return self._const_instance(value, None, module_defn)
        if isinstance(value.name, PortViewRef):
            return module_defn.select(
                magma_name_to_coreir_select(value.name))
        return module_defn.select(magma_port_to_coreir_port(value))

    def connect(self, module_defn, port, value):
        if value is None and is_clock_or_nested_clock(type(port)):
            with self.defn.open():
                if not drive_all_undriven_clocks_in_value(port, self.clocks):
                    # No default clock
                    raise UnconnectedPortException(port)
            value = port.trace()
        if value is None:
            if port.is_inout():
                return  # skip inouts because they might be conn. as an input.
            if getattr(self.defn, "_ignore_undriven_", False):
                return
            raise UnconnectedPortException(port)
        check_wiring_context(port, value)
        source = self.get_source(port, value, module_defn)
        if not source:
            return
        sink = module_defn.select(magma_port_to_coreir_port(port))
        module_defn.connect(source, sink)
        if get_codegen_debug_info() and getattr(port, "debug_info", False):
            attach_debug_info(module_defn, port.debug_info, source, sink)

    def _const_instance(self, constant, num_bits, module_defn):
        """Get (or create and cache) a constant driver instance.

        `num_bits is None` selects the single-bit corebit const; otherwise
        the coreir const generator of width `num_bits` is used.
        """
        value = constant_to_value(constant)
        key = (value, num_bits)
        try:
            return self._constant_cache[key]
        except KeyError:
            pass
        if num_bits is None:
            config = self.backend.context.new_values({"value": bool(value)})
            name = f"bit_const_{value}_{num_bits}"
            mod = self.backend.get_lib("corebit").modules["const"]
            module_defn.add_module_instance(name, mod, config)
        else:
            config = self.backend.context.new_values({"value": value})
            name = f"const_{value}_{num_bits}"
            gen = self.backend.get_lib("coreir").generators["const"]
            gen_args = self.backend.context.new_values({"width": num_bits})
            module_defn.add_generator_instance(name, gen, gen_args, config)
        out = module_defn.select(f"{name}.out")
        return self._constant_cache.setdefault(key, out)


class DeclarationTransformer(LeafTransformer):
    """Declares (but does not define) a magma circuit as a CoreIR module."""

    def __init__(self, backend, opts, decl):
        super().__init__(backend, opts)
        self.decl = decl
        self.coreir_module = None

    def run_self(self):
        self.coreir_module = self._run_self_impl()
        self._generate_symbols()

    def _generate_symbols(self):
        if not self.get_opt("generate_symbols", False):
            return
        if _is_generator(self.decl):
            # Generators emit port symbols lazily (see GeneratorTransformer).
            return
        magma_names = list(self.decl.interface.ports.keys())
        coreir_names = list(k for k, _ in self.coreir_module.type.items())
        assert len(magma_names) == len(coreir_names)
        for magma_name, coreir_name in zip(magma_names, coreir_names):
            self.opts.get("symbol_table").set_port_name(
                self.decl.name, magma_name, coreir_name)

    def _run_self_impl(self):
        # NOTE: removed a dead no-op assignment (`self.decl = self.decl`)
        # that was present here.
        _logger.debug(f"Compiling declaration {self.decl}")
        if self.decl.coreir_lib is not None:
            self.backend.include_lib_or_libs(self.decl.coreir_lib)
        # These libraries are already available by default in coreir, so we
        # don't need declarations.
        if self.decl.coreir_lib in ["coreir", "corebit", "commonlib",
                                    "memory"]:
            lib = self.backend.get_lib(self.decl.coreir_lib)
            if not _is_generator(self.decl):
                return lib.modules[self.decl.coreir_name]
            return lib.generators[self.decl.coreir_name]
        try:
            coreir_module = self.backend.get_module(self.decl)
            _logger.debug(f"{self.decl} already compiled, skipping")
            return coreir_module
        except KeyError:
            pass
        if get_debug_mode():
            check_magma_interface(self.decl.interface)
        module_type = magma_interface_to_coreir_module_type(
            self.backend.context, self.decl.interface)
        if isinstance(self.decl.interface, InterfaceKind):
            module_type = self.backend.context.Flip(module_type)
        kwargs = {}
        if hasattr(self.decl, "coreir_config_param_types"):
            param_types = self.decl.coreir_config_param_types
            kwargs["cparams"] = make_cparams(self.backend.context,
                                             param_types)
        if hasattr(self.decl, "namespace"):
            # Allows users to choose namespace explicitly with
            #     class MyCircuit(m.Circuit):
            #         namespace = "foo"
            # overrides user_namespace setting
            namespace = self.backend.get_lib(self.decl.namespace)
        else:
            namespace = self.get_opt("user_namespace",
                                     self.backend.context.global_namespace)
        coreir_module = namespace.new_module(self.decl.coreir_name,
                                             module_type, **kwargs)
        if get_codegen_debug_info() and self.decl.debug_info:
            attach_debug_info(coreir_module, self.decl.debug_info)
        for key, value in self.decl.coreir_metadata.items():
            coreir_module.add_metadata(key, json.dumps(value))
        return coreir_module
<import_from_stmt>typing Dict List Tuple Union<import_from_stmt>collections OrderedDict<import_from_stmt>functools lru_cache<import_stmt>warnings<import_from_stmt>torch.utils.data BatchSampler DataLoader<import_from_stmt>catalyst.core.callback Callback CallbackWrapper IBackwardCallback ICriterionCallback IOptimizerCallback ISchedulerCallback <import_from_stmt>catalyst.typing RunnerCriterion RunnerOptimizer RunnerScheduler<def_stmt>get_original_callback callback:Callback<arrow>Callback<block_start>"""Docs."""<while_stmt>isinstance(callback CallbackWrapper)<block_start>callback=callback.callback<block_end><return>callback<block_end><def_stmt>callback_isinstance callback:Callback class_or_tuple<arrow>bool<block_start>"""Check if callback is the same type as required ``class_or_tuple``
Args:
callback: callback to check
class_or_tuple: class_or_tuple to compare with
Returns:
bool: true if first object has the required type
"""<line_sep>callback=get_original_callback(callback)<line_sep><return>isinstance(callback class_or_tuple)<block_end><def_stmt>sort_callbacks_by_order callbacks:Union[List Dict OrderedDict]<arrow>"OrderedDict[str, Callback]"<block_start>"""Creates an sequence of callbacks and sort them.
Args:
callbacks: either list of callbacks or ordered dict
Returns:
sequence of callbacks sorted by ``callback order``
Raises:
TypeError: if `callbacks` is out of `None`, `dict`, `OrderedDict`, `list`
"""<if_stmt>callbacks<is><none><block_start>output=OrderedDict()<block_end><elif_stmt>isinstance(callbacks (dict OrderedDict))<block_start>output=[(k v)<for>k,v callbacks.items()]<line_sep>output=sorted(output key=<lambda>x:x[1].order)<line_sep>output=OrderedDict(output)<block_end><elif_stmt>isinstance(callbacks list)<block_start>output=sorted(callbacks key=<lambda>x:x.order)<line_sep>output=OrderedDict([(i value)<for>i,value enumerate(output)])<block_end><else_stmt><block_start><raise>TypeError(f"Callbacks must be either Dict/OrderedDict or list, "<concat>f"got {type(callbacks)}")<block_end><return>output<block_end>@lru_cache(maxsize=42)<def_stmt>is_str_intersections origin_string:str strings:Tuple<block_start>"""Docs."""<line_sep><return>any(x<in>origin_string<for>x strings)<block_end><def_stmt>get_loader_batch_size loader:DataLoader<block_start>"""Docs."""<line_sep>batch_size=loader.batch_size<if_stmt>batch_size<is><not><none><block_start><return>batch_size<block_end>batch_size=loader.batch_sampler.batch_size<if_stmt>batch_size<is><not><none><block_start><return>batch_size<block_end><raise>NotImplementedError("No `batch_size` found,"<concat>"please specify it with `loader.batch_size`,"<concat>"or `loader.batch_sampler.batch_size`")<block_end><def_stmt>get_loader_num_samples loader:DataLoader<block_start>"""Docs."""<line_sep>batch_size=get_loader_batch_size(loader)<if_stmt>isinstance(loader.batch_sampler BatchSampler)# pytorch default item-based samplers
<block_start><if_stmt>loader.drop_last<block_start><return>(len(loader.dataset)<floordiv>batch_size)<times>batch_size<block_end><else_stmt><block_start><return>len(loader.dataset)<block_end><block_end><else_stmt># pytorch batch-based samplers
<block_start><return>len(loader)<times>batch_size<block_end><block_end><def_stmt>check_callbacks callbacks:OrderedDict criterion:RunnerCriterion=<none> optimizer:RunnerOptimizer=<none> scheduler:RunnerScheduler=<none> <block_start>"""Docs."""<line_sep>callback_exists=<lambda>callback_fn:any(callback_isinstance(x callback_fn)<for>x callbacks.values())<if_stmt>criterion<is><not><none><and><not>callback_exists(ICriterionCallback)<block_start>warnings.warn("No ``ICriterionCallback/CriterionCallback`` were found "<concat>"while runner.criterion is not None."<concat>"Do you compute the loss during ``runner.handle_batch``?")<block_end><if_stmt>(criterion<is><not><none><or>optimizer<is><not><none>)<and><not>callback_exists(IBackwardCallback)<block_start>warnings.warn("No ``IBackwardCallback/BackwardCallback`` were found "<concat>"while runner.criterion/optimizer is not None."<concat>"Do you backward the loss during ``runner.handle_batch``?")<block_end><if_stmt>optimizer<is><not><none><and><not>callback_exists(IOptimizerCallback)<block_start>warnings.warn("No ``IOptimizerCallback/OptimizerCallback`` were found "<concat>"while runner.optimizer is not None."<concat>"Do run optimisation step pass during ``runner.handle_batch``?")<block_end><if_stmt>scheduler<is><not><none><and><not>callback_exists(ISchedulerCallback)<block_start>warnings.warn("No ``ISchedulerCallback/SchedulerCallback`` were found "<concat>"while runner.scheduler is not None."<concat>"Do you make scheduler step during ``runner.handle_batch``?")<block_end><block_end>__all__=["get_original_callback" "callback_isinstance" "check_callbacks" "is_str_intersections" "get_loader_batch_size" "get_loader_num_samples" "sort_callbacks_by_order" ]<line_sep> |
<import_from_stmt>sklearn.neural_network MLPClassifier<import_from_stmt>commons variables<import_from_stmt>commons tools<import_from_stmt>scipy.stats mode<def_stmt>learn x y test_x<block_start>(temp_x temp_y)=tools.simple_negative_sample(x y variables.select_rate_nn)<line_sep>clf=MLPClassifier(hidden_layer_sizes=(variables.unit_num_nn ) random_state=2017 max_iter=2000 alpha=variables.alpha_nn learning_rate_init=variables.learning_rate_init_nn solver="adam" activation="relu").fit(temp_x temp_y)<line_sep>prediction_list=clf.predict(test_x)<line_sep>prediction_list_prob=clf.predict_proba(test_x)<line_sep><return>prediction_list prediction_list_prob<block_end> |
<import_from_stmt>getters *<import_from_stmt>parsers *<def_stmt>main <block_start>data=get_data()<line_sep>parse_top_players(data 'data/2020-21')<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# -*- coding: UTF-8 -*-
<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_array_almost_equal<import_from_stmt>spectral_clustering.spectral_embedding_ spectral_embedding<def_stmt>assert_first_col_equal maps<block_start>constant_vec=[1]<times>maps.shape[0]<line_sep>assert_array_almost_equal(maps[: 0]/maps[0 0] constant_vec)<block_end><def_stmt>test_spectral_embedding <block_start>"""
根据spectral embedding的定义,第一列的数据是恒等的
"""<line_sep>adjacency=np.array([[0. 0.8 0.9 0.] [0.8 0. 0. 0.] [0.9 0. 0. 1.] [0. 0. 1. 0.]])<line_sep>maps=spectral_embedding(adjacency n_components=2 drop_first=<false> eigen_solver="arpack")<line_sep>assert_first_col_equal(maps)<line_sep>maps_1=spectral_embedding(adjacency n_components=2 drop_first=<false> eigen_solver="lobpcg")<line_sep>assert_first_col_equal(maps_1)<block_end> |
a=str(input())<line_sep>b={'0':0 '1':0 '2':0 '3':0 '4':0 '5':0 '6':0 '7':0 '8':0 '9':0}<for_stmt>i a<block_start>b[i]=b[i]+1<block_end><for_stmt>i range(len(b))<block_start><if_stmt>b[str(i)]<eq>0<block_start><continue><block_end>print(str(i)+':'+str(b[str(i)]))<block_end> |
<import_from_stmt>.browser *<import_from_stmt>.cells *<import_from_stmt>.computation *<import_from_stmt>.statutils *<line_sep> |
<import_from_stmt>pymoo.core.mutation Mutation<class_stmt>NoMutation(Mutation)<block_start><def_stmt>_do self problem X **kwargs<block_start><return>X<block_end><block_end> |
# Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
<import_stmt>os<import_from_stmt>setuptools setup<def_stmt>read fname<block_start><return>open(os.path.join(os.path.dirname(__file__) fname)).read()<block_end>setup(name="fusesoc" packages=["fusesoc" "fusesoc.capi2" "fusesoc.provider"] use_scm_version={"relative_to":__file__ "write_to":"fusesoc/version.py" } author="<NAME>" author_email="<EMAIL>" description=("FuseSoC is a package manager and a set of build tools for HDL "<concat>"(Hardware Description Language) code.") license="BSD-2-Clause" keywords=["VHDL" "verilog" "hdl" "rtl" "synthesis" "FPGA" "simulation" "Xilinx" "Altera" ] url="https://github.com/olofk/fusesoc" long_description=read("README.md") long_description_content_type="text/markdown" classifiers=["Development Status :: 5 - Production/Stable" "Topic :: Utilities" "Topic :: Software Development :: Build Tools" "License :: OSI Approved :: BSD License" ] entry_points={"console_scripts":["fusesoc = fusesoc.main:main"]} setup_requires=["setuptools_scm" ] install_requires=["edalize>=0.2.3" "pyparsing" "pyyaml" "simplesat>=0.8.0" ] # Supported Python versions: 3.6+
python_requires=">=3.6, <4" )<line_sep> |
<import_from_stmt>._flight_availabilities FlightAvailabilities<line_sep>__all__=['FlightAvailabilities']<line_sep> |
<import_from_stmt>django.conf.urls url<import_from_stmt>. views<line_sep>urlpatterns=[url(r'^remove/(?P<obj_type>\S+)/(?P<obj_id>\S+)/$' views.remove_comment name='crits-comments-views-remove_comment') url(r'^(?P<method>\S+)/(?P<obj_type>\S+)/(?P<obj_id>\S+)/$' views.add_update_comment name='crits-comments-views-add_update_comment') url(r'^activity/$' views.activity name='crits-comments-views-activity') url(r'^activity/(?P<atype>\S+)/(?P<value>\S+)/$' views.activity name='crits-comments-views-activity') url(r'^activity/get_new_comments/$' views.get_new_comments name='crits-comments-views-get_new_comments') url(r'^search/(?P<stype>[A-Za-z0-9\-\._]+)/(?P<sterm>.+?)/$' views.comment_search name='crits-comments-views-comment_search') url(r'^list/$' views.comments_listing name='crits-comments-views-comments_listing') url(r'^list/(?P<option>\S+)/$' views.comments_listing name='crits-comments-views-comments_listing') ]<line_sep> |
# -*- coding: utf-8 -*-
<import_stmt>mock<import_stmt>string<import_stmt>unittest<import_stmt>random<import_from_stmt>pprint pprint<import_from_stmt>bitshares BitShares<import_from_stmt>bitshares.account Account<import_from_stmt>bitsharesbase.operationids getOperationNameForId<import_from_stmt>bitshares.amount Amount<import_from_stmt>bitsharesbase.account PrivateKey<import_from_stmt>bitsharesbase.asset_permissions todict<import_from_stmt>bitshares.instance set_shared_bitshares_instance<import_from_stmt>.fixtures fixture_data bitshares<class_stmt>Testcases(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>fixture_data()<block_end><def_stmt>test_connect self<block_start>bitshares.connect()<block_end><def_stmt>test_set_default_account self<block_start>bitshares.set_default_account("init0")<block_end><def_stmt>test_info self<block_start>info=bitshares.info()<for_stmt>key ["current_witness" "head_block_id" "head_block_number" "id" "last_irreversible_block_num" "next_maintenance_time" "recently_missed_count" "time" ]<block_start>self.assertTrue(key<in>info)<block_end><block_end><def_stmt>test_finalizeOps self<block_start>tx1=bitshares.new_tx()<line_sep>tx2=bitshares.new_tx()<line_sep>bitshares.transfer("init1" 1 "BTS" append_to=tx1)<line_sep>bitshares.transfer("init1" 2 "BTS" append_to=tx2)<line_sep>bitshares.transfer("init1" 3 "BTS" append_to=tx1)<line_sep>tx1=tx1.json()<line_sep>tx2=tx2.json()<line_sep>ops1=tx1["operations"]<line_sep>ops2=tx2["operations"]<line_sep>self.assertEqual(len(ops1) 2)<line_sep>self.assertEqual(len(ops2) 1)<block_end><def_stmt>test_transfer self<block_start>tx=bitshares.transfer("1.2.101" 1.33 "BTS" memo="Foobar" account="init0")<line_sep>self.assertEqual(getOperationNameForId(tx["operations"][0][0]) "transfer")<line_sep>op=tx["operations"][0][1]<line_sep>self.assertIn("memo" op)<line_sep>self.assertEqual(op["from"] "1.2.100")<line_sep>self.assertEqual(op["to"] "1.2.101")<line_sep>amount=Amount(op["amount"])<line_sep>self.assertEqual(float(amount) 
1.33)<block_end><def_stmt>test_create_account self<block_start>name="".join(random.choice(string.ascii_lowercase)<for>_ range(12))<line_sep>key1=PrivateKey()<line_sep>key2=PrivateKey()<line_sep>key3=PrivateKey()<line_sep>key4=PrivateKey()<line_sep>tx=bitshares.create_account(name registrar="init0" # 1.2.100
referrer="init1" # 1.2.101
referrer_percent=33 owner_key=format(key1.pubkey "BTS") active_key=format(key2.pubkey "BTS") memo_key=format(key3.pubkey "BTS") additional_owner_keys=[format(key4.pubkey "BTS")] additional_active_keys=[format(key4.pubkey "BTS")] additional_owner_accounts=["committee-account"] # 1.2.0
additional_active_accounts=["committee-account"] proxy_account="init0" storekeys=<false> )<line_sep>self.assertEqual(getOperationNameForId(tx["operations"][0][0]) "account_create")<line_sep>op=tx["operations"][0][1]<line_sep>role="active"<line_sep>self.assertIn(format(key4.pubkey "BTS") [x[0]<for>x op[role]["key_auths"]])<line_sep>self.assertIn(format(key4.pubkey "BTS") [x[0]<for>x op[role]["key_auths"]])<line_sep>self.assertIn("1.2.0" [x[0]<for>x op[role]["account_auths"]])<line_sep>role="owner"<line_sep>self.assertIn(format(key4.pubkey "BTS") [x[0]<for>x op[role]["key_auths"]])<line_sep>self.assertIn(format(key4.pubkey "BTS") [x[0]<for>x op[role]["key_auths"]])<line_sep>self.assertIn("1.2.0" [x[0]<for>x op[role]["account_auths"]])<line_sep>self.assertEqual(op["options"]["voting_account"] "1.2.100")<line_sep>self.assertEqual(op["registrar"] "1.2.100")<line_sep>self.assertEqual(op["referrer"] "1.2.101")<line_sep>self.assertEqual(op["referrer_percent"] 33<times>100)<block_end><def_stmt>test_create_asset self<block_start>symbol="FOOBAR"<line_sep>precision=7<line_sep>max_supply=100000<line_sep>description="Test asset"<line_sep>is_bitasset=<true><line_sep>market_fee_percent=0.1<line_sep>max_market_fee=10<line_sep>blacklist_authorities=["init1"]<line_sep>blacklist_authorities_ids=[Account(a)["id"]<for>a blacklist_authorities]<line_sep>blacklist_markets=["BTS"]<line_sep>blacklist_markets_ids=["1.3.0"]<line_sep>permissions={"charge_market_fee":<true> "white_list":<true> "override_authority":<true> "transfer_restricted":<true> "disable_force_settle":<true> "global_settle":<true> "disable_confidential":<true> "witness_fed_asset":<true> "committee_fed_asset":<true> }<line_sep>flags={"charge_market_fee":<false> "white_list":<false> "override_authority":<false> "transfer_restricted":<false> "disable_force_settle":<false> "global_settle":<false> "disable_confidential":<false> "witness_fed_asset":<false> "committee_fed_asset":<false> }<line_sep>tx=bitshares.create_asset(symbol 
precision max_supply market_fee_percent=market_fee_percent max_market_fee=max_market_fee description=description is_bitasset=is_bitasset blacklist_authorities=blacklist_authorities blacklist_markets=blacklist_markets permissions=permissions flags=flags )<line_sep>self.assertEqual(getOperationNameForId(tx["operations"][0][0]) "asset_create")<line_sep>op=tx["operations"][0][1]<line_sep>self.assertEqual(op["issuer"] "1.2.100")<line_sep>self.assertEqual(op["symbol"] symbol)<line_sep>self.assertEqual(op["precision"] precision)<line_sep>self.assertEqual(op["common_options"]["max_supply"] int(max_supply<times>10<power>precision))<line_sep>self.assertEqual(op["common_options"]["market_fee_percent"] int(market_fee_percent<times>100))<line_sep>self.assertEqual(op["common_options"]["max_market_fee"] int(max_market_fee<times>10<power>precision) )<line_sep>self.assertEqual(op["common_options"]["description"] description)<line_sep>self.assertEqual(op["common_options"]["blacklist_authorities"] blacklist_authorities_ids)<line_sep>self.assertEqual(op["common_options"]["blacklist_markets"] blacklist_markets_ids)<line_sep>self.assertEqual(todict(op["common_options"]["issuer_permissions"]) permissions)<line_sep>self.assertEqual(todict(op["common_options"]["flags"]) flags)<block_end><def_stmt>test_weight_threshold self<block_start>auth={"account_auths":[["1.2.0" "1"]] "extensions":[] "key_auths":[["<KEY>" 1] ["<KEY>" 1] ] "weight_threshold":3 }<line_sep># threshold fine
bitshares._test_weights_treshold(auth)<line_sep>auth={"account_auths":[["1.2.0" "1"]] "extensions":[] "key_auths":[["<KEY>" 1] ["BTS7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv" 1] ] "weight_threshold":4 }<line_sep># too high
<with_stmt>self.assertRaises(ValueError)<block_start>bitshares._test_weights_treshold(auth)<block_end><block_end><def_stmt>test_allow self<block_start>tx=bitshares.allow("BTS<KEY>" weight=1 threshold=1 permission="owner" )<line_sep>self.assertEqual(getOperationNameForId(tx["operations"][0][0]) "account_update")<line_sep>op=tx["operations"][0][1]<line_sep>self.assertIn("owner" op)<line_sep>self.assertIn(["B<KEY>" "1"] op["owner"]["key_auths"] )<line_sep>self.assertEqual(op["owner"]["weight_threshold"] 1)<block_end><def_stmt>test_disallow self<block_start><with_stmt>self.assertRaisesRegex(ValueError ".*Changes nothing.*")<block_start>bitshares.disallow("BTS<KEY>" weight=1 threshold=1 permission="owner" )<block_end><with_stmt>self.assertRaisesRegex(ValueError "Cannot have threshold of 0")<block_start>bitshares.disallow("BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8B<KEY>" weight=1 threshold=1 permission="owner" )<block_end>bitshares.disallow("BTS5i8bEmtnN4fP4jAsBe17z9CCuQcHLkRyTuRZXYZeN2kVCL1sXa" weight=1 threshold=1 permission="active" )<block_end><def_stmt>test_update_memo_key self<block_start>tx=bitshares.update_memo_key("<KEY>")<line_sep>self.assertEqual(getOperationNameForId(tx["operations"][0][0]) "account_update")<line_sep>op=tx["operations"][0][1]<line_sep>self.assertEqual(op["new_options"]["memo_key"] "<KEY>" )<block_end><def_stmt>test_approvewitness self<block_start>tx=bitshares.approvewitness("1.6.1")<line_sep>self.assertEqual(getOperationNameForId(tx["operations"][0][0]) "account_update")<line_sep>op=tx["operations"][0][1]<line_sep>self.assertIn("1:0" op["new_options"]["votes"])<block_end><def_stmt>test_approvecommittee self<block_start>tx=bitshares.approvecommittee("1.5.0")<line_sep>self.assertEqual(getOperationNameForId(tx["operations"][0][0]) "account_update")<line_sep>op=tx["operations"][0][1]<line_sep>self.assertIn("0:11" op["new_options"]["votes"])<block_end><block_end> |
<import_from_stmt>kivy.uix.widget Widget<class_stmt>MyWidget(Widget)<block_start><def_stmt>__init__ self **kwargs<block_start>super(MyWidget self).__init__(**kwargs)<def_stmt>callback *l<block_start>self.x=self.y<block_end>self.fbind('y' callback)<line_sep>callback()<block_end><block_end> |
<import_from_stmt>django.conf.urls url<import_from_stmt>api.guids views<line_sep>app_name='osf'<line_sep>urlpatterns=[url(r'^(?P<guids>\w+)/$' views.GuidDetail.as_view() name=views.GuidDetail.view_name) ]<line_sep> |
<import_stmt>torch<import_from_stmt>torch Tensor<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>.. BaseModel register_model<import_from_stmt>.knowledge_base KGEModel<line_sep>@register_model("rotate")<class_stmt>RotatE(KGEModel)<block_start>r"""
Implementation of RotatE model from the paper `"RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space"
<https://openreview.net/forum?id=HkgEQnRqYQ>`.
borrowed from `KnowledgeGraphEmbedding<https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding>`
"""<def_stmt>__init__ self nentity nrelation hidden_dim gamma double_entity_embedding=<false> double_relation_embedding=<false><block_start>super(RotatE self).__init__(nentity nrelation hidden_dim gamma <true> double_relation_embedding)<block_end><def_stmt>score self head relation tail mode<block_start>pi=3.14159265358979323846<line_sep>re_head,im_head=torch.chunk(head 2 dim=2)<line_sep>re_tail,im_tail=torch.chunk(tail 2 dim=2)<line_sep># Make phases of relations uniformly distributed in [-pi, pi]
phase_relation=relation/(self.embedding_range.item()/pi)<line_sep>re_relation=torch.cos(phase_relation)<line_sep>im_relation=torch.sin(phase_relation)<if_stmt>mode<eq>"head-batch"<block_start>re_score=re_relation<times>re_tail+im_relation<times>im_tail<line_sep>im_score=re_relation<times>im_tail-im_relation<times>re_tail<line_sep>re_score=re_score-re_head<line_sep>im_score=im_score-im_head<block_end><else_stmt><block_start>re_score=re_head<times>re_relation-im_head<times>im_relation<line_sep>im_score=re_head<times>im_relation+im_head<times>re_relation<line_sep>re_score=re_score-re_tail<line_sep>im_score=im_score-im_tail<block_end>score=torch.stack([re_score im_score] dim=0)<line_sep>score=score.norm(dim=0)<line_sep>score=self.gamma.item()-score.sum(dim=2)<line_sep><return>score<block_end><block_end> |
<import_from_future_stmt> print_function division absolute_import<import_from_stmt>fontTools.misc.py23 *<import_from_stmt>.T_S_I_V_ table_T_S_I_V_<class_stmt>table_T_S_I_B_(table_T_S_I_V_)<block_start><pass><block_end> |
<import_from_stmt>datadog_checks.base OpenMetricsBaseCheckV2<import_from_stmt>.metrics METRIC_MAP<class_stmt>CalicoCheck(OpenMetricsBaseCheckV2)<block_start><def_stmt>__init__ self name init_config instances=<none><block_start>super(CalicoCheck self).__init__(name init_config instances )<block_end><def_stmt>get_default_config self<block_start><return>{'namespace':'calico' 'metrics':[METRIC_MAP]}<block_end><block_end> |
<import_from_stmt>django.test TestCase<import_from_stmt>dojo.models Test<import_from_stmt>dojo.tools.cloudsploit.parser CloudsploitParser<class_stmt>TestCloudsploitParser(TestCase)<block_start><def_stmt>test_cloudsploit_parser_with_no_vuln_has_no_findings self<block_start>testfile=open("dojo/unittests/scans/cloudsploit/cloudsploit_zero_vul.json")<line_sep>parser=CloudsploitParser()<line_sep>findings=parser.get_findings(testfile Test())<line_sep>testfile.close()<line_sep>self.assertEqual(0 len(findings))<block_end><def_stmt>test_cloudsploit_parser_with_one_criticle_vuln_has_one_findings self<block_start>testfile=open("dojo/unittests/scans/cloudsploit/cloudsploit_one_vul.json")<line_sep>parser=CloudsploitParser()<line_sep>findings=parser.get_findings(testfile Test())<line_sep>testfile.close()<line_sep>self.assertEqual(1 len(findings))<block_end><def_stmt>test_cloudsploit_parser_with_many_vuln_has_many_findings self<block_start>testfile=open("dojo/unittests/scans/cloudsploit/cloudsploit_many_vul.json")<line_sep>parser=CloudsploitParser()<line_sep>findings=parser.get_findings(testfile Test())<line_sep>testfile.close()<line_sep>self.assertEqual(6 len(findings))<block_end><block_end> |
<import_from_stmt>datetime datetime timedelta<import_from_stmt>django.conf settings<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>django.db connection<import_from_stmt>phonelog.models OldDeviceReportEntry DeviceReportEntry<line_sep>COLUMNS=("xform_id" "i" "msg" "type" "date" "server_date" "domain" "device_id" "app_version" "username" "user_id")<class_stmt>Command(BaseCommand)<block_start>help="Migrate device reports to partitioned table"<def_stmt>handle self *args **options<block_start>partitioned_table=DeviceReportEntry._meta.db_table<line_sep>old_table=OldDeviceReportEntry._meta.db_table<line_sep>now=datetime.utcnow()<line_sep>oldest_date=now-timedelta(days=settings.DAYS_TO_KEEP_DEVICE_LOGS)<line_sep>current=now<while_stmt>current<g>oldest_date<block_start>hour_ago=current-timedelta(hours=1)<with_stmt>connection.cursor()<as>cursor<block_start>cursor.execute("INSERT INTO "+partitioned_table+" ("+','.join(COLUMNS)+") "+"SELECT "+','.join(COLUMNS)+" "+"FROM "+old_table+" "+"WHERE server_date > %s AND server_date <= %s" [hour_ago current])<block_end>print("Inserted device logs from %s to %s"%(hour_ago current))<line_sep>current=hour_ago<block_end><block_end><block_end> |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a DDPG agent.
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
"""<import_stmt>agent<import_from_stmt>common replay_buffer<import_from_stmt>common.actor_critic ActorNetwork<import_from_stmt>common.actor_critic CriticNetwork<import_stmt>numpy<as>np<class_stmt>DDPG(agent.Agent)<block_start>"""DDPG agent."""<def_stmt>__init__ self env sess config<block_start>"""Initialize members."""<line_sep>state_dim=env.observation_space.shape[0]<line_sep>self.env=env<line_sep>self.action_dim=env.action_space.shape[0]<line_sep>self.action_high=env.action_space.high<line_sep>self.action_low=env.action_space.low<line_sep>self.batch_size=config.batch_size<line_sep>self.warmup_size=config.warmup_size<line_sep>self.gamma=config.gamma<line_sep>self.sigma=config.sigma<line_sep>self.noise_cap=config.c<line_sep>self.actor=ActorNetwork(sess=sess state_dim=state_dim action_dim=self.action_dim action_high=self.action_high action_low=self.action_low learning_rate=config.actor_lr grad_norm_clip=config.grad_norm_clip tau=config.tau batch_size=config.batch_size)<line_sep>self.critic=CriticNetwork(sess=sess state_dim=state_dim action_dim=self.action_dim learning_rate=config.critic_lr tau=config.tau gamma=config.gamma)<line_sep>self.replay_buffer=replay_buffer.ReplayBuffer(buffer_size=config.buffer_size)<block_end><def_stmt>random_action self observation<block_start>"""Return a random action."""<line_sep><return>self.env.action_space.sample()<block_end><def_stmt>action self observation<block_start>"""Return an action according to the agent's policy."""<line_sep><return>self.actor.get_action(observation)<block_end><def_stmt>action_with_noise self observation<block_start>"""Return a noisy action."""<if_stmt>self.replay_buffer.size<g>self.warmup_size<block_start>action=self.action(observation)<block_end><else_stmt><block_start>action=self.random_action(observation)<block_end>noise=np.clip(np.random.randn(self.action_dim)<times>self.sigma -self.noise_cap self.noise_cap)<line_sep>action_with_noise=action+noise<line_sep><return>(np.clip(action_with_noise 
self.action_low self.action_high) action noise)<block_end><def_stmt>store_experience self s a r t s2<block_start>"""Save experience to replay buffer."""<line_sep>self.replay_buffer.add(s a r t s2)<block_end><def_stmt>train self global_step<block_start>"""Train the agent's policy for 1 iteration."""<if_stmt>self.replay_buffer.size<g>self.warmup_size<block_start>s0,a,r,t,s1=self.replay_buffer.sample_batch(self.batch_size)<line_sep>target_actions=self.actor.get_target_action(s1)<line_sep>target_qval=self.get_target_qval(s1 target_actions)<line_sep>t=t.astype(dtype=int)<line_sep>y=r+self.gamma<times>target_qval<times>(1-t)<line_sep>self.critic.train(s0 a y)<line_sep>actions=self.actor.get_action(s0)<line_sep>grads=self.critic.get_action_gradients(s0 actions)<line_sep>self.actor.train(s0 grads[0])<line_sep>self.update_targets()<block_end><block_end><def_stmt>update_targets self<block_start>"""Update all target networks."""<line_sep>self.actor.update_target_network()<line_sep>self.critic.update_target_network()<block_end><def_stmt>get_target_qval self observation action<block_start>"""Get target Q-val."""<line_sep><return>self.critic.get_target_qval(observation action)<block_end><def_stmt>get_qval self observation action<block_start>"""Get Q-val."""<line_sep><return>self.critic.get_qval(observation action)<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>unemp_dur path<block_start>"""Unemployment Duration
Journal of Business Economics and Statistics web site :
http://amstat.tandfonline.com/loi/ubes20
*number of observations* : 3343
A time serie containing :
spell
length of spell in number of two-week intervals
censor1
= 1 if re-employed at full-time job
censor2
= 1 if re-employed at part-time job
censor3
1 if re-employed but left job: pt-ft status unknown
censor4
1 if still jobless
age
age
ui
= 1 if filed UI claim
reprate
eligible replacement rate
disrate
eligible disregard rate
logwage
log weekly earnings in lost job (1985\\$)
tenure
years tenure in lost job
<NAME>. (1996) “Unemployment Insurance Rules, Joblessness, and
Part-time Work”, *Econometrica*, **64**, 647–682.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `unemp_dur.csv`.
Returns:
Tuple of np.ndarray `x_train` with 3343 rows and 11 columns and
dictionary `metadata` of column headers (feature names).
"""<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='unemp_dur.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/Ecdat/UnempDur.csv'<line_sep>maybe_download_and_extract(path url save_file_name='unemp_dur.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end> |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Callable

from classy_vision.generic.registry_utils import import_all_modules

FILE_ROOT = Path(__file__).parent

# Registry mapping a head name to its constructor, plus the set of class
# names already registered (guards against registering the same class
# under two different names).
MODEL_HEADS_REGISTRY = {}
MODEL_HEADS_NAMES = set()


def register_model_head(name: str):
    """Registers Self-Supervision Model Heads.

    This decorator allows VISSL to add custom model heads, even if the
    model head itself is not part of VISSL. To use it, apply this decorator
    to a model head class, like this:

    .. code-block:: python

        @register_model_head('my_model_head_name')
        def my_model_head():
            ...

    To get a model head from a configuration file, see :func:`get_model_head`.
    """

    def register_model_head_cls(cls: Callable[..., Callable]):
        if name in MODEL_HEADS_REGISTRY:
            raise ValueError("Cannot register duplicate model head ({})".format(name))
        if cls.__name__ in MODEL_HEADS_NAMES:
            raise ValueError(
                "Cannot register task with duplicate model head name ({})".format(
                    cls.__name__
                )
            )
        MODEL_HEADS_REGISTRY[name] = cls
        MODEL_HEADS_NAMES.add(cls.__name__)
        return cls

    return register_model_head_cls


def get_model_head(name: str):
    """
    Given the model head name, construct the head if it's registered
    with VISSL.
    """
    # Include the offending name in the failure message so a misconfigured
    # head name is easy to diagnose.
    assert name in MODEL_HEADS_REGISTRY, f"Unknown model head: {name}"
    return MODEL_HEADS_REGISTRY[name]


# automatically import any Python files in the heads/ directory
import_all_modules(FILE_ROOT, "vissl.models.heads")

from vissl.models.heads.linear_eval_mlp import LinearEvalMLP  # isort:skip  # noqa
from vissl.models.heads.mlp import MLP  # isort:skip  # noqa
from vissl.models.heads.siamese_concat_view import (  # isort:skip  # noqa
    SiameseConcatView,
)
from vissl.models.heads.swav_prototypes_head import (  # isort:skip  # noqa
    SwAVPrototypesHead,
)

__all__ = [
    "get_model_head",
    "LinearEvalMLP",
    "MLP",
    "SiameseConcatView",
    "SwAVPrototypesHead",
]
"""Constants for the Ridwell integration."""<import_stmt>logging<line_sep>DOMAIN="ridwell"<line_sep>LOGGER=logging.getLogger(__package__)<line_sep>DATA_ACCOUNT="account"<line_sep>DATA_COORDINATOR="coordinator"<line_sep>SENSOR_TYPE_NEXT_PICKUP="next_pickup"<line_sep> |
<import_from_stmt>.EncoderRNN EncoderRNN<import_from_stmt>.DecoderRNN DecoderRNN<import_from_stmt>.TopKDecoder TopKDecoder<import_from_stmt>.seq2seq Seq2seq<line_sep> |
"""
Spacer components to add horizontal or vertical space to a layout.
"""<import_stmt>param<import_from_stmt>bokeh.models Div<as>BkDiv Spacer<as>BkSpacer<import_from_stmt>..reactive Reactive<class_stmt>Spacer(Reactive)<block_start>"""
The `Spacer` layout is a very versatile component which makes it easy to
put fixed or responsive spacing between objects.
Like all other components spacers support both absolute and responsive
sizing modes.
Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
:Example:
>>> pn.Row(
... 1, pn.Spacer(width=200),
... 2, pn.Spacer(width=100),
... 3
... )
"""<line_sep>_bokeh_model=BkSpacer<def_stmt>_get_model self doc root=<none> parent=<none> comm=<none><block_start>properties=self._process_param_change(self._init_params())<line_sep>model=self._bokeh_model(**properties)<if_stmt>root<is><none><block_start>root=model<block_end>self._models[root.ref['id']]=(model parent)<line_sep><return>model<block_end><block_end><class_stmt>VSpacer(Spacer)<block_start>"""
The `VSpacer` layout provides responsive vertical spacing.
Using this component we can space objects equidistantly in a layout and
allow the empty space to shrink when the browser is resized.
Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
:Example:
>>> pn.Column(
... pn.layout.VSpacer(), 'Item 1',
... pn.layout.VSpacer(), 'Item 2',
... pn.layout.VSpacer()
... )
"""<line_sep>sizing_mode=param.Parameter(default='stretch_height' readonly=<true>)<block_end><class_stmt>HSpacer(Spacer)<block_start>"""
The `HSpacer` layout provides responsive vertical spacing.
Using this component we can space objects equidistantly in a layout and
allow the empty space to shrink when the browser is resized.
Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
:Example:
>>> pn.Row(
... pn.layout.HSpacer(), 'Item 1',
... pn.layout.HSpacer(), 'Item 2',
... pn.layout.HSpacer()
... )
"""<line_sep>sizing_mode=param.Parameter(default='stretch_width' readonly=<true>)<block_end><class_stmt>Divider(Reactive)<block_start>"""
A `Divider` draws a horizontal rule (a `<hr>` tag in HTML) to separate
multiple components in a layout. It automatically spans the full width of
the container.
Reference: https://panel.holoviz.org/reference/layouts/Divider.html
:Example:
>>> pn.Column(
... '# Lorem Ipsum',
... pn.layout.Divider(),
... 'A very long text... '
>>> )
"""<line_sep>width_policy=param.ObjectSelector(default="fit" readonly=<true>)<line_sep>_bokeh_model=BkDiv<def_stmt>_get_model self doc root=<none> parent=<none> comm=<none><block_start>properties=self._process_param_change(self._init_params())<line_sep>properties['style']={'width':'100%' 'height':'100%'}<line_sep>model=self._bokeh_model(text='<hr style="margin: 0px">' **properties)<if_stmt>root<is><none><block_start>root=model<block_end>self._models[root.ref['id']]=(model parent)<line_sep><return>model<block_end><block_end> |
#encoding:utf-8
from utils import weighted_random_subreddit

# Candidate subreddits, each sampled with equal weight.
subreddit = weighted_random_subreddit(
    {
        'BeautifulFemales': 0.25,
        'cutegirlgifs': 0.25,
        'gentlemanboners': 0.25,
        'gentlemanbonersgifs': 0.25,
    }
)

t_channel = '@r_gentlemanboners'


def send_post(submission, r2t):
    """Forward *submission* to the channel, accepting only images and GIFs."""
    return r2t.send_simple(
        submission,
        text=False,
        gif=True,
        img=True,
        album=False,
        other=False,
    )
"""
Implementation of attack methods. Running this file as a program will
evaluate the model and get the validation accuracy and then
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>sys<import_stmt>cifar10_input<import_stmt>cifar100_input<import_stmt>config<import_from_stmt>tqdm tqdm<import_stmt>os<line_sep>config=config.get_args()<line_sep>_NUM_RESTARTS=config.num_restarts<class_stmt>LinfPGDAttack<block_start><def_stmt>__init__ self model epsilon num_steps step_size loss_func<block_start>"""Attack parameter initialization. The attack performs k steps of
size a, while always staying within epsilon from the initial
point."""<line_sep>self.model=model<line_sep>self.epsilon=epsilon<line_sep>self.num_steps=num_steps<line_sep>self.step_size=step_size<if_stmt>loss_func<eq>'xent'<block_start>loss=model.xent<block_end><elif_stmt>loss_func<eq>'cw'<block_start>label_mask=tf.one_hot(model.y_input 10 on_value=1.0 off_value=0.0 dtype=tf.float32)<line_sep>correct_logit=tf.reduce_sum(label_mask<times>model.pre_softmax axis=1)<line_sep>wrong_logit=tf.reduce_max((1-label_mask)<times>model.pre_softmax-1e4<times>label_mask axis=1)<line_sep>loss=-tf.nn.relu(correct_logit-wrong_logit+0)<block_end><else_stmt><block_start>print('Unknown loss function. Defaulting to cross-entropy')<line_sep>loss=model.xent<block_end>self.grad=tf.gradients(loss model.x_input)[0]<block_end><def_stmt>perturb self x_nat y sess<block_start>"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_infinity norm."""<line_sep>x=x_nat+np.random.uniform(-self.epsilon self.epsilon x_nat.shape)<line_sep>x=np.clip(x 0 255)<for_stmt>i range(self.num_steps)<block_start>grad=sess.run(self.grad feed_dict={self.model.x_input:x self.model.y_input:y})<line_sep>x=np.add(x self.step_size<times>np.sign(grad) out=x casting='unsafe')<line_sep>x=np.clip(x x_nat-self.epsilon x_nat+self.epsilon)<line_sep>x=np.clip(x 0 255)<block_end># ensure valid pixel range
<return>x<block_end><block_end><def_stmt>get_path_dir data_dir dataset **_<block_start>path=os.path.join(data_dir dataset)<if_stmt>os.path.islink(path)<block_start>path=os.readlink(path)<block_end><return>path<block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<import_stmt>math<import_from_stmt>free_model Model<line_sep>model_file=tf.train.latest_checkpoint(config.model_dir)<if_stmt>model_file<is><none><block_start>print('No model found')<line_sep>sys.exit()<block_end>dataset=config.dataset<line_sep>data_dir=config.data_dir<line_sep>data_path=get_path_dir(data_dir dataset)<line_sep>model=Model(mode='eval' dataset=dataset)<line_sep>attack=LinfPGDAttack(model config.epsilon config.pgd_steps config.step_size config.loss_func)<line_sep>saver=tf.train.Saver()<if_stmt>dataset<eq>'cifar10'<block_start>cifar=cifar10_input.CIFAR10Data(data_path)<block_end><else_stmt><block_start>cifar=cifar100_input.CIFAR100Data(data_path)<block_end><with_stmt>tf.Session()<as>sess# Restore the checkpoint
<block_start>saver.restore(sess model_file)<line_sep># Iterate over the samples batch-by-batch
num_eval_examples=config.eval_examples<line_sep>eval_batch_size=config.eval_size<line_sep>num_batches=int(math.ceil(num_eval_examples/eval_batch_size))<line_sep>x_adv=[]# adv accumulator
print('getting clean validation accuracy')<line_sep>total_corr=0<for_stmt>ibatch tqdm(range(num_batches))<block_start>bstart=ibatch<times>eval_batch_size<line_sep>bend=min(bstart+eval_batch_size num_eval_examples)<line_sep>x_batch=cifar.eval_data.xs[bstart:bend :].astype(np.float32)<line_sep>y_batch=cifar.eval_data.ys[bstart:bend]<line_sep>dict_val={model.x_input:x_batch model.y_input:y_batch}<line_sep>cur_corr=sess.run(model.num_correct feed_dict=dict_val)<line_sep>total_corr<augadd>cur_corr<block_end>print('** validation accuracy: %.3f **\n\n'%(total_corr/float(num_eval_examples)<times>100))<line_sep>print('Iterating over {} batches'.format(num_batches))<line_sep>total_corr,total_num=0 0<for_stmt>ibatch range(num_batches)<block_start>bstart=ibatch<times>eval_batch_size<line_sep>bend=min(bstart+eval_batch_size num_eval_examples)<line_sep>curr_num=bend-bstart<line_sep>total_num<augadd>curr_num<line_sep>print('mini batch: {}/{} -- batch size: {}'.format(ibatch+1 num_batches curr_num))<line_sep>sys.stdout.flush()<line_sep>x_batch=cifar.eval_data.xs[bstart:bend :].astype(np.float32)<line_sep>y_batch=cifar.eval_data.ys[bstart:bend]<line_sep>best_batch_adv=np.copy(x_batch)<line_sep>dict_adv={model.x_input:best_batch_adv model.y_input:y_batch}<line_sep>cur_corr,y_pred_batch,best_loss=sess.run([model.num_correct model.predictions model.y_xent] feed_dict=dict_adv)<for_stmt>ri range(_NUM_RESTARTS)<block_start>x_batch_adv=attack.perturb(x_batch y_batch sess)<line_sep>dict_adv={model.x_input:x_batch_adv model.y_input:y_batch}<line_sep>cur_corr,y_pred_batch,this_loss=sess.run([model.num_correct model.predictions model.y_xent] feed_dict=dict_adv)<line_sep>bb=best_loss<ge>this_loss<line_sep>bw=best_loss<l>this_loss<line_sep>best_batch_adv[bw : : :]=x_batch_adv[bw : : :]<line_sep>best_corr,y_pred_batch,best_loss=sess.run([model.num_correct model.predictions model.y_xent] feed_dict={model.x_input:best_batch_adv model.y_input:y_batch})<line_sep>print('restart %d: num correct: %d -- 
loss:%.4f'%(ri best_corr np.mean(best_loss)))<block_end>total_corr<augadd>best_corr<line_sep>print('accuracy till now {:4}% \n\n'.format(float(total_corr)/total_num<times>100))<line_sep>x_adv.append(best_batch_adv)<block_end>x_adv=np.concatenate(x_adv axis=0)<block_end><block_end> |
import redis

# Default values.
REDIS_URL = None
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
FILTER_URL = None
FILTER_HOST = 'localhost'
FILTER_PORT = 6379
FILTER_DB = 0


def from_settings(settings):
    """Build a Redis client from crawler *settings*."""
    url = settings.get('REDIS_URL', REDIS_URL)
    host = settings.get('REDIS_HOST', REDIS_HOST)
    port = settings.get('REDIS_PORT', REDIS_PORT)
    # REDIS_URL takes precedence over host/port specification.
    if url:
        return redis.from_url(url)
    return redis.Redis(host=host, port=port)


def from_settings_filter(settings):
    """Build a Redis client for the duplicate filter from crawler *settings*."""
    url = settings.get('FILTER_URL', FILTER_URL)
    host = settings.get('FILTER_HOST', FILTER_HOST)
    port = settings.get('FILTER_PORT', FILTER_PORT)
    db = settings.get('FILTER_DB', FILTER_DB)
    # FILTER_URL takes precedence over host/port/db specification.
    if url:
        return redis.from_url(url)
    return redis.Redis(host=host, port=port, db=db)
# -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utility module.
A test directory contains a Test proto stored in "source/test.json", the reference and variant shader jobs, and various
other files, including results.
This module is used to read Test proto files and get various paths that exist in test directories.
"""<import_from_stmt>pathlib Path<import_from_stmt>gfauto proto_util util<import_from_stmt>gfauto.test_pb2 Test<line_sep>TEST_METADATA="test.json"<line_sep>REFERENCE_DIR="reference"<line_sep>VARIANT_DIR="variant"<line_sep>SHADER_JOB="shader.json"<line_sep>SHADER_JOB_RESULT="shader.info.json"<def_stmt>get_source_dir test_dir:Path<arrow>Path<block_start><return>test_dir/"source"<block_end><def_stmt>get_metadata_path test_dir:Path<arrow>Path<block_start><return>get_metadata_path_from_source_dir(get_source_dir(test_dir))<block_end><def_stmt>get_metadata_path_from_source_dir source_dir:Path<arrow>Path<block_start><return>source_dir/TEST_METADATA<block_end><def_stmt>metadata_write metadata:Test test_dir:Path<arrow>Path<block_start>metadata_write_to_path(metadata get_metadata_path(test_dir))<line_sep><return>test_dir<block_end><def_stmt>metadata_read test_dir:Path<arrow>Test<block_start><return>metadata_read_from_path(get_metadata_path(test_dir))<block_end><def_stmt>metadata_read_from_source_dir source_dir:Path<arrow>Test<block_start><return>metadata_read_from_path(get_metadata_path_from_source_dir(source_dir))<block_end><def_stmt>metadata_read_from_path test_metadata_path:Path<arrow>Test<block_start>text=util.file_read_text(test_metadata_path)<line_sep>result=Test()<line_sep>proto_util.json_to_message(text result)<line_sep><return>result<block_end><def_stmt>metadata_write_to_path metadata:Test test_metadata_path:Path<arrow>Path<block_start>text=proto_util.message_to_json(metadata)<line_sep>util.file_write_text(test_metadata_path text)<line_sep><return>test_metadata_path<block_end><def_stmt>get_shader_job_path test_dir:Path shader_name:str<arrow>Path<block_start><return>test_dir/"source"/shader_name/SHADER_JOB<block_end><def_stmt>get_device_directory test_dir:Path device_name:str<arrow>Path<block_start><return>test_dir/"results"/device_name<block_end><def_stmt>get_results_directory test_dir:Path device_name:str<arrow>Path<block_start><return>get_device_directory(test_dir 
device_name)/"result"<block_end><def_stmt>get_reductions_dir test_dir:Path device_name:str<arrow>Path<block_start><return>get_device_directory(test_dir device_name)/"reductions"<block_end><def_stmt>get_reduced_test_dir test_dir:Path device_name:str reduction_name:str<arrow>Path<block_start><return>get_reductions_dir(test_dir device_name)/reduction_name<block_end><def_stmt>get_reduction_work_directory reduced_test_dir:Path name_of_shader:str<arrow>Path<block_start><return>reduced_test_dir/"reduction_work"/name_of_shader<block_end> |
import djclick as click
from django.conf import settings
from django.utils.translation import gettext_lazy as _

from .forms import AddOrganizerForm
from .slack_client import slack

# "Get organizers info" functions used in 'new_event' and 'copy_event' management commands.


def get_main_organizer():
    """
    We're asking user for name and address of main organizer, and return
    a list of dictionary.
    """
    click.echo(_("Let's talk about the team. First the main organizer:"))
    main_name = click.prompt(click.style("First and last name", bold=True, fg='yellow'))
    main_email = click.prompt(click.style("E-mail address", bold=True, fg='yellow'))
    team = [{'name': main_name, 'email': main_email}]
    click.echo("All right, the main organizer is {0} ({1})".format(main_name, main_email))
    return team


def get_team(team):
    """
    We're asking user for names and address of the rest of the team,
    and append that to a list we got from get_main_organizer
    """
    question = click.style(
        "Do you want to add additional team members?", bold=True, fg='yellow'
    )
    i = 1
    # Re-ask after each member until the user declines.
    while click.confirm(question, default=False):
        i += 1
        name = click.prompt(
            click.style(f"First and last name of #{i} member", bold=True, fg='yellow')
        )
        email = click.prompt(
            click.style(f"E-mail address of #{i} member", bold=True, fg='yellow')
        )
        # An empty name means the entry is silently discarded.
        if len(name) > 0:
            team.append({'name': name, 'email': email})
            click.echo(
                f"All right, the #{i} team member of Django Girls is {name} ({email})"
            )
    return team


def create_users(team, event):
    """
    Create or get User objects based on team list
    """
    members = []
    for member in team:
        member['event'] = event.pk
        user = AddOrganizerForm(member).save()
        members.append(user)
    return members


def brag_on_slack_bang(city, country, team):
    """
    This is posting a message about Django Girls new event to #general channel on Slack!
    """
    if settings.ENABLE_SLACK_NOTIFICATIONS:
        names = ', '.join(['{} {}'.format(x.first_name, x.last_name) for x in team])
        text = (
            f":django_pony: :zap: Woohoo! :tada: New Django Girls alert! "
            f"Welcome Django Girls {city}, {country}. "
            f"Congrats {names}!"
        )
        slack.chat.post_message(
            channel='#general',
            text=text,
            username='Django Girls',
            icon_emoji=':django_heart:',
        )
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install tensorflow_model_optimization."""<import_stmt>datetime<import_stmt>os<import_stmt>sys<import_from_stmt>setuptools find_packages<import_from_stmt>setuptools setup<import_from_stmt>setuptools.command.install install<as>InstallCommandBase<import_from_stmt>setuptools.dist Distribution<line_sep># To enable importing version.py directly, we add its path to sys.path.
version_path=os.path.join(os.path.dirname(__file__) 'tensorflow_model_optimization' 'python/core')<line_sep>sys.path.append(version_path)<import_from_stmt>version __version__# pylint: disable=g-import-not-at-top
# TODO(alanchiao): add explicit Tensorflow requirement once Tensorflow
# moves from a tf and tf-gpu packaging approach (where a user installs
# one of the two) to one where a user installs the tf package and then
# also installs the gpu package if they need gpu support. The latter allows
# us (and our dependents) to maintain a single package instead of two.
REQUIRED_PACKAGES=['numpy~=1.14' 'six~=1.10' 'enum34~=1.1;python_version<"3.4"' 'dm-tree~=0.1.1' ]<if_stmt>'--release'<in>sys.argv<block_start>release=<true><line_sep>sys.argv.remove('--release')<block_end><else_stmt># Build a nightly package by default.
<block_start>release=<false><block_end><if_stmt>release<block_start>project_name='vai-q-tensorflow2'<block_end><else_stmt># Nightly releases use date-based versioning of the form
# '0.0.1.dev20180305'
<block_start>project_name='vai-q-tensorflow2-nightly'<line_sep>datestring=datetime.datetime.now().strftime('%Y%m%d')<line_sep>__version__<augadd>datestring<block_end><class_stmt>BinaryDistribution(Distribution)<block_start>"""This class is needed in order to create OS specific wheels."""<def_stmt>has_ext_modules self<block_start><return><false><block_end><block_end>setup(name=project_name version=__version__ description='Xilinx Vitis AI Quantizer for Tensorflow 2.x. '<concat>'This is customized based on tensorflow-model-optimization('<concat>'https://github.com/tensorflow/model-optimization)'<concat>'A suite of tools that users, both novice and advanced'<concat>' can use to optimize machine learning models for deployment'<concat>' and execution.' author='<NAME>' author_email='<EMAIL>' license='Apache 2.0' packages=find_packages() install_requires=REQUIRED_PACKAGES # Add in any packaged data.
include_package_data=<true> package_data={'':['*.so' '*.json']} exclude_package_data={'':['BUILD' '*.h' '*.cc']} zip_safe=<false> distclass=BinaryDistribution cmdclass={'pip_pkg':InstallCommandBase } classifiers=['Intended Audience :: Developers' 'Intended Audience :: Education' 'Intended Audience :: Science/Research' 'License :: OSI Approved :: Apache Software License' 'Topic :: Scientific/Engineering' 'Topic :: Scientific/Engineering :: Artificial Intelligence' ] keywords='tensorflow model optimization machine learning' )<line_sep> |
import sys
from typing import Any

from django.conf import settings

if sys.version_info >= (3, 8):
    from typing import Literal

    # The three accepted recording modes.
    ModeType = Literal["once", "none", "all"]
else:
    # Literal is unavailable before Python 3.8; fall back to plain str.
    ModeType = str


class Settings:
    """Lazy accessor over the PERF_REC dict in Django settings."""

    # Fallbacks used when PERF_REC is absent or lacks the requested key.
    defaults = {"HIDE_COLUMNS": True, "MODE": "once"}

    def get_setting(self, key: str) -> Any:
        try:
            return settings.PERF_REC[key]
        except (AttributeError, KeyError):
            # Either PERF_REC is undefined or the key is missing.
            return self.defaults.get(key)

    @property
    def HIDE_COLUMNS(self) -> bool:
        return self.get_setting("HIDE_COLUMNS")

    @property
    def MODE(self) -> ModeType:
        return self.get_setting("MODE")


perf_rec_settings = Settings()
import unittest

from pyNTM import FlexModel
from pyNTM import ModelException
from pyNTM import PerformanceModel


class TestIGPShortcuts(unittest.TestCase):
    # Exercises IGP-shortcut behavior: demands riding LSPs mid-path,
    # toggling igp_shortcuts_enabled per node, and manual LSP metrics.

    def test_traffic_on_shortcut_lsps(self):
        """
        Verify Interface and LSP traffic when IGP shortcuts enabled
        in baseline model.
        """
        # The demands should take LSPs starting on the first
        # node that has shortcuts and should take the LSP that
        # leads it closest to the demand destination
        model = FlexModel.load_model_file("test/igp_shortcuts_model_mult_lsps_in_path.csv")
        model.update_simulation()
        # Get all the interface objects
        int_a_b = model.get_interface_object("A-B", "A")
        int_b_c = model.get_interface_object("B-C", "B")
        int_c_d = model.get_interface_object("C-D", "C")
        int_d_e = model.get_interface_object("D-E", "D")
        int_e_f = model.get_interface_object("E-F", "E")
        int_a_g = model.get_interface_object("A-G", "A")
        int_g_f = model.get_interface_object("G-F", "G")
        # Get all LSP objects
        lsp_b_d_1 = model.get_rsvp_lsp("B", "D", "lsp_b_d_1")
        lsp_b_d_2 = model.get_rsvp_lsp("B", "D", "lsp_b_d_2")
        lsp_c_e_1 = model.get_rsvp_lsp("C", "E", "lsp_c_e_1")
        lsp_d_f_1 = model.get_rsvp_lsp("D", "F", "lsp_d_f_1")
        # Get demand objects
        dmd_a_f_1 = model.get_demand_object("A", "F", "dmd_a_f_1")
        dmd_d_f_1 = model.get_demand_object("D", "F", "dmd_d_f_1")
        # Verify traffic on LSPs
        self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 2.5)
        self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 2.5)
        self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 0)
        self.assertEqual(lsp_d_f_1.traffic_on_lsp(model), 13.0)
        # Verify demand paths
        self.assertIn([int_a_g, int_g_f], dmd_a_f_1.path)
        self.assertIn([int_a_b, lsp_b_d_1, lsp_d_f_1], dmd_a_f_1.path)
        self.assertIn([int_a_b, lsp_b_d_2, lsp_d_f_1], dmd_a_f_1.path)
        self.assertEqual(dmd_d_f_1.path, [[lsp_d_f_1]])
        # Verify interface traffic
        self.assertEqual(int_a_b.traffic, 5.0)
        self.assertEqual(int_b_c.traffic, 5.0)
        self.assertEqual(int_c_d.traffic, 5.0)
        self.assertEqual(int_d_e.traffic, 13.0)
        self.assertEqual(int_e_f.traffic, 13.0)
        self.assertEqual(int_a_g.traffic, 5.0)
        self.assertEqual(int_g_f.traffic, 5.0)
        # Verify LSPs on interfaces
        self.assertIn(lsp_b_d_1, int_b_c.lsps(model))
        self.assertIn(lsp_b_d_2, int_b_c.lsps(model))
        self.assertIn(lsp_b_d_1, int_c_d.lsps(model))
        self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
        # NOTE(review): the next assertion duplicates the previous one.
        self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
        self.assertIn(lsp_c_e_1, int_c_d.lsps(model))

    def test_igp_shortcut_node_attributes(self):
        """Node B's igp_shortcuts_enabled flag is True in the baseline model."""
        # The IGP shortcut attribute should be True
        model = FlexModel.load_model_file("test/igp_shortcuts_model_mult_lsps_in_path.csv")
        node_b = model.get_node_object("B")
        self.assertTrue(node_b.igp_shortcuts_enabled)

    # Remove igp_shortcuts_enabled on node B, traffic should appear on lsp_c_e_1
    # and disappear from lsp_b_d_1/2 and lsp_d_f_1
    def test_remove_shortcuts_node_b(self):
        """Disabling shortcuts on B shifts traffic to LSPs starting at C/D."""
        model = FlexModel.load_model_file("test/igp_shortcuts_model_mult_lsps_in_path.csv")
        node_b = model.get_node_object("B")
        node_b.igp_shortcuts_enabled = False
        model.update_simulation()
        # Get LSP objects
        lsp_b_d_1 = model.get_rsvp_lsp("B", "D", "lsp_b_d_1")
        lsp_b_d_2 = model.get_rsvp_lsp("B", "D", "lsp_b_d_2")
        lsp_c_e_1 = model.get_rsvp_lsp("C", "E", "lsp_c_e_1")
        lsp_d_f_1 = model.get_rsvp_lsp("D", "F", "lsp_d_f_1")
        dmd_a_f_1 = model.get_demand_object("A", "F", "dmd_a_f_1")
        dmd_d_f_1 = model.get_demand_object("D", "F", "dmd_d_f_1")
        # Half the traffic from dmd_a_f_1 should be on lsp_c_e_1
        self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 5.0)
        # dmd_a_f_1 should be the only demand on lsp_c_e_1
        self.assertEqual(lsp_c_e_1.demands_on_lsp(model), [dmd_a_f_1])
        # dmd_d_f_1 should be the only demand on lsp_d_f_1
        self.assertEqual(lsp_d_f_1.demands_on_lsp(model), [dmd_d_f_1])
        # LSPs from B to D should have no demands and no traffic
        self.assertEqual(lsp_b_d_1.demands_on_lsp(model), [])
        self.assertEqual(lsp_b_d_2.demands_on_lsp(model), [])
        self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 0)
        self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 0)

    def test_demands_no_shortcuts(self):
        """
        The demand should take the LSP if the IGP shortcut attribute is True on node B.
        When the IGP shortcut attribute is turned to False, the demand should
        only IGP route. Change all igp_shortcuts_enabled flags to False.
        Test LSP and Interface traffic.
        """
        model = FlexModel.load_model_file("test/igp_shortcuts_model_mult_lsps_in_path.csv")
        model.update_simulation()
        # Get all LSP objects
        lsp_b_d_1 = model.get_rsvp_lsp("B", "D", "lsp_b_d_1")
        lsp_b_d_2 = model.get_rsvp_lsp("B", "D", "lsp_b_d_2")
        lsp_c_e_1 = model.get_rsvp_lsp("C", "E", "lsp_c_e_1")
        lsp_d_f_1 = model.get_rsvp_lsp("D", "F", "lsp_d_f_1")
        # Get some node objects
        node_b = model.get_node_object("B")
        node_c = model.get_node_object("C")
        node_d = model.get_node_object("D")
        node_e = model.get_node_object("E")
        # Get LSP object
        dmd_d_f_1 = model.get_demand_object("D", "F", "dmd_d_f_1")
        # Set the node igp_shortcuts_enabled attribute to False
        node_b.igp_shortcuts_enabled = False
        node_c.igp_shortcuts_enabled = False
        node_d.igp_shortcuts_enabled = False
        node_e.igp_shortcuts_enabled = False
        model.update_simulation()
        # Only lsp_d_f_1 should have traffic/demands
        self.assertEqual(lsp_b_d_1.demands_on_lsp(model), [])
        self.assertEqual(lsp_b_d_2.demands_on_lsp(model), [])
        self.assertEqual(lsp_c_e_1.demands_on_lsp(model), [])
        self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 0)
        self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 0)
        self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 0)
        self.assertEqual(lsp_d_f_1.demands_on_lsp(model), [dmd_d_f_1])
        self.assertEqual(lsp_d_f_1.traffic_on_lsp(model), 8.0)

    def test_igp_shortcut_perf_model(self):
        """igp_shortcuts_enabled must be rejected by PerformanceModel."""
        model = PerformanceModel.load_model_file("test/igp_routing_topology.csv")
        node_a = model.get_node_object("A")
        node_a.igp_shortcuts_enabled = True
        err_msg = "igp_shortcuts_enabled not allowed in PerformanceModel, but present on these Nodes"
        with self.assertRaises(ModelException) as context:
            model.update_simulation()
        self.assertIn(err_msg, context.exception.args[0][1][0].keys())

    # If one LSP from B to D is assigned a lower metric, traffic should
    # not split at A
    def test_changed_metric(self):
        """Manual LSP metrics steer the A->F demand onto/off lsp_b_d_1."""
        model = FlexModel.load_model_file("test/igp_shortcuts_model_mult_lsps_in_path.csv")
        # Get all the interface objects
        int_a_b = model.get_interface_object("A-B", "A")
        int_b_c = model.get_interface_object("B-C", "B")
        int_c_d = model.get_interface_object("C-D", "C")
        int_d_e = model.get_interface_object("D-E", "D")
        int_e_f = model.get_interface_object("E-F", "E")
        int_a_g = model.get_interface_object("A-G", "A")
        int_g_f = model.get_interface_object("G-F", "G")
        # Get all LSP objects
        lsp_b_d_1 = model.get_rsvp_lsp("B", "D", "lsp_b_d_1")
        lsp_b_d_2 = model.get_rsvp_lsp("B", "D", "lsp_b_d_2")
        lsp_c_e_1 = model.get_rsvp_lsp("C", "E", "lsp_c_e_1")
        lsp_d_f_1 = model.get_rsvp_lsp("D", "F", "lsp_d_f_1")
        # Get demand objects
        dmd_a_f_1 = model.get_demand_object("A", "F", "dmd_a_f_1")
        dmd_d_f_1 = model.get_demand_object("D", "F", "dmd_d_f_1")
        # Give lsp a lower than default metric
        lsp_b_d_1.manual_metric = 15
        model.update_simulation()
        dmd_path_1 = [int_a_b, lsp_b_d_1, lsp_d_f_1]
        # Confirm demand path
        self.assertIn(dmd_path_1, dmd_a_f_1.path)
        # Verify traffic on LSPs
        self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 10)
        self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 0)
        self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 0)
        self.assertEqual(lsp_d_f_1.traffic_on_lsp(model), 18.0)
        # Verify demand paths
        self.assertNotIn([int_a_g, int_g_f], dmd_a_f_1.path)
        self.assertIn([int_a_b, lsp_b_d_1, lsp_d_f_1], dmd_a_f_1.path)
        self.assertNotIn(lsp_b_d_2, dmd_a_f_1.path)
        self.assertEqual(dmd_d_f_1.path, [[lsp_d_f_1]])
        # Verify interface traffic
        self.assertEqual(int_a_b.traffic, 10.0)
        self.assertEqual(int_b_c.traffic, 10.0)
        self.assertEqual(int_c_d.traffic, 10.0)
        self.assertEqual(int_d_e.traffic, 18.0)
        self.assertEqual(int_e_f.traffic, 18.0)
        self.assertEqual(int_a_g.traffic, 0.0)
        self.assertEqual(int_g_f.traffic, 0.0)
        # Verify LSPs on interfaces
        self.assertIn(lsp_b_d_1, int_b_c.lsps(model))
        self.assertIn(lsp_b_d_2, int_b_c.lsps(model))
        self.assertIn(lsp_b_d_1, int_c_d.lsps(model))
        self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
        self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
        self.assertIn(lsp_c_e_1, int_c_d.lsps(model))
        # Give lsp_b_d_1 a higher than default metric
        lsp_b_d_1.manual_metric = 25
        model.update_simulation()
        dmd_path_2_1 = [int_a_g, int_g_f]
        dmd_path_2_2 = [int_a_b, lsp_b_d_2, lsp_d_f_1]
        # Confirm demand path
        self.assertIn(dmd_path_2_1, dmd_a_f_1.path)
        self.assertIn(dmd_path_2_2, dmd_a_f_1.path)
        # Verify traffic on LSPs
        self.assertEqual(lsp_b_d_1.traffic_on_lsp(model), 0)
        self.assertEqual(lsp_b_d_2.traffic_on_lsp(model), 5)
        self.assertEqual(lsp_c_e_1.traffic_on_lsp(model), 0)
        self.assertEqual(lsp_d_f_1.traffic_on_lsp(model), 13.0)
        # Verify demand paths
        self.assertIn([int_a_g, int_g_f], dmd_a_f_1.path)
        self.assertNotIn(lsp_b_d_1, dmd_a_f_1.path)
        self.assertIn([int_a_b, lsp_b_d_2, lsp_d_f_1], dmd_a_f_1.path)
        self.assertEqual(dmd_d_f_1.path, [[lsp_d_f_1]])
        # Verify interface traffic
        self.assertEqual(int_a_b.traffic, 5.0)
        self.assertEqual(int_b_c.traffic, 5.0)
        self.assertEqual(int_c_d.traffic, 5.0)
        self.assertEqual(int_d_e.traffic, 13.0)
        self.assertEqual(int_e_f.traffic, 13.0)
        self.assertEqual(int_a_g.traffic, 5.0)
        self.assertEqual(int_g_f.traffic, 5.0)
        # Verify LSPs on interfaces
        self.assertIn(lsp_b_d_1, int_b_c.lsps(model))
        self.assertIn(lsp_b_d_2, int_b_c.lsps(model))
        self.assertIn(lsp_b_d_1, int_c_d.lsps(model))
        self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
        self.assertIn(lsp_b_d_2, int_c_d.lsps(model))
        self.assertIn(lsp_c_e_1, int_c_d.lsps(model))

    # If an LSP from A to F is added, all traffic should take that LSP
<def_stmt>test_direct_lsp_preemption self<block_start>model=FlexModel.load_model_file("test/igp_shortcuts_model_mult_lsps_in_path.csv")<line_sep>dmd_a_f_1=model.get_demand_object("A" "F" "dmd_a_f_1")<line_sep>model.add_rsvp_lsp("A" "F" "lsp_a_f_1")<line_sep>lsp_a_f_1=model.get_rsvp_lsp("A" "F" "lsp_a_f_1")<line_sep>lsp_b_d_1=model.get_rsvp_lsp("B" "D" "lsp_b_d_1")<line_sep>int_a_g=model.get_interface_object("A-G" "A")<line_sep>int_a_b=model.get_interface_object("A-B" "A")<line_sep>model.update_simulation()<line_sep># Make sure dmd_a_f_1 takes lsp_a_f_1
self.assertEqual(lsp_a_f_1.demands_on_lsp(model) [dmd_a_f_1])<line_sep>self.assertEqual(lsp_a_f_1.traffic_on_lsp(model) 10)<line_sep>self.assertEqual(lsp_b_d_1.traffic_on_lsp(model) 0)<line_sep>self.assertEqual(lsp_b_d_1.demands_on_lsp(model) [])<line_sep># lsp_a_f_1 will take path with fewest hops
self.assertEqual(int_a_g.traffic 10)<line_sep>self.assertEqual(int_a_b.traffic 0)<block_end><block_end> |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict

from pandas import DataFrame

from lib.cast import safe_int_cast
from lib.data_source import DataSource
from lib.time import datetime_isoformat


class SudanHumdataDataSource(DataSource):
    """Parses the Sudan state-level case counts published via HDX/ReliefWeb."""

    def parse_dataframes(
        self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        column_map = {
            "Report Date": "date",
            "State": "match_string",
            "Confirmed Cases": "total_confirmed",
        }
        # Rename the appropriate columns and discard the first (header) row.
        records = dataframes[0].rename(columns=column_map).drop([0])

        # The dates in the provided CSV are incorrect for one of the reports.
        # Replace with report date taken from text of report.
        bad_source = (
            "https://reliefweb.int/sites/reliefweb.int/files/resources/"
            "Situation%20Report%20-%20Sudan%20-%207%20May%202020.pdf"
        )
        records.loc[records["Source"] == bad_source, "date"] = "5/11/2020"
        records = records.drop(axis=1, columns=["As of Date", "Source"])

        # Remove Abyei PCA, a disputed region with no data shown.
        records = records[records["match_string"] != "Abyei PCA"]

        # Data source uses different spelling from src/data/iso_3166_2_codes.csv
        records["match_string"].replace({"Gedaref": "Al Qadarif"}, inplace=True)

        records["date"] = records["date"].apply(lambda x: datetime_isoformat(x, "%m/%d/%Y"))

        # Empty cells (states with no confirmed cases yet) read in as NaN.
        # Zero them so the grouped_diff call to get new confirmed cases works
        # for a state's first day with a case.
        records["total_confirmed"] = records["total_confirmed"].fillna(0).apply(safe_int_cast)

        # Make sure all records have the country code
        records["country_code"] = "SD"

        return records
from ..base import BuildStage
from pathlib import Path

# Template for the generated Django wsgi.py; {0} is the settings module path.
TPL = """\"\"\"
WSGI config for {0} project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
\"\"\"
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{0}")
application = get_wsgi_application()
"""


class WsgiStage(BuildStage):
    """Build stage that writes the project's ``wsgi.py`` into the settings package."""

    def run(self):
        contents = TPL.format(self.settings_module('settings'))
        target = Path(self.build.settings_pckg_path) / 'wsgi.py'
        target.write_text(contents)
"""add task pending state
Revision ID: 30241b33d849
Revises: cd<PASSWORD>
Create Date: 2021-01-07 14:39:43.251123
"""<import_stmt>sqlalchemy<as>sa<import_from_stmt>alembic op<line_sep># revision identifiers, used by Alembic.
revision='<KEY>'<line_sep>down_revision='cd404ed93cc0'<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade # manually entered
<block_start><if_stmt>op.get_context().dialect.name<eq>'postgresql'# https://alembic.sqlalchemy.org/en/latest/api/runtime.html#alembic.runtime.migration.MigrationContext.autocommit_block
<block_start><with_stmt>op.get_context().autocommit_block()<block_start>op.execute("ALTER TYPE taskstatus ADD VALUE 'created'")<block_end><block_end><else_stmt># sqlite uses varchar + constraint for enum types
<block_start>taskstatus_enum=sa.Enum('created' 'pending' 'running' 'success' 'failed' 'skipped' name='taskstatus' )<with_stmt>op.batch_alter_table("tasks")<as>batch_op<block_start>batch_op.alter_column("status" type_=taskstatus_enum)<block_end><block_end><block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ###
<block_start><pass><line_sep># ### end Alembic commands ###
<block_end> |
import base64
import hashlib
import json
import logging
from dataclasses import dataclass

import boto3

log = logging.getLogger()
region = "us-east-1"


def handle(event: dict, context):
    """Lambda@Edge viewer-request handler enforcing HTTP Basic authentication.

    Returns the original CloudFront request on success, or a 401 response
    on any authentication failure (the failure is logged, never surfaced).
    """
    request = event["Records"][0]["cf"]["request"]
    try:
        authenticate(request["headers"])
    except Exception as e:
        log.error(repr(e))
        return unauthorized
    return request


def authenticate(headers: dict):
    """Validate the Basic credentials in *headers* against the stored user.

    Raises ValueError/KeyError on any malformed header, unknown user, or
    password mismatch.
    """
    domain = headers["host"][0]["value"]
    auth = headers["authorization"][0]["value"]
    # maxsplit=1: only the first space separates the scheme from the credentials.
    auth_type, creds = auth.split(" ", 1)
    if auth_type != "Basic":
        raise ValueError("Invalid auth type: " + auth_type)
    # BUG FIX: split on the FIRST colon only. RFC 7617 forbids ':' in the
    # user-id but allows it in the password; the previous unbounded split
    # failed to unpack (and thus rejected) valid passwords containing ':'.
    username, password = base64.b64decode(creds).decode().split(":", 1)
    user = get_user(domain, username)
    if hash_password(password, user.password_salt) != user.password_hash:
        raise ValueError("Invalid password for " + username)


@dataclass
class User:
    """Stored credential record for one user of one domain."""

    username: str
    password_hash: str
    password_salt: str


def get_user(domain: str, username: str) -> User:
    """Load the user's credential record from SSM Parameter Store."""
    data = boto3.client("ssm", region_name=region).get_parameter(
        Name=f"/s3pypi/{domain}/users/{username}",
        WithDecryption=True,
    )["Parameter"]["Value"]
    return User(username, **json.loads(data))


def hash_password(password: str, salt: str) -> str:
    """Return the hex SHA-1 digest of password+salt.

    NOTE(review): SHA-1 is weak for password hashing; migrating to a real KDF
    (bcrypt/scrypt/argon2) would require re-hashing all stored credentials,
    so it is only flagged here, not changed.
    """
    return hashlib.sha1((password + salt).encode()).hexdigest()


# Canned CloudFront 401 response that triggers the browser's Basic auth prompt.
unauthorized = dict(
    status="401",
    statusDescription="Unauthorized",
    headers={"www-authenticate": [{"key": "WWW-Authenticate", "value": 'Basic realm="Login"'}]},
)
# -*- coding: utf-8 -*-

import asyncio
import ccxt.async_support as ccxt


async def poll(tickers):
    """Async-generate (symbol, ticker) pairs, cycling through *tickers* forever."""
    exchange = ccxt.kraken()
    count = 0
    while True:
        symbol = tickers[count % len(tickers)]
        yield symbol, await exchange.fetch_ticker(symbol)
        count += 1
        # Respect the exchange's rate limit (milliseconds -> seconds).
        await asyncio.sleep(exchange.rateLimit / 1000)
<import_stmt>asyncio<import_stmt>ccxt.async_support<as>ccxt<async_keyword><def_stmt>poll tickers<block_start>i=0<line_sep>kraken=ccxt.kraken()<while_stmt><true><block_start>symbol=tickers[i%len(tickers)]<line_sep><yield>(symbol <await>kraken.fetch_ticker(symbol))<line_sep>i<augadd>1<line_sep><await>asyncio.sleep(kraken.rateLimit/1000)<block_end><block_end><async_keyword><def_stmt>main <block_start><async_keyword><for_stmt>(symbol ticker) poll(['BTC/USD' 'ETH/BTC' 'BTC/EUR'])<block_start>print(symbol ticker)<block_end><block_end>asyncio.get_event_loop().run_until_complete(main())<line_sep> |
# Released under the MIT License. See LICENSE for details.
#
# This file was automatically generated from "rampage.ma"
# pylint: disable=all
# Map geometry exported by the level tooling; edit the source .ma file, not
# this data. Each entry is a tuple of coordinates; box entries are presumably
# (center x, y, z) + (rotation placeholder) + (extents x, y, z) — TODO confirm
# against the exporter.
points = {}
# noinspection PyDictCreation
boxes = {}
boxes['area_of_interest_bounds'] = (0.3544110667, 5.616383286, -4.066055072) + (0.0, 0.0, 0.0) + (19.90053969, 10.34051135, 8.16221072)
boxes['edge_box'] = (0.3544110667, 5.438284793, -4.100357672) + (0.0, 0.0, 0.0) + (12.57718032, 4.645176013, 3.605557343)
points['ffa_spawn1'] = (0.5006944438, 5.051501304, -5.79356326) + (6.626174027, 1.0, 0.3402012662)
points['ffa_spawn2'] = (0.5006944438, 5.051501304, -2.435321368) + (6.626174027, 1.0, 0.3402012662)
points['flag1'] = (-5.885814199, 5.112162255, -4.251754911)
points['flag2'] = (6.700855451, 5.10270501, -4.259912982)
points['flag_default'] = (0.3196701116, 5.110914413, -4.292515158)
boxes['map_bounds'] = (0.4528955042, 4.899663734, -3.543675157) + (0.0, 0.0, 0.0) + (23.54502348, 14.19991443, 12.08017448)
points['powerup_spawn1'] = (-2.645358507, 6.426340583, -4.226597191)
points['powerup_spawn2'] = (3.540102796, 6.549722855, -4.198476335)
points['shadow_lower_bottom'] = (5.580073911, 3.136491026, 5.341226521)
points['shadow_lower_top'] = (5.580073911, 4.321758709, 5.341226521)
points['shadow_upper_bottom'] = (5.274539479, 8.425373402, 5.341226521)
points['shadow_upper_top'] = (5.274539479, 11.93458162, 5.341226521)
points['spawn1'] = (-4.745706238, 5.051501304, -4.247934288) + (0.9186962739, 1.0, 0.5153189341)
points['spawn2'] = (5.838590388, 5.051501304, -4.259627405) + (0.9186962739, 1.0, 0.5153189341)
# dockerpty.
#
# Copyright 2014 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation, exec_create


def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None):
    """Present the PTY of *container* inside the current process.

    Thin convenience wrapper around ``PseudoTerminal`` with a ``RunOperation``.
    """
    op = RunOperation(
        client,
        container,
        interactive=interactive,
        stdout=stdout,
        stderr=stderr,
        stdin=stdin,
        logs=logs,
    )
    PseudoTerminal(client, op).start()


def exec_command(client, container, command, interactive=True, stdout=None, stderr=None, stdin=None):
    """Run *command* via the exec API in *container* and attach a PTY to it."""
    exec_id = exec_create(client, container, command, interactive=interactive)
    # Delegate to start_exec: identical ExecOperation + PseudoTerminal dance.
    start_exec(client, exec_id, interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin)


def start_exec(client, exec_id, interactive=True, stdout=None, stderr=None, stdin=None):
    """Attach a PTY to an already-created exec instance."""
    op = ExecOperation(
        client,
        exec_id,
        interactive=interactive,
        stdout=stdout,
        stderr=stderr,
        stdin=stdin,
    )
    PseudoTerminal(client, op).start()
# Lint as: python3
# Copyright 2020 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Private module implementing async_wrapped method for wrapping async tests.
This is a separate private module so that parameterized still optionally
supports Python 2 syntax.
"""<import_stmt>functools<import_stmt>inspect<def_stmt>async_wrapped func<block_start>@functools.wraps(func)<async_keyword><def_stmt>wrapper *args **kwargs<block_start><return><await>func(*args **kwargs)<block_end><return>wrapper<block_end><def_stmt>iscoroutinefunction func<block_start><return>inspect.iscoroutinefunction(func)<block_end> |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official,
# policies either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# r favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
__docformat__ = 'reStructuredText'

import logging
import datetime
import pytz
import sys
import grequests
# requests should be imported after grequests as requests imports ssl and grequests patches ssl
import requests
import pkg_resources

from volttron.platform.agent import utils
from volttron.platform.vip.agent import RPC
from volttron.platform.agent.utils import format_timestamp
from volttron.platform.agent.base_weather import BaseWeatherAgent
from volttron.platform import jsonapi

_log = logging.getLogger(__name__)
utils.setup_logging()
__version__ = "0.1"


def ambient(config_path, **kwargs):
    """
    Parses the Agent configuration and returns an instance of the agent created using that configuration.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: Ambient
    :rtype: Ambient
    """
    try:
        config = utils.load_config(config_path)
    except Exception:
        config = {}
    if not config:
        # BUG FIX: the original message was "Ambient agent configuration: ".format(config),
        # a format call with no placeholder that silently dropped the value.
        _log.error("Ambient agent configuration: {}".format(config))
    for key in ["api_key", "application_key"]:
        if not config.get(key) or not isinstance(config.get(key), str):
            raise RuntimeError("Ambient agent must be configured with '{}' key.".format(key))
    _log.debug("config_dict before init: {}".format(config))
    utils.update_kwargs_with_config(kwargs, config)
    return Ambient(**kwargs)


class Ambient(BaseWeatherAgent):
    """
    The Ambient agent requires having an API key to interact with the remote API. The agent offers a performance_mode
    configuration option which allows users to limit the amount of data returned by the API.
    """

    def __init__(self, application_key="", **kwargs):
        super(Ambient, self).__init__(**kwargs)
        _log.debug("vip_identity: " + self.core.identity)
        self.headers = {"Accept": "application/json", "Accept-Language": "en-US"}
        # Ambient only serves current observations; drop the inherited
        # historical/forecast RPC endpoints.
        self.remove_service("get_hourly_historical")
        self.remove_service("get_hourly_forecast")
        self.app_key = application_key
        # Timestamp of the last remote call, used to enforce Ambient's
        # 1-request-per-3-seconds rate limit in make_request.
        self.last_service_call_timestamp = None

    @RPC.export
    def get_version(self):
        """
        Provides the current version of the agent.

        :return: current version number in string format.
        """
        return __version__

    def validate_location(self, service_name, location):
        """
        Indicates whether the location dictionary provided matches the format required by the remote weather API

        :param service_name: name of the remote API service
        :param location: location dictionary to provide in the remote API url
        :return: True if the location matches the required format else False
        """
        return isinstance(location.get("location", None), str)

    def get_update_interval(self, service_name):
        """
        Indicates the interval between remote API updates

        :param service_name: requested service endpoint
        :return: datetime timedelta representing the time interval
        """
        if service_name == "get_current_weather":
            return datetime.timedelta(minutes=5)
        else:
            return None

    def get_api_description(self, service_name):
        """
        Provides a human-readable description of the various endpoints provided by the agent

        :param service_name: requested service endpoint
        :return: Human-readable description string
        """
        if service_name == "get_current_weather":
            # BUG FIX: the original compared with 'is' (identity, not equality)
            # and evaluated the description string without returning it, so the
            # method always returned None despite the documented :return:.
            return ("Provides current weather observations for locations by their corresponding Ambient weather "
                    "station name via RPC (Requires {'location': <station location string>})")
        else:
            raise RuntimeError("Service {} is not implemented by Ambient.".format(service_name))

    def get_point_name_defs_file(self):
        """
        Constructs the point name mapping dict from the mapping csv.

        :return: dictionary containing a mapping of service point names to standard point names with optional
        """
        # returning resource file instead of stream, as csv.DictReader require file path or file like object opened in
        # text mode.
        return pkg_resources.resource_filename(__name__, "data/name_mapping.csv")

    def query_current_weather(self, location):
        """
        Retrieve data from the Ambient API, return formatted current data and store forecast data in cache

        :param location: location dictionary requested by the user
        :return: Timestamp and data for current data from the Ambient API
        """
        ambient_response = self.make_request()
        location_response = None
        current_time = None
        for record in ambient_response:
            record_location = None
            record_info = record.pop("info")
            if record_info:
                record_location = record_info.get("location", "")
            if record_location:
                weather_data = record.get("lastData", {})
                weather_data["macAddress"] = record.pop("macAddress", "")
                weather_data["name"] = record_info.get("name", "")
                # "date": "2019-04-25T17:09:00.000Z"
                weather_tz_string = weather_data.get('tz', None)
                if weather_tz_string:
                    weather_tz = pytz.timezone(weather_tz_string)
                else:
                    weather_tz = pytz.utc
                weather_date = datetime.datetime.strptime(
                    weather_data.pop("date"), "%Y-%m-%dT%H:%M:%S.%fZ").astimezone(weather_tz)
                if location["location"] == record_location:
                    # This is the station the caller asked about.
                    current_time = format_timestamp(weather_date)
                    location_response = weather_data
                else:
                    # Cache observations from the user's other stations.
                    weather_data = self.apply_mapping(weather_data)
                    self.store_weather_records(
                        "get_current_weather",
                        [jsonapi.dumps({"location": record_location}),
                         weather_date,
                         jsonapi.dumps(weather_data)])
            else:
                raise RuntimeError("API record contained improper 'info' format")
        return current_time, location_response

    def query_forecast_service(self, service, location, quantity, forecast_start):
        """
        Unimplemented method stub

        :param service: forecast service type of weather data to return
        :param location: location dictionary requested during the RPC call
        :param quantity: number of records to return, used to generate Time Machine requests after the forecast request
        :param forecast_start: forecast results that are prior to this timestamp will be filtered by base weather agent
        :return: Timestamp and data returned by the Ambient weather API response
        """
        raise NotImplementedError

    def make_request(self):
        """
        Request data from the Ambient Weather API

        An example of the return value is as follows

        [
            {
                "macAddress": "18:93:D7:3B:89:0C",
                "lastData": {
                    "dateutc": 1556212140000,
                    "tempinf": 71.9,
                    "humidityin": 31,
                    "battout": "1",
                    "temp1f": 68.7,
                    "humidity1": 36,
                    "batt1": "1",
                    "date": "2019-04-25T17:09:00.000Z"
                },
                "info": {
                    "name": "Home B WS",
                    "location": "Lab Home B"
                }
            },
            {
                "macAddress": "50:F1:4A:F7:3C:C4",
                "lastData": {
                    "dateutc": 1556211960000,
                    "tempinf": 82.5,
                    "humidityin": 27,
                    "battout": "1",
                    "temp1f": 68.5,
                    "humidity1": 42,
                    "batt1": "1",
                    "date": "2019-04-25T17:06:00.000Z"
                },
                "info": {
                    "name": "Home A WS",
                    "location": "Lab Home A"
                }
            }
        ]

        :return: parsed JSON response from the Ambient API
        """
        # AuthenticationTwo API Keys are required for all REST API requests:applicationKey - identifies the
        # developer / application. To request an application key please email <EMAIL>apiKey -
        # grants access to past/present data for a given user's devices. A typical consumer-facing application will
        # initially ask the user to create an apiKey on their Ambient.net account page
        # (https://dashboard.ambientweather.net/account) and paste it into the app. Developers for personal or
        # in-house apps will also need to create an apiKey on their own account page.
        # Rate LimitingAPI requests are capped at 1 request/second for each user's apiKey and 3 requests/second
        # per applicationKey. When this limit is exceeded, the API will return a 429 response code.
        # Please be kind to our servers :)
        # If the previous call to the API was at least 3 seconds ago - this is a constraint set by Ambient
        if not self.last_service_call_timestamp or \
                (datetime.datetime.now() - self.last_service_call_timestamp).total_seconds() > 3:
            url = 'https://api.ambientweather.net/v1/devices?applicationKey=' + self.app_key + \
                  '&apiKey=' + self._api_key
            _log.info("requesting url: {}".format(url))
            grequest = [grequests.get(url, verify=requests.certs.where(), headers=self.headers, timeout=30)]
            gresponse = grequests.map(grequest)[0]
            if gresponse is None:
                raise RuntimeError("get request did not return any response")
            try:
                response = jsonapi.loads(gresponse.content)
                self.last_service_call_timestamp = datetime.datetime.now()
                return response
            except ValueError:
                # Non-JSON body: record the attempt, then raise a descriptive error.
                self.last_service_call_timestamp = datetime.datetime.now()
                self.generate_response_error(url, gresponse.status_code)
        else:
            raise RuntimeError("Previous API call to Ambient service is too recent, please wait at least 3 seconds "
                               "between API calls.")

    def query_hourly_forecast(self, location):
        """
        Unimplemented method stub

        :param location: currently accepts lat/long location dictionary format only
        :return: time of forecast prediction as a timestamp string, and a list of
        """
        raise NotImplementedError

    def query_hourly_historical(self, location, start_date, end_date):
        """
        Unimplemented method stub

        :param location: no format currently determined for history.
        :param start_date: Starting date for historical weather period.
        :param end_date: Ending date for historical weather period.
        :return: NotImplementedError
        """
        raise NotImplementedError

    def generate_response_error(self, url, response_code):
        """
        Raises a descriptive runtime error based on the response code returned by a service.

        :param url: actual url used for requesting data from Ambient
        :param response_code: Http response code returned by a service following a request
        """
        code_x100 = int(response_code / 100)
        if code_x100 == 2:
            raise RuntimeError("Remote API returned no data(code:{}, url:{})".format(response_code, url))
        elif code_x100 == 3:
            raise RuntimeError(
                "Remote API redirected request, but redirect failed (code:{}, url:{})".format(response_code, url))
        elif code_x100 == 4:
            raise RuntimeError(
                "Request ({}) rejected by remote API: Remote API returned Code {}".format(url, response_code))
        elif code_x100 == 5:
            raise RuntimeError(
                "Remote API returned invalid response (code:{}, url:{})".format(response_code, url))
        else:
            raise RuntimeError(
                "API request failed with unexpected response code (code:{}, url:{})".format(response_code, url))


def main():
    """Main method called to start the agent."""
    utils.vip_main(ambient, version=__version__)


if __name__ == '__main__':
    # Entry point for script
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass
"""Execute validated & constructed query on device.
Accepts input from front end application, validates the input and
returns errors if input is invalid. Passes validated parameters to
construct.py, which is used to build & run the Netmiko connections or
hyperglass-frr API calls, returns the output back to the front end.
"""<line_sep># Standard Library
<import_from_stmt>ssl CertificateError<import_from_stmt>typing Iterable<line_sep># Third Party
<import_stmt>httpx<line_sep># Project
<import_from_stmt>hyperglass.log log<import_from_stmt>hyperglass.util parse_exception<import_from_stmt>hyperglass.encode jwt_decode jwt_encode<import_from_stmt>hyperglass.exceptions RestError ResponseEmpty<import_from_stmt>hyperglass.configuration params<line_sep># Local
from ._common import Connection


class AgentConnection(Connection):
    """Connect to target device via hyperglass-agent."""

    async def collect(self) -> Iterable:  # noqa: C901
        """Connect to a device running hyperglass-agent via HTTP.

        Sends each query in ``self.query`` as a JWT-encoded POST to the
        agent's /query/ endpoint and returns a tuple of decoded responses.
        Raises RestError on transport/SSL failures and ResponseEmpty when the
        agent returns HTTP 204.
        """
        log.debug("Query parameters: {}", self.query)

        client_params = {
            "headers": {"Content-Type": "application/json"},
            "timeout": params.request_timeout,
        }
        # When SSL is enabled for the device, verify against its imported cert.
        if self.device.ssl is not None and self.device.ssl.enable:
            with self.device.ssl.cert.open("r") as file:
                cert = file.read()
                # An empty cert file means the cert was never imported.
                if not cert:
                    raise RestError(
                        "SSL Certificate for device {d} has not been imported",
                        level="danger",
                        d=self.device.name,
                    )
            http_protocol = "https"
            client_params.update({"verify": str(self.device.ssl.cert)})
            log.debug(
                (f"Using {str(self.device.ssl.cert)} to validate connection "
                 f"to {self.device.name}"))
        else:
            http_protocol = "http"
        endpoint = "{protocol}://{address}:{port}/query/".format(
            protocol=http_protocol, address=self.device._target, port=self.device.port)
        log.debug("URL endpoint: {}", endpoint)
        try:
            async with httpx.AsyncClient(**client_params) as http_client:
                responses = ()
                for query in self.query:
                    # Sign the query as a short-lived JWT shared-secret token.
                    encoded_query = await jwt_encode(
                        payload=query,
                        secret=self.device.credential.password.get_secret_value(),
                        duration=params.request_timeout,
                    )
                    log.debug("Encoded JWT: {}", encoded_query)
                    raw_response = await http_client.post(endpoint, json={"encoded": encoded_query})
                    log.debug("HTTP status code: {}", raw_response.status_code)
                    raw = raw_response.text
                    log.debug("Raw Response:\n{}", raw)
                    if raw_response.status_code == 200:
                        # Successful responses are themselves JWT-encoded.
                        decoded = await jwt_decode(
                            payload=raw_response.json()["encoded"],
                            secret=self.device.credential.password.get_secret_value(),
                        )
                        log.debug("Decoded Response:\n{}", decoded)
                        responses += (decoded,)
                    elif raw_response.status_code == 204:
                        # 204: the agent ran the query but produced no output.
                        raise ResponseEmpty(
                            params.messages.no_output, device_name=self.device.name,
                        )
                    else:
                        # Other statuses are logged here and rejected below.
                        log.error(raw_response.text)
        # NOTE(review): `httpx.exceptions.HTTPError` is the pre-0.14 httpx
        # namespace; newer releases expose `httpx.HTTPError` — depends on the
        # pinned httpx version, confirm before upgrading.
        except httpx.exceptions.HTTPError as rest_error:
            msg = parse_exception(rest_error)
            log.error("Error connecting to device {}: {}", self.device.name, msg)
            raise RestError(
                params.messages.connection_error,
                device_name=self.device.name,
                error=msg,
            )
        except OSError as ose:
            log.critical(str(ose))
            raise RestError(
                params.messages.connection_error,
                device_name=self.device.name,
                error="System error",
            )
        except CertificateError as cert_error:
            log.critical(str(cert_error))
            msg = parse_exception(cert_error)
            raise RestError(
                params.messages.connection_error,
                device_name=self.device.name,
                error=f"{msg}: {cert_error}",
            )
        # NOTE(review): if self.query is empty, `raw_response` is never bound
        # and this check raises NameError — presumably callers always pass at
        # least one query; verify upstream.
        if raw_response.status_code != 200:
            log.error("Response code is {}", raw_response.status_code)
            raise RestError(
                params.messages.connection_error,
                device_name=self.device.name,
                error=params.messages.general,
            )
        if not responses:
            log.error("No response from device {}", self.device.name)
            raise RestError(
                params.messages.connection_error,
                device_name=self.device.name,
                error=params.messages.no_response,
            )
        return responses
"""
Demonstrates the hover functionality of mpldatacursor as well as point labels
and a custom formatting function. Notice that overlapping points have both
labels displayed.
"""
import string

import matplotlib.pyplot as plt
import numpy as np
from mpldatacursor import datacursor

# Fixed seed so the demo scatter is reproducible.
np.random.seed(1977)


def _format_point(**kwargs):
    # Show only the point label and allow nicer formatting if points overlap.
    return ', '.join(kwargs['point_label'])


x, y = np.random.random((2, 26))
labels = string.ascii_lowercase

fig, ax = plt.subplots()
ax.scatter(x, y, s=200)
ax.set_title('Mouse over a point')

datacursor(hover=True, formatter=_format_point, point_labels=labels)

plt.show()
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from tapas.models.bert import modeling
from tapas.utils import attention_utils
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()


class AttentionUtilsTest(tf.test.TestCase):
    """Tests for the sparse / bucketed attention helpers in attention_utils."""

    def test_matches_token_type_id(self):
        # _matches_token_type_id yields, per batch element, a (seq, seq) 0/1
        # matrix marking token pairs that share the same type id.
        with self.cached_session() as sess:
            row_ids = sess.run(tf.constant([[1, 2, 2], [5, 5, 6], [1, 2, 3], [4, 5, 6]]))
            result = attention_utils._matches_token_type_id(row_ids)
            expected_result = sess.run(
                tf.constant([
                    [[1, 0, 0], [0, 1, 1], [0, 1, 1]],
                    [[1, 1, 0], [1, 1, 0], [0, 0, 1]],
                    [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                    [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                ]))
            self.assertAllEqual(result, expected_result)

    def test_comput_bucket_id(self):
        # Bucket ids for a 2-token header followed by a 3-column table;
        # padding tokens are assigned to the trailing buckets.
        with self.cached_session() as sess:
            column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
            input_mask = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
            bucket_ids = tf.constant([[0, 0, 0, 1, 2, 1, 2, 3, 1, 2, 3, 3, 4, 4, 4]])
            result = sess.run(
                attention_utils._compute_bucket_id(
                    bucket_size=3,
                    header_size=3,
                    token_type_id=column_ids,
                    input_mask=input_mask))
            expected_result = sess.run(bucket_ids)
            self.assertAllEqual(result, expected_result)

    def test_comput_bucket_id_on_distinct_columns(self):
        with self.cached_session() as sess:
            # When bucket size is 1 and columns_ids are a permutation of 0..n-1 then
            # the bucket_ids will match the column_ids
            column_ids = tf.constant([[0, 2, 3, 1, 5, 4]])
            input_mask = tf.constant([[1, 1, 1, 1, 1, 1]])
            bucket_ids = tf.constant([[0, 2, 3, 1, 5, 4]])
            result = sess.run(
                attention_utils._compute_bucket_id(
                    bucket_size=1,
                    header_size=1,
                    token_type_id=column_ids,
                    input_mask=input_mask))
            expected_result = sess.run(bucket_ids)
            self.assertAllEqual(result, expected_result)

    def test_comput_bucket_id_with_header(self):
        with self.cached_session() as sess:
            # Similar to the distinct column test, but now we have two header tokens
            column_ids = tf.constant([[0, 2, 3, 1, 5, 4]])
            input_mask = tf.constant([[1, 1, 1, 1, 1, 1]])
            bucket_ids = tf.constant([[0, 1, 2, 0, 4, 3]])
            result = sess.run(
                attention_utils._compute_bucket_id(
                    bucket_size=1,
                    header_size=2,
                    token_type_id=column_ids,
                    input_mask=input_mask))
            expected_result = sess.run(bucket_ids)
            self.assertAllEqual(result, expected_result)

    def test_compute_headwise_sparse_attention_mask(self):
        with self.cached_session() as sess:
            # Table Structure
            # Q1 Q2 A0 B0 C0
            #       A1 B1 C1
            #       A2 B2 C2 PAD1 PAD2 PAD3 PAD4
            row_ids = tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0]])
            input_mask = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
            segment_ids = tf.constant([[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
            column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
            result = sess.run(
                attention_utils.compute_headwise_sparse_attention_mask(
                    num_row_heads=2,
                    num_column_heads=3,
                    bucket_size=0,
                    header_size=None,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    column_ids=column_ids,
                    row_ids=row_ids))[0]
            # Row heads: header tokens attend everywhere; cell tokens attend to
            # the header and to their own row.
            #                      Q1 Q2 A0 A1 A2 B0 B1 B2 C0 C1 C2 PADDING
            expected_row_result = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # Q1
                                   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # Q2
                                   [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # A0
                                   [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # A1
                                   [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # A2
                                   [1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],  # B0
                                   [1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],  # B1
                                   [1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],  # B2
                                   [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],  # C0
                                   [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],  # C1
                                   [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],  # C2
                                   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD1
                                   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD2
                                   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD3
                                   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD4
                                   ]
            # Column heads: cell tokens attend to the header and to their own
            # column.
            #                         Q1 Q2 A0 A1 A2 B0 B1 B2 C0 C1 C2 PADDING
            expected_column_result = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # Q1
                                      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # Q2
                                      [1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],  # A0
                                      [1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],  # A1
                                      [1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],  # A2
                                      [1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],  # B0
                                      [1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],  # B1
                                      [1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],  # B2
                                      [1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],  # C0
                                      [1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],  # C1
                                      [1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],  # C2
                                      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD1
                                      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD2
                                      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD3
                                      [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD4
                                      ]
            # First 2 heads are row heads, the remaining 3 are column heads.
            expected_result = sess.run(
                tf.constant([expected_row_result] * 2 + [expected_column_result] * 3))
            self.assertAllEqual(result, expected_result)

            # With bucket size 3 no extra attention should be pruned
            result = sess.run(
                attention_utils.compute_headwise_sparse_attention_mask(
                    num_row_heads=2,
                    num_column_heads=3,
                    bucket_size=3,
                    header_size=None,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    column_ids=column_ids,
                    row_ids=row_ids))[0]
            # The attention of the padding tokens changes but it has no impact
            # NOTE(review): this compares a single column (index -4), not the
            # non-padding slice ([:, :, :-4]) — confirm this is intended.
            self.assertAllEqual(result[:, :, -4], expected_result[:, :, -4])

    def test_compute_sparse_attention_mask(self):
        with self.cached_session() as sess:
            # Table Structure
            # Q1 Q2 A0 B0 C0
            #       A1 B1 C1
            #       A2 B2 C2 PAD1 PAD2 PAD3 PAD4
            row_ids = tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0]])
            input_mask = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
            segment_ids = tf.constant([[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]])
            column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
            result = sess.run(
                attention_utils.compute_sparse_attention_mask(
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    column_ids=column_ids,
                    row_ids=row_ids))[0]
            # Shared mask: a cell attends to the header, its row and its column.
            expected_result = sess.run(
                #            Q1 Q2 A0 A1 A2 B0 B1 B2 C0 C1 C2 PADDING
                tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # Q1
                             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # Q2
                             [1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],  # A0
                             [1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],  # A1
                             [1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],  # A2
                             [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],  # B0
                             [1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0],  # B1
                             [1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0],  # B2
                             [1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0],  # C0
                             [1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0],  # C1
                             [1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],  # C2
                             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD1
                             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD2
                             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD3
                             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],  # PAD4
                             ]))
            self.assertAllEqual(result, expected_result)

    def compare_efficient_and_vanilla_sparse_attention(self, sess, row_ids, column_ids,
                                                       embedding_dim, num_row_heads,
                                                       num_column_heads, bucket_size,
                                                       header_size, seq_length, input_size,
                                                       first_segment_size, rows_sorted):
        """Runs vanilla masked attention and the bucketed implementation (with both
        sort strategies) on the same random input and checks all three agree on
        the non-padding positions."""
        tf.random.set_random_seed(42)
        num_attention_heads = num_row_heads + num_column_heads
        pad_size = seq_length - input_size
        second_segment_size = input_size - first_segment_size
        input_mask = tf.constant([[1] * input_size + [0] * pad_size])
        segment_ids = tf.constant(
            [[0] * first_segment_size + [1] * second_segment_size + [0] * pad_size])
        input_tensor = tf.random.normal(shape=[1, seq_length, 128])
        attention_mask = attention_utils.compute_headwise_sparse_attention_mask(
            num_row_heads=num_row_heads,
            num_column_heads=num_column_heads,
            bucket_size=bucket_size,
            header_size=header_size,
            input_mask=input_mask,
            segment_ids=segment_ids,
            column_ids=column_ids,
            row_ids=row_ids)
        # Reference: dense attention with the headwise sparse mask applied.
        expected_result_op = modeling.attention_layer(
            input_tensor,
            input_tensor,
            attention_mask=attention_mask,
            num_attention_heads=num_attention_heads,
            size_per_head=embedding_dim)[0][0, :input_size]
        result_ops = []
        for sort_after_projection in [True, False]:
            attention_layer = attention_utils.create_bucketed_attention_layer(
                input_mask=input_mask,
                input_header=tf.math.equal(segment_ids, 0),
                bucket_size=bucket_size,
                header_size=header_size,
                sort_after_projection=sort_after_projection,
                token_type_ids=[(num_row_heads, rows_sorted, row_ids),
                                (num_column_heads, False, column_ids)])
            result_ops.append(
                attention_layer(
                    input_tensor,
                    input_tensor,
                    num_attention_heads=num_attention_heads,
                    size_per_head=embedding_dim)[0][0, :input_size])
        sess.run(tf.global_variables_initializer())
        expected_result, result1, result2 = sess.run([expected_result_op] + result_ops)
        self.assertAllClose(result1, expected_result)
        self.assertAllClose(result2, expected_result)

    def test_efficient_sparse_attention_matches_vanilla_version(self):
        # Tests that computing bucketed and non-bucketed attention for random
        # embeddings produces the same result.
        with self.cached_session() as sess:
            # Table Structure
            # Q1 Q2 A0 B0 C0
            #       A1 B1 C1
            #       A2 B2 C2 PAD1 PAD2 PAD3 PAD4
            row_ids = tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0]])
            column_ids = tf.constant([[0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 0, 0, 0]])
            self.compare_efficient_and_vanilla_sparse_attention(
                sess,
                row_ids=row_ids,
                column_ids=column_ids,
                embedding_dim=32,
                num_row_heads=2,
                num_column_heads=4,
                bucket_size=3,
                header_size=3,
                seq_length=15,
                input_size=11,
                first_segment_size=2,
                rows_sorted=True)

    def test_efficient_sparse_attention_random_ids_matches_vanilla_version(self):
        # Tests that computing bucketed and non-bucketed attention for random
        # attributes not mapping to real columns yield the same results.
        with self.cached_session() as sess:
            seq_length = 14
            row_ids = tf.random.uniform(shape=[1, seq_length], maxval=20, dtype=tf.int32)
            column_ids = tf.random.uniform(shape=[1, seq_length], maxval=20, dtype=tf.int32)
            self.compare_efficient_and_vanilla_sparse_attention(
                sess,
                row_ids=row_ids,
                column_ids=column_ids,
                embedding_dim=16,
                num_row_heads=5,
                num_column_heads=1,
                bucket_size=2,
                header_size=4,
                seq_length=seq_length,
                input_size=11,
                first_segment_size=2,
                rows_sorted=False)


if __name__ == "__main__":
    tf.test.main()
<import_stmt>numpy<as>np<line_sep># import matplotlib.pyplot as plt
<import_from_stmt>scipy.cluster.vq kmeans<line_sep># def plothist(x):
# vmin = x.min()-1
# vmax = x.max()+1
# bins = np.arange(vmin, vmax, (vmax - vmin)/50)
# plt.hist(x, bins=bins)
# plt.show()
# def scatterpred(pred):
# plt.scatter(pred[:,0], pred[:,1])
# plt.show()
# def scatter_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 8)
# plt.scatter(c[:,0], c[:,1], color='r')
# plt.show()
<def_stmt>most_assigned x c<block_start>nb_c=len(c)<line_sep>assign=np.zeros(nb_c)<for_stmt>i range(len(x))<block_start>y=x[i].reshape((1 2))<line_sep>d=np.sqrt(np.sum(np.power(y.repeat(nb_c axis=0)-c 2) axis=1))<line_sep>assign[d.argmin()]<augadd>1<block_end><return>assign.argmax()<block_end><def_stmt>mean_on_most_assigned x c<block_start>nb_c=len(c)<line_sep>assign=np.zeros(nb_c)<line_sep>mean=np.zeros(c.shape)<for_stmt>i range(len(x))<block_start>y=x[i].reshape((1 2))<line_sep>d=np.sqrt(np.sum(np.power(y.repeat(nb_c axis=0)-c 2) axis=1))<line_sep>idx=d.argmin()<line_sep>assign[idx]<augadd>1<line_sep>mean[idx :]<augadd>x[i]<block_end>idx=assign.argmax()<line_sep><return>mean[idx :]/assign[idx]<block_end># def best_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 3)
# plt.scatter(c[:,0], c[:,1], color='g')
# n = most_assigned(pred, c)
# plt.scatter(c[n,0], c[n,1], color='r')
# plt.show()
<def_stmt>clustering_joints y_pred k=3<block_start>_,nb_spl,nb_joints,dim=y_pred.shape<line_sep>y=np.zeros((nb_spl nb_joints dim))<for_stmt>s range(nb_spl)<block_start><for_stmt>j range(nb_joints)<block_start>d=y_pred[: s j]<line_sep>c,v=kmeans(d k)<line_sep>n=most_assigned(d c)<line_sep>y[s j :]=c[n]<block_end><block_end><return>y<block_end><def_stmt>clustering_grid y_pred size=10<block_start>_,nb_spl,nb_joints,dim=y_pred.shape<assert_stmt>dim<eq>2<line_sep>yp=np.zeros((nb_spl nb_joints dim))<for_stmt>s range(nb_spl)<block_start><for_stmt>j range(nb_joints)<block_start>d=y_pred[: s j :]<line_sep>xmin=d[: 0].min()<line_sep>ymin=d[: 1].min()<line_sep>xmax=d[: 0].max()<line_sep>ymax=d[: 1].max()<line_sep>xstep=(xmax-xmin)/size<line_sep>ystep=(ymax-ymin)/size<line_sep>c=np.zeros((size<times>size dim))<for_stmt>x range(size)<block_start><for_stmt>y range(size)<block_start>c[x+size<times>y 0]=xmin+(x+0.5)<times>xstep<line_sep>c[x+size<times>y 1]=ymin+(y+0.5)<times>ystep<block_end><block_end>yp[s j :]=mean_on_most_assigned(d c)<block_end><block_end><return>yp<block_end><def_stmt>mean_joints y_pred<block_start>_,nb_spl,dim,nb_joints=y_pred.shape<assert_stmt>dim<eq>2<line_sep>yp=np.zeros((nb_spl dim nb_joints))<for_stmt>s range(nb_spl)<block_start><for_stmt>j range(nb_joints)<block_start>d=y_pred[: s : j]<line_sep>yp[s 0 j]=d[: 0].mean()<line_sep>yp[s 1 j]=d[: 1].mean()<block_end><block_end><return>yp<block_end> |
def find_metric_transformation_by_name(metric_transformations, metric_name):
    """Return the first transformation whose "metricName" equals metric_name, else None."""
    for metric in metric_transformations:
        if metric["metricName"] == metric_name:
            return metric
    return None


def find_metric_transformation_by_namespace(metric_transformations, metric_namespace):
    """Return the first transformation whose "metricNamespace" equals metric_namespace, else None."""
    for metric in metric_transformations:
        if metric["metricNamespace"] == metric_namespace:
            return metric
    return None


class MetricFilters:
    """In-memory collection of CloudWatch Logs metric filters."""

    def __init__(self):
        # Each entry is a dict with the keys added by add_filter below.
        self.metric_filters = []

    def add_filter(self, filter_name, filter_pattern, log_group_name, metric_transformations):
        """Register a new metric filter."""
        self.metric_filters.append(
            {
                "filterName": filter_name,
                "filterPattern": filter_pattern,
                "logGroupName": log_group_name,
                "metricTransformations": metric_transformations,
            }
        )

    def get_matching_filters(
        self, prefix=None, log_group_name=None, metric_name=None, metric_namespace=None
    ):
        """Return filters matching all provided criteria; None means "match anything"."""
        result = []
        for f in self.metric_filters:
            prefix_matches = prefix is None or f["filterName"].startswith(prefix)
            log_group_matches = (
                log_group_name is None or f["logGroupName"] == log_group_name
            )
            metric_name_matches = (
                metric_name is None
                or find_metric_transformation_by_name(
                    f["metricTransformations"], metric_name
                )
            )
            namespace_matches = (
                metric_namespace is None
                or find_metric_transformation_by_namespace(
                    f["metricTransformations"], metric_namespace
                )
            )
            if (
                prefix_matches
                and log_group_matches
                and metric_name_matches
                and namespace_matches
            ):
                result.append(f)
        return result

    def delete_filter(self, filter_name=None, log_group_name=None):
        """Remove every filter matching filter_name/log_group_name; return the remaining list.

        Bug fix: the original called ``remove`` on ``self.metric_filters``
        while iterating over it, which skips the element following each
        removal.  Rebuilding through a slice assignment removes all matches
        and preserves the list object's identity for any external references.
        """
        self.metric_filters[:] = [
            f
            for f in self.metric_filters
            if not (
                f["filterName"] == filter_name
                and f["logGroupName"] == log_group_name
            )
        ]
        return self.metric_filters
# encoding=utf-8
<import_from_stmt>airtest.core.win Windows<import_stmt>unittest<import_stmt>numpy<import_stmt>time<import_from_stmt>testconf try_remove<line_sep>SNAPSHOT="win_snapshot.png"<class_stmt>TestWin(unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>w=Windows()<line_sep>w.start_app("calc")<line_sep>time.sleep(1)<line_sep>cls.windows=Windows(title_re=".*计算器.*".decode("utf-8"))<block_end><def_stmt>test_snapshot self<block_start>try_remove(SNAPSHOT)<line_sep>result=self.windows.snapshot(filename=SNAPSHOT)<line_sep>self.assertIsInstance(result numpy.ndarray)<line_sep>try_remove(SNAPSHOT)<block_end><def_stmt>test_touch self<block_start>self.windows.touch((11 11))<block_end><def_stmt>test_swipe self<block_start>self.windows.swipe((11 11) (100 100))<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>cls.windows.app.kill()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
<import_stmt>unittest<import_stmt>torch<import_from_stmt>torchvision.models.resnet BasicBlock Bottleneck<import_from_stmt>nuscenes.prediction.models.backbone ResNetBackbone MobileNetBackbone<class_stmt>TestBackBones(unittest.TestCase)<block_start><def_stmt>count_layers self model<block_start><if_stmt>isinstance(model[4][0] BasicBlock)<block_start>n_convs=2<block_end><elif_stmt>isinstance(model[4][0] Bottleneck)<block_start>n_convs=3<block_end><else_stmt><block_start><raise>ValueError("Backbone layer block not supported!")<block_end><return>sum([len(model[i])<for>i range(4 8)])<times>n_convs+2<block_end><def_stmt>test_resnet self<block_start>rn_18=ResNetBackbone('resnet18')<line_sep>rn_34=ResNetBackbone('resnet34')<line_sep>rn_50=ResNetBackbone('resnet50')<line_sep>rn_101=ResNetBackbone('resnet101')<line_sep>rn_152=ResNetBackbone('resnet152')<line_sep>tensor=torch.ones((1 3 100 100))<line_sep>self.assertEqual(rn_18(tensor).shape[1] 512)<line_sep>self.assertEqual(rn_34(tensor).shape[1] 512)<line_sep>self.assertEqual(rn_50(tensor).shape[1] 2048)<line_sep>self.assertEqual(rn_101(tensor).shape[1] 2048)<line_sep>self.assertAlmostEqual(rn_152(tensor).shape[1] 2048)<line_sep>self.assertEqual(self.count_layers(list(rn_18.backbone.children())) 18)<line_sep>self.assertEqual(self.count_layers(list(rn_34.backbone.children())) 34)<line_sep>self.assertEqual(self.count_layers(list(rn_50.backbone.children())) 50)<line_sep>self.assertEqual(self.count_layers(list(rn_101.backbone.children())) 101)<line_sep>self.assertEqual(self.count_layers(list(rn_152.backbone.children())) 152)<with_stmt>self.assertRaises(ValueError)<block_start>ResNetBackbone('resnet51')<block_end><block_end><def_stmt>test_mobilenet self<block_start>mobilenet=MobileNetBackbone('mobilenet_v2')<line_sep>tensor=torch.ones((1 3 100 100))<line_sep>self.assertEqual(mobilenet(tensor).shape[1] 1280)<block_end><block_end> |
# Copyright (c) 2017-2021, <NAME>. All rights reserved.
# For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
import time

import pytest

from harness import log
from harness.dom_parser_utils import *
from harness.interface.defs import key_codes
from bt_fixtures import *


@pytest.mark.rt1051
@pytest.mark.usefixtures("bt_all_devices")
@pytest.mark.usefixtures("bt_reset")
@pytest.mark.usefixtures("bt_main_window")
@pytest.mark.usefixtures("phone_in_desktop")
@pytest.mark.usefixtures("phone_unlocked")
@pytest.mark.skipif("not config.getvalue('--bt_device')", reason='--bt_device was not specified')
def test_bt_pairing_hmi(harness, bt_device):
    """Pair with ``bt_device`` through the UI.

    Scans (with retries) until the device appears in the all-devices list,
    navigates down to its list entry and verifies that it receives focus.
    """
    if not bt_device:
        return
    bt_device_name = bt_device

    current_window_content = get_window_content(harness, 1)
    is_device_in_history = item_contains_recursively(
        current_window_content, 'TextValue', bt_device_name)
    if not is_device_in_history:
        log.info("Device {} not in all devices history, scanning...".format(bt_device_name))
        harness.connection.send_key_code(key_codes["left"])
        max_try_count = 5
        for _ in range(max_try_count):
            time.sleep(2)
            current_window_content = get_window_content(harness, 1)
            is_device_in_history = item_contains_recursively(
                current_window_content, 'TextValue', bt_device_name)
            if is_device_in_history:
                break
            log.info("Device {} not found, retrying...".format(bt_device_name))
        # Bug fix: the original asserted ``max_try_count``, which is never
        # decremented (the loop variable is ``_``), so the assert could never
        # fail even when the device was not found.  Assert the scan outcome.
        assert is_device_in_history

    current_window_content = get_window_content(harness, 1)
    parent_of_list_items = find_parent(current_window_content, 'ListItem')
    steps_to_navigate_down = get_child_number_that_contains_recursively(
        parent_of_list_items, [('TextValue', bt_device_name)])
    assert steps_to_navigate_down > -1
    log.info("Navigating to the {} device, {} down".format(bt_device_name, steps_to_navigate_down))
    for _ in range(steps_to_navigate_down):
        harness.connection.send_key_code(key_codes["down"])

    log.info("Checking if device {} is focused...".format(bt_device_name))
    current_window_content = get_window_content(harness, 1)
    parent_of_list_items = find_parent(current_window_content, 'ListItem')
    assert item_has_child_that_contains_recursively(
        parent_of_list_items, [('TextValue', bt_device_name), ('Focus', True)])
import numpy as np
import tensorflow as tf
import unittest

from xcenternet.model.evaluation.overlap import compute_overlap
from xcenternet.model.evaluation.mean_average_precision import MAP


class TestMeanAveragePrecision(unittest.TestCase):
    """Tests for pairwise IoU computation and the MAP (mean average precision) metric."""

    def setUp(self):
        # Ground-truth boxes for two images, three boxes each: (x1, y1, x2, y2).
        self.map_bboxes = np.array(
            [
                [[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]],
                [[0, 0, 10, 10], [20, 20, 40, 90], [80, 20, 100, 50]],
            ],
            dtype=np.float64,
        )
        # Class label of each ground-truth box.
        self.map_labels = np.array([[0, 0, 1], [0, 0, 0]])
        # Predictions per image: (x1, y1, x2, y2, score, class).
        self.map_predictions = np.array(
            [
                [
                    [10, 40, 40, 90, 0.1, 0],  # overlap 1.00 with bbox #2, low prob
                    [60, 10, 90, 60, 0.5, 0],  # overlap 0.29 with bbox #1
                    [10, 30, 50, 90, 0.7, 0],  # overlap 0.625 with bbox #2
                    [0, 0, 100, 90, 0.7, 1],  # overlap 0.9 with bbox #3
                    [0, 0, 100, 80, 0.7, 1],  # overlap 0.8 with bbox #3
                ],
                [
                    [20, 20, 30, 50, 0.6, 0],  # 0.21 overlap with #2
                    [2, 0, 10, 11, 0.8, 0],  # overlap with #1
                    [0, 2, 14, 10, 0.9, 0],  # overlap with #1
                    [0, 0, 10, 10, 0.7, 1],  # no ground truth for 1
                    [80, 20, 100, 50, 0.1, 1],  # no ground truth for 1
                ],
            ],
            dtype=np.float32,
        )
        # Validity mask for the batched update (all ground truths valid here).
        self.map_masks = np.array([[1, 1, 1], [1, 1, 1]], dtype=np.float32)
        # Expected metric values after processing image 0 only / both images.
        self.result_1 = {"overall": 3 / 4, "weighted": 2 / 3, "per_class": {0: (0.5, 2), 1: (1.0, 1)}}
        self.result_both = {"overall": 2 / 3, "weighted": 4 / 9, "per_class": {0: (1 / 3, 5), 1: (1.0, 1)}}

    def test_compute_overlap(self):
        """compute_overlap returns the pairwise IoU matrix between two box sets."""
        boxes1 = np.array([[10, 10, 30, 50], [10, 10, 30, 30]], dtype=np.float64)
        boxes2 = np.array([[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]], dtype=np.float64)
        overlap = compute_overlap(boxes1, boxes2)
        self.assertAlmostEqual(1.0, overlap[0][0])
        self.assertAlmostEqual(6 / 11, overlap[0][1])
        self.assertAlmostEqual(0.0, overlap[0][2])
        self.assertAlmostEqual(0.5, overlap[1][0])
        self.assertAlmostEqual(4 / 9, overlap[1][1])
        self.assertAlmostEqual(0.0, overlap[1][2])

    def test_map_update_one(self):
        """A single (unbatched) update reproduces the image-0 expectations."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
        result = mean_average_precision.result()
        self._assert_map(result, self.result_1)

    def test_map_update_both(self):
        """Two sequential updates accumulate across both images."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
        mean_average_precision.update_state(self.map_predictions[1], self.map_bboxes[1], self.map_labels[1])
        result = mean_average_precision.result()
        self._assert_map(result, self.result_both)

    def test_map_update_batch_one(self):
        """A batch of size 1 matches the unbatched single-image result."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state_batch(
            tf.constant([self.map_predictions[0]]),
            tf.constant([self.map_bboxes[0]]),
            tf.constant([self.map_labels[0]]),
            tf.constant([self.map_masks[0]]),
        )
        result = mean_average_precision.result()
        self._assert_map(result, self.result_1)

    def test_map_update_batch_both(self):
        """A batch of size 2 matches the two sequential updates."""
        mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
        mean_average_precision.update_state_batch(
            tf.constant(self.map_predictions),
            tf.constant(self.map_bboxes),
            tf.constant(self.map_labels),
            tf.constant(self.map_masks),
        )
        result = mean_average_precision.result()
        self._assert_map(result, self.result_both)

    def _assert_map(self, first, second):
        # Compare two metric dicts field by field.
        self.assertAlmostEqual(first["overall"], second["overall"])
        self.assertAlmostEqual(first["weighted"], second["weighted"])
        self.assertAlmostEqual(first["per_class"][0][0], second["per_class"][0][0])  # mAP
        self.assertAlmostEqual(first["per_class"][0][1], second["per_class"][0][1])  # num objects
        self.assertAlmostEqual(first["per_class"][1][0], second["per_class"][1][0])  # mAP
        self.assertAlmostEqual(first["per_class"][1][1], second["per_class"][1][1])  # num objects


if __name__ == "__main__":
    unittest.main()
<import_stmt>pytest<import_from_stmt>helpers.cluster ClickHouseCluster<line_sep>cluster=ClickHouseCluster(__file__)<line_sep>instance=cluster.add_instance('instance' stay_alive=<true>)<line_sep>@pytest.fixture(scope="module" autouse=<true>)<def_stmt>started_cluster <block_start><try_stmt><block_start>cluster.start()<line_sep><yield>cluster<block_end><finally_stmt><block_start>cluster.shutdown()<block_end><block_end><def_stmt>test_persistence <block_start>create_function_query1="CREATE FUNCTION MySum1 AS (a, b) -> a + b"<line_sep>create_function_query2="CREATE FUNCTION MySum2 AS (a, b) -> MySum1(a, b) + b"<line_sep>instance.query(create_function_query1)<line_sep>instance.query(create_function_query2)<assert_stmt>instance.query("SELECT MySum1(1,2)")<eq>"3\n"<assert_stmt>instance.query("SELECT MySum2(1,2)")<eq>"5\n"<line_sep>instance.restart_clickhouse()<assert_stmt>instance.query("SELECT MySum1(1,2)")<eq>"3\n"<assert_stmt>instance.query("SELECT MySum2(1,2)")<eq>"5\n"<line_sep>instance.query("DROP FUNCTION MySum2")<line_sep>instance.query("DROP FUNCTION MySum1")<line_sep>instance.restart_clickhouse()<assert_stmt>"Unknown function MySum1"<in>instance.query_and_get_error("SELECT MySum1(1, 2)")<assert_stmt>"Unknown function MySum2"<in>instance.query_and_get_error("SELECT MySum2(1, 2)")<block_end> |
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Underworld geophysics modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This module contains conditions used for applying constraints on model dynamics.
"""
import underworld as uw
import underworld._stgermain as _stgermain
import underworld.libUnderworld as libUnderworld
import abc


class SystemCondition(_stgermain.StgCompoundComponent, metaclass=abc.ABCMeta):
    """Abstract base for system conditions.

    Validates the (variable, indexSetsPerDof) pair and registers the index
    sets with the underlying StGermain C-level condition object.
    """

    def _add_to_stg_dict(self, componentDict):
        # Nothing extra to add to the StGermain component dictionary.
        pass

    def __init__(self, variable, indexSetsPerDof):
        if not isinstance(variable, uw.mesh.MeshVariable):
            raise TypeError("Provided variable must be of class 'MeshVariable'.")
        self._variable = variable
        # Normalise a single IndexSet into a 1-tuple so the per-DOF checks
        # below can treat both call styles uniformly.
        if isinstance(indexSetsPerDof, uw.container.IndexSet):
            indexSets = (indexSetsPerDof,)
        elif isinstance(indexSetsPerDof, (list, tuple)):
            indexSets = indexSetsPerDof
        else:
            raise TypeError("You must provide the required 'indexSetsPerDof' item\n" +
                            "as a list or tuple of 'IndexSet' items.")
        for guy in indexSets:
            if not isinstance(guy, (uw.container.IndexSet, type(None))):
                raise TypeError("Provided list must only contain objects of 'NoneType' or type 'IndexSet'.")
        self._indexSets = indexSets

        if variable.nodeDofCount != len(self._indexSets):
            raise ValueError("Provided variable has a nodeDofCount of {}, however you have ".format(variable.nodeDofCount) +
                             "provided {} index set(s). You must provide an index set for each degree ".format(len(self._indexSets)) +
                             "of freedom of your variable, but no more.")

        # ok, lets setup the c array
        libUnderworld.StGermain._PythonVC_SetupIndexSetArray(self._cself, len(self._indexSets))

        # now, lets add the indexSet objects
        # NOTE(review): the loop variable 'set' shadows the builtin, and
        # 'if set:' relies on IndexSet truthiness — an empty index set would be
        # skipped here just like None; confirm that is intended.
        for position, set in enumerate(self._indexSets):
            if set:
                libUnderworld.StGermain._PythonVC_SetIndexSetAtArrayPosition(self._cself, set._cself, position)

    @property
    def indexSetsPerDof(self):
        """ See class constructor for details. """
        return self._indexSets

    @property
    def variable(self):
        """ See class constructor for details. """
        return self._variable


class DirichletCondition(SystemCondition):
    """
    The DirichletCondition class provides the required functionality to impose Dirichlet
    conditions on your differential equation system.

    The user is simply required to flag which nodes/DOFs should be considered by the system
    to be a Dirichlet condition. The values at the Dirichlet nodes/DOFs is then left
    untouched by the system.

    Parameters
    ----------
    variable : underworld.mesh.MeshVariable
        This is the variable for which the Dirichlet condition applies.
    indexSetsPerDof : list, tuple, IndexSet
        The index set(s) which flag nodes/DOFs as Dirichlet conditions.
        Note that the user must provide an index set for each degree of
        freedom of the variable. So for a vector variable of rank 2 (say Vx & Vy),
        two index sets must be provided (say VxDofSet, VyDofSet).

    Notes
    -----
    Note that it is necessary for the user to set the required value on the variable, possibly
    via the numpy interface.

    Constructor must be called collectively all processes.

    Example
    -------
    Basic setup and usage of Dirichlet conditions:

    >>> linearMesh = uw.mesh.FeMesh_Cartesian( elementType='Q1/dQ0', elementRes=(4,4), minCoord=(0.,0.), maxCoord=(1.,1.) )
    >>> velocityField = uw.mesh.MeshVariable( linearMesh, 2 )
    >>> velocityField.data[:] = [0.,0.]  # set velocity zero everywhere, which will of course include the boundaries.
    >>> IWalls = linearMesh.specialSets["MinI_VertexSet"] + linearMesh.specialSets["MaxI_VertexSet"]  # get some wall index sets
    >>> JWalls = linearMesh.specialSets["MinJ_VertexSet"] + linearMesh.specialSets["MaxJ_VertexSet"]
    >>> freeSlipBC = uw.conditions.DirichletCondition(velocityField, (IWalls,JWalls) )  # this will give free slip sides
    >>> noSlipBC = uw.conditions.DirichletCondition(velocityField, (IWalls+JWalls,IWalls+JWalls) )  # this will give no slip sides
    """
    _objectsDict = {"_pyvc": "PythonVC"}
    _selfObjectName = "_pyvc"

    def __init__(self, variable, indexSetsPerDof):
        super(DirichletCondition, self).__init__(variable, indexSetsPerDof)


class NeumannCondition(SystemCondition):
    """
    This class defines Neumann conditions for a differential equation.
    Neumann conditions specify a field's flux along a boundary.

    As such the user specifies the field's flux as a uw.Function and the nodes where this flux
    is to be applied - similar to uw.conditions.DirichletCondition

    Parameters
    ----------
    fn_flux : underworld.function.Function
        Function which determines flux values.
    variable : underworld.mesh.MeshVariable
        The variable that describes the discretisation (mesh & DOFs) for 'indexSetsPerDof'
    indexSetsPerDof : list, tuple, IndexSet
        The index set(s) which flag nodes/DOFs as Neumann conditions.
        Note that the user must provide an index set for each degree of
        freedom of the variable above. So for a vector variable of rank 2 (say Vx & Vy),
        two index sets must be provided (say VxDofSet, VyDofSet).

    Example
    -------
    Basic setup and usage of Neumann conditions:

    >>> linearMesh = uw.mesh.FeMesh_Cartesian( elementType='Q1/dQ0', elementRes=(4,4), minCoord=(0.,0.), maxCoord=(1.,1.) )
    >>> velocityField = uw.mesh.MeshVariable( linearMesh, 2 )
    >>> velocityField.data[:] = [0.,0.]  # set velocity zero everywhere, which will of course include the boundaries.
    >>> myFunc = (uw.function.coord()[1],0.0)
    >>> bottomWall = linearMesh.specialSets["MinJ_VertexSet"]
    >>> tractionBC = uw.conditions.NeumannCondition(variable=velocityField, fn_flux=myFunc, indexSetsPerDof=(None,bottomWall) )
    """
    _objectsDict = {"_pyvc": "PythonVC"}
    _selfObjectName = "_pyvc"

    def __init__(self, variable, indexSetsPerDof=None, fn_flux=None):
        # call parent
        super(NeumannCondition, self).__init__(variable, indexSetsPerDof)

        _fn_flux = uw.function.Function.convert(fn_flux)
        if not isinstance(_fn_flux, uw.function.Function):
            raise TypeError("Provided 'fn_flux' must be of or convertible to 'Function' class.")
        # Assign through the property setter below (re-validates).
        self.fn_flux = _fn_flux

    @property
    def fn_flux(self):
        """ Get the underworld.Function that defines the flux """
        return self._fn_flux

    @fn_flux.setter
    def fn_flux(self, fn):
        """ Set the underworld.Function that defines the flux """
        # NOTE(review): the constructor raises TypeError for the same failure
        # while this setter raises ValueError — confirm the asymmetry is wanted.
        _fn = uw.function.Function.convert(fn)
        if not isinstance(_fn, uw.function.Function):
            raise ValueError("Provided '_fn' must be of or convertible to 'Function' class.")
        self._fn_flux = _fn
<import_stmt>csv<import_stmt>six<import_from_stmt>collections defaultdict Counter<import_from_stmt>itertools izip islice<import_from_stmt>geodata.text.tokenize tokenize token_types<import_from_stmt>geodata.encoding safe_encode<class_stmt>FrequentPhraseExtractor(object)<block_start>'''
Extract common multi-word phrases from a file/iterator using the
frequent itemsets method to keep memory usage low.
'''<line_sep>WORD_TOKEN_TYPES=(token_types.WORD token_types.IDEOGRAPHIC_CHAR token_types.ABBREVIATION token_types.HANGUL_SYLLABLE token_types.ACRONYM)<def_stmt>__init__ self min_count=5<block_start>self.min_count=min_count<line_sep>self.vocab=defaultdict(int)<line_sep>self.frequencies=defaultdict(int)<line_sep>self.train_words=0<block_end><def_stmt>ngrams self words n=2<block_start><for_stmt>t izip(*(islice(words i <none>)<for>i xrange(n)))<block_start><yield>t<block_end><block_end><def_stmt>add_tokens self s<block_start><for_stmt>t,c tokenize(s)<block_start><if_stmt>c<in>self.WORD_TOKEN_TYPES<block_start>self.vocab[((t.lower() c) )]<augadd>1<line_sep>self.train_words<augadd>1<block_end><block_end><block_end><def_stmt>create_vocab self f<block_start><for_stmt>line f<block_start>line=line.rstrip()<if_stmt><not>line<block_start><continue><block_end>self.add_tokens(line)<block_end>self.prune_vocab()<block_end><def_stmt>prune_vocab self<block_start><for_stmt>k self.vocab.keys()<block_start><if_stmt>self.vocab[k]<l>self.min_count<block_start><del_stmt>self.vocab[k]<block_end><block_end><block_end><def_stmt>add_ngrams self s n=2<block_start>sequences=[]<line_sep>seq=[]<for_stmt>t,c tokenize(s)<block_start><if_stmt>c<in>self.WORD_TOKEN_TYPES<block_start>seq.append((t c))<block_end><elif_stmt>seq<block_start>sequences.append(seq)<line_sep>seq=[]<block_end><block_end><if_stmt>seq<block_start>sequences.append(seq)<block_end><for_stmt>seq sequences<block_start><for_stmt>gram self.ngrams(seq n=n)<block_start>last_c=<none><line_sep>prev_tokens=tuple([(t.lower() c)<for>t,c gram[:-1]])<if_stmt>prev_tokens<in>self.vocab<block_start>t,c=gram[-1]<line_sep>current_token=(t.lower() c)<line_sep>self.frequencies[(prev_tokens current_token)]<augadd>1<block_end><block_end><block_end><block_end><def_stmt>add_frequent_ngrams_to_vocab self<block_start><for_stmt>k,v 
six.iteritems(self.frequencies)<block_start><if_stmt>v<l>self.min_count<block_start><continue><block_end>prev,current=k<line_sep>self.vocab[prev+(current )]=v<block_end><block_end><def_stmt>find_ngram_phrases self f n=2<block_start>self.frequencies=defaultdict(int)<for_stmt>line f<block_start>line=line.rstrip()<if_stmt><not>line<block_start><continue><block_end>self.add_ngrams(line n=n)<block_end>self.add_frequent_ngrams_to_vocab()<line_sep>self.frequencies=defaultdict(int)<block_end>@classmethod<def_stmt>from_file cls f max_phrase_len=5 min_count=5<block_start>phrases=cls()<line_sep>print('Doing frequent words for {}'.format(filename))<line_sep>f.seek(0)<line_sep>phrases.create_vocab(f)<for_stmt>n xrange(2 max_phrase_len+1)<block_start>print('Doing frequent ngrams, n={} for {}'.format(n filename))<line_sep>f.seek(0)<line_sep>phrases.find_ngram_phrases(f n=n)<block_end>print('Done with {}'.format(filename))<line_sep><return>phrases<block_end><def_stmt>to_tsv self filename mode='w' max_rows=<none><block_start>f=open(filename mode)<line_sep>writer=csv.writer(f delimiter='\t')<for_stmt>i,(k v) enumerate(Counter(self.vocab).most_common())<block_start><if_stmt>max_rows<is><not><none><and>i<eq>max_rows<block_start><break><block_end>gram=[]<for_stmt>t,c k<block_start>gram.append(t)<if_stmt>c<ne>token_types.IDEOGRAPHIC_CHAR<block_start>gram.append(six.text_type(' '))<block_end><block_end>phrase=six.text_type('').join(gram)<line_sep>writer.writerow((safe_encode(phrase) safe_encode(len(k)) safe_encode(v)))<block_end><block_end><block_end> |
<import_stmt>asyncio<import_stmt>json<import_from_stmt>unittest TestCase<import_from_stmt>aioresponses aioresponses<import_stmt>hummingbot.connector.exchange.bitfinex.bitfinex_utils<as>utils<import_from_stmt>hummingbot.connector.exchange.bitfinex BITFINEX_REST_URL<import_from_stmt>hummingbot.connector.exchange.bitfinex.bitfinex_api_order_book_data_source BitfinexAPIOrderBookDataSource<class_stmt>BitfinexAPIOrderBookDataSourceTests(TestCase)# the level is required to receive logs from the data source logger
<block_start>level=0<def_stmt>setUp self<arrow><none><block_start>super().setUp()<line_sep>self.log_records=[]<line_sep>BitfinexAPIOrderBookDataSource.logger().setLevel(1)<line_sep>BitfinexAPIOrderBookDataSource.logger().addHandler(self)<block_end><def_stmt>handle self record<block_start>self.log_records.append(record)<block_end><def_stmt>_is_logged self log_level:str message:str<arrow>bool<block_start><return>any(record.levelname<eq>log_level<and>record.getMessage()<eq>message<for>record self.log_records)<block_end>@aioresponses()<def_stmt>test_get_last_traded_price self api_mock<block_start>response=[10645 73.93854271 10647 75.22266119 731.60645389 0.0738 10644.00645389 14480.89849423 10766 9889.1449809]<line_sep>api_mock.get(f"{BITFINEX_REST_URL}/ticker/{utils.convert_to_exchange_trading_pair('BTC-USDT')}" body=json.dumps(response))<line_sep>last_price=asyncio.get_event_loop().run_until_complete(BitfinexAPIOrderBookDataSource.get_last_traded_price("BTC-USDT"))<line_sep>self.assertEqual(response[6] last_price)<block_end>@aioresponses()<def_stmt>test_get_last_traded_price_returns_zero_when_an_error_happens self api_mock<block_start>response={"error":"ERR_RATE_LIMIT"}<line_sep>api_mock.get(f"{BITFINEX_REST_URL}/ticker/{utils.convert_to_exchange_trading_pair('BTC-USDT')}" body=json.dumps(response))<line_sep>last_price=asyncio.get_event_loop().run_until_complete(BitfinexAPIOrderBookDataSource.get_last_traded_price("BTC-USDT"))<line_sep>self.assertEqual(0 last_price)<line_sep>self.assertTrue(self._is_logged("ERROR" f"Error encountered requesting ticker information. The response was: {response} "<concat>f"(There was an error requesting ticker information BTC-USDT ({response}))"))<block_end><block_end> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.