hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f3e142ca1cb364737d4450b3867cd93525109cda | 91 | py | Python | example.py | 01abhishek10/Attendance | 7c4faf67205fabcc6c8b509a9d433b0a4b4b3a28 | [
"MIT"
] | null | null | null | example.py | 01abhishek10/Attendance | 7c4faf67205fabcc6c8b509a9d433b0a4b4b3a28 | [
"MIT"
] | null | null | null | example.py | 01abhishek10/Attendance | 7c4faf67205fabcc6c8b509a9d433b0a4b4b3a28 | [
"MIT"
def git_operation():
    """Print a message announcing that example.py was added to the repo."""
    print("I am adding example.py to my github repo")

git_operation()
| 22.75 | 53 | 0.725275 |
3bac55bef2f440b9dc013faf74e25c2af9ad388f | 918 | py | Python | examples/django_waveapps/django_waveapps/urls.py | gbozee/waveapps | 3a3505b135e8002ced59d5458090326aeef3822b | [
"MIT"
] | null | null | null | examples/django_waveapps/django_waveapps/urls.py | gbozee/waveapps | 3a3505b135e8002ced59d5458090326aeef3822b | [
"MIT"
] | null | null | null | examples/django_waveapps/django_waveapps/urls.py | gbozee/waveapps | 3a3505b135e8002ced59d5458090326aeef3822b | [
"MIT"
] | null | null | null | """django_waveapps URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route table: the Django admin plus the waveapps Django integration.
# NOTE(review): the mount point is spelled "wavesapps/" while the app and
# namespace are "waveapps" — confirm the extra "s" in the URL prefix is intended.
urlpatterns = [
    path("admin/", admin.site.urls),
    path(
        "wavesapps/",
        include(("waveapps.frameworks.django.urls", "waveapps"), namespace="waveapps"),
    ),
]
| 35.307692 | 88 | 0.674292 |
ba7d80ac63f8efbb72e696693dbd5cc40829ed26 | 4,023 | py | Python | model_train/train_mixup2.py | rftgy-bow/ElectricTimbreDictionaryV2 | 566efa30f4b2186e71a75557c041110c6e8f1bfa | [
"MIT"
] | 3 | 2021-09-17T07:07:27.000Z | 2021-09-17T07:07:37.000Z | model_train/train_mixup2.py | rftgy-bow/ElectricTimbreDictionaryV2 | 566efa30f4b2186e71a75557c041110c6e8f1bfa | [
"MIT"
] | null | null | null | model_train/train_mixup2.py | rftgy-bow/ElectricTimbreDictionaryV2 | 566efa30f4b2186e71a75557c041110c6e8f1bfa | [
"MIT"
] | null | null | null | # -*- coding=utf-8 -*-
import numpy as np
from tensorflow import keras, saved_model, distribute
#from keras.models import Model
#from keras.layers import Input, Dense, Dropout, Activation
#from keras.layers import Conv2D, GlobalAveragePooling2D
#from keras.layers import BatchNormalization, Add
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.models import load_model
import os
from matplotlib import pyplot as plt
from new_model import CBACNN
from mixup import MixupGenerator
import gc
import time as tm
#physical_devices = tf.config.experimental.list_physical_devices('GPU')
#if len(physical_devices) > 0:
# for k in range(len(physical_devices)):
# tf.config.experimental.set_memory_growth(physical_devices[k], True)
# print('memory growth:', tf.config.experimental.get_memory_growth(physical_devices[k]))
#else:
# print("Not enough GPU hardware devices available")
# Mirror the model across all visible devices for synchronous data-parallel
# training (falls back to a single device when no GPU is present).
strategy = distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
# IMPORT DATASET
# dataset files: one raw training set plus four augmented variants
# (presumably ss/st/wn/com are distinct augmentations — TODO confirm naming)
train_files = ["mel_npz/esc_melsp_train_raw.npz",
               "mel_npz/esc_melsp_train_ss.npz",
               "mel_npz/esc_melsp_train_st.npz",
               "mel_npz/esc_melsp_train_wn.npz",
               "mel_npz/esc_melsp_train_com.npz"]
test_file = "mel_npz/esc_melsp_test.npz"
train_num = 1500  # samples per training .npz file
test_num = 500
freq = 128 # melsp
time = 862 # 5 * 44100 / 256
# define dataset placeholders
# NOTE: `time` shadows the stdlib module name; the time module is imported as `tm`.
print("making placeholders...")
x_train = np.zeros(freq*time*train_num*len(train_files)).reshape(
    train_num*len(train_files), freq, time)
y_train = np.zeros(train_num*len(train_files))
# load dataset: each file fills one contiguous train_num-sized slice
for i in range(len(train_files)):
    print("loading datasets...")
    data = np.load(train_files[i])
    x_train[i*train_num:(i+1)*train_num] = data["x"]
    y_train[i*train_num:(i+1)*train_num] = data["y"]
# load test dataset
test_data = np.load(test_file)
x_test = test_data["x"]
y_test = test_data["y"]
# convert target data into one-hot vector
classes = 50
print("converting datasets...")
y_train = keras.utils.to_categorical(y_train, classes)
y_test = keras.utils.to_categorical(y_test, classes)
# reshape source data: add a trailing channel axis -> (N, freq, time, 1)
x_train = x_train.reshape(train_num*5, freq, time, 1)
x_test = x_test.reshape(test_num, freq, time, 1)
print("finished")
# DEFINE MODEL
# Build model and optimizer inside the distribution strategy scope so the
# variables are mirrored across devices.
with strategy.scope():
    model = CBACNN.exportModel(x_train.shape[1:], classes)
    # NOTE(review): `lr` and `decay` are legacy Keras optimizer kwargs; newer
    # TF releases expect `learning_rate` — confirm against the pinned version.
    opt = keras.optimizers.Adam(lr=0.00001, decay=1e-6, amsgrad=True)
    model.compile(
        loss=['categorical_crossentropy', None], # train output "final" only
        optimizer=opt, metrics=['accuracy'])
# TensorBoard logging; huge profile_batch effectively disables profiling.
callbacks = [
    keras.callbacks.TensorBoard(
        log_dir='./logs', histogram_freq=1, profile_batch = 100000000)
]
# TRAIN MODEL
start = tm.time()
print("***start training***")
model.fit(x_train,y_train,
          validation_data=(x_test,y_test),
          epochs=10, verbose=1, batch_size=32,
          callbacks=callbacks)
print("training finished!")
print("***exec time:", tm.time() - start, "sec***")
# make SavedModel
model.save("./saved_model2")
print("stored trained model as <<saved_model>>")
# CALLBACKS
# early stopping: halt when validation loss stops improving for 10 epochs
es_cb = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto')
# TRAIN WITH MIXUP: continue training the same model on mixup-blended batches
batch_size = 32
epochs = 10
training_generator = MixupGenerator(x_train, y_train)()
print("***start MIXUP training***")
# NOTE(review): `fit_generator` is deprecated in recent TF/Keras in favour of
# `fit`; `shuffle` has no effect with a generator input — confirm TF version.
model.fit_generator(generator=training_generator,
                    steps_per_epoch=x_train.shape[0] // batch_size,
                    validation_data=(x_test, y_test),
                    epochs=epochs,
                    verbose=1,
                    shuffle=True,
                    callbacks=[es_cb])
print("training finished!")
# make SavedModel
saved_model.save(model, "./saved_model2_mixup")
print("stored trained model as <<saved_model>>")
# EVALUATION
evaluation = model.evaluate(x_test, y_test)
print(evaluation)
print("evaluation finished!")
keras.backend.clear_session()
gc.collect()
| 29.580882 | 95 | 0.706687 |
de7a7c95baa8f39d84f3b9a9af22505379c9bc6e | 586 | py | Python | testsuite/complement-reg/run.py | luyatshimbalanga/OpenShadingLanguage | 2120647911af732f0d12d70e2f7f4e1ebe8fadcb | [
"BSD-3-Clause"
] | 1,105 | 2015-01-02T20:47:19.000Z | 2021-01-25T13:20:56.000Z | testsuite/complement-reg/run.py | luyatshimbalanga/OpenShadingLanguage | 2120647911af732f0d12d70e2f7f4e1ebe8fadcb | [
"BSD-3-Clause"
] | 696 | 2015-01-07T23:42:08.000Z | 2021-01-25T03:55:08.000Z | testsuite/complement-reg/run.py | luyatshimbalanga/OpenShadingLanguage | 2120647911af732f0d12d70e2f7f4e1ebe8fadcb | [
"BSD-3-Clause"
] | 248 | 2015-01-05T13:41:28.000Z | 2021-01-24T23:29:55.000Z | #!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
# Render the v_complement and u_complement shaders and register both output
# variants (plain and "m"-prefixed) for comparison against reference images.
# `command`, `outputs`, and `testshade` are injected by the testsuite harness.
command += testshade("-t 1 -g 32 32 -od uint8 v_complement -o cout v_comp.tif -o mcout mv_comp.tif")
outputs.append ("v_comp.tif")
outputs.append ("mv_comp.tif")
command += testshade("-t 1 -g 32 32 -od uint8 u_complement -o cout u_comp.tif -o mcout mu_comp.tif")
outputs.append ("u_comp.tif")
outputs.append ("mu_comp.tif")

# expect a few LSB failures
failthresh = 0.008
failpercent = 3
| 27.904762 | 100 | 0.745734 |
d4f485496ed32ba0577e402b38aa06d9b61cd209 | 203 | py | Python | python/extend_cpp/main.py | DaniloZZZ/Ideas-Fun | 8b9fff17935e39d2a00630013045298e190cfe12 | [
"Apache-2.0"
] | null | null | null | python/extend_cpp/main.py | DaniloZZZ/Ideas-Fun | 8b9fff17935e39d2a00630013045298e190cfe12 | [
"Apache-2.0"
] | null | null | null | python/extend_cpp/main.py | DaniloZZZ/Ideas-Fun | 8b9fff17935e39d2a00630013045298e190cfe12 | [
"Apache-2.0"
] | null | null | null | import cdan
def main():
    """Exercise the cdan extension: element-wise add via loop and SIMD paths."""
    lhs = [1.2, 3, 0, -4]
    rhs = [-1.2, 9, 1.2, -8]
    loop_result = cdan.loop_add(lhs, rhs)
    print("loop", loop_result)
    simd_result = cdan.simd_add(lhs, rhs)
    print("simd", simd_result)


if __name__=='__main__':
    main()
| 13.533333 | 26 | 0.502463 |
6ede4d4c35ffce8cc22717b83650208c4c32f186 | 2,252 | py | Python | pmbrl/control/agent.py | paul-kinghorn/rl-inference | 007dba5836b79417aaa0aa6216492745aa9e1bb3 | [
"MIT"
] | 20 | 2020-04-16T18:13:39.000Z | 2022-02-25T01:20:39.000Z | pmbrl/control/agent.py | paul-kinghorn/rl-inference | 007dba5836b79417aaa0aa6216492745aa9e1bb3 | [
"MIT"
] | null | null | null | pmbrl/control/agent.py | paul-kinghorn/rl-inference | 007dba5836b79417aaa0aa6216492745aa9e1bb3 | [
"MIT"
] | 8 | 2020-04-23T05:39:46.000Z | 2021-08-29T20:21:44.000Z | # pylint: disable=not-callable
# pylint: disable=no-member
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
class Agent(object):
    """Couples an environment with a planner to collect experience episodes."""

    def __init__(self, env, planner, logger=None):
        self.env = env
        self.planner = planner
        self.logger = logger

    def get_seed_episodes(self, buffer, n_episodes):
        """Fill *buffer* with ``n_episodes`` of random-action experience."""
        for _ in range(n_episodes):
            obs = self.env.reset()
            episode_over = False
            while not episode_over:
                random_action = self.env.sample_action()
                next_obs, reward, episode_over, _ = self.env.step(random_action)
                buffer.add(obs, random_action, reward, next_obs)
                obs = deepcopy(next_obs)
                if episode_over:
                    break
        return buffer

    def run_episode(self, buffer=None, action_noise=None, recorder=None):
        """Roll out one planner-driven episode.

        Optionally stores transitions in *buffer*, perturbs actions with
        Gaussian noise, and captures frames with *recorder*.  Returns a
        ``(total_reward, total_steps, planner_stats)`` tuple.
        """
        episode_reward = 0
        step_count = 0
        episode_over = False
        with torch.no_grad():
            obs = self.env.reset()
            while not episode_over:
                planned = self.planner(obs)
                if action_noise is not None:
                    planned = self._add_action_noise(planned, action_noise)
                numpy_action = planned.cpu().detach().numpy()
                next_obs, reward, episode_over, _ = self.env.step(numpy_action)
                episode_reward += reward
                step_count += 1
                # Periodic progress logging every 25 environment steps.
                if self.logger is not None and step_count % 25 == 0:
                    self.logger.log(
                        f"> Step {step_count} [reward {episode_reward:.2f}]"
                    )
                if buffer is not None:
                    buffer.add(obs, numpy_action, reward, next_obs)
                if recorder is not None:
                    recorder.capture_frame()
                obs = deepcopy(next_obs)
                if episode_over:
                    break
        if recorder is not None:
            recorder.close()
            del recorder
        self.env.close()
        stats = self.planner.return_stats()
        return episode_reward, step_count, stats

    def _add_action_noise(self, action, noise):
        """Perturb *action* with scaled Gaussian noise; no-op when *noise* is None."""
        if noise is not None:
            action = action + noise * torch.randn_like(action)
        return action
| 30.849315 | 85 | 0.535524 |
ab73a85b7503b188e15fb52c190ea9c3057ca953 | 257 | py | Python | tests/testdata/models/identity_model.py | radu-matei/wasi-nn-onnx | 03791230353350b197bdd03037a026ba938e2f90 | [
"MIT"
] | 14 | 2021-07-07T16:03:11.000Z | 2022-03-29T01:01:57.000Z | tests/testdata/models/identity_model.py | radu-matei/wasi-nn-onnx | 03791230353350b197bdd03037a026ba938e2f90 | [
"MIT"
] | 2 | 2021-07-23T15:25:44.000Z | 2021-08-04T05:08:10.000Z | tests/testdata/models/identity_model.py | radu-matei/wasi-nn-onnx | 03791230353350b197bdd03037a026ba938e2f90 | [
"MIT"
] | 1 | 2021-09-30T20:29:07.000Z | 2021-09-30T20:29:07.000Z | """
Usage: pip install torch
python identity_model.py
"""
import torch
__version__ = '0.1.0'
class Model(torch.nn.Module):
    """Identity network: the forward pass returns its input untouched."""

    def forward(self, inputs):
        return inputs
# Export the identity model to ONNX using a random (1, 4) example input;
# writes 'identity_input_output.onnx' to the current working directory.
m = Model()
x = torch.randn(1, 4)
torch.onnx.export(m, (x, ), 'identity_input_output.onnx')
| 15.117647 | 57 | 0.673152 |
f05db209e52280ab4d845ba388c64cb8e72b210a | 1,609 | py | Python | list-targets.py | lhunath/xcanalyzer | 93fae7f53dfb642664724e7ee58536f56f9c4790 | [
"BSD-3-Clause"
] | 4 | 2019-09-08T03:47:19.000Z | 2021-11-24T10:38:19.000Z | list-targets.py | lhunath/xcanalyzer | 93fae7f53dfb642664724e7ee58536f56f9c4790 | [
"BSD-3-Clause"
] | 1 | 2021-10-04T19:30:37.000Z | 2021-10-04T19:30:37.000Z | list-targets.py | lhunath/xcanalyzer | 93fae7f53dfb642664724e7ee58536f56f9c4790 | [
"BSD-3-Clause"
] | 4 | 2021-06-05T16:12:19.000Z | 2022-03-21T06:39:41.000Z | #!/usr/bin/env python3
import argparse
from xcanalyzer.xcodeproject.parsers import XcProjectParser
from xcanalyzer.xcodeproject.exceptions import XcodeProjectReadException
from xcanalyzer.xcodeproject.generators import XcProjReporter
# --- Arguments ---
argument_parser = argparse.ArgumentParser(description="List all targets and files of the Xcode project.")
# Project folder argument
argument_parser.add_argument('path',
help='Path of the folder containing your `.xcodeproj` folder.')
# Sorted by name argument
argument_parser.add_argument('-n', '--name-sorted',
dest='sorted_by_name',
action='store_true',
help='Give the list of targets sorted by name. So they are not grouped by type.')
# Verbose argument
argument_parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true',
help="Give name of products associated with targets.")
# --- Parse arguments ---
args = argument_parser.parse_args()
# Xcode code project reader
xcode_project_reader = XcProjectParser(args.path)
# Loading the project
try:
xcode_project_reader.load()
except XcodeProjectReadException as e:
print("An error occurred when loading Xcode project: {}".format(e.message))
exit()
# Reporter
reporter = XcProjReporter(xcode_project_reader.xc_project)
reporter.print_targets(by_type=(not args.sorted_by_name), verbose=args.verbose)
if not args.sorted_by_name:
reporter.print_targets_summary() | 34.234043 | 110 | 0.684897 |
a63d0001a134b894edbff46b5d9955fcc1fd2a1b | 2,875 | py | Python | remove/myjira/test_ccc.py | nukeguys/myutil | 65d0aff36ec45bffbd2e52fea0fabfbabd5609b1 | [
"Apache-2.0"
] | null | null | null | remove/myjira/test_ccc.py | nukeguys/myutil | 65d0aff36ec45bffbd2e52fea0fabfbabd5609b1 | [
"Apache-2.0"
] | null | null | null | remove/myjira/test_ccc.py | nukeguys/myutil | 65d0aff36ec45bffbd2e52fea0fabfbabd5609b1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from jira.client import JIRA
from jira.config import get_jira
from jira.utils import json_loads
from pprint import pprint
import os
import subprocess
import json
import sys
class Shell:
    """Thin wrapper around subprocess for running shell command strings."""

    @staticmethod
    def execute(bashCommand, wantToError=True):
        """Run *bashCommand* through the shell and capture its output.

        Returns ``(stdout, stderr)`` as strings when *wantToError* is True,
        otherwise stdout alone.
        """
        proc = subprocess.Popen(
            bashCommand,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=True,
        )
        captured_out, captured_err = proc.communicate()
        if wantToError == True:
            return str(captured_out), str(captured_err)
        return str(captured_out)
class MyJira:
    """Helper around the JIRA client for updating CCC issues from saved files."""

    def __init__(self):
        # MY_APP_LOG_PATH must point at a directory (with trailing separator)
        # containing `description` and `minibat` files from an earlier step.
        logpath = os.environ['MY_APP_LOG_PATH']
        self.descriptionPath = logpath + 'description'
        self.minibatPath = logpath + 'minibat'

    def getProjectKey(self, jira, projectName):
        """Return the key of the 'webOS Commercial Display' project, else None.

        NOTE(review): the *projectName* parameter is ignored; the comparison is
        hard-coded — confirm whether it should compare against *projectName*.
        """
        for project in jira.projects():
            name=project.name.encode('ascii','ignore')
            if name == 'webOS Commercial Display':
                return project.key
        return None

    def getCustomFiledId(self, jira, fieldName):
        """Return the lexicographically smallest custom-field id named *fieldName*.

        Raises IndexError when no field matches (empty list indexed at 0).
        """
        l = []
        for i in jira.fields():
            field_name=i[u'name'].encode('ascii','ignore')
            field_id=i[u'id'].encode('ascii','ignore')
            if(field_name == fieldName):
                l.append(field_id)
        l.sort()
        return l[0]

    def update(self, issue_id):
        """Write the saved description into the issue's 'Devel Description'
        custom field and attach the minibat file contents as a comment."""
        jira_id='hlmdev'
        #issue_id='IDWEBCCC-1492'
        jira=get_jira(profile=jira_id)
        issue=jira.issue(issue_id)
        devel_description_field_id = self.getCustomFiledId(jira, 'Devel Description')
        with open(self.descriptionPath, 'r') as readDF, open(self.minibatPath, 'r') as readMF:
            issue.update(fields={devel_description_field_id:readDF.read()})
            jira.add_comment(issue, readMF.read())
if __name__ == '__main__':
    # Query gerrit for the review record of the current HEAD commit and pull
    # the "CCC: <issue>" line out of the review comments.
    bashCommand = 'ssh -p 29425 we.lge.com gerrit query --comments $(git log -1 --pretty=format:\'%H\')'
    results = Shell.execute(bashCommand, wantToError=False)
    ccc = [item.strip().split(' ')[0] for item in results.split('\n') if item.strip().find('CCC: ') != -1]
    ccc = "".join(ccc)
    # Strip the surrounding quote characters to obtain the bare issue id.
    issue_id=ccc[1:len(ccc)-1]
    MyJira().update(issue_id)

    # Retained example of creating a new CCC issue instead of updating one:
    #issue_dict = {
    #    'project': {'key': project_key},
    #    'summary': summary,
    #    'description': description,
    #    'issuetype': {'name': 'CCC Task'},
    #    'assignee': {'name':'hyeonsub.jung'},
    #    'labels': [ branchName ],
    #    product_type_field_id: {'value': 'hotel'},
    #    code_review_field_id: codeReviewSite,
    #}
    #
    #new_issue = jira.create_issue(fields=issue_dict)
    #new_issue.update(fields={code_review_field_id:codeReviewSite})
    #jira.add_comment(new_issue, comment)
    #jira.add_watcher(new_issue, 'ernest.kim')
    #jira.add_watcher(new_issue, 'yoengjin.hong')
    #
    #print 'Address : ' + new_issue._options[u'server']+'/browse/'+new_issue.key
    #print '[' + new_issue.key + '] ' + summary
| 35.9375 | 107 | 0.642087 |
9745bb4362f4ab1d32071368495412023849db4f | 8,570 | py | Python | padertorch/contrib/cb/summary.py | sibange/padertorch | 494692d877f04c66847c2943795b23aea488217d | [
"MIT"
] | 62 | 2019-12-22T08:30:29.000Z | 2022-03-22T11:02:59.000Z | padertorch/contrib/cb/summary.py | sibange/padertorch | 494692d877f04c66847c2943795b23aea488217d | [
"MIT"
] | 47 | 2020-01-06T09:23:47.000Z | 2022-01-24T16:55:06.000Z | padertorch/contrib/cb/summary.py | sibange/padertorch | 494692d877f04c66847c2943795b23aea488217d | [
"MIT"
] | 13 | 2019-12-16T08:12:46.000Z | 2021-11-08T14:37:06.000Z |
import collections
import torch
import einops
import cached_property
import padertorch as pt
# loss: torch.Tenso r =None,
# losses: dict =None,
# scalars: dict =None,
# histograms: dict =None,
# audios: dict =None,
# images: dict =None,
class ReviewSummary(collections.abc.Mapping):
    """Mapping-like accumulator for a training review (loss, scalars, images,
    audios, histograms, ...) consumed by padertorch's SummaryHook.

    >>> review_summary = ReviewSummary()
    >>> review_summary
    ReviewSummary(prefix='', _data={})
    """
    # Valid top-level keys: whatever SummaryHook expects plus 'loss'/'losses'.
    _keys = set(pt.train.hooks.SummaryHook.empty_summary_dict().keys()) | {
        'loss', 'losses'
    }

    def __init__(self, prefix='', _data=None, sampling_rate=None, visible_dB=60):
        # `prefix` is prepended to every entry name; `visible_dB` bounds the
        # dynamic range of STFT/spectrogram images.
        if _data is None:
            _data = {}
        self.data = _data
        self.prefix = prefix
        self.sampling_rate = sampling_rate
        self.visible_dB = visible_dB

    def add_to_loss(self, value):
        """Accumulate *value* into the scalar training loss."""
        assert torch.isfinite(value), value
        if 'loss' in self.data:
            self.data['loss'] = self.data['loss'] + value
        else:
            self.data['loss'] = value

    def add_scalar(self, name, *value):
        # Save the mean of all added values
        value = pt.data.batch.example_to_numpy(value, detach=True)
        self.data.setdefault(
            'scalars',
            {}
        ).setdefault(
            f'{self.prefix}{name}',
            []
        ).extend(value)

    def add_audio(self, name, signal, sampling_rate=None, batch_first=None,
                  normalize=True):
        """Store *signal* as an audio entry; falls back to self.sampling_rate."""
        if sampling_rate is None:
            sampling_rate = self.sampling_rate
        assert sampling_rate is not None, sampling_rate
        audio = pt.summary.audio(
            signal=signal, sampling_rate=sampling_rate,
            batch_first=batch_first, normalize=normalize
        )
        self.data.setdefault(
            'audios',
            {}
        )[f'{self.prefix}{name}'] = audio

    def add_buffer(self, name, data):
        """Append raw (numpy-converted) data to a named buffer list."""
        data = pt.data.batch.example_to_numpy(data, detach=True)
        self.data.setdefault(
            'buffers',
            {}
        ).setdefault(
            f'{self.prefix}{name}',
            []
        ).append(data)

    def add_text(self, name, text):
        assert isinstance(text, str), (type(text), text)
        self.data.setdefault(
            'texts',
            {}
        )[f'{self.prefix}{name}'] = text

    def _rearrange(self, array, rearrange):
        # Optional einops rearrangement applied before image conversion.
        if rearrange is not None:
            return einops.rearrange(array, rearrange)
        else:
            return array

    def add_image(self, name, image):
        # Save the last added value
        image = pt.utils.to_numpy(image, detach=True)
        if image.ndim != 3:
            raise AssertionError(
                'Did you forgot to call "pt.summary.*_to_image"?\n'
                f'Expect ndim == 3, got shape {image.shape}.'
            )
        self.data.setdefault(
            'images',
            {}
        )[f'{self.prefix}{name}'] = image

    def add_stft_image(
            self, name, signal,
            *, batch_first=None, color='viridis', rearrange=None):
        """Convert an STFT to an image (dynamic range self.visible_dB) and store it."""
        signal = self._rearrange(signal, rearrange)
        image = pt.summary.stft_to_image(signal, batch_first=batch_first, color=color, visible_dB=self.visible_dB)
        self.add_image(name, image)

    def add_spectrogram_image(
            self, name, signal,
            *, batch_first=None, color='viridis', rearrange=None):
        """Convert a spectrogram to an image and store it."""
        signal = self._rearrange(signal, rearrange)
        image = pt.summary.spectrogram_to_image(signal, batch_first=batch_first, color=color, visible_dB=self.visible_dB)
        self.add_image(name, image)

    def add_mask_image(self, name, mask, *, batch_first=None, color='viridis', rearrange=None):
        """Convert a (0..1) mask to an image and store it."""
        mask = self._rearrange(mask, rearrange)
        image = pt.summary.mask_to_image(mask, batch_first=batch_first, color=color)
        self.add_image(name, image)

    def add_histogram(self, name, values):
        value = pt.utils.to_numpy(values, detach=True)
        self.data.setdefault(
            'histograms',
            {}
        ).setdefault(
            f'{self.prefix}{name}',
            []
        ).append(value)

    # --- Mapping protocol: delegate to self.data, restricting keys to _keys ---

    def __contains__(self, item):
        return item in self.data

    def __getitem__(self, key):
        assert key in self._keys, (key, self._keys)
        return self.data[key]

    def __setitem__(self, key, value):
        assert key in self._keys, (key, self._keys)
        self.data[key] = value

    def get(self, item, default):
        # Unlike dict.get, `default` is required here.
        if item in self:
            return self.data[item]
        else:
            return default

    def pop(self, *args, **kwargs):
        """pop(key[, default])"""
        return self.data.pop(*args, **kwargs)

    def setdefault(self, key, default):
        # NOTE(review): unlike dict.setdefault this returns None, and the key
        # is not validated against _keys — confirm both are intentional.
        self.data.setdefault(key, default)

    def __iter__(self):
        return iter(self.data)

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        return f'{self.__class__.__name__}(prefix={self.prefix!r}, _data={dict(self)!r})'

    def _repr_pretty_(self, p, cycle):
        """IPython pretty-printer hook.

        >>> review_summary = ReviewSummary()
        >>> review_summary.add_to_loss(1)
        >>> review_summary.add_scalar('abc', 2)
        >>> review_summary
        ReviewSummary(prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}})

        >>> from IPython.lib.pretty import pprint
        >>> pprint(review_summary)
        ReviewSummary(prefix='', _data={'loss': 1, 'scalars': {'abc': [2]}})
        >>> pprint(review_summary, max_width=79-18)
        ReviewSummary(
            prefix='',
            _data={'loss': 1, 'scalars': {'abc': [2]}}
        )
        >>> pprint(review_summary, max_width=79-40)
        ReviewSummary(
            prefix='',
            _data={'loss': 1,
                   'scalars': {'abc': [2]}}
        )
        """
        if cycle:
            p.text(f'{self.__class__.__name__}(...)')
        else:
            txt = f'{self.__class__.__name__}('
            with p.group(4, txt, ''):
                p.breakable(sep='')
                p.text('prefix=')
                p.pretty(self.prefix)
                p.text(',')
                p.breakable()
                txt = '_data='
                with p.group(len(txt), txt, ''):
                    p.pretty(dict(self))
                p.breakable('')
                p.text(')')

    class _Plotter:
        """Matplotlib helper bound to a ReviewSummary (see the `plot` property)."""

        def __init__(self, review: 'ReviewSummary'):
            self.review = review

        def image(
                self, key, origin='lower', **kwargs
        ):
            """Show the stored image `key` with plt.imshow and return the axes image."""
            import numpy as np
            import matplotlib.pyplot as plt
            kwargs = {
                'origin': origin,
                **kwargs,
            }
            if key not in self.review['images']:
                from paderbox.utils.mapping import DispatchError
                raise DispatchError(key, self.review['images'].keys())
            # Stored layout is channel-first; imshow expects channel-last.
            X = np.einsum('chw->hwc', self.review['images'][key])
            if origin == 'lower':
                X = X[::-1]
            else:
                assert origin == 'upper'
            # ToDo: Where is AxesImage defined?
            ax: 'plt.AxesImage' = plt.imshow(
                X,
                **kwargs,
            )
            # ax.set_title(key)
            plt.title(key)
            plt.grid(False)
            return ax

        def images(
                self,
                columns=1,
                font_scale=1.0,
                line_width=3,
                figure_size=(8.0, 6.0),
        ):
            """Plot every stored image on a shared figure grid."""
            from paderbox.visualization import axes_context
            from paderbox.visualization.context_manager import _AxesHandler
            with axes_context(
                    columns=columns,
                    font_scale=font_scale,
                    line_width=line_width,
                    figure_size=figure_size,
            ) as axes:
                axes: _AxesHandler
                for k in self.review['images']:
                    axes.new.grid(False)  # set gca
                    self.image(k)

    @cached_property.cached_property
    def plot(self):
        # Lazily created, cached plotting helper bound to this summary.
        return self._Plotter(self)

    def play(self, key=None):
        """Play one stored audio entry, or all of them when *key* is None."""
        if key is None:
            for k in self['audios'].keys():
                self.play(k)
        elif key in self['audios']:
            from paderbox.io.play import play
            data, sample_rate = self['audios'][key]
            play(data, sample_rate=sample_rate, name=key)
        else:
            from paderbox.utils.mapping import DispatchError
            raise DispatchError(key, self['audios'].keys())
| 31.050725 | 121 | 0.535123 |
9de73177fd56fe2cc0101a79442e867055bd7dbb | 9,219 | py | Python | tests/test_postgresql_search.py | VanilleBid/weekly-saleor | e776e86ee7ce710929ef33878d936e2a8367a217 | [
"BSD-3-Clause"
] | null | null | null | tests/test_postgresql_search.py | VanilleBid/weekly-saleor | e776e86ee7ce710929ef33878d936e2a8367a217 | [
"BSD-3-Clause"
] | 86 | 2018-03-08T14:19:19.000Z | 2018-05-12T14:55:16.000Z | tests/test_postgresql_search.py | JesusDelgadoPatlan/tiendaSpark | 0c8cfe7fa6e070f57daf4d06e2776bc4059ad830 | [
"BSD-3-Clause"
] | 2 | 2018-03-05T12:29:10.000Z | 2018-09-28T12:40:52.000Z | from saleor.product.models import Product
from saleor.order.models import Order
from saleor.userprofile.models import Address, User
from django.urls import reverse
from decimal import Decimal
import pytest
@pytest.fixture(scope='function', autouse=True)
def postgresql_search_enabled(settings):
    """Force the PostgreSQL full-text search backend on for every test."""
    settings.ENABLE_SEARCH = True
    settings.SEARCH_BACKEND = 'saleor.search.backends.postgresql'


# (name, description) pairs used to build the searchable test catalogue.
PRODUCTS = [('Arabica Coffee', 'The best grains in galactic'),
            ('Cool T-Shirt', 'Blue and big.'),
            ('Roasted chicken', 'Fabulous vertebrate')]


@pytest.fixture
def named_products(default_category, product_type):
    """Create one Product per PRODUCTS entry and return them in order."""
    def gen_product(name, description):
        product = Product.objects.create(
            name=name,
            description=description,
            price=Decimal(6.6),
            product_type=product_type,
            category=default_category)
        return product
    return [gen_product(name, desc) for name, desc in PRODUCTS]
def search_storefront(client, phrase):
    """Execute storefront search on client matching phrase."""
    resp = client.get(reverse('search:search'), {'q': phrase})
    return [prod for prod, _ in resp.context['results'].object_list]


@pytest.mark.parametrize('phrase,product_num',
                         [('Arabika', 0), ('Aarabica', 0), ('Arab', 0),
                          ('czicken', 2), ('blue', 1), ('roast', 2),
                          ('coool', 1)])
@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
def test_storefront_product_fuzzy_name_search(client, named_products, phrase,
                                              product_num):
    # Misspelled/partial phrases should still match exactly one product.
    results = search_storefront(client, phrase)
    assert 1 == len(results)
    assert named_products[product_num] in results


def unpublish_product(product):
    """Mark *product* unpublished so it must drop out of storefront search."""
    prod_to_unpublish = product
    prod_to_unpublish.is_published = False
    prod_to_unpublish.save()


@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
def test_storefront_filter_published_products(client, named_products):
    unpublish_product(named_products[0])
    assert search_storefront(client, 'Coffee') == []
def search_dashboard(client, phrase):
    """Execute dashboard search on client matching phrase.

    Returns the (products, orders, users) result lists from the view context.
    """
    response = client.get(reverse('dashboard:search'), {'q': phrase})
    assert response.context['query'] in phrase
    context = response.context
    return context['products'], context['orders'], context['users']


@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
def test_dashboard_search_with_empty_results(admin_client, named_products):
    products, orders, users = search_dashboard(admin_client, 'foo')
    assert 0 == len(products) == len(orders) == len(users)


@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
@pytest.mark.parametrize('phrase,product_num', [(' coffee. ', 0),
                                                ('shirt', 1), ('ROASTED', 2)])
def test_find_product_by_name(admin_client, named_products, phrase,
                              product_num):
    # Matching ignores surrounding whitespace/punctuation and case.
    products, _, _ = search_dashboard(admin_client, phrase)
    assert 1 == len(products)
    assert named_products[product_num] in products


@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
@pytest.mark.parametrize('phrase,product_num', [('BIG', 1), (' grains, ', 0),
                                                ('fabulous', 2)])
def test_find_product_by_description(admin_client, named_products, phrase,
                                     product_num):
    products, _, _ = search_dashboard(admin_client, phrase)
    assert 1 == len(products)
    assert named_products[product_num] in products
# (first_name, last_name, email) triples for the searchable test users.
USERS = [('Andreas', 'Knop', 'adreas.knop@example.com'),
         ('Euzebiusz', 'Ziemniak', 'euzeb.potato@cebula.pl'),
         ('John', 'Doe', 'johndoe@example.com')]
ORDER_IDS = [10, 45, 13]
# Each ORDERS row is [pk, first_name, last_name, email].
ORDERS = [[pk] + list(user) for pk, user in zip(ORDER_IDS, USERS)]


def gen_address_for_user(first_name, last_name):
    """Create an Address with fixed company/street data for the given name."""
    return Address.objects.create(
        first_name=first_name,
        last_name=last_name,
        company_name='Mirumee Software',
        street_address_1='Tęczowa 7',
        city='Wrocław',
        postal_code='53-601',
        country='PL')


@pytest.fixture
def orders_with_addresses():
    """One Order per ORDERS row, each owned by a freshly created user."""
    orders = []
    for pk, name, lastname, email in ORDERS:
        addr = gen_address_for_user(name, lastname)
        user = User.objects.create(default_shipping_address=addr, email=email)
        order = Order.objects.create(user=user, billing_address=addr, pk=pk)
        orders.append(order)
    return orders
@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
def test_find_order_by_id_with_no_result(admin_client, orders_with_addresses):
    phrase = '991'  # not existing id
    _, orders, _ = search_dashboard(admin_client, phrase)
    assert 0 == len(orders)


@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
def test_find_order_by_id(admin_client, orders_with_addresses):
    # Surrounding whitespace must be ignored when matching a numeric order id.
    phrase = ' 10 '
    _, orders, _ = search_dashboard(admin_client, phrase)
    assert 1 == len(orders)
    assert orders_with_addresses[0] in orders


@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
@pytest.mark.parametrize('phrase,order_num', [('euzeb.potato@cebula.pl', 1),
                                              (' johndoe@example.com ', 2)])
def test_find_order_with_email(admin_client, orders_with_addresses, phrase,
                               order_num):
    _, orders, _ = search_dashboard(admin_client, phrase)
    assert 1 == len(orders)
    assert orders_with_addresses[order_num] in orders


@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
@pytest.mark.parametrize('phrase,order_num', [('knop', 0), ('ZIEMniak', 1),
                                              (' john ', 2), ('ANDREAS', 0)])
def test_find_order_with_user_name(admin_client, orders_with_addresses, phrase,
                                   order_num):
    # Name matching must be case-insensitive.
    _, orders, _ = search_dashboard(admin_client, phrase)
    assert 1 == len(orders)
    assert orders_with_addresses[order_num] in orders


# Phrase/permission pair used by the permission-gating tests below.
ORDER_PHRASE_WITH_RESULT = 'Andreas'
ORDER_RESULTS_PERMISSION = 'order.view_order'


@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
def test_orders_search_results_restricted_to_users_with_permission(
        orders_with_addresses, staff_client, staff_user):
    # Without view_order permission the staff user sees no order results.
    assert not staff_user.has_perm(ORDER_RESULTS_PERMISSION)
    _, orders, _ = search_dashboard(staff_client, ORDER_PHRASE_WITH_RESULT)
    assert 0 == len(orders)


@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
def test_show_orders_search_result_to_user_with_permission_granted(
        orders_with_addresses, staff_client, staff_user, staff_group,
        permission_view_order):
    assert not staff_user.has_perm(ORDER_RESULTS_PERMISSION)
    staff_group.permissions.add(permission_view_order)
    staff_user.groups.add(staff_group)
    _, orders, _ = search_dashboard(staff_client, ORDER_PHRASE_WITH_RESULT)
    assert 1 == len(orders)
@pytest.fixture
def users_with_addresses():
users = []
for firstname, lastname, email in USERS:
addr = gen_address_for_user(firstname, lastname)
user = User.objects.create(default_billing_address=addr, email=email)
users.append(user)
return users
@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
@pytest.mark.parametrize('phrase,user_num', [('adreas.knop@example.com', 0),
(' euzeb.potato@cebula.pl ', 1)])
def test_find_user_by_email(admin_client, users_with_addresses, phrase,
user_num):
_, _, users = search_dashboard(admin_client, phrase)
assert 1 == len(users)
assert users_with_addresses[user_num] in users
@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
@pytest.mark.parametrize('phrase,user_num', [('Andreas Knop', 0),
(' Euzebiusz ', 1), ('DOE', 2)])
def test_find_user_by_name(admin_client, users_with_addresses, phrase,
user_num):
_, _, users = search_dashboard(admin_client, phrase)
assert 1 == len(users)
assert users_with_addresses[user_num] in users
USER_PHRASE_WITH_RESULT = 'adreas.knop@example.com'
USER_RESULTS_PERMISSION = 'userprofile.view_user'
@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
def test_users_search_results_restricted_to_staff_with_permission(
        users_with_addresses, staff_client, staff_user):
    """Without userprofile.view_user, dashboard search must hide user hits."""
    assert not staff_user.has_perm(USER_RESULTS_PERMISSION)
    found_users = search_dashboard(staff_client, USER_PHRASE_WITH_RESULT)[2]
    assert len(found_users) == 0
@pytest.mark.integration
@pytest.mark.django_db(transaction=True)
def test_show_users_search_result_when_access_granted(
        users_with_addresses, staff_client, staff_user, staff_group,
        permission_view_user):
    """Granting userprofile.view_user through a group reveals the user hit."""
    assert not staff_user.has_perm(USER_RESULTS_PERMISSION)
    staff_group.permissions.add(permission_view_user)
    staff_user.groups.add(staff_group)
    found_users = search_dashboard(staff_client, USER_PHRASE_WITH_RESULT)[2]
    assert len(found_users) == 1
| 36.729084 | 79 | 0.692917 |
5729d5da16f4b7eada51126d5bb6485bc1d2dbc3 | 49 | py | Python | 01-holamundo/holamundo.py | tulcas/master-python | ddac8aa1bf3e6448fe237eac239d27ce3fda705c | [
"MIT"
] | null | null | null | 01-holamundo/holamundo.py | tulcas/master-python | ddac8aa1bf3e6448fe237eac239d27ce3fda705c | [
"MIT"
] | null | null | null | 01-holamundo/holamundo.py | tulcas/master-python | ddac8aa1bf3e6448fe237eac239d27ce3fda705c | [
"MIT"
] | null | null | null | print("Hola mundo !! Soy Victor Robles!!!\n\n")
| 16.333333 | 47 | 0.632653 |
8e53e00d4f5c1a21fec538b2ab32025aec76fbd6 | 6,634 | py | Python | impacket/NDP.py | wuerror/impacket | 142cacb649b8c6441df9330ac22fe4d15a0d1bbc | [
"Apache-1.1"
] | 23 | 2020-02-21T02:44:21.000Z | 2022-03-03T23:40:32.000Z | impacket/NDP.py | cipher9rat/impacket | 142cacb649b8c6441df9330ac22fe4d15a0d1bbc | [
"Apache-1.1"
] | null | null | null | impacket/NDP.py | cipher9rat/impacket | 142cacb649b8c6441df9330ac22fe4d15a0d1bbc | [
"Apache-1.1"
] | 21 | 2021-06-29T23:14:54.000Z | 2022-03-24T13:13:58.000Z | # SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
import array
import struct
from impacket import ImpactPacket
from impacket.ICMP6 import ICMP6
class NDP(ICMP6):
    """Builder for IPv6 Neighbor Discovery Protocol messages (RFC 4861).

    ND messages are ICMPv6 messages, so this class extends ICMP6 and adds
    class-method factories for the five ND message types plus a helper to
    append ND options to an already-built message.
    """

    #ICMP message type numbers
    ROUTER_SOLICITATION = 133
    ROUTER_ADVERTISEMENT = 134
    NEIGHBOR_SOLICITATION = 135
    NEIGHBOR_ADVERTISEMENT = 136
    REDIRECT = 137

    ############################################################################
    # Append NDP Option helper
    def append_ndp_option(self, ndp_option):
        """Append the raw bytes of *ndp_option* to this message's payload."""
        #As NDP inherits ICMP6, it is, in fact an ICMP6 "header"
        #The payload (where all NDP options should reside) is a child of the header
        self.child().get_bytes().extend(ndp_option.get_bytes())

    ############################################################################
    @classmethod
    def Router_Solicitation(class_object):
        """Build a Router Solicitation (type 133): just the reserved word."""
        message_data = struct.pack('>L', 0) #Reserved bytes
        return class_object.__build_message(NDP.ROUTER_SOLICITATION, message_data)

    @classmethod
    def Router_Advertisement(class_object, current_hop_limit,
                             managed_flag, other_flag,
                             router_lifetime, reachable_time, retransmission_timer):
        """Build a Router Advertisement (type 134).

        managed_flag and other_flag set the M (0x80) and O (0x40) bits of
        the flags byte; the remaining fields map directly onto the RFC 4861
        RA layout (hop limit, lifetime, reachable time, retrans timer).
        """
        flag_byte = 0x00
        if (managed_flag):
            flag_byte |= 0x80
        if (other_flag):
            flag_byte |= 0x40
        message_data = struct.pack('>BBHLL', current_hop_limit, flag_byte, router_lifetime, reachable_time, retransmission_timer)
        return class_object.__build_message(NDP.ROUTER_ADVERTISEMENT, message_data)

    @classmethod
    def Neighbor_Solicitation(class_object, target_address):
        """Build a Neighbor Solicitation (type 135) for *target_address*."""
        message_data = struct.pack('>L', 0) #Reserved bytes
        message_data += ImpactPacket.array_tobytes(target_address.as_bytes())
        return class_object.__build_message(NDP.NEIGHBOR_SOLICITATION, message_data)

    @classmethod
    def Neighbor_Advertisement(class_object, router_flag, solicited_flag, override_flag, target_address):
        """Build a Neighbor Advertisement (type 136).

        The R/S/O flags occupy bits 7..5 of the first byte, followed by
        three reserved bytes and the target address.
        """
        flag_byte = 0x00
        if (router_flag):
            flag_byte |= 0x80
        if (solicited_flag):
            flag_byte |= 0x40
        if (override_flag):
            flag_byte |= 0x20
        message_data = struct.pack('>BBBB', flag_byte, 0x00, 0x00, 0x00) #Flag byte and three reserved bytes
        message_data += ImpactPacket.array_tobytes(target_address.as_bytes())
        return class_object.__build_message(NDP.NEIGHBOR_ADVERTISEMENT, message_data)

    @classmethod
    def Redirect(class_object, target_address, destination_address):
        """Build a Redirect (type 137) carrying target then destination address."""
        message_data = struct.pack('>L', 0)# Reserved bytes
        message_data += ImpactPacket.array_tobytes(target_address.as_bytes())
        message_data += ImpactPacket.array_tobytes(destination_address.as_bytes())
        return class_object.__build_message(NDP.REDIRECT, message_data)

    @classmethod
    def __build_message(class_object, type, message_data):
        """Wrap *message_data* in an NDP/ICMPv6 header of the given *type* (code 0)."""
        #Build NDP header
        ndp_packet = NDP()
        ndp_packet.set_type(type)
        ndp_packet.set_code(0)
        #Pack payload
        ndp_payload = ImpactPacket.Data()
        ndp_payload.set_data(message_data)
        ndp_packet.contains(ndp_payload)
        return ndp_packet
class NDP_Option():
    """Factory for NDP options (RFC 4861 section 4.6).

    Every option is encoded as: type (1 byte), length (1 byte, expressed in
    units of 8 octets and covering the type/length bytes themselves),
    followed by the option data.
    """

    #NDP Option Type numbers
    SOURCE_LINK_LAYER_ADDRESS = 1
    TARGET_LINK_LAYER_ADDRESS = 2
    PREFIX_INFORMATION = 3
    REDIRECTED_HEADER = 4
    MTU_OPTION = 5

    ############################################################################
    @classmethod
    #link_layer_address must have a size that is a multiple of 8 octets
    def Source_Link_Layer_Address(class_object, link_layer_address):
        """Build a Source Link-Layer Address option (type 1)."""
        return class_object.__Link_Layer_Address(NDP_Option.SOURCE_LINK_LAYER_ADDRESS, link_layer_address)

    @classmethod
    #link_layer_address must have a size that is a multiple of 8 octets
    def Target_Link_Layer_Address(class_object, link_layer_address):
        """Build a Target Link-Layer Address option (type 2)."""
        return class_object.__Link_Layer_Address(NDP_Option.TARGET_LINK_LAYER_ADDRESS, link_layer_address)

    @classmethod
    #link_layer_address must have a size that is a multiple of 8 octets
    def __Link_Layer_Address(class_object, option_type, link_layer_address):
        """Build either flavor of link-layer address option."""
        # BUGFIX: use floor division. True division yields a float on
        # Python 3, and struct.pack('>BB', ...) raises struct.error when
        # handed a non-integer length.
        option_length = (len(link_layer_address) // 8) + 1
        option_data = ImpactPacket.array_tobytes(array.array("B", link_layer_address))
        return class_object.__build_option(option_type, option_length, option_data)

    @classmethod
    #Note: if we upgraded to Python 2.6, we could use collections.namedtuples for encapsulating the arguments
    #ENHANCEMENT - Prefix could be an instance of IP6_Address
    def Prefix_Information(class_object, prefix_length, on_link_flag, autonomous_flag, valid_lifetime, preferred_lifetime, prefix):
        """Build a Prefix Information option (type 3).

        on_link_flag and autonomous_flag set the L (0x80) and A (0x40) bits.
        *prefix* is a 16-octet IPv6 prefix as a byte sequence, so the option
        body is always 32 octets -> fixed length of 4 (units of 8 octets).
        """
        flag_byte = 0x00
        if (on_link_flag):
            flag_byte |= 0x80
        if (autonomous_flag):
            flag_byte |= 0x40
        option_data = struct.pack('>BBLL', prefix_length, flag_byte, valid_lifetime, preferred_lifetime)
        option_data += struct.pack('>L', 0) #Reserved bytes
        option_data += ImpactPacket.array_tobytes(array.array("B", prefix))
        option_length = 4
        return class_object.__build_option(NDP_Option.PREFIX_INFORMATION, option_length, option_data)

    @classmethod
    def Redirected_Header(class_object, original_packet):
        """Build a Redirected Header option (type 4) embedding *original_packet*."""
        option_data = struct.pack('>BBBBBB', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)# Reserved bytes
        option_data += ImpactPacket.array_tobytes(array.array("B", original_packet))
        # BUGFIX: floor division for the same Python 3 struct.pack reason
        # as in __Link_Layer_Address. (+4 accounts for type/length plus the
        # leading reserved bytes.)
        option_length = (len(option_data) + 4) // 8
        return class_object.__build_option(NDP_Option.REDIRECTED_HEADER, option_length, option_data)

    @classmethod
    def MTU(class_object, mtu):
        """Build an MTU option (type 5); always 8 octets -> length 1."""
        option_data = struct.pack('>BB', 0x00, 0x00)# Reserved bytes
        option_data += struct.pack('>L', mtu)
        option_length = 1
        return class_object.__build_option(NDP_Option.MTU_OPTION, option_length, option_data)

    @classmethod
    def __build_option(class_object, type, length, option_data):
        """Prepend the type/length header and wrap the bytes in a Data node."""
        #Pack data
        data_bytes = struct.pack('>BB', type, length)
        data_bytes += option_data

        ndp_option = ImpactPacket.Data()
        ndp_option.set_data(data_bytes)
        return ndp_option
| 39.963855 | 131 | 0.66521 |
be13266e224fb72cd4c0d6c30d7b386cee07dc6a | 1,042 | py | Python | kubernetes/test/test_v1beta2_deployment_condition.py | iamneha/python | 5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0 | [
"Apache-2.0"
] | 1 | 2019-02-17T15:28:39.000Z | 2019-02-17T15:28:39.000Z | kubernetes/test/test_v1beta2_deployment_condition.py | iamneha/python | 5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1beta2_deployment_condition.py | iamneha/python | 5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta2_deployment_condition import V1beta2DeploymentCondition
class TestV1beta2DeploymentCondition(unittest.TestCase):
    """Unit test stubs for the V1beta2DeploymentCondition model."""

    def setUp(self):
        """No fixtures are required for this stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testV1beta2DeploymentCondition(self):
        """Placeholder test for constructing V1beta2DeploymentCondition."""
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1beta2_deployment_condition.V1beta2DeploymentCondition()
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 23.155556 | 105 | 0.731286 |
3629fca193ee45491cf0cdc41eea9a096a771349 | 275 | py | Python | csip/datasets/__init__.py | isaaccorley/contrastive-surface-image-pretraining | a918d4fd3b9cc61ec512af978fb4f086d3b46a70 | [
"MIT"
] | 1 | 2022-03-03T09:26:49.000Z | 2022-03-03T09:26:49.000Z | csip/datasets/__init__.py | isaaccorley/contrastive-surface-image-pretraining | a918d4fd3b9cc61ec512af978fb4f086d3b46a70 | [
"MIT"
] | 1 | 2022-03-09T08:50:01.000Z | 2022-03-09T08:51:11.000Z | csip/datasets/__init__.py | isaaccorley/contrastive-surface-image-pretraining | a918d4fd3b9cc61ec512af978fb4f086d3b46a70 | [
"MIT"
] | null | null | null | from .landcover_ai import LandCoverAI, LandCoverAIDataModule
from .oscd import OSCD, OSCDDataModule
from .overhead_geopose import OverheadGeopose, OverheadGeoposeSSLDataModule
from .s2looking import S2Looking, S2LookingDataModule
from .xview2 import xView2, xView2DataModule
| 45.833333 | 75 | 0.872727 |
cdc7205e74804a2f51fc90157872b7aec9ada281 | 2,128 | py | Python | capirca/lib/srxlo.py | google-admin/capirca | 8c9e66456fedb3c0fc1c641dbefc41793e5c68d5 | [
"Apache-2.0"
] | null | null | null | capirca/lib/srxlo.py | google-admin/capirca | 8c9e66456fedb3c0fc1c641dbefc41793e5c68d5 | [
"Apache-2.0"
] | null | null | null | capirca/lib/srxlo.py | google-admin/capirca | 8c9e66456fedb3c0fc1c641dbefc41793e5c68d5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Juniper SRX generator for loopback ACLs.
This is a subclass of Juniper generator. Juniper SRX loopback filter
uses the same syntax as regular Juniper stateless ACLs, with minor
differences. This subclass effects those differences.
"""
from capirca.lib import juniper
class Term(juniper.Term):
  """Single SRXlo term representation.

  Identical to the Juniper stateless term except that srxlo spells ICMPv6
  as 'icmp6', so protocol names are rewritten on the way in and translated
  back when normalizing ICMP types for the parent class.
  """
  _PLATFORM = 'srxlo'

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    rename = lambda proto: 'icmp6' if proto == 'icmpv6' else proto
    self.term.protocol = [rename(proto) for proto in self.term.protocol]
    self.term.protocol_except = [
        rename(proto) for proto in self.term.protocol_except]

  def NormalizeIcmpTypes(self, icmp_types, protocols, af):
    # The parent expects the 'icmpv6' spelling, so undo the srxlo rename.
    restored = ['icmpv6' if proto == 'icmp6' else proto for proto in protocols]
    return super().NormalizeIcmpTypes(icmp_types, restored, af)
class SRXlo(juniper.Juniper):
  """SRXlo generator."""
  _PLATFORM = 'srxlo'
  SUFFIX = '.jsl'
  _TERM = Term

  def _BuildTokens(self):
    """Build supported tokens for platform.

    Returns:
      tuple containing both supported tokens and sub tokens
    """
    supported_tokens, supported_sub_tokens = super()._BuildTokens()
    # Tokens valid on regular Juniper filters but not on srxlo:
    #   flexible_match_range - flexible match is MX/Trio only
    #   encapsulate          - currently only supported in juniper
    #   port_mirror          - currently only supported in juniper
    for unsupported in ('flexible_match_range', 'encapsulate', 'port_mirror'):
      supported_tokens.remove(unsupported)
    return supported_tokens, supported_sub_tokens
| 33.777778 | 74 | 0.719925 |
eaa72539e6430beedac7c2b67837bf560cb7f908 | 506 | py | Python | migrations/versions/47b4ac7d7d6e_.py | d82078010/The-Anti-Social-Network | 68941cda2e855f91d7ea4c8bf3c8159118d7f09b | [
"MIT"
] | 16 | 2015-01-22T17:14:56.000Z | 2019-08-15T13:14:08.000Z | migrations/versions/47b4ac7d7d6e_.py | aquario-crypto/Anti_Social_Network | 8d7113ce34b9ecc8208cf98f2ddba1b98c4dd1d9 | [
"MIT"
] | 6 | 2020-03-24T15:32:44.000Z | 2021-02-02T21:46:33.000Z | migrations/versions/47b4ac7d7d6e_.py | aquario-crypto/Anti_Social_Network | 8d7113ce34b9ecc8208cf98f2ddba1b98c4dd1d9 | [
"MIT"
] | 5 | 2015-02-06T12:35:37.000Z | 2018-08-09T15:14:23.000Z | """empty message
Revision ID: 47b4ac7d7d6e
Revises: 57b811035a20
Create Date: 2014-09-09 20:26:07.166443
"""
# revision identifiers, used by Alembic.
# Alembic revision chain: this migration and the one it follows.
revision = '47b4ac7d7d6e'
down_revision = '57b811035a20'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: Alembic generated no schema operations."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: Alembic generated no schema operations."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
| 18.740741 | 63 | 0.689723 |
fd95eaa709757b6c32e43db1dbf3474c2443248c | 1,712 | py | Python | scripts/cross_validation_results.py | jannesgg/rasa-train-test-gha | 2c12be4555d893b16cf98b09064c8b9eb9b0c6ff | [
"Apache-2.0"
] | null | null | null | scripts/cross_validation_results.py | jannesgg/rasa-train-test-gha | 2c12be4555d893b16cf98b09064c8b9eb9b0c6ff | [
"Apache-2.0"
] | null | null | null | scripts/cross_validation_results.py | jannesgg/rasa-train-test-gha | 2c12be4555d893b16cf98b09064c8b9eb9b0c6ff | [
"Apache-2.0"
] | null | null | null | from pytablewriter import MarkdownTableWriter
import json
import os
# Directory holding Rasa's evaluation report JSON files; the GitHub Action
# exports RESULT_DIR before invoking this script.
result_dir = os.environ["RESULT_DIR"]
def intent_table():
    """Render the intent cross-validation report as a Markdown table."""
    with open(f"{result_dir}/intent_report.json", "r") as report:
        data = json.loads(report.read())
    # Overall accuracy is a scalar, not a per-class record; drop it.
    data.pop("accuracy", None)

    cols = ["support", "f1-score", "confused_with"]
    labels = sorted(data, key=lambda label: data[label].get("support", 0),
                    reverse=True)

    def render(label, col):
        value = data[label].get(col)
        if not value:
            return "N/A"
        if col == "confused_with":
            return ", ".join(f"{other}({count})"
                             for other, count in value.items())
        return value

    writer = MarkdownTableWriter()
    writer.table_name = "Intent Cross-Validation Results"
    writer.headers = ["class"] + cols
    writer.value_matrix = [[label] + [render(label, col) for col in cols]
                           for label in labels]
    return writer.dumps()
def entity_table():
    """Render the DIET entity extraction report as a Markdown table."""
    with open(f"{result_dir}/DIETClassifier_report.json", "r") as report:
        data = json.loads(report.read())

    cols = ["support", "f1-score", "precision", "recall"]
    labels = sorted(data, key=lambda label: data[label]["support"],
                    reverse=True)

    def render(label, col):
        value = data[label].get(col)
        return value if value else "N/A"

    writer = MarkdownTableWriter()
    writer.table_name = "Entity Cross-Validation Results"
    writer.headers = ["entity"] + cols
    writer.value_matrix = [[label] + [render(label, col) for col in cols]
                           for label in labels]
    return writer.dumps()
# Render both tables and print them to stdout (separated by blank lines)
# for the calling workflow to capture.
intents = intent_table()
entities = entity_table()
print(intents)
print("\n\n\n")
print(entities)
| 24.112676 | 74 | 0.599299 |
3f99e4387227f93b61af143c16e04c84fb4250a0 | 1,445 | py | Python | dufi/gui/boxes/custombox/logbox.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | dufi/gui/boxes/custombox/logbox.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | dufi/gui/boxes/custombox/logbox.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import os
import io
import subprocess
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
from ..pygubuapp import GUIApplication
from .logboxui import logboxui
# Public API of this module.
__all__ = ("logbox", )
class LogDialogBox(GUIApplication):
    """Dialog that displays the contents of an application log file."""

    title = "Application Log"
    ui_content = logboxui

    def __init__(self, master, logfile, title=None):
        """Load *logfile* into the text widget and scroll to the end.

        :param master: parent Tk widget.
        :param logfile: path of the log file to display.
        :param title: optional window title overriding the class default.
        """
        super(LogDialogBox, self).__init__(master)

        self.logfile = logfile

        if title:
            self.title = title  # instance attribute shadows the class default

        w = self.builder.get_object("TextLogContent")
        w.delete(1.0, tk.END)
        # BUGFIX: close the file deterministically instead of leaking the
        # handle until garbage collection.
        with io.open(logfile) as fh:
            w.insert(1.0, fh.read().rstrip())
        w.update()
        w.see(tk.END)
        w.focus()

    def on_button_locate(self, event=None):
        """Open Windows Explorer with the log file pre-selected."""
        if not os.path.isfile(self.logfile):
            return

        # Hide the transient console window spawned for the shell command.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW

        # NOTE(review): the cp1251-encoded bytes command suggests Python 2
        # on Russian-locale Windows; on Python 3 a plain str is preferred —
        # confirm before changing.
        subprocess.Popen(
            'explorer /select,"{}"'.format(self.logfile).encode("cp1251"),
            shell=True, startupinfo=startupinfo)
def logbox(logfile, title=None, parent=None):
    """Show the application-log dialog.

    If no parent widget is supplied, a hidden Tk root is created and a
    blocking mainloop is run until the dialog is closed.
    """
    root = None
    if not parent:
        root = tk.Tk()
        root.withdraw()
        parent = root

    LogDialogBox(parent, logfile, title)

    if root:
        root.mainloop()
# No standalone behaviour when executed directly.
if __name__ == "__main__":
    pass
| 20.352113 | 82 | 0.631834 |
9f0448b2870040c705004f28c3ffef226b616a5b | 1,370 | py | Python | tracker/serializers.py | TreZc0/donation-tracker | 3a833a5eba3c0b7cedd8249b44b1435f526095ba | [
"Apache-2.0"
] | 39 | 2016-01-04T04:13:27.000Z | 2022-01-18T19:17:24.000Z | tracker/serializers.py | TreZc0/donation-tracker | 3a833a5eba3c0b7cedd8249b44b1435f526095ba | [
"Apache-2.0"
] | 140 | 2015-11-01T01:19:54.000Z | 2022-03-10T13:00:33.000Z | tracker/serializers.py | TreZc0/donation-tracker | 3a833a5eba3c0b7cedd8249b44b1435f526095ba | [
"Apache-2.0"
] | 35 | 2016-01-20T12:42:21.000Z | 2022-01-20T07:06:47.000Z | from django.core.serializers.python import Serializer as PythonSerializer
from django.db import models
from tracker.models import Prize
# Extra model attributes (fields or zero-argument callables) serialized in
# addition to the concrete model fields, keyed by model class.
_ExtraFields = {
    Prize: ['start_draw_time', 'end_draw_time'],
}
class TrackerSerializer(PythonSerializer):
    """Python serializer with tracker-specific field handling.

    File fields are serialized as their URL (or ''), decimals as floats,
    and each dump gains the model's _ExtraFields plus a canonical URL when
    the object defines get_absolute_url().
    """

    def __init__(self, Model, request):
        self.Model = Model
        self.request = request

    def handle_field(self, obj, field):
        """Serialize file fields as URLs and decimal fields as floats."""
        if isinstance(field, models.FileField):
            stored = field.value_from_object(obj)
            self._current[field.name] = stored.url if stored else ''
            return
        if isinstance(field, models.DecimalField):
            stored = field.value_from_object(obj)
            self._current[field.name] = float(stored) if stored else stored
            return
        super(TrackerSerializer, self).handle_field(obj, field)

    def get_dump_object(self, obj):
        """Augment the base dump with per-model extras and a canonical URL."""
        data = super(TrackerSerializer, self).get_dump_object(obj)
        for name in _ExtraFields.get(self.Model, []):
            value = getattr(obj, name)
            data['fields'][name] = value() if callable(value) else value
        url_getter = getattr(obj, 'get_absolute_url', None)
        if callable(url_getter):
            data['fields']['canonical_url'] = self.request.build_absolute_uri(
                url_getter())
        return data
2924a197c4b7f920f444d05dcbdbd95d145a3dbc | 1,538 | py | Python | Winston/code/pgn_to_df3.py | wel51x/ItsOver9000-DS | 288dced311a77ad3c1945f68a1ab8ed2c075b755 | [
"MIT"
] | null | null | null | Winston/code/pgn_to_df3.py | wel51x/ItsOver9000-DS | 288dced311a77ad3c1945f68a1ab8ed2c075b755 | [
"MIT"
] | null | null | null | Winston/code/pgn_to_df3.py | wel51x/ItsOver9000-DS | 288dced311a77ad3c1945f68a1ab8ed2c075b755 | [
"MIT"
] | 2 | 2019-04-12T23:51:43.000Z | 2019-04-16T04:43:23.000Z | import chess.pgn
import pandas as pd
import datetime
import numpy as np
def categorize(score):
    """Map an Elo rating to a 100-point-wide bucket index.

    Everything at or below 799 (including sub-700 ratings, which are
    clamped) lands in bucket 0; 800-899 -> 1; 900-999 -> 2; and so on.

    Args:
        score: integer Elo rating.

    Returns:
        Non-negative int bucket index.
    """
    # Clamp with max() instead of an if, use floor division instead of
    # int(x / 100), and avoid shadowing the built-in `bin`.
    offset = max(score - 700, 0)
    return int(offset // 100)
# Hard-coded FICS PGN dump to analyse.
pgn = open("/Users/wel51x/Downloads/ficsgamesdb_201902_chess_nomovetimes_63077.pgn")
ix = 0  # number of games read so far
df_cols = ["Result", "Category"]  # NOTE(review): unused below — confirm
playerSet = {}  # player name -> 1; dict used as a "seen" set
Result = []
categ = np.zeros(26)  # histogram of Elo buckets (see categorize)
print("00000", datetime.datetime.now())
# Read games one by one, tallying each previously-unseen player's Elo bucket.
while True:
    game = chess.pgn.read_game(pgn)
    '''
    Result += [game.headers["Result"]]
    if Result[ix] == "1-0":
        # Result[ix] = 1
        Result[ix] = "White Win"
    elif Result[ix] == "0-1":
        # Result[ix] = -1
        Result[ix] = "Black Win"
    else:
        # Result[ix] = 0
        Result[ix] = "Draw"
    '''
    ix += 1
    # Progress marker every 10k games; hard stop after the first 100k.
    if ix % 10000 == 0:
        print(ix, datetime.datetime.now())
    if ix % 100000 == 0:
        break
    # NOTE(review): str([...]) stores the repr of a one-element list, e.g.
    # "['name']" — it works as a set key but looks unintended; confirm.
    playerBlack = str([game.headers["Black"]])
    # NOTE(review): when Black was seen before, `continue` also skips the
    # White player of this game — confirm that is intended.
    if playerBlack in playerSet.keys():
        continue
    else:
        playerSet[playerBlack] = 1
    BlackElo = [game.headers["BlackElo"]]
    BlackElo = int(BlackElo[0])
    categ[categorize(BlackElo)] += 1
    playerWhite = str([game.headers["White"]])
    if playerWhite in playerSet.keys():
        continue
    else:
        playerSet[playerWhite] = 1
    WhiteElo = [game.headers["WhiteElo"]]
    WhiteElo = int(WhiteElo[0])
    categ[categorize(WhiteElo)] += 1
#my_df["Result"] = Result
# Dump the bucket histogram and the count of distinct players seen.
my_df = pd.DataFrame(categ)
#my_df["Counts"] = categ
print(my_df)
print(len(playerSet))
my_df.to_csv('my_df4.csv')
| 23.30303 | 84 | 0.587776 |
7293d7d26d00da65ad03ed2d895c795bbdfa568e | 1,182 | py | Python | setup.py | JNRowe-retired/restfulie-py | 8ac2bc401068c7bae6da3d107b004835618165d7 | [
"Apache-2.0"
] | 3 | 2015-03-09T09:17:46.000Z | 2016-05-03T02:51:25.000Z | setup.py | JNRowe-retired/restfulie-py | 8ac2bc401068c7bae6da3d107b004835618165d7 | [
"Apache-2.0"
] | null | null | null | setup.py | JNRowe-retired/restfulie-py | 8ac2bc401068c7bae6da3d107b004835618165d7 | [
"Apache-2.0"
] | 1 | 2020-12-14T23:23:26.000Z | 2020-12-14T23:23:26.000Z | #!/usr/bin/python
from setuptools import setup, find_packages
# setuptools metadata for the restfulie package.
setup(name='restfulie',
      version='0.9.2',
      description='Writing hypermedia aware resource based clients and servers',
      # NOTE(review): author fields are intentionally blank upstream.
      author=' ',
      author_email=' ',
      url='http://restfulie.caelumobjects.com/',
      long_description='CRUD through HTTP is a good step forward to using resources and becoming RESTful, another step further is to make use of hypermedia aware resources and Restfulie allows you to do it in Python.',
      download_url='https://github.com/caelum/restfulie-py',
      keywords='restfulie rest http hypermedia',
      classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        ],
      # Tests run via nose; httplib2 is the only runtime dependency.
      test_suite = "nose.collector",
      install_requires= ['httplib2>=0.6.0'],
      packages=find_packages(),
      include_package_data=True,
      )
| 38.129032 | 218 | 0.64467 |
be0f95114f6955cd6ef31ac1ab20e646647cc4aa | 32,747 | py | Python | ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py | willwill1101/ambari | 3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1 | [
"Apache-2.0",
"MIT"
] | 3 | 2016-12-01T15:55:11.000Z | 2016-12-01T15:56:38.000Z | ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py | willwill1101/ambari | 3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1 | [
"Apache-2.0",
"MIT"
] | null | null | null | ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py | willwill1101/ambari | 3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1 | [
"Apache-2.0",
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import tempfile
from resource_management import *
from stacks.utils.RMFTestCase import *
from mock.mock import patch
from mock.mock import MagicMock
@patch.object(tempfile, "gettempdir", new=MagicMock(return_value="/tmp"))
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
class TestKnoxGateway(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "KNOX/0.5.0.2.2/package"
STACK_VERSION = "2.2"
  def test_configure_default(self):
    """Run the 'configure' command with default.json and verify every
    resource it should create: data/log/run/conf directories, gateway
    configs, topology, ownership fixes, master secret / certificate
    creation, and the demo LDAP log4j + users.ldif files."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
                       classname = "KnoxGateway",
                       command = "configure",
                       config_file="default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    # Knox directories must exist, be owned by knox:knox and traversable.
    self.assertResourceCalled('Directory', '/usr/hdp/current/knox-server/data/',
                              owner = 'knox',
                              group = 'knox',
                              recursive = True,
                              mode = 0755,
                              cd_access = "a",
                              )
    self.assertResourceCalled('Directory', '/var/log/knox',
                              owner = 'knox',
                              group = 'knox',
                              recursive = True,
                              mode = 0755,
                              cd_access = "a",
                              )
    self.assertResourceCalled('Directory', '/var/run/knox',
                              owner = 'knox',
                              group = 'knox',
                              recursive = True,
                              mode = 0755,
                              cd_access = "a",
                              )
    self.assertResourceCalled('Directory', '/usr/hdp/current/knox-server/conf',
                              owner = 'knox',
                              group = 'knox',
                              recursive = True,
                              mode = 0755,
                              cd_access = "a",
                              )
    self.assertResourceCalled('Directory', '/usr/hdp/current/knox-server/conf/topologies',
                              owner = 'knox',
                              group = 'knox',
                              recursive = True,
                              mode = 0755,
                              cd_access = "a",
                              )
    # Gateway configuration files rendered from the command's configurations.
    self.assertResourceCalled('XmlConfig', 'gateway-site.xml',
                              owner = 'knox',
                              group = 'knox',
                              conf_dir = '/usr/hdp/current/knox-server/conf',
                              configurations = self.getConfig()['configurations']['gateway-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['gateway-site']
    )
    self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/gateway-log4j.properties',
                              mode=0644,
                              group='knox',
                              owner = 'knox',
                              content = self.getConfig()['configurations']['gateway-log4j']['content']
    )
    self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/topologies/default.xml',
                              group='knox',
                              owner = 'knox',
                              content = InlineTemplate(self.getConfig()['configurations']['topology']['content'])
    )
    # Ownership is fixed recursively across all Knox directories via sudo.
    self.assertResourceCalled('Execute', ('chown',
                                          '-R',
                                          'knox:knox',
                                          '/usr/hdp/current/knox-server/data/',
                                          '/var/log/knox',
                                          '/var/run/knox',
                                          '/usr/hdp/current/knox-server/conf',
                                          '/usr/hdp/current/knox-server/conf/topologies'),
                              sudo = True,
                              )
    # Master secret and gateway certificate are created only when missing
    # (guarded by the not_if test on the corresponding security files).
    self.assertResourceCalled('Execute', '/usr/hdp/current/knox-server/bin/knoxcli.sh create-master --master sa',
                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
                              not_if = "ambari-sudo.sh su knox -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]test -f /usr/hdp/current/knox-server/data/security/master'",
                              user = 'knox',
                              )
    self.assertResourceCalled('Execute', '/usr/hdp/current/knox-server/bin/knoxcli.sh create-cert --hostname c6401.ambari.apache.org',
                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
                              not_if = "ambari-sudo.sh su knox -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]test -f /usr/hdp/current/knox-server/data/security/keystores/gateway.jks'",
                              user = 'knox',
                              )
    # Demo LDAP configuration files with fixed expected contents.
    self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/ldap-log4j.properties',
                              content = '\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # "License"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an "AS IS" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #testing\n\n app.log.dir=${launcher.dir}/../logs\n app.log.file=${launcher.name}.log\n\n log4j.rootLogger=ERROR, drfa\n log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n log4j.logger.org.apache.directory=WARN\n\n log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n',
                              owner = 'knox',
                              group = 'knox',
                              mode = 0644,
                              )
    self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/users.ldif',
                              content = '\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # "License"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an "AS IS" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n version: 1\n\n # Please replace with site specific values\n dn: dc=hadoop,dc=apache,dc=org\n objectclass: organization\n objectclass: dcObject\n o: Hadoop\n dc: hadoop\n\n # Entry for a sample people container\n # Please replace with site specific values\n dn: ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:organizationalUnit\n ou: people\n\n # Entry for a sample end user\n # Please replace with site specific values\n dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: Guest\n sn: User\n uid: guest\n userPassword:guest-password\n\n # entry for sample user admin\n dn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: Admin\n sn: Admin\n uid: admin\n userPassword:admin-password\n\n # entry for sample user sam\n dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: sam\n sn: sam\n uid: sam\n userPassword:sam-password\n\n # entry for sample user tom\n dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: tom\n sn: tom\n uid: tom\n userPassword:tom-password\n\n # create FIRST Level groups branch\n dn: ou=groups,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:organizationalUnit\n ou: groups\n description: generic groups branch\n\n # create the analyst group under groups\n dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass: groupofnames\n cn: analyst\n description:analyst group\n member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n # create the scientist group under groups\n dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass: groupofnames\n cn: scientist\n description: scientist group\n member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org',
                              owner = 'knox',
                              group = 'knox',
                              mode = 0644,
                              )
    # Nothing else should have been scheduled by the configure command.
    self.assertNoMoreResources()
@patch("resource_management.libraries.functions.security_commons.build_expectations")
@patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
@patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
@patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
@patch("resource_management.libraries.script.Script.put_structured_out")
def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock,
                         validate_security_config_mock, get_params_mock, build_exp_mock):
    """Exercise the KnoxGateway ``security_status`` command.

    Scenarios covered:
      1. valid kerberos parameters        -> "SECURED_KERBEROS"
      2. cached_kinit_executor raising    -> exception handled, no crash
      3. keytab/principal not configured  -> "securityIssuesFound" message
      4. validation issues reported       -> "UNSECURED"
      5. security disabled (default.json) -> "UNSECURED"
    """
    # --- Scenario 1: function works when called with correct parameters. ---
    security_params = {
        "krb5JAASLogin": {
            'keytab': "/path/to/keytab",
            'principal': "principal"
        },
        "gateway-site": {
            "gateway.hadoop.kerberos.secured": "true"
        }
    }
    result_issues = []
    get_params_mock.return_value = security_params
    validate_security_config_mock.return_value = result_issues
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
                       classname = "KnoxGateway",
                       command = "security_status",
                       config_file = "secured.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
                       )
    # BUG FIX: the original `self.assertTrue(mock.call_count, 2)` passed 2 as
    # the assertion *message*, so only truthiness was checked.  assertGreaterEqual
    # keeps the original's effective semantics (call_count is a non-negative int)
    # while stating the intent explicitly.
    # TODO(review): tighten to assertEqual once the exact count is confirmed.
    self.assertGreaterEqual(build_exp_mock.call_count, 1)
    build_exp_mock.assert_called_with('gateway-site', {"gateway.hadoop.kerberos.secured": "true"}, None, None)
    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
    # BUG FIX: same assertTrue-with-msg misuse as above.
    self.assertGreaterEqual(cached_kinit_executor_mock.call_count, 1)
    cached_kinit_executor_mock.assert_called_with('/usr/bin/kinit',
                                                  self.config_dict['configurations']['knox-env']['knox_user'],
                                                  security_params['krb5JAASLogin']['keytab'],
                                                  security_params['krb5JAASLogin']['principal'],
                                                  self.config_dict['hostname'],
                                                  '/tmp')
    # --- Scenario 2: the exception thrown by cached_executor is caught. ---
    cached_kinit_executor_mock.reset_mock()
    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
    try:
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
                           classname = "KnoxGateway",
                           command = "security_status",
                           config_file = "secured.json",
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
                           )
    # BUG FIX: a bare `except:` also swallows SystemExit/KeyboardInterrupt;
    # catch Exception instead.  (The script is expected to handle the kinit
    # failure itself, so the handler body remains a no-op as before.)
    except Exception:
        pass
    # --- Scenario 3: security_params without keytab/principal entries. ---
    # NOTE: reset_mock() does not clear side_effect, so kinit still raises in
    # the remaining scenarios; preserved deliberately from the original test.
    empty_security_params = {"krb5JAASLogin" : {}}
    cached_kinit_executor_mock.reset_mock()
    get_params_mock.reset_mock()
    put_structured_out_mock.reset_mock()
    get_params_mock.return_value = empty_security_params
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
                       classname = "KnoxGateway",
                       command = "security_status",
                       config_file = "secured.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
                       )
    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file and principal are not set."})
    # --- Scenario 4: non-empty result_issues from validation. ---
    result_issues_with_params = {'krb5JAASLogin': "Something bad happened"}
    validate_security_config_mock.reset_mock()
    get_params_mock.reset_mock()
    validate_security_config_mock.return_value = result_issues_with_params
    get_params_mock.return_value = security_params
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
                       classname = "KnoxGateway",
                       command = "security_status",
                       config_file = "secured.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
                       )
    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
    # --- Scenario 5: security disabled (default, non-kerberized config). ---
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
                       classname = "KnoxGateway",
                       command = "security_status",
                       config_file = "default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
                       )
    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
# Verifies the default pre_upgrade_restart flow for HDP 2.2.x: Knox's conf and
# data directories are tar'ed into /tmp/knox-upgrade-backup before hdp-select
# switches the knox-server pointer to the target version.
@patch("os.path.isdir")
def test_pre_upgrade_restart(self, isdir_mock):
isdir_mock.return_value = True
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/knox_upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.2.1.0-3242'
json_content['commandParams']['version'] = version
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
classname = "KnoxGateway",
command = "pre_upgrade_restart",
config_dict = json_content,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
# Backup of the active conf directory.
self.assertResourceCalled('Execute', ('tar',
'-zcvhf',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'/usr/hdp/current/knox-server/conf/'),
sudo = True, tries = 3, try_sleep = 1,
)
# Backup of the pre-2.3.2 data directory (/var/lib/knox/data).
self.assertResourceCalled('Execute', ('tar',
'-zcvhf',
'/tmp/knox-upgrade-backup/knox-data-backup.tar',
'/var/lib/knox/data'),
sudo = True, tries = 3, try_sleep = 1,
)
# Version switch to the upgrade target.
self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', '2.2.1.0-3242'),
sudo = True,
)
self.assertNoMoreResources()
# Upgrade into HDP 2.3.0.0: besides the conf/data backups and hdp-select, the
# conf backup is restored into the new conf dir and conf-select is invoked to
# create/set the versioned configuration directory.
@patch("os.remove")
@patch("os.path.exists")
@patch("os.path.isdir")
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart_to_hdp_2300(self, call_mock, isdir_mock, path_exists_mock, remove_mock):
"""
In HDP 2.3.0.0, Knox was using a data dir of /var/lib/knox/data
"""
isdir_mock.return_value = True
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/knox_upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = "2.3.0.0-1234"
json_content['commandParams']['version'] = version
path_exists_mock.return_value = True
mocks_dict = {}
# call_mocks feeds return values for the shell call / checked_call mocks.
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
classname = "KnoxGateway",
command = "pre_upgrade_restart",
config_dict = json_content,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalled('Execute', ('tar',
'-zcvhf',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'/usr/hdp/current/knox-server/conf/'),
sudo = True, tries = 3, try_sleep = 1,
)
# Source data dir is the pre-2.3.2 fixed path.
self.assertResourceCalled('Execute', ('tar',
'-zcvhf',
'/tmp/knox-upgrade-backup/knox-data-backup.tar',
'/var/lib/knox/data'),
sudo = True, tries = 3, try_sleep = 1,
)
self.assertResourceCalledIgnoreEarlier('Execute', ('hdp-select', 'set', 'knox-server', version),
sudo = True,
)
# Restore the conf backup into the (new) conf dir, then clean up the tar.
self.assertResourceCalled('Execute', ('cp',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'/usr/hdp/current/knox-server/conf/knox-conf-backup.tar'),
sudo = True,
)
self.assertResourceCalled('Execute', ('tar',
'-xvf',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'-C',
'/usr/hdp/current/knox-server/conf/'),
sudo = True, tries = 3, try_sleep = 1,
)
self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/knox-conf-backup.tar',
action = ['delete'],
)
self.assertNoMoreResources()
# conf-select invocations for the versioned conf directory.
self.assertEquals(1, mocks_dict['call'].call_count)
self.assertEquals(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('conf-select', 'set-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('conf-select', 'create-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
# Rolling upgrade 2.3.0.0 -> 2.3.2.0: the backup must use the *source*
# version's data dir (/var/lib/knox/data for pre-2.3.2 sources).
@patch("os.remove")
@patch("os.path.exists")
@patch("os.path.isdir")
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart_from_hdp_2300_to_2320(self, call_mock, isdir_mock, path_exists_mock, remove_mock):
"""
In RU from HDP 2.3.0.0 to 2.3.2.0, should backup the data dir used by the source version, which
is /var/lib/knox/data
"""
isdir_mock.return_value = True
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/knox_upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
source_version = "2.3.0.0-1234"
version = "2.3.2.0-5678"
# This is an RU from 2.3.0.0 to 2.3.2.0
json_content['commandParams']['version'] = version
json_content['hostLevelParams']['current_version'] = source_version
path_exists_mock.return_value = True
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
classname = "KnoxGateway",
command = "pre_upgrade_restart",
config_dict = json_content,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalled('Execute', ('tar',
'-zcvhf',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'/usr/hdp/current/knox-server/conf/'),
sudo = True, tries = 3, try_sleep = 1,
)
# Data backup still comes from the fixed pre-2.3.2 location.
self.assertResourceCalled('Execute', ('tar',
'-zcvhf',
'/tmp/knox-upgrade-backup/knox-data-backup.tar',
'/var/lib/knox/data'),
sudo = True, tries = 3, try_sleep = 1,
)
self.assertResourceCalledIgnoreEarlier('Execute', ('hdp-select', 'set', 'knox-server', version),
sudo = True,
)
# Conf backup restore + cleanup, as in the 2.3.0.0 case.
self.assertResourceCalled('Execute', ('cp',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'/usr/hdp/current/knox-server/conf/knox-conf-backup.tar'),
sudo = True,
)
self.assertResourceCalled('Execute', ('tar',
'-xvf',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'-C',
'/usr/hdp/current/knox-server/conf/'),
sudo = True, tries = 3, try_sleep = 1,
)
self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/knox-conf-backup.tar',
action = ['delete'],
)
self.assertNoMoreResources()
self.assertEquals(1, mocks_dict['call'].call_count)
self.assertEquals(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('conf-select', 'set-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('conf-select', 'create-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
# Rolling upgrade from 2.3.2.x: the data dir is now versioned under
# /usr/hdp/<source_version>/knox/data, and the backup must target it.
@patch("os.remove")
@patch("os.path.exists")
@patch("os.path.isdir")
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart_from_hdp_2320(self, call_mock, isdir_mock, path_exists_mock, remove_mock):
"""
In RU from HDP 2.3.2 to anything higher, should backup the data dir used by the source version, which
is /var/lib/knox/data_${source_version}
"""
isdir_mock.return_value = True
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/knox_upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
source_version = "2.3.2.0-1000"
version = "2.3.2.0-1001"
# This is an RU from 2.3.2.0 to 2.3.2.1
json_content['commandParams']['version'] = version
json_content['hostLevelParams']['current_version'] = source_version
path_exists_mock.return_value = True
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
classname = "KnoxGateway",
command = "pre_upgrade_restart",
config_dict = json_content,
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalled('Execute', ('tar',
'-zcvhf',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'/usr/hdp/current/knox-server/conf/'),
sudo = True, tries = 3, try_sleep = 1,
)
# Key assertion of this test: backup reads the versioned data dir.
self.assertResourceCalled('Execute', ('tar',
'-zcvhf',
'/tmp/knox-upgrade-backup/knox-data-backup.tar',
"/usr/hdp/%s/knox/data" % source_version),
sudo = True, tries = 3, try_sleep = 1,
)
# NOTE(review): the triple-quoted string below is a disabled block of
# assertions (inert expression statement, never executed).  Kept verbatim;
# consider deleting it or re-enabling the checks it contains.
'''
self.assertResourceCalled('Execute', ('hdp-select', 'set', 'knox-server', version),
sudo = True,
)
self.assertResourceCalled('Execute', ('cp',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'/usr/hdp/current/knox-server/conf/knox-conf-backup.tar'),
sudo = True,
)
self.assertResourceCalled('Execute', ('tar',
'-xvf',
'/tmp/knox-upgrade-backup/knox-conf-backup.tar',
'-C',
'/usr/hdp/current/knox-server/conf/'),
sudo = True,
)
self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/knox-conf-backup.tar',
action = ['delete'],
)
self.assertNoMoreResources()
self.assertEquals(1, mocks_dict['call'].call_count)
self.assertEquals(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('conf-select', 'set-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('conf-select', 'create-conf-dir', '--package', 'knox', '--stack-version', version, '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
'''
# Happy-path "start" command with the default (non-kerberized) configuration:
# verifies directory setup, config file generation, master secret / certificate
# creation, and finally the gateway.sh start invocation.
@patch("os.path.islink")
def test_start_default(self, islink_mock):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/knox_gateway.py",
classname = "KnoxGateway",
command = "start",
config_file="default.json",
hdp_stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
# Directory layout: data, log, run, conf and topologies dirs owned by knox.
self.assertResourceCalled('Directory', '/usr/hdp/current/knox-server/data/',
owner = 'knox',
group = 'knox',
recursive = True,
mode = 0755,
cd_access = "a",
)
self.assertResourceCalled('Directory', '/var/log/knox',
owner = 'knox',
group = 'knox',
recursive = True,
mode = 0755,
cd_access = "a",
)
self.assertResourceCalled('Directory', '/var/run/knox',
owner = 'knox',
group = 'knox',
recursive = True,
mode = 0755,
cd_access = "a",
)
self.assertResourceCalled('Directory', '/usr/hdp/current/knox-server/conf',
owner = 'knox',
group = 'knox',
recursive = True,
mode = 0755,
cd_access = "a",
)
self.assertResourceCalled('Directory', '/usr/hdp/current/knox-server/conf/topologies',
owner = 'knox',
group = 'knox',
recursive = True,
mode = 0755,
cd_access = "a",
)
# Rendered configuration files.
self.assertResourceCalled('XmlConfig', 'gateway-site.xml',
owner = 'knox',
group = 'knox',
conf_dir = '/usr/hdp/current/knox-server/conf',
configurations = self.getConfig()['configurations']['gateway-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['gateway-site']
)
self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/gateway-log4j.properties',
mode=0644,
group='knox',
owner = 'knox',
content = self.getConfig()['configurations']['gateway-log4j']['content']
)
self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/topologies/default.xml',
group='knox',
owner = 'knox',
content = InlineTemplate(self.getConfig()['configurations']['topology']['content'])
)
self.assertResourceCalled('Execute', ('chown',
'-R',
'knox:knox',
'/usr/hdp/current/knox-server/data/',
'/var/log/knox',
'/var/run/knox',
'/usr/hdp/current/knox-server/conf', '/usr/hdp/current/knox-server/conf/topologies'),
sudo = True,
)
# Master secret / gateway cert are created only when not already present
# (guarded by not_if tests on the security files).
self.assertResourceCalled('Execute', '/usr/hdp/current/knox-server/bin/knoxcli.sh create-master --master sa',
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if = "ambari-sudo.sh su knox -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]test -f /usr/hdp/current/knox-server/data/security/master'",
user = 'knox',
)
self.assertResourceCalled('Execute', '/usr/hdp/current/knox-server/bin/knoxcli.sh create-cert --hostname c6401.ambari.apache.org',
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if = "ambari-sudo.sh su knox -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]test -f /usr/hdp/current/knox-server/data/security/keystores/gateway.jks'",
user = 'knox',
)
self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/ldap-log4j.properties',
mode=0644,
group='knox',
owner = 'knox',
content = self.getConfig()['configurations']['ldap-log4j']['content']
)
self.assertResourceCalled('File', '/usr/hdp/current/knox-server/conf/users.ldif',
mode=0644,
group='knox',
owner = 'knox',
content = self.getConfig()['configurations']['users-ldif']['content']
)
self.assertResourceCalled('Link', '/usr/hdp/current/knox-server/pids',
to = '/var/run/knox',
)
self.assertResourceCalled('Directory', '/var/log/knox',
owner = 'knox',
mode = 0755,
group = 'knox',
recursive = True,
cd_access = 'a',
)
self.assertResourceCalled('Execute', ('chown', '-R', u'knox:knox', u'/var/log/knox'),
sudo = True,
)
# Gateway start is skipped if a live pid already exists (not_if guard).
self.assertResourceCalled("Execute", "/usr/hdp/current/knox-server/bin/gateway.sh start",
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if = u'ls /var/run/knox/gateway.pid >/dev/null 2>&1 && ps -p `cat /var/run/knox/gateway.pid` >/dev/null 2>&1',
user = u'knox',)
self.assertTrue(islink_mock.called)
self.assertNoMoreResources()
| 54.669449 | 3,967 | 0.573946 |
7c7e3b0b132b68cbb5f8c8a452d0b372e6c235fb | 3,418 | py | Python | Eg01_neural_network/mnist_loader.py | chengfzy/DeepLearningStudy | 46b7dd1e5ee33c3556b2acd6820ed5254c93197f | [
"MIT"
] | null | null | null | Eg01_neural_network/mnist_loader.py | chengfzy/DeepLearningStudy | 46b7dd1e5ee33c3556b2acd6820ed5254c93197f | [
"MIT"
] | null | null | null | Eg01_neural_network/mnist_loader.py | chengfzy/DeepLearningStudy | 46b7dd1e5ee33c3556b2acd6820ed5254c93197f | [
"MIT"
] | null | null | null | """
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data. For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""
#### Libraries
# Standard library
import pickle
import gzip
# Third-party libraries
import numpy as np
def load_data():
    """Return the MNIST data as a tuple containing the training data,
    the validation data, and the test data.

    The ``training_data`` is returned as a tuple with two entries.
    The first entry contains the actual training images: a numpy
    ndarray with 50,000 entries, each of which is a 784-value ndarray
    (the 28 * 28 = 784 pixels of one MNIST image).  The second entry
    is a numpy ndarray of 50,000 digit labels (0...9) for the images.

    ``validation_data`` and ``test_data`` have the same layout, but
    contain only 10,000 images each.

    For use in neural networks the format is reshaped by the wrapper
    function ``load_data_wrapper()`` below.
    """
    # BUG FIX: the original opened the file and called f.close() manually,
    # leaking the handle if pickle.load raised; `with` closes it on any path.
    # encoding='bytes' is required to read the Python-2 pickle under Python 3.
    with gzip.open('./data/mnist.pkl.gz', 'rb') as f:
        training_data, validation_data, test_data = pickle.load(f, encoding='bytes')
    return (training_data, validation_data, test_data)
def load_data_wrapper():
    """Return a tuple ``(training_data, validation_data, test_data)``
    in the format used by the network code.

    ``training_data`` pairs each 784x1 input column vector with a
    10-dimensional one-hot target vector; ``validation_data`` and
    ``test_data`` pair each input column vector with its integer digit
    label instead.  Each of the three values is a ``zip`` of
    ``(input, target)`` pairs (a one-shot iterator under Python 3).
    """
    train, valid, test = load_data()

    def as_columns(images):
        # Reshape every flat 784-pixel image into a (784, 1) column vector.
        return [np.reshape(image, (784, 1)) for image in images]

    one_hot_targets = [vectorized_result(label) for label in train[1]]
    training_data = zip(as_columns(train[0]), one_hot_targets)
    validation_data = zip(as_columns(valid[0]), valid[1])
    test_data = zip(as_columns(test[0]), test[1])
    return (training_data, validation_data, test_data)
def vectorized_result(j):
    """Return a (10, 1) one-hot column vector with 1.0 at index ``j``.

    Used to convert a digit label (0...9) into the desired output
    activation of the network's final layer.
    """
    onehot = np.zeros((10, 1))
    onehot[j, 0] = 1.0
    return onehot
| 38.404494 | 80 | 0.696899 |
7e3066c4bc08cfdd8a5da6d108268971f845108e | 3,248 | py | Python | profiles_project/settings.py | joeward60/profiles-rest-api | a7e2d01d27290a510dc8392137a6dea8a3ec55ee | [
"MIT"
] | null | null | null | profiles_project/settings.py | joeward60/profiles-rest-api | a7e2d01d27290a510dc8392137a6dea8a3ec55ee | [
"MIT"
] | null | null | null | profiles_project/settings.py | joeward60/profiles-rest-api | a7e2d01d27290a510dc8392137a6dea8a3ec55ee | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+0g&1#*9#i&&45he&ie9tk_ha2or%u7eehb8w4vjti5*5w9jor'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1',]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.777778 | 91 | 0.699815 |
88dbe224bf514de2885b180337ccc23f3e13b810 | 2,667 | py | Python | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SearchMediaWorkflowRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SearchMediaWorkflowRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SearchMediaWorkflowRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmts.endpoint import endpoint_data
# NOTE: generated Aliyun SDK boilerplate (Mts 2014-06-18 SearchMediaWorkflow);
# kept byte-stable with its sibling generated request classes.
class SearchMediaWorkflowRequest(RpcRequest):
# RPC-style request for the MTS SearchMediaWorkflow API: paginated search of
# media workflows, optionally filtered by workflow state.
def __init__(self):
RpcRequest.__init__(self, 'Mts', '2014-06-18', 'SearchMediaWorkflow','mts')
self.set_method('POST')
# Attach region/endpoint routing data when the endpoint module provides it.
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
# Each getter/setter pair below maps one query parameter of the API call.
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_PageNumber(self): # Long
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Long
self.add_query_param('PageNumber', PageNumber)
def get_PageSize(self): # Long
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Long
self.add_query_param('PageSize', PageSize)
def get_StateList(self): # String
return self.get_query_params().get('StateList')
def set_StateList(self, StateList): # String
self.add_query_param('StateList', StateList)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
| 38.652174 | 78 | 0.760405 |
3627aa39512d87b65c10006b6fb6074031e4218f | 1,849 | py | Python | tests/superset_test_config.py | rodrigoguariento/incubator-superset | b2633a51d43faaca74751349b96fc32784d4b377 | [
"Apache-2.0"
] | 7 | 2017-11-01T06:00:12.000Z | 2019-01-05T13:31:48.000Z | tests/superset_test_config.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 59 | 2019-10-29T10:43:54.000Z | 2020-01-13T20:28:00.000Z | tests/superset_test_config.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 3 | 2020-04-15T16:34:09.000Z | 2020-06-22T17:26:45.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from copy import copy
from superset.config import * # type: ignore
# Test overrides of superset.config (imported via * above).
AUTH_USER_REGISTRATION_ROLE = "alpha"
# Unit tests run against a throwaway SQLite database by default.
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(DATA_DIR, "unittests.db")
DEBUG = True
SUPERSET_WEBSERVER_PORT = 8081
# Allowing SQLALCHEMY_DATABASE_URI to be defined as an env var for
# continuous integration
if "SUPERSET__SQLALCHEMY_DATABASE_URI" in os.environ:
SQLALCHEMY_DATABASE_URI = os.environ["SUPERSET__SQLALCHEMY_DATABASE_URI"]
SQL_SELECT_AS_CTA = True
# Deliberately small row limit so truncation paths are exercised in tests.
SQL_MAX_ROW = 666
FEATURE_FLAGS = {"foo": "bar"}
def GET_FEATURE_FLAGS_FUNC(ff):
    """Feature-flag post-processor hook used by the test suite.

    Returns a shallow copy of ``ff`` with an extra ``"super": "set"``
    entry; the input mapping itself is never mutated.
    """
    augmented = copy(ff)
    augmented.update(super="set")
    return augmented
# Test-only security/notification settings: CSRF is off and the secret key is
# a fixed dummy value (test config only -- never reuse in a deployment).
TESTING = True
SECRET_KEY = "thisismyscretkey"
WTF_CSRF_ENABLED = False
PUBLIC_ROLE_LIKE_GAMMA = True
AUTH_ROLE_PUBLIC = "Public"
EMAIL_NOTIFICATIONS = False
# In-memory cache backend keeps tests hermetic (no redis/memcached needed).
CACHE_CONFIG = {"CACHE_TYPE": "simple"}
class CeleryConfig(object):
# Celery settings for async SQL Lab tests: local redis broker, single worker,
# and a rate limit on the sql_lab.add task.
BROKER_URL = "redis://localhost"
CELERY_IMPORTS = ("superset.sql_lab",)
CELERY_ANNOTATIONS = {"sql_lab.add": {"rate_limit": "10/s"}}
CONCURRENCY = 1
# Superset reads the celery configuration class from this name.
CELERY_CONFIG = CeleryConfig
c36b8fafa51e904b32a3484dd7c30d755b94ebe3 | 586 | py | Python | users/forms.py | PatriciaAnduru/awwards | 0873af52f60a635df47bf42cbef9445d816fea23 | [
"MIT"
] | null | null | null | users/forms.py | PatriciaAnduru/awwards | 0873af52f60a635df47bf42cbef9445d816fea23 | [
"MIT"
] | null | null | null | users/forms.py | PatriciaAnduru/awwards | 0873af52f60a635df47bf42cbef9445d816fea23 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
# Registration form: Django's UserCreationForm (username + two password
# fields) extended with a required email field.
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username','email','password1','password2']
# Profile-page form letting an existing user edit username and email.
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username','email']
# Companion form for the Profile model: only the avatar image is editable.
class ProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['image']
| 21.703704 | 61 | 0.680887 |
f5998d555805ed397a3e36f684a0918968ddd1e1 | 6,257 | py | Python | nexus/tests/unit/test_pwscf_postprocessor_analyzers.py | djstaros/qmcpack | 280f67e638bae280448b47fa618f05b848c530d2 | [
"NCSA"
] | null | null | null | nexus/tests/unit/test_pwscf_postprocessor_analyzers.py | djstaros/qmcpack | 280f67e638bae280448b47fa618f05b848c530d2 | [
"NCSA"
] | 11 | 2020-05-09T20:57:21.000Z | 2020-06-10T00:00:17.000Z | nexus/tests/unit/test_pwscf_postprocessor_analyzers.py | djstaros/qmcpack | 280f67e638bae280448b47fa618f05b848c530d2 | [
"NCSA"
] | null | null | null |
import testing
from testing import value_eq,object_eq,text_eq
def test_import():
    """Smoke test: every public analyzer class can be imported."""
    from pwscf_postprocessors import (PPAnalyzer,
                                      DosAnalyzer,
                                      BandsAnalyzer,
                                      ProjwfcAnalyzer,
                                      CpppAnalyzer,
                                      PwexportAnalyzer)
#end def test_import
from pwscf_postprocessors import PPAnalyzer
from pwscf_postprocessors import DosAnalyzer
from pwscf_postprocessors import BandsAnalyzer
from pwscf_postprocessors import ProjwfcAnalyzer
from pwscf_postprocessors import CpppAnalyzer
from pwscf_postprocessors import PwexportAnalyzer
pa = PPAnalyzer(None)
pa = DosAnalyzer(None)
pa = BandsAnalyzer(None)
pa = ProjwfcAnalyzer(None)
pa = CpppAnalyzer(None)
pa = PwexportAnalyzer(None)
#end def test_empty_init
def test_projwfc_analyzer():
    # End-to-end check of ProjwfcAnalyzer against a reference projwfc
    # input/output pair (pwf.in / pwf.out): bare initialization, full
    # analysis, and both formats of the Lowdin population report.
    import os
    from generic import obj
    from pwscf_postprocessors import ProjwfcAnalyzer
    tpath = testing.setup_unit_test_output_directory(
        test      = 'pwscf_postprocessor_analyzers',
        subtest   = 'test_projwfc_analyzer',
        file_sets = ['pwf.in','pwf.out'],
        )
    projwfc_in = os.path.join(tpath,'pwf.in')
    # Initialization only: the input file is parsed, no output analysis.
    pa = ProjwfcAnalyzer(projwfc_in)
    del pa.info.path
    pa_ref = obj(
        info = obj(
            infile      = 'pwf.in',
            initialized = True,
            outfile     = 'pwf.out',
            strict      = False,
            warn        = False,
            ),
        input = obj(
            projwfc = obj(
                lwrite_overlaps = True,
                outdir          = 'pwscf_output',
                prefix          = 'pwscf',
                ),
            ),
        )
    assert(object_eq(pa.to_obj(),pa_ref))
    # With analyze=True the output file is parsed as well, yielding the
    # per-atom Lowdin populations and the projection state summary.
    pa = ProjwfcAnalyzer(projwfc_in,analyze=True)
    del pa.info.path
    pa_ref = obj(
        info = obj(
            infile      = 'pwf.in',
            initialized = True,
            outfile     = 'pwf.out',
            strict      = False,
            warn        = False,
            ),
        input = obj(
            projwfc = obj(
                lwrite_overlaps = True,
                outdir          = 'pwscf_output',
                prefix          = 'pwscf',
                ),
            ),
        lowdin = obj({
            0 : obj(
                down = obj({
                    'charge' : 1.9988,
                    'd' : 0.0,
                    'dx2-y2' : 0.0,
                    'dxy' : 0.0,
                    'dxz' : 0.0,
                    'dyz' : 0.0,
                    'dz2' : 0.0,
                    'p' : 0.999,
                    'px' : 0.3318,
                    'py' : 0.3336,
                    'pz' : 0.3336,
                    's' : 0.9998,
                    }),
                pol = obj({
                    'charge' : 2.0001,
                    'd' : 0.0,
                    'dx2-y2' : 0.0,
                    'dxy' : 0.0,
                    'dxz' : 0.0,
                    'dyz' : 0.0,
                    'dz2' : 0.0,
                    'p' : 1.9999,
                    'px' : 0.6678,
                    'py' : 0.666,
                    'pz' : 0.666,
                    's' : 0.0001,
                    }),
                tot = obj({
                    'charge' : 5.9977,
                    'd' : 0.0,
                    'dx2-y2' : 0.0,
                    'dxy' : 0.0,
                    'dxz' : 0.0,
                    'dyz' : 0.0,
                    'dz2' : 0.0,
                    'p' : 3.9979,
                    'px' : 1.3314,
                    'py' : 1.3332,
                    'pz' : 1.3332,
                    's' : 1.9997,
                    }),
                up = obj({
                    'charge' : 3.9989,
                    'd' : 0.0,
                    'dx2-y2' : 0.0,
                    'dxy' : 0.0,
                    'dxz' : 0.0,
                    'dyz' : 0.0,
                    'dz2' : 0.0,
                    'p' : 2.9989,
                    'px' : 0.9996,
                    'py' : 0.9996,
                    'pz' : 0.9996,
                    's' : 0.9999,
                    }),
                )
            }),
        states = obj(
            elem = ['S'],
            nstates = 9,
            ),
        )
    assert(object_eq(pa.to_obj(),pa_ref))
    # Short-format Lowdin report: populations summed per angular momentum.
    lowdin_file = os.path.join(tpath,'pwf.lowdin')
    pa.write_lowdin(lowdin_file)
    text = open(lowdin_file,'r').read()
    text_ref = '''
nup+ndn =   5.9977
nup-ndn =   2.0001
tot
   0 S  6.00  s( 2.00)p( 4.00)d( 0.00)
pol
   0 S  2.00  s( 0.00)p( 2.00)d( 0.00)
up
   0 S  4.00  s( 1.00)p( 3.00)d( 0.00)
down
   0 S  2.00  s( 1.00)p( 1.00)d( 0.00)
'''
    def process_text(t):
        # Pad parentheses with spaces so the token-based comparison below
        # is insensitive to exact column alignment of the populations.
        return t.replace('(',' ( ').replace(')',' ) ')
    #end def process_text
    text     = process_text(text)
    text_ref = process_text(text_ref)
    assert(text_eq(text,text_ref))
    # Long-format report (long=True) resolves individual p and d orbitals.
    lowdin_file = os.path.join(tpath,'pwf.lowdin_long')
    pa.write_lowdin(lowdin_file,long=True)
    text = open(lowdin_file,'r').read()
    text_ref = '''
nup+ndn =   5.9977
nup-ndn =   2.0001
tot
   0 S  6.00  s( 2.00)px( 1.33)py( 1.33)pz( 1.33)dx2-y2( 0.00)dxy( 0.00)dxz( 0.00)dyz( 0.00)dz2( 0.00)
pol
   0 S  2.00  s( 0.00)px( 0.67)py( 0.67)pz( 0.67)dx2-y2( 0.00)dxy( 0.00)dxz( 0.00)dyz( 0.00)dz2( 0.00)
up
   0 S  4.00  s( 1.00)px( 1.00)py( 1.00)pz( 1.00)dx2-y2( 0.00)dxy( 0.00)dxz( 0.00)dyz( 0.00)dz2( 0.00)
down
   0 S  2.00  s( 1.00)px( 0.33)py( 0.33)pz( 0.33)dx2-y2( 0.00)dxy( 0.00)dxz( 0.00)dyz( 0.00)dz2( 0.00)
'''
    text     = process_text(text)
    text_ref = process_text(text_ref)
    assert(text_eq(text,text_ref))
#end def test_projwfc_analyzer
| 28.184685 | 114 | 0.4109 |
de7e278d28be0106d8f9e3e21e98057ee35e44ed | 5,977 | py | Python | ci_hackathon_july_2020/settings.py | maliahavlicek/ci_hackathon_july_2020 | 48c269da0c1f375337164d8bf23df494cccf1d3c | [
"ADSL"
] | 1 | 2020-07-06T05:57:00.000Z | 2020-07-06T05:57:00.000Z | ci_hackathon_july_2020/settings.py | maliahavlicek/ci_hackathon_july_2020 | 48c269da0c1f375337164d8bf23df494cccf1d3c | [
"ADSL"
] | 8 | 2020-07-04T16:53:12.000Z | 2022-01-13T02:58:55.000Z | ci_hackathon_july_2020/settings.py | maliahavlicek/ci_hackathon_july_2020 | 48c269da0c1f375337164d8bf23df494cccf1d3c | [
"ADSL"
"""
Django settings for ci_hackathon_july_2020 project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from os import environ
import dj_database_url
from django.contrib.messages import constants as messages
# SECURITY WARNING: don't run with debug turned on in production!
# A local ``env.py`` marks a development checkout: import it for its side
# effects (setting environment variables) and enable DEBUG.
env_is_local = os.path.exists('env.py')
if env_is_local:
    import env  # noqa: F401
    # You can temporarily force DEBUG = False below to exercise the
    # 404/500 error pages locally.
DEBUG = env_is_local
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR is the repository root (two levels above this settings file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The key is only ever read from the environment (locally set via env.py).
SECRET_KEY = os.environ.get('SECRET_KEY')
# ALLOWED_HOSTS = [os.environ.get('HOSTNAME'), "localhost", "127.0.0.1", "adv-b-wall.herokuapp.com"]
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'adv-b-wall.herokuapp.com']
# Absolute base URL of the deployed site.
# NOTE(review): purpose assumed from the name - confirm against consumers.
DEFAULT_DOMAIN = 'https://adv-b-wall.herokuapp.com'
# Application definition
INSTALLED_APPS = [
    # Django built-ins
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: bootstrap form rendering, crispy forms, nose test
    # runner, Django REST Framework and the S3 storage backends.
    'django_forms_bootstrap',
    'crispy_forms',
    'django_nose',
    "rest_framework",
    'storages',
    # Project apps
    'home',
    'users',
    'accounts',
    'posts',
    'status',
]
# Template pack used by django-crispy-forms when rendering forms.
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ci_hackathon_july_2020.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <repo>/templates; per-app
        # template directories are discovered through APP_DIRS.
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
        },
    },
]
WSGI_APPLICATION = 'ci_hackathon_july_2020.wsgi.application'
# Django REST Framework: per-scope rate limits for the status API.
# NOTE(review): the views declaring these throttle scopes are presumably
# in the ``status`` app - confirm there.
REST_FRAMEWORK = {
    "DEFAULT_THROTTLE_CLASSES": ["rest_framework.throttling.ScopedRateThrottle", ],
    "DEFAULT_THROTTLE_RATES": {
        "send_status": "60/day",
        "get_status": "100/hour",
    },
}
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# When DATABASE_URL is present (as typically set by hosting platforms
# such as Heroku), parse it with dj-database-url; otherwise fall back to
# a local file-based SQLite database.
if 'DATABASE_URL' in os.environ:
    DATABASES = {
        'default': dj_database_url.parse(os.environ.get('DATABASE_URL'))
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
            # Separate database name used when running the test suite.
            'TEST': {
                'NAME': 'mytestdatabase',
            },
        }
    }
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
# The project uses a custom user model defined in the ``users`` app.
AUTH_USER_MODEL = 'users.User'
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Standard model backend plus a project backend.
# NOTE(review): ``EmailAuth`` presumably enables logging in with an email
# address - confirm in accounts/backends.py.
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'accounts.backends.EmailAuth'
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
# Cache headers attached to objects that django-storages uploads to S3.
# Fixed: django-storages reads ``AWS_S3_OBJECT_PARAMETERS`` (singular
# "OBJECT"); under the original misspelled name these headers were
# silently ignored.  The ``Expires`` value is now a valid RFC 7231
# HTTP-date (no comma after the day of month).
AWS_S3_OBJECT_PARAMETERS = {
    'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
    'CacheControl': 'max-age=9460800',
}
# Backward-compatible alias for any code referencing the old name.
AWS_S3_OBJECTS_PARAMETERS = AWS_S3_OBJECT_PARAMETERS
AWS_STORAGE_BUCKET_NAME = 'ci-hackathon-july-2020'
AWS_S3_REGION_NAME = 'us-east-2'
# S3 credentials are only ever read from the environment.
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.%s.amazonaws.com' % (
    AWS_STORAGE_BUCKET_NAME, AWS_S3_REGION_NAME)
STATICFILES_LOCATION = 'static'
# Default storage for collectstatic; reassigned near the bottom of this
# module depending on DEBUG.
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static")
]
# Uploaded media is stored on S3 under the ``media`` prefix via the
# project's custom storage class.
MEDIAFILES_LOCATION = 'media'
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# SMTP Email configuration
# Outgoing mail goes through Gmail SMTP; credentials come from the
# environment (EMAIL_USER / EMAIL_PASS).
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = environ.get('EMAIL_USER')
EMAIL_HOST_PASSWORD = environ.get('EMAIL_PASS')
# Map Django message levels onto Bootstrap alert CSS classes.
MESSAGE_TAGS = {
    messages.DEBUG: 'alert-info',
    messages.INFO: 'alert-info',
    messages.SUCCESS: 'alert-success',
    messages.WARNING: 'alert-warning',
    messages.ERROR: 'alert-danger',
}
# Serve static files locally in DEBUG, otherwise via the custom S3-backed
# storage.  NOTE(review): this overrides the unconditional
# STATICFILES_STORAGE assignment made earlier in this module, which is
# therefore redundant.
if DEBUG:
    STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
else:
    STATICFILES_STORAGE = 'custom_storages.StaticStorage'
| 28.061033 | 100 | 0.703865 |
8b3c6dfb93dc4177686bded217cd8d55ea70e467 | 180 | py | Python | vscvs/cli/show/__init__.py | fcoclavero/vscvs | 27fab0bc62fb68da044cf6f2516e3c1853f77533 | [
"MIT"
] | 1 | 2019-07-02T19:07:15.000Z | 2019-07-02T19:07:15.000Z | vscvs/cli/show/__init__.py | fcoclavero/vscvs | 27fab0bc62fb68da044cf6f2516e3c1853f77533 | [
"MIT"
] | 2 | 2019-10-23T18:05:37.000Z | 2020-09-25T14:16:25.000Z | vscvs/cli/show/__init__.py | fcoclavero/vscvs | 27fab0bc62fb68da044cf6f2516e3c1853f77533 | [
"MIT"
__author__ = ["Francisco Clavero"]
# Package metadata for the ``show`` CLI command group.
__description__ = "CLI for visualizations."
__email__ = ["fcoclavero32@gmail.com"]
__status__ = "Prototype"
# Re-export the commands so the parent CLI can register them.
# NOTE(review): presumably click commands - confirm in .show.
from .show import show, tensorboard
| 25.714286 | 43 | 0.766667 |
752234e0e14256a8aaf40c19e5b45bbd980f50cd | 2,564 | py | Python | docs/source/conf.py | messcode/ITCA | 829aa695f5f470cdf928653dd7a8c5ad576a4223 | [
"MIT"
] | null | null | null | docs/source/conf.py | messcode/ITCA | 829aa695f5f470cdf928653dd7a8c5ad576a4223 | [
"MIT"
] | null | null | null | docs/source/conf.py | messcode/ITCA | 829aa695f5f470cdf928653dd7a8c5ad576a4223 | [
"MIT"
] | 2 | 2021-08-16T04:22:02.000Z | 2021-08-16T04:34:08.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Put the docs directory itself and the repository root (two levels up)
# on sys.path so that autodoc can import local extensions and the
# documented package.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(f'{os.path.dirname(__file__)}/../..'))
# -- Project information -----------------------------------------------------
project = 'ITCA'
copyright = '2021, Chihao Zhang'
author = 'Chihao Zhang'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# nbsphinx renders Jupyter notebooks; sphinx_autodoc_typehints pulls type
# information out of annotations into the rendered docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon', # different doc-styles (Google, Numpy)
    # 'myst_parser', # use Markdown using MyST
    "nbsphinx", # notebooks
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.githubpages', # enable github-pages
    'sphinx_autodoc_typehints', # needs to be after napoleon
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# source_suffix = '.rst'
# Markdown sources are parsed with recommonmark's CommonMark parser.
# NOTE(review): ``source_parsers`` was deprecated in Sphinx 1.8 and removed
# in Sphinx 3.0; on modern Sphinx, Markdown support must instead come from
# adding 'recommonmark' (or 'myst_parser') to ``extensions``.  Confirm the
# pinned Sphinx version before relying on this.
from recommonmark.parser import CommonMarkParser
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
857eb91acb9ac2f91b638291928b85803f90cb7d | 55,356 | py | Python | src/module_list.py | rekhabiswal/sage | e8633b09919542a65e7e990c8369fee30c7edefd | [
"BSL-1.0"
] | null | null | null | src/module_list.py | rekhabiswal/sage | e8633b09919542a65e7e990c8369fee30c7edefd | [
"BSL-1.0"
] | null | null | null | src/module_list.py | rekhabiswal/sage | e8633b09919542a65e7e990c8369fee30c7edefd | [
"BSL-1.0"
import os
from glob import glob
from distutils.extension import Extension
from sage.env import SAGE_LOCAL
# Headers of packages installed into Sage's local prefix live here.
SAGE_INC = os.path.join(SAGE_LOCAL, 'include')
#########################################################
### pkg-config setup
#########################################################
import pkgconfig
# Each pkgconfig.parse(...) call returns the libraries, library search
# paths and include paths recorded in that package's .pc file.
# CBLAS can be one of multiple implementations
cblas_pc = pkgconfig.parse('cblas')
cblas_libs = cblas_pc['libraries']
cblas_library_dirs = cblas_pc['library_dirs']
cblas_include_dirs = cblas_pc['include_dirs']
# TODO: Remove Cygwin hack by installing a suitable cblas.pc
if os.path.exists('/usr/lib/libblas.dll.a'):
    cblas_libs = ['gslcblas']
# LAPACK can be one of multiple implementations
lapack_pc = pkgconfig.parse('lapack')
lapack_libs = lapack_pc['libraries']
lapack_library_dirs = lapack_pc['library_dirs']
lapack_include_dirs = lapack_pc['include_dirs']
# GD image library
gd_pc = pkgconfig.parse('gdlib')
gd_libs = gd_pc['libraries']
gd_library_dirs = gd_pc['library_dirs']
gd_include_dirs = gd_pc['include_dirs']
# PNG image library
png_pc = pkgconfig.parse('libpng')
png_libs = png_pc['libraries']
png_library_dirs = png_pc['library_dirs']
png_include_dirs = png_pc['include_dirs']
# zlib
zlib_pc = pkgconfig.parse('zlib')
zlib_libs = zlib_pc['libraries']
zlib_library_dirs = zlib_pc['library_dirs']
zlib_include_dirs = zlib_pc['include_dirs']
#########################################################
### M4RI flags
#########################################################
m4ri_pc = pkgconfig.parse('m4ri')
m4ri_libs = m4ri_pc['libraries']
m4ri_library_dirs = m4ri_pc['library_dirs']
m4ri_include_dirs = m4ri_pc['include_dirs']
m4ri_extra_compile_args = pkgconfig.cflags('m4ri').split()
# Strip -pedantic if m4ri's .pc requested it (presumably it breaks
# compiling the Cython-generated C sources - confirm before removing).
try:
    m4ri_extra_compile_args.remove("-pedantic")
except ValueError:
    pass
#########################################################
### Library order
#########################################################
# This list defines the *order* of linking libraries. A library should
# be put *before* any library it links to. Cython allows
# defining libraries using "# distutils: libraries = LIB". However, if
# there are multiple libraries, the order is undefined so we need to
# manually reorder the libraries according to this list. The order is
# important in particular for Cygwin. Any libraries which are not
# listed here will be added at the end of the list (without changing
# their relative order). There is one exception: stdc++ is always put
# at the very end of the list.
from sage.env import cython_aliases
aliases = cython_aliases()
# Concatenation order below is the intended link order (a library comes
# before the libraries it links to); see the comment above.
library_order_list = aliases["SINGULAR_LIBRARIES"] + [
    "ec", "ecm",
] + aliases["LINBOX_LIBRARIES"] + aliases["FFLASFFPACK_LIBRARIES"] + aliases["GSL_LIBRARIES"] + [
    "pari", "flint", "ratpoints", "ecl", "glpk", "ppl",
    "arb", "mpfi", "mpfr", "mpc", "gmp", "gmpxx",
    "brial",
    "brial_groebner",
    "m4rie",
] + m4ri_libs + [
    "zn_poly", "gap",
] + gd_libs + png_libs + [
    "m", "readline", "Lfunction" ,
] + cblas_libs + zlib_libs
# Make a dict with library:order pairs, where the order are negative
# integers sorted according to library_order_list. When sorting,
# unlisted libraries have order 0, so they appear after the libraries
# in library_order_list.
# Map each listed library to a negative rank so that earlier entries in
# ``library_order_list`` sort first; unlisted libraries get the default
# order 0 on lookup and therefore sort after all listed ones.
n = len(library_order_list)
library_order = {lib: i - n for i, lib in enumerate(library_order_list)}
# stdc++ must always be linked last (see the comment above).
library_order["stdc++"] = 1000
#############################################################
### List of modules
###
### Note that the list of modules is sorted alphabetically
### by extension name. Please keep this list sorted when
### adding new modules!
###
#############################################################
from sage_setup.optional_extension import OptionalExtension
# Cache the platform information once at import time; consulted by
# ``uname_specific`` below.
UNAME = os.uname()
def uname_specific(name, value, alternative):
    """Return ``value`` when ``name`` occurs in the cached OS name
    (``UNAME[0]``), otherwise return ``alternative``."""
    return value if name in UNAME[0] else alternative
ext_modules = [
################################
##
## sage.algebras
##
################################
Extension('sage.algebras.quatalg.quaternion_algebra_element',
sources = ['sage/algebras/quatalg/quaternion_algebra_element.pyx'],
language='c++',
libraries = ["gmp", "m", "ntl"]),
Extension('*', sources = ['sage/algebras/letterplace/*.pyx']),
Extension('*', sources = ['sage/algebras/finite_dimensional_algebras/*.pyx']),
Extension('sage.algebras.quatalg.quaternion_algebra_cython',
sources = ['sage/algebras/quatalg/quaternion_algebra_cython.pyx'],
language='c++',
libraries = ["gmp", "m", "ntl"]),
Extension('sage.algebras.lie_algebras.lie_algebra_element',
sources = ["sage/algebras/lie_algebras/lie_algebra_element.pyx"]),
################################
##
## sage.arith
##
################################
Extension('*', ['sage/arith/*.pyx']),
################################
##
## sage.calculus
##
################################
Extension('*', ['sage/calculus/**/*.pyx']),
################################
##
## sage.categories
##
################################
Extension('*', ['sage/categories/**/*.pyx']),
################################
##
## sage.coding
##
################################
Extension('sage.coding.codecan.codecan',
sources = ['sage/coding/codecan/codecan.pyx']),
Extension('*', ['sage/coding/**/*.pyx']),
################################
##
## sage.combinat
##
################################
Extension('*', ['sage/combinat/**/*.pyx']),
Extension('sage.combinat.subword_complex_c',
sources=['sage/combinat/subword_complex_c.pyx']),
################################
##
## sage.cpython
##
################################
Extension('*', ['sage/cpython/*.pyx']),
################################
##
## sage.crypto
##
################################
Extension('*', ['sage/crypto/*.pyx']),
################################
##
## sage.data_structures
##
################################
Extension('*', ['sage/data_structures/*.pyx']),
################################
##
## sage.docs
##
################################
Extension('*', ['sage/docs/*.pyx']),
################################
##
## sage.dynamics
##
################################
Extension('sage.dynamics.arithmetic_dynamics.projective_ds_helper',
sources = ['sage/dynamics/arithmetic_dynamics/projective_ds_helper.pyx']),
Extension('sage.dynamics.complex_dynamics.mandel_julia_helper',
sources = ['sage/dynamics/complex_dynamics/mandel_julia_helper.pyx']),
################################
##
## sage.ext
##
################################
Extension('*', ['sage/ext/**/*.pyx']),
################################
##
## sage.finance
##
################################
Extension('*', ['sage/finance/*.pyx']),
################################
##
## sage.functions
##
################################
Extension('sage.functions.prime_pi',
sources = ['sage/functions/prime_pi.pyx']),
################################
##
## sage.games
##
################################
Extension('*', ['sage/games/*.pyx']),
################################
##
## sage.geometry
##
################################
Extension('sage.geometry.point_collection',
sources = ['sage/geometry/point_collection.pyx']),
Extension('sage.geometry.toric_lattice_element',
sources = ['sage/geometry/toric_lattice_element.pyx']),
Extension('sage.geometry.integral_points',
sources = ['sage/geometry/integral_points.pyx']),
Extension('sage.geometry.triangulation.base',
sources = ['sage/geometry/triangulation/base.pyx',
'sage/geometry/triangulation/functions.cc',
'sage/geometry/triangulation/data.cc',
'sage/geometry/triangulation/triangulations.cc'],
depends = ['sage/geometry/triangulation/functions.h',
'sage/geometry/triangulation/data.h',
'sage/geometry/triangulation/triangulations.h'],
language="c++"),
################################
##
## sage.graphs
##
################################
Extension('sage.graphs.asteroidal_triples',
sources = ['sage/graphs/asteroidal_triples.pyx']),
Extension('sage.graphs.chrompoly',
sources = ['sage/graphs/chrompoly.pyx']),
Extension('sage.graphs.cliquer',
sources = ['sage/graphs/cliquer.pyx']),
Extension('sage.graphs.centrality',
sources = ['sage/graphs/centrality.pyx']),
Extension('sage.graphs.independent_sets',
sources = ['sage/graphs/independent_sets.pyx']),
Extension('sage.graphs.graph_decompositions.fast_digraph',
sources = ['sage/graphs/graph_decompositions/fast_digraph.pyx']),
Extension('sage.graphs.graph_decompositions.vertex_separation',
sources = ['sage/graphs/graph_decompositions/vertex_separation.pyx']),
Extension('sage.graphs.graph_decompositions.graph_products',
sources = ['sage/graphs/graph_decompositions/graph_products.pyx']),
Extension('sage.graphs.convexity_properties',
sources = ['sage/graphs/convexity_properties.pyx']),
Extension('sage.graphs.comparability',
sources = ['sage/graphs/comparability.pyx']),
Extension('sage.graphs.generic_graph_pyx',
sources = ['sage/graphs/generic_graph_pyx.pyx']),
Extension('sage.graphs.graph_generators_pyx',
sources = ['sage/graphs/graph_generators_pyx.pyx']),
Extension('sage.graphs.distances_all_pairs',
sources = ['sage/graphs/distances_all_pairs.pyx']),
Extension('sage.graphs.base.graph_backends',
sources = ['sage/graphs/base/graph_backends.pyx']),
Extension('sage.graphs.base.static_dense_graph',
sources = ['sage/graphs/base/static_dense_graph.pyx']),
Extension('sage.graphs.base.static_sparse_graph',
sources = ['sage/graphs/base/static_sparse_graph.pyx'],
language = 'c++'),
Extension('sage.graphs.base.static_sparse_backend',
sources = ['sage/graphs/base/static_sparse_backend.pyx']),
Extension('sage.graphs.weakly_chordal',
sources = ['sage/graphs/weakly_chordal.pyx']),
Extension('sage.graphs.matchpoly',
sources = ['sage/graphs/matchpoly.pyx']),
OptionalExtension("sage.graphs.mcqd",
["sage/graphs/mcqd.pyx"],
language = "c++",
package = 'mcqd'),
OptionalExtension("sage.graphs.bliss",
["sage/graphs/bliss.pyx"],
language = "c++",
libraries = ['bliss'],
package = 'bliss'),
Extension('sage.graphs.planarity',
sources = ['sage/graphs/planarity.pyx'],
libraries=['planarity']),
Extension('sage.graphs.strongly_regular_db',
sources = ['sage/graphs/strongly_regular_db.pyx']),
Extension('sage.graphs.graph_decompositions.rankwidth',
sources = ['sage/graphs/graph_decompositions/rankwidth.pyx'],
libraries=['rw']),
Extension('sage.graphs.graph_decompositions.bandwidth',
sources = ['sage/graphs/graph_decompositions/bandwidth.pyx']),
Extension('sage.graphs.graph_decompositions.cutwidth',
sources = ['sage/graphs/graph_decompositions/cutwidth.pyx']),
OptionalExtension('sage.graphs.graph_decompositions.tdlib',
sources = ['sage/graphs/graph_decompositions/tdlib.pyx'],
language="c++",
package = 'tdlib'),
Extension('sage.graphs.spanning_tree',
sources = ['sage/graphs/spanning_tree.pyx']),
Extension('sage.graphs.connectivity',
sources = ['sage/graphs/connectivity.pyx']),
Extension('sage.graphs.trees',
sources = ['sage/graphs/trees.pyx']),
Extension('sage.graphs.genus',
sources = ['sage/graphs/genus.pyx']),
Extension('sage.graphs.hyperbolicity',
sources = ['sage/graphs/hyperbolicity.pyx']),
Extension('sage.graphs.base.c_graph',
sources = ['sage/graphs/base/c_graph.pyx']),
Extension('sage.graphs.base.sparse_graph',
sources = ['sage/graphs/base/sparse_graph.pyx']),
Extension('sage.graphs.base.dense_graph',
sources = ['sage/graphs/base/dense_graph.pyx']),
Extension('sage.graphs.base.boost_graph',
sources = ['sage/graphs/base/boost_graph.pyx']),
################################
##
## sage.groups
##
################################
Extension('*', ['sage/groups/**/*.pyx']),
################################
##
## sage.interacts
##
################################
Extension('*', ['sage/interacts/*.pyx']),
################################
##
## sage.interfaces
##
################################
OptionalExtension("sage.interfaces.primecount",
["sage/interfaces/primecount.pyx"],
package = "primecount"),
Extension('*', ['sage/interfaces/*.pyx']),
################################
##
## sage.lfunctions
##
################################
Extension('sage.lfunctions.zero_sums',
sources = ['sage/lfunctions/zero_sums.pyx']),
################################
##
## sage.libs
##
################################
OptionalExtension('sage.libs.coxeter3.coxeter',
sources = ['sage/libs/coxeter3/coxeter.pyx'],
include_dirs = [os.path.join(SAGE_INC, 'coxeter')],
language="c++",
libraries = ['coxeter3'],
package = 'coxeter3'),
Extension('sage.libs.ecl',
sources = ["sage/libs/ecl.pyx"]),
OptionalExtension("sage.libs.fes",
["sage/libs/fes.pyx"],
language = "c",
libraries = ['fes'],
package = 'fes'),
Extension('sage.libs.flint.flint',
sources = ["sage/libs/flint/flint.pyx"],
extra_compile_args = ["-D_XPG6"]),
Extension('sage.libs.flint.fmpz_poly',
sources = ["sage/libs/flint/fmpz_poly.pyx"],
extra_compile_args = ["-D_XPG6"]),
Extension('sage.libs.flint.arith',
sources = ["sage/libs/flint/arith.pyx"],
extra_compile_args = ["-D_XPG6"]),
Extension("sage.libs.glpk.error",
["sage/libs/glpk/error.pyx"]),
Extension('sage.libs.gmp.pylong',
sources = ['sage/libs/gmp/pylong.pyx']),
OptionalExtension('sage.libs.braiding',
sources = ["sage/libs/braiding.pyx"],
libraries = ["braiding"],
package="libbraiding",
language = 'c++'),
OptionalExtension('sage.libs.homfly',
sources = ["sage/libs/homfly.pyx"],
libraries = ["homfly", "gc"],
package="libhomfly"),
OptionalExtension('sage.libs.sirocco',
sources = ["sage/libs/sirocco.pyx"],
libraries = ["sirocco"],
package="sirocco",
language = 'c++'),
Extension('*', ['sage/libs/linbox/*.pyx']),
Extension('sage.libs.lcalc.lcalc_Lfunction',
sources = ['sage/libs/lcalc/lcalc_Lfunction.pyx'],
libraries = ['m', 'ntl', 'Lfunction'],
extra_compile_args=["-O3", "-ffast-math"],
language = 'c++'),
Extension('sage.libs.libecm',
sources = ['sage/libs/libecm.pyx'],
libraries = ['ecm'],
extra_link_args = uname_specific("Linux", ["-Wl,-z,noexecstack"],
[])),
Extension('sage.libs.lrcalc.lrcalc',
sources = ["sage/libs/lrcalc/lrcalc.pyx"]),
OptionalExtension("sage.libs.meataxe",
sources = ['sage/libs/meataxe.pyx'],
libraries = ['mtx'],
package = 'meataxe'),
Extension('*', ['sage/libs/pari/*.pyx']),
Extension('sage.libs.ppl',
sources = ['sage/libs/ppl.pyx', 'sage/libs/ppl_shim.cc']),
Extension('*', ['sage/libs/pynac/*.pyx']),
Extension('sage.libs.ratpoints',
sources = ["sage/libs/ratpoints.pyx"],
libraries = ["ratpoints"]),
Extension('sage.libs.readline',
sources = ['sage/libs/readline.pyx'],
libraries = ['readline']),
Extension('*', sources = ['sage/libs/singular/*.pyx']),
Extension('sage.libs.symmetrica.symmetrica',
sources = ["sage/libs/symmetrica/symmetrica.pyx"],
libraries = ["symmetrica"]),
Extension('sage.libs.mpmath.utils',
sources = ["sage/libs/mpmath/utils.pyx"]),
Extension('sage.libs.mpmath.ext_impl',
sources = ["sage/libs/mpmath/ext_impl.pyx"]),
Extension('sage.libs.mpmath.ext_main',
sources = ["sage/libs/mpmath/ext_main.pyx"]),
Extension('sage.libs.mpmath.ext_libmp',
sources = ["sage/libs/mpmath/ext_libmp.pyx"]),
###################################
##
## sage.libs.arb
##
###################################
Extension('*', ["sage/libs/arb/*.pyx"]),
###################################
##
## sage.libs.eclib
##
###################################
Extension('*', ["sage/libs/eclib/*.pyx"]),
################################
##
## sage.libs.gap
##
################################
Extension('*', ["sage/libs/gap/*.pyx"]),
###################################
##
## sage.libs.gsl
##
###################################
Extension('*', ["sage/libs/gsl/*.pyx"]),
###################################
##
## sage.libs.ntl
##
###################################
Extension('sage.libs.ntl.convert',
sources = ["sage/libs/ntl/convert.pyx"],
libraries = ["ntl", "gmp"],
language='c++'),
Extension('sage.libs.ntl.error',
sources = ["sage/libs/ntl/error.pyx"],
libraries = ["ntl", "gmp"],
language='c++'),
Extension('sage.libs.ntl.ntl_GF2',
sources = ["sage/libs/ntl/ntl_GF2.pyx"],
libraries = ["ntl", "gmp"],
language='c++'),
Extension('sage.libs.ntl.ntl_GF2E',
sources = ["sage/libs/ntl/ntl_GF2E.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_GF2EContext',
sources = ["sage/libs/ntl/ntl_GF2EContext.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_GF2EX',
sources = ["sage/libs/ntl/ntl_GF2EX.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_GF2X',
sources = ["sage/libs/ntl/ntl_GF2X.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_lzz_p',
sources = ["sage/libs/ntl/ntl_lzz_p.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_lzz_pContext',
sources = ["sage/libs/ntl/ntl_lzz_pContext.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_lzz_pX',
sources = ["sage/libs/ntl/ntl_lzz_pX.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_mat_GF2',
sources = ["sage/libs/ntl/ntl_mat_GF2.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_mat_GF2E',
sources = ["sage/libs/ntl/ntl_mat_GF2E.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_mat_ZZ',
sources = ["sage/libs/ntl/ntl_mat_ZZ.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_ZZ',
sources = ["sage/libs/ntl/ntl_ZZ.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_ZZX',
sources = ["sage/libs/ntl/ntl_ZZX.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_ZZ_p',
sources = ["sage/libs/ntl/ntl_ZZ_p.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_ZZ_pContext',
sources = ["sage/libs/ntl/ntl_ZZ_pContext.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_ZZ_pE',
sources = ["sage/libs/ntl/ntl_ZZ_pE.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_ZZ_pEContext',
sources = ["sage/libs/ntl/ntl_ZZ_pEContext.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_ZZ_pEX',
sources = ["sage/libs/ntl/ntl_ZZ_pEX.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.libs.ntl.ntl_ZZ_pX',
sources = ["sage/libs/ntl/ntl_ZZ_pX.pyx"],
libraries = ["ntl", "gmp", "m"],
language='c++'),
################################
##
## sage.matrix
##
################################
Extension('sage.matrix.action',
sources = ['sage/matrix/action.pyx']),
Extension('sage.matrix.args',
sources = ['sage/matrix/args.pyx']),
Extension('sage.matrix.echelon_matrix',
sources = ['sage/matrix/echelon_matrix.pyx']),
Extension('sage.matrix.change_ring',
sources = ['sage/matrix/change_ring.pyx']),
Extension('sage.matrix.constructor',
sources = ['sage/matrix/constructor.pyx']),
Extension('sage.matrix.matrix',
sources = ['sage/matrix/matrix.pyx']),
Extension('sage.matrix.matrix0',
sources = ['sage/matrix/matrix0.pyx']),
Extension('sage.matrix.matrix1',
sources = ['sage/matrix/matrix1.pyx']),
Extension('sage.matrix.matrix2',
sources = ['sage/matrix/matrix2.pyx']),
Extension("sage.matrix.matrix_complex_ball_dense",
["sage/matrix/matrix_complex_ball_dense.pyx"],
libraries=['arb']),
Extension('sage.matrix.matrix_complex_double_dense',
sources = ['sage/matrix/matrix_complex_double_dense.pyx']),
Extension('sage.matrix.matrix_cyclo_dense',
sources = ['sage/matrix/matrix_cyclo_dense.pyx'],
language = "c++",
libraries=['ntl']),
Extension('sage.matrix.matrix_gap',
sources = ['sage/matrix/matrix_gap.pyx']),
Extension('sage.matrix.matrix_dense',
sources = ['sage/matrix/matrix_dense.pyx']),
Extension('sage.matrix.matrix_double_dense',
sources = ['sage/matrix/matrix_double_dense.pyx']),
Extension('sage.matrix.matrix_generic_dense',
sources = ['sage/matrix/matrix_generic_dense.pyx']),
Extension('sage.matrix.matrix_generic_sparse',
sources = ['sage/matrix/matrix_generic_sparse.pyx']),
Extension('sage.matrix.matrix_integer_dense',
sources = ['sage/matrix/matrix_integer_dense.pyx'],
extra_compile_args = m4ri_extra_compile_args,
libraries = ['iml', 'ntl', 'gmp', 'm'] + cblas_libs,
library_dirs = cblas_library_dirs,
include_dirs = cblas_include_dirs),
Extension('sage.matrix.matrix_integer_sparse',
sources = ['sage/matrix/matrix_integer_sparse.pyx']),
Extension('sage.matrix.matrix_mod2_dense',
sources = ['sage/matrix/matrix_mod2_dense.pyx'],
libraries = m4ri_libs + gd_libs + png_libs + zlib_libs,
library_dirs = m4ri_library_dirs + gd_library_dirs + png_library_dirs + zlib_library_dirs,
include_dirs = m4ri_include_dirs + gd_include_dirs + png_include_dirs + zlib_include_dirs,
extra_compile_args = m4ri_extra_compile_args,
depends = [SAGE_INC + "/png.h", SAGE_INC + "/m4ri/m4ri.h"]),
Extension('sage.matrix.matrix_gf2e_dense',
sources = ['sage/matrix/matrix_gf2e_dense.pyx'],
libraries = ['m4rie'] + m4ri_libs + ['m'],
library_dirs = m4ri_library_dirs,
include_dirs = m4ri_include_dirs,
depends = [SAGE_INC + "/m4rie/m4rie.h"],
extra_compile_args = m4ri_extra_compile_args),
Extension('sage.matrix.matrix_modn_dense_float',
sources = ['sage/matrix/matrix_modn_dense_float.pyx'],
language="c++",
libraries = cblas_libs,
library_dirs = cblas_library_dirs,
include_dirs = cblas_include_dirs),
Extension('sage.matrix.matrix_modn_dense_double',
sources = ['sage/matrix/matrix_modn_dense_double.pyx'],
language="c++",
libraries = cblas_libs,
library_dirs = cblas_library_dirs,
include_dirs = cblas_include_dirs,
extra_compile_args = ["-D_XPG6"]),
Extension('sage.matrix.matrix_modn_sparse',
sources = ['sage/matrix/matrix_modn_sparse.pyx']),
Extension('sage.matrix.matrix_mpolynomial_dense',
sources = ['sage/matrix/matrix_mpolynomial_dense.pyx']),
Extension('sage.matrix.matrix_polynomial_dense',
sources = ['sage/matrix/matrix_polynomial_dense.pyx']),
Extension('sage.matrix.matrix_rational_dense',
sources = ['sage/matrix/matrix_rational_dense.pyx'],
extra_compile_args = ["-D_XPG6"] + m4ri_extra_compile_args,
libraries = ['iml', 'ntl', 'm'] + cblas_libs,
library_dirs = cblas_library_dirs,
include_dirs = cblas_include_dirs,
depends = [SAGE_INC + '/m4ri/m4ri.h']),
Extension('sage.matrix.matrix_rational_sparse',
sources = ['sage/matrix/matrix_rational_sparse.pyx']),
Extension('sage.matrix.matrix_real_double_dense',
sources = ['sage/matrix/matrix_real_double_dense.pyx']),
Extension('sage.matrix.matrix_sparse',
sources = ['sage/matrix/matrix_sparse.pyx']),
Extension('sage.matrix.matrix_symbolic_dense',
sources = ['sage/matrix/matrix_symbolic_dense.pyx']),
Extension('sage.matrix.matrix_window',
sources = ['sage/matrix/matrix_window.pyx']),
OptionalExtension("sage.matrix.matrix_gfpn_dense",
sources = ['sage/matrix/matrix_gfpn_dense.pyx'],
libraries = ['mtx'],
package = 'meataxe'),
Extension('sage.matrix.misc',
sources = ['sage/matrix/misc.pyx']),
Extension('sage.matrix.strassen',
sources = ['sage/matrix/strassen.pyx']),
################################
##
## sage.matroids
##
################################
Extension('*', ['sage/matroids/*.pyx']),
################################
##
## sage.media
##
################################
Extension('*', ['sage/media/*.pyx']),
################################
##
## sage.misc
##
################################
Extension('*', ['sage/misc/*.pyx']),
################################
##
## sage.modular
##
################################
Extension('sage.modular.arithgroup.congroup',
sources = ['sage/modular/arithgroup/congroup.pyx']),
Extension('sage.modular.arithgroup.farey_symbol',
sources = ['sage/modular/arithgroup/farey_symbol.pyx']),
Extension('sage.modular.arithgroup.arithgroup_element',
sources = ['sage/modular/arithgroup/arithgroup_element.pyx']),
Extension('sage.modular.modform.eis_series_cython',
sources = ['sage/modular/modform/eis_series_cython.pyx']),
Extension('sage.modular.modform.l_series_gross_zagier_coeffs',
sources = ['sage/modular/modform/l_series_gross_zagier_coeffs.pyx']),
Extension('sage.modular.modsym.apply',
sources = ['sage/modular/modsym/apply.pyx'],
extra_compile_args=["-D_XPG6"]),
Extension('sage.modular.modsym.manin_symbol',
sources = ['sage/modular/modsym/manin_symbol.pyx']),
Extension('sage.modular.modsym.relation_matrix_pyx',
sources = ['sage/modular/modsym/relation_matrix_pyx.pyx']),
Extension('sage.modular.modsym.heilbronn',
sources = ['sage/modular/modsym/heilbronn.pyx'],
extra_compile_args=["-D_XPG6"]),
Extension('sage.modular.modsym.p1list',
sources = ['sage/modular/modsym/p1list.pyx']),
Extension('sage.modular.pollack_stevens.dist',
sources = ['sage/modular/pollack_stevens/dist.pyx'],
libraries = ["gmp", "zn_poly"],
extra_compile_args = ["-D_XPG6"]),
################################
##
## sage.modules
##
################################
Extension('sage.modules.vector_rational_sparse',
sources = ['sage/modules/vector_rational_sparse.pyx']),
Extension('sage.modules.vector_integer_sparse',
sources = ['sage/modules/vector_integer_sparse.pyx']),
Extension('sage.modules.vector_modn_sparse',
sources = ['sage/modules/vector_modn_sparse.pyx']),
Extension('sage.modules.finite_submodule_iter',
sources = ['sage/modules/finite_submodule_iter.pyx']),
Extension('sage.modules.free_module_element',
sources = ['sage/modules/free_module_element.pyx']),
Extension('sage.modules.module',
sources = ['sage/modules/module.pyx']),
Extension('sage.modules.vector_complex_double_dense',
['sage/modules/vector_complex_double_dense.pyx']),
Extension('sage.modules.vector_double_dense',
['sage/modules/vector_double_dense.pyx']),
Extension('sage.modules.vector_integer_dense',
sources = ['sage/modules/vector_integer_dense.pyx']),
Extension('sage.modules.vector_modn_dense',
sources = ['sage/modules/vector_modn_dense.pyx']),
Extension('sage.modules.vector_mod2_dense',
sources = ['sage/modules/vector_mod2_dense.pyx'],
libraries = m4ri_libs + gd_libs + png_libs,
library_dirs = m4ri_library_dirs + gd_library_dirs + png_library_dirs,
include_dirs = m4ri_include_dirs + gd_include_dirs + png_include_dirs,
extra_compile_args = m4ri_extra_compile_args,
depends = [SAGE_INC + "/png.h", SAGE_INC + "/m4ri/m4ri.h"]),
Extension('sage.modules.vector_rational_dense',
sources = ['sage/modules/vector_rational_dense.pyx']),
Extension('sage.modules.vector_real_double_dense',
['sage/modules/vector_real_double_dense.pyx']),
Extension('sage.modules.with_basis.indexed_element',
sources = ['sage/modules/with_basis/indexed_element.pyx']),
################################
##
## sage.numerical
##
################################
Extension("sage.numerical.mip",
["sage/numerical/mip.pyx"]),
Extension("sage.numerical.linear_functions",
["sage/numerical/linear_functions.pyx"]),
Extension("sage.numerical.linear_tensor_element",
["sage/numerical/linear_tensor_element.pyx"]),
Extension("sage.numerical.gauss_legendre",
["sage/numerical/gauss_legendre.pyx"]),
Extension("sage.numerical.sdp",
["sage/numerical/sdp.pyx"]),
Extension("sage.numerical.backends.generic_backend",
["sage/numerical/backends/generic_backend.pyx"]),
Extension("sage.numerical.backends.generic_sdp_backend",
["sage/numerical/backends/generic_sdp_backend.pyx"]),
Extension("sage.numerical.backends.glpk_backend",
["sage/numerical/backends/glpk_backend.pyx"]),
Extension("sage.numerical.backends.glpk_exact_backend",
["sage/numerical/backends/glpk_exact_backend.pyx"]),
Extension("sage.numerical.backends.ppl_backend",
["sage/numerical/backends/ppl_backend.pyx"]),
Extension("sage.numerical.backends.cvxopt_backend",
["sage/numerical/backends/cvxopt_backend.pyx"]),
Extension("sage.numerical.backends.cvxopt_sdp_backend",
["sage/numerical/backends/cvxopt_sdp_backend.pyx"]),
Extension("sage.numerical.backends.glpk_graph_backend",
["sage/numerical/backends/glpk_graph_backend.pyx"]),
Extension("sage.numerical.backends.interactivelp_backend",
["sage/numerical/backends/interactivelp_backend.pyx"]),
OptionalExtension("sage.numerical.backends.gurobi_backend",
["sage/numerical/backends/gurobi_backend.pyx"],
libraries = ["gurobi"],
condition = os.path.isfile(SAGE_INC + "/gurobi_c.h") and
os.path.isfile(SAGE_LOCAL + "/lib/libgurobi.so")),
OptionalExtension("sage.numerical.backends.cplex_backend",
["sage/numerical/backends/cplex_backend.pyx"],
libraries = ["cplex"],
condition = os.path.isfile(SAGE_INC + "/cplex.h") and
os.path.isfile(SAGE_LOCAL + "/lib/libcplex.a")),
OptionalExtension("sage.numerical.backends.coin_backend",
["sage/numerical/backends/coin_backend.pyx"],
language = 'c++',
libraries = ["Cbc", "CbcSolver", "Cgl", "Clp", "CoinUtils",
"OsiCbc", "OsiClp", "Osi"] + lapack_libs,
library_dirs = lapack_library_dirs,
include_dirs = lapack_include_dirs,
package = 'cbc'),
################################
##
## sage.parallel
##
################################
Extension('*', ['sage/parallel/**/*.pyx']),
################################
##
## sage.plot
##
################################
Extension('*', ['sage/plot/**/*.pyx']),
################################
##
## sage.probability
##
################################
Extension('*', ['sage/probability/*.pyx']),
################################
##
## sage.quadratic_forms
##
################################
Extension('*', ['sage/quadratic_forms/*.pyx']),
###############################
##
## sage.quivers
##
###############################
Extension('*', ['sage/quivers/*.pyx']),
################################
##
## sage.rings
##
################################
Extension('sage.rings.sum_of_squares',
sources = ['sage/rings/sum_of_squares.pyx'],
libraries = ['m']),
Extension('sage.rings.bernmm',
sources = ['sage/rings/bernmm.pyx',
'sage/rings/bernmm/bern_modp.cpp',
'sage/rings/bernmm/bern_modp_util.cpp',
'sage/rings/bernmm/bern_rat.cpp'],
libraries = ['ntl', 'pthread', 'gmp'],
depends = ['sage/rings/bernmm/bern_modp.h',
'sage/rings/bernmm/bern_modp_util.h',
'sage/rings/bernmm/bern_rat.h'],
language = 'c++',
define_macros=[('USE_THREADS', '1'),
('THREAD_STACK_SIZE', '4096')]),
Extension('sage.rings.bernoulli_mod_p',
sources = ['sage/rings/bernoulli_mod_p.pyx'],
libraries=['ntl', 'gmp'],
language = 'c++'),
Extension("sage.rings.complex_arb",
["sage/rings/complex_arb.pyx"]),
Extension('sage.rings.complex_double',
sources = ['sage/rings/complex_double.pyx'],
extra_compile_args = ["-D_XPG6"],
libraries = ['m']),
Extension('sage.rings.complex_interval',
sources = ['sage/rings/complex_interval.pyx']),
Extension('sage.rings.complex_number',
sources = ['sage/rings/complex_number.pyx']),
Extension('sage.rings.integer',
sources = ['sage/rings/integer.pyx'],
libraries=['ntl']),
Extension('sage.rings.integer_ring',
sources = ['sage/rings/integer_ring.pyx'],
libraries=['ntl']),
Extension('sage.rings.factorint',
sources = ['sage/rings/factorint.pyx']),
Extension('sage.rings.fast_arith',
sources = ['sage/rings/fast_arith.pyx']),
Extension('sage.rings.fraction_field_element',
sources = ['sage/rings/fraction_field_element.pyx']),
Extension('sage.rings.fraction_field_FpT',
sources = ['sage/rings/fraction_field_FpT.pyx'],
libraries = ["gmp", "ntl", "zn_poly"],
language = 'c++'),
Extension('sage.rings.laurent_series_ring_element',
sources = ['sage/rings/laurent_series_ring_element.pyx']),
Extension('sage.rings.morphism',
sources = ['sage/rings/morphism.pyx']),
Extension('sage.rings.complex_mpc',
sources = ['sage/rings/complex_mpc.pyx']),
Extension('sage.rings.noncommutative_ideals',
sources = ['sage/rings/noncommutative_ideals.pyx']),
Extension('sage.rings.power_series_mpoly',
sources = ['sage/rings/power_series_mpoly.pyx']),
Extension('sage.rings.power_series_poly',
sources = ['sage/rings/power_series_poly.pyx']),
Extension('sage.rings.power_series_pari',
sources = ['sage/rings/power_series_pari.pyx']),
Extension('sage.rings.power_series_ring_element',
sources = ['sage/rings/power_series_ring_element.pyx']),
Extension('sage.rings.rational',
sources = ['sage/rings/rational.pyx'],
libraries=['ntl']),
Extension('sage.rings.real_double',
sources = ['sage/rings/real_double.pyx']),
Extension('sage.rings.real_interval_absolute',
sources = ['sage/rings/real_interval_absolute.pyx']),
Extension("sage.rings.real_arb",
["sage/rings/real_arb.pyx"]),
Extension('sage.rings.real_lazy',
sources = ['sage/rings/real_lazy.pyx']),
Extension('sage.rings.real_mpfi',
sources = ['sage/rings/real_mpfi.pyx']),
Extension('sage.rings.real_mpfr',
sources = ['sage/rings/real_mpfr.pyx']),
Extension('sage.rings.finite_rings.residue_field',
sources = ['sage/rings/finite_rings/residue_field.pyx']),
Extension('sage.rings.ring',
sources = ['sage/rings/ring.pyx']),
Extension('*', ['sage/rings/convert/*.pyx']),
################################
##
## sage.rings.finite_rings
##
################################
Extension('sage.rings.finite_rings.finite_field_base',
sources = ['sage/rings/finite_rings/finite_field_base.pyx']),
Extension('sage.rings.finite_rings.element_base',
sources = ['sage/rings/finite_rings/element_base.pyx']),
Extension('sage.rings.finite_rings.integer_mod',
sources = ['sage/rings/finite_rings/integer_mod.pyx']),
Extension('sage.rings.finite_rings.element_givaro',
sources = ["sage/rings/finite_rings/element_givaro.pyx"],
libraries = ['givaro', 'ntl', 'gmp', 'm'],
language='c++'),
Extension('sage.rings.finite_rings.element_ntl_gf2e',
sources = ['sage/rings/finite_rings/element_ntl_gf2e.pyx'],
libraries = ['ntl'],
language = 'c++'),
Extension('sage.rings.finite_rings.element_pari_ffelt',
sources = ['sage/rings/finite_rings/element_pari_ffelt.pyx']),
Extension('sage.rings.finite_rings.hom_finite_field',
sources = ["sage/rings/finite_rings/hom_finite_field.pyx"]),
Extension('sage.rings.finite_rings.hom_prime_finite_field',
sources = ["sage/rings/finite_rings/hom_prime_finite_field.pyx"]),
Extension('sage.rings.finite_rings.hom_finite_field_givaro',
sources = ["sage/rings/finite_rings/hom_finite_field_givaro.pyx"],
libraries = ['givaro', 'ntl', 'gmp', 'm'],
language='c++'),
################################
##
## sage.rings.function_field
##
################################
Extension('sage.rings.function_field.function_field_element',
sources = ['sage/rings/function_field/function_field_element.pyx']),
################################
##
## sage.rings.number_field
##
################################
Extension('sage.rings.number_field.number_field_base',
sources = ['sage/rings/number_field/number_field_base.pyx']),
Extension('sage.rings.number_field.number_field_element',
sources = ['sage/rings/number_field/number_field_element.pyx'],
libraries=['ntl'],
language = 'c++'),
Extension('sage.rings.number_field.number_field_element_quadratic',
sources = ['sage/rings/number_field/number_field_element_quadratic.pyx'],
libraries=['ntl'],
language = 'c++'),
Extension('sage.rings.number_field.number_field_morphisms',
sources = ['sage/rings/number_field/number_field_morphisms.pyx']),
Extension('sage.rings.number_field.totallyreal',
sources = ['sage/rings/number_field/totallyreal.pyx']),
Extension('sage.rings.number_field.totallyreal_data',
sources = ['sage/rings/number_field/totallyreal_data.pyx'],
libraries = ['gmp']),
################################
##
## sage.rings.padics
##
################################
Extension('sage.rings.padics.morphism',
sources = ['sage/rings/padics/morphism.pyx']),
Extension('sage.rings.padics.common_conversion',
sources = ['sage/rings/padics/common_conversion.pyx']),
Extension('sage.rings.padics.local_generic_element',
sources = ['sage/rings/padics/local_generic_element.pyx']),
Extension('sage.rings.padics.padic_capped_absolute_element',
sources = ['sage/rings/padics/padic_capped_absolute_element.pyx']),
Extension('sage.rings.padics.padic_capped_relative_element',
sources = ['sage/rings/padics/padic_capped_relative_element.pyx']),
Extension('sage.rings.padics.padic_floating_point_element',
sources = ['sage/rings/padics/padic_floating_point_element.pyx']),
Extension('sage.rings.padics.padic_ext_element',
sources = ['sage/rings/padics/padic_ext_element.pyx'],
libraries=['ntl', 'gmp', 'm'],
language='c++'),
Extension('sage.rings.padics.padic_fixed_mod_element',
sources = ['sage/rings/padics/padic_fixed_mod_element.pyx']),
Extension('sage.rings.padics.padic_generic_element',
sources = ['sage/rings/padics/padic_generic_element.pyx']),
Extension('sage.rings.padics.padic_printing',
sources = ['sage/rings/padics/padic_printing.pyx'],
libraries=['gmp', 'ntl', 'm'],
language='c++'),
Extension('sage.rings.padics.padic_ZZ_pX_CA_element',
sources = ['sage/rings/padics/padic_ZZ_pX_CA_element.pyx'],
libraries = ['ntl', 'gmp', 'm'],
language='c++'),
Extension('sage.rings.padics.padic_ZZ_pX_CR_element',
sources = ['sage/rings/padics/padic_ZZ_pX_CR_element.pyx'],
libraries=['ntl', 'gmp', 'm'],
language='c++'),
Extension('sage.rings.padics.padic_ZZ_pX_element',
sources = ['sage/rings/padics/padic_ZZ_pX_element.pyx'],
libraries=['ntl', 'gmp', 'm'],
language='c++'),
Extension('sage.rings.padics.padic_ZZ_pX_FM_element',
sources = ['sage/rings/padics/padic_ZZ_pX_FM_element.pyx'],
libraries=['ntl', 'gmp', 'm'],
language='c++'),
Extension('sage.rings.padics.pow_computer',
sources = ['sage/rings/padics/pow_computer.pyx'],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.rings.padics.pow_computer_ext',
sources = ['sage/rings/padics/pow_computer_ext.pyx'],
libraries = ["ntl", "gmp", "m"],
language='c++'),
Extension('sage.rings.padics.pow_computer_flint',
sources = ['sage/rings/padics/pow_computer_flint.pyx'],
libraries = ["gmp", "ntl"],
language='c++'),
Extension('sage.rings.padics.qadic_flint_CR',
sources = ['sage/rings/padics/qadic_flint_CR.pyx']),
Extension('sage.rings.padics.qadic_flint_CA',
sources = ['sage/rings/padics/qadic_flint_CA.pyx']),
Extension('sage.rings.padics.qadic_flint_FM',
sources = ['sage/rings/padics/qadic_flint_FM.pyx']),
Extension('sage.rings.padics.qadic_flint_FP',
sources = ['sage/rings/padics/qadic_flint_FP.pyx'],
libraries = ["flint"]),
################################
##
## sage.rings.polynomial
##
################################
Extension('sage.rings.polynomial.cyclotomic',
sources = ['sage/rings/polynomial/cyclotomic.pyx']),
Extension('sage.rings.polynomial.evaluation',
libraries = ["ntl"],
sources = ['sage/rings/polynomial/evaluation.pyx'],
language = 'c++'),
Extension('sage.rings.polynomial.laurent_polynomial',
sources = ['sage/rings/polynomial/laurent_polynomial.pyx']),
Extension('sage.rings.polynomial.multi_polynomial',
sources = ['sage/rings/polynomial/multi_polynomial.pyx']),
Extension('sage.rings.polynomial.multi_polynomial_ideal_libsingular',
sources = ['sage/rings/polynomial/multi_polynomial_ideal_libsingular.pyx']),
Extension('sage.rings.polynomial.plural',
sources = ['sage/rings/polynomial/plural.pyx']),
Extension('sage.rings.polynomial.multi_polynomial_libsingular',
sources = ['sage/rings/polynomial/multi_polynomial_libsingular.pyx']),
Extension('sage.rings.polynomial.multi_polynomial_ring_base',
sources = ['sage/rings/polynomial/multi_polynomial_ring_base.pyx']),
Extension('sage.rings.polynomial.polynomial_number_field',
sources = ['sage/rings/polynomial/polynomial_number_field.pyx']),
Extension('sage.rings.polynomial.polydict',
sources = ['sage/rings/polynomial/polydict.pyx']),
Extension('sage.rings.polynomial.polynomial_complex_arb',
sources = ['sage/rings/polynomial/polynomial_complex_arb.pyx']),
Extension('sage.rings.polynomial.polynomial_compiled',
sources = ['sage/rings/polynomial/polynomial_compiled.pyx']),
Extension('sage.rings.polynomial.polynomial_element',
sources = ['sage/rings/polynomial/polynomial_element.pyx']),
Extension('sage.rings.polynomial.polynomial_gf2x',
sources = ['sage/rings/polynomial/polynomial_gf2x.pyx'],
libraries = ['gmp', 'ntl'],
extra_compile_args = m4ri_extra_compile_args,
language = 'c++'),
Extension('sage.rings.polynomial.polynomial_zz_pex',
sources = ['sage/rings/polynomial/polynomial_zz_pex.pyx'],
libraries = ['ntl', 'gmp'],
language = 'c++'),
Extension('sage.rings.polynomial.polynomial_zmod_flint',
sources = ['sage/rings/polynomial/polynomial_zmod_flint.pyx'],
libraries = ["gmp", "ntl", "zn_poly"],
language = 'c++'),
Extension('sage.rings.polynomial.polynomial_integer_dense_flint',
sources = ['sage/rings/polynomial/polynomial_integer_dense_flint.pyx'],
language = 'c++',
libraries = ["ntl", "gmp"]),
Extension('sage.rings.polynomial.polynomial_integer_dense_ntl',
sources = ['sage/rings/polynomial/polynomial_integer_dense_ntl.pyx'],
libraries = ['ntl', 'gmp'],
language = 'c++'),
Extension('sage.rings.polynomial.polynomial_rational_flint',
sources = ['sage/rings/polynomial/polynomial_rational_flint.pyx'],
libraries = ["ntl", "gmp"],
language = 'c++'),
Extension('sage.rings.polynomial.polynomial_modn_dense_ntl',
sources = ['sage/rings/polynomial/polynomial_modn_dense_ntl.pyx'],
libraries = ['ntl', 'gmp'],
language = 'c++'),
Extension('sage.rings.polynomial.polynomial_ring_homomorphism',
sources = ['sage/rings/polynomial/polynomial_ring_homomorphism.pyx']),
Extension('sage.rings.polynomial.pbori',
sources = ['sage/rings/polynomial/pbori.pyx'],
libraries=['brial', 'brial_groebner'] + m4ri_libs + png_libs,
library_dirs = m4ri_library_dirs + png_library_dirs,
include_dirs = m4ri_include_dirs + png_include_dirs,
depends = [SAGE_INC + "/polybori/" + hd + ".h" for hd in ["polybori", "config"] ] +
[SAGE_INC + '/m4ri/m4ri.h'],
extra_compile_args = m4ri_extra_compile_args),
Extension('sage.rings.polynomial.polynomial_real_mpfr_dense',
sources = ['sage/rings/polynomial/polynomial_real_mpfr_dense.pyx']),
Extension('sage.rings.polynomial.real_roots',
sources = ['sage/rings/polynomial/real_roots.pyx']),
Extension('sage.rings.polynomial.refine_root',
sources = ['sage/rings/polynomial/refine_root.pyx']),
Extension('sage.rings.polynomial.symmetric_reduction',
sources = ['sage/rings/polynomial/symmetric_reduction.pyx']),
Extension('sage.rings.polynomial.skew_polynomial_element',
sources = ['sage/rings/polynomial/skew_polynomial_element.pyx']),
################################
##
## sage.rings.semirings
##
################################
Extension('sage.rings.semirings.tropical_semiring',
sources = ['sage/rings/semirings/tropical_semiring.pyx']),
################################
##
## sage.sat
##
################################
Extension('sage.sat.solvers.satsolver',
sources = ['sage/sat/solvers/satsolver.pyx']),
################################
##
## sage.schemes
##
################################
Extension('sage.schemes.elliptic_curves.descent_two_isogeny',
sources = ['sage/schemes/elliptic_curves/descent_two_isogeny.pyx'],
libraries = ['ratpoints']),
Extension('sage.schemes.elliptic_curves.period_lattice_region',
sources = ['sage/schemes/elliptic_curves/period_lattice_region.pyx']),
Extension('sage.schemes.hyperelliptic_curves.hypellfrob',
sources = ['sage/schemes/hyperelliptic_curves/hypellfrob.pyx',
'sage/schemes/hyperelliptic_curves/hypellfrob/hypellfrob.cpp',
'sage/schemes/hyperelliptic_curves/hypellfrob/recurrences_ntl.cpp',
'sage/schemes/hyperelliptic_curves/hypellfrob/recurrences_zn_poly.cpp'],
libraries = ['gmp', 'ntl', 'zn_poly'],
depends = ['sage/schemes/hyperelliptic_curves/hypellfrob/hypellfrob.h',
'sage/schemes/hyperelliptic_curves/hypellfrob/recurrences_ntl.h',
'sage/schemes/hyperelliptic_curves/hypellfrob/recurrences_zn_poly.h'],
language = 'c++',
include_dirs = ['sage/libs/ntl/',
'sage/schemes/hyperelliptic_curves/hypellfrob/']),
Extension('sage.schemes.toric.divisor_class',
sources = ['sage/schemes/toric/divisor_class.pyx']),
################################
##
## sage.sets
##
################################
Extension('*', ['sage/sets/*.pyx']),
################################
##
## sage.stats
##
################################
Extension('sage.stats.hmm.util',
sources = ['sage/stats/hmm/util.pyx']),
Extension('sage.stats.hmm.distributions',
sources = ['sage/stats/hmm/distributions.pyx']),
Extension('sage.stats.hmm.hmm',
sources = ['sage/stats/hmm/hmm.pyx']),
Extension('sage.stats.hmm.chmm',
sources = ['sage/stats/hmm/chmm.pyx']),
Extension('sage.stats.intlist',
sources = ['sage/stats/intlist.pyx']),
Extension('sage.stats.distributions.discrete_gaussian_integer',
sources = ['sage/stats/distributions/discrete_gaussian_integer.pyx', 'sage/stats/distributions/dgs_gauss_mp.c', 'sage/stats/distributions/dgs_gauss_dp.c', 'sage/stats/distributions/dgs_bern.c'],
depends = ['sage/stats/distributions/dgs_gauss.h', 'sage/stats/distributions/dgs_bern.h', 'sage/stats/distributions/dgs_misc.h'],
extra_compile_args = ["-D_XOPEN_SOURCE=600"]),
################################
##
## sage.structure
##
################################
# Compile this with -Os because it works around a bug with
# GCC-4.7.3 + Cython 0.19 on Itanium, see Trac #14452. Moreover, it
# actually results in faster code than -O3.
Extension('sage.structure.element',
sources = ['sage/structure/element.pyx'],
extra_compile_args=["-Os"]),
Extension('*', ['sage/structure/*.pyx']),
################################
##
## sage.symbolic
##
################################
Extension('*', ['sage/symbolic/*.pyx']),
################################
##
## sage.tests
##
################################
Extension('sage.tests.stl_vector',
sources = ['sage/tests/stl_vector.pyx'],
language = 'c++'),
Extension('sage.tests.cython',
sources = ['sage/tests/cython.pyx']),
]
| 34.640801 | 208 | 0.551503 |
617357c4900d9f389c9c1bdf99fb5ce19ad9e274 | 2,437 | py | Python | speakeasy/winenv/arch.py | certego/speakeasy | 7cc983603d09ee0bd62115b8b8c9a17c81abe9a4 | [
"MIT"
] | null | null | null | speakeasy/winenv/arch.py | certego/speakeasy | 7cc983603d09ee0bd62115b8b8c9a17c81abe9a4 | [
"MIT"
] | null | null | null | speakeasy/winenv/arch.py | certego/speakeasy | 7cc983603d09ee0bd62115b8b8c9a17c81abe9a4 | [
"MIT"
] | null | null | null | # Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Architecture, register-ID, and calling-convention constants used to
# describe emulated x86 / amd64 CPU state.

# Supported architectures; the value doubles as the pointer width in bits.
ARCH_X86 = 32
ARCH_AMD64 = 64

# Size of one memory page in bytes.
PAGE_SIZE = 0x1000

# Generic bit-width tags.
BITS_32 = 32
BITS_64 = 64

# MSR address of IA32_LSTAR (the amd64 64-bit syscall entry-point register).
LSTAR = 0xC0000082

# x86 general-purpose / flags / instruction-pointer registers.
# NOTE: the original file also assigned X86_REG_CS = 1001 and X86_REG_DS = 1002
# here; both were immediately shadowed by the segment-register block below
# (1018 / 1019), so the dead duplicates have been removed. Final values are
# unchanged.
X86_REG_EAX = 1003
X86_REG_EBP = 1004
X86_REG_EBX = 1005
X86_REG_ECX = 1006
X86_REG_EDI = 1007
X86_REG_EDX = 1008
X86_REG_EFLAGS = 1009
X86_REG_EIP = 1010
X86_REG_EIZ = 1011
X86_REG_ESI = 1012
X86_REG_ESP = 1013

# Segment registers
X86_REG_FS = 1014
X86_REG_GS = 1015
X86_REG_ES = 1016
X86_REG_SS = 1017
X86_REG_CS = 1018
X86_REG_DS = 1019

X86_REG_MSR = 1020

# AMD64 (64-bit) registers
AMD64_REG_RAX = 1021
AMD64_REG_RBP = 1022
AMD64_REG_RBX = 1023
AMD64_REG_RCX = 1024
AMD64_REG_RDI = 1025
AMD64_REG_RDX = 1026
AMD64_REG_RIP = 1027
AMD64_REG_RIZ = 1028
AMD64_REG_RSI = 1029
AMD64_REG_RSP = 1030
AMD64_REG_SIL = 1031
AMD64_REG_DIL = 1032
AMD64_REG_BPL = 1033
AMD64_REG_SPL = 1034
AMD64_REG_R8 = 1035
AMD64_REG_R9 = 1036
AMD64_REG_R10 = 1037
AMD64_REG_R11 = 1038
AMD64_REG_R12 = 1039
AMD64_REG_R13 = 1040
AMD64_REG_R14 = 1041
AMD64_REG_R15 = 1042

# Control registers
X86_REG_CR0 = 1043
X86_REG_CR1 = 1044
X86_REG_CR2 = 1045
X86_REG_CR3 = 1046
X86_REG_CR4 = 1047
X86_REG_CR5 = 1048
X86_REG_CR6 = 1049
X86_REG_CR7 = 1050
X86_REG_CR8 = 1051

# Debug registers
X86_REG_DR0 = 1052
X86_REG_DR1 = 1053
X86_REG_DR2 = 1054
X86_REG_DR3 = 1055
X86_REG_DR4 = 1056
X86_REG_DR5 = 1057
X86_REG_DR6 = 1058
X86_REG_DR7 = 1059
X86_REG_DR8 = 1060

# Descriptor-table registers
X86_REG_IDTR = 1061
X86_REG_GDTR = 1062

# SSE (XMM) registers
X86_REG_XMM0 = 1063
X86_REG_XMM1 = 1064
X86_REG_XMM2 = 1065
X86_REG_XMM3 = 1066
X86_REG_XMM4 = 1067

# Calling conventions
CALL_CONV_CDECL = 0
CALL_CONV_STDCALL = 1
CALL_CONV_FASTCALL = 2
CALL_CONV_FLOAT = 3

# Sentinel value — presumably marks a variadic argument count; confirm at call sites.
VAR_ARGS = -1

# Maps lower-case register names to the numeric register IDs defined above.
REG_LOOKUP = {
    # x86 registers
    "eax": X86_REG_EAX,
    "ebx": X86_REG_EBX,
    "ecx": X86_REG_ECX,
    "edx": X86_REG_EDX,
    "edi": X86_REG_EDI,
    "esi": X86_REG_ESI,
    "ebp": X86_REG_EBP,
    "esp": X86_REG_ESP,
    "eip": X86_REG_EIP,
    "eflags": X86_REG_EFLAGS,
    # amd64 registers
    "rax": AMD64_REG_RAX,
    "rbx": AMD64_REG_RBX,
    "rcx": AMD64_REG_RCX,
    "rdx": AMD64_REG_RDX,
    "rdi": AMD64_REG_RDI,
    "rsi": AMD64_REG_RSI,
    "rsp": AMD64_REG_RSP,
    "rbp": AMD64_REG_RBP,
    "r8": AMD64_REG_R8,
    "r9": AMD64_REG_R9,
    "r10": AMD64_REG_R10,
    "r11": AMD64_REG_R11,
    "r12": AMD64_REG_R12,
    "r13": AMD64_REG_R13,
    "r14": AMD64_REG_R14,
    "r15": AMD64_REG_R15,
    "rip": AMD64_REG_RIP,
}
| 18.891473 | 55 | 0.730406 |
a6ca7b8ff20a6966dc7cb38126eff74c55b248b1 | 1,860 | py | Python | eskill_custom/sales_invoice.py | mohsinalimat/eskill_custom | 1aa4a591c71144d751b78e0a2907353336e71f37 | [
"MIT"
] | null | null | null | eskill_custom/sales_invoice.py | mohsinalimat/eskill_custom | 1aa4a591c71144d751b78e0a2907353336e71f37 | [
"MIT"
] | null | null | null | eskill_custom/sales_invoice.py | mohsinalimat/eskill_custom | 1aa4a591c71144d751b78e0a2907353336e71f37 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import json
import frappe
@frappe.whitelist()
def update_service_order(invoice_name: str):
    """Sync the linked Service Order's billing status with this invoice.

    Marks the order "Invoiced" (and records an audit comment) when the
    invoice is a submitted, non-return document; otherwise resets the order
    to "Pending Invoicing". Always pushes a realtime update afterwards.
    """
    sales_invoice = frappe.get_doc("Sales Invoice", invoice_name)
    order = frappe.get_doc("Service Order", sales_invoice.service_order)
    # docstatus == 1 means the document is submitted in frappe.
    invoiced = not sales_invoice.is_return and sales_invoice.docstatus == 1
    if invoiced:
        order.db_set("billing_status", "Invoiced")
        order.add_comment(
            comment_type="Info",
            text="invoiced this service order."
        )
    else:
        order.db_set("billing_status", "Pending Invoicing")
    order.notify_update()
@frappe.whitelist()
def validate_advance_payment_rate(exchange_rate, advances) -> "str | None":
    """Check that every allocated advance matches the invoicing exchange rate.

    Both arguments arrive from the front end as strings: ``exchange_rate`` is
    a numeric string and ``advances`` is the advances child table serialised
    as JSON. Returns an HTML (``<br>``-separated) message listing each
    mismatched advance, or ``None`` when every allocated advance matches.
    """
    rows = json.loads(advances)
    rate = float(exchange_rate)
    # One message per advance that has a non-zero allocation and a recorded
    # rate different from the rate being invoiced at.
    problems = [
        f"{row['reference_name']} in row {row['idx']}"
        f" has an exchange rate of {round(1 / row['ref_exchange_rate'], 4)}"
        f" whilst the you are trying to invoice at a rate of {round(1 / rate, 4)}."
        for row in rows
        if row['allocated_amount'] != 0 and row['ref_exchange_rate'] != rate
    ]
    if not problems:
        return None
    header = "The below advances are invalid due to mismatched exchange rates:<br>"
    return "<br>".join([header, *problems])
| 37.959184 | 100 | 0.682796 |
b347e8d10d0f9a971883adc7b7aef8c97ba13abe | 496 | py | Python | build/lib/tzager/diseases.py | tzagerAI/tzager | a6787f02fde58babd9999867d2cc3ced94926da8 | [
"MIT"
] | 2 | 2021-01-25T17:05:59.000Z | 2021-04-11T19:05:16.000Z | build/lib/tzager/diseases.py | tzagerAI/tzager | a6787f02fde58babd9999867d2cc3ced94926da8 | [
"MIT"
] | null | null | null | build/lib/tzager/diseases.py | tzagerAI/tzager | a6787f02fde58babd9999867d2cc3ced94926da8 | [
"MIT"
] | null | null | null | import json
import requests
def get_data(password, concept_list, filters='None'):
    """Fetch disease data for the given concepts from the Bolooba cloud API.

    Parameters
    ----------
    password : str
        API access credential, sent as a URL path component.
    concept_list : list of str
        Concepts to query; joined with '|' into a single path segment.
    filters : list of str or str, optional
        Filters to apply, joined with '|'. The literal string ``'None'``
        (the default) is a sentinel meaning "no filters" and is sent verbatim.

    Returns
    -------
    dict
        The decoded JSON payload on success, or ``{'error': <status code>}``
        for a non-200 response.
    """
    concepts = '|'.join(concept_list)
    if filters != 'None':
        filters = '|'.join(filters)
    url = f'https://cloud.bolooba.com:25556/diseases_data/{password}/{concepts}/{filters}'
    response = requests.get(url)
    if response.status_code == 200:
        # response.json() already yields a dict; the original additionally
        # re-wrapped the result in dict() a second time, which was redundant.
        return dict(response.json())
    return {'error': response.status_code}
| 27.555556 | 125 | 0.618952 |
51ea747e5a5a1ce7b83d752f573877cb525cdad7 | 1,022 | py | Python | Python2/ejercicios/ejercicio_clase6.py | eveiramirez/python_class | 7a3830cc92dc842b853b243c6b01e06993faa97e | [
"MIT"
] | null | null | null | Python2/ejercicios/ejercicio_clase6.py | eveiramirez/python_class | 7a3830cc92dc842b853b243c6b01e06993faa97e | [
"MIT"
] | null | null | null | Python2/ejercicios/ejercicio_clase6.py | eveiramirez/python_class | 7a3830cc92dc842b853b243c6b01e06993faa97e | [
"MIT"
] | 3 | 2021-04-09T19:12:15.000Z | 2021-08-24T18:24:58.000Z | from Bio import Entrez
# Ejercicio 2
# Compare the NCBI Taxonomy lineages of Notoryctes typhlops and
# Chrysochloris asiatica and print the taxon at the first position where
# the two lineages diverge.
Entrez.email = "iramirez@lcg.unam.mx"  # NCBI requires a contact e-mail for E-utilities
# Look up the taxonomy ID for the first organism, then fetch its record.
handle = Entrez.esearch(db="Taxonomy", term="Notoryctes typhlops")
record = Entrez.read(handle)
ids = record["IdList"][0]
handle = Entrez.efetch(db="Taxonomy", id=ids, retmode="xml")
Notoryctes = Entrez.read(handle)
# "Lineage" is a ';'-separated string of ancestor taxa (presumably
# root-first — verify against the Taxonomy record format).
lineages = [Notoryctes[0]["Lineage"].split(";")]
# Same search/fetch for the second organism; its lineage is appended.
handle = Entrez.esearch(db="Taxonomy", term="Chrysochloris asiatica")
record = Entrez.read(handle)
ids = record["IdList"][0]
handle = Entrez.efetch(db="Taxonomy", id=ids, retmode="xml")
# NOTE(review): the variable name "Notoryctes" is reused here for the
# Chrysochloris record.
Notoryctes = Entrez.read(handle)
lineages.append(Notoryctes[0]["Lineage"].split(";"))
# Index (0 or 1) of the shorter lineage, so the scan below stays in range.
lengths = []
for lin in lineages:
    lengths.append(len(lin))
ltmin = lengths.index(min(lengths))
print(ltmin)  # debug output: which lineage is the shorter one
# Scan both lineages in parallel for the first index where they differ.
diff = -1
i = 0
while diff == -1:
    while i <= len(lineages[ltmin])-1:
        if lineages[0][i] != lineages[1][i]:
            diff = i
            i = len(lineages[ltmin])  # push i past the bound to exit the inner loop
        else:
            i += 1
    # NOTE(review): if one lineage is a prefix of the other, no difference is
    # ever found, diff stays -1, and this outer loop never terminates —
    # TODO confirm whether that case can occur for these inputs.
    if diff == 0:
        # Lineages differ at the very root; fall back to index -2
        # (second-to-last taxon) — presumably intentional, verify.
        diff = -2
for org in lineages:
    print(org[diff])
| 22.711111 | 69 | 0.638943 |
2b977edbb02e06ab391708b7275b2af0229950f2 | 6,364 | py | Python | gamestonk_terminal/stocks/comparison_analysis/yahoo_finance_view.py | Flodur871/GamestonkTerminal | 6b7a8efc594c06987a155b823afb8c838b49066b | [
"MIT"
] | null | null | null | gamestonk_terminal/stocks/comparison_analysis/yahoo_finance_view.py | Flodur871/GamestonkTerminal | 6b7a8efc594c06987a155b823afb8c838b49066b | [
"MIT"
] | null | null | null | gamestonk_terminal/stocks/comparison_analysis/yahoo_finance_view.py | Flodur871/GamestonkTerminal | 6b7a8efc594c06987a155b823afb8c838b49066b | [
"MIT"
] | null | null | null | """ Comparison Analysis Yahoo Finance View """
__docformat__ = "numpy"
import os
from datetime import datetime, timedelta
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
from sklearn.preprocessing import MinMaxScaler
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.helper_funcs import export_data, plot_autoscale
from gamestonk_terminal.stocks.comparison_analysis import yahoo_finance_model
# Teach matplotlib how to plot pandas datetime values (DatetimeIndex axes).
register_matplotlib_converters()
# Maps the single-letter candle codes (as accepted by the `candle_type`
# parameters in this module) to the corresponding Yahoo Finance column names.
d_candle_types = {
    "o": "Open",
    "h": "High",
    "l": "Low",
    "c": "Close",
    "a": "Adj Close",
    "v": "Volume",
}
def display_historical(
    ticker: str,
    similar_tickers: List[str],
    start: str = (datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
    candle_type: str = "a",
    normalize: bool = True,
    export: str = "",
):
    """Display historical stock prices. [Source: Yahoo Finance]
    Parameters
    ----------
    ticker : str
        Base ticker
    similar_tickers : List[str]
        List of similar tickers
    start : str, optional
        Start date of comparison, by default 1 year ago
    candle_type : str, optional
        OHLCA column to use, by default "a" for Adjusted Close
    normalize : bool, optional
        Boolean to normalize all stock prices using MinMax defaults True
    export : str, optional
        Format to export historical prices, by default ""
    """
    # Base ticker first so it leads the column order (and the plot legend).
    ordered_tickers = [ticker, *similar_tickers]
    df_similar = yahoo_finance_model.get_historical(
        ticker, similar_tickers, start, candle_type
    )
    # To plot with ticker first
    df_similar = df_similar[ordered_tickers]
    # Warn about tickers with missing data points, then zero-fill so the
    # scaler and the plot can handle the frame.
    if np.any(df_similar.isna()):
        nan_tickers = df_similar.columns[df_similar.isna().sum() >= 1].to_list()
        print(f"NaN values found in: {', '.join(nan_tickers)}. Replacing with zeros.")
        df_similar = df_similar.fillna(0)
    fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    # This puts everything on 0-1 scale for visualizing
    if normalize:
        # MinMaxScaler rescales each column independently to [0, 1].
        mm_scale = MinMaxScaler()
        df_similar = pd.DataFrame(
            mm_scale.fit_transform(df_similar),
            columns=df_similar.columns,
            index=df_similar.index,
        )
    df_similar.plot(ax=ax)
    ax.set_title(f"Similar companies to {ticker}")
    ax.set_xlabel("Time")
    # Indexing a two-element list with the `normalize` bool selects the
    # matching label fragments.
    ax.set_ylabel(f"{['','Normalized'][normalize]} Share Price {['($)',''][normalize]}")
    # NOTE(review): `b=` was renamed to `visible=` in newer matplotlib —
    # confirm the pinned matplotlib version still accepts `b=True`.
    ax.grid(b=True, which="major", color="#666666", linestyle="-")
    # ensures that the historical data starts from same datapoint
    ax.set_xlim([df_similar.index[0], df_similar.index[-1]])
    plt.gcf().autofmt_xdate()
    fig.tight_layout()
    plt.show()
    # Optionally export the (possibly normalized) dataframe relative to this
    # module's directory.
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "historical", df_similar
    )
    print("")  # trailing blank line for terminal spacing
def display_volume(
    ticker: str,
    similar_tickers: List[str],
    start: str = (datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
    normalize: bool = True,
    export: str = "",
):
    """Plot daily trading volume of a ticker and similar companies.
    [Source: Yahoo Finance]

    Parameters
    ----------
    ticker : str
        Base ticker, always plotted first
    similar_tickers : List[str]
        List of similar tickers
    start : str, optional
        Start date of comparison ("%Y-%m-%d"), by default 1 year ago
    normalize : bool, optional
        MinMax-scale every series into [0, 1] so differently traded stocks
        are visually comparable, defaults True
    export : str, optional
        Format to export the volume data, by default "" (no export)
    """
    ordered_tickers = [ticker, *similar_tickers]
    # "v" selects the Volume column of the OHLCAV data
    df_similar = yahoo_finance_model.get_historical(ticker, similar_tickers, start, "v")
    # To plot with ticker first
    df_similar = df_similar[ordered_tickers]
    fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    # This puts everything on 0-1 scale for visualizing
    if normalize:
        mm_scale = MinMaxScaler()
        df_similar = pd.DataFrame(
            mm_scale.fit_transform(df_similar),
            columns=df_similar.columns,
            index=df_similar.index,
        )
    else:
        # Scale raw share counts down to millions for a readable axis
        df_similar = df_similar.div(1_000_000)
    df_similar.plot(ax=ax)
    ax.set_title("Volume over time")
    ax.set_xlabel("Date")
    # BUGFIX: the non-normalized values are divided by 1e6 above, so the unit
    # is millions "[M]" — the label previously claimed thousands "[K]".
    ax.set_ylabel(f"{['','Normalized'][normalize]} Volume {['[M]',''][normalize]}")
    ax.grid(b=True, which="major", color="#666666", linestyle="-")
    # ensures that the historical data starts from same datapoint
    ax.set_xlim([df_similar.index[0], df_similar.index[-1]])
    plt.gcf().autofmt_xdate()
    fig.tight_layout()
    plt.show()
    export_data(
        export, os.path.dirname(os.path.abspath(__file__)), "volume", df_similar
    )
    print("")
def display_correlation(
    ticker: str,
    similar_tickers: List[str],
    start: str = (datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
    candle_type: str = "a",
):
    """
    Correlation heatmap based on historical price comparison
    between similar companies. [Source: Yahoo Finance]

    Parameters
    ----------
    ticker : str
        Base ticker, always placed first in the matrix
    similar_tickers : List[str]
        List of similar tickers
    start : str, optional
        Start date of comparison ("%Y-%m-%d"), by default 1 year ago
    candle_type : str, optional
        OHLCA column to use (see ``d_candle_types``), by default "a" for
        Adjusted Close
    """
    ordered_tickers = [ticker, *similar_tickers]
    df_similar = yahoo_finance_model.get_historical(
        ticker, similar_tickers, start, candle_type
    )
    # To plot with ticker first
    df_similar = df_similar[ordered_tickers]
    # Backfill gaps so the correlation is not computed on NaNs.
    if np.any(df_similar.isna()):
        nan_tickers = df_similar.columns[df_similar.isna().sum() >= 1].to_list()
        print(f"NaN values found in: {', '.join(nan_tickers)}. Backfilling data")
        df_similar = df_similar.fillna(method="bfill")
    # Boolean mask hiding the upper triangle (diagonal included) so each
    # pairwise correlation is drawn only once, in the lower triangle.
    mask = np.zeros((df_similar.shape[1], df_similar.shape[1]), dtype=bool)
    mask[np.triu_indices(len(mask))] = True
    sns.heatmap(
        df_similar.corr(),
        cbar_kws={"ticks": [-1.0, -0.5, 0.0, 0.5, 1.0]},
        cmap="RdYlGn",
        linewidths=1,
        annot=True,
        vmin=-1,
        vmax=1,
        mask=mask,
    )
    plt.title("Correlation Heatmap")
    plt.show()
    print("")
| 32.635897 | 88 | 0.652891 |
efff5efe47df9c803896bc61222f53016b36678d | 2,169 | py | Python | pwndbg/commands/dumpargs.py | n00bSec/pwndbg | 400502bc2787f4bf7004a542156ca9c6cc28db67 | [
"MIT"
] | 2 | 2018-10-08T13:07:17.000Z | 2020-02-08T11:55:53.000Z | pwndbg/commands/dumpargs.py | n00bSec/pwndbg | 400502bc2787f4bf7004a542156ca9c6cc28db67 | [
"MIT"
] | null | null | null | pwndbg/commands/dumpargs.py | n00bSec/pwndbg | 400502bc2787f4bf7004a542156ca9c6cc28db67 | [
"MIT"
] | 1 | 2019-04-08T18:44:34.000Z | 2019-04-08T18:44:34.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import pwndbg.arguments
import pwndbg.chain
import pwndbg.commands
import pwndbg.commands.telescope
import pwndbg.disasm
# CLI definition for the `dumpargs` command.
# BUGFIX: the description previously told users to "Pass --all", but the only
# flag actually registered is -f/--force.
parser = argparse.ArgumentParser(
    description='Prints determined arguments for call instruction. Pass --force to see all possible arguments.'
)
parser.add_argument('-f', '--force', action='store_true', help='Force displaying of all arguments.')
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def dumpargs(force=False):
    """Print the resolved arguments of the call instruction at the current PC.

    With ``force`` (the ``-f``/``--force`` flag) every ABI argument register
    is dumped, even when the current instruction is not a call.
    """
    force_text = "Use `%s --force` to force the display." % dumpargs.__name__
    if not pwndbg.disasm.is_call() and not force:
        print("Cannot dump args as current instruction is not a call.\n" + force_text)
        return
    # all_args() dumps every ABI argument register unconditionally;
    # call_args() resolves only the arguments the call target actually takes.
    args = all_args() if force else call_args()
    if args:
        print('\n'.join(args))
    elif force:
        print("Couldn't resolve call arguments from registers.")
        print("Detected ABI: {} ({} bit) either doesn't pass arguments through registers or is not implemented. Maybe they are passed on the stack?".format(pwndbg.arch.current, pwndbg.arch.ptrsize*8))
    else:
        print("Couldn't resolve call arguments. Maybe the function doesn\'t take any?\n" + force_text)
def call_args():
    """
    Return display strings for the resolved arguments of the current call.

    Resolves the call target of the instruction at the current PC and
    formats one line per argument the callee takes. Only meaningful while
    stopped on a call instruction.
    """
    lines = []
    instruction = pwndbg.disasm.one()
    for argument, value in pwndbg.arguments.get(instruction):
        # char-typed arguments are rendered as data, everything else as code
        highlight_as_code = argument.type != 'char'
        rendered = pwndbg.chain.format(value, code=highlight_as_code)
        lines.append(' %-10s %s' % (argument.name + ':', rendered))
    return lines
def all_args():
    """
    Return display strings for every ABI argument register, resolved or not.
    """
    return [
        '%4s = %s' % (name, pwndbg.chain.format(value))
        for name, value in pwndbg.arguments.arguments()
    ]
| 31.897059 | 200 | 0.695712 |
83f3dd5f9c29669c5c9a181cef12663024e4ff71 | 2,140 | py | Python | api/http/python/example/GET-example.py | yzs981130/gStore | 6e9936e6cabd2617f1c451759fbfdfcee39fc6bf | [
"BSD-3-Clause"
] | 1 | 2020-03-01T12:32:20.000Z | 2020-03-01T12:32:20.000Z | api/http/python/example/GET-example.py | yzs981130/gStore | 6e9936e6cabd2617f1c451759fbfdfcee39fc6bf | [
"BSD-3-Clause"
] | null | null | null | api/http/python/example/GET-example.py | yzs981130/gStore | 6e9936e6cabd2617f1c451759fbfdfcee39fc6bf | [
"BSD-3-Clause"
] | 1 | 2020-09-17T13:06:30.000Z | 2020-09-17T13:06:30.000Z | # -*- coding: UTF-8 -*-
"""
# Filename: GET-example.py
# Author: suxunbin
# Last Modified: 2019-5-15 18:26
# Description: a simple GET-example of python API
"""
import sys
sys.path.append('../src')
import GstoreConnector
# before you run this example, make sure that you have started up ghttp service (using bin/ghttp port)
# "GET" is a default parameter that can be omitted
# Demo walkthrough of the gStore HTTP API over GET requests: build a database
# from an RDF file, run a SPARQL query, inspect/export it, then tear it down.
# Connection settings for the local ghttp service (see the note above).
IP = "127.0.0.1"
Port = 9000
username = "root"
password = "123456"
# LUBM benchmark query: undergraduates taking Course1, which is taught by
# FullProfessor1, who works for Department0.
sparql = "select ?x where \
{ \
?x <rdf:type> <ub:UndergraduateStudent>. \
?y <ub:name> <Course1>. \
?x <ub:takesCourse> ?y. \
?z <ub:teacherOf> ?y. \
?z <ub:name> <FullProfessor1>. \
?z <ub:worksFor> ?w. \
?w <ub:name> <Department0>. \
}"
filename = "res.txt"
# start a gc with given IP, Port, username and password
gc = GstoreConnector.GstoreConnector(IP, Port, username, password)
# build a database with a RDF graph
res = gc.build("lubm", "data/lubm/lubm.nt")
print(res)
# load the database
res = gc.load("lubm")
print(res);
# to add, delete a user or modify the privilege of a user, operation must be done by the root user
#res = gc.user("add_user", "user1", "111111")
#print(res);
# show all users
res = gc.showUser()
print(res)
# query (result returned as a json string)
res = gc.query("lubm", "json", sparql)
print(res)
# query and save the result in a file
gc.fquery("lubm", "json", sparql, filename)
# save the database if you have changed the database
res = gc.checkpoint("lubm")
print(res)
# show information of the database
res = gc.monitor("lubm")
print(res)
# show all databases
res = gc.show()
print(res)
# export the database
res = gc.exportDB("lubm", "export/lubm/lubm_get.nt")
print(res)
# unload the database
res = gc.unload("lubm")
print(res);
# drop the database
res = gc.drop("lubm", False) #delete the database directly
#res = gc.drop("lubm", True) #leave a backup
print(res);
# get CoreVersion and APIVersion
res = gc.getCoreVersion()
print(res)
res = gc.getAPIVersion()
print(res)
| 24.883721 | 102 | 0.62243 |
433fc65f9b2194ebf13cb59b01e78f923ad6daf7 | 1,934 | py | Python | edl/records.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 7 | 2015-05-14T09:36:36.000Z | 2022-03-30T14:32:21.000Z | edl/records.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 1 | 2015-07-14T11:47:25.000Z | 2015-07-17T01:45:26.000Z | edl/records.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 7 | 2015-07-25T22:29:29.000Z | 2022-03-01T21:26:14.000Z | from edl.util import parse_list_to_set
def recordIterator(stream, separatorRE, idRE=None):
    """
    Given:
        a file-like object (any iterator over strings)
        1 or 2 regular expressions that define record boundaries
         and identifiers
    Return:
        an iterator over records that returns a tuple of (id, [recordLines])

    If only a separator is given, its first capture group is assumed to
    match the record id. With an idRE, the id is taken from the last idRE
    match inside the record (the separator line included); records in
    which no id is ever found are silently dropped.
    """
    recordId = None
    recordLines = []
    for line in stream:
        m = separatorRE.search(line)
        if m:
            # Separator starts a new record: flush the previous one first.
            if recordId is not None:
                yield (recordId, recordLines)
                recordId = None
            recordLines = [line, ]
            if idRE is None:
                recordId = m.group(1)
            else:
                # The id may sit on the separator line itself.
                idMatch = idRE.search(line)
                if idMatch:
                    recordId = idMatch.group(1)
            # BUGFIX: previously only the `idRE is None` branch executed
            # `continue`, so when an idRE was supplied the separator line
            # fell through and was appended to recordLines a second time.
            continue
        recordLines.append(line)
        if idRE is not None:
            idMatch = idRE.search(line)
            if idMatch:
                recordId = idMatch.group(1)
    if recordId is not None:
        yield (recordId, recordLines)
def screenRecords(stream, separatorRE, idRE=None, keep=False,
                  screen_set=None, screenFile=None):
    """
    Filter the lines of a record stream by record id.

    Records are parsed with recordIterator(stream, separatorRE, idRE); the
    ids found in screen_set (or read from screenFile) select records, and
    `keep` decides whether selected records are emitted or dropped. Yields
    the surviving records' lines unchanged.
    """
    if screen_set is None:
        if screenFile is None:
            raise Exception(
                "Please supply a hash(Python map) or file of record keys")
        screen_set = parse_list_to_set(screenFile)
    for record_id, record_lines in recordIterator(stream, separatorRE,
                                                  idRE=idRE):
        # Emit when membership matches the requested polarity.
        if (record_id in screen_set) == keep:
            yield from record_lines
| 26.861111 | 78 | 0.573423 |
d03fbdd948acb631e6c603b81d3b2f0583d03277 | 175 | py | Python | servertime/__main__.py | tmat-project/time-recorder-backend | 47f2fc9dbc0a7e50a055e0253baf74a38a955d9e | [
"MIT"
] | 3 | 2021-11-18T01:26:36.000Z | 2021-11-18T17:50:22.000Z | servertime/__main__.py | tmat-project/time-recorder-backend | 47f2fc9dbc0a7e50a055e0253baf74a38a955d9e | [
"MIT"
] | null | null | null | servertime/__main__.py | tmat-project/time-recorder-backend | 47f2fc9dbc0a7e50a055e0253baf74a38a955d9e | [
"MIT"
] | null | null | null | from urllib.parse import urlparse
from . rest import app
# Hard-coded bind address; urlparse conveniently splits it into host/port.
url = urlparse('http://0.0.0.0:8001')
host, port = url.hostname, url.port
# debug=True enables the framework's debug mode -- development use only.
app.run(host=host, port=port, debug=True)
| 21.875 | 41 | 0.72 |
770cfc3e47ba4bb3596d3ec75c718da8b9c092cb | 2,234 | py | Python | src/main/python/app/workers/ArchiveExtractorWorker.py | karlpet/WadLauncher | 512f5d28de5c57e4dffdc642b170891a99a00ea8 | [
"MIT"
] | 2 | 2020-09-06T11:16:30.000Z | 2020-09-15T17:11:34.000Z | src/main/python/app/workers/ArchiveExtractorWorker.py | karlpet/WadLauncher | 512f5d28de5c57e4dffdc642b170891a99a00ea8 | [
"MIT"
] | 74 | 2020-09-07T16:40:54.000Z | 2021-06-18T00:22:39.000Z | src/main/python/app/workers/ArchiveExtractorWorker.py | karlpet/WadLauncher | 512f5d28de5c57e4dffdc642b170891a99a00ea8 | [
"MIT"
] | null | null | null | import shutil, os, re, pathlib
from PyQt5.QtCore import QThread, pyqtSignal
from app.config import Config
from app.workers.WorkerPool import *
def archive_extractor_worker_wrapper(file_path, should_remove_archive=False, done_handlers=None):
    """Create an ArchiveExtractorWorker, wire its handlers and start it.

    file_path: archive to extract.
    should_remove_archive: delete the archive after a successful extraction.
    done_handlers: optional iterable of callables connected to the worker's
        `done` signal (each receives the destination directory path).
    """
    worker = ArchiveExtractorWorker(file_path, should_remove_archive)
    # BUGFIX: the default used to be a mutable `[]`; use None and treat a
    # missing value as "no handlers" to avoid the shared-default pitfall.
    for handler in (done_handlers or ()):
        worker.done.connect(handler)
    WorkerPool.Instance().start(worker)
class ArchiveExtractorWorker(QThread):
    """Background thread that extracts an archive into the WADs directory.

    Unpacks `file_path` into a temp directory under BASE_PATH, moves the
    result into WADS_PATH, and emits `done` with the destination directory.
    """

    # Emitted with the final destination directory once extraction finishes.
    done = pyqtSignal(str)

    def __init__(self, file_path, should_remove_archive = False, parent=None):
        QThread.__init__(self, parent)
        self.file_path = file_path
        self.should_remove_archive = should_remove_archive
        config = Config.Instance()
        base_path = os.path.expanduser(config['PATHS']['BASE_PATH'])
        self.wads_path = os.path.expanduser(config['PATHS']['WADS_PATH'])
        # remove file extension (.zip or whatever)
        # NOTE(review): only strips a single trailing lowercase extension --
        # "foo.tar.gz" keeps ".tar" and uppercase extensions are kept as-is.
        pattern = re.compile(r'\.[a-z0-9]+$')
        self.file_dir = pattern.sub('', pathlib.Path(file_path).name)
        self.temp_extraction_path = os.path.join(base_path, 'temp', self.file_dir)

    def run(self):
        """Extract the archive, move it into place and emit `done`."""
        pathlib.Path(self.temp_extraction_path).mkdir(parents=True, exist_ok=True)
        shutil.unpack_archive(self.file_path, self.temp_extraction_path)
        source_dir = self.temp_extraction_path
        # First (and only inspected) level of the extraction tree.
        tree = [f for f in os.walk(self.temp_extraction_path)][0]
        p, directories, files = tree
        # if no files are in the first level of archive extraction
        # and there exists only one directory instead
        # then we need to move the directory inside our temp_extraction_path instead.
        # this is the only case where this solution is a good idea.
        if (len(files) == 0 and len(directories) == 1):
            source_dir = os.path.join(self.temp_extraction_path, directories[0])
        destination_dir = os.path.join(self.wads_path, self.file_dir)
        shutil.move(source_dir, destination_dir)
        # If we moved a nested directory, the now-empty temp dir remains.
        if source_dir != self.temp_extraction_path:
            shutil.rmtree(self.temp_extraction_path)
        if self.should_remove_archive:
            os.remove(self.file_path)
        self.done.emit(destination_dir)
| 39.892857 | 95 | 0.69248 |
f178eb6e61fdf87401afb87d9ac44938fe9f7291 | 2,860 | py | Python | tests/whitebox/integration/physical/test_list.py | jbaublitz/stratis-cli | 602199fb476f53e519a36b1894337e96a68459cd | [
"Apache-2.0"
] | null | null | null | tests/whitebox/integration/physical/test_list.py | jbaublitz/stratis-cli | 602199fb476f53e519a36b1894337e96a68459cd | [
"Apache-2.0"
] | null | null | null | tests/whitebox/integration/physical/test_list.py | jbaublitz/stratis-cli | 602199fb476f53e519a36b1894337e96a68459cd | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test 'list'.
"""
from dbus_client_gen import DbusClientUniqueResultError
from stratis_cli._errors import StratisCliActionError
from .._misc import device_name_list
from .._misc import RUNNER
from .._misc import SimTestCase
_DEVICE_STRATEGY = device_name_list(1)
class ListTestCase(SimTestCase):
    """
    Test the blockdev listing commands when no pool has been created.
    """

    _MENU = ["--propagate", "blockdev", "list"]
    _POOLNAME = "deadpool"

    def testList(self):
        """
        Naming a non-existent pool must fail with a unique-result error.
        """
        with self.assertRaises(StratisCliActionError) as context:
            RUNNER(self._MENU + [self._POOLNAME])
        self.assertIsInstance(
            context.exception.__cause__, DbusClientUniqueResultError)

    def testListEmpty(self):
        """
        Omitting the pool name must succeed and yield an empty listing.
        """
        RUNNER(self._MENU)

    def testListDefault(self):
        """
        The bare blockdev subcommand defaults to listing all blockdevs of
        all pools; with no pools the listing is empty but the call succeeds.
        """
        RUNNER(self._MENU[:-1])
class List2TestCase(SimTestCase):
    """
    Test the blockdev listing commands against an existing pool.
    """

    _MENU = ["--propagate", "blockdev", "list"]
    _POOLNAME = "deadpool"

    def setUp(self):
        """
        Start the simulated stratisd daemon and create one pool.
        """
        super().setUp()
        RUNNER(["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY())

    def testList(self):
        """
        Listing the pool's devices by name must succeed.
        """
        RUNNER(self._MENU + [self._POOLNAME])

    def testListEmpty(self):
        """
        Listing must also succeed when no pool name is given.
        """
        RUNNER(self._MENU)

    def testListDefault(self):
        """
        The bare blockdev subcommand defaults to listing all blockdevs of
        all pools.
        """
        RUNNER(self._MENU[:-1])
| 28.316832 | 81 | 0.651748 |
cf00b9ead27e6b6726b54c6f11fd0360fcb70201 | 2,799 | py | Python | pelicanconf.py | ogrisel/website | 04a9434f5f146d232211643d60c64f5dd9714c09 | [
"Artistic-2.0"
] | null | null | null | pelicanconf.py | ogrisel/website | 04a9434f5f146d232211643d60c64f5dd9714c09 | [
"Artistic-2.0"
] | null | null | null | pelicanconf.py | ogrisel/website | 04a9434f5f146d232211643d60c64f5dd9714c09 | [
"Artistic-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Pelican static-site configuration: site identity, URL layout, theme setup.
AUTHOR = u'Ga\xebl Varoquaux'
SITENAME = u'Ga\xebl Varoquaux'
SITEURL = 'http://gael-varoquaux.info'
AUTHOR_EMAIL = 'gael.varoquaux@normalesup.org'
GITHUB_URL = 'https://github.com/GaelVaroquaux/'
TWITTER_USERNAME = 'GaelVaroquaux'
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Posts in the future get assigned a draft status
WITH_FUTURE_DATES = True
# Blogroll
MENUITEMS = (
    ('Selected posts', 'tag/selected.html'),
    ('Latest posts', 'index.html#posts'),
    ('About me', 'about.html'),
)
# URL layout: articles live under their category, pages at the site root.
ARTICLE_URL = '{category}/{slug}.html'
ARTICLE_SAVE_AS = '{category}/{slug}.html'
PAGE_URL = '{slug}.html'
URL = '{category}/{slug}.html'
PAGE_SAVE_AS = '{slug}.html'
DEFAULT_PAGINATION = 10
SUMMARY_MAX_LENGTH = 50
# Use document-relative URLs (handy when developing locally)
RELATIVE_URLS = True
STATIC_PATHS = ['images', 'science/attachments', 'programming/attachments',
                'personnal/attachments']
IGNORE_FILES = ['content/science/attachments/citations.html',
                '.#*',]
# Do NOT apply the typogrify post-processing
TYPOGRIFY = False
# Better settings for the rst generation
DOCUTILS_SETTINGS = dict(smart_quotes=True)
USE_FOLDER_AS_CATEGORY = True
import logging
# Silence known harmless warnings/errors during site generation.
LOG_FILTER = [(logging.WARN, 'Empty alt attribute for image.*'),
              (logging.ERROR, 'Skipping science/attachments/citations.html')]
###############################################################################
# For the pure theme
# The theme itself
THEME = "pure"
# Links in the sidebar: this is not standard, it is for my own modified
# theme, as it has 3 entries per item:
# - The link title
# - The icon name on http://fontawesome.io/icons/ after stripping 'fa-'
# - The link itself
SOCIAL = (
    ('Google scholar', 'graduation-cap',
     'http://scholar.google.fr/citations?user=OGGu384AAAAJ', ''),
    ('twitter', 'twitter-square', 'https://twitter.com/GaelVaroquaux', ''),
    ('GitHub', 'github', GITHUB_URL, ''),
    ("Artwork", 'camera-retro',
     'http://www.flickriver.com/photos/gaelvaroquaux/popular-interesting/',
     """<div class="extra"><div id="flickrstream"></div></div>"""),
)
# Linkedin, slideshare
# My gravatar
PROFILE_IMAGE_URL = 'http://gael-varoquaux.info/images/gael.png'
# The pretty picture for the sidebar
COVER_IMG_URL = 'http://gael-varoquaux.info/images/cover_img.jpg'
TAGLINE = "computer / data / brain science"
# global metadata to all the contents
DEFAULT_METADATA = (('email', 'gael.varoquaux@normalesup.org'),
                    ('profile_image', PROFILE_IMAGE_URL))
| 27.99 | 79 | 0.67667 |
fecbc3bb5981f3998115506a86e20f1bc4add811 | 1,803 | py | Python | src/cv/cv/src/webcam.py | APMMonteiro/european_robotic_league | 1a7345bdbdf4a57c434c6fda44b0714c277877a7 | [
"BSD-3-Clause-Clear"
] | null | null | null | src/cv/cv/src/webcam.py | APMMonteiro/european_robotic_league | 1a7345bdbdf4a57c434c6fda44b0714c277877a7 | [
"BSD-3-Clause-Clear"
] | null | null | null | src/cv/cv/src/webcam.py | APMMonteiro/european_robotic_league | 1a7345bdbdf4a57c434c6fda44b0714c277877a7 | [
"BSD-3-Clause-Clear"
] | 5 | 2021-11-26T12:06:56.000Z | 2022-02-15T14:15:16.000Z | #!/usr/bin/python
# Adapted from https://automaticaddison.com/working-with-ros-and-opencv-in-ros-noetic/
# Basics ROS program to publish real-time streaming
# video from your built-in webcam
# Author:
# - Addison Sears-Collins
# - https://automaticaddison.com
# Import the necessary libraries
import rospy # Python library for ROS
from sensor_msgs.msg import Image # Image is the message type
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
import cv2 # OpenCV library
import time
def publish_message():
    """Capture frames from the default webcam and publish them on /webcam.

    Publishes sensor_msgs/Image messages roughly every 0.5 s until ROS
    shuts down. Frames that fail to read are skipped silently.
    """
    # Node is publishing to the /webcam topic using the Image message type.
    pub = rospy.Publisher('/webcam', Image, queue_size=10)
    # Tells rospy the name of the node. anonymous=True makes sure the node
    # has a unique name (random numbers are appended to it).
    rospy.init_node('video_pub_py', anonymous=True)
    # Create a VideoCapture object. The argument '0' gets the default webcam.
    cap = cv2.VideoCapture(0)
    # Used to convert between ROS and OpenCV images.
    br = CvBridge()
    while not rospy.is_shutdown():
        # Capture frame-by-frame; ret is False when no frame could be read.
        ret, frame = cap.read()
        if ret:
            # cv2_to_imgmsg converts an OpenCV image to a ROS image message.
            pub.publish(br.cv2_to_imgmsg(frame))
        # Throttle to ~2 Hz. time.sleep is used (rather than rospy.Rate,
        # whose sleep wasn't terminating here); the previously-created but
        # unused Rate object has been removed.
        time.sleep(0.5)


if __name__ == '__main__':
    try:
        publish_message()
    except rospy.ROSInterruptException:
        pass
73b2cb6e0cb2aad260e42c81d2c2fc622f0b2068 | 1,134 | py | Python | tests/storage/cases/test_KT1JGnD4XhVE1pXt7oy97jLydZctekDtJjwD.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-08-11T02:31:24.000Z | 2020-08-11T02:31:24.000Z | tests/storage/cases/test_KT1JGnD4XhVE1pXt7oy97jLydZctekDtJjwD.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-12-30T16:44:56.000Z | 2020-12-30T16:44:56.000Z | tests/storage/cases/test_KT1JGnD4XhVE1pXt7oy97jLydZctekDtJjwD.py | tqtezos/pytezos | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1JGnD4XhVE1pXt7oy97jLydZctekDtJjwD(TestCase):
    """Storage round-trip and formatting checks for one Carthagenet contract."""

    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None
        cls.contract = get_data(
            'storage/carthagenet/KT1JGnD4XhVE1pXt7oy97jLydZctekDtJjwD.json')

    def test_storage_encoding_KT1JGnD4XhVE1pXt7oy97jLydZctekDtJjwD(self):
        # Decoding the stored value and re-encoding it must reproduce the
        # original Micheline expression exactly.
        script = self.contract['script']
        storage_type = script['code'][1]
        storage_value = script['storage']
        schema = build_schema(storage_type)
        roundtripped = encode_micheline(
            decode_micheline(storage_value, storage_type, schema), schema)
        self.assertEqual(storage_value, roundtripped)

    def test_storage_schema_KT1JGnD4XhVE1pXt7oy97jLydZctekDtJjwD(self):
        # Building the schema must not raise.
        build_schema(self.contract['script']['code'][0])

    def test_storage_format_KT1JGnD4XhVE1pXt7oy97jLydZctekDtJjwD(self):
        # Formatting code and storage to Michelson must not raise.
        micheline_to_michelson(self.contract['script']['code'])
        micheline_to_michelson(self.contract['script']['storage'])
| 40.5 | 112 | 0.749559 |
82640dd39550477f2cd518c44c41b9e883d4b68a | 5,450 | py | Python | src/plugins/voice.py | NNNMM12345/Discord_Sandbot1 | 76ed7a97efd1d6d0eb7efd9aff78985e63cfb6c6 | [
"MIT"
] | 4 | 2019-01-02T20:31:17.000Z | 2020-09-06T09:43:22.000Z | src/plugins/voice.py | NNNMM12345/Discord_Sandbot1 | 76ed7a97efd1d6d0eb7efd9aff78985e63cfb6c6 | [
"MIT"
] | 2 | 2018-03-23T00:45:17.000Z | 2018-03-27T15:44:13.000Z | src/plugins/voice.py | NNNMM12345/Discord_Sandbot1 | 76ed7a97efd1d6d0eb7efd9aff78985e63cfb6c6 | [
"MIT"
] | 2 | 2018-03-24T22:48:33.000Z | 2018-03-24T22:49:09.000Z | import discord
import youtube_dl
import asyncio
from discord.ext import commands
# Suppress noise about console usage from errors
youtube_dl.utils.bug_reports_message = lambda: ''
# Download/extraction options handed to youtube_dl below.
ytdl_format_options = {
    'format': 'bestaudio/best',
    'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
    'restrictfilenames': True,
    'noplaylist': True,
    'nocheckcertificate': True,
    'ignoreerrors': False,
    'logtostderr': False,
    'quiet': True,
    'no_warnings': True,
    'default_search': 'auto',
    'source_address': '0.0.0.0' # bind to ipv4 since ipv6 addresses cause issues sometimes
}
# ffmpeg flags: no stdin interaction, audio only (-vn drops the video stream).
ffmpeg_options = {
    'before_options': '-nostdin',
    'options': '-vn'
}
# Shared downloader instance used by YTDLSource below.
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
class YTDLSource(discord.PCMVolumeTransformer):
    """Volume-adjustable audio source built from a youtube_dl info dict."""

    def __init__(self, source, *, data, volume=0.5):
        super().__init__(source, volume)
        # Raw youtube_dl metadata, kept for callers needing more fields.
        self.data = data
        self.title = data.get('title')
        self.url = data.get('url')

    @classmethod
    async def from_url(cls, url, *, loop=None, stream=False):
        """Build a source from a URL; download it unless `stream` is True."""
        loop = loop or asyncio.get_event_loop()
        # extract_info blocks, so run it in the default executor to keep the
        # event loop responsive.
        data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
        if 'entries' in data:
            # take first item from a playlist
            data = data['entries'][0]
        # Streaming feeds ffmpeg the remote media URL; otherwise use the
        # file youtube_dl just downloaded.
        filename = data['url'] if stream else ytdl.prepare_filename(data)
        return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
# Cog bundling all voice-playback commands. The per-command docstrings below
# double as user-visible help text in Discord, so they are left untouched.
class Voice:
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def join(self, ctx, *, channel: discord.VoiceChannel):
        """Joins a voice channel"""
        # Already connected somewhere: just move instead of reconnecting.
        if ctx.voice_client is not None:
            return await ctx.voice_client.move_to(channel)
        await channel.connect()

    @commands.command()
    async def play(self, ctx, *, query: str=None):
        """Plays a file from the local filesystem"""
        if query == None:
            # No path given: tell the user and leave the voice channel.
            await ctx.send("Did you give me path to the file you want me to play?")
            try:
                await ctx.voice_client.disconnect()
            except:
                pass
        else:
            try:
                source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))
                # `after` runs when playback ends; it logs any player error.
                ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)
                await ctx.send('Now playing: {}\nDefault volume is: 0.5'.format(query))
            except Exception as e:
                await ctx.send("I need a file from the local filesystem\n" + "Error Code: " + str(e))

    @commands.command()
    async def yt(self, ctx, *, url: str=None):
        """Plays from a url (almost anything youtube_dl supports)"""
        if url == None:
            await ctx.send("url?")
            try:
                await ctx.voice_client.disconnect()
            except:
                pass
        else:
            try:
                # Show "typing..." while the (slow) download resolves.
                async with ctx.typing():
                    player = await YTDLSource.from_url(url, loop=self.bot.loop)
                    ctx.voice_client.play(player, after=lambda e: print('Player error: %s' % e) if e else None)
                await ctx.send('Now playing: {}\nDefault volume is: 0.5'.format(player.title))
            except Exception as e:
                await ctx.send("You did something wrong.\n" + "Error Code: " + str(e))

    @commands.command()
    async def stream(self, ctx, *, url: str=None):
        """Streams from a url in real time example: Twitch, Youtube etc"""
        if url == None:
            await ctx.send("url?")
            try:
                await ctx.voice_client.disconnect()
            except:
                pass
        else:
            try:
                async with ctx.typing():
                    # stream=True: feed ffmpeg the remote URL, no download.
                    player = await YTDLSource.from_url(url, loop=self.bot.loop, stream=True)
                    ctx.voice_client.play(player, after=lambda e: print('Player error: %s' % e) if e else None)
                await ctx.send('Now playing: {}\nDefault volume is: 0.5'.format(player.title))
            except Exception as e:
                await ctx.send("You did something wrong while trying to stream a video.\n" + "Error Code: " + str(e))

    @commands.command()
    async def volume(self, ctx, volume: str):
        """Changes the player's volume(Default volume is 0.5)"""
        if ctx.voice_client is None:
            return await ctx.send("Not connected to a voice channel.")
        # NOTE(review): the value is a 0.0-1.0 scalar, not a percentage,
        # despite the "%" in the confirmation message below.
        ctx.voice_client.source.volume = float(volume)
        await ctx.send("Changed volume to {}%".format(volume))

    @commands.command()
    async def stop(self, ctx):
        """Stops and disconnects the bot from voice"""
        try:
            await ctx.voice_client.disconnect()
        except Exception as e:
            await ctx.send("Is the bot connected to any voice channels?\n" + "Error Code: " + str(e))

    # Runs before play/yt/stream: make sure we are in the author's voice
    # channel, and stop anything currently playing.
    @play.before_invoke
    @yt.before_invoke
    @stream.before_invoke
    async def ensure_voice(self, ctx):
        if ctx.voice_client is None:
            if ctx.author.voice:
                await ctx.author.voice.channel.connect()
            else:
                await ctx.send("You are not connected to a voice channel.")
                raise commands.CommandError("Author not connected to a voice channel.")
        elif ctx.voice_client.is_playing():
            ctx.voice_client.stop()
def setup(bot):
    # discord.py extension entry point: register the Voice cog on the bot.
    bot.add_cog(Voice(bot))
| 36.092715 | 117 | 0.593211 |
c2b84cabb7bbd6013414fbc38f523fa9dd5b4017 | 3,656 | py | Python | ironic_python_agent/tests/functional/test_commands.py | poojaghumre/ironic-python-agent | dc8c1f16f9a00e2bff21612d1a9cf0ea0f3addf0 | [
"Apache-2.0"
] | 86 | 2015-01-21T22:04:48.000Z | 2022-03-29T12:15:49.000Z | ironic_python_agent/tests/functional/test_commands.py | poojaghumre/ironic-python-agent | dc8c1f16f9a00e2bff21612d1a9cf0ea0f3addf0 | [
"Apache-2.0"
] | 3 | 2018-01-29T07:44:43.000Z | 2021-09-04T06:13:42.000Z | ironic_python_agent/tests/functional/test_commands.py | poojaghumre/ironic-python-agent | dc8c1f16f9a00e2bff21612d1a9cf0ea0f3addf0 | [
"Apache-2.0"
] | 110 | 2015-01-29T09:26:52.000Z | 2021-12-29T03:16:27.000Z | # Copyright 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ironic_python_agent.tests.functional import base
class TestCommands(base.FunctionalBase):
    """Tests the commands API.

    The checks are written as numbered "step" methods executed in order by
    a single test, so that ordering is preserved and IPA state stays
    consistent across runs.
    """

    node = {'uuid': '1', 'properties': {}, 'instance_info': {}}

    def step_1_get_empty_commands(self):
        # A fresh agent has run no commands yet.
        self.assertEqual({'commands': []}, self.request('get', 'commands'))

    def step_2_run_command(self):
        # NOTE(mariojv): get_clean_steps always returns the default
        # HardwareManager clean steps if there's not a more specific HWM, so
        # this succeeds even with an empty node and port. Steps 3 and 4
        # depend on this step succeeding.
        payload = {'name': 'clean.get_clean_steps',
                   'params': {'node': self.node, 'ports': {}}}
        result = self.request('post', 'commands', json=payload,
                              headers={'Content-Type': 'application/json'})
        self.assertIsNone(result['command_error'])

    def step_3_get_commands(self):
        # Relies on step 2, which ran the command we are checking for.
        listing = self.request('get', 'commands')['commands']
        self.assertEqual(1, len(listing))
        self.assertEqual('get_clean_steps', listing[0]['command_name'])

    def step_4_get_command_by_id(self):
        # Query the commands API again to obtain the ID (this call must
        # still succeed), then fetch that single command by its ID.
        # Relies on step 2, which ran the command we are checking for.
        listing = self.request('get', 'commands')['commands']
        command_id = listing[0]['id']
        single = self.request('get', 'commands/%s' % command_id)
        self.assertEqual('get_clean_steps', single['command_name'])

    def step_5_run_non_existent_command(self):
        bogus = {'name': 'bad_extension.fake_command', 'params': {}}
        self.request('post', 'commands', expect_error=404, json=bogus)

    def positive_get_post_command_steps(self):
        """Returns generator with test steps sorted by step number."""
        # Sort numerically on the digits after "step_"; a plain
        # lexicographic sort would order step_12 before step_3.
        def step_number(name):
            return int(name.split('_', 2)[1])
        ordered = sorted((n for n in dir(self) if n.startswith('step_')),
                         key=step_number)
        return (getattr(self, n) for n in ordered)

    def test_positive_get_post_commands(self):
        for step_fn in self.positive_get_post_command_steps():
            step_fn()
| 44.048193 | 79 | 0.664661 |
1ab0c77132b21de667bbd743134c81541cd64f2f | 13,899 | py | Python | SimLight/field.py | Miyoshichi/SimLight | 9f01dee5e324026bfdcdbe9f83cd29bbd447adda | [
"MIT"
] | null | null | null | SimLight/field.py | Miyoshichi/SimLight | 9f01dee5e324026bfdcdbe9f83cd29bbd447adda | [
"MIT"
] | null | null | null | SimLight/field.py | Miyoshichi/SimLight | 9f01dee5e324026bfdcdbe9f83cd29bbd447adda | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on May 21, 2020
@author: Zhou Xiang
"""
import math
import copy
import numpy as np
from .plottools import plot_wavefront, plot_intensity
from .utils import cart2pol
from .units import *
class Field:
    """
    A basic scalar light field sampled on an N x N complex grid.

    Parameters
    ----------
    wavelength : float
        Physical wavelength of input light (must be > 0).
    size : float
        Physical size of input light field.
            circle: diameter
            square: side length
    N : int
        Pixel numbers of input light field in one dimension.
    """
    def __init__(self, wavelength=1.0, size=0, N=0):
        """
        Validate the parameters and allocate the complex amplitude grids.

        Raises
        ------
        ValueError
            If wavelength, size or N is not positive.
        """
        # check of inputted parameters
        if wavelength <= 0:
            raise ValueError('Wavelength cannot be less than 0.')
        if size <= 0:
            raise ValueError('Light field cannot be smaller than 0.')
        if N <= 0:
            raise ValueError('Cannot generate zero light field')

        self._wavelength = wavelength
        self._size = size
        self._N = N
        self._curvature = 0
        self._phase_ratio = 1
        # Fix: `np.complex` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin `complex` yields the same complex128 arrays.
        self._complex_amp = np.ones([N, N], dtype=complex)
        self._complex_amp2 = np.ones([N, N], dtype=complex)

    def zernike_aberration(self):
        """TODO: apply a Zernike aberration to the field (not implemented)."""
        pass

    def plot_wavefront(self, noise=False, mask_r=None, dimension=2,
                       unit='mm', title=''):
        """Plot the wavefront of the light field using matplotlib.

        Parameters
        ----------
        noise : bool, optional, default False
            Whether to plot the noise of the wavefront.
        mask_r : float, optional, from 0 to 1, default None
            Radius of a circle mask.
        dimension : int, optional, {1, 2, 3}, default 2
            Dimension of the shown wavefront (2 for surface, 3 for 3d).
        unit : str, optional, {'m', 'cm', 'mm', 'um', 'µm', 'nm'},
            default 'mm'
            Unit used for FOV.
        title : str, optional
            Title of the figure.
        """
        fid = 'SimLight'  # tags the field list as coming from a SimLight Field
        field = [self._wavelength,
                 self._size,
                 self._N,
                 self._complex_amp2,
                 self._phase_ratio,
                 fid]
        # Resolves to the module-level plot_wavefront() imported from
        # .plottools; methods do not shadow module globals inside their body.
        plot_wavefront(field, noise, mask_r, dimension, unit, title)

    def plot_intensity(self, mask_r=None, norm_type=0, dimension=2, mag=1,
                       unit='µm', title=''):
        """Plot the intensity of the light field using matplotlib.

        Parameters
        ----------
        mask_r : float, optional, from 0 to 1, default None
            Radius of a circle mask.
        norm_type : int, optional, {0, 1, 2}, default 0
            Normalization: 0 none, 1 up to 1, 2 up to 255.
        dimension : int, optional, {1, 2}, default 2
            1 plots a line profile, 2 plots a surface.
        mag : float, optional, default 1
            Magnification of the figure.
        unit : str, optional, {'m', 'cm', 'mm', 'um', 'µm', 'nm'},
            default 'µm'
            Unit used for FOV.
        title : str, optional, default ''
            Title of the figure.
        """
        field = [self._size, self._N, self._complex_amp]
        # Module-level plot_intensity() from .plottools (see plot_wavefront).
        plot_intensity(field, mask_r, norm_type, dimension, mag, unit,
                       title)

    def zernike_coeffs(self, j):
        """TODO: compute the first `j` Zernike coefficients of the field.

        Currently only builds the polar sampling grid and discards it;
        kept unchanged for interface compatibility until implemented.
        """
        x = np.linspace(-self._size / 2, self._size / 2, self._N)
        X, Y = np.meshgrid(x, x)
        theta, r = cart2pol(X, Y)

    @classmethod
    def copy(cls, field):
        """
        Create a deep copy of the input light field so that the original
        field is not changed by later operations.

        Parameters
        ----------
        field : SimLight.Field
            Input light field to copy.

        Returns
        ----------
        copied_field : SimLight.Field
            A new copied light field.
        """
        return copy.deepcopy(field)

    # --- read-only physical wavelength ---
    @property
    def wavelength(self):
        return self._wavelength

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, size):
        self._size = size

    @property
    def N(self):
        return self._N

    @N.setter
    def N(self, N):
        self._N = N

    @property
    def curvature(self):
        return self._curvature

    @curvature.setter
    def curvature(self, curvature):
        self._curvature = curvature

    @property
    def phase_ratio(self):
        return self._phase_ratio

    @phase_ratio.setter
    def phase_ratio(self, phase_ratio):
        self._phase_ratio = phase_ratio

    @property
    def complex_amp(self):
        return self._complex_amp

    @complex_amp.setter
    def complex_amp(self, complex_amp):
        self._complex_amp = complex_amp

    @property
    def complex_amp2(self):
        return self._complex_amp2

    @complex_amp2.setter
    def complex_amp2(self, complex_amp2):
        self._complex_amp2 = complex_amp2
class PlaneWave(Field):
    """
    A (possibly tilted) plane wave light field.

    Parameters
    ----------
    wavelength : float
        Physical wavelength of input light.
    size : float
        Physical size of input light field (circle: diameter,
        square: side length).
    N : int
        Pixel numbers of input light field in one dimension.
    x_tilt : float
        Tilt coefficient in x direction, unit: rad.
    y_tilt : float
        Tilt coefficient in y direction, unit: rad.
    """

    def __init__(self, wavelength, size, N, x_tilt=0, y_tilt=0):
        """Build a plane wave with the given tilts (rad) applied."""
        super().__init__(wavelength, size, N)

        self._x_tilt = x_tilt
        self._y_tilt = y_tilt
        self._field_type = 'plane wave'
        # The tilt phase is deterministic, so compute it once and apply
        # it to both amplitude grids.
        tilt_phase = self.__tilt(self._wavelength, self._size, self._N,
                                 [self._x_tilt, self._y_tilt])
        self._complex_amp *= tilt_phase
        self._complex_amp2 *= tilt_phase

    @staticmethod
    def __tilt(wavelength, size, N, tilt):
        """
        Phase factor of a tilted plane wave:
        U = A * exp(ikr - φ0), with φ = -k * (tx * X + ty * Y).

        Parameters
        ----------
        wavelength : float
            Physical wavelength of input light.
        size : float
            Physical size of input light field.
        N : int
            Pixel numbers of input light field in one dimension.
        tilt : list, [x_tilt, y_tilt]
            Tilt coefficients in x and y, unit: rad.
        """
        coords = np.linspace(-size / 2, size / 2, N)
        grid_x, grid_y = np.meshgrid(coords, coords)
        wave_number = 2 * np.pi / wavelength
        phase = -wave_number * (tilt[0] * grid_x + tilt[1] * grid_y)
        return np.exp(1j * phase)

    @property
    def x_tilt(self):
        return self._x_tilt

    @property
    def y_tilt(self):
        return self._y_tilt

    @property
    def field_type(self):
        return self._field_type
class SphericalWave(Field):
    """
    A spherical wave light field.

    Parameters
    ----------
    wavelength : float
        Physical wavelength of input light.
    size : float
        Physical size of input light field (circle: diameter,
        square: side length).
    N : int
        Pixel numbers of input light field in one dimension.
    z : float
        The propagation distance of the spherical wave from center.
    """

    def __init__(self, wavelength, size, N, z=0):
        """Build a spherical wave a distance `z` from its center."""
        super().__init__(wavelength, size, N)

        self._z = z
        self._field_type = 'spherical wave'
        # Deterministic, so compute once and apply to both grids.
        spherical = self.__sphere(self._wavelength, self._size,
                                  self._N, self._z)
        self._complex_amp *= spherical
        self._complex_amp2 *= spherical

    @staticmethod
    def __sphere(wavelength, size, N, z):
        """
        Complex amplitude of a spherical wave:
        U = (A / r) * exp(-ikr), r = √(x^2 + y^2 + z^2);
        the overall sign flips for z <= 0.

        Parameters
        ----------
        wavelength : float
            Physical wavelength of input light.
        size : float
            Physical size of input light field.
        N : int
            Pixel numbers of input light field in one dimension.
        z : float
            The propagation distance of the spherical wave from center.
        """
        coords = np.linspace(-size / 2, size / 2, N)
        grid_x, grid_y = np.meshgrid(coords, coords)
        radius = np.sqrt(grid_x**2 + grid_y**2 + z**2)
        wave_number = 2 * np.pi / wavelength
        # NOTE(review): for z == 0 the on-axis radius is 0, so the
        # division produces inf at the center -- confirm callers avoid z=0.
        wave = np.exp(-1j * wave_number * radius) / radius
        return wave if z > 0 else -wave

    @property
    def z(self):
        return self._z

    @property
    def field_type(self):
        return self._field_type
class Gaussian(Field):
    """
    A gaussian beam light field (TEM00).

    Parameters
    ----------
    wavelength : float
        Physical wavelength of input light.
    size : float
        Physical size of input light field (circle: diameter,
        square: side length).
    N : int
        Pixel numbers of input light field in one dimension.
    w0 : float
        Size of the waist.
    z : float
        The propagation distance of the gaussian beam from the waist.
    """

    def __init__(self, wavelength, size, N, w0=0, z=0):
        """Build a TEM00 gaussian beam at distance `z` from the waist."""
        super().__init__(wavelength, size, N)

        # `w0` is halved internally (it appears to be a diameter --
        # TODO confirm); 0 means "fill half the field" (radius = size / 2).
        self._w0 = size / 2 if w0 == 0 else w0 / 2
        self._z = z
        self._field_type = 'gaussian beam'
        beam = self.__gaussian(self._wavelength, self._size, self._N,
                               self._w0, self._z)
        self._complex_amp *= beam
        self._complex_amp2 *= beam

    @staticmethod
    def __gaussian(wavelength, size, N, w0, z):
        """
        TEM00 gaussian beam complex amplitude:
        U = (A / ω(z)) * exp(-(x^2 + y^2) / ω^2(z)) *
            exp(-ik(z + (x^2 + y^2) / 2r(z)) + iφ(z))
        where ω(z) = ω0 * √(1 + (z / zR)^2)
              r(z) = z * (1 + (zR / z)^2)
              φ(z) = arctan(z / zR)
              zR   = πω0^2 / λ

        Parameters
        ----------
        wavelength : float
            Physical wavelength of input light.
        size : float
            Physical size of input light field.
        N : int
            Pixel numbers of input light field in one dimension.
        w0 : float
            Waist radius.
        z : float
            Propagation distance from the waist.
        """
        coords = np.linspace(-size / 2, size / 2, N)
        grid_x, grid_y = np.meshgrid(coords, coords)
        rho2 = grid_x**2 + grid_y**2

        rayleigh = np.pi * w0**2 / wavelength
        beam_radius = w0 * np.sqrt(1 + (z / rayleigh)**2)
        # At the waist the wavefront is flat: infinite curvature radius.
        curvature_radius = (z * (1 + (rayleigh / z)**2) if z != 0
                            else float('inf'))
        gouy_phase = np.arctan2(z, rayleigh)
        wave_number = 2 * np.pi / wavelength

        envelope = np.exp(-rho2 / beam_radius**2) / beam_radius
        phase = np.exp(-1j * wave_number * (z + rho2 / (2 * curvature_radius))
                       + 1j * gouy_phase)
        return envelope * phase

    @property
    def w0(self):
        return self._w0

    @property
    def z(self):
        return self._z

    @property
    def field_type(self):
        return self._field_type
| 29.57234 | 77 | 0.497086 |
7af436513e53f597d76de8edd8372419889046f8 | 6,331 | py | Python | mlrun/datastore/store_resources.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | mlrun/datastore/store_resources.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | mlrun/datastore/store_resources.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa - this is until we take care of the F401 violations with respect to __all__ & sphinx
import mlrun
from mlrun.config import config
from mlrun.utils.helpers import (
is_legacy_artifact,
parse_artifact_uri,
parse_versioned_object_uri,
)
from ..platforms.iguazio import parse_v3io_path
from ..utils import DB_SCHEMA, StorePrefix
from .targets import get_online_target
def is_store_uri(url):
    """Return True if `url` uses the mlrun store scheme (e.g. "store://...")."""
    store_prefix = DB_SCHEMA + "://"
    return url.startswith(store_prefix)
def parse_store_uri(url):
    """Parse a store uri and return (kind, uri suffix).

    Returns (None, "") when `url` is not a store uri.  When the path starts
    with a known kind prefix (e.g. "store://feature-sets/<suffix>") that
    kind and the remainder are returned; otherwise the whole path is
    treated as an artifact uri.
    """
    if not is_store_uri(url):
        return None, ""
    uri = url[len(DB_SCHEMA) + len("://") :].strip("/")
    # str.split never returns an empty list, so the old `len(split) == 0`
    # ValueError branch was unreachable and has been dropped.
    split = uri.split("/", 1)
    if split and StorePrefix.is_prefix(split[0]):
        # Fix: a bare prefix with no path (e.g. "store://feature-sets")
        # previously raised IndexError; return an empty suffix instead.
        return split[0], split[1] if len(split) > 1 else ""
    return StorePrefix.Artifact, uri
def get_store_uri(kind, uri):
    """Build a store uri from a resource kind and a uri suffix."""
    return "{0}://{1}/{2}".format(DB_SCHEMA, kind, uri)
class ResourceCache:
    """Resource cache for real-time pipelines/serving and storey.

    This cache is basic: it has no synchronization or TTL logic.  It holds
    two independent maps: storey Table objects keyed by uri, and store
    resources (artifacts / feature-sets / feature-vectors) keyed by uri.
    The key "." denotes the default entry in both maps.
    """

    def __init__(self):
        # NOTE: "_tabels" is a historic misspelling of "tables"; kept as-is
        # because renaming the attribute is out of scope for a doc change.
        self._tabels = {}
        self._resources = {}

    def cache_table(self, uri, value, is_default=False):
        """Cache storey Table objects; optionally also as the "." default."""
        self._tabels[uri] = value
        if is_default:
            self._tabels["."] = value

    def get_table(self, uri):
        """Get (or lazily create and cache) a storey Table object by uri.

        Resolution order: exact cache hit; in-memory table ("." / "" / "$...");
        v3io path; online target of a store resource.  Anything else raises
        MLRunInvalidArgumentError.

        :raises ImportError: if the optional storey package is missing.
        """
        try:
            from storey import Driver, Table, V3ioDriver
        except ImportError:
            raise ImportError("storey package is not installed, use pip install storey")
        if uri in self._tabels:
            return self._tabels[uri]
        if uri in [".", ""] or uri.startswith("$"):  # $.. indicates in-mem table
            self._tabels[uri] = Table("", Driver())
            return self._tabels[uri]

        if uri.startswith("v3io://") or uri.startswith("v3ios://"):
            # parse_v3io_path rebinds `uri` to the container-relative path,
            # so the cache key below differs from the caller's argument.
            endpoint, uri = parse_v3io_path(uri)
            self._tabels[uri] = Table(
                uri,
                V3ioDriver(webapi=endpoint),
                flush_interval_secs=mlrun.mlconf.feature_store.flush_interval,
            )
            return self._tabels[uri]

        if is_store_uri(uri):
            resource = get_store_resource(uri)
            if resource.kind in [
                mlrun.api.schemas.ObjectKind.feature_set.value,
                mlrun.api.schemas.ObjectKind.feature_vector.value,
            ]:
                target = get_online_target(resource)
                if not target:
                    raise mlrun.errors.MLRunInvalidArgumentError(
                        f"resource {uri} does not have an online data target"
                    )
                self._tabels[uri] = target.get_table_object()
                return self._tabels[uri]

        raise mlrun.errors.MLRunInvalidArgumentError(f"table {uri} not found in cache")

    def cache_resource(self, uri, value, default=False):
        """Cache a store resource (artifact/feature-set/feature-vector)."""
        self._resources[uri] = value
        if default:
            self._resources["."] = value

    def get_resource(self, uri):
        """Get a previously cached resource by uri (KeyError if absent)."""
        return self._resources[uri]

    def resource_getter(self, db=None, secrets=None):
        """Return a get_store_resource wrapper backed by this object cache."""

        def _get_store_resource(uri, use_cache=True):
            """Get an mlrun store resource object.

            :param use_cache: indicate if we read from the local cache or
                              from the DB (the "." default is always served
                              from cache)
            """
            if (uri == "." or use_cache) and uri in self._resources:
                return self._resources[uri]
            resource = get_store_resource(uri, db, secrets=secrets)
            if use_cache:
                self._resources[uri] = resource
            return resource

        return _get_store_resource
def get_store_resource(uri, db=None, secrets=None, project=None):
    """Get a store resource object (feature-set, feature-vector, artifact,
    or datastore object) by its store uri.

    The uri kind decides the lookup path; non-store uris fall through to the
    generic datastore object resolver.
    """
    db = db or mlrun.get_run_db(secrets=secrets)
    kind, uri = parse_store_uri(uri)
    if kind == StorePrefix.FeatureSet:
        project, name, tag, uid = parse_versioned_object_uri(
            uri, project or config.default_project
        )
        return db.get_feature_set(name, project, tag, uid)

    elif kind == StorePrefix.FeatureVector:
        project, name, tag, uid = parse_versioned_object_uri(
            uri, project or config.default_project
        )
        return db.get_feature_vector(name, project, tag, uid)

    elif StorePrefix.is_artifact(kind):
        project, key, iteration, tag, uid = parse_artifact_uri(
            uri, project or config.default_project
        )
        resource = db.read_artifact(
            key, project=project, tag=tag or uid, iter=iteration
        )
        if resource.get("kind", "") == "link":
            # Follow the link to the concrete artifact it points at.
            # todo: support other link types (not just iter); move this to
            # the db/api layer
            link_iteration = (
                resource.get("link_iteration", 0)
                if is_legacy_artifact(resource)
                else resource["spec"].get("link_iteration", 0)
            )
            resource = db.read_artifact(
                key,
                tag=tag,
                iter=link_iteration,
                project=project,
            )
        if resource:
            # import here to avoid circular imports
            from mlrun.artifacts import dict_to_artifact

            return dict_to_artifact(resource)
        # NOTE(review): a falsy `resource` falls through and returns None
        # implicitly -- confirm callers handle a missing artifact.

    else:
        stores = mlrun.store_manager.set(secrets, db=db)
        return stores.object(url=uri)
| 34.977901 | 100 | 0.618544 |
39154d4bb9405dcae342c683b31171e25b444956 | 4,796 | py | Python | sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_07_01/models/_app_platform_management_client_enums.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 2 | 2019-08-23T21:14:00.000Z | 2021-09-07T18:32:34.000Z | sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_07_01/models/_app_platform_management_client_enums.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z | sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2020_07_01/models/_app_platform_management_client_enums.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 1 | 2021-12-18T20:01:22.000Z | 2021-12-18T20:01:22.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
    """Enum metaclass making member lookup by name case-insensitive
    (autorest-generated helper shared by all enums in this module)."""

    def __getitem__(self, name):
        # Indexing (Enum['name']) is normalized to upper case.
        return super().__getitem__(name.upper())

    def __getattr__(cls, name):
        """Return the enum member matching `name`.

        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        try:
            return cls._member_map_[name.upper()]
        except KeyError:
            raise AttributeError(name)
class AppResourceProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Provisioning state of the App (str-valued; name lookup is case-insensitive)."""

    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CREATING = "Creating"
    UPDATING = "Updating"
class ConfigServerState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """State of the config server."""

    NOT_AVAILABLE = "NotAvailable"
    DELETED = "Deleted"
    FAILED = "Failed"
    SUCCEEDED = "Succeeded"
    UPDATING = "Updating"
class DeploymentResourceProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Provisioning state of the Deployment."""

    CREATING = "Creating"
    UPDATING = "Updating"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
class DeploymentResourceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Runtime status of the Deployment."""

    UNKNOWN = "Unknown"
    STOPPED = "Stopped"
    RUNNING = "Running"
    FAILED = "Failed"
    ALLOCATING = "Allocating"
    UPGRADING = "Upgrading"
    COMPILING = "Compiling"
class ManagedIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Type of the managed identity."""

    NONE = "None"
    SYSTEM_ASSIGNED = "SystemAssigned"
    USER_ASSIGNED = "UserAssigned"
    # Combined value uses a comma-separated string as defined by the service.
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
class MonitoringSettingState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """State of the Monitoring Setting."""

    NOT_AVAILABLE = "NotAvailable"
    FAILED = "Failed"
    SUCCEEDED = "Succeeded"
    UPDATING = "Updating"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Provisioning state of the Service."""

    CREATING = "Creating"
    UPDATING = "Updating"
    DELETING = "Deleting"
    DELETED = "Deleted"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    MOVING = "Moving"
    MOVED = "Moved"
    MOVE_FAILED = "MoveFailed"
class ResourceSkuRestrictionsReasonCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Reason for a SKU restriction. Possible values include: 'QuotaId',
    'NotAvailableForSubscription'.
    """

    QUOTA_ID = "QuotaId"
    NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription"
class ResourceSkuRestrictionsType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Type of SKU restriction. Possible values include: 'Location', 'Zone'."""

    LOCATION = "Location"
    ZONE = "Zone"
class RuntimeVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Runtime version."""

    JAVA8 = "Java_8"
    JAVA11 = "Java_11"
    NET_CORE31 = "NetCore_31"
class SkuScaleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Gets or sets the type of the scale."""

    NONE = "None"
    MANUAL = "Manual"
    AUTOMATIC = "Automatic"
class SupportedRuntimePlatform(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The platform of this runtime version (possible values: "Java" or ".NET")."""

    JAVA = "Java"
    # Leading underscore because ".NET Core" cannot start an identifier.
    _NET_CORE = ".NET Core"
class SupportedRuntimeValue(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The raw value which could be passed to deployment CRUD operations."""

    JAVA8 = "Java_8"
    JAVA11 = "Java_11"
    NET_CORE31 = "NetCore_31"
class TestKeyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Type of the test key."""

    PRIMARY = "Primary"
    SECONDARY = "Secondary"
class UserSourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Type of the source uploaded."""

    JAR = "Jar"
    NET_CORE_ZIP = "NetCoreZip"
    SOURCE = "Source"
| 29.78882 | 95 | 0.673686 |
9b03a95ea6a4c636187db6f4f6dd3515d087d717 | 2,693 | py | Python | python/paddle/fluid/tests/unittests/test_clip_op.py | jinyuKING/Paddle | 1f4d46fa885448af4ce45827eae3c609280d4e34 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_clip_op.py | jinyuKING/Paddle | 1f4d46fa885448af4ce45827eae3c609280d4e34 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_clip_op.py | jinyuKING/Paddle | 1f4d46fa885448af4ce45827eae3c609280d4e34 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from op_test import OpTest
class TestClipOp(OpTest):
    """Forward/backward check for the `clip` op on a random input."""

    def setUp(self):
        self.max_relative_error = 0.006
        self.initTestCase()

        data = np.random.random(self.shape).astype("float32")
        # Nudge values that sit within the gradient-check tolerance of the
        # clip boundaries (where the clip gradient is not smooth).
        for bound in (self.min, self.max):
            data[np.abs(data - bound) < self.max_relative_error] = 0.5

        self.op_type = "clip"
        self.inputs = {'X': data}
        self.attrs = {'min': self.min, 'max': self.max}
        self.outputs = {'Out': np.clip(data, self.min, self.max)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out')

    def initTestCase(self):
        # Base case: 2-D input clipped into [0.1, 0.7].
        self.shape = (10, 10)
        self.max = 0.7
        self.min = 0.1
class TestCase1(TestClipOp):
    # 3-D input; min of 0.0 never triggers (inputs are drawn from [0, 1)).
    def initTestCase(self):
        self.shape = (8, 16, 8)
        self.max = 0.7
        self.min = 0.0
class TestCase2(TestClipOp):
    # Full [0, 1] range: clipping is effectively a no-op for random inputs.
    def initTestCase(self):
        self.shape = (8, 16)
        self.max = 1.0
        self.min = 0.0
class TestCase3(TestClipOp):
    # 3-D input with both bounds strictly inside the input range.
    def initTestCase(self):
        self.shape = (4, 8, 16)
        self.max = 0.7
        self.min = 0.2
class TestClipOpError(unittest.TestCase):
    """Argument validation: clip must reject raw ndarrays and int32 inputs."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            input_data = np.random.random((2, 4)).astype("float32")

            # A bare numpy array is not a Variable.
            with self.assertRaises(TypeError):
                fluid.layers.clip(x=input_data, min=-1.0, max=1.0)

            # int32 is not an accepted dtype for clip.
            with self.assertRaises(TypeError):
                x2 = fluid.layers.data(name='x2', shape=[1], dtype='int32')
                fluid.layers.clip(x=x2, min=-1.0, max=1.0)
if __name__ == '__main__':
    # Run all test cases in this module when executed directly.
    unittest.main()
| 28.956989 | 75 | 0.621983 |
9eb17ca027b2f2a21cc7db7a0cd5521cfd12da79 | 32,318 | py | Python | src/vendor/geniv3rpc/g3rpc/genivthree.py | GENI-NSF/geni-soil | e3dcec0bd5f31db2d69a3db652da15eb0baf774f | [
"BSD-3-Clause"
] | null | null | null | src/vendor/geniv3rpc/g3rpc/genivthree.py | GENI-NSF/geni-soil | e3dcec0bd5f31db2d69a3db652da15eb0baf774f | [
"BSD-3-Clause"
] | null | null | null | src/vendor/geniv3rpc/g3rpc/genivthree.py | GENI-NSF/geni-soil | e3dcec0bd5f31db2d69a3db652da15eb0baf774f | [
"BSD-3-Clause"
] | null | null | null | import os, os.path
import urllib2
import traceback
from datetime import datetime
from dateutil import parser as dateparser
from lxml import etree
from lxml.builder import ElementMaker
import ext.geni
import ext.sfa.trust.gid as gid
import amsoil.core.pluginmanager as pm
from amsoil.core import serviceinterface
from amsoil.config import ROOT_PATH
import amsoil.core.log
logger=amsoil.core.log.getLogger('geniv3rpc')
from amsoil.config import expand_amsoil_path
from exceptions import *
xmlrpc = pm.getService('xmlrpc')
class GENIv3Handler(xmlrpc.Dispatcher):
    # Timestamp format for all geni_expires values returned to clients.
    # NOTE(review): despite the name, this uses a space between date and
    # time; RFC 3339 mandates a 'T' separator -- confirm clients accept it.
    RFC3339_FORMAT_STRING = '%Y-%m-%d %H:%M:%S.%fZ'

    def __init__(self):
        # The delegate (the object implementing the actual AM logic) is
        # injected later via setDelegate().
        super(GENIv3Handler, self).__init__(logger)
        self._delegate = None
    @serviceinterface
    def setDelegate(self, geniv3delegate):
        # Inject the delegate object that all API methods forward to.
        self._delegate = geniv3delegate
    @serviceinterface
    def getDelegate(self):
        # Return the delegate set via setDelegate() (None if never set).
        return self._delegate
# RSPEC3_NAMESPACE= 'http://www.geni.net/resources/rspec/3'
def GetVersion(self):
"""Returns the version of this interface.
This method can be hard coded, since we are actually setting up the GENI v3 API, only.
For the RSpec extensions, we ask the delegate."""
# no authentication necessary
try:
request_extensions = self._delegate.get_request_extensions_list()
ad_extensions = self._delegate.get_ad_extensions_list()
allocation_mode = self._delegate.get_allocation_mode()
is_single_allocation = self._delegate.is_single_allocation()
except Exception as e:
return self._errorReturn(e)
request_rspec_versions = [
{ 'type' : 'geni', 'version' : '3', 'schema' : 'http://www.geni.net/resources/rspec/3/request.xsd', 'namespace' : 'http://www.geni.net/resources/rspec/3', 'extensions' : request_extensions},]
ad_rspec_versions = [
{ 'type' : 'geni', 'version' : '3', 'schema' : 'http://www.geni.net/resources/rspec/3/ad.xsd', 'namespace' : 'http://www.geni.net/resources/rspec/3', 'extensions' : ad_extensions },]
credential_types = { 'geni_type' : 'geni_sfa', 'geni_version' : '3' }
return self._successReturn({
'geni_api' : '3',
'geni_api_versions' : { '3' : '/RPC2' }, # this should be an absolute URL
'geni_request_rspec_versions' : request_rspec_versions,
'geni_ad_rspec_versions' : ad_rspec_versions,
'geni_credential_types' : credential_types,
'geni_single_allocation' : is_single_allocation,
'geni_allocate' : allocation_mode
})
def ListResources(self, credentials, options):
"""Delegates the call and unwraps the needed parameter. Also takes care of the compression option."""
# interpret options
geni_available = bool(options['geni_available']) if ('geni_available' in options) else False
geni_compress = bool(options['geni_compress']) if ('geni_compress' in options) else False
# check version and delegate
try:
self._checkRSpecVersion(options['geni_rspec_version'])
result = self._delegate.list_resources(self.requestCertificate(), credentials, geni_available)
except Exception as e:
return self._errorReturn(e)
# compress and return
if geni_compress:
result = base64.b64encode(zlib.compress(result))
return self._successReturn(result)
def Describe(self, urns, credentials, options):
"""Delegates the call and unwraps the needed parameter. Also takes care of the compression option."""
# some duplication with above
geni_compress = bool(options['geni_compress']) if ('geni_compress' in options) else False
try:
self._checkRSpecVersion(options['geni_rspec_version'])
result = self._delegate.describe(urns, self.requestCertificate(), credentials)
except Exception as e:
return self._errorReturn(e)
if geni_compress:
result = base64.b64encode(zlib.compress(result))
return self._successReturn(result)
def Allocate(self, slice_urn, credentials, rspec, options):
"""Delegates the call and unwraps the needed parameter. Also converts the incoming timestamp to python and the outgoing to geni compliant date format."""
geni_end_time = self._str2datetime(options['geni_end_time']) if ('geni_end_time' in options) else None
# TODO check the end_time against the duration of the credential
try:
# delegate
result_rspec, result_sliver_list = self._delegate.allocate(slice_urn, self.requestCertificate(), credentials, rspec, geni_end_time)
# change datetime's to strings
result = { 'geni_rspec' : result_rspec, 'geni_slivers' : self._convertExpiresDate(result_sliver_list) }
except Exception as e:
return self._errorReturn(e)
return self._successReturn(result)
def Renew(self, urns, credentials, expiration_time_str, options):
geni_best_effort = bool(options['geni_best_effort']) if ('geni_best_effort' in options) else True
expiration_time = self._str2datetime(expiration_time_str)
try:
# delegate
result = self._delegate.renew(urns, self.requestCertificate(), credentials, expiration_time, geni_best_effort)
# change datetime's to strings
result = self._convertExpiresDate(result)
except Exception as e:
return self._errorReturn(e)
return self._successReturn(result)
def Provision(self, urns, credentials, options):
geni_best_effort = bool(options['geni_best_effort']) if ('geni_best_effort' in options) else True
geni_end_time = self._str2datetime(options['geni_end_time']) if ('geni_end_time' in options) else None
geni_users = options['geni_users'] if ('geni_users' in options) else []
# TODO check the end_time against the duration of the credential
try:
self._checkRSpecVersion(options['geni_rspec_version'])
result_rspec, result_sliver_list = self._delegate.provision(urns, self.requestCertificate(), credentials, geni_best_effort, geni_end_time, geni_users)
result = { 'geni_rspec' : result_rspec, 'geni_slivers' : self._convertExpiresDate(result_sliver_list) }
except Exception as e:
return self._errorReturn(e)
return self._successReturn(result)
def Status(self, urns, credentials, options):
try:
result_sliceurn, result_sliver_list = self._delegate.status(urns, self.requestCertificate(), credentials)
result = { 'geni_urn' : result_sliceurn, 'geni_slivers' : self._convertExpiresDate(result_sliver_list) }
except Exception as e:
return self._errorReturn(e)
return self._successReturn(result)
def PerformOperationalAction(self, urns, credentials, action, options):
geni_best_effort = bool(options['geni_best_effort']) if ('geni_best_effort' in options) else False
try:
result = self._delegate.perform_operational_action(urns, self.requestCertificate(), credentials, action, geni_best_effort)
result = self._convertExpiresDate(result)
except Exception as e:
return self._errorReturn(e)
return self._successReturn(result)
def Delete(self, urns, credentials, options):
geni_best_effort = bool(options['geni_best_effort']) if ('geni_best_effort' in options) else False
try:
result = self._delegate.delete(urns, self.requestCertificate(), credentials, geni_best_effort)
result = self._convertExpiresDate(result)
except Exception as e:
return self._errorReturn(e)
return self._successReturn(result)
def Shutdown(self, slice_urn, credentials, options):
try:
result = bool(self._delegate.shutdown(slice_urn, self.requestCertificate(), credentials))
except Exception as e:
return self._errorReturn(e)
return self._successReturn(result)
# ---- helper methods
def _datetime2str(self, dt):
return dt.strftime(self.RFC3339_FORMAT_STRING)
def _str2datetime(self, strval):
"""Parses the given date string and converts the timestamp to utc and the date unaware of timezones."""
result = dateparser.parse(strval)
if result:
result = result - result.utcoffset()
result = result.replace(tzinfo=None)
return result
def _convertExpiresDate(self, sliver_list):
for slhash in sliver_list:
if slhash['geni_expires'] == None:
continue
if not isinstance(slhash['geni_expires'], datetime):
raise ValueError("Given geni_expires in sliver_list hash retrieved from delegate's method is not a python datetime object.")
slhash['geni_expires'] = self._datetime2str(slhash['geni_expires'])
return sliver_list
def _checkRSpecVersion(self, rspec_version_option):
if (int(rspec_version_option['version']) != 3) or (rspec_version_option['type'].lower() != 'geni'):
raise GENIv3BadArgsError("Only RSpec 3 supported.")
def _errorReturn(self, e):
"""Assembles a GENI compliant return result for faulty methods."""
if not isinstance(e, GENIv3BaseError): # convert common errors into GENIv3GeneralError
e = GENIv3ServerError(str(e))
# do some logging
logger.error(e)
logger.error(traceback.format_exc())
return { 'geni_api' : 3, 'code' : { 'geni_code' : e.code }, 'output' : str(e) }
def _successReturn(self, result):
"""Assembles a GENI compliant return result for successful methods."""
return { 'geni_api' : 3, 'code' : { 'geni_code' : 0 }, 'value' : result, 'output' : None }
class GENIv3DelegateBase(object):
    """
    Abstract base class for GENI Aggregate Manager API v3 delegates.
    The GENIv3 handler assumes that this class uses RSpec version 3 when interacting with the client.
    General parameters:
    {client_cert} The client's certificate. See [flaskrpcs]XMLRPCDispatcher.requestCertificate(). Also see http://groups.geni.net/geni/wiki/GeniApiCertificates
    {credentials} A list of credentials in the format specified at http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#credentials
    Dates are converted to UTC and then made timezone-unaware (see http://docs.python.org/2/library/datetime.html#datetime.datetime.astimezone).
    """

    ALLOCATION_STATE_UNALLOCATED = 'geni_unallocated'
    """The sliver does not exist. (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#SliverAllocationStates)"""
    ALLOCATION_STATE_ALLOCATED = 'geni_allocated'
    """The sliver is offered/promised, but it does not consume actual resources. This state shall time out at some point in time."""
    ALLOCATION_STATE_PROVISIONED = 'geni_provisioned'
    """The sliver is/has been instantiated. Operational states apply here."""

    OPERATIONAL_STATE_PENDING_ALLOCATION = 'geni_pending_allocation'
    """Required for aggregates to support. A transient state."""
    OPERATIONAL_STATE_NOTREADY = 'geni_notready'
    """Optional. A stable state."""
    OPERATIONAL_STATE_CONFIGURING = 'geni_configuring'
    """Optional. A transient state."""
    OPERATIONAL_STATE_STOPPING = 'geni_stopping'
    """Optional. A transient state."""
    OPERATIONAL_STATE_READY = 'geni_ready'
    """Optional. A stable state."""
    OPERATIONAL_STATE_READY_BUSY = 'geni_ready_busy'
    """Optional. A transient state."""
    OPERATIONAL_STATE_FAILED = 'geni_failed'
    """Optional. A stable state."""

    OPERATIONAL_ACTION_START = 'geni_start'
    """Sliver shall become geni_ready. The AM developer may define more states (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#SliverOperationalActions)"""
    OPERATIONAL_ACTION_RESTART = 'geni_restart'
    """Sliver shall become geni_ready again."""
    OPERATIONAL_ACTION_STOP = 'geni_stop'
    """Sliver shall become geni_notready."""

    def __init__(self):
        super(GENIv3DelegateBase, self).__init__()

    def get_request_extensions_list(self):
        """Not to overwrite by AM developer. Should return a list of request extensions (XSD schemas) to be sent back by GetVersion."""
        return list(self.get_request_extensions_mapping().values())

    def get_request_extensions_mapping(self):
        """Overwrite by AM developer. Should return a dict of namespace names and request extensions (XSD schema's URLs as string).
        Format: {xml_namespace_prefix : namespace_uri, ...}
        """
        return {}

    def get_manifest_extensions_mapping(self):
        """Overwrite by AM developer. Should return a dict of namespace names and manifest extensions (XSD schema's URLs as string).
        Format: {xml_namespace_prefix : namespace_uri, ...}
        """
        return {}

    def get_ad_extensions_list(self):
        """Not to overwrite by AM developer. Should return a list of advertisement extensions (XSD schemas) to be sent back by GetVersion."""
        return list(self.get_ad_extensions_mapping().values())

    def get_ad_extensions_mapping(self):
        """Overwrite by AM developer. Should return a dict of namespace names and advertisement extensions (XSD schema URLs as string) to be sent back by GetVersion.
        Format: {xml_namespace_prefix : namespace_uri, ...}
        """
        return {}

    def is_single_allocation(self):
        """Overwrite by AM developer. Shall return a True or False. When True (not default), and performing one of (Describe, Allocate, Renew, Provision, Delete), such an AM requires you to include either the slice urn or the urn of all the slivers in the same state.
        see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#OperationsonIndividualSlivers"""
        return False

    def get_allocation_mode(self):
        """Overwrite by AM developer. Shall return either 'geni_single', 'geni_disjoint', 'geni_many'.
        It defines whether this AM allows adding slivers to slices at an AM (i.e. calling Allocate multiple times, without first deleting the allocated slivers).
        For description of the options see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#OperationsonIndividualSlivers"""
        return 'geni_single'

    def list_resources(self, client_cert, credentials, geni_available):
        """Overwrite by AM developer. Shall return an RSpec version 3 (advertisement) or raise an GENIv3...Error.
        If {geni_available} is set, only return availabe resources.
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#ListResources"""
        raise GENIv3GeneralError("Method not implemented yet")

    def describe(self, urns, client_cert, credentials):
        """Overwrite by AM developer. Shall return an RSpec version 3 (manifest) or raise an GENIv3...Error.
        {urns} contains a list of slice identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Describe"""
        raise GENIv3GeneralError("Method not implemented yet")

    def allocate(self, slice_urn, client_cert, credentials, rspec, end_time=None):
        """Overwrite by AM developer.
        Shall return the two following values or raise an GENIv3...Error.
        - a RSpec version 3 (manifest) of newly allocated slivers
        - a list of slivers of the format:
            [{'geni_sliver_urn' : String,
              'geni_expires'    : Python-Date,
              'geni_allocation_status' : one of the ALLOCATION_STATE_xxx},
             ...]
        Please return like so: "return respecs, slivers"
        {slice_urn} contains a slice identifier (e.g. 'urn:publicid:IDN+ofelia:eict:gcf+slice+myslice').
        {end_time} Optional. A python datetime object which determines the desired expiry date of this allocation (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#geni_end_time).
        >>> This is the first part of what CreateSliver used to do in previous versions of the AM API. The second part is now done by Provision, and the final part is done by PerformOperationalAction.
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Allocate"""
        raise GENIv3GeneralError("Method not implemented yet")

    def renew(self, urns, client_cert, credentials, expiration_time, best_effort):
        """Overwrite by AM developer.
        Shall return a list of slivers of the following format or raise an GENIv3...Error:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'geni_expires'            : Python-Date,
              'geni_error'              : optional String},
             ...]
        {urns} contains a list of slice identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {expiration_time} is a python datetime object
        {best_effort} determines if the method shall fail in case that not all of the urns can be renewed (best_effort=False).
        If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Renew"""
        raise GENIv3GeneralError("Method not implemented yet")

    def provision(self, urns, client_cert, credentials, best_effort, end_time, geni_users):
        """Overwrite by AM developer.
        Shall return the two following values or raise an GENIv3...Error.
        - a RSpec version 3 (manifest) of slivers
        - a list of slivers of the format:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'geni_expires'            : Python-Date,
              'geni_error'              : optional String},
             ...]
        Please return like so: "return respecs, slivers"
        {urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {best_effort} determines if the method shall fail in case that not all of the urns can be provisioned (best_effort=False)
        {end_time} Optional. A python datetime object which determines the desired expiry date of this provision (see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#geni_end_time).
        {geni_users} is a list of the format: [ { 'urn' : ..., 'keys' : [sshkey, ...]}, ...]
        If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Provision"""
        raise GENIv3GeneralError("Method not implemented yet")

    def status(self, urns, client_cert, credentials):
        """Overwrite by AM developer.
        Shall return the two following values or raise an GENIv3...Error.
        - a slice urn
        - a list of slivers of the format:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'geni_expires'            : Python-Date,
              'geni_error'              : optional String},
             ...]
        Please return like so: "return slice_urn, slivers"
        {urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Status"""
        raise GENIv3GeneralError("Method not implemented yet")

    def perform_operational_action(self, urns, client_cert, credentials, action, best_effort):
        """Overwrite by AM developer.
        Shall return a list of slivers of the following format or raise an GENIv3...Error:
            [{'geni_sliver_urn'         : String,
              'geni_allocation_status'  : one of the ALLOCATION_STATE_xxx,
              'geni_operational_status' : one of the OPERATIONAL_STATE_xxx,
              'geni_expires'            : Python-Date,
              'geni_error'              : optional String},
             ...]
        {urns} contains a list of slice or sliver identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {action} an arbitrary string, but the following should be possible: "geni_start", "geni_stop", "geni_restart"
        {best_effort} determines if the method shall fail in case that not all of the urns can be changed (best_effort=False)
        If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#PerformOperationalAction"""
        raise GENIv3GeneralError("Method not implemented yet")

    def delete(self, urns, client_cert, credentials, best_effort):
        """Overwrite by AM developer.
        Shall return a list of slivers of the following format or raise an GENIv3...Error:
            [{'geni_sliver_urn'        : String,
              'geni_allocation_status' : one of the ALLOCATION_STATE_xxx,
              'geni_expires'           : Python-Date,
              'geni_error'             : optional String},
             ...]
        {urns} contains a list of slice/resource identifiers (e.g. ['urn:publicid:IDN+ofelia:eict:gcf+slice+myslice']).
        {best_effort} determines if the method shall fail in case that not all of the urns can be deleted (best_effort=False)
        If the transactional behaviour of {best_effort}=False can not be provided, throw a GENIv3OperationUnsupportedError.
        For more information on possible {urns} see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Delete"""
        raise GENIv3GeneralError("Method not implemented yet")

    def shutdown(self, slice_urn, client_cert, credentials):
        """Overwrite by AM developer.
        Shall return True or False or raise an GENIv3...Error.
        For full description see http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Shutdown"""
        raise GENIv3GeneralError("Method not implemented yet")

    @serviceinterface
    def auth(self, client_cert, credentials, slice_urn=None, privileges=()):
        """
        This method authenticates and authorizes.
        It returns the client's urn, uuid, email (extracted from the {client_cert}). Example call: "urn, uuid, email = self.auth(...)"
        Be aware, the email is not required in the certificate, hence it might be empty.
        If the validation fails, an GENIv3ForbiddenError is thrown.
        The credentials are checked so the user has all the required privileges (success if any credential fits all privileges).
        The client certificate is not checked: this is usually done via the webserver configuration.
        This method only treats certificates of type 'geni_sfa'.
        Here a list of possible privileges (format: right_in_credential: [privilege1, privilege2, ...]):
            "authority" : ["register", "remove", "update", "resolve", "list", "getcredential", "*"],
            "refresh"   : ["remove", "update"],
            "resolve"   : ["resolve", "list", "getcredential"],
            "sa"        : ["getticket", "redeemslice", "redeemticket", "createslice", "createsliver", "deleteslice", "deletesliver", "updateslice",
                           "getsliceresources", "getticket", "loanresources", "stopslice", "startslice", "renewsliver",
                           "deleteslice", "deletesliver", "resetslice", "listslices", "listnodes", "getpolicy", "sliverstatus"],
            "embed"     : ["getticket", "redeemslice", "redeemticket", "createslice", "createsliver", "renewsliver", "deleteslice",
                           "deletesliver", "updateslice", "sliverstatus", "getsliceresources", "shutdown"],
            "bind"      : ["getticket", "loanresources", "redeemticket"],
            "control"   : ["updateslice", "createslice", "createsliver", "renewsliver", "sliverstatus", "stopslice", "startslice",
                           "deleteslice", "deletesliver", "resetslice", "getsliceresources", "getgids"],
            "info"      : ["listslices", "listnodes", "getpolicy"],
            "ma"        : ["setbootstate", "getbootstate", "reboot", "getgids", "gettrustedcerts"],
            "operator"  : ["gettrustedcerts", "getgids"],
            "*"         : ["createsliver", "deletesliver", "sliverstatus", "renewsliver", "shutdown"]
        When using the gcf clearinghouse implementation the credentials will have the rights:
        - user: "refresh", "resolve", "info" (which resolves to the privileges: "remove", "update", "resolve", "list", "getcredential", "listslices", "listnodes", "getpolicy").
        - slice: "refresh", "embed", "bind", "control", "info" (well, do the resolving yourself...)
        """
        # check variables
        if not isinstance(privileges, tuple):
            raise TypeError("Privileges need to be a tuple.")
        # collect credentials (only GENI certs, version ignored)
        geni_credentials = []
        for c in credentials:
            if c['geni_type'] == 'geni_sfa':
                geni_credentials.append(c['geni_value'])
        # get the cert_root
        config = pm.getService("config")
        cert_root = expand_amsoil_path(config.get("geniv3rpc.cert_root"))
        if client_cert is None:
            # work around if the certificate could not be acquired due to the shortcomings of the werkzeug library
            if config.get("flask.debug"):
                import ext.sfa.trust.credential as cred
                client_cert = cred.Credential(string=geni_credentials[0]).gidCaller.save_to_string(save_parents=True)
            else:
                raise GENIv3ForbiddenError("Could not determine the client SSL certificate")
        # test the credential
        try:
            cred_verifier = ext.geni.CredentialVerifier(cert_root)
            cred_verifier.verify_from_strings(client_cert, geni_credentials, slice_urn, privileges)
        except Exception as e:
            raise GENIv3ForbiddenError(str(e))
        user_gid = gid.GID(string=client_cert)
        user_urn = user_gid.get_urn()
        user_uuid = user_gid.get_uuid()
        user_email = user_gid.get_email()
        return user_urn, user_uuid, user_email  # TODO document return

    @serviceinterface
    def urn_type(self, urn):
        """Returns the type of the urn (e.g. slice, sliver).
        For the possible types see: http://groups.geni.net/geni/wiki/GeniApiIdentifiers#ExamplesandUsage"""
        return urn.split('+')[2].strip()

    @serviceinterface
    def lxml_ad_root(self):
        """Returns a xml root node with the namespace extensions specified by self.get_ad_extensions_mapping."""
        return etree.Element('rspec', self.get_ad_extensions_mapping(), type='advertisement')

    # NOTE(review): unlike the sibling lxml_* helpers this one carries no
    # @serviceinterface decorator — confirm whether that omission is intended.
    def lxml_manifest_root(self):
        """Returns a xml root node with the namespace extensions specified by self.get_manifest_extensions_mapping."""
        return etree.Element('rspec', self.get_manifest_extensions_mapping(), type='manifest')

    @serviceinterface
    def lxml_to_string(self, rspec):
        """Converts a lxml root node to string (for returning to the client)."""
        return etree.tostring(rspec, pretty_print=True)

    @serviceinterface
    def lxml_ad_element_maker(self, prefix):
        """Returns a lxml.builder.ElementMaker configured for advertisements and the namespace given by {prefix}."""
        ext = self.get_ad_extensions_mapping()
        return ElementMaker(namespace=ext[prefix], nsmap=ext)

    @serviceinterface
    def lxml_manifest_element_maker(self, prefix):
        """Returns a lxml.builder.ElementMaker configured for manifests and the namespace given by {prefix}."""
        ext = self.get_manifest_extensions_mapping()
        return ElementMaker(namespace=ext[prefix], nsmap=ext)

    @serviceinterface
    def lxml_parse_rspec(self, rspec_string):
        """Returns the root element of the given {rspec_string} as lxml.Element.
        If the config key is set, the rspec is validated with the schemas found at the URLs specified in schemaLocation of the the given RSpec."""
        # parse
        rspec_root = etree.fromstring(rspec_string)
        # validate RSpec against specified schemaLocations
        config = pm.getService("config")
        should_validate = config.get("geniv3rpc.rspec_validation")
        if should_validate:
            schema_locations = rspec_root.get("{http://www.w3.org/2001/XMLSchema-instance}schemaLocation")
            if schema_locations:
                # schemaLocation is a whitespace-separated list of URLs
                schema_location_list = [loc.strip() for loc in schema_locations.split(" ")]
                for sl in schema_location_list:
                    try:
                        xmlschema_contents = urllib2.urlopen(sl)  # try to download the schema
                        xmlschema_doc = etree.parse(xmlschema_contents)
                        xmlschema = etree.XMLSchema(xmlschema_doc)
                        xmlschema.validate(rspec_root)
                    except Exception as e:
                        # validation problems are logged but do not abort parsing
                        logger.warning("RSpec validation failed (%s: %s)" % (sl, str(e),))
            else:
                logger.warning("RSpec does not specify any schema locations")
        return rspec_root

    @serviceinterface
    def lxml_elm_has_request_prefix(self, lxml_elm, ns_name):
        """Returns True if {lxml_elm}'s tag lies in the request namespace registered under {ns_name}."""
        return str(lxml_elm.tag).startswith("{%s}" % (self.get_request_extensions_mapping()[ns_name],))

    # def lxml_request_prefix(self, ns_name):
    #     """Returns the full lxml-prefix: Wraps the namespace looked up in the get_request_extensions_mapping (see above) wrapped in curly brackets (useful for lxml)."""
    #     return "{%s}" % (self.get_request_extensions_mapping()[ns_name],)
    #
    # @serviceinterface
    # def lxml_mainifest_prefix(self, ns_name):
    #     """See: lxml_request_prefix() (here for manifest)"""
    #     return "{%s}" % (self.get_manifest_extensions_mapping()[ns_name],)
    #
    # @serviceinterface
    # def lxml_ad_prefix(self, ns_name):
    #     """See: lxml_request_prefix() (here for advertisement)"""
    #     return "{%s}" % (self.get_ad_extensions_mapping()[ns_name],)

    @serviceinterface
    def lxml_elm_equals_request_tag(self, lxml_elm, ns_name, tagname):
        """Determines if the given tag by {ns_name} and {tagname} equals lxml_tag. The namespace URI is looked up via get_request_extensions_mapping()['ns_name']"""
        return ("{%s}%s" % (self.get_request_extensions_mapping()[ns_name], tagname)) == str(lxml_elm.tag)
| 55.624785 | 267 | 0.666935 |
3aaa1ee678ac916fa1903d14c39a3c01ea8d08f7 | 21,387 | py | Python | tests/test_ext.py | wpilibsuite/sphinxext-rediraffe | d8961c2402015dafa6039ed27d973ad835d7fc09 | [
"MIT"
] | 19 | 2020-08-30T05:52:49.000Z | 2022-02-19T21:57:57.000Z | tests/test_ext.py | wpilibsuite/sphinxext-rediraffe | d8961c2402015dafa6039ed27d973ad835d7fc09 | [
"MIT"
] | 29 | 2020-08-21T01:36:14.000Z | 2021-09-16T12:20:33.000Z | tests/test_ext.py | wpilibsuite/sphinxext-rediraffe | d8961c2402015dafa6039ed27d973ad835d7fc09 | [
"MIT"
] | 7 | 2020-08-18T18:22:51.000Z | 2021-05-22T09:54:27.000Z | import pytest
import seleniumbase
from seleniumbase import BaseCase
from sphinx.testing.path import path
from sphinx.application import Sphinx
from sphinx.errors import ExtensionError
from pathlib import Path
import shutil
import logging
from conftest import rel2url
@pytest.fixture(scope="module")
def rootdir():
    """Module-scoped fixture: absolute path to the extension test roots."""
    here = path(__file__).parent.abspath()
    return here / "roots/ext"
class TestExtHtml:
    """End-to-end tests for the rediraffe extension under the 'html' builder.

    Each test builds a Sphinx project from a named test root and then either
    checks the exit status or (via the ensure_redirect fixture) opens the
    generated redirect page in a browser and verifies it lands on the target.
    """
    @pytest.mark.sphinx("html", testroot="no_redirects")
    def test_no_redirects(self, app: Sphinx):
        # A project with no redirects configured should simply build cleanly.
        app.build()
        assert app.statuscode == 0
    @pytest.mark.sphinx("html", testroot="simple")
    def test_simple(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("another.html", "index.html")
    @pytest.mark.sphinx("html", testroot="simple")
    def test_simple_rebuild(self, app_params, make_app, ensure_redirect):
        # Build twice with a clean outdir first to exercise incremental rebuilds.
        # NOTE(review): the dirhtml twin passes freshenv=False here — confirm
        # whether this test intends the same.
        args, kwargs = app_params
        app = make_app(*args, **kwargs)
        if Path(app.outdir).exists():
            shutil.rmtree(Path(app.outdir))
        app.build()
        assert app.statuscode == 0
        app2 = make_app(*args, **kwargs)
        app2.build()
        assert app2.statuscode == 0
        ensure_redirect("another.html", "index.html")
    @pytest.mark.sphinx("html", testroot="no_cycle")
    def test_no_cycle(self, app: Sphinx, ensure_redirect):
        # Chained redirects (a -> b -> index) are collapsed, not cyclic.
        app.build()
        assert app.statuscode == 0
        ensure_redirect("a.html", "index.html")
        ensure_redirect("b.html", "index.html")
    @pytest.mark.sphinx("html", testroot="cycle")
    def test_cycle(self, app: Sphinx):
        # A redirect cycle must abort the build with an ExtensionError.
        with pytest.raises(ExtensionError):
            app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("html", testroot="nested")
    def test_nested(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("tof1.html", "docs/folder1/f1.html")
        ensure_redirect("docs/folder1/tof1.html", "docs/folder1/f1.html")
        ensure_redirect("docs/folder1/tof2.html", "docs/folder2/f2.html")
        ensure_redirect("docs/folder2/toindex.html", "index.html")
        ensure_redirect("totoindex.html", "index.html")
    @pytest.mark.sphinx("html", testroot="backslashes")
    def test_backslashes(self, app: Sphinx, ensure_redirect):
        # Windows-style backslash paths in the redirect file must be handled.
        app.build()
        assert app.statuscode == 0
        ensure_redirect("tof1.html", "docs/folder1/f1.html")
        ensure_redirect("docs/folder1/tof1.html", "docs/folder1/f1.html")
        ensure_redirect("docs/folder1/tof2.html", "docs/folder2/f2.html")
        ensure_redirect("docs/folder2/toindex.html", "index.html")
        ensure_redirect("totoindex.html", "index.html")
    @pytest.mark.sphinx("html", testroot="dot_in_filename")
    def test_dot_in_filename(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("docs/x.y.z.html", "docs/a.b.c.html")
    @pytest.mark.sphinx("html", testroot="mixed_slashes")
    def test_mixed_slashes(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("tof1.html", "docs/folder1/f1.html")
        ensure_redirect("docs/folder1/tof1.html", "docs/folder1/f1.html")
        ensure_redirect("docs/folder1/tof2.html", "docs/folder2/f2.html")
        ensure_redirect("docs/folder2/toindex.html", "index.html")
        ensure_redirect("totoindex.html", "index.html")
    @pytest.mark.sphinx("html", testroot="link_redirected_twice")
    def test_link_redirected_twice(self, app: Sphinx):
        with pytest.raises(ExtensionError):
            app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("html", testroot="link_redirected_to_nonexistant_file")
    def test_link_redirected_to_nonexistant_file(self, app: Sphinx):
        app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("html", testroot="existing_link_redirected")
    def test_existing_link_redirected(self, app: Sphinx):
        # Redirecting a page that still exists in the build is an error.
        app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("html", testroot="bad_rediraffe_file")
    def test_bad_rediraffe_file(self, app: Sphinx):
        app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("html", testroot="no_rediraffe_file")
    def test_no_rediraffe_file(self, app: Sphinx):
        # Missing redirects file is only a warning, not a build failure.
        app.build()
        assert app.statuscode == 0
        assert "rediraffe was not given redirects to process" in app._warning.getvalue()
    @pytest.mark.sphinx("html", testroot="redirect_from_deleted_folder")
    def test_redirect_from_deleted_folder(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("deletedfolder/another.html", "index.html")
        ensure_redirect("deletedfolder/deletedfolder2/another.html", "index.html")
    @pytest.mark.sphinx("html", testroot="complex")
    def test_complex(self, app: Sphinx, ensure_redirect):
        # Large redirect graph (list form): many chains all resolving to leaves.
        app.build()
        assert app.statuscode == 0
        ensure_redirect("a.html", "e.html")
        ensure_redirect("b.html", "e.html")
        ensure_redirect("c.html", "e.html")
        ensure_redirect("d.html", "e.html")
        ensure_redirect("f.html", "e.html")
        ensure_redirect("g.html", "e.html")
        ensure_redirect("h.html", "e.html")
        ensure_redirect("i.html", "j.html")
        ensure_redirect("k.html", "l.html")
        ensure_redirect("m.html", "o.html")
        ensure_redirect("n.html", "o.html")
        ensure_redirect("q.html", "z.html")
        ensure_redirect("r.html", "z.html")
        ensure_redirect("s.html", "z.html")
        ensure_redirect("t.html", "z.html")
        ensure_redirect("u.html", "z.html")
        ensure_redirect("v.html", "z.html")
        ensure_redirect("w.html", "z.html")
        ensure_redirect("x.html", "z.html")
        ensure_redirect("y.html", "z.html")
        ensure_redirect("F1/1.html", "z.html")
        ensure_redirect("F1/2.html", "z.html")
        ensure_redirect("F2/1.html", "z.html")
        ensure_redirect("F5/F4/F3/F2/F1/1.html", "index.html")
    @pytest.mark.sphinx("html", testroot="complex_dict")
    def test_complex_dict(self, app: Sphinx, ensure_redirect):
        # Same graph as test_complex but configured via the dict form.
        app.build()
        assert app.statuscode == 0
        ensure_redirect("a.html", "e.html")
        ensure_redirect("b.html", "e.html")
        ensure_redirect("c.html", "e.html")
        ensure_redirect("d.html", "e.html")
        ensure_redirect("f.html", "e.html")
        ensure_redirect("g.html", "e.html")
        ensure_redirect("h.html", "e.html")
        ensure_redirect("i.html", "j.html")
        ensure_redirect("k.html", "l.html")
        ensure_redirect("m.html", "o.html")
        ensure_redirect("n.html", "o.html")
        ensure_redirect("q.html", "z.html")
        ensure_redirect("r.html", "z.html")
        ensure_redirect("s.html", "z.html")
        ensure_redirect("t.html", "z.html")
        ensure_redirect("u.html", "z.html")
        ensure_redirect("v.html", "z.html")
        ensure_redirect("w.html", "z.html")
        ensure_redirect("x.html", "z.html")
        ensure_redirect("y.html", "z.html")
        ensure_redirect("F1/1.html", "z.html")
        ensure_redirect("F1/2.html", "z.html")
        ensure_redirect("F2/1.html", "z.html")
        ensure_redirect("F5/F4/F3/F2/F1/1.html", "index.html")
    @pytest.mark.sphinx("html", testroot="jinja")
    def test_jinja(self, app: Sphinx, _sb: BaseCase):
        # The redirect template receives jinja variables; verify they render.
        app.build()
        assert app.statuscode == 0
        _sb.open(rel2url(app.outdir, "another.html"))
        text = _sb.get_text(selector="html")
        # normalize path separators so the assertions work on Windows too
        text = text.replace("\\", "/")
        text = text.replace("//", "/")
        assert "rel_url: index.html" in text
        assert "from_file: another.rst" in text
        assert "to_file: index.rst" in text
        assert "from_url: another.html" in text
        assert "to_url: index.html" in text
    @pytest.mark.sphinx("html", testroot="jinja_bad_path")
    def test_jinja_bad_path(self, app: Sphinx, ensure_redirect):
        # A bad template path falls back to the default redirect page.
        app.build()
        assert app.statuscode == 0
        ensure_redirect("another.html", "index.html")
    @pytest.mark.sphinx("html", testroot="pass_url_fragments_queries")
    def test_pass_url_fragments(self, app: Sphinx, _sb: BaseCase, ensure_redirect):
        # URL fragments (#...) must survive the redirect.
        app.build()
        ensure_redirect("another.html", "index.html")
        _sb.open(rel2url(app.outdir, "another.html") + "#haha")
        # check url
        assert Path(rel2url(app.outdir, "index.html")) == Path(
            _sb.execute_script(
                'return window.location.protocol + "//" + window.location.host + "/" + window.location.pathname'
            )
        )
        # check hash
        assert "#haha" == _sb.execute_script("return window.location.hash")
    @pytest.mark.sphinx("html", testroot="pass_url_fragments_queries")
    def test_pass_url_queries(self, app: Sphinx, _sb: BaseCase, ensure_redirect):
        # Query strings (?...) must survive the redirect.
        app.build()
        ensure_redirect("another.html", "index.html")
        _sb.open(rel2url(app.outdir, "another.html") + "?phrase=haha")
        # check url
        assert Path(rel2url(app.outdir, "index.html")) == Path(
            _sb.execute_script(
                'return window.location.protocol + "//" + window.location.host + "/" + window.location.pathname'
            )
        )
        # check query
        assert "?phrase=haha" == _sb.execute_script("return window.location.search")
    @pytest.mark.sphinx("html", testroot="pass_url_fragments_queries")
    def test_pass_url_fragment_and_query(
        self, app: Sphinx, _sb: BaseCase, ensure_redirect
    ):
        # Both query string and fragment together must survive the redirect.
        app.build()
        ensure_redirect("another.html", "index.html")
        _sb.open(rel2url(app.outdir, "another.html") + "?phrase=haha#giraffe")
        # check url
        assert Path(rel2url(app.outdir, "index.html")) == Path(
            _sb.execute_script(
                'return window.location.protocol + "//" + window.location.host + "/" + window.location.pathname'
            )
        )
        # check query
        assert "?phrase=haha" == _sb.execute_script("return window.location.search")
        # check hash
        assert "#giraffe" == _sb.execute_script("return window.location.hash")
class TestExtDirHtml:
    """End-to-end tests of the redirect extension under Sphinx's ``dirhtml``
    builder (every page becomes ``<name>/index.html``).

    Fixtures: ``app`` is a Sphinx application built from the ``testroot``
    named in each ``@pytest.mark.sphinx`` marker; ``ensure_redirect`` asserts
    that opening the first path lands the browser on the second; ``_sb`` is a
    seleniumbase ``BaseCase`` used for direct browser scripting.
    """
    @pytest.mark.sphinx("dirhtml", testroot="no_redirects")
    def test_no_redirects(self, app: Sphinx):
        app.build()
        assert app.statuscode == 0
    @pytest.mark.sphinx("dirhtml", testroot="simple")
    def test_simple(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("another/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="dirhtml_user_index_files")
    def test_index_file_foldering(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("another/index.html", "mydir/index.html")
    @pytest.mark.sphinx("dirhtml", testroot="simple", freshenv=False)
    def test_simple_rebuild(self, app_params, make_app, ensure_redirect):
        # Build twice with a shared environment to check redirects survive
        # an incremental rebuild.
        args, kwargs = app_params
        app = make_app(*args, **kwargs)
        if Path(app.outdir).exists():
            shutil.rmtree(Path(app.outdir))
        app.build()
        assert app.statuscode == 0
        app2 = make_app(*args, **kwargs)
        app2.build()
        assert app2.statuscode == 0
        ensure_redirect("another/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="no_cycle")
    def test_no_cycle(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("a/index.html", "index.html")
        ensure_redirect("b/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="cycle")
    def test_cycle(self, app: Sphinx):
        # A redirect cycle is a configuration error and must abort the build.
        with pytest.raises(ExtensionError):
            app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("dirhtml", testroot="nested")
    def test_nested(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("tof1/index.html", "docs/folder1/f1/index.html")
        ensure_redirect("docs/folder1/tof1/index.html", "docs/folder1/f1/index.html")
        ensure_redirect("docs/folder1/tof2/index.html", "docs/folder2/f2/index.html")
        ensure_redirect("docs/folder2/toindex/index.html", "index.html")
        ensure_redirect("totoindex/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="backslashes")
    def test_backslashes(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("tof1/index.html", "docs/folder1/f1/index.html")
        ensure_redirect("docs/folder1/tof1/index.html", "docs/folder1/f1/index.html")
        ensure_redirect("docs/folder1/tof2/index.html", "docs/folder2/f2/index.html")
        ensure_redirect("docs/folder2/toindex/index.html", "index.html")
        ensure_redirect("totoindex/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="dot_in_filename")
    def test_dot_in_filename(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("docs/x.y.z/index.html", "docs/a.b.c/index.html")
    @pytest.mark.sphinx("dirhtml", testroot="mixed_slashes")
    def test_mixed_slashes(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("tof1/index.html", "docs/folder1/f1/index.html")
        ensure_redirect("docs/folder1/tof1/index.html", "docs/folder1/f1/index.html")
        ensure_redirect("docs/folder1/tof2/index.html", "docs/folder2/f2/index.html")
        ensure_redirect("docs/folder2/toindex/index.html", "index.html")
        ensure_redirect("totoindex/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="link_redirected_twice")
    def test_link_redirected_twice(self, app: Sphinx):
        with pytest.raises(ExtensionError):
            app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("dirhtml", testroot="link_redirected_to_nonexistant_file")
    def test_link_redirected_to_nonexistant_file(self, app: Sphinx):
        app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("dirhtml", testroot="existing_link_redirected")
    def test_existing_link_redirected(self, app: Sphinx):
        app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("dirhtml", testroot="bad_rediraffe_file")
    def test_bad_rediraffe_file(self, app: Sphinx):
        app.build()
        assert app.statuscode == 1
    @pytest.mark.sphinx("dirhtml", testroot="no_rediraffe_file")
    def test_no_rediraffe_file(self, app: Sphinx):
        # Missing redirect file is only a warning, not an error.
        app.build()
        assert app.statuscode == 0
        assert "rediraffe was not given redirects to process" in app._warning.getvalue()
    @pytest.mark.sphinx("dirhtml", testroot="redirect_from_deleted_folder")
    def test_redirect_from_deleted_folder(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("deletedfolder/another/index.html", "index.html")
        ensure_redirect("deletedfolder/deletedfolder2/another/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="complex")
    def test_complex(self, app: Sphinx, ensure_redirect):
        # Large redirect graph: chains and fan-ins must all resolve to their
        # final target in one hop.
        app.build()
        assert app.statuscode == 0
        ensure_redirect("a/index.html", "e/index.html")
        ensure_redirect("b/index.html", "e/index.html")
        ensure_redirect("c/index.html", "e/index.html")
        ensure_redirect("d/index.html", "e/index.html")
        ensure_redirect("f/index.html", "e/index.html")
        ensure_redirect("g/index.html", "e/index.html")
        ensure_redirect("h/index.html", "e/index.html")
        ensure_redirect("i/index.html", "j/index.html")
        ensure_redirect("k/index.html", "l/index.html")
        ensure_redirect("m/index.html", "o/index.html")
        ensure_redirect("n/index.html", "o/index.html")
        ensure_redirect("q/index.html", "z/index.html")
        ensure_redirect("r/index.html", "z/index.html")
        ensure_redirect("s/index.html", "z/index.html")
        ensure_redirect("t/index.html", "z/index.html")
        ensure_redirect("u/index.html", "z/index.html")
        ensure_redirect("v/index.html", "z/index.html")
        ensure_redirect("w/index.html", "z/index.html")
        ensure_redirect("x/index.html", "z/index.html")
        ensure_redirect("y/index.html", "z/index.html")
        ensure_redirect("F1/1/index.html", "z/index.html")
        ensure_redirect("F1/2/index.html", "z/index.html")
        ensure_redirect("F2/1/index.html", "z/index.html")
        ensure_redirect("F5/F4/F3/F2/F1/1/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="complex_dict")
    def test_complex_dict(self, app: Sphinx, ensure_redirect):
        # Same redirect graph as test_complex but configured via a dict.
        app.build()
        assert app.statuscode == 0
        ensure_redirect("a/index.html", "e/index.html")
        ensure_redirect("b/index.html", "e/index.html")
        ensure_redirect("c/index.html", "e/index.html")
        ensure_redirect("d/index.html", "e/index.html")
        ensure_redirect("f/index.html", "e/index.html")
        ensure_redirect("g/index.html", "e/index.html")
        ensure_redirect("h/index.html", "e/index.html")
        ensure_redirect("i/index.html", "j/index.html")
        ensure_redirect("k/index.html", "l/index.html")
        ensure_redirect("m/index.html", "o/index.html")
        ensure_redirect("n/index.html", "o/index.html")
        ensure_redirect("q/index.html", "z/index.html")
        ensure_redirect("r/index.html", "z/index.html")
        ensure_redirect("s/index.html", "z/index.html")
        ensure_redirect("t/index.html", "z/index.html")
        ensure_redirect("u/index.html", "z/index.html")
        ensure_redirect("v/index.html", "z/index.html")
        ensure_redirect("w/index.html", "z/index.html")
        ensure_redirect("x/index.html", "z/index.html")
        ensure_redirect("y/index.html", "z/index.html")
        ensure_redirect("F1/1/index.html", "z/index.html")
        ensure_redirect("F1/2/index.html", "z/index.html")
        ensure_redirect("F2/1/index.html", "z/index.html")
        ensure_redirect("F5/F4/F3/F2/F1/1/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="jinja")
    def test_jinja(self, app: Sphinx, _sb: BaseCase):
        # The redirect page template receives jinja variables; normalise
        # path separators before asserting on the rendered text.
        app.build()
        assert app.statuscode == 0
        _sb.open(rel2url(app.outdir, "another/index.html"))
        text = _sb.get_text(selector="html")
        text = text.replace("\\", "/")
        text = text.replace("//", "/")
        assert "rel_url: ../index.html" in text
        assert "from_file: another.rst" in text
        assert "to_file: index.rst" in text
        assert "from_url: another/index.html" in text
        assert "to_url: index.html" in text
    @pytest.mark.sphinx("dirhtml", testroot="jinja_bad_path")
    def test_jinja_bad_path(self, app: Sphinx, ensure_redirect):
        app.build()
        assert app.statuscode == 0
        ensure_redirect("another/index.html", "index.html")
    @pytest.mark.sphinx("dirhtml", testroot="pass_url_fragments_queries")
    def test_pass_url_fragments(self, app: Sphinx, _sb: BaseCase, ensure_redirect):
        """A redirect must preserve the URL fragment (``#haha``)."""
        app.build()
        ensure_redirect("another/index.html", "index.html")
        _sb.open(rel2url(app.outdir, "another/index.html") + "#haha")
        # check url
        assert Path(rel2url(app.outdir, "index.html")) == Path(
            _sb.execute_script(
                'return window.location.protocol + "//" + window.location.host + "/" + window.location.pathname'
            )
        )
        # check hash
        assert "#haha" == _sb.execute_script("return window.location.hash")
    @pytest.mark.sphinx("dirhtml", testroot="pass_url_fragments_queries")
    def test_pass_url_queries(self, app: Sphinx, _sb: BaseCase, ensure_redirect):
        """A redirect must preserve the query string (``?phrase=haha``)."""
        app.build()
        ensure_redirect("another/index.html", "index.html")
        _sb.open(rel2url(app.outdir, "another/index.html") + "?phrase=haha")
        # check url
        assert Path(rel2url(app.outdir, "index.html")) == Path(
            _sb.execute_script(
                'return window.location.protocol + "//" + window.location.host + "/" + window.location.pathname'
            )
        )
        # check query
        assert "?phrase=haha" == _sb.execute_script("return window.location.search")
    @pytest.mark.sphinx("dirhtml", testroot="pass_url_fragments_queries")
    def test_pass_url_fragment_and_query(
        self, app: Sphinx, _sb: BaseCase, ensure_redirect
    ):
        """A redirect must preserve both query string and fragment."""
        app.build()
        ensure_redirect("another/index.html", "index.html")
        _sb.open(rel2url(app.outdir, "another/index.html") + "?phrase=haha#giraffe")
        # check url
        assert Path(rel2url(app.outdir, "index.html")) == Path(
            _sb.execute_script(
                'return window.location.protocol + "//" + window.location.host + "/" + window.location.pathname'
            )
        )
        # check query
        assert "?phrase=haha" == _sb.execute_script("return window.location.search")
        # check hash
        assert "#giraffe" == _sb.execute_script("return window.location.hash")
| 40.582543 | 113 | 0.638472 |
f70d2269439119cb16be737012dc189740de1235 | 19,878 | py | Python | networkx/generators/community.py | jeanfrancois8512/networkx | 1d5e2183f514a847db63ce0cb78979a3cf7263bb | [
"BSD-3-Clause"
] | null | null | null | networkx/generators/community.py | jeanfrancois8512/networkx | 1d5e2183f514a847db63ce0cb78979a3cf7263bb | [
"BSD-3-Clause"
] | null | null | null | networkx/generators/community.py | jeanfrancois8512/networkx | 1d5e2183f514a847db63ce0cb78979a3cf7263bb | [
"BSD-3-Clause"
] | null | null | null | # Copyright(C) 2011, 2015, 2018 by
# Ben Edwards <bedwards@cs.unm.edu>
# Aric Hagberg <hagberg@lanl.gov>
# Konstantinos Karakatsanis <dinoskarakas@gmail.com>
# All rights reserved.
# BSD license.
#
# Authors: Ben Edwards (bedwards@cs.unm.edu)
# Aric Hagberg (hagberg@lanl.gov)
# Konstantinos Karakatsanis (dinoskarakas@gmail.com)
# Jean-Gabriel Young (jean.gabriel.young@gmail.com)
"""Generators for classes of graphs used in studying social networks."""
from __future__ import division
import itertools
import math
import networkx as nx
from networkx.utils import py_random_state
__all__ = ['caveman_graph', 'connected_caveman_graph',
'relaxed_caveman_graph', 'random_partition_graph',
'planted_partition_graph', 'gaussian_random_partition_graph',
'ring_of_cliques', 'windmill_graph', 'stochastic_block_model']
def caveman_graph(l, k):
    """Return a caveman graph: `l` disjoint cliques of `k` nodes each.

    Nodes ``0 .. l*k-1`` are partitioned into consecutive runs of `k`;
    each run forms a complete subgraph and no edges join different runs.

    Parameters
    ----------
    l : int
        Number of cliques.
    k : int
        Size of each clique.

    Returns
    -------
    G : NetworkX Graph
        caveman graph

    Notes
    -----
    Only the undirected graph of [1]_ is generated; convert with
    :func:`nx.to_directed` or ``nx.MultiGraph(...)`` if needed.

    See also
    --------
    connected_caveman_graph

    References
    ----------
    .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
       Amer. J. Soc. 105, 493-527, 1999.
    """
    G = nx.empty_graph(l * k)
    if k > 1:
        for first in range(0, l * k, k):
            members = range(first, first + k)
            G.add_edges_from(itertools.combinations(members, 2))
    return G
def connected_caveman_graph(l, k):
    """Returns a connected caveman graph of `l` cliques of size `k`.

    The connected caveman graph is formed by creating `n` cliques of size
    `k`, then a single edge in each clique is rewired to a node in an
    adjacent clique.

    Parameters
    ----------
    l : int
        number of cliques
    k : int
        size of cliques (must be at least 2)

    Returns
    -------
    G : NetworkX Graph
        connected caveman graph

    Raises
    ------
    NetworkXError
        If the size of cliques `k` is smaller than 2.

    Notes
    -----
    This returns an undirected graph, it can be converted to a directed
    graph using :func:`nx.to_directed`, or a multigraph using
    ``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
    described in [1]_ and it is unclear which of the directed
    generalizations is most useful.

    Examples
    --------
    >>> G = nx.connected_caveman_graph(3, 3)

    References
    ----------
    .. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
       Amer. J. Soc. 105, 493-527, 1999.
    """
    # With k < 2 the cliques contain no edge (0, 1), so the rewiring loop
    # below would fail with an unhelpful "edge not in graph" error; reject
    # the input explicitly instead.
    if k < 2:
        raise nx.NetworkXError('The size of cliques in a connected caveman '
                               'graph must be at least 2.')
    G = nx.caveman_graph(l, k)
    for start in range(0, l * k, k):
        # Rewire one intra-clique edge to the previous clique, chaining
        # the cliques into a ring so the whole graph is connected.
        G.remove_edge(start, start + 1)
        G.add_edge(start, (start - 1) % (l * k))
    return G
@py_random_state(3)
def relaxed_caveman_graph(l, k, p, seed=None):
    """Return a relaxed caveman graph.

    Start from `l` cliques of size `k`; each edge is then, with
    probability `p`, rewired so that its first endpoint connects to a
    uniformly chosen node instead (the rewire is skipped if that edge
    already exists).

    Parameters
    ----------
    l : int
        Number of groups
    k : int
        Size of cliques
    p : float
        Probabilty of rewiring each edge.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    G : NetworkX Graph
        Relaxed Caveman Graph

    References
    ----------
    .. [1] Santo Fortunato, Community Detection in Graphs,
       Physics Reports Volume 486, Issues 3-5, February 2010, Pages 75-174.
       https://arxiv.org/abs/0906.0612
    """
    G = nx.caveman_graph(l, k)
    nodes = list(G)
    for (u, v) in G.edges():
        # One uniform draw per edge decides whether it gets rewired.
        if seed.random() >= p:
            continue
        target = seed.choice(nodes)
        if not G.has_edge(u, target):
            G.remove_edge(u, v)
            G.add_edge(u, target)
    return G
@py_random_state(3)
def random_partition_graph(sizes, p_in, p_out, seed=None, directed=False):
    """Return a random partition graph with groups of the given sizes.

    Nodes inside the same group are connected with probability `p_in`;
    nodes of different groups with probability `p_out`.  This is a thin
    wrapper around :func:`stochastic_block_model` with a matrix that has
    `p_in` on the diagonal and `p_out` everywhere else.

    Parameters
    ----------
    sizes : list of ints
        Sizes of groups
    p_in : float
        probability of edges with in groups
    p_out : float
        probability of edges between groups
    directed : boolean optional, default=False
        Whether to create a directed graph
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    G : NetworkX Graph or DiGraph
        random partition graph of size sum(gs).  The partition is stored
        as the graph attribute ``'partition'``.

    Raises
    ------
    NetworkXError
        If p_in or p_out is not in [0,1]

    References
    ----------
    .. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports
       Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
    """
    if not 0.0 <= p_in <= 1.0:
        raise nx.NetworkXError("p_in must be in [0,1]")
    if not 0.0 <= p_out <= 1.0:
        raise nx.NetworkXError("p_out must be in [0,1]")
    # Block probability matrix: p_in on the diagonal, p_out off-diagonal.
    n_blocks = len(sizes)
    probs = [[p_in if r == s else p_out for s in range(n_blocks)]
             for r in range(n_blocks)]
    return stochastic_block_model(sizes, probs, nodelist=None, seed=seed,
                                  directed=directed, selfloops=False,
                                  sparse=True)
@py_random_state(4)
def planted_partition_graph(l, k, p_in, p_out, seed=None, directed=False):
    """Return the planted l-partition graph.

    Partitions ``n = l * k`` vertices into `l` groups of `k` vertices.
    Vertices of the same group are linked with probability `p_in`,
    vertices of different groups with probability `p_out`.

    Parameters
    ----------
    l : int
        Number of groups
    k : int
        Number of vertices in each group
    p_in : float
        probability of connecting vertices within a group
    p_out : float
        probability of connected vertices between groups
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    directed : bool,optional (default=False)
        If True return a directed graph

    Returns
    -------
    G : NetworkX Graph or DiGraph
        planted l-partition graph

    Raises
    ------
    NetworkXError
        If p_in,p_out are not in [0,1]

    See Also
    --------
    random_partition_graph

    References
    ----------
    .. [1] A. Condon, R.M. Karp, Algorithms for graph partitioning
        on the planted partition model,
        Random Struct. Algor. 18 (2001) 116-140.
    .. [2] Santo Fortunato 'Community Detection in Graphs' Physical Reports
       Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
    """
    # A planted partition is a random partition with l equal-sized groups.
    sizes = [k for _ in range(l)]
    return random_partition_graph(sizes, p_in, p_out, seed, directed)
@py_random_state(6)
def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False,
                                    seed=None):
    """Generate a Gaussian random partition graph.

    A Gaussian random partition graph is created by creating k partitions
    each with a size drawn from a normal distribution with mean s and variance
    s/v. Nodes are connected within clusters with probability p_in and
    between clusters with probability p_out[1]

    Parameters
    ----------
    n : int
        Number of nodes in the graph
    s : float
        Mean cluster size
    v : float
        Shape parameter. The variance of cluster size distribution is s/v.
    p_in : float
        Probabilty of intra cluster connection.
    p_out : float
        Probability of inter cluster connection.
    directed : boolean, optional default=False
        Whether to create a directed graph or not
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    G : NetworkX Graph or DiGraph
        gaussian random partition graph

    Raises
    ------
    NetworkXError
        If s is > n
        If p_in or p_out is not in [0,1]

    Notes
    -----
    Note the number of partitions is dependent on s,v and n, and that the
    last partition may be considerably smaller, as it is sized to simply
    fill out the nodes [1]

    See Also
    --------
    random_partition_graph

    Examples
    --------
    >>> G = nx.gaussian_random_partition_graph(100,10,10,.25,.1)
    >>> len(G)
    100

    References
    ----------
    .. [1] Ulrik Brandes, Marco Gaertler, Dorothea Wagner,
       Experiments on Graph Clustering Algorithms,
       In the proceedings of the 11th Europ. Symp. Algorithms, 2003.
    """
    if s > n:
        raise nx.NetworkXError("s must be <= n")
    assigned = 0
    sizes = []
    # Draw cluster sizes from N(s, s/v) until the n nodes are used up; the
    # final cluster absorbs whatever remains.
    while True:
        size = int(seed.gauss(s, float(s) / v + 0.5))
        if size < 1:  # how to handle 0 or negative sizes?
            continue
        if assigned + size >= n:
            sizes.append(n - assigned)
            break
        assigned += size
        sizes.append(size)
    # BUG FIX: random_partition_graph's signature is
    # (sizes, p_in, p_out, seed=None, directed=False); the previous
    # positional call passed `directed` as the seed and `seed` as the
    # directed flag.  Use keywords so each value lands in the right slot.
    return random_partition_graph(sizes, p_in, p_out, seed=seed,
                                  directed=directed)
def ring_of_cliques(num_cliques, clique_size):
    """Return a "ring of cliques" graph.

    Builds `num_cliques` complete graphs of `clique_size` nodes each and
    joins consecutive cliques with one extra edge, closing the chain into
    a ring.  Unlike :func:`connected_caveman_graph`, no clique edge is
    removed — the connecting edge is simply added.

    Parameters
    ----------
    num_cliques : int
        Number of cliques
    clique_size : int
        Size of cliques

    Returns
    -------
    G : NetworkX Graph
        ring of cliques graph

    Raises
    ------
    NetworkXError
        If the number of cliques is lower than 2 or
        if the size of cliques is smaller than 2.

    See Also
    --------
    connected_caveman_graph
    """
    if num_cliques < 2:
        raise nx.NetworkXError('A ring of cliques must have at least '
                               'two cliques')
    if clique_size < 2:
        raise nx.NetworkXError('The cliques must have at least two nodes')
    G = nx.Graph()
    total_nodes = num_cliques * clique_size
    for i in range(num_cliques):
        first = i * clique_size
        members = range(first, first + clique_size)
        G.add_edges_from(itertools.combinations(members, 2))
        # Connect node 1 of this clique to node 0 of the next clique
        # (wrapping around to clique 0 at the end of the ring).
        G.add_edge(first + 1, (first + clique_size) % total_nodes)
    return G
def windmill_graph(n, k):
    """Return a windmill graph: `n` cliques of size `k` joined at node 0.

    Equivalent to taking a disjoint union of `n` cliques of size `k`,
    picking one node from each, and contracting the picked nodes.  Here it
    is built as one clique of size `k` plus ``n - 1`` cliques of size
    ``k - 1``, with node 0 then connected to every other node.

    Parameters
    ----------
    n : int
        Number of cliques
    k : int
        Size of cliques

    Returns
    -------
    G : NetworkX Graph
        windmill graph with n cliques of size k

    Raises
    ------
    NetworkXError
        If the number of cliques is less than two
        If the size of the cliques are less than two

    Notes
    -----
    The node labeled `0` is the hub connected to all other nodes.  Windmill
    graphs are usually denoted `Wd(k,n)` — note the reversed parameter
    order relative to this function.
    """
    if n < 2:
        msg = 'A windmill graph must have at least two cliques'
        raise nx.NetworkXError(msg)
    if k < 2:
        raise nx.NetworkXError('The cliques must have at least two nodes')
    cliques = [nx.complete_graph(k)]
    cliques.extend(nx.complete_graph(k - 1) for _ in range(n - 1))
    G = nx.disjoint_union_all(cliques)
    hub_edges = ((0, node) for node in range(k, G.number_of_nodes()))
    G.add_edges_from(hub_edges)
    return G
@py_random_state(3)
def stochastic_block_model(sizes, p, nodelist=None, seed=None,
                           directed=False, selfloops=False, sparse=True):
    """Returns a stochastic block model graph.

    This model partitions the nodes in blocks of arbitrary sizes, and places
    edges between pairs of nodes independently, with a probability that depends
    on the blocks.

    Parameters
    ----------
    sizes : list of ints
        Sizes of blocks
    p : list of list of floats
        Element (r,s) gives the density of edges going from the nodes
        of group r to nodes of group s.
        p must match the number of groups (len(sizes) == len(p)),
        and it must be symmetric if the graph is undirected.
    nodelist : list, optional
        The block tags are assigned according to the node identifiers
        in nodelist. If nodelist is None, then the ordering is the
        range [0,sum(sizes)-1].
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.
    directed : boolean optional, default=False
        Whether to create a directed graph or not.
    selfloops : boolean optional, default=False
        Whether to include self-loops or not.
    sparse: boolean optional, default=True
        Use the sparse heuristic to speed up the generator.

    Returns
    -------
    g : NetworkX Graph or DiGraph
        Stochastic block model graph of size sum(sizes)

    Raises
    ------
    NetworkXError
      If probabilities are not in [0,1].
      If the probability matrix is not square (directed case).
      If the probability matrix is not symmetric (undirected case).
      If the sizes list does not match nodelist or the probability matrix.
      If nodelist contains duplicate.

    Examples
    --------
    >>> sizes = [75, 75, 300]
    >>> probs = [[0.25, 0.05, 0.02],
    ...          [0.05, 0.35, 0.07],
    ...          [0.02, 0.07, 0.40]]
    >>> g = nx.stochastic_block_model(sizes, probs, seed=0)
    >>> len(g)
    450
    >>> H = nx.quotient_graph(g, g.graph['partition'], relabel=True)
    >>> for v in H.nodes(data=True):
    ...     print(round(v[1]['density'], 3))
    ...
    0.245
    0.348
    0.405
    >>> for v in H.edges(data=True):
    ...     print(round(1.0 * v[2]['weight'] / (sizes[v[0]] * sizes[v[1]]), 3))
    ...
    0.051
    0.022
    0.07

    See Also
    --------
    random_partition_graph
    planted_partition_graph
    gaussian_random_partition_graph
    gnp_random_graph

    References
    ----------
    .. [1] Holland, P. W., Laskey, K. B., & Leinhardt, S.,
           "Stochastic blockmodels: First steps",
           Social networks, 5(2), 109-137, 1983.
    """
    # Check if dimensions match
    if len(sizes) != len(p):
        raise nx.NetworkXException("'sizes' and 'p' do not match.")
    # Check for probability symmetry (undirected) and shape (directed)
    for row in p:
        if len(p) != len(row):
            raise nx.NetworkXException("'p' must be a square matrix.")
    if not directed:
        p_transpose = [list(i) for i in zip(*p)]
        for i in zip(p, p_transpose):
            for j in zip(i[0], i[1]):
                # tolerate tiny float asymmetry up to 1e-08
                if abs(j[0] - j[1]) > 1e-08:
                    raise nx.NetworkXException("'p' must be symmetric.")
    # Check for probability range
    for row in p:
        for prob in row:
            if prob < 0 or prob > 1:
                raise nx.NetworkXException("Entries of 'p' not in [0,1].")
    # Check for nodelist consistency
    if nodelist is not None:
        if len(nodelist) != sum(sizes):
            raise nx.NetworkXException("'nodelist' and 'sizes' do not match.")
        if len(nodelist) != len(set(nodelist)):
            raise nx.NetworkXException("nodelist contains duplicate.")
    else:
        nodelist = range(0, sum(sizes))
    # Setup the graph conditionally to the directed switch.
    # Directed graphs visit every ordered block pair; undirected graphs
    # visit each unordered pair once (including the diagonal).
    block_range = range(len(sizes))
    if directed:
        g = nx.DiGraph()
        block_iter = itertools.product(block_range, block_range)
    else:
        g = nx.Graph()
        block_iter = itertools.combinations_with_replacement(block_range, 2)
    # Split nodelist in a partition (list of sets).
    size_cumsum = [sum(sizes[0:x]) for x in range(0, len(sizes) + 1)]
    g.graph['partition'] = [set(nodelist[size_cumsum[x]:size_cumsum[x + 1]])
                            for x in range(0, len(size_cumsum) - 1)]
    # Setup nodes and graph name
    for block_id, nodes in enumerate(g.graph['partition']):
        for node in nodes:
            g.add_node(node, block=block_id)
    g.name = "stochastic_block_model"
    # Test for edge existence
    parts = g.graph['partition']
    for i, j in block_iter:
        if i == j:
            # Intra-block edges: enumerate the candidate pairs according to
            # directedness/selfloop settings and keep each with prob p[i][i].
            if directed:
                if selfloops:
                    edges = itertools.product(parts[i], parts[i])
                else:
                    edges = itertools.permutations(parts[i], 2)
            else:
                edges = itertools.combinations(parts[i], 2)
                if selfloops:
                    edges = itertools.chain(edges, zip(parts[i], parts[i]))
            for e in edges:
                if seed.random() < p[i][j]:
                    g.add_edge(*e)
        else:
            edges = itertools.product(parts[i], parts[j])
            if sparse:
                if p[i][j] == 1:  # Test edges cases p_ij = 0 or 1
                    for e in edges:
                        g.add_edge(*e)
                elif p[i][j] > 0:
                    # Geometric sampling: instead of one Bernoulli trial per
                    # candidate pair, draw the gap to the next kept edge
                    # (geometric with success prob p[i][j]) and skip ahead.
                    while True:
                        try:
                            logrand = math.log(seed.random())
                            skip = math.floor(logrand / math.log(1 - p[i][j]))
                            # consume "skip" edges
                            # (islice(it, n, n) is the standard iterator
                            # "consume n items" idiom)
                            next(itertools.islice(edges, skip, skip), None)
                            e = next(edges)
                            g.add_edge(*e)  # __safe
                        except StopIteration:
                            break
            else:
                for e in edges:
                    if seed.random() < p[i][j]:
                        g.add_edge(*e)  # __safe
    return g
| 31.107981 | 79 | 0.597495 |
0ada46dca4a375b9b089b0b77f15b8a6e740d946 | 413 | py | Python | watchneighborhood/migrations/0006_profile_bio.py | Koech-code/Neighborhood | 5ff01cca9d9d5ff05068e20c0b36f5ca629db2fb | [
"MIT"
] | null | null | null | watchneighborhood/migrations/0006_profile_bio.py | Koech-code/Neighborhood | 5ff01cca9d9d5ff05068e20c0b36f5ca629db2fb | [
"MIT"
] | null | null | null | watchneighborhood/migrations/0006_profile_bio.py | Koech-code/Neighborhood | 5ff01cca9d9d5ff05068e20c0b36f5ca629db2fb | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-29 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('watchneighborhood', '0005_alter_post_user'),
]
operations = [
migrations.AddField(
model_name='profile',
name='bio',
field=models.TextField(blank=True, max_length=500, null=True),
),
]
| 21.736842 | 74 | 0.610169 |
8fcf33a692788b47d320d5c9982b3cd3f542f329 | 7,736 | py | Python | auth/oidc_auth.py | sam-glendenning/dynafed-oidc-plugin | d22c274e80eb717edff3c8ea849f632432d3c31b | [
"Apache-2.0"
] | null | null | null | auth/oidc_auth.py | sam-glendenning/dynafed-oidc-plugin | d22c274e80eb717edff3c8ea849f632432d3c31b | [
"Apache-2.0"
] | 2 | 2020-01-27T16:30:44.000Z | 2021-02-23T13:48:24.000Z | auth/oidc_auth.py | sam-glendenning/dynafed-oidc-plugin | d22c274e80eb717edff3c8ea849f632432d3c31b | [
"Apache-2.0"
] | 1 | 2020-11-26T17:28:12.000Z | 2020-11-26T17:28:12.000Z | #!/usr/bin/python3.6
# A DynaFed plugin which contacts an OpenID Connect identity provider
# and then compares token attributes with a JSON file in order to
# determine whether a user with certain attributes can access a
# resource
#
# This is modified from dynafed-ldap-plugin, written by Louise Davies
# and available at: https://github.com/stfc/dynafed-ldap-plugin
#
# usage:
# oidc_auth.py <clientname> <remoteaddr> <fqan1> .. <fqanN>
#
# Return value means:
# 0 --> access is GRANTED
# nonzero --> access is DENIED
#
# This script is typically called by DynaFed and specified as an authorisation plugin inside /etc/ugr/ugr.conf
#
import sys
import json
import time
DEFAULT_AUTH_FILE_LOCATION = "/etc/grid-security/oidc_auth.json"
BLACKLIST_FILE = "/etc/ugr/conf.d/blacklist.json"
# use this to strip trailing slashes so that we don't trip up any equalities due to them
def strip_end(string, suffix):
    """Return *string* with a single trailing *suffix* removed, if present.

    Guards against an empty *suffix*: ``string.endswith("")`` is always
    True and ``string[:-0]`` is the empty string, so the unguarded slice
    would wrongly discard the whole string.  Strings without the suffix
    are returned unchanged.
    """
    if suffix and string.endswith(suffix):
        return string[:-len(suffix)]
    return string
# a class that loads the JSON configution file that details the authorisation info for paths
# this is called during the initialisation of the module
class _AuthJSON(object):
auth_dict = {}
path_list = []
def __init__(self):
with open(DEFAULT_AUTH_FILE_LOCATION, "r") as f:
self.auth_dict = json.load(f)
prefix = self.auth_dict["prefix"]
self.path_list.append(prefix)
# prepopulate path list so we don't repeatedly parse it
for group in self.auth_dict["groups"]:
group_name = group["name"]
self.path_list.append(prefix + "/" + group_name)
for bucket in group["buckets"]:
bucket_name = bucket["name"]
self.path_list.append(prefix + "/" + group_name + "/" + bucket_name)
# we want to apply the auth that matches the path most closely,
# so we have to search the dict for path prefixes that match
# the path we supply
# aka we propogate permissions down unless the user has specified
# different permissions for a child directory
def auth_info_for_path(self, path):
stripped_path = strip_end(path, "/")
split_path = stripped_path.split("/")
prefix = self.auth_dict["prefix"]
i = 0
while i < len(split_path):
p = ""
if i == 0:
p = stripped_path
else:
# the higher i is the closer we're getting to the base of the path
# so take off successive elements from end of split path list
p = "/".join(split_path[:-i])
if p in self.path_list:
if p == prefix:
return {"path": p, "auth_info": self.auth_dict["base_info"][0]}
for group in self.auth_dict["groups"]:
if prefix + "/" + group["name"] == p: # if the user is navigating to /gridpp/group-name
return {"path": p, "auth_info": group}
for bucket in group["buckets"]:
if prefix + "/" + group["name"] + "/" + bucket["name"] == p: # if the user is navigating to a bucket inside a group (/gridpp/group-name/bucket-name)
return {"path": p, "auth_info": bucket}
i += 1
# given a authorisation condition and the user info, does the user satisfy the condition?
# return true or false based on condition
def process_condition(condition, user_info):
    """Recursively decide whether *user_info* satisfies *condition*.

    A condition is a dict with one of three shapes:
      - ``{}``                       -> matches everyone
      - ``{"attribute": a, "value": v}`` -> the user's claim *a* must equal
        *v* or contain *v* (for multi-valued claims such as group lists)
      - ``{"or": [...]}`` / ``{"and": [...]}`` -> boolean combination of
        sub-conditions
    Condition strings are UTF-8 encoded before comparison, matching the
    byte-keyed user_info handed in by the caller.
    """
    # Empty condition: no attributes to check, automatic match.
    if not condition:
        return True
    if "attribute" in condition:
        attr = condition["attribute"].encode("utf-8")
        wanted = condition["value"].encode("utf-8")
        if user_info is None or attr not in user_info:
            return False
        actual = user_info[attr]
        # Exact value, or membership in a multi-valued claim.
        return actual == wanted or wanted in actual
    if "or" in condition:
        # True as soon as any sub-condition matches.
        return any(process_condition(sub, user_info)
                   for sub in condition["or"])
    if "and" in condition:
        # False as soon as any sub-condition fails.
        return all(process_condition(sub, user_info)
                   for sub in condition["and"])
    # TODO: extend to other operators if we need them?
def get_blacklist():
    """Return the list of blacklisted bucket names.

    Reads BLACKLIST_FILE; a missing file simply means nothing is
    blacklisted, so an empty list is returned in that case.
    """
    try:
        f = open(BLACKLIST_FILE, "r")
    except FileNotFoundError:
        return []
    with f:
        blacklist = json.load(f)
    return blacklist["buckets"]
# The main function that has to be invoked from ugr to determine if a request
# has to be performed or not.
def isallowed(clientname="unknown", remoteaddr="nowhere", resource="none",
              mode="0", fqans=None, keys=None):
    """Authorise a single request against the JSON auth configuration.

    Returns 0 to allow the request and 1 to deny it (0/1 convention
    inferred from the admin shortcut below — confirm against the ugr
    plugin interface).

    Parameters mirror the ugr callback signature: `resource` is the
    requested path, `mode` the access mode to check against each rule's
    "permissions", and `keys` carries the request headers/claims.
    """
    # Initializing the token from keys. For this to work the mod_auth_openidc
    # plugin must hand the token payload through as a header, ie:
    #   OIDCPassIDTokenAs payload
    user_info = dict(keys)
    if "http.OIDC_CLAIM_groups" in user_info:
        # Group claims arrive as one comma-separated string; split into a list.
        user_info["http.OIDC_CLAIM_groups"] = user_info["http.OIDC_CLAIM_groups"].split(
            ",")
        # Members of the admin group bypass all further checks.
        if "dynafed/admins" in user_info["http.OIDC_CLAIM_groups"]:
            return 0
    myauthjson = _AuthJSON()
    result = myauthjson.auth_info_for_path(resource)
    if result is None:
        # Failed to match anything, means the path isn't supposed protected by
        # this plugin. Shouldn't really happen, as usually the base path at
        # least will be specified, unless there are multiple auth plugins and
        # you want to reduce repetition of granting things on base path.
        return 1
    auth_info = result["auth_info"]
    matched_path = result["path"]
    if strip_end(matched_path, "/") != strip_end(resource, "/") \
            and "propogate_permissions" in auth_info \
            and not auth_info["propogate_permissions"]:
        # If matched_path != resource then it is a parent directory. If the
        # parent directory does not want to propogate permissions then deny
        # access. Mainly need this to allow top-level access to the federation
        # without defaulting so that the entire federation is readable; might
        # be useful elsewhere too.
        return 1
    # Last path component of the matched rule is treated as the bucket name.
    bucket = strip_end(matched_path, "/").split("/")[-1]
    if bucket in get_blacklist():
        return 1
    for item in auth_info["allowed_attributes"]:
        # Use process_condition to check whether we match or not.
        condition = item["attribute_requirements"]
        match = process_condition(condition, user_info)
        if match and mode in item["permissions"]:
            # Attributes match and the requested mode is permitted: allow.
            return 0
    # If we haven't matched on IP or via token attributes then don't let them in.
    return 1
# ------------------------------
# CLI entry point for manual testing: argv supplies clientname, remoteaddr,
# resource, mode, and the remaining args as fqans. Exit code is the decision.
if __name__ == "__main__":
    r = isallowed(sys.argv[1], sys.argv[2], sys.argv[3],
                  sys.argv[4], sys.argv[5:])
    sys.exit(r)
| 39.469388 | 237 | 0.634566 |
a27f5876d1017a8c3e96137461acd97b2bb8d88c | 7,392 | py | Python | tests/chainer_tests/backends_tests/test_chainerx.py | prabhatnagarajan/chainer | 3029bbaa587c15b3539b55ee1fd357a4149e5aed | [
"MIT"
] | null | null | null | tests/chainer_tests/backends_tests/test_chainerx.py | prabhatnagarajan/chainer | 3029bbaa587c15b3539b55ee1fd357a4149e5aed | [
"MIT"
] | null | null | null | tests/chainer_tests/backends_tests/test_chainerx.py | prabhatnagarajan/chainer | 3029bbaa587c15b3539b55ee1fd357a4149e5aed | [
"MIT"
] | null | null | null | import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
import chainerx
@testing.inject_backend_tests(
    None,
    [
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class TestChainerxDevice(unittest.TestCase):
    """Tests construction and lookup of backend.ChainerxDevice."""

    def check_device(self, device, backend_config):
        # Shared assertions: identity, name, and fallback device of a
        # ChainerxDevice created for this backend_config.
        assert isinstance(device, backend.ChainerxDevice)
        assert device.xp is chainerx
        assert device.supported_array_types == (chainerx.ndarray,)
        assert device.name == backend_config.chainerx_device
        assert str(device) == backend_config.chainerx_device

        # fallback_device: native falls back to CPU, cuda:N to GPU N.
        chainerx_device_comps = backend_config.chainerx_device.split(':')
        if chainerx_device_comps[0] == 'native':
            assert isinstance(device.fallback_device, backend.CpuDevice)
        elif chainerx_device_comps[0] == 'cuda':
            assert isinstance(device.fallback_device, backend.GpuDevice)
            assert (
                device.fallback_device.device.id
                == int(chainerx_device_comps[1]))
        else:
            # Currently no such ChainerX device is known in Chainer.
            assert False

    def test_init(self, backend_config):
        # Constructing from a chainerx.Device keeps that exact device object.
        name = backend_config.chainerx_device
        chx_device = chainerx.get_device(name)

        device = backend.ChainerxDevice(chx_device)
        self.check_device(device, backend_config)
        assert device.device is chx_device

    def test_from_array(self, backend_config):
        arr = backend_config.get_array(numpy.ndarray((2,), numpy.float32))
        # Test precondition check.
        assert arr.device.name == backend_config.chainerx_device

        expected_device = backend_config.device

        # ChainerxDevice.from_array
        device = backend.ChainerxDevice.from_array(arr)
        self.check_device(device, backend_config)
        assert device == expected_device

        # backend.get_device_from_array
        device = backend.get_device_from_array(arr)
        self.check_device(device, backend_config)
        assert device == expected_device

    def test_from_fallback_device(self, backend_config):
        # Preparation: it depends on ChainerxDevice.fallback_device.
        tmp_device = backend.ChainerxDevice(
            chainerx.get_device(backend_config.chainerx_device))
        fallback_device = tmp_device.fallback_device

        # Round-trip: a device built from the fallback keeps that fallback.
        device = backend.ChainerxDevice.from_fallback_device(fallback_device)
        self.check_device(device, backend_config)
        assert device.fallback_device == fallback_device
@testing.inject_backend_tests(
    None,
    [
        {},
        {'use_cuda': True},
    ])
class TestChainerxDeviceFromArrayInvalidArray(unittest.TestCase):
    """from_array must yield None for arrays that are not chainerx arrays."""

    def test_from_array(self, backend_config):
        non_chx_arr = backend_config.get_array(
            numpy.ndarray((2,), numpy.float32))
        self.assertIsNone(backend.ChainerxDevice.from_array(non_chx_arr))
@testing.parameterize(*testing.product(
    {
        'value': [None, 1, ()],
    }))
class TestChainerxDeviceFromArrayInvalidValue(unittest.TestCase):
    """from_array must yield None for values that are not arrays at all."""

    def test_from_array(self):
        self.assertIsNone(backend.ChainerxDevice.from_array(self.value))
@testing.inject_backend_tests(
    None,
    [
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class TestChainerxDeviceUse(unittest.TestCase):
    """ChainerxDevice.use() makes its device the chainerx default."""

    def test_use(self, backend_config):
        target = chainer.get_device(backend_config.chainerx_device)
        # Move the default elsewhere first so use() has an observable effect.
        with chainerx.using_device('native:1'):
            target.use()
            assert chainerx.get_default_device() is target.device
@chainer.testing.inject_backend_tests(
    None,
    [
        # NumPy
        {},
        # CuPy
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
        # ChainerX
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
@attr.chainerx
class TestFromToChainerx(unittest.TestCase):
    """backend.from_chx / backend.to_chx convert without copying memory."""

    def check_equal_memory_shared(self, arr1, arr2):
        # Check that the two arrays share the internal memory: mutating one
        # in-place must be visible through the other (then undo the change).
        numpy.testing.assert_array_equal(
            backend.CpuDevice().send(arr1), backend.CpuDevice().send(arr2))
        with chainer.using_device(backend.get_device_from_array(arr1)):
            arr1 += 2
        numpy.testing.assert_array_equal(
            backend.CpuDevice().send(arr1), backend.CpuDevice().send(arr2))
        with chainer.using_device(backend.get_device_from_array(arr1)):
            arr1 -= 2

    def test_from_chx(self, backend_config):
        arr = backend_config.get_array(numpy.ones((2, 3), numpy.float32))
        arr_converted = backend.from_chx(arr)

        src_device = backend_config.device
        if src_device.xp is chainerx:
            # Conversion targets the fallback backend (NumPy or CuPy).
            dst_xp = src_device.fallback_device.xp
            assert isinstance(arr_converted, dst_xp.ndarray)
            if dst_xp is cuda.cupy:
                assert arr_converted.device.id == src_device.device.index
        else:
            # Non-chainerx input must be returned unchanged (same object).
            assert arr is arr_converted

        with backend_config:
            self.check_equal_memory_shared(arr, arr_converted)

    def test_to_chx(self, backend_config):
        arr = backend_config.get_array(numpy.ones((2, 3), numpy.float32))
        arr_converted = backend.to_chx(arr)

        src_device = backend_config.device
        assert isinstance(arr_converted, chainerx.ndarray)
        if src_device.xp is chainerx:
            # Already chainerx: must be returned unchanged (same object).
            assert arr is arr_converted
        elif src_device.xp is cuda.cupy:
            # CuPy input stays on the same physical GPU.
            assert arr.device.id == arr_converted.device.index

        self.check_equal_memory_shared(arr, arr_converted)
@chainer.testing.inject_backend_tests(  # backend_config2
    None,
    [
        # NumPy
        {},
        # CuPy
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
        # ChainerX
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
@chainer.testing.inject_backend_tests(  # backend_config1
    None,
    [
        # ChainerX
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class TestChainerxIsArraySupported(unittest.TestCase):
    """A ChainerxDevice supports exactly the arrays on its own device.

    Cross-product of a target chainerx device (backend_config1) and an
    array from any backend (backend_config2).
    """

    def test_is_array_supported(self, backend_config1, backend_config2):
        target = backend_config1.device  # backend.ChainerxDevice
        arr = backend_config2.get_array(numpy.ndarray((2,), numpy.float32))
        device = backend_config2.device
        # Supported iff the array is a chainerx array on the same device.
        if (isinstance(device, backend.ChainerxDevice)
                and device.device == target.device):
            assert target.is_array_supported(arr)
        else:
            assert not target.is_array_supported(arr)


testing.run_module(__name__, __file__)
| 34.542056 | 77 | 0.662879 |
53d9d483e04a858a19c90ccb40bd37970f0735ac | 3,021 | py | Python | B2G/gecko/testing/tps/tps/firefoxrunner.py | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | [
"Apache-2.0"
] | 3 | 2015-08-31T15:24:31.000Z | 2020-04-24T20:31:29.000Z | B2G/gecko/testing/tps/tps/firefoxrunner.py | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | [
"Apache-2.0"
] | null | null | null | B2G/gecko/testing/tps/tps/firefoxrunner.py | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | [
"Apache-2.0"
] | 3 | 2015-07-29T07:17:15.000Z | 2020-11-04T06:55:37.000Z | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import copy
import os
import shutil
import sys
from mozprocess.pid import get_pids
from mozprofile import Profile
from mozregression.mozInstall import MozInstaller
from mozregression.utils import download_url, get_platform
from mozrunner import FirefoxRunner
class TPSFirefoxRunner(object):
    """Download (if needed), install, and run a Firefox build for TPS tests.

    Note: this is Python 2 code (bare ``print`` statements below).
    """

    # Default number of seconds to wait for the browser process to finish.
    PROCESS_TIMEOUT = 240

    def __init__(self, binary):
        # `binary` is either a local path to an executable, or an http/ftp
        # URL of a build archive to be downloaded on first run().
        if binary is not None and ('http://' in binary or 'ftp://' in binary):
            self.url = binary
            self.binary = None
        else:
            self.url = None
            self.binary = binary
        self.runner = None
        self.installdir = None

    def __del__(self):
        # Best-effort cleanup of a downloaded install (ignore_errors=True).
        if self.installdir:
            shutil.rmtree(self.installdir, True)

    def download_build(self, installdir='downloadedbuild',
                       appname='firefox', macAppName='Minefield.app'):
        """Download self.url, install it into `installdir`, and return the
        path to the installed browser binary.
        """
        self.installdir = os.path.abspath(installdir)
        buildName = os.path.basename(self.url)
        pathToBuild = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   buildName)

        # delete the build if it already exists
        if os.access(pathToBuild, os.F_OK):
            os.remove(pathToBuild)

        # download the build
        print "downloading build"
        download_url(self.url, pathToBuild)

        # install the build (wiping any previous install first)
        print "installing %s" % pathToBuild
        shutil.rmtree(self.installdir, True)
        MozInstaller(src=pathToBuild, dest=self.installdir, dest_app=macAppName)

        # remove the downloaded archive
        os.remove(pathToBuild)

        # calculate path to binary; layout differs per platform
        platform = get_platform()
        if platform['name'] == 'Mac':
            binary = '%s/%s/Contents/MacOS/%s-bin' % (installdir,
                                                      macAppName,
                                                      appname)
        else:
            binary = '%s/%s/%s%s' % (installdir,
                                     appname,
                                     appname,
                                     '.exe' if platform['name'] == 'Windows' else '')

        return binary

    def run(self, profile=None, timeout=PROCESS_TIMEOUT, env=None, args=None):
        """Runs the given FirefoxRunner with the given Profile, waits
        for completion, then returns the process exit code
        """
        if profile is None:
            profile = Profile()
        self.profile = profile

        # Lazily download/install the build on first use when given a URL.
        if self.binary is None and self.url:
            self.binary = self.download_build()

        # The runner is created once and reused across calls.
        if self.runner is None:
            self.runner = FirefoxRunner(self.profile, binary=self.binary)

        self.runner.profile = self.profile

        if env is not None:
            self.runner.env.update(env)

        if args is not None:
            # Copy so later mutation by the caller doesn't affect the runner.
            self.runner.cmdargs = copy.copy(args)

        self.runner.start()
        status = self.runner.process_handler.waitForFinish(timeout=timeout)
        return status
| 30.826531 | 80 | 0.618669 |
bbb2ea40aabc214898813371531dd98a5d3aaab4 | 552 | py | Python | nsd1802/python/day08/tcp_serv2.py | MrWangwf/nsd1806 | 069e993b0bb64cb21adc2a25aa56f6da674453bc | [
"Apache-2.0"
] | null | null | null | nsd1802/python/day08/tcp_serv2.py | MrWangwf/nsd1806 | 069e993b0bb64cb21adc2a25aa56f6da674453bc | [
"Apache-2.0"
] | null | null | null | nsd1802/python/day08/tcp_serv2.py | MrWangwf/nsd1806 | 069e993b0bb64cb21adc2a25aa56f6da674453bc | [
"Apache-2.0"
] | null | null | null | import socket
host = ''
port = 12345
addr = (host, port)
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(addr)
s.listen(1)
while True:
cli_sock, cli_addr = s.accept()
print('Client connect from:', cli_addr)
while True:
data = cli_sock.recv(1024)
if data.strip() == b'end':
break
print(data.decode('utf8')) # bytes类型转为string类型
data = input('> ') + '\r\n' # 获得的是string类型
cli_sock.send(data.encode('utf8')) # 转成bytes类型发送
cli_sock.close()
s.close()
| 25.090909 | 57 | 0.61413 |
0c2194e0f7b4ecc76c8dcc95d9f7387c4c4dc1f7 | 13,771 | py | Python | test/test_functional_tensor.py | 1pikachu/vision | 913657061e62a91ee44fee90b8486436ebc1b1b1 | [
"BSD-3-Clause"
] | null | null | null | test/test_functional_tensor.py | 1pikachu/vision | 913657061e62a91ee44fee90b8486436ebc1b1b1 | [
"BSD-3-Clause"
] | null | null | null | test/test_functional_tensor.py | 1pikachu/vision | 913657061e62a91ee44fee90b8486436ebc1b1b1 | [
"BSD-3-Clause"
] | null | null | null | import torch
import torchvision.transforms as transforms
import torchvision.transforms.functional_tensor as F_t
import torchvision.transforms.functional_pil as F_pil
import torchvision.transforms.functional as F
import numpy as np
import unittest
import random
import colorsys
from PIL import Image
class Tester(unittest.TestCase):
    """Checks torchvision's tensor transforms (F_t) against the PIL-based
    reference implementations (F / F_pil), and verifies each tensor op is
    torch.jit scriptable with identical results.
    """

    def _create_data(self, height=3, width=3, channels=3):
        # Random uint8 CHW tensor plus the equivalent PIL (HWC) image.
        tensor = torch.randint(0, 255, (channels, height, width), dtype=torch.uint8)
        pil_img = Image.fromarray(tensor.permute(1, 2, 0).contiguous().numpy())
        return tensor, pil_img

    def compareTensorToPIL(self, tensor, pil_image, msg=None):
        # PIL images are HWC; transpose to CHW for an exact comparison.
        pil_tensor = torch.as_tensor(np.array(pil_image).transpose((2, 0, 1)))
        self.assertTrue(tensor.equal(pil_tensor), msg)

    def test_vflip(self):
        script_vflip = torch.jit.script(F_t.vflip)
        img_tensor = torch.randn(3, 16, 16)
        img_tensor_clone = img_tensor.clone()
        vflipped_img = F_t.vflip(img_tensor)
        vflipped_img_again = F_t.vflip(vflipped_img)
        self.assertEqual(vflipped_img.shape, img_tensor.shape)
        # Flipping twice is the identity, and the input is left untouched.
        self.assertTrue(torch.equal(img_tensor, vflipped_img_again))
        self.assertTrue(torch.equal(img_tensor, img_tensor_clone))

        # scriptable function test
        vflipped_img_script = script_vflip(img_tensor)
        self.assertTrue(torch.equal(vflipped_img, vflipped_img_script))

    def test_hflip(self):
        script_hflip = torch.jit.script(F_t.hflip)
        img_tensor = torch.randn(3, 16, 16)
        img_tensor_clone = img_tensor.clone()
        hflipped_img = F_t.hflip(img_tensor)
        hflipped_img_again = F_t.hflip(hflipped_img)
        self.assertEqual(hflipped_img.shape, img_tensor.shape)
        # Flipping twice is the identity, and the input is left untouched.
        self.assertTrue(torch.equal(img_tensor, hflipped_img_again))
        self.assertTrue(torch.equal(img_tensor, img_tensor_clone))

        # scriptable function test
        hflipped_img_script = script_hflip(img_tensor)
        self.assertTrue(torch.equal(hflipped_img, hflipped_img_script))

    def test_crop(self):
        script_crop = torch.jit.script(F_t.crop)
        img_tensor = torch.randint(0, 255, (3, 16, 16), dtype=torch.uint8)
        img_tensor_clone = img_tensor.clone()
        # Random but always-valid crop box within the 16x16 image.
        top = random.randint(0, 15)
        left = random.randint(0, 15)
        height = random.randint(1, 16 - top)
        width = random.randint(1, 16 - left)
        img_cropped = F_t.crop(img_tensor, top, left, height, width)
        img_PIL = transforms.ToPILImage()(img_tensor)
        img_PIL_cropped = F.crop(img_PIL, top, left, height, width)
        img_cropped_GT = transforms.ToTensor()(img_PIL_cropped)

        self.assertTrue(torch.equal(img_tensor, img_tensor_clone))
        # ToTensor scales to [0, 1]; scale back to uint8 for exact compare.
        self.assertTrue(torch.equal(img_cropped, (img_cropped_GT * 255).to(torch.uint8)),
                        "functional_tensor crop not working")

        # scriptable function test
        cropped_img_script = script_crop(img_tensor, top, left, height, width)
        self.assertTrue(torch.equal(img_cropped, cropped_img_script))

    def test_hsv2rgb(self):
        shape = (3, 100, 150)
        for _ in range(20):
            img = torch.rand(*shape, dtype=torch.float)
            ft_img = F_t._hsv2rgb(img).permute(1, 2, 0).flatten(0, 1)

            h, s, v, = img.unbind(0)
            h = h.flatten().numpy()
            s = s.flatten().numpy()
            v = v.flatten().numpy()

            # Reference conversion: per-pixel colorsys on the flattened planes.
            rgb = []
            for h1, s1, v1 in zip(h, s, v):
                rgb.append(colorsys.hsv_to_rgb(h1, s1, v1))

            colorsys_img = torch.tensor(rgb, dtype=torch.float32)
            max_diff = (ft_img - colorsys_img).abs().max()
            self.assertLess(max_diff, 1e-5)

    def test_rgb2hsv(self):
        shape = (3, 150, 100)
        for _ in range(20):
            img = torch.rand(*shape, dtype=torch.float)
            ft_hsv_img = F_t._rgb2hsv(img).permute(1, 2, 0).flatten(0, 1)

            r, g, b, = img.unbind(0)
            r = r.flatten().numpy()
            g = g.flatten().numpy()
            b = b.flatten().numpy()

            # Reference conversion: per-pixel colorsys on the flattened planes.
            hsv = []
            for r1, g1, b1 in zip(r, g, b):
                hsv.append(colorsys.rgb_to_hsv(r1, g1, b1))

            colorsys_img = torch.tensor(hsv, dtype=torch.float32)
            max_diff = (colorsys_img - ft_hsv_img).abs().max()
            self.assertLess(max_diff, 1e-5)

    def test_adjustments(self):
        script_adjust_brightness = torch.jit.script(F_t.adjust_brightness)
        script_adjust_contrast = torch.jit.script(F_t.adjust_contrast)
        script_adjust_saturation = torch.jit.script(F_t.adjust_saturation)

        # (PIL reference, tensor impl, scripted tensor impl) triples.
        fns = ((F.adjust_brightness, F_t.adjust_brightness, script_adjust_brightness),
               (F.adjust_contrast, F_t.adjust_contrast, script_adjust_contrast),
               (F.adjust_saturation, F_t.adjust_saturation, script_adjust_saturation))

        for _ in range(20):
            channels = 3
            dims = torch.randint(1, 50, (2,))
            shape = (channels, dims[0], dims[1])

            # Randomly exercise both float and uint8 inputs.
            if torch.randint(0, 2, (1,)) == 0:
                img = torch.rand(*shape, dtype=torch.float)
            else:
                img = torch.randint(0, 256, shape, dtype=torch.uint8)

            factor = 3 * torch.rand(1)
            img_clone = img.clone()
            for f, ft, sft in fns:
                ft_img = ft(img, factor)
                sft_img = sft(img, factor)
                if not img.dtype.is_floating_point:
                    ft_img = ft_img.to(torch.float) / 255
                    sft_img = sft_img.to(torch.float) / 255

                img_pil = transforms.ToPILImage()(img)
                f_img_pil = f(img_pil, factor)
                f_img = transforms.ToTensor()(f_img_pil)

                # F uses uint8 and F_t uses float, so there is a small
                # difference in values caused by (at most 5) truncations.
                max_diff = (ft_img - f_img).abs().max()
                max_diff_scripted = (sft_img - f_img).abs().max()
                self.assertLess(max_diff, 5 / 255 + 1e-5)
                self.assertLess(max_diff_scripted, 5 / 255 + 1e-5)
                self.assertTrue(torch.equal(img, img_clone))

        # test for class interface: each ColorJitter variant must script
        # and run (uses the factor/img from the last loop iteration)
        f = transforms.ColorJitter(brightness=factor.item())
        scripted_fn = torch.jit.script(f)
        scripted_fn(img)

        f = transforms.ColorJitter(contrast=factor.item())
        scripted_fn = torch.jit.script(f)
        scripted_fn(img)

        f = transforms.ColorJitter(saturation=factor.item())
        scripted_fn = torch.jit.script(f)
        scripted_fn(img)

        f = transforms.ColorJitter(brightness=1)
        scripted_fn = torch.jit.script(f)
        scripted_fn(img)

    def test_rgb_to_grayscale(self):
        script_rgb_to_grayscale = torch.jit.script(F_t.rgb_to_grayscale)
        img_tensor = torch.randint(0, 255, (3, 16, 16), dtype=torch.uint8)
        img_tensor_clone = img_tensor.clone()
        grayscale_tensor = F_t.rgb_to_grayscale(img_tensor).to(int)
        grayscale_pil_img = torch.tensor(np.array(F.to_grayscale(F.to_pil_image(img_tensor)))).to(int)
        # Rounding differences vs PIL allow an off-by-one per pixel.
        max_diff = (grayscale_tensor - grayscale_pil_img).abs().max()
        self.assertLess(max_diff, 1.0001)
        self.assertTrue(torch.equal(img_tensor, img_tensor_clone))

        # scriptable function test
        grayscale_script = script_rgb_to_grayscale(img_tensor).to(int)
        self.assertTrue(torch.equal(grayscale_script, grayscale_tensor))

    def test_center_crop(self):
        script_center_crop = torch.jit.script(F_t.center_crop)
        img_tensor = torch.randint(0, 255, (1, 32, 32), dtype=torch.uint8)
        img_tensor_clone = img_tensor.clone()
        cropped_tensor = F_t.center_crop(img_tensor, [10, 10])
        cropped_pil_image = F.center_crop(transforms.ToPILImage()(img_tensor), [10, 10])
        cropped_pil_tensor = (transforms.ToTensor()(cropped_pil_image) * 255).to(torch.uint8)
        self.assertTrue(torch.equal(cropped_tensor, cropped_pil_tensor))
        self.assertTrue(torch.equal(img_tensor, img_tensor_clone))

        # scriptable function test
        cropped_script = script_center_crop(img_tensor, [10, 10])
        self.assertTrue(torch.equal(cropped_script, cropped_tensor))

    def test_five_crop(self):
        script_five_crop = torch.jit.script(F_t.five_crop)
        img_tensor = torch.randint(0, 255, (1, 32, 32), dtype=torch.uint8)
        img_tensor_clone = img_tensor.clone()
        cropped_tensor = F_t.five_crop(img_tensor, [10, 10])
        cropped_pil_image = F.five_crop(transforms.ToPILImage()(img_tensor), [10, 10])
        # NOTE(review): indices 1 and 2 are swapped between the tensor and
        # PIL results below — the two implementations appear to order the
        # corner crops differently; confirm against the F_t.five_crop docs.
        self.assertTrue(torch.equal(cropped_tensor[0],
                                    (transforms.ToTensor()(cropped_pil_image[0]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[1],
                                    (transforms.ToTensor()(cropped_pil_image[2]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[2],
                                    (transforms.ToTensor()(cropped_pil_image[1]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[3],
                                    (transforms.ToTensor()(cropped_pil_image[3]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[4],
                                    (transforms.ToTensor()(cropped_pil_image[4]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(img_tensor, img_tensor_clone))

        # scriptable function test
        cropped_script = script_five_crop(img_tensor, [10, 10])
        for cropped_script_img, cropped_tensor_img in zip(cropped_script, cropped_tensor):
            self.assertTrue(torch.equal(cropped_script_img, cropped_tensor_img))

    def test_ten_crop(self):
        script_ten_crop = torch.jit.script(F_t.ten_crop)
        img_tensor = torch.randint(0, 255, (1, 32, 32), dtype=torch.uint8)
        img_tensor_clone = img_tensor.clone()
        cropped_tensor = F_t.ten_crop(img_tensor, [10, 10])
        cropped_pil_image = F.ten_crop(transforms.ToPILImage()(img_tensor), [10, 10])
        # NOTE(review): index pairs (1,2) and (6,7) are swapped between the
        # tensor and PIL results — same crop-ordering difference as in
        # test_five_crop above; confirm against the F_t.ten_crop docs.
        self.assertTrue(torch.equal(cropped_tensor[0],
                                    (transforms.ToTensor()(cropped_pil_image[0]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[1],
                                    (transforms.ToTensor()(cropped_pil_image[2]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[2],
                                    (transforms.ToTensor()(cropped_pil_image[1]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[3],
                                    (transforms.ToTensor()(cropped_pil_image[3]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[4],
                                    (transforms.ToTensor()(cropped_pil_image[4]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[5],
                                    (transforms.ToTensor()(cropped_pil_image[5]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[6],
                                    (transforms.ToTensor()(cropped_pil_image[7]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[7],
                                    (transforms.ToTensor()(cropped_pil_image[6]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[8],
                                    (transforms.ToTensor()(cropped_pil_image[8]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(cropped_tensor[9],
                                    (transforms.ToTensor()(cropped_pil_image[9]) * 255).to(torch.uint8)))
        self.assertTrue(torch.equal(img_tensor, img_tensor_clone))

        # scriptable function test
        cropped_script = script_ten_crop(img_tensor, [10, 10])
        for cropped_script_img, cropped_tensor_img in zip(cropped_script, cropped_tensor):
            self.assertTrue(torch.equal(cropped_script_img, cropped_tensor_img))

    def test_pad(self):
        script_fn = torch.jit.script(F_t.pad)
        tensor, pil_img = self._create_data(7, 8)

        for dt in [None, torch.float32, torch.float64]:
            if dt is not None:
                # This is a trivial cast to float of uint8 data to test all cases
                tensor = tensor.to(dt)
            for pad in [2, [3, ], [0, 3], (3, 3), [4, 2, 4, 3]]:
                configs = [
                    {"padding_mode": "constant", "fill": 0},
                    {"padding_mode": "constant", "fill": 10},
                    {"padding_mode": "constant", "fill": 20},
                    {"padding_mode": "edge"},
                    {"padding_mode": "reflect"},
                ]
                for kwargs in configs:
                    pad_tensor = F_t.pad(tensor, pad, **kwargs)
                    pad_pil_img = F_pil.pad(pil_img, pad, **kwargs)

                    pad_tensor_8b = pad_tensor
                    # we need to cast to uint8 to compare with PIL image
                    if pad_tensor_8b.dtype != torch.uint8:
                        pad_tensor_8b = pad_tensor_8b.to(torch.uint8)

                    self.compareTensorToPIL(pad_tensor_8b, pad_pil_img, msg="{}, {}".format(pad, kwargs))

                    # Scripted pad takes a list, so wrap scalar pads.
                    if isinstance(pad, int):
                        script_pad = [pad, ]
                    else:
                        script_pad = pad
                    pad_tensor_script = script_fn(tensor, script_pad, **kwargs)
                    self.assertTrue(pad_tensor.equal(pad_tensor_script), msg="{}, {}".format(pad, kwargs))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 48.489437 | 106 | 0.608162 |
5ac43f8ba72a922918f9477de63adeff874e9660 | 2,564 | py | Python | python/mzcloud/api/deployments/deployments_metrics_cpu_retrieve.py | MaterializeInc/cloud-sdks | b7886468ceb9e76d1c4b9e0b1b82a957ddd2b1c8 | [
"Apache-2.0"
] | null | null | null | python/mzcloud/api/deployments/deployments_metrics_cpu_retrieve.py | MaterializeInc/cloud-sdks | b7886468ceb9e76d1c4b9e0b1b82a957ddd2b1c8 | [
"Apache-2.0"
] | null | null | null | python/mzcloud/api/deployments/deployments_metrics_cpu_retrieve.py | MaterializeInc/cloud-sdks | b7886468ceb9e76d1c4b9e0b1b82a957ddd2b1c8 | [
"Apache-2.0"
] | 1 | 2021-08-05T17:33:39.000Z | 2021-08-05T17:33:39.000Z | from typing import Any, Dict, List, Optional, Union, cast
import httpx
from ...client import AuthenticatedClient, Client
from ...models.prometheus_metrics import PrometheusMetrics
from ...types import UNSET, Response
def _get_kwargs(
    *,
    client: AuthenticatedClient,
    id: str,
    period: float,
) -> Dict[str, Any]:
    # Build the keyword arguments (url, headers, cookies, timeout) shared
    # by the sync and async httpx calls below.
    url = "{}/api/deployments/{id}/metrics/cpu/{period}".format(client.base_url, id=id, period=period)

    headers: Dict[str, Any] = client.get_headers()
    cookies: Dict[str, Any] = client.get_cookies()

    return {
        "url": url,
        "headers": headers,
        "cookies": cookies,
        "timeout": client.get_timeout(),
    }
def _parse_response(*, response: httpx.Response) -> Optional[PrometheusMetrics]:
    """Deserialize the body of an HTTP 200 response; any other status yields None."""
    if response.status_code != 200:
        return None
    return PrometheusMetrics.from_dict(response.json())
def _build_response(*, response: httpx.Response) -> Response[PrometheusMetrics]:
    """Wrap a raw httpx response in the generated Response container."""
    parsed = _parse_response(response=response)
    return Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=parsed,
    )
def sync_detailed(
    *,
    client: AuthenticatedClient,
    id: str,
    period: float,
) -> Response[PrometheusMetrics]:
    # Synchronous request returning the full Response wrapper
    # (status code, headers, raw content, and parsed model).
    kwargs = _get_kwargs(
        client=client,
        id=id,
        period=period,
    )

    response = httpx.get(
        **kwargs,
    )

    return _build_response(response=response)
def sync(
    *,
    client: AuthenticatedClient,
    id: str,
    period: float,
) -> Optional[PrometheusMetrics]:
    """Retrieve cpu line graph as a list of tuples (timestamps / utilization in %)) for a deployment."""

    # Convenience wrapper: fetch the detailed response, expose only the
    # parsed model.
    detailed = sync_detailed(
        client=client,
        id=id,
        period=period,
    )
    return detailed.parsed
async def asyncio_detailed(
    *,
    client: AuthenticatedClient,
    id: str,
    period: float,
) -> Response[PrometheusMetrics]:
    # Async variant of sync_detailed; uses a short-lived AsyncClient per call.
    kwargs = _get_kwargs(
        client=client,
        id=id,
        period=period,
    )

    async with httpx.AsyncClient() as _client:
        response = await _client.get(**kwargs)

    return _build_response(response=response)
async def asyncio(
    *,
    client: AuthenticatedClient,
    id: str,
    period: float,
) -> Optional[PrometheusMetrics]:
    """Retrieve cpu line graph as a list of tuples (timestamps / utilization in %)) for a deployment."""

    # Convenience wrapper: await the detailed response, expose only the
    # parsed model.
    detailed = await asyncio_detailed(
        client=client,
        id=id,
        period=period,
    )
    return detailed.parsed
| 22.690265 | 104 | 0.636115 |
c105d0acb7c515fc33d321f8d4d98c39da406b4d | 265 | py | Python | employee/filters.py | jamp-scp28/interviews | a3cc7a05bdd8fc6afc09e18b6663a83f49852293 | [
"MIT"
] | null | null | null | employee/filters.py | jamp-scp28/interviews | a3cc7a05bdd8fc6afc09e18b6663a83f49852293 | [
"MIT"
] | null | null | null | employee/filters.py | jamp-scp28/interviews | a3cc7a05bdd8fc6afc09e18b6663a83f49852293 | [
"MIT"
] | null | null | null | from .models import *
import django_filters
from django_filters import CharFilter
class EmployeeFilter(django_filters.FilterSet):
    """Filter employees by a case-insensitive substring of their full name.

    Exposes ``?name=<substring>`` matching ``Employee.fullname``.
    """

    name = CharFilter(field_name="fullname", lookup_expr='icontains')

    class Meta:
        model = Employee
        # django-filter expects an iterable of field names here; the
        # original empty string ('') only behaved like "no extra fields"
        # by accident of string iteration. An empty list states the
        # intent explicitly: only the declared `name` filter is exposed.
        fields = []
| 20.384615 | 69 | 0.724528 |
da1a57919102a9cd294ff48e511b6051672004c0 | 4,071 | py | Python | ExamplesPython_3.6/Chapter9/MeanShift.py | Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples | d6f4c45314b481705958ee336e83ce331926e894 | [
"MIT"
] | 30 | 2019-10-01T11:03:12.000Z | 2022-03-22T09:44:33.000Z | ExamplesPython_3.6/Chapter9/MeanShift.py | suryabranwal/Feature-Extraction-and-Image-Processing-Book-Examples | d6f4c45314b481705958ee336e83ce331926e894 | [
"MIT"
] | 1 | 2020-01-05T18:26:05.000Z | 2020-01-06T16:47:07.000Z | ExamplesPython_3.6/Chapter9/MeanShift.py | suryabranwal/Feature-Extraction-and-Image-Processing-Book-Examples | d6f4c45314b481705958ee336e83ce331926e894 | [
"MIT"
] | 16 | 2019-11-22T20:13:53.000Z | 2022-02-06T20:10:08.000Z | '''
Feature Extraction and Image Processing
Mark S. Nixon & Alberto S. Aguado
http://www.southampton.ac.uk/~msn/book/
Chapter 9
MeanShift: Tracks a region in an image by considering the colour histogram
'''
# Set module functions
from ImageUtilities import imageReadRGB, showImageRGB, createImageF, createImageRGB
from PlotUtilities import plot3DHistogram
from ImageRegionsUtilities import densityHistogram, colourFeature
# Math and iteration
from math import exp, sqrt
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageNames = Input image names
histoSize = Size of the histogram
initialPos = position of the region [column, row]
sizeReg = Size of the region [column, row]
sigma = weight control
'''
# Input configuration (see the Parameters docstring above).
pathToDir = "../../Images/Chapter9/Input/"
imageNames = ["frame1.bmp", "frame2.bmp", "frame3.bmp", "frame4.bmp", "frame5.bmp", "frame6.bmp"]
histoSize = 64          # number of bins per colour axis in the histogram
initialPos = [100, 60]  # [column, row] of the tracked region in frame 1
sizeReg = [12, 18]      # half-size of the region [column, row]
sigma = 6.0             # Gaussian kernel width used for weighting

# Tracked region centre per frame; seeded with the initial position.
positions = [ ]
positions.append(initialPos)

# Read image and compute density (target model histogram q of frame 1)
inputImage, width, height = imageReadRGB(pathToDir + imageNames[0])
q = densityHistogram(inputImage, positions[0], sizeReg, sigma, histoSize)
plot3DHistogram(q)

# To store weights
weights = createImageF(2*sizeReg[0], 2*sizeReg[1])

# Avoid division by zero. Minimum value in the histogram
epsilon = 0.0000000001

# Quantization scale
colourScale = 256.0 / histoSize

# For each frame: iterate the mean-shift update until the position converges.
# NOTE(review): `itertools` below comes from `from timeit import itertools`
# in the import section, which relies on timeit's internal import of the
# itertools module; a direct `import itertools` would be more robust.
numImages = len(imageNames)
for frameNum in range(1, numImages):
    inputImage, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
    currPos = [0, 0]
    newPos = positions[frameNum-1]
    while(currPos != newPos):
        # Histogram in current position (candidate model qs)
        currPos = newPos
        qs = densityHistogram(inputImage, currPos, sizeReg, sigma, histoSize)

        # Compute weights: sqrt of the ratio between target and candidate
        # histogram values at each pixel's colour bin.
        for deltaX, deltaY in itertools.product(range(-sizeReg[0],sizeReg[0]), \
                                                range(-sizeReg[1], sizeReg[1])):
            # Position of the pixel in the image and in the weight array
            x, y = currPos[0] + deltaX, currPos[1] + deltaY
            px,py = deltaX+sizeReg[0], deltaY+sizeReg[1]

            # The 2D colour description at this point. Scaled to fit the histogram values
            Cb,Cr= colourFeature(inputImage[y,x], colourScale)

            # Update weight considering original and current histogram values for the colour
            if qs[Cr, Cb] == 0:
                qs[Cr, Cb] = epsilon
            weights[py, px] = sqrt(q[Cr, Cb] / qs[Cr, Cb])

        # Compute mean shift sums (weighted average of pixel positions)
        meanSum = [0, 0]
        kernelSum = 0
        for deltaX, deltaY in itertools.product(range(-sizeReg[0],sizeReg[0]), \
                                                range(-sizeReg[1], sizeReg[1])):
            # Position of the pixel in the image
            x, y = currPos[0] + deltaX, currPos[1] + deltaY

            # Kernel parameter (Gaussian weight on the offset)
            w = exp(-(deltaX*deltaX + deltaY*deltaY)/(2*sigma*sigma));

            # Weight index
            px, py = deltaX+sizeReg[0], deltaY+sizeReg[1]

            # Mean sum
            meanSum[0] += w * weights[py, px] * x
            meanSum[1] += w * weights[py, px] * y

            # Kernel sum
            kernelSum += w * weights[py, px]

        # Mean shift: the new position is the normalized weighted mean.
        newPos = [int(meanSum[0] / kernelSum), int(meanSum[1] / kernelSum)]
    positions.append(newPos);

# Show results: draw a rectangle border around the tracked region per frame.
for frameNum in range(0, numImages):
    image, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
    p = positions[frameNum]
    borderDistance = [sizeReg[0] -5, sizeReg[1] -5]
    for x, y in itertools.product(range(p[0]-sizeReg[0], p[0]+sizeReg[0]), \
                                  range(p[1]-sizeReg[1], p[1]+sizeReg[1])):
        # Colour only the 5-pixel-wide border, leaving the interior visible.
        if abs(x-p[0]) > borderDistance[0] or abs(y-p[1]) > borderDistance[1]:
            image[y,x] = [20, 20, 80]
    showImageRGB(image)
| 33.925 | 97 | 0.604765 |
eb5095e12bdedb77c706b98ad0b62a6071262e81 | 43,094 | py | Python | datalad/distributed/ora_remote.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | null | null | null | datalad/distributed/ora_remote.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | null | null | null | datalad/distributed/ora_remote.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | null | null | null | from annexremote import SpecialRemote
from annexremote import RemoteError
from annexremote import ProtocolError
from pathlib import (
Path,
PurePosixPath
)
import requests
import shutil
from shlex import quote as sh_quote
import subprocess
import logging
from functools import wraps
from datalad.customremotes.ria_utils import (
get_layout_locations,
UnknownLayoutVersion,
verify_ria_url,
)
lgr = logging.getLogger('datalad.customremotes.ria_remote')
DEFAULT_BUFFER_SIZE = 65536
# TODO
# - make archive check optional
def _get_gitcfg(gitdir, key, cfgargs=None, regex=False):
    """Query a git repository for a config value.

    Parameters
    ----------
    gitdir : str
      Path to the git directory to query.
    key : str
      Config key (or regular expression, if `regex` is set).
    cfgargs : list, optional
      Additional arguments inserted into the `git config` call.
    regex : bool
      If True, query with `--get-regexp` instead of `--get`.

    Returns
    -------
    str or None
      The stripped output of `git config`, or None if the call failed
      for any reason (missing key, missing repo, missing git, ...).
    """
    query = [
        'git',
        '--git-dir', gitdir,
        'config',
    ]
    if cfgargs:
        query.extend(cfgargs)
    query.append('--get-regexp' if regex else '--get')
    query.append(key)
    try:
        # decode to text right away and strip the trailing newline
        return subprocess.check_output(
            query,
            universal_newlines=True).strip()
    except Exception:
        # deliberately broad: any failure means "no value available"
        lgr.debug(
            "Failed to obtain config '%s' at %s",
            key, gitdir,
        )
        return None
def _get_datalad_id(gitdir):
    """Attempt to determine a DataLad dataset ID for a given repo

    The ID is read from `datalad.dataset.id` in the committed
    `.datalad/config` blob of the repository.

    Returns
    -------
    str or None
      None in case no ID was found
    """
    dsid = _get_gitcfg(
        gitdir, 'datalad.dataset.id', ['--blob', ':.datalad/config']
    )
    if dsid is not None:
        return dsid.strip()
    lgr.debug(
        "Cannot determine a DataLad ID for repository: %s",
        gitdir,
    )
    return None
class RemoteCommandFailedError(Exception):
    """Raised when a command executed on the remote end signals failure."""
class RIARemoteError(RemoteError):
    """Error reported to git-annex via the special remote protocol.

    The protocol is line-based, so embedded newlines must be escaped
    to keep the message on a single protocol line.
    """
    def __init__(self, msg):
        sanitized = msg.replace('\n', '\\n')
        super().__init__(sanitized)
class IOBase(object):
    """Abstract interface for local/remote IO operations on a RIA store.

    Concrete subclasses (local filesystem, SSH, ...) implement the
    actual transport; every method here merely defines the contract.
    """

    def mkdir(self, path):
        """Create a directory (including missing parents)."""
        raise NotImplementedError

    def put(self, src, dst, progress_cb):
        """Upload `src` to `dst`, reporting progress via `progress_cb`."""
        raise NotImplementedError

    def get(self, src, dst, progress_cb):
        """Download `src` to `dst`, reporting progress via `progress_cb`."""
        raise NotImplementedError

    def rename(self, src, dst):
        """Rename/move `src` to `dst`."""
        raise NotImplementedError

    def remove(self, path):
        """Delete the file at `path`."""
        raise NotImplementedError

    def exists(self, path):
        """Report whether `path` exists."""
        raise NotImplementedError

    def get_from_archive(self, archive, src, dst, progress_cb):
        """Get a file from an archive

        Parameters
        ----------
        archive_path : Path or str
          Must be an absolute path and point to an existing supported archive
        file_path : Path or str
          Must be a relative Path (relative to the root
          of the archive)
        """
        raise NotImplementedError

    def in_archive(self, archive_path, file_path):
        """Test whether a file is in an archive

        Parameters
        ----------
        archive_path : Path or str
          Must be an absolute path and point to an existing supported archive
        file_path : Path or str
          Must be a relative Path (relative to the root
          of the archive)
        """
        raise NotImplementedError

    def read_file(self, file_path):
        """Read a remote file's content

        Parameters
        ----------
        file_path : Path or str
          Must be an absolute path

        Returns
        -------
        string
        """
        raise NotImplementedError

    def write_file(self, file_path, content, mode='w'):
        """Write a remote file

        Parameters
        ----------
        file_path : Path or str
          Must be an absolute path
        content : str
        """
        raise NotImplementedError
class LocalIO(IOBase):
    """IO operation if the object tree is local (e.g. NFS-mounted)"""

    def mkdir(self, path):
        """Create directory `path` (and missing parents); idempotent."""
        path.mkdir(
            parents=True,
            exist_ok=True,
        )

    def put(self, src, dst, progress_cb):
        """Copy a file into the store.

        `progress_cb` is unused, since the local copy is a single call.
        """
        shutil.copy(
            str(src),
            str(dst),
        )

    def get(self, src, dst, progress_cb):
        """Copy a file out of the store; `progress_cb` is unused."""
        shutil.copy(
            str(src),
            str(dst),
        )

    def get_from_archive(self, archive, src, dst, progress_cb):
        """Extract `src` from the 7z `archive` into the file `dst`.

        Raises if the extraction fails, so callers do not end up with a
        silently empty/truncated target file.
        """
        # this requires python 3.5
        with open(dst, 'wb') as target_file:
            result = subprocess.run([
                '7z', 'x', '-so',
                str(archive), str(src)],
                stdout=target_file,
            )
        # FIX: previously the 7z exit status was ignored, so a failed
        # extraction (missing archive/member) left an empty target file
        # behind without any error. Fail loudly instead.
        if result.returncode != 0:
            raise RuntimeError(
                "7z exited with code {} while extracting {} from {}"
                "".format(result.returncode, src, archive))
        # Note for progress reporting:
        # man 7z:
        #
        # -bs{o|e|p}{0|1|2}
        #         Set output stream for output/error/progress line

    def rename(self, src, dst):
        """Rename/move `src` to `dst`."""
        src.rename(dst)

    def remove(self, path):
        """Delete the file at `path`."""
        path.unlink()

    def remove_dir(self, path):
        """Delete the (empty) directory at `path`."""
        path.rmdir()

    def exists(self, path):
        """Report whether `path` exists."""
        return path.exists()

    def in_archive(self, archive_path, file_path):
        """Test whether `file_path` is contained in the 7z archive."""
        if not archive_path.exists():
            # no archive, not file
            return False
        loc = str(file_path)
        from datalad.cmd import Runner
        runner = Runner()
        # query 7z for the specific object location, keeps the output
        # lean, even for big archives
        out, err = runner(
            ['7z', 'l', str(archive_path),
             loc],
            log_stdout=True,
        )
        return loc in out

    def read_file(self, file_path):
        """Return the content of `file_path` as a string."""
        with open(str(file_path), 'r') as f:
            content = f.read()
        return content

    def write_file(self, file_path, content, mode='w'):
        """Write `content` to `file_path`; a trailing newline is ensured."""
        if not content.endswith('\n'):
            content += '\n'
        with open(str(file_path), mode) as f:
            f.write(content)
class SSHRemoteIO(IOBase):
    """IO operation if the object tree is SSH-accessible

    All commands are funneled through one persistent remote shell
    (opened in ``__init__``); marker lines delimit each command's
    output and signal success/failure.
    It doesn't even think about a windows server.
    """

    # output markers to detect possible command failure as well as end of output from a particular command:
    REMOTE_CMD_FAIL = "ora-remote: end - fail"
    REMOTE_CMD_OK = "ora-remote: end - ok"

    def __init__(self, host, buffer_size=DEFAULT_BUFFER_SIZE):
        """
        Parameters
        ----------
        host : str
          SSH-accessible host(name) to perform remote IO operations
          on.
        buffer_size : int
          Chunk size (bytes) used when streaming downloads; the default
          is used when None is passed.
        """
        from datalad.support.sshconnector import SSHManager
        # connection manager -- we don't have to keep it around, I think
        self.sshmanager = SSHManager()
        # the connection to the remote
        # we don't open it yet, not yet clear if needed
        self.ssh = self.sshmanager.get_connection(
            host,
            use_remote_annex_bundle=False,
        )
        self.ssh.open()
        # open a remote shell
        cmd = ['ssh'] + self.ssh._ssh_args + [self.ssh.sshri.as_str()]
        self.shell = subprocess.Popen(cmd, stderr=subprocess.DEVNULL, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        # swallow login message(s):
        self.shell.stdin.write(b"echo RIA-REMOTE-LOGIN-END\n")
        self.shell.stdin.flush()
        while True:
            line = self.shell.stdout.readline()
            if line == b"RIA-REMOTE-LOGIN-END\n":
                break
        # TODO: Same for stderr?

        # make sure default is used when None was passed, too.
        self.buffer_size = buffer_size if buffer_size else DEFAULT_BUFFER_SIZE

    def close(self):
        """Terminate the remote shell and close the SSH connection."""
        # try exiting shell clean first
        self.shell.stdin.write(b"exit\n")
        self.shell.stdin.flush()
        # FIX: Popen.wait() raises TimeoutExpired on timeout instead of
        # returning None, so the previous `if exitcode is None` branch was
        # dead and a hanging shell would let the exception escape (often
        # from an atexit handler) without terminate()/sshmanager.close()
        # ever running.
        try:
            self.shell.wait(timeout=0.5)
        except subprocess.TimeoutExpired:
            # be more brutal if it doesn't work
            # TODO: Theoretically terminate() can raise if not successful. How to deal with that?
            self.shell.terminate()
        self.sshmanager.close()

    def _append_end_markers(self, cmd):
        """Append end markers to remote command"""
        return cmd + " && printf '%s\\n' {} || printf '%s\\n' {}\n".format(
            sh_quote(self.REMOTE_CMD_OK),
            sh_quote(self.REMOTE_CMD_FAIL))

    def _get_download_size_from_key(self, key):
        """Get the size of an annex object file from it's key

        Note, that this is not necessarily the size of the annexed file, but possibly only a chunk of it.

        Parameter
        ---------
        key: str
          annex key of the file

        Returns
        -------
        int
          size in bytes
        """
        # TODO: datalad's AnnexRepo.get_size_from_key() is not correct/not fitting. Incorporate the wisdom there, too.
        # We prob. don't want to actually move this method there, since AnnexRepo would be quite an expensive
        # import. Startup time for special remote matters.
        # TODO: this method can be more compact. we don't need particularly elaborated error distinction

        # see: https://git-annex.branchable.com/internals/key_format/
        key_parts = key.split('--')
        key_fields = key_parts[0].split('-')

        s = S = C = None

        for field in key_fields[1:]:  # note: first one has to be backend -> ignore
            if field.startswith('s'):
                # size of the annexed file content:
                s = int(field[1:]) if field[1:].isdigit() else None
            elif field.startswith('S'):
                # we have a chunk and that's the chunksize:
                S = int(field[1:]) if field[1:].isdigit() else None
            elif field.startswith('C'):
                # we have a chunk, this is it's number:
                C = int(field[1:]) if field[1:].isdigit() else None

        if s is None:
            return None
        elif S is None and C is None:
            return s
        elif S and C:
            if C <= int(s / S):
                return S
            else:
                return s % S
        else:
            raise RIARemoteError("invalid key: {}".format(key))

    def _run(self, cmd, no_output=True, check=False):
        """Execute `cmd` in the persistent remote shell.

        Reads the shell's stdout up to the success/failure marker and
        returns everything before it. With `check=True` a failure marker
        raises RemoteCommandFailedError; with `no_output=True` any
        unexpected output raises RIARemoteError.
        """
        # TODO: we might want to redirect stderr to stdout here (or have additional end marker in stderr)
        # otherwise we can't empty stderr to be ready for next command. We also can't read stderr for better error
        # messages (RemoteError) without making sure there's something to read in any case (it's blocking!)
        # However, if we are sure stderr can only ever happen if we would raise RemoteError anyway, it might be
        # okay
        call = self._append_end_markers(cmd)
        self.shell.stdin.write(call.encode())
        self.shell.stdin.flush()

        lines = []
        while True:
            line = self.shell.stdout.readline().decode()
            lines.append(line)
            if line == self.REMOTE_CMD_OK + '\n':
                # end reading
                break
            elif line == self.REMOTE_CMD_FAIL + '\n':
                if check:
                    raise RemoteCommandFailedError(
                        "{cmd} failed: {msg}".format(cmd=cmd,
                                                     msg="".join(lines[:-1]))
                    )
                else:
                    break
        if no_output and len(lines) > 1:
            raise RIARemoteError("{}: {}".format(call, "".join(lines)))
        return "".join(lines[:-1])

    def mkdir(self, path):
        """Create directory `path` (and parents) on the remote end."""
        self._run('mkdir -p {}'.format(sh_quote(str(path))))

    def put(self, src, dst, progress_cb):
        """Upload local file `src` to remote `dst` via SCP."""
        self.ssh.put(str(src), str(dst))

    def get(self, src, dst, progress_cb):
        """Stream remote file `src` into local file `dst`."""
        # Note, that as we are in blocking mode, we can't easily fail on the
        # actual get (that is 'cat').
        # Therefore check beforehand.
        if not self.exists(src):
            raise RIARemoteError("annex object {src} does not exist."
                                 "".format(src=src))

        # TODO: see get_from_archive()

        # TODO: Currently we will hang forever if the file isn't readable and it's supposed size is bigger than whatever
        # cat spits out on stdout. This is because we don't notice that cat has exited non-zero.
        # We could have end marker on stderr instead, but then we need to empty stderr beforehand to not act upon
        # output from earlier calls. This is a problem with blocking reading, since we need to make sure there's
        # actually something to read in any case.
        cmd = 'cat {}'.format(sh_quote(str(src)))
        self.shell.stdin.write(cmd.encode())
        self.shell.stdin.write(b"\n")
        self.shell.stdin.flush()

        from os.path import basename
        key = basename(str(src))
        try:
            size = self._get_download_size_from_key(key)
        except RemoteError as e:
            raise RemoteError("src: {}".format(str(src)) + str(e))

        if size is None:
            # rely on SCP for now
            self.ssh.get(str(src), str(dst))
            return

        with open(dst, 'wb') as target_file:
            bytes_received = 0
            while bytes_received < size:  # TODO: some additional abortion criteria? check stderr in addition?
                c = self.shell.stdout.read1(self.buffer_size)
                # no idea yet, whether or not there's sth to gain by a sophisticated determination of how many bytes to
                # read at once (like size - bytes_received)
                if c:
                    bytes_received += len(c)
                    target_file.write(c)
                    progress_cb(bytes_received)

    def rename(self, src, dst):
        """Move/rename `src` to `dst` on the remote end."""
        self._run('mv {} {}'.format(sh_quote(str(src)), sh_quote(str(dst))))

    def remove(self, path):
        """Delete the remote file at `path`."""
        self._run('rm {}'.format(sh_quote(str(path))))

    def remove_dir(self, path):
        """Delete the (empty) remote directory at `path`."""
        self._run('rmdir {}'.format(sh_quote(str(path))))

    def exists(self, path):
        """Report whether `path` exists on the remote end."""
        try:
            self._run('test -e {}'.format(sh_quote(str(path))), check=True)
            return True
        except RemoteCommandFailedError:
            return False

    def in_archive(self, archive_path, file_path):
        """Test whether `file_path` is listed in the remote 7z archive."""
        if not self.exists(archive_path):
            return False

        loc = str(file_path)
        # query 7z for the specific object location, keeps the output
        # lean, even for big archives
        cmd = '7z l {} {}'.format(
            sh_quote(str(archive_path)),
            sh_quote(loc))

        # Note: Currently relies on file_path not showing up in case of failure
        # including non-existent archive. If need be could be more sophisticated
        # and called with check=True + catch RemoteCommandFailedError
        out = self._run(cmd, no_output=False, check=False)

        return loc in out

    def get_from_archive(self, archive, src, dst, progress_cb):
        """Stream a member `src` of the remote 7z `archive` into `dst`."""
        # Note, that as we are in blocking mode, we can't easily fail on the actual get (that is 'cat').
        # Therefore check beforehand.
        if not self.exists(archive):
            raise RIARemoteError("archive {arc} does not exist."
                                 "".format(arc=archive))

        # TODO: We probably need to check exitcode on stderr (via marker). If archive or content is missing we will
        # otherwise hang forever waiting for stdout to fill `size`
        cmd = '7z x -so {} {}\n'.format(
            sh_quote(str(archive)),
            sh_quote(str(src)))
        self.shell.stdin.write(cmd.encode())
        self.shell.stdin.flush()

        # TODO: - size needs double-check and some robustness
        #       - can we assume src to be a posixpath?
        #       - RF: Apart from the executed command this should be pretty much identical to self.get(), so move that
        #         code into a common function

        from os.path import basename
        size = self._get_download_size_from_key(basename(str(src)))

        with open(dst, 'wb') as target_file:
            bytes_received = 0
            while bytes_received < size:
                c = self.shell.stdout.read1(self.buffer_size)
                if c:
                    bytes_received += len(c)
                    target_file.write(c)
                    progress_cb(bytes_received)

    def read_file(self, file_path):
        """Return the content of the remote file `file_path` as a string."""
        cmd = "cat {}".format(sh_quote(str(file_path)))
        try:
            out = self._run(cmd, no_output=False, check=True)
        except RemoteCommandFailedError:
            raise RIARemoteError("Could not read {}".format(str(file_path)))

        return out

    def write_file(self, file_path, content, mode='w'):
        """Write `content` to the remote `file_path` ('w'rite or 'a'ppend)."""
        if mode == 'w':
            mode = ">"
        elif mode == 'a':
            mode = ">>"
        else:
            raise ValueError("Unknown mode '{}'".format(mode))
        if not content.endswith('\n'):
            content += '\n'

        cmd = "printf '%s' {} {} {}".format(
            sh_quote(content),
            mode,
            sh_quote(str(file_path)))
        try:
            self._run(cmd, check=True)
        except RemoteCommandFailedError:
            raise RIARemoteError("Could not write to {}".format(str(file_path)))
class HTTPRemoteIO(object):
    """Read-only access to a RIA store exposed over plain HTTP."""

    # !!!!
    # This is not actually an IO class like SSHRemoteIO and LocalIO and needs
    # respective RF'ing of special remote implementation eventually.
    # We want ORA over HTTP, but with a server side CGI to talk to in order to
    # reduce the number of requests. Implementing this as such an IO class would
    # mean to have separate requests for all server side executions, which is
    # what we do not want. As a consequence RIARemote class implementation needs
    # to treat HTTP as a special case until refactoring to a design that fits
    # both approaches.

    # NOTE: For now read-only. Not sure yet whether an IO class is the right
    # approach.

    def __init__(self, ria_url, dsid, buffer_size=DEFAULT_BUFFER_SIZE):
        """
        Parameters
        ----------
        ria_url : str
          Store URL; must start with 'ria+http'.
        dsid : str
          Dataset/archive ID; split into the store's <3-char>/<rest>
          directory layout.
        buffer_size : int
          Chunk size for streaming downloads; the default is used when
          None is passed.
        """
        assert ria_url.startswith("ria+http")
        self.base_url = ria_url[4:]
        if self.base_url[-1] == '/':
            self.base_url = self.base_url[:-1]
        self.base_url += "/" + dsid[:3] + '/' + dsid[3:]
        # make sure default is used when None was passed, too.
        self.buffer_size = buffer_size if buffer_size else DEFAULT_BUFFER_SIZE

    def checkpresent(self, key_path):
        """Report whether the key is available (HEAD request returns 200)."""
        # Note, that we need the path with hash dirs, since we don't have access
        # to annexremote.dirhash from within IO classes
        url = self.base_url + "/annex/objects/" + str(key_path)
        response = requests.head(url)
        return response.status_code == 200

    def get(self, key_path, filename, progress_cb):
        """Stream the key's content into `filename`."""
        # Note, that we need the path with hash dirs, since we don't have access
        # to annexremote.dirhash from within IO classes
        url = self.base_url + "/annex/objects/" + str(key_path)
        response = requests.get(url, stream=True)
        # FIX: fail loudly on HTTP errors; previously a 404/500 body (an
        # error page) was silently written to `filename` as if it were the
        # key's content.
        response.raise_for_status()
        with open(filename, 'wb') as dst_file:
            bytes_received = 0
            for chunk in response.iter_content(chunk_size=self.buffer_size,
                                               decode_unicode=False):
                dst_file.write(chunk)
                bytes_received += len(chunk)
                progress_cb(bytes_received)
def handle_errors(func):
    """Decorator to convert and log errors

    Intended to use with every method of RiaRemote class, facing the outside
    world. In particular, that is about everything, that may be called via
    annex' special remote protocol, since a non-RemoteError will simply result
    in a broken pipe by default handling.
    """

    # TODO: configurable on remote end (flag within layout_version!)

    @wraps(func)
    def new_func(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as e:
            if self.remote_log_enabled:
                # append a timestamped traceback to a per-(dataset, remote)
                # log file in the store
                from datetime import datetime
                from traceback import format_exc
                exc_str = format_exc()
                entry = "{time}: Error:\n{exc_str}\n" \
                        "".format(time=datetime.now(),
                                  exc_str=exc_str)
                log_target = self.store_base_path / 'error_logs' / \
                    "{dsid}.{uuid}.log".format(dsid=self.archive_id,
                                               uuid=self.uuid)
                self.io.write_file(log_target, entry, mode='a')
            if isinstance(e, RIARemoteError):
                # FIX: bare `raise` keeps the original traceback intact
                # (instead of re-raising the bound exception object)
                raise
            # FIX: chain the original exception so its traceback/cause is
            # preserved for debugging instead of being flattened to a string
            raise RIARemoteError(str(e)) from e

    return new_func
class NoLayoutVersion(Exception):
    """Raised when no 'ria-layout-version' file is found on the remote."""
class RIARemote(SpecialRemote):
"""This is the class of RIA remotes.
"""
dataset_tree_version = '1'
object_tree_version = '2'
# TODO: Move known versions. Needed by creation routines as well.
known_versions_objt = ['1', '2']
known_versions_dst = ['1']
@handle_errors
def __init__(self, annex):
super(RIARemote, self).__init__(annex)
if hasattr(self, 'configs'):
# introduced in annexremote 1.4.2 to support LISTCONFIGS
self.configs['url'] = "RIA store to use"
# machine to SSH-log-in to access/store the data
# subclass must set this
self.storage_host = None
# must be absolute, and POSIX
# subclass must set this
self.store_base_path = None
# by default we can read and write
self.read_only = False
self.force_write = None
self.uuid = None
self.ignore_remote_config = None
self.remote_log_enabled = None
self.remote_dataset_tree_version = None
self.remote_object_tree_version = None
# for caching the remote's layout locations:
self.remote_git_dir = None
self.remote_archive_dir = None
self.remote_obj_dir = None
self._io = None # lazy
# cache obj_locations:
self._last_archive_path = None
self._last_keypath = (None, None)
def verify_store(self):
"""Check whether the store exists and reports a layout version we
know
The layout of the store is recorded in base_path/ria-layout-version.
If the version found on the remote end isn't supported and `force-write`
isn't configured, sets the remote to read-only operation.
"""
dataset_tree_version_file = \
self.store_base_path / 'ria-layout-version'
# check dataset tree version
try:
self.remote_dataset_tree_version = \
self._get_version_config(dataset_tree_version_file)
if self.remote_dataset_tree_version not in self.known_versions_dst:
# Note: In later versions, condition might change in order to
# deal with older versions.
raise UnknownLayoutVersion(
"RIA store layout version unknown: %s" %
self.remote_dataset_tree_version)
except (RemoteError, FileNotFoundError):
# Exception class depends on whether self.io is local or SSH.
# assume file doesn't exist
# TODO: Is there a possibility RemoteError has a different reason
# and should be handled differently?
# Don't think so ATM. -> Reconsider with new execution layer.
# Note: Error message needs entire URL not just the missing
# path, since it could be due to invalid URL. Path isn't
# telling if it's not clear what system we are looking at.
# Note: Case switch due to still supported configs as an
# alternative to ria+ URLs. To be deprecated.
if self.ria_store_url:
target = self.ria_store_url
elif self.storage_host:
target = "ria+ssh://{}{}".format(
self.storage_host,
dataset_tree_version_file.parent)
else:
target = "ria+" + dataset_tree_version_file.parent.as_uri()
if not self.io.exists(dataset_tree_version_file.parent):
# unify exception to FileNotFoundError
raise FileNotFoundError(
"Configured RIA store not found at %s " % target
)
else:
# Directory is there, but no version file. We don't know what
# that is. Treat the same way as if there was an unknown version
# on record.
raise NoLayoutVersion(
"Configured RIA store lacks a 'ria-layout-version' file at"
" %s" % target
)
def verify_ds_in_store(self):
"""Check whether the dataset exists in store and reports a layout
version we know
The layout is recorded in
'dataset_somewhere_beneath_base_path/ria-layout-version.'
If the version found on the remote end isn't supported and `force-write`
isn't configured, sets the remote to read-only operation.
"""
object_tree_version_file = self.remote_git_dir / 'ria-layout-version'
# check (annex) object tree version
try:
self.remote_object_tree_version =\
self._get_version_config(object_tree_version_file)
if self.remote_object_tree_version not in self.known_versions_objt:
raise UnknownLayoutVersion
except (RemoteError, FileNotFoundError):
# Exception class depends on whether self.io is local or SSH.
# assume file doesn't exist
# TODO: Is there a possibility RemoteError has a different reason
# and should be handled differently?
# Don't think so ATM. -> Reconsider with new execution layer.
if not self.io.exists(object_tree_version_file.parent):
# unify exception
raise FileNotFoundError
else:
raise NoLayoutVersion
def _load_cfg(self, gitdir, name):
# for now still accept the configs, if no ria-URL is known:
if not self.ria_store_url:
self.storage_host = _get_gitcfg(
gitdir, 'annex.ora-remote.{}.ssh-host'.format(name))
store_base_path = _get_gitcfg(
gitdir, 'annex.ora-remote.{}.base-path'.format(name))
self.store_base_path = store_base_path.strip() \
if store_base_path else None
# Whether or not to force writing to the remote. Currently used to overrule write protection due to layout
# version mismatch.
self.force_write = _get_gitcfg(
gitdir, 'annex.ora-remote.{}.force-write'.format(name))
# whether to ignore config flags set at the remote end
self.ignore_remote_config = _get_gitcfg(gitdir, 'annex.ora-remote.{}.ignore-remote-config'.format(name))
# buffer size for reading files over HTTP and SSH
self.buffer_size = _get_gitcfg(gitdir,
"remote.{}.ora-buffer-size"
"".format(name))
if self.buffer_size:
self.buffer_size = int(self.buffer_size)
def _verify_config(self, gitdir, fail_noid=True):
# try loading all needed info from (git) config
name = self.annex.getconfig('name')
if not name:
raise RIARemoteError(
"Cannot determine special remote name, got: {}".format(
repr(name)))
# get store url:
self.ria_store_url = self.annex.getconfig('url')
if self.ria_store_url:
# support URL rewrite without talking to a DataLad ConfigManager
# Q is why? Why not use the config manager?
url_cfgs = dict()
url_cfgs_raw = _get_gitcfg(gitdir, "^url.*", regex=True)
if url_cfgs_raw:
for line in url_cfgs_raw.splitlines():
k, v = line.split()
url_cfgs[k] = v
self.storage_host, self.store_base_path, self.ria_store_url = \
verify_ria_url(self.ria_store_url, url_cfgs)
# TODO duplicates call to `git-config` after RIA url rewrite
self._load_cfg(gitdir, name)
# for now still accept the configs, if no ria-URL is known:
if not self.ria_store_url:
if not self.store_base_path:
self.store_base_path = self.annex.getconfig('base-path')
if not self.store_base_path:
raise RIARemoteError(
"No remote base path configured. "
"Specify `base-path` setting.")
self.store_base_path = Path(self.store_base_path)
if not self.store_base_path.is_absolute():
raise RIARemoteError(
'Non-absolute object tree base path configuration: %s'
'' % str(self.store_base_path))
# for now still accept the configs, if no ria-URL is known:
if not self.ria_store_url:
# Note: Special value '0' is replaced by None only after checking the repository's annex config.
# This is to uniformly handle '0' and None later on, but let a user's config '0' overrule what's
# stored by git-annex.
if not self.storage_host:
self.storage_host = self.annex.getconfig('ssh-host')
elif self.storage_host == '0':
self.storage_host = None
# go look for an ID
self.archive_id = self.annex.getconfig('archive-id')
if fail_noid and not self.archive_id:
raise RIARemoteError(
"No archive ID configured. This should not happen.")
# TODO: This should prob. not be done! Would only have an effect if force-write was committed
# annex-special-remote-config and this is likely a bad idea.
if not self.force_write:
self.force_write = self.annex.getconfig('force-write')
def _get_version_config(self, path):
""" Get version and config flags from remote file
"""
file_content = self.io.read_file(path).strip().split('|')
if not (1 <= len(file_content) <= 2):
self.message("invalid version file {}".format(path))
return None
remote_version = file_content[0]
remote_config_flags = file_content[1] if len(file_content) == 2 else None
if not self.ignore_remote_config and remote_config_flags:
# Note: 'or', since config flags can come from toplevel (dataset-tree-root) as well as
# from dataset-level. toplevel is supposed flag the entire tree.
self.remote_log_enabled = self.remote_log_enabled or 'l' in remote_config_flags
return remote_version
def get_store(self):
"""checks the remote end for an existing store and dataset
Furthermore reads and stores version and config flags, layout
locations, etc.
If this doesn't raise, the remote end should be fine to work with.
"""
# cache remote layout directories
self.remote_git_dir, self.remote_archive_dir, self.remote_obj_dir = \
self.get_layout_locations(self.store_base_path, self.archive_id)
read_only_msg = "Treating remote as read-only in order to" \
"prevent damage by putting things into an unknown " \
"version of the target layout. You can overrule this " \
"by setting 'annex.ora-remote.<name>.force-write=true'."
try:
self.verify_store()
except UnknownLayoutVersion:
reason = "Remote dataset tree reports version {}. Supported " \
"versions are: {}. Consider upgrading datalad or " \
"fix the 'ria-layout-version' file at the RIA store's " \
"root. ".format(self.remote_dataset_tree_version,
self.known_versions_dst)
self._set_read_only(reason + read_only_msg)
except NoLayoutVersion:
reason = "Remote doesn't report any dataset tree version." \
"Consider upgrading datalad or add a fitting " \
"'ria-layout-version' file at the RIA store's " \
"root."
self._set_read_only(reason + read_only_msg)
try:
self.verify_ds_in_store()
except UnknownLayoutVersion:
reason = "Remote object tree reports version {}. Supported" \
"versions are {}. Consider upgrading datalad or " \
"fix the 'ria-layout-version' file at the remote " \
"dataset root. " \
"".format(self.remote_object_tree_version,
self.known_versions_objt)
self._set_read_only(reason + read_only_msg)
except NoLayoutVersion:
reason = "Remote doesn't report any object tree version." \
"Consider upgrading datalad or add a fitting " \
"'ria-layout-version' file at the remote " \
"dataset root. "
self._set_read_only(reason + read_only_msg)
@handle_errors
def initremote(self):
# which repo are we talking about
gitdir = self.annex.getgitdir()
self._verify_config(gitdir, fail_noid=False)
if not self.archive_id:
self.archive_id = _get_datalad_id(gitdir)
if not self.archive_id:
# fall back on the UUID for the annex remote
self.archive_id = self.annex.getuuid()
if not isinstance(self.io, HTTPRemoteIO):
self.get_store()
# else:
# TODO: consistency with SSH and FILE behavior? In those cases we make
# sure the store exists from within initremote
self.annex.setconfig('archive-id', self.archive_id)
# make sure, we store the potentially rewritten URL
self.annex.setconfig('url', self.ria_store_url)
def _local_io(self):
"""Are we doing local operations?"""
# let's not make this decision dependent on the existence
# of a directory the matches the name of the configured
# store tree base dir. Such a match could be pure
# coincidence. Instead, let's do remote whenever there
# is a remote host configured
#return self.store_base_path.is_dir()
return not self.storage_host
def debug(self, msg):
# Annex prints just the message, so prepend with
# a "DEBUG" on our own.
self.annex.debug("ORA-DEBUG: " + msg)
def message(self, msg):
try:
self.annex.info(msg)
except ProtocolError:
# INFO not supported by annex version.
# If we can't have an actual info message, at least have a
# debug message.
self.debug(msg)
def _set_read_only(self, msg):
if not self.force_write:
self.read_only = True
self.message(msg)
else:
self.message("Was instructed to force write")
def _ensure_writeable(self):
if self.read_only:
raise RIARemoteError("Remote is treated as read-only. "
"Set 'ora-remote.<name>.force-write=true' to "
"overrule this.")
if isinstance(self.io, HTTPRemoteIO):
raise RIARemoteError("Write access via HTTP not implemented")
@property
def io(self):
if not self._io:
if self._local_io():
self._io = LocalIO()
elif self.ria_store_url.startswith("ria+http"):
self._io = HTTPRemoteIO(self.ria_store_url,
self.archive_id,
self.buffer_size)
elif self.storage_host:
self._io = SSHRemoteIO(self.storage_host, self.buffer_size)
from atexit import register
register(self._io.close)
else:
raise RIARemoteError(
"Local object tree base path does not exist, and no SSH"
"host configuration found.")
return self._io
@handle_errors
def prepare(self):
gitdir = self.annex.getgitdir()
self.uuid = self.annex.getuuid()
self._verify_config(gitdir)
if not isinstance(self.io, HTTPRemoteIO):
self.get_store()
# report active special remote configuration
self.info = {
'store_base_path': str(self.store_base_path),
'storage_host': 'local'
if self._local_io() else self.storage_host,
}
@handle_errors
def transfer_store(self, key, filename):
self._ensure_writeable()
dsobj_dir, archive_path, key_path = self._get_obj_location(key)
key_path = dsobj_dir / key_path
if self.io.exists(key_path):
# if the key is here, we trust that the content is in sync
# with the key
return
self.io.mkdir(key_path.parent)
# we need to copy to a temp location to let
# checkpresent fail while the transfer is still in progress
# and furthermore not interfere with administrative tasks in annex/objects
# In addition include uuid, to not interfere with parallel uploads from different remotes
transfer_dir = self.remote_git_dir / "ora-remote-{}".format(self.uuid) / "transfer"
self.io.mkdir(transfer_dir)
tmp_path = transfer_dir / key
if tmp_path.exists():
# Just in case - some parallel job could already be writing to it
# at least tell the conclusion, not just some obscure permission error
raise RIARemoteError('{}: upload already in progress'.format(filename))
try:
self.io.put(filename, tmp_path, self.annex.progress)
# copy done, atomic rename to actual target
self.io.rename(tmp_path, key_path)
except Exception as e:
# whatever went wrong, we don't want to leave the transfer location blocked
self.io.remove(tmp_path)
raise e
@handle_errors
def transfer_retrieve(self, key, filename):
if isinstance(self.io, HTTPRemoteIO):
self.io.get(PurePosixPath(self.annex.dirhash(key)) / key / key,
filename,
self.annex.progress)
return
dsobj_dir, archive_path, key_path = self._get_obj_location(key)
abs_key_path = dsobj_dir / key_path
# sadly we have no idea what type of source gave checkpresent->true
# we can either repeat the checks, or just make two opportunistic
# attempts (at most)
try:
self.io.get(abs_key_path, filename, self.annex.progress)
except Exception as e1:
# catch anything and keep it around for a potential re-raise
try:
self.io.get_from_archive(archive_path, key_path, filename,
self.annex.progress)
except Exception as e2:
raise RIARemoteError('Failed to key: {}'.format([str(e1), str(e2)]))
@handle_errors
def checkpresent(self, key):
if isinstance(self.io, HTTPRemoteIO):
return self.io.checkpresent(
PurePosixPath(self.annex.dirhash(key)) / key / key)
dsobj_dir, archive_path, key_path = self._get_obj_location(key)
abs_key_path = dsobj_dir / key_path
if self.io.exists(abs_key_path):
# we have an actual file for this key
return True
# do not make a careful check whether an archive exists, because at
# present this requires an additional SSH call for remote operations
# which may be rather slow. Instead just try to run 7z on it and let
# it fail if no archive is around
# TODO honor future 'archive-mode' flag
return self.io.in_archive(archive_path, key_path)
@handle_errors
def remove(self, key):
self._ensure_writeable()
dsobj_dir, archive_path, key_path = self._get_obj_location(key)
key_path = dsobj_dir / key_path
if self.io.exists(key_path):
self.io.remove(key_path)
key_dir = key_path
# remove at most two levels of empty directories
for level in range(2):
key_dir = key_dir.parent
try:
self.io.remove_dir(key_dir)
except Exception:
break
@handle_errors
def getcost(self):
# 100 is cheap, 200 is expensive (all relative to Config/Cost.hs)
# 100/200 are the defaults for local and remote operations in
# git-annex
# if we have the object tree locally, operations are cheap (100)
# otherwise expensive (200)
return '100' if self._local_io() else '200'
@handle_errors
def whereis(self, key):
if isinstance(self.io, HTTPRemoteIO):
# display the URL for a request
# TODO: method of HTTPRemoteIO
return self.ria_store_url[4:] + "/annex/objects" + \
self.annex.dirhash(key) + "/" + key + "/" + key
dsobj_dir, archive_path, key_path = self._get_obj_location(key)
return str(key_path) if self._local_io() \
else '{}: {}:{}'.format(
self.storage_host,
self.remote_git_dir,
sh_quote(str(key_path)),
)
    @staticmethod
    def get_layout_locations(base_path, dsid):
        # Thin wrapper around the module-level helper, pinned to store
        # layout version 1.
        return get_layout_locations(1, base_path, dsid)
    def _get_obj_location(self, key):
        """Resolve the storage locations for `key`.

        Returns a 3-tuple `(obj_dir, archive_path, key_path)` where
        `obj_dir` is the remote object tree root, `archive_path` the 7z
        archive file, and `key_path` the key's path relative to either of
        them. The last computed archive path and key path are memoized on
        the instance to avoid recomputation on repeated calls.
        """
        # Notes: - Changes to this method may require an update of
        #          RIARemote._layout_version
        #        - archive_path is always the same ATM. However, it might depend
        #          on `key` in the future. Therefore build the actual filename
        #          for the archive herein as opposed to `get_layout_locations`.
        if not self._last_archive_path:
            self._last_archive_path = self.remote_archive_dir / 'archive.7z'
        if self._last_keypath[0] != key:
            if self.remote_object_tree_version == '1':
                key_dir = self.annex.dirhash_lower(key)
            # If we didn't recognize the remote layout version, we set to
            # read-only and promised to at least try and read according to our
            # current version. So, treat that case as if remote version was our
            # (client's) version.
            else:
                key_dir = self.annex.dirhash(key)
            # double 'key' is not a mistake, but needed to achieve the exact
            # same layout as the annex/objects tree
            self._last_keypath = (key, Path(key_dir) / key / key)
        return self.remote_obj_dir, self._last_archive_path, \
            self._last_keypath[1]
def main():
    """cmdline entry point"""
    # Instantiate the annexremote Master, attach our RIARemote
    # implementation to it, and start serving git-annex requests.
    from annexremote import Master
    master = Master()
    remote = RIARemote(master)
    master.LinkRemote(remote)
    master.Listen()
| 37.538328 | 120 | 0.587901 |
fb1876be0cb7704d064207b4cca05865cb775d4c | 1,924 | py | Python | src/compas_igs/ui/Rhino/IGS/dev/IGS_form_move_nodes_cmd.py | BlockResearchGroup/compas-IGS | b40698466b91c867600b94ae2530b19d336ad1b0 | [
"MIT"
] | 1 | 2021-11-03T23:22:37.000Z | 2021-11-03T23:22:37.000Z | src/compas_igs/ui/Rhino/IGS/dev/IGS_form_move_nodes_cmd.py | BlockResearchGroup/compas-IGS | b40698466b91c867600b94ae2530b19d336ad1b0 | [
"MIT"
] | 1 | 2021-11-10T03:27:58.000Z | 2021-11-17T13:51:17.000Z | src/compas_igs/ui/Rhino/IGS/dev/IGS_form_move_nodes_cmd.py | BlockResearchGroup/compas-IGS | b40698466b91c867600b94ae2530b19d336ad1b0 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import scriptcontext as sc
import compas_rhino
__commandname__ = "IGS_form_move_nodes"
def RunCommand(is_interactive):
    """Interactively move FormDiagram vertices in the Rhino scene.

    Requires an initialised IGS session (`proxy` and `scene` stored in
    ``sc.sticky``) containing both a Form and a Force diagram. After each
    move, if the session's ``autoupdate`` setting is on, the form's force
    densities and the reciprocal force diagram are recomputed via the proxy.
    """
    if 'IGS' not in sc.sticky:
        compas_rhino.display_message('IGS has not been initialised yet.')
        return

    proxy = sc.sticky['IGS']['proxy']
    scene = sc.sticky['IGS']['scene']

    objects = scene.find_by_name('Form')
    if not objects:
        compas_rhino.display_message("There is no FormDiagram in the scene.")
        return
    form = objects[0]

    objects = scene.find_by_name('Force')
    if not objects:
        compas_rhino.display_message("There is no ForceDiagram in the scene.")
        return
    force = objects[0]

    proxy.package = 'compas_ags.ags.graphstatics'

    # show edge labels while editing so vertices can be related to members
    form.settings['show.edgelabels'] = True
    form.settings['show.forcelabels'] = False
    force.settings['show.edgelabels'] = True
    scene.update()

    # loop until the user cancels the vertex selection (ESC)
    while True:
        vertices = form.select_vertices("Select vertices (Press ESC to exit)")
        if not vertices:
            break

        if form.move_vertices(vertices):
            if scene.settings['IGS']['autoupdate']:
                # recompute force densities and the reciprocal force diagram
                formdiagram = proxy.form_update_q_from_qind(form.diagram)
                form.diagram.data = formdiagram.data

                forcediagram = proxy.force_update_from_form(force.diagram, form.diagram)
                force.diagram.data = forcediagram.data

            scene.update()

    # restore the default label display before leaving the command
    form.settings['show.edgelabels'] = False
    form.settings['show.forcelabels'] = True
    force.settings['show.edgelabels'] = False

    scene.update()
    scene.save()
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
    # allow running the command directly (e.g. from the Rhino Python editor)
    RunCommand(True)
| 26.722222 | 88 | 0.605509 |
0456f6ee3e3b6d578aa70ec24c7714bb8d52fde9 | 4,226 | py | Python | aci2xml.py | blackjade/aci2xml | adc1b4b0d048790c6ba8cdd599f2595cb3349c40 | [
"Apache-2.0"
] | null | null | null | aci2xml.py | blackjade/aci2xml | adc1b4b0d048790c6ba8cdd599f2595cb3349c40 | [
"Apache-2.0"
] | null | null | null | aci2xml.py | blackjade/aci2xml | adc1b4b0d048790c6ba8cdd599f2595cb3349c40 | [
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2015 Fluke Networks.
# All rights reserved.
# No part of this source code may be copied, used, or modified
# without the express written consent of Fluke Networks.
#
# aci2xml.py: Convert the policy manager related section in a *.aci
# file to xml. For example, these lines:
# [\PolicyManager\Alarm0]
# Enable=D_1
# Count_Of_Threshold=D_1
# [\PolicyManager\Alarm0\Threshold0]
# Severity=D_2
# SeverityScore=D_100
# Action=S_Beep
# GroupType=D_2
# SSIDGroupCount=D_1
# SSIDGroup=S_MyWLAN
# ACLGroupCount=D_2
# ACLGroups=S_0,1
# Will be converted to this:
# <Alarm0>
# <AlarmEnabled>1</AlarmEnabled>
# <ThresholdCount>1</ThresholdCount>
# <Threshold0>
# <Severity>2</Severity>
# <Action>Beep</Action>
# <ThresholdInt>0</ThresholdInt>
# <ThresholdString/>
# <GroupType>2</GroupType>
# <IntArray_Count>0</IntArray_Count>
# <IntArray/>
# <FrameCount>50</FrameCount>
# <SignalStrength>15</SignalStrength>
# <IntMap_Count>0</IntMap_Count>
# <IntMap/>
# <SSIDGroups_Count>1</SSIDGroups_Count>
# <SSIDGroups>MyWLAN</SSIDGroups>
# <ACLGroups_Count>1</ACLGroups_Count>
# <ACLGroups>0</ACLGroups>
# </Threshold0>
# </Alarm0>
import os, argparse
import json
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import tostring
import xml.dom.minidom as minidom
def dictToXmlElement(tag, xmlDict):
    """Recursively convert a (possibly nested) dict into an XML Element.

    Returns None when xmlDict is empty or is not a dict at all.
    """
    if not isinstance(xmlDict, dict) or not xmlDict:
        return None
    root = Element(tag)
    for name, value in xmlDict.items():
        if isinstance(value, dict):
            # nested dict -> recurse, so any depth is supported
            node = dictToXmlElement(name, value)
        else:
            node = Element(name)
            node.text = str(value)
        root.append(node)
    return root
def readAci(fileName):
    """Parse an .aci file into a nested dict.

    Section headers like ``[\\PolicyManager\\Alarm0]`` open (and create on
    demand) nested dicts, one level per backslash-separated component.
    ``Key=X_value`` lines store the value with the two-character type
    prefix (e.g. ``D_`` for numbers, ``S_`` for strings) stripped.
    """
    xmlRoot = dict()
    with open(fileName) as f:
        # currNode tracks the dict for the most recent section header;
        # key=value lines before any header are ignored (just echoed)
        currNode = None
        for s in f:
            s = s.strip()
            #print s
            if s.startswith('[\\') and s.endswith(']'):
                s = s[1:-1].strip()
                if s == "":
                    currNode = None
                    continue
                xmlKeys = s.split('\\')
                currNode = xmlRoot
                for key in xmlKeys:
                    if key == "":
                        continue
                    if not key in currNode:
                        currNode[key] = dict()
                    currNode = currNode[key]
            elif '=' in s:
                if currNode == None:
                    print s
                else:
                    pos = s.find('=')
                    key = s[0:pos]
                    # pos+3 skips the '=' plus the two-character type
                    # prefix ("D_"/"S_") in front of the actual value
                    value = s[pos+3:]
                    currNode[key] = value
    return xmlRoot
def writePolicyManagerXml(xmlDict, fileName):
    '''
    Convert the "PolicyManager" branch of the parsed aci dict to a
    pretty-printed XML file. Does nothing if that branch is absent.
    '''
    if 'PolicyManager' in xmlDict:
        xmlElem = dictToXmlElement('PolicyManager', xmlDict['PolicyManager'])
        # round-trip through minidom purely for indentation/pretty-printing
        xmlString = tostring(xmlElem)
        reparsed = minidom.parseString(xmlString)
        with open(fileName, 'wb') as f:
            reparsed.writexml(f, indent="\t", addindent="\t", newl="\n")
        print 'Policy written to:', fileName
def main():
#parser = argparse.ArgumentParser(description='Convert the policy manager related section in a .aci file to xml file.')
#parser.add_argument('aciFile', type=str, help='ACI file name', nargs='?', default='./config/Default.aci')
#parser.add_argument('xmlFile', type=str, help='XML file name', nargs='?', default='./config/Default.xml')
#args = parser.parse_args()
aciFile = './config/Default.aci'
xmlFile = './config/Default.xml'
print 'Converting', aciFile, '->', xmlFile
xmlDict = readAci(aciFile)
if not xmlDict:
print 'Can not open the xml file or it is empty:', xmlFile
writePolicyManagerXml(xmlDict, xmlFile)
print 'Done!'
if __name__ == '__main__':
    # script entry point
    main()
| 31.774436 | 124 | 0.566493 |
a4512d1248c4c51a27a30daebbea403195fa077c | 1,644 | py | Python | python/getoracleprice.py | buzzkillb/Fantom-Tools | 2853cfca0b7eb4b710bf7a423dd9153649000fd6 | [
"MIT"
] | 1 | 2021-10-02T10:30:54.000Z | 2021-10-02T10:30:54.000Z | python/getoracleprice.py | buzzkillb/Fantom-Tools | 2853cfca0b7eb4b710bf7a423dd9153649000fd6 | [
"MIT"
] | null | null | null | python/getoracleprice.py | buzzkillb/Fantom-Tools | 2853cfca0b7eb4b710bf7a423dd9153649000fd6 | [
"MIT"
] | null | null | null | from web3 import Web3
import json
# Grab Prices from Syfin Price Oracle Contract
# https://syfinance.gitbook.io/sy-finance/syf-price-oracle
# Public RPC's
# https://rpcapi.fantom.network
# https://rpc.ftm.tools/
rpc_url = "https://rpc.ftm.tools/"
web3 = Web3(Web3.HTTPProvider(rpc_url))
syfinPriceOracleContract = "0x8fBE84d284D1614eaDc50EE69120Ec4f7f98cEd8"
syfinPriceOracleABI = json.loads('[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"name":"getLatestFTMPrice","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"pairAddress","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"getLatestTokenPrice","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]')
# Returns FTM Price in USD formatted for / 1e8
syfinPriceOracle = web3.eth.contract(address=syfinPriceOracleContract, abi=syfinPriceOracleABI)
latestFTMPrice = syfinPriceOracle.functions.getLatestFTMPrice().call()
print(latestFTMPrice/100000000)
# Returns token 0 current amount vs token 1 from the Liquidity Pool address and amount of coin, recommended 1.
# Any LP pair address that is based off Uniswap V2 can be queried from this function.
# USDC-FTM 0x2b4C76d0dc16BE1C31D4C1DC53bF9B45987Fc75c
# Must be Checksummed address!!!
amountOfCoin = 1
pairContract = "0x2b4C76d0dc16BE1C31D4C1DC53bF9B45987Fc75c"
latestPairPrice = syfinPriceOracle.functions.getLatestTokenPrice(pairContract, amountOfCoin).call()
print(web3.fromWei(latestPairPrice, 'mwei'))
| 56.689655 | 527 | 0.768248 |
a96496be7f45432906ca774b0dbcf8c965b3bacd | 51,025 | py | Python | src/prefect/core/task.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | [
"Apache-2.0"
] | 8,633 | 2019-03-23T17:51:03.000Z | 2022-03-31T22:17:42.000Z | src/prefect/core/task.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | [
"Apache-2.0"
] | 3,903 | 2019-03-23T19:11:21.000Z | 2022-03-31T23:21:23.000Z | src/prefect/core/task.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | [
"Apache-2.0"
] | 937 | 2019-03-23T18:49:44.000Z | 2022-03-31T21:45:13.000Z | import collections.abc
import copy
import enum
import functools
import inspect
import typing
import warnings
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Union,
Tuple,
)
from collections import defaultdict
import prefect
import prefect.engine.cache_validators
import prefect.engine.signals
import prefect.triggers
from prefect.utilities import logging
from prefect.utilities.notifications import callback_factory
from prefect.utilities.edges import EdgeAnnotation
if TYPE_CHECKING:
from prefect.core.flow import Flow
from prefect.engine.result import Result
from prefect.engine.state import State
from prefect.core import Edge
VAR_KEYWORD = inspect.Parameter.VAR_KEYWORD
# A sentinel value indicating no default was provided
# mypy requires enums for typed sentinel values, so other
# simpler solutions won't work :/
class NoDefault(enum.Enum):
    # the single enum member used as the sentinel value
    value = "no_default"

    def __repr__(self) -> str:
        return "<no default>"
def _validate_run_signature(run: Callable) -> None:
func = inspect.unwrap(run)
try:
run_sig = inspect.getfullargspec(func)
except TypeError as exc:
if str(exc) == "unsupported callable":
raise ValueError(
"This function can not be inspected (this is common "
"with `builtin` and `numpy` functions). In order to "
"use it as a task, please wrap it in a standard "
"Python function. For more detail, see "
"https://docs.prefect.io/core/advanced_tutorials/task-guide.html#the-task-decorator"
) from exc
raise
if run_sig.varargs:
raise ValueError(
"Tasks with variable positional arguments (*args) are not "
"supported, because all Prefect arguments are stored as "
"keywords. As a workaround, consider modifying the run() "
"method to accept **kwargs and feeding the values "
"to *args."
)
reserved_kwargs = ["upstream_tasks", "mapped", "task_args", "flow"]
violations = [kw for kw in reserved_kwargs if kw in run_sig.args]
if violations:
msg = "Tasks cannot have the following argument names: {}.".format(
", ".join(violations)
)
msg += " These are reserved keyword arguments."
raise ValueError(msg)
def _infer_run_nout(run: Callable) -> Optional[int]:
"""Infer the number of outputs for a callable from its type annotations.
Returns `None` if infererence failed, or if the type has variable-length.
"""
try:
ret_type = inspect.signature(run).return_annotation
except Exception:
return None
if ret_type is inspect.Parameter.empty:
return None
# New in python 3.8
if hasattr(typing, "get_origin"):
origin = typing.get_origin(ret_type)
else:
origin = getattr(ret_type, "__origin__", None)
if origin in (typing.Tuple, tuple):
# Plain Tuple is a variable-length tuple
if ret_type in (typing.Tuple, tuple):
return None
# New in python 3.8
if hasattr(typing, "get_args"):
args = typing.get_args(ret_type)
else:
args = getattr(ret_type, "__args__", ())
# Empty tuple type has a type arg of the empty tuple
if len(args) == 1 and args[0] == ():
return 0
# Variable-length tuples have Ellipsis as the 2nd arg
if len(args) == 2 and args[1] == Ellipsis:
return None
# All other Tuples are length-of args
return len(args)
return None
class TaskMetaclass(type):
    """A metaclass for enforcing two checks on a task:

    - Checks that the `run` method has a valid signature
    - Adds a check to the `__init__` method that no tasks are passed as arguments
    """

    def __new__(cls, name: str, parents: tuple, methods: dict) -> "TaskMetaclass":
        # validate `run` at class-creation time, so a bad signature fails
        # fast -- before any instance is ever constructed
        run = methods.get("run", lambda: None)
        _validate_run_signature(run)

        if "__init__" in methods:
            old_init = methods["__init__"]

            # Theoretically we could do this by defining a `__new__` method for
            # the `Task` class that handles this check, but unfortunately if a
            # class defines `__new__`, `inspect.signature` will use the
            # signature for `__new__` regardless of the signature for
            # `__init__`. This basically kills all type completions or type
            # hints for the `Task` constructors. As such, we handle it in the
            # metaclass
            @functools.wraps(old_init)
            def init(self: Any, *args: Any, **kwargs: Any) -> None:
                # warn (but do not fail) when a Task instance is passed to a
                # constructor -- usually the user meant to *call* the
                # initialized task with those arguments instead
                if any(isinstance(a, Task) for a in args + tuple(kwargs.values())):
                    cls_name = type(self).__name__
                    warnings.warn(
                        f"A Task was passed as an argument to {cls_name}, you likely want to "
                        f"first initialize {cls_name} with any static (non-Task) arguments, "
                        "then call the initialized task with any dynamic (Task) arguments instead. "
                        "For example:\n\n"
                        f"  my_task = {cls_name}(...) # static (non-Task) args go here\n"
                        f"  res = my_task(...) # dynamic (Task) args go here\n\n"
                        "see https://docs.prefect.io/core/concepts/flows.html#apis for more info.",
                        stacklevel=2,
                    )
                old_init(self, *args, **kwargs)

            methods = methods.copy()
            methods["__init__"] = init

        # necessary to ensure classes that inherit from parent class
        # also get passed through __new__
        return type.__new__(cls, name, parents, methods)  # type: ignore

    @property
    def _reserved_attributes(self) -> Tuple[str]:
        """A tuple of attributes reserved for use by the `Task` class.

        Dynamically computed to make it easier to keep up to date. Lazily
        computed to avoid circular import issues.
        """
        if not hasattr(Task, "_cached_reserved_attributes"):
            # Create a base task instance to determine which attributes are reserved
            # we need to disable the unused_task_tracker for this duration or it will
            # track this task
            with prefect.context(_unused_task_tracker=set()):
                Task._cached_reserved_attributes = tuple(sorted(Task().__dict__))  # type: ignore
        return Task._cached_reserved_attributes  # type: ignore
class instance_property:
    """Like property, but only available on instances, not the class"""

    def __init__(self, func: Callable):
        self.func = func

    def __getattr__(self, k: str) -> Any:
        # delegate unknown attribute access (e.g. __name__, __doc__) to the
        # wrapped function
        return getattr(self.func, k)

    def __get__(self, obj: Any, cls: Any) -> Any:
        # compute the value on instance access; class-level access raises
        # AttributeError so the attribute is invisible on the class itself
        if obj is not None:
            return self.func(obj)
        raise AttributeError
class Task(metaclass=TaskMetaclass):
"""
The Task class which is used as the full representation of a unit of work.
This Task class can be used directly as a first class object where it must
be inherited from by a class that implements the `run` method. For a more
functional way of generating Tasks, see [the task decorator](../utilities/tasks.html).
Inheritance example:
```python
class AddTask(Task):
def run(self, x, y):
return x + y
```
*Note:* The implemented `run` method cannot have `*args` in its signature. In addition,
the following keywords are reserved: `upstream_tasks`, `task_args` and `mapped`.
An instance of a `Task` can be used functionally to generate other task instances
with the same attributes but with different values bound to their `run` methods.
Example:
```python
class AddTask(Task):
def run(self, x, y):
return x + y
a = AddTask()
with Flow("My Flow") as f:
t1 = a(1, 2) # t1 != a
t2 = a(5, 7) # t2 != a
```
To bind values to a Task's run method imperatively (and without making a copy), see `Task.bind`.
Args:
- name (str, optional): The name of this task
- slug (str, optional): The slug for this task. Slugs provide a stable ID for tasks so
that the Prefect API can identify task run states. If a slug is not provided, one
will be generated automatically once the task is added to a Flow.
- tags ([str], optional): A list of tags for this task
- max_retries (int, optional): The maximum amount of times this task can be retried
- retry_delay (timedelta, optional): The amount of time to wait until task is retried
- timeout (Union[int, timedelta], optional): The amount of time (in seconds) to wait while
running this task before a timeout occurs; note that sub-second
resolution is not supported, even when passing in a timedelta.
- trigger (callable, optional): a function that determines whether the
task should run, based on the states of any upstream tasks.
- skip_on_upstream_skip (bool, optional): if `True`, if any immediately
upstream tasks are skipped, this task will automatically be skipped as
well, regardless of trigger. By default, this prevents tasks from
attempting to use either state or data from tasks that didn't run. If
`False`, the task's trigger will be called as normal, with skips
considered successes. Defaults to `True`.
- cache_for (timedelta, optional): The amount of time to maintain a cache
of the outputs of this task. Useful for situations where the containing Flow
will be rerun multiple times, but this task doesn't need to be.
- cache_validator (Callable, optional): Validator that will determine
whether the cache for this task is still valid (only required if `cache_for`
is provided; defaults to `prefect.engine.cache_validators.duration_only`)
- cache_key (str, optional): if provided, a `cache_key`
serves as a unique identifier for this Task's cache, and can be shared
across both Tasks _and_ Flows; if not provided, the Task's _name_ will
be used if running locally, or the Task's database ID if running in
Cloud
- checkpoint (bool, optional): if this Task is successful, whether to
store its result using the configured result available during the run;
Also note that checkpointing will only occur locally if
`prefect.config.flows.checkpointing` is set to `True`
- result (Result, optional): the result instance used to retrieve and
store task results during execution
- target (Union[str, Callable], optional): location to check for task Result. If a result
exists at that location then the task run will enter a cached state.
`target` strings can be templated formatting strings which will be
formatted at runtime with values from `prefect.context`. If a callable function
is provided, it should have signature `callable(**kwargs) -> str` and at write
time all formatting kwargs will be passed and a fully formatted location is
expected as the return value. The callable can be used for string formatting logic that
`.format(**kwargs)` doesn't support.
- state_handlers (Iterable[Callable], optional): A list of state change handlers
that will be called whenever the task changes state, providing an
opportunity to inspect or modify the new state. The handler
will be passed the task instance, the old (prior) state, and the new
(current) state, with the following signature:
`state_handler(task: Task, old_state: State, new_state: State) -> Optional[State]`
If multiple functions are passed, then the `new_state` argument will be the
result of the previous handler.
- on_failure (Callable, optional): A function with signature
`fn(task: Task, state: State) -> None` that will be called anytime this
Task enters a failure state
- log_stdout (bool, optional): Toggle whether or not to send stdout messages to
the Prefect logger. Defaults to `False`.
- task_run_name (Union[str, Callable], optional): a name to set for this task at runtime.
`task_run_name` strings can be templated formatting strings which will be
formatted at runtime with values from task arguments, `prefect.context`, and flow
parameters (in the case of a name conflict between these, earlier values take precedence).
If a callable function is provided, it should have signature `callable(**kwargs) -> str`
and at write time all formatting kwargs will be passed and a fully formatted location is
expected as the return value. The callable can be used for string formatting logic that
`.format(**kwargs)` doesn't support. **Note**: this only works for tasks running against a
backend API.
- nout (int, optional): for tasks that return multiple results, the number of outputs
to expect. If not provided, will be inferred from the task return annotation, if
possible. Note that `nout=1` implies the task returns a tuple of
one value (leave as `None` for non-tuple return types).
Raises:
- TypeError: if `tags` is of type `str`
- TypeError: if `timeout` is not of type `int`
"""
    def __init__(
        self,
        name: str = None,
        slug: str = None,
        tags: Iterable[str] = None,
        max_retries: int = None,
        retry_delay: timedelta = None,
        timeout: Union[int, timedelta] = None,
        trigger: "Callable[[Dict[Edge, State]], bool]" = None,
        skip_on_upstream_skip: bool = True,
        cache_for: timedelta = None,
        cache_validator: Callable = None,
        cache_key: str = None,
        checkpoint: bool = None,
        state_handlers: List[Callable] = None,
        on_failure: Callable = None,
        log_stdout: bool = False,
        result: "Result" = None,
        target: Union[str, Callable] = None,
        task_run_name: Union[str, Callable] = None,
        nout: int = None,
    ):
        """Initialize task configuration; see the class docstring for a full
        description of every argument."""
        # warn subclasses that shadow attributes Task itself will set below
        if type(self) is not Task:
            for attr in Task._reserved_attributes:
                if hasattr(self, attr):
                    warnings.warn(
                        f"`{type(self).__name__}` sets a `{attr}` attribute, which "
                        "will be overwritten by `prefect.Task`. Please rename this "
                        "attribute to avoid this issue."
                    )

        self.name = name or type(self).__name__
        self.slug = slug
        self.logger = logging.get_logger(self.name)

        # avoid silently iterating over a string
        if isinstance(tags, str):
            raise TypeError("Tags should be a set of tags, not a string.")
        # merge explicit tags with any tags active in the current context
        current_tags = set(prefect.context.get("tags", set()))
        self.tags = (set(tags) if tags is not None else set()) | current_tags

        # fall back to configured defaults when arguments were not given
        max_retries = (
            max_retries
            if max_retries is not None
            else prefect.config.tasks.defaults.max_retries
        )
        retry_delay = (
            retry_delay
            if retry_delay is not None or not max_retries
            else prefect.config.tasks.defaults.retry_delay
        )
        timeout = (
            timeout if timeout is not None else prefect.config.tasks.defaults.timeout
        )

        if max_retries > 0 and retry_delay is None:
            raise ValueError(
                "A datetime.timedelta `retry_delay` must be provided if max_retries > 0"
            )
        # specify not max retries because the default is false
        if retry_delay is not None and not max_retries:
            raise ValueError(
                "A `max_retries` argument greater than 0 must be provided if specifying "
                "a retry delay."
            )
        # Make sure timeout is an integer in seconds
        if isinstance(timeout, timedelta):
            if timeout.microseconds > 0:
                warnings.warn(
                    "Task timeouts do not support a sub-second resolution; "
                    "smaller units will be ignored!",
                    stacklevel=2,
                )
            timeout = int(timeout.total_seconds())
        if timeout is not None and not isinstance(timeout, int):
            raise TypeError(
                "Only integer timeouts (representing seconds) are supported."
            )
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        self.timeout = timeout

        self.trigger = trigger or prefect.triggers.all_successful
        self.skip_on_upstream_skip = skip_on_upstream_skip

        # a validator without an expiration (cache_for) would never be used
        if cache_for is None and (
            cache_validator is not None
            and cache_validator is not prefect.engine.cache_validators.never_use
        ):
            warnings.warn(
                "cache_validator provided without specifying cache expiration "
                "(cache_for); this Task will not be cached.",
                stacklevel=2,
            )

        self.cache_for = cache_for
        self.cache_key = cache_key
        default_validator = (
            prefect.engine.cache_validators.never_use
            if cache_for is None
            else prefect.engine.cache_validators.duration_only
        )
        self.cache_validator = cache_validator or default_validator

        self.checkpoint = checkpoint
        self.result = result

        self.target = target

        # if both a target and a result were provided, update the result location
        # to point at the target
        if self.target and self.result:
            if (
                getattr(self.result, "location", None)
                and self.result.location != self.target
            ):
                warnings.warn(
                    "Both `result.location` and `target` were provided. "
                    "The `target` value will be used.",
                    stacklevel=2,
                )
            self.result = self.result.copy()
            self.result.location = self.target  # type: ignore

        self.task_run_name = task_run_name  # type: ignore

        if state_handlers and not isinstance(state_handlers, collections.abc.Sequence):
            raise TypeError("state_handlers should be iterable.")
        self.state_handlers = state_handlers or []
        # `on_failure` is sugar for a state handler that fires on failed states
        if on_failure is not None:
            self.state_handlers.append(
                callback_factory(on_failure, check=lambda s: s.is_failed())
            )
        self.auto_generated = False

        self.log_stdout = log_stdout

        # infer `nout` from the run() return annotation when not given
        if nout is None:
            nout = _infer_run_nout(self.run)
        self.nout = nout

        # if new task creations are being tracked, add this task
        # this makes it possible to give guidance to users that forget
        # to add tasks to a flow
        if "_unused_task_tracker" in prefect.context:
            if not isinstance(self, prefect.tasks.core.constants.Constant):
                prefect.context._unused_task_tracker.add(self)
def __repr__(self) -> str:
return "<Task: {self.name}>".format(self=self)
    # reimplement __hash__ because we override __eq__
    def __hash__(self) -> int:
        # identity-based hash: each task object hashes as itself
        return id(self)
    # Run  --------------------------------------------------------------------

    def run(self) -> None:
        """
        The `run()` method is called (with arguments, if appropriate) to run a task.

        *Note:* The implemented `run` method cannot have `*args` in its signature. In addition,
        the following keywords are reserved: `upstream_tasks`, `task_args` and `mapped`.

        If a task has arguments in its `run()` method, these can be bound either by using the
        functional API and _calling_ the task instance, or by using `self.bind` directly.

        In addition to running arbitrary functions, tasks can interact with Prefect in a few ways:
        <ul><li> Return an optional result. When this function runs successfully,
            the task is considered successful and the result (if any) can be
            made available to downstream tasks. </li>
        <li> Raise an error. Errors are interpreted as failure. </li>
        <li> Raise a [signal](../engine/signals.html). Signals can include `FAIL`, `SUCCESS`,
            `RETRY`, `SKIP`, etc. and indicate that the task should be put in the indicated state.
                <ul>
                <li> `FAIL` will lead to retries if appropriate </li>
                <li> `SUCCESS` will cause the task to be marked successful </li>
                <li> `RETRY` will cause the task to be marked for retry, even if `max_retries`
                    has been exceeded </li>
                <li> `SKIP` will skip the task and possibly propagate the skip state through the
                    flow, depending on whether downstream tasks have `skip_on_upstream_skip=True`.
                </li></ul>
        </li></ul>
        """
    # Dependencies -------------------------------------------------------------

    def copy(self, **task_args: Any) -> "Task":
        """
        Creates and returns a copy of the current Task.

        Args:
            - **task_args (dict, optional): a dictionary of task attribute keyword arguments,
                these attributes will be set on the new copy

        Raises:
            - AttributeError: if any passed `task_args` are not attributes of the original

        Returns:
            - Task: a copy of the current Task, with any attributes updated from `task_args`
        """

        # copying inside an active flow context drops any edges already
        # attached to this task -- warn so the user isn't surprised
        flow = prefect.context.get("flow", None)
        if (
            flow
            and self in flow.tasks
            and (flow.edges_to(self) or flow.edges_from(self))
        ):
            warnings.warn(
                "You are making a copy of a task that has dependencies on or to other tasks "
                "in the active flow context. The copy will not retain those dependencies.",
                stacklevel=2,
            )

        # shallow copy; selected attributes are adjusted below
        new = copy.copy(self)

        if new.slug and "slug" not in task_args:
            new.slug = new.slug + "-copy" if False else new.slug  # noqa: see below
        # NOTE: the line above is never taken; the real slug handling is:
        if new.slug and "slug" not in task_args:
            task_args["slug"] = new.slug + "-copy"

        # check task_args
        for attr, val in task_args.items():
            if not hasattr(new, attr):
                raise AttributeError(
                    "{0} does not have {1} as an attribute".format(self, attr)
                )
            else:
                setattr(new, attr, val)

        # if both a target and a result were provided, update the result location
        # to point at the target
        if new.target and new.result:
            if (
                getattr(new.result, "location", None)
                and new.result.location != new.target
            ):
                warnings.warn(
                    "Both `result.location` and `target` were provided. "
                    "The `target` value will be used.",
                    stacklevel=2,
                )
            new.result = new.result.copy()
            new.result.location = new.target  # type: ignore

        # copies inherit this task's tags plus any tags in the current context
        new.tags = copy.deepcopy(self.tags).union(set(new.tags))
        tags = set(prefect.context.get("tags", set()))
        new.tags.update(tags)

        # if new task creations are being tracked, add this task
        # this makes it possible to give guidance to users that forget
        # to add tasks to a flow. We also remove the original task,
        # as it has been "interacted" with and don't want spurious
        # warnings
        if "_unused_task_tracker" in prefect.context:
            prefect.context._unused_task_tracker.discard(self)
            if not isinstance(new, prefect.tasks.core.constants.Constant):
                prefect.context._unused_task_tracker.add(new)

        return new
    @instance_property
    def __signature__(self) -> inspect.Signature:
        """Dynamically generate the signature, replacing ``*args``/``**kwargs``
        with parameters from ``run``"""
        # computed lazily and memoized on the instance
        if not hasattr(self, "_cached_signature"):
            sig = inspect.Signature.from_callable(self.run)
            parameters = list(sig.parameters.values())
            # bucket run()'s parameters by kind so the extra Prefect call
            # parameters can be merged in as keyword-only arguments
            parameters_by_kind = defaultdict(list)
            for parameter in parameters:
                parameters_by_kind[parameter.kind].append(parameter)
            parameters_by_kind[inspect.Parameter.KEYWORD_ONLY].extend(
                EXTRA_CALL_PARAMETERS
            )

            # re-emit parameters in the order Python requires
            ordered_parameters = []
            ordered_kinds = (
                inspect.Parameter.POSITIONAL_ONLY,
                inspect.Parameter.POSITIONAL_OR_KEYWORD,
                inspect.Parameter.VAR_POSITIONAL,
                inspect.Parameter.KEYWORD_ONLY,
                inspect.Parameter.VAR_KEYWORD,
            )
            for kind in ordered_kinds:
                ordered_parameters.extend(parameters_by_kind[kind])

            self._cached_signature = inspect.Signature(
                parameters=ordered_parameters, return_annotation="Task"
            )
        return self._cached_signature
def __call__(
self,
*args: Any,
mapped: bool = False,
task_args: dict = None,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
**kwargs: Any,
) -> "Task":
"""
Calling a Task instance will first create a _copy_ of the instance, and then
bind any passed `args` / `kwargs` to the run method of the copy. This new task
is then returned.
Args:
- *args: arguments to bind to the new Task's `run` method
- **kwargs: keyword arguments to bind to the new Task's `run` method
- mapped (bool, optional): Whether the results of these tasks should be mapped over
with the specified keyword arguments; defaults to `False`.
If `True`, any arguments contained within a `prefect.utilities.edges.unmapped`
container will _not_ be mapped over.
- task_args (dict, optional): a dictionary of task attribute keyword arguments,
these attributes will be set on the new copy
- upstream_tasks ([Task], optional): a list of upstream dependencies
for the new task. This kwarg can be used to functionally specify
dependencies without binding their result to `run()`
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
Returns:
- Task: a new Task instance
"""
new = self.copy(**(task_args or {}))
new.bind(
*args, mapped=mapped, upstream_tasks=upstream_tasks, flow=flow, **kwargs
)
return new
    def bind(
        self,
        *args: Any,
        mapped: bool = False,
        upstream_tasks: Iterable[Any] = None,
        flow: "Flow" = None,
        **kwargs: Any,
    ) -> "Task":
        """
        Binding a task to (keyword) arguments creates a _keyed_ edge in the active Flow
        that will pass data from the arguments (whether Tasks or constants) to the
        Task's `run` method under the appropriate key. Once a Task is bound in this
        manner, the same task instance cannot be bound a second time in the same Flow.
        To bind arguments to a _copy_ of this Task instance, see `__call__`.
        Additionally, non-keyed edges can be created by passing any upstream
        dependencies through `upstream_tasks`.
        Args:
            - *args: arguments to bind to the current Task's `run` method
            - mapped (bool, optional): Whether the results of these tasks should be mapped over
                with the specified keyword arguments; defaults to `False`.
                If `True`, any arguments contained within a `prefect.utilities.edges.unmapped`
                container will _not_ be mapped over.
            - upstream_tasks ([Task], optional): a list of upstream dependencies for the
                current task.
            - flow (Flow, optional): The flow to set dependencies on, defaults to the current
                flow in context if no flow is specified
            - **kwargs: keyword arguments to bind to the current Task's `run` method
        Returns:
            - Task: the current Task instance
        """
        # this will raise an error if callargs weren't all provided
        signature = inspect.signature(self.run)
        callargs = dict(signature.bind(*args, **kwargs).arguments)  # type: Dict
        # bind() compresses all variable keyword arguments under the ** argument name,
        # so we expand them explicitly
        var_kw_arg = next(
            (p for p in signature.parameters.values() if p.kind == VAR_KEYWORD), None
        )
        if var_kw_arg:
            callargs.update(callargs.pop(var_kw_arg.name, {}))
        # Resolve the target flow: an explicit argument wins, otherwise fall back
        # to whatever flow is active in the Prefect context.
        flow = flow or prefect.context.get("flow", None)
        if not flow:
            # Determine the task name to display which is either the function task name
            # or the initialized class where we can't know the name of the variable
            task_name = (
                self.name
                if isinstance(self, prefect.tasks.core.function.FunctionTask)
                else f"{type(self).__name__}(...)"
            )
            raise ValueError(
                f"Could not infer an active Flow context while creating edge to {self}."
                " This often means you called a task outside a `with Flow(...)` block. "
                "If you're trying to run this task outside of a Flow context, you "
                f"need to call `{task_name}.run(...)`"
            )
        # Register each resolved call argument as a keyed upstream edge.
        self.set_dependencies(
            flow=flow,
            upstream_tasks=upstream_tasks,
            keyword_tasks=callargs,
            mapped=mapped,
        )
        # Inherit any tags from the active context onto this task.
        tags = set(prefect.context.get("tags", set()))
        self.tags.update(tags)
        return self
def map(
self,
*args: Any,
upstream_tasks: Iterable[Any] = None,
flow: "Flow" = None,
task_args: dict = None,
**kwargs: Any,
) -> "Task":
"""
Map the Task elementwise across one or more Tasks. Arguments that should _not_ be
mapped over should be placed in the `prefect.utilities.edges.unmapped` container.
For example:
```
task.map(x=X, y=unmapped(Y))
```
will map over the values of `X`, but not over the values of `Y`
Args:
- *args: arguments to map over, which will elementwise be bound to the Task's `run`
method
- upstream_tasks ([Task], optional): a list of upstream dependencies
to map over
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- task_args (dict, optional): a dictionary of task attribute keyword arguments,
these attributes will be set on the new copy
- **kwargs: keyword arguments to map over, which will elementwise be bound to the
Task's `run` method
Raises:
- AttributeError: if any passed `task_args` are not attributes of the original
Returns:
- Task: a new Task instance
"""
for arg in args:
if not hasattr(arg, "__getitem__") and not isinstance(arg, EdgeAnnotation):
raise TypeError(
"Cannot map over unsubscriptable object of type {t}: {preview}...".format(
t=type(arg), preview=repr(arg)[:10]
)
)
task_args = task_args.copy() if task_args else {}
task_args.setdefault("nout", None)
new = self.copy(**task_args)
return new.bind(
*args, mapped=True, upstream_tasks=upstream_tasks, flow=flow, **kwargs
)
def set_dependencies(
self,
flow: "Flow" = None,
upstream_tasks: Iterable[object] = None,
downstream_tasks: Iterable[object] = None,
keyword_tasks: Mapping[str, object] = None,
mapped: bool = False,
validate: bool = None,
) -> "Task":
"""
Set dependencies for a flow either specified or in the current context using this task
Args:
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- upstream_tasks ([object], optional): A list of upstream tasks for this task
- downstream_tasks ([object], optional): A list of downtream tasks for this task
- keyword_tasks ({str, object}}, optional): The results of these tasks will be provided
to this task under the specified keyword arguments.
- mapped (bool, optional): Whether the results of the _upstream_ tasks should be
mapped over with the specified keyword arguments
- validate (bool, optional): Whether or not to check the validity of the flow. If not
provided, defaults to the value of `eager_edge_validation` in your Prefect
configuration file.
Returns:
- self
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
flow = flow or prefect.context.get("flow", None)
if not flow:
raise ValueError(
"No Flow was passed, and could not infer an active Flow context."
)
flow.set_dependencies(
task=self,
upstream_tasks=upstream_tasks,
downstream_tasks=downstream_tasks,
keyword_tasks=keyword_tasks,
validate=validate,
mapped=mapped,
)
return self
def set_upstream(
self, task: object, flow: "Flow" = None, key: str = None, mapped: bool = False
) -> "Task":
"""
Sets the provided task as an upstream dependency of this task.
Args:
- task (object): A task or object that will be converted to a task that will be set
as a upstream dependency of this task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- key (str, optional): The key to be set for the new edge; the result of the
upstream task will be passed to this task's `run()` method under this keyword
argument.
- mapped (bool, optional): Whether this dependency is mapped; defaults to `False`
Returns:
- self
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
if key is not None:
keyword_tasks = {key: task}
self.set_dependencies(flow=flow, keyword_tasks=keyword_tasks, mapped=mapped)
else:
self.set_dependencies(flow=flow, upstream_tasks=[task], mapped=mapped)
return self
def set_downstream(
self, task: "Task", flow: "Flow" = None, key: str = None, mapped: bool = False
) -> "Task":
"""
Sets the provided task as a downstream dependency of this task.
Args:
- task (Task): A task that will be set as a downstream dependency of this task.
- flow (Flow, optional): The flow to set dependencies on, defaults to the current
flow in context if no flow is specified
- key (str, optional): The key to be set for the new edge; the result of this task
will be passed to the downstream task's `run()` method under this keyword argument.
- mapped (bool, optional): Whether this dependency is mapped; defaults to `False`
Returns:
- self
Raises:
- ValueError: if no flow is specified and no flow can be found in the current context
"""
if key is not None:
keyword_tasks = {key: self}
task.set_dependencies( # type: ignore
flow=flow, keyword_tasks=keyword_tasks, mapped=mapped
) # type: ignore
else:
task.set_dependencies(flow=flow, upstream_tasks=[self], mapped=mapped)
return self
def inputs(self) -> Dict[str, Dict]:
"""
Describe the inputs for this task. The result is a dictionary that maps each input to
a `type`, `required`, and `default`. All values are inferred from the `run()`
signature; this method can be overloaded for more precise control.
Returns:
- dict
"""
inputs = {}
for name, parameter in inspect.signature(self.run).parameters.items():
input_type = parameter.annotation
if input_type is inspect._empty: # type: ignore
input_type = Any
input_default = parameter.default
input_required = False
if input_default is inspect._empty: # type: ignore
input_required = True
input_default = None
inputs[name] = dict(
type=input_type, default=input_default, required=input_required
)
return inputs
def outputs(self) -> Any:
"""
Get the output types for this task.
Returns:
- Any
"""
return_annotation = inspect.signature(self.run).return_annotation
if return_annotation is inspect._empty: # type: ignore
return_annotation = Any
return return_annotation
# Serialization ------------------------------------------------------------
def serialize(self) -> Dict[str, Any]:
"""
Creates a serialized representation of this task
Returns:
- dict representing this task
"""
return prefect.serialization.task.TaskSchema().dump(self)
# Operators ----------------------------------------------------------------
def is_equal(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self == other`
This can't be implemented as the __eq__() magic method because of Task
comparisons.
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Equal().bind(self, other)
def is_not_equal(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self != other`
This can't be implemented as the __neq__() magic method because of Task
comparisons.
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.NotEqual().bind(self, other)
def not_(self) -> "Task":
"""
Produces a Task that evaluates `not self`
Returns:
- Task
"""
return prefect.tasks.core.operators.Not().bind(self)
def or_(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self or other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Or().bind(self, other)
# Magic Method Interactions ----------------------------------------------------
def __iter__(self) -> Iterator:
if self.nout is None:
raise TypeError(
"Task is not iterable. If your task returns multiple results, "
"pass `nout` to the task decorator/constructor, or provide a "
"`Tuple` return-type annotation to your task."
)
return (self[i] for i in range(self.nout))
def __getitem__(self, key: Any) -> "Task":
"""
Produces a Task that evaluates `self[key]`
Args:
- key (object): the object to use as an index for this task. It will be converted
to a Task if it isn't one already.
Returns:
- Task
"""
if isinstance(key, Task):
name = f"{self.name}[{key.name}]"
else:
name = f"{self.name}[{key!r}]"
return prefect.tasks.core.operators.GetItem(
checkpoint=self.checkpoint, name=name, result=self.result
).bind(self, key)
def __or__(self, other: object) -> object:
"""
Creates a state dependency between `self` and `other`
`self | other --> self.set_dependencies(downstream_tasks=[other])`
Args:
- other (object): An object that will be converted to a Task (if it isn't one already)
and set as a downstream dependency of this Task.
Returns:
- Task
"""
self.set_dependencies(downstream_tasks=[other])
return other
def __mifflin__(self) -> None: # coverage: ignore
"Calls Dunder Mifflin"
import webbrowser
webbrowser.open("https://cicdw.github.io/welcome.html")
def __ror__(self, other: object) -> "Task":
"""
Creates a state dependency between `self` and `other`:
`other | self --> self.set_dependencies(upstream_tasks=[other])`
Args:
- other (object): An object that will be converted to a Task and set as an
upstream dependency of this Task.
Returns:
- Task
"""
self.set_dependencies(upstream_tasks=[other])
return self
# Magic Method Operators -----------------------------------------------------
def __add__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self + other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Add().bind(self, other)
def __sub__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self - other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Sub().bind(self, other)
def __mul__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self * other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mul().bind(self, other)
def __truediv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self / other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Div().bind(self, other)
def __floordiv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self // other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.FloorDiv().bind(self, other)
def __mod__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self % other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mod().bind(self, other)
def __pow__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self ** other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Pow().bind(self, other)
def __and__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self & other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.And().bind(self, other)
def __radd__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other + self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Add().bind(other, self)
def __rsub__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other - self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Sub().bind(other, self)
def __rmul__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other * self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mul().bind(other, self)
def __rtruediv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other / self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Div().bind(other, self)
def __rfloordiv__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other // self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.FloorDiv().bind(other, self)
def __rmod__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other % self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Mod().bind(other, self)
def __rpow__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other ** self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.Pow().bind(other, self)
def __rand__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `other & self`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.And().bind(other, self)
def __gt__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self > other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.GreaterThan().bind(self, other)
def __ge__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self >= other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.GreaterThanOrEqual().bind(self, other)
def __lt__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self < other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.LessThan().bind(self, other)
def __le__(self, other: object) -> "Task":
"""
Produces a Task that evaluates `self <= other`
Args:
- other (object): the other operand of the operator. It will be converted to a Task
if it isn't one already.
Returns:
- Task
"""
return prefect.tasks.core.operators.LessThanOrEqual().bind(self, other)
# All keyword-only arguments to Task.__call__; these are merged into the
# dynamically generated Signature objects for Task instances (see
# Task.__signature__ above).
EXTRA_CALL_PARAMETERS = [
    parameter
    for parameter in inspect.Signature.from_callable(Task.__call__).parameters.values()
    if parameter.kind == inspect.Parameter.KEYWORD_ONLY
]
# DEPRECATED - this is to allow backwards-compatible access to Parameters
# https://github.com/PrefectHQ/prefect/pull/2758
from .parameter import Parameter as _Parameter
class Parameter(_Parameter):
    # Deprecated alias retained so `from prefect.core.task import Parameter`
    # keeps working; warns callers to import `prefect.Parameter` instead.
    def __new__(cls, *args, **kwargs):  # type: ignore
        message = "`Parameter` has moved, please import as `prefect.Parameter`"
        # stacklevel=2 attributes the warning to the caller's line.
        warnings.warn(message, stacklevel=2)
        return super().__new__(cls)
| 38.480392 | 102 | 0.589005 |
403a7991da317baa154503a15d09da6e0085265f | 16,288 | py | Python | BioSTEAM 2.x.x/biorefineries/TAL/analyses/models.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 2 | 2020-01-03T21:04:41.000Z | 2020-01-09T01:15:48.000Z | BioSTEAM 2.x.x/biorefineries/TAL/analyses/models.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 6 | 2020-01-03T21:31:27.000Z | 2020-02-28T13:53:56.000Z | BioSTEAM 2.x.x/biorefineries/TAL/analyses/models.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 2 | 2020-01-07T14:04:06.000Z | 2020-01-08T23:05:25.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 23 12:11:15 2020
Modified from the cornstover biorefinery constructed in Cortes-Peña et al., 2020,
with modification of fermentation system for 2,3-Butanediol instead of the original ethanol
[1] Cortes-Peña et al., BioSTEAM: A Fast and Flexible Platform for the Design,
Simulation, and Techno-Economic Analysis of Biorefineries under Uncertainty.
ACS Sustainable Chem. Eng. 2020, 8 (8), 3302–3310.
https://doi.org/10.1021/acssuschemeng.9b07040.
All units are explicitly defined here for transparency and easy reference
@author: yalinli_cabbi
"""
# %%
# =============================================================================
# Setup
# =============================================================================
# import numpy as np
import biosteam as bst
from chaospy import distributions as shape
# from biosteam import main_flowsheet as find
from biosteam.evaluation import Model, Metric
# from biosteam.evaluation.evaluation_tools import Setter
from TAL.system_TAL_adsorption_glucose import TAL_sys, TAL_tea, u, s, unit_groups, spec, price
# get_annual_factor = lambda: TAL_tea._annual_factor
# Annual operating hours, derived from the TEA object's operating days.
get_annual_factor = lambda: TAL_tea.operating_days*24 # hours per year
_kg_per_ton = 907.18474
# Only feeds/products with a nonzero price participate in the economics.
system_feeds = [i for i in TAL_sys.feeds if i.price]
system_products = [i for i in TAL_sys.products if i.price]
# gypsum = find.stream.gypsum
# system_products.append(gypsum)
# Baseline fermentation specification (yield, titer, productivity) taken from
# the process-specification object imported from the system module.
baseline_yield, baseline_titer, baseline_productivity =\
    spec.baseline_yield, spec.baseline_titer, spec.baseline_productivity
# %%
# =============================================================================
# Overall biorefinery metrics
# =============================================================================
# Aliases for flowsheet streams (s) and units (u) used throughout this model.
feedstock = s.feedstock
TAL = s.SA
ethanol_fresh = s.ethanol_fresh
CSL_fresh = s.CSL_fresh
R302 = u.R302
R303 = u.R303
AC401 = u.AC401
# U401 = u.U401
F401 = u.F401
F402 = u.F402
# H403 = u.H403
# H402 = u.H402
T620 = u.T620
BT = u.BT701
# Wet-to-dry mass ratio of the feedstock; its price is quoted per dry kg.
_feedstock_factor = feedstock.F_mass / (feedstock.F_mass-feedstock.imass['Water'])
# Minimum selling price of TAL stream
def get_MSP():
    """Solve for the product's minimum selling price; iterated a few times so
    the price and the TEA cash flows converge."""
    for i in range(3):
        TAL.price = TAL_tea.solve_price(TAL,
                                        )
    return TAL.price
# Mass flow rate of TAL stream
get_yield = lambda: TAL.F_mass*get_annual_factor()/1e6
# Purity (%) of TAL in the final product
get_purity = lambda: TAL.imass['TAL']/TAL.F_mass
# Adjust for purity
get_adjusted_MSP = lambda: get_MSP() / get_purity()
get_adjusted_yield = lambda: get_yield() * get_purity()
# Recovery (%) = recovered/amount in fermentation broth
get_recovery = lambda: TAL.imol['TAL'] \
    /(R302.outs[0].imol['TAL'])
get_overall_TCI = lambda: TAL_tea.TCI/1e6
# Annual operating cost, note that AOC excludes electricity credit
get_overall_AOC = lambda: TAL_tea.AOC/1e6
get_material_cost = lambda: TAL_tea.material_cost/1e6
# Annual sale revenue from products, note that electricity credit is not included,
# but negative sales from waste disposal are included
# (i.e., wastes are products of negative selling price)
get_annual_sale = lambda: TAL_tea.sales/1e6
# System power usage, individual unit power usage should be positive
excess_power = lambda: (TAL_sys.power_utility.production-TAL_sys.power_utility.consumption)
electricity_price = bst.PowerUtility.price
# Electricity credit is positive if getting revenue from excess electricity
get_electricity_credit = lambda: (excess_power()*electricity_price*get_annual_factor())/1e6
# Biorefinery-level metrics evaluated for every Monte Carlo sample.
metrics = [Metric('Minimum selling price', get_MSP, '$/kg', 'Biorefinery'),
           Metric('Product yield', get_yield, '10^6 kg/yr', 'Biorefinery'),
           Metric('Product purity', get_purity, '%', 'Biorefinery'),
           Metric('Adjusted minimum selling price', get_adjusted_MSP, '$/kg', 'Biorefinery'),
           Metric('Adjusted product yield', get_adjusted_yield, '10^6 kg/yr', 'Biorefinery'),
           Metric('Product recovery', get_recovery, '%', 'Biorefinery'),
           Metric('Total capital investment', get_overall_TCI, '10^6 $', 'Biorefinery'),
           Metric('Annual operating cost', get_overall_AOC, '10^6 $/yr', 'Biorefinery'),
           Metric('Annual material cost', get_material_cost, '10^6 $/yr', 'Biorefinery'),
           Metric('Annual product sale', get_annual_sale, '10^6 $/yr', 'Biorefinery'),
           Metric('Annual electricity credit', get_electricity_credit, '10^6 $/yr', 'Biorefinery')
           ]
# To see if TEA converges well for each simulation
get_NPV = lambda: TAL_tea.NPV
metrics.extend((Metric('Net present value', get_NPV, '$', 'TEA'), ))
# for ug in unit_groups:
#     ug_metrics = ug.metrics
#     metrics.append(Metric(ug.name, ug.metrics[0], '10^6 $', 'Installed cost'))
#     metrics.append(Metric(ug.name, ug.metrics[5], 'USD/h', 'Material cost'))
#     metrics.append(Metric(ug.name, ug.metrics[1], 'GJ/h', 'Cooling duty'))
#     metrics.append(Metric(ug.name, ug.metrics[2], 'GJ/h', 'Heating duty'))
#     metrics.append(Metric(ug.name, ug.metrics[3], 'MW', 'Electricity usage'))
# Maps metric label -> (index into UnitGroup.metrics, display units); the loop
# below registers one per-unit-group metric for each label.
metrics_labels_dict = {
    'Installed cost':(0, '10^6 $'),
    'Material cost':(5,'USD/h'),
    'Cooling duty':(1,'GJ/h'),
    'Heating duty':(2,'GJ/h'),
    'Electricity usage':(3, 'MW'),
    }
for m, u_i in metrics_labels_dict.items():
    for ug in unit_groups:
        metrics.append(Metric(ug.name, ug.metrics[u_i[0]], u_i[1], m))
#%%
# =============================================================================
# Construct base model and add parameters
# =============================================================================
model = TAL_model = Model(TAL_sys, metrics)
param = model.parameter
# Uncertainty-distribution helpers: spread a baseline value by +/- `ratio`.
def baseline_uniform(baseline, ratio):
    # Uniform on [baseline*(1-ratio), baseline*(1+ratio)].
    return shape.Uniform(baseline*(1-ratio), baseline*(1+ratio))
def baseline_triangle(baseline, ratio):
    # Triangular with mode at the baseline and the same +/- ratio bounds.
    return shape.Triangle(baseline*(1-ratio), baseline, baseline*(1+ratio))
# A fake parameter serving as a "blank" in sensitivity analysis to capture
# fluctuations due to converging errors
D = baseline_uniform(1, 0.1)
@param(name='Blank parameter', element=feedstock, kind='coupled', units='',
       baseline=1, distribution=D)
def set_blank_parameter(anything):
    # This does nothing
    feedstock.T = feedstock.T
#%% ######################## TEA parameters ########################
# U101 = SSCF.U101
# D = baseline_uniform(2205, 0.1)
# @param(name='Feedstock flow rate', element=feedstock, kind='coupled', units='dry-ton/day',
#        baseline=2205, distribution=D)
# def set_feedstock_flow_rate(rate):
#     feedstock.mass *= rate / U101._cached_flow_rate
#     U101._cached_flow_rate = rate
D = shape.Triangle(0.84, 0.9, 0.96)
@param(name='Plant uptime', element='TEA', kind='isolated', units='%',
       baseline=0.9, distribution=D)
def set_plant_uptime(uptime):
    # Fraction of the year the plant operates.
    TAL_tea.operating_days = 365. * uptime
# D = baseline_triangle(1, 0.25)
# @param(name='TCI ratio', element='TEA', kind='isolated', units='% of baseline',
#        baseline=1, distribution=D)
# def set_TCI_ratio(new_ratio):
#     old_ratio = TAL_tea._TCI_ratio_cached
#     # old_ratio = TAL_no_CHP_tea._TCI_ratio_cached
#     for unit in TAL_sys.units:
#         if hasattr(unit, 'cost_items'):
#             for item in unit.cost_items:
#                 unit.cost_items[item].cost /= old_ratio
#                 unit.cost_items[item].cost *= new_ratio
#     TAL_tea._TCI_ratio_cached = new_ratio
#     # TAL_no_CHP_tea._TCI_ratio_cached = new_ratio
# Only include materials that account for >5% of total annual material cost,
# enzyme not included as it's cost is more affected by the loading (considered later)
D = shape.Triangle(0.8*price['Glucose'], price['Glucose'], 1.2*price['Glucose'])
@param(name='Feedstock unit price', element='TEA', kind='isolated', units='$/dry-kg',
       baseline=price['Glucose'], distribution=D)
def set_feedstock_price(f_price):
    # Price is sampled per dry kg; the stream price is per wet kg.
    feedstock.price = f_price / _feedstock_factor
D = shape.Triangle(0.198, 0.2527, 0.304)
@param(name='Natural gas unit price', element='TEA', kind='isolated', units='$/kg',
       baseline=0.2527, distribution=D)
def set_natural_gas_price(gas_price):
    BT.natural_gas_price = gas_price
D = shape.Triangle(0.067, 0.070, 0.074)
@param(name='Electricity unit price', element='TEA', kind='isolated', units='$/kWh',
       baseline=0.070, distribution=D)
def set_electricity_price(e_price):
    bst.PowerUtility.price = e_price
D = shape.Triangle(0.8*price['Activated carbon'], price['Activated carbon'], 1.2*price['Activated carbon'])
# NOTE(review): the declared baseline (41) is hard-coded while the distribution
# is built from price['Activated carbon'] — confirm these agree.
@param(name='Activated carbon unit price', element='TEA', kind='isolated', units='$/ft^3',
       baseline=41, distribution=D)
def set_adsorbent_price(ac_price):
    AC401.adsorbent_cost['Activated carbon'] = ac_price
# 2.2 is the average whole-sale ethanol price between 2010-2019 in 2016 $/gal
# based on Annual Energy Outlook (AEO) from Energy Information Adiministration (EIA)
# (https://www.eia.gov/outlooks/aeo/), which is $0.7328/gal and similar to the
# 2.2/(2988/1e3) = $0.736/gal based on a density of 2988 g/gal from H2 Tools
# Lower and upper bounds are $1.37/gal and $2.79/gal, or $0.460/kg and $0.978/kg
D = shape.Triangle(0.460, 0.7328, 0.978)
@param(name='Ethanol unit price', element='TEA', kind='isolated', units='$/kg',
       baseline=0.7328, distribution=D)
def set_ethanol_price(etoh_price):
    ethanol_fresh.price = etoh_price
#%% ######################## Conversion parameters ########################
# Fermentation
D = shape.Triangle(5, 10, 15)
@param(name='CSL loading', element=R302, kind='coupled', units='g/L',
       baseline=10, distribution=D)
def set_CSL_loading(loading):
    # Corn steep liquor nutrient loading in the fermenter.
    R302.CSL_loading = loading
R302 = u.R302  # NOTE(review): redundant re-alias; R302 is already bound above.
# 1e-6 is to avoid generating tiny negative flow (e.g., 1e-14)
D = shape.Triangle(0.9, 0.95, 1-1e-6)
@param(name='Seed train fermentation ratio', element=R302, kind='coupled', units='%',
       baseline=0.95, distribution=D)
def set_ferm_ratio(ratio):
    # Seed-train performance relative to the main fermenter.
    R303.ferm_ratio = ratio
### Fermentation
# The three fermentation specs (yield/titer/productivity) are stored on the
# ProcessSpecification object; presumably they are loaded together by the
# evaluation machinery before simulation — confirm against spec's implementation.
D = shape.Triangle(baseline_yield*0.8, baseline_yield, baseline_yield*1.2)
@param(name='TAL yield', element=R302, kind='coupled', units='g/g',
       baseline=baseline_yield, distribution=D)
def set_TAL_yield(TAL_yield):
    # spec.load_specifications(TAL_yield,
    #                          spec.spec_2,
    #                          spec.spec_3)
    spec.spec_1 = TAL_yield
D = shape.Triangle(baseline_titer*0.8, baseline_titer, baseline_titer*1.2)
@param(name='TAL titer', element=R302, kind='coupled', units='g/L',
       baseline=baseline_titer, distribution=D)
def set_TAL_titer(TAL_titer):
    # spec.load_specifications(spec.spec_1,
    #                          TAL_titer,
    #                          spec.spec_3)
    spec.spec_2 = TAL_titer
D = shape.Triangle(baseline_productivity*0.8, baseline_productivity, baseline_productivity*1.2)
@param(name='TAL productivity', element=R302, kind='coupled', units='g/L/hr',
       baseline=baseline_productivity, distribution=D)
def set_TAL_productivity(TAL_prod):
    # spec.load_specifications(spec.spec_1,
    #                          spec.spec_2,
    #                          TAL_prod)
    spec.spec_3 = TAL_prod
###
# D = shape.Uniform(0.8*0.005, 0.005, 1.2*0.005)
# @param(name='VitaminA yield', element=R302, kind='coupled', units='% theoretical',
#        baseline=0.005, distribution=D)
# def set_vitaminA_yield(yield_):
#     R302.glucose_to_VitaminA_rxn.X = yield_
#     R303.glucose_to_VitaminA_rxn.X = R303.ferm_ratio*yield_
# D = shape.Uniform(0.8*0.005, 0.005, 1.2*0.005)
# @param(name='VitaminD2 yield', element=R302, kind='coupled', units='% theoretical',
#        baseline=0.005, distribution=D)
# def set_vitaminD2_yield(yield_):
#     R302.glucose_to_VitaminD2_rxn.X = yield_
#     R303.glucose_to_VitaminD2_rxn.X = R303.ferm_ratio*yield_
D = shape.Triangle(0.8*0.05, 0.05, 1.2*0.05)
@param(name='Microbe yield', element=R302, kind='coupled', units='% theoretical',
       baseline=0.05, distribution=D)
def set_microbe_yield(yield_):
    # Fraction of glucose diverted to cell mass in both fermenters.
    R302.glucose_to_microbe_rxn.X = yield_
    R303.glucose_to_microbe_rxn.X = R303.ferm_ratio*yield_
#%%
######################## Separation parameters ########################
D = shape.Triangle(0.0739, 0.0910, 0.2474) # experimental data from Singh group
@param(name='Adsorbent unsaturated capacity', element=AC401, kind='coupled', units='g/g',
       baseline=0.0910, distribution=D)
def set_adsorbent_cap(cap):
    AC401.adsorbent_capacity = cap
D = shape.Uniform(0.4, 0.6) # Seader et al., Table 15.2
@param(name='Adsorbent void volume fraction', element=AC401, kind='coupled', units='L/L',
       baseline=0.5, distribution=D)
def set_adsorbent_vvf(frac):
    AC401.void_fraction = frac
D = shape.Uniform(500, 900) # Seader et al., Table 15.2
@param(name='Adsorbent solid particle density', element=AC401, kind='coupled', units='kg/m^3',
       baseline=700, distribution=D)
def set_adsorbent_solid_rho(rho):
    AC401.rho_adsorbent_solid = rho
D = shape.Triangle(0.8*0.07795, 0.07795, 1.2*0.07795) # experimental data from Singh group
@param(name='Desorption single-wash partition coefficient', element=AC401, kind='coupled', units='(g/L)/(g/L)',
       baseline=0.07795, distribution=D)
def set_desorption_K(K):
    AC401.K = K
D = shape.Uniform(0.1, 1.9) # assumed
@param(name='Adsorbent replacement period', element=AC401, kind='coupled', units='y',
       baseline=1., distribution=D)
def set_adsorbent_lifetime(lt):
    # Equipment lifetime drives the adsorbent replacement cost in the TEA.
    AC401._default_equipment_lifetime['Activated carbon'] = lt
D = shape.Uniform(0.05, 0.95) # assumed
@param(name='Regeneration fluid retention in column', element=AC401, kind='coupled', units='L-ethanol/L-void',
       baseline=0.5, distribution=D)
def set_adsorption_ethanol_retention(wr):
    AC401.wet_retention = wr
D = shape.Uniform(0.01, 0.09) # assumed
@param(name='Ethanol retention in product after drying', element=F402, kind='coupled', units='g-ethanol/g-TAL',
       baseline=0.05, distribution=D)
def set_drying_ethanol_retention_in_product_stream(ethanol_retention_drying):
    F402.product_ethanol_content = ethanol_retention_drying
D = shape.Triangle(0.144945, 0.166880, 0.187718) # experimental data from Singh group
@param(name='TAL solubility in ethanol', element=F401, kind='coupled', units='g-TAL/g-solution',
       baseline=0.166880, distribution=D)
def set_TAL_solubility_ethanol(solubility):
    F401.TAL_solubility_in_ethanol_ww = solubility
# D = shape.Uniform(0.01, 0.09) # assumed
# @param(name='Ethanol retention in product after drying', element=U401, kind='coupled', units='g-ethanol/g-product',
#        baseline=0.05, distribution=D)
# def set_drying_ethanol_retention_in_product_stream(ethanol_retention_drying):
#     U401.moisture_content = ethanol_retention_drying
# D = shape.Uniform(0.991, 0.999) # assumed
# @param(name='Crystallization and centrifugation recovery', element=S404, kind='coupled', units='g-recovered/g-influent',
#        baseline=0.995, distribution=D)
# def set_crystallization_and_centrifugation_combined_recovery(TAL_recovery):
#     S404.split = np.array([0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
#        0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
#        0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
#        0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
#        0. , 0. , 0. , 0. , 0. , TAL_recovery, 0. , 0. , 0. , 0. , 0. ,
#        0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
#        0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ])
#%%
######################## Facility parameters ########################
D = baseline_uniform(0.8, 0.1)
@param(name='boiler efficiency', element=BT, kind='coupled', units='%',
baseline=0.8, distribution=D)
def set_boiler_efficiency(efficiency):
BT.boiler_efficiency = efficiency
D = shape.Triangle(0.8*7*24, 7*24, 1.2*7*24)
@param(name='Product TAL storage time', element=BT, kind='coupled', units='h',
baseline=7*24, distribution=D)
def set_product_storage_time(storage_time):
T620.tau = storage_time
parameters = model.get_parameters()
| 40.217284 | 122 | 0.657048 |
e9d61fc05f1c3467d09d8e2d6ef4e3dd47e671d4 | 2,945 | py | Python | Lock.py | myrlund/salabim | a45da9d07dbb65a084d617344b5b0a016b3ec665 | [
"MIT"
] | 1 | 2021-01-04T12:24:55.000Z | 2021-01-04T12:24:55.000Z | Lock.py | myrlund/salabim | a45da9d07dbb65a084d617344b5b0a016b3ec665 | [
"MIT"
] | null | null | null | Lock.py | myrlund/salabim | a45da9d07dbb65a084d617344b5b0a016b3ec665 | [
"MIT"
] | 1 | 2018-07-02T10:42:30.000Z | 2018-07-02T10:42:30.000Z | import salabim as sim
# Signed identifiers for the two approach sides of the lock.
left = -1
right = +1


def sidename(side):
    """Return the one-letter name prefix for *side*: 'l' for left, 'r' otherwise."""
    if side == left:
        return 'l'
    return 'r'
class Shipgenerator(sim.Component):
    """Creates ships on one side of the lock with exponential inter-arrival times."""

    def process(self):
        while True:
            # wait an exponentially distributed time before the next arrival
            yield self.hold(sim.Exponential(iat).sample())
            ship = Ship(name=sidename(self.side) + 'ship.')
            ship.side = self.side
            # ship length is uniform in [2/3, 4/3] of the mean length
            ship.length = meanlength * sim.Uniform(2. / 3, 4. / 3).sample()
            # wake the lock if it is currently idle so it can serve the new ship
            if lock.mode() == 'Idle':
                lock.activate()
class Ship(sim.Component):
    """A ship that queues, sails into the lock, is locked through and sails out."""

    def process(self):
        self.arrivaltime = env.now()
        # queue on our side until the lock invites us in
        self.enter(wait[self.side])
        yield self.passivate(mode='Wait')
        yield self.hold(intime, mode='Sail in')
        self.leave(wait[self.side])
        self.enter(lockqueue)
        # hand control back to the lock so it can admit the next ship
        lock.activate()
        yield self.passivate(mode='In lock')
        yield self.hold(outtime, mode='Sail out')
        self.leave(lockqueue)
        # notify the lock that this ship has left
        lock.activate()
        # record total time spent in the whole lock complex
        lock.monitor_time_in_complex.tally(env.now() - self.arrivaltime)
class Lock(sim.Component):
    """The lock: admits ships up to its length capacity, switches sides, releases them."""

    def setup(self):
        # total length of ships currently inside the lock chamber
        self.usedlength = 0
        self.side = left
        self.monitor_usedlength = sim.MonitorTimestamp(
            name='used length')
        self.monitor_time_in_complex = sim.Monitor(name='time in complex')

    def get_usedlength(self):
        return self.usedlength

    def process(self):
        while True:
            # nothing waiting on either side -> go idle until a generator wakes us
            if len(wait[left]) + len(wait[right]) == 0:
                yield self.passivate(mode='Idle')
            # admit ships from the current side while they fit in the chamber
            for ship in wait[self.side]:
                if self.usedlength + ship.length <= locklength:
                    self.usedlength += ship.length
                    self.monitor_usedlength.tally(self.usedlength)
                    ship.activate()
                    # wait until this ship has finished sailing in
                    yield self.passivate('Wait for sail in')
            yield self.hold(switchtime, mode='Switch')
            # lock now opens to the opposite side
            self.side = -self.side
            # release each ship in turn and wait for it to sail out
            for ship in lockqueue:
                ship.activate()
                yield self.passivate('Wait for sail out')
                # avoid rounding errors
                self.usedlength = max(self.usedlength - ship.length, 0)
                self.monitor_usedlength.tally(self.usedlength)
# --- simulation setup and run ---
env = sim.Environment(trace=False)

# model parameters (time units are implicit simulation time units)
locklength = 60    # capacity of the lock chamber (ship-length units)
switchtime = 10    # time to switch the lock to the other side
intime = 2         # sail-in duration per ship
outtime = 2        # sail-out duration per ship
meanlength = 30    # mean ship length
iat = 30           # mean inter-arrival time of ships per side

lockqueue = sim.Queue('lockqueue')

shipcounter = 0
wait = {}
# one waiting queue and one ship generator per side
for side in (left, right):
    wait[side] = sim.Queue(name=sidename(side) + 'Wait')
    shipgenerator = Shipgenerator(name=sidename(side) + 'Shipgenerator')
    shipgenerator.side = side

lock = Lock(name='lock')

env.run(50000)

# report queue-length / stay-time statistics as text histograms
lockqueue.length.print_histogram(5, 0, 1)
lockqueue.length_of_stay.print_histogram(10, 10, 1)
for side in (left, right):
    wait[side].length.print_histogram(30, 0, 1)
    wait[side].length_of_stay.print_histogram(30, 0, 10)
lock.monitor_usedlength.print_histogram(20, 0, 5)
lock.monitor_time_in_complex.print_histogram(30, 0, 10)
5c8841874ee5b88eb19c2ba87a8fc87a66e0d52b | 8,637 | py | Python | scripts/harmonic_operators.py | tsutterley/model-harmonics | 17f6842d5fa1f2abf42caea51cfb09b6a4b2ee30 | [
"MIT"
] | 4 | 2021-01-04T00:40:03.000Z | 2021-12-29T13:37:32.000Z | scripts/harmonic_operators.py | tsutterley/model-harmonics | 17f6842d5fa1f2abf42caea51cfb09b6a4b2ee30 | [
"MIT"
] | 2 | 2021-10-10T06:57:20.000Z | 2021-12-06T19:28:34.000Z | scripts/harmonic_operators.py | tsutterley/model-harmonics | 17f6842d5fa1f2abf42caea51cfb09b6a4b2ee30 | [
"MIT"
] | 4 | 2021-05-18T21:00:59.000Z | 2021-12-06T18:22:58.000Z | #!/usr/bin/env python
u"""
harmonic_operators.py
Written by Tyler Sutterley (08/2021)
Performs basic operations on spherical harmonic files
CALLING SEQUENCE:
python harmonic_operators.py --operation add infile1 infile2 outfile
INPUTS:
path to input harmonic files
path to output harmonic file
COMMAND LINE OPTIONS:
-O X, --operation X: Operation to run
add
subtract
multiply
divide
mean
destripe
error
RMS
-l X, --lmax X: maximum spherical harmonic degree
-m X, --mmax X: maximum spherical harmonic order
-F X, --format X: Input and output data format
ascii
netcdf
HDF5
-D, --date: input and output files have date information
-M X, --mode X: Permission mode of directories and files
-V, --verbose: Output information for each output file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
dateutil: powerful extensions to datetime
https://dateutil.readthedocs.io/en/stable/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Python interface for Hierarchal Data Format 5 (HDF5)
https://h5py.org
PROGRAM DEPENDENCIES:
harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO
destripe_harmonics.py: calculates the decorrelation (destriping) filter
and filters the GRACE/GRACE-FO coefficients for striping errors
ncdf_read_stokes.py: reads spherical harmonic netcdf files
ncdf_stokes.py: writes output spherical harmonic data to netcdf
hdf5_read_stokes.py: reads spherical harmonic HDF5 files
hdf5_stokes.py: writes output spherical harmonic data to HDF5
UPDATE HISTORY:
Updated 08/2021: added variance off mean as estimated error
Updated 02/2021: added options to truncate output to a degree or order
add options to read from individual index files
Written 02/2021
"""
from __future__ import print_function
import sys
import os
import argparse
import numpy as np
from gravity_toolkit.harmonics import harmonics
#-- PURPOSE: Performs operations on harmonic files
def harmonic_operators(INPUT_FILES, OUTPUT_FILE, OPERATION=None, LMAX=None,
MMAX=None, DATAFORM=None, DATE=False, VERBOSE=False, MODE=None):
#-- number of input harmonic files
n_files = len(INPUT_FILES)
#-- extend list if a single format was entered for all files
if len(DATAFORM) < (n_files+1):
DATAFORM = DATAFORM*(n_files+1)
#-- verify that output directory exists
DIRECTORY = os.path.abspath(os.path.dirname(OUTPUT_FILE))
if not os.access(DIRECTORY, os.F_OK):
os.makedirs(DIRECTORY,MODE,exist_ok=True)
#-- read each input file
dinput = [None]*n_files
for i,fi in enumerate(INPUT_FILES):
#-- read spherical harmonics file in data format
if DATAFORM[i] in ('ascii','netCDF4','HDF5'):
#-- ascii (.txt)
#-- netCDF4 (.nc)
#-- HDF5 (.H5)
dinput[i] = harmonics().from_file(fi,format=DATAFORM[i],
date=DATE, verbose=VERBOSE)
elif DATAFORM[i] in ('index-ascii','index-netCDF4','index-HDF5'):
#-- read from index file
_,dataform = DATAFORM[i].split('-')
dinput[i] = harmonics().from_index(fi,format=dataform,date=DATE)
#-- operate on input files
if (OPERATION == 'add'):
output = dinput[0].zeros_like()
for i in range(n_files):
#-- perform operation
output = output.add(dinput[i])
elif (OPERATION == 'subtract'):
output = dinput[0].copy()
for i in range(n_files-1):
#-- perform operation
output = output.subtract(dinput[i+1])
elif (OPERATION == 'multiply'):
output = dinput[0].copy()
for i in range(n_files-1):
#-- perform operation
output = output.multiply(dinput[i+1])
elif (OPERATION == 'divide'):
output = dinput[0].copy()
for i in range(n_files-1):
#-- perform operation
output = output.divide(dinput[i+1])
elif (OPERATION == 'mean'):
output = dinput[0].zeros_like()
for i in range(n_files):
#-- perform operation
output = output.add(dinput[i])
#-- convert from total to mean
output = output.scale(1.0/n_files)
elif (OPERATION == 'destripe'):
#-- destripe spherical harmonics
output = dinput[0].destripe()
elif (OPERATION == 'error'):
mean = dinput[0].zeros_like()
for i in range(n_files):
#-- perform operation
mean = mean.add(dinput[i])
#-- convert from total to mean
mean = mean.scale(1.0/n_files)
#-- use variance off mean as estimated error
output = dinput[0].zeros_like()
for i in range(n_files):
#-- perform operation
temp = dinput[i].subtract(mean)
output = output.add(temp.power(2.0))
#-- calculate RMS of mean differences
output = output.scale(1.0/(n_files-1.0)).power(0.5)
elif (OPERATION == 'RMS'):
output = dinput[0].zeros_like()
for i in range(n_files):
#-- perform operation
output = output.add(dinput[i].power(2.0))
#-- convert from total in quadrature to RMS
output = output.scale(1.0/n_files).power(0.5)
#-- truncate to specified degree and order
if (LMAX is not None) | (MMAX is not None):
output.truncate(LMAX, mmax=MMAX)
#-- copy date variables if specified
if DATE:
output.time = np.copy(dinput[0].time)
output.month = np.copy(dinput[0].month)
#-- output file title
title = 'Output from {0}'.format(os.path.basename(sys.argv[0]))
#-- write spherical harmonic file in data format
output.to_file(OUTPUT_FILE, format=DATAFORM[-1],
date=DATE, title=title, verbose=VERBOSE)
#-- change the permissions mode of the output file
os.chmod(OUTPUT_FILE, MODE)
#-- Main program that calls harmonic_operators()
def main():
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""Performs basic operations on spherical harmonic files
"""
)
#-- command line options
#-- input and output file
parser.add_argument('infiles',
type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='+',
help='Input files')
parser.add_argument('outfile',
type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs=1,
help='Output file')
#-- operation to run
choices = ['add','subtract','multiply','divide','mean',
'destripe','error','RMS']
parser.add_argument('--operation','-O',
metavar='OPERATION', type=str,
required=True, choices=choices,
help='Operation to run')
#-- maximum spherical harmonic degree and order
parser.add_argument('--lmax','-l',
type=int, default=None,
help='Maximum spherical harmonic degree')
parser.add_argument('--mmax','-m',
type=int, default=None,
help='Maximum spherical harmonic order')
#-- input and output data format (ascii, netCDF4, HDF5)
choices = []
choices.extend(['ascii','netCDF4','HDF5'])
choices.extend(['index-ascii','index-netCDF4','index-HDF5'])
parser.add_argument('--format','-F',
metavar='FORMAT', type=str, nargs='+',
default=['netCDF4'], choices=choices,
help='Input and output data format')
#-- Input and output files have date information
parser.add_argument('--date','-D',
default=False, action='store_true',
help='Input and output files have date information')
#-- print information about each output file
parser.add_argument('--verbose','-V',
default=False, action='store_true',
help='Verbose output of run')
#-- permissions mode of the local directories and files (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='Permission mode of directories and files')
args,_ = parser.parse_known_args()
#-- run program
harmonic_operators(args.infiles, args.outfile[0], OPERATION=args.operation,
LMAX=args.lmax, MMAX=args.mmax, DATAFORM=args.format, DATE=args.date,
VERBOSE=args.verbose, MODE=args.mode)
#-- run main program
if __name__ == '__main__':
main() | 38.558036 | 79 | 0.636679 |
1a2cd21fd7f39e1c2256b2eb9e8e488bfa4af0c0 | 311 | py | Python | @Test/get historical stock data/get historical data.py | guozc12/QuantPy | bf8df57616c11475081839050715ce8152092557 | [
"MIT"
] | 1 | 2020-09-24T04:14:00.000Z | 2020-09-24T04:14:00.000Z | @Test/get historical stock data/get historical data.py | guozc12/QuantPy | bf8df57616c11475081839050715ce8152092557 | [
"MIT"
] | null | null | null | @Test/get historical stock data/get historical data.py | guozc12/QuantPy | bf8df57616c11475081839050715ce8152092557 | [
"MIT"
] | 1 | 2020-09-22T15:35:54.000Z | 2020-09-22T15:35:54.000Z | """This function is for getting historical data of stocks"""
import tushare as ts
mytoken = 'ecd07cde131c84a0419eae37bcd2f231ca2553c5e47b0061c511280a'
pro = ts.pro_api(mytoken)
data = pro.us_daily(ts_code='AAPL', start_date='20190101', end_date='20200928')
print(data)
#data.to_csv('600848.csv',index=False) | 28.272727 | 79 | 0.78135 |
1132a7ee6eeed49c93acfee877a47e8204fd9609 | 930 | py | Python | greatestcommondivisor.py | TheArcMagician/Number-theory | b92351168af478d98a5c95ab229409abca558e42 | [
"MIT"
] | 2 | 2016-07-08T17:56:49.000Z | 2016-07-08T17:56:55.000Z | greatestcommondivisor.py | TheArcMagician/Number-theory | b92351168af478d98a5c95ab229409abca558e42 | [
"MIT"
] | null | null | null | greatestcommondivisor.py | TheArcMagician/Number-theory | b92351168af478d98a5c95ab229409abca558e42 | [
"MIT"
] | null | null | null | #A program that takes two given numbers and finds their
#greatest common divisor -Shriram Krishnamachari 7/18/16
from collections import Counter as mset
def smallestFactor(n):
    """Return the smallest prime factor of n (n itself when n is prime).

    Returns None for n < 2, matching the original fall-through behaviour.
    Trial division only needs to test candidates up to sqrt(n): if none
    divides n, then n is prime (O(sqrt n) instead of the original O(n)).
    """
    i = 2
    while i * i <= n:
        if n % i == 0:
            return i
        i += 1
    # no divisor found up to sqrt(n): n is its own smallest prime factor
    if n > 1:
        return n
    return None


def factorize(number):
    """Return the prime factors of number in non-decreasing order.

    factorize(12) -> [2, 2, 3]; factorize(1) -> [1] (special case kept
    for backward compatibility with the original behaviour).
    """
    factor = list()
    if number == 1:
        factor.append(1)
    cn = number
    while cn > 1:
        sf = smallestFactor(cn)
        factor.append(sf)
        # Bug fix: use floor division so cn stays an int. The original
        # 'cn = cn / sf' yields a float under Python 3, which then crashes
        # smallestFactor's integer loop bound.
        cn = cn // sf
    return factor
# NOTE: Python 2 only -- uses the print statement and Python 2's input(),
# which eval()s the typed text into an int (a security risk on untrusted
# input; Python 3 would need int(input(...)) and print(...)).
number1 = input('Give a number')
number2 = input('Give another number')
factor1 = factorize(number1)
factor2 = factorize(number2)
# multiset intersection of the two factor lists = shared prime factors
# with multiplicity (mset is collections.Counter)
commonfactors = list((mset(factor1) & mset(factor2)).elements())
gcd = 1
# the following loop takes each element in common factors
# and makes gcd = gcd * e
# the short form of which is gcd *= e
for e in commonfactors:
    gcd *= e
print "The GCD of the two numbers is: " + str(gcd)
5ab6d5922a9db29b890514b7a99e6435a32ec15b | 4,873 | py | Python | docs/conf.py | DanGSun/pysonofflan | dd8790a0dbd1b1879f32d66f87c49acb5ff1a859 | [
"MIT"
] | 31 | 2019-01-27T23:34:59.000Z | 2022-03-18T04:11:50.000Z | docs/conf.py | DanGSun/pysonofflan | dd8790a0dbd1b1879f32d66f87c49acb5ff1a859 | [
"MIT"
] | 92 | 2019-01-30T11:03:28.000Z | 2021-11-15T17:48:19.000Z | docs/conf.py | DanGSun/pysonofflan | dd8790a0dbd1b1879f32d66f87c49acb5ff1a859 | [
"MIT"
] | 18 | 2019-02-04T15:06:41.000Z | 2021-11-09T16:35:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pysonofflan documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import pysonofflan
# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'pySonoffLAN'
copyright = u"2019, Andrew Beveridge"
author = u"Andrew Beveridge"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
# Both version and release are taken from the package itself so the docs
# never drift from the code.
version = pysonofflan.__version__
# The full version, including alpha/beta/rc tags.
release = pysonofflan.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a
# theme further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']


# -- Options for HTMLHelp output ---------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'pysonofflandoc'


# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pysonofflan.tex',
     u'pySonoffLAN Documentation',
     u'Andrew Beveridge', 'manual'),
]


# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pysonofflan',
     u'pySonoffLAN Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'pysonofflan',
     u'pySonoffLAN Documentation',
     author,
     'pysonofflan',
     'One line description of project.',
     'Miscellaneous'),
]
| 29.713415 | 77 | 0.687256 |
0c4d9f422a3d64618300eaaa1d29faa98b1457bb | 5,102 | py | Python | scripts/postprocessing.py | MauriceKarrenbrock/PythonFSDAM | efd4a1717af37d6598aaaca0fa520f735cf254b0 | [
"BSD-3-Clause"
] | null | null | null | scripts/postprocessing.py | MauriceKarrenbrock/PythonFSDAM | efd4a1717af37d6598aaaca0fa520f735cf254b0 | [
"BSD-3-Clause"
] | null | null | null | scripts/postprocessing.py | MauriceKarrenbrock/PythonFSDAM | efd4a1717af37d6598aaaca0fa520f735cf254b0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=line-too-long
# pylint: disable=duplicate-code
#############################################################
# Copyright (c) 2020-2021 Maurice Karrenbrock #
# #
# This software is open-source and is distributed under the #
# BSD 3-Clause "New" or "Revised" License #
#############################################################
"""script to do post processing of a FSDAM or a vDSSB
use --help for usage info
"""
import argparse
import shutil
from pathlib import Path
from PythonFSDAM.pipelines import superclasses
# Command-line interface for the FSDAM / vDSSB post-processing script.
# NOTE(review): "evailable" in the description is a typo for "available"
# (user-visible help text, left unchanged here).
parser = argparse.ArgumentParser(
    description='This script will post process everything needed after '
    'FSDAM or vDSSB with the wanted md program use --help for usage info '
    'Parallelism by default it will use all the cores evailable, '
    'use OMP_NUM_THREADS environment variable to limit '
    'the number of used CPUs',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)

parser.add_argument('--md-program',
                    action='store',
                    type=str,
                    default='gromacs',
                    help='The MD program to use')

parser.add_argument('--kind-of-process',
                    action='store',
                    type=str,
                    default='standard',
                    choices=['standard', 'vdssb'],
                    help='if you are doing a standard FSDAM or a vDSSB')

parser.add_argument(
    '--unbound-file',
    action='store',
    type=str,
    help=
    'the path to the file that contains the names of the energy files resulting from the unbound (ligand) process, something like unbound/file.dat\n'
    'IF THEY ARE 2 LIKE IN GROMACS USE A COMMA SEPARATED LIST!')

parser.add_argument(
    '--bound-file',
    action='store',
    type=str,
    help=
    'the path to the file that contains the names of the energy files resulting from the bound (protein-ligand) process, something like bound/file.dat\n'
    'IF THEY ARE 2 LIKE IN GROMACS USE A COMMA SEPARATED LIST!')

parser.add_argument('--temperature',
                    action='store',
                    type=float,
                    default=298.15,
                    help='temperature in Kelvin (K)')

parsed_input = parser.parse_args()
def _read_file_lists(index_spec):
    """Read the energy-file index file(s) given as a comma separated list.

    Each index file lists one energy-file name per line; names are resolved
    relative to the directory the index file lives in.  Returns one list of
    resolved paths per index file, flattened to a single list when only one
    index file was given (mirrors the original script's behaviour).
    """
    entries = index_spec.split(',')
    # directory each index file lives in (part before the last '/')
    parent_dirs = [
        Path(entry.rsplit('/', 1)[0].strip()).resolve() for entry in entries
    ]
    file_lists = []
    for parent_dir, entry in zip(parent_dirs, entries):
        with open(entry, 'r') as index_file:
            file_lists.append([
                parent_dir / Path(line.strip())
                for line in index_file if line.strip()
            ])
    if len(file_lists) == 1:
        return file_lists[0]
    return file_lists


#Deal with unbound files (the bound/unbound parsing was duplicated in the
#original; both now go through the same helper)
unbound_files = _read_file_lists(parsed_input.unbound_file)

#Deal with bound files
bound_files = _read_file_lists(parsed_input.bound_file)

if parsed_input.kind_of_process == 'standard':
    # standard FSDAM: process the two alchemical legs independently and
    # combine them
    unbound_obj = superclasses.JarzynskiPostProcessingAlchemicalLeg(
        unbound_files,
        temperature=parsed_input.temperature,
        md_program=parsed_input.md_program,
        creation=False)

    unbound_free_energy, unbound_std = unbound_obj.execute()

    shutil.move(f'{str(unbound_obj)}_free_energy.dat',
                'unbound_' + f'{str(unbound_obj)}_free_energy.dat')

    bound_obj = superclasses.JarzynskiPostProcessingAlchemicalLeg(
        bound_files,
        temperature=parsed_input.temperature,
        md_program=parsed_input.md_program,
        creation=False)

    bound_free_energy, bound_std = bound_obj.execute()

    shutil.move(f'{str(bound_obj)}_free_energy.dat',
                'bound_' + f'{str(bound_obj)}_free_energy.dat')

    total_free_energy = bound_free_energy - unbound_free_energy
    total_std = bound_std + unbound_std

    with open('total_free_energy.dat', 'w') as f:
        f.write(
            '#total unbinding free energy (no volume correction done) in Kcal mol\n'
            '#Dg CI95% STD\n'
            f'{total_free_energy:.18e} {1.96*(total_std):.18e} {total_std:.18e}\n'
        )

    print(f'free energy {total_free_energy}\n' f'CI95 {1.96*(total_std)}')

elif parsed_input.kind_of_process == 'vdssb':
    # vDSSB: the two legs are combined inside a single pipeline
    obj = superclasses.JarzynskiVDSSBPostProcessingPipeline(
        bound_files,
        unbound_files,
        temperature=parsed_input.temperature,
        md_program=parsed_input.md_program)

    free_energy, std = obj.execute()

    print(f'free energy {free_energy:.18e}\nCI95 {std*1.96:.18e}')

else:
    # Bug fix: the original built this exception without raising it, so an
    # unknown kind_of_process fell through silently.
    raise ValueError('Unknown input')
| 30.369048 | 153 | 0.622501 |
ab502be65277f77defad031c3d472cd4b3ea615e | 2,314 | py | Python | expanduino/classes/meta.py | Expanduino/Expanduino-Python | 468b97091c5808158e05895cc1a9fb244dfdc6c7 | [
"Unlicense"
] | null | null | null | expanduino/classes/meta.py | Expanduino/Expanduino-Python | 468b97091c5808158e05895cc1a9fb244dfdc6c7 | [
"Unlicense"
] | null | null | null | expanduino/classes/meta.py | Expanduino/Expanduino-Python | 468b97091c5808158e05895cc1a9fb244dfdc6c7 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
from expanduino.subdevice import Subdevice
from expanduino.codec import *
from enum import IntEnum
from functools import lru_cache
from cached_property import cached_property
class MetaSubdevice(Subdevice):
    """Subdevice exposing device metadata and per-subdevice queries/interrupt control.

    Every method is a thin wrapper around Subdevice.call() with the matching
    Command code; immutable device properties are cached with cached_property.
    """

    class Command(IntEnum):
        # wire-protocol command codes understood by the firmware
        VENDOR_NAME = 0
        PRODUCT_NAME = 1
        SHORT_NAME = 2
        SERIAL_NUMBER = 3
        RESET = 4
        GET_INTERRUPTION = 5
        GET_INTERRUPTION_ENABLED = 6
        SET_INTERRUPTION_ENABLED = 7
        NUM_SUBDEVICES = 8
        SUBDEVICE_TYPE = 9
        SUBDEVICE_NAME = 10
        SUBDEVICE_SHORT_NAME = 11

    def __init__(self, container, devNum):
        Subdevice.__init__(self, container, devNum)

    @cached_property
    def device_vendor_name(self):
        """Vendor name of the whole device (cached)."""
        return self.call(MetaSubdevice.Command.VENDOR_NAME, parser=parseString)

    @cached_property
    def device_product_name(self):
        """Product name of the whole device (cached)."""
        return self.call(MetaSubdevice.Command.PRODUCT_NAME, parser=parseString)

    @cached_property
    def device_short_name(self):
        """Short name of the whole device (cached)."""
        return self.call(MetaSubdevice.Command.SHORT_NAME, parser=parseString)

    @cached_property
    def device_serial_number(self):
        """Serial number of the whole device (cached)."""
        return self.call(MetaSubdevice.Command.SERIAL_NUMBER, parser=parseString)

    def device_reset(self):
        """Reset the whole device (RESET with no subdevice argument)."""
        self.call(MetaSubdevice.Command.RESET)

    @cached_property
    def num_subdevices(self):
        """Number of subdevices the device reports (cached)."""
        return self.call(MetaSubdevice.Command.NUM_SUBDEVICES, parser=parseByte)

    def subdevice_reset(self, devNum):
        """Reset subdevice devNum."""
        return self.call(MetaSubdevice.Command.RESET, args=[devNum])

    def subdevice_type(self, devNum):
        """Return the Subdevice.Type of subdevice devNum."""
        return self.call(MetaSubdevice.Command.SUBDEVICE_TYPE, args=[devNum], parser=parseEnum(Subdevice.Type))

    def subdevice_name(self, devNum):
        """Return the full name of subdevice devNum."""
        return self.call(MetaSubdevice.Command.SUBDEVICE_NAME, args=[devNum], parser=parseString)

    def subdevice_short_name(self, devNum):
        """Return the short name of subdevice devNum."""
        return self.call(MetaSubdevice.Command.SUBDEVICE_SHORT_NAME, args=[devNum], parser=parseString)

    def subdevice_get_interrupt_enabled(self, devNum):
        """Return True if interrupts are enabled for subdevice devNum."""
        return self.call(MetaSubdevice.Command.GET_INTERRUPTION_ENABLED, args=[devNum], parser=parseBool)

    def subdevice_set_interrupt_enabled(self, devNum, enabled):
        """Enable/disable interrupts for subdevice devNum."""
        self.call(MetaSubdevice.Command.SET_INTERRUPTION_ENABLED, args=[devNum, enabled])
7582c4f0eb0c719f0befcf44104e2c27340ffba6 | 1,187 | py | Python | app/routers/file_.py | penM000/SHINtube-video-api | 924e823af318cce69f06745359f8b35d8920ebfa | [
"MIT"
] | null | null | null | app/routers/file_.py | penM000/SHINtube-video-api | 924e823af318cce69f06745359f8b35d8920ebfa | [
"MIT"
] | null | null | null | app/routers/file_.py | penM000/SHINtube-video-api | 924e823af318cce69f06745359f8b35d8920ebfa | [
"MIT"
] | 1 | 2021-09-20T15:50:01.000Z | 2021-09-20T15:50:01.000Z | import asyncio
import re
from ..internal.video.filemanager import filemanager
from ..internal.video.database import database
from fastapi import APIRouter
from fastapi import BackgroundTasks
from fastapi import File, UploadFile
# Router for the file-upload API; all routes are served under /api/file.
router = APIRouter(
    prefix="/api/file",
    tags=["file_api"],
    responses={404: {"description": "Not found"}},
)


@router.post("/fileupload")
async def fileupload_endpoint(
        cid: str,
        title: str,
        explanation: str,
        meta_data: str = "",
        year: int = None,
        service_name: str = None,
        in_file: UploadFile = File(...),):
    """
    File upload endpoint.

    The value of ``title`` becomes the stored file name; characters that are
    invalid in file names are stripped as a defense against directory
    traversal.

    Arguments:
        service_name (or year): [academic year / service name]
        cid: [course code]
        title: [file title]
        explanation: [file description]
    """
    # strip path separators and other characters that are unsafe in
    # file names (directory-traversal defense)
    title = re.sub(r'[\\/:*?"<>|]+', '', title)
    print(title)
    # fall back to the year as the service name when none was given
    if service_name is None:
        service_name = str(year)
    created_dir = await filemanager.create_video_directory(
        service_name, cid, title, explanation, meta_data)
    file_path = f"./{created_dir}/{title}"
    await filemanager.write_file(file_path, in_file)
    return {"Result": "OK"}
| 25.255319 | 59 | 0.63016 |
06c0d862018a69258893bbcbef975e5817ab5291 | 2,256 | py | Python | polygon.py | hermannbene/BeamTomography | 39eae19c54128f27eb90a2717b1876768d730f29 | [
"Apache-2.0"
] | 2 | 2021-01-25T12:31:21.000Z | 2021-04-02T23:13:09.000Z | polygon.py | hermannbene/BeamTomography | 39eae19c54128f27eb90a2717b1876768d730f29 | [
"Apache-2.0"
] | null | null | null | polygon.py | hermannbene/BeamTomography | 39eae19c54128f27eb90a2717b1876768d730f29 | [
"Apache-2.0"
] | null | null | null | def genPoly(nWires,radius,scanLength,offAngle,center):
'''
this function generates a polygon path for wire scan tomography.
Input
center: np.array([x,y,z,rx,ry,rz]) in um,deg
nWires: int
radius: flaot um
scanLength: flaot um
offAngle: offset angle of the wire star, 0 means wire 0 is horizontal (aligned with pos. x axis). in deg
Output
A list of length 2*nWires is returned. Points in the same format as center are returned in the following order:
start wire 0, end wire 0, start wire 1, end wire 1, ...
'''
scanPointsPolygon = []
if normalAxis == 'x':
coordinate1 = 1
coordinate2 = 2
elif normalAxis == 'y':
coordinate1 = 0
coordinate2 = 2
if normalAxis == 'z':
coordinate1 = 0
coordinate2 = 1
for i in range(nWires):
beta = 2.*np.pi/nWires*i + offAngle/180*np.pi # angle of i-th wire in rad
for j in range(2): # for start and end point
offset = np.zeros(6)
offset[coordinate1] = radius*np.cos(beta) + (-1)**(j+1)*scanLength/2.*np.sin(-beta)
offset[coordinate2] = radius*np.sin(beta) + (-1)**(j+1)*scanLength/2.*np.cos(-beta)
scanPointsPolygon.append(center+offset)
return scanPointsPolygon
def plotScanPoints(scanPointsPolygon, idX, idY):
    '''
    Plot a polygon path for wire scan tomography and save it to
    'temp_ScanPoints.png'.

    Input
        scanPointsPolygon: output of genPoly -- a list of scan points in the
            order start wire 0, end wire 0, start wire 1, end wire 1, ...
        idX, idY: coordinate indices to plot on the x/y axis
            (0..5 -> x, y, z, rx, ry, rz; -1 -> unlabeled/arbitrary)
    '''
    fig = plt.figure('Visualize Scan Path')
    ax = fig.add_subplot(111)
    # mark each point and annotate it with its index in the path
    for i,p in enumerate(scanPointsPolygon):
        ax.plot(p[idX], p[idY], marker='+', color='r')
        ax.annotate(i, (p[idX], p[idY]))
    # build the axis label from the selected coordinate index (string
    # multiplication by a bool keeps exactly one of the candidates)
    xlabel = 'x'*(idX==0)+'y'*(idX==1)+'z'*(idX==2)+'rx'*(idX==3)+'ry'*(idX==4)+'rz'*(idX==5)+'arb.'*(idX==-1)
    ylabel = 'x'*(idY==0)+'y'*(idY==1)+'z'*(idY==2)+'rx'*(idY==3)+'ry'*(idY==4)+'rz'*(idY==5)+'arb.'*(idY==-1)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_aspect('equal')
    fig.tight_layout()
    plt.savefig('temp_ScanPoints.png')
d55c23b440db9040c253ab97bed56d28fc561068 | 9,436 | py | Python | gym_collision_avoidance/envs/policies/social_lstm/validation.py | meghdeepj/Social-Navigation-Simulator | 806d304081bf5ff4fc7a0a58defb050627375865 | [
"MIT"
] | 3 | 2021-12-16T05:39:14.000Z | 2022-02-25T06:07:51.000Z | gym_collision_avoidance/envs/policies/social_lstm/validation.py | meghdeepj/Social-Navigation-Simulator | 806d304081bf5ff4fc7a0a58defb050627375865 | [
"MIT"
] | null | null | null | gym_collision_avoidance/envs/policies/social_lstm/validation.py | meghdeepj/Social-Navigation-Simulator | 806d304081bf5ff4fc7a0a58defb050627375865 | [
"MIT"
] | 1 | 2021-11-09T18:04:01.000Z | 2021-11-09T18:04:01.000Z | import os
import pickle
import argparse
import time
import subprocess
import torch
from torch.autograd import Variable
import numpy as np
from utils import DataLoader
from helper import get_mean_error, get_final_error
from helper import *
from grid import getSequenceGridMask
def main():
    """Validate a trained trajectory-prediction model.

    Parses command-line options, loads the requested checkpoint
    (social/obstacle/vanilla LSTM, optionally with GRU cells), runs it
    over the validation datasets, prints mean and final displacement
    errors, and writes predicted trajectories to a plot file.
    """
    parser = argparse.ArgumentParser()
    # Model to be loaded
    parser.add_argument('--epoch', type=int, default=15,
                        help='Epoch of model to be loaded')
    parser.add_argument('--seq_length', type=int, default=20,
                        help='RNN sequence length')
    parser.add_argument('--use_cuda', action="store_true", default=False,
                        help='Use GPU or not')
    parser.add_argument('--drive', action="store_true", default=False,
                        help='Use Google drive or not')
    # Size of neighborhood to be considered parameter
    parser.add_argument('--neighborhood_size', type=int, default=32,
                        help='Neighborhood size to be considered for social grid')
    # Size of the social grid parameter
    parser.add_argument('--grid_size', type=int, default=4,
                        help='Grid size of the social grid')
    # number of validation will be used
    parser.add_argument('--num_validation', type=int, default=5,
                        help='Total number of validation dataset will be visualized')
    # gru support
    parser.add_argument('--gru', action="store_true", default=False,
                        help='True : GRU cell, False: LSTM cell')
    # method selection
    parser.add_argument('--method', type=int, default=1,
                        help='Method of lstm will be used (1 = social lstm, 2 = obstacle lstm, 3 = vanilla lstm)')

    # Parse the parameters
    sample_args = parser.parse_args()

    # for drive run
    prefix = ''
    f_prefix = '.'
    if sample_args.drive is True:
        prefix='drive/semester_project/social_lstm_final/'
        f_prefix = 'drive/semester_project/social_lstm_final'

    method_name = get_method_name(sample_args.method)
    model_name = "LSTM"
    save_tar_name = method_name+"_lstm_model_"
    if sample_args.gru:
        model_name = "GRU"
        save_tar_name = method_name+"_gru_model_"

    # Save directory
    save_directory = os.path.join(f_prefix, 'model/', method_name, model_name)

    # plot directory for plotting in the future
    plot_directory = os.path.join(f_prefix, 'plot/', method_name, model_name)
    plot_validation_file_directory = 'validation'

    # Define the path for the config file for saved args
    with open(os.path.join(save_directory,'config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    origin = (0,0)
    reference_point = (0,1)
    net = get_model(sample_args.method, saved_args, True)
    if sample_args.use_cuda:
        net = net.cuda()

    # Get the checkpoint path
    checkpoint_path = os.path.join(save_directory, save_tar_name+str(sample_args.epoch)+'.tar')
    if os.path.isfile(checkpoint_path):
        print('Loading checkpoint')
        checkpoint = torch.load(checkpoint_path)
        model_epoch = checkpoint['epoch']
        net.load_state_dict(checkpoint['state_dict'])
        print('Loaded checkpoint at epoch', model_epoch)

    # Create the DataLoader object
    dataloader = DataLoader(f_prefix, 1, sample_args.seq_length, num_of_validation = sample_args.num_validation, forcePreProcess = True, infer = True)
    create_directories(plot_directory, [plot_validation_file_directory])
    dataloader.reset_batch_pointer()

    print('****************Validation dataset batch processing******************')
    dataloader.reset_batch_pointer(valid=False)
    dataset_pointer_ins = dataloader.dataset_pointer

    loss_epoch = 0
    err_epoch = 0
    f_err_epoch = 0
    num_of_batch = 0
    smallest_err = 100000

    # results of one epoch for all validation datasets
    epoch_result = []
    # results of one validation dataset
    results = []

    # For each batch
    for batch in range(dataloader.num_batches):
        start = time.time()
        # Get batch data
        x, y, d , numPedsList, PedsList ,target_ids = dataloader.next_batch()

        # BUGFIX: was `is not` (identity comparison), which only appears to
        # work for small ints because of CPython's int cache; use a value
        # comparison to detect that the loader moved on to a new file.
        if dataset_pointer_ins != dataloader.dataset_pointer:
            if dataloader.dataset_pointer != 0:
                print('Finished processed file : ', dataloader.get_file_name(-1),' Average error : ', err_epoch/num_of_batch)
                num_of_batch = 0
                epoch_result.append(results)

            dataset_pointer_ins = dataloader.dataset_pointer
            results = []

        # Loss for this batch
        loss_batch = 0
        err_batch = 0
        f_err_batch = 0

        # For each sequence
        for sequence in range(dataloader.batch_size):
            # Get data corresponding to the current sequence
            x_seq ,_ , d_seq, numPedsList_seq, PedsList_seq = x[sequence], y[sequence], d[sequence], numPedsList[sequence], PedsList[sequence]
            target_id = target_ids[sequence]

            folder_name = dataloader.get_directory_name_with_pointer(d_seq)
            dataset_data = dataloader.get_dataset_dimension(folder_name)

            # dense vector creation
            x_seq, lookup_seq = dataloader.convert_proper_array(x_seq, numPedsList_seq, PedsList_seq)

            # will be used for error calculation
            orig_x_seq = x_seq.clone()

            target_id_values = x_seq[0][lookup_seq[target_id], 0:2]

            # grid mask calculation (only the social/obstacle variants use it)
            if sample_args.method == 2: #obstacle lstm
                grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, saved_args.neighborhood_size, saved_args.grid_size, saved_args.use_cuda, True)
            elif sample_args.method == 1: #social lstm
                grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, saved_args.neighborhood_size, saved_args.grid_size, saved_args.use_cuda)

            # vectorize datapoints (positions become offsets from first frame)
            x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)

            # <---------------- Experimental block (may need update in methods)----------------------->
            # x_seq = translate(x_seq, PedsList_seq, lookup_seq ,target_id_values)
            # angle = angle_between(reference_point, (x_seq[1][lookup_seq[target_id], 0].data.numpy(), x_seq[1][lookup_seq[target_id], 1].data.numpy()))
            # x_seq = rotate_traj_with_target_ped(x_seq, angle, PedsList_seq, lookup_seq)
            # # Compute grid masks
            # grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, sample_args.neighborhood_size, sample_args.grid_size, sample_args.use_cuda)
            # x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)

            if sample_args.use_cuda:
                x_seq = x_seq.cuda()

            if sample_args.method == 3: #vanilla lstm
                ret_x_seq, loss = sample_validation_data_vanilla(x_seq, PedsList_seq, sample_args, net, lookup_seq, numPedsList_seq, dataloader)
            else:
                ret_x_seq, loss = sample_validation_data(x_seq, PedsList_seq, grid_seq, sample_args, net, lookup_seq, numPedsList_seq, dataloader)

            # <---------------------Experimental inverse block -------------->
            # ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq, target_id_values, first_values_dict)
            # ret_x_seq = rotate_traj_with_target_ped(ret_x_seq, -angle, PedsList_seq, lookup_seq)
            # ret_x_seq = translate(ret_x_seq, PedsList_seq, lookup_seq ,-target_id_values)

            # revert the points back to original space
            ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq, first_values_dict)

            err = get_mean_error(ret_x_seq.data, orig_x_seq.data, PedsList_seq, PedsList_seq, sample_args.use_cuda, lookup_seq)
            f_err = get_final_error(ret_x_seq.data, orig_x_seq.data, PedsList_seq, PedsList_seq, lookup_seq)

            loss_batch += loss
            err_batch += err
            f_err_batch += f_err

            results.append((orig_x_seq.data.cpu().numpy(), ret_x_seq.data.cpu().numpy(), PedsList_seq, lookup_seq, dataloader.get_frame_sequence(sample_args.seq_length), target_id))

            end = time.time()
            print('Current file : ', dataloader.get_file_name(0),' Batch : ', batch+1, ' Sequence: ', sequence+1, ' Sequence mean error: ', err,' Sequence final error: ',f_err,' time: ', end - start)

        loss_batch = loss_batch / dataloader.batch_size
        err_batch = err_batch / dataloader.batch_size
        f_err_batch = f_err_batch / dataloader.batch_size
        num_of_batch += 1
        loss_epoch += loss_batch.item()
        err_epoch += err_batch
        f_err_epoch += f_err_batch

    epoch_result.append(results)

    if dataloader.num_batches != 0:
        loss_epoch = loss_epoch / dataloader.num_batches
        err_epoch = err_epoch / dataloader.num_batches
        f_err_epoch = f_err_epoch / dataloader.num_batches
        print('valid_loss = {:.3f}, valid_mean_err = {:.3f}, valid_final_err = {:.3f}'.format(loss_epoch, err_epoch, f_err_epoch))

    dataloader.write_to_plot_file(epoch_result, os.path.join(plot_directory, plot_validation_file_directory))
# Script entry point: run the validation pass when executed directly.
if __name__ == '__main__':
    main()
| 41.752212 | 195 | 0.654939 |
87574cedab14c247e3a7b597a87e796cb89a9c38 | 16,234 | py | Python | xtune/src/transformers/data/processors/xtreme.py | aimore-globality/unilm | f25b30a2b33374e6fa407849c3749a07960535a2 | [
"MIT"
] | 5,129 | 2019-09-30T11:21:03.000Z | 2022-03-31T22:35:12.000Z | xtune/src/transformers/data/processors/xtreme.py | aimore-globality/unilm | f25b30a2b33374e6fa407849c3749a07960535a2 | [
"MIT"
] | 604 | 2019-10-05T00:39:46.000Z | 2022-03-31T11:12:07.000Z | xtune/src/transformers/data/processors/xtreme.py | aimore-globality/unilm | f25b30a2b33374e6fa407849c3749a07960535a2 | [
"MIT"
] | 1,034 | 2019-09-30T15:01:32.000Z | 2022-03-31T06:14:50.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers """
import logging
import os
import random
from ...file_utils import is_tf_available
from .utils import DataProcessor, InputExample, InputFeatures
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
def xtreme_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=512,
    task=None,
    label_list=None,
    output_mode=None,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
    word_dropout_rate=0.0,
):
    """
    Loads a data file into a list of ``InputFeatures``

    Args:
        examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
        tokenizer: Instance of a tokenizer that will tokenize the examples
        max_length: Maximum example length
        task: GLUE task
        label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
        output_mode: String indicating the output mode. Either ``regression`` or ``classification``
        pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
        pad_token: Padding token
        pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
        mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
            and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
            actual values)

    Returns:
        If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
        containing the task-specific features. If the input is a list of ``InputExamples``, will return
        a list of task-specific ``InputFeatures`` which can be fed to the model.

    """
    is_tf_dataset = False
    if is_tf_available() and isinstance(examples, tf.data.Dataset):
        is_tf_dataset = True

    # When a task name is given, fill in whatever the caller did not
    # supply (labels / output mode) from the task's registered processor.
    if task is not None:
        processor = xtreme_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info("Using label list %s for task %s" % (label_list, task))
        if output_mode is None:
            output_mode = xtreme_output_modes[task]
            logger.info("Using output mode %s for task %s" % (output_mode, task))

    # Map each string label to its integer index.
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        len_examples = 0
        if is_tf_dataset:
            # TF datasets yield tensor dicts; convert to an InputExample first.
            example = processor.get_example_from_tensor_dict(example)
            example = processor.tfds_map(example)
            len_examples = tf.data.experimental.cardinality(examples)
        else:
            len_examples = len(examples)
        if ex_index % 10000 == 0:
            logger.info("Writing example %d/%d" % (ex_index, len_examples))

        # Tokenize the (possibly paired) texts; word_dropout_rate is forwarded
        # to the tokenizer — presumably it randomly drops words during
        # encoding (project-specific extension; confirm in the tokenizer).
        inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, word_dropout_rate=word_dropout_rate,)
        input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)

        assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
        assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
            len(attention_mask), max_length
        )
        assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
            len(token_type_ids), max_length
        )

        # Convert the string label to the form the model expects.
        if output_mode == "classification":
            label = label_map[example.label]
        elif output_mode == "regression":
            label = float(example.label)
        else:
            raise KeyError(output_mode)

        # Log the first few examples for manual inspection.
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("text a: %s" % (example.text_a))
            logger.info("text b: %s" % (example.text_b))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
            logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label))

        features.append(
            InputFeatures(
                input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label,
                guid=example.guid
            )
        )

    # Mirror the input container type: TF dataset in, TF dataset out.
    if is_tf_available() and is_tf_dataset:

        def gen():
            for ex in features:
                yield (
                    {
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        return tf.data.Dataset.from_generator(
            gen,
            ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
            (
                {
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "token_type_ids": tf.TensorShape([None]),
                },
                tf.TensorShape([]),
            ),
        )

    return features
class PawsxProcessor(DataProcessor):
    """Processor for the PAWS-X data set (XTREME version)."""

    def __init__(self, language, train_language=None):
        # `language` selects the dev/test split files; `train_language`
        # (when set) overrides the language used for translate-train data.
        self.language = language
        self.train_language = train_language

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        # Training data is always the English split.
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train-en.tsv")), "train")

    def get_translate_train_examples(self, data_dir):
        """Build examples from the machine-translated training file.

        Columns 0/1 hold the sentence pair and the last column the label.
        """
        lg = self.language if self.train_language is None else self.train_language
        lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % ("translate", i)
            text_a = line[0]
            text_b = line[1]
            label = line[-1]
            assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_valid_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev-{}.tsv".format(self.language))), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test-{}.tsv".format(self.language))),
                                     "test")

    def get_labels(self):
        """See base class."""
        # Binary paraphrase labels.
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text_a = line[0]
            text_b = line[1]
            label = line[-1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_translate_train_dict(self, data_dir, tgt2src_dict, tgt2src_cnt):
        """See base class."""
        # Builds a source->translation mapping (returned) and fills the
        # caller-supplied translation->source mapping in place. Duplicate
        # sentences are resolved by keeping each candidate with probability
        # 1/count — i.e. a uniformly random choice among duplicates
        # (reservoir sampling with k=1).
        lg = self.language if self.train_language is None else self.train_language
        lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
        # NOTE: `dict` shadows the builtin of the same name inside this method.
        dict = {}
        cnt = {}
        for (i, line) in enumerate(lines):
            # Columns: 0/1 source pair, 2/3 their translations.
            text_a = line[0].strip()
            text_b = line[1].strip()
            translated_text_a = line[2].strip()
            translated_text_b = line[3].strip()
            assert isinstance(text_a, str) and isinstance(text_b, str) and \
                   isinstance(translated_text_a, str) and isinstance(translated_text_b, str)
            if text_a not in cnt:
                cnt[text_a] = 0
            cnt[text_a] += 1
            if text_b not in cnt:
                cnt[text_b] = 0
            cnt[text_b] += 1

            if text_a not in dict or random.random() <= 1.0 / cnt[text_a]:
                dict[text_a] = translated_text_a
            if text_b not in dict or random.random() <= 1.0 / cnt[text_b]:
                dict[text_b] = translated_text_b

            if translated_text_a not in tgt2src_cnt:
                tgt2src_cnt[translated_text_a] = 0
            tgt2src_cnt[translated_text_a] += 1
            if translated_text_b not in tgt2src_cnt:
                tgt2src_cnt[translated_text_b] = 0
            tgt2src_cnt[translated_text_b] += 1

            if translated_text_a not in tgt2src_dict or random.random() <= 1.0 / tgt2src_cnt[translated_text_a]:
                tgt2src_dict[translated_text_a] = text_a
            if translated_text_b not in tgt2src_dict or random.random() <= 1.0 / tgt2src_cnt[translated_text_b]:
                tgt2src_dict[translated_text_b] = text_b
        return dict
class XnliProcessor(DataProcessor):
    """Processor for the XNLI data set (XTREME version)."""

    def __init__(self, language, train_language=None):
        # `language` selects dev/test files; `train_language` (when set)
        # overrides the language used for training / translate-train data.
        self.language = language
        self.train_language = train_language

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence"].numpy().decode("utf-8"),
            None,
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        lg = self.language if self.train_language is None else self.train_language
        lines = self._read_tsv(os.path.join(data_dir, "train-{}.tsv".format(lg)))
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % ("train", i)
            text_a = line[0]
            text_b = line[1]
            # The raw files use "contradictory"; normalize to "contradiction".
            label = "contradiction" if line[2] == "contradictory" else line[2]
            assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_translate_train_examples(self, data_dir):
        """Build examples from the machine-translated training file.

        Columns: 0/1 source pair, 2/3 translations, 4 label (see
        get_translate_train_dict for the same layout).
        """
        lg = self.language if self.train_language is None else self.train_language
        lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % ("train", i)
            text_a = line[0]
            text_b = line[1]
            # BUGFIX: the fallback previously read line[2] (a translated
            # sentence) instead of the label column line[4] that the very
            # same condition tests.
            label = "contradiction" if line[4] == "contradictory" else line[4]
            assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_translate_train_dict(self, data_dir, tgt2src_dict, tgt2src_cnt):
        """See base class."""
        # Builds a source->translation mapping (returned) and fills the
        # caller-supplied translation->source mapping in place. Duplicates
        # are resolved by keeping each candidate with probability 1/count
        # (reservoir sampling with k=1).
        lg = self.language if self.train_language is None else self.train_language
        lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
        # NOTE: `dict` shadows the builtin of the same name inside this method.
        dict = {}
        cnt = {}
        for (i, line) in enumerate(lines):
            text_a = line[0].strip()
            text_b = line[1].strip()
            translated_text_a = line[2].strip()
            translated_text_b = line[3].strip()
            assert isinstance(text_a, str) and isinstance(text_b, str) and \
                   isinstance(translated_text_a, str) and isinstance(translated_text_b, str)
            if text_a not in cnt:
                cnt[text_a] = 0
            cnt[text_a] += 1
            if text_b not in cnt:
                cnt[text_b] = 0
            cnt[text_b] += 1

            if text_a not in dict or random.random() <= 1.0 / cnt[text_a]:
                dict[text_a] = translated_text_a
            if text_b not in dict or random.random() <= 1.0 / cnt[text_b]:
                dict[text_b] = translated_text_b

            if translated_text_a not in tgt2src_cnt:
                tgt2src_cnt[translated_text_a] = 0
            tgt2src_cnt[translated_text_a] += 1
            if translated_text_b not in tgt2src_cnt:
                tgt2src_cnt[translated_text_b] = 0
            tgt2src_cnt[translated_text_b] += 1

            if translated_text_a not in tgt2src_dict or random.random() <= 1.0 / tgt2src_cnt[translated_text_a]:
                tgt2src_dict[translated_text_a] = text_a
            if translated_text_b not in tgt2src_dict or random.random() <= 1.0 / tgt2src_cnt[translated_text_b]:
                tgt2src_dict[translated_text_b] = text_b
        return dict

    def get_valid_examples(self, data_dir):
        """See base class."""
        return self.get_test_valid_examples(data_dir, "dev")

    def get_test_examples(self, data_dir):
        return self.get_test_valid_examples(data_dir, "test")

    def get_test_valid_examples(self, data_dir, split):
        """Shared loader for the dev and test splits."""
        assert split in ["test", "dev"]
        lines = self._read_tsv(os.path.join(data_dir, "{}-{}.tsv".format(split, self.language)))
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (split, i)
            text_a = line[0]
            text_b = line[1]
            label = line[2]
            assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
# Number of classification labels for each XTREME task.
xtreme_tasks_num_labels = {
    "xnli": 3,
    "pawsx": 2,
}

# Task name -> DataProcessor implementation.
xtreme_processors = {
    "xnli": XnliProcessor,
    "pawsx": PawsxProcessor,
}

# Task name -> output head type (both tasks are classification).
xtreme_output_modes = {
    "xnli": "classification",
    "pawsx": "classification",
}
| 40.483791 | 156 | 0.609831 |
11ff5f5cf63658772e869b15199de039a84880e3 | 913 | py | Python | samples/openapi3/client/petstore/python-experimental/test/test_health_check_result.py | data-experts/openapi-generator | b86a51ae17e25feae0e0c9f9e6f423b8ff54057d | [
"Apache-2.0"
] | 1 | 2021-07-13T23:28:49.000Z | 2021-07-13T23:28:49.000Z | samples/openapi3/client/petstore/python-experimental/test/test_health_check_result.py | data-experts/openapi-generator | b86a51ae17e25feae0e0c9f9e6f423b8ff54057d | [
"Apache-2.0"
] | 5 | 2021-05-11T22:59:16.000Z | 2022-02-27T10:31:06.000Z | samples/openapi3/client/petstore/python-experimental/test/test_health_check_result.py | data-experts/openapi-generator | b86a51ae17e25feae0e0c9f9e6f423b8ff54057d | [
"Apache-2.0"
] | 1 | 2019-10-06T12:57:47.000Z | 2019-10-06T12:57:47.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import sys
import unittest
import petstore_api
from petstore_api.model.health_check_result import HealthCheckResult
class TestHealthCheckResult(unittest.TestCase):
    """Unit-test stubs for the ``HealthCheckResult`` model."""

    def setUp(self):
        """No fixtures are needed for these stub tests."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testHealthCheckResult(self):
        """Placeholder for a ``HealthCheckResult`` construction test.

        FIXME: construct the object with its mandatory attributes set to
        example values, e.g. ``HealthCheckResult()``.
        """
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 23.410256 | 174 | 0.705367 |
cf6e47425c9dec3696936929e9c01c78eb0d6e23 | 2,401 | py | Python | src/advent/solutions/day12.py | khwilson/advent2021 | 6499b883f1b6d7366f3fe75dc229d646154a4cf8 | [
"MIT"
] | null | null | null | src/advent/solutions/day12.py | khwilson/advent2021 | 6499b883f1b6d7366f3fe75dc229d646154a4cf8 | [
"MIT"
] | null | null | null | src/advent/solutions/day12.py | khwilson/advent2021 | 6499b883f1b6d7366f3fe75dc229d646154a4cf8 | [
"MIT"
] | null | null | null | import string
from collections import defaultdict
from typing import Dict, List
from ..solution import Solution
def count_paths(data: Dict[str, List[str]], path: List[str]) -> int:
    """Count the distinct routes from the end of *path* to "end".

    Depth-first search over the cave graph in which big caves (uppercase
    names) may be revisited freely but each small cave (lowercase name)
    may appear at most once per route. *path* is used as a mutable stack
    and is restored before returning.
    """
    current = path[-1]
    total = 0
    for neighbor in data[current]:
        if neighbor == "end":
            total += 1
            continue
        is_big = neighbor[0] in string.ascii_uppercase
        is_unvisited_small = (neighbor[0] in string.ascii_lowercase) and (neighbor not in path)
        if is_big or is_unvisited_small:
            path.append(neighbor)
            total += count_paths(data, path)
            path.pop()
    return total
def count_paths_part2(
    data: Dict[str, List[str]], path: List[str], already_done_double: bool
) -> int:
    """Count routes to "end" where one small cave may be visited twice.

    Same DFS as :func:`count_paths`, except that exactly one lowercase
    cave per route may be entered a second time; *already_done_double*
    records whether that allowance has been spent. "start" is never
    re-entered. *path* is used as a mutable stack and restored on return.
    """
    current = path[-1]
    total = 0
    for neighbor in data[current]:
        if neighbor == "start":
            # The start cave may never be re-entered.
            continue
        if neighbor == "end":
            total += 1
            continue
        if neighbor[0] in string.ascii_uppercase:
            # Big caves are always allowed.
            path.append(neighbor)
            total += count_paths_part2(data, path, already_done_double)
            path.pop()
            continue
        # Small cave: first visit is free; a repeat visit spends the
        # one-time double-visit allowance.
        if neighbor not in path:
            path.append(neighbor)
            total += count_paths_part2(data, path, already_done_double)
            path.pop()
        elif not already_done_double:
            path.append(neighbor)
            total += count_paths_part2(data, path, True)
            path.pop()
    return total
class Day12(Solution, day=12):
    """Advent of Code day 12: counting routes through a cave graph."""

    def parse(self):
        """Read the puzzle input into an undirected adjacency mapping."""
        adjacency = defaultdict(list)
        with open(self.input_file, "rt") as handle:
            for raw in handle:
                stripped = raw.strip()
                if stripped:
                    a, b = stripped.split("-")
                    adjacency[a].append(b)
                    adjacency[b].append(a)
        return adjacency

    def part1(self):
        # DFS through the caves; small caves visited at most once.
        return count_paths(self.data, ["start"])

    def part2(self):
        # DFS where a single small cave may be visited twice.
        return count_paths_part2(self.data, ["start"], False)
| 30.392405 | 88 | 0.540608 |
5671b54d7e6630a1fb02aafb4cd401d16382ae6e | 162 | py | Python | 4.17/demo/cookdemo/urls.py | 1179069501/- | b76902a35ed78678c1663f243330bcb014abace1 | [
"MIT"
] | null | null | null | 4.17/demo/cookdemo/urls.py | 1179069501/- | b76902a35ed78678c1663f243330bcb014abace1 | [
"MIT"
] | null | null | null | 4.17/demo/cookdemo/urls.py | 1179069501/- | b76902a35ed78678c1663f243330bcb014abace1 | [
"MIT"
] | null | null | null | from django.urls import re_path
from .import views
urlpatterns = [
re_path(r'^setcook/$',views.setcookfunc),
re_path(r'^getcook/$',views.getcookfunc),
]
| 20.25 | 45 | 0.709877 |
c97a58d000e753a8634c6be4026c80a7d76912be | 1,864 | py | Python | transformer/classes/transformer_model.py | alfa-th/lang-modeler-pytorch | a13e94841df9fc3996b33a93d0a58a99c0596359 | [
"Apache-2.0"
] | null | null | null | transformer/classes/transformer_model.py | alfa-th/lang-modeler-pytorch | a13e94841df9fc3996b33a93d0a58a99c0596359 | [
"Apache-2.0"
] | null | null | null | transformer/classes/transformer_model.py | alfa-th/lang-modeler-pytorch | a13e94841df9fc3996b33a93d0a58a99c0596359 | [
"Apache-2.0"
] | null | null | null | import math
import torch
from torch import nn, Tensor
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from .positional_encoding import PositionalEncoding
class TransformerModel(nn.Module):
    """Token embedding + positional encoding + Transformer encoder + linear decoder."""

    def __init__(self, ntoken: int, d_model: int, nhead: int, d_hid: int, nlayers: int, dropout: float = 0.5):
        """Build the language model.

        Args:
            ntoken (int): Vocabulary size.
            d_model (int): Embedding dimension of the model.
            nhead (int): Number of attention heads per encoder layer.
            d_hid (int): Feed-forward hidden dimension inside each encoder layer.
            nlayers (int): Number of stacked encoder layers.
            dropout (float, optional): Dropout probability in the encoder. Defaults to 0.5.
        """
        super().__init__()
        self.model_type = "Transformers"
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        single_layer = TransformerEncoderLayer(d_model, nhead, d_hid, dropout)
        self.transformer_encoder = TransformerEncoder(single_layer, nlayers)
        self.encoder = nn.Embedding(ntoken, d_model)
        self.decoder = nn.Linear(d_model, ntoken)
        self.d_model = d_model

        self.init_weights()

    def init_weights(self) -> None:
        """Uniformly initialise embedding/decoder weights; zero the decoder bias."""
        bound = 0.1
        self.encoder.weight.data.uniform_(-bound, bound)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-bound, bound)

    def forward(self, src: Tensor, src_mask: Tensor) -> Tensor:
        """Run a forward pass.

        Args:
            src: Tensor, shape [seq_len, batch_size]
            src_mask: Tensor, shape [seq_len, seq_len]

        Returns:
            Output tensor of shape [seq_len, batch_size, ntoken].
        """
        # Scale embeddings by sqrt(d_model) before adding positions.
        embedded = self.encoder(src) * math.sqrt(self.d_model)
        positioned = self.pos_encoder(embedded)
        encoded = self.transformer_encoder(positioned, src_mask)
        return self.decoder(encoded)
| 32.701754 | 108 | 0.701717 |
47edbedb3d595d42ff4451d944bf4104c7472f02 | 2,417 | py | Python | venv/Lib/site-packages/pyrogram/raw/functions/invoke_with_takeout.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/raw/functions/invoke_with_takeout.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/functions/invoke_with_takeout.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class InvokeWithTakeout(TLObject):  # type: ignore
    """Telegram API method.

    Details:
        - Layer: ``126``
        - ID: ``0xaca9fd2e``

    Parameters:
        takeout_id: ``int`` ``64-bit``
        query: Any method from :obj:`~pyrogram.raw.functions`

    Returns:
        Any object from :obj:`~pyrogram.raw.types`
    """

    __slots__: List[str] = ["takeout_id", "query"]

    ID = 0xaca9fd2e
    QUALNAME = "functions.InvokeWithTakeout"

    def __init__(self, *, takeout_id: int, query: TLObject) -> None:
        self.takeout_id = takeout_id  # long
        self.query = query  # !X

    @staticmethod
    def read(data: BytesIO, *args: Any) -> "InvokeWithTakeout":
        # Deserialize the fields in declaration order (takeout_id, then the
        # wrapped query). NOTE(review): `data` appears to be positioned past
        # the 4-byte constructor ID — presumably consumed by the upstream
        # TLObject dispatcher; confirm before changing.
        # No flags
        
        takeout_id = Long.read(data)
        
        query = TLObject.read(data)
        
        return InvokeWithTakeout(takeout_id=takeout_id, query=query)

    def write(self) -> bytes:
        # Wire layout written here: constructor ID as int32, takeout_id as
        # int64, then the wrapped query's own serialization. The statement
        # order IS the binary format — do not reorder.
        data = BytesIO()
        data.write(Int(self.ID, False))

        # No flags
        
        data.write(Long(self.takeout_id))
        
        data.write(self.query.write())
        
        return data.getvalue()
| 30.987179 | 103 | 0.618535 |
dc3691a226cf89fb68d71d7082de6ca1bc1a9a56 | 1,314 | py | Python | core/searchLocalCH.py | Ankesh054-official/LittleBrother-GUI- | db1f338109b756a78c2fb142a9f7a5263aef3c12 | [
"MIT"
] | 16 | 2020-09-16T09:10:43.000Z | 2022-02-17T02:19:54.000Z | core/searchLocalCH.py | swagkarna/LittleBrother-GUI- | 73bfca36c3c0640b9c59c135ca1877e84449bf77 | [
"MIT"
] | 1 | 2020-12-24T02:06:26.000Z | 2021-01-14T05:50:08.000Z | core/searchLocalCH.py | Ankesh054-official/LittleBrother-GUI- | db1f338109b756a78c2fb142a9f7a5263aef3c12 | [
"MIT"
] | 4 | 2020-09-16T14:00:47.000Z | 2021-01-04T04:16:23.000Z | import time
import requests
from tkinter import *
from bs4 import BeautifulSoup
from terminaltables import SingleTable
def searchLocalCH(progress, self, text, url):
    """Scrape a local.ch results page and render the listings as a table.

    Fetches ``url``, extracts listing name / address / phone number triples,
    renders them with ``SingleTable`` and inserts the result into the ``text``
    widget, advancing the progress bar along the way.

    Parameters:
        progress: ttk ``Progressbar`` whose ``value`` is advanced in steps.
        self: the Tk widget owning the progress bar; ``update_idletasks`` is
            called on it so the bar repaints between steps.
        text: Tk ``Text`` widget receiving the rendered table.
        url: local.ch search-results URL to fetch.
    """
    def advance(value):
        # Move the progress bar and give Tk a chance to repaint.
        progress['value'] = value
        self.update_idletasks()
        time.sleep(0.1)

    # A request without a timeout can hang the GUI forever on a stalled
    # connection, so cap it explicitly.
    data = requests.get(url, timeout=30).content.decode("utf-8")
    soup = BeautifulSoup(data, "html.parser")
    advance(30)
    # NOTE(review): ``tag.string`` may be None for tags containing nested
    # markup, which would raise AttributeError here (same as the original
    # code) -- confirm against real local.ch pages.
    names = [tag.string.strip() for tag in soup.find_all("span", {"class": "listing-title"})]
    addresses = [tag.string.strip() for tag in soup.find_all("div", {"class": "listing-address small"})]
    phones = [tag.getText().replace("*", "").strip()
              for tag in soup.find_all("a", {"class": "btn btn-sm listing-contact-phone lui-margin-right-xs number phone-number"})]
    advance(40)
    # Header row first, then one row per listing.
    table_data = [
        ("Name", "Adresse", "Telephone"),
    ]
    table_data.extend(zip(names, addresses, phones))
    advance(60)
    table = SingleTable(table_data, title="Yellow")
    text.insert(END, table.table)
    advance(70)
8a4e71c1f768547a8bffe7ae96575f0cce9a23fc | 2,927 | py | Python | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/ListJobStatusRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/ListJobStatusRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ccc/aliyunsdkccc/request/v20170705/ListJobStatusRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class ListJobStatusRequest(RpcRequest):
    """Request object for the CCC ``ListJobStatus`` operation (API 2017-07-05).

    Each query parameter is exposed through a ``get_X``/``set_X`` accessor
    pair that delegates to the query-parameter dictionary maintained by
    ``RpcRequest``.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'CCC', '2017-07-05', 'ListJobStatus')
        # Attach endpoint resolution data when the installed SDK core
        # exposes the corresponding attributes.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_TimeAlignment(self):
        params = self.get_query_params()
        return params.get('TimeAlignment')

    def set_TimeAlignment(self, TimeAlignment):
        self.add_query_param('TimeAlignment', TimeAlignment)

    def get_GroupId(self):
        params = self.get_query_params()
        return params.get('GroupId')

    def set_GroupId(self, GroupId):
        self.add_query_param('GroupId', GroupId)

    def get_PhoneNumber(self):
        params = self.get_query_params()
        return params.get('PhoneNumber')

    def set_PhoneNumber(self, PhoneNumber):
        self.add_query_param('PhoneNumber', PhoneNumber)

    def get_EndTime(self):
        params = self.get_query_params()
        return params.get('EndTime')

    def set_EndTime(self, EndTime):
        self.add_query_param('EndTime', EndTime)

    def get_StartTime(self):
        params = self.get_query_params()
        return params.get('StartTime')

    def set_StartTime(self, StartTime):
        self.add_query_param('StartTime', StartTime)

    def get_PageNumber(self):
        params = self.get_query_params()
        return params.get('PageNumber')

    def set_PageNumber(self, PageNumber):
        self.add_query_param('PageNumber', PageNumber)

    def get_ContactName(self):
        params = self.get_query_params()
        return params.get('ContactName')

    def set_ContactName(self, ContactName):
        self.add_query_param('ContactName', ContactName)

    def get_InstanceId(self):
        params = self.get_query_params()
        return params.get('InstanceId')

    def set_InstanceId(self, InstanceId):
        self.add_query_param('InstanceId', InstanceId)

    def get_PageSize(self):
        params = self.get_query_params()
        return params.get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_query_param('PageSize', PageSize)

    def get_ScenarioId(self):
        params = self.get_query_params()
        return params.get('ScenarioId')

    def set_ScenarioId(self, ScenarioId):
        self.add_query_param('ScenarioId', ScenarioId)
1cfcd019dfb18eb755b9773780c874c7950c123e | 4,776 | py | Python | python/pyspark/ml/feature.py | MiguelPeralvo/spark | 979a73f86f77e7ae294979b7962b8ae30d38f1ff | [
"Apache-2.0"
] | 13 | 2015-02-11T21:20:03.000Z | 2019-02-07T22:29:06.000Z | python/pyspark/ml/feature.py | MiguelPeralvo/spark | 979a73f86f77e7ae294979b7962b8ae30d38f1ff | [
"Apache-2.0"
] | 6 | 2019-11-13T07:48:07.000Z | 2022-01-21T23:24:20.000Z | python/pyspark/ml/feature.py | MiguelPeralvo/spark | 979a73f86f77e7ae294979b7962b8ae30d38f1ff | [
"Apache-2.0"
] | 9 | 2015-06-05T22:02:01.000Z | 2020-11-24T06:12:17.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.ml.param.shared import HasInputCol, HasOutputCol, HasNumFeatures
from pyspark.ml.util import keyword_only
from pyspark.ml.wrapper import JavaTransformer
from pyspark.mllib.common import inherit_doc
__all__ = ['Tokenizer', 'HashingTF']
@inherit_doc
class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol):
    """
    A tokenizer that converts the input string to lowercase and then
    splits it by white spaces.
    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(text="a b c")]).toDF()
    >>> tokenizer = Tokenizer(inputCol="text", outputCol="words")
    >>> print tokenizer.transform(df).head()
    Row(text=u'a b c', words=[u'a', u'b', u'c'])
    >>> # Change a parameter.
    >>> print tokenizer.setParams(outputCol="tokens").transform(df).head()
    Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
    >>> # Temporarily modify a parameter.
    >>> print tokenizer.transform(df, {tokenizer.outputCol: "words"}).head()
    Row(text=u'a b c', words=[u'a', u'b', u'c'])
    >>> print tokenizer.transform(df).head()
    Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
    >>> # Must use keyword arguments to specify params.
    >>> tokenizer.setParams("text")
    Traceback (most recent call last):
    ...
    TypeError: Method setParams forces keyword arguments.
    """
    # Fully qualified name of the JVM transformer this Python class wraps.
    _java_class = "org.apache.spark.ml.feature.Tokenizer"
    @keyword_only
    def __init__(self, inputCol="input", outputCol="output"):
        """
        __init__(self, inputCol="input", outputCol="output")
        """
        super(Tokenizer, self).__init__()
        # ``_input_kwargs`` is attached by the @keyword_only decorator and
        # holds only the keyword arguments the caller actually supplied.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, inputCol="input", outputCol="output"):
        """
        setParams(self, inputCol="input", outputCol="output")
        Sets params for this Tokenizer.
        """
        # Forward the explicitly supplied keyword arguments to the Params
        # machinery so they are merged with the defaults.
        kwargs = self.setParams._input_kwargs
        return self._set_params(**kwargs)
@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures):
    """
    Maps a sequence of terms to their term frequencies using the
    hashing trick.
    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(words=["a", "b", "c"])]).toDF()
    >>> hashingTF = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
    >>> print hashingTF.transform(df).head().features
    (10,[7,8,9],[1.0,1.0,1.0])
    >>> print hashingTF.setParams(outputCol="freqs").transform(df).head().freqs
    (10,[7,8,9],[1.0,1.0,1.0])
    >>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"}
    >>> print hashingTF.transform(df, params).head().vector
    (5,[2,3,4],[1.0,1.0,1.0])
    """
    # Fully qualified name of the JVM transformer this Python class wraps.
    _java_class = "org.apache.spark.ml.feature.HashingTF"
    @keyword_only
    def __init__(self, numFeatures=1 << 18, inputCol="input", outputCol="output"):
        """
        __init__(self, numFeatures=1 << 18, inputCol="input", outputCol="output")
        """
        super(HashingTF, self).__init__()
        # ``_input_kwargs`` is attached by the @keyword_only decorator and
        # holds only the keyword arguments the caller actually supplied.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, numFeatures=1 << 18, inputCol="input", outputCol="output"):
        """
        setParams(self, numFeatures=1 << 18, inputCol="input", outputCol="output")
        Sets params for this HashingTF.
        """
        # Forward the explicitly supplied keyword arguments to the Params
        # machinery so they are merged with the defaults.
        kwargs = self.setParams._input_kwargs
        return self._set_params(**kwargs)
if __name__ == "__main__":
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import SQLContext

    # Run the doctests embedded in this module against a live SparkContext.
    test_globals = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark_context = SparkContext("local[2]", "ml.feature tests")
    sql_context = SQLContext(spark_context)
    test_globals['sc'] = spark_context
    test_globals['sqlContext'] = sql_context
    results = doctest.testmod(globs=test_globals, optionflags=doctest.ELLIPSIS)
    spark_context.stop()
    if results.failed:
        exit(-1)
4cc2a26dbba9d78b0330fe26b4421923a256af23 | 25,156 | py | Python | tests/core/test_bungiesearch.py | ChristopherRabotin/bungiesearch | 13768342bc2698b214eb0003c2d113b6e273c30d | [
"BSD-3-Clause"
] | 40 | 2015-12-12T05:27:59.000Z | 2020-06-11T13:20:00.000Z | tests/core/test_bungiesearch.py | Sparrho/bungiesearch | 13768342bc2698b214eb0003c2d113b6e273c30d | [
"BSD-3-Clause"
] | 56 | 2015-02-02T17:00:02.000Z | 2015-11-25T09:43:27.000Z | tests/core/test_bungiesearch.py | ChristopherRabotin/bungiesearch | 13768342bc2698b214eb0003c2d113b6e273c30d | [
"BSD-3-Clause"
] | 22 | 2016-02-15T13:54:49.000Z | 2020-12-24T08:00:18.000Z | from datetime import datetime
from django.core.management import call_command
from django.test import TestCase, override_settings
from six import iteritems
import pytz
from bungiesearch import Bungiesearch
from bungiesearch.utils import update_index
from core.bungie_signal import BungieTestSignalProcessor
from core.models import (Article, ManangedButEmpty, NoUpdatedField, Unmanaged,
User)
from core.search_indices import ArticleIndex, UserIndex
class CoreTestCase(TestCase):
    """Integration tests for bungiesearch's core behaviour.

    Exercises index/mapping generation, search-to-model mapping, search
    aliases, signal-driven index updates, bulk deletes and index selection.
    The fixtures (two ``Article``s, two ``User``s and one ``NoUpdatedField``)
    are created once for the whole class and indexed via ``rebuild_index``.
    """

    @classmethod
    def setUpClass(cls):
        """Create the indices, the fixtures, and populate the indices."""
        # Let's start by creating the index and mapping.
        # If we create an object before the index, the index
        # will be created automatically, and we want to test the command.
        call_command('search_index', action='create')
        art_1 = {'title': 'Title one',
                 'description': 'Description of article 1.',
                 'text_field': '',
                 'link': 'http://example.com/article_1',
                 'published': pytz.UTC.localize(datetime(year=2020, month=9, day=15)),
                 'updated': pytz.UTC.localize(datetime(year=2014, month=9, day=10)),
                 'tweet_count': 20,
                 'source_hash': 159159159159,
                 'missing_data': '',
                 'positive_feedback': 50,
                 'negative_feedback': 5,
                 }
        user_1 = {'user_id': 'bungie1',
                  'about': 'Description of user 1',
                  'created': pytz.UTC.localize(datetime(year=2015, month=1, day=1)),
                  'updated': pytz.UTC.localize(datetime(year=2015, month=6, day=1)),
                  }
        Article.objects.create(**art_1)
        User.objects.create(**user_1)
        art_2 = dict((k, v) for k, v in iteritems(art_1))
        art_2['link'] += '/page2'
        art_2['title'] = 'Title two'
        art_2['description'] = 'This is a second article.'
        art_2['text_field'] = None
        art_2['published'] = pytz.UTC.localize(datetime(year=2010, month=9, day=15))
        user_2 = dict((k, v) for k, v in iteritems(user_1))
        user_2['user_id'] = 'bungie2'
        user_2['about'] = 'This is the second user'
        user_2['created'] = pytz.UTC.localize(datetime(year=2010, month=9, day=15))
        Article.objects.create(**art_2)
        User.objects.create(**user_2)
        NoUpdatedField.objects.create(field_title='My title', field_description='This is a short description.')
        call_command('rebuild_index', interactive=False, confirmed='guilty-as-charged')

    def test_count_after_clear(self):
        """Rebuilding the index should leave the document count unchanged."""
        # can flake because elasticsearch create API is asynchronous
        self.assertEqual(Article.objects.search_index('bungiesearch_demo').count(), 2)
        call_command('rebuild_index', interactive=False, confirmed='guilty-as-charged')
        self.assertEqual(Article.objects.search_index('bungiesearch_demo').count(), 2)

    @classmethod
    def tearDownClass(cls):
        """Delete the indices created in ``setUpClass``."""
        call_command('search_index', action='delete', confirmed='guilty-as-charged')

    def test_model_index_generation(self):
        '''
        Check that the mapping is the expected one.
        '''
        expected_article = {'properties': {'updated': {'type': 'date', 'null_value': '2013-07-01'},
                                           'description': {'type': 'string', 'boost': 1.35, 'analyzer': 'snowball'},
                                           'text': {'type': 'string', 'analyzer': 'edge_ngram_analyzer'},
                                           'text_field': {'type': 'string', 'analyzer': 'snowball'},
                                           'created': {'type': 'date'},
                                           'title': {'type': 'string', 'boost': 1.75, 'analyzer': 'snowball'},
                                           'authors': {'type': 'string', 'analyzer': 'snowball'},
                                           'meta_data': {'type': 'string', 'analyzer': 'snowball'},
                                           'link': {'type': 'string', 'analyzer': 'snowball'},
                                           'effective_date': {'type': 'date'},
                                           'tweet_count': {'type': 'integer'},
                                           'id': {'type': 'integer'},
                                           '_id': {'type': 'integer'},  # This is the elastic search index.
                                           'published': {'type': 'date'}}
                            }
        expected_user = {'properties': {'updated': {'type': 'date', 'null_value': '2013-07-01'},
                                        'about': {'type': 'string', 'analyzer': 'edge_ngram_analyzer'},
                                        'int_about': {'type': 'integer'},
                                        'user_id': {'analyzer': 'snowball', 'type': 'string'},
                                        'effective_date': {'type': 'date'},
                                        'created': {'type': 'date'},
                                        'name': {'analyzer': 'snowball', 'type': 'string'},
                                        '_id': {'analyzer': 'snowball', 'type': 'string'}}
                         }
        self.assertEqual(ArticleIndex().get_mapping(), expected_article)
        self.assertEqual(UserIndex().get_mapping(), expected_user)

    def test_fetch_item(self):
        '''
        Test searching and mapping.
        '''
        self.assertEqual(Article.objects.search.query('match', _all='Description')[0], Article.objects.get(title='Title one'), 'Searching for "Description" did not return just the first Article.')
        self.assertEqual(Article.objects.search.query('match', _all='second article')[0], Article.objects.get(title='Title two'), 'Searching for "second article" did not return the second Article.')
        self.assertEqual(User.objects.search.query('match', _all='Description')[0], User.objects.get(user_id='bungie1'), 'Searching for "About" did not return the User.')
        self.assertEqual(User.objects.search.query('match', _all='second user')[0], User.objects.get(user_id='bungie2'), 'Searching for "second user" did not return the User.')

    def test_raw_fetch(self):
        '''
        Test searching and mapping.
        '''
        item = Article.objects.search.query('match', _all='Description')[:1:True]
        self.assertTrue(hasattr(item, 'meta'), 'Fetching first raw results did not return an object with a meta attribute.')
        item = User.objects.search.query('match', _all='Description')[:1:True]
        self.assertTrue(hasattr(item, 'meta'), 'Fetching first raw results did not return an object with a meta attribute.')

    def test_iteration(self):
        '''
        Tests iteration on Bungiesearch items.
        '''
        lazy_search_article = Article.objects.search.query('match', title='title')
        db_items = list(Article.objects.all())
        self.assertTrue(all([result in db_items for result in lazy_search_article]), 'Searching for title "title" did not return all articles.')
        self.assertTrue(all([result in db_items for result in lazy_search_article[:]]), 'Searching for title "title" did not return all articles when using empty slice.')
        self.assertEqual(len(lazy_search_article[:1]), 1, 'Get item with start=None and stop=1 did not return one item.')
        self.assertEqual(len(lazy_search_article[:2]), 2, 'Get item with start=None and stop=2 did not return two item.')
        lazy_search_user = User.objects.search.query('match', about='user')
        db_items = list(User.objects.all())
        self.assertTrue(all([result in db_items for result in lazy_search_user]), 'Searching for description "user" did not return all articles.')
        self.assertTrue(all([result in db_items for result in lazy_search_user[:]]), 'Searching for description "user" did not return all articles when using empty slice.')
        self.assertEqual(len(lazy_search_user[:1]), 1, 'Get item with start=None and stop=1 did not return one item.')
        self.assertEqual(len(lazy_search_user[:2]), 2, 'Get item with start=None and stop=2 did not return two item.')

    def test_no_results(self):
        '''
        Test empty results.
        '''
        self.assertEqual(list(Article.objects.search.query('match', _all='nothing')), [], 'Searching for "nothing" did not return an empty list on iterator call.')
        self.assertEqual(Article.objects.search.query('match', _all='nothing')[:10], [], 'Searching for "nothing" did not return an empty list on get item call.')
        self.assertEqual(list(User.objects.search.query('match', _all='nothing')), [], 'Searching for "nothing" did not return an empty list on iterator call.')
        # Bug fix: the second User assertion was a copy-paste duplicate of the
        # first; it now mirrors the Article get-item check above.
        self.assertEqual(User.objects.search.query('match', _all='nothing')[:10], [], 'Searching for "nothing" did not return an empty list on get item call.')

    def test_custom_search(self):
        '''
        Test searching on custom index and doc_type.
        '''
        search = Article.objects.custom_search(index='bungiesearch_demo', doc_type='Article')
        es_art1 = search.query('match', _all='Description')[0]
        db_art1 = Article.objects.get(title='Title one')
        es_art2 = search.query('match', _all='second article')[0]
        db_art2 = Article.objects.get(title='Title two')
        self.assertTrue(all([es_art1.id == db_art1.id, es_art1.title == db_art1.title, es_art1.description == db_art1.description]), 'Searching for "Description" did not return the first Article.')
        self.assertTrue(all([es_art2.id == db_art2.id, es_art2.title == db_art2.title, es_art2.description == db_art2.description]), 'Searching for "second article" did not return the second Article.')
        search = User.objects.custom_search(index='bungiesearch_demo', doc_type='User')
        es_user1 = search.query('match', _all='Description')[0]
        db_user1 = User.objects.get(user_id='bungie1')
        self.assertRaises(AttributeError, getattr, es_user1, 'id')
        self.assertTrue(all([es_user1.user_id == db_user1.user_id, es_user1.about == db_user1.about]), 'Searching for "About" did not return the first User.')

    def test_get_model(self):
        '''
        Test model mapping.
        '''
        self.assertEqual(ArticleIndex().get_model(), Article, 'Model was not Article.')
        self.assertEqual(UserIndex().get_model(), User, 'Model was not User')

    def test_cloning(self):
        '''
        Tests that Bungiesearch remains lazy with specific function which should return clones.
        '''
        inst = Article.objects.search.query('match', _all='Description')
        self.assertIsInstance(inst.only('_id'), inst.__class__, 'Calling `only` does not return a clone of itself.')
        inst = User.objects.search.query('match', _all='Description')
        self.assertIsInstance(inst.only('_id'), inst.__class__, 'Calling `only` does not return a clone of itself.')

    def test_search_alias_exceptions(self):
        '''
        Tests that invalid aliases raise exceptions.
        '''
        self.assertRaises(AttributeError, getattr, Article.objects, 'bsearch_no_such_alias')
        self.assertRaises(NotImplementedError, Article.objects.bsearch_invalidalias)
        self.assertRaises(ValueError, getattr, Article.objects.search.bsearch_title('title query').bsearch_titlefilter('title filter'), 'bsearch_noupdatedmdlonly')

    @override_settings(BUNGIESEARCH={})
    def test_search_alias_not_setup(self):
        '''
        Tests that Bungiesearch is not instantiated when not set up
        This is its own test due to the override_settings decorator
        '''
        self.assertRaises(AttributeError, getattr, Article.objects, 'bsearch_no_such_alias')
        self.assertRaises(AttributeError, getattr, Article.objects, 'bsearch_title_search')

    def test_search_aliases(self):
        '''
        Tests search alias errors and functionality.
        '''
        title_alias = Article.objects.bsearch_title_search('title')
        db_items = list(Article.objects.all())
        self.assertEqual(title_alias.to_dict(), {'query': {'match': {'title': 'title'}}}, 'Title alias search did not return the expected JSON query.')
        self.assertTrue(all([result in db_items for result in title_alias]), 'Alias searching for title "title" did not return all articles.')
        self.assertTrue(all([result in db_items for result in title_alias[:]]), 'Alias searching for title "title" did not return all articles when using empty slice.')
        self.assertEqual(len(title_alias[:1]), 1, 'Get item on an alias search with start=None and stop=1 did not return one item.')
        self.assertEqual(len(title_alias[:2]), 2, 'Get item on an alias search with start=None and stop=2 did not return two item.')
        self.assertEqual(title_alias.to_dict(), Article.objects.bsearch_title('title').to_dict(), 'Alias applicable to all models does not return the same JSON request body as the model specific one.')
        self.assertEqual(NoUpdatedField.objects.search.filter('term', title='My title').to_dict(), NoUpdatedField.objects.bsearch_noupdatedmdlonly('My title').to_dict(), 'Alias applicable only to NoUpdatedField does not generate the correct filter.')

    def test_bungie_instance_search_aliases(self):
        """Chained aliases on a Bungiesearch instance compose query and filter."""
        alias_dictd = Article.objects.search.bsearch_title('title query').bsearch_titlefilter('title filter').to_dict()
        expected = {'query': {'bool': {'filter': [{'term': {'title': 'title filter'}}], 'must': [{'match': {'title': 'title query'}}]}}}
        self.assertEqual(alias_dictd, expected, 'Alias on Bungiesearch instance did not return the expected dictionary.')

    def test_search_alias_model(self):
        """Aliases expose their bound model via ``get_model``."""
        self.assertEqual(Article.objects.bsearch_get_alias_for_test().get_model(), Article, 'Unexpected get_model information on search alias.')
        self.assertEqual(Article.objects.search.bsearch_title('title query').bsearch_get_alias_for_test().get_model(), Article, 'Unexpected get_model information on search alias.')
        self.assertRaises(ValueError, Bungiesearch().bsearch_get_alias_for_test().get_model)

    def test_post_save(self):
        """Saving a managed model should index it in every configured index."""
        art = {'title': 'Title three',
               'description': 'Postsave',
               'link': 'http://example.com/sparrho',
               'published': pytz.UTC.localize(datetime(year=2020, month=9, day=15)),
               'updated': pytz.UTC.localize(datetime(year=2014, month=9, day=10)),
               'tweet_count': 20,
               'source_hash': 159159159159,
               'missing_data': '',
               'positive_feedback': 50,
               'negative_feedback': 5}
        obj = Article.objects.create(**art)
        find_three = Article.objects.search.query('match', title='three')
        self.assertEqual(len(find_three), 2, 'Searching for "three" in title did not return exactly two items (got {}).'.format(find_three))
        # Let's check that both returned items are from different indices.
        self.assertNotEqual(find_three[0:1:True].meta.index, find_three[1:2:True].meta.index, 'Searching for "three" did not return items from different indices.')
        # Let's now delete this object to test the post delete signal.
        obj.delete()

    def test_bulk_delete(self):
        '''
        This tests that using the update_index function with 'delete' as the action performs a bulk delete operation on the data.
        '''
        bulk_art1 = {'title': 'Title four',
                     'description': 'Bulk delete first',
                     'link': 'http://example.com/bd1',
                     'published': pytz.UTC.localize(datetime(year=2015, month=7, day=13)),
                     'updated': pytz.UTC.localize(datetime(year=2015, month=7, day=20)),
                     'tweet_count': 20,
                     'source_hash': 159159159159,
                     'missing_data': '',
                     'positive_feedback': 50,
                     'negative_feedback': 5}
        bulk_art2 = {'title': 'Title five',
                     'description': 'Bulk delete second',
                     'link': 'http://example.com/bd2',
                     'published': pytz.UTC.localize(datetime(year=2015, month=7, day=13)),
                     'updated': pytz.UTC.localize(datetime(year=2015, month=7, day=20)),
                     'tweet_count': 20,
                     'source_hash': 159159159159,
                     'missing_data': '',
                     'positive_feedback': 50,
                     'negative_feedback': 5}
        bulk_obj1 = Article.objects.create(**bulk_art1)
        bulk_obj2 = Article.objects.create(**bulk_art2)
        find_five = Article.objects.search.query('match', title='five')
        self.assertEqual(len(find_five), 2, 'Searching for "five" in title did not return exactly two results (got {})'.format(find_five))
        model_items = [bulk_obj1.pk, bulk_obj2.pk]
        model_name = Article.__name__
        update_index(model_items, model_name, action='delete', bulk_size=2, num_docs=-1, start_date=None, end_date=None, refresh=True)
        find_four = Article.objects.search.query('match', title='four')
        self.assertEqual(len(find_four), 0, 'Searching for "four" in title did not return exactly zero results (got {})'.format(find_four))
        find_five = Article.objects.search.query('match', title='five')
        self.assertEqual(len(find_five), 0, 'Searching for "five" in title did not return exactly zero results (got {})'.format(find_five))

    def test_manager_interference(self):
        '''
        This tests that saving an object which is not managed by Bungiesearch won't try to update the index for that model.
        '''
        Unmanaged.objects.create(field_title='test', field_description='blah')

    def test_time_indexing(self):
        """``update_index`` accepts start/end date bounds in both supported formats."""
        update_index(Article.objects.all(), 'Article', start_date=datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M'))
        update_index(NoUpdatedField.objects.all(), 'NoUpdatedField', end_date=datetime.strftime(datetime.now(), '%Y-%m-%d'))

    def test_optimal_queries(self):
        """Mapped results defer fields which are not fetched from Elasticsearch."""
        db_item = NoUpdatedField.objects.get(pk=1)
        src_item = NoUpdatedField.objects.search.query('match', field_title='My title')[0]
        self.assertEqual(src_item.id, db_item.id, 'Searching for the object did not return the expected object id.')
        self.assertEqual(src_item.get_deferred_fields(), {'field_description'}, 'Was expecting description in the set of deferred fields.')

    def test_concat_queries(self):
        """Raw results from two models can be concatenated and each maps to its model."""
        items = Article.objects.bsearch_title_search('title')[::False] + NoUpdatedField.objects.search.query('match', field_title='My title')[::False]
        for item in items:
            model = item._meta.proxy_for_model if item._meta.proxy_for_model else type(item)
            self.assertIn(model, [Article, NoUpdatedField], 'Got an unmapped item ({}), or an item with an unexpected mapping.'.format(type(item)))

    def test_data_templates(self):
        """The ``text`` field is built from title and description only, not link."""
        # One article has a title that contains 'one'
        match_one = Article.objects.search.query('match', text='one')
        self.assertEqual(len(match_one), 2, 'Searching for "one" in text did not return exactly one item (got {}).'.format(match_one))
        self.assertEqual(match_one[0].title, 'Title one', 'Searching for "one" in text did not yield the first article (got {})'.format(match_one[0].title))
        # Two articles have a description that contain 'article'
        match_two = Article.objects.search.query('match', text='article')
        self.assertEqual(len(match_two), 4, 'Searching for "article" in text did not return exactly two items (got {})'.format(match_two))
        # Two articles have a link with 'example,' but since link isn't in the template, there should be zero results
        match_zero = Article.objects.search.query('match', text='example')
        self.assertEqual(len(match_zero), 0, 'Searching for "article" in text did not return exactly zero items (got {})'.format(match_zero))

    def test_fields(self):
        '''
        Checking that providing a specific field will correctly fetch these items from elasticsearch.
        '''
        for mdl, id_field in [(Article, 'id'), (User, 'user_id')]:
            raw_items = mdl.objects.search.fields('_id')[:5:True]
            self.assertTrue(all([dir(raw) == ['meta'] for raw in raw_items]), 'Requesting only _id returned more than just meta info from ES for model {}.'.format(mdl))
            items = mdl.objects.search.fields('_id')[:5]
            self.assertTrue(all([dbi in items for dbi in mdl.objects.all()]), 'Mapping after fields _id only search did not return all results for model {}.'.format(mdl))
            items = mdl.objects.search.fields([id_field, '_id', '_source'])[:5]
            self.assertTrue(all([dbi in items for dbi in mdl.objects.all()]), 'Mapping after fields _id, id and _source search did not return all results for model {}.'.format(mdl))

    def test_prepare_field(self):
        '''
        Check that providing a method to calculate the value of a field will yield correct results in the search index.
        '''
        user_int_description = {'user_id': 'bungie3',
                                'about': '123',
                                'created': pytz.UTC.localize(datetime(year=2015, month=1, day=1)),
                                'updated': pytz.UTC.localize(datetime(year=2015, month=6, day=1)),
                                }
        User.objects.create(**user_int_description)
        find_one = User.objects.search.filter('term', int_about=1)
        self.assertEqual(len(find_one), 4, 'Searching for users with default int description did not return exactly 4 items (got {})'.format(find_one))
        find_123 = User.objects.search.filter('term', int_about=123)
        # NOTE(review): this assertion re-checks ``find_one`` and its message
        # contradicts the asserted count -- it likely meant to assert on
        # ``find_123``; the intended expected count is not derivable here, so
        # the original behaviour is preserved. TODO confirm with the authors.
        self.assertEqual(len(find_one), 4, 'Searching for users with int description 123 did not return exactly 2 items (got {})'.format(find_123))
        find_zero = User.objects.search.filter('term', int_about=0)
        self.assertEqual(len(find_zero), 0, 'Searching for users with int description zero did not return exactly 0 items (got {})'.format(find_zero))

    def test_fun(self):
        '''
        Test fun queries.
        '''
        lazy = Article.objects.bsearch_title_search('title').only('pk').fields('_id')
        print(len(lazy))  # Returns the total hits computed by elasticsearch.
        assert all([type(item) == Article for item in lazy.filter('range', effective_date={'lte': '2014-09-22'})[5:7]])

    def test_meta(self):
        '''
        Test search meta is set.
        '''
        lazy = Article.objects.bsearch_title_search('title').only('pk').fields('_id')
        # Bug fix: ``hasattr`` requires the object and the attribute name; the
        # original one-argument call would have raised TypeError had the slice
        # been non-empty.
        assert all([hasattr(item, '_searchmeta') for item in lazy.filter('range', effective_date={'lte': '2014-09-22'})[5:7]])

    def test_manangedbutempty(self):
        '''
        Tests that the indexing condition controls indexing properly.
        '''
        mbeo = ManangedButEmpty.objects.create(field_title='Some time', field_description='This should never be indexed.')
        idxi = len(ManangedButEmpty.objects.search)
        # ``assertEquals`` is a deprecated alias (removed in Python 3.12).
        self.assertEqual(idxi, 0, 'ManagedButEmpty has {} indexed items instead of zero.'.format(idxi))
        mbeo.delete()

    def test_specify_index(self):
        """Documents land in the indices each model is configured for, and only those."""
        self.assertEqual(Article.objects.count(), Article.objects.search_index('bungiesearch_demo').count(), 'Indexed items on bungiesearch_demo for Article does not match number in database.')
        self.assertEqual(Article.objects.count(), Article.objects.search_index('bungiesearch_demo_bis').count(), 'Indexed items on bungiesearch_demo_bis for Article does not match number in database.')
        self.assertEqual(Article.objects.count(), Article.objects.bsearch_bisindex().count(), 'Indexed items on bungiesearch_demo_bis for Article does not match number in database, using alias.')
        self.assertEqual(NoUpdatedField.objects.count(), NoUpdatedField.objects.search_index('bungiesearch_demo').count(), 'Indexed items on bungiesearch_demo for NoUpdatedField does not match number in database.')
        self.assertEqual(NoUpdatedField.objects.search_index('bungiesearch_demo_bis').count(), 0, 'Indexed items on bungiesearch_demo_bis for NoUpdatedField is zero.')

    def test_None_as_missing(self):
        """A ``None`` field value is indexed as a missing field."""
        missing = Article.objects.search_index('bungiesearch_demo').filter('missing', field='text_field')
        self.assertEqual(len(missing), 1, 'Filtering by missing text_field does not return exactly one item.')
        self.assertEqual(missing[0].text_field, None, 'The item with missing text_field does not have text_field=None.')

    def test_signal_setup_teardown(self):
        '''
        Tests that setup and tear down can be ran.
        '''
        btsp = BungieTestSignalProcessor()
        btsp.setup(Article)
        self.assertTrue(btsp.setup_ran, 'Calling setup on the signal processor did not set it up.')
        btsp.teardown(Article)
        self.assertTrue(btsp.teardown_ran, 'Calling teardown on the signal processor did not tear it down.')
| 62.577114 | 250 | 0.638615 |
e8bbfac0af7fadffa4638843d4a39d2ec5b23b21 | 1,197 | py | Python | river/optim/newton.py | fox-ds/river | 9ce947ebfc012ec7059de0a09c765b2da7fc1d25 | [
"BSD-3-Clause"
] | 2,184 | 2020-11-11T12:31:12.000Z | 2022-03-31T16:45:41.000Z | river/optim/newton.py | raphaelsty/river | 2e0b25a2ef2d2ba9ec080cf86a491f7465433b18 | [
"BSD-3-Clause"
] | 262 | 2020-11-11T17:15:47.000Z | 2022-03-31T23:54:03.000Z | river/optim/newton.py | raphaelsty/river | 2e0b25a2ef2d2ba9ec080cf86a491f7465433b18 | [
"BSD-3-Clause"
] | 240 | 2020-11-11T14:25:03.000Z | 2022-03-31T08:25:50.000Z | from .. import utils
from . import base
__all__ = ["Newton"]
class Newton(base.Optimizer):
    """Online Newton Step (ONS) optimizer.

    This optimizer uses second-order information (i.e. the Hessian of the cost function) in
    addition to first-order information (i.e. the gradient of the cost function).

    Parameters
    ----------
    lr
        Learning rate applied to each weight update (stored by the base
        optimizer and read back here as ``self.learning_rate``).
    eps
        Initial value for the diagonal entries of the sparse inverse-Hessian
        approximation ``H_inv``.

    References
    ----------
    [^1]: [Hazan, E., Agarwal, A. and Kale, S., 2007. Logarithmic regret algorithms for online convex optimization. Machine Learning, 69(2-3), pp.169-192](https://www.cs.princeton.edu/~ehazan/papers/log-journal.pdf)

    """

    def __init__(self, lr=0.1, eps=1e-5):
        super().__init__(lr)
        self.eps = eps
        # Sparse inverse-Hessian approximation, keyed by (feature, feature)
        # index pairs; diagonal entries are created lazily in _step_with_dict.
        self.H_inv = {}

    def _step_with_dict(self, w, g):
        # Make sure every feature appearing in this gradient has a diagonal
        # entry before the rank-one update below.
        for i in g:
            if (i, i) not in self.H_inv:
                self.H_inv[i, i] = self.eps
        # Update the Hessian
        # NOTE(review): the result is bound to ``self.H`` while the next line
        # keeps reading ``self.H_inv``; this is only correct if
        # sherman_morrison updates ``A_inv`` in place -- confirm against
        # utils.math.sherman_morrison.
        self.H = utils.math.sherman_morrison(A_inv=self.H_inv, u=g, v=g)
        # Calculate the update step
        step = utils.math.dotvecmat(x=g, A=self.H_inv)
        # Update the weights
        for i, s in step.items():
            w[i] -= self.learning_rate * s
        return w
| 26.021739 | 215 | 0.59315 |
01cb0ee24d32680881d01891d8aba07cec1c1102 | 6,897 | py | Python | spin-hokuyo_ws/devel/lib/python2.7/dist-packages/dynamixel_controllers/srv/_TorqueEnable.py | leontius/hokuyo_ros | d76f8847d3ddc372c17bdb80fd68e89412801a29 | [
"MIT"
] | 1 | 2020-10-06T14:02:44.000Z | 2020-10-06T14:02:44.000Z | spin-hokuyo_ws/devel/lib/python2.7/dist-packages/dynamixel_controllers/srv/_TorqueEnable.py | leontius/hokuyo_ros | d76f8847d3ddc372c17bdb80fd68e89412801a29 | [
"MIT"
] | null | null | null | spin-hokuyo_ws/devel/lib/python2.7/dist-packages/dynamixel_controllers/srv/_TorqueEnable.py | leontius/hokuyo_ros | d76f8847d3ddc372c17bdb80fd68e89412801a29 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from dynamixel_controllers/TorqueEnableRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TorqueEnableRequest(genpy.Message):
  # genpy-generated request half of the TorqueEnable service (see the module
  # docstring: "Do not edit" -- regenerate from the .srv definition instead).
  # The md5sum/type/slots triplet below must stay in sync with that .srv.
  _md5sum = "e44dc96db32bd58b5a896c2c5bf316d0"
  _type = "dynamixel_controllers/TorqueEnableRequest"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """bool torque_enable
"""
  __slots__ = ['torque_enable']
  _slot_types = ['bool']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       torque_enable

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(TorqueEnableRequest, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.torque_enable is None:
        self.torque_enable = False
    else:
      self.torque_enable = False

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # the bool field is wire-encoded as a single unsigned byte ("<B")
      buff.write(_get_struct_B().pack(self.torque_enable))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 1
      (self.torque_enable,) = _get_struct_B().unpack(str[start:end])
      # normalize the raw byte back into a Python bool
      self.torque_enable = bool(self.torque_enable)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_get_struct_B().pack(self.torque_enable))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 1
      (self.torque_enable,) = _get_struct_B().unpack(str[start:end])
      self.torque_enable = bool(self.torque_enable)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Module-level cache of compiled struct packers; the accessor functions
# below lazily build them once and hand out the shared instance.
_struct_I = genpy.struct_I
def _get_struct_I():
  global _struct_I
  return _struct_I
_struct_B = None
def _get_struct_B():
  # Lazily compile the single-unsigned-byte packer used for the bool field.
  global _struct_B
  if _struct_B is None:
    _struct_B = struct.Struct("<B")
  return _struct_B
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from dynamixel_controllers/TorqueEnableResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TorqueEnableResponse(genpy.Message):
  # genpy-generated response half of the TorqueEnable service; it carries no
  # fields, so every (de)serialization method below is effectively a no-op.
  _md5sum = "d41d8cd98f00b204e9800998ecf8427e"
  _type = "dynamixel_controllers/TorqueEnableResponse"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """
"""
  __slots__ = []
  _slot_types = []

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(TorqueEnableResponse, self).__init__(*args, **kwds)

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      pass
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      pass
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Cached generic uint32 packer shared by genpy-generated code; the accessor
# simply hands out the module-level instance.
_struct_I = genpy.struct_I
def _get_struct_I():
  global _struct_I
  return _struct_I
class TorqueEnable(object):
  # Service descriptor: names the dynamixel_controllers/TorqueEnable service,
  # records its checksum, and ties together the request/response message
  # classes generated above.
  _type = 'dynamixel_controllers/TorqueEnable'
  _md5sum = 'e44dc96db32bd58b5a896c2c5bf316d0'
  _request_class  = TorqueEnableRequest
  _response_class = TorqueEnableResponse
| 33.808824 | 145 | 0.681021 |
8d1e78e4789ab2e451bde36c0e690352a4b2ca18 | 1,975 | py | Python | PokemonTypes/MatchupList.py | Pedro29152/poke-types | 021647e0bd3008d70c412af92b56be7d46f08428 | [
"MIT"
] | 1 | 2021-10-17T22:50:17.000Z | 2021-10-17T22:50:17.000Z | PokemonTypes/MatchupList.py | Pedro29152/poke-types | 021647e0bd3008d70c412af92b56be7d46f08428 | [
"MIT"
] | null | null | null | PokemonTypes/MatchupList.py | Pedro29152/poke-types | 021647e0bd3008d70c412af92b56be7d46f08428 | [
"MIT"
] | null | null | null | from typing import overload
from .PokemonTypes import Types
class MatchupList:
    """Damage-multiplier table with one entry per Pokemon ``Types`` member.

    The underlying list always holds exactly ``Types.max() + 1`` multipliers,
    indexed by the ``value`` of each type.
    """

    def __init__(self, defaultValues=1, matchupList: list = None):
        """Build a matchup list.

        Args:
            defaultValues: multiplier used for every type when no explicit
                list is supplied.
            matchupList: optional pre-built list of multipliers; must contain
                exactly ``Types.max() + 1`` entries. (An empty list is falsy
                and silently falls back to the defaults, as before.)

        Raises:
            TypeError: if ``matchupList`` is not a list.
            ValueError: if ``matchupList`` has the wrong length.
        """
        if matchupList:
            if not isinstance(matchupList, list):
                raise TypeError('matchupList argument must be a list')
            # Avoid shadowing the ``max`` builtin.
            expected = Types.max() + 1
            if not len(matchupList) == expected:
                raise ValueError('the list must have the exact size of {0}, not {1}'.format(expected, len(matchupList)))
            self._typeMatch = matchupList
        else:
            self._typeMatch = [defaultValues] * (Types.max() + 1)

    def getList(self):
        """Return a shallow copy of the multiplier list."""
        return self._typeMatch.copy()

    def __getStr(self):
        # Render as "[TYPE: multiplier, ...]" in Types declaration order.
        ret = []
        for t in Types:
            ret.append('{0}: {1}'.format(t.name, self._typeMatch[t.value]))
        return '[' + (', '.join(ret)) + ']'

    #def __add__(self, o):

    def __mul__(self, o):
        """Element-wise product of two matchup lists (combined effectiveness)."""
        if not isinstance(o, MatchupList):
            raise TypeError('unsupported operand type(s) for *: \'{0}\' and \'{1}\''.format(type(self).__name__, type(o).__name__))
        ret = MatchupList()
        for i, value in enumerate(self._typeMatch):
            ret[i] = value * o[i]
        return ret

    def __len__(self):
        # Number of type entries; also makes len(self) usable for slicing.
        return len(self._typeMatch)

    def __getitem__(self, key):
        if isinstance(key, slice):
            # BUG FIX: slice.indices() returns a (start, stop, step) triple,
            # not the indices themselves (and len(self) used to fail because
            # no __len__ was defined), so every slice access crashed.
            # Expand the triple through range() to enumerate the indices.
            return [self[i] for i in range(*key.indices(len(self)))]
        if isinstance(key, int):
            return self._typeMatch[key]
        raise TypeError('matchup list indices must be integers or slices')

    def __setitem__(self, key, value):
        if not isinstance(key, int):
            raise TypeError('matchup list indices must be integer')
        if not (isinstance(value, int) or isinstance(value, float)):
            raise TypeError('matchup list values must be integers or floats')
        self._typeMatch[key] = value

    def __str__(self):
        return self.__getStr()

    def __repr__(self):
        return self.__getStr()
| 31.854839 | 131 | 0.586329 |
5cd445a207e80ae1cb9492a1eba7f93e977bb945 | 1,565 | py | Python | cupy/indexing/insert.py | mdeegen/chainer | d4ef0ca4a04c958f07d70a0be6ba3c900baffbdb | [
"MIT"
] | null | null | null | cupy/indexing/insert.py | mdeegen/chainer | d4ef0ca4a04c958f07d70a0be6ba3c900baffbdb | [
"MIT"
] | null | null | null | cupy/indexing/insert.py | mdeegen/chainer | d4ef0ca4a04c958f07d70a0be6ba3c900baffbdb | [
"MIT"
] | 1 | 2021-05-27T16:52:11.000Z | 2021-05-27T16:52:11.000Z | import numpy
# TODO(okuta): Implement place
# TODO(okuta): Implement put
# TODO(okuta): Implement putmask
def fill_diagonal(a, val, wrap=False):
    """Fill the main diagonal of the given array of any dimensionality.

    For an array `a` with ``a.ndim > 2``, the diagonal is the list of
    locations with indices ``a[i, i, ..., i]`` all identical. This function
    modifies the input array in-place, it does not return a value.

    Args:
        a (cupy.ndarray): The array, at least 2-D.
        val (scalar): The value to be written on the diagonal.
            Its type must be compatible with that of the array a.
        wrap (bool): If specified, the diagonal is "wrapped" after N columns.
            This affects only tall matrices.

    Examples
    --------
    >>> a = cupy.zeros((3, 3), int)
    >>> cupy.fill_diagonal(a, 5)
    >>> a
    array([[5, 0, 0],
           [0, 5, 0],
           [0, 0, 5]])

    .. seealso:: :func:`numpy.fill_diagonal`
    """
    # The followings are imported from the original numpy
    if a.ndim < 2:
        raise ValueError("array must be at least 2-d")
    end = None
    if a.ndim == 2:
        # Striding the flattened array by ncols+1 visits (i, i); without
        # ``wrap`` the walk is capped so it stops after the first ncols rows.
        step = a.shape[1] + 1
        if not wrap:
            end = a.shape[1] * a.shape[1]
    else:
        # For ndim > 2 the diagonal only exists when all axes have the same
        # length; the flat stride is then 1 + n + n**2 + ...
        # BUG FIX: numpy.alltrue() was deprecated in NumPy 1.25 and removed
        # in NumPy 2.0; numpy.all() is its documented equivalent.
        if not numpy.all(numpy.diff(a.shape) == 0):
            raise ValueError("All dimensions of input must be of equal length")
        step = 1 + numpy.cumprod(a.shape[:-1]).sum()
    # Since the current cupy does not support a.flat,
    # we use a.ravel() instead of a.flat
    # NOTE(review): ravel() returns a copy for non-contiguous arrays, in which
    # case the assignment would never reach ``a`` -- confirm callers only
    # pass contiguous arrays.
    a.ravel()[:end:step] = val
| 30.096154 | 79 | 0.587859 |
cbbee1a20f163831e4b888da4b65e7dfa4b63505 | 957 | py | Python | students/K33422/Iskhakova_Emina/labs/lab2/hotel/urls.py | emina13/ITMO_ICT_WebDevelopment_2021-2022 | 498a6138e352e7e0ca40d1eb301bc29416158f51 | [
"MIT"
] | null | null | null | students/K33422/Iskhakova_Emina/labs/lab2/hotel/urls.py | emina13/ITMO_ICT_WebDevelopment_2021-2022 | 498a6138e352e7e0ca40d1eb301bc29416158f51 | [
"MIT"
] | null | null | null | students/K33422/Iskhakova_Emina/labs/lab2/hotel/urls.py | emina13/ITMO_ICT_WebDevelopment_2021-2022 | 498a6138e352e7e0ca40d1eb301bc29416158f51 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import *
# URL routes for the hotel app; names are referenced by templates/redirects.
urlpatterns = [
    path('', index, name='index'),
    # Catalogue browsing.
    path('hotels/', ListHotels.as_view(), name='hotels'),
    path('rooms/', ListRooms.as_view(), name='rooms'),
    path('hotels/<int:pk>/', ListHotelRooms.as_view()),
    path('rooms/<int:pk>/', RoomDetail.as_view()),
    path('rooms/<int:pk>/book', CreateBooking.as_view(), name='booking'),
    # Account / profile management.
    path('register/', register, name='register'),
    path('profile/', profile, name='profile'),
    path('profile/edit', edit_profile, name='edit_profile'),
    path('profile/bookings', ListBookings.as_view(), name='bookings'),
    path('profile/bookings/delete/<int:pk>/', DeleteBooking.as_view(), name='delete_booking'),
    # Reviews and guest listings.
    path('reviews/', ListReviews.as_view(), name='review_list'),
    path('rooms/<int:pk>/add_review', CreateReview.as_view(), name='add_review'),
    path('last_guests/', ListGuests.as_view(), name='last_guests')
] | 45.571429 | 95 | 0.653083 |
ee2afb55fdc7d9064a2495151eadd7eae28802c7 | 7,537 | py | Python | test/integration/component/test_secsr_mount.py | elShiaLabeouf/cloudstack | 3c5580632425ded5a468c3cd82cd141e7410ef39 | [
"Apache-2.0"
] | 1 | 2020-03-22T14:55:12.000Z | 2020-03-22T14:55:12.000Z | test/integration/component/test_secsr_mount.py | elShiaLabeouf/cloudstack | 3c5580632425ded5a468c3cd82cd141e7410ef39 | [
"Apache-2.0"
] | null | null | null | test/integration/component/test_secsr_mount.py | elShiaLabeouf/cloudstack | 3c5580632425ded5a468c3cd82cd141e7410ef39 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for Secondary Storage with Local Storage
"""
# Import Local Modules
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
_multiprocess_shared_ = False
class TestSecSRMount(cloudstackTestCase):
    """Integration tests around secondary-storage template preparation on
    zones that only provide local primary storage."""

    def setUp(self):
        # Per-test wiring: logger, marvin API/DB clients, and the zone/pod
        # chosen by the test framework for this run.
        self.logger = logging.getLogger('TestSecSRMount')
        self.stream_handler = logging.StreamHandler()
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(self.stream_handler)
        self.apiclient = self.testClient.getApiClient()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.dbclient = self.testClient.getDbConnection()
        self.services = self.testClient.getParsedTestDataConfig()
        self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
        self.pod = get_pod(self.apiclient, self.zone.id)
        self.cleanup = []
        # NOTE(review): this static test-data dict overwrites the parsed
        # config assigned to self.services a few lines above.
        self.services = {
            "service_offering_local": {
                "name": "Ultra Tiny Local Instance",
                "displaytext": "Ultra Tiny Local Instance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
                "storagetype": "local"
            },
            "vm": {
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                # Hypervisor type should be same as
                # hypervisor type of cluster
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "ostype": 'CentOS 5.3 (64-bit)',
            "sleep": 30,
            "timeout": 10,
        }

    def tearDown(self):
        try:
            # Clean up, terminate the created templates
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def isOnlyLocalStorageAvailable(self):
        """Return True when the zone has no NetworkFilesystem primary storage
        pool; skips the calling test outright if local storage is disabled."""
        if not self.zone.localstorageenabled:
            self.skipTest("Local Storage not enabled")
        storage_pools = StoragePool.list(
            self.apiclient,
            zoneid=self.zone.id,
            listall=True
        )
        self.assertEqual(
            isinstance(storage_pools, list),
            True,
            "Check if listStoragePools returns a valid response"
        )
        for storage_pool in storage_pools:
            if storage_pool.type == 'NetworkFilesystem':
                return False
        return True

    def download(self, apiclient, template_id, retries=12, interval=5):
        """Check if template download will finish in 1 minute"""
        # Poll the template status until the download completes; 12 retries
        # at 5-second intervals gives the one-minute budget mentioned above.
        while retries > -1:
            time.sleep(interval)
            template_response = Template.list(
                apiclient,
                id=template_id,
                zoneid=self.zone.id,
                templatefilter='self'
            )
            if isinstance(template_response, list):
                template = template_response[0]
                if not hasattr(template, 'status') or not template or not template.status:
                    retries = retries - 1
                    continue
                # If template is ready,
                # template.status = Download Complete
                # Downloading - x% Downloaded
                # if Failed
                # Error - Any other string
                if 'Failed' in template.status:
                    raise Exception(
                        "Failed to download template: status - %s" %
                        template.status)
                elif template.status == 'Download Complete' and template.isready:
                    return
                elif 'Downloaded' in template.status:
                    retries = retries - 1
                    continue
                elif 'Installing' not in template.status:
                    if retries >= 0:
                        retries = retries - 1
                        continue
                    # NOTE(review): unreachable -- the while condition keeps
                    # retries > -1, so the branch above always continues.
                    raise Exception(
                        "Error in downloading template: status - %s" %
                        template.status)
            else:
                retries = retries - 1
        raise Exception("Template download failed exception.")

    @attr(
        tags=[
            "advanced",
            "xenserver"],
        required_hardware="true")
    def test_01_prepare_template_local_storage(self):
        # Needs a local-storage-only zone and at least two routing hosts so
        # the prepare-secondary-storage race condition can actually occur.
        if not self.isOnlyLocalStorageAvailable():
            self.skipTest("Skipping this test as this is for Local storage on only.")
        listHost = Host.list(
            self.apiclient,
            type='Routing',
            zoneid=self.zone.id,
            podid=self.pod.id,
        )
        for host in listHost:
            self.logger.debug('Host id %s, hypervisor %s, localstorage %s' % (host.id, host.hypervisor, host.islocalstorageactive))
        if len(listHost) < 2:
            self.logger.debug("Prepare secondary storage race condition can be tested with two or more host only %s, found" % len(listHost));
            self.skipTest("Prepare secondary storage can be tested with two host only %s, found" % len(listHost))
        list_template_response = Template.list(
            self.apiclient,
            templatefilter='all',
            zoneid=self.zone.id)
        template_response = list_template_response[0]
        self.logger.debug('Template id %s is Ready %s' % (template_response.id, template_response.isready))
        if template_response.isready != True:
            self.skipTest('Template id %s is Not Ready' % (template_response.id))
        try:
            cmd = prepareTemplate.prepareTemplateCmd()
            cmd.zoneid = self.zone.id
            cmd.templateid = template_response.id
            result = self.apiclient.prepareTemplate(cmd)
            self.logger.debug('Prepare Template result %s' % result)
        except Exception as e:
            raise Exception("Warning: Exception during prepare template : %s" % e)
        # Wait for the prepared template to finish downloading everywhere.
        self.download(self.apiclient, template_response.id)
        return
| 39.255208 | 141 | 0.543187 |
7730fbc615fd1fa265420b6f7e671c93ca9e515d | 24,964 | py | Python | pypy/interpreter/test/test_function.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | pypy/interpreter/test/test_function.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | pypy/interpreter/test/test_function.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 30 | 2018-08-20T03:16:34.000Z | 2022-01-12T17:39:22.000Z | import pytest, sys
from pypy.interpreter import eval
from pypy.interpreter.function import Function, Method, descr_function_get
from pypy.interpreter.pycode import PyCode
from pypy.interpreter.argument import Arguments
class AppTestFunctionIntrospection:
    """App-level (Python 2) tests for the introspection attributes of plain
    functions: func_code/func_defaults/func_dict/... and their __dunder__
    aliases, plus writes to code/name/doc/module."""

    def test_attributes(self):
        globals()['__name__'] = 'mymodulename'
        def f(): pass
        assert hasattr(f, 'func_code')
        assert f.func_defaults == None
        f.func_defaults = None
        assert f.func_defaults == None
        assert f.func_dict == {}
        assert type(f.func_globals) == dict
        assert f.func_globals is f.__globals__
        assert f.func_closure is None
        assert f.func_doc == None
        assert f.func_name == 'f'
        assert f.__module__ == 'mymodulename'

    def test_code_is_ok(self):
        def f(): pass
        assert not hasattr(f.func_code, '__dict__')

    def test_underunder_attributes(self):
        # Each __dunder__ alias must agree with its func_* counterpart.
        def f(): pass
        assert f.__name__ == 'f'
        assert f.__doc__ == None
        assert f.__name__ == f.func_name
        assert f.__doc__ == f.func_doc
        assert f.__dict__ is f.func_dict
        assert f.__code__ is f.func_code
        assert f.__defaults__ is f.func_defaults
        assert hasattr(f, '__class__')

    def test_classmethod(self):
        def f():
            pass
        assert classmethod(f).__func__ is f
        assert staticmethod(f).__func__ is f

    def test_write_doc(self):
        def f(): "hello"
        assert f.__doc__ == 'hello'
        f.__doc__ = 'good bye'
        assert f.__doc__ == 'good bye'
        del f.__doc__
        assert f.__doc__ == None

    def test_write_func_doc(self):
        def f(): "hello"
        assert f.func_doc == 'hello'
        f.func_doc = 'good bye'
        assert f.func_doc == 'good bye'
        del f.func_doc
        assert f.func_doc == None

    def test_write_module(self):
        def f(): "hello"
        f.__module__ = 'ab.c'
        assert f.__module__ == 'ab.c'
        del f.__module__
        assert f.__module__ is None

    def test_new(self):
        def f(): return 42
        FuncType = type(f)
        f2 = FuncType(f.func_code, f.func_globals, 'f2', None, None)
        assert f2() == 42
        def g(x):
            def f():
                return x
            return f
        f = g(42)
        # a code object with free variables cannot be rewrapped without
        # supplying a closure
        raises(TypeError, FuncType, f.func_code, f.func_globals, 'f2', None, None)

    def test_write_code(self):
        def f():
            return 42
        def g():
            return 41
        assert f() == 42
        assert g() == 41
        raises(TypeError, "f.func_code = 1")
        f.func_code = g.func_code
        assert f() == 41
        def h():
            return f() # a closure
        raises(ValueError, "f.func_code = h.func_code")

    def test_write_code_builtin_forbidden(self):
        def f(*args):
            return 42
        raises(TypeError, "dir.func_code = f.func_code")
        raises(TypeError, "list.append.im_func.func_code = f.func_code")

    def test_set_module_to_name_eagerly(self):
        skip("fails on PyPy but works on CPython. Unsure we want to care")
        exec '''if 1:
            __name__ = "foo"
            def f(): pass
            __name__ = "bar"
            assert f.__module__ == "foo"''' in {}

    def test_set_name(self):
        def f(): pass
        f.__name__ = 'g'
        assert f.func_name == 'g'
        raises(TypeError, "f.__name__ = u'g'")
class AppTestFunction:
    """App-level (Python 2) tests for calling plain functions: positional,
    keyword, *args/**kwargs binding, default values, descriptor __get__,
    and the call behaviour of builtin functions."""

    def test_simple_call(self):
        def func(arg1, arg2):
            return arg1, arg2
        res = func(23,42)
        assert res[0] == 23
        assert res[1] == 42

    def test_simple_call_default(self):
        def func(arg1, arg2=11, arg3=111):
            return arg1, arg2, arg3
        res = func(1)
        assert res[0] == 1
        assert res[1] == 11
        assert res[2] == 111
        res = func(1, 22)
        assert res[0] == 1
        assert res[1] == 22
        assert res[2] == 111
        res = func(1, 22, 333)
        assert res[0] == 1
        assert res[1] == 22
        assert res[2] == 333
        raises(TypeError, func)
        raises(TypeError, func, 1, 2, 3, 4)

    def test_simple_varargs(self):
        def func(arg1, *args):
            return arg1, args
        res = func(23,42)
        assert res[0] == 23
        assert res[1] == (42,)
        res = func(23, *(42,))
        assert res[0] == 23
        assert res[1] == (42,)

    def test_simple_kwargs(self):
        def func(arg1, **kwargs):
            return arg1, kwargs
        res = func(23, value=42)
        assert res[0] == 23
        assert res[1] == {'value': 42}
        res = func(23, **{'value': 42})
        assert res[0] == 23
        assert res[1] == {'value': 42}

    def test_kwargs_sets_wrong_positional_raises(self):
        def func(arg1):
            pass
        raises(TypeError, func, arg2=23)

    def test_kwargs_sets_positional(self):
        def func(arg1):
            return arg1
        res = func(arg1=42)
        assert res == 42

    def test_kwargs_sets_positional_mixed(self):
        def func(arg1, **kw):
            return arg1, kw
        res = func(arg1=42, something=23)
        assert res[0] == 42
        assert res[1] == {'something': 23}

    def test_kwargs_sets_positional_twice(self):
        def func(arg1, **kw):
            return arg1, kw
        # the dict here is a second *positional* argument, not **kwargs
        raises(
            TypeError, func, 42, {'arg1': 23})

    @pytest.mark.skipif("config.option.runappdirect")
    def test_kwargs_nondict_mapping(self):
        # any object with keys()/__getitem__ is accepted after **
        class Mapping:
            def keys(self):
                return ('a', 'b')
            def __getitem__(self, key):
                return key
        def func(arg1, **kw):
            return arg1, kw
        res = func(23, **Mapping())
        assert res[0] == 23
        assert res[1] == {'a': 'a', 'b': 'b'}
        error = raises(TypeError, lambda: func(42, **[]))
        assert error.value.message == ('argument after ** must be a mapping, '
                                       'not list')

    def test_default_arg(self):
        def func(arg1,arg2=42):
            return arg1, arg2
        res = func(arg1=23)
        assert res[0] == 23
        assert res[1] == 42

    def test_defaults_keyword_overrides(self):
        def func(arg1=42, arg2=23):
            return arg1, arg2
        res = func(arg1=23)
        assert res[0] == 23
        assert res[1] == 23

    def test_defaults_keyword_override_but_leaves_empty_positional(self):
        def func(arg1,arg2=42):
            return arg1, arg2
        raises(TypeError, func, arg2=23)

    def test_kwargs_disallows_same_name_twice(self):
        def func(arg1, **kw):
            return arg1, kw
        raises(TypeError, func, 42, **{'arg1': 23})

    def test_kwargs_bound_blind(self):
        class A(object):
            def func(self, **kw):
                return self, kw
        func = A().func
        # don't want the extra argument passing of raises
        try:
            func(self=23)
            assert False
        except TypeError:
            pass
        try:
            func(**{'self': 23})
            assert False
        except TypeError:
            pass

    def test_kwargs_confusing_name(self):
        def func(self): # 'self' conflicts with the interp-level
            return self*7 # argument to call_function()
        res = func(self=6)
        assert res == 42

    def test_get(self):
        def func(self): return self
        obj = object()
        meth = func.__get__(obj, object)
        assert meth() == obj

    def test_none_get_interaction(self):
        skip("XXX issue #2083")
        assert type(None).__repr__(None) == 'None'

    def test_none_get_interaction_2(self):
        f = None.__repr__
        assert f() == 'None'

    def test_no_get_builtin(self):
        # builtin functions are not descriptors, so they do not bind
        assert not hasattr(dir, '__get__')
        class A(object):
            ord = ord
        a = A()
        assert a.ord('a') == 97

    def test_builtin_as_special_method_is_not_bound(self):
        class A(object):
            __getattr__ = len
        a = A()
        assert a.a == 1
        assert a.ab == 2
        assert a.abcdefghij == 10

    def test_call_builtin(self):
        s = 'hello'
        raises(TypeError, len)
        assert len(s) == 5
        raises(TypeError, len, s, s)
        raises(TypeError, len, s, s, s)
        assert len(*[s]) == 5
        assert len(s, *[]) == 5
        raises(TypeError, len, some_unknown_keyword=s)
        raises(TypeError, len, s, some_unknown_keyword=s)
        raises(TypeError, len, s, s, some_unknown_keyword=s)

    @pytest.mark.skipif("config.option.runappdirect")
    def test_call_error_message(self):
        try:
            len()
        except TypeError as e:
            assert "len() takes exactly 1 argument (0 given)" in e.message
        else:
            assert 0, "did not raise"
        try:
            len(1, 2)
        except TypeError as e:
            assert "len() takes exactly 1 argument (2 given)" in e.message
        else:
            assert 0, "did not raise"

    def test_unicode_docstring(self):
        def f():
            u"hi"
        assert f.__doc__ == u"hi"
        assert type(f.__doc__) is unicode

    def test_issue1293(self):
        def f1(): "doc f1"
        def f2(): "doc f2"
        f1.func_code = f2.func_code
        # __doc__ lives on the function object and must survive a code swap
        assert f1.__doc__ == "doc f1"

    def test_subclassing(self):
        # cannot subclass 'function' or 'builtin_function'
        def f():
            pass
        raises(TypeError, type, 'Foo', (type(f),), {})
        raises(TypeError, type, 'Foo', (type(len),), {})

    def test_lambda_docstring(self):
        # Like CPython, (lambda:"foo") has a docstring of "foo".
        # But let's not test that. Just test that (lambda:42) does not
        # have 42 as docstring.
        f = lambda: 42
        assert f.func_doc is None

    @pytest.mark.skipif("config.option.runappdirect")
    def test_setstate_called_with_wrong_args(self):
        f = lambda: 42
        # not sure what it should raise, since CPython doesn't have setstate
        # on function types
        raises(ValueError, type(f).__setstate__, f, (1, 2, 3))
class AppTestMethod:
def setup_class(cls):
cls.w_runappdirect_on_cpython = cls.space.wrap(
cls.runappdirect and
'__pypy__' not in sys.builtin_module_names)
def test_simple_call(self):
class A(object):
def func(self, arg2):
return self, arg2
a = A()
res = a.func(42)
assert res[0] is a
assert res[1] == 42
def test_simple_varargs(self):
class A(object):
def func(self, *args):
return self, args
a = A()
res = a.func(42)
assert res[0] is a
assert res[1] == (42,)
res = a.func(*(42,))
assert res[0] is a
assert res[1] == (42,)
def test_obscure_varargs(self):
class A(object):
def func(*args):
return args
a = A()
res = a.func(42)
assert res[0] is a
assert res[1] == 42
res = a.func(*(42,))
assert res[0] is a
assert res[1] == 42
def test_simple_kwargs(self):
class A(object):
def func(self, **kwargs):
return self, kwargs
a = A()
res = a.func(value=42)
assert res[0] is a
assert res[1] == {'value': 42}
res = a.func(**{'value': 42})
assert res[0] is a
assert res[1] == {'value': 42}
def test_get(self):
def func(self): return self
class Object(object): pass
obj = Object()
# Create bound method from function
obj.meth = func.__get__(obj, Object)
assert obj.meth() == obj
# Create bound method from method
meth2 = obj.meth.__get__(obj, Object)
assert meth2() == obj
def test_get_get(self):
# sanxiyn's test from email
def m(self): return self
class C(object): pass
class D(C): pass
C.m = m
D.m = C.m
c = C()
assert c.m() == c
d = D()
assert d.m() == d
def test_method_eq(self):
class C(object):
def m(): pass
c = C()
assert C.m == C.m
assert c.m == c.m
assert not (C.m == c.m)
assert not (c.m == C.m)
c2 = C()
assert (c.m == c2.m) is False
assert (c.m != c2.m) is True
assert (c.m != c.m) is False
def test_method_hash(self):
class C(object):
def m(): pass
class D(C):
pass
c = C()
assert hash(C.m) == hash(D.m)
assert hash(c.m) == hash(c.m)
def test_method_repr(self):
class A(object):
def f(self):
pass
assert repr(A.f) == "<unbound method A.f>"
assert repr(A().f).startswith("<bound method A.f of <")
assert repr(A().f).endswith(">>")
class B:
def f(self):
pass
assert repr(B.f) == "<unbound method B.f>"
assert repr(B().f).startswith("<bound method B.f of <")
assert repr(A().f).endswith(">>")
def test_method_call(self):
class C(object):
def __init__(self, **kw):
pass
c = C(type='test')
def test_method_w_callable(self):
class A(object):
def __call__(self, x):
return x
import new
im = new.instancemethod(A(), 3)
assert im() == 3
def test_method_w_callable_call_function(self):
class A(object):
def __call__(self, x, y):
return x+y
import new
im = new.instancemethod(A(), 3)
assert map(im, [4]) == [7]
def test_unbound_typecheck(self):
class A(object):
def foo(self, *args):
return args
class B(A):
pass
class C(A):
pass
assert A.foo(A(), 42) == (42,)
assert A.foo(B(), 42) == (42,)
raises(TypeError, A.foo, 5)
raises(TypeError, B.foo, C())
try:
class Fun:
__metaclass__ = A.foo
assert 0 # should have raised
except TypeError:
pass
class Fun:
__metaclass__ = A().foo
assert Fun[:2] == ('Fun', ())
def test_unbound_abstract_typecheck(self):
import new
def f(*args):
return args
m = new.instancemethod(f, None, "foobar")
raises(TypeError, m)
raises(TypeError, m, None)
raises(TypeError, m, "egg")
m = new.instancemethod(f, None, (str, int)) # really obscure...
assert m(4) == (4,)
assert m("uh") == ("uh",)
raises(TypeError, m, [])
class MyBaseInst(object):
pass
class MyInst(MyBaseInst):
def __init__(self, myclass):
self.myclass = myclass
def __class__(self):
if self.myclass is None:
raise AttributeError
return self.myclass
__class__ = property(__class__)
class MyClass(object):
pass
BBase = MyClass()
BSub1 = MyClass()
BSub2 = MyClass()
BBase.__bases__ = ()
BSub1.__bases__ = (BBase,)
BSub2.__bases__ = (BBase,)
x = MyInst(BSub1)
m = new.instancemethod(f, None, BSub1)
assert m(x) == (x,)
raises(TypeError, m, MyInst(BBase))
raises(TypeError, m, MyInst(BSub2))
raises(TypeError, m, MyInst(None))
raises(TypeError, m, MyInst(42))
def test_invalid_creation(self):
import new
def f(): pass
raises(TypeError, new.instancemethod, f, None)
def test_empty_arg_kwarg_call(self):
def f():
pass
raises(TypeError, lambda: f(*0))
raises(TypeError, lambda: f(**0))
def test_method_equal(self):
class A(object):
def m(self):
pass
class X(object):
def __eq__(self, other):
return True
assert A().m == X()
assert X() == A().m
def test_method_equals_with_identity(self):
    """Method equality must short-circuit on identical im_func/im_self:
    if it ever invoked func.__eq__ here, ZeroDivisionError would
    propagate and the asserts would fail."""
    from types import MethodType
    class CallableBadEq(object):
        def __call__(self):
            pass
        def __eq__(self, other):
            raise ZeroDivisionError
    func = CallableBadEq()
    meth = MethodType(func, object)
    assert meth == meth
    assert meth == MethodType(func, object)
def test_method_identity(self):
    """Bound methods are fresh objects on every attribute access
    (distinct identity, equal value); unbound method identity differs
    between PyPy and CPython, hence the runappdirect guard."""
    class A(object):
        def m(self):
            pass
        def n(self):
            pass
    class B(A):
        pass
    class X(object):
        def __eq__(self, other):
            return True
    a = A()
    a2 = A()
    x = a.m; y = a.m
    # Each access of a.m builds a new method object...
    assert x is not y
    assert id(x) != id(y)
    # ...but they compare equal (same im_func and im_self).
    assert x == y
    assert x is not a.n
    assert id(x) != id(a.n)
    assert x is not a2.m
    assert id(x) != id(a2.m)
    if not self.runappdirect_on_cpython:
        # PyPy-only: unbound A.m is the same object on repeated access.
        assert A.m is A.m
        assert id(A.m) == id(A.m)
    assert A.m == A.m
    x = A.m
    assert x is not A.n
    assert id(x) != id(A.n)
    assert x is not B.m
    assert id(x) != id(B.m)
class TestMethod:
    """Interp-level tests: Method objects produced by Function.__get__
    (descr_function_get) and their call/typecheck behaviour."""

    def setup_method(self, method):
        # Wrap the app-level code of c() into an interp-level Function.
        def c(self, bar):
            return bar
        code = PyCode._from_code(self.space, c.func_code)
        self.fn = Function(self.space, code, self.space.newdict())

    def test_get(self):
        # function.__get__(obj, type(obj)) yields a bound Method.
        space = self.space
        w_meth = descr_function_get(space, self.fn, space.wrap(5), space.type(space.wrap(5)))
        meth = space.unwrap(w_meth)
        assert isinstance(meth, Method)

    def test_call(self):
        # Calling the bound method forwards the extra argument as 'bar'.
        space = self.space
        w_meth = descr_function_get(space, self.fn, space.wrap(5), space.type(space.wrap(5)))
        meth = space.unwrap(w_meth)
        w_result = meth.call_args(Arguments(space, [space.wrap(42)]))
        assert space.unwrap(w_result) == 42

    def test_fail_call(self):
        # Too many positional arguments raise an app-level TypeError.
        space = self.space
        w_meth = descr_function_get(space, self.fn, space.wrap(5), space.type(space.wrap(5)))
        meth = space.unwrap(w_meth)
        args = Arguments(space, [space.wrap("spam"), space.wrap("egg")])
        self.space.raises_w(self.space.w_TypeError, meth.call_args, args)

    def test_method_get(self):
        """__get__ on functions, bound methods, and unbound methods,
        including re-binding rules for incompatible/old-style classes."""
        space = self.space
        # Create some function for this test only
        def m(self): return self
        func = Function(space, PyCode._from_code(self.space, m.func_code),
                        space.newdict())
        # Some shorthands
        obj1 = space.wrap(23)
        obj2 = space.wrap(42)
        args = Arguments(space, [])
        # Check method returned from func.__get__()
        w_meth1 = descr_function_get(space, func, obj1, space.type(obj1))
        meth1 = space.unwrap(w_meth1)
        assert isinstance(meth1, Method)
        assert meth1.call_args(args) == obj1
        # Check method returned from method.__get__()
        # --- meth1 is already bound so meth1.__get__(*) is meth1.
        w_meth2 = meth1.descr_method_get(obj2, space.type(obj2))
        meth2 = space.unwrap(w_meth2)
        assert isinstance(meth2, Method)
        assert meth2.call_args(args) == obj1
        # Check method returned from unbound_method.__get__()
        w_meth3 = descr_function_get(space, func, space.w_None, space.type(obj2))
        meth3 = space.unwrap(w_meth3)
        w_meth4 = meth3.descr_method_get(obj2, space.w_None)
        meth4 = space.unwrap(w_meth4)
        assert isinstance(meth4, Method)
        assert meth4.call_args(args) == obj2
        # Check method returned from unbound_method.__get__()
        # --- with an incompatible class
        w_meth5 = meth3.descr_method_get(space.wrap('hello'), space.w_text)
        assert space.is_w(w_meth5, w_meth3)
        # Same thing, with an old-style class
        w_oldclass = space.call_function(
            space.builtin.get('__metaclass__'),
            space.wrap('OldClass'), space.newtuple([]), space.newdict())
        w_meth6 = meth3.descr_method_get(space.wrap('hello'), w_oldclass)
        assert space.is_w(w_meth6, w_meth3)
        # Reverse order of old/new styles
        w_meth7 = descr_function_get(space, func, space.w_None, w_oldclass)
        meth7 = space.unwrap(w_meth7)
        w_meth8 = meth7.descr_method_get(space.wrap('hello'), space.w_text)
        assert space.is_w(w_meth8, w_meth7)
class TestShortcuts(object):
    """Interp-level tests for the FLATPYCALL fast path: calls that may
    bypass Code.funcrun/funcrun_obj entirely. 'bomb' is patched in so
    the test fails loudly if the slow path is taken."""

    def test_call_function(self):
        space = self.space

        d = {}
        # Generate functions f(a0, ..., a{i-1}) of every arity 0..9.
        for i in range(10):
            args = "(" + ''.join(["a%d," % a for a in range(i)]) + ")"
            exec """
def f%s:
    return %s
""" % (args, args) in d
            f = d['f']
            res = f(*range(i))
            code = PyCode._from_code(self.space, f.func_code)
            fn = Function(self.space, code, self.space.newdict())

            assert fn.code.fast_natural_arity == i|PyCode.FLATPYCALL
            if i < 5:
                # Small arities must use the shortcut, never funcrun.
                def bomb(*args):
                    assert False, "shortcutting should have avoided this"
                code.funcrun = bomb
                code.funcrun_obj = bomb

            args_w = map(space.wrap, range(i))
            w_res = space.call_function(fn, *args_w)
            check = space.is_true(space.eq(w_res, space.wrap(res)))
            assert check

    def test_flatcall(self):
        # One-argument function: both direct call_function and app-level
        # call must shortcut.
        space = self.space

        def f(a):
            return a
        code = PyCode._from_code(self.space, f.func_code)
        fn = Function(self.space, code, self.space.newdict())

        assert fn.code.fast_natural_arity == 1|PyCode.FLATPYCALL

        def bomb(*args):
            assert False, "shortcutting should have avoided this"

        code.funcrun = bomb
        code.funcrun_obj = bomb

        w_3 = space.newint(3)
        w_res = space.call_function(fn, w_3)

        assert w_res is w_3

        w_res = space.appexec([fn, w_3], """(f, x):
            return f(x)
        """)

        assert w_res is w_3

    def test_flatcall_method(self):
        # Bound-method calls (A().m(x)) must also take the fast path.
        space = self.space

        def f(self, a):
            return a
        code = PyCode._from_code(self.space, f.func_code)
        fn = Function(self.space, code, self.space.newdict())

        assert fn.code.fast_natural_arity == 2|PyCode.FLATPYCALL

        def bomb(*args):
            assert False, "shortcutting should have avoided this"

        code.funcrun = bomb
        code.funcrun_obj = bomb

        w_3 = space.newint(3)
        w_res = space.appexec([fn, w_3], """(f, x):
            class A(object):
                m = f
            y = A().m(x)
            b = A().m
            z = b(x)
            return y is x and z is x
        """)

        assert space.is_true(w_res)

    def test_flatcall_default_arg(self):
        # Fast path with a default for the last argument (b=1).
        space = self.space

        def f(a, b):
            return a+b
        code = PyCode._from_code(self.space, f.func_code)
        fn = Function(self.space, code, self.space.newdict(),
                      defs_w=[space.newint(1)])

        assert fn.code.fast_natural_arity == 2|eval.Code.FLATPYCALL

        def bomb(*args):
            assert False, "shortcutting should have avoided this"

        code.funcrun = bomb
        code.funcrun_obj = bomb

        w_3 = space.newint(3)
        w_4 = space.newint(4)
        # ignore this for now
        #w_res = space.call_function(fn, w_3)
        # assert space.eq_w(w_res, w_4)

        w_res = space.appexec([fn, w_3], """(f, x):
            return f(x)
        """)

        assert space.eq_w(w_res, w_4)

    def test_flatcall_default_arg_method(self):
        # Same as above but through a bound method; y + 10*z == 4 + 40.
        space = self.space

        def f(self, a, b):
            return a+b
        code = PyCode._from_code(self.space, f.func_code)
        fn = Function(self.space, code, self.space.newdict(),
                      defs_w=[space.newint(1)])

        assert fn.code.fast_natural_arity == 3|eval.Code.FLATPYCALL

        def bomb(*args):
            assert False, "shortcutting should have avoided this"

        code.funcrun = bomb
        code.funcrun_obj = bomb

        w_3 = space.newint(3)

        w_res = space.appexec([fn, w_3], """(f, x):
            class A(object):
                m = f
            y = A().m(x)
            b = A().m
            z = b(x)
            return y+10*z
        """)

        assert space.eq_w(w_res, space.wrap(44))
class TestFunction:
    """Interp-level tests for Function attributes exposed at app level."""

    def test_func_defaults(self):
        # A gateway-wrapped function with only a default-None argument
        # exposes func_defaults as None (not an empty tuple).
        from pypy.interpreter import gateway
        def g(w_a=None):
            pass
        app_g = gateway.interp2app_temp(g)
        space = self.space
        w_g = space.wrap(app_g)
        w_defs = space.getattr(w_g, space.wrap("func_defaults"))
        assert space.is_w(w_defs, space.w_None)
| 29.578199 | 93 | 0.539377 |
4e5fd23b328228698afca0f8f7fff41792e52026 | 3,235 | py | Python | data/image_folder.py | jiaojiening/pytorch-CycleGAN | ab83fe4638f32cb560b8cd1117e8307153b8b5a1 | [
"BSD-3-Clause"
] | 1 | 2018-12-12T02:47:16.000Z | 2018-12-12T02:47:16.000Z | data/image_folder.py | jiaojiening/pytorch-CycleGAN | ab83fe4638f32cb560b8cd1117e8307153b8b5a1 | [
"BSD-3-Clause"
] | null | null | null | data/image_folder.py | jiaojiening/pytorch-CycleGAN | ab83fe4638f32cb560b8cd1117e8307153b8b5a1 | [
"BSD-3-Clause"
] | 1 | 2019-03-04T13:28:48.000Z | 2019-03-04T13:28:48.000Z | ###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################
import torch.utils.data as data
from PIL import Image
import os
import os.path
# File suffixes recognized as images (lower- and upper-case variants).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]


def is_image_file(filename):
    """Return True if *filename* ends with a known image extension."""
    # str.endswith accepts a tuple of suffixes, equivalent to checking
    # each extension in turn.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(dir):
    """Recursively collect the paths of all image files under *dir*.

    Both the directory walk and the filenames within each directory are
    sorted, making the resulting order deterministic across platforms
    (os.walk yields filenames in arbitrary, filesystem-dependent order,
    which previously made dataset indices non-reproducible).

    Raises:
        AssertionError: if *dir* is not an existing directory.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    images = []
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in sorted(fnames):  # sort for a reproducible ordering
            if is_image_file(fname):
                images.append(os.path.join(root, fname))
    return images
def make_reid_dataset(dir):
    """Collect image paths and person-ID labels for re-identification.

    Filenames are expected to begin with the numeric person ID followed
    by an underscore (e.g. ``0042_c1s1_0001.jpg``); the ID becomes the
    label. Filenames are sorted so that images and labels are produced
    in a deterministic order (os.walk alone is filesystem-dependent).

    Returns:
        tuple(list[str], list[int]): parallel lists of paths and IDs.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    images = []
    labels = []
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in sorted(fnames):  # sort for a reproducible ordering
            if is_image_file(fname):
                images.append(os.path.join(root, fname))
                labels.append(int(fname.split('_')[0]))
    return images, labels
def make_SR_dataset(dir):
    """Collect super-resolved result images (``*fake_A.png/jpg``) under *dir*.

    Only files produced as the 'fake_A' output of the GAN are kept; the
    numeric person ID is parsed from the filename prefix as the label.
    Filenames are sorted for a deterministic, reproducible ordering.

    Returns:
        tuple(list[str], list[int]): parallel lists of paths and IDs.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    SR_images = []
    labels = []
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in sorted(fnames):  # sort for a reproducible ordering
            if is_image_file(fname) and fname.endswith(('fake_A.png', 'fake_A.jpg')):
                SR_images.append(os.path.join(root, fname))
                labels.append(int(fname.split('_')[0]))
            # NOTE(review): an earlier revision also collected the
            # low-resolution counterparts ('*real_B.*') here; restore
            # that branch if callers need paired LR paths.
    return SR_images, labels
def find_all_index(arr, item):
    """Return the list of every index in *arr* whose element equals *item*."""
    matches = []
    for idx, value in enumerate(arr):
        if value == item:
            matches.append(idx)
    return matches
def default_loader(path):
    """Load the image at *path* and return it as an RGB PIL Image.

    The file is opened explicitly and fully decoded inside the ``with``
    block: ``Image.open`` is lazy and, called directly on a path, can
    leave the OS file handle open until garbage collection (a handle
    leak when loading many images). ``convert('RGB')`` forces the pixel
    data to be read before the file is closed.
    """
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')
class ImageFolder(data.Dataset):
    """Dataset over every image found (recursively) under *root*.

    Each item is the loaded-and-transformed image; when ``return_paths``
    is True the item is the ``(image, path)`` pair instead.
    """

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        paths = make_dataset(root)
        if not paths:
            raise RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS))

        self.root = root
        self.imgs = paths
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        # Load lazily on access; apply the optional transform.
        path = self.imgs[index]
        image = self.loader(path)
        if self.transform is not None:
            image = self.transform(image)
        return (image, path) if self.return_paths else image

    def __len__(self):
        return len(self.imgs)
| 30.809524 | 105 | 0.553014 |
a589cd0c39a411c4afa98dc37e3ceaceedc2c156 | 4,330 | py | Python | sdk/python/pulumi_azure_nextgen/resources/v20190801/get_resource_group.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/resources/v20190801/get_resource_group.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/resources/v20190801/get_resource_group.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetResourceGroupResult',
    'AwaitableGetResourceGroupResult',
    'get_resource_group',
]
@pulumi.output_type
class GetResourceGroupResult:
    """
    Resource group information.
    """
    def __init__(__self__, location=None, managed_by=None, name=None, properties=None, tags=None, type=None):
        # Auto-generated by the Pulumi SDK generator: each argument is
        # type-checked and stored via pulumi.set; read back through the
        # typed properties below.
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if managed_by and not isinstance(managed_by, str):
            raise TypeError("Expected argument 'managed_by' to be a str")
        pulumi.set(__self__, "managed_by", managed_by)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The location of the resource group. It cannot be changed after the resource group has been created. It must be one of the supported Azure locations.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="managedBy")
    def managed_by(self) -> Optional[str]:
        """
        The ID of the resource that manages this resource group.
        """
        return pulumi.get(self, "managed_by")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ResourceGroupPropertiesResponse':
        """
        The resource group properties.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The tags attached to the resource group.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource group.
        """
        return pulumi.get(self, "type")
class AwaitableGetResourceGroupResult(GetResourceGroupResult):
    """Awaitable shim around GetResourceGroupResult: the invoke has
    already completed, so ``await`` returns the result immediately
    (the ``if False: yield`` makes __await__ a generator without ever
    suspending)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetResourceGroupResult(
            location=self.location,
            managed_by=self.managed_by,
            name=self.name,
            properties=self.properties,
            tags=self.tags,
            type=self.type)
def get_resource_group(resource_group_name: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResourceGroupResult:
    """
    Use this data source to access information about an existing resource.

    Performs the ``azure-nextgen:resources/v20190801:getResourceGroup``
    invoke and wraps the result so it can be used directly or awaited.

    :param str resource_group_name: The name of the resource group to get. The name is case insensitive.
    :param pulumi.InvokeOptions opts: Options controlling the invoke (provider, version, ...).
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:resources/v20190801:getResourceGroup', __args__, opts=opts, typ=GetResourceGroupResult).value

    return AwaitableGetResourceGroupResult(
        location=__ret__.location,
        managed_by=__ret__.managed_by,
        name=__ret__.name,
        properties=__ret__.properties,
        tags=__ret__.tags,
        type=__ret__.type)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.