Dataset schema (32 columns; ⌀ marks a nullable column):

hexsha: string, length 40
size: int64, 4 .. 996k
ext: string, 8 classes
lang: string, 1 class (Python)
max_stars_repo_path: string, length 4 .. 245
max_stars_repo_name: string, length 6 .. 130
max_stars_repo_head_hexsha: string, length 40
max_stars_repo_licenses: list, length 1 .. 10
max_stars_count: int64, 1 .. 191k ⌀
max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
max_issues_repo_path: string, length 4 .. 245
max_issues_repo_name: string, length 6 .. 130
max_issues_repo_head_hexsha: string, length 40
max_issues_repo_licenses: list, length 1 .. 10
max_issues_count: int64, 1 .. 67k ⌀
max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
max_forks_repo_path: string, length 4 .. 245
max_forks_repo_name: string, length 6 .. 130
max_forks_repo_head_hexsha: string, length 40
max_forks_repo_licenses: list, length 1 .. 10
max_forks_count: int64, 1 .. 105k ⌀
max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
content: string, length 4 .. 996k
avg_line_length: float64, 1.33 .. 58.2k
max_line_length: int64, 2 .. 323k
alphanum_fraction: float64, 0 .. 0.97
content_no_comment: string, length 0 .. 946k
is_comment_constant_removed: bool, 2 classes
is_sharp_comment_removed: bool, 1 class

Records follow, one block per file; the wide columns (content, content_no_comment) are expanded inline under their labels.
hexsha: 790c9a37b43ab557243b6ae70830c18c9d0ac3c0 | size: 1,093 | ext: py | lang: Python
max_stars_repo: bin/test_server.py | jonelleamio/AdventureGameServer | head be55e48508be4df83fa5e44c77aae26ee072d8e8 | ["MIT"] | count: null | events: null .. null
max_issues_repo: bin/test_server.py | jonelleamio/AdventureGameServer | head be55e48508be4df83fa5e44c77aae26ee072d8e8 | ["MIT"] | count: null | events: null .. null
max_forks_repo: bin/test_server.py | jonelleamio/AdventureGameServer | head be55e48508be4df83fa5e44c77aae26ee072d8e8 | ["MIT"] | count: null | events: null .. null
content:
import requests
import json
from pprint import pprint as print
def getCode(res):
return str(res).split("[")[1].split("]")[0]
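# Note: getCode relies on the repr of a requests.Response, e.g. "<Response [200]>",
# which is why the bracket split recovers the status code; res.status_code returns
# the same value directly as an int.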
url = 'http://localhost:4042'
guid = '2012491924' # get guid from connexion.json()
guid2 = '0'
gurl = f"{url}/{guid}"
home = requests.post(url)
print (getCode(home))
print (home.json())
print ("\n\n##################\n\n")
connexion = requests.post('http://localhost:4042/connect')
print (getCode(connexion))
print (connexion.json())
print ("\n\n##################\n\n")
# regarder = requests.get(f"{gurl}/regarder")
# print (getCode(regarder))
# print (regarder.json())
# print ("\n\n##################\n\n")
# myobj = {"direction": "N"}
# deplacement = requests.post(f"{gurl}/deplacement", json=myobj)
# print (getCode(deplacement))
# print (deplacement.json())
# print ("\n\n##################\n\n")
# examiner = requests.get(f"{gurl}/examiner/{guid2}")
# print (getCode(examiner))
# print (examiner.json())
# print ("\n\n##################\n\n")
# taper = requests.get(f"{gurl}/taper/{guid2}")
# print (getCode(taper))
# print (taper.json())
avg_line_length: 25.418605 | max_line_length: 65 | alphanum_fraction: 0.601098
content_no_comment:
import requests
import json
from pprint import pprint as print
def getCode(res):
return str(res).split("[")[1].split("]")[0]
url = 'http://localhost:4042'
guid = '2012491924'
guid2 = '0'
gurl = f"{url}/{guid}"
home = requests.post(url)
print (getCode(home))
print (home.json())
print ("\n\n##################\n\n")
connexion = requests.post('http://localhost:4042/connect')
print (getCode(connexion))
print (connexion.json())
print ("\n\n##################\n\n")
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790c9babee7534ddf4ada2ee36f6194753c0e399 | size: 769 | ext: py | lang: Python
max_stars_repo: setup.py | listingmirror/async-gelf-handler | head 5b2e665e229277f914db0247ac174f7090882eb7 | ["BSD-3-Clause"] | count: null | events: null .. null
max_issues_repo: setup.py | listingmirror/async-gelf-handler | head 5b2e665e229277f914db0247ac174f7090882eb7 | ["BSD-3-Clause"] | count: null | events: null .. null
max_forks_repo: setup.py | listingmirror/async-gelf-handler | head 5b2e665e229277f914db0247ac174f7090882eb7 | ["BSD-3-Clause"] | count: null | events: null .. null
content:
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='async-gelf-handler',
version='0.1.4',
description="An async wrapper around the GELF (Graylog Extended Log Format).",
long_description=open('README.rst').read(),
keywords='logging gelf graylog2 graylog async',
author='Developer',
author_email='developer@listingmirror.com',
url='https://github.com/listingmirror/async-gelf-handler',
license='BSD License',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=['graypy>=0.2.13.2'],
classifiers=['License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'],
)
avg_line_length: 34.954545 | max_line_length: 82 | alphanum_fraction: 0.6671
content_no_comment:
from setuptools import setup, find_packages
setup(
name='async-gelf-handler',
version='0.1.4',
description="An async wrapper around the GELF (Graylog Extended Log Format).",
long_description=open('README.rst').read(),
keywords='logging gelf graylog2 graylog async',
author='Developer',
author_email='developer@listingmirror.com',
url='https://github.com/listingmirror/async-gelf-handler',
license='BSD License',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=['graypy>=0.2.13.2'],
classifiers=['License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'],
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790c9d5690afc8b243bed367fe71b0de57bb58d3 | size: 2,137 | ext: py | lang: Python
max_stars_repo: ambulation/envs/poppy_humanoid_keep_standing/poppy_humanoid_keep_standing.py | garrettkatz/poppy-simulations | head cd4d132ab6f8b4e69f2edd89662980d252a27966 | ["MIT"] | count: null | events: null .. null
max_issues_repo: ambulation/envs/poppy_humanoid_keep_standing/poppy_humanoid_keep_standing.py | garrettkatz/poppy-simulations | head cd4d132ab6f8b4e69f2edd89662980d252a27966 | ["MIT"] | count: null | events: null .. null
max_forks_repo: ambulation/envs/poppy_humanoid_keep_standing/poppy_humanoid_keep_standing.py | garrettkatz/poppy-simulations | head cd4d132ab6f8b4e69f2edd89662980d252a27966 | ["MIT"] | count: null | events: null .. null
content:
import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
def mass_center(model, sim):
mass = np.expand_dims(model.body_mass, 1)
xpos = sim.data.xipos
return (np.sum(mass * xpos, 0) / np.sum(mass))[0]
class PoppyHumanoidKeepStandingEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, 'poppy_humanoid/poppy_keep_standing.xml', 5)
utils.EzPickle.__init__(self)
def _get_obs(self):
data = self.sim.data
return np.concatenate([data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat])
def step(self, a):
pos_before = mass_center(self.model, self.sim)
self.do_simulation(a, self.frame_skip)
pos_after = mass_center(self.model, self.sim)
alive_bonus = 5.0
data = self.sim.data
lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
qpos = self.sim.data.qpos
done = bool((qpos[2] < 0.2) or (qpos[2] > 2.0))
return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost, reward_quadctrl=-quad_ctrl_cost, reward_alive=alive_bonus, reward_impact=-quad_impact_cost)
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv,)
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = self.model.stat.extent * 1.0
self.viewer.cam.lookat[2] = 0.8
self.viewer.cam.elevation = -20
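mass_center above is a mass-weighted average of the per-body positions, keeping only the x component. A standalone NumPy check of that formula, with made-up masses and positions rather than simulator state:

import numpy as np

mass = np.array([[1.0], [2.0], [3.0]])                      # (nbody, 1) body masses
xpos = np.array([[0.0, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [2.0, 0.0, 0.0]])                          # (nbody, 3) body CoM positions
com_x = (np.sum(mass * xpos, 0) / np.sum(mass))[0]          # (1*0 + 2*1 + 3*2) / 6 = 1.33...
print(com_x)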
avg_line_length: 41.096154 | max_line_length: 170 | alphanum_fraction: 0.614881
content_no_comment:
import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
def mass_center(model, sim):
mass = np.expand_dims(model.body_mass, 1)
xpos = sim.data.xipos
return (np.sum(mass * xpos, 0) / np.sum(mass))[0]
class PoppyHumanoidKeepStandingEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, 'poppy_humanoid/poppy_keep_standing.xml', 5)
utils.EzPickle.__init__(self)
def _get_obs(self):
data = self.sim.data
return np.concatenate([data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat])
def step(self, a):
pos_before = mass_center(self.model, self.sim)
self.do_simulation(a, self.frame_skip)
pos_after = mass_center(self.model, self.sim)
alive_bonus = 5.0
data = self.sim.data
lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
qpos = self.sim.data.qpos
done = bool((qpos[2] < 0.2) or (qpos[2] > 2.0))
return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost, reward_quadctrl=-quad_ctrl_cost, reward_alive=alive_bonus, reward_impact=-quad_impact_cost)
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv,)
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = self.model.stat.extent * 1.0
self.viewer.cam.lookat[2] = 0.8
self.viewer.cam.elevation = -20
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790c9d5cec04c1860dd1ed714064d05f2545570c | size: 4,090 | ext: py | lang: Python
max_stars_repo: pinax/blog/forms.py | Zorking/pinax-blog | head 5546888894557b69c5e7a0b846ea8d8213aba6f2 | ["MIT"] | count: null | events: null .. null
max_issues_repo: pinax/blog/forms.py | Zorking/pinax-blog | head 5546888894557b69c5e7a0b846ea8d8213aba6f2 | ["MIT"] | count: null | events: null .. null
max_forks_repo: pinax/blog/forms.py | Zorking/pinax-blog | head 5546888894557b69c5e7a0b846ea8d8213aba6f2 | ["MIT"] | count: null | events: null .. null
content:
from functools import partial as curry
from django import forms
from django.utils import timezone
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from pinax.images.models import ImageSet
from mdeditor.fields import MDTextFormField
from .conf import settings
from .models import Post, Revision, Section
from .signals import post_published
from .utils import load_path_attr
FIELDS = [
"section",
"author",
"markup",
"title",
"slug",
"teaser",
"content",
"description",
"state"
]
class PostFormMixin:
@property
def markup_choice(self):
return self.cleaned_data["markup"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
post = self.instance
latest_revision = post.latest()
if latest_revision:
# set initial data from the latest revision
self.fields["teaser"].initial = latest_revision.teaser
self.fields["content"].initial = latest_revision.content
def save_post(self, post):
published = False
if post.pk is None or Post.objects.filter(pk=post.pk, published=None).count():
if self.cleaned_data["state"] == Post.STATE_CHOICES[-1][0]:
post.published = timezone.now()
published = True
render_func = curry(
load_path_attr(
settings.PINAX_BLOG_MARKUP_CHOICE_MAP[self.markup_choice]["parser"]
)
)
post.teaser_html = render_func(self.cleaned_data["teaser"])
post.content_html = render_func(self.cleaned_data["content"])
post.updated = timezone.now()
post.save()
r = Revision()
r.post = post
r.title = post.title
r.teaser = self.cleaned_data["teaser"]
r.content = self.cleaned_data["content"]
r.author = post.author
r.updated = post.updated
r.published = post.published
r.save()
if published:
post_published.send(sender=Post, post=post)
return post
class AdminPostForm(PostFormMixin, forms.ModelForm):
title = forms.CharField(
label=_("Title"),
max_length=90,
widget=forms.TextInput(attrs={"style": "width: 50%;"}),
)
slug = forms.CharField(
label=_("Slug"),
widget=forms.TextInput(attrs={"style": "width: 50%;"})
)
teaser = forms.CharField(
label=_("Teaser"),
widget=forms.Textarea(attrs={"style": "width: 80%;"}),
)
content = MDTextFormField()
description = forms.CharField(
label=_("Description"),
widget=forms.Textarea(attrs={"style": "width: 80%;"}),
required=False
)
class Meta:
model = Post
fields = FIELDS
class Media:
js = settings.PINAX_BLOG_ADMIN_JS
def save(self, blog=None):
post = super().save(commit=False)
if blog:
post.blog = blog
return self.save_post(post)
class PostForm(PostFormMixin, forms.ModelForm):
markup_choice = "markdown"
teaser = forms.CharField(widget=forms.Textarea())
content = MDTextFormField()
class Meta:
model = Post
fields = [
"section",
"title",
"teaser",
"content",
"description",
"state"
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if Section.objects.count() < 2:
self.section = Section.objects.first()
del self.fields["section"]
else:
self.section = None
def save(self, blog=None, author=None):
post = super().save(commit=False)
if blog:
post.blog = blog
if author:
post.author = author
post.image_set = ImageSet.objects.create(created_by=author)
if self.section:
post.section = self.section
post.slug = slugify(post.title)
post.markup = self.markup_choice
return self.save_post(post)
avg_line_length: 26.558442 | max_line_length: 86 | alphanum_fraction: 0.593643
content_no_comment:
from functools import partial as curry
from django import forms
from django.utils import timezone
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from pinax.images.models import ImageSet
from mdeditor.fields import MDTextFormField
from .conf import settings
from .models import Post, Revision, Section
from .signals import post_published
from .utils import load_path_attr
FIELDS = [
"section",
"author",
"markup",
"title",
"slug",
"teaser",
"content",
"description",
"state"
]
class PostFormMixin:
@property
def markup_choice(self):
return self.cleaned_data["markup"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
post = self.instance
latest_revision = post.latest()
if latest_revision:
self.fields["teaser"].initial = latest_revision.teaser
self.fields["content"].initial = latest_revision.content
def save_post(self, post):
published = False
if post.pk is None or Post.objects.filter(pk=post.pk, published=None).count():
if self.cleaned_data["state"] == Post.STATE_CHOICES[-1][0]:
post.published = timezone.now()
published = True
render_func = curry(
load_path_attr(
settings.PINAX_BLOG_MARKUP_CHOICE_MAP[self.markup_choice]["parser"]
)
)
post.teaser_html = render_func(self.cleaned_data["teaser"])
post.content_html = render_func(self.cleaned_data["content"])
post.updated = timezone.now()
post.save()
r = Revision()
r.post = post
r.title = post.title
r.teaser = self.cleaned_data["teaser"]
r.content = self.cleaned_data["content"]
r.author = post.author
r.updated = post.updated
r.published = post.published
r.save()
if published:
post_published.send(sender=Post, post=post)
return post
class AdminPostForm(PostFormMixin, forms.ModelForm):
title = forms.CharField(
label=_("Title"),
max_length=90,
widget=forms.TextInput(attrs={"style": "width: 50%;"}),
)
slug = forms.CharField(
label=_("Slug"),
widget=forms.TextInput(attrs={"style": "width: 50%;"})
)
teaser = forms.CharField(
label=_("Teaser"),
widget=forms.Textarea(attrs={"style": "width: 80%;"}),
)
content = MDTextFormField()
description = forms.CharField(
label=_("Description"),
widget=forms.Textarea(attrs={"style": "width: 80%;"}),
required=False
)
class Meta:
model = Post
fields = FIELDS
class Media:
js = settings.PINAX_BLOG_ADMIN_JS
def save(self, blog=None):
post = super().save(commit=False)
if blog:
post.blog = blog
return self.save_post(post)
class PostForm(PostFormMixin, forms.ModelForm):
markup_choice = "markdown"
teaser = forms.CharField(widget=forms.Textarea())
content = MDTextFormField()
class Meta:
model = Post
fields = [
"section",
"title",
"teaser",
"content",
"description",
"state"
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if Section.objects.count() < 2:
self.section = Section.objects.first()
del self.fields["section"]
else:
self.section = None
def save(self, blog=None, author=None):
post = super().save(commit=False)
if blog:
post.blog = blog
if author:
post.author = author
post.image_set = ImageSet.objects.create(created_by=author)
if self.section:
post.section = self.section
post.slug = slugify(post.title)
post.markup = self.markup_choice
return self.save_post(post)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790c9d790013fdd415cbbabb3e28ad10ea87596d | size: 510 | ext: py | lang: Python
max_stars_repo: moviepy/video/fx/supersample.py | va6996/moviepy | head 60b95c37816413da6bf304e85f8c0ba8e2d2c6e7 | ["MIT"] | count: null | events: null .. null
max_issues_repo: moviepy/video/fx/supersample.py | va6996/moviepy | head 60b95c37816413da6bf304e85f8c0ba8e2d2c6e7 | ["MIT"] | count: null | events: null .. null
max_forks_repo: moviepy/video/fx/supersample.py | va6996/moviepy | head 60b95c37816413da6bf304e85f8c0ba8e2d2c6e7 | ["MIT"] | count: null | events: null .. null
content:
import cupy as np
def supersample(clip, d, n_frames):
"""Replaces each frame at time t by the mean of `n_frames` equally spaced frames
taken in the interval [t-d, t+d]. This results in motion blur.
"""
def filter(get_frame, t):
timings = np.linspace(t - d, t + d, n_frames)
frame_average = np.mean(
1.0 * np.array([get_frame(t_) for t_ in timings], dtype="uint16"), axis=0
)
return frame_average.astype("uint8")
return clip.transform(filter)
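The filter above is plain temporal averaging: sample n_frames in [t - d, t + d] and take the per-pixel mean, so motion smears into blur. A minimal NumPy-only sketch of that reduction; the get_frame below is a hypothetical stand-in for clip.get_frame, not part of the module:

import numpy as np

def get_frame(t):
    # hypothetical frame source: a 2x2 RGB image whose brightness tracks t
    return np.full((2, 2, 3), int(100 * t) % 256, dtype="uint8")

timings = np.linspace(0.4, 0.6, 5)               # [t - d, t + d] with t = 0.5, d = 0.1
stack = np.array([get_frame(t_) for t_ in timings], dtype="uint16")  # uint16 avoids overflow
blurred = stack.mean(axis=0).astype("uint8")     # per-pixel temporal mean, cast back to uint8
print(blurred[0, 0])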
avg_line_length: 30 | max_line_length: 85 | alphanum_fraction: 0.627451
content_no_comment:
import cupy as np
def supersample(clip, d, n_frames):
def filter(get_frame, t):
timings = np.linspace(t - d, t + d, n_frames)
frame_average = np.mean(
1.0 * np.array([get_frame(t_) for t_ in timings], dtype="uint16"), axis=0
)
return frame_average.astype("uint8")
return clip.transform(filter)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790c9dea203af1c08329112908e3c2f9c93e3603 | size: 7,165 | ext: py | lang: Python
max_stars_repo: synthetic/blobs/train.py | pattonw/mouselight | head 296e6df7d4e79776ed9f8533d17d937bb6866082 | ["MIT"] | count: null | events: null .. null
max_issues_repo: synthetic/blobs/train.py | pattonw/mouselight | head 296e6df7d4e79776ed9f8533d17d937bb6866082 | ["MIT"] | count: null | events: null .. null
max_forks_repo: synthetic/blobs/train.py | pattonw/mouselight | head 296e6df7d4e79776ed9f8533d17d937bb6866082 | ["MIT"] | count: null | events: null .. null
content:
from mahotas import cwatershed
from mala.losses import ultrametric_loss_op
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import distance_transform_edt
import gunpowder as gp
import json
import numpy as np
import skelerator
import tensorflow as tf
import logging
logging.basicConfig(level=logging.INFO)
with open("tensor_names.json", "r") as f:
tensor_names = json.load(f)
class Synthetic2DSource(gp.BatchProvider):
def __init__(self, raw, gt, smoothness=1.0, n_objects=3, points_per_skeleton=10):
self.raw = raw
self.gt = gt
self.smoothness = smoothness
self.n_objects = n_objects
self.points_per_skeleton = points_per_skeleton
def setup(self):
self.provides(
self.raw,
gp.ArraySpec(
roi=gp.Roi((0, 0), (1000, 1000)),
dtype=np.uint8,
interpolatable=True,
voxel_size=(1, 1),
),
)
self.provides(
self.gt,
gp.ArraySpec(
roi=gp.Roi((0, 0), (1000, 1000)),
dtype=np.uint64,
interpolatable=False,
voxel_size=(1, 1),
),
)
def provide(self, request):
voxel_size = self.spec[self.raw].voxel_size
shape = gp.Coordinate((1,) + request[self.raw].roi.get_shape())
noise = np.abs(np.random.randn(*shape))
smoothed_noise = gaussian_filter(noise, sigma=self.smoothness)
seeds = np.zeros(shape, dtype=int)
for i in range(self.n_objects):
if i == 0:
num_points = 100
else:
num_points = self.points_per_skeleton
points = np.stack(
[np.random.randint(0, shape[dim], num_points) for dim in range(3)],
axis=1,
)
tree = skelerator.Tree(points)
skeleton = skelerator.Skeleton(
tree, [1, 1, 1], "linear", generate_graph=False
)
seeds = skeleton.draw(seeds, np.array([0, 0, 0]), i + 1)
seeds[maximum_filter(seeds, size=4) != seeds] = 0
seeds_dt = distance_transform_edt(seeds == 0) + 5.0 * smoothed_noise
gt_data = cwatershed(seeds_dt, seeds).astype(np.uint64)[0] - 1
labels = np.unique(gt_data)
raw_data = np.zeros_like(gt_data, dtype=np.uint8)
value = 0
for label in labels:
raw_data[gt_data == label] = value
value += 255.0 / self.n_objects
spec = request[self.raw].copy()
spec.voxel_size = (1, 1)
raw = gp.Array(raw_data, spec)
spec = request[self.gt].copy()
spec.voxel_size = (1, 1)
gt_crop = (
request[self.gt].roi - request[self.raw].roi.get_begin()
) / voxel_size
gt_crop = gt_crop.to_slices()
gt = gp.Array(gt_data[gt_crop], spec)
batch = gp.Batch()
batch[self.raw] = raw
batch[self.gt] = gt
return batch
emst_name = "PyFuncStateless:0"
edges_u_name = "Gather:0"
edges_v_name = "Gather_1:0"
def add_loss(graph):
# k, h, w
embedding = graph.get_tensor_by_name(tensor_names["embedding"])
# h, w
fg = graph.get_tensor_by_name(tensor_names["fg"])
# h, w
gt_labels = graph.get_tensor_by_name(tensor_names["gt_labels"])
# h, w
gt_fg = tf.greater(gt_labels, 0, name="gt_fg")
# h, w
shape = tuple(fg.get_shape().as_list())
# 1, 1, h, w
maxima = tf.nn.pool(
tf.reshape(fg, (1, 1) + shape),
[10, 10],
"MAX",
"SAME",
strides=[1, 1],
data_format="NCHW",
)
# h, w
maxima = tf.reshape(tf.equal(fg, maxima), shape, name="maxima")
# 1, k, h, w
embedding = tf.reshape(embedding, (1,) + tuple(embedding.get_shape().as_list()))
# k, 1, h, w
embedding = tf.transpose(embedding, perm=[1, 0, 2, 3])
um_loss, emst, edges_u, edges_v, _ = ultrametric_loss_op(
embedding, gt_labels, mask=maxima, coordinate_scale=0.01
)
assert emst.name == emst_name
assert edges_u.name == edges_u_name
assert edges_v.name == edges_v_name
fg_loss = tf.losses.mean_squared_error(gt_fg, fg)
    # weight the fg loss term 10x, which effectively gives the fg network a higher learning rate
loss = um_loss + 10 * fg_loss
opt = tf.train.AdamOptimizer(
learning_rate=0.5e-5, beta1=0.95, beta2=0.999, epsilon=1e-8
)
optimizer = opt.minimize(loss)
return (loss, optimizer)
def train(n_iterations):
raw = gp.ArrayKey("RAW")
gt = gp.ArrayKey("GT")
gt_fg = gp.ArrayKey("GT_FP")
embedding = gp.ArrayKey("EMBEDDING")
fg = gp.ArrayKey("FG")
maxima = gp.ArrayKey("MAXIMA")
gradient_embedding = gp.ArrayKey("GRADIENT_EMBEDDING")
gradient_fg = gp.ArrayKey("GRADIENT_FG")
emst = gp.ArrayKey("EMST")
edges_u = gp.ArrayKey("EDGES_U")
edges_v = gp.ArrayKey("EDGES_V")
request = gp.BatchRequest()
request.add(raw, (200, 200))
request.add(gt, (160, 160))
snapshot_request = gp.BatchRequest()
snapshot_request[embedding] = request[gt]
snapshot_request[fg] = request[gt]
snapshot_request[gt_fg] = request[gt]
snapshot_request[maxima] = request[gt]
snapshot_request[gradient_embedding] = request[gt]
snapshot_request[gradient_fg] = request[gt]
snapshot_request[emst] = gp.ArraySpec()
snapshot_request[edges_u] = gp.ArraySpec()
snapshot_request[edges_v] = gp.ArraySpec()
pipeline = (
Synthetic2DSource(raw, gt)
+ gp.Normalize(raw)
+ gp.tensorflow.Train(
"train_net",
optimizer=add_loss,
loss=None,
inputs={tensor_names["raw"]: raw, tensor_names["gt_labels"]: gt},
outputs={
tensor_names["embedding"]: embedding,
tensor_names["fg"]: fg,
"maxima:0": maxima,
"gt_fg:0": gt_fg,
emst_name: emst,
edges_u_name: edges_u,
edges_v_name: edges_v,
},
gradients={
tensor_names["embedding"]: gradient_embedding,
tensor_names["fg"]: gradient_fg,
},
)
+ gp.Snapshot(
output_filename="{iteration}.hdf",
dataset_names={
raw: "volumes/raw",
gt: "volumes/gt",
embedding: "volumes/embedding",
fg: "volumes/fg",
maxima: "volumes/maxima",
gt_fg: "volumes/gt_fg",
gradient_embedding: "volumes/gradient_embedding",
gradient_fg: "volumes/gradient_fg",
emst: "emst",
edges_u: "edges_u",
edges_v: "edges_v",
},
dataset_dtypes={maxima: np.float32, gt_fg: np.float32},
every=100,
additional_request=snapshot_request,
)
)
with gp.build(pipeline):
for i in range(n_iterations):
pipeline.request_batch(request)
if __name__ == "__main__":
train(1000000)
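One step in add_loss worth unpacking: maxima is computed with the standard pooling trick, where a stride-1 SAME max-pool leaves a pixel unchanged exactly when it is the largest value in its window, so comparing the pooled output against the input flags local maxima. The same idea in NumPy/SciPy, with maximum_filter standing in for tf.nn.pool and random demo data:

import numpy as np
from scipy.ndimage import maximum_filter

fg = np.random.rand(64, 64)
local_max = fg == maximum_filter(fg, size=10)  # True exactly where a pixel equals its 10x10 neighborhood max
print(local_max.sum(), "local maxima found")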
avg_line_length: 29.364754 | max_line_length: 85 | alphanum_fraction: 0.573203
content_no_comment:
from mahotas import cwatershed
from mala.losses import ultrametric_loss_op
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import distance_transform_edt
import gunpowder as gp
import json
import numpy as np
import skelerator
import tensorflow as tf
import logging
logging.basicConfig(level=logging.INFO)
with open("tensor_names.json", "r") as f:
tensor_names = json.load(f)
class Synthetic2DSource(gp.BatchProvider):
def __init__(self, raw, gt, smoothness=1.0, n_objects=3, points_per_skeleton=10):
self.raw = raw
self.gt = gt
self.smoothness = smoothness
self.n_objects = n_objects
self.points_per_skeleton = points_per_skeleton
def setup(self):
self.provides(
self.raw,
gp.ArraySpec(
roi=gp.Roi((0, 0), (1000, 1000)),
dtype=np.uint8,
interpolatable=True,
voxel_size=(1, 1),
),
)
self.provides(
self.gt,
gp.ArraySpec(
roi=gp.Roi((0, 0), (1000, 1000)),
dtype=np.uint64,
interpolatable=False,
voxel_size=(1, 1),
),
)
def provide(self, request):
voxel_size = self.spec[self.raw].voxel_size
shape = gp.Coordinate((1,) + request[self.raw].roi.get_shape())
noise = np.abs(np.random.randn(*shape))
smoothed_noise = gaussian_filter(noise, sigma=self.smoothness)
seeds = np.zeros(shape, dtype=int)
for i in range(self.n_objects):
if i == 0:
num_points = 100
else:
num_points = self.points_per_skeleton
points = np.stack(
[np.random.randint(0, shape[dim], num_points) for dim in range(3)],
axis=1,
)
tree = skelerator.Tree(points)
skeleton = skelerator.Skeleton(
tree, [1, 1, 1], "linear", generate_graph=False
)
seeds = skeleton.draw(seeds, np.array([0, 0, 0]), i + 1)
seeds[maximum_filter(seeds, size=4) != seeds] = 0
seeds_dt = distance_transform_edt(seeds == 0) + 5.0 * smoothed_noise
gt_data = cwatershed(seeds_dt, seeds).astype(np.uint64)[0] - 1
labels = np.unique(gt_data)
raw_data = np.zeros_like(gt_data, dtype=np.uint8)
value = 0
for label in labels:
raw_data[gt_data == label] = value
value += 255.0 / self.n_objects
spec = request[self.raw].copy()
spec.voxel_size = (1, 1)
raw = gp.Array(raw_data, spec)
spec = request[self.gt].copy()
spec.voxel_size = (1, 1)
gt_crop = (
request[self.gt].roi - request[self.raw].roi.get_begin()
) / voxel_size
gt_crop = gt_crop.to_slices()
gt = gp.Array(gt_data[gt_crop], spec)
batch = gp.Batch()
batch[self.raw] = raw
batch[self.gt] = gt
return batch
emst_name = "PyFuncStateless:0"
edges_u_name = "Gather:0"
edges_v_name = "Gather_1:0"
def add_loss(graph):
embedding = graph.get_tensor_by_name(tensor_names["embedding"])
fg = graph.get_tensor_by_name(tensor_names["fg"])
gt_labels = graph.get_tensor_by_name(tensor_names["gt_labels"])
gt_fg = tf.greater(gt_labels, 0, name="gt_fg")
shape = tuple(fg.get_shape().as_list())
maxima = tf.nn.pool(
tf.reshape(fg, (1, 1) + shape),
[10, 10],
"MAX",
"SAME",
strides=[1, 1],
data_format="NCHW",
)
maxima = tf.reshape(tf.equal(fg, maxima), shape, name="maxima")
embedding = tf.reshape(embedding, (1,) + tuple(embedding.get_shape().as_list()))
embedding = tf.transpose(embedding, perm=[1, 0, 2, 3])
um_loss, emst, edges_u, edges_v, _ = ultrametric_loss_op(
embedding, gt_labels, mask=maxima, coordinate_scale=0.01
)
assert emst.name == emst_name
assert edges_u.name == edges_u_name
assert edges_v.name == edges_v_name
fg_loss = tf.losses.mean_squared_error(gt_fg, fg)
loss = um_loss + 10 * fg_loss
opt = tf.train.AdamOptimizer(
learning_rate=0.5e-5, beta1=0.95, beta2=0.999, epsilon=1e-8
)
optimizer = opt.minimize(loss)
return (loss, optimizer)
def train(n_iterations):
raw = gp.ArrayKey("RAW")
gt = gp.ArrayKey("GT")
gt_fg = gp.ArrayKey("GT_FP")
embedding = gp.ArrayKey("EMBEDDING")
fg = gp.ArrayKey("FG")
maxima = gp.ArrayKey("MAXIMA")
gradient_embedding = gp.ArrayKey("GRADIENT_EMBEDDING")
gradient_fg = gp.ArrayKey("GRADIENT_FG")
emst = gp.ArrayKey("EMST")
edges_u = gp.ArrayKey("EDGES_U")
edges_v = gp.ArrayKey("EDGES_V")
request = gp.BatchRequest()
request.add(raw, (200, 200))
request.add(gt, (160, 160))
snapshot_request = gp.BatchRequest()
snapshot_request[embedding] = request[gt]
snapshot_request[fg] = request[gt]
snapshot_request[gt_fg] = request[gt]
snapshot_request[maxima] = request[gt]
snapshot_request[gradient_embedding] = request[gt]
snapshot_request[gradient_fg] = request[gt]
snapshot_request[emst] = gp.ArraySpec()
snapshot_request[edges_u] = gp.ArraySpec()
snapshot_request[edges_v] = gp.ArraySpec()
pipeline = (
Synthetic2DSource(raw, gt)
+ gp.Normalize(raw)
+ gp.tensorflow.Train(
"train_net",
optimizer=add_loss,
loss=None,
inputs={tensor_names["raw"]: raw, tensor_names["gt_labels"]: gt},
outputs={
tensor_names["embedding"]: embedding,
tensor_names["fg"]: fg,
"maxima:0": maxima,
"gt_fg:0": gt_fg,
emst_name: emst,
edges_u_name: edges_u,
edges_v_name: edges_v,
},
gradients={
tensor_names["embedding"]: gradient_embedding,
tensor_names["fg"]: gradient_fg,
},
)
+ gp.Snapshot(
output_filename="{iteration}.hdf",
dataset_names={
raw: "volumes/raw",
gt: "volumes/gt",
embedding: "volumes/embedding",
fg: "volumes/fg",
maxima: "volumes/maxima",
gt_fg: "volumes/gt_fg",
gradient_embedding: "volumes/gradient_embedding",
gradient_fg: "volumes/gradient_fg",
emst: "emst",
edges_u: "edges_u",
edges_v: "edges_v",
},
dataset_dtypes={maxima: np.float32, gt_fg: np.float32},
every=100,
additional_request=snapshot_request,
)
)
with gp.build(pipeline):
for i in range(n_iterations):
pipeline.request_batch(request)
if __name__ == "__main__":
train(1000000)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790c9e9cf79b76caba1c7ce4f75ffc0fbf3e7109 | size: 1,341 | ext: py | lang: Python
max_stars_repo: Store/robot-test-old/hand_shake.py | Quanta-Robotics/Robot-Blueberry | head 7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da | ["MIT"] | count: 25 | events: 2021-06-08T07:09:30.000Z .. 2021-12-30T06:28:35.000Z
max_issues_repo: Store/robot-test-old/hand_shake.py | ICT-CoU/Robot-Blueberry | head d19fd1be037df9d67de64df57a87006d74cd6c43 | ["MIT"] | count: 2 | events: 2021-05-23T12:54:51.000Z .. 2021-06-07T17:47:56.000Z
max_forks_repo: Store/robot-test-old/hand_shake.py | ICT-CoU/Robot-Blueberry | head d19fd1be037df9d67de64df57a87006d74cd6c43 | ["MIT"] | count: 14 | events: 2021-06-08T13:02:28.000Z .. 2021-12-30T20:07:18.000Z
content:
import time
import RPi.GPIO as GPIO
from adafruit_servokit import ServoKit
'''GPIO.setmode(GPIO.BCM)
GPIO.setup(11,GPIO.OUT)
servo1=GPIO.PWM(11,50)
servo1.start(2)'''
h = ServoKit(channels=16)
#servo1.ChangeDutyCycle(12)
#kit.servo[0].angle
init = [0,90,20,0,180,160,170,180,60,0,0,150]
limitLo = [0,0,20,0,0,40,0,0,60,0,0,30]
limitHi = [35,180,180,180,180,160,170,180,180,180,180,150]
cur = list(init)  # copy, so the servo sweeps below don't mutate the init pose
def changeDeg(pin,newDegree):
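    # Sweep every servo listed in `pin` toward its target angle in 5-degree
    # steps, writing the shared `cur` positions to the hardware at each step.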
maxChange = 0
pinSize = len(pin)
for i in range(0,pinSize):
maxChange = max(abs(cur[pin[i]]-newDegree[i]),maxChange)
for deg in range(0,maxChange,5):
for i in range(0,pinSize):
if cur[pin[i]]<newDegree[i]:
cur[pin[i]] += 5
elif cur[pin[i]]>newDegree[i]:
cur[pin[i]] -= 5
for i in range(0,pinSize):
h.servo[pin[i]].angle = cur[pin[i]]
time.sleep(0.05)
#function closed
for i in range(0,12):
h.servo[i].angle=init[i]
time.sleep(0.05)
#up
changeDeg([3],[60])
changeDeg([7],[150])
time.sleep(0.5)
#shake
for i in range(0,5):
if i&1:
h.servo[7].angle=170
else:
h.servo[7].angle=120
time.sleep(0.2)
time.sleep(1)
#down
changeDeg([7],[180])
changeDeg([3],[0])
time.sleep(2)
for i in range(0,12):
print(cur[i]," ",init[i])
changeDeg([i],[init[i]])
avg_line_length: 20.630769 | max_line_length: 64 | alphanum_fraction: 0.595078
content_no_comment:
import time
import RPi.GPIO as GPIO
from adafruit_servokit import ServoKit
h = ServoKit(channels=16)
init = [0,90,20,0,180,160,170,180,60,0,0,150]
limitLo = [0,0,20,0,0,40,0,0,60,0,0,30]
limitHi = [35,180,180,180,180,160,170,180,180,180,180,150]
cur = list(init)
def changeDeg(pin,newDegree):
maxChange = 0
pinSize = len(pin)
for i in range(0,pinSize):
maxChange = max(abs(cur[pin[i]]-newDegree[i]),maxChange)
for deg in range(0,maxChange,5):
for i in range(0,pinSize):
if cur[pin[i]]<newDegree[i]:
cur[pin[i]] += 5
elif cur[pin[i]]>newDegree[i]:
cur[pin[i]] -= 5
for i in range(0,pinSize):
h.servo[pin[i]].angle = cur[pin[i]]
time.sleep(0.05)
for i in range(0,12):
h.servo[i].angle=init[i]
time.sleep(0.05)
changeDeg([3],[60])
changeDeg([7],[150])
time.sleep(0.5)
for i in range(0,5):
if i&1:
h.servo[7].angle=170
else:
h.servo[7].angle=120
time.sleep(0.2)
time.sleep(1)
changeDeg([7],[180])
changeDeg([3],[0])
time.sleep(2)
for i in range(0,12):
print(cur[i]," ",init[i])
changeDeg([i],[init[i]])
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790c9ea157ca7b565c4ae13301ba9d54c2dac2ff | size: 351 | ext: py | lang: Python
max_stars_repo: taxon/config.py | linsalrob/EdwardsLab | head 3d4eef1dda61c31ce8163d94d86f186275a6e4a4 | ["MIT"] | count: 30 | events: 2015-01-25T16:22:51.000Z .. 2022-01-20T15:56:47.000Z
max_issues_repo: taxon/config.py | linsalrob/EdwardsLab | head 3d4eef1dda61c31ce8163d94d86f186275a6e4a4 | ["MIT"] | count: 2 | events: 2020-04-13T15:00:37.000Z .. 2020-09-23T12:35:59.000Z
max_forks_repo: taxon/config.py | linsalrob/EdwardsLab | head 3d4eef1dda61c31ce8163d94d86f186275a6e4a4 | ["MIT"] | count: 24 | events: 2015-04-17T00:52:05.000Z .. 2021-11-26T17:50:01.000Z
content:
"""
Some settings for the config files
"""
# defaultdir = '/data/ncbi/taxonomy/current'
# defaultdir = '/home/edwa0468/ncbi/taxonomy'
defaultdir = '/raid60/usr/data/NCBI/taxonomy/current/'
def get_db_dir():
"""
Just return the default dir listed above
:return: the default location for the sqllite database
"""
return defaultdir
avg_line_length: 23.4 | max_line_length: 58 | alphanum_fraction: 0.700855
content_no_comment:
defaultdir = '/raid60/usr/data/NCBI/taxonomy/current/'
def get_db_dir():
return defaultdir
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790c9fcf219c42b2d8e0648aa613b2e4e1b83ccf | size: 484 | ext: py | lang: Python
max_stars_repo: ex30.py | AyeThandarAung/python-exercises | head a4ac378052cddd197deaa2522486572dd6c44678 | ["MIT"] | count: null | events: null .. null
max_issues_repo: ex30.py | AyeThandarAung/python-exercises | head a4ac378052cddd197deaa2522486572dd6c44678 | ["MIT"] | count: null | events: null .. null
max_forks_repo: ex30.py | AyeThandarAung/python-exercises | head a4ac378052cddd197deaa2522486572dd6c44678 | ["MIT"] | count: null | events: null .. null
content:
people = 30
cars = 40
trucks = 15
if cars > people:
print("We should take the cars.")
elif cars < people:
print("We should not take the cars.")
else:
print("We can't decide.")
if trucks > cars:
print("That's too many trucks.")
elif trucks < cars:
print("Maybe we could take the trucks.")
else:
print("We still can't decide.")
if people > trucks:
print("Alright, let's just take the trucks.")
else:
print("Fine, let's stay home then.")
avg_line_length: 24.2 | max_line_length: 50 | alphanum_fraction: 0.621901
content_no_comment:
people = 30
cars = 40
trucks = 15
if cars > people:
print("We should take the cars.")
elif cars < people:
print("We should not take the cars.")
else:
print("We can't decide.")
if trucks > cars:
print("That's too many trucks.")
elif trucks < cars:
print("Maybe we could take the trucks.")
else:
print("We still can't decide.")
if people > trucks:
print("Alright, let's just take the trucks.")
else:
print("Fine, let's stay home then.")
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ca0da0398153af629e260f133702a1559ea52 | size: 6,589 | ext: py | lang: Python
max_stars_repo: packages/fetchai/skills/generic_seller/behaviours.py | ejfitzgerald/agents-aea | head 6411fcba8af2cdf55a3005939ae8129df92e8c3e | ["Apache-2.0"] | count: null | events: null .. null
max_issues_repo: packages/fetchai/skills/generic_seller/behaviours.py | ejfitzgerald/agents-aea | head 6411fcba8af2cdf55a3005939ae8129df92e8c3e | ["Apache-2.0"] | count: null | events: null .. null
max_forks_repo: packages/fetchai/skills/generic_seller/behaviours.py | ejfitzgerald/agents-aea | head 6411fcba8af2cdf55a3005939ae8129df92e8c3e | ["Apache-2.0"] | count: null | events: null .. null
content:
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This package contains the behaviour of a generic seller AEA."""
from typing import cast
from aea.skills.behaviours import TickerBehaviour
from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.generic_seller.dialogues import (
LedgerApiDialogues,
OefSearchDialogues,
)
from packages.fetchai.skills.generic_seller.strategy import GenericStrategy
DEFAULT_SERVICES_INTERVAL = 60.0
LEDGER_API_ADDRESS = "fetchai/ledger:0.3.0"
class GenericServiceRegistrationBehaviour(TickerBehaviour):
"""This class implements a behaviour."""
def __init__(self, **kwargs):
"""Initialise the behaviour."""
services_interval = kwargs.pop(
"services_interval", DEFAULT_SERVICES_INTERVAL
) # type: int
super().__init__(tick_interval=services_interval, **kwargs)
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
if strategy.is_ledger_tx:
ledger_api_dialogues = cast(
LedgerApiDialogues, self.context.ledger_api_dialogues
)
ledger_api_msg = LedgerApiMessage(
performative=LedgerApiMessage.Performative.GET_BALANCE,
dialogue_reference=ledger_api_dialogues.new_self_initiated_dialogue_reference(),
ledger_id=strategy.ledger_id,
address=cast(str, self.context.agent_addresses.get(strategy.ledger_id)),
)
ledger_api_msg.counterparty = LEDGER_API_ADDRESS
ledger_api_dialogues.update(ledger_api_msg)
self.context.outbox.put_message(message=ledger_api_msg)
self._register_agent()
self._register_service()
def act(self) -> None:
"""
Implement the act.
:return: None
"""
# self._unregister_service()
# self._register_service()
def teardown(self) -> None:
"""
Implement the task teardown.
:return: None
"""
self._unregister_service()
self._unregister_agent()
def _register_agent(self) -> None:
"""
Register the agent's location.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.REGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("registering agent on SOEF.")
def _register_service(self) -> None:
"""
Register the agent's service.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_register_service_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.REGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("registering service on SOEF.")
def _unregister_service(self) -> None:
"""
Unregister service from the SOEF.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_unregister_service_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("unregistering service from SOEF.")
def _unregister_agent(self) -> None:
"""
Unregister agent from the SOEF.
:return: None
"""
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("unregistering agent from SOEF.")
avg_line_length: 38.086705 | max_line_length: 96 | alphanum_fraction: 0.670815
content_no_comment:
from typing import cast
from aea.skills.behaviours import TickerBehaviour
from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.generic_seller.dialogues import (
LedgerApiDialogues,
OefSearchDialogues,
)
from packages.fetchai.skills.generic_seller.strategy import GenericStrategy
DEFAULT_SERVICES_INTERVAL = 60.0
LEDGER_API_ADDRESS = "fetchai/ledger:0.3.0"
class GenericServiceRegistrationBehaviour(TickerBehaviour):
def __init__(self, **kwargs):
services_interval = kwargs.pop(
"services_interval", DEFAULT_SERVICES_INTERVAL
)
super().__init__(tick_interval=services_interval, **kwargs)
def setup(self) -> None:
strategy = cast(GenericStrategy, self.context.strategy)
if strategy.is_ledger_tx:
ledger_api_dialogues = cast(
LedgerApiDialogues, self.context.ledger_api_dialogues
)
ledger_api_msg = LedgerApiMessage(
performative=LedgerApiMessage.Performative.GET_BALANCE,
dialogue_reference=ledger_api_dialogues.new_self_initiated_dialogue_reference(),
ledger_id=strategy.ledger_id,
address=cast(str, self.context.agent_addresses.get(strategy.ledger_id)),
)
ledger_api_msg.counterparty = LEDGER_API_ADDRESS
ledger_api_dialogues.update(ledger_api_msg)
self.context.outbox.put_message(message=ledger_api_msg)
self._register_agent()
self._register_service()
    def act(self) -> None:
        pass
    def teardown(self) -> None:
self._unregister_service()
self._unregister_agent()
def _register_agent(self) -> None:
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.REGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("registering agent on SOEF.")
def _register_service(self) -> None:
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_register_service_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.REGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("registering service on SOEF.")
def _unregister_service(self) -> None:
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_unregister_service_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("unregistering service from SOEF.")
def _unregister_agent(self) -> None:
strategy = cast(GenericStrategy, self.context.strategy)
description = strategy.get_location_description()
oef_search_dialogues = cast(
OefSearchDialogues, self.context.oef_search_dialogues
)
oef_search_msg = OefSearchMessage(
performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,
dialogue_reference=oef_search_dialogues.new_self_initiated_dialogue_reference(),
service_description=description,
)
oef_search_msg.counterparty = self.context.search_service_address
oef_search_dialogues.update(oef_search_msg)
self.context.outbox.put_message(message=oef_search_msg)
self.context.logger.info("unregistering agent from SOEF.")
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ca14900667962fd2acd2fca8ca22d5c0151f4 | size: 6,258 | ext: py | lang: Python
max_stars_repo: Camera/camera.py | marioliu/AutonomousQuadblade | head 08fe54fe37df89ffc7e6378125bb14ad5bead421 | ["MIT"] | count: null | events: null .. null
max_issues_repo: Camera/camera.py | marioliu/AutonomousQuadblade | head 08fe54fe37df89ffc7e6378125bb14ad5bead421 | ["MIT"] | count: null | events: null .. null
max_forks_repo: Camera/camera.py | marioliu/AutonomousQuadblade | head 08fe54fe37df89ffc7e6378125bb14ad5bead421 | ["MIT"] | count: null | events: null .. null
content:
'''
Adapted from https://github.com/IntelligentQuadruped, with permission
Description: Module to connect to camera and retrieve RGB and depth data. Currently supports the Intel RealSense R200 Camera.
'''
import numpy as np
import logging
import time
import cv2
import matplotlib.pyplot as plt
from skimage.transform import rescale
from file_support import ensureDir
from os import path, makedirs
try:
import pyrealsense as pyrs
except ImportError as error:
logging.warning("cam.py: " + str(error))
class Camera:
"""
Object to get data from R200
"""
def __init__(self, max_depth = 4.0, save_images = False, \
t_buffer = 5, output_dir = './Trials/'):
"""
Intitalizes Camera object
"""
self.max_depth = max_depth
self.save_images = save_images
self.clock = time.time()
self.t_buffer = t_buffer
self.output_dir = output_dir
self.data_dir = path.join(self.output_dir,"{}".format(time.strftime("%d_%b_%Y_%H:%M", time.localtime())))
if self.save_images:
ensureDir(self.data_dir)
pass
np.warnings.filterwarnings('ignore')
def connect(self):
"""
Establishes connection to R200 camera
"""
logging.info("Cam.py: connecting components")
self.serv = pyrs.Service()
self.dev = self.serv.Device(device_id=0,
streams=[\
pyrs.stream.DepthStream(fps=60), pyrs.stream.ColorStream(fps=60)])
def disconnect(self):
"""
Disconnects from R200 camera
"""
self.dev.stop()
self.serv.stop()
logging.info("Cam.py: camera disconnected")
def getFrames(self, frames = 5, rgb = False):
"""
Retrieves depth frames (and RGB if true) from R200 input, cleans and averages depth images
"""
self.dev.wait_for_frames()
# Convert depth to meters
depth = self.dev.depth * self.dev.depth_scale
col = self.dev.color
if self.save_images and (time.time() - self.clock > self.t_buffer):
np.save(path.join(self.data_dir,str(time.time())+"_d"),depth)
np.save(path.join(self.data_dir,str(time.time())+"_c"),col)
self.clock = time.time()
for _ in range(frames-1):
self.dev.wait_for_frames()
# Convert depth to meters
curr = self.dev.depth * self.dev.depth_scale
depth = np.dstack((depth, curr))
if frames != 1:
depth = np.nanmean(depth, 2)
depth[depth <= 0] = np.nan
depth[depth > self.max_depth] = np.nan
if rgb:
return depth, col
return depth
def reduceFrame(self, depth, height_ratio = 0.5, sub_sample = 0.3, reduce_to = 'lower'):
"""
Takes in a depth image and rescales it
Args:
height_ratio: Determines fraction of rows to keep
sub_sample: Scaling factor for image
"""
if (height_ratio > 1.0) or (height_ratio < 0.0)\
or (sub_sample > 1.0) or (sub_sample < 0.0):
print('height_ratio and sub_sample must be between 0 and 1')
exit(1)
depth_copy = depth.copy()
height = depth_copy.shape[0]
h = int(height_ratio*(height))
cols_to_cut = 0
# catches the case when all rows are kept
if height_ratio == 1:
d_short = depth_copy
elif reduce_to == 'lower':
d_short = depth_copy[(height - h):,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'middle_lower':
upper_brdr = int(3*(height/4.0) - h/2)
lower_brdr = upper_brdr + h
d_short = depth_copy[upper_brdr:lower_brdr,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'middle':
upper_brdr = int((height - h)/2.0)
lower_brdr = upper_brdr + h
d_short = depth_copy[upper_brdr:lower_brdr,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'middle_upper':
upper_brdr = int((height/4.0) - h/2)
lower_brdr = upper_brdr + h
d_short = depth_copy[upper_brdr:lower_brdr,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'upper':
d_short = depth_copy[:h, cols_to_cut:-(cols_to_cut+1)]
d_short[d_short <= 0] = np.nan
d_short[d_short > self.max_depth] = np.nan
rescaled = rescale(d_short, sub_sample, mode='reflect', multichannel=False, anti_aliasing=True)
return rescaled
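    # A worked example of the crop windows (illustrative numbers, not from this
    # module): with height = 480 and height_ratio = 0.25, h = 120, so
    # reduce_to='lower' keeps rows 360..479, 'middle' keeps rows 180..299, and
    # 'middle_lower' keeps rows 300..419, a band centered at 3/4 of the height.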
def main():
"""
Unit tests
"""
max_depth = 4.0
numFrames = 10
# height_ratio of 0 crops 0 rows away
height_ratio = 0.5
sub_sample = 1
# reduce_to argument can be: 'lower', 'middle_lower', 'middle', 'middle_upper', and 'upper'
reduce_to = 'middle_lower'
print('Program settings:')
print('\tmax_depth: ' + str(max_depth))
print('\tnumFrames: ' + str(numFrames))
print('\theight_ratio: ' + str(height_ratio))
print('\tsub_sample: ' + str(sub_sample))
print('\treduce_to: ' + reduce_to)
cam = Camera(max_depth = max_depth)
cam.connect()
time.sleep(2.5)
t1 = time.time()
d = cam.getFrames(numFrames)
t2 = time.time()
printStmt = 'Time to get {0} frames: ' + str(t2 - t1)
print(printStmt.format(numFrames))
d_small = cam.reduceFrame(d, height_ratio = height_ratio, sub_sample = sub_sample, reduce_to = reduce_to)
# colormap:
# https://matplotlib.org/tutorials/colors/colormaps.html
# scaled depth
plt.figure(figsize = (6, 7)) # figsize = width, height
ax2 = plt.subplot(2, 1, 2)
plt.imshow(d_small, cmap='gist_rainbow')
plt.colorbar()
plt.title('Scaled (height_ratio = {0}, sub_sample = {1})'.format(height_ratio, sub_sample))
plt.grid()
# original depth
# plt.subplot(2, 1, 1, sharex=ax2, sharey=ax2)
plt.subplot(2, 1, 1)
plt.imshow(d, cmap='gist_rainbow')
plt.colorbar()
plt.title('Original')
plt.grid()
plt.subplots_adjust(hspace = 0.3)
plt.show()
cam.disconnect()
if __name__ == "__main__":
main()
avg_line_length: 31.134328 | max_line_length: 125 | alphanum_fraction: 0.591243
content_no_comment:
import numpy as np
import logging
import time
import cv2
import matplotlib.pyplot as plt
from skimage.transform import rescale
from file_support import ensureDir
from os import path, makedirs
try:
import pyrealsense as pyrs
except ImportError as error:
logging.warning("cam.py: " + str(error))
class Camera:
def __init__(self, max_depth = 4.0, save_images = False, \
t_buffer = 5, output_dir = './Trials/'):
self.max_depth = max_depth
self.save_images = save_images
self.clock = time.time()
self.t_buffer = t_buffer
self.output_dir = output_dir
self.data_dir = path.join(self.output_dir,"{}".format(time.strftime("%d_%b_%Y_%H:%M", time.localtime())))
if self.save_images:
ensureDir(self.data_dir)
pass
np.warnings.filterwarnings('ignore')
def connect(self):
logging.info("Cam.py: connecting components")
self.serv = pyrs.Service()
self.dev = self.serv.Device(device_id=0,
streams=[\
pyrs.stream.DepthStream(fps=60), pyrs.stream.ColorStream(fps=60)])
def disconnect(self):
self.dev.stop()
self.serv.stop()
logging.info("Cam.py: camera disconnected")
def getFrames(self, frames = 5, rgb = False):
self.dev.wait_for_frames()
depth = self.dev.depth * self.dev.depth_scale
col = self.dev.color
if self.save_images and (time.time() - self.clock > self.t_buffer):
np.save(path.join(self.data_dir,str(time.time())+"_d"),depth)
np.save(path.join(self.data_dir,str(time.time())+"_c"),col)
self.clock = time.time()
for _ in range(frames-1):
self.dev.wait_for_frames()
curr = self.dev.depth * self.dev.depth_scale
depth = np.dstack((depth, curr))
if frames != 1:
depth = np.nanmean(depth, 2)
depth[depth <= 0] = np.nan
depth[depth > self.max_depth] = np.nan
if rgb:
return depth, col
return depth
def reduceFrame(self, depth, height_ratio = 0.5, sub_sample = 0.3, reduce_to = 'lower'):
if (height_ratio > 1.0) or (height_ratio < 0.0)\
or (sub_sample > 1.0) or (sub_sample < 0.0):
print('height_ratio and sub_sample must be between 0 and 1')
exit(1)
depth_copy = depth.copy()
height = depth_copy.shape[0]
h = int(height_ratio*(height))
cols_to_cut = 0
if height_ratio == 1:
d_short = depth_copy
elif reduce_to == 'lower':
d_short = depth_copy[(height - h):,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'middle_lower':
upper_brdr = int(3*(height/4.0) - h/2)
lower_brdr = upper_brdr + h
d_short = depth_copy[upper_brdr:lower_brdr,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'middle':
upper_brdr = int((height - h)/2.0)
lower_brdr = upper_brdr + h
d_short = depth_copy[upper_brdr:lower_brdr,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'middle_upper':
upper_brdr = int((height/4.0) - h/2)
lower_brdr = upper_brdr + h
d_short = depth_copy[upper_brdr:lower_brdr,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'upper':
d_short = depth_copy[:h, cols_to_cut:-(cols_to_cut+1)]
d_short[d_short <= 0] = np.nan
d_short[d_short > self.max_depth] = np.nan
rescaled = rescale(d_short, sub_sample, mode='reflect', multichannel=False, anti_aliasing=True)
return rescaled
def main():
max_depth = 4.0
numFrames = 10
height_ratio = 0.5
sub_sample = 1
reduce_to = 'middle_lower'
print('Program settings:')
print('\tmax_depth: ' + str(max_depth))
print('\tnumFrames: ' + str(numFrames))
print('\theight_ratio: ' + str(height_ratio))
print('\tsub_sample: ' + str(sub_sample))
print('\treduce_to: ' + reduce_to)
cam = Camera(max_depth = max_depth)
cam.connect()
time.sleep(2.5)
t1 = time.time()
d = cam.getFrames(numFrames)
t2 = time.time()
printStmt = 'Time to get {0} frames: ' + str(t2 - t1)
print(printStmt.format(numFrames))
d_small = cam.reduceFrame(d, height_ratio = height_ratio, sub_sample = sub_sample, reduce_to = reduce_to)
plt.figure(figsize = (6, 7))
ax2 = plt.subplot(2, 1, 2)
plt.imshow(d_small, cmap='gist_rainbow')
plt.colorbar()
plt.title('Scaled (height_ratio = {0}, sub_sample = {1})'.format(height_ratio, sub_sample))
plt.grid()
plt.subplot(2, 1, 1)
plt.imshow(d, cmap='gist_rainbow')
plt.colorbar()
plt.title('Original')
plt.grid()
plt.subplots_adjust(hspace = 0.3)
plt.show()
cam.disconnect()
if __name__ == "__main__":
main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790ca2977ba11710d82c4fbdfee227696a4c639e | size: 6,020 | ext: py | lang: Python
max_stars_repo: src/models/resnet50.py | motokimura/cowc_car_counting | head 833795e1b5cc6831409e86bd4b9fe2199c9cb287 | ["MIT"] | count: 46 | events: 2018-11-05T15:21:51.000Z .. 2022-02-01T16:08:38.000Z
max_issues_repo: src/models/resnet50.py | motokimura/cowc_car_counting | head 833795e1b5cc6831409e86bd4b9fe2199c9cb287 | ["MIT"] | count: 2 | events: 2019-12-12T02:56:24.000Z .. 2020-11-30T20:14:56.000Z
max_forks_repo: src/models/resnet50.py | motokimura/cowc_car_counting | head 833795e1b5cc6831409e86bd4b9fe2199c9cb287 | ["MIT"] | count: 9 | events: 2018-12-21T02:58:43.000Z .. 2021-09-02T12:00:47.000Z
content:
# Original author: yasunorikudo
# (https://github.com/yasunorikudo/chainer-ResNet)
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
class BottleNeckA(chainer.Chain):
def __init__(self, in_size, ch, out_size, stride=2):
super(BottleNeckA, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(out_size)
self.conv4 = L.Convolution2D(
in_size, out_size, 1, stride, 0,
initialW=initialW, nobias=True)
self.bn4 = L.BatchNormalization(out_size)
def __call__(self, x):
h1 = F.relu(self.bn1(self.conv1(x)))
h1 = F.relu(self.bn2(self.conv2(h1)))
h1 = self.bn3(self.conv3(h1))
h2 = self.bn4(self.conv4(x))
return F.relu(h1 + h2)
class BottleNeckB(chainer.Chain):
def __init__(self, in_size, ch):
super(BottleNeckB, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, in_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(in_size)
def __call__(self, x):
h = F.relu(self.bn1(self.conv1(x)))
h = F.relu(self.bn2(self.conv2(h)))
h = self.bn3(self.conv3(h))
return F.relu(h + x)
class Block(chainer.ChainList):
def __init__(self, layer, in_size, ch, out_size, stride=2):
super(Block, self).__init__()
self.add_link(BottleNeckA(in_size, ch, out_size, stride))
for i in range(layer - 1):
self.add_link(BottleNeckB(out_size, ch))
self._layer = layer
def __call__(self, x):
for f in self.children():
x = f(x)
return x
@property
def layer(self):
return self._layer
class ResNet50(chainer.Chain):
def __init__(self, class_num, insize, class_weight=None, caffemodel_path=None):
assert (insize % 32 == 0), "'insize' should be divisible by 32."
super(ResNet50, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(
3, 64, 7, 2, 3, initialW=initializers.HeNormal())
self.bn1 = L.BatchNormalization(64)
self.res2 = Block(3, 64, 64, 256, 1)
self.res3 = Block(4, 256, 128, 512)
self.res4 = Block(6, 512, 256, 1024)
self.res5 = Block(3, 1024, 512, 2048)
self.fc = L.Linear(2048, class_num)
if caffemodel_path is not None:
# Load pre-trained weights from caffemodel
self._load_pretrained_weights(caffemodel_path)
self._class_num = class_num
self._insize = insize
self._class_weight = class_weight
def forward(self, x, compute_cam=False):
h = self.bn1(self.conv1(x))
h = F.max_pooling_2d(F.relu(h), 3, stride=2)
h = self.res2(h)
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
cam_features = h.data
h = F.average_pooling_2d(h, self._insize//32, stride=1)
h = self.fc(h)
if compute_cam:
cam_weights = self.fc.W.data
return h, cam_features, cam_weights
return h
def __call__(self, x, t):
h = self.forward(x)
loss = F.softmax_cross_entropy(h, t, class_weight=self._class_weight)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
@property
def insize(self):
return self._insize
@property
def class_num(self):
return self._class_num
# Functions to load weights from pre-trained ResNet50 caffemodel
# Reference: https://github.com/chainer/chainer/blob/master/chainer/links/model/vision/resnet.py
def _load_weights_conv_bn(self, src, dst_conv, dst_bn, bname, cname):
src_conv = getattr(src, 'res{}_branch{}'.format(bname, cname))
src_bn = getattr(src, 'bn{}_branch{}'.format(bname, cname))
src_scale = getattr(src, 'scale{}_branch{}'.format(bname, cname))
dst_conv.W.data[:] = src_conv.W.data
dst_bn.avg_mean[:] = src_bn.avg_mean
dst_bn.avg_var[:] = src_bn.avg_var
dst_bn.gamma.data[:] = src_scale.W.data
dst_bn.beta.data[:] = src_scale.bias.b.data
def _load_weights_bottleneckA(self, dst, src, name):
self._load_weights_conv_bn(src, dst.conv1, dst.bn1, name, '2a')
self._load_weights_conv_bn(src, dst.conv2, dst.bn2, name, '2b')
self._load_weights_conv_bn(src, dst.conv3, dst.bn3, name, '2c')
self._load_weights_conv_bn(src, dst.conv4, dst.bn4, name, '1')
def _load_weights_bottleneckB(self, dst, src, name):
self._load_weights_conv_bn(src, dst.conv1, dst.bn1, name, '2a')
self._load_weights_conv_bn(src, dst.conv2, dst.bn2, name, '2b')
self._load_weights_conv_bn(src, dst.conv3, dst.bn3, name, '2c')
def _load_weights_block(self, dst, src, names):
for i, (layers, name) in enumerate(zip(dst.children(), names)):
            if i == 0:
self._load_weights_bottleneckA(layers, src, name)
else:
self._load_weights_bottleneckB(layers, src, name)
def _load_pretrained_weights(self, caffemodel_path):
# As CaffeFunction uses shortcut symbols,
# CaffeFunction is imported here.
from chainer.links.caffe.caffe_function import CaffeFunction
src = CaffeFunction(caffemodel_path)
self.conv1.W.data[:] = src.conv1.W.data
self.conv1.b.data[:] = src.conv1.b.data
self.bn1.avg_mean[:] = src.bn_conv1.avg_mean
self.bn1.avg_var[:] = src.bn_conv1.avg_var
self.bn1.gamma.data[:] = src.scale_conv1.W.data
self.bn1.beta.data[:] = src.scale_conv1.bias.b.data
self._load_weights_block(self.res2, src, ['2a', '2b', '2c'])
self._load_weights_block(self.res3, src, ['3a', '3b', '3c', '3d'])
self._load_weights_block(self.res4, src, ['4a', '4b', '4c', '4d', '4e', '4f'])
self._load_weights_block(self.res5, src, ['5a', '5b', '5c'])
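# Editor's sketch (not part of the original file): a minimal forward pass and
# CAM extraction. The input size (96) and class count (10) are illustrative;
# the only constraint, per the assertion above, is insize % 32 == 0.
if __name__ == '__main__':
    import numpy as np
    model = ResNet50(class_num=10, insize=96)
    x = np.zeros((1, 3, 96, 96), dtype=np.float32)
    with chainer.using_config('train', False):
        y, cam_features, cam_weights = model.forward(x, compute_cam=True)
    print(y.shape)  # (1, 10)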
| 31.684211
| 97
| 0.697674
|
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
class BottleNeckA(chainer.Chain):
def __init__(self, in_size, ch, out_size, stride=2):
super(BottleNeckA, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(out_size)
self.conv4 = L.Convolution2D(
in_size, out_size, 1, stride, 0,
initialW=initialW, nobias=True)
self.bn4 = L.BatchNormalization(out_size)
def __call__(self, x):
h1 = F.relu(self.bn1(self.conv1(x)))
h1 = F.relu(self.bn2(self.conv2(h1)))
h1 = self.bn3(self.conv3(h1))
h2 = self.bn4(self.conv4(x))
return F.relu(h1 + h2)
class BottleNeckB(chainer.Chain):
def __init__(self, in_size, ch):
super(BottleNeckB, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, in_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(in_size)
def __call__(self, x):
h = F.relu(self.bn1(self.conv1(x)))
h = F.relu(self.bn2(self.conv2(h)))
h = self.bn3(self.conv3(h))
return F.relu(h + x)
class Block(chainer.ChainList):
def __init__(self, layer, in_size, ch, out_size, stride=2):
super(Block, self).__init__()
self.add_link(BottleNeckA(in_size, ch, out_size, stride))
for i in range(layer - 1):
self.add_link(BottleNeckB(out_size, ch))
self._layer = layer
def __call__(self, x):
for f in self.children():
x = f(x)
return x
@property
def layer(self):
return self._layer
class ResNet50(chainer.Chain):
def __init__(self, class_num, insize, class_weight=None, caffemodel_path=None):
assert (insize % 32 == 0), "'insize' should be divisible by 32."
super(ResNet50, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(
3, 64, 7, 2, 3, initialW=initializers.HeNormal())
self.bn1 = L.BatchNormalization(64)
self.res2 = Block(3, 64, 64, 256, 1)
self.res3 = Block(4, 256, 128, 512)
self.res4 = Block(6, 512, 256, 1024)
self.res5 = Block(3, 1024, 512, 2048)
self.fc = L.Linear(2048, class_num)
if caffemodel_path is not None:
self._load_pretrained_weights(caffemodel_path)
self._class_num = class_num
self._insize = insize
self._class_weight = class_weight
def forward(self, x, compute_cam=False):
h = self.bn1(self.conv1(x))
h = F.max_pooling_2d(F.relu(h), 3, stride=2)
h = self.res2(h)
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
cam_features = h.data
h = F.average_pooling_2d(h, self._insize//32, stride=1)
h = self.fc(h)
if compute_cam:
cam_weights = self.fc.W.data
return h, cam_features, cam_weights
return h
def __call__(self, x, t):
h = self.forward(x)
loss = F.softmax_cross_entropy(h, t, class_weight=self._class_weight)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
@property
def insize(self):
return self._insize
@property
def class_num(self):
return self._class_num
def _load_weights_conv_bn(self, src, dst_conv, dst_bn, bname, cname):
src_conv = getattr(src, 'res{}_branch{}'.format(bname, cname))
src_bn = getattr(src, 'bn{}_branch{}'.format(bname, cname))
src_scale = getattr(src, 'scale{}_branch{}'.format(bname, cname))
dst_conv.W.data[:] = src_conv.W.data
dst_bn.avg_mean[:] = src_bn.avg_mean
dst_bn.avg_var[:] = src_bn.avg_var
dst_bn.gamma.data[:] = src_scale.W.data
dst_bn.beta.data[:] = src_scale.bias.b.data
def _load_weights_bottleneckA(self, dst, src, name):
self._load_weights_conv_bn(src, dst.conv1, dst.bn1, name, '2a')
self._load_weights_conv_bn(src, dst.conv2, dst.bn2, name, '2b')
self._load_weights_conv_bn(src, dst.conv3, dst.bn3, name, '2c')
self._load_weights_conv_bn(src, dst.conv4, dst.bn4, name, '1')
def _load_weights_bottleneckB(self, dst, src, name):
self._load_weights_conv_bn(src, dst.conv1, dst.bn1, name, '2a')
self._load_weights_conv_bn(src, dst.conv2, dst.bn2, name, '2b')
self._load_weights_conv_bn(src, dst.conv3, dst.bn3, name, '2c')
def _load_weights_block(self, dst, src, names):
for i, (layers, name) in enumerate(zip(dst.children(), names)):
            if i == 0:
self._load_weights_bottleneckA(layers, src, name)
else:
self._load_weights_bottleneckB(layers, src, name)
def _load_pretrained_weights(self, caffemodel_path):
from chainer.links.caffe.caffe_function import CaffeFunction
src = CaffeFunction(caffemodel_path)
self.conv1.W.data[:] = src.conv1.W.data
self.conv1.b.data[:] = src.conv1.b.data
self.bn1.avg_mean[:] = src.bn_conv1.avg_mean
self.bn1.avg_var[:] = src.bn_conv1.avg_var
self.bn1.gamma.data[:] = src.scale_conv1.W.data
self.bn1.beta.data[:] = src.scale_conv1.bias.b.data
self._load_weights_block(self.res2, src, ['2a', '2b', '2c'])
self._load_weights_block(self.res3, src, ['3a', '3b', '3c', '3d'])
self._load_weights_block(self.res4, src, ['4a', '4b', '4c', '4d', '4e', '4f'])
self._load_weights_block(self.res5, src, ['5a', '5b', '5c'])
| true
| true
|
790ca2987e8b94a36faaf0e7acfbfa1c189a6101
| 5,924
|
py
|
Python
|
utils/parsxv2/typesystem.py
|
dstep/old_jf_compiler
|
3e179d91584308d9e7a69e76a78542e83ec2d50b
|
[
"MIT"
] | null | null | null |
utils/parsxv2/typesystem.py
|
dstep/old_jf_compiler
|
3e179d91584308d9e7a69e76a78542e83ec2d50b
|
[
"MIT"
] | null | null | null |
utils/parsxv2/typesystem.py
|
dstep/old_jf_compiler
|
3e179d91584308d9e7a69e76a78542e83ec2d50b
|
[
"MIT"
] | null | null | null |
class Type:
def __init__(self):
pass
def get_repr(self):
return self
def __repr__(self):
return self.get_repr().stringify()
def stringify(self):
return ""
def put_on_stack(self, stack):
stack.put(self.get_repr())
def take_from_stack(self, stack):
stack.take(self.get_repr())
def get_as_single_constant(self):
repr = self.get_repr()
if isinstance(repr, TypeConstant):
return repr
return None
class TypeConstant(Type):
def __init__(self, name):
self.name = name
def stringify(self):
return self.name
class TypeArrow(Type):
def __init__(self, left, right, name = None):
self.left = left
self.right = right
self.name = name
def stringify(self):
return "(" + str(self.left) + ")->" + str(self.right)
def put_on_stack(self, stack):
self.left.take_from_stack(stack)
self.right.put_on_stack(stack)
def take_from_stack(self, stack):
raise ArrowOnTheLeftOfArrowError("Arrow type on the left hand side of the arrow type", self)
class TypeTuple(Type):
def __init__(self, args):
self.args = args
def stringify(self):
return "(" + str.join(", ", map(str, self.args)) + ")"
def put_on_stack(self, stack):
for arg in self.args:
arg.put_on_stack(stack)
def take_from_stack(self, stack):
for arg in self.args:
arg.take_from_stack(stack)
class TypeVar(Type):
def __init__(self, name):
self.name = name
self.rank = 0
self.parent = self
def union(self, other):
self_repr = self.get_repr()
other_repr = other.get_repr()
if self_repr == other_repr:
return
if isinstance(other, TypeVar):
other_rank = other.rank
self_rank = self.rank
if self_rank < other_rank:
self.parent = other_repr
elif self_rank > other_rank:
other.parent = self_repr
else:
other.parent = self_repr
self.rank = self.rank + 1
else:
self.parent = other_repr
def get_repr(self):
if self.parent != self:
self.parent = self.parent.get_repr()
return self.parent
def stringify(self):
return "@" + self.name
class ArrowOnTheLeftOfArrowError(RuntimeError):
def __init__(self, message, type):
RuntimeError.__init__(self, message)
self.message = message
self.type = type
def __str__(self):
return self.message + " " + str(self.type)
class UnifiactionError(RuntimeError):
def __init__(self, message):
RuntimeError.__init__(self, message)
self.message = message
self.unify_stack = []
def add(self, type_a, type_b):
self.unify_stack.append((type_a, type_b))
def __str__(self):
return "Unification error: " + self.message + "\n" + str.join("\n", map(lambda p : "In unification of '%s' and '%s'" % p, self.unify_stack))
def types_equal(a, b):
a = a.get_repr()
b = b.get_repr()
if a == b:
return True
if isinstance(a, TypeTuple) and isinstance(b, TypeTuple):
if len(a.args) != len(b.args):
return False
        # compare element types pairwise (mapping a 2-argument function over a
        # zipped list would pass single tuples and raise a TypeError)
        return all(types_equal(x, y) for (x, y) in zip(a.args, b.args))
elif isinstance(a, TypeArrow) and isinstance(b, TypeArrow):
return types_equal(a.left, b.left) and types_equal(a.right, b.right)
return False
def types_unify(a, b):
try:
a = a.get_repr()
b = b.get_repr()
if isinstance(a, TypeVar):
a.union(b)
elif isinstance(b, TypeVar):
b.union(a)
elif isinstance(a, TypeConstant) and isinstance(b, TypeConstant):
if a != b:
raise UnifiactionError("Different basic types")
elif isinstance(a, TypeTuple) and isinstance(b, TypeTuple):
if len(a.args) != len(b.args):
raise UnifiactionError("Tuples size mismatch")
for (a,b) in zip(a.args, b.args):
types_unify(a, b)
elif isinstance(a, TypeArrow) and isinstance(b, TypeArrow):
types_unify(a.left, b.left)
types_unify(a.right, b.right)
else:
raise UnifiactionError("Different kinds")
except UnifiactionError as e:
e.add(a, b)
raise
def is_simple_arrow(a):
a = a.get_repr()
if isinstance(a, TypeArrow):
lhs = a.left
rhs = a.right
if lhs.get_repr() == rhs.get_repr():
return True
return False
def is_type_empty(type):
type = type.get_repr()
return isinstance(type, TypeTuple) and len(type.args) == 0
def split_arrow(type):
type = type.get_repr()
lhs = []
while isinstance(type, TypeArrow):
lhs.append(type.left)
type = type.right
return (lhs, type)
class TypeStack:
def __init__(self):
self.given = []
self.taken = []
def take(self, type):
if not isinstance(type, TypeConstant):
raise RuntimeError("Non-constant type placed into typestack: %s" % type)
if len(self.given) > 0:
last = self.given.pop()
types_unify(type, last)
else:
self.taken.append(type)
def put(self, type):
self.given.append(type)
def form_type(self):
if len(self.given) == 1:
rhs = self.given[0]
else:
rhs = TypeTuple(self.given)
t = rhs
for type in reversed(self.taken):
t = TypeArrow(type, t)
return t
# Takes a sequence of types and produces a single type matching the sequence.
def infer_type_from_sequence(seq):
stack = TypeStack()
for type in seq:
type.put_on_stack(stack)
return stack.form_type()
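# Editor's sketch (not part of the original module): a tiny demonstration of
# infer_type_from_sequence with two constant types and one arrow type.
def _demo_infer_type():
    Int = TypeConstant('Int')
    Bool = TypeConstant('Bool')
    is_zero = TypeArrow(Int, Bool)
    # An Int followed by an (Int)->Bool leaves a single Bool on the stack:
    print(infer_type_from_sequence([Int, is_zero]))  # Bool
    # The arrow alone still owes its caller an Int, so an arrow is inferred:
    print(infer_type_from_sequence([is_zero]))       # (Int)->Bool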
if __name__ == "__main__":
pass
| 29.182266
| 148
| 0.58339
|
class Type:
def __init__(self):
pass
def get_repr(self):
return self
def __repr__(self):
return self.get_repr().stringify()
def stringify(self):
return ""
def put_on_stack(self, stack):
stack.put(self.get_repr())
def take_from_stack(self, stack):
stack.take(self.get_repr())
def get_as_single_constant(self):
repr = self.get_repr()
if isinstance(repr, TypeConstant):
return repr
return None
class TypeConstant(Type):
def __init__(self, name):
self.name = name
def stringify(self):
return self.name
class TypeArrow(Type):
def __init__(self, left, right, name = None):
self.left = left
self.right = right
self.name = name
def stringify(self):
return "(" + str(self.left) + ")->" + str(self.right)
def put_on_stack(self, stack):
self.left.take_from_stack(stack)
self.right.put_on_stack(stack)
def take_from_stack(self, stack):
raise ArrowOnTheLeftOfArrowError("Arrow type on the left hand side of the arrow type", self)
class TypeTuple(Type):
def __init__(self, args):
self.args = args
def stringify(self):
return "(" + str.join(", ", map(str, self.args)) + ")"
def put_on_stack(self, stack):
for arg in self.args:
arg.put_on_stack(stack)
def take_from_stack(self, stack):
for arg in self.args:
arg.take_from_stack(stack)
class TypeVar(Type):
def __init__(self, name):
self.name = name
self.rank = 0
self.parent = self
def union(self, other):
self_repr = self.get_repr()
other_repr = other.get_repr()
if self_repr == other_repr:
return
if isinstance(other, TypeVar):
other_rank = other.rank
self_rank = self.rank
if self_rank < other_rank:
self.parent = other_repr
elif self_rank > other_rank:
other.parent = self_repr
else:
other.parent = self_repr
self.rank = self.rank + 1
else:
self.parent = other_repr
def get_repr(self):
if self.parent != self:
self.parent = self.parent.get_repr()
return self.parent
def stringify(self):
return "@" + self.name
class ArrowOnTheLeftOfArrowError(RuntimeError):
def __init__(self, message, type):
RuntimeError.__init__(self, message)
self.message = message
self.type = type
def __str__(self):
return self.message + " " + str(self.type)
class UnifiactionError(RuntimeError):
def __init__(self, message):
RuntimeError.__init__(self, message)
self.message = message
self.unify_stack = []
def add(self, type_a, type_b):
self.unify_stack.append((type_a, type_b))
def __str__(self):
return "Unification error: " + self.message + "\n" + str.join("\n", map(lambda p : "In unification of '%s' and '%s'" % p, self.unify_stack))
def types_equal(a, b):
a = a.get_repr()
b = b.get_repr()
if a == b:
return True
if isinstance(a, TypeTuple) and isinstance(b, TypeTuple):
if len(a.args) != len(b.args):
return False
        return all(types_equal(x, y) for (x, y) in zip(a.args, b.args))
elif isinstance(a, TypeArrow) and isinstance(b, TypeArrow):
return types_equal(a.left, b.left) and types_equal(a.right, b.right)
return False
def types_unify(a, b):
try:
a = a.get_repr()
b = b.get_repr()
if isinstance(a, TypeVar):
a.union(b)
elif isinstance(b, TypeVar):
b.union(a)
elif isinstance(a, TypeConstant) and isinstance(b, TypeConstant):
if a != b:
raise UnifiactionError("Different basic types")
elif isinstance(a, TypeTuple) and isinstance(b, TypeTuple):
if len(a.args) != len(b.args):
raise UnifiactionError("Tuples size mismatch")
for (a,b) in zip(a.args, b.args):
types_unify(a, b)
elif isinstance(a, TypeArrow) and isinstance(b, TypeArrow):
types_unify(a.left, b.left)
types_unify(a.right, b.right)
else:
raise UnifiactionError("Different kinds")
except UnifiactionError as e:
e.add(a, b)
raise
def is_simple_arrow(a):
a = a.get_repr()
if isinstance(a, TypeArrow):
lhs = a.left
rhs = a.right
if lhs.get_repr() == rhs.get_repr():
return True
return False
def is_type_empty(type):
type = type.get_repr()
return isinstance(type, TypeTuple) and len(type.args) == 0
def split_arrow(type):
type = type.get_repr()
lhs = []
while isinstance(type, TypeArrow):
lhs.append(type.left)
type = type.right
return (lhs, type)
class TypeStack:
def __init__(self):
self.given = []
self.taken = []
def take(self, type):
if not isinstance(type, TypeConstant):
raise RuntimeError("Non-constant type placed into typestack: %s" % type)
if len(self.given) > 0:
last = self.given.pop()
types_unify(type, last)
else:
self.taken.append(type)
def put(self, type):
self.given.append(type)
def form_type(self):
if len(self.given) == 1:
rhs = self.given[0]
else:
rhs = TypeTuple(self.given)
t = rhs
for type in reversed(self.taken):
t = TypeArrow(type, t)
return t
def infer_type_from_sequence(seq):
stack = TypeStack()
for type in seq:
type.put_on_stack(stack)
return stack.form_type()
if __name__ == "__main__":
pass
| true
| true
|
790ca313c418489814b3f9e22b482dc1e87557ee
| 2,705
|
py
|
Python
|
Super_Pow.py
|
thydeyx/LeetCode-Python
|
03296dfa37910ef13b0726bde5e757b52f1590d7
|
[
"MIT"
] | 1
|
2017-05-21T04:28:37.000Z
|
2017-05-21T04:28:37.000Z
|
Super_Pow.py
|
thydeyx/LeetCode-Python
|
03296dfa37910ef13b0726bde5e757b52f1590d7
|
[
"MIT"
] | null | null | null |
Super_Pow.py
|
thydeyx/LeetCode-Python
|
03296dfa37910ef13b0726bde5e757b52f1590d7
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
#
# Author : TangHanYi
# E-mail : thydeyx@163.com
# Create Date : 2016-12-11 09:33:17 AM
# Last modified : 2016-12-11 10:48:50 AM
# File Name : Super_Pow.py
# Desc :
class Solution(object):
def superPow(self, a, b):
if len(b) == 0:
            # an empty exponent means a ** 0, which is 1
            return 1
tmp = a
ret = 1
n = len(b)
k = 0
while k != n - 1:
if b[-1] % 2 == 1:
ret = (ret * tmp) % 1337
tmp = (tmp * tmp) % 1337
pre = 0
for i in range(k, n):
pre_b = (pre * 10 + b[i]) % 2
                b[i] = (pre * 10 + b[i]) // 2
pre = pre_b
if k == i and b[i] == 0:
k += 1
        # once only the last digit is non-zero, finish it off bit by bit
        # (the loop above exits before the final digit is fully consumed)
        d = b[-1]
        while d > 0:
            if d % 2 == 1:
                ret = (ret * tmp) % 1337
            tmp = (tmp * tmp) % 1337
            d //= 2
        return ret
if __name__ == "__main__":
s = Solution()
a = 434576
b = [6,4,0,4,4,0,3,9,4,9,9,6,2,0,2,5,4,2,1,7,9,2,5,9,5,2,5,6,2,9,2,4,4,4,8,0,7,4,9,1,3,9,9,7,1,1,2,7,5,5,4,5,7,1,4,4,4,9,1,8,0,5,4,6,2,3,7,9,9,6,0,2,7,1,0,0,3,4,7,8,2,4,3,9,5,9,6,1,8,9,0,9,6,4,5,8,9,4,7,8,1,9,1,0,3,3,1,6,9,8,6,1,4,0,3,0,1,9,1,0,0,1,1,6,8,8,5,7,3,4,6,6,6,9,6,9,2,9,7,3,0,3,7,4,5,0,4,7,1,8,7,1,1,0,9,9,0,4,9,5,1,5,3,7,4,0,8,8,1,5,1,1,8,8,6,4,0,2,1,3,0,0,4,4,2,6,5,2,0,4,0,1,9,3,0,5,5,8,5,7,5,7,0,4,7,6,0,8,1,1,1,3,3,8,7,5,4,3,9,6,7,9,0,9,5,0,6,0,1,2,9,6,1,0,2,8,8,2,6,9,5,0,3,8,8,0,3,4,5,5,0,5,6,0,6,1,3,2,4,4,6,3,2,7,5,5,8,4,9,6,3,5,6,8,3,6,9,9,0,6,4,1,1,2,3,7,4,6,2,0,0,0,5,5,0,1,0,8,7,9,4,2,6,3,1,0,9,2,1,2,8,7,5,0,9,8,9,5,5,1,5,7,4,3,2,4,6,4,2,3,6,8,5,4,1,8,4,1,0,7,3,9,4,8,1,4,8,0,1,5,4,9,3,8,2,7,2,8,4,6,1,2,4,8,6,8,9,3,1,9,0,6,8,5,6,1,1,4,2,2,0,8,1,5,6,5,2,0,3,8,8,6,2,4,7,9,2,6,4,3,5,4,1,6,1,7,7,2,2,1,7,4,9,0,9,7,6,3,9,1,2,7,8,4,2,7,5,6,3,9,2,0,6,3,8,7,1,8,2,5,9,9,9,1,9,8,8,7,1,8,9,5,7,9,2,9,6,7,8,1,9,0,3,5,3,4,4,4,2,6,9,3,5,8,4,7,8,5,4,2,5,5,7,2,6,9,4,4,9,2,5,0,2,1,7,5,5,1,2,9,8,3,2,5,4,9,4,2,4,9,4,9,6,4,3,3,5,7,7,6,9,5,8,3,8,5,1,3,9,3,2,7,8,6,4,2,5,9,7,9,0,3,0,6,9,4,1,5,3,1,1,3,6,0,6,4,7,9,9,6,2,3,5,3,9,0,7,7,1,4,6,1,0,9,9,9,5,1,6,8,2,8,1,0,0,0,6,9,9,5,6,4,0,1,9,9,3,6,8,4,3,7,5,3,6,7,4,1,0,1,9,4,1,3,4,1,5,0,2,6,7,8,0,9,2,1,0,7,8,9,2,1,6,9,6,2,6,0,5,8,1,6,2,2,9,6,5,6,8,8,3,7,8,5,6,0,7,7,8,5,6,2,8,2,1,4,6,0,4,1,8,6,7,1,8,9,9,4,5,0,4,8,9,2,6,6,5,3,5,5,8,3,7,6,7,0,0,3,2,4,6,3,2,5,6,1,4,5,7,2,7,1,2,7,3,8,3,8,1,0,5,1,3,2,9,0,5,1,3,7,8,1,0,0,6,6,3,3,4,0,7,1,3,9,0,7,8,5,7,1,5,3,3,8,7,4,0,2,6,5,2,4,6,2,4,5,1,8,8,7,0,5,0,4,6,1,3,4,6,0,8,2,5,3,2,5,7,3,7,5,8,1,9,7,6,6,2,7,6,0,6,6,7,6,2,3,7,5,0,6,8,8,0,5,3,2,0,0,7,0,8,8,1,7,5,7,5,7,6,1,7,4,0,4,1,2,9,0,8,9,6,6,9,6,1,2,1,4,5,8,4,3,6,7,2,3,5,8,0,3,9,7,8,9,3,1,2,5,1,2,4,0,8,6,8,1,8,9,5,5,0,1,0,8,9,3,2,6,1,4,9,2,2,9,4,7,0,8,2,4,0,9,6,0,7,4,3,5,6,1,3,8,2,3,8,1,6,2,7,9,7,9,4,1,0,0,0,1,8,3,7,0,4,3,2,1,9,5,8,7,6,1,5,1,7,6,2,5,8,2,7,5,1,1,8,3,1,9,4,1,4,3,1,0,8,5,1,0,0,1,7,9,5,5,0,2,1,2,9,1,6,6,9,9,9,7,3,0,6,9,3,0,3,6,0,3,1,3,3,2,7]
    print(s.superPow(a, b))
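# Editor's sketch (not part of the original solution): cross-check the
# digit-list exponentiation against Python's built-in three-argument pow().
def _cross_check():
    s = Solution()
    assert s.superPow(2, [6, 4]) == pow(2, 64, 1337)
    assert s.superPow(7, [1, 0]) == pow(7, 10, 1337)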
| 69.358974
| 2,008
| 0.497597
|
class Solution(object):
def superPow(self, a, b):
if len(b) == 0:
            return 1
tmp = a
ret = 1
n = len(b)
k = 0
while k != n - 1:
if b[-1] % 2 == 1:
ret = (ret * tmp) % 1337
tmp = (tmp * tmp) % 1337
pre = 0
for i in range(k, n):
pre_b = (pre * 10 + b[i]) % 2
                b[i] = (pre * 10 + b[i]) // 2
pre = pre_b
if k == i and b[i] == 0:
k += 1
        d = b[-1]
        while d > 0:
            if d % 2 == 1:
                ret = (ret * tmp) % 1337
            tmp = (tmp * tmp) % 1337
            d //= 2
        return ret
if __name__ == "__main__":
s = Solution()
a = 434576
b = [6,4,0,4,4,0,3,9,4,9,9,6,2,0,2,5,4,2,1,7,9,2,5,9,5,2,5,6,2,9,2,4,4,4,8,0,7,4,9,1,3,9,9,7,1,1,2,7,5,5,4,5,7,1,4,4,4,9,1,8,0,5,4,6,2,3,7,9,9,6,0,2,7,1,0,0,3,4,7,8,2,4,3,9,5,9,6,1,8,9,0,9,6,4,5,8,9,4,7,8,1,9,1,0,3,3,1,6,9,8,6,1,4,0,3,0,1,9,1,0,0,1,1,6,8,8,5,7,3,4,6,6,6,9,6,9,2,9,7,3,0,3,7,4,5,0,4,7,1,8,7,1,1,0,9,9,0,4,9,5,1,5,3,7,4,0,8,8,1,5,1,1,8,8,6,4,0,2,1,3,0,0,4,4,2,6,5,2,0,4,0,1,9,3,0,5,5,8,5,7,5,7,0,4,7,6,0,8,1,1,1,3,3,8,7,5,4,3,9,6,7,9,0,9,5,0,6,0,1,2,9,6,1,0,2,8,8,2,6,9,5,0,3,8,8,0,3,4,5,5,0,5,6,0,6,1,3,2,4,4,6,3,2,7,5,5,8,4,9,6,3,5,6,8,3,6,9,9,0,6,4,1,1,2,3,7,4,6,2,0,0,0,5,5,0,1,0,8,7,9,4,2,6,3,1,0,9,2,1,2,8,7,5,0,9,8,9,5,5,1,5,7,4,3,2,4,6,4,2,3,6,8,5,4,1,8,4,1,0,7,3,9,4,8,1,4,8,0,1,5,4,9,3,8,2,7,2,8,4,6,1,2,4,8,6,8,9,3,1,9,0,6,8,5,6,1,1,4,2,2,0,8,1,5,6,5,2,0,3,8,8,6,2,4,7,9,2,6,4,3,5,4,1,6,1,7,7,2,2,1,7,4,9,0,9,7,6,3,9,1,2,7,8,4,2,7,5,6,3,9,2,0,6,3,8,7,1,8,2,5,9,9,9,1,9,8,8,7,1,8,9,5,7,9,2,9,6,7,8,1,9,0,3,5,3,4,4,4,2,6,9,3,5,8,4,7,8,5,4,2,5,5,7,2,6,9,4,4,9,2,5,0,2,1,7,5,5,1,2,9,8,3,2,5,4,9,4,2,4,9,4,9,6,4,3,3,5,7,7,6,9,5,8,3,8,5,1,3,9,3,2,7,8,6,4,2,5,9,7,9,0,3,0,6,9,4,1,5,3,1,1,3,6,0,6,4,7,9,9,6,2,3,5,3,9,0,7,7,1,4,6,1,0,9,9,9,5,1,6,8,2,8,1,0,0,0,6,9,9,5,6,4,0,1,9,9,3,6,8,4,3,7,5,3,6,7,4,1,0,1,9,4,1,3,4,1,5,0,2,6,7,8,0,9,2,1,0,7,8,9,2,1,6,9,6,2,6,0,5,8,1,6,2,2,9,6,5,6,8,8,3,7,8,5,6,0,7,7,8,5,6,2,8,2,1,4,6,0,4,1,8,6,7,1,8,9,9,4,5,0,4,8,9,2,6,6,5,3,5,5,8,3,7,6,7,0,0,3,2,4,6,3,2,5,6,1,4,5,7,2,7,1,2,7,3,8,3,8,1,0,5,1,3,2,9,0,5,1,3,7,8,1,0,0,6,6,3,3,4,0,7,1,3,9,0,7,8,5,7,1,5,3,3,8,7,4,0,2,6,5,2,4,6,2,4,5,1,8,8,7,0,5,0,4,6,1,3,4,6,0,8,2,5,3,2,5,7,3,7,5,8,1,9,7,6,6,2,7,6,0,6,6,7,6,2,3,7,5,0,6,8,8,0,5,3,2,0,0,7,0,8,8,1,7,5,7,5,7,6,1,7,4,0,4,1,2,9,0,8,9,6,6,9,6,1,2,1,4,5,8,4,3,6,7,2,3,5,8,0,3,9,7,8,9,3,1,2,5,1,2,4,0,8,6,8,1,8,9,5,5,0,1,0,8,9,3,2,6,1,4,9,2,2,9,4,7,0,8,2,4,0,9,6,0,7,4,3,5,6,1,3,8,2,3,8,1,6,2,7,9,7,9,4,1,0,0,0,1,8,3,7,0,4,3,2,1,9,5,8,7,6,1,5,1,7,6,2,5,8,2,7,5,1,1,8,3,1,9,4,1,4,3,1,0,8,5,1,0,0,1,7,9,5,5,0,2,1,2,9,1,6,6,9,9,9,7,3,0,6,9,3,0,3,6,0,3,1,3,3,2,7]
    print(s.superPow(a, b))
| false
| true
|
790ca36ebd16ce8f24e568c044f47b77b90fdfbb
| 30
|
py
|
Python
|
src/products/__init__.py
|
GG31/openfood-graphql-api
|
7b6f74706502f79126c47beb3d47cd07146c8679
|
[
"MIT"
] | null | null | null |
src/products/__init__.py
|
GG31/openfood-graphql-api
|
7b6f74706502f79126c47beb3d47cd07146c8679
|
[
"MIT"
] | 1
|
2018-12-25T22:45:13.000Z
|
2018-12-25T22:45:13.000Z
|
src/products/__init__.py
|
GG31/openfood-graphql-api
|
7b6f74706502f79126c47beb3d47cd07146c8679
|
[
"MIT"
] | null | null | null |
from .products import Products
| 30
| 30
| 0.866667
|
from .products import Products
| true
| true
|
790ca51d9f535dae8f1860efbd39a6910a1fd6b2
| 1,897
|
py
|
Python
|
main.py
|
cheran-senthil/SultanKhan2
|
c2f84080cd79ce3897f7fac82455a4da0d7d7c28
|
[
"MIT"
] | 2
|
2020-12-10T18:32:51.000Z
|
2021-05-29T04:25:25.000Z
|
main.py
|
Cheran-Senthil/SultanKhan2
|
c2f84080cd79ce3897f7fac82455a4da0d7d7c28
|
[
"MIT"
] | null | null | null |
main.py
|
Cheran-Senthil/SultanKhan2
|
c2f84080cd79ce3897f7fac82455a4da0d7d7c28
|
[
"MIT"
] | 1
|
2021-03-31T05:03:03.000Z
|
2021-03-31T05:03:03.000Z
|
import berserk
import chaturanga
token = 'token'  # placeholder: personal lichess API token
bot_id = 'sultankhan2'
session = berserk.TokenSession(token)
lichess = berserk.Client(session)
# challenges are screened below; any other event type announces a game to play
for event in lichess.bots.stream_incoming_events():
if event['type'] == 'challenge':
challenge = event['challenge']
if challenge['variant']['key'] == 'standard':
if not challenge['rated']:
game_id = challenge['id']
lichess.bots.accept_challenge(game_id)
else:
game_id = event['game']['id']
challenge = {'color': 'random'}
for game_state in lichess.bots.stream_game_state(game_id):
if game_state['type'] == 'gameFull':
if game_state['state']['moves'] == '':
if game_state['initialFen'] == 'startpos':
Chessboard = chaturanga.Chessboard()
else:
Chessboard = chaturanga.Chessboard(
game_state['initialFen'])
if challenge['color'] == 'random':
if 'id' in game_state['white']:
is_white = game_state['white']['id'] == bot_id
else:
is_white = False
else:
is_white = {
'white': False,
'black': True
}[challenge['color']]
if is_white:
bot_move = chaturanga.bot(Chessboard)[0]
Chessboard.move(bot_move)
lichess.bots.make_move(game_id, bot_move)
if game_state['type'] == 'gameState':
moves = game_state['moves'].split(' ')
            # the bot is to move when the move-count parity differs from its color
            if len(moves) % 2 != is_white:
Chessboard.move(moves[-1])
bot_move = chaturanga.bot(Chessboard)[0]
Chessboard.move(bot_move)
lichess.bots.make_move(game_id, bot_move)
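# Editor's sketch (not part of the original bot): the stream above stops the
# process on any network hiccup; a thin retry wrapper around the same berserk
# call keeps the bot online. `handle_event` stands for a hypothetical
# refactoring of the event-handling body above.
import time
def run_forever(handle_event):
    while True:
        try:
            for event in lichess.bots.stream_incoming_events():
                handle_event(event)
        except Exception as exc:  # connection drop, API error, ...
            print('stream error: {}; reconnecting in 5 s'.format(exc))
            time.sleep(5)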
| 36.480769
| 70
| 0.508698
|
import berserk
import chaturanga
token = 'token'
bot_id = 'sultankhan2'
session = berserk.TokenSession(token)
lichess = berserk.Client(session)
for event in lichess.bots.stream_incoming_events():
if event['type'] == 'challenge':
challenge = event['challenge']
if challenge['variant']['key'] == 'standard':
if not challenge['rated']:
game_id = challenge['id']
lichess.bots.accept_challenge(game_id)
else:
game_id = event['game']['id']
challenge = {'color': 'random'}
for game_state in lichess.bots.stream_game_state(game_id):
if game_state['type'] == 'gameFull':
if game_state['state']['moves'] == '':
if game_state['initialFen'] == 'startpos':
Chessboard = chaturanga.Chessboard()
else:
Chessboard = chaturanga.Chessboard(
game_state['initialFen'])
if challenge['color'] == 'random':
if 'id' in game_state['white']:
is_white = game_state['white']['id'] == bot_id
else:
is_white = False
else:
is_white = {
'white': False,
'black': True
}[challenge['color']]
if is_white:
bot_move = chaturanga.bot(Chessboard)[0]
Chessboard.move(bot_move)
lichess.bots.make_move(game_id, bot_move)
if game_state['type'] == 'gameState':
moves = game_state['moves'].split(' ')
if len(moves) % 2 != is_white:
Chessboard.move(moves[-1])
bot_move = chaturanga.bot(Chessboard)[0]
Chessboard.move(bot_move)
lichess.bots.make_move(game_id, bot_move)
| true
| true
|
790ca526824369fdd3f703c070674bae414a0614
| 1,502
|
py
|
Python
|
aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/PauseClusterUpgradeRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/PauseClusterUpgradeRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/PauseClusterUpgradeRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkcs.endpoint import endpoint_data
class PauseClusterUpgradeRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'CS', '2015-12-15', 'PauseClusterUpgrade')
self.set_uri_pattern('/api/v2/clusters/[ClusterId]/upgrade/pause')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterId(self):
return self.get_path_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_path_param('ClusterId',ClusterId)
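# Editor's sketch (not part of the generated SDK file): typical use with the
# core AcsClient; the client, region and cluster id are caller-supplied
# placeholders.
def pause_cluster_upgrade(client, cluster_id):
    # `client` is an aliyunsdkcore.client.AcsClient instance
    request = PauseClusterUpgradeRequest()
    request.set_ClusterId(cluster_id)
    return client.do_action_with_exception(request)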
| 38.512821
| 74
| 0.768309
|
from aliyunsdkcore.request import RoaRequest
from aliyunsdkcs.endpoint import endpoint_data
class PauseClusterUpgradeRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'CS', '2015-12-15', 'PauseClusterUpgrade')
self.set_uri_pattern('/api/v2/clusters/[ClusterId]/upgrade/pause')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterId(self):
return self.get_path_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_path_param('ClusterId',ClusterId)
| true
| true
|
790ca5ba75d3b01cb5868c7361555b131ec0a8b0
| 5,916
|
py
|
Python
|
venv/lib/python3.5/site-packages/coalib/misc/Shell.py
|
prashant0598/CoffeeApp
|
4fa006aebf06e12ed34766450ddcfa548ee63307
|
[
"MIT"
] | null | null | null |
venv/lib/python3.5/site-packages/coalib/misc/Shell.py
|
prashant0598/CoffeeApp
|
4fa006aebf06e12ed34766450ddcfa548ee63307
|
[
"MIT"
] | null | null | null |
venv/lib/python3.5/site-packages/coalib/misc/Shell.py
|
prashant0598/CoffeeApp
|
4fa006aebf06e12ed34766450ddcfa548ee63307
|
[
"MIT"
] | null | null | null |
from contextlib import contextmanager
import platform
import shlex
from subprocess import PIPE, Popen
from shutil import which
class ShellCommandResult(tuple):
"""
The result of a :func:`coalib.misc.run_shell_command` call.
It is based on a ``(stdout, stderr)`` string tuple like it is returned
form ``subprocess.Popen.communicate`` and was originally returned from
:func:`coalib.misc.run_shell_command`. So it is backwards-compatible.
It additionally stores the return ``.code``:
>>> process = Popen(['python', '-c',
... 'import sys; print(sys.stdin.readline().strip() +'
... ' " processed")'],
... stdin=PIPE, stdout=PIPE, stderr=PIPE,
... universal_newlines=True)
>>> stdout, stderr = process.communicate(input='data')
>>> stderr
''
>>> result = ShellCommandResult(process.returncode, stdout, stderr)
>>> result[0]
'data processed\\n'
>>> result[1]
''
>>> result.code
0
"""
def __new__(cls, code, stdout, stderr):
"""
Creates the basic tuple from `stdout` and `stderr`.
"""
return tuple.__new__(cls, (stdout, stderr))
def __init__(self, code, stdout, stderr):
"""
Stores the return `code`.
"""
self.code = code
@contextmanager
def run_interactive_shell_command(command, **kwargs):
"""
Runs a single command in shell and provides stdout, stderr and stdin
streams.
This function creates a context manager that sets up the process (using
``subprocess.Popen()``), returns to caller and waits for process to exit on
leaving.
By default the process is opened in ``universal_newlines`` mode and creates
pipes for all streams (stdout, stderr and stdin) using ``subprocess.PIPE``
special value. These pipes are closed automatically, so if you want to get
the contents of the streams you should retrieve them before the context
manager exits.
>>> with run_interactive_shell_command(["echo", "TEXT"]) as p:
... stdout = p.stdout
... stdout_text = stdout.read()
>>> stdout_text
'TEXT\\n'
>>> stdout.closed
True
Custom streams provided are not closed except of ``subprocess.PIPE``.
>>> from tempfile import TemporaryFile
>>> stream = TemporaryFile()
>>> with run_interactive_shell_command(["echo", "TEXT"],
... stdout=stream) as p:
... stderr = p.stderr
>>> stderr.closed
True
>>> stream.closed
False
:param command: The command to run on shell. This parameter can either
be a sequence of arguments that are directly passed to
                    the process or a string. A string gets split beforehand
using ``shlex.split()``. If providing ``shell=True`` as a
keyword-argument, no ``shlex.split()`` is performed and the
command string goes directly to ``subprocess.Popen()``.
:param kwargs: Additional keyword arguments to pass to
``subprocess.Popen`` that are used to spawn the process.
:return: A context manager yielding the process started from the
command.
"""
if not kwargs.get('shell', False) and isinstance(command, str):
command = shlex.split(command)
else:
command = list(command)
if platform.system() == 'Windows': # pragma: no cover
# subprocess doesn't implicitly look for .bat and .cmd scripts when
# running commands under Windows
command[0] = which(command[0])
args = {'stdout': PIPE,
'stderr': PIPE,
'stdin': PIPE,
'universal_newlines': True}
args.update(kwargs)
process = Popen(command, **args)
try:
yield process
finally:
if args['stdout'] is PIPE:
process.stdout.close()
if args['stderr'] is PIPE:
process.stderr.close()
if args['stdin'] is PIPE:
process.stdin.close()
process.wait()
def run_shell_command(command, stdin=None, **kwargs):
"""
Runs a single command in shell and returns the read stdout and stderr data.
This function waits for the process (created using ``subprocess.Popen()``)
to exit. Effectively it wraps ``run_interactive_shell_command()`` and uses
``communicate()`` on the process.
See also ``run_interactive_shell_command()``.
:param command: The command to run on shell. This parameter can either
be a sequence of arguments that are directly passed to
                    the process or a string. A string gets split beforehand
using ``shlex.split()``.
:param stdin: Initial input to send to the process.
:param kwargs: Additional keyword arguments to pass to
``subprocess.Popen`` that is used to spawn the process.
:return: A tuple with ``(stdoutstring, stderrstring)``.
"""
with run_interactive_shell_command(command, **kwargs) as p:
ret = p.communicate(stdin)
return ShellCommandResult(p.returncode, *ret)
def get_shell_type(): # pragma: no cover
"""
Finds the current shell type based on the outputs of common pre-defined
variables in them. This is useful to identify which sort of escaping
is required for strings.
:return: The shell type. This can be either "powershell" if Windows
             Powershell is detected, "cmd" if the command prompt has been
             detected, or "sh" if it is neither of these.
"""
out = run_shell_command('echo $host.name', shell=True)[0]
if out.strip() == 'ConsoleHost':
return 'powershell'
out = run_shell_command('echo $0', shell=True)[0]
if out.strip() == '$0':
return 'cmd'
return 'sh'
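# Editor's sketch (not part of the original module): run_shell_command keeps
# the plain (stdout, stderr) tuple interface while also exposing the exit
# code (a POSIX `echo` is assumed here).
if __name__ == '__main__':
    result = run_shell_command(['echo', 'hello'])
    stdout, stderr = result             # tuple unpacking still works
    print(stdout.strip(), result.code)  # -> hello 0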
| 35.42515
| 79
| 0.615619
|
from contextlib import contextmanager
import platform
import shlex
from subprocess import PIPE, Popen
from shutil import which
class ShellCommandResult(tuple):
def __new__(cls, code, stdout, stderr):
return tuple.__new__(cls, (stdout, stderr))
def __init__(self, code, stdout, stderr):
self.code = code
@contextmanager
def run_interactive_shell_command(command, **kwargs):
if not kwargs.get('shell', False) and isinstance(command, str):
command = shlex.split(command)
else:
command = list(command)
if platform.system() == 'Windows':
# running commands under Windows
command[0] = which(command[0])
args = {'stdout': PIPE,
'stderr': PIPE,
'stdin': PIPE,
'universal_newlines': True}
args.update(kwargs)
process = Popen(command, **args)
try:
yield process
finally:
if args['stdout'] is PIPE:
process.stdout.close()
if args['stderr'] is PIPE:
process.stderr.close()
if args['stdin'] is PIPE:
process.stdin.close()
process.wait()
def run_shell_command(command, stdin=None, **kwargs):
with run_interactive_shell_command(command, **kwargs) as p:
ret = p.communicate(stdin)
return ShellCommandResult(p.returncode, *ret)
def get_shell_type(): # pragma: no cover
out = run_shell_command('echo $host.name', shell=True)[0]
if out.strip() == 'ConsoleHost':
return 'powershell'
out = run_shell_command('echo $0', shell=True)[0]
if out.strip() == '$0':
return 'cmd'
return 'sh'
| true
| true
|
790ca83823d22be1ff3f0729ae69724cb8efce03
| 1,985
|
py
|
Python
|
services/director-v2/tests/unit/test_core_settings.py
|
ITISFoundation/osparc-simcore
|
5ef4cd985f98f1ca4ee116659624748c5bf683a8
|
[
"MIT"
] | 25
|
2018-04-13T12:44:12.000Z
|
2022-03-12T15:01:17.000Z
|
services/director-v2/tests/unit/test_core_settings.py
|
ITISFoundation/osparc-simcore
|
5ef4cd985f98f1ca4ee116659624748c5bf683a8
|
[
"MIT"
] | 2,553
|
2018-01-18T17:11:55.000Z
|
2022-03-31T16:26:40.000Z
|
services/director-v2/tests/unit/test_core_settings.py
|
ITISFoundation/osparc-simcore
|
5ef4cd985f98f1ca4ee116659624748c5bf683a8
|
[
"MIT"
] | 20
|
2018-01-18T19:45:33.000Z
|
2022-03-29T07:08:47.000Z
|
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
import pytest
from models_library.basic_types import LogLevel
from simcore_service_director_v2.core.settings import (
AppSettings,
BootModeEnum,
DynamicSidecarProxySettings,
DynamicSidecarSettings,
RegistrySettings,
)
def test_settings_with_project_env_devel(project_env_devel_environment):
# loads from environ
settings = AppSettings.create_from_envs()
print("captured settings: \n", settings.json(indent=2))
assert settings.SC_BOOT_MODE == BootModeEnum.DEBUG
assert settings.LOG_LEVEL == LogLevel.DEBUG
assert settings.POSTGRES.dsn == "postgresql://test:test@localhost:5432/test"
def test_settings_with_env_devel(mock_env_devel_environment):
settings = AppSettings.create_from_envs()
print("captured settings: \n", settings.json(indent=2))
assert settings
@pytest.mark.parametrize(
"image",
[
"local/dynamic-sidecar:development",
"local/dynamic-sidecar:production",
"itisfoundation/dynamic-sidecar:merge-github-testbuild-latest",
"itisfoundation/dynamic-sidecar:1.0.0",
"local/dynamic-sidecar:0.0.1",
"dynamic-sidecar:production",
"/dynamic-sidecar:latest",
"/local/dynamic-sidecar:latest",
],
)
def test_dynamic_sidecar_settings(image: str) -> None:
    required_kwargs = dict(
DYNAMIC_SIDECAR_IMAGE=image,
SIMCORE_SERVICES_NETWORK_NAME="test",
TRAEFIK_SIMCORE_ZONE="",
SWARM_STACK_NAME="",
DYNAMIC_SIDECAR_PROXY_SETTINGS=DynamicSidecarProxySettings(),
REGISTRY=RegistrySettings(
REGISTRY_URL="http://te.st",
REGISTRY_AUTH=True,
REGISTRY_USER="test",
REGISTRY_PW="test",
REGISTRY_SSL=False,
),
)
    settings = DynamicSidecarSettings(**required_kwargs)
assert settings.DYNAMIC_SIDECAR_IMAGE == image.lstrip("/")
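# Editor's sketch (not part of the original tests; the environment-variable
# name is assumed from the field name above): individual settings can be
# overridden on top of the devel environment fixture.
def test_log_level_override(mock_env_devel_environment, monkeypatch):
    monkeypatch.setenv("LOG_LEVEL", "WARNING")
    settings = AppSettings.create_from_envs()
    assert settings.LOG_LEVEL == LogLevel.WARNING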
| 31.015625
| 80
| 0.70529
|
import pytest
from models_library.basic_types import LogLevel
from simcore_service_director_v2.core.settings import (
AppSettings,
BootModeEnum,
DynamicSidecarProxySettings,
DynamicSidecarSettings,
RegistrySettings,
)
def test_settings_with_project_env_devel(project_env_devel_environment):
settings = AppSettings.create_from_envs()
print("captured settings: \n", settings.json(indent=2))
assert settings.SC_BOOT_MODE == BootModeEnum.DEBUG
assert settings.LOG_LEVEL == LogLevel.DEBUG
assert settings.POSTGRES.dsn == "postgresql://test:test@localhost:5432/test"
def test_settings_with_env_devel(mock_env_devel_environment):
settings = AppSettings.create_from_envs()
print("captured settings: \n", settings.json(indent=2))
assert settings
@pytest.mark.parametrize(
"image",
[
"local/dynamic-sidecar:development",
"local/dynamic-sidecar:production",
"itisfoundation/dynamic-sidecar:merge-github-testbuild-latest",
"itisfoundation/dynamic-sidecar:1.0.0",
"local/dynamic-sidecar:0.0.1",
"dynamic-sidecar:production",
"/dynamic-sidecar:latest",
"/local/dynamic-sidecar:latest",
],
)
def test_dynamic_sidecar_settings(image: str) -> None:
    required_kwargs = dict(
DYNAMIC_SIDECAR_IMAGE=image,
SIMCORE_SERVICES_NETWORK_NAME="test",
TRAEFIK_SIMCORE_ZONE="",
SWARM_STACK_NAME="",
DYNAMIC_SIDECAR_PROXY_SETTINGS=DynamicSidecarProxySettings(),
REGISTRY=RegistrySettings(
REGISTRY_URL="http://te.st",
REGISTRY_AUTH=True,
REGISTRY_USER="test",
REGISTRY_PW="test",
REGISTRY_SSL=False,
),
)
    settings = DynamicSidecarSettings(**required_kwargs)
assert settings.DYNAMIC_SIDECAR_IMAGE == image.lstrip("/")
| true
| true
|
790ca8e976d69dae30f216759e78084e78f20a8e
| 2,964
|
py
|
Python
|
submodules/GAN_stability/gan_training/checkpoints.py
|
joebartusek/graf
|
80e1014a1def2660a44188c69021f0c498b6cef9
|
[
"MIT"
] | 888
|
2018-07-02T17:42:36.000Z
|
2022-03-29T16:38:14.000Z
|
submodules/GAN_stability/gan_training/checkpoints.py
|
joebartusek/graf
|
80e1014a1def2660a44188c69021f0c498b6cef9
|
[
"MIT"
] | 20
|
2018-08-14T22:55:18.000Z
|
2020-12-29T05:13:54.000Z
|
submodules/GAN_stability/gan_training/checkpoints.py
|
joebartusek/graf
|
80e1014a1def2660a44188c69021f0c498b6cef9
|
[
"MIT"
] | 134
|
2018-07-07T17:16:57.000Z
|
2022-03-11T14:32:28.000Z
|
import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
''' CheckpointIO class.
It handles saving and loading checkpoints.
Args:
checkpoint_dir (str): path where checkpoints are saved
'''
def __init__(self, checkpoint_dir='./chkpts', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
''' Registers modules in current module dictionary.
'''
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
''' Saves the current module dictionary.
Args:
filename (str): name of output file
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
def load(self, filename):
'''Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
'''
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
def load_file(self, filename):
'''Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
raise FileNotFoundError
def load_url(self, url):
'''Load a module dictionary from url.
Args:
url (str): url to saved model
'''
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict):
'''Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
'''
for k, v in self.module_dict.items():
if k in state_dict:
v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
def is_url(url):
scheme = urllib.parse.urlparse(url).scheme
return scheme in ('http', 'https')
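# Editor's sketch (not part of the original module): registering a model and
# an optimizer, saving both plus a scalar, then restoring them by name.
if __name__ == '__main__':
    import torch.nn as nn
    net = nn.Linear(4, 2)
    opt = torch.optim.SGD(net.parameters(), lr=0.1)
    checkpoint_io = CheckpointIO('./chkpts', model=net, optimizer=opt)
    checkpoint_io.save('model.pt', epoch=3)   # extra kwargs are kept as scalars
    scalars = checkpoint_io.load('model.pt')  # restores net and opt in place
    print(scalars)                            # -> {'epoch': 3}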
| 29.346535
| 70
| 0.584345
|
import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
def __init__(self, checkpoint_dir='./chkpts', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
def load(self, filename):
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
def load_file(self, filename):
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
else:
raise FileNotFoundError
def load_url(self, url):
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict):
for k, v in self.module_dict.items():
if k in state_dict:
v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
def is_url(url):
scheme = urllib.parse.urlparse(url).scheme
return scheme in ('http', 'https')
| true
| true
|
790ca91d1e267c27a75b0c472c8aadefd871871f
| 11,385
|
py
|
Python
|
main.py
|
VV123/NLIDB_gradient
|
f42a6f383d2d4ac41c354cf55df2a21507577b02
|
[
"MIT"
] | null | null | null |
main.py
|
VV123/NLIDB_gradient
|
f42a6f383d2d4ac41c354cf55df2a21507577b02
|
[
"MIT"
] | 1
|
2021-01-11T03:42:43.000Z
|
2021-02-19T17:06:59.000Z
|
main.py
|
VV123/NLIDB_gradient
|
f42a6f383d2d4ac41c354cf55df2a21507577b02
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import sys
import argparse
import os
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from utils.data_manager import load_data, load_data_one
from collections import defaultdict
from argparse import ArgumentParser
from decode_helper import decode_one
reload(sys)  # Python 2 idiom: re-expose sys.setdefaultencoding (sys is imported above)
sys.setdefaultencoding('utf8')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tf_helper import train, evaluate, decode_data, decode_data_recover
from model1 import construct_graph
def init_args():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--data_path',
default=os.path.dirname(os.path.abspath(__file__)) + '/data',
type=str,
help='Data path.')
arg_parser.add_argument(
'--load_data', default=False, type=bool, help='Load data.')
arg_parser.add_argument(
'--data',
choices=['wikisql', 'spider', 'overnight', 'overnight_set'],
default='wikisql',
help='data to train & test')
#arg_parser.add_argument('--tran_data', choices=['wikisql', 'spider', 'overnight'], default='overnight', help='data to transfer')
arg_parser.add_argument(
'--subset', choices=['all'], default='all', help='Subset of data.')
arg_parser.add_argument(
'--maxlen', default=60, type=int, help='Data record max length.')
arg_parser.add_argument(
'--annotation_path',
default=os.path.dirname(os.path.abspath(__file__)) +
'/data/DATA/wiki/',
type=str,
help='Data annotation path.')
arg_parser.add_argument(
'--mode',
choices=['train', 'infer', 'transfer','txt'],
default='infer',
help='Run mode')
#### Model configuration ####
arg_parser.add_argument(
'--cell',
choices=['gru'],
default='gru',
help='Type of cell used, currently only standard GRU cell is supported'
)
arg_parser.add_argument(
'--output_vocab_size',
default=20637,
#default=20452,
type=int,
help='Output vocabulary size.')
# Embedding sizes
arg_parser.add_argument(
'--embedding_dim',
default=300,
type=int,
help='Size of word embeddings')
#Hidden sizes
arg_parser.add_argument(
'--dim', default=400, type=int, help='Size of GRU hidden states')
arg_parser.add_argument(
'--hidden_size',
default=256,
type=int,
help='Size of LSTM hidden states')
arg_parser.add_argument(
'--no_copy',
default=False,
action='store_true',
help='Do not use copy mechanism')
#### Training ####
arg_parser.add_argument(
'--vocab', type=str, help='Path of the serialized vocabulary')
arg_parser.add_argument(
'--glove_embed_path',
default=None,
type=str,
        help='Path to pretrained Glove embedding')
arg_parser.add_argument(
'--batch_size', default=128, type=int, help='Batch size')
arg_parser.add_argument(
'--in_drop', default=0., type=float, help='In dropout rate')
arg_parser.add_argument(
'--out_drop', default=0., type=float, help='Out dropout rate')
# training details
arg_parser.add_argument(
'--valid_epoch_interval',
default=1,
type=int,
help='Perform validation every x epoch')
arg_parser.add_argument(
'--clip_grad', default=5., type=float, help='Clip gradients')
arg_parser.add_argument(
        '--total_epochs', default=40, type=int, help='# of training epochs')
arg_parser.add_argument(
        '--epochs', default=1, type=int, help='Record per x epochs')
arg_parser.add_argument(
'--lr', default=0.0001, type=float, help='Learning rate')
arg_parser.add_argument(
'--lr_decay',
default=0.5,
type=float,
help='decay learning rate if the validation performance drops')
#### decoding/validation/testing ####
arg_parser.add_argument(
'--load_model', default=False, type=bool, help='Whether to load model')
arg_parser.add_argument(
'--beam_width', default=5, type=int, help='Beam size for beam search')
arg_parser.add_argument(
'--decode_max_time_step',
default=100,
type=int,
help='Maximum number of time steps used '
'in decoding and sampling')
args = arg_parser.parse_args()
return args
def model(args, train_env, infer_env):
tf.reset_default_graph()
train_graph = tf.Graph()
infer_graph = tf.Graph()
with train_graph.as_default():
train_env.x = tf.placeholder(
tf.int32, shape=[None, args.maxlen], name='x')
train_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
train_env.training = tf.placeholder_with_default(
False, (), name='train_mode')
train_env.train_op, train_env.loss, train_env.acc, sample_ids, logits = construct_graph(
"train", train_env, args)
train_env.saver = tf.train.Saver()
#[print(n.name) for n in tf.get_default_graph().as_graph_def().node if 'xxxxx' in n.name]
with infer_graph.as_default():
infer_env.x = tf.placeholder(
tf.int32, shape=[None, args.maxlen], name='x')
infer_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
infer_env.training = tf.placeholder_with_default(
False, (), name='train_mode')
_, infer_env.loss, infer_env.acc, infer_env.pred_ids, _ = construct_graph(
"infer", infer_env, args)
infer_env.infer_saver = tf.train.Saver()
return train_graph, infer_graph
def inferrence(args):
args.load_model = True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')
X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')
X_dev, y_dev = load_data(maxlen=args.maxlen,load=args.load_data, s='dev')
#X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('===========dev set============')
decode_data(sess, infer_env, X_dev, y_dev)
em = decode_data_recover(sess, infer_env, X_dev, y_dev, 'dev')
print('==========test set===========')
decode_data(sess, infer_env, X_test, y_test)
test_em = decode_data_recover(sess, infer_env, X_test, y_test,
'test')
return
def infer_one(args):
args.load_model = True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('===========decode============')
X_one = load_data_one(args.maxlen, 'qs.txt')
decode_one(sess, infer_env, X_one)
return
def train_model(args):
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
train_graph, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
args.load_model = False
X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')
X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')
X_dev, y_dev = load_data(maxlen=args.maxlen,load=args.load_data, s='dev')
#X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)
model2load = 'model/{}'.format(args.subset)
max_em, global_test_em, best_base = -1, -1, -1
acc = 0
sess1 = tf.InteractiveSession(graph=train_graph)
sess1.run(tf.global_variables_initializer())
sess1.run(tf.local_variables_initializer())
sess2 = tf.InteractiveSession(graph=infer_graph)
sess2.run(tf.global_variables_initializer())
    sess2.run(tf.local_variables_initializer())
    for base in range(args.total_epochs // args.epochs):
print('\nIteration: %d (%d epochs)' % (base, args.epochs))
model2load = train(
sess1,
train_env,
X_train,
y_train,
epochs=args.epochs,
load=args.load_model,
name=args.subset,
batch_size=args.batch_size,
base=base,
model2Bload=model2load)
args.load_model = True
infer_env.infer_saver.restore(sess2, model2load)
print('===========dev set============')
dev_em = decode_data(sess2, infer_env, X_dev, y_dev)
dev_em = decode_data_recover(sess2, infer_env, X_dev, y_dev,
'dev')
print('==========test set===========')
test_em = decode_data(sess2, infer_env, X_test, y_test)
test_em = decode_data_recover(sess2, infer_env, X_test, y_test,
'test')
if dev_em > max_em:
max_em = dev_em
global_test_em = test_em
best_base = base
print('\n Saving model for best testing')
train_env.saver.save(sess1, 'best_model/{0}-{1}-{2:.2f}'.format(args.subset, base, max_em))
print('Max EM acc: %.4f during %d iteration.' % (max_em, best_base))
print('test EM acc: %.4f ' % global_test_em)
return
def transfer(args):
load_model = args.load_model if args.mode == 'train' else True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'overnight'
args.load_data = True
#X_tran, y_tran = load_data(args)
X_tran, y_tran = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight')
args.data = 'overnight_set'
#tran_sets = load_data(args)
tran_sets = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight_set')
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('========subset transfer set========')
subsets = ['basketball', 'calendar', 'housing', 'recipes', 'restaurants']
for subset, (X_tran_subset, y_tran_subset) in zip(subsets, tran_sets):
print('---------' + subset + '---------')
tran_em = decode_data(
sess,
infer_env,
X_tran_subset,
y_tran_subset,
filename=str(subset + '.txt'))
print('===========transfer set============')
tran_em = decode_data(sess, infer_env, X_tran, y_tran)
return
if __name__ == '__main__':
args = init_args()
print(args)
if args.mode == 'train':
print('\nTrain model.')
train_model(args)
elif args.mode == 'infer':
print('\nInference.')
inferrence(args)
elif args.mode == 'txt':
print('\nInference from txt.')
infer_one(args)
elif args.mode == 'transfer':
print('\nTransfer.')
transfer(args)
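# Editor's sketch (not part of the original script): typical invocations,
# derived from the argparse options defined in init_args() above.
#
#   python main.py --mode train --batch_size 128 --total_epochs 40
#   python main.py --mode infer
#   python main.py --mode txt       # decode the questions listed in qs.txt
#   python main.py --mode transfer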
| 33.683432
| 133
| 0.623188
|
import sys
import argparse
import os
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from utils.data_manager import load_data, load_data_one
from collections import defaultdict
from argparse import ArgumentParser
from decode_helper import decode_one
reload(sys)
sys.setdefaultencoding('utf8')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tf_helper import train, evaluate, decode_data, decode_data_recover
from model1 import construct_graph
def init_args():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--data_path',
default=os.path.dirname(os.path.abspath(__file__)) + '/data',
type=str,
help='Data path.')
arg_parser.add_argument(
'--load_data', default=False, type=bool, help='Load data.')
arg_parser.add_argument(
'--data',
choices=['wikisql', 'spider', 'overnight', 'overnight_set'],
default='wikisql',
help='data to train & test')
arg_parser.add_argument(
'--subset', choices=['all'], default='all', help='Subset of data.')
arg_parser.add_argument(
'--maxlen', default=60, type=int, help='Data record max length.')
arg_parser.add_argument(
'--annotation_path',
default=os.path.dirname(os.path.abspath(__file__)) +
'/data/DATA/wiki/',
type=str,
help='Data annotation path.')
arg_parser.add_argument(
'--mode',
choices=['train', 'infer', 'transfer','txt'],
default='infer',
help='Run mode')
# NOTE: the opening lines of this argument were lost in extraction; the flag
# name below is reconstructed from the surviving default and help text.
arg_parser.add_argument(
'--cell',
default='gru',
help='Type of cell used, currently only standard GRU cell is supported'
)
arg_parser.add_argument(
'--output_vocab_size',
default=20637,
type=int,
help='Output vocabulary size.')
arg_parser.add_argument(
'--embedding_dim',
default=300,
type=int,
help='Size of word embeddings')
arg_parser.add_argument(
'--dim', default=400, type=int, help='Size of GRU hidden states')
arg_parser.add_argument(
'--hidden_size',
default=256,
type=int,
help='Size of LSTM hidden states')
arg_parser.add_argument(
'--no_copy',
default=False,
action='store_true',
help='Do not use copy mechanism')
# NOTE: the opening lines of this argument were lost in extraction; the flag
# name below is reconstructed from the surviving help text.
arg_parser.add_argument(
'--vocab',
default=None,
type=str,
help='Path of the serialized vocabulary')
arg_parser.add_argument(
'--glove_embed_path',
default=None,
type=str,
help='Path to pretrained Glove mebedding')
arg_parser.add_argument(
'--batch_size', default=128, type=int, help='Batch size')
arg_parser.add_argument(
'--in_drop', default=0., type=float, help='In dropout rate')
arg_parser.add_argument(
'--out_drop', default=0., type=float, help='Out dropout rate')
arg_parser.add_argument(
'--valid_epoch_interval',
default=1,
type=int,
help='Perform validation every x epoch')
arg_parser.add_argument(
'--clip_grad', default=5., type=float, help='Clip gradients')
arg_parser.add_argument(
'--total_epochs', default=40, type=int, help='# of training epoches')
arg_parser.add_argument(
'--epochs', default=1, type=int, help='Record per x epoches')
arg_parser.add_argument(
'--lr', default=0.0001, type=float, help='Learning rate')
arg_parser.add_argument(
'--lr_decay',
default=0.5,
type=float,
help='decay learning rate if the validation performance drops')
arg_parser.add_argument(
'--beam_width', default=5, type=int, help='Beam size for beam search')
arg_parser.add_argument(
'--decode_max_time_step',
default=100,
type=int,
help='Maximum number of time steps used '
'in decoding and sampling')
args = arg_parser.parse_args()
return args
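# A hypothetical command-line sketch based on the flags defined above (the
# script name and the chosen values are illustrative assumptions, not taken
# from the original project):
#
#   python seq2seq.py --mode train --data wikisql --batch_size 128 --lr 1e-4
#   python seq2seq.py --mode infer --subset all --beam_width 5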
def model(args, train_env, infer_env):
tf.reset_default_graph()
train_graph = tf.Graph()
infer_graph = tf.Graph()
with train_graph.as_default():
train_env.x = tf.placeholder(
tf.int32, shape=[None, args.maxlen], name='x')
train_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
train_env.training = tf.placeholder_with_default(
False, (), name='train_mode')
train_env.train_op, train_env.loss, train_env.acc, sample_ids, logits = construct_graph(
"train", train_env, args)
train_env.saver = tf.train.Saver()
with infer_graph.as_default():
infer_env.x = tf.placeholder(
tf.int32, shape=[None, args.maxlen], name='x')
infer_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
infer_env.training = tf.placeholder_with_default(
False, (), name='train_mode')
_, infer_env.loss, infer_env.acc, infer_env.pred_ids, _ = construct_graph(
"infer", infer_env, args)
infer_env.infer_saver = tf.train.Saver()
return train_graph, infer_graph
def inferrence(args):
args.load_model = True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')
X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')
X_dev, y_dev = load_data(maxlen=args.maxlen,load=args.load_data, s='dev')
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('===========dev set============')
decode_data(sess, infer_env, X_dev, y_dev)
em = decode_data_recover(sess, infer_env, X_dev, y_dev, 'dev')
print('==========test set===========')
decode_data(sess, infer_env, X_test, y_test)
test_em = decode_data_recover(sess, infer_env, X_test, y_test,
'test')
return
def infer_one(args):
args.load_model = True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('===========decode============')
X_one = load_data_one(args.maxlen, 'qs.txt')
decode_one(sess, infer_env, X_one)
return
def train_model(args):
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
train_graph, infer_graph = model(args, train_env, infer_env)
args.data = 'wikisql'
args.load_data = True
args.load_model = False
X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')
X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')
X_dev, y_dev = load_data(maxlen=args.maxlen,load=args.load_data, s='dev')
model2load = 'model/{}'.format(args.subset)
max_em, global_test_em, best_base = -1, -1, -1
acc = 0
sess1 = tf.InteractiveSession(graph=train_graph)
sess1.run(tf.global_variables_initializer())
sess1.run(tf.local_variables_initializer())
sess2 = tf.InteractiveSession(graph=infer_graph)
sess2.run(tf.global_variables_initializer())
sess2.run(tf.local_variables_initializer())
for base in range(args.total_epochs // args.epochs):
print('\nIteration: %d (%d epochs)' % (base, args.epochs))
model2load = train(
sess1,
train_env,
X_train,
y_train,
epochs=args.epochs,
load=args.load_model,
name=args.subset,
batch_size=args.batch_size,
base=base,
model2Bload=model2load)
args.load_model = True
infer_env.infer_saver.restore(sess2, model2load)
print('===========dev set============')
dev_em = decode_data(sess2, infer_env, X_dev, y_dev)
dev_em = decode_data_recover(sess2, infer_env, X_dev, y_dev,
'dev')
print('==========test set===========')
test_em = decode_data(sess2, infer_env, X_test, y_test)
test_em = decode_data_recover(sess2, infer_env, X_test, y_test,
'test')
if dev_em > max_em:
max_em = dev_em
global_test_em = test_em
best_base = base
print('\n Saving model for best testing')
train_env.saver.save(sess1, 'best_model/{0}-{1}-{2:.2f}'.format(args.subset, base, max_em))
print('Max EM acc: %.4f during %d iteration.' % (max_em, best_base))
print('test EM acc: %.4f ' % global_test_em)
return
def transfer(args):
load_model = args.load_model if args.mode == 'train' else True
class Dummy:
pass
train_env = Dummy()
infer_env = Dummy()
_, infer_graph = model(args, train_env, infer_env)
args.data = 'overnight'
args.load_data = True
X_tran, y_tran = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight')
args.data = 'overnight_set'
tran_sets = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight_set')
model2load = 'model/{}'.format(args.subset)
sess = tf.InteractiveSession(graph=infer_graph)
infer_env.infer_saver.restore(sess, model2load)
print('========subset transfer set========')
subsets = ['basketball', 'calendar', 'housing', 'recipes', 'restaurants']
for subset, (X_tran_subset, y_tran_subset) in zip(subsets, tran_sets):
print('---------' + subset + '---------')
tran_em = decode_data(
sess,
infer_env,
X_tran_subset,
y_tran_subset,
filename=str(subset + '.txt'))
print('===========transfer set============')
tran_em = decode_data(sess, infer_env, X_tran, y_tran)
return
if __name__ == '__main__':
args = init_args()
print(args)
if args.mode == 'train':
print('\nTrain model.')
train_model(args)
elif args.mode == 'infer':
print('\nInference.')
inferrence(args)
elif args.mode == 'txt':
print('\nInference from txt.')
infer_one(args)
elif args.mode == 'transfer':
print('\nTransfer.')
transfer(args)
| true
| true
|
790caa0bf021f34fdce2d7643a1774a9d95627ce
| 2,397
|
py
|
Python
|
smoketests/tests/dashboard/test_product_filter.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
smoketests/tests/dashboard/test_product_filter.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
smoketests/tests/dashboard/test_product_filter.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from unittestzero import Assert
from pages.dashboard import DashboardPage
class TestProductFilter(object):
@pytest.mark.nondestructive
def test_feedback_can_be_filtered_by_all_products_and_versions(self, mozwebqa):
"""Tests product filtering in dashboard
1. Verify that at least one product exists
2. Verify that filtering by product returns results
3. Verify that versions show up when you choose a product
4. Verify that the state of the filters are correct after being applied
5. Verify product and version values in the URL
NB: We don't cycle through all product/version
combinations--only the first two of each.
"""
dashboard_pg = DashboardPage(mozwebqa)
dashboard_pg.go_to_dashboard_page()
total_messages = dashboard_pg.total_message_count
products = dashboard_pg.product_filter.products
Assert.greater(len(products), 0)
for product in products[:2]:
if not product:
# If it's the "unknown" product, just skip it.
continue
dashboard_pg.product_filter.select_product(product)
Assert.greater(total_messages, dashboard_pg.total_message_count)
versions = dashboard_pg.product_filter.versions
Assert.greater(len(versions), 0)
for version in versions[:2]:
if not version:
# If it's the "unknown" version, just skip it.
continue
dashboard_pg.product_filter.select_version(version)
Assert.greater(total_messages, dashboard_pg.total_message_count)
Assert.equal(dashboard_pg.product_filter.selected_product, product)
Assert.equal(dashboard_pg.product_filter.selected_version, version)
Assert.equal(dashboard_pg.product_from_url, product)
Assert.equal(dashboard_pg.version_from_url, version)
Assert.greater(len(dashboard_pg.messages), 0)
dashboard_pg.product_filter.unselect_version(version)
dashboard_pg.product_filter.unselect_product(product)
| 39.295082
| 83
| 0.673759
|
import pytest
from unittestzero import Assert
from pages.dashboard import DashboardPage
class TestProductFilter(object):
@pytest.mark.nondestructive
def test_feedback_can_be_filtered_by_all_products_and_versions(self, mozwebqa):
dashboard_pg = DashboardPage(mozwebqa)
dashboard_pg.go_to_dashboard_page()
total_messages = dashboard_pg.total_message_count
products = dashboard_pg.product_filter.products
Assert.greater(len(products), 0)
for product in products[:2]:
if not product:
continue
dashboard_pg.product_filter.select_product(product)
Assert.greater(total_messages, dashboard_pg.total_message_count)
versions = dashboard_pg.product_filter.versions
Assert.greater(len(versions), 0)
for version in versions[:2]:
if not version:
# If it's the "unknown" version, just skip it.
continue
dashboard_pg.product_filter.select_version(version)
Assert.greater(total_messages, dashboard_pg.total_message_count)
Assert.equal(dashboard_pg.product_filter.selected_product, product)
Assert.equal(dashboard_pg.product_filter.selected_version, version)
Assert.equal(dashboard_pg.product_from_url, product)
Assert.equal(dashboard_pg.version_from_url, version)
Assert.greater(len(dashboard_pg.messages), 0)
dashboard_pg.product_filter.unselect_version(version)
dashboard_pg.product_filter.unselect_product(product)
| true
| true
|
790caab67268b9a7464cda38497d7cfb5ee81bd6
| 806
|
py
|
Python
|
template/analytics.py
|
JasonKeirstead/kestrel-analytics
|
4b8ab9b43ff3f73616e5a1a902f8c46bb00b83c0
|
[
"Apache-2.0"
] | 1
|
2021-05-28T02:56:15.000Z
|
2021-05-28T02:56:15.000Z
|
template/analytics.py
|
JasonKeirstead/kestrel-analytics
|
4b8ab9b43ff3f73616e5a1a902f8c46bb00b83c0
|
[
"Apache-2.0"
] | null | null | null |
template/analytics.py
|
JasonKeirstead/kestrel-analytics
|
4b8ab9b43ff3f73616e5a1a902f8c46bb00b83c0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import pandas as pd
# Kestrel analytics default paths (single input variable)
INPUT_DATA_PATH = "/data/input/0.parquet.gz"
OUTPUT_DATA_PATH = "/data/output/0.parquet.gz"
OUTPUT_DISPLAY = "/data/display/ret.html"
def analytics(dataframe):
# analyze data in dataframe
# provide insights or additional knowledge
newattr = ["newval" + str(i) for i in range(dataframe.shape[0])]
dataframe["x_new_attr"] = newattr
display = "<p>Hello World! -- a Kestrel analytics</p>"
# return the updated Kestrel variable
return dataframe, display
if __name__ == "__main__":
dfi = pd.read_parquet(INPUT_DATA_PATH)
dfo, disp = analytics(dfi)
dfo.to_parquet(OUTPUT_DATA_PATH, compression="gzip")
with open(OUTPUT_DISPLAY, "w") as o:
o.write(disp)
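# A sketch of how a built analytics image is typically invoked from a Kestrel
# huntflow (the image and variable names here are illustrative assumptions):
#
#   APPLY docker://mytool ON suspicious_procs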
| 28.785714
| 68
| 0.703474
|
import pandas as pd
INPUT_DATA_PATH = "/data/input/0.parquet.gz"
OUTPUT_DATA_PATH = "/data/output/0.parquet.gz"
OUTPUT_DISPLAY = "/data/display/ret.html"
def analytics(dataframe):
newattr = ["newval" + str(i) for i in range(dataframe.shape[0])]
dataframe["x_new_attr"] = newattr
display = "<p>Hello World! -- a Kestrel analytics</p>"
return dataframe, display
if __name__ == "__main__":
dfi = pd.read_parquet(INPUT_DATA_PATH)
dfo, disp = analytics(dfi)
dfo.to_parquet(OUTPUT_DATA_PATH, compression="gzip")
with open(OUTPUT_DISPLAY, "w") as o:
o.write(disp)
| true
| true
|
790caadb9885b423dd3032914819724eb9e60be4
| 5,922
|
py
|
Python
|
tests/gold_tests/pluginTest/cert_update/cert_update.test.py
|
zds05/trafficserver
|
258c69b7628f5a4b90488e147c244a582222b5c8
|
[
"Apache-2.0"
] | null | null | null |
tests/gold_tests/pluginTest/cert_update/cert_update.test.py
|
zds05/trafficserver
|
258c69b7628f5a4b90488e147c244a582222b5c8
|
[
"Apache-2.0"
] | null | null | null |
tests/gold_tests/pluginTest/cert_update/cert_update.test.py
|
zds05/trafficserver
|
258c69b7628f5a4b90488e147c244a582222b5c8
|
[
"Apache-2.0"
] | null | null | null |
'''
Test the cert_update plugin.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ports
Test.Summary = '''
Test cert_update plugin.
'''
Test.SkipUnless(
Condition.HasProgram("openssl", "Openssl need to be installed on system for this test to work")
)
# Set up origin server
server = Test.MakeOriginServer("server")
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
# Set up ATS
ts = Test.MakeATSProcess("ts", command="traffic_manager", enable_tls=1)
# Set up ssl files
ts.addSSLfile("ssl/server1.pem")
ts.addSSLfile("ssl/server2.pem")
ts.addSSLfile("ssl/client1.pem")
ts.addSSLfile("ssl/client2.pem")
# reserve port, attach it to 'ts' so it is released later
ports.get_port(ts, 's_server_port')
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'cert_update',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.url_remap.pristine_host_hdr': 1
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server1.pem ssl_key_name=server1.pem'
)
ts.Disk.remap_config.AddLines([
'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port),
'map https://foo.com https://127.0.0.1:{0}'.format(ts.Variables.s_server_port)
])
ts.Disk.sni_yaml.AddLines([
'sni:',
'- fqdn: "*foo.com"',
' client_cert: "client1.pem"',
])
# Set up plugin
Test.PreparePlugin(os.path.join(Test.Variables.AtsExampleDir, 'plugins', 'c-api', '.libs', 'cert_update.so'), ts)
# Server-Cert-Pre
# curl should see that Traffic Server presents bar.com cert from alice
tr = Test.AddTestRun("Server-Cert-Pre")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = (
'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port)
)
tr.Processes.Default.Streams.stderr = "gold/server-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
# Server-Cert-Update
tr = Test.AddTestRun("Server-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
'{0}/traffic_ctl plugin msg cert_update.server {1}/server2.pem'.format(ts.Variables.BINDIR, ts.Variables.SSLDir)
)
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server
# Server-Cert-After
# after use traffic_ctl to update server cert, curl should see bar.com cert from bob
tr = Test.AddTestRun("Server-Cert-After")
tr.Processes.Default.Env = ts.Env
tr.Command = 'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.Streams.stderr = "gold/server-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
# Client-Cert-Pre
# s_server should see client (Traffic Server) as alice.com
tr = Test.AddTestRun("Client-Cert-Pre")
s_server = tr.Processes.Process(
"s_server", "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(ts.Variables.SSLDir, ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
# Client-Cert-Update
tr = Test.AddTestRun("Client-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
'mv {0}/client2.pem {0}/client1.pem && {1}/traffic_ctl plugin msg cert_update.client {0}/client1.pem'.format(
ts.Variables.SSLDir, ts.Variables.BINDIR)
)
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server
# Client-Cert-After
# after use traffic_ctl to update client cert, s_server should see client (Traffic Server) as bob.com
tr = Test.AddTestRun("Client-Cert-After")
s_server = tr.Processes.Process(
"s_server", "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(ts.Variables.SSLDir, ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Processes.Default.Env = ts.Env
# Move client2.pem to replace client1.pem since cert path matters in client context mapping
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
| 41.412587
| 166
| 0.738433
|
import os
import ports
Test.Summary = '''
Test cert_update plugin.
'''
Test.SkipUnless(
Condition.HasProgram("openssl", "Openssl need to be installed on system for this test to work")
)
server = Test.MakeOriginServer("server")
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts = Test.MakeATSProcess("ts", command="traffic_manager", enable_tls=1)
ts.addSSLfile("ssl/server1.pem")
ts.addSSLfile("ssl/server2.pem")
ts.addSSLfile("ssl/client1.pem")
ts.addSSLfile("ssl/client2.pem")
ports.get_port(ts, 's_server_port')
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'cert_update',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.url_remap.pristine_host_hdr': 1
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server1.pem ssl_key_name=server1.pem'
)
ts.Disk.remap_config.AddLines([
'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port),
'map https://foo.com https://127.0.0.1:{0}'.format(ts.Variables.s_server_port)
])
ts.Disk.sni_yaml.AddLines([
'sni:',
'- fqdn: "*foo.com"',
' client_cert: "client1.pem"',
])
Test.PreparePlugin(os.path.join(Test.Variables.AtsExampleDir, 'plugins', 'c-api', '.libs', 'cert_update.so'), ts)
tr = Test.AddTestRun("Server-Cert-Pre")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = (
'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port)
)
tr.Processes.Default.Streams.stderr = "gold/server-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr = Test.AddTestRun("Server-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
'{0}/traffic_ctl plugin msg cert_update.server {1}/server2.pem'.format(ts.Variables.BINDIR, ts.Variables.SSLDir)
)
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server
tr = Test.AddTestRun("Server-Cert-After")
tr.Processes.Default.Env = ts.Env
tr.Command = 'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.Streams.stderr = "gold/server-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
tr = Test.AddTestRun("Client-Cert-Pre")
s_server = tr.Processes.Process(
"s_server", "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(ts.Variables.SSLDir, ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
tr = Test.AddTestRun("Client-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
'mv {0}/client2.pem {0}/client1.pem && {1}/traffic_ctl plugin msg cert_update.client {0}/client1.pem'.format(
ts.Variables.SSLDir, ts.Variables.BINDIR)
)
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server
tr = Test.AddTestRun("Client-Cert-After")
s_server = tr.Processes.Process(
"s_server", "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(ts.Variables.SSLDir, ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Processes.Default.Env = ts.Env
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
| true
| true
|
790cabdaf6f9ce0aff9ebb0c0baf32a2adc64dca
| 10,544
|
py
|
Python
|
tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.stack_dynamic_partitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSegmentStackOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
dict( # empty inputs
data=[],
partitions=[],
num_partitions=0,
expected=[],
expected_ragged_rank=1),
dict( # empty data, num_partitions>0
data=[],
partitions=[],
num_partitions=3,
expected=[[], [], []]),
dict( # 1D data, 1D partitions (docstring example)
data=['a', 'b', 'c', 'd', 'e'],
partitions=[3, 0, 2, 2, 3],
num_partitions=5,
expected=[['b'], [], ['c', 'd'], ['a', 'e'], []]),
dict( # 2D data, 1D partitions
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[2, 1, 2, 3],
num_partitions=4,
expected=[[], [['c', 'd']], [['a', 'b'], ['e', 'f']], [['g', 'h']]],
expected_ragged_rank=1),
dict( # 2D ragged data, 1D partitions
data=[['a'], ['b', 'c', 'd'], [], ['e', 'f']],
data_ragged_rank=1,
partitions=[2, 1, 2, 3],
num_partitions=4,
expected=[[], [['b', 'c', 'd']], [['a'], []], [['e', 'f']]],
expected_ragged_rank=2),
dict( # 2D data, 2D partitions
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[[3, 0], [2, 2], [4, 3], [2, 0]],
num_partitions=5,
expected=[['b', 'h'], [], ['c', 'd', 'g'], ['a', 'f'], ['e']]),
dict( # 2D ragged data, 2D ragged partitions
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[[3, 0], [2, 2], [4, 3], [2, 0]],
num_partitions=5,
expected=[['b', 'h'], [], ['c', 'd', 'g'], ['a', 'f'], ['e']]),
dict( # 3D data, 1d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]],
data_ragged_rank=0,
partitions=[1, 0],
num_partitions=2,
expected=[[[['e', 'f'], ['g', 'h']]], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=1),
dict( # 3D data (ragged_rank=1), 1d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
data_ragged_rank=1,
partitions=[2, 0],
num_partitions=3,
expected=[[[['e', 'f']]], [], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=2),
dict( # 3D data (ragged_rank=2), 1d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[2, 0],
num_partitions=3,
expected=[[[['e', 'f', 'g', 'h']]], [], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=3),
dict( # 3D data, 2d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]],
data_ragged_rank=0,
partitions=[[1, 0], [0, 3]],
segment_ids_ragged_rank=0,
num_partitions=4,
expected=[[['c', 'd'], ['e', 'f']], [['a', 'b']], [], [['g', 'h']]],
expected_ragged_rank=1),
dict( # 3D data (ragged_rank=1), 2d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
data_ragged_rank=1,
partitions=[[1, 0], [0]],
segment_ids_ragged_rank=1,
num_partitions=2,
expected=[[['c', 'd'], ['e', 'f']], [['a', 'b']]],
expected_ragged_rank=1),
dict( # 3D data (ragged_rank=2), 2d partitions
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[[1, 0], [0]],
segment_ids_ragged_rank=1,
num_partitions=3,
expected=[[['c', 'd'], ['e', 'f', 'g', 'h']], [['a', 'b']], []],
expected_ragged_rank=2),
dict( # 3D data (ragged_rank=2), 3d partitions (ragged_rank=2)
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[[[3, 0], [1, 2]], [[1, 1, 0, 1]]],
segment_ids_ragged_rank=2,
num_partitions=4,
expected=[['b', 'g'], ['c', 'e', 'f', 'h'], ['d'], ['a']]),
dict( # 0D data, 0D partitions
data='a',
partitions=3,
num_partitions=5,
expected=[[], [], [], ['a'], []]),
dict( # 1D data, 0D partitions
data=['a', 'b', 'c'],
partitions=3,
num_partitions=5,
expected=[[], [], [], [['a', 'b', 'c']], []],
expected_ragged_rank=1),
dict( # 2D data, 0D partitions
data=[['a', 'b'], ['c', 'd']],
data_ragged_rank=0,
partitions=3,
num_partitions=5,
expected=[[], [], [], [[['a', 'b'], ['c', 'd']]], []],
expected_ragged_rank=1),
dict( # 2D data (ragged_rank=1), 0D partitions
data=[['a', 'b'], ['c']],
data_ragged_rank=1,
partitions=3,
num_partitions=5,
expected=[[], [], [], [[['a', 'b'], ['c']]], []],
expected_ragged_rank=3),
])
def testRaggedSegmentStack(self,
data,
partitions,
num_partitions,
expected,
data_ragged_rank=None,
segment_ids_ragged_rank=None,
expected_ragged_rank=None):
for seg_dtype in [dtypes.int32, dtypes.int64]:
data_tensor = ragged_factory_ops.constant(
data, row_splits_dtype=seg_dtype, ragged_rank=data_ragged_rank)
segment_ids_tensor = ragged_factory_ops.constant(
partitions,
dtype=seg_dtype,
row_splits_dtype=seg_dtype,
ragged_rank=segment_ids_ragged_rank)
expected_tensor = ragged_factory_ops.constant(
expected,
row_splits_dtype=seg_dtype,
ragged_rank=expected_ragged_rank)
result = ragged_array_ops.stack_dynamic_partitions(
data_tensor, segment_ids_tensor, num_partitions)
self.assertAllEqual(result, expected_tensor)
# Check that it's equivalent to tf.stack(dynamic_partition(...)),
# where applicable.
if (data_ragged_rank == 0 and segment_ids_ragged_rank == 0 and
seg_dtype == dtypes.int32):
equiv = ragged_concat_ops.stack(
data_flow_ops.dynamic_partition(data_tensor, segment_ids_tensor,
num_partitions))
self.assertAllEqual(result, self.evaluate(equiv).to_list())
@parameterized.parameters([
dict(
data=['a', 'b', 'c'],
partitions=[2, -1, 0],
num_partitions=10,
error='must be non-negative'),
dict(
data=['a', 'b', 'c'],
partitions=[2, 10, 0],
num_partitions=1,
error='partitions must be less than num_partitions'),
dict(
data=['a', 'b', 'c'],
partitions=[2, 10, 0],
num_partitions=10,
error='partitions must be less than num_partitions'),
dict(
data=[['a', 'b'], ['c']],
partitions=[[2], [3, 0]],
num_partitions=10,
error='data and partitions have incompatible ragged shapes'),
])
def testRuntimeError(self, data, partitions, num_partitions, error):
data = ragged_factory_ops.constant(data)
partitions = ragged_factory_ops.constant(partitions, dtype=dtypes.int64)
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
error):
self.evaluate(
ragged_array_ops.stack_dynamic_partitions(data, partitions,
num_partitions))
@parameterized.parameters([
dict(
data=['a', 'b', 'c'],
partitions=[1, 2],
num_partitions=10,
error=r'Shapes \(2,\) and \(3,\) are incompatible'),
dict(
data=[['a', 'b'], ['c', 'd']],
partitions=[[1, 2, 3], [4, 5, 6]],
num_partitions=10,
error=r'Shapes \(2, 3\) and \(2, 2\) are incompatible'),
dict(
data=['a', 'b', 'c'],
partitions=[1, 2, 3],
num_partitions=[1, 2, 3],
error='must have rank 0'),
])
def testStaticError(self, data, partitions, num_partitions, error):
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
error):
ragged_array_ops.stack_dynamic_partitions(data, partitions,
num_partitions)
def testUnknownRankError(self):
if context.executing_eagerly():
return
partitions = array_ops.placeholder(dtypes.int32, None)
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
'partitions must have known rank'):
ragged_array_ops.stack_dynamic_partitions(['a', 'b', 'c'], partitions, 10)
if __name__ == '__main__':
googletest.main()
| 40.868217
| 80
| 0.519537
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSegmentStackOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
dict(
data=[],
partitions=[],
num_partitions=0,
expected=[],
expected_ragged_rank=1),
dict(
data=[],
partitions=[],
num_partitions=3,
expected=[[], [], []]),
dict(
data=['a', 'b', 'c', 'd', 'e'],
partitions=[3, 0, 2, 2, 3],
num_partitions=5,
expected=[['b'], [], ['c', 'd'], ['a', 'e'], []]),
dict(
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[2, 1, 2, 3],
num_partitions=4,
expected=[[], [['c', 'd']], [['a', 'b'], ['e', 'f']], [['g', 'h']]],
expected_ragged_rank=1),
dict(
data=[['a'], ['b', 'c', 'd'], [], ['e', 'f']],
data_ragged_rank=1,
partitions=[2, 1, 2, 3],
num_partitions=4,
expected=[[], [['b', 'c', 'd']], [['a'], []], [['e', 'f']]],
expected_ragged_rank=2),
dict(
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[[3, 0], [2, 2], [4, 3], [2, 0]],
num_partitions=5,
expected=[['b', 'h'], [], ['c', 'd', 'g'], ['a', 'f'], ['e']]),
dict(
data=[['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']],
data_ragged_rank=0,
partitions=[[3, 0], [2, 2], [4, 3], [2, 0]],
num_partitions=5,
expected=[['b', 'h'], [], ['c', 'd', 'g'], ['a', 'f'], ['e']]),
dict(
data=[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]],
data_ragged_rank=0,
partitions=[1, 0],
num_partitions=2,
expected=[[[['e', 'f'], ['g', 'h']]], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=1),
dict(
data=[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
data_ragged_rank=1,
partitions=[2, 0],
num_partitions=3,
expected=[[[['e', 'f']]], [], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=2),
dict(
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[2, 0],
num_partitions=3,
expected=[[[['e', 'f', 'g', 'h']]], [], [[['a', 'b'], ['c', 'd']]]],
expected_ragged_rank=3),
dict(
data=[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]],
data_ragged_rank=0,
partitions=[[1, 0], [0, 3]],
segment_ids_ragged_rank=0,
num_partitions=4,
expected=[[['c', 'd'], ['e', 'f']], [['a', 'b']], [], [['g', 'h']]],
expected_ragged_rank=1),
dict(
data=[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
data_ragged_rank=1,
partitions=[[1, 0], [0]],
segment_ids_ragged_rank=1,
num_partitions=2,
expected=[[['c', 'd'], ['e', 'f']], [['a', 'b']]],
expected_ragged_rank=1),
dict(
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[[1, 0], [0]],
segment_ids_ragged_rank=1,
num_partitions=3,
expected=[[['c', 'd'], ['e', 'f', 'g', 'h']], [['a', 'b']], []],
expected_ragged_rank=2),
dict(
data=[[['a', 'b'], ['c', 'd']], [['e', 'f', 'g', 'h']]],
data_ragged_rank=2,
partitions=[[[3, 0], [1, 2]], [[1, 1, 0, 1]]],
segment_ids_ragged_rank=2,
num_partitions=4,
expected=[['b', 'g'], ['c', 'e', 'f', 'h'], ['d'], ['a']]),
dict(
data='a',
partitions=3,
num_partitions=5,
expected=[[], [], [], ['a'], []]),
dict(
data=['a', 'b', 'c'],
partitions=3,
num_partitions=5,
expected=[[], [], [], [['a', 'b', 'c']], []],
expected_ragged_rank=1),
dict(
data=[['a', 'b'], ['c', 'd']],
data_ragged_rank=0,
partitions=3,
num_partitions=5,
expected=[[], [], [], [[['a', 'b'], ['c', 'd']]], []],
expected_ragged_rank=1),
dict(
data=[['a', 'b'], ['c']],
data_ragged_rank=1,
partitions=3,
num_partitions=5,
expected=[[], [], [], [[['a', 'b'], ['c']]], []],
expected_ragged_rank=3),
])
def testRaggedSegmentStack(self,
data,
partitions,
num_partitions,
expected,
data_ragged_rank=None,
segment_ids_ragged_rank=None,
expected_ragged_rank=None):
for seg_dtype in [dtypes.int32, dtypes.int64]:
data_tensor = ragged_factory_ops.constant(
data, row_splits_dtype=seg_dtype, ragged_rank=data_ragged_rank)
segment_ids_tensor = ragged_factory_ops.constant(
partitions,
dtype=seg_dtype,
row_splits_dtype=seg_dtype,
ragged_rank=segment_ids_ragged_rank)
expected_tensor = ragged_factory_ops.constant(
expected,
row_splits_dtype=seg_dtype,
ragged_rank=expected_ragged_rank)
result = ragged_array_ops.stack_dynamic_partitions(
data_tensor, segment_ids_tensor, num_partitions)
self.assertAllEqual(result, expected_tensor)
# where applicable.
if (data_ragged_rank == 0 and segment_ids_ragged_rank == 0 and
seg_dtype == dtypes.int32):
equiv = ragged_concat_ops.stack(
data_flow_ops.dynamic_partition(data_tensor, segment_ids_tensor,
num_partitions))
self.assertAllEqual(result, self.evaluate(equiv).to_list())
@parameterized.parameters([
dict(
data=['a', 'b', 'c'],
partitions=[2, -1, 0],
num_partitions=10,
error='must be non-negative'),
dict(
data=['a', 'b', 'c'],
partitions=[2, 10, 0],
num_partitions=1,
error='partitions must be less than num_partitions'),
dict(
data=['a', 'b', 'c'],
partitions=[2, 10, 0],
num_partitions=10,
error='partitions must be less than num_partitions'),
dict(
data=[['a', 'b'], ['c']],
partitions=[[2], [3, 0]],
num_partitions=10,
error='data and partitions have incompatible ragged shapes'),
])
def testRuntimeError(self, data, partitions, num_partitions, error):
data = ragged_factory_ops.constant(data)
partitions = ragged_factory_ops.constant(partitions, dtype=dtypes.int64)
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
error):
self.evaluate(
ragged_array_ops.stack_dynamic_partitions(data, partitions,
num_partitions))
@parameterized.parameters([
dict(
data=['a', 'b', 'c'],
partitions=[1, 2],
num_partitions=10,
error=r'Shapes \(2,\) and \(3,\) are incompatible'),
dict(
data=[['a', 'b'], ['c', 'd']],
partitions=[[1, 2, 3], [4, 5, 6]],
num_partitions=10,
error=r'Shapes \(2, 3\) and \(2, 2\) are incompatible'),
dict(
data=['a', 'b', 'c'],
partitions=[1, 2, 3],
num_partitions=[1, 2, 3],
error='must have rank 0'),
])
def testStaticError(self, data, partitions, num_partitions, error):
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
error):
ragged_array_ops.stack_dynamic_partitions(data, partitions,
num_partitions)
def testUnknownRankError(self):
if context.executing_eagerly():
return
partitions = array_ops.placeholder(dtypes.int32, None)
with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
'partitions must have known rank'):
ragged_array_ops.stack_dynamic_partitions(['a', 'b', 'c'], partitions, 10)
if __name__ == '__main__':
googletest.main()
| true
| true
|
790cabf97033d15b56e34c6ad85ac3cc081dc2d1
| 4,054
|
py
|
Python
|
codelabs/spark-bigquery/backfill.py
|
aosterloh/cloud-dataproc
|
ceca098d6e77e6d2b5147ff79bc69be9a035c296
|
[
"Apache-2.0"
] | null | null | null |
codelabs/spark-bigquery/backfill.py
|
aosterloh/cloud-dataproc
|
ceca098d6e77e6d2b5147ff79bc69be9a035c296
|
[
"Apache-2.0"
] | 9
|
2019-12-16T22:20:20.000Z
|
2022-02-10T01:24:30.000Z
|
spark-bigquery/backfill.py
|
shahzadafarhad/cloud-dataproc
|
afca20a961e18c250d2d3fda4c9789afc3205b8c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code accompanies this codelab: https://codelabs.developers.google.com/codelabs/pyspark-bigquery/.
# This is a script for backfilling a set of data from Reddit into Google Cloud Storage
# Python imports
import re
import time
import sys
# A Spark Session is how we interact with Spark SQL to create Dataframes
from pyspark.sql import SparkSession
# PySpark function for replacing characters using a regex. We'll use this to remove newline characters.
from pyspark.sql.functions import regexp_replace, col
# Library for interacting with Google Cloud Storage
from google.cloud import storage
# This will help catch some PySpark errors
from py4j.protocol import Py4JJavaError
# Create a SparkSession under the name "reddit". Viewable via the Spark UI
spark = SparkSession.builder.appName("reddit").getOrCreate()
# Establish a set of years and months to iterate over
year = sys.argv[1]
month = sys.argv[2]
bucket_name = sys.argv[3]
# Establish a subreddit to process
subreddit = 'food'
# Set Google Cloud Storage temp location
path = "tmp" + str(time.time())
# Keep track of all tables accessed via the job
tables_read = []
# In the form of <project-id>.<dataset>.<table>
table = f"fh-bigquery.reddit_posts.{year}_{month}"
# If the table doesn't exist we will simply continue and not
# log it into our "tables_read" list
try:
df = spark.read.format('bigquery').option('table', table).load()
except Py4JJavaError:
print(f"{table} does not exist. ")
sys.exit(0)
print(f"Processing {table}.")
# Select the "title", "selftext" and "created_utc" columns of the designated subreddit and
# replace newline characters with a single space
subreddit_timestamps = (
df
.select(
regexp_replace(col("title"), "\n", " "),
regexp_replace(col("selftext"), "\n", " "),
"created_utc"
)
.where(df.subreddit == subreddit)
)
tmp_output_path = "gs://" + bucket_name + "/" + path + "/" + year + "/" + month
# Write output to our temp GCS bucket. Spark jobs can be written out to multiple files
# and partitions. By using coalesce, we ensure the output is consolidated to a single file.
# We then use .options to tell Spark to write out in a gzip format, and .csv to do the write.
(
subreddit_timestamps
# Data can get written out to multiple files / partition.
# This ensures it will only write to 1.
.coalesce(1)
.write
# Gzip the output file
.options(codec="org.apache.hadoop.io.compress.GzipCodec")
# Write out to csv
.csv(tmp_output_path)
)
# Lastly, we'll move the temp file to a new bucket and delete the temp directory.
regex = "part-[0-9a-zA-Z\-]*.csv.gz"
new_path = "/".join(["reddit_posts", year, month, subreddit + ".csv.gz"])
# Create the storage client
storage_client = storage.Client()
# Create an object representing the original bucket
source_bucket = storage_client.get_bucket(bucket_name)
# Grab all files in the source bucket. Typically there is also a _SUCCESS file
# inside the directory, so we'll make sure to find our single csv file.
buckets = list(source_bucket.list_blobs(prefix=path))
for bucket in buckets:
name = bucket.name
# Locate the file that represents our partition. Copy to new location and
# delete temp directory.
if re.search(regex, name):
blob = source_bucket.blob(name)
source_bucket.copy_blob(blob, source_bucket, new_path)
blob.delete()
| 35.561404
| 105
| 0.715836
|
import re
import time
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import regexp_replace, col
# Library for interacting with Google Cloud Storage
from google.cloud import storage
# This will help catch some PySpark errors
from py4j.protocol import Py4JJavaError
# Create a SparkSession under the name "reddit". Viewable via the Spark UI
spark = SparkSession.builder.appName("reddit").getOrCreate()
# Establish a set of years and months to iterate over
year = sys.argv[1]
month = sys.argv[2]
bucket_name = sys.argv[3]
# Establish a subreddit to process
subreddit = 'food'
# Set Google Cloud Storage temp location
path = "tmp" + str(time.time())
# Keep track of all tables accessed via the job
tables_read = []
# In the form of <project-id>.<dataset>.<table>
table = f"fh-bigquery.reddit_posts.{year}_{month}"
# If the table doesn't exist we will simply continue and not
try:
df = spark.read.format('bigquery').option('table', table).load()
except Py4JJavaError:
print(f"{table} does not exist. ")
sys.exit(0)
print(f"Processing {table}.")
subreddit_timestamps = (
df
.select(
regexp_replace(col("title"), "\n", " "),
regexp_replace(col("selftext"), "\n", " "),
"created_utc"
)
.where(df.subreddit == subreddit)
)
tmp_output_path = "gs://" + bucket_name + "/" + path + "/" + year + "/" + month
(
subreddit_timestamps
.coalesce(1)
.write
.options(codec="org.apache.hadoop.io.compress.GzipCodec")
.csv(tmp_output_path)
)
regex = "part-[0-9a-zA-Z\-]*.csv.gz"
new_path = "/".join(["reddit_posts", year, month, subreddit + ".csv.gz"])
# Create the storage client
storage_client = storage.Client()
# Create an object representing the original bucket
source_bucket = storage_client.get_bucket(bucket_name)
# Grab all files in the source bucket. Typically there is also a _SUCCESS file
# inside the directory, so we'll make sure to find our single csv file.
buckets = list(source_bucket.list_blobs(prefix=path))
for bucket in buckets:
name = bucket.name
if re.search(regex, name):
blob = source_bucket.blob(name)
source_bucket.copy_blob(blob, source_bucket, new_path)
blob.delete()
| true
| true
|
790cad625b83752558e458407119f7a5c61591ec
| 20,194
|
py
|
Python
|
inferlo/generic/libdai_bp.py
|
InferLO/inferlo
|
a65efce721d7f99d2f274dd94a1aaf7ca159e944
|
[
"Apache-2.0"
] | 1
|
2022-01-27T18:44:07.000Z
|
2022-01-27T18:44:07.000Z
|
inferlo/generic/libdai_bp.py
|
InferLO/inferlo
|
a65efce721d7f99d2f274dd94a1aaf7ca159e944
|
[
"Apache-2.0"
] | 3
|
2022-01-23T18:02:30.000Z
|
2022-01-27T23:10:51.000Z
|
inferlo/generic/libdai_bp.py
|
InferLO/inferlo
|
a65efce721d7f99d2f274dd94a1aaf7ca159e944
|
[
"Apache-2.0"
] | 1
|
2021-09-03T06:12:57.000Z
|
2021-09-03T06:12:57.000Z
|
# Copyright (c) 2020, The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE file.
from __future__ import annotations
import random
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Callable, Dict
import numpy as np
from inferlo.base.factors.discrete_factor import DiscreteFactor
from inferlo.base import InferenceResult
if TYPE_CHECKING:
from inferlo import GraphModel
recordSentMessages = True
class Prob:
"""Equivalent of dai::Prob.
Wrapper around a vector - represents probability distribution.
"""
@staticmethod
def uniform(n):
"""Creates unifom probability distribution."""
return Prob.same_value(n, 1.0 / n)
@staticmethod
def same_value(n: int, val: float):
"""Creates vector filled with the same value."""
return Prob(np.ones(n, dtype=np.float64) * val)
def __init__(self, p: np.ndarray):
self.p = p
def fill(self, x):
"""Sets all entries to x."""
self.p = np.ones_like(self.p) * x
def clone(self):
"""Makes a copy."""
return Prob(np.array(self.p))
def __imul__(self, other):
self.p *= other.p
return self
def __iadd__(self, other):
self.p += other.p
return self
def normalize(self):
"""Normalize distribution."""
self.p /= np.sum(self.p)
def entropy(self) -> float:
"""Calculate entropy of the distribution."""
return - np.sum(self.p * np.log(self.p))
def __str__(self):
return str(self.p)
def dist_kl(p: Prob, q: Prob):
"""Kullback-Leibler divergence between two probability distributions."""
# The (p.p == 0) masks guard log(0): where p is zero the added 1 keeps the
# log finite, and those terms vanish anyway when multiplied by p.p.
kl_div = p.p * (np.log(p.p + (p.p == 0)) - np.log(q.p + (p.p == 0)))
return np.sum(kl_div)
def dist_linf(p: Prob, q: Prob):
"""Distance between two probability distributions in L_infinity norm."""
return np.max(np.abs(p.p - q.p))
@dataclass
class Neighbor:
"""Describes the neighbor relationship of two nodes in a graph.
Corresponds to dai::Neighbor.
"""
# Corresponds to the index of this Neighbor entry in the vector of
# neighbors.
iter: int
# Contains the absolute index of the neighboring node.
node: int
# Contains the "dual" index (i.e., the index of this node in the Neighbors
# vector of the neighboring node)
dual: int
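# Example of the dual indexing (a sketch): if factor F0 is the second entry
# in nbV[3] (iter=1) and variable 3 is the first entry in nbF[0] (dual=0),
# then nbV[3][1] == Neighbor(iter=1, node=0, dual=0) and, symmetrically,
# nbF[0][0] == Neighbor(iter=0, node=3, dual=1).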
@dataclass
class EdgeProp:
"""Type used for storing edge properties."""
index: np.ndarray # Index cached for this edge.
message: Prob # Old message living on this edge.
new_message: Prob # New message living on this edge
residual: float # Residual for this edge
class LDFactor:
"""Equivalent of dai::Factor.
Consists of set of variables and flattened values assigned to all var
combinations. Variables are assigned like in Inferlo, but tensor is
transposed before flattening.
"""
def __init__(self, model: GraphModel, var_idx: List[int], p: Prob):
self.model = model
self.var_idx = var_idx
self.p = p
@staticmethod
def uniform(model: GraphModel, var_idx: List[int]):
"""Creates factor defining uniform distribution."""
total_domain_size = 1
for i in var_idx:
total_domain_size *= model.get_variable(i).domain.size()
return LDFactor(model, var_idx, Prob.uniform(total_domain_size))
@staticmethod
def from_inferlo_factor(f: DiscreteFactor):
"""Converts inferlo.DiscreteFactor to LDFactor."""
rev_perm = list(range(len(f.var_idx)))[::-1]
prob = f.values.transpose(rev_perm).reshape(-1)
return LDFactor(f.model, f.var_idx, Prob(prob))
def to_inferlo_factor(self) -> DiscreteFactor:
"""Converts LDFactor to inferlo.DiscreteFactor."""
sizes = [self.model.get_variable(i).domain.size()
for i in self.var_idx[::-1]]
libdai_tensor = self.p.p.reshape(sizes)
rev_perm = list(range(len(self.var_idx)))[::-1]
inferlo_tensor = libdai_tensor.transpose(rev_perm)
return DiscreteFactor(self.model, self.var_idx, inferlo_tensor)
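# Layout illustration (a sketch, not from the original source): for a factor
# over variables [v0, v1] with domain sizes (2, 3), an inferlo tensor T of
# shape (2, 3) corresponds to the libDAI-style flat vector
# T.transpose(1, 0).reshape(-1), so the index of v0 varies fastest:
#
#   T = np.arange(6).reshape(2, 3)        # T[i0, i1]
#   flat = T.transpose(1, 0).reshape(-1)  # [0, 3, 1, 4, 2, 5]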
def combine_with_factor(self, other: LDFactor,
func: Callable[[float, float], float]):
"""Applies binary function to two factors."""
# Check that variables of the other factor are subset of variables of
# the given factor.
for i in other.var_idx:
assert i in self.var_idx
# Now, update every value of given factor with corresponding value of
# the other factor.
for idx in range(len(self.p.p)):
j = other._encode_value_index(self._decode_value_index(idx))
self.p.p[idx] = func(self.p.p[idx], other.p.p[j])
return self
def __iadd__(self, other: LDFactor):
return self.combine_with_factor(other, lambda x, y: x + y)
def __imul__(self, other: LDFactor):
return self.combine_with_factor(other, lambda x, y: x * y)
def marginal(self, new_var_idx, normed=True) -> LDFactor:
"""Sums factor over some variables."""
result = self.to_inferlo_factor().marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
def max_marginal(self, new_var_idx, normed=True) -> LDFactor:
"""Eleiminates certain variables by finding maximum."""
result = self.to_inferlo_factor().max_marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
def clone(self):
"""Makes a copy of this factor."""
return LDFactor(self.model, self.var_idx, self.p.clone())
def _decode_value_index(self, idx):
"""Returns dict from variable id to variable value."""
ans = dict()
for var_id in self.var_idx:
size = self.model.get_variable(var_id).domain.size()
ans[var_id] = idx % size
idx //= size
return ans
def _encode_value_index(self, var_values: Dict[int, int]):
ans = 0
base = 1
for var_id in self.var_idx:
size = self.model.get_variable(var_id).domain.size()
ans += base * var_values[var_id]
base *= size
return ans
def __str__(self):
return "%s %s" % (self.var_idx, self.p.p)
class BP:
"""Belief propagation algorithm, equivalent to dai::BP.
This class is ported from libDAI's dai::BP class. It runs belief
propagation algorithm for graphical model with discrete variables with
arbitrary factor graph.
At the moment MAXPROD algorithm (for finding MAP state) is not supported.
Use BP.infer() to perform inference.
"""
@staticmethod
def infer(model, options=None):
"""Runs inference BP algorithm for given model.
Supports all options which libdai::BP supports. Refer to libDAI
documentation for options descritpion.
"""
if options is None:
options = {'tol': 1e-9, 'logdomain': 0, 'updates': 'SEQRND'}
inf_alg = BP(model, options)
inf_alg.init()
inf_alg.run()
return InferenceResult(inf_alg.log_z(), inf_alg.marg_prob())
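# A minimal usage sketch (assumes an inferlo GraphModel named `model` has
# already been built; the option values shown are illustrative):
#
#   result = BP.infer(model, options={'tol': 1e-9, 'logdomain': 0,
#                                     'updates': 'SEQRND'})
#
# `result` is an inferlo InferenceResult carrying the log partition function
# estimate and the per-variable marginal probabilities.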
def __init__(self, model: GraphModel, props: Dict[str, str]):
# Stores all edge properties
self._edges: List[List[EdgeProp]] = []
# Maximum difference between variable beliefs encountered so far
self._maxdiff = 0.0
# Number of iterations needed
self._iters = 0
# The history of message updates (only recorded if recordSentMessages
# is True)
self._sentMessages = []
# Stores variable beliefs of previous iteration
self._oldBeliefsV: List[LDFactor] = []
# Stores factor beliefs of previous iteration
self._old_beliefs_f: List[LDFactor] = []
# Stores the update schedule
self._update_seq = []
self.model = model
self.factors = [
LDFactor.from_inferlo_factor(
DiscreteFactor.from_factor(f)) for f in model.get_factors()]
self.nrVars = model.num_variables
self.nrFactors = len(self.factors)
# Prepare Neighbors.
# For every variable - factors, referencing it.
self.nbV: List[List[Neighbor]] = [[] for _ in range(self.nrVars)]
# For every factor - variables it references.
self.nbF: List[List[Neighbor]] = [[] for _ in range(self.nrFactors)]
for factor_id in range(len(self.factors)):
factor = self.factors[factor_id]
for var_iter_index in range(len(factor.var_idx)):
var_id = factor.var_idx[var_iter_index]
nbv_len = len(self.nbV[var_id])
nbf_len = len(self.nbF[factor_id])
assert var_iter_index == nbf_len
self.nbV[var_id].append(
Neighbor(
iter=nbv_len,
node=factor_id,
dual=nbf_len))
self.nbF[factor_id].append(
Neighbor(
iter=nbf_len,
node=var_id,
dual=nbv_len))
# Parse properties.
self.logdomain = bool(int(props.get('logdomain', 0)))
self.updates = props['updates']
self.inference = props.get('inference', 'SUMPROD')
self.verbose = int(props.get('verbose', 0))
self.damping = float(props.get('damping', 0.0))
self.maxiter = int(props.get('maxiter', 10000))
self.maxtime = float(props.get('maxtime', np.inf))
self.tol = float(props['tol'])
self._construct()
def _construct(self):
"""Helper function for constructors."""
# Create edge properties
self._edges = []
for i in range(self.nrVars):
self._edges.append([])
for _ in self.nbV[i]:
size = self._var_size(i)
new_ep = EdgeProp(
index=None,
message=Prob.uniform(size),
new_message=Prob.uniform(size),
residual=0.0)
self._edges[i].append(new_ep)
# Create old beliefs
self._oldBeliefsV = []
for i in range(self.nrVars):
self._oldBeliefsV.append(LDFactor.uniform(self.model, [i]))
self._old_beliefs_f = []
for ii in range(self.nrFactors):
self._old_beliefs_f.append(
LDFactor.uniform(
self.model,
self.factors[ii].var_idx))
# Create update sequence
self._update_seq = []
for ii in range(self.nrFactors):
for i in self.nbF[ii]:
self._update_seq.append((i.node, i.dual))
def init(self):
"""Initializes messages awith default values."""
c = 0.0 if self.logdomain else 1.0
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._edges[i][ii.iter].message.fill(c)
self._edges[i][ii.iter].new_message.fill(c)
if self.updates == 'SEQMAX':
self._update_residual(i, ii.iter, 0.0)
self._iters = 0
def find_max_residual(self):
"""Find max residual."""
# TODO: optimize with a lookup table.
max_r = -np.inf
best_edge = None
for i in range(self.nrVars):
for _I in range(len(self.nbV[i])):
if self._edges[i][_I].residual > max_r:
max_r = self._edges[i][_I].residual
best_edge = i, _I
return best_edge
def _calc_incoming_message_product(
self,
ii: int,
without_i: bool,
i: int) -> Prob:
"""Calculate the product of factor \a I and the incoming messages.
If without_i == True, the message coming from variable i is omitted
from the product.
This function is used by calc_new_message and calc_belief_f.
"""
f_prod = self.factors[ii].clone()
if self.logdomain:
f_prod.p.p = np.log(f_prod.p.p)
# Calculate product of incoming messages and factor I
for j in self.nbF[ii]:
if without_i and (j.node == i):
continue
# prod_j will be the product of messages coming into j
size = self._var_size(j.node)
default_val = 0.0 if self.logdomain else 1.0
prod_j = Prob.same_value(size, default_val)
for J in self.nbV[j.node]:
if J.node != ii: # for all J in nb(j) \ I
if self.logdomain:
prod_j += self._edges[j.node][J.iter].message
else:
prod_j *= self._edges[j.node][J.iter].message
            # multiply f_prod by prod_j
if self.logdomain:
f_prod += LDFactor(self.model, [j.node], prod_j)
else:
f_prod *= LDFactor(self.model, [j.node], prod_j)
return f_prod.p
def _calc_new_message(self, i: int, _I: int):
# calculate updated message I->i
ii = self.nbV[i][_I].node
if len(self.factors[ii].var_idx) == 1: # optimization
marg = self.factors[ii].p.clone()
else:
Fprod = self.factors[ii].clone()
Fprod.p = self._calc_incoming_message_product(ii, True, i)
if self.logdomain:
Fprod.p.p = np.exp(Fprod.p.p - np.max(Fprod.p.p))
# Marginalize onto i
if self.inference == 'SUMPROD':
marg = Fprod.marginal([i]).p
else:
marg = Fprod.max_marginal([i]).p
# Store result
if self.logdomain:
self._edges[i][_I].new_message = Prob(np.log(marg.p))
else:
self._edges[i][_I].new_message = marg
# Update the residual if necessary
if self.updates == 'SEQMAX':
self._update_residual(
i,
_I,
dist_linf(
self._edges[i][_I].new_message,
self._edges[i][_I].message))
    # BP.run does not check for NaNs for performance reasons.
    # Somehow NaNs do not often occur in BP...
def run(self):
"""Runs BP algorithm."""
tic = time.time()
# Do several passes over the network until maximum number of iterations
# has been reached or until the maximum belief difference is smaller
# than tolerance.
max_diff = np.inf
while (self._iters < self.maxiter) and (
max_diff > self.tol) and (time.time() - tic) < self.maxtime:
if self.updates == 'SEQMAX':
if self._iters == 0:
# do the first pass
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
                # Maximum-Residual BP [EMK06]
for _ in range(len(self._update_seq)):
# Update the message with the largest residual.
i, _I = self.find_max_residual()
self._update_message(i, _I)
# I->i has been updated, which means that residuals for all
# J->j with J in nb[i]\I and j in nb[J]\i have to be
# updated
for J in self.nbV[i]:
if J.iter != _I:
for j in self.nbF[J.node]:
_J = j.dual
if j != i:
self._calc_new_message(j.node, _J)
elif self.updates == 'PARALL':
# Parallel updates
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._update_message(i, ii.iter)
else:
# Sequential updates
if self.updates == 'SEQRND':
random.shuffle(self._update_seq)
for e in self._update_seq:
self._calc_new_message(e[0], e[1])
self._update_message(e[0], e[1])
# Calculate new beliefs and compare with old ones
max_diff = -np.inf
for i in range(self.nrVars):
b = self._belief_v(i).clone()
max_diff = max(max_diff,
dist_linf(b.p, self._oldBeliefsV[i].p))
self._oldBeliefsV[i] = b
for ii in range(self.nrFactors):
b = self._belief_f(ii).clone()
max_diff = max(max_diff,
dist_linf(b.p, self._old_beliefs_f[ii].p))
self._old_beliefs_f[ii] = b
self._iters += 1
if max_diff > self._maxdiff:
self._maxdiff = max_diff
return max_diff
def _calc_belief_v(self, i: int) -> Prob:
p = Prob.same_value(self.model.get_variable(i).domain.size(),
0.0 if self.logdomain else 1.0)
for ii in self.nbV[i]:
if self.logdomain:
p += self._edges[i][ii.iter].new_message
else:
p *= self._edges[i][ii.iter].new_message
return p
def _belief_v(self, i: int) -> LDFactor:
p = self._calc_belief_v(i)
if self.logdomain:
p.p = np.exp(p.p - np.max(p.p))
p.normalize()
return LDFactor(self.model, [i], p)
def _belief_f(self, ii) -> LDFactor:
p = self._calc_belief_f(ii)
if self.logdomain:
p.p = np.exp(p.p - np.max(p.p))
p.normalize()
return LDFactor(self.model, self.factors[ii].var_idx, p)
def _calc_belief_f(self, ii: int) -> Prob:
return self._calc_incoming_message_product(ii, False, 0)
def log_z(self) -> float:
"""Calculates logarithm of the partition function."""
ans = 0.0
for i in range(self.nrVars):
ans += (1.0 - len(self.nbV[i])) * self._belief_v(i).p.entropy()
for ii in range(self.nrFactors):
ans -= dist_kl(self._belief_f(ii).p, self.factors[ii].p)
return ans
def marg_prob(self) -> np.ndarray:
"""Calculates marginal probabilities."""
max_domain_size = np.max([self._var_size(i)
for i in range(self.nrVars)])
ans = np.zeros((self.nrVars, max_domain_size), dtype=np.float64)
for var_id in range(self.nrVars):
ans[var_id, 0:self._var_size(var_id)] = self._belief_v(var_id).p.p
return ans
def _var_size(self, var_idx):
return self.model.get_variable(var_idx).domain.size()
def _update_message(self, i: int, _I: int):
if recordSentMessages:
self._sentMessages.append((i, _I))
if self.damping == 0.0:
self._edges[i][_I].message = self._edges[i][_I].new_message.clone()
if self.updates == 'SEQMAX':
self._update_residual(i, _I, 0.0)
else:
d = self.damping
old_msg = self._edges[i][_I].message.p
new_msg = self._edges[i][_I].new_message.p
if self.logdomain:
self._edges[i][_I].message.p = (
(old_msg * d) + (new_msg * (1.0 - d)))
else:
self._edges[i][_I].message.p = (
(old_msg ** d) * (new_msg ** (1.0 - d)))
if self.updates == 'SEQMAX':
new_res = dist_linf(
self._edges[i][_I].new_message,
self._edges[i][_I].message)
self._update_residual(i, _I, new_res)
def _update_residual(self, i, _I, r):
self._edges[i][_I].residual = r
| 35.932384
| 79
| 0.561305
|
from __future__ import annotations
import random
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Callable, Dict
import numpy as np
from inferlo.base.factors.discrete_factor import DiscreteFactor
from inferlo.base import InferenceResult
if TYPE_CHECKING:
from inferlo import GraphModel
recordSentMessages = True
class Prob:
@staticmethod
def uniform(n):
return Prob.same_value(n, 1.0 / n)
@staticmethod
def same_value(n: int, val: float):
return Prob(np.ones(n, dtype=np.float64) * val)
def __init__(self, p: np.ndarray):
self.p = p
def fill(self, x):
self.p = np.ones_like(self.p) * x
def clone(self):
return Prob(np.array(self.p))
def __imul__(self, other):
self.p *= other.p
return self
def __iadd__(self, other):
self.p += other.p
return self
def normalize(self):
self.p /= np.sum(self.p)
def entropy(self) -> float:
return - np.sum(self.p * np.log(self.p))
def __str__(self):
return str(self.p)
def dist_kl(p: Prob, q: Prob):
    kl_div = p.p * (np.log(p.p + (p.p == 0)) - np.log(q.p + (p.p == 0)))
return np.sum(kl_div)
def dist_linf(p: Prob, q: Prob):
return np.max(np.abs(p.p - q.p))
@dataclass
class Neighbor:
iter: int
node: int
dual: int
@dataclass
class EdgeProp:
index: np.ndarray
message: Prob
new_message: Prob
residual: float
class LDFactor:
def __init__(self, model: GraphModel, var_idx: List[int], p: Prob):
self.model = model
self.var_idx = var_idx
self.p = p
@staticmethod
def uniform(model: GraphModel, var_idx: List[int]):
total_domain_size = 1
for i in var_idx:
total_domain_size *= model.get_variable(i).domain.size()
return LDFactor(model, var_idx, Prob.uniform(total_domain_size))
@staticmethod
def from_inferlo_factor(f: DiscreteFactor):
rev_perm = list(range(len(f.var_idx)))[::-1]
prob = f.values.transpose(rev_perm).reshape(-1)
return LDFactor(f.model, f.var_idx, Prob(prob))
def to_inferlo_factor(self) -> DiscreteFactor:
sizes = [self.model.get_variable(i).domain.size()
for i in self.var_idx[::-1]]
libdai_tensor = self.p.p.reshape(sizes)
rev_perm = list(range(len(self.var_idx)))[::-1]
inferlo_tensor = libdai_tensor.transpose(rev_perm)
return DiscreteFactor(self.model, self.var_idx, inferlo_tensor)
def combine_with_factor(self, other: LDFactor,
func: Callable[[float, float], float]):
for i in other.var_idx:
assert i in self.var_idx
for idx in range(len(self.p.p)):
j = other._encode_value_index(self._decode_value_index(idx))
self.p.p[idx] = func(self.p.p[idx], other.p.p[j])
return self
def __iadd__(self, other: LDFactor):
return self.combine_with_factor(other, lambda x, y: x + y)
def __imul__(self, other: LDFactor):
return self.combine_with_factor(other, lambda x, y: x * y)
def marginal(self, new_var_idx, normed=True) -> LDFactor:
result = self.to_inferlo_factor().marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
def max_marginal(self, new_var_idx, normed=True) -> LDFactor:
result = self.to_inferlo_factor().max_marginal(new_var_idx)
result = LDFactor.from_inferlo_factor(result)
if normed:
result.p.normalize()
return result
def clone(self):
return LDFactor(self.model, self.var_idx, self.p.clone())
def _decode_value_index(self, idx):
ans = dict()
for var_id in self.var_idx:
size = self.model.get_variable(var_id).domain.size()
ans[var_id] = idx % size
idx //= size
return ans
def _encode_value_index(self, var_values: Dict[int, int]):
ans = 0
base = 1
for var_id in self.var_idx:
size = self.model.get_variable(var_id).domain.size()
ans += base * var_values[var_id]
base *= size
return ans
def __str__(self):
return "%s %s" % (self.var_idx, self.p.p)
class BP:
@staticmethod
def infer(model, options=None):
if options is None:
options = {'tol': 1e-9, 'logdomain': 0, 'updates': 'SEQRND'}
inf_alg = BP(model, options)
inf_alg.init()
inf_alg.run()
return InferenceResult(inf_alg.log_z(), inf_alg.marg_prob())
def __init__(self, model: GraphModel, props: Dict[str, str]):
self._edges: List[List[EdgeProp]] = []
self._maxdiff = 0.0
self._iters = 0
self._sentMessages = []
self._oldBeliefsV: List[LDFactor] = []
self._old_beliefs_f: List[LDFactor] = []
self._update_seq = []
self.model = model
self.factors = [
LDFactor.from_inferlo_factor(
DiscreteFactor.from_factor(f)) for f in model.get_factors()]
self.nrVars = model.num_variables
self.nrFactors = len(self.factors)
self.nbV: List[List[Neighbor]] = [[] for _ in range(self.nrVars)]
self.nbF: List[List[Neighbor]] = [[] for _ in range(self.nrFactors)]
for factor_id in range(len(self.factors)):
factor = self.factors[factor_id]
for var_iter_index in range(len(factor.var_idx)):
var_id = factor.var_idx[var_iter_index]
nbv_len = len(self.nbV[var_id])
nbf_len = len(self.nbF[factor_id])
assert var_iter_index == nbf_len
self.nbV[var_id].append(
Neighbor(
iter=nbv_len,
node=factor_id,
dual=nbf_len))
self.nbF[factor_id].append(
Neighbor(
iter=nbf_len,
node=var_id,
dual=nbv_len))
self.logdomain = bool(int(props.get('logdomain', 0)))
self.updates = props['updates']
self.inference = props.get('inference', 'SUMPROD')
self.verbose = int(props.get('verbose', 0))
self.damping = float(props.get('damping', 0.0))
self.maxiter = int(props.get('maxiter', 10000))
self.maxtime = float(props.get('maxtime', np.inf))
self.tol = float(props['tol'])
self._construct()
def _construct(self):
self._edges = []
for i in range(self.nrVars):
self._edges.append([])
for _ in self.nbV[i]:
size = self._var_size(i)
new_ep = EdgeProp(
index=None,
message=Prob.uniform(size),
new_message=Prob.uniform(size),
residual=0.0)
self._edges[i].append(new_ep)
self._oldBeliefsV = []
for i in range(self.nrVars):
self._oldBeliefsV.append(LDFactor.uniform(self.model, [i]))
self._old_beliefs_f = []
for ii in range(self.nrFactors):
self._old_beliefs_f.append(
LDFactor.uniform(
self.model,
self.factors[ii].var_idx))
self._update_seq = []
for ii in range(self.nrFactors):
for i in self.nbF[ii]:
self._update_seq.append((i.node, i.dual))
def init(self):
c = 0.0 if self.logdomain else 1.0
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._edges[i][ii.iter].message.fill(c)
self._edges[i][ii.iter].new_message.fill(c)
if self.updates == 'SEQMAX':
self._update_residual(i, ii.iter, 0.0)
self._iters = 0
def find_max_residual(self):
max_r = -np.inf
best_edge = None
for i in range(self.nrVars):
for _I in range(len(self.nbV[i])):
if self._edges[i][_I].residual > max_r:
max_r = self._edges[i][_I].residual
best_edge = i, _I
return best_edge
def _calc_incoming_message_product(
self,
ii: int,
without_i: bool,
i: int) -> Prob:
f_prod = self.factors[ii].clone()
if self.logdomain:
f_prod.p.p = np.log(f_prod.p.p)
for j in self.nbF[ii]:
if without_i and (j.node == i):
continue
size = self._var_size(j.node)
default_val = 0.0 if self.logdomain else 1.0
prod_j = Prob.same_value(size, default_val)
for J in self.nbV[j.node]:
if J.node != ii:
if self.logdomain:
prod_j += self._edges[j.node][J.iter].message
else:
prod_j *= self._edges[j.node][J.iter].message
if self.logdomain:
f_prod += LDFactor(self.model, [j.node], prod_j)
else:
f_prod *= LDFactor(self.model, [j.node], prod_j)
return f_prod.p
def _calc_new_message(self, i: int, _I: int):
ii = self.nbV[i][_I].node
if len(self.factors[ii].var_idx) == 1:
marg = self.factors[ii].p.clone()
else:
Fprod = self.factors[ii].clone()
Fprod.p = self._calc_incoming_message_product(ii, True, i)
if self.logdomain:
Fprod.p.p = np.exp(Fprod.p.p - np.max(Fprod.p.p))
if self.inference == 'SUMPROD':
marg = Fprod.marginal([i]).p
else:
marg = Fprod.max_marginal([i]).p
if self.logdomain:
self._edges[i][_I].new_message = Prob(np.log(marg.p))
else:
self._edges[i][_I].new_message = marg
if self.updates == 'SEQMAX':
self._update_residual(
i,
_I,
dist_linf(
self._edges[i][_I].new_message,
self._edges[i][_I].message))
def run(self):
tic = time.time()
max_diff = np.inf
while (self._iters < self.maxiter) and (
max_diff > self.tol) and (time.time() - tic) < self.maxtime:
if self.updates == 'SEQMAX':
if self._iters == 0:
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
for _ in range(len(self._update_seq)):
i, _I = self.find_max_residual()
self._update_message(i, _I)
for J in self.nbV[i]:
if J.iter != _I:
for j in self.nbF[J.node]:
_J = j.dual
if j != i:
self._calc_new_message(j.node, _J)
elif self.updates == 'PARALL':
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._calc_new_message(i, ii.iter)
for i in range(self.nrVars):
for ii in self.nbV[i]:
self._update_message(i, ii.iter)
else:
if self.updates == 'SEQRND':
random.shuffle(self._update_seq)
for e in self._update_seq:
self._calc_new_message(e[0], e[1])
self._update_message(e[0], e[1])
max_diff = -np.inf
for i in range(self.nrVars):
b = self._belief_v(i).clone()
max_diff = max(max_diff,
dist_linf(b.p, self._oldBeliefsV[i].p))
self._oldBeliefsV[i] = b
for ii in range(self.nrFactors):
b = self._belief_f(ii).clone()
max_diff = max(max_diff,
dist_linf(b.p, self._old_beliefs_f[ii].p))
self._old_beliefs_f[ii] = b
self._iters += 1
if max_diff > self._maxdiff:
self._maxdiff = max_diff
return max_diff
def _calc_belief_v(self, i: int) -> Prob:
p = Prob.same_value(self.model.get_variable(i).domain.size(),
0.0 if self.logdomain else 1.0)
for ii in self.nbV[i]:
if self.logdomain:
p += self._edges[i][ii.iter].new_message
else:
p *= self._edges[i][ii.iter].new_message
return p
def _belief_v(self, i: int) -> LDFactor:
p = self._calc_belief_v(i)
if self.logdomain:
p.p = np.exp(p.p - np.max(p.p))
p.normalize()
return LDFactor(self.model, [i], p)
def _belief_f(self, ii) -> LDFactor:
p = self._calc_belief_f(ii)
if self.logdomain:
p.p = np.exp(p.p - np.max(p.p))
p.normalize()
return LDFactor(self.model, self.factors[ii].var_idx, p)
def _calc_belief_f(self, ii: int) -> Prob:
return self._calc_incoming_message_product(ii, False, 0)
def log_z(self) -> float:
ans = 0.0
for i in range(self.nrVars):
ans += (1.0 - len(self.nbV[i])) * self._belief_v(i).p.entropy()
for ii in range(self.nrFactors):
ans -= dist_kl(self._belief_f(ii).p, self.factors[ii].p)
return ans
def marg_prob(self) -> np.ndarray:
max_domain_size = np.max([self._var_size(i)
for i in range(self.nrVars)])
ans = np.zeros((self.nrVars, max_domain_size), dtype=np.float64)
for var_id in range(self.nrVars):
ans[var_id, 0:self._var_size(var_id)] = self._belief_v(var_id).p.p
return ans
def _var_size(self, var_idx):
return self.model.get_variable(var_idx).domain.size()
def _update_message(self, i: int, _I: int):
if recordSentMessages:
self._sentMessages.append((i, _I))
if self.damping == 0.0:
self._edges[i][_I].message = self._edges[i][_I].new_message.clone()
if self.updates == 'SEQMAX':
self._update_residual(i, _I, 0.0)
else:
d = self.damping
old_msg = self._edges[i][_I].message.p
new_msg = self._edges[i][_I].new_message.p
if self.logdomain:
self._edges[i][_I].message.p = (
(old_msg * d) + (new_msg * (1.0 - d)))
else:
self._edges[i][_I].message.p = (
(old_msg ** d) * (new_msg ** (1.0 - d)))
if self.updates == 'SEQMAX':
new_res = dist_linf(
self._edges[i][_I].new_message,
self._edges[i][_I].message)
self._update_residual(i, _I, new_res)
def _update_residual(self, i, _I, r):
self._edges[i][_I].residual = r
| true
| true
|
790cade744279e033d1a42616d9659dc6e2a347f
| 421
|
py
|
Python
|
project/partners/migrations/0009_partner_is_published.py
|
TEDxNTUA/tedxntua2019
|
6bce7c9dd8c4ee2c1a94b4ff6facb39052d41cff
|
[
"MIT"
] | 7
|
2018-10-09T19:14:37.000Z
|
2019-11-25T13:43:38.000Z
|
project/partners/migrations/0009_partner_is_published.py
|
TEDxNTUA/tedxntua2019
|
6bce7c9dd8c4ee2c1a94b4ff6facb39052d41cff
|
[
"MIT"
] | 16
|
2018-11-01T21:42:17.000Z
|
2019-03-10T16:59:25.000Z
|
project/partners/migrations/0009_partner_is_published.py
|
TEDxNTUA/tedxntua2019
|
6bce7c9dd8c4ee2c1a94b4ff6facb39052d41cff
|
[
"MIT"
] | 5
|
2018-10-28T17:33:06.000Z
|
2018-11-22T00:12:55.000Z
|
# Generated by Django 2.1.2 on 2019-03-19 22:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('partners', '0008_merge_20190307_1527'),
]
operations = [
migrations.AddField(
model_name='partner',
name='is_published',
field=models.BooleanField(default=True, verbose_name='Published'),
),
]
| 22.157895
| 78
| 0.619952
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('partners', '0008_merge_20190307_1527'),
]
operations = [
migrations.AddField(
model_name='partner',
name='is_published',
field=models.BooleanField(default=True, verbose_name='Published'),
),
]
| true
| true
|
790cafd39b27c239936e5f31977800b1240a68b4
| 2,682
|
py
|
Python
|
sentence_transformers/losses/TripleSoftmaxLoss.py
|
jaimeenahn/COVID-sentence-bert
|
2f47d116f7d9b774946fbf3c0724b721d1b88225
|
[
"Apache-2.0"
] | null | null | null |
sentence_transformers/losses/TripleSoftmaxLoss.py
|
jaimeenahn/COVID-sentence-bert
|
2f47d116f7d9b774946fbf3c0724b721d1b88225
|
[
"Apache-2.0"
] | null | null | null |
sentence_transformers/losses/TripleSoftmaxLoss.py
|
jaimeenahn/COVID-sentence-bert
|
2f47d116f7d9b774946fbf3c0724b721d1b88225
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
class TripleSoftmaxLoss(nn.Module):
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
vocab,
document_coef: float = 0.4,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False):
super(TripleSoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.hidden = 1000
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.document_coef = document_coef
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 2
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.relu = nn.ReLU()
self.document2hidden = nn.Linear(291868, self.hidden)
self.hidden2output = nn.Linear(self.hidden, 768)
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, document_rep: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
document_rep = self.relu(self.hidden2output(self.relu(self.document2hidden(document_rep.float()))))
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
vectors_concat.append(torch.abs(rep_a - document_rep))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
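            # Objective as implemented: cross-entropy on the pair classifier,
            # weighted by (1 - document_coef), minus a cosine-similarity term
            # (weighted by document_coef) pulling rep_b toward document_rep.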
loss = (1.0 - self.document_coef) * loss_fct(output, labels.view(-1))
            loss -= self.document_coef * torch.sum(torch.cosine_similarity(document_rep, rep_b))  # todo: it would be nice to add MMI here.
return loss
else:
return reps, output
| 43.258065
| 121
| 0.670022
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
class TripleSoftmaxLoss(nn.Module):
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
vocab,
document_coef: float = 0.4,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False):
super(TripleSoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.hidden = 1000
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.document_coef = document_coef
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 2
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.relu = nn.ReLU()
self.document2hidden = nn.Linear(291868, self.hidden)
self.hidden2output = nn.Linear(self.hidden, 768)
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, document_rep: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
document_rep = self.relu(self.hidden2output(self.relu(self.document2hidden(document_rep.float()))))
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
vectors_concat.append(torch.abs(rep_a - document_rep))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = (1.0 - self.document_coef) * loss_fct(output, labels.view(-1))
loss -= self.document_coef * torch.sum(torch.cosine_similarity(document_rep, rep_b))
return loss
else:
return reps, output
| true
| true
|
790cb0a489179a2e43dca813e26d4baa816f0c0d
| 1,781
|
py
|
Python
|
temoc.py
|
aaron-lebo/temoc
|
9ade9fe1990378bec2be5a39d2bc5a53b01ed9ad
|
[
"0BSD"
] | null | null | null |
temoc.py
|
aaron-lebo/temoc
|
9ade9fe1990378bec2be5a39d2bc5a53b01ed9ad
|
[
"0BSD"
] | null | null | null |
temoc.py
|
aaron-lebo/temoc
|
9ade9fe1990378bec2be5a39d2bc5a53b01ed9ad
|
[
"0BSD"
] | null | null | null |
import sqlite3
import time
import feedparser
import requests
subs = [x.strip() for x in open('subs.txt').readlines()]
con = sqlite3.connect('temoc.db')
cur = con.cursor()
try:
cur.execute('create table things(site, id text, utc timestamp, save integer, hide integer, title, url)')
cur.execute('create unique index things_site_id on things(site, id)')
con.commit()
except sqlite3.OperationalError:
pass
def insert(ids, *args):
if not args[1] in ids:
cur.execute('insert into things values (?, ?, ?, 0, 0, ?, ?)', args)
con.commit()
ids.add(args[1])
while 1:
ids = cur.execute(f'select id from things where site = "hn"').fetchall()
ids1 = requests.get('https://hacker-news.firebaseio.com/v0/topstories.json').json()
for id in set(ids1).difference(set(int(x[0]) for x in ids)):
x = requests.get(f'https://hacker-news.firebaseio.com/v0/item/{id}.json').json()
insert(set(), 'hn', id, x['time'], x['title'], x.get('url', f'https://news.ycombinator.com/item?id={id}'))
time.sleep(1)
ids = {x[0] for x in cur.execute('select id from things where site = "lobsters"').fetchall()}
r = requests.get('https://lobste.rs/newest.rss')
for x in feedparser.parse(r.text).entries:
insert(ids, 'lobsters', x['id'][20:], time.mktime(x['published_parsed']), x['title'], x['link'])
ids = {x[0] for x in cur.execute('select id from things where site like "r/%"').fetchall()}
for sub in subs:
r = requests.get(f'https://www.reddit.com/r/{sub}.json', headers={'User-agent': 'temoc 0.1'})
for x in (x['data'] for x in r.json()['data']['children']):
insert(ids, f'r/{sub}', x['id'], x['created_utc'], x['title'], x['url'])
time.sleep(1)
time.sleep(90)
| 40.477273
| 114
| 0.619315
|
import sqlite3
import time
import feedparser
import requests
subs = [x.strip() for x in open('subs.txt').readlines()]
con = sqlite3.connect('temoc.db')
cur = con.cursor()
try:
cur.execute('create table things(site, id text, utc timestamp, save integer, hide integer, title, url)')
cur.execute('create unique index things_site_id on things(site, id)')
con.commit()
except sqlite3.OperationalError:
pass
def insert(ids, *args):
if not args[1] in ids:
cur.execute('insert into things values (?, ?, ?, 0, 0, ?, ?)', args)
con.commit()
ids.add(args[1])
while 1:
ids = cur.execute(f'select id from things where site = "hn"').fetchall()
ids1 = requests.get('https://hacker-news.firebaseio.com/v0/topstories.json').json()
for id in set(ids1).difference(set(int(x[0]) for x in ids)):
x = requests.get(f'https://hacker-news.firebaseio.com/v0/item/{id}.json').json()
insert(set(), 'hn', id, x['time'], x['title'], x.get('url', f'https://news.ycombinator.com/item?id={id}'))
time.sleep(1)
ids = {x[0] for x in cur.execute('select id from things where site = "lobsters"').fetchall()}
r = requests.get('https://lobste.rs/newest.rss')
for x in feedparser.parse(r.text).entries:
insert(ids, 'lobsters', x['id'][20:], time.mktime(x['published_parsed']), x['title'], x['link'])
ids = {x[0] for x in cur.execute('select id from things where site like "r/%"').fetchall()}
for sub in subs:
r = requests.get(f'https://www.reddit.com/r/{sub}.json', headers={'User-agent': 'temoc 0.1'})
for x in (x['data'] for x in r.json()['data']['children']):
insert(ids, f'r/{sub}', x['id'], x['created_utc'], x['title'], x['url'])
time.sleep(1)
time.sleep(90)
| true
| true
|
790cb1ee3c10ab4dc40e5147bd25197d42ed6ef0
| 3,237
|
py
|
Python
|
game/AIRepository.py
|
AnythingTechPro/toontown-otp-original
|
40749161f02c6f75844b1d072bf1498b42c2800d
|
[
"BSD-3-Clause"
] | 2
|
2019-12-05T01:07:38.000Z
|
2021-02-25T06:00:47.000Z
|
game/AIRepository.py
|
rasheelprogrammer/toontown-otp-original
|
40749161f02c6f75844b1d072bf1498b42c2800d
|
[
"BSD-3-Clause"
] | null | null | null |
game/AIRepository.py
|
rasheelprogrammer/toontown-otp-original
|
40749161f02c6f75844b1d072bf1498b42c2800d
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:00:48.000Z
|
2021-02-25T06:00:48.000Z
|
from panda3d.core import *
from direct.distributed.PyDatagram import PyDatagram
from OTPInternalRepository import OTPInternalRepository
from direct.directnotify import DirectNotifyGlobal
from game.OtpDoGlobals import *
from realtime.types import *
from direct.distributed.AIZoneData import AIZoneDataStore
from game.TimeManagerAI import TimeManagerAI
from game.EstateManagerAI import EstateManagerAI
from game.TTHoodAI import TTHoodAI
from game.DDHoodAI import DDHoodAI
from game.DGHoodAI import DGHoodAI
from game.MMHoodAI import MMHoodAI
class AIRepository(OTPInternalRepository):
notify = DirectNotifyGlobal.directNotify.newCategory('AIRepository')
notify.setInfo(True)
GameGlobalsId = OTP_DO_ID_TOONTOWN
def __init__(self, baseChannel, serverId, districtName, dcFileNames):
OTPInternalRepository.__init__(self, baseChannel, serverId, dcFileNames=dcFileNames, dcSuffix='AI')
self.zoneDataStore = AIZoneDataStore()
self.districtName = districtName
self.districtPopulation = 0
self.districtId = self.ourChannel
self.hoods = []
self.zoneAllocator = UniqueIdAllocator(61000, 1 << 20)
def getGameDoId(self):
return self.GameGlobalsId
def getAvatarIdFromSender(self):
return self.getMsgSender() & 0xFFFFFFFF
def getAccountIdFromSender(self):
return (self.getMsgSender() >> 32) & 0xFFFFFFFF
def getZoneDataStore(self):
return self.zoneDataStore
def getAvatarExitEvent(self, avId):
return 'distObjDelete-%d' % avId
def allocateZone(self):
return self.zoneAllocator.allocate()
def deallocateZone(self, zoneId):
self.zoneAllocator.free(zoneId)
def handleConnected(self):
OTPInternalRepository.handleConnected(self)
# register the AI on the state server...
dg = PyDatagram()
dg.addServerHeader(self.serverId, self.ourChannel, STATESERVER_ADD_SHARD)
dg.addString(self.districtName)
dg.addUint32(self.districtPopulation)
self.send(dg)
# add a post remove to remove the shard from the state server
# when we disconnect from the message director...
dg = PyDatagram()
dg.addServerHeader(self.serverId, self.ourChannel, STATESERVER_REMOVE_SHARD)
self.addPostRemove(dg)
# create the AI globals...
self.createGlobals()
self.createZones()
def createGlobals(self):
self.timeManager = TimeManagerAI(self)
self.timeManager.generateWithRequired(OTP_ZONE_ID_OLD_QUIET_ZONE)
self.estateManager = EstateManagerAI(self)
self.estateManager.generateWithRequired(OTP_ZONE_ID_OLD_QUIET_ZONE)
def createZones(self):
if simbase.config.GetBool('want-toontown-central', False):
self.hoods.append(TTHoodAI(self))
if simbase.config.GetBool('want-donalds-dock', False):
self.hoods.append(DDHoodAI(self))
if simbase.config.GetBool('want-daisys-garden', False):
self.hoods.append(DGHoodAI(self))
if simbase.config.GetBool('want-minnies-melody-land', False):
self.hoods.append(MMHoodAI(self))
for hood in self.hoods:
hood.createObjects()
| 33.71875
| 107
| 0.713315
|
from panda3d.core import *
from direct.distributed.PyDatagram import PyDatagram
from OTPInternalRepository import OTPInternalRepository
from direct.directnotify import DirectNotifyGlobal
from game.OtpDoGlobals import *
from realtime.types import *
from direct.distributed.AIZoneData import AIZoneDataStore
from game.TimeManagerAI import TimeManagerAI
from game.EstateManagerAI import EstateManagerAI
from game.TTHoodAI import TTHoodAI
from game.DDHoodAI import DDHoodAI
from game.DGHoodAI import DGHoodAI
from game.MMHoodAI import MMHoodAI
class AIRepository(OTPInternalRepository):
notify = DirectNotifyGlobal.directNotify.newCategory('AIRepository')
notify.setInfo(True)
GameGlobalsId = OTP_DO_ID_TOONTOWN
def __init__(self, baseChannel, serverId, districtName, dcFileNames):
OTPInternalRepository.__init__(self, baseChannel, serverId, dcFileNames=dcFileNames, dcSuffix='AI')
self.zoneDataStore = AIZoneDataStore()
self.districtName = districtName
self.districtPopulation = 0
self.districtId = self.ourChannel
self.hoods = []
self.zoneAllocator = UniqueIdAllocator(61000, 1 << 20)
def getGameDoId(self):
return self.GameGlobalsId
def getAvatarIdFromSender(self):
return self.getMsgSender() & 0xFFFFFFFF
def getAccountIdFromSender(self):
return (self.getMsgSender() >> 32) & 0xFFFFFFFF
def getZoneDataStore(self):
return self.zoneDataStore
def getAvatarExitEvent(self, avId):
return 'distObjDelete-%d' % avId
def allocateZone(self):
return self.zoneAllocator.allocate()
def deallocateZone(self, zoneId):
self.zoneAllocator.free(zoneId)
def handleConnected(self):
OTPInternalRepository.handleConnected(self)
dg = PyDatagram()
dg.addServerHeader(self.serverId, self.ourChannel, STATESERVER_ADD_SHARD)
dg.addString(self.districtName)
dg.addUint32(self.districtPopulation)
self.send(dg)
dg = PyDatagram()
dg.addServerHeader(self.serverId, self.ourChannel, STATESERVER_REMOVE_SHARD)
self.addPostRemove(dg)
self.createGlobals()
self.createZones()
def createGlobals(self):
self.timeManager = TimeManagerAI(self)
self.timeManager.generateWithRequired(OTP_ZONE_ID_OLD_QUIET_ZONE)
self.estateManager = EstateManagerAI(self)
self.estateManager.generateWithRequired(OTP_ZONE_ID_OLD_QUIET_ZONE)
def createZones(self):
if simbase.config.GetBool('want-toontown-central', False):
self.hoods.append(TTHoodAI(self))
if simbase.config.GetBool('want-donalds-dock', False):
self.hoods.append(DDHoodAI(self))
if simbase.config.GetBool('want-daisys-garden', False):
self.hoods.append(DGHoodAI(self))
if simbase.config.GetBool('want-minnies-melody-land', False):
self.hoods.append(MMHoodAI(self))
for hood in self.hoods:
hood.createObjects()
| false
| true
|
790cb21953992624cafa711ad382e9592b996752
| 1,401
|
py
|
Python
|
datashader_nb.py
|
cisaacstern/hrpyzon
|
10050b5286045f8a9a9d1338b5f4d418b19df39d
|
[
"BSD-3-Clause"
] | null | null | null |
datashader_nb.py
|
cisaacstern/hrpyzon
|
10050b5286045f8a9a9d1338b5f4d418b19df39d
|
[
"BSD-3-Clause"
] | null | null | null |
datashader_nb.py
|
cisaacstern/hrpyzon
|
10050b5286045f8a9a9d1338b5f4d418b19df39d
|
[
"BSD-3-Clause"
] | null | null | null |
# +
import numpy as np
import holoviews as hv
from holoviews import opts
import matplotlib.pyplot as plt
from plotsun import plot_sun
hv.extension('bokeh', 'matplotlib')
# -
# # Load data
data = np.load('npz_timeseries/subset.npz')
arr = data['arr']
stack = data['stack']
sun = data['sun']
print(arr.shape, stack.shape, sun.shape)
stack[:,:,25]
plt.imshow(stack[:,:,25], cmap='binary')
# +
stack = hv.Dataset((np.arange(stack.shape[2]),
np.arange(stack.shape[0]),
np.arange(stack.shape[1]),
stack),
['Time', 'x', 'y'], 'Shadows')
stack
# -
arr = hv.Dataset((np.arange(arr.shape[0]),
np.arange(arr.shape[1]),
arr),
['x', 'y'], 'Elevation')
arr
# # View
opts.defaults(
opts.GridSpace(shared_xaxis=True, shared_yaxis=True),
opts.Image(cmap='viridis', invert_yaxis=True, width=400, height=400),
opts.Labels(text_color='white', text_font_size='8pt',
text_align='left', text_baseline='bottom'),
opts.Path(color='white'),
opts.Spread(width=600),
opts.Overlay(show_legend=False))
elevation = arr.to(hv.Image, ['x', 'y'])
shadows = stack.to(hv.Image, ['x', 'y'])
elevation
dims = {'figsize':(4,5), 'top':1, 'bottom':0, 'left':0.2, 'right':0.95}
plot_sun(sunposition=sun, d=dims)
elevation * shadows
stack[:,:,24]
| 21.553846
| 73
| 0.589579
|
import numpy as np
import holoviews as hv
from holoviews import opts
import matplotlib.pyplot as plt
from plotsun import plot_sun
hv.extension('bokeh', 'matplotlib')
data = np.load('npz_timeseries/subset.npz')
arr = data['arr']
stack = data['stack']
sun = data['sun']
print(arr.shape, stack.shape, sun.shape)
stack[:,:,25]
plt.imshow(stack[:,:,25], cmap='binary')
stack = hv.Dataset((np.arange(stack.shape[2]),
np.arange(stack.shape[0]),
np.arange(stack.shape[1]),
stack),
['Time', 'x', 'y'], 'Shadows')
stack
arr = hv.Dataset((np.arange(arr.shape[0]),
np.arange(arr.shape[1]),
arr),
['x', 'y'], 'Elevation')
arr
opts.defaults(
opts.GridSpace(shared_xaxis=True, shared_yaxis=True),
opts.Image(cmap='viridis', invert_yaxis=True, width=400, height=400),
opts.Labels(text_color='white', text_font_size='8pt',
text_align='left', text_baseline='bottom'),
opts.Path(color='white'),
opts.Spread(width=600),
opts.Overlay(show_legend=False))
elevation = arr.to(hv.Image, ['x', 'y'])
shadows = stack.to(hv.Image, ['x', 'y'])
elevation
dims = {'figsize':(4,5), 'top':1, 'bottom':0, 'left':0.2, 'right':0.95}
plot_sun(sunposition=sun, d=dims)
elevation * shadows
stack[:,:,24]
| true
| true
|
790cb2a200277d0b74b3fa86c967d5890bbbc826
| 18,618
|
py
|
Python
|
var/spack/repos/builtin/packages/lbann/package.py
|
edwardsp/spack
|
f42c5f62373e4c4ea1f21ebab1c9f54e92d9a535
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-10T13:47:48.000Z
|
2019-04-17T13:05:17.000Z
|
var/spack/repos/builtin/packages/lbann/package.py
|
edwardsp/spack
|
f42c5f62373e4c4ea1f21ebab1c9f54e92d9a535
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 12
|
2021-02-15T15:55:08.000Z
|
2022-03-31T00:09:57.000Z
|
var/spack/repos/builtin/packages/lbann/package.py
|
rubendibattista/spack
|
91de23ce650ef4dd007b94f67c26e1e6901be354
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2018-04-06T09:04:11.000Z
|
2020-01-24T12:52:12.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Lbann(CMakePackage, CudaPackage, ROCmPackage):
"""LBANN: Livermore Big Artificial Neural Network Toolkit. A distributed
memory, HPC-optimized, model and data parallel training toolkit for deep
neural networks.
"""
homepage = "http://software.llnl.gov/lbann/"
url = "https://github.com/LLNL/lbann/archive/v0.91.tar.gz"
git = "https://github.com/LLNL/lbann.git"
maintainers = ['bvanessen']
version('develop', branch='develop')
version('0.101', sha256='69d3fe000a88a448dc4f7e263bcb342c34a177bd9744153654528cd86335a1f7')
version('0.100', sha256='d1bab4fb6f1b80ae83a7286cc536a32830890f6e5b0c3107a17c2600d0796912')
version('0.99', sha256='3358d44f1bc894321ce07d733afdf6cb7de39c33e3852d73c9f31f530175b7cd')
version('0.98.1', sha256='9a2da8f41cd8bf17d1845edf9de6d60f781204ebd37bffba96d8872036c10c66')
version('0.98', sha256='8d64b9ac0f1d60db553efa4e657f5ea87e790afe65336117267e9c7ae6f68239')
version('0.97.1', sha256='2f2756126ac8bb993202cf532d72c4d4044e877f4d52de9fdf70d0babd500ce4')
version('0.97', sha256='9794a706fc7ac151926231efdf74564c39fbaa99edca4acb745ee7d20c32dae7')
version('0.96', sha256='97af78e9d3c405e963361d0db96ee5425ee0766fa52b43c75b8a5670d48e4b4a')
version('0.95', sha256='d310b986948b5ee2bedec36383a7fe79403721c8dc2663a280676b4e431f83c2')
version('0.94', sha256='567e99b488ebe6294933c98a212281bffd5220fc13a0a5cd8441f9a3761ceccf')
version('0.93', sha256='77bfd7fe52ee7495050f49bcdd0e353ba1730e3ad15042c678faa5eeed55fb8c')
version('0.92', sha256='9187c5bcbc562c2828fe619d53884ab80afb1bcd627a817edb935b80affe7b84')
version('0.91', sha256='b69f470829f434f266119a33695592f74802cff4b76b37022db00ab32de322f5')
variant('al', default=True, description='Builds with support for Aluminum Library')
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release'))
variant('conduit', default=True,
description='Builds with support for Conduit Library '
'(note that for v0.99 conduit is required)')
variant('deterministic', default=False,
description='Builds with support for deterministic execution')
variant('dihydrogen', default=True,
description='Builds with support for DiHydrogen Tensor Library')
variant('distconv', default=False,
description='Builds with support for spatial, filter, or channel '
'distributed convolutions')
variant('docs', default=False, description='Builds with support for building documentation')
variant('dtype', default='float',
description='Type for floating point representation of weights',
values=('float', 'double'))
variant('extras', default=False, description='Add python modules for LBANN related tools')
variant('fft', default=False, description='Support for FFT operations')
variant('half', default=False,
description='Builds with support for FP16 precision data types')
variant('hwloc', default=True, description='Add support for topology aware algorithms')
variant('nvprof', default=False, description='Build with region annotations for NVPROF')
variant('numpy', default=False,
description='Builds with support for processing NumPy data files')
variant('vision', default=False,
description='Builds with support for image processing data with OpenCV')
variant('vtune', default=False, description='Builds with support for Intel VTune')
variant('onednn', default=False, description='Support for OneDNN')
variant('nvshmem', default=False, description='Support for NVSHMEM')
variant('python', default=True, description='Support for Python extensions (e.g. Data Reader)')
variant('pfe', default=True, description='Python Frontend for generating and launching models')
variant('boost', default=False, description='Enable callbacks that use Boost libraries')
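    # Example spec (an illustrative sketch, not a tested configuration):
    # the variants above are selected on the Spack command line, e.g.
    #   spack install lbann@0.101 +cuda +half ~vision cuda_arch=70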
# Variant Conflicts
conflicts('@:0.90,0.99:', when='~conduit')
conflicts('@0.90:0.101.99', when='+fft')
conflicts('@:0.90,0.101.99:', when='~dihydrogen')
conflicts('~cuda', when='+nvprof')
conflicts('~hwloc', when='+al')
conflicts('~cuda', when='+nvshmem')
conflicts('+cuda', when='+rocm', msg='CUDA and ROCm support are mutually exclusive')
conflicts('+extras', when='~pfe', msg='Python extras require the Python front end support')
conflicts('~vision', when='@0.91:0.101')
conflicts('~numpy', when='@0.91:0.101')
conflicts('~python', when='@0.91:0.101')
conflicts('~pfe', when='@0.91:0.101')
depends_on('cmake@3.17.0:', type='build')
# Specify the correct versions of Hydrogen
depends_on('hydrogen@:1.3.4', when='@0.95:0.100')
depends_on('hydrogen@1.4.0:1.4.99', when='@0.101:0.101.99')
depends_on('hydrogen@1.5.0:', when='@:0.90,0.102:')
# Add Hydrogen variants
depends_on('hydrogen +openmp +openmp_blas +shared +int64')
depends_on('hydrogen ~al', when='~al')
depends_on('hydrogen +al', when='+al')
depends_on('hydrogen ~cuda', when='~cuda')
depends_on('hydrogen +cuda', when='+cuda')
depends_on('hydrogen ~half', when='~half')
depends_on('hydrogen +half', when='+half')
depends_on('hydrogen ~rocm', when='~rocm')
depends_on('hydrogen +rocm', when='+rocm')
depends_on('hydrogen build_type=Debug', when='build_type=Debug')
# Older versions depended on Elemental not Hydrogen
depends_on('elemental +openmp_blas +shared +int64', when='@0.91:0.94')
depends_on('elemental +openmp_blas +shared +int64 build_type=Debug',
when='build_type=Debug @0.91:0.94')
# Specify the correct version of Aluminum
depends_on('aluminum@:0.3.99', when='@0.95:0.100 +al')
depends_on('aluminum@0.4:0.4.99', when='@0.101:0.101.99 +al')
depends_on('aluminum@0.5.0:', when='@:0.90,0.102: +al')
# Add Aluminum variants
depends_on('aluminum +cuda +nccl +ht +cuda_rma', when='+al +cuda')
depends_on('aluminum +rocm +rccl +ht', when='+al +rocm')
depends_on('dihydrogen@0.2.0:', when='@:0.90,0.102:')
depends_on('dihydrogen +openmp', when='+dihydrogen')
depends_on('dihydrogen ~cuda', when='+dihydrogen ~cuda')
depends_on('dihydrogen +cuda', when='+dihydrogen +cuda')
depends_on('dihydrogen ~al', when='+dihydrogen ~al')
depends_on('dihydrogen +al', when='+dihydrogen +al')
depends_on('dihydrogen +distconv +cuda', when='+distconv')
depends_on('dihydrogen ~half', when='+dihydrogen ~half')
depends_on('dihydrogen +half', when='+dihydrogen +half')
depends_on('dihydrogen ~nvshmem', when='+dihydrogen ~nvshmem')
depends_on('dihydrogen +nvshmem', when='+dihydrogen +nvshmem')
depends_on('dihydrogen ~rocm', when='+dihydrogen ~rocm')
depends_on('dihydrogen +rocm', when='+dihydrogen +rocm')
depends_on('dihydrogen@0.1', when='@0.101:0.101.99 +dihydrogen')
depends_on('dihydrogen@:0.0,0.2:', when='@:0.90,0.102: +dihydrogen')
conflicts('~dihydrogen', when='+distconv')
for arch in CudaPackage.cuda_arch_values:
depends_on('hydrogen cuda_arch=%s' % arch, when='+cuda cuda_arch=%s' % arch)
depends_on('aluminum cuda_arch=%s' % arch, when='+al +cuda cuda_arch=%s' % arch)
depends_on('dihydrogen cuda_arch=%s' % arch, when='+dihydrogen +cuda cuda_arch=%s' % arch)
depends_on('nccl cuda_arch=%s' % arch, when='+cuda cuda_arch=%s' % arch)
# variants +rocm and amdgpu_targets are not automatically passed to
# dependencies, so do it manually.
for val in ROCmPackage.amdgpu_targets:
depends_on('hydrogen amdgpu_target=%s' % val, when='amdgpu_target=%s' % val)
depends_on('aluminum amdgpu_target=%s' % val, when='+al amdgpu_target=%s' % val)
depends_on('dihydrogen amdgpu_target=%s' % val, when='+dihydrogen amdgpu_target=%s' % val)
depends_on('cudnn', when='@0.90:0.100.99 +cuda')
depends_on('cudnn@8.0.2:', when='@:0.90,0.101: +cuda')
depends_on('cub', when='@0.94:0.98.2 +cuda ^cuda@:10.99')
depends_on('hipcub', when='+rocm')
depends_on('mpi')
depends_on('hwloc@1.11:', when='@:0.90,0.102: +hwloc')
depends_on('hwloc@1.11:1.11.99', when='@0.95:0.101.99 +hwloc')
depends_on('hwloc +cuda +nvml', when='+cuda')
depends_on('hwloc@2.3.0:', when='+rocm')
depends_on('half', when='+half')
depends_on('fftw@3.3: +openmp', when='+fft')
# LBANN wraps OpenCV calls in OpenMP parallel loops, build without OpenMP
# Additionally disable video related options, they incorrectly link in a
# bad OpenMP library when building with clang or Intel compilers
depends_on('opencv@4.1.0: build_type=RelWithDebInfo +core +highgui '
'+imgcodecs +imgproc +jpeg +png +tiff +fast-math ~cuda',
when='+vision')
# Note that for Power systems we want the environment to add +powerpc
depends_on('opencv@4.1.0: +powerpc', when='+vision arch=ppc64le:')
depends_on('cnpy', when='+numpy')
depends_on('nccl', when='@0.94:0.98.2 +cuda')
depends_on('conduit@0.4.0: +hdf5~hdf5_compat', when='@0.94:0.99 +conduit')
depends_on('conduit@0.5.0:0.6.99 +hdf5~hdf5_compat', when='@0.100:0.101 +conduit')
depends_on('conduit@0.6.0: +hdf5~hdf5_compat', when='@:0.90,0.99:')
    # LBANN can use Python in two modes: 1) as part of an extensible framework,
    # and 2) to drive front-end model creation and launch.
# Core library support for Python Data Reader and extensible interface
depends_on('python@3: +shared', type=('run'), when='@:0.90,0.99: +python')
extends("python", when='+python')
# Python front end and possible extra packages
depends_on('python@3: +shared', type=('build', 'run'), when='@:0.90,0.99: +pfe')
extends("python", when='+pfe')
depends_on('py-setuptools', type='build', when='+pfe')
depends_on('py-argparse', type='run', when='@:0.90,0.99: +pfe ^python@:2.6')
depends_on('py-configparser', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-graphviz@0.10.1:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-matplotlib@3.0.0:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-numpy@1.16.0:', type=('build', 'run'), when='@:0.90,0.99: +pfe +extras')
depends_on('py-onnx@1.3.0:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-pandas@0.24.1:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-texttable@1.4.0:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-pytest', type='test', when='@:0.90,0.99: +pfe')
depends_on('py-protobuf+cpp@3.10.0', type=('build', 'run'), when='@:0.90,0.99: +pfe')
depends_on('protobuf+shared@3.10.0', when='@:0.90,0.99:')
depends_on('py-breathe', type='build', when='+docs')
depends_on('doxygen', type='build', when='+docs')
depends_on('py-m2r', type='build', when='+docs')
depends_on('cereal')
depends_on('catch2', type=('build', 'test'))
depends_on('clara')
depends_on('llvm-openmp', when='%apple-clang')
depends_on('onednn cpu_runtime=omp gpu_runtime=none', when='+onednn')
depends_on('nvshmem', when='+nvshmem')
depends_on('zstr')
generator = 'Ninja'
depends_on('ninja', type='build')
@property
def common_config_args(self):
spec = self.spec
# Environment variables
cppflags = []
cppflags.append('-DLBANN_SET_EL_RNG')
args = []
args.extend([
'-DCMAKE_CXX_FLAGS=%s' % ' '.join(cppflags),
'-DLBANN_VERSION=spack',
])
if '+numpy' in spec:
args.append(
'-DCNPY_DIR={0}'.format(spec['cnpy'].prefix),
)
return args
def setup_build_environment(self, env):
if self.spec.satisfies('%apple-clang'):
env.append_flags(
'CPPFLAGS', self.compiler.openmp_flag)
env.append_flags(
'CFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'CXXFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'LDFLAGS', self.spec['llvm-openmp'].libs.ld_flags)
# Get any recent versions or non-numeric version
# Note that develop > numeric and non-develop < numeric
@when('@:0.90,0.94:')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DCMAKE_CXX_STANDARD=17',
'-DLBANN_WITH_CNPY=%s' % ('+numpy' in spec),
'-DLBANN_DETERMINISTIC:BOOL=%s' % ('+deterministic' in spec),
'-DLBANN_WITH_HWLOC=%s' % ('+hwloc' in spec),
'-DLBANN_WITH_ALUMINUM:BOOL=%s' % ('+al' in spec),
'-DLBANN_WITH_BOOST:BOOL=%s' % ('+boost' in spec),
'-DLBANN_WITH_CONDUIT:BOOL=%s' % ('+conduit' in spec),
'-DLBANN_WITH_NVSHMEM:BOOL=%s' % ('+nvshmem' in spec),
'-DLBANN_WITH_FFT:BOOL=%s' % ('+fft' in spec),
'-DLBANN_WITH_ONEDNN:BOOL=%s' % ('+onednn' in spec),
'-DLBANN_WITH_EMBEDDED_PYTHON:BOOL=%s' % ('+python' in spec),
'-DLBANN_WITH_PYTHON_FRONTEND:BOOL=%s' % ('+pfe' in spec),
'-DLBANN_WITH_TBINF=OFF',
'-DLBANN_WITH_UNIT_TESTING:BOOL=%s' % (self.run_tests),
'-DLBANN_WITH_VISION:BOOL=%s' % ('+vision' in spec),
'-DLBANN_WITH_VTUNE:BOOL=%s' % ('+vtune' in spec),
'-DLBANN_DATATYPE={0}'.format(spec.variants['dtype'].value),
'-DCEREAL_DIR={0}'.format(spec['cereal'].prefix),
# protobuf is included by py-protobuf+cpp
'-DProtobuf_DIR={0}'.format(spec['protobuf'].prefix),
'-Dprotobuf_MODULE_COMPATIBLE=ON'])
if '+cuda' in spec:
if spec.satisfies('^cuda@11.0:'):
args.append('-DCMAKE_CUDA_STANDARD=17')
else:
args.append('-DCMAKE_CUDA_STANDARD=14')
if spec.satisfies('@:0.90') or spec.satisfies('@0.95:'):
args.append(
'-DHydrogen_DIR={0}/CMake/hydrogen'.format(
spec['hydrogen'].prefix))
elif spec.satisfies('@0.94'):
args.append(
'-DElemental_DIR={0}/CMake/elemental'.format(
spec['elemental'].prefix))
if spec.satisfies('@0.94:0.98.2'):
args.append('-DLBANN_WITH_NCCL:BOOL=%s' %
('+cuda +nccl' in spec))
if '+vtune' in spec:
args.append('-DVTUNE_DIR={0}'.format(spec['vtune'].prefix))
if '+al' in spec:
args.append('-DAluminum_DIR={0}'.format(spec['aluminum'].prefix))
if '+conduit' in spec:
args.append('-DConduit_DIR={0}'.format(spec['conduit'].prefix))
# Add support for OpenMP with external (Brew) clang
if spec.satisfies('%clang platform=darwin'):
clang = self.compiler.cc
clang_bin = os.path.dirname(clang)
clang_root = os.path.dirname(clang_bin)
args.extend([
'-DOpenMP_CXX_FLAGS=-fopenmp=libomp',
'-DOpenMP_CXX_LIB_NAMES=libomp',
'-DOpenMP_libomp_LIBRARY={0}/lib/libomp.dylib'.format(
clang_root)])
if '+vision' in spec:
args.append('-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix))
if '+cuda' in spec:
args.append(
'-DCUDA_TOOLKIT_ROOT_DIR={0}'.format(
spec['cuda'].prefix))
args.append(
'-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix))
if spec.satisfies('@0.94:0.98.2'):
if spec.satisfies('^cuda@:10.99'):
args.append('-DCUB_DIR={0}'.format(
spec['cub'].prefix))
if '+nccl' in spec:
args.append(
'-DNCCL_DIR={0}'.format(
spec['nccl'].prefix))
args.append(
'-DLBANN_WITH_NVPROF:BOOL=%s' % ('+nvprof' in spec))
if spec.satisfies('@:0.90') or spec.satisfies('@0.100:'):
args.append(
'-DLBANN_WITH_DIHYDROGEN:BOOL=%s' % ('+dihydrogen' in spec))
if spec.satisfies('@:0.90') or spec.satisfies('@0.101:'):
args.append(
'-DLBANN_WITH_DISTCONV:BOOL=%s' % ('+distconv' in spec))
if '+rocm' in spec:
args.extend([
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix),
'-DHIP_CXX_COMPILER={0}'.format(self.spec['hip'].hipcc)])
archs = self.spec.variants['amdgpu_target'].value
if archs != 'none':
arch_str = ",".join(archs)
cxxflags_str = " ".join(self.spec.compiler_flags['cxxflags'])
args.append(
'-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'
' -g -fsized-deallocation -fPIC -std=c++17 {1}'.format(
arch_str, cxxflags_str)
)
return args
@when('@0.91:0.93')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DWITH_CUDA:BOOL=%s' % ('+cuda' in spec),
'-DWITH_CUDNN:BOOL=%s' % ('+cuda' in spec),
'-DELEMENTAL_USE_CUBLAS:BOOL=%s' % (
'+cublas' in spec['elemental']),
'-DWITH_TBINF=OFF',
'-DWITH_VTUNE=OFF',
'-DElemental_DIR={0}'.format(spec['elemental'].prefix),
'-DELEMENTAL_MATH_LIBS={0}'.format(
spec['elemental'].libs),
'-DVERBOSE=0',
'-DLBANN_HOME=.'])
if spec.variants['dtype'].value == 'float':
args.append('-DDATATYPE=4')
elif spec.variants['dtype'].value == 'double':
args.append('-DDATATYPE=8')
if '+vision' in spec:
args.append('-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix))
if '+cudnn' in spec:
args.append('-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix))
if '+cub' in spec and spec.satisfies('^cuda@:10.99'):
args.append('-DCUB_DIR={0}'.format(
spec['cub'].prefix))
return args
| 46.198511
| 99
| 0.615104
|
import os
from spack import *
class Lbann(CMakePackage, CudaPackage, ROCmPackage):
homepage = "http://software.llnl.gov/lbann/"
url = "https://github.com/LLNL/lbann/archive/v0.91.tar.gz"
git = "https://github.com/LLNL/lbann.git"
maintainers = ['bvanessen']
version('develop', branch='develop')
version('0.101', sha256='69d3fe000a88a448dc4f7e263bcb342c34a177bd9744153654528cd86335a1f7')
version('0.100', sha256='d1bab4fb6f1b80ae83a7286cc536a32830890f6e5b0c3107a17c2600d0796912')
version('0.99', sha256='3358d44f1bc894321ce07d733afdf6cb7de39c33e3852d73c9f31f530175b7cd')
version('0.98.1', sha256='9a2da8f41cd8bf17d1845edf9de6d60f781204ebd37bffba96d8872036c10c66')
version('0.98', sha256='8d64b9ac0f1d60db553efa4e657f5ea87e790afe65336117267e9c7ae6f68239')
version('0.97.1', sha256='2f2756126ac8bb993202cf532d72c4d4044e877f4d52de9fdf70d0babd500ce4')
version('0.97', sha256='9794a706fc7ac151926231efdf74564c39fbaa99edca4acb745ee7d20c32dae7')
version('0.96', sha256='97af78e9d3c405e963361d0db96ee5425ee0766fa52b43c75b8a5670d48e4b4a')
version('0.95', sha256='d310b986948b5ee2bedec36383a7fe79403721c8dc2663a280676b4e431f83c2')
version('0.94', sha256='567e99b488ebe6294933c98a212281bffd5220fc13a0a5cd8441f9a3761ceccf')
version('0.93', sha256='77bfd7fe52ee7495050f49bcdd0e353ba1730e3ad15042c678faa5eeed55fb8c')
version('0.92', sha256='9187c5bcbc562c2828fe619d53884ab80afb1bcd627a817edb935b80affe7b84')
version('0.91', sha256='b69f470829f434f266119a33695592f74802cff4b76b37022db00ab32de322f5')
variant('al', default=True, description='Builds with support for Aluminum Library')
variant('build_type', default='Release',
description='The build type to build',
values=('Debug', 'Release'))
variant('conduit', default=True,
description='Builds with support for Conduit Library '
'(note that for v0.99 conduit is required)')
variant('deterministic', default=False,
description='Builds with support for deterministic execution')
variant('dihydrogen', default=True,
description='Builds with support for DiHydrogen Tensor Library')
variant('distconv', default=False,
description='Builds with support for spatial, filter, or channel '
'distributed convolutions')
variant('docs', default=False, description='Builds with support for building documentation')
variant('dtype', default='float',
description='Type for floating point representation of weights',
values=('float', 'double'))
variant('extras', default=False, description='Add python modules for LBANN related tools')
variant('fft', default=False, description='Support for FFT operations')
variant('half', default=False,
description='Builds with support for FP16 precision data types')
variant('hwloc', default=True, description='Add support for topology aware algorithms')
variant('nvprof', default=False, description='Build with region annotations for NVPROF')
variant('numpy', default=False,
description='Builds with support for processing NumPy data files')
variant('vision', default=False,
description='Builds with support for image processing data with OpenCV')
variant('vtune', default=False, description='Builds with support for Intel VTune')
variant('onednn', default=False, description='Support for OneDNN')
variant('nvshmem', default=False, description='Support for NVSHMEM')
variant('python', default=True, description='Support for Python extensions (e.g. Data Reader)')
variant('pfe', default=True, description='Python Frontend for generating and launching models')
variant('boost', default=False, description='Enable callbacks that use Boost libraries')
conflicts('@:0.90,0.99:', when='~conduit')
conflicts('@0.90:0.101.99', when='+fft')
conflicts('@:0.90,0.101.99:', when='~dihydrogen')
conflicts('~cuda', when='+nvprof')
conflicts('~hwloc', when='+al')
conflicts('~cuda', when='+nvshmem')
conflicts('+cuda', when='+rocm', msg='CUDA and ROCm support are mutually exclusive')
conflicts('+extras', when='~pfe', msg='Python extras require the Python front end support')
conflicts('~vision', when='@0.91:0.101')
conflicts('~numpy', when='@0.91:0.101')
conflicts('~python', when='@0.91:0.101')
conflicts('~pfe', when='@0.91:0.101')
depends_on('cmake@3.17.0:', type='build')
depends_on('hydrogen@:1.3.4', when='@0.95:0.100')
depends_on('hydrogen@1.4.0:1.4.99', when='@0.101:0.101.99')
depends_on('hydrogen@1.5.0:', when='@:0.90,0.102:')
depends_on('hydrogen +openmp +openmp_blas +shared +int64')
depends_on('hydrogen ~al', when='~al')
depends_on('hydrogen +al', when='+al')
depends_on('hydrogen ~cuda', when='~cuda')
depends_on('hydrogen +cuda', when='+cuda')
depends_on('hydrogen ~half', when='~half')
depends_on('hydrogen +half', when='+half')
depends_on('hydrogen ~rocm', when='~rocm')
depends_on('hydrogen +rocm', when='+rocm')
depends_on('hydrogen build_type=Debug', when='build_type=Debug')
depends_on('elemental +openmp_blas +shared +int64', when='@0.91:0.94')
depends_on('elemental +openmp_blas +shared +int64 build_type=Debug',
when='build_type=Debug @0.91:0.94')
depends_on('aluminum@:0.3.99', when='@0.95:0.100 +al')
depends_on('aluminum@0.4:0.4.99', when='@0.101:0.101.99 +al')
depends_on('aluminum@0.5.0:', when='@:0.90,0.102: +al')
depends_on('aluminum +cuda +nccl +ht +cuda_rma', when='+al +cuda')
depends_on('aluminum +rocm +rccl +ht', when='+al +rocm')
depends_on('dihydrogen@0.2.0:', when='@:0.90,0.102:')
depends_on('dihydrogen +openmp', when='+dihydrogen')
depends_on('dihydrogen ~cuda', when='+dihydrogen ~cuda')
depends_on('dihydrogen +cuda', when='+dihydrogen +cuda')
depends_on('dihydrogen ~al', when='+dihydrogen ~al')
depends_on('dihydrogen +al', when='+dihydrogen +al')
depends_on('dihydrogen +distconv +cuda', when='+distconv')
depends_on('dihydrogen ~half', when='+dihydrogen ~half')
depends_on('dihydrogen +half', when='+dihydrogen +half')
depends_on('dihydrogen ~nvshmem', when='+dihydrogen ~nvshmem')
depends_on('dihydrogen +nvshmem', when='+dihydrogen +nvshmem')
depends_on('dihydrogen ~rocm', when='+dihydrogen ~rocm')
depends_on('dihydrogen +rocm', when='+dihydrogen +rocm')
depends_on('dihydrogen@0.1', when='@0.101:0.101.99 +dihydrogen')
depends_on('dihydrogen@:0.0,0.2:', when='@:0.90,0.102: +dihydrogen')
conflicts('~dihydrogen', when='+distconv')
for arch in CudaPackage.cuda_arch_values:
depends_on('hydrogen cuda_arch=%s' % arch, when='+cuda cuda_arch=%s' % arch)
depends_on('aluminum cuda_arch=%s' % arch, when='+al +cuda cuda_arch=%s' % arch)
depends_on('dihydrogen cuda_arch=%s' % arch, when='+dihydrogen +cuda cuda_arch=%s' % arch)
depends_on('nccl cuda_arch=%s' % arch, when='+cuda cuda_arch=%s' % arch)
for val in ROCmPackage.amdgpu_targets:
depends_on('hydrogen amdgpu_target=%s' % val, when='amdgpu_target=%s' % val)
depends_on('aluminum amdgpu_target=%s' % val, when='+al amdgpu_target=%s' % val)
depends_on('dihydrogen amdgpu_target=%s' % val, when='+dihydrogen amdgpu_target=%s' % val)
depends_on('cudnn', when='@0.90:0.100.99 +cuda')
depends_on('cudnn@8.0.2:', when='@:0.90,0.101: +cuda')
depends_on('cub', when='@0.94:0.98.2 +cuda ^cuda@:10.99')
depends_on('hipcub', when='+rocm')
depends_on('mpi')
depends_on('hwloc@1.11:', when='@:0.90,0.102: +hwloc')
depends_on('hwloc@1.11:1.11.99', when='@0.95:0.101.99 +hwloc')
depends_on('hwloc +cuda +nvml', when='+cuda')
depends_on('hwloc@2.3.0:', when='+rocm')
depends_on('half', when='+half')
depends_on('fftw@3.3: +openmp', when='+fft')
depends_on('opencv@4.1.0: build_type=RelWithDebInfo +core +highgui '
'+imgcodecs +imgproc +jpeg +png +tiff +fast-math ~cuda',
when='+vision')
depends_on('opencv@4.1.0: +powerpc', when='+vision arch=ppc64le:')
depends_on('cnpy', when='+numpy')
depends_on('nccl', when='@0.94:0.98.2 +cuda')
depends_on('conduit@0.4.0: +hdf5~hdf5_compat', when='@0.94:0.99 +conduit')
depends_on('conduit@0.5.0:0.6.99 +hdf5~hdf5_compat', when='@0.100:0.101 +conduit')
depends_on('conduit@0.6.0: +hdf5~hdf5_compat', when='@:0.90,0.99:')
depends_on('python@3: +shared', type=('run'), when='@:0.90,0.99: +python')
extends("python", when='+python')
depends_on('python@3: +shared', type=('build', 'run'), when='@:0.90,0.99: +pfe')
extends("python", when='+pfe')
depends_on('py-setuptools', type='build', when='+pfe')
depends_on('py-argparse', type='run', when='@:0.90,0.99: +pfe ^python@:2.6')
depends_on('py-configparser', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-graphviz@0.10.1:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-matplotlib@3.0.0:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-numpy@1.16.0:', type=('build', 'run'), when='@:0.90,0.99: +pfe +extras')
depends_on('py-onnx@1.3.0:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-pandas@0.24.1:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-texttable@1.4.0:', type='run', when='@:0.90,0.99: +pfe +extras')
depends_on('py-pytest', type='test', when='@:0.90,0.99: +pfe')
depends_on('py-protobuf+cpp@3.10.0', type=('build', 'run'), when='@:0.90,0.99: +pfe')
depends_on('protobuf+shared@3.10.0', when='@:0.90,0.99:')
depends_on('py-breathe', type='build', when='+docs')
depends_on('doxygen', type='build', when='+docs')
depends_on('py-m2r', type='build', when='+docs')
depends_on('cereal')
depends_on('catch2', type=('build', 'test'))
depends_on('clara')
depends_on('llvm-openmp', when='%apple-clang')
depends_on('onednn cpu_runtime=omp gpu_runtime=none', when='+onednn')
depends_on('nvshmem', when='+nvshmem')
depends_on('zstr')
generator = 'Ninja'
depends_on('ninja', type='build')
@property
def common_config_args(self):
spec = self.spec
cppflags = []
cppflags.append('-DLBANN_SET_EL_RNG')
args = []
args.extend([
'-DCMAKE_CXX_FLAGS=%s' % ' '.join(cppflags),
'-DLBANN_VERSION=spack',
])
if '+numpy' in spec:
args.append(
'-DCNPY_DIR={0}'.format(spec['cnpy'].prefix),
)
return args
def setup_build_environment(self, env):
if self.spec.satisfies('%apple-clang'):
env.append_flags(
'CPPFLAGS', self.compiler.openmp_flag)
env.append_flags(
'CFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'CXXFLAGS', self.spec['llvm-openmp'].headers.include_flags)
env.append_flags(
'LDFLAGS', self.spec['llvm-openmp'].libs.ld_flags)
@when('@:0.90,0.94:')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DCMAKE_CXX_STANDARD=17',
'-DLBANN_WITH_CNPY=%s' % ('+numpy' in spec),
'-DLBANN_DETERMINISTIC:BOOL=%s' % ('+deterministic' in spec),
'-DLBANN_WITH_HWLOC=%s' % ('+hwloc' in spec),
'-DLBANN_WITH_ALUMINUM:BOOL=%s' % ('+al' in spec),
'-DLBANN_WITH_BOOST:BOOL=%s' % ('+boost' in spec),
'-DLBANN_WITH_CONDUIT:BOOL=%s' % ('+conduit' in spec),
'-DLBANN_WITH_NVSHMEM:BOOL=%s' % ('+nvshmem' in spec),
'-DLBANN_WITH_FFT:BOOL=%s' % ('+fft' in spec),
'-DLBANN_WITH_ONEDNN:BOOL=%s' % ('+onednn' in spec),
'-DLBANN_WITH_EMBEDDED_PYTHON:BOOL=%s' % ('+python' in spec),
'-DLBANN_WITH_PYTHON_FRONTEND:BOOL=%s' % ('+pfe' in spec),
'-DLBANN_WITH_TBINF=OFF',
'-DLBANN_WITH_UNIT_TESTING:BOOL=%s' % (self.run_tests),
'-DLBANN_WITH_VISION:BOOL=%s' % ('+vision' in spec),
'-DLBANN_WITH_VTUNE:BOOL=%s' % ('+vtune' in spec),
'-DLBANN_DATATYPE={0}'.format(spec.variants['dtype'].value),
'-DCEREAL_DIR={0}'.format(spec['cereal'].prefix),
'-DProtobuf_DIR={0}'.format(spec['protobuf'].prefix),
'-Dprotobuf_MODULE_COMPATIBLE=ON'])
if '+cuda' in spec:
if spec.satisfies('^cuda@11.0:'):
args.append('-DCMAKE_CUDA_STANDARD=17')
else:
args.append('-DCMAKE_CUDA_STANDARD=14')
if spec.satisfies('@:0.90') or spec.satisfies('@0.95:'):
args.append(
'-DHydrogen_DIR={0}/CMake/hydrogen'.format(
spec['hydrogen'].prefix))
elif spec.satisfies('@0.94'):
args.append(
'-DElemental_DIR={0}/CMake/elemental'.format(
spec['elemental'].prefix))
if spec.satisfies('@0.94:0.98.2'):
args.append('-DLBANN_WITH_NCCL:BOOL=%s' %
('+cuda +nccl' in spec))
if '+vtune' in spec:
args.append('-DVTUNE_DIR={0}'.format(spec['vtune'].prefix))
if '+al' in spec:
args.append('-DAluminum_DIR={0}'.format(spec['aluminum'].prefix))
if '+conduit' in spec:
args.append('-DConduit_DIR={0}'.format(spec['conduit'].prefix))
if spec.satisfies('%clang platform=darwin'):
clang = self.compiler.cc
clang_bin = os.path.dirname(clang)
clang_root = os.path.dirname(clang_bin)
args.extend([
'-DOpenMP_CXX_FLAGS=-fopenmp=libomp',
'-DOpenMP_CXX_LIB_NAMES=libomp',
'-DOpenMP_libomp_LIBRARY={0}/lib/libomp.dylib'.format(
clang_root)])
if '+vision' in spec:
args.append('-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix))
if '+cuda' in spec:
args.append(
'-DCUDA_TOOLKIT_ROOT_DIR={0}'.format(
spec['cuda'].prefix))
args.append(
'-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix))
if spec.satisfies('@0.94:0.98.2'):
if spec.satisfies('^cuda@:10.99'):
args.append('-DCUB_DIR={0}'.format(
spec['cub'].prefix))
if '+nccl' in spec:
args.append(
'-DNCCL_DIR={0}'.format(
spec['nccl'].prefix))
args.append(
'-DLBANN_WITH_NVPROF:BOOL=%s' % ('+nvprof' in spec))
if spec.satisfies('@:0.90') or spec.satisfies('@0.100:'):
args.append(
'-DLBANN_WITH_DIHYDROGEN:BOOL=%s' % ('+dihydrogen' in spec))
if spec.satisfies('@:0.90') or spec.satisfies('@0.101:'):
args.append(
'-DLBANN_WITH_DISTCONV:BOOL=%s' % ('+distconv' in spec))
if '+rocm' in spec:
args.extend([
'-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix),
'-DHIP_CXX_COMPILER={0}'.format(self.spec['hip'].hipcc)])
archs = self.spec.variants['amdgpu_target'].value
if archs != 'none':
arch_str = ",".join(archs)
cxxflags_str = " ".join(self.spec.compiler_flags['cxxflags'])
args.append(
'-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'
' -g -fsized-deallocation -fPIC -std=c++17 {1}'.format(
arch_str, cxxflags_str)
)
return args
@when('@0.91:0.93')
def cmake_args(self):
spec = self.spec
args = self.common_config_args
args.extend([
'-DWITH_CUDA:BOOL=%s' % ('+cuda' in spec),
'-DWITH_CUDNN:BOOL=%s' % ('+cuda' in spec),
'-DELEMENTAL_USE_CUBLAS:BOOL=%s' % (
'+cublas' in spec['elemental']),
'-DWITH_TBINF=OFF',
'-DWITH_VTUNE=OFF',
'-DElemental_DIR={0}'.format(spec['elemental'].prefix),
'-DELEMENTAL_MATH_LIBS={0}'.format(
spec['elemental'].libs),
'-DVERBOSE=0',
'-DLBANN_HOME=.'])
if spec.variants['dtype'].value == 'float':
args.append('-DDATATYPE=4')
elif spec.variants['dtype'].value == 'double':
args.append('-DDATATYPE=8')
if '+vision' in spec:
args.append('-DOpenCV_DIR:STRING={0}'.format(
spec['opencv'].prefix))
if '+cudnn' in spec:
args.append('-DcuDNN_DIR={0}'.format(
spec['cudnn'].prefix))
if '+cub' in spec and spec.satisfies('^cuda@:10.99'):
args.append('-DCUB_DIR={0}'.format(
spec['cub'].prefix))
return args
| true
| true
|
790cb2b72248c2c9cc20f3039a3954d558e9a846
| 1,534
|
py
|
Python
|
source/_sample/pillow/pattern.py
|
showa-yojyo/notebook
|
82c15074c24d64a1dfcb70a526bc1deb2ecffe68
|
[
"MIT"
] | 14
|
2016-04-13T08:10:02.000Z
|
2021-04-19T09:42:51.000Z
|
source/_sample/pillow/pattern.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | 88
|
2017-09-27T15:07:05.000Z
|
2019-10-02T04:05:03.000Z
|
source/_sample/pillow/pattern.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""pattern.py: An example like <Rolling an image> in Pillow document.
"""
import os.path
from PIL import Image
def run(filepath):
"""Create a wallpaper image from a PNG file."""
src = Image.open(filepath)
target = swap_quadrants(src)
paste_with_alpha(target, src, (0, 0), 0x10)
return target
def swap_quadrants(img):
"""Quarter the image and swap two diagonal quadrant pairs."""
boxes = quarter_bbox(img)
regions = [img.crop(box) for box in boxes]
target = img.copy()
paste_with_alpha(target, regions[3], (0, 0), 0x80)
paste_with_alpha(target, regions[2], (regions[3].size[0], 0), 0x80)
paste_with_alpha(target, regions[1], (0, regions[3].size[1]), 0x80)
paste_with_alpha(target, regions[0], regions[3].size, 0x80)
return target
def paste_with_alpha(target, source, left_upper, opacity):
"""An alpha_composite-like operation."""
mask = Image.new('L', source.size, opacity)
target.paste(source, left_upper, mask=mask)
def quarter_bbox(img):
"""Quarter the bounding box of an image."""
(left, upper, right, bottom) = img.getbbox()
    # PIL crop boxes are right/bottom exclusive, so sharing the midpoints
    # between adjacent quadrants makes the four boxes tile the bbox exactly.
    xmid = (left + right) // 2
    ymid = (upper + bottom) // 2
    # Z order: upper-left, upper-right, lower-left, lower-right
    return [
        (left, upper, xmid, ymid),
        (xmid, upper, right, ymid),
        (left, ymid, xmid, bottom),
        (xmid, ymid, right, bottom),]
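# For example, with a fully opaque 100x80 image (a hypothetical input),
# getbbox() returns (0, 0, 100, 80) and quarter_bbox yields
# (0, 0, 50, 40), (50, 0, 100, 40), (0, 40, 50, 80) and (50, 40, 100, 80).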
if __name__ == '__main__':
result = run(os.path.join(
os.path.dirname(__file__), '../../_images/illvelo.png'))
result.show()
| 28.943396
| 71
| 0.632334
|
import os.path
from PIL import Image
def run(filepath):
src = Image.open(filepath)
target = swap_quadrants(src)
paste_with_alpha(target, src, (0, 0), 0x10)
return target
def swap_quadrants(img):
boxes = quarter_bbox(img)
regions = [img.crop(box) for box in boxes]
target = img.copy()
paste_with_alpha(target, regions[3], (0, 0), 0x80)
paste_with_alpha(target, regions[2], (regions[3].size[0], 0), 0x80)
paste_with_alpha(target, regions[1], (0, regions[3].size[1]), 0x80)
paste_with_alpha(target, regions[0], regions[3].size, 0x80)
return target
def paste_with_alpha(target, source, left_upper, opacity):
mask = Image.new('L', source.size, opacity)
target.paste(source, left_upper, mask=mask)
def quarter_bbox(img):
(left, upper, right, bottom) = img.getbbox()
    xmid = (left + right) // 2
    ymid = (upper + bottom) // 2
    return [
        (left, upper, xmid, ymid),
        (xmid, upper, right, ymid),
        (left, ymid, xmid, bottom),
        (xmid, ymid, right, bottom),]
if __name__ == '__main__':
result = run(os.path.join(
os.path.dirname(__file__), '../../_images/illvelo.png'))
result.show()
| true
| true
|
790cb2eec66b03989bf4ebf69d545d5043aed7c3
| 20,545
|
py
|
Python
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 5
|
2017-09-08T20:47:22.000Z
|
2021-06-29T02:11:05.000Z
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 16
|
2017-11-27T20:57:48.000Z
|
2021-11-23T18:46:43.000Z
|
purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 22
|
2017-10-13T15:33:05.000Z
|
2021-11-08T19:56:21.000Z
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NetworkInterfacesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_network_interfaces(self, **kwargs):
"""
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.create_network_interfaces_with_http_info(**kwargs)
return data
def create_network_interfaces_with_http_info(self, **kwargs):
"""
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'network_interface' in params:
body_params = params['network_interface']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_network_interfaces(self, **kwargs):
"""
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.delete_network_interfaces_with_http_info(**kwargs)
return data
def delete_network_interfaces_with_http_info(self, **kwargs):
"""
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_network_interfaces(self, **kwargs):
"""
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
        :param int limit: The maximum number of items to return; must be >= 0.
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.list_network_interfaces_with_http_info(**kwargs)
return data
def list_network_interfaces_with_http_info(self, **kwargs):
"""
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
        :param int limit: The maximum number of items to return; must be >= 0.
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'token' in params:
query_params.append(('token', params['token']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_network_interfaces(self, **kwargs):
"""
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.update_network_interfaces_with_http_info(**kwargs)
return data
def update_network_interfaces_with_http_info(self, **kwargs):
"""
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'network_interface' in params:
body_params = params['network_interface']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
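# Minimal usage sketch (host and token are placeholders; assumes the enclosing
# purity_fb client package and a reachable FlashBlade array):
#   from purity_fb import PurityFb
#   fb = PurityFb("flashblade.example.com")
#   fb.login("API-TOKEN")
#   res = fb.network_interfaces.list_network_interfaces(limit=5)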
| 42.981172
| 204
| 0.585933
|
from __future__ import absolute_import
import sys
import os
import re
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NetworkInterfacesApi(object):
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_network_interfaces(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.create_network_interfaces_with_http_info(**kwargs)
return data
def create_network_interfaces_with_http_info(self, **kwargs):
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'network_interface' in params:
body_params = params['network_interface']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_network_interfaces(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.delete_network_interfaces_with_http_info(**kwargs)
return data
def delete_network_interfaces_with_http_info(self, **kwargs):
all_params = ['names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_network_interfaces(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.list_network_interfaces_with_http_info(**kwargs)
return data
def list_network_interfaces_with_http_info(self, **kwargs):
all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'token' in params:
query_params.append(('token', params['token']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_network_interfaces(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.update_network_interfaces_with_http_info(**kwargs)
return data
def update_network_interfaces_with_http_info(self, **kwargs):
all_params = ['names', 'network_interface']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_network_interfaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'network_interface' in params:
body_params = params['network_interface']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/network-interfaces', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkInterfaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| true
| true
|
790cb498dc09adf0c96f4a0cfff49cf19147bee5
| 2,401
|
py
|
Python
|
models/layers.py
|
yijingru/ObjGuided-Instance-Segmentation
|
71e39f84aada581743a5d65f103e63ba0fcc8a9a
|
[
"MIT"
] | 9
|
2021-02-08T07:30:32.000Z
|
2022-01-12T08:05:24.000Z
|
models/layers.py
|
yijingru/ObjGuided-Instance-Segmentation
|
71e39f84aada581743a5d65f103e63ba0fcc8a9a
|
[
"MIT"
] | 1
|
2022-03-22T09:29:28.000Z
|
2022-03-23T10:25:36.000Z
|
models/layers.py
|
yijingru/ObjGuided-Instance-Segmentation
|
71e39f84aada581743a5d65f103e63ba0fcc8a9a
|
[
"MIT"
] | 3
|
2021-07-01T06:59:37.000Z
|
2021-12-11T20:31:38.000Z
|
import torch.nn as nn
import torch
import torch.nn.functional as F
class CombinationModule(nn.Module):
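    """Fuse a low-resolution feature map with a higher-resolution one.

    `x_low` is bilinearly upsampled to `x_up`'s spatial size, projected from
    `c_low` to `c_up` channels by a 3x3 conv, concatenated with `x_up`, and
    reduced back to `c_up` channels by a 1x1 conv.
    """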
def __init__(self, c_low, c_up, batch_norm=False, group_norm=False, instance_norm=False):
super(CombinationModule, self).__init__()
if batch_norm:
self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),
nn.BatchNorm2d(c_up),
nn.ReLU(inplace=True))
self.cat_conv = nn.Sequential(nn.Conv2d(c_up*2, c_up, kernel_size=1, stride=1),
nn.BatchNorm2d(c_up),
nn.ReLU(inplace=True))
elif group_norm:
self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),
nn.GroupNorm(num_groups=32, num_channels=c_up),
nn.ReLU(inplace=True))
self.cat_conv = nn.Sequential(nn.Conv2d(c_up * 2, c_up, kernel_size=1, stride=1),
nn.GroupNorm(num_groups=32, num_channels=c_up),
nn.ReLU(inplace=True))
elif instance_norm:
self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),
                                    nn.InstanceNorm2d(num_features=c_up),  # track_running_stats=True
nn.ReLU(inplace=True))
self.cat_conv = nn.Sequential(nn.Conv2d(c_up * 2, c_up, kernel_size=1, stride=1),
                                          nn.InstanceNorm2d(num_features=c_up),  # track_running_stats=True
nn.ReLU(inplace=True))
else:
self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),
nn.ReLU(inplace=True))
self.cat_conv = nn.Sequential(nn.Conv2d(c_up*2, c_up, kernel_size=1, stride=1),
nn.ReLU(inplace=True))
def forward(self, x_low, x_up):
x_low = self.up(F.interpolate(x_low, x_up.shape[2:], mode='bilinear', align_corners=False))
# if self.up[1].running_mean is not None:
# print(self.up[1].running_mean.shape)
return self.cat_conv(torch.cat((x_up, x_low), 1))
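# Usage sketch (shapes illustrative): fuse a decoder feature with a skip
# connection in a U-Net-style upsampling path.
#   m = CombinationModule(c_low=256, c_up=128, batch_norm=True)
#   out = m(torch.randn(1, 256, 16, 16), torch.randn(1, 128, 32, 32))
#   # out.shape == (1, 128, 32, 32)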
| 60.025
| 107
| 0.52853
|
import torch.nn as nn
import torch
import torch.nn.functional as F
class CombinationModule(nn.Module):
def __init__(self, c_low, c_up, batch_norm=False, group_norm=False, instance_norm=False):
super(CombinationModule, self).__init__()
if batch_norm:
self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),
nn.BatchNorm2d(c_up),
nn.ReLU(inplace=True))
self.cat_conv = nn.Sequential(nn.Conv2d(c_up*2, c_up, kernel_size=1, stride=1),
nn.BatchNorm2d(c_up),
nn.ReLU(inplace=True))
elif group_norm:
self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),
nn.GroupNorm(num_groups=32, num_channels=c_up),
nn.ReLU(inplace=True))
self.cat_conv = nn.Sequential(nn.Conv2d(c_up * 2, c_up, kernel_size=1, stride=1),
nn.GroupNorm(num_groups=32, num_channels=c_up),
nn.ReLU(inplace=True))
elif instance_norm:
self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),
nn.InstanceNorm2d(num_features=c_up),
nn.ReLU(inplace=True))
self.cat_conv = nn.Sequential(nn.Conv2d(c_up * 2, c_up, kernel_size=1, stride=1),
nn.InstanceNorm2d(num_features=c_up),
nn.ReLU(inplace=True))
else:
self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),
nn.ReLU(inplace=True))
self.cat_conv = nn.Sequential(nn.Conv2d(c_up*2, c_up, kernel_size=1, stride=1),
nn.ReLU(inplace=True))
def forward(self, x_low, x_up):
x_low = self.up(F.interpolate(x_low, x_up.shape[2:], mode='bilinear', align_corners=False))
return self.cat_conv(torch.cat((x_up, x_low), 1))
| true
| true
|
790cb5a1f9ec7ae8fa43c57b166e178006a478cc
| 2,695
|
py
|
Python
|
PyPoll/main.py
|
dorispira/python-challenge
|
000516550a843265454fb069ec56082f70a10347
|
[
"MIT"
] | null | null | null |
PyPoll/main.py
|
dorispira/python-challenge
|
000516550a843265454fb069ec56082f70a10347
|
[
"MIT"
] | null | null | null |
PyPoll/main.py
|
dorispira/python-challenge
|
000516550a843265454fb069ec56082f70a10347
|
[
"MIT"
] | null | null | null |
import os
import csv
# File path
election_dataCSV = os.path.join('.', 'election_data.csv')
# The total number of votes cast
# A complete list of candidates who received votes
# The percentage of votes each candidate won
# The total number of votes each candidate won
# The winner of the election based on popular vote.
# Declaring my variables
total_votes = 0
khan_votes = 0
correy_votes = 0
li_votes = 0
otooley_votes = 0
# percent_votes = 0
# total_votes_candidate = 0
# winner = 0
# Open file as read
with open('election_data.csv', 'r') as csvfile:
# Identifying CSV file with delimiter set
csvreader = csv.reader(csvfile, delimiter=',')
header = next(csvreader)
# firstRow = next(csvreader)
# total_votes += 1
# previous_row = int(firstRow[0])
# Add rows to list
for row in csvreader:
#Adding total number of votes cast
total_votes += 1
#Candidates that received votes
if row[2] == "Khan":
khan_votes += 1
elif row[2] == "Correy":
correy_votes += 1
elif row[2] == "Li":
li_votes += 1
elif row[2] == "O'Tooley":
otooley_votes +=1
# Create a list of the candidates
candidates_list = ["Khan", "Correy", "Li", "O'Tooley"]
votes = [khan_votes, correy_votes, li_votes, otooley_votes]
# Pair candidates and votes together
dict_candidates_and_votes = dict(zip(candidates_list,votes))
# Find the winner by using the max function
key = max(dict_candidates_and_votes, key = dict_candidates_and_votes.get)
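# For example, if the dict were {'Khan': 3, 'Li': 7}, max(d, key=d.get)
# would return 'Li', the key with the most votes.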
# Calculating the percentage of votes per candidate
khan_percentage = (khan_votes/total_votes) *100
correy_percentage = (correy_votes/total_votes) *100
li_percentage = (li_votes/total_votes) *100
otooley_percentage = (otooley_votes/total_votes) *100
# Print conclusion
print(f"Election Results")
print(f"----------------------------")
print(f"Total Votes: {total_votes}")
print(f"----------------------------")
print(f"Khan: {khan_percentage:.3f}% ({khan_votes})")
print(f"Correy: {correy_percentage:.3f}% ({correy_votes})")
print(f"Li: {li_percentage:.3f}% ({li_votes})")
print(f"O'Tooley: {otooley_percentage:.3f}% ({otooley_votes})")
print(f"----------------------------")
print(f"Winner: {key}")
print(f"----------------------------")
# Export results into txt file
with open('election_output.txt', 'w') as file:
    file.write(f"Election Results: Total Votes - {total_votes}, "
               f"Khan - {khan_percentage:.3f}% ({khan_votes}), "
               f"Correy - {correy_percentage:.3f}% ({correy_votes}), "
               f"Li - {li_percentage:.3f}% ({li_votes}), "
               f"O'Tooley - {otooley_percentage:.3f}% ({otooley_votes}), "
               f"Winner - {key}")
| 30.625
| 171
| 0.621521
|
import os
import csv
election_dataCSV = os.path.join('.', 'election_data.csv')
total_votes = 0
khan_votes = 0
correy_votes = 0
li_votes = 0
otooley_votes = 0
with open('election_data.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
header = next(csvreader)
for row in csvreader:
total_votes += 1
if row[2] == "Khan":
khan_votes += 1
elif row[2] == "Correy":
correy_votes += 1
elif row[2] == "Li":
li_votes += 1
elif row[2] == "O'Tooley":
otooley_votes +=1
# Create a list of the candidates
candidates_list = ["Khan", "Correy", "Li", "O'Tooley"]
votes = [khan_votes, correy_votes, li_votes, otooley_votes]
dict_candidates_and_votes = dict(zip(candidates_list,votes))
key = max(dict_candidates_and_votes, key = dict_candidates_and_votes.get)
khan_percentage = (khan_votes/total_votes) *100
correy_percentage = (correy_votes/total_votes) *100
li_percentage = (li_votes/total_votes) *100
otooley_percentage = (otooley_votes/total_votes) *100
print(f"Election Results")
print(f"----------------------------")
print(f"Total Votes: {total_votes}")
print(f"----------------------------")
print(f"Khan: {khan_percentage:.3f}% ({khan_votes})")
print(f"Correy: {correy_percentage:.3f}% ({correy_votes})")
print(f"Li: {li_percentage:.3f}% ({li_votes})")
print(f"O'Tooley: {otooley_percentage:.3f}% ({otooley_votes})")
print(f"----------------------------")
print(f"Winner: {key}")
print(f"----------------------------")
# Export results into txt file
with open('election_output.txt', 'w') as file:
    file.write(f"Election Results: Total Votes - {total_votes}, "
               f"Khan - {khan_percentage:.3f}% ({khan_votes}), "
               f"Correy - {correy_percentage:.3f}% ({correy_votes}), "
               f"Li - {li_percentage:.3f}% ({li_votes}), "
               f"O'Tooley - {otooley_percentage:.3f}% ({otooley_votes}), "
               f"Winner - {key}")
| true
| true
|
790cb5b691044225ad777024cc19b9e693c1f668
| 1,253
|
py
|
Python
|
vnpy_deribit/__init__.py
|
NovelResearchInvestment/vnpy_deribit
|
ea567c636b7712f63ab11a70e5b530b14ffc6dc8
|
[
"MIT"
] | 7
|
2021-12-01T12:56:36.000Z
|
2022-01-27T03:05:31.000Z
|
vnpy_deribit/__init__.py
|
NovelResearchInvestment/vnpy_deribit
|
ea567c636b7712f63ab11a70e5b530b14ffc6dc8
|
[
"MIT"
] | null | null | null |
vnpy_deribit/__init__.py
|
NovelResearchInvestment/vnpy_deribit
|
ea567c636b7712f63ab11a70e5b530b14ffc6dc8
|
[
"MIT"
] | 4
|
2021-04-30T06:20:05.000Z
|
2021-09-24T09:05:06.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2015-present, vn-crypto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .deribit_gateway import DeribitGateway
import importlib_metadata
__version__ = importlib_metadata.version("vnpy_deribit")
| 44.75
| 80
| 0.783719
|
from .deribit_gateway import DeribitGateway
import importlib_metadata
__version__ = importlib_metadata.version("vnpy_deribit")
| true
| true
|
790cb5d1976b484d7e527c5d88ae7e59dabc39a2
| 48
|
py
|
Python
|
samcli/__init__.py
|
HiteshMah-Jan/aws-sam-cli
|
5cc7680068c820e972d6165a0cccd21677e2a428
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
samcli/__init__.py
|
HiteshMah-Jan/aws-sam-cli
|
5cc7680068c820e972d6165a0cccd21677e2a428
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
samcli/__init__.py
|
HiteshMah-Jan/aws-sam-cli
|
5cc7680068c820e972d6165a0cccd21677e2a428
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
"""
SAM CLI version
"""
__version__ = "1.23.0"
| 8
| 22
| 0.583333
|
__version__ = "1.23.0"
| true
| true
|
790cb8607b4e97473cd8d77b572067bb176bd9e6
| 5,546
|
py
|
Python
|
cubic_spline_planner.py
|
hadleyhzy34/mpc_python_traj
|
48451533c7ecd473e949c3a680a166fb046447bf
|
[
"Apache-2.0"
] | null | null | null |
cubic_spline_planner.py
|
hadleyhzy34/mpc_python_traj
|
48451533c7ecd473e949c3a680a166fb046447bf
|
[
"Apache-2.0"
] | null | null | null |
cubic_spline_planner.py
|
hadleyhzy34/mpc_python_traj
|
48451533c7ecd473e949c3a680a166fb046447bf
|
[
"Apache-2.0"
] | null | null | null |
"""
Cubic spline planner
Author: Atsushi Sakai(@Atsushi_twi)
"""
import math
import numpy as np
import bisect
class Spline:
"""
Cubic Spline class
"""
def __init__(self, x, y):
self.b, self.c, self.d, self.w = [], [], [], []
self.x = x
self.y = y
self.nx = len(x) # dimension of x
h = np.diff(x)
        # calc coefficient a
self.a = [iy for iy in y]
# calc coefficient c
A = self.__calc_A(h)
B = self.__calc_B(h)
self.c = np.linalg.solve(A, B)
# print(self.c1)
# calc spline coefficient b and d
for i in range(self.nx - 1):
self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \
(self.c[i + 1] + 2.0 * self.c[i]) / 3.0
self.b.append(tb)
def calc(self, t):
"""
Calc position
if t is outside of the input x, return None
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.a[i] + self.b[i] * dx + \
self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
return result
def calcd(self, t):
"""
Calc first derivative
if t is outside of the input x, return None
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0
return result
def calcdd(self, t):
"""
Calc second derivative
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx
return result
def __search_index(self, x):
"""
search data segment index
"""
return bisect.bisect(self.x, x) - 1
def __calc_A(self, h):
"""
calc matrix A for spline coefficient c
"""
A = np.zeros((self.nx, self.nx))
A[0, 0] = 1.0
for i in range(self.nx - 1):
if i != (self.nx - 2):
A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])
A[i + 1, i] = h[i]
A[i, i + 1] = h[i]
A[0, 1] = 0.0
A[self.nx - 1, self.nx - 2] = 0.0
A[self.nx - 1, self.nx - 1] = 1.0
# print(A)
return A
def __calc_B(self, h):
"""
calc matrix B for spline coefficient c
"""
B = np.zeros(self.nx)
for i in range(self.nx - 2):
B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \
h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]
return B
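    # Together, A @ c = B enforces second-derivative continuity at the interior
    # knots with natural boundary conditions (c[0] = c[-1] = 0); each segment
    # then evaluates as a[i] + b[i]*dx + c[i]*dx**2 + d[i]*dx**3, dx = t - x[i].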
class Spline2D:
"""
2D Cubic Spline class
"""
def __init__(self, x, y):
self.s = self.__calc_s(x, y)
self.sx = Spline(self.s, x)
self.sy = Spline(self.s, y)
def __calc_s(self, x, y):
dx = np.diff(x)
dy = np.diff(y)
self.ds = np.hypot(dx, dy)
s = [0]
s.extend(np.cumsum(self.ds))
return s
def calc_position(self, s):
"""
calc position
"""
x = self.sx.calc(s)
y = self.sy.calc(s)
return x, y
def calc_curvature(self, s):
"""
calc curvature
"""
dx = self.sx.calcd(s)
ddx = self.sx.calcdd(s)
dy = self.sy.calcd(s)
ddy = self.sy.calcdd(s)
k = (ddy * dx - ddx * dy) / ((dx ** 2 + dy ** 2)**(3 / 2))
return k
def calc_yaw(self, s):
"""
calc yaw
"""
dx = self.sx.calcd(s)
dy = self.sy.calcd(s)
yaw = math.atan2(dy, dx)
return yaw
def calc_spline_course(x, y, ds=0.1):
sp = Spline2D(x, y)
s = list(np.arange(0, sp.s[-1], ds))
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = sp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(sp.calc_yaw(i_s))
rk.append(sp.calc_curvature(i_s))
return rx, ry, ryaw, rk, s
def main(): # pragma: no cover
print("Spline 2D test")
import matplotlib.pyplot as plt
x = [-2.5, 0.0, 2.5, 5.0, 7.5, 3.0, -1.0]
y = [0.7, -6, 5, 6.5, 0.0, 5.0, -2.0]
ds = 0.1 # [m] distance of each interpolated points
sp = Spline2D(x, y)
s = np.arange(0, sp.s[-1], ds)
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = sp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(sp.calc_yaw(i_s))
rk.append(sp.calc_curvature(i_s))
plt.plot(rx,ry)
plt.show()
plt.close()
plt.subplots(1)
plt.plot(x, y, "xb", label="input")
plt.plot(rx, ry, "-r", label="spline")
plt.grid(True)
plt.axis("equal")
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.legend()
plt.subplots(1)
plt.plot(s, [np.rad2deg(iyaw) for iyaw in ryaw], "-r", label="yaw")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("yaw angle[deg]")
plt.subplots(1)
plt.plot(s, rk, "-r", label="curvature")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("curvature [1/m]")
plt.show()
if __name__ == '__main__':
main()
| 23.400844
| 79
| 0.462495
|
import math
import numpy as np
import bisect
class Spline:
def __init__(self, x, y):
self.b, self.c, self.d, self.w = [], [], [], []
self.x = x
self.y = y
self.nx = len(x)
h = np.diff(x)
self.a = [iy for iy in y]
A = self.__calc_A(h)
B = self.__calc_B(h)
self.c = np.linalg.solve(A, B)
for i in range(self.nx - 1):
self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \
(self.c[i + 1] + 2.0 * self.c[i]) / 3.0
self.b.append(tb)
def calc(self, t):
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.a[i] + self.b[i] * dx + \
self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
return result
def calcd(self, t):
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0
return result
def calcdd(self, t):
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx
return result
def __search_index(self, x):
return bisect.bisect(self.x, x) - 1
def __calc_A(self, h):
A = np.zeros((self.nx, self.nx))
A[0, 0] = 1.0
for i in range(self.nx - 1):
if i != (self.nx - 2):
A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])
A[i + 1, i] = h[i]
A[i, i + 1] = h[i]
A[0, 1] = 0.0
A[self.nx - 1, self.nx - 2] = 0.0
A[self.nx - 1, self.nx - 1] = 1.0
return A
def __calc_B(self, h):
B = np.zeros(self.nx)
for i in range(self.nx - 2):
B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \
h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]
return B
class Spline2D:
def __init__(self, x, y):
self.s = self.__calc_s(x, y)
self.sx = Spline(self.s, x)
self.sy = Spline(self.s, y)
def __calc_s(self, x, y):
dx = np.diff(x)
dy = np.diff(y)
self.ds = np.hypot(dx, dy)
s = [0]
s.extend(np.cumsum(self.ds))
return s
def calc_position(self, s):
x = self.sx.calc(s)
y = self.sy.calc(s)
return x, y
def calc_curvature(self, s):
dx = self.sx.calcd(s)
ddx = self.sx.calcdd(s)
dy = self.sy.calcd(s)
ddy = self.sy.calcdd(s)
k = (ddy * dx - ddx * dy) / ((dx ** 2 + dy ** 2)**(3 / 2))
return k
def calc_yaw(self, s):
dx = self.sx.calcd(s)
dy = self.sy.calcd(s)
yaw = math.atan2(dy, dx)
return yaw
def calc_spline_course(x, y, ds=0.1):
sp = Spline2D(x, y)
s = list(np.arange(0, sp.s[-1], ds))
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = sp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(sp.calc_yaw(i_s))
rk.append(sp.calc_curvature(i_s))
return rx, ry, ryaw, rk, s
def main():
print("Spline 2D test")
import matplotlib.pyplot as plt
x = [-2.5, 0.0, 2.5, 5.0, 7.5, 3.0, -1.0]
y = [0.7, -6, 5, 6.5, 0.0, 5.0, -2.0]
ds = 0.1
sp = Spline2D(x, y)
s = np.arange(0, sp.s[-1], ds)
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = sp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(sp.calc_yaw(i_s))
rk.append(sp.calc_curvature(i_s))
plt.plot(rx,ry)
plt.show()
plt.close()
plt.subplots(1)
plt.plot(x, y, "xb", label="input")
plt.plot(rx, ry, "-r", label="spline")
plt.grid(True)
plt.axis("equal")
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.legend()
plt.subplots(1)
plt.plot(s, [np.rad2deg(iyaw) for iyaw in ryaw], "-r", label="yaw")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("yaw angle[deg]")
plt.subplots(1)
plt.plot(s, rk, "-r", label="curvature")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("curvature [1/m]")
plt.show()
if __name__ == '__main__':
main()
| true
| true
|
790cb87cb2bead4c83974996491b47adeb913907
| 788
|
py
|
Python
|
maxipago/utils/xml.py
|
fdelvalle/sdk-python
|
e8457644ca7dba94e3dc1cd3ba5a100887d75d26
|
[
"MIT"
] | 1
|
2019-06-04T19:18:00.000Z
|
2019-06-04T19:18:00.000Z
|
maxipago/utils/xml.py
|
fdelvalle/sdk-python
|
e8457644ca7dba94e3dc1cd3ba5a100887d75d26
|
[
"MIT"
] | null | null | null |
maxipago/utils/xml.py
|
fdelvalle/sdk-python
|
e8457644ca7dba94e3dc1cd3ba5a100887d75d26
|
[
"MIT"
] | 3
|
2018-02-22T18:45:42.000Z
|
2022-03-24T15:08:07.000Z
|
# coding: utf-8
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
# raises ImportError
def create_element_recursively(parent, path):
nodes = path.split('/')
node = parent
for n_str in nodes:
n = node.find(n_str)
if n is None:
node = etree.SubElement(node, n_str)
else:
node = n
return node
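# Illustrative usage sketch (the element names below are invented for demonstration
# and are not part of the original module): create_element_recursively walks the
# '/'-separated path under `parent`, creating any missing nodes with etree.SubElement
# and returning the deepest element.
if __name__ == '__main__':
    root = etree.Element('transaction-request')
    item = create_element_recursively(root, 'order/items/item')
    item.text = 'example'
    print(etree.tostring(root))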
| 24.625
| 55
| 0.558376
|
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
try:
import xml.etree.ElementTree as etree
except ImportError:
try:
import cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
def create_element_recursively(parent, path):
nodes = path.split('/')
node = parent
for n_str in nodes:
n = node.find(n_str)
if n is None:
node = etree.SubElement(node, n_str)
else:
node = n
return node
| true
| true
|
790cb8c9d2eff5586181d712f28e9160677d928c
| 305
|
py
|
Python
|
2018/11/graphics/judges-trump-obama-20181113/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14
|
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2018/11/graphics/judges-trump-obama-20181113/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2018/11/graphics/judges-trump-obama-20181113/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7
|
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1IrWnAyt2g0fMsCzCJImHZXgqXiwhyjPl4atT-n6MkkM'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714
| 77
| 0.816393
|
import base_filters
COPY_GOOGLE_DOC_KEY = '1IrWnAyt2g0fMsCzCJImHZXgqXiwhyjPl4atT-n6MkkM'
USE_ASSETS = False
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| true
| true
|
790cb9aee9ca5d8aca1ae39b0c7d06ef3fe83b3d
| 5,814
|
py
|
Python
|
amazon_msk/datadog_checks/amazon_msk/config_models/defaults.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | null | null | null |
amazon_msk/datadog_checks/amazon_msk/config_models/defaults.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | null | null | null |
amazon_msk/datadog_checks/amazon_msk/config_models/defaults.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_assume_role(field, value):
return get_default_field_value(field, value)
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
return get_default_field_value(field, value)
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def instance_cache_metric_wildcards(field, value):
return True
def instance_cache_shared_labels(field, value):
return True
def instance_collect_counters_with_distributions(field, value):
return False
def instance_collect_histogram_buckets(field, value):
return True
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_enable_health_service_check(field, value):
return True
def instance_exclude_labels(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics_by_labels(field, value):
return get_default_field_value(field, value)
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_extra_metrics(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_histogram_buckets_as_distributions(field, value):
return False
def instance_hostname_format(field, value):
return get_default_field_value(field, value)
def instance_hostname_label(field, value):
return get_default_field_value(field, value)
def instance_ignore_tags(field, value):
return get_default_field_value(field, value)
def instance_jmx_exporter_port(field, value):
return 11001
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_log_requests(field, value):
return False
def instance_metrics(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_namespace(field, value):
return get_default_field_value(field, value)
def instance_node_exporter_port(field, value):
return 11002
def instance_non_cumulative_histogram_buckets(field, value):
return False
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_openmetrics_endpoint(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_prometheus_metrics_path(field, value):
return '/metrics'
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_raw_line_filters(field, value):
return get_default_field_value(field, value)
def instance_raw_metric_prefix(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_region_name(field, value):
return get_default_field_value(field, value)
def instance_rename_labels(field, value):
return get_default_field_value(field, value)
def instance_request_size(field, value):
return 16
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_share_labels(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_telemetry(field, value):
return False
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return False
def instance_use_latest_spec(field, value):
return False
def instance_use_legacy_auth_encoding(field, value):
return True
def instance_use_openmetrics(field, value):
return False
def instance_username(field, value):
return get_default_field_value(field, value)
| 20.4
| 75
| 0.78483
|
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_assume_role(field, value):
return get_default_field_value(field, value)
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
return get_default_field_value(field, value)
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def instance_cache_metric_wildcards(field, value):
return True
def instance_cache_shared_labels(field, value):
return True
def instance_collect_counters_with_distributions(field, value):
return False
def instance_collect_histogram_buckets(field, value):
return True
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_enable_health_service_check(field, value):
return True
def instance_exclude_labels(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics(field, value):
return get_default_field_value(field, value)
def instance_exclude_metrics_by_labels(field, value):
return get_default_field_value(field, value)
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_extra_metrics(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_histogram_buckets_as_distributions(field, value):
return False
def instance_hostname_format(field, value):
return get_default_field_value(field, value)
def instance_hostname_label(field, value):
return get_default_field_value(field, value)
def instance_ignore_tags(field, value):
return get_default_field_value(field, value)
def instance_jmx_exporter_port(field, value):
return 11001
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_log_requests(field, value):
return False
def instance_metrics(field, value):
return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
return 15
def instance_namespace(field, value):
return get_default_field_value(field, value)
def instance_node_exporter_port(field, value):
return 11002
def instance_non_cumulative_histogram_buckets(field, value):
return False
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_openmetrics_endpoint(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_prometheus_metrics_path(field, value):
return '/metrics'
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_raw_line_filters(field, value):
return get_default_field_value(field, value)
def instance_raw_metric_prefix(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_region_name(field, value):
return get_default_field_value(field, value)
def instance_rename_labels(field, value):
return get_default_field_value(field, value)
def instance_request_size(field, value):
return 16
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_share_labels(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_telemetry(field, value):
return False
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return False
def instance_use_latest_spec(field, value):
return False
def instance_use_legacy_auth_encoding(field, value):
return True
def instance_use_openmetrics(field, value):
return False
def instance_username(field, value):
return get_default_field_value(field, value)
| true
| true
|
790cb9e7ea467b8374fae87d05bb00d7f1e70de9
| 406
|
py
|
Python
|
setup.py
|
knu2xs/business-analyst-python-api-examples
|
c2f17bc87195872183ecbcd998b4bb0e9c295761
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
knu2xs/business-analyst-python-api-examples
|
c2f17bc87195872183ecbcd998b4bb0e9c295761
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
knu2xs/business-analyst-python-api-examples
|
c2f17bc87195872183ecbcd998b4bb0e9c295761
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import find_packages, setup
with open('README.md', 'r') as readme:
long_description = readme.read()
setup(
name='ba_samples',
package_dir={"": "src"},
packages=find_packages('src'),
version='0.1.0-dev0',
description='Examples using ArcGIS Business Analyst with Python.',
long_description=long_description,
author='Joel McCune',
license='Apache 2.0',
)
| 25.375
| 70
| 0.684729
|
from setuptools import find_packages, setup
with open('README.md', 'r') as readme:
long_description = readme.read()
setup(
name='ba_samples',
package_dir={"": "src"},
packages=find_packages('src'),
version='0.1.0-dev0',
description='Examples using ArcGIS Business Analyst with Python.',
long_description=long_description,
author='Joel McCune',
license='Apache 2.0',
)
| true
| true
|
790cbaafc95480c008c978f76b5da45274318516
| 712
|
py
|
Python
|
backend/category/urls.py
|
zerlee/open-cmdb
|
e05eeab70bf2c2e14603597bf99c45b6c3330d1e
|
[
"BSD-3-Clause"
] | 126
|
2019-09-17T17:49:35.000Z
|
2022-03-31T13:34:35.000Z
|
backend/category/urls.py
|
tom2jack/open-cmdb
|
68bc028d5d6162dbfa724d7bbf17363f65e44557
|
[
"BSD-3-Clause"
] | 5
|
2020-01-19T08:43:38.000Z
|
2021-06-10T21:58:30.000Z
|
backend/category/urls.py
|
tom2jack/open-cmdb
|
68bc028d5d6162dbfa724d7bbf17363f65e44557
|
[
"BSD-3-Clause"
] | 52
|
2019-09-20T06:10:32.000Z
|
2022-03-31T13:34:28.000Z
|
# -*- coding: utf-8 -*-
from django.conf.urls import include
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from .views import *
# Optional register() argument base_name: used to generate the url names; if the viewset does not define a queryset, base_name must be provided.
router = DefaultRouter()
router.register(r'idcs', IdcViewSet)
router.register(r'racks', RackViewSet)
router.register(r'servers', ServerViewSet)
router.register(r'sshusers', SSHUserViewSet)
router.register(r'businesslines', BusinessLineViewSet)
router.register(r'projects', ProjectViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api_dashboard/$', APIDashBoardView.as_view()),
url(r'^api_local_ssh_user/$', APILocalSSHUserView.as_view()),
]
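# Each router.register() call above makes DefaultRouter generate the standard list and
# detail routes for its viewset (e.g. /idcs/ and /idcs/<pk>/), and they are all pulled
# into urlpatterns through include(router.urls).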
| 30.956522
| 75
| 0.771067
|
from django.conf.urls import include
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from .views import *
router = DefaultRouter()
router.register(r'idcs', IdcViewSet)
router.register(r'racks', RackViewSet)
router.register(r'servers', ServerViewSet)
router.register(r'sshusers', SSHUserViewSet)
router.register(r'businesslines', BusinessLineViewSet)
router.register(r'projects', ProjectViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api_dashboard/$', APIDashBoardView.as_view()),
url(r'^api_local_ssh_user/$', APILocalSSHUserView.as_view()),
]
| true
| true
|
790cbc4dbf115b963041aacc79b8503ea8c2517c
| 2,814
|
py
|
Python
|
src/pal/automation/util.py
|
elinor-fung/coreclr
|
c1801e85024add717f518feb6a9caed60d54500f
|
[
"MIT"
] | 277
|
2015-01-04T20:42:36.000Z
|
2022-03-21T06:52:03.000Z
|
src/pal/automation/util.py
|
elinor-fung/coreclr
|
c1801e85024add717f518feb6a9caed60d54500f
|
[
"MIT"
] | 31
|
2015-01-05T08:00:38.000Z
|
2016-01-05T01:18:59.000Z
|
src/pal/automation/util.py
|
elinor-fung/coreclr
|
c1801e85024add717f518feb6a9caed60d54500f
|
[
"MIT"
] | 46
|
2015-01-21T00:41:59.000Z
|
2021-03-23T07:00:01.000Z
|
import sys
import getopt
import os
import subprocess
import shutil
import logging as log
def Initialize(platform):
print "Initializing Workspace"
global workspace
workspace = os.environ['WORKSPACE']
if platform == "windows":
# Jenkins puts quotes in the path, which is wrong. Remove quotes.
os.environ['PATH'] = os.environ['PATH'].replace('"','')
return workspace
def ParseArgs(argv):
print "Parsing arguments for compile"
try:
opts, args = getopt.getopt(argv, "t:p:a:v", ["target=", "platform=", "arch=", "verbose","noclean"])
except getopt.GetoptError:
print "ERROR: \n\t usage: python compile.py --target <target> --platform <windows|linux> --arch <arch> [--verbose] [--noclean]"
return 2,"","","",True
verbose = False
cleanUp = True
acceptedPlatforms = ['windows','linux']
for opt, arg in opts:
if opt in ("-t", "--target"):
target = arg
elif opt in ("-p", "--platform"):
if arg.lower() not in acceptedPlatforms:
print "ERROR: " + arg + "not an accepted platform. Use windows or linux."
sys.exit(2)
platform = arg.lower()
elif opt in ("-a", "--arch"):
arch = arg
elif opt in ("-v", "--verbose"):
verbose = True
elif opt in ("-c", "--noclean"):
cleanUp = False
if verbose:
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
log.info("In verbose mode.")
else:
log.basicConfig(format="%(levelname)s: %(message)s")
if target == "" or platform == "" or arch == "":
# must specify target, project and arch
log.error("Must specify target, project and arch")
return 2,"","","",True
return 0,target,platform,arch,cleanUp
def SetupDirectories(target, arch, platform):
log.info("Setting up directories")
global rootdir
global builddir
global fullBuildDirPath
rootdir = "build"
if not os.path.isdir(rootdir):
os.mkdir(rootdir)
os.chdir(rootdir)
builddir = "build-" + platform
if platform == "windows":
builddir = builddir + "-" + arch + "-" + target
if os.path.isdir(builddir):
shutil.rmtree(builddir)
os.mkdir(builddir)
os.chdir(builddir)
fullbuilddirpath = workspace + "/" + rootdir + "/" + builddir
return fullbuilddirpath
def Cleanup(cleanUp,workspace):
print "\n==================================================\n"
print "Cleaning Up."
print "\n==================================================\n"
if cleanUp:
os.chdir(workspace + "/" + rootdir)
shutil.rmtree(builddir)
os.chdir("..")
shutil.rmtree(rootdir)
log.shutdown()
return 0
| 28.714286
| 135
| 0.563255
|
import sys
import getopt
import os
import subprocess
import shutil
import logging as log
def Initialize(platform):
print "Initializing Workspace"
global workspace
workspace = os.environ['WORKSPACE']
if platform == "windows":
os.environ['PATH'] = os.environ['PATH'].replace('"','')
return workspace
def ParseArgs(argv):
print "Parsing arguments for compile"
try:
opts, args = getopt.getopt(argv, "t:p:a:v", ["target=", "platform=", "arch=", "verbose","noclean"])
except getopt.GetoptError:
print "ERROR: \n\t usage: python compile.py --target <target> --platform <windows|linux> --arch <arch> [--verbose] [--noclean]"
return 2,"","","",True
verbose = False
cleanUp = True
acceptedPlatforms = ['windows','linux']
for opt, arg in opts:
if opt in ("-t", "--target"):
target = arg
elif opt in ("-p", "--platform"):
if arg.lower() not in acceptedPlatforms:
print "ERROR: " + arg + "not an accepted platform. Use windows or linux."
sys.exit(2)
platform = arg.lower()
elif opt in ("-a", "--arch"):
arch = arg
elif opt in ("-v", "--verbose"):
verbose = True
elif opt in ("-c", "--noclean"):
cleanUp = False
if verbose:
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
log.info("In verbose mode.")
else:
log.basicConfig(format="%(levelname)s: %(message)s")
if target == "" or platform == "" or arch == "":
# must specify target, project and arch
log.error("Must specify target, project and arch")
return 2,"","","",True
return 0,target,platform,arch,cleanUp
def SetupDirectories(target, arch, platform):
log.info("Setting up directories")
global rootdir
global builddir
global fullBuildDirPath
rootdir = "build"
if not os.path.isdir(rootdir):
os.mkdir(rootdir)
os.chdir(rootdir)
builddir = "build-" + platform
if platform == "windows":
builddir = builddir + "-" + arch + "-" + target
if os.path.isdir(builddir):
shutil.rmtree(builddir)
os.mkdir(builddir)
os.chdir(builddir)
fullbuilddirpath = workspace + "/" + rootdir + "/" + builddir
return fullbuilddirpath
def Cleanup(cleanUp,workspace):
print "\n==================================================\n"
print "Cleaning Up."
print "\n==================================================\n"
if cleanUp:
os.chdir(workspace + "/" + rootdir)
shutil.rmtree(builddir)
os.chdir("..")
shutil.rmtree(rootdir)
log.shutdown()
return 0
| false
| true
|
790cbcde2692dc3349e1e263ee75240aff28ac95
| 24,971
|
py
|
Python
|
examples/tutorial.py
|
CadenScharpf/manim-cs
|
17a9717f5580addd7c534f05a3d92c962dbe80eb
|
[
"MIT"
] | 4
|
2019-03-18T02:39:00.000Z
|
2021-12-15T20:39:15.000Z
|
examples/tutorial.py
|
CadenScharpf/manim-cs
|
17a9717f5580addd7c534f05a3d92c962dbe80eb
|
[
"MIT"
] | 5
|
2021-03-18T22:49:37.000Z
|
2022-03-11T23:41:59.000Z
|
examples/tutorial.py
|
CadenScharpf/manim-cs
|
17a9717f5580addd7c534f05a3d92c962dbe80eb
|
[
"MIT"
] | null | null | null |
from big_ol_pile_of_manim_imports import *
import os
import pyclbr
class Shapes(Scene):
#A few simple shapes
#Python 2.7 version runs in Python 3.7 without changes
def construct(self):
#circle = Circle()
#square = Square()
line=Line(UP,DOWN)
#line2=Line
#triangle=Polygon(np.array([0,0,0]),np.array([1,1,0]),np.array([1,-1,0]))
self.add(line)
#self.play(ShowCreation(circle))
#self.play(FadeOut(circle))
#self.play(GrowFromCenter(square))
#self.play(Transform(square,triangle))
class MoreShapes(Scene):
#A few more simple shapes
#2.7 version runs in 3.7 without any changes
#Note: I fixed my 'play command not found' issue by installing sox
def construct(self):
circle = Circle(color=PURPLE_A)
square = Square(fill_color=GOLD_B, fill_opacity=1, color=GOLD_A)
square.move_to(UP+LEFT)
circle.surround(square)
rectangle = Rectangle(height=2, width=3)
ellipse=Ellipse(width=3, height=1, color=RED)
ellipse.shift(2*DOWN+2*RIGHT)
pointer = CurvedArrow(2*RIGHT,5*RIGHT,color=MAROON_C)
arrow = Arrow(LEFT,UP)
arrow.next_to(circle,DOWN+LEFT)
rectangle.next_to(arrow,DOWN+LEFT)
ring=Annulus(inner_radius=.5, outer_radius=1, color=BLUE)
ring.next_to(ellipse, RIGHT)
self.play(FadeIn(square))
self.play(Rotating(square),FadeIn(circle))
self.play(GrowArrow(arrow))
self.play(GrowFromCenter(rectangle), GrowFromCenter(ellipse), GrowFromCenter(ring))
self.add(pointer)
class MovingShapes(Scene):
#Show the difference between .shift() and .move_to
def construct(self):
circle=Circle(color=TEAL_A)
circle.move_to(LEFT)
square=Circle()
square.move_to(LEFT+3*DOWN)
self.play(GrowFromCenter(circle), GrowFromCenter(square), rate=5)
self.play(ApplyMethod(circle.move_to,RIGHT), ApplyMethod(square.shift,RIGHT))
self.play(ApplyMethod(circle.move_to,RIGHT+UP), ApplyMethod(square.shift,RIGHT+UP))
self.play(ApplyMethod(circle.move_to,LEFT+UP), ApplyMethod(square.shift,LEFT+UP))
class AddingText(Scene):
#Adding text on the screen
def construct(self):
my_first_text=TextMobject("Writing with manim is fun")
second_line=TextMobject("and easy to do!")
second_line.next_to(my_first_text,DOWN)
third_line=TextMobject("for me and you!")
third_line.next_to(my_first_text,DOWN)
self.add(my_first_text, second_line)
self.wait(2)
self.play(Transform(second_line,third_line))
self.wait(2)
second_line.shift(3*DOWN)
self.play(ApplyMethod(my_first_text.shift,3*UP))
###Try uncommenting the following###
#self.play(ApplyMethod(second_line.move_to, LEFT_SIDE-2*LEFT))
#self.play(ApplyMethod(my_first_text.next_to,second_line))
class AddingMoreText(Scene):
#Playing around with text properties
def construct(self):
quote = TextMobject("Imagination is more important than knowledge")
quote.set_color(RED)
quote.to_edge(UP)
quote2 = TextMobject("A person who never made a mistake never tried anything new")
quote2.set_color(YELLOW)
author=TextMobject("-Albert Einstein")
author.scale(0.75)
author.next_to(quote.get_corner(DOWN+RIGHT),DOWN)
self.add(quote)
self.add(author)
self.wait(2)
self.play(Transform(quote,quote2),ApplyMethod(author.move_to,quote2.get_corner(DOWN+RIGHT)+DOWN+2*LEFT))
self.play(ApplyMethod(author.scale,1.5))
author.match_color(quote2)
self.play(FadeOut(quote))
class RotateAndHighlight(Scene):
#Rotation of text and highlighting with surrounding geometries
def construct(self):
square=Square(side_length=5,fill_color=YELLOW, fill_opacity=1)
label=TextMobject("Text at an angle")
label.bg=BackgroundRectangle(label,fill_opacity=1)
label_group=VGroup(label.bg,label) #Order matters
label_group.rotate(TAU/8)
label2=TextMobject("Boxed text",color=BLACK)
label2.bg=SurroundingRectangle(label2,color=BLUE,fill_color=RED, fill_opacity=.5)
label2_group=VGroup(label2,label2.bg)
label2_group.next_to(label_group,DOWN)
label3=TextMobject("Rainbow")
label3.scale(2)
label3.set_color_by_gradient(RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE)
label3.to_edge(DOWN)
self.add(square)
self.play(FadeIn(label_group))
self.play(FadeIn(label2_group))
self.play(FadeIn(label3))
class BasicEquations(Scene):
#A short script showing how to use Latex commands
def construct(self):
eq1=TextMobject("$\\vec{X}_0 \\cdot \\vec{Y}_1 = 3$")
eq1.shift(2*UP)
eq2=TexMobject(r"\vec{F}_{net} = \sum_i \vec{F}_i")
eq2.shift(2*DOWN)
self.play(Write(eq1))
self.play(Write(eq2))
class ColoringEquations(Scene):
#Grouping and coloring parts of equations
def construct(self):
line1=TexMobject(r"\text{The vector } \vec{F}_{net} \text{ is the net }",r"\text{force }",r"\text{on object of mass }")
line2=TexMobject("m", "\\text{ and acceleration }", "\\vec{a}", ". ")
sentence=VGroup(line1,line2)
sentence.arrange_submobjects(DOWN, buff=MED_LARGE_BUFF)
self.play(Write(sentence))
class UsingBraces(Scene):
#Using braces to group text together
def construct(self):
eq1A = TextMobject("4x + 3y")
eq1B = TextMobject("=")
eq1C = TextMobject("0")
eq2A = TextMobject("5x -2y")
eq2B = TextMobject("=")
eq2C = TextMobject("3")
eq1B.next_to(eq1A,RIGHT)
eq1C.next_to(eq1B,RIGHT)
eq2A.shift(DOWN)
eq2B.shift(DOWN)
eq2C.shift(DOWN)
eq2A.align_to(eq1A,LEFT)
eq2B.align_to(eq1B,LEFT)
eq2C.align_to(eq1C,LEFT)
eq_group=VGroup(eq1A,eq2A)
braces=Brace(eq_group,LEFT)
eq_text = braces.get_text("A pair of equations")
self.add(eq1A, eq1B, eq1C)
self.add(eq2A, eq2B, eq2C)
self.play(GrowFromCenter(braces),Write(eq_text))
class UsingBracesConcise(Scene):
#A more concise block of code with all columns aligned
def construct(self):
eq1_text=["4","x","+","3","y","=","0"]
eq2_text=["5","x","-","2","y","=","3"]
eq1_mob=TexMobject(*eq1_text)
eq2_mob=TexMobject(*eq2_text)
eq1_mob.set_color_by_tex_to_color_map({
"x":RED_B,
"y":GREEN_C
})
eq2_mob.set_color_by_tex_to_color_map({
"x":RED_B,
"y":GREEN_C
})
for i,item in enumerate(eq2_mob):
item.align_to(eq1_mob[i],LEFT)
eq1=VGroup(*eq1_mob)
eq2=VGroup(*eq2_mob)
eq2.shift(DOWN)
eq_group=VGroup(eq1,eq2)
braces=Brace(eq_group,LEFT)
eq_text = braces.get_text("A pair of equations")
self.play(Write(eq1),Write(eq2))
self.play(GrowFromCenter(braces),Write(eq_text))
class PlotFunctions(GraphScene):
CONFIG = {
"x_min" : -10,
"x_max" : 10.3,
"y_min" : -1.5,
"y_max" : 1.5,
"graph_origin" : ORIGIN ,
"function_color" : RED ,
"axes_color" : GREEN,
"x_labeled_nums" :range(-10,12,2),
}
def construct(self):
self.setup_axes(animate=True)
func_graph=self.get_graph(self.func_to_graph,self.function_color)
func_graph2=self.get_graph(self.func_to_graph2)
vert_line = self.get_vertical_line_to_graph(TAU,func_graph,color=YELLOW)
graph_lab = self.get_graph_label(func_graph, label = "\\cos(x)")
graph_lab2=self.get_graph_label(func_graph2,label = "\\sin(x)", x_val=-10, direction=UP/2)
two_pi = TexMobject("x = 2 \\pi")
label_coord = self.input_to_graph_point(TAU,func_graph)
two_pi.next_to(label_coord,RIGHT+UP)
self.play(ShowCreation(func_graph),ShowCreation(func_graph2))
self.play(ShowCreation(vert_line), ShowCreation(graph_lab), ShowCreation(graph_lab2),ShowCreation(two_pi))
def func_to_graph(self,x):
return np.cos(x)
def func_to_graph2(self,x):
return np.sin(x)
class ExampleApproximation(GraphScene):
CONFIG = {
"function" : lambda x : np.cos(x),
"function_color" : BLUE,
"taylor" : [lambda x: 1, lambda x: 1-x**2/2, lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4), lambda x: 1-x**2/2+x**4/math.factorial(4)-x**6/math.factorial(6),
lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6)+x**8/math.factorial(8), lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6)+x**8/math.factorial(8) - x**10/math.factorial(10)],
"center_point" : 0,
"approximation_color" : GREEN,
"x_min" : -10,
"x_max" : 10,
"y_min" : -1,
"y_max" : 1,
"graph_origin" : ORIGIN ,
"x_labeled_nums" :range(-10,12,2),
}
def construct(self):
self.setup_axes(animate=True)
func_graph = self.get_graph(
self.function,
self.function_color,
)
approx_graphs = [
self.get_graph(
f,
self.approximation_color
)
for f in self.taylor
]
term_num = [
TexMobject("n = " + str(n),aligned_edge=TOP)
for n in range(0,8)]
#[t.to_edge(BOTTOM,buff=SMALL_BUFF) for t in term_num]
#term = TexMobject("")
#term.to_edge(BOTTOM,buff=SMALL_BUFF)
term = VectorizedPoint(3*DOWN)
approx_graph = VectorizedPoint(
self.input_to_graph_point(self.center_point, func_graph)
)
self.play(
ShowCreation(func_graph),
)
for n,graph in enumerate(approx_graphs):
self.play(
Transform(approx_graph, graph, run_time = 2),
Transform(term,term_num[n])
)
self.wait()
class DrawAnAxis(Scene):
CONFIG = { "plane_kwargs" : {
"x_line_frequency" : 2,
"y_line_frequency" :2
}
}
def construct(self):
my_plane = NumberPlane(**self.plane_kwargs)
my_plane.add(my_plane.get_axis_labels())
self.add(my_plane)
#self.wait()
class SimpleField(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED
},
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs) #Create axes and grid
plane.add(plane.get_axis_labels()) #add x and y label
self.add(plane) #Place grid on screen
points = [x*RIGHT+y*UP
for x in np.arange(-5,5,1)
for y in np.arange(-5,5,1)
] #List of vectors pointing to each grid point
vec_field = [] #Empty list to use in for loop
for point in points:
field = 0.5*RIGHT + 0.5*UP #Constant field up and to right
result = Vector(field).shift(point) #Create vector and shift it to grid point
vec_field.append(result) #Append to list
draw_field = VGroup(*vec_field) #Pass list of vectors to create a VGroup
self.play(ShowCreation(draw_field)) #Draw VGroup on screen
class FieldWithAxes(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field = VGroup(*[self.calc_field(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.play(ShowCreation(field))
def calc_field(self,point):
#This calculates the field at a single point.
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
#efield = np.array((-y,x,0))/math.sqrt(x**2+y**2) #Try one of these two fields
#efield = np.array(( -2*(y%2)+1 , -2*(x%2)+1 , 0 ))/3 #Try one of these two fields
return Vector(efield).shift(point)
class ExampleThreeD(ThreeDScene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
#self.set_camera_position(0, -np.pi/2) #Old code
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field2D = VGroup(*[self.calc_field2D(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.set_camera_orientation(phi=PI/3,gamma=PI/5)
self.play(ShowCreation(field2D))
self.wait()
self.move_camera(gamma=0,run_time=1)
self.move_camera(phi=3/4*PI, theta=-PI/2)
self.begin_ambient_camera_rotation(rate=0.1)
self.wait(6)
def calc_field2D(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
class EFieldInThreeD(ThreeDScene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field2D = VGroup(*[self.calc_field2D(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
field3D = VGroup(*[self.calc_field3D(x*RIGHT+y*UP+z*OUT)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
for z in np.arange(-5,5,1)])
self.play(ShowCreation(field3D))
self.wait()
self.move_camera(0.8*np.pi/2, -0.45*np.pi)
self.begin_ambient_camera_rotation()
self.wait(6)
def calc_field2D(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
def calc_field3D(self,point):
x,y,z = point
Rx,Ry,Rz = self.point_charge_loc
r = math.sqrt((x-Rx)**2 + (y-Ry)**2+(z-Rz)**2)
efield = (point - self.point_charge_loc)/r**3
#efield = np.array((-y,x,z))/math.sqrt(x**2+y**2+z**2)
return Vector(efield).shift(point)
class MovingCharges(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field = VGroup(*[self.calc_field(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.field=field
source_charge = self.Positron().move_to(self.point_charge_loc)
self.play(FadeIn(source_charge))
self.play(ShowCreation(field))
self.moving_charge()
def calc_field(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
def moving_charge(self):
numb_charges=4
possible_points = [v.get_start() for v in self.field]
points = random.sample(possible_points, numb_charges)
particles = VGroup(*[
self.Positron().move_to(point)
for point in points
])
for particle in particles:
particle.velocity = np.array((0,0,0))
self.play(FadeIn(particles))
self.moving_particles = particles
self.add_foreground_mobjects(self.moving_particles )
self.always_continually_update = True
self.wait(10)
def field_at_point(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return efield
def continual_update(self, *args, **kwargs):
if hasattr(self, "moving_particles"):
dt = self.frame_duration
for p in self.moving_particles:
accel = self.field_at_point(p.get_center())
p.velocity = p.velocity + accel*dt
p.shift(p.velocity*dt)
class Positron(Circle):
CONFIG = {
"radius" : 0.2,
"stroke_width" : 3,
"color" : RED,
"fill_color" : RED,
"fill_opacity" : 0.5,
}
def __init__(self, **kwargs):
Circle.__init__(self, **kwargs)
plus = TexMobject("+")
plus.scale(0.7)
plus.move_to(self)
self.add(plus)
class FieldOfMovingCharge(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_start_loc" : 5.5*LEFT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field = VGroup(*[self.create_vect_field(self.point_charge_start_loc,x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.field=field
self.source_charge = self.Positron().move_to(self.point_charge_start_loc)
self.source_charge.velocity = np.array((1,0,0))
self.play(FadeIn(self.source_charge))
self.play(ShowCreation(field))
self.moving_charge()
def create_vect_field(self,source_charge,observation_point):
return Vector(self.calc_field(source_charge,observation_point)).shift(observation_point)
def calc_field(self,source_point,observation_point):
x,y,z = observation_point
Rx,Ry,Rz = source_point
r = math.sqrt((x-Rx)**2 + (y-Ry)**2 + (z-Rz)**2)
if r<0.0000001: #Prevent divide by zero
efield = np.array((0,0,0))
else:
efield = (observation_point - source_point)/r**3
return efield
def moving_charge(self):
numb_charges=3
possible_points = [v.get_start() for v in self.field]
points = random.sample(possible_points, numb_charges)
particles = VGroup(self.source_charge, *[
self.Positron().move_to(point)
for point in points
])
for particle in particles[1:]:
particle.velocity = np.array((0,0,0))
self.play(FadeIn(particles[1:]))
self.moving_particles = particles
self.add_foreground_mobjects(self.moving_particles )
self.always_continually_update = True
self.wait(10)
def continual_update(self, *args, **kwargs):
Scene.continual_update(self, *args, **kwargs)
if hasattr(self, "moving_particles"):
dt = self.frame_duration
for v in self.field:
field_vect=np.zeros(3)
for p in self.moving_particles:
field_vect = field_vect + self.calc_field(p.get_center(), v.get_start())
v.put_start_and_end_on(v.get_start(), field_vect+v.get_start())
for p in self.moving_particles:
accel = np.zeros(3)
p.velocity = p.velocity + accel*dt
p.shift(p.velocity*dt)
class Positron(Circle):
CONFIG = {
"radius" : 0.2,
"stroke_width" : 3,
"color" : RED,
"fill_color" : RED,
"fill_opacity" : 0.5,
}
def __init__(self, **kwargs):
Circle.__init__(self, **kwargs)
plus = TexMobject("+")
plus.scale(0.7)
plus.move_to(self)
self.add(plus)
HEAD_INDEX = 0
BODY_INDEX = 1
ARMS_INDEX = 2
LEGS_INDEX = 3
class StickMan(SVGMobject):
CONFIG = {
"color" : BLUE_E,
"file_name_prefix": "stick_man",
"stroke_width" : 2,
"stroke_color" : WHITE,
"fill_opacity" : 1.0,
"height" : 3,
}
def __init__(self, mode = "plain", **kwargs):
digest_config(self, kwargs)
self.mode = mode
self.parts_named = False
try:
svg_file = os.path.join(
SVG_IMAGE_DIR,
"%s_%s.svg" % (self.file_name_prefix, mode)
)
SVGMobject.__init__(self, file_name=svg_file, **kwargs)
except:
warnings.warn("No %s design with mode %s" %
(self.file_name_prefix, mode))
svg_file = os.path.join(
SVG_IMAGE_DIR,
"stick_man_plain.svg",
)
SVGMobject.__init__(self, mode="plain", file_name=svg_file, **kwargs)
def name_parts(self):
self.head = self.submobjects[HEAD_INDEX]
self.body = self.submobjects[BODY_INDEX]
self.arms = self.submobjects[ARMS_INDEX]
self.legs = self.submobjects[LEGS_INDEX]
self.parts_named = True
def init_colors(self):
SVGMobject.init_colors(self)
if not self.parts_named:
self.name_parts()
self.head.set_fill(self.color, opacity = 1)
self.body.set_fill(RED, opacity = 1)
self.arms.set_fill(YELLOW, opacity = 1)
self.legs.set_fill(BLUE, opacity = 1)
return self
class Waving(Scene):
def construct(self):
start_man = StickMan()
plain_man = StickMan()
waving_man = StickMan("wave")
self.add(start_man)
self.wait()
self.play(Transform(start_man,waving_man))
self.play(Transform(start_man,plain_man))
self.wait()
class CirclesAndSquares(SVGMobject):
CONFIG = {
"color" : BLUE_E,
"file_name_prefix": "circles_and_squares",
"stroke_width" : 2,
"stroke_color" : WHITE,
"fill_opacity" : 1.0,
"height" : 3,
"start_corner" : None,
"circle_index" : 0,
"line1_index" :1,
"line2_index" : 2,
"square1_index" : 3,
"square2_index" : 4,
}
def __init__(self, mode = "plain", **kwargs):
digest_config(self, kwargs)
self.mode = mode
self.parts_named = False
try:
svg_file = os.path.join(
SVG_IMAGE_DIR,
"%s_%s.svg" % (self.file_name_prefix, mode)
)
SVGMobject.__init__(self, file_name=svg_file, **kwargs)
except:
warnings.warn("No %s design with mode %s" %
(self.file_name_prefix, mode))
svg_file = os.path.join(
SVG_IMAGE_DIR,
"circles_and_squares_plain.svg",
)
SVGMobject.__init__(self, mode="plain", file_name=svg_file, **kwargs)
def name_parts(self):
self.circle = self.submobjects[self.circle_index]
self.line1 = self.submobjects[self.line1_index]
self.line2 = self.submobjects[self.line2_index]
self.square1 = self.submobjects[self.square1_index]
self.square2 = self.submobjects[self.square2_index]
self.parts_named = True
def init_colors(self):
SVGMobject.init_colors(self)
self.name_parts()
self.circle.set_fill(RED, opacity = 1)
self.line1.set_fill(self.color, opacity = 0)
self.line2.set_fill(self.color, opacity = 0)
self.square1.set_fill(GREEN, opacity = 1)
self.square2.set_fill(BLUE, opacity = 1)
return self
class SVGCircleAndSquare(Scene):
def construct(self):
thingy = CirclesAndSquares()
self.add(thingy)
self.wait()
if __name__ == "__main__":
# Call this file at command line to make sure all scenes work with version of manim
# type "python manim_tutorial_P37.py" at command line to run all scenes in this file
#Must have "import os" and "import pyclbr" at start of file to use this
###Using Python class browser to determine which classes are defined in this file
module_name = 'manim_tutorial_P37' #Name of current file
module_info = pyclbr.readmodule(module_name)
for item in module_info.values():
if item.module==module_name:
print(item.name)
os.system("python -m manim manim_tutorial_P37.py %s -l" % item.name) #Does not play files
| 33.250333
| 245
| 0.593528
|
from big_ol_pile_of_manim_imports import *
import os
import pyclbr
class Shapes(Scene):
def construct(self):
line=Line(UP,DOWN)
self.add(line)
class MoreShapes(Scene):
def construct(self):
circle = Circle(color=PURPLE_A)
square = Square(fill_color=GOLD_B, fill_opacity=1, color=GOLD_A)
square.move_to(UP+LEFT)
circle.surround(square)
rectangle = Rectangle(height=2, width=3)
ellipse=Ellipse(width=3, height=1, color=RED)
ellipse.shift(2*DOWN+2*RIGHT)
pointer = CurvedArrow(2*RIGHT,5*RIGHT,color=MAROON_C)
arrow = Arrow(LEFT,UP)
arrow.next_to(circle,DOWN+LEFT)
rectangle.next_to(arrow,DOWN+LEFT)
ring=Annulus(inner_radius=.5, outer_radius=1, color=BLUE)
ring.next_to(ellipse, RIGHT)
self.play(FadeIn(square))
self.play(Rotating(square),FadeIn(circle))
self.play(GrowArrow(arrow))
self.play(GrowFromCenter(rectangle), GrowFromCenter(ellipse), GrowFromCenter(ring))
self.add(pointer)
class MovingShapes(Scene):
def construct(self):
circle=Circle(color=TEAL_A)
circle.move_to(LEFT)
square=Circle()
square.move_to(LEFT+3*DOWN)
self.play(GrowFromCenter(circle), GrowFromCenter(square), rate=5)
self.play(ApplyMethod(circle.move_to,RIGHT), ApplyMethod(square.shift,RIGHT))
self.play(ApplyMethod(circle.move_to,RIGHT+UP), ApplyMethod(square.shift,RIGHT+UP))
self.play(ApplyMethod(circle.move_to,LEFT+UP), ApplyMethod(square.shift,LEFT+UP))
class AddingText(Scene):
def construct(self):
my_first_text=TextMobject("Writing with manim is fun")
second_line=TextMobject("and easy to do!")
second_line.next_to(my_first_text,DOWN)
third_line=TextMobject("for me and you!")
third_line.next_to(my_first_text,DOWN)
self.add(my_first_text, second_line)
self.wait(2)
self.play(Transform(second_line,third_line))
self.wait(2)
second_line.shift(3*DOWN)
self.play(ApplyMethod(my_first_text.shift,3*UP))
class AddingMoreText(Scene):
def construct(self):
quote = TextMobject("Imagination is more important than knowledge")
quote.set_color(RED)
quote.to_edge(UP)
quote2 = TextMobject("A person who never made a mistake never tried anything new")
quote2.set_color(YELLOW)
author=TextMobject("-Albert Einstein")
author.scale(0.75)
author.next_to(quote.get_corner(DOWN+RIGHT),DOWN)
self.add(quote)
self.add(author)
self.wait(2)
self.play(Transform(quote,quote2),ApplyMethod(author.move_to,quote2.get_corner(DOWN+RIGHT)+DOWN+2*LEFT))
self.play(ApplyMethod(author.scale,1.5))
author.match_color(quote2)
self.play(FadeOut(quote))
class RotateAndHighlight(Scene):
def construct(self):
square=Square(side_length=5,fill_color=YELLOW, fill_opacity=1)
label=TextMobject("Text at an angle")
label.bg=BackgroundRectangle(label,fill_opacity=1)
label_group=VGroup(label.bg,label)
label_group.rotate(TAU/8)
label2=TextMobject("Boxed text",color=BLACK)
label2.bg=SurroundingRectangle(label2,color=BLUE,fill_color=RED, fill_opacity=.5)
label2_group=VGroup(label2,label2.bg)
label2_group.next_to(label_group,DOWN)
label3=TextMobject("Rainbow")
label3.scale(2)
label3.set_color_by_gradient(RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE)
label3.to_edge(DOWN)
self.add(square)
self.play(FadeIn(label_group))
self.play(FadeIn(label2_group))
self.play(FadeIn(label3))
class BasicEquations(Scene):
def construct(self):
eq1=TextMobject("$\\vec{X}_0 \\cdot \\vec{Y}_1 = 3$")
eq1.shift(2*UP)
eq2=TexMobject(r"\vec{F}_{net} = \sum_i \vec{F}_i")
eq2.shift(2*DOWN)
self.play(Write(eq1))
self.play(Write(eq2))
class ColoringEquations(Scene):
def construct(self):
line1=TexMobject(r"\text{The vector } \vec{F}_{net} \text{ is the net }",r"\text{force }",r"\text{on object of mass }")
line2=TexMobject("m", "\\text{ and acceleration }", "\\vec{a}", ". ")
sentence=VGroup(line1,line2)
sentence.arrange_submobjects(DOWN, buff=MED_LARGE_BUFF)
self.play(Write(sentence))
class UsingBraces(Scene):
def construct(self):
eq1A = TextMobject("4x + 3y")
eq1B = TextMobject("=")
eq1C = TextMobject("0")
eq2A = TextMobject("5x -2y")
eq2B = TextMobject("=")
eq2C = TextMobject("3")
eq1B.next_to(eq1A,RIGHT)
eq1C.next_to(eq1B,RIGHT)
eq2A.shift(DOWN)
eq2B.shift(DOWN)
eq2C.shift(DOWN)
eq2A.align_to(eq1A,LEFT)
eq2B.align_to(eq1B,LEFT)
eq2C.align_to(eq1C,LEFT)
eq_group=VGroup(eq1A,eq2A)
braces=Brace(eq_group,LEFT)
eq_text = braces.get_text("A pair of equations")
self.add(eq1A, eq1B, eq1C)
self.add(eq2A, eq2B, eq2C)
self.play(GrowFromCenter(braces),Write(eq_text))
class UsingBracesConcise(Scene):
def construct(self):
eq1_text=["4","x","+","3","y","=","0"]
eq2_text=["5","x","-","2","y","=","3"]
eq1_mob=TexMobject(*eq1_text)
eq2_mob=TexMobject(*eq2_text)
eq1_mob.set_color_by_tex_to_color_map({
"x":RED_B,
"y":GREEN_C
})
eq2_mob.set_color_by_tex_to_color_map({
"x":RED_B,
"y":GREEN_C
})
for i,item in enumerate(eq2_mob):
item.align_to(eq1_mob[i],LEFT)
eq1=VGroup(*eq1_mob)
eq2=VGroup(*eq2_mob)
eq2.shift(DOWN)
eq_group=VGroup(eq1,eq2)
braces=Brace(eq_group,LEFT)
eq_text = braces.get_text("A pair of equations")
self.play(Write(eq1),Write(eq2))
self.play(GrowFromCenter(braces),Write(eq_text))
class PlotFunctions(GraphScene):
CONFIG = {
"x_min" : -10,
"x_max" : 10.3,
"y_min" : -1.5,
"y_max" : 1.5,
"graph_origin" : ORIGIN ,
"function_color" : RED ,
"axes_color" : GREEN,
"x_labeled_nums" :range(-10,12,2),
}
def construct(self):
self.setup_axes(animate=True)
func_graph=self.get_graph(self.func_to_graph,self.function_color)
func_graph2=self.get_graph(self.func_to_graph2)
vert_line = self.get_vertical_line_to_graph(TAU,func_graph,color=YELLOW)
graph_lab = self.get_graph_label(func_graph, label = "\\cos(x)")
graph_lab2=self.get_graph_label(func_graph2,label = "\\sin(x)", x_val=-10, direction=UP/2)
two_pi = TexMobject("x = 2 \\pi")
label_coord = self.input_to_graph_point(TAU,func_graph)
two_pi.next_to(label_coord,RIGHT+UP)
self.play(ShowCreation(func_graph),ShowCreation(func_graph2))
self.play(ShowCreation(vert_line), ShowCreation(graph_lab), ShowCreation(graph_lab2),ShowCreation(two_pi))
def func_to_graph(self,x):
return np.cos(x)
def func_to_graph2(self,x):
return np.sin(x)
class ExampleApproximation(GraphScene):
CONFIG = {
"function" : lambda x : np.cos(x),
"function_color" : BLUE,
"taylor" : [lambda x: 1, lambda x: 1-x**2/2, lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4), lambda x: 1-x**2/2+x**4/math.factorial(4)-x**6/math.factorial(6),
lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6)+x**8/math.factorial(8), lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6)+x**8/math.factorial(8) - x**10/math.factorial(10)],
"center_point" : 0,
"approximation_color" : GREEN,
"x_min" : -10,
"x_max" : 10,
"y_min" : -1,
"y_max" : 1,
"graph_origin" : ORIGIN ,
"x_labeled_nums" :range(-10,12,2),
}
def construct(self):
self.setup_axes(animate=True)
func_graph = self.get_graph(
self.function,
self.function_color,
)
approx_graphs = [
self.get_graph(
f,
self.approximation_color
)
for f in self.taylor
]
term_num = [
TexMobject("n = " + str(n),aligned_edge=TOP)
for n in range(0,8)]
term = VectorizedPoint(3*DOWN)
approx_graph = VectorizedPoint(
self.input_to_graph_point(self.center_point, func_graph)
)
self.play(
ShowCreation(func_graph),
)
for n,graph in enumerate(approx_graphs):
self.play(
Transform(approx_graph, graph, run_time = 2),
Transform(term,term_num[n])
)
self.wait()
class DrawAnAxis(Scene):
CONFIG = { "plane_kwargs" : {
"x_line_frequency" : 2,
"y_line_frequency" :2
}
}
def construct(self):
my_plane = NumberPlane(**self.plane_kwargs)
my_plane.add(my_plane.get_axis_labels())
self.add(my_plane)
class SimpleField(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED
},
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.add(plane.get_axis_labels())
self.add(plane)
points = [x*RIGHT+y*UP
for x in np.arange(-5,5,1)
for y in np.arange(-5,5,1)
]
vec_field = []
for point in points:
field = 0.5*RIGHT + 0.5*UP
result = Vector(field).shift(point)
vec_field.append(result)
draw_field = VGroup(*vec_field)
self.play(ShowCreation(draw_field))
class FieldWithAxes(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field = VGroup(*[self.calc_field(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.play(ShowCreation(field))
def calc_field(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
class ExampleThreeD(ThreeDScene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field2D = VGroup(*[self.calc_field2D(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.set_camera_orientation(phi=PI/3,gamma=PI/5)
self.play(ShowCreation(field2D))
self.wait()
self.move_camera(gamma=0,run_time=1)
self.move_camera(phi=3/4*PI, theta=-PI/2)
self.begin_ambient_camera_rotation(rate=0.1)
self.wait(6)
def calc_field2D(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
class EFieldInThreeD(ThreeDScene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field2D = VGroup(*[self.calc_field2D(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
field3D = VGroup(*[self.calc_field3D(x*RIGHT+y*UP+z*OUT)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
for z in np.arange(-5,5,1)])
self.play(ShowCreation(field3D))
self.wait()
self.move_camera(0.8*np.pi/2, -0.45*np.pi)
self.begin_ambient_camera_rotation()
self.wait(6)
def calc_field2D(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
def calc_field3D(self,point):
x,y,z = point
Rx,Ry,Rz = self.point_charge_loc
r = math.sqrt((x-Rx)**2 + (y-Ry)**2+(z-Rz)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
class MovingCharges(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field = VGroup(*[self.calc_field(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.field=field
source_charge = self.Positron().move_to(self.point_charge_loc)
self.play(FadeIn(source_charge))
self.play(ShowCreation(field))
self.moving_charge()
def calc_field(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
def moving_charge(self):
numb_charges=4
possible_points = [v.get_start() for v in self.field]
points = random.sample(possible_points, numb_charges)
particles = VGroup(*[
self.Positron().move_to(point)
for point in points
])
for particle in particles:
particle.velocity = np.array((0,0,0))
self.play(FadeIn(particles))
self.moving_particles = particles
self.add_foreground_mobjects(self.moving_particles )
self.always_continually_update = True
self.wait(10)
def field_at_point(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return efield
def continual_update(self, *args, **kwargs):
if hasattr(self, "moving_particles"):
dt = self.frame_duration
for p in self.moving_particles:
accel = self.field_at_point(p.get_center())
p.velocity = p.velocity + accel*dt
p.shift(p.velocity*dt)
class Positron(Circle):
CONFIG = {
"radius" : 0.2,
"stroke_width" : 3,
"color" : RED,
"fill_color" : RED,
"fill_opacity" : 0.5,
}
def __init__(self, **kwargs):
Circle.__init__(self, **kwargs)
plus = TexMobject("+")
plus.scale(0.7)
plus.move_to(self)
self.add(plus)
class FieldOfMovingCharge(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_start_loc" : 5.5*LEFT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
plane.main_lines.fade(.9)
plane.add(plane.get_axis_labels())
self.add(plane)
field = VGroup(*[self.create_vect_field(self.point_charge_start_loc,x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.field=field
self.source_charge = self.Positron().move_to(self.point_charge_start_loc)
self.source_charge.velocity = np.array((1,0,0))
self.play(FadeIn(self.source_charge))
self.play(ShowCreation(field))
self.moving_charge()
def create_vect_field(self,source_charge,observation_point):
return Vector(self.calc_field(source_charge,observation_point)).shift(observation_point)
def calc_field(self,source_point,observation_point):
x,y,z = observation_point
Rx,Ry,Rz = source_point
r = math.sqrt((x-Rx)**2 + (y-Ry)**2 + (z-Rz)**2)
if r<0.0000001:
efield = np.array((0,0,0))
else:
efield = (observation_point - source_point)/r**3
return efield
def moving_charge(self):
numb_charges=3
possible_points = [v.get_start() for v in self.field]
points = random.sample(possible_points, numb_charges)
particles = VGroup(self.source_charge, *[
self.Positron().move_to(point)
for point in points
])
for particle in particles[1:]:
particle.velocity = np.array((0,0,0))
self.play(FadeIn(particles[1:]))
self.moving_particles = particles
self.add_foreground_mobjects(self.moving_particles )
self.always_continually_update = True
self.wait(10)
def continual_update(self, *args, **kwargs):
Scene.continual_update(self, *args, **kwargs)
if hasattr(self, "moving_particles"):
dt = self.frame_duration
for v in self.field:
field_vect=np.zeros(3)
for p in self.moving_particles:
field_vect = field_vect + self.calc_field(p.get_center(), v.get_start())
v.put_start_and_end_on(v.get_start(), field_vect+v.get_start())
for p in self.moving_particles:
accel = np.zeros(3)
p.velocity = p.velocity + accel*dt
p.shift(p.velocity*dt)
class Positron(Circle):
CONFIG = {
"radius" : 0.2,
"stroke_width" : 3,
"color" : RED,
"fill_color" : RED,
"fill_opacity" : 0.5,
}
def __init__(self, **kwargs):
Circle.__init__(self, **kwargs)
plus = TexMobject("+")
plus.scale(0.7)
plus.move_to(self)
self.add(plus)
HEAD_INDEX = 0
BODY_INDEX = 1
ARMS_INDEX = 2
LEGS_INDEX = 3
class StickMan(SVGMobject):
CONFIG = {
"color" : BLUE_E,
"file_name_prefix": "stick_man",
"stroke_width" : 2,
"stroke_color" : WHITE,
"fill_opacity" : 1.0,
"height" : 3,
}
def __init__(self, mode = "plain", **kwargs):
digest_config(self, kwargs)
self.mode = mode
self.parts_named = False
try:
svg_file = os.path.join(
SVG_IMAGE_DIR,
"%s_%s.svg" % (self.file_name_prefix, mode)
)
SVGMobject.__init__(self, file_name=svg_file, **kwargs)
except:
warnings.warn("No %s design with mode %s" %
(self.file_name_prefix, mode))
svg_file = os.path.join(
SVG_IMAGE_DIR,
"stick_man_plain.svg",
)
SVGMobject.__init__(self, mode="plain", file_name=svg_file, **kwargs)
def name_parts(self):
self.head = self.submobjects[HEAD_INDEX]
self.body = self.submobjects[BODY_INDEX]
self.arms = self.submobjects[ARMS_INDEX]
self.legs = self.submobjects[LEGS_INDEX]
self.parts_named = True
def init_colors(self):
SVGMobject.init_colors(self)
if not self.parts_named:
self.name_parts()
self.head.set_fill(self.color, opacity = 1)
self.body.set_fill(RED, opacity = 1)
self.arms.set_fill(YELLOW, opacity = 1)
self.legs.set_fill(BLUE, opacity = 1)
return self
class Waving(Scene):
def construct(self):
start_man = StickMan()
plain_man = StickMan()
waving_man = StickMan("wave")
self.add(start_man)
self.wait()
self.play(Transform(start_man,waving_man))
self.play(Transform(start_man,plain_man))
self.wait()
class CirclesAndSquares(SVGMobject):
CONFIG = {
"color" : BLUE_E,
"file_name_prefix": "circles_and_squares",
"stroke_width" : 2,
"stroke_color" : WHITE,
"fill_opacity" : 1.0,
"height" : 3,
"start_corner" : None,
"circle_index" : 0,
"line1_index" :1,
"line2_index" : 2,
"square1_index" : 3,
"square2_index" : 4,
}
def __init__(self, mode = "plain", **kwargs):
digest_config(self, kwargs)
self.mode = mode
self.parts_named = False
try:
svg_file = os.path.join(
SVG_IMAGE_DIR,
"%s_%s.svg" % (self.file_name_prefix, mode)
)
SVGMobject.__init__(self, file_name=svg_file, **kwargs)
except:
warnings.warn("No %s design with mode %s" %
(self.file_name_prefix, mode))
svg_file = os.path.join(
SVG_IMAGE_DIR,
"circles_and_squares_plain.svg",
)
SVGMobject.__init__(self, mode="plain", file_name=svg_file, **kwargs)
def name_parts(self):
self.circle = self.submobjects[self.circle_index]
self.line1 = self.submobjects[self.line1_index]
self.line2 = self.submobjects[self.line2_index]
self.square1 = self.submobjects[self.square1_index]
self.square2 = self.submobjects[self.square2_index]
self.parts_named = True
def init_colors(self):
SVGMobject.init_colors(self)
self.name_parts()
self.circle.set_fill(RED, opacity = 1)
self.line1.set_fill(self.color, opacity = 0)
self.line2.set_fill(self.color, opacity = 0)
self.square1.set_fill(GREEN, opacity = 1)
self.square2.set_fill(BLUE, opacity = 1)
return self
class SVGCircleAndSquare(Scene):
def construct(self):
thingy = CirclesAndSquares()
self.add(thingy)
self.wait()
if __name__ == "__main__":
    # The original loop header was truncated in the source; `items` below is a
    # hypothetical stand-in for the lost iterable of scene descriptors (objects
    # with a `name` attribute), each rendered at low quality via manim's CLI.
    items = []
    for item in items:
        print(item.name)
        os.system("python -m manim manim_tutorial_P37.py %s -l" % item.name)
| true
| true
|
790cbdab72f06ccb98ca8351a4a7986a172fc746
| 1,676
|
py
|
Python
|
main.py
|
amanchourasiayt/Random-Number-in-Python
|
a656c686250269c4454b73b2988e5e5489b2e288
|
[
"BSD-Source-Code"
] | 2
|
2021-07-23T02:51:53.000Z
|
2021-07-24T10:17:05.000Z
|
main.py
|
amanchourasiayt/Random-Number-in-Python
|
a656c686250269c4454b73b2988e5e5489b2e288
|
[
"BSD-Source-Code"
] | null | null | null |
main.py
|
amanchourasiayt/Random-Number-in-Python
|
a656c686250269c4454b73b2988e5e5489b2e288
|
[
"BSD-Source-Code"
] | null | null | null |
# -----------------------------
# Copyright - This code was written entirely by the owner of www.amanchourasia.in and is fully copyrighted by the owner of www.amanchourasia.in.
# Disclaimer - This code contains links; I am not responsible for any damage caused by the links present in the code.
# About the Code: This is a detailed Python example that generates a random number, stores it in a variable named number, and finally prints it.
# Author: Aman Chourasia
# Website: www.amanchourasia.in
# Date of Creation: 22nd July 2021
# -----------------------------
# The Code Starts Here!
# Import the random module. It is part of Python's standard library, so no separate installation with pip is needed.
# If you want a detailed guide you can head on to this link - https://bit.ly/3y2jrNE.
import random
# Create a variable named number that stores a random integer produced by the randint function from the random module. The arguments (0, 10) mean the stored value will be between 0 and 10, inclusive of both endpoints. You can change these bounds as needed.
number = random.randint(0, 10)
# Finally, use the print() function to display the random number. This is the last line of the program.
print(number)
# The Code Ends Here...
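# Illustrative addition, not part of the original script: randint(a, b) includes
# BOTH endpoints, so randint(0, 10) can return 0 as well as 10. The helper below
# also shows random.randrange(0, 10), which excludes the upper bound, and
# seeding, which makes the result reproducible.
def _demo_random_ranges():
    random.seed(42)                       # fix the seed so the output repeats
    inclusive = random.randint(0, 10)     # any integer from 0 to 10, inclusive
    exclusive = random.randrange(0, 10)   # any integer from 0 to 9 (10 excluded)
    return inclusive, exclusive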
| 62.074074
| 403
| 0.741647
|
# If you want a detailed guide you can head on to this link - https://bit.ly/3y2jrNE.
import random
# Create a variable named number that stores a random integer produced by the randint function from the random module. The arguments (0, 10) mean the stored value will be between 0 and 10, inclusive of both endpoints. You can change these bounds as needed.
number = random.randint(0, 10)
print(number)
| true
| true
|
790cbdc03a680b0655ef8ffd488fef72b4107cef
| 1,610
|
py
|
Python
|
forms/migrations/0017_auto_20150331_1815.py
|
opendatadurban/gmmp
|
cc64fdedcf6e04b0377dc8ad7a7d34bae17ec575
|
[
"Apache-2.0"
] | 4
|
2020-01-05T09:14:19.000Z
|
2022-02-17T03:22:09.000Z
|
forms/migrations/0017_auto_20150331_1815.py
|
opendatadurban/gmmp
|
cc64fdedcf6e04b0377dc8ad7a7d34bae17ec575
|
[
"Apache-2.0"
] | 68
|
2019-12-23T02:19:55.000Z
|
2021-04-23T06:13:36.000Z
|
forms/migrations/0017_auto_20150331_1815.py
|
OpenUpSA/gmmp
|
d82a4be0787c3a3a9e27dc590d7974f9f884fbb6
|
[
"Apache-2.0"
] | 2
|
2019-07-25T11:53:10.000Z
|
2020-06-22T02:07:40.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('forms', '0016_auto_20150330_1413'),
]
operations = [
migrations.AlterField(
model_name='radiosheet',
name='item_number',
field=models.PositiveIntegerField(help_text='Write in the number that describes the position of the story within the newscast. E.g. the first story in the newscast is item 1; the seventh story is item 7.', verbose_name='(1) Item Number', choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29)]),
preserve_default=True,
),
migrations.AlterField(
model_name='televisionsheet',
name='item_number',
field=models.PositiveIntegerField(help_text='Write in the number that describes the position of the story within the newscast. E.g. the first story in the newscast is item 1; the seventh story is item 7.', verbose_name='(1) Item Number', choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29)]),
preserve_default=True,
),
]
| 59.62963
| 532
| 0.560248
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('forms', '0016_auto_20150330_1413'),
]
operations = [
migrations.AlterField(
model_name='radiosheet',
name='item_number',
field=models.PositiveIntegerField(help_text='Write in the number that describes the position of the story within the newscast. E.g. the first story in the newscast is item 1; the seventh story is item 7.', verbose_name='(1) Item Number', choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29)]),
preserve_default=True,
),
migrations.AlterField(
model_name='televisionsheet',
name='item_number',
field=models.PositiveIntegerField(help_text='Write in the number that describes the position of the story within the newscast. E.g. the first story in the newscast is item 1; the seventh story is item 7.', verbose_name='(1) Item Number', choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29)]),
preserve_default=True,
),
]
| true
| true
|
790cbdf31ace28418522c7a2ba501d86db85af44
| 17,358
|
py
|
Python
|
django/core/mail/message.py
|
bak1an/django
|
98bcc5d81bca578f3a5b4d47907ba4ac40446887
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/core/mail/message.py
|
bak1an/django
|
98bcc5d81bca578f3a5b4d47907ba4ac40446887
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/core/mail/message.py
|
bak1an/django
|
98bcc5d81bca578f3a5b4d47907ba4ac40446887
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
import mimetypes
import os
from email import (
charset as Charset, encoders as Encoders, generator, message_from_string,
)
from email.errors import InvalidHeaderDefect, NonASCIILocalPartDefect
from email.header import Header
from email.headerregistry import Address
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, getaddresses, make_msgid, parseaddr
from io import BytesIO, StringIO
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_text
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
utf8_charset_qp = Charset.Charset('utf-8')
utf8_charset_qp.body_encoding = Charset.QP
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998
class BadHeaderError(ValueError):
pass
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = {
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_text(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding) for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return name, val
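# Usage sketch (not part of Django's source): forbid_multi_line_headers() is the
# guard that turns an attempted header injection into a hard failure. The helper
# below is purely illustrative and is never called by the framework.
def _demo_forbid_multi_line_headers():
    # A normal value passes through unchanged (or MIME-encoded when needed).
    name, value = forbid_multi_line_headers('Subject', 'Hello', 'utf-8')
    # A value containing a newline could smuggle extra headers into the message,
    # so it raises BadHeaderError instead of being emitted.
    try:
        forbid_multi_line_headers('Subject', 'Hi\r\nBcc: attacker@example.com', 'utf-8')
    except BadHeaderError:
        pass
    return name, value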
def split_addr(addr, encoding):
"""
Split the address into local part and domain, properly encoded.
When non-ascii characters are present in the local part, it must be
MIME-word encoded. The domain name must be idna-encoded if it contains
non-ascii characters.
"""
if '@' in addr:
localpart, domain = addr.split('@', 1)
# Try to get the simplest encoding - ascii if possible so that
# to@example.com doesn't become =?utf-8?q?to?=@example.com. This
# makes unit testing a bit easier and more readable.
try:
localpart.encode('ascii')
except UnicodeEncodeError:
localpart = Header(localpart, encoding).encode()
domain = domain.encode('idna').decode('ascii')
else:
localpart = Header(addr, encoding).encode()
domain = ''
return (localpart, domain)
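# Usage sketch (not part of Django's source): split_addr() leaves plain ASCII
# addresses untouched and only encodes the parts that need it.
def _demo_split_addr():
    # ASCII stays as-is: ('to', 'example.com').
    assert split_addr('to@example.com', 'utf-8') == ('to', 'example.com')
    # Non-ASCII: the local part becomes a MIME encoded-word and the domain is
    # IDNA-encoded to plain ASCII.
    localpart, domain = split_addr('tö@exämple.com', 'utf-8')
    return localpart, domain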
def sanitize_address(addr, encoding):
"""
Format a pair of (name, address) or an email address string.
"""
if not isinstance(addr, tuple):
addr = parseaddr(force_text(addr))
nm, addr = addr
localpart, domain = None, None
nm = Header(nm, encoding).encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN or non-ascii in the local part
localpart, domain = split_addr(addr, encoding)
# An `email.headerregistry.Address` object is used since
# email.utils.formataddr() naively encodes the name as ascii (see #25986).
if localpart and domain:
address = Address(nm, username=localpart, domain=domain)
return str(address)
try:
address = Address(nm, addr_spec=addr)
except (InvalidHeaderDefect, NonASCIILocalPartDefect):
localpart, domain = split_addr(addr, encoding)
address = Address(nm, username=localpart, domain=domain)
return str(address)
class MIMEMixin:
def as_string(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = StringIO()
g = generator.Generator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
def as_bytes(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as bytes.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_bytes() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, _text, _subtype='plain', _charset=None):
self.encoding = _charset
MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
def set_payload(self, payload, charset=None):
if charset == 'utf-8':
has_long_lines = any(
len(l.encode()) > RFC5322_EMAIL_LINE_LENGTH_LIMIT
for l in payload.splitlines()
)
# Quoted-Printable encoding has the side effect of shortening long
# lines, if any (#22561).
charset = utf8_charset_qp if has_long_lines else utf8_charset
MIMEText.set_payload(self, payload, charset=charset)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage:
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All string arguments used to create the message can be strings
or UTF-8 bytestrings. The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
if isinstance(to, str):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, str):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, str):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, str):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
if self.cc:
msg['Cc'] = ', '.join(map(force_text, self.cc))
if self.reply_to:
msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(map(force_text, self.reply_to)))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
# formatdate() uses stdlib methods to format the date, which use
# the stdlib/OS concept of a timezone, however, Django sets the
# TZ environment variable based on the TIME_ZONE setting which
# will get picked up by formatdate().
msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return [email for email in (self.to + self.cc + self.bcc) if email]
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
For a text/* mimetype (guessed or specified), when a bytes object is
specified as content, it will be decoded as UTF-8. If that fails,
the mimetype will be set to DEFAULT_ATTACHMENT_MIME_TYPE and the
content is not decoded.
"""
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
if not mimetype:
mimetype, _ = mimetypes.guess_type(filename)
if not mimetype:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
if isinstance(content, bytes):
try:
content = content.decode()
except UnicodeDecodeError:
# If mimetype suggests the file is text but it's
# actually binary, read() raises a UnicodeDecodeError.
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""
Attaches a file from the filesystem.
The mimetype will be set to the DEFAULT_ATTACHMENT_MIME_TYPE if it is
not specified and cannot be guessed.
For a text/* mimetype (guessed or specified), the file's content
will be decoded as UTF-8. If that fails, the mimetype will be set to
DEFAULT_ATTACHMENT_MIME_TYPE and the content is not decoded.
"""
filename = os.path.basename(path)
with open(path, 'rb') as file:
content = file.read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == 'message' and subtype == 'rfc822':
# Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
# must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(content)
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
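# Usage sketch (not part of Django's source): a minimal message with a text
# attachment, assuming a configured email backend.
def _demo_email_message():
    message = EmailMessage(
        subject='Monthly report',
        body='The report is attached.',
        from_email='reports@example.com',
        to=['team@example.com'],
        reply_to=['no-reply@example.com'],
    )
    # attach() accepts or guesses a mimetype; bytes for a text/* type are
    # decoded as UTF-8 by the logic above.
    message.attach('report.txt', b'line one\nline two\n', 'text/plain')
    return message.send()  # returns the number of messages sent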
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None, reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All string arguments used to create the message can be strings or UTF-8
bytestrings. The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super().__init__(
subject, body, from_email, to, bcc, connection, attachments,
headers, cc, reply_to,
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
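# Usage sketch (not part of Django's source): a plain-text body plus an HTML
# alternative, assuming a configured email backend.
def _demo_email_multi_alternatives():
    message = EmailMultiAlternatives(
        subject='Welcome',
        body='Welcome aboard!',                     # text/plain part
        from_email='hello@example.com',
        to=['new.user@example.com'],
    )
    message.attach_alternative('<p>Welcome aboard!</p>', 'text/html')
    return message.send()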
| 37.982495
| 107
| 0.63308
|
import mimetypes
import os
from email import (
charset as Charset, encoders as Encoders, generator, message_from_string,
)
from email.errors import InvalidHeaderDefect, NonASCIILocalPartDefect
from email.header import Header
from email.headerregistry import Address
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, getaddresses, make_msgid, parseaddr
from io import BytesIO, StringIO
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_text
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
utf8_charset_qp = Charset.Charset('utf-8')
utf8_charset_qp.body_encoding = Charset.QP
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998
class BadHeaderError(ValueError):
pass
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = {
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
encoding = encoding or settings.DEFAULT_CHARSET
val = force_text(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding) for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return name, val
def split_addr(addr, encoding):
if '@' in addr:
localpart, domain = addr.split('@', 1)
        # Try to get the simplest encoding - ascii if possible so that
        # to@example.com doesn't become =?utf-8?q?to?=@example.com. This
        # makes unit testing a bit easier and more readable.
try:
localpart.encode('ascii')
except UnicodeEncodeError:
localpart = Header(localpart, encoding).encode()
domain = domain.encode('idna').decode('ascii')
else:
localpart = Header(addr, encoding).encode()
domain = ''
return (localpart, domain)
def sanitize_address(addr, encoding):
if not isinstance(addr, tuple):
addr = parseaddr(force_text(addr))
nm, addr = addr
localpart, domain = None, None
nm = Header(nm, encoding).encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN or non-ascii in the local part
localpart, domain = split_addr(addr, encoding)
# An `email.headerregistry.Address` object is used since
# email.utils.formataddr() naively encodes the name as ascii (see #25986).
if localpart and domain:
address = Address(nm, username=localpart, domain=domain)
return str(address)
try:
address = Address(nm, addr_spec=addr)
except (InvalidHeaderDefect, NonASCIILocalPartDefect):
localpart, domain = split_addr(addr, encoding)
address = Address(nm, username=localpart, domain=domain)
return str(address)
class MIMEMixin:
def as_string(self, unixfrom=False, linesep='\n'):
fp = StringIO()
g = generator.Generator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
def as_bytes(self, unixfrom=False, linesep='\n'):
fp = BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, _text, _subtype='plain', _charset=None):
self.encoding = _charset
MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
def set_payload(self, payload, charset=None):
if charset == 'utf-8':
has_long_lines = any(
len(l.encode()) > RFC5322_EMAIL_LINE_LENGTH_LIMIT
for l in payload.splitlines()
)
# Quoted-Printable encoding has the side effect of shortening long
# lines, if any (#22561).
charset = utf8_charset_qp if has_long_lines else utf8_charset
MIMEText.set_payload(self, payload, charset=charset)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage:
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None):
if to:
if isinstance(to, str):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, str):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, str):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, str):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
if self.cc:
msg['Cc'] = ', '.join(map(force_text, self.cc))
if self.reply_to:
msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(map(force_text, self.reply_to)))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
# formatdate() uses stdlib methods to format the date, which use
# the stdlib/OS concept of a timezone, however, Django sets the
# TZ environment variable based on the TIME_ZONE setting which
# will get picked up by formatdate().
msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
return [email for email in (self.to + self.cc + self.bcc) if email]
def send(self, fail_silently=False):
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
if not mimetype:
mimetype, _ = mimetypes.guess_type(filename)
if not mimetype:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
if isinstance(content, bytes):
try:
content = content.decode()
except UnicodeDecodeError:
                        # If mimetype suggests the file is text but it's
                        # actually binary, read() raises a UnicodeDecodeError.
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
filename = os.path.basename(path)
with open(path, 'rb') as file:
content = file.read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == 'message' and subtype == 'rfc822':
            if isinstance(content, EmailMessage):
content = content.message()
elif not isinstance(content, Message):
content = message_from_string(content)
attachment = SafeMIMEMessage(content, subtype)
else:
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None, reply_to=None):
super().__init__(
subject, body, from_email, to, bcc, connection, attachments,
headers, cc, reply_to,
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
| true
| true
|
790cbe4553ca80d8b1222b2c52d90ca4f397e4cb
| 3,769
|
py
|
Python
|
tfx/components/pusher/executor_test.py
|
rmgogogo/tfx
|
8ed47f2570bd01d258d8ee9b1ab001e08d16af89
|
[
"Apache-2.0"
] | 1
|
2020-11-08T17:03:33.000Z
|
2020-11-08T17:03:33.000Z
|
tfx/components/pusher/executor_test.py
|
rmgogogo/tfx
|
8ed47f2570bd01d258d8ee9b1ab001e08d16af89
|
[
"Apache-2.0"
] | null | null | null |
tfx/components/pusher/executor_test.py
|
rmgogogo/tfx
|
8ed47f2570bd01d258d8ee9b1ab001e08d16af89
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.pusher.executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from google.protobuf import json_format
from tfx.components.pusher import executor
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
class ExecutorTest(tf.test.TestCase):
def setUp(self):
super(ExecutorTest, self).setUp()
self._source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
self._output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
tf.io.gfile.makedirs(self._output_data_dir)
self._model_export = standard_artifacts.Model()
self._model_export.uri = os.path.join(self._source_data_dir,
'trainer/current/')
self._model_blessing = standard_artifacts.ModelBlessing()
self._input_dict = {
'model_export': [self._model_export],
'model_blessing': [self._model_blessing],
}
self._model_push = standard_artifacts.PushedModel()
self._model_push.uri = os.path.join(self._output_data_dir, 'model_push')
tf.io.gfile.makedirs(self._model_push.uri)
self._output_dict = {
'model_push': [self._model_push],
}
self._serving_model_dir = os.path.join(self._output_data_dir,
'serving_model_dir')
tf.io.gfile.makedirs(self._serving_model_dir)
self._exec_properties = {
'push_destination':
json_format.MessageToJson(
pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=self._serving_model_dir)),
preserving_proto_field_name=True),
}
self._executor = executor.Executor()
def testDoBlessed(self):
self._model_blessing.uri = os.path.join(self._source_data_dir,
'model_validator/blessed/')
self._model_blessing.set_int_custom_property('blessed', 1)
self._executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self.assertNotEqual(0, len(tf.io.gfile.listdir(self._serving_model_dir)))
self.assertNotEqual(0, len(tf.io.gfile.listdir(self._model_push.uri)))
self.assertEqual(
1, self._model_push.artifact.custom_properties['pushed'].int_value)
def testDoNotBlessed(self):
self._model_blessing.uri = os.path.join(self._source_data_dir,
'model_validator/not_blessed/')
self._model_blessing.set_int_custom_property('blessed', 0)
self._executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self.assertEqual(0, len(tf.io.gfile.listdir(self._serving_model_dir)))
self.assertEqual(0, len(tf.io.gfile.listdir(self._model_push.uri)))
self.assertEqual(
0, self._model_push.artifact.custom_properties['pushed'].int_value)
if __name__ == '__main__':
tf.test.main()
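# Illustrative note (not part of the original test): with
# preserving_proto_field_name=True, the push_destination built in setUp()
# serialises to JSON shaped roughly like
#     {"filesystem": {"base_directory": "<serving_model_dir>"}}
# which the pusher executor is expected to parse back into a PushDestination proto.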
| 40.967391
| 77
| 0.694879
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from google.protobuf import json_format
from tfx.components.pusher import executor
from tfx.proto import pusher_pb2
from tfx.types import standard_artifacts
class ExecutorTest(tf.test.TestCase):
def setUp(self):
super(ExecutorTest, self).setUp()
self._source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
self._output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
tf.io.gfile.makedirs(self._output_data_dir)
self._model_export = standard_artifacts.Model()
self._model_export.uri = os.path.join(self._source_data_dir,
'trainer/current/')
self._model_blessing = standard_artifacts.ModelBlessing()
self._input_dict = {
'model_export': [self._model_export],
'model_blessing': [self._model_blessing],
}
self._model_push = standard_artifacts.PushedModel()
self._model_push.uri = os.path.join(self._output_data_dir, 'model_push')
tf.io.gfile.makedirs(self._model_push.uri)
self._output_dict = {
'model_push': [self._model_push],
}
self._serving_model_dir = os.path.join(self._output_data_dir,
'serving_model_dir')
tf.io.gfile.makedirs(self._serving_model_dir)
self._exec_properties = {
'push_destination':
json_format.MessageToJson(
pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=self._serving_model_dir)),
preserving_proto_field_name=True),
}
self._executor = executor.Executor()
def testDoBlessed(self):
self._model_blessing.uri = os.path.join(self._source_data_dir,
'model_validator/blessed/')
self._model_blessing.set_int_custom_property('blessed', 1)
self._executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self.assertNotEqual(0, len(tf.io.gfile.listdir(self._serving_model_dir)))
self.assertNotEqual(0, len(tf.io.gfile.listdir(self._model_push.uri)))
self.assertEqual(
1, self._model_push.artifact.custom_properties['pushed'].int_value)
def testDoNotBlessed(self):
self._model_blessing.uri = os.path.join(self._source_data_dir,
'model_validator/not_blessed/')
self._model_blessing.set_int_custom_property('blessed', 0)
self._executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
self.assertEqual(0, len(tf.io.gfile.listdir(self._serving_model_dir)))
self.assertEqual(0, len(tf.io.gfile.listdir(self._model_push.uri)))
self.assertEqual(
0, self._model_push.artifact.custom_properties['pushed'].int_value)
if __name__ == '__main__':
tf.test.main()
| true
| true
|
790cbedeb8f640245205c0da49120cc0c06eac28
| 9,504
|
py
|
Python
|
PyCTBN/tests/structure_graph/test_networkgraph.py
|
pietroepis/PyCTBN
|
33e4cb5bd7dd68e3e272edfccb016806dd227deb
|
[
"MIT"
] | 1
|
2020-06-30T14:09:26.000Z
|
2020-06-30T14:09:26.000Z
|
PyCTBN/tests/structure_graph/test_networkgraph.py
|
pietroepis/PyCTBN
|
33e4cb5bd7dd68e3e272edfccb016806dd227deb
|
[
"MIT"
] | 1
|
2020-07-13T16:05:47.000Z
|
2020-07-13T16:05:47.000Z
|
PyCTBN/tests/structure_graph/test_networkgraph.py
|
philipMartini/CTBN_Project
|
235c85c8fad8a85f1243dac8162dda60bf45291b
|
[
"MIT"
] | 4
|
2021-03-10T10:16:10.000Z
|
2021-05-12T12:36:27.000Z
|
# License: MIT License
import unittest
import glob
import os
import networkx as nx
import numpy as np
import itertools
from ...PyCTBN.structure_graph.sample_path import SamplePath
from ...PyCTBN.structure_graph.network_graph import NetworkGraph
from ...PyCTBN.utility.json_importer import JsonImporter
class TestNetworkGraph(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.read_files = glob.glob(os.path.join('./PyCTBN/test_data', "*.json"))
cls.importer = JsonImporter(cls.read_files[2], 'samples', 'dyn.str', 'variables', 'Time', 'Name')
cls.importer.import_data(0)
cls.s1 = SamplePath(cls.importer)
cls.s1.build_trajectories()
cls.s1.build_structure()
def test_init(self):
g1 = NetworkGraph(self.s1.structure)
self.assertEqual(self.s1.structure, g1._graph_struct)
self.assertIsInstance(g1._graph, nx.DiGraph)
self.assertIsNone(g1.time_scalar_indexing_strucure)
self.assertIsNone(g1.transition_scalar_indexing_structure)
self.assertIsNone(g1.transition_filtering)
self.assertIsNone(g1.p_combs)
def test_add_nodes(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
for n1, n2 in zip(g1.nodes, self.s1.structure.nodes_labels):
self.assertEqual(n1, n2)
def test_add_edges(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_edges(self.s1.structure.edges)
for e in self.s1.structure.edges:
self.assertIn(tuple(e), g1.edges)
def test_fast_init(self):
g1 = NetworkGraph(self.s1.structure)
for node in self.s1.structure.nodes_labels:
g1.fast_init(node)
self.assertIsNotNone(g1._graph.nodes)
self.assertIsNotNone(g1._graph.edges)
self.assertIsInstance(g1._time_scalar_indexing_structure, np.ndarray)
self.assertIsInstance(g1._transition_scalar_indexing_structure, np.ndarray)
self.assertIsInstance(g1._time_filtering, np.ndarray)
self.assertIsInstance(g1._transition_filtering, np.ndarray)
self.assertIsInstance(g1._p_combs_structure, np.ndarray)
self.assertIsInstance(g1._aggregated_info_about_nodes_parents, tuple)
def test_get_ordered_by_indx_set_of_parents(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
for indx in range(len(aggr_info[0]) - 1 ):
self.assertLess(g1.get_node_indx(aggr_info[0][indx]), g1.get_node_indx(aggr_info[0][indx + 1]))
for par, par_indx in zip(aggr_info[0], aggr_info[1]):
self.assertEqual(g1.get_node_indx(par), par_indx)
for par, par_val in zip(aggr_info[0], aggr_info[2]):
self.assertEqual(g1._graph_struct.get_states_number(par), par_val)
def test_build_time_scalar_indexing_structure_for_a_node(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_time_scalar_indexing_structure_for_a_node(g1, node, aggr_info[1],
aggr_info[0], aggr_info[2])
def aux_build_time_scalar_indexing_structure_for_a_node(self, graph, node_id, parents_indxs, parents_labels, parents_vals):
node_states = graph.get_states_number(node_id)
time_scalar_indexing = NetworkGraph.build_time_scalar_indexing_structure_for_a_node(node_states, parents_vals)
self.assertEqual(len(time_scalar_indexing), len(parents_indxs) + 1)
merged_list = parents_labels[:]
merged_list.insert(0, node_id)
vals_list = []
for node in merged_list:
vals_list.append(graph.get_states_number(node))
t_vec = np.array(vals_list)
t_vec = t_vec.cumprod()
self.assertTrue(np.array_equal(time_scalar_indexing, t_vec))
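    # Worked example (illustrative, not part of the original tests): for a node
    # with 3 states and ordered parents with 3 and 2 states, vals_list above is
    # [3, 3, 2], so the cumulative product is [3, 9, 18] - exactly the time
    # scalar indexing vector the assertion compares against.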
def test_build_transition_scalar_indexing_structure_for_a_node(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_transition_scalar_indexing_structure_for_a_node(g1, node, aggr_info[1],
aggr_info[0], aggr_info[2])
def aux_build_transition_scalar_indexing_structure_for_a_node(self, graph, node_id, parents_indxs, parents_labels,
parents_values):
node_states = graph.get_states_number(node_id)
transition_scalar_indexing = graph.build_transition_scalar_indexing_structure_for_a_node(node_states,
parents_values)
self.assertEqual(len(transition_scalar_indexing), len(parents_indxs) + 2)
merged_list = parents_labels[:]
merged_list.insert(0, node_id)
merged_list.insert(0, node_id)
vals_list = []
for node_id in merged_list:
vals_list.append(graph.get_states_number(node_id))
m_vec = np.array([vals_list])
m_vec = m_vec.cumprod()
self.assertTrue(np.array_equal(transition_scalar_indexing, m_vec))
def test_build_time_columns_filtering_structure_for_a_node(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_time_columns_filtering_structure_for_a_node(g1, node, aggr_info[1])
def aux_build_time_columns_filtering_structure_for_a_node(self, graph, node_id, p_indxs):
graph.build_time_columns_filtering_for_a_node(graph.get_node_indx(node_id), p_indxs)
single_filter = []
single_filter.append(graph.get_node_indx(node_id))
single_filter.extend(p_indxs)
self.assertTrue(np.array_equal(graph.build_time_columns_filtering_for_a_node(graph.get_node_indx(node_id),
p_indxs),np.array(single_filter)))
def test_build_transition_columns_filtering_structure(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_time_columns_filtering_structure_for_a_node(g1, node, aggr_info[1])
def aux_build_transition_columns_filtering_structure(self, graph, node_id, p_indxs):
single_filter = []
single_filter.append(graph.get_node_indx(node_id) + graph._graph_struct.total_variables_number)
single_filter.append(graph.get_node_indx(node_id))
single_filter.extend(p_indxs)
self.assertTrue(np.array_equal(graph.build_transition_filtering_for_a_node(graph.get_node_indx(node_id),
p_indxs), np.array(single_filter)))
def test_build_p_combs_structure(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_p_combs_structure(g1, aggr_info[2])
def aux_build_p_combs_structure(self, graph, p_vals):
p_combs = graph.build_p_comb_structure_for_a_node(p_vals)
p_possible_vals = []
for val in p_vals:
vals = [v for v in range(val)]
p_possible_vals.extend(vals)
comb_struct = set(itertools.product(p_possible_vals,repeat=len(p_vals)))
for comb in comb_struct:
self.assertIn(np.array(comb), p_combs)
def test_get_parents_by_id(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in g1.nodes:
self.assertListEqual(g1.get_parents_by_id(node), list(g1._graph.predecessors(node)))
def test_get_states_number(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node, val in zip(g1.nodes, g1.nodes_values):
self.assertEqual(val, g1.get_states_number(node))
def test_get_node_indx(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node, indx in zip(g1.nodes, g1.nodes_indexes):
self.assertEqual(indx, g1.get_node_indx(node))
if __name__ == '__main__':
unittest.main()
| 48.989691
| 127
| 0.675295
|
import unittest
import glob
import os
import networkx as nx
import numpy as np
import itertools
from ...PyCTBN.structure_graph.sample_path import SamplePath
from ...PyCTBN.structure_graph.network_graph import NetworkGraph
from ...PyCTBN.utility.json_importer import JsonImporter
class TestNetworkGraph(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.read_files = glob.glob(os.path.join('./PyCTBN/test_data', "*.json"))
cls.importer = JsonImporter(cls.read_files[2], 'samples', 'dyn.str', 'variables', 'Time', 'Name')
cls.importer.import_data(0)
cls.s1 = SamplePath(cls.importer)
cls.s1.build_trajectories()
cls.s1.build_structure()
def test_init(self):
g1 = NetworkGraph(self.s1.structure)
self.assertEqual(self.s1.structure, g1._graph_struct)
self.assertIsInstance(g1._graph, nx.DiGraph)
self.assertIsNone(g1.time_scalar_indexing_strucure)
self.assertIsNone(g1.transition_scalar_indexing_structure)
self.assertIsNone(g1.transition_filtering)
self.assertIsNone(g1.p_combs)
def test_add_nodes(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
for n1, n2 in zip(g1.nodes, self.s1.structure.nodes_labels):
self.assertEqual(n1, n2)
def test_add_edges(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_edges(self.s1.structure.edges)
for e in self.s1.structure.edges:
self.assertIn(tuple(e), g1.edges)
def test_fast_init(self):
g1 = NetworkGraph(self.s1.structure)
for node in self.s1.structure.nodes_labels:
g1.fast_init(node)
self.assertIsNotNone(g1._graph.nodes)
self.assertIsNotNone(g1._graph.edges)
self.assertIsInstance(g1._time_scalar_indexing_structure, np.ndarray)
self.assertIsInstance(g1._transition_scalar_indexing_structure, np.ndarray)
self.assertIsInstance(g1._time_filtering, np.ndarray)
self.assertIsInstance(g1._transition_filtering, np.ndarray)
self.assertIsInstance(g1._p_combs_structure, np.ndarray)
self.assertIsInstance(g1._aggregated_info_about_nodes_parents, tuple)
def test_get_ordered_by_indx_set_of_parents(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
for indx in range(len(aggr_info[0]) - 1 ):
self.assertLess(g1.get_node_indx(aggr_info[0][indx]), g1.get_node_indx(aggr_info[0][indx + 1]))
for par, par_indx in zip(aggr_info[0], aggr_info[1]):
self.assertEqual(g1.get_node_indx(par), par_indx)
for par, par_val in zip(aggr_info[0], aggr_info[2]):
self.assertEqual(g1._graph_struct.get_states_number(par), par_val)
def test_build_time_scalar_indexing_structure_for_a_node(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_time_scalar_indexing_structure_for_a_node(g1, node, aggr_info[1],
aggr_info[0], aggr_info[2])
def aux_build_time_scalar_indexing_structure_for_a_node(self, graph, node_id, parents_indxs, parents_labels, parents_vals):
node_states = graph.get_states_number(node_id)
time_scalar_indexing = NetworkGraph.build_time_scalar_indexing_structure_for_a_node(node_states, parents_vals)
self.assertEqual(len(time_scalar_indexing), len(parents_indxs) + 1)
merged_list = parents_labels[:]
merged_list.insert(0, node_id)
vals_list = []
for node in merged_list:
vals_list.append(graph.get_states_number(node))
t_vec = np.array(vals_list)
t_vec = t_vec.cumprod()
self.assertTrue(np.array_equal(time_scalar_indexing, t_vec))
def test_build_transition_scalar_indexing_structure_for_a_node(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_transition_scalar_indexing_structure_for_a_node(g1, node, aggr_info[1],
aggr_info[0], aggr_info[2])
def aux_build_transition_scalar_indexing_structure_for_a_node(self, graph, node_id, parents_indxs, parents_labels,
parents_values):
node_states = graph.get_states_number(node_id)
transition_scalar_indexing = graph.build_transition_scalar_indexing_structure_for_a_node(node_states,
parents_values)
self.assertEqual(len(transition_scalar_indexing), len(parents_indxs) + 2)
merged_list = parents_labels[:]
merged_list.insert(0, node_id)
merged_list.insert(0, node_id)
vals_list = []
for node_id in merged_list:
vals_list.append(graph.get_states_number(node_id))
m_vec = np.array([vals_list])
m_vec = m_vec.cumprod()
self.assertTrue(np.array_equal(transition_scalar_indexing, m_vec))
def test_build_time_columns_filtering_structure_for_a_node(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_time_columns_filtering_structure_for_a_node(g1, node, aggr_info[1])
def aux_build_time_columns_filtering_structure_for_a_node(self, graph, node_id, p_indxs):
graph.build_time_columns_filtering_for_a_node(graph.get_node_indx(node_id), p_indxs)
single_filter = []
single_filter.append(graph.get_node_indx(node_id))
single_filter.extend(p_indxs)
self.assertTrue(np.array_equal(graph.build_time_columns_filtering_for_a_node(graph.get_node_indx(node_id),
p_indxs),np.array(single_filter)))
def test_build_transition_columns_filtering_structure(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_time_columns_filtering_structure_for_a_node(g1, node, aggr_info[1])
def aux_build_transition_columns_filtering_structure(self, graph, node_id, p_indxs):
single_filter = []
single_filter.append(graph.get_node_indx(node_id) + graph._graph_struct.total_variables_number)
single_filter.append(graph.get_node_indx(node_id))
single_filter.extend(p_indxs)
self.assertTrue(np.array_equal(graph.build_transition_filtering_for_a_node(graph.get_node_indx(node_id),
p_indxs), np.array(single_filter)))
def test_build_p_combs_structure(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in self.s1.structure.nodes_labels:
aggr_info = g1.get_ordered_by_indx_set_of_parents(node)
self.aux_build_p_combs_structure(g1, aggr_info[2])
def aux_build_p_combs_structure(self, graph, p_vals):
p_combs = graph.build_p_comb_structure_for_a_node(p_vals)
p_possible_vals = []
for val in p_vals:
vals = [v for v in range(val)]
p_possible_vals.extend(vals)
comb_struct = set(itertools.product(p_possible_vals,repeat=len(p_vals)))
for comb in comb_struct:
self.assertIn(np.array(comb), p_combs)
def test_get_parents_by_id(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node in g1.nodes:
self.assertListEqual(g1.get_parents_by_id(node), list(g1._graph.predecessors(node)))
def test_get_states_number(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node, val in zip(g1.nodes, g1.nodes_values):
self.assertEqual(val, g1.get_states_number(node))
def test_get_node_indx(self):
g1 = NetworkGraph(self.s1.structure)
g1.add_nodes(self.s1.structure.nodes_labels)
g1.add_edges(self.s1.structure.edges)
for node, indx in zip(g1.nodes, g1.nodes_indexes):
self.assertEqual(indx, g1.get_node_indx(node))
if __name__ == '__main__':
unittest.main()
| true
| true
|
790cbfc64effd7e7fea37d991fbdd2800f2b59b0
| 1,019
|
py
|
Python
|
example/unicorn/components/add_flavor.py
|
Franziskhan/django-unicorn
|
ac0bfdafda1e98bc32031e34f8bcc9cf712bc920
|
[
"MIT"
] | null | null | null |
example/unicorn/components/add_flavor.py
|
Franziskhan/django-unicorn
|
ac0bfdafda1e98bc32031e34f8bcc9cf712bc920
|
[
"MIT"
] | null | null | null |
example/unicorn/components/add_flavor.py
|
Franziskhan/django-unicorn
|
ac0bfdafda1e98bc32031e34f8bcc9cf712bc920
|
[
"MIT"
] | null | null | null |
from django_unicorn.components import QuerySetType, UnicornView
from example.coffee.models import Flavor, Taste
class AddFlavorView(UnicornView):
is_adding = False
flavors = None
flavor_qty = 1
flavor_id = None
def __init__(self, *args, **kwargs):
super().__init__(**kwargs) # calling super is required
self.flavor_id = kwargs.get('flavor_id')
self.is_adding = False
def create(self):
if int(self.flavor_qty) > 0:
for i in range(int(self.flavor_qty)):
flavor = Flavor.objects.create(id = self.flavor_id)
flavor.save()
print("create flavor")
self.is_adding = False
self.show_table()
def add_flavor(self):
self.is_adding = True
self.show_table()
def cancel(self):
self.is_adding = False
self.show_table()
def show_table(self):
self.flavors = Flavor.objects.all()
def mount(self):
self.show_table()
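# A minimal template usage sketch (assumption: standard django-unicorn
# template tags; the surrounding template and the source of `flavor_id`
# are hypothetical):
#
#   {% load unicorn %}
#   {% unicorn 'add-flavor' flavor_id=some_flavor.id %}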
| 24.853659
| 67
| 0.60157
|
from django_unicorn.components import QuerySetType, UnicornView
from example.coffee.models import Flavor, Taste
class AddFlavorView(UnicornView):
is_adding = False
flavors = None
flavor_qty = 1
flavor_id = None
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
self.flavor_id = kwargs.get('flavor_id')
self.is_adding = False
def create(self):
if int(self.flavor_qty) > 0:
for i in range(int(self.flavor_qty)):
flavor = Flavor.objects.create(id = self.flavor_id)
flavor.save()
print("create flavor")
self.is_adding = False
self.show_table()
def add_flavor(self):
self.is_adding = True
self.show_table()
def cancel(self):
self.is_adding = False
self.show_table()
def show_table(self):
self.flavors = Flavor.objects.all()
def mount(self):
self.show_table()
| true
| true
|
790cbfe885f74997947ecf49647ed445f57375d1
| 14,929
|
py
|
Python
|
p2p/handshake.py
|
g-r-a-n-t/trinity
|
f108b6cd34ed9aabfcf9e235badd91597650ecd5
|
[
"MIT"
] | null | null | null |
p2p/handshake.py
|
g-r-a-n-t/trinity
|
f108b6cd34ed9aabfcf9e235badd91597650ecd5
|
[
"MIT"
] | null | null | null |
p2p/handshake.py
|
g-r-a-n-t/trinity
|
f108b6cd34ed9aabfcf9e235badd91597650ecd5
|
[
"MIT"
] | null | null | null |
import asyncio
import functools
import operator
from typing import (
cast,
Iterable,
NamedTuple,
Sequence,
Type,
Tuple,
)
from cached_property import cached_property
from eth_utils import (
ExtendedDebugLogger,
to_tuple,
)
from eth_utils.toolz import groupby, valmap
from eth_keys import keys
from p2p._utils import duplicates, get_logger
from p2p.abc import (
ConnectionAPI,
HandshakerAPI,
HandshakeReceiptAPI,
MultiplexerAPI,
NodeAPI,
TransportAPI,
TProtocol,
ProtocolAPI,
)
from p2p.connection import Connection
from p2p.constants import DEVP2P_V5
from p2p.disconnect import DisconnectReason
from p2p.exceptions import (
HandshakeFailure,
HandshakeFailureTooManyPeers,
NoMatchingPeerCapabilities,
)
from p2p.multiplexer import (
stream_transport_messages,
Multiplexer,
)
from p2p.p2p_proto import (
DevP2PReceipt,
Disconnect,
Hello,
HelloPayload,
BaseP2PProtocol,
P2PProtocolV4,
P2PProtocolV5,
)
from p2p.protocol import get_cmd_offsets
from p2p.transport import Transport
from p2p.typing import (
Capabilities,
Capability,
)
class Handshaker(HandshakerAPI[TProtocol]):
"""
Base class that handles the handshake for a given protocol. The primary
justification for this class's existence is to house parameters that are
needed for the protocol handshake.
"""
@cached_property
def logger(self) -> ExtendedDebugLogger:
return get_logger('p2p.handshake.Handshaker')
class DevP2PHandshakeParams(NamedTuple):
client_version_string: str
listen_port: int
version: int
def get_base_protocol_class(self) -> Type[BaseP2PProtocol]:
if self.version == 5:
return P2PProtocolV5
elif self.version == 4:
return P2PProtocolV4
else:
raise Exception(
f"Unknown protocol version: {self.version}. Expected one of "
f"`4` or `5`"
)
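# Illustrative (a sketch): DevP2PHandshakeParams('my-client/v0.1', 30303, 5)
# .get_base_protocol_class() returns P2PProtocolV5, version=4 maps to
# P2PProtocolV4, and any other version raises.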
@to_tuple
def _select_capabilities(remote_capabilities: Capabilities,
local_capabilities: Capabilities) -> Iterable[Capability]:
"""
Select the appropriate shared capabilities between local and remote.
https://github.com/ethereum/devp2p/blob/master/rlpx.md#capability-messaging
"""
# Determine the remote capabilities that intersect with our own.
matching_capabilities = tuple(sorted(
set(local_capabilities).intersection(remote_capabilities),
key=operator.itemgetter(0),
))
# generate a dictionary of each capability grouped by name and sorted by
# version in descending order.
sort_by_version = functools.partial(sorted, key=operator.itemgetter(1), reverse=True)
capabilities_by_name = valmap(
tuple,
valmap(
sort_by_version,
groupby(operator.itemgetter(0), matching_capabilities),
),
)
# now we loop over the names that have a matching capability and return the
# *highest* version one.
for name in sorted(capabilities_by_name.keys()):
yield capabilities_by_name[name][0]
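# Illustrative example of the selection rule above (a sketch; capabilities are
# (name, version) pairs):
#
#   _select_capabilities(
#       remote_capabilities=(('eth', 63), ('eth', 64), ('les', 2)),
#       local_capabilities=(('eth', 63), ('eth', 64)),
#   )  # -> (('eth', 64),) -- only the highest shared version of each name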
async def _do_p2p_handshake(transport: TransportAPI,
capabilities: Capabilities,
p2p_handshake_params: DevP2PHandshakeParams,
base_protocol: BaseP2PProtocol,
) -> Tuple[DevP2PReceipt, BaseP2PProtocol]:
client_version_string, listen_port, p2p_version = p2p_handshake_params
base_protocol.send(Hello(HelloPayload(
client_version_string=client_version_string,
capabilities=capabilities,
listen_port=listen_port,
version=p2p_version,
remote_public_key=transport.public_key.to_bytes(),
)))
# The base `p2p` protocol handshake directly streams the messages as it has
# strict requirements about receiving the `Hello` message first.
async for _, cmd in stream_transport_messages(transport, base_protocol):
if isinstance(cmd, Disconnect):
if cmd.payload == DisconnectReason.TOO_MANY_PEERS:
                raise HandshakeFailureTooManyPeers("Peer disconnected because it is already full")
if not isinstance(cmd, Hello):
raise HandshakeFailure(
f"First message across the DevP2P connection must be a Hello "
f"msg, got {cmd}, disconnecting"
)
protocol: BaseP2PProtocol
if base_protocol.version >= DEVP2P_V5:
# Check whether to support Snappy Compression or not
# based on other peer's p2p protocol version
snappy_support = cmd.payload.version >= DEVP2P_V5
if snappy_support:
# Now update the base protocol to support snappy compression
# This is needed so that Trinity is compatible with parity since
# parity sends Ping immediately after handshake
protocol = P2PProtocolV5(
transport,
command_id_offset=0,
snappy_support=True,
)
else:
protocol = base_protocol
else:
protocol = base_protocol
devp2p_receipt = DevP2PReceipt(
protocol=protocol,
version=cmd.payload.version,
client_version_string=cmd.payload.client_version_string,
capabilities=cmd.payload.capabilities,
remote_public_key=cmd.payload.remote_public_key,
listen_port=cmd.payload.listen_port,
)
break
else:
raise HandshakeFailure("DevP2P message stream exited before finishing handshake")
return devp2p_receipt, protocol
async def negotiate_protocol_handshakes(transport: TransportAPI,
p2p_handshake_params: DevP2PHandshakeParams,
protocol_handshakers: Sequence[HandshakerAPI[ProtocolAPI]],
) -> Tuple[MultiplexerAPI, DevP2PReceipt, Tuple[HandshakeReceiptAPI, ...]]: # noqa: E501
"""
Negotiate the handshakes for both the base `p2p` protocol and the
appropriate sub protocols. The basic logic follows the following steps.
* perform the base `p2p` handshake.
* using the capabilities exchanged during the `p2p` handshake, select the
appropriate sub protocols.
* allow each sub-protocol to perform its own handshake.
* return the established `Multiplexer` as well as the `HandshakeReceipt`
objects from each handshake.
"""
# The `p2p` Protocol class that will be used.
p2p_protocol_class = p2p_handshake_params.get_base_protocol_class()
# Collect our local capabilities, the set of (name, version) pairs for all
# of the protocols that we support.
local_capabilities = tuple(
handshaker.protocol_class.as_capability()
for handshaker
in protocol_handshakers
)
# Verify that there are no duplicated local or remote capabilities
duplicate_capabilities = duplicates(local_capabilities)
if duplicate_capabilities:
raise Exception(f"Duplicate local capabilities: {duplicate_capabilities}")
# We create an *ephemeral* version of the base `p2p` protocol with snappy
# compression disabled for the handshake. As part of the handshake, a new
# instance of this protocol will be created with snappy compression enabled
# if it is supported by the protocol version.
ephemeral_base_protocol = p2p_protocol_class(
transport,
command_id_offset=0,
snappy_support=False,
)
# Perform the actual `p2p` protocol handshake. We need the remote
# capabilities data from the receipt to select the appropriate sub
# protocols.
devp2p_receipt, base_protocol = await _do_p2p_handshake(
transport,
local_capabilities,
p2p_handshake_params,
ephemeral_base_protocol,
)
# This data structure is simply for easy retrieval of the proper
# `Handshaker` for each selected protocol.
protocol_handshakers_by_capability = dict(zip(local_capabilities, protocol_handshakers))
# Using our local capabilities and the ones transmitted by the remote
# select the highest shared version of each shared protocol.
selected_capabilities = _select_capabilities(
devp2p_receipt.capabilities,
local_capabilities,
)
# If there are no capability matches throw an exception.
if len(selected_capabilities) < 1:
raise NoMatchingPeerCapabilities(
"Found no matching capabilities between self and peer:\n"
f" - local : {tuple(sorted(local_capabilities))}\n"
f" - remote: {devp2p_receipt.capabilities}"
)
# Retrieve the handshakers which correspond to the selected protocols.
# These are needed to perform the actual handshake logic for each protocol.
selected_handshakers = tuple(
protocol_handshakers_by_capability[capability]
for capability in selected_capabilities
)
# Grab the `Protocol` class for each of the selected protocols. We need
# this to compute the offsets for each protocol's command ids, as well as
# for instantiation of the protocol instances.
selected_protocol_types = tuple(
handshaker.protocol_class
for handshaker
in selected_handshakers
)
# Compute the offsets for each protocol's command ids
protocol_cmd_offsets = get_cmd_offsets(selected_protocol_types)
# Now instantiate instances of each of the protocol classes.
selected_protocols = tuple(
protocol_class(transport, command_id_offset, base_protocol.snappy_support)
for protocol_class, command_id_offset
in zip(selected_protocol_types, protocol_cmd_offsets)
)
# Create `Multiplexer` to abstract all of the protocols into a single
# interface to stream only messages relevant to the given protocol.
multiplexer = Multiplexer(transport, base_protocol, selected_protocols)
# This context manager runs a background task which reads messages off of
# the `Transport` and feeds them into protocol specific queues. Each
# protocol is responsible for reading its own messages from that queue via
# the `Multiplexer.stream_protocol_messages` API.
await multiplexer.stream_in_background()
# Concurrently perform the handshakes for each protocol, gathering up
# the returned receipts.
try:
protocol_receipts = cast(Tuple[HandshakeReceiptAPI, ...], await asyncio.gather(*(
handshaker.do_handshake(multiplexer, protocol)
for handshaker, protocol
in zip(selected_handshakers, selected_protocols)
)))
except BaseException as handshake_err:
# If the multiplexer has a streaming error, that will certainly be the cause of
# whatever handshake error we got, so raise that instead.
multiplexer.raise_if_streaming_error()
# Ok, no streaming error from the multiplexer, so stop it and raise the handshake error.
await multiplexer.stop_streaming()
raise handshake_err
else:
# The handshake was successful, but there's a chance the multiplexer's streaming stopped
# after that, so we may raise that here to prevent an attempt to use a stopped multiplexer
# further.
multiplexer.raise_if_streaming_error()
# Return the `Multiplexer` object as well as the handshake receipts. The
# `Multiplexer` object acts as a container for the individual protocol
# instances.
return multiplexer, devp2p_receipt, protocol_receipts
async def dial_out(remote: NodeAPI,
private_key: keys.PrivateKey,
p2p_handshake_params: DevP2PHandshakeParams,
protocol_handshakers: Sequence[HandshakerAPI[ProtocolAPI]],
) -> ConnectionAPI:
"""
Perform the auth and P2P handshakes with the given remote.
Return a `Connection` object housing all of the negotiated sub protocols.
Raises UnreachablePeer if we cannot connect to the peer or
HandshakeFailure if the remote disconnects before completing the
handshake or if none of the sub-protocols supported by us is also
supported by the remote.
"""
transport = await Transport.connect(
remote,
private_key,
)
transport.logger.debug2("Initiating p2p handshake with %s", remote)
try:
multiplexer, devp2p_receipt, protocol_receipts = await negotiate_protocol_handshakes(
transport=transport,
p2p_handshake_params=p2p_handshake_params,
protocol_handshakers=protocol_handshakers,
)
except BaseException:
# Note: This is one of two places where we manually handle closing the
# reader/writer connection pair in the event of an error during the
# peer connection and handshake process.
# See `p2p.auth.handshake` for the other.
try:
await transport.close()
except ConnectionResetError:
transport.logger.debug("Could not wait for transport to close")
raise
transport.logger.debug2("Completed p2p handshake with %s", remote)
connection = Connection(
multiplexer=multiplexer,
devp2p_receipt=devp2p_receipt,
protocol_receipts=protocol_receipts,
is_dial_out=True,
)
return connection
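# A minimal calling sketch for `dial_out` (assumptions: `remote`, `privkey`
# and `eth_handshaker` are constructed elsewhere; those names are
# hypothetical):
#
#   params = DevP2PHandshakeParams(
#       client_version_string='my-client/v0.1',
#       listen_port=30303,
#       version=5,
#   )
#   connection = await dial_out(remote, privkey, params, (eth_handshaker,))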
async def receive_dial_in(reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
private_key: keys.PrivateKey,
p2p_handshake_params: DevP2PHandshakeParams,
protocol_handshakers: Sequence[HandshakerAPI[ProtocolAPI]],
) -> Connection:
transport = await Transport.receive_connection(
reader=reader,
writer=writer,
private_key=private_key,
)
try:
multiplexer, devp2p_receipt, protocol_receipts = await negotiate_protocol_handshakes(
transport=transport,
p2p_handshake_params=p2p_handshake_params,
protocol_handshakers=protocol_handshakers,
)
except BaseException:
# Note: This is one of two places where we manually handle closing the
# reader/writer connection pair in the event of an error during the
# peer connection and handshake process.
# See `p2p.auth.handshake` for the other.
try:
await transport.close()
except ConnectionResetError:
transport.logger.debug("Could not wait for transport to close")
raise
connection = Connection(
multiplexer=multiplexer,
devp2p_receipt=devp2p_receipt,
protocol_receipts=protocol_receipts,
is_dial_out=False,
)
return connection
| 37.890863
| 129
| 0.681224
|
import asyncio
import functools
import operator
from typing import (
cast,
Iterable,
NamedTuple,
Sequence,
Type,
Tuple,
)
from cached_property import cached_property
from eth_utils import (
ExtendedDebugLogger,
to_tuple,
)
from eth_utils.toolz import groupby, valmap
from eth_keys import keys
from p2p._utils import duplicates, get_logger
from p2p.abc import (
ConnectionAPI,
HandshakerAPI,
HandshakeReceiptAPI,
MultiplexerAPI,
NodeAPI,
TransportAPI,
TProtocol,
ProtocolAPI,
)
from p2p.connection import Connection
from p2p.constants import DEVP2P_V5
from p2p.disconnect import DisconnectReason
from p2p.exceptions import (
HandshakeFailure,
HandshakeFailureTooManyPeers,
NoMatchingPeerCapabilities,
)
from p2p.multiplexer import (
stream_transport_messages,
Multiplexer,
)
from p2p.p2p_proto import (
DevP2PReceipt,
Disconnect,
Hello,
HelloPayload,
BaseP2PProtocol,
P2PProtocolV4,
P2PProtocolV5,
)
from p2p.protocol import get_cmd_offsets
from p2p.transport import Transport
from p2p.typing import (
Capabilities,
Capability,
)
class Handshaker(HandshakerAPI[TProtocol]):
@cached_property
def logger(self) -> ExtendedDebugLogger:
return get_logger('p2p.handshake.Handshaker')
class DevP2PHandshakeParams(NamedTuple):
client_version_string: str
listen_port: int
version: int
def get_base_protocol_class(self) -> Type[BaseP2PProtocol]:
if self.version == 5:
return P2PProtocolV5
elif self.version == 4:
return P2PProtocolV4
else:
raise Exception(
f"Unknown protocol version: {self.version}. Expected one of "
f"`4` or `5`"
)
@to_tuple
def _select_capabilities(remote_capabilities: Capabilities,
local_capabilities: Capabilities) -> Iterable[Capability]:
matching_capabilities = tuple(sorted(
set(local_capabilities).intersection(remote_capabilities),
key=operator.itemgetter(0),
))
sort_by_version = functools.partial(sorted, key=operator.itemgetter(1), reverse=True)
capabilities_by_name = valmap(
tuple,
valmap(
sort_by_version,
groupby(operator.itemgetter(0), matching_capabilities),
),
)
for name in sorted(capabilities_by_name.keys()):
yield capabilities_by_name[name][0]
async def _do_p2p_handshake(transport: TransportAPI,
capabilities: Capabilities,
p2p_handshake_params: DevP2PHandshakeParams,
base_protocol: BaseP2PProtocol,
) -> Tuple[DevP2PReceipt, BaseP2PProtocol]:
client_version_string, listen_port, p2p_version = p2p_handshake_params
base_protocol.send(Hello(HelloPayload(
client_version_string=client_version_string,
capabilities=capabilities,
listen_port=listen_port,
version=p2p_version,
remote_public_key=transport.public_key.to_bytes(),
)))
async for _, cmd in stream_transport_messages(transport, base_protocol):
if isinstance(cmd, Disconnect):
if cmd.payload == DisconnectReason.TOO_MANY_PEERS:
                raise HandshakeFailureTooManyPeers("Peer disconnected because it is already full")
if not isinstance(cmd, Hello):
raise HandshakeFailure(
f"First message across the DevP2P connection must be a Hello "
f"msg, got {cmd}, disconnecting"
)
protocol: BaseP2PProtocol
if base_protocol.version >= DEVP2P_V5:
snappy_support = cmd.payload.version >= DEVP2P_V5
if snappy_support:
# Now update the base protocol to support snappy compression
# This is needed so that Trinity is compatible with parity since
# parity sends Ping immediately after handshake
protocol = P2PProtocolV5(
transport,
command_id_offset=0,
snappy_support=True,
)
else:
protocol = base_protocol
else:
protocol = base_protocol
devp2p_receipt = DevP2PReceipt(
protocol=protocol,
version=cmd.payload.version,
client_version_string=cmd.payload.client_version_string,
capabilities=cmd.payload.capabilities,
remote_public_key=cmd.payload.remote_public_key,
listen_port=cmd.payload.listen_port,
)
break
else:
raise HandshakeFailure("DevP2P message stream exited before finishing handshake")
return devp2p_receipt, protocol
async def negotiate_protocol_handshakes(transport: TransportAPI,
p2p_handshake_params: DevP2PHandshakeParams,
protocol_handshakers: Sequence[HandshakerAPI[ProtocolAPI]],
) -> Tuple[MultiplexerAPI, DevP2PReceipt, Tuple[HandshakeReceiptAPI, ...]]: # noqa: E501
# The `p2p` Protocol class that will be used.
p2p_protocol_class = p2p_handshake_params.get_base_protocol_class()
# Collect our local capabilities, the set of (name, version) pairs for all
# of the protocols that we support.
local_capabilities = tuple(
handshaker.protocol_class.as_capability()
for handshaker
in protocol_handshakers
)
# Verify that there are no duplicated local or remote capabilities
duplicate_capabilities = duplicates(local_capabilities)
if duplicate_capabilities:
raise Exception(f"Duplicate local capabilities: {duplicate_capabilities}")
# We create an *ephemeral* version of the base `p2p` protocol with snappy
# compression disabled for the handshake. As part of the handshake, a new
# instance of this protocol will be created with snappy compression enabled
# if it is supported by the protocol version.
ephemeral_base_protocol = p2p_protocol_class(
transport,
command_id_offset=0,
snappy_support=False,
)
# Perform the actual `p2p` protocol handshake. We need the remote
# capabilities data from the receipt to select the appropriate sub
# protocols.
devp2p_receipt, base_protocol = await _do_p2p_handshake(
transport,
local_capabilities,
p2p_handshake_params,
ephemeral_base_protocol,
)
# This data structure is simply for easy retrieval of the proper
# `Handshaker` for each selected protocol.
protocol_handshakers_by_capability = dict(zip(local_capabilities, protocol_handshakers))
# Using our local capabilities and the ones transmitted by the remote
# select the highest shared version of each shared protocol.
selected_capabilities = _select_capabilities(
devp2p_receipt.capabilities,
local_capabilities,
)
# If there are no capability matches throw an exception.
if len(selected_capabilities) < 1:
raise NoMatchingPeerCapabilities(
"Found no matching capabilities between self and peer:\n"
f" - local : {tuple(sorted(local_capabilities))}\n"
f" - remote: {devp2p_receipt.capabilities}"
)
# Retrieve the handshakers which correspond to the selected protocols.
# These are needed to perform the actual handshake logic for each protocol.
selected_handshakers = tuple(
protocol_handshakers_by_capability[capability]
for capability in selected_capabilities
)
# Grab the `Protocol` class for each of the selected protocols. We need
# this to compute the offsets for each protocol's command ids, as well as
selected_protocol_types = tuple(
handshaker.protocol_class
for handshaker
in selected_handshakers
)
protocol_cmd_offsets = get_cmd_offsets(selected_protocol_types)
# Now instantiate instances of each of the protocol classes.
selected_protocols = tuple(
protocol_class(transport, command_id_offset, base_protocol.snappy_support)
for protocol_class, command_id_offset
in zip(selected_protocol_types, protocol_cmd_offsets)
)
# Create `Multiplexer` to abstract all of the protocols into a single
# interface to stream only messages relevant to the given protocol.
multiplexer = Multiplexer(transport, base_protocol, selected_protocols)
# This context manager runs a background task which reads messages off of
# the `Transport` and feeds them into protocol specific queues. Each
# protocol is responsible for reading its own messages from that queue via
# the `Multiplexer.stream_protocol_messages` API.
await multiplexer.stream_in_background()
# Concurrently perform the handshakes for each protocol, gathering up
# the returned receipts.
try:
protocol_receipts = cast(Tuple[HandshakeReceiptAPI, ...], await asyncio.gather(*(
handshaker.do_handshake(multiplexer, protocol)
for handshaker, protocol
in zip(selected_handshakers, selected_protocols)
)))
except BaseException as handshake_err:
# If the multiplexer has a streaming error, that will certainly be the cause of
# whatever handshake error we got, so raise that instead.
multiplexer.raise_if_streaming_error()
# Ok, no streaming error from the multiplexer, so stop it and raise the handshake error.
await multiplexer.stop_streaming()
raise handshake_err
else:
# The handshake was successful, but there's a chance the multiplexer's streaming stopped
# after that, so we may raise that here to prevent an attempt to use a stopped multiplexer
# further.
multiplexer.raise_if_streaming_error()
# Return the `Multiplexer` object as well as the handshake receipts. The
# `Multiplexer` object acts as a container for the individual protocol
# instances.
return multiplexer, devp2p_receipt, protocol_receipts
async def dial_out(remote: NodeAPI,
private_key: keys.PrivateKey,
p2p_handshake_params: DevP2PHandshakeParams,
protocol_handshakers: Sequence[HandshakerAPI[ProtocolAPI]],
) -> ConnectionAPI:
transport = await Transport.connect(
remote,
private_key,
)
transport.logger.debug2("Initiating p2p handshake with %s", remote)
try:
multiplexer, devp2p_receipt, protocol_receipts = await negotiate_protocol_handshakes(
transport=transport,
p2p_handshake_params=p2p_handshake_params,
protocol_handshakers=protocol_handshakers,
)
except BaseException:
# Note: This is one of two places where we manually handle closing the
# reader/writer connection pair in the event of an error during the
# peer connection and handshake process.
# See `p2p.auth.handshake` for the other.
try:
await transport.close()
except ConnectionResetError:
transport.logger.debug("Could not wait for transport to close")
raise
transport.logger.debug2("Completed p2p handshake with %s", remote)
connection = Connection(
multiplexer=multiplexer,
devp2p_receipt=devp2p_receipt,
protocol_receipts=protocol_receipts,
is_dial_out=True,
)
return connection
async def receive_dial_in(reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
private_key: keys.PrivateKey,
p2p_handshake_params: DevP2PHandshakeParams,
protocol_handshakers: Sequence[HandshakerAPI[ProtocolAPI]],
) -> Connection:
transport = await Transport.receive_connection(
reader=reader,
writer=writer,
private_key=private_key,
)
try:
multiplexer, devp2p_receipt, protocol_receipts = await negotiate_protocol_handshakes(
transport=transport,
p2p_handshake_params=p2p_handshake_params,
protocol_handshakers=protocol_handshakers,
)
except BaseException:
# Note: This is one of two places where we manually handle closing the
# reader/writer connection pair in the event of an error during the
# peer connection and handshake process.
# See `p2p.auth.handshake` for the other.
try:
await transport.close()
except ConnectionResetError:
transport.logger.debug("Could not wait for transport to close")
raise
connection = Connection(
multiplexer=multiplexer,
devp2p_receipt=devp2p_receipt,
protocol_receipts=protocol_receipts,
is_dial_out=False,
)
return connection
| true
| true
|
790cc11385a01d9ab155c4b02043db992f94b32d
| 1,435
|
py
|
Python
|
honeybot/plugins/google.py
|
marceloyb/honeybot
|
b2b92af54d01228ec150185eaa08a4baf55f1c88
|
[
"MIT"
] | null | null | null |
honeybot/plugins/google.py
|
marceloyb/honeybot
|
b2b92af54d01228ec150185eaa08a4baf55f1c88
|
[
"MIT"
] | null | null | null |
honeybot/plugins/google.py
|
marceloyb/honeybot
|
b2b92af54d01228ec150185eaa08a4baf55f1c88
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
[googleSearch.py]
Google Search Plugin
[Author]
Justin Walker
[About]
Returns the first three links from a google search.
[Commands]
>>> .google <<search term>>
returns search links
"""
try:
from googlesearch import search
except ImportError:
print("No module named 'google' found")
class Plugin:
def __init__(self):
pass
def __google(search_term):
# start is what link to start with, stop is how many links to get
# only_standard limits it to normal links instead of ads and extra
# links.
return search(search_term, start=1, stop=3, \
only_standard=True)
def run(self, incoming, methods, info):
try:
msgs = info['args'][1:][0].split()
if info['command'] == 'PRIVMSG' and msgs[0] == '.google':
                # Join all further messages (if any) into the search term,
                # keeping spaces between the words.
                term = ' '.join(msgs[1:])
for link in Plugin.__google(term):
methods['send'](info['address'], link)
else:
methods['send'](info['address'], "Input error. '.google search_term'.")
except Exception as e:
print('woops plugin error: ', e)
| 28.7
| 92
| 0.524739
|
try:
from googlesearch import search
except ImportError:
print("No module named 'google' found")
class Plugin:
def __init__(self):
pass
def __google(search_term):
return search(search_term, start=1, stop=3, \
only_standard=True)
def run(self, incoming, methods, info):
try:
msgs = info['args'][1:][0].split()
if info['command'] == 'PRIVMSG' and msgs[0] == '.google':
                term = ' '.join(msgs[1:])
for link in Plugin.__google(term):
methods['send'](info['address'], link)
else:
methods['send'](info['address'], "Input error. '.google search_term'.")
except Exception as e:
print('woops plugin error: ', e)
| true
| true
|
790cc141ab1b8956383f38a29e3b5d66b455a1b2
| 5,225
|
py
|
Python
|
simsalabim/dinosaur_adapter.py
|
MatthewThe/spymsi
|
1debdebbd09ba654923b034736f892e86a8414e6
|
[
"Apache-2.0"
] | 1
|
2022-01-08T16:17:42.000Z
|
2022-01-08T16:17:42.000Z
|
simsalabim/dinosaur_adapter.py
|
MatthewThe/spymsi
|
1debdebbd09ba654923b034736f892e86a8414e6
|
[
"Apache-2.0"
] | null | null | null |
simsalabim/dinosaur_adapter.py
|
MatthewThe/spymsi
|
1debdebbd09ba654923b034736f892e86a8414e6
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import sys
import os
import subprocess
import re
from .simsalabim import __version__, __copyright__
from . import add_quant_info as quant
from . import helpers
def main(argv):
print('dinosaur-adapter version %s\n%s' % (__version__, __copyright__))
print('Issued command:', os.path.basename(__file__) + " " + " ".join(map(str, sys.argv[1:])))
args, params = parseArgs()
run_dinosaur(args.dinosaur_jar_path, args.mzml_fns, args.output_folder, args.spectrum_output_format, params)
def parseArgs():
import argparse
apars = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
requiredNamed = apars.add_argument_group('required arguments')
requiredNamed.add_argument('--dinosaur_jar_path', metavar = "JAR", required = True,
help='''Path to the Dinosaur .jar file.
''')
apars.add_argument('--mzml_fns', default=None, metavar = "M", nargs='*',
help='''mzML file(s). To easily specify multiple files one can use wildcards, e.g. my_spectrum_files/*.mzML
''')
apars.add_argument('--file_list_file', default=None, metavar = "L",
help='''Text file with paths to mzML files, one per line.
''')
apars.add_argument('--output_folder', default="./dinosaur/", metavar='O',
help='''Output folder.
''')
apars.add_argument('--dinosaur_mem', default=8.0, metavar='M', type=float,
                       help='''Memory allocated for Dinosaur, in GB.
''')
apars.add_argument('--dinosaur_flags', default="", metavar='O',
help='''Extra command line flags to pass to Dinosaur, as indicated in Dinosaur's help text.
''')
apars.add_argument('--spectrum_output_format', default=None, metavar='F',
help='''If you want updated spectrum files with the new MS1 features assigned to the MS2 spectra, set this to the desired output format (ms2, mgf or mzML).
''')
apars.add_argument('--split_precursors',
help='''for .mzML or .ms2 output this creates a new spectrum for each precursor, e.g.
if spectrum with scan number 132 matches two precursors, we generate two spectra
with scan numbers 13201 and 13202. This can be useful if your downstream
analysis includes tools that do not support multiple precursors per spectrum,
such as MSGF+. For MGF output this flag is always set, as it does not support
multiple precursors per spectrum.
''',
action='store_true')
# ------------------------------------------------
args = apars.parse_args()
if not args.mzml_fns:
if args.file_list_file and len(args.file_list_file) > 0:
with open(args.file_list_file, 'r') as f:
args.mzml_fns = list(filter(lambda x : len(x) > 0, map(lambda x : re.sub(r"[\n\t\s]*", "", x), f.read().splitlines())))
else:
sys.exit("No input mzML files specified. Use either --mzml_fns or --file_list_file.")
elif args.file_list_file and len(args.file_list_file) > 0:
sys.exit("Ambiguous mzML input. Use either --mzml_fns or --file_list_file, not both.")
params = dict()
params['splitPrecursors'] = args.split_precursors
params['dinosaurMemory'] = args.dinosaur_mem
params['dinosaurFlags'] = args.dinosaur_flags
return args, params
def run_dinosaur(dinosaur_jar_path, mzml_fns, output_folder, spectrum_output_format, params):
dinosaur_binary = "java -Xmx%dM -jar %s --seed=1" % (int(params['dinosaurMemory']*1000), dinosaur_jar_path)
helpers.createDir(output_folder)
for mzml_fn in mzml_fns:
baseFN = helpers.getBase(helpers.getFileName(mzml_fn))
dinosaur_output_file = os.path.join(output_folder, baseFN + ".features.tsv")
if not os.path.isfile(dinosaur_output_file):
cmd_dinosaur = "%s --force --outDir=%s %s %s;" % (dinosaur_binary, output_folder, params['dinosaurFlags'], mzml_fn)
helpers.executeCmd(cmd_dinosaur)
else:
print("Found dinosaur output file at %s, remove this file to re-run Dinosaur on this file" % (dinosaur_output_file))
output_fn = os.path.join(output_folder, baseFN + ".dummy.txt")
if spectrum_output_format:
output_fn = os.path.join(output_folder, baseFN + ".recalibrated." + spectrum_output_format)
params['specPrecMapFile'] = os.path.join(output_folder, baseFN + ".feature_map.tsv")
if not os.path.isfile(params['specPrecMapFile']):
quant.add_accurate_precursors(dinosaur_output_file, mzml_fn, output_fn, params)
if output_fn.endswith(".dummy.txt"):
os.remove(output_fn)
else:
print("Found dinosaur mapping file at %s, remove this file to re-run Dinosaur on this file" % (params['specPrecMapFile']))
if __name__ == '__main__':
main(sys.argv[1:])
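# Example invocation (a sketch; the script/entry-point name and paths are
# hypothetical):
#   python dinosaur_adapter.py --dinosaur_jar_path Dinosaur.jar \
#       --mzml_fns data/*.mzML --output_folder ./dinosaur/ \
#       --spectrum_output_format mgf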
| 47.5
| 176
| 0.622775
|
from __future__ import print_function
import sys
import os
import subprocess
import re
from .simsalabim import __version__, __copyright__
from . import add_quant_info as quant
from . import helpers
def main(argv):
print('dinosaur-adapter version %s\n%s' % (__version__, __copyright__))
print('Issued command:', os.path.basename(__file__) + " " + " ".join(map(str, sys.argv[1:])))
args, params = parseArgs()
run_dinosaur(args.dinosaur_jar_path, args.mzml_fns, args.output_folder, args.spectrum_output_format, params)
def parseArgs():
import argparse
apars = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
requiredNamed = apars.add_argument_group('required arguments')
requiredNamed.add_argument('--dinosaur_jar_path', metavar = "JAR", required = True,
help='''Path to the Dinosaur .jar file.
''')
apars.add_argument('--mzml_fns', default=None, metavar = "M", nargs='*',
help='''mzML file(s). To easily specify multiple files one can use wildcards, e.g. my_spectrum_files/*.mzML
''')
apars.add_argument('--file_list_file', default=None, metavar = "L",
help='''Text file with paths to mzML files, one per line.
''')
apars.add_argument('--output_folder', default="./dinosaur/", metavar='O',
help='''Output folder.
''')
apars.add_argument('--dinosaur_mem', default=8.0, metavar='M', type=float,
                       help='''Memory allocated for Dinosaur, in GB.
''')
apars.add_argument('--dinosaur_flags', default="", metavar='O',
help='''Extra command line flags to pass to Dinosaur, as indicated in Dinosaur's help text.
''')
apars.add_argument('--spectrum_output_format', default=None, metavar='F',
help='''If you want updated spectrum files with the new MS1 features assigned to the MS2 spectra, set this to the desired output format (ms2, mgf or mzML).
''')
apars.add_argument('--split_precursors',
help='''for .mzML or .ms2 output this creates a new spectrum for each precursor, e.g.
if spectrum with scan number 132 matches two precursors, we generate two spectra
with scan numbers 13201 and 13202. This can be useful if your downstream
analysis includes tools that do not support multiple precursors per spectrum,
such as MSGF+. For MGF output this flag is always set, as it does not support
multiple precursors per spectrum.
''',
action='store_true')
# ------------------------------------------------
args = apars.parse_args()
if not args.mzml_fns:
if args.file_list_file and len(args.file_list_file) > 0:
with open(args.file_list_file, 'r') as f:
args.mzml_fns = list(filter(lambda x : len(x) > 0, map(lambda x : re.sub(r"[\n\t\s]*", "", x), f.read().splitlines())))
else:
sys.exit("No input mzML files specified. Use either --mzml_fns or --file_list_file.")
elif args.file_list_file and len(args.file_list_file) > 0:
sys.exit("Ambiguous mzML input. Use either --mzml_fns or --file_list_file, not both.")
params = dict()
params['splitPrecursors'] = args.split_precursors
params['dinosaurMemory'] = args.dinosaur_mem
params['dinosaurFlags'] = args.dinosaur_flags
return args, params
def run_dinosaur(dinosaur_jar_path, mzml_fns, output_folder, spectrum_output_format, params):
dinosaur_binary = "java -Xmx%dM -jar %s --seed=1" % (int(params['dinosaurMemory']*1000), dinosaur_jar_path)
helpers.createDir(output_folder)
for mzml_fn in mzml_fns:
baseFN = helpers.getBase(helpers.getFileName(mzml_fn))
dinosaur_output_file = os.path.join(output_folder, baseFN + ".features.tsv")
if not os.path.isfile(dinosaur_output_file):
cmd_dinosaur = "%s --force --outDir=%s %s %s;" % (dinosaur_binary, output_folder, params['dinosaurFlags'], mzml_fn)
helpers.executeCmd(cmd_dinosaur)
else:
print("Found dinosaur output file at %s, remove this file to re-run Dinosaur on this file" % (dinosaur_output_file))
output_fn = os.path.join(output_folder, baseFN + ".dummy.txt")
if spectrum_output_format:
output_fn = os.path.join(output_folder, baseFN + ".recalibrated." + spectrum_output_format)
params['specPrecMapFile'] = os.path.join(output_folder, baseFN + ".feature_map.tsv")
if not os.path.isfile(params['specPrecMapFile']):
quant.add_accurate_precursors(dinosaur_output_file, mzml_fn, output_fn, params)
if output_fn.endswith(".dummy.txt"):
os.remove(output_fn)
else:
print("Found dinosaur mapping file at %s, remove this file to re-run Dinosaur on this file" % (params['specPrecMapFile']))
if __name__ == '__main__':
main(sys.argv[1:])
| true
| true
|
790cc1a21fa6d41fe95ae8781ba045a5f03f0b62
| 7,896
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/eventgrid/get_event_channel.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetEventChannelResult',
'AwaitableGetEventChannelResult',
'get_event_channel',
]
@pulumi.output_type
class GetEventChannelResult:
"""
Event Channel.
"""
def __init__(__self__, destination=None, expiration_time_if_not_activated_utc=None, filter=None, id=None, name=None, partner_topic_friendly_description=None, partner_topic_readiness_state=None, provisioning_state=None, source=None, type=None):
if destination and not isinstance(destination, dict):
raise TypeError("Expected argument 'destination' to be a dict")
pulumi.set(__self__, "destination", destination)
if expiration_time_if_not_activated_utc and not isinstance(expiration_time_if_not_activated_utc, str):
raise TypeError("Expected argument 'expiration_time_if_not_activated_utc' to be a str")
pulumi.set(__self__, "expiration_time_if_not_activated_utc", expiration_time_if_not_activated_utc)
if filter and not isinstance(filter, dict):
raise TypeError("Expected argument 'filter' to be a dict")
pulumi.set(__self__, "filter", filter)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if partner_topic_friendly_description and not isinstance(partner_topic_friendly_description, str):
raise TypeError("Expected argument 'partner_topic_friendly_description' to be a str")
pulumi.set(__self__, "partner_topic_friendly_description", partner_topic_friendly_description)
if partner_topic_readiness_state and not isinstance(partner_topic_readiness_state, str):
raise TypeError("Expected argument 'partner_topic_readiness_state' to be a str")
pulumi.set(__self__, "partner_topic_readiness_state", partner_topic_readiness_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if source and not isinstance(source, dict):
raise TypeError("Expected argument 'source' to be a dict")
pulumi.set(__self__, "source", source)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def destination(self) -> Optional['outputs.EventChannelDestinationResponse']:
"""
Represents the destination of an event channel.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter(name="expirationTimeIfNotActivatedUtc")
def expiration_time_if_not_activated_utc(self) -> Optional[str]:
"""
Expiration time of the event channel. If this timer expires while the corresponding partner topic is never activated,
the event channel and corresponding partner topic are deleted.
"""
return pulumi.get(self, "expiration_time_if_not_activated_utc")
@property
@pulumi.getter
def filter(self) -> Optional['outputs.EventChannelFilterResponse']:
"""
Information about the filter for the event channel.
"""
return pulumi.get(self, "filter")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified identifier of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partnerTopicFriendlyDescription")
def partner_topic_friendly_description(self) -> Optional[str]:
"""
Friendly description about the topic. This can be set by the publisher/partner to show custom description for the customer partner topic.
This will be helpful to remove any ambiguity of the origin of creation of the partner topic for the customer.
"""
return pulumi.get(self, "partner_topic_friendly_description")
@property
@pulumi.getter(name="partnerTopicReadinessState")
def partner_topic_readiness_state(self) -> str:
"""
The readiness state of the corresponding partner topic.
"""
return pulumi.get(self, "partner_topic_readiness_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the event channel.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> Optional['outputs.EventChannelSourceResponse']:
"""
Source of the event channel. This represents a unique resource in the partner's resource model.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the resource
"""
return pulumi.get(self, "type")
class AwaitableGetEventChannelResult(GetEventChannelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventChannelResult(
destination=self.destination,
expiration_time_if_not_activated_utc=self.expiration_time_if_not_activated_utc,
filter=self.filter,
id=self.id,
name=self.name,
partner_topic_friendly_description=self.partner_topic_friendly_description,
partner_topic_readiness_state=self.partner_topic_readiness_state,
provisioning_state=self.provisioning_state,
source=self.source,
type=self.type)
def get_event_channel(event_channel_name: Optional[str] = None,
partner_namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventChannelResult:
"""
Event Channel.
API Version: 2020-04-01-preview.
:param str event_channel_name: Name of the event channel.
:param str partner_namespace_name: Name of the partner namespace.
:param str resource_group_name: The name of the resource group within the user's subscription.
"""
__args__ = dict()
__args__['eventChannelName'] = event_channel_name
__args__['partnerNamespaceName'] = partner_namespace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid:getEventChannel', __args__, opts=opts, typ=GetEventChannelResult).value
return AwaitableGetEventChannelResult(
destination=__ret__.destination,
expiration_time_if_not_activated_utc=__ret__.expiration_time_if_not_activated_utc,
filter=__ret__.filter,
id=__ret__.id,
name=__ret__.name,
partner_topic_friendly_description=__ret__.partner_topic_friendly_description,
partner_topic_readiness_state=__ret__.partner_topic_readiness_state,
provisioning_state=__ret__.provisioning_state,
source=__ret__.source,
type=__ret__.type)
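# Minimal usage sketch (the resource names below are hypothetical):
#   result = get_event_channel(event_channel_name='my-channel',
#                              partner_namespace_name='my-partner-namespace',
#                              resource_group_name='my-resource-group')
#   pulumi.export('state', result.provisioning_state)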
| 41.557895
| 247
| 0.691109
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetEventChannelResult',
'AwaitableGetEventChannelResult',
'get_event_channel',
]
@pulumi.output_type
class GetEventChannelResult:
def __init__(__self__, destination=None, expiration_time_if_not_activated_utc=None, filter=None, id=None, name=None, partner_topic_friendly_description=None, partner_topic_readiness_state=None, provisioning_state=None, source=None, type=None):
if destination and not isinstance(destination, dict):
raise TypeError("Expected argument 'destination' to be a dict")
pulumi.set(__self__, "destination", destination)
if expiration_time_if_not_activated_utc and not isinstance(expiration_time_if_not_activated_utc, str):
raise TypeError("Expected argument 'expiration_time_if_not_activated_utc' to be a str")
pulumi.set(__self__, "expiration_time_if_not_activated_utc", expiration_time_if_not_activated_utc)
if filter and not isinstance(filter, dict):
raise TypeError("Expected argument 'filter' to be a dict")
pulumi.set(__self__, "filter", filter)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if partner_topic_friendly_description and not isinstance(partner_topic_friendly_description, str):
raise TypeError("Expected argument 'partner_topic_friendly_description' to be a str")
pulumi.set(__self__, "partner_topic_friendly_description", partner_topic_friendly_description)
if partner_topic_readiness_state and not isinstance(partner_topic_readiness_state, str):
raise TypeError("Expected argument 'partner_topic_readiness_state' to be a str")
pulumi.set(__self__, "partner_topic_readiness_state", partner_topic_readiness_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if source and not isinstance(source, dict):
raise TypeError("Expected argument 'source' to be a dict")
pulumi.set(__self__, "source", source)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def destination(self) -> Optional['outputs.EventChannelDestinationResponse']:
return pulumi.get(self, "destination")
@property
@pulumi.getter(name="expirationTimeIfNotActivatedUtc")
def expiration_time_if_not_activated_utc(self) -> Optional[str]:
return pulumi.get(self, "expiration_time_if_not_activated_utc")
@property
@pulumi.getter
def filter(self) -> Optional['outputs.EventChannelFilterResponse']:
return pulumi.get(self, "filter")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partnerTopicFriendlyDescription")
def partner_topic_friendly_description(self) -> Optional[str]:
return pulumi.get(self, "partner_topic_friendly_description")
@property
@pulumi.getter(name="partnerTopicReadinessState")
def partner_topic_readiness_state(self) -> str:
return pulumi.get(self, "partner_topic_readiness_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> Optional['outputs.EventChannelSourceResponse']:
return pulumi.get(self, "source")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetEventChannelResult(GetEventChannelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventChannelResult(
destination=self.destination,
expiration_time_if_not_activated_utc=self.expiration_time_if_not_activated_utc,
filter=self.filter,
id=self.id,
name=self.name,
partner_topic_friendly_description=self.partner_topic_friendly_description,
partner_topic_readiness_state=self.partner_topic_readiness_state,
provisioning_state=self.provisioning_state,
source=self.source,
type=self.type)
def get_event_channel(event_channel_name: Optional[str] = None,
partner_namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventChannelResult:
__args__ = dict()
__args__['eventChannelName'] = event_channel_name
__args__['partnerNamespaceName'] = partner_namespace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid:getEventChannel', __args__, opts=opts, typ=GetEventChannelResult).value
return AwaitableGetEventChannelResult(
destination=__ret__.destination,
expiration_time_if_not_activated_utc=__ret__.expiration_time_if_not_activated_utc,
filter=__ret__.filter,
id=__ret__.id,
name=__ret__.name,
partner_topic_friendly_description=__ret__.partner_topic_friendly_description,
partner_topic_readiness_state=__ret__.partner_topic_readiness_state,
provisioning_state=__ret__.provisioning_state,
source=__ret__.source,
type=__ret__.type)
| true
| true
|
790cc3972cbf74b7f562425bacf2d9ac81d4ef1a
| 3,169
|
py
|
Python
|
server/tests/integration/test_dataset_upload.py
|
maxpark/dive
|
5dce25822d9b53d96ff0c2c8fb02265e4b43911e
|
[
"Apache-2.0"
] | null | null | null |
server/tests/integration/test_dataset_upload.py
|
maxpark/dive
|
5dce25822d9b53d96ff0c2c8fb02265e4b43911e
|
[
"Apache-2.0"
] | null | null | null |
server/tests/integration/test_dataset_upload.py
|
maxpark/dive
|
5dce25822d9b53d96ff0c2c8fb02265e4b43911e
|
[
"Apache-2.0"
] | null | null | null |
import json
from girder.constants import AccessType
from girder_client import HttpError
import pytest
from .conftest import getClient, getTestFolder, localDataRoot, users, wait_for_jobs
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=3)
def test_reset_integration_env(user: dict):
client = getClient(user['login'])
privateFolder = getTestFolder(client)
client.delete(f"folder/{privateFolder['_id']}")
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=4)
def test_upload_user_data(user: dict):
client = getClient(user['login'])
createdDatasets = []
for dataset in user['data']:
dsPath = localDataRoot / str(dataset['path'])
privateFolder = getTestFolder(client)
newDatasetFolder = client.createFolder(
privateFolder['_id'],
dataset['name'],
metadata={
'fps': dataset['fps'],
'type': dataset['type'],
},
)
createdDatasets.append(newDatasetFolder)
# Validate the fileset
filenames = [file.name for file in dsPath.iterdir()]
valid = client.post('dive_dataset/validate_files', json=filenames)
assert valid['ok'], 'File validation failed'
for file in dsPath.iterdir():
if file.is_file():
client.uploadFileToFolder(newDatasetFolder['_id'], str(file))
client.post(f'dive_rpc/postprocess/{newDatasetFolder["_id"]}')
if dataset.get('sharedWith', False):
me = client.get('user/me')
otherClient = getClient(dataset['sharedWith'])
otherUser = otherClient.get('user/me')
with pytest.raises(HttpError):
otherClient.get(f'dive_dataset/{newDatasetFolder["_id"]}')
client.put(
f'folder/{newDatasetFolder["_id"]}/access',
data={
'public': False,
'recurse': False,
'progress': False,
'access': json.dumps(
{
'users': [
{'id': me['_id'], 'level': AccessType.ADMIN, 'flags': []},
{'id': otherUser['_id'], 'level': AccessType.READ, 'flags': []},
],
'groups': [],
}
),
},
)
assert (
otherClient.get(
f'dive_dataset/{newDatasetFolder["_id"]}', jsonResp=False
).status_code
== 200
)
wait_for_jobs(client)
# Confirm that the new dataset looks like it should.
for created, expected in zip(createdDatasets, user['data']):
created = client.get(f'dive_dataset/{created["_id"]}')
if expected['type'] == 'video':
assert created['fps'] == expected['originalFps'] or created['fps'] == expected['fps']
assert created['annotate']
assert created['originalFps'] == expected['originalFps']
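# These tests are gated behind the `integration` marker and ordered via
# `pytest.mark.run(order=...)` (pytest-ordering is an assumption), e.g.:
#   pytest -m integration server/tests/integration/test_dataset_upload.py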
| 38.180723
| 97
| 0.546229
|
import json
from girder.constants import AccessType
from girder_client import HttpError
import pytest
from .conftest import getClient, getTestFolder, localDataRoot, users, wait_for_jobs
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=3)
def test_reset_integration_env(user: dict):
client = getClient(user['login'])
privateFolder = getTestFolder(client)
client.delete(f"folder/{privateFolder['_id']}")
@pytest.mark.integration
@pytest.mark.parametrize("user", users.values())
@pytest.mark.run(order=4)
def test_upload_user_data(user: dict):
client = getClient(user['login'])
createdDatasets = []
for dataset in user['data']:
dsPath = localDataRoot / str(dataset['path'])
privateFolder = getTestFolder(client)
newDatasetFolder = client.createFolder(
privateFolder['_id'],
dataset['name'],
metadata={
'fps': dataset['fps'],
'type': dataset['type'],
},
)
createdDatasets.append(newDatasetFolder)
filenames = [file.name for file in dsPath.iterdir()]
valid = client.post('dive_dataset/validate_files', json=filenames)
assert valid['ok'], 'File validation failed'
for file in dsPath.iterdir():
if file.is_file():
client.uploadFileToFolder(newDatasetFolder['_id'], str(file))
client.post(f'dive_rpc/postprocess/{newDatasetFolder["_id"]}')
if dataset.get('sharedWith', False):
me = client.get('user/me')
otherClient = getClient(dataset['sharedWith'])
otherUser = otherClient.get('user/me')
with pytest.raises(HttpError):
otherClient.get(f'dive_dataset/{newDatasetFolder["_id"]}')
client.put(
f'folder/{newDatasetFolder["_id"]}/access',
data={
'public': False,
'recurse': False,
'progress': False,
'access': json.dumps(
{
'users': [
{'id': me['_id'], 'level': AccessType.ADMIN, 'flags': []},
{'id': otherUser['_id'], 'level': AccessType.READ, 'flags': []},
],
'groups': [],
}
),
},
)
assert (
otherClient.get(
f'dive_dataset/{newDatasetFolder["_id"]}', jsonResp=False
).status_code
== 200
)
wait_for_jobs(client)
for created, expected in zip(createdDatasets, user['data']):
created = client.get(f'dive_dataset/{created["_id"]}')
if expected['type'] == 'video':
assert created['fps'] == expected['originalFps'] or created['fps'] == expected['fps']
assert created['annotate']
assert created['originalFps'] == expected['originalFps']
| true
| true
|
790cc39c12fa89c6948cffb05bfafa9131ed6db1
| 1,655
|
py
|
Python
|
controller/class_converter.py
|
EmilRyberg/P6BinPicking
|
c33b650db3ae16c56d46d12bfbc59d26c0d9e6aa
|
[
"MIT"
] | 1
|
2021-08-04T16:18:22.000Z
|
2021-08-04T16:18:22.000Z
|
controller/class_converter.py
|
EmilRyberg/P6BinPicking
|
c33b650db3ae16c56d46d12bfbc59d26c0d9e6aa
|
[
"MIT"
] | null | null | null |
controller/class_converter.py
|
EmilRyberg/P6BinPicking
|
c33b650db3ae16c56d46d12bfbc59d26c0d9e6aa
|
[
"MIT"
] | 1
|
2021-08-03T03:41:41.000Z
|
2021-08-03T03:41:41.000Z
|
from controller.enums import PartEnum
def convert_from_part_id(part_id):
if part_id == PartEnum.FUSE.value:
return 'Fuse', 'Fuse'
elif part_id == PartEnum.BACKCOVER.value:
return 'BottomCover', 'BottomCoverFlipped'
elif part_id == PartEnum.WHITECOVER.value:
return 'WhiteCover', 'WhiteCoverFlipped'
elif part_id == PartEnum.BLUECOVER.value:
return 'BlueCover', 'BlueCoverFlipped'
elif part_id == PartEnum.BLACKCOVER.value:
return 'BlackCover', 'BlackCoverFlipped'
elif part_id == PartEnum.PCB.value:
return 'PCB', 'PCBFlipped'
else:
print("[W] Could not convert class_id")
return -1, -1
def convert_to_part_id(class_name):
if class_name == 'Fuse':
return PartEnum.FUSE.value
elif class_name == 'BottomCover':
return PartEnum.BACKCOVER.value
elif class_name == 'BottomCoverFlipped':
return PartEnum.BACKCOVER_FLIPPED.value
elif class_name == 'WhiteCover':
return PartEnum.WHITECOVER.value
elif class_name == 'WhiteCoverFlipped':
return PartEnum.WHITECOVER_FLIPPED.value
elif class_name == 'BlueCover':
return PartEnum.BLUECOVER.value
elif class_name == 'BlueCoverFlipped':
return PartEnum.BLUECOVER_FLIPPED.value
elif class_name == 'BlackCover':
return PartEnum.BLACKCOVER.value
elif class_name == 'BlackCoverFlipped':
return PartEnum.BLACKCOVER_FLIPPED.value
elif class_name == 'PCB':
return PartEnum.PCB.value
elif class_name == 'PCBFlipped':
return PartEnum.PCB_FLIPPED.value
else:
return PartEnum.INVALID.value
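# Round-trip sketch grounded in the mappings above:
#   name, flipped = convert_from_part_id(PartEnum.PCB.value)  # ('PCB', 'PCBFlipped')
#   assert convert_to_part_id(name) == PartEnum.PCB.value
#   assert convert_to_part_id(flipped) == PartEnum.PCB_FLIPPED.value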
| 34.479167
| 50
| 0.682175
|
from controller.enums import PartEnum
def convert_from_part_id(part_id):
if part_id == PartEnum.FUSE.value:
return 'Fuse', 'Fuse'
elif part_id == PartEnum.BACKCOVER.value:
return 'BottomCover', 'BottomCoverFlipped'
elif part_id == PartEnum.WHITECOVER.value:
return 'WhiteCover', 'WhiteCoverFlipped'
elif part_id == PartEnum.BLUECOVER.value:
return 'BlueCover', 'BlueCoverFlipped'
elif part_id == PartEnum.BLACKCOVER.value:
return 'BlackCover', 'BlackCoverFlipped'
elif part_id == PartEnum.PCB.value:
return 'PCB', 'PCBFlipped'
else:
print("[W] Could not convert class_id")
return -1, -1
def convert_to_part_id(class_name):
if class_name == 'Fuse':
return PartEnum.FUSE.value
elif class_name == 'BottomCover':
return PartEnum.BACKCOVER.value
elif class_name == 'BottomCoverFlipped':
return PartEnum.BACKCOVER_FLIPPED.value
elif class_name == 'WhiteCover':
return PartEnum.WHITECOVER.value
elif class_name == 'WhiteCoverFlipped':
return PartEnum.WHITECOVER_FLIPPED.value
elif class_name == 'BlueCover':
return PartEnum.BLUECOVER.value
elif class_name == 'BlueCoverFlipped':
return PartEnum.BLUECOVER_FLIPPED.value
elif class_name == 'BlackCover':
return PartEnum.BLACKCOVER.value
elif class_name == 'BlackCoverFlipped':
return PartEnum.BLACKCOVER_FLIPPED.value
elif class_name == 'PCB':
return PartEnum.PCB.value
elif class_name == 'PCBFlipped':
return PartEnum.PCB_FLIPPED.value
else:
return PartEnum.INVALID.value
| true
| true
|
790cc3aaa9deb871de96be4fd40b9fbe3b566426
| 3,715
|
py
|
Python
|
python_module/sirius/ot/ot_precondition.py
|
mtaillefumier/SIRIUS
|
50ec1c202c019113c5660f1966b170dec9dfd4d4
|
[
"BSD-2-Clause"
] | 77
|
2016-03-18T08:38:30.000Z
|
2022-03-11T14:06:25.000Z
|
python_module/sirius/ot/ot_precondition.py
|
simonpintarelli/SIRIUS
|
f4b5c4810af2a3ea1e67992d65750535227da84b
|
[
"BSD-2-Clause"
] | 240
|
2016-04-12T16:39:11.000Z
|
2022-03-31T08:46:12.000Z
|
python_module/sirius/ot/ot_precondition.py
|
simonpintarelli/SIRIUS
|
f4b5c4810af2a3ea1e67992d65750535227da84b
|
[
"BSD-2-Clause"
] | 43
|
2016-03-18T17:45:07.000Z
|
2022-02-28T05:27:59.000Z
|
from ..coefficient_array import PwCoeffs
from scipy.sparse import dia_matrix
import numpy as np
def make_kinetic_precond(kpointset, c0, eps=0.1, asPwCoeffs=True):
"""
Preconditioner
P = 1 / (||k|| + ε)
Keyword Arguments:
kpointset --
"""
nk = len(kpointset)
nc = kpointset.ctx().num_spins()
if nc == 1 and nk == 1 and not asPwCoeffs:
# return as np.matrix
kp = kpointset[0]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([
1 / (np.sum((np.array(gkvec.gkvec(i)))**2) + eps)
for i in range(N)
])
return DiagonalPreconditioner(
D=dia_matrix((d, 0), shape=(N, N)), c0=c0)
else:
P = PwCoeffs(dtype=np.float64, ctype=dia_matrix)
for k in range(nk):
kp = kpointset[k]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([
1 / (np.sum(
(np.array(gkvec.gkvec_cart(i)))**2) + eps)
for i in range(N)
])
for ispn in range(nc):
P[k, ispn] = dia_matrix((d, 0), shape=(N, N))
return DiagonalPreconditioner(P, c0)
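# Hedged illustration (not part of the SIRIUS API): a minimal sketch of the
# diagonal 1 / (|k|^2 + eps) construction used above, on synthetic data.
# ``fake_gkvec`` is a hypothetical stand-in for ``gkvec.gkvec_cart(i)``.
def _example_kinetic_diagonal(fake_gkvec, eps=0.1):
    """Build the kinetic diagonal as a scipy dia_matrix (illustrative only)."""
    n = len(fake_gkvec)
    d = np.array([1.0 / (np.sum(np.asarray(k) ** 2) + eps) for k in fake_gkvec])
    return dia_matrix((d, 0), shape=(n, n))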
class Preconditioner:
def __init__(self):
pass
class DiagonalPreconditioner(Preconditioner):
"""
Apply diagonal preconditioner and project resulting gradient to satisfy the constraint.
"""
def __init__(self, D, c0):
super().__init__()
self.c0 = c0
self.D = D
def __matmul__(self, other):
"""
"""
from ..coefficient_array import CoefficientArray
from .ot_transformations import lagrangeMult
out = type(other)(dtype=other.dtype)
if isinstance(other, CoefficientArray):
for key, Dl in self.D.items():
out[key] = Dl * other[key]
else:
raise ValueError('wrong type given')
ll = lagrangeMult(other, self.c0, self)
return out + ll
    def __mul__(self, s):
        """
        Scale the preconditioner in place by a scalar ``s`` (returning ``self``),
        or apply it block-wise to a ``CoefficientArray``.
        """
        from ..coefficient_array import CoefficientArray
        import numpy as np
        if np.isscalar(s):
            for key, Dl in self.D.items():
                self.D[key] = s * Dl
            return self  # bug fix: the scalar branch previously fell through and returned None
        elif isinstance(s, CoefficientArray):
            out = type(s)(dtype=s.dtype)
            for key in s.keys():
                out[key] = self.D[key] * s[key]
            return out
__lmul__ = __mul__
__rmul__ = __mul__
def __neg__(self):
"""
"""
from ..coefficient_array import CoefficientArray
if isinstance(self.D, CoefficientArray):
out_data = type(self.D)(dtype=self.D.dtype, ctype=self.D.ctype)
out = DiagonalPreconditioner(out_data, self.c0)
for k, v in self.D.items():
out.D[k] = -v
return out
else:
out = DiagonalPreconditioner(self.D, self.c0)
out.D = -self.D
return out
def __getitem__(self, key):
return self.D[key]
class IdentityPreconditioner(Preconditioner):
def __init__(self, c0, _f=1):
super().__init__()
self.c0 = c0
self._f = _f
def __matmul__(self, other):
from .ot_transformations import lagrangeMult
ll = lagrangeMult(other, self.c0, self)
return self._f * other + ll
def __mul__(self, s):
return self._f * s
def __neg__(self):
return IdentityPreconditioner(self.c0, _f=-self._f)
def __getitem__(self, key):
return self._f
__lmul__ = __mul__
__rmul__ = __mul__
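# Hedged usage sketch (not part of SIRIUS): for a single k-point and spin channel,
# make_kinetic_precond(kpointset, c0, asPwCoeffs=False) stores a plain scipy
# dia_matrix in ``P.D``, so
#   y = P.D @ x
# scales a NumPy vector elementwise by 1 / (|k|^2 + eps), while ``P @ x`` also
# applies the Lagrange-multiplier constraint projection.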
| 26.92029
| 91
| 0.544818
|
from ..coefficient_array import PwCoeffs
from scipy.sparse import dia_matrix
import numpy as np
def make_kinetic_precond(kpointset, c0, eps=0.1, asPwCoeffs=True):
nk = len(kpointset)
nc = kpointset.ctx().num_spins()
if nc == 1 and nk == 1 and not asPwCoeffs:
kp = kpointset[0]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([
1 / (np.sum((np.array(gkvec.gkvec(i)))**2) + eps)
for i in range(N)
])
return DiagonalPreconditioner(
D=dia_matrix((d, 0), shape=(N, N)), c0=c0)
else:
P = PwCoeffs(dtype=np.float64, ctype=dia_matrix)
for k in range(nk):
kp = kpointset[k]
gkvec = kp.gkvec()
assert (gkvec.num_gvec() == gkvec.count())
N = gkvec.count()
d = np.array([
1 / (np.sum(
(np.array(gkvec.gkvec_cart(i)))**2) + eps)
for i in range(N)
])
for ispn in range(nc):
P[k, ispn] = dia_matrix((d, 0), shape=(N, N))
return DiagonalPreconditioner(P, c0)
class Preconditioner:
def __init__(self):
pass
class DiagonalPreconditioner(Preconditioner):
def __init__(self, D, c0):
super().__init__()
self.c0 = c0
self.D = D
def __matmul__(self, other):
from ..coefficient_array import CoefficientArray
from .ot_transformations import lagrangeMult
out = type(other)(dtype=other.dtype)
if isinstance(other, CoefficientArray):
for key, Dl in self.D.items():
out[key] = Dl * other[key]
else:
raise ValueError('wrong type given')
ll = lagrangeMult(other, self.c0, self)
return out + ll
def __mul__(self, s):
from ..coefficient_array import CoefficientArray
import numpy as np
        if np.isscalar(s):
            for key, Dl in self.D.items():
                self.D[key] = s*Dl
            return self
elif isinstance(s, CoefficientArray):
out = type(s)(dtype=s.dtype)
for key in s.keys():
out[key] = self.D[key] * s[key]
return out
__lmul__ = __mul__
__rmul__ = __mul__
def __neg__(self):
from ..coefficient_array import CoefficientArray
if isinstance(self.D, CoefficientArray):
out_data = type(self.D)(dtype=self.D.dtype, ctype=self.D.ctype)
out = DiagonalPreconditioner(out_data, self.c0)
for k, v in self.D.items():
out.D[k] = -v
return out
else:
out = DiagonalPreconditioner(self.D, self.c0)
out.D = -self.D
return out
def __getitem__(self, key):
return self.D[key]
class IdentityPreconditioner(Preconditioner):
def __init__(self, c0, _f=1):
super().__init__()
self.c0 = c0
self._f = _f
def __matmul__(self, other):
from .ot_transformations import lagrangeMult
ll = lagrangeMult(other, self.c0, self)
return self._f * other + ll
def __mul__(self, s):
return self._f * s
def __neg__(self):
return IdentityPreconditioner(self.c0, _f=-self._f)
def __getitem__(self, key):
return self._f
__lmul__ = __mul__
__rmul__ = __mul__
| true
| true
|
790cc3d297af72c200291c7f356793f2b038cd2b
| 5,814
|
py
|
Python
|
exchangelib/services/get_server_time_zones.py
|
monperrus/exchangelib-1
|
31f5ea9150ab724305a6cf7b0fef745d1cb9bfb8
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/services/get_server_time_zones.py
|
monperrus/exchangelib-1
|
31f5ea9150ab724305a6cf7b0fef745d1cb9bfb8
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/services/get_server_time_zones.py
|
monperrus/exchangelib-1
|
31f5ea9150ab724305a6cf7b0fef745d1cb9bfb8
|
[
"BSD-2-Clause"
] | null | null | null |
import datetime
from ..errors import NaiveDateTimeNotAllowed
from ..ewsdatetime import EWSDateTime
from ..util import create_element, set_xml_value, xml_text_to_value, peek, TNS, MNS
from ..version import EXCHANGE_2010
from .common import EWSService
class GetServerTimeZones(EWSService):
"""
MSDN: https://msdn.microsoft.com/en-us/library/office/dd899371(v=exchg.150).aspx
"""
SERVICE_NAME = 'GetServerTimeZones'
element_container_name = '{%s}TimeZoneDefinitions' % MNS
def call(self, timezones=None, return_full_timezone_data=False):
if self.protocol.version.build < EXCHANGE_2010:
raise NotImplementedError('%s is only supported for Exchange 2010 servers and later' % self.SERVICE_NAME)
return self._get_elements(payload=self.get_payload(
timezones=timezones,
return_full_timezone_data=return_full_timezone_data
))
def get_payload(self, timezones, return_full_timezone_data):
payload = create_element(
'm:%s' % self.SERVICE_NAME,
attrs=dict(ReturnFullTimeZoneData='true' if return_full_timezone_data else 'false'),
)
if timezones is not None:
is_empty, timezones = peek(timezones)
if not is_empty:
tz_ids = create_element('m:Ids')
for timezone in timezones:
tz_id = set_xml_value(create_element('t:Id'), timezone.ms_id, version=self.protocol.version)
tz_ids.append(tz_id)
payload.append(tz_ids)
return payload
def _get_elements_in_container(self, container):
for timezonedef in container:
tz_id = timezonedef.get('Id')
tz_name = timezonedef.get('Name')
tz_periods = self._get_periods(timezonedef)
tz_transitions_groups = self._get_transitions_groups(timezonedef)
tz_transitions = self._get_transitions(timezonedef)
yield (tz_id, tz_name, tz_periods, tz_transitions, tz_transitions_groups)
@staticmethod
def _get_periods(timezonedef):
tz_periods = {}
periods = timezonedef.find('{%s}Periods' % TNS)
for period in periods.findall('{%s}Period' % TNS):
# Convert e.g. "trule:Microsoft/Registry/W. Europe Standard Time/2006-Daylight" to (2006, 'Daylight')
p_year, p_type = period.get('Id').rsplit('/', 1)[1].split('-')
tz_periods[(int(p_year), p_type)] = dict(
name=period.get('Name'),
bias=xml_text_to_value(period.get('Bias'), datetime.timedelta)
)
return tz_periods
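    # Illustrative sketch (not part of exchangelib): the period-ID parsing used
    # above, shown on the literal example from the comment.
    @staticmethod
    def _example_parse_period_id(period_id='trule:Microsoft/Registry/W. Europe Standard Time/2006-Daylight'):
        p_year, p_type = period_id.rsplit('/', 1)[1].split('-')
        return int(p_year), p_type  # -> (2006, 'Daylight')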
@staticmethod
def _get_transitions_groups(timezonedef):
from ..recurrence import WEEKDAY_NAMES
tz_transitions_groups = {}
transitiongroups = timezonedef.find('{%s}TransitionsGroups' % TNS)
if transitiongroups is not None:
for transitiongroup in transitiongroups.findall('{%s}TransitionsGroup' % TNS):
tg_id = int(transitiongroup.get('Id'))
tz_transitions_groups[tg_id] = []
for transition in transitiongroup.findall('{%s}Transition' % TNS):
# Apply same conversion to To as for period IDs
to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')
tz_transitions_groups[tg_id].append(dict(
to=(int(to_year), to_type),
))
for transition in transitiongroup.findall('{%s}RecurringDayTransition' % TNS):
# Apply same conversion to To as for period IDs
to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')
occurrence = xml_text_to_value(transition.find('{%s}Occurrence' % TNS).text, int)
if occurrence == -1:
                        # -1 means "last occurrence of the weekday in the month";
                        # normalize it to 5, as in TimeZoneTransition.from_xml()
occurrence = 5
tz_transitions_groups[tg_id].append(dict(
to=(int(to_year), to_type),
offset=xml_text_to_value(transition.find('{%s}TimeOffset' % TNS).text, datetime.timedelta),
iso_month=xml_text_to_value(transition.find('{%s}Month' % TNS).text, int),
iso_weekday=WEEKDAY_NAMES.index(transition.find('{%s}DayOfWeek' % TNS).text) + 1,
occurrence=occurrence,
))
return tz_transitions_groups
@staticmethod
def _get_transitions(timezonedef):
tz_transitions = {}
transitions = timezonedef.find('{%s}Transitions' % TNS)
if transitions is not None:
for transition in transitions.findall('{%s}Transition' % TNS):
to = transition.find('{%s}To' % TNS)
if to.get('Kind') != 'Group':
raise ValueError('Unexpected "Kind" XML attr: %s' % to.get('Kind'))
tg_id = xml_text_to_value(to.text, int)
tz_transitions[tg_id] = None
for transition in transitions.findall('{%s}AbsoluteDateTransition' % TNS):
to = transition.find('{%s}To' % TNS)
if to.get('Kind') != 'Group':
raise ValueError('Unexpected "Kind" XML attr: %s' % to.get('Kind'))
tg_id = xml_text_to_value(to.text, int)
try:
t_date = xml_text_to_value(transition.find('{%s}DateTime' % TNS).text, EWSDateTime).date()
except NaiveDateTimeNotAllowed as e:
# We encountered a naive datetime. Don't worry. we just need the date
t_date = e.args[0].date()
tz_transitions[tg_id] = t_date
return tz_transitions
| 50.12069
| 117
| 0.596491
|
import datetime
from ..errors import NaiveDateTimeNotAllowed
from ..ewsdatetime import EWSDateTime
from ..util import create_element, set_xml_value, xml_text_to_value, peek, TNS, MNS
from ..version import EXCHANGE_2010
from .common import EWSService
class GetServerTimeZones(EWSService):
SERVICE_NAME = 'GetServerTimeZones'
element_container_name = '{%s}TimeZoneDefinitions' % MNS
def call(self, timezones=None, return_full_timezone_data=False):
if self.protocol.version.build < EXCHANGE_2010:
raise NotImplementedError('%s is only supported for Exchange 2010 servers and later' % self.SERVICE_NAME)
return self._get_elements(payload=self.get_payload(
timezones=timezones,
return_full_timezone_data=return_full_timezone_data
))
def get_payload(self, timezones, return_full_timezone_data):
payload = create_element(
'm:%s' % self.SERVICE_NAME,
attrs=dict(ReturnFullTimeZoneData='true' if return_full_timezone_data else 'false'),
)
if timezones is not None:
is_empty, timezones = peek(timezones)
if not is_empty:
tz_ids = create_element('m:Ids')
for timezone in timezones:
tz_id = set_xml_value(create_element('t:Id'), timezone.ms_id, version=self.protocol.version)
tz_ids.append(tz_id)
payload.append(tz_ids)
return payload
def _get_elements_in_container(self, container):
for timezonedef in container:
tz_id = timezonedef.get('Id')
tz_name = timezonedef.get('Name')
tz_periods = self._get_periods(timezonedef)
tz_transitions_groups = self._get_transitions_groups(timezonedef)
tz_transitions = self._get_transitions(timezonedef)
yield (tz_id, tz_name, tz_periods, tz_transitions, tz_transitions_groups)
@staticmethod
def _get_periods(timezonedef):
tz_periods = {}
periods = timezonedef.find('{%s}Periods' % TNS)
for period in periods.findall('{%s}Period' % TNS):
p_year, p_type = period.get('Id').rsplit('/', 1)[1].split('-')
tz_periods[(int(p_year), p_type)] = dict(
name=period.get('Name'),
bias=xml_text_to_value(period.get('Bias'), datetime.timedelta)
)
return tz_periods
@staticmethod
def _get_transitions_groups(timezonedef):
from ..recurrence import WEEKDAY_NAMES
tz_transitions_groups = {}
transitiongroups = timezonedef.find('{%s}TransitionsGroups' % TNS)
if transitiongroups is not None:
for transitiongroup in transitiongroups.findall('{%s}TransitionsGroup' % TNS):
tg_id = int(transitiongroup.get('Id'))
tz_transitions_groups[tg_id] = []
for transition in transitiongroup.findall('{%s}Transition' % TNS):
to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')
tz_transitions_groups[tg_id].append(dict(
to=(int(to_year), to_type),
))
for transition in transitiongroup.findall('{%s}RecurringDayTransition' % TNS):
to_year, to_type = transition.find('{%s}To' % TNS).text.rsplit('/', 1)[1].split('-')
occurrence = xml_text_to_value(transition.find('{%s}Occurrence' % TNS).text, int)
if occurrence == -1:
occurrence = 5
tz_transitions_groups[tg_id].append(dict(
to=(int(to_year), to_type),
offset=xml_text_to_value(transition.find('{%s}TimeOffset' % TNS).text, datetime.timedelta),
iso_month=xml_text_to_value(transition.find('{%s}Month' % TNS).text, int),
iso_weekday=WEEKDAY_NAMES.index(transition.find('{%s}DayOfWeek' % TNS).text) + 1,
occurrence=occurrence,
))
return tz_transitions_groups
@staticmethod
def _get_transitions(timezonedef):
tz_transitions = {}
transitions = timezonedef.find('{%s}Transitions' % TNS)
if transitions is not None:
for transition in transitions.findall('{%s}Transition' % TNS):
to = transition.find('{%s}To' % TNS)
if to.get('Kind') != 'Group':
raise ValueError('Unexpected "Kind" XML attr: %s' % to.get('Kind'))
tg_id = xml_text_to_value(to.text, int)
tz_transitions[tg_id] = None
for transition in transitions.findall('{%s}AbsoluteDateTransition' % TNS):
to = transition.find('{%s}To' % TNS)
if to.get('Kind') != 'Group':
raise ValueError('Unexpected "Kind" XML attr: %s' % to.get('Kind'))
tg_id = xml_text_to_value(to.text, int)
try:
t_date = xml_text_to_value(transition.find('{%s}DateTime' % TNS).text, EWSDateTime).date()
except NaiveDateTimeNotAllowed as e:
t_date = e.args[0].date()
tz_transitions[tg_id] = t_date
return tz_transitions
| true
| true
|
790cc54f26c5872213e9b1fdae32c9e73fd69e15
| 1,436
|
py
|
Python
|
app.py
|
aws-samples/aws-securityhub-falco-ecs-eks-integration
|
cb667031e043154f3702926983338e8dcb1afa80
|
[
"MIT-0"
] | 2
|
2021-12-18T17:30:39.000Z
|
2022-02-23T02:54:40.000Z
|
app.py
|
aws-samples/aws-securityhub-falco-ecs-eks-integration
|
cb667031e043154f3702926983338e8dcb1afa80
|
[
"MIT-0"
] | 1
|
2022-02-02T17:30:19.000Z
|
2022-02-07T16:23:28.000Z
|
app.py
|
aws-samples/aws-securityhub-falco-ecs-eks-integration
|
cb667031e043154f3702926983338e8dcb1afa80
|
[
"MIT-0"
] | null | null | null |
#!/usr/bin/env python3
import os
from aws_cdk import core as cdk
# For consistency with TypeScript code, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import core
from aws_securityhub_falco_ecs_eks_integration.aws_securityhub_falco_ecs_eks_integration_stack import AwsSecurityhubFalcoEcsEksIntegrationStack
app = core.App()
AwsSecurityhubFalcoEcsEksIntegrationStack(app, "AwsSecurityhubFalcoEcsEksIntegrationStack",
# If you don't specify 'env', this stack will be environment-agnostic.
# Account/Region-dependent features and context lookups will not work,
# but a single synthesized template can be deployed anywhere.
# Uncomment the next line to specialize this stack for the AWS Account
# and Region that are implied by the current CLI configuration.
#env=core.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')),
# Uncomment the next line if you know exactly what Account and Region you
# want to deploy the stack to. */
#env=core.Environment(account='123456789012', region='us-east-1'),
# For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html
)
app.synth()
| 41.028571
| 143
| 0.769499
|
import os
from aws_cdk import core as cdk
# with examples from the CDK Developer's Guide, which are in the process of
from aws_cdk import core
from aws_securityhub_falco_ecs_eks_integration.aws_securityhub_falco_ecs_eks_integration_stack import AwsSecurityhubFalcoEcsEksIntegrationStack
app = core.App()
AwsSecurityhubFalcoEcsEksIntegrationStack(app, "AwsSecurityhubFalcoEcsEksIntegrationStack",
# If you don't specify 'env', this stack will be environment-agnostic.
)
app.synth()
| true
| true
|
790cc6aa3346fa4c31e448d5bb45da8672d921a8
| 4,495
|
py
|
Python
|
zPE/base/pgm/asma90_err_code_rc.py
|
T-Tony-T/mainframe-env-simulator
|
9ca8b726b5962502d53c7e8483c5e4fd89ce5ac6
|
[
"BSD-3-Clause"
] | 3
|
2015-07-20T20:11:38.000Z
|
2019-07-17T01:53:50.000Z
|
zPE/base/pgm/asma90_err_code_rc.py
|
T-Tony-T/mainframe-env-simulator
|
9ca8b726b5962502d53c7e8483c5e4fd89ce5ac6
|
[
"BSD-3-Clause"
] | null | null | null |
zPE/base/pgm/asma90_err_code_rc.py
|
T-Tony-T/mainframe-env-simulator
|
9ca8b726b5962502d53c7e8483c5e4fd89ce5ac6
|
[
"BSD-3-Clause"
] | 2
|
2019-11-14T14:40:09.000Z
|
2021-01-21T21:58:58.000Z
|
__I_MSG = { # ASMAxxxI
33 : lambda info, line: 'Storage alignment for {0} unfavorable'.format(line[info[1]:info[2]]),
}
__N_MSG = { # ASMAxxxN
}
__W_MSG = { # ASMAxxxW
45 : lambda info, line: 'Register or label not previously used - {0}'.format(line[info[1]:info[2]]),
140 : lambda info, line: 'END record missing',
163 : lambda info, line: 'Operand not properly enclosed in quotes',
165 : lambda info, line: 'Unexpected name field',
300 : lambda info, line: 'USING overridden by a prior active USING on statement number {0}'.format(info[1]),
301 : lambda info, line: 'Prior active USING on statement number {0} overridden by this USING'.format(info[1]),
302 : lambda info, line: 'USING specifies register 0 with a nonzero absolute or relocatable base address',
303 : lambda info, line: 'Multiple address resolutions may result from this USING and the USING on statement number {0}'.format(info[1]),
}
__E_MSG = { # ASMAxxxE
28 : lambda info, line: 'Invalid displacement',
29 : lambda info, line: 'Incorrect register specification - {0}'.format(line[info[1]:info[2]]),
30 : lambda info, line: 'Invalid literal usage - {0}'.format(line[info[1]:info[2]]),
32 : lambda info, line: 'Relocatable value or unresolved symbol found when absolute value required - {0}'.format(line[info[1]:info[2]]),
34 : lambda info, line: 'Operand {0} beyond active USING range'.format(line[info[1]:info[2]]),
41 : lambda info, line: 'Term expected; text is unclassifiable - {0}'.format(line[info[1]:info[2]]),
43 : lambda info, line: 'Previously defined symbol - {0}'.format(line[info[1]:info[2]]),
44 : lambda info, line: 'Undefined symbol - {0}'.format(line[info[1]:info[2]]),
57 : lambda info, line: 'Undefined operation code - {0}'.format(line[info[1]:info[2]]),
63 : lambda info, line: 'No ending apostrophe - {0}'.format(line[info[1]:info[2]]),
65 : lambda info, line: 'Unknown type - {0}'.format(line[info[1]:info[2]]),
74 : lambda info, line: 'Illegal syntax in expansion - {0}'.format(line[info[1]:info[2]]),
78 : lambda info, line: 'Operand 2 expansion complexly relocatable - {0}'.format(line[info[1]:info[2]]),
141 : lambda info, line: 'Bad character in operation code - {0}'.format(line[info[1]:info[2]]),
142 : lambda info, line: 'Operation code not complete on first record',
143 : lambda info, line: 'Bad character in name field - {0}'.format(line[info[1]:info[2]]),
145 : lambda info, line: 'Operator, right parenthesis, or end-of-expression expected - {0}'.format(line[info[1]:info[2]]),
146 : lambda info, line: 'Self-defining term too long or value too large - {0}'.format(line[info[1]:info[2]]),
150 : lambda info, line: 'Symbol has non-alphanumeric character or invalid delimiter - {0}'.format(line[info[1]:info[2]]),
305 : lambda info, line: 'Operand 1 does not refer to location within reference control section',
307 : lambda info, line: 'No active USING for operand {0}'.format(line[info[1]:info[2]]),
308 : lambda info, line: 'Repeated register {0}'.format(line[info[1]:info[2]]),
}
__S_MSG = { # ASMAxxxS
35 : lambda info, line: 'Invalid delimiter - {0}'.format(line[info[1]:info[2]]),
40 : lambda info, line: 'Missing operand',
173 : lambda info, line: 'Delimiter error, expected blank - {0}'.format(line[info[1]:info[2]]),
174 : lambda info, line: 'Delimiter error, expected blank or comma - {0}'.format(line[info[1]:info[2]]),
175 : lambda info, line: 'Delimiter error, expected comma - {0}'.format(line[info[1]:info[2]]),
178 : lambda info, line: 'Delimiter error, expected comma or right parenthesis - {0}'.format(line[info[1]:info[2]]),
179 : lambda info, line: 'Delimiter error, expected right parenthesis - {0}'.format(line[info[1]:info[2]]),
180 : lambda info, line: 'Operand must be absolute',
}
__MSG = {
'S' : __S_MSG,
'E' : __E_MSG,
'W' : __W_MSG,
'N' : __N_MSG,
'I' : __I_MSG,
}
def gen_msg(msg_type, info, line):
if len(info) == 3: # standard info message
return '** ASMA{0:0>3}{1} {2}\n'.format(info[0], msg_type, __MSG[msg_type][info[0]](info, line))
else:
return '** AS{0}\n'.format(info)
def search_msg_type(errno):
    for (k, v) in __MSG.items():  # .items() keeps this working on both Python 2 and 3
if errno in v:
return k
return None
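# Hedged example (not in the original module): formatting a standard info tuple.
# With line = 'LABEL    DC  UNDEF' and info = (44, 13, 18), line[13:18] == 'UNDEF',
# so gen_msg('E', (44, 13, 18), line) returns '** ASMA044E Undefined symbol - UNDEF\n'.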
| 59.933333
| 141
| 0.63337
|
__I_MSG = {
33 : lambda info, line: 'Storage alignment for {0} unfavorable'.format(line[info[1]:info[2]]),
}
__N_MSG = {
}
__W_MSG = {
45 : lambda info, line: 'Register or label not previously used - {0}'.format(line[info[1]:info[2]]),
140 : lambda info, line: 'END record missing',
163 : lambda info, line: 'Operand not properly enclosed in quotes',
165 : lambda info, line: 'Unexpected name field',
300 : lambda info, line: 'USING overridden by a prior active USING on statement number {0}'.format(info[1]),
301 : lambda info, line: 'Prior active USING on statement number {0} overridden by this USING'.format(info[1]),
302 : lambda info, line: 'USING specifies register 0 with a nonzero absolute or relocatable base address',
303 : lambda info, line: 'Multiple address resolutions may result from this USING and the USING on statement number {0}'.format(info[1]),
}
__E_MSG = {
28 : lambda info, line: 'Invalid displacement',
29 : lambda info, line: 'Incorrect register specification - {0}'.format(line[info[1]:info[2]]),
30 : lambda info, line: 'Invalid literal usage - {0}'.format(line[info[1]:info[2]]),
32 : lambda info, line: 'Relocatable value or unresolved symbol found when absolute value required - {0}'.format(line[info[1]:info[2]]),
34 : lambda info, line: 'Operand {0} beyond active USING range'.format(line[info[1]:info[2]]),
41 : lambda info, line: 'Term expected; text is unclassifiable - {0}'.format(line[info[1]:info[2]]),
43 : lambda info, line: 'Previously defined symbol - {0}'.format(line[info[1]:info[2]]),
44 : lambda info, line: 'Undefined symbol - {0}'.format(line[info[1]:info[2]]),
57 : lambda info, line: 'Undefined operation code - {0}'.format(line[info[1]:info[2]]),
63 : lambda info, line: 'No ending apostrophe - {0}'.format(line[info[1]:info[2]]),
65 : lambda info, line: 'Unknown type - {0}'.format(line[info[1]:info[2]]),
74 : lambda info, line: 'Illegal syntax in expansion - {0}'.format(line[info[1]:info[2]]),
78 : lambda info, line: 'Operand 2 expansion complexly relocatable - {0}'.format(line[info[1]:info[2]]),
141 : lambda info, line: 'Bad character in operation code - {0}'.format(line[info[1]:info[2]]),
142 : lambda info, line: 'Operation code not complete on first record',
143 : lambda info, line: 'Bad character in name field - {0}'.format(line[info[1]:info[2]]),
145 : lambda info, line: 'Operator, right parenthesis, or end-of-expression expected - {0}'.format(line[info[1]:info[2]]),
146 : lambda info, line: 'Self-defining term too long or value too large - {0}'.format(line[info[1]:info[2]]),
150 : lambda info, line: 'Symbol has non-alphanumeric character or invalid delimiter - {0}'.format(line[info[1]:info[2]]),
305 : lambda info, line: 'Operand 1 does not refer to location within reference control section',
307 : lambda info, line: 'No active USING for operand {0}'.format(line[info[1]:info[2]]),
308 : lambda info, line: 'Repeated register {0}'.format(line[info[1]:info[2]]),
}
__S_MSG = {
35 : lambda info, line: 'Invalid delimiter - {0}'.format(line[info[1]:info[2]]),
40 : lambda info, line: 'Missing operand',
173 : lambda info, line: 'Delimiter error, expected blank - {0}'.format(line[info[1]:info[2]]),
174 : lambda info, line: 'Delimiter error, expected blank or comma - {0}'.format(line[info[1]:info[2]]),
175 : lambda info, line: 'Delimiter error, expected comma - {0}'.format(line[info[1]:info[2]]),
178 : lambda info, line: 'Delimiter error, expected comma or right parenthesis - {0}'.format(line[info[1]:info[2]]),
179 : lambda info, line: 'Delimiter error, expected right parenthesis - {0}'.format(line[info[1]:info[2]]),
180 : lambda info, line: 'Operand must be absolute',
}
__MSG = {
'S' : __S_MSG,
'E' : __E_MSG,
'W' : __W_MSG,
'N' : __N_MSG,
'I' : __I_MSG,
}
def gen_msg(msg_type, info, line):
if len(info) == 3:
return '** ASMA{0:0>3}{1} {2}\n'.format(info[0], msg_type, __MSG[msg_type][info[0]](info, line))
else:
return '** AS{0}\n'.format(info)
def search_msg_type(errno):
    for (k, v) in __MSG.items():
if errno in v:
return k
return None
| true
| true
|
790cc84a59e11e67b64e3d5cb5453ba06c847a06
| 404
|
py
|
Python
|
invenio_subjects_mesh/version.py
|
fenekku/invenio-subjects-mesh
|
acdda73f2f1c2235292c0c4a0c9ec55263003066
|
[
"MIT"
] | 1
|
2022-03-08T22:36:26.000Z
|
2022-03-08T22:36:26.000Z
|
invenio_subjects_mesh/version.py
|
fenekku/invenio-subjects-mesh
|
acdda73f2f1c2235292c0c4a0c9ec55263003066
|
[
"MIT"
] | 3
|
2021-06-29T13:50:28.000Z
|
2021-06-29T18:27:55.000Z
|
invenio_subjects_mesh/version.py
|
fenekku/invenio-subjects-mesh
|
acdda73f2f1c2235292c0c4a0c9ec55263003066
|
[
"MIT"
] | 1
|
2021-06-29T19:36:31.000Z
|
2021-06-29T19:36:31.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Northwestern University.
#
# invenio-subjects-mesh is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Version information for invenio-subjects-mesh.
This file is imported by ``invenio_subjects_mesh.__init__``,
and parsed by ``setup.py``.
"""
__version__ = '2021.7.13'
| 25.25
| 73
| 0.725248
|
__version__ = '2021.7.13'
| true
| true
|
790cc8bdf4a2b7aed3f5cca024ee412ef8785951
| 5,925
|
py
|
Python
|
wiking/migrations/0004_auto__add_field_comment_article_version.py
|
srisankethu/opengift.io
|
fc490332bd0252610b55a68c1fff1c4f704fcbd4
|
[
"Apache-2.0"
] | 1
|
2020-08-30T23:12:08.000Z
|
2020-08-30T23:12:08.000Z
|
wiking/migrations/0004_auto__add_field_comment_article_version.py
|
lenarhus/opengift.io
|
db37494eac141e795c8d9d5b262d54cd6f20fb15
|
[
"Apache-2.0"
] | null | null | null |
wiking/migrations/0004_auto__add_field_comment_article_version.py
|
lenarhus/opengift.io
|
db37494eac141e795c8d9d5b262d54cd6f20fb15
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Comment.article_version'
db.add_column(u'wiking_comment', 'article_version',
self.gf('django.db.models.fields.related.ForeignKey')(default=-1, related_name='comments', to=orm['wiking.ArticleVersion']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Comment.article_version'
db.delete_column(u'wiking_comment', 'article_version_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'wiking.article': {
'Meta': {'object_name': 'Article'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'head': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'article'", 'to': "orm['wiking.ArticleVersion']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_articles'", 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'childs'", 'null': 'True', 'to': "orm['wiking.Article']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'wiking.articleversion': {
'Meta': {'object_name': 'ArticleVersion'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'wiking.comment': {
'Meta': {'object_name': 'Comment'},
'article_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['wiking.ArticleVersion']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '1000'})
}
}
complete_apps = ['wiking']
| 68.103448
| 187
| 0.571983
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column(u'wiking_comment', 'article_version',
self.gf('django.db.models.fields.related.ForeignKey')(default=-1, related_name='comments', to=orm['wiking.ArticleVersion']),
keep_default=False)
def backwards(self, orm):
db.delete_column(u'wiking_comment', 'article_version_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'wiking.article': {
'Meta': {'object_name': 'Article'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'head': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'article'", 'to': "orm['wiking.ArticleVersion']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_articles'", 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'childs'", 'null': 'True', 'to': "orm['wiking.Article']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'wiking.articleversion': {
'Meta': {'object_name': 'ArticleVersion'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'wiking.comment': {
'Meta': {'object_name': 'Comment'},
'article_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['wiking.ArticleVersion']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '1000'})
}
}
complete_apps = ['wiking']
| true
| true
|
790cc8c335ca80768ee9ce3d02a3b769ea21dfce
| 38,417
|
py
|
Python
|
r2r_src/agent.py
|
rcorona/R2R-EnvDrop
|
e91c21283ffc309bedfe49596b4066afa338fde6
|
[
"MIT-0",
"MIT"
] | null | null | null |
r2r_src/agent.py
|
rcorona/R2R-EnvDrop
|
e91c21283ffc309bedfe49596b4066afa338fde6
|
[
"MIT-0",
"MIT"
] | null | null | null |
r2r_src/agent.py
|
rcorona/R2R-EnvDrop
|
e91c21283ffc309bedfe49596b4066afa338fde6
|
[
"MIT-0",
"MIT"
] | null | null | null |
import json
import os
import sys
import numpy as np
import random
import math
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
from env import R2RBatch
from utils import padding_idx, add_idx, Tokenizer
import utils
import model
import param
from param import args
from collections import defaultdict
class BaseAgent(object):
''' Base class for an R2R agent to generate and save trajectories. '''
def __init__(self, env, results_path):
self.env = env
self.results_path = results_path
random.seed(1)
self.results = {}
self.losses = [] # For learning agents
def write_results(self):
output = [{'instr_id':k, 'trajectory': v} for k,v in self.results.items()]
with open(self.results_path, 'w') as f:
json.dump(output, f)
def get_results(self):
output = [{'instr_id': k, 'trajectory': v} for k, v in self.results.items()]
return output
def rollout(self, **args):
''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''
raise NotImplementedError
@staticmethod
def get_agent(name):
return globals()[name+"Agent"]
def test(self, iters=None, **kwargs):
self.env.reset_epoch(shuffle=(iters is not None)) # If iters is not none, shuffle the env batch
self.losses = []
self.results = {}
# We rely on env showing the entire batch before repeating anything
looped = False
self.loss = 0
if iters is not None:
            # Run exactly 'iters' rollouts (the env batch order was shuffled above)
for i in range(iters):
for traj in self.rollout(**kwargs):
self.loss = 0
self.results[traj['instr_id']] = traj['path']
else: # Do a full round
while True:
for traj in self.rollout(**kwargs):
if traj['instr_id'] in self.results:
looped = True
else:
self.loss = 0
self.results[traj['instr_id']] = traj['path']
if looped:
break
class Seq2SeqAgent(BaseAgent):
''' An agent based on an LSTM seq2seq model with attention. '''
# For now, the agent can't pick which forward move to make - just the one in the middle
env_actions = {
'left': (0,-1, 0), # left
'right': (0, 1, 0), # right
'up': (0, 0, 1), # up
'down': (0, 0,-1), # down
'forward': (1, 0, 0), # forward
'<end>': (0, 0, 0), # <end>
'<start>': (0, 0, 0), # <start>
'<ignore>': (0, 0, 0) # <ignore>
}
def __init__(self, env, results_path, tok, episode_len=20):
super(Seq2SeqAgent, self).__init__(env, results_path)
self.tok = tok
self.episode_len = episode_len
self.feature_size = self.env.feature_size
# Models
enc_hidden_size = args.rnn_dim//2 if args.bidir else args.rnn_dim
self.encoder = model.EncoderLSTM(tok.vocab_size(), args.wemb, enc_hidden_size, padding_idx,
args.dropout, bidirectional=args.bidir).cuda()
self.decoder = model.AttnDecoderLSTM(args.aemb, args.rnn_dim, args.dropout, feature_size=self.feature_size + args.angle_feat_size).cuda()
self.critic = model.Critic().cuda()
self.models = (self.encoder, self.decoder, self.critic)
# Optimizers
self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr)
self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr)
self.critic_optimizer = args.optimizer(self.critic.parameters(), lr=args.lr)
self.optimizers = (self.encoder_optimizer, self.decoder_optimizer, self.critic_optimizer)
# Evaluations
self.losses = []
        self.criterion = nn.CrossEntropyLoss(ignore_index=args.ignoreid, size_average=False)  # size_average=False is equivalent to reduction='sum' in newer PyTorch
# Logs
sys.stdout.flush()
self.logs = defaultdict(list)
def _sort_batch(self, obs):
''' Extract instructions from a list of observations and sort by descending
sequence length (to enable PyTorch packing). '''
seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
seq_lengths = np.argmax(seq_tensor == padding_idx, axis=1)
seq_lengths[seq_lengths == 0] = seq_tensor.shape[1] # Full length
seq_tensor = torch.from_numpy(seq_tensor)
seq_lengths = torch.from_numpy(seq_lengths)
# Sort sequences by lengths
seq_lengths, perm_idx = seq_lengths.sort(0, True) # True -> descending
sorted_tensor = seq_tensor[perm_idx]
mask = (sorted_tensor == padding_idx)[:,:seq_lengths[0]] # seq_lengths[0] is the Maximum length
return Variable(sorted_tensor, requires_grad=False).long().cuda(), \
mask.byte().cuda(), \
list(seq_lengths), list(perm_idx)
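    # Worked example (illustrative) for _sort_batch: for a row [5, 9, 2, PAD, PAD],
    # (row == padding_idx) is [F, F, F, T, T] and np.argmax(...) returns 3 -- the
    # true length; a fully occupied row yields argmax 0 and is reset to shape[1].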
def _feature_variable(self, obs):
''' Extract precomputed features into variable. '''
features = np.empty((len(obs), args.views, self.feature_size + args.angle_feat_size), dtype=np.float32)
for i, ob in enumerate(obs):
features[i, :, :] = ob['feature'] # Image feat
return Variable(torch.from_numpy(features), requires_grad=False).cuda()
def _candidate_variable(self, obs):
candidate_leng = [len(ob['candidate']) + 1 for ob in obs] # +1 is for the end
candidate_feat = np.zeros((len(obs), max(candidate_leng), self.feature_size + args.angle_feat_size), dtype=np.float32)
# Note: The candidate_feat at len(ob['candidate']) is the feature for the END
# which is zero in my implementation
for i, ob in enumerate(obs):
for j, c in enumerate(ob['candidate']):
candidate_feat[i, j, :] = c['feature'] # Image feat
return torch.from_numpy(candidate_feat).cuda(), candidate_leng
def get_input_feat(self, obs):
input_a_t = np.zeros((len(obs), args.angle_feat_size), np.float32)
for i, ob in enumerate(obs):
input_a_t[i] = utils.angle_feature(ob['heading'], ob['elevation'])
input_a_t = torch.from_numpy(input_a_t).cuda()
f_t = self._feature_variable(obs) # Image features from obs
candidate_feat, candidate_leng = self._candidate_variable(obs)
return input_a_t, f_t, candidate_feat, candidate_leng
def _teacher_action(self, obs, ended):
"""
Extract teacher actions into variable.
:param obs: The observation.
:param ended: Whether the action seq is ended
:return:
"""
a = np.zeros(len(obs), dtype=np.int64)
for i, ob in enumerate(obs):
if ended[i]: # Just ignore this index
a[i] = args.ignoreid
else:
for k, candidate in enumerate(ob['candidate']):
if candidate['viewpointId'] == ob['teacher']: # Next view point
a[i] = k
break
else: # Stop here
assert ob['teacher'] == ob['viewpoint'] # The teacher action should be "STAY HERE"
a[i] = len(ob['candidate'])
return torch.from_numpy(a).cuda()
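    # Note (illustrative): the for/else above reaches the else branch only when no
    # candidate matched, i.e. the teacher action is STAY, encoded as index
    # len(ob['candidate']) -- the same trailing slot _candidate_variable reserves
    # for the zero-valued END feature.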
def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):
"""
Interface between Panoramic view and Egocentric view
It will convert the action panoramic view action a_t to equivalent egocentric view actions for the simulator
"""
def take_action(i, idx, name):
if type(name) is int: # Go to the next view
self.env.env.sims[idx].makeAction(name, 0, 0)
else: # Adjust
self.env.env.sims[idx].makeAction(*self.env_actions[name])
state = self.env.env.sims[idx].getState()
if traj is not None:
traj[i]['path'].append((state.location.viewpointId, state.heading, state.elevation))
if perm_idx is None:
perm_idx = range(len(perm_obs))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1: # -1 is the <stop> action
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
                src_level = src_point // 12   # 12 headings per elevation level; point idx starts from 0
                trg_level = trg_point // 12
while src_level < trg_level: # Tune up
take_action(i, idx, 'up')
src_level += 1
while src_level > trg_level: # Tune down
take_action(i, idx, 'down')
src_level -= 1
while self.env.env.sims[idx].getState().viewIndex != trg_point: # Turn right until the target
take_action(i, idx, 'right')
assert select_candidate['viewpointId'] == \
self.env.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId
take_action(i, idx, select_candidate['idx'])
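    # Illustrative note (not original): pointIds index a 3 x 12 panorama grid, so
    # pointId // 12 is the elevation level and pointId % 12 the heading slot; e.g.
    # trg_point == 25 -> level 2 (looking up), heading slot 1 (30 degrees).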
def rollout(self, train_ml=None, train_rl=True, reset=True, speaker=None):
"""
:param train_ml: The weight to train with maximum likelihood
:param train_rl: whether use RL in training
:param reset: Reset the environment
:param speaker: Speaker used in back translation.
If the speaker is not None, use back translation.
O.w., normal training
:return:
"""
if self.feedback == 'teacher' or self.feedback == 'argmax':
train_rl = False
if reset:
# Reset env
obs = np.array(self.env.reset())
else:
obs = np.array(self.env._get_obs())
batch_size = len(obs)
if speaker is not None: # Trigger the self_train mode!
noise = self.decoder.drop_env(torch.ones(self.feature_size).cuda())
batch = self.env.batch.copy()
speaker.env = self.env
insts = speaker.infer_batch(featdropmask=noise) # Use the same drop mask in speaker
# Create fake environments with the generated instruction
boss = np.ones((batch_size, 1), np.int64) * self.tok.word_to_index['<BOS>'] # First word is <BOS>
insts = np.concatenate((boss, insts), 1)
for i, (datum, inst) in enumerate(zip(batch, insts)):
if inst[-1] != self.tok.word_to_index['<PAD>']: # The inst is not ended!
inst[-1] = self.tok.word_to_index['<EOS>']
datum.pop('instructions')
datum.pop('instr_encoding')
datum['instructions'] = self.tok.decode_sentence(inst)
datum['instr_encoding'] = inst
obs = np.array(self.env.reset(batch))
# Reorder the language input for the encoder (do not ruin the original code)
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
ctx_mask = seq_mask
# Init the reward shaping
last_dist = np.zeros(batch_size, np.float32)
for i, ob in enumerate(perm_obs): # The init distance from the view point to the target
last_dist[i] = ob['distance']
# Record starting point
traj = [{
'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in perm_obs]
# For test result submission
visited = [set() for _ in perm_obs]
# Initialization the tracking state
ended = np.array([False] * batch_size) # Indices match permuation of the model, not env
# Init the logs
rewards = []
hidden_states = []
policy_log_probs = []
masks = []
entropys = []
ml_loss = 0.
h1 = h_t
for t in range(self.episode_len):
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
if speaker is not None: # Apply the env drop mask to the feat
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
h_t, c_t, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
already_dropfeat=(speaker is not None))
hidden_states.append(h_t)
# Mask outputs where agent can't move forward
# Here the logit is [b, max_candidate]
candidate_mask = utils.length2mask(candidate_leng)
if args.submit: # Avoding cyclic path
for ob_id, ob in enumerate(perm_obs):
visited[ob_id].add(ob['viewpoint'])
for c_id, c in enumerate(ob['candidate']):
if c['viewpointId'] in visited[ob_id]:
candidate_mask[ob_id][c_id] = 1
logit.masked_fill_(candidate_mask, -float('inf'))
# Supervised training
target = self._teacher_action(perm_obs, ended)
ml_loss += self.criterion(logit, target)
# Determine next model inputs
if self.feedback == 'teacher':
a_t = target # teacher forcing
elif self.feedback == 'argmax':
_, a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
log_probs = F.log_softmax(logit, 1) # Calculate the log_prob here
policy_log_probs.append(log_probs.gather(1, a_t.unsqueeze(1))) # Gather the log_prob for each batch
elif self.feedback == 'sample':
probs = F.softmax(logit, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
self.logs['entropy'].append(c.entropy().sum().item()) # For log
entropys.append(c.entropy()) # For optimization
a_t = c.sample().detach()
policy_log_probs.append(c.log_prob(a_t))
else:
print(self.feedback)
sys.exit('Invalid feedback option')
# Prepare environment action
# NOTE: Env action is in the perm_obs space
cpu_a_t = a_t.cpu().numpy()
for i, next_id in enumerate(cpu_a_t):
if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid or ended[i]: # The last action is <end>
cpu_a_t[i] = -1 # Change the <end> and ignore action to -1
# Make action and get the new state
self.make_equiv_action(cpu_a_t, perm_obs, perm_idx, traj)
obs = np.array(self.env._get_obs())
            perm_obs = obs[perm_idx]                    # Permute the obs to match the model ordering
# Calculate the mask and reward
dist = np.zeros(batch_size, np.float32)
reward = np.zeros(batch_size, np.float32)
mask = np.ones(batch_size, np.float32)
for i, ob in enumerate(perm_obs):
dist[i] = ob['distance']
if ended[i]: # If the action is already finished BEFORE THIS ACTION.
reward[i] = 0.
mask[i] = 0.
else: # Calculate the reward
action_idx = cpu_a_t[i]
if action_idx == -1: # If the action now is end
if dist[i] < 3: # Correct
reward[i] = 2.
else: # Incorrect
reward[i] = -2.
else: # The action is not end
reward[i] = - (dist[i] - last_dist[i]) # Change of distance
if reward[i] > 0: # Quantification
reward[i] = 1
elif reward[i] < 0:
reward[i] = -1
else:
raise NameError("The action doesn't change the move")
rewards.append(reward)
masks.append(mask)
last_dist[:] = dist
# Update the finished actions
# -1 means ended or ignored (already ended)
ended[:] = np.logical_or(ended, (cpu_a_t == -1))
# Early exit if all ended
if ended.all():
break
if train_rl:
# Last action in A2C
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
if speaker is not None:
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
last_h_, _, _, _ = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
speaker is not None)
rl_loss = 0.
# NOW, A2C!!!
# Calculate the final discounted reward
last_value__ = self.critic(last_h_).detach() # The value esti of the last state, remove the grad for safety
discount_reward = np.zeros(batch_size, np.float32) # The inital reward is zero
for i in range(batch_size):
if not ended[i]: # If the action is not ended, use the value function as the last reward
discount_reward[i] = last_value__[i]
length = len(rewards)
total = 0
for t in range(length-1, -1, -1):
discount_reward = discount_reward * args.gamma + rewards[t] # If it ended, the reward will be 0
mask_ = Variable(torch.from_numpy(masks[t]), requires_grad=False).cuda()
clip_reward = discount_reward.copy()
r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda()
v_ = self.critic(hidden_states[t])
a_ = (r_ - v_).detach()
# r_: The higher, the better. -ln(p(action)) * (discount_reward - value)
rl_loss += (-policy_log_probs[t] * a_ * mask_).sum()
rl_loss += (((r_ - v_) ** 2) * mask_).sum() * 0.5 # 1/2 L2 loss
if self.feedback == 'sample':
rl_loss += (- 0.01 * entropys[t] * mask_).sum()
self.logs['critic_loss'].append((((r_ - v_) ** 2) * mask_).sum().item())
total = total + np.sum(masks[t])
self.logs['total'].append(total)
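            # Worked example (illustrative): with gamma = 0.9, per-step rewards
            # [1, -1, 2] and a bootstrap value of 0, the backward pass yields
            # returns 2.0 at t=2, -1 + 0.9*2.0 = 0.8 at t=1, and 1 + 0.9*0.8 = 1.72 at t=0.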
# Normalize the loss function
if args.normalize_loss == 'total':
rl_loss /= total
elif args.normalize_loss == 'batch':
rl_loss /= batch_size
else:
assert args.normalize_loss == 'none'
self.loss += rl_loss
if train_ml is not None:
self.loss += ml_loss * train_ml / batch_size
if type(self.loss) is int: # For safety, it will be activated if no losses are added
self.losses.append(0.)
else:
            self.losses.append(self.loss.item() / self.episode_len)   # normalized by episode_len purely for logging readability
return traj
def _dijkstra(self):
"""
The dijkstra algorithm.
Was called beam search to be consistent with existing work.
        But it actually finds the exact K paths with the largest accumulated listener log-prob.
:return:
[{
"scan": XXX
"instr_id":XXX,
'instr_encoding": XXX
'dijk_path': [v1, v2, ..., vn] (The path used for find all the candidates)
"paths": {
"trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}
}]
"""
def make_state_id(viewpoint, action): # Make state id
return "%s_%s" % (viewpoint, str(action))
        def decompose_state_id(state_id):   # Split a state id back into (viewpoint, action)
viewpoint, action = state_id.split("_")
action = int(action)
return viewpoint, action
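        # e.g. make_state_id('abc123', 3) == 'abc123_3' and
        # decompose_state_id('abc123_3') == ('abc123', 3); the sentinel action -95
        # below marks the start state.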
# Get first obs
obs = self.env._get_obs()
# Prepare the state id
batch_size = len(obs)
results = [{"scan": ob['scan'],
"instr_id": ob['instr_id'],
"instr_encoding": ob["instr_encoding"],
"dijk_path": [ob['viewpoint']],
"paths": []} for ob in obs]
# Encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
recover_idx = np.zeros_like(perm_idx)
for i, idx in enumerate(perm_idx):
recover_idx[idx] = i
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
ctx, h_t, c_t, ctx_mask = ctx[recover_idx], h_t[recover_idx], c_t[recover_idx], seq_mask[recover_idx] # Recover the original order
# Dijk Graph States:
id2state = [
{make_state_id(ob['viewpoint'], -95):
{"next_viewpoint": ob['viewpoint'],
"running_state": (h_t[i], h_t[i], c_t[i]),
"location": (ob['viewpoint'], ob['heading'], ob['elevation']),
"feature": None,
"from_state_id": None,
"score": 0,
"scores": [],
"actions": [],
}
}
for i, ob in enumerate(obs)
] # -95 is the start point
visited = [set() for _ in range(batch_size)]
finished = [set() for _ in range(batch_size)]
graphs = [utils.FloydGraph() for _ in range(batch_size)] # For the navigation path
ended = np.array([False] * batch_size)
# Dijk Algorithm
for _ in range(300):
# Get the state with the highest score for each batch
# If the batch is not ended, pick the best-scoring unvisited item.
# Else use an arbitrary item from the dict (it always exists)
smallest_idXstate = [
max(((state_id, state) for state_id, state in id2state[i].items() if state_id not in visited[i]),
key=lambda item: item[1]['score'])
if not ended[i]
else
next(iter(id2state[i].items()))
for i in range(batch_size)
]
# Set the visited and the end seqs
for i, (state_id, state) in enumerate(smallest_idXstate):
assert (ended[i]) or (state_id not in visited[i])
if not ended[i]:
viewpoint, action = decompose_state_id(state_id)
visited[i].add(state_id)
if action == -1:
finished[i].add(state_id)
if len(finished[i]) >= args.candidates: # Get enough candidates
ended[i] = True
# Gather the running state in the batch
h_ts, h1s, c_ts = zip(*(idXstate[1]['running_state'] for idXstate in smallest_idXstate))
h_t, h1, c_t = torch.stack(h_ts), torch.stack(h1s), torch.stack(c_ts)
# Recover the env and gather the feature
for i, (state_id, state) in enumerate(smallest_idXstate):
next_viewpoint = state['next_viewpoint']
scan = results[i]['scan']
from_viewpoint, heading, elevation = state['location']
self.env.env.sims[i].newEpisode(scan, next_viewpoint, heading, elevation) # Heading and elevation are not used in the panoramic setting
obs = self.env._get_obs()
# Update the floyd graph
# Only used to shorten the navigation length
# Will not affect the result
for i, ob in enumerate(obs):
viewpoint = ob['viewpoint']
if not graphs[i].visited(viewpoint): # Update the Graph
for c in ob['candidate']:
next_viewpoint = c['viewpointId']
dis = self.env.distances[ob['scan']][viewpoint][next_viewpoint]
graphs[i].add_edge(viewpoint, next_viewpoint, dis)
graphs[i].update(viewpoint)
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], viewpoint))
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(obs)
# Run one decoding step
h_t, c_t, alpha, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
False)
# Update the dijk graph's states with the newly visited viewpoint
candidate_mask = utils.length2mask(candidate_leng)
logit.masked_fill_(candidate_mask, -float('inf'))
log_probs = F.log_softmax(logit, 1) # Calculate the log_prob here
_, max_act = log_probs.max(1)
for i, ob in enumerate(obs):
current_viewpoint = ob['viewpoint']
candidate = ob['candidate']
current_state_id, current_state = smallest_idXstate[i]
old_viewpoint, from_action = decompose_state_id(current_state_id)
assert ob['viewpoint'] == current_state['next_viewpoint']
if from_action == -1 or ended[i]: # If the action is <end> or the batch is ended, skip it
continue
for j in range(len(ob['candidate']) + 1): # +1 to include the <end> action
# score + log_prob[action]
modified_log_prob = log_probs[i][j].detach().cpu().item()
new_score = current_state['score'] + modified_log_prob
if j < len(candidate): # A normal action
next_id = make_state_id(current_viewpoint, j)
next_viewpoint = candidate[j]['viewpointId']
trg_point = candidate[j]['pointId']
heading = (trg_point % 12) * math.pi / 6
elevation = (trg_point // 12 - 1) * math.pi / 6
location = (next_viewpoint, heading, elevation)
else: # The end action
next_id = make_state_id(current_viewpoint, -1) # action is -1
next_viewpoint = current_viewpoint # next viewpoint is still here
location = (current_viewpoint, ob['heading'], ob['elevation'])
if next_id not in id2state[i] or new_score > id2state[i][next_id]['score']:
id2state[i][next_id] = {
"next_viewpoint": next_viewpoint,
"location": location,
"running_state": (h_t[i], h1[i], c_t[i]),
"from_state_id": current_state_id,
"feature": (f_t[i].detach().cpu(), candidate_feat[i][j].detach().cpu()),
"score": new_score,
"scores": current_state['scores'] + [modified_log_prob],
"actions": current_state['actions'] + [len(candidate)+1],
}
# If no unvisited states remain after the update, mark the batch as ended
for i in range(batch_size):
if len(visited[i]) == len(id2state[i]): # Every state has been visited
ended[i] = True
# End?
if ended.all():
break
# Move back to the start point
for i in range(batch_size):
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], results[i]['dijk_path'][0]))
"""
"paths": {
"trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}
"""
# Gather the Path
for i, result in enumerate(results):
assert len(finished[i]) <= args.candidates
for state_id in finished[i]:
path_info = {
"trajectory": [],
"action": [],
"listener_scores": id2state[i][state_id]['scores'],
"listener_actions": id2state[i][state_id]['actions'],
"visual_feature": []
}
viewpoint, action = decompose_state_id(state_id)
while action != -95:
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
path_info['action'].append(action)
path_info['visual_feature'].append(state['feature'])
state_id = id2state[i][state_id]['from_state_id']
viewpoint, action = decompose_state_id(state_id)
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
for need_reverse_key in ["trajectory", "action", "visual_feature"]:
path_info[need_reverse_key] = path_info[need_reverse_key][::-1]
result['paths'].append(path_info)
return results
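# --- Illustrative sketch (not part of the original file) ---
# _dijkstra keys search states as "viewpoint_action" strings and recovers a
# trajectory by following from_state_id pointers back to the -95 start marker.
# The bookkeeping in isolation (viewpoint names are made up):
def make_state_id(viewpoint, action):
    return "%s_%s" % (viewpoint, str(action))

id2state = {
    make_state_id("v0", -95): {"from_state_id": None},
    make_state_id("v0", 0): {"from_state_id": make_state_id("v0", -95)},
    make_state_id("v1", -1): {"from_state_id": make_state_id("v0", 0)},
}

state_id, chain = make_state_id("v1", -1), []
while state_id is not None:
    chain.append(state_id)
    state_id = id2state[state_id]["from_state_id"]
print(chain[::-1])  # ['v0_-95', 'v0_0', 'v1_-1']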
def beam_search(self, speaker):
"""
:param speaker: The speaker to be used in searching.
:return:
{
"scan": XXX
"instr_id":XXX,
"instr_encoding": XXX
"dijk_path": [v1, v2, ...., vn]
"paths": [{
"trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"speaker_scores": [log_prob_word1, log_prob_word2, ..., ],
}]
}
"""
self.env.reset()
results = self._dijkstra()
"""
return from self._dijkstra()
[{
"scan": XXX
"instr_id":XXX,
"instr_encoding": XXX
"dijk_path": [v1, v2, ...., vn]
"paths": [{
"trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}]
}]
"""
# Compute the speaker scores:
for result in results:
lengths = []
num_paths = len(result['paths'])
for path in result['paths']:
assert len(path['trajectory']) == (len(path['visual_feature']) + 1)
lengths.append(len(path['visual_feature']))
max_len = max(lengths)
img_feats = torch.zeros(num_paths, max_len, 36, self.feature_size + args.angle_feat_size)
can_feats = torch.zeros(num_paths, max_len, self.feature_size + args.angle_feat_size)
for j, path in enumerate(result['paths']):
for k, feat in enumerate(path['visual_feature']):
img_feat, can_feat = feat
img_feats[j][k] = img_feat
can_feats[j][k] = can_feat
img_feats, can_feats = img_feats.cuda(), can_feats.cuda()
features = ((img_feats, can_feats), lengths)
insts = np.array([result['instr_encoding'] for _ in range(num_paths)])
seq_lengths = np.argmax(insts == self.tok.word_to_index['<EOS>'], axis=1) # len(seq + 'BOS') == len(seq + 'EOS')
insts = torch.from_numpy(insts).cuda()
speaker_scores = speaker.teacher_forcing(train=True, features=features, insts=insts, for_listener=True)
for j, path in enumerate(result['paths']):
path.pop("visual_feature")
path['speaker_scores'] = -speaker_scores[j].detach().cpu().numpy()[:seq_lengths[j]]
return results
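# --- Illustrative sketch (not part of the original file) ---
# Each candidate path now carries listener log-probs and (after the sign flip
# above) speaker log-probs. A typical consumer reranks with a log-linear mix;
# the 0.5 weight is an assumption, not a value from this codebase:
import numpy as np

def rerank(paths, speaker_weight=0.5):
    def score(p):
        return ((1 - speaker_weight) * np.sum(p['listener_scores'])
                + speaker_weight * np.sum(p['speaker_scores']))
    return max(paths, key=score)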
def beam_search_test(self, speaker):
self.encoder.eval()
self.decoder.eval()
self.critic.eval()
looped = False
self.results = {}
while True:
for traj in self.beam_search(speaker):
if traj['instr_id'] in self.results:
looped = True
else:
self.results[traj['instr_id']] = traj
if looped:
break
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, iters=None):
''' Evaluate once on each instruction in the current environment '''
self.feedback = feedback
if use_dropout:
self.encoder.train()
self.decoder.train()
self.critic.train()
else:
self.encoder.eval()
self.decoder.eval()
self.critic.eval()
super(Seq2SeqAgent, self).test(iters)
def zero_grad(self):
self.loss = 0.
self.losses = []
for model, optimizer in zip(self.models, self.optimizers):
model.train()
optimizer.zero_grad()
def accumulate_gradient(self, feedback='teacher', **kwargs):
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)
elif feedback == 'sample':
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)
self.feedback = 'sample'
self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
def optim_step(self):
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.critic_optimizer.step()
def train(self, n_iters, feedback='teacher', **kwargs):
''' Train for a given number of iterations '''
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.critic.train()
self.losses = []
for iter in tqdm(range(1, n_iters + 1)):
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
self.critic_optimizer.zero_grad()
self.loss = 0
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)
elif feedback == 'sample':
if args.ml_weight != 0:
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)
self.feedback = 'sample'
self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.critic_optimizer.step()
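# --- Illustrative sketch (not part of the original file) ---
# The two feedback modes above are usually staged: imitation-only warm-up,
# then the mixed IL (ml_weight) + A2C phase. A hypothetical schedule:
def training_schedule(agent, warmup_iters=200, main_iters=2000):
    agent.train(warmup_iters, feedback='teacher')  # pure imitation learning
    agent.train(main_iters, feedback='sample')     # mixed IL + RL updates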
def save(self, epoch, path):
''' Snapshot models '''
the_dir, _ = os.path.split(path)
os.makedirs(the_dir, exist_ok=True)
states = {}
def create_state(name, model, optimizer):
states[name] = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)]
for param in all_tuple:
create_state(*param)
torch.save(states, path)
def load(self, path):
''' Loads parameters (but not training state) '''
states = torch.load(path)
def recover_state(name, model, optimizer):
state = model.state_dict()
model_keys = set(state.keys())
load_keys = set(states[name]['state_dict'].keys())
if model_keys != load_keys:
print("NOTICE: DIFFERENT KEYS IN THE LISTEREN")
state.update(states[name]['state_dict'])
model.load_state_dict(state)
if args.loadOptim:
optimizer.load_state_dict(states[name]['optimizer'])
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)]
for param in all_tuple:
recover_state(*param)
return states['encoder']['epoch'] - 1
| 44.722934
| 145
| 0.532655
|
import json
import os
import sys
import numpy as np
import random
import math
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
from env import R2RBatch
from utils import padding_idx, add_idx, Tokenizer
import utils
import model
import param
from param import args
from collections import defaultdict
class BaseAgent(object):
def __init__(self, env, results_path):
self.env = env
self.results_path = results_path
random.seed(1)
self.results = {}
self.losses = []
def write_results(self):
output = [{'instr_id':k, 'trajectory': v} for k,v in self.results.items()]
with open(self.results_path, 'w') as f:
json.dump(output, f)
def get_results(self):
output = [{'instr_id': k, 'trajectory': v} for k, v in self.results.items()]
return output
def rollout(self, **args):
raise NotImplementedError
@staticmethod
def get_agent(name):
return globals()[name+"Agent"]
def test(self, iters=None, **kwargs):
self.env.reset_epoch(shuffle=(iters is not None))
self.losses = []
self.results = {}
looped = False
self.loss = 0
if iters is not None:
for i in range(iters):
for traj in self.rollout(**kwargs):
self.loss = 0
self.results[traj['instr_id']] = traj['path']
else:
while True:
for traj in self.rollout(**kwargs):
if traj['instr_id'] in self.results:
looped = True
else:
self.loss = 0
self.results[traj['instr_id']] = traj['path']
if looped:
break
class Seq2SeqAgent(BaseAgent):
env_actions = {
'left': (0,-1, 0), # left
'right': (0, 1, 0), # right
'up': (0, 0, 1), # up
'down': (0, 0,-1), # down
'forward': (1, 0, 0), # forward
'<end>': (0, 0, 0), # <end>
'<start>': (0, 0, 0), # <start>
'<ignore>': (0, 0, 0) # <ignore>
}
def __init__(self, env, results_path, tok, episode_len=20):
super(Seq2SeqAgent, self).__init__(env, results_path)
self.tok = tok
self.episode_len = episode_len
self.feature_size = self.env.feature_size
# Models
enc_hidden_size = args.rnn_dim//2 if args.bidir else args.rnn_dim
self.encoder = model.EncoderLSTM(tok.vocab_size(), args.wemb, enc_hidden_size, padding_idx,
args.dropout, bidirectional=args.bidir).cuda()
self.decoder = model.AttnDecoderLSTM(args.aemb, args.rnn_dim, args.dropout, feature_size=self.feature_size + args.angle_feat_size).cuda()
self.critic = model.Critic().cuda()
self.models = (self.encoder, self.decoder, self.critic)
# Optimizers
self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr)
self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr)
self.critic_optimizer = args.optimizer(self.critic.parameters(), lr=args.lr)
self.optimizers = (self.encoder_optimizer, self.decoder_optimizer, self.critic_optimizer)
# Evaluations
self.losses = []
self.criterion = nn.CrossEntropyLoss(ignore_index=args.ignoreid, size_average=False)
# Logs
sys.stdout.flush()
self.logs = defaultdict(list)
def _sort_batch(self, obs):
seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
seq_lengths = np.argmax(seq_tensor == padding_idx, axis=1)
seq_lengths[seq_lengths == 0] = seq_tensor.shape[1] # Full length
seq_tensor = torch.from_numpy(seq_tensor)
seq_lengths = torch.from_numpy(seq_lengths)
# Sort sequences by lengths
seq_lengths, perm_idx = seq_lengths.sort(0, True) # True -> descending
sorted_tensor = seq_tensor[perm_idx]
mask = (sorted_tensor == padding_idx)[:,:seq_lengths[0]] # seq_lengths[0] is the Maximum length
return Variable(sorted_tensor, requires_grad=False).long().cuda(), \
mask.byte().cuda(), \
list(seq_lengths), list(perm_idx)
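# --- Illustrative sketch (not part of the original file) ---
# _sort_batch orders instructions by descending length for the packed-sequence
# encoder; callers undo it with a recover permutation (see _dijkstra). The
# permutation round-trip with toy lengths:
import numpy as np

lengths = np.array([3, 7, 5])
perm_idx = np.argsort(-lengths)            # descending order: [1, 2, 0]
recover_idx = np.zeros_like(perm_idx)
for i, idx in enumerate(perm_idx):
    recover_idx[idx] = i
assert (lengths[perm_idx][recover_idx] == lengths).all()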
def _feature_variable(self, obs):
features = np.empty((len(obs), args.views, self.feature_size + args.angle_feat_size), dtype=np.float32)
for i, ob in enumerate(obs):
features[i, :, :] = ob['feature'] # Image feat
return Variable(torch.from_numpy(features), requires_grad=False).cuda()
def _candidate_variable(self, obs):
candidate_leng = [len(ob['candidate']) + 1 for ob in obs] # +1 is for the end
candidate_feat = np.zeros((len(obs), max(candidate_leng), self.feature_size + args.angle_feat_size), dtype=np.float32)
# Note: The candidate_feat at len(ob['candidate']) is the feature for the END
# which is zero in my implementation
for i, ob in enumerate(obs):
for j, c in enumerate(ob['candidate']):
candidate_feat[i, j, :] = c['feature'] # Image feat
return torch.from_numpy(candidate_feat).cuda(), candidate_leng
def get_input_feat(self, obs):
input_a_t = np.zeros((len(obs), args.angle_feat_size), np.float32)
for i, ob in enumerate(obs):
input_a_t[i] = utils.angle_feature(ob['heading'], ob['elevation'])
input_a_t = torch.from_numpy(input_a_t).cuda()
f_t = self._feature_variable(obs) # Image features from obs
candidate_feat, candidate_leng = self._candidate_variable(obs)
return input_a_t, f_t, candidate_feat, candidate_leng
def _teacher_action(self, obs, ended):
a = np.zeros(len(obs), dtype=np.int64)
for i, ob in enumerate(obs):
if ended[i]: # Just ignore this index
a[i] = args.ignoreid
else:
for k, candidate in enumerate(ob['candidate']):
if candidate['viewpointId'] == ob['teacher']: # Next view point
a[i] = k
break
else: # Stop here
assert ob['teacher'] == ob['viewpoint'] # The teacher action should be "STAY HERE"
a[i] = len(ob['candidate'])
return torch.from_numpy(a).cuda()
def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):
def take_action(i, idx, name):
if type(name) is int: # Go to the next view
self.env.env.sims[idx].makeAction(name, 0, 0)
else: # Adjust
self.env.env.sims[idx].makeAction(*self.env_actions[name])
state = self.env.env.sims[idx].getState()
if traj is not None:
traj[i]['path'].append((state.location.viewpointId, state.heading, state.elevation))
if perm_idx is None:
perm_idx = range(len(perm_obs))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1: # -1 is the <stop> action
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
src_level = src_point // 12 # The point idx starts from 0
trg_level = trg_point // 12
while src_level < trg_level: # Tune up
take_action(i, idx, 'up')
src_level += 1
while src_level > trg_level: # Tune down
take_action(i, idx, 'down')
src_level -= 1
while self.env.env.sims[idx].getState().viewIndex != trg_point: # Turn right until the target
take_action(i, idx, 'right')
assert select_candidate['viewpointId'] == \
self.env.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId
take_action(i, idx, select_candidate['idx'])
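# --- Illustrative sketch (not part of the original file) ---
# The panorama is discretized into 36 views: 12 headings x 3 elevation levels,
# so pointId % 12 selects the heading slot and pointId // 12 the elevation
# level (the same arithmetic appears in _dijkstra). In isolation:
import math

def point_to_angles(point_id):
    heading = (point_id % 12) * math.pi / 6          # 30-degree heading steps
    elevation = (point_id // 12 - 1) * math.pi / 6   # levels: -30, 0, +30 deg
    return heading, elevation

assert point_to_angles(13) == (math.pi / 6, 0.0)     # level 1 == horizontal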
def rollout(self, train_ml=None, train_rl=True, reset=True, speaker=None):
if self.feedback == 'teacher' or self.feedback == 'argmax':
train_rl = False
if reset:
# Reset env
obs = np.array(self.env.reset())
else:
obs = np.array(self.env._get_obs())
batch_size = len(obs)
if speaker is not None: # Trigger the self_train mode!
noise = self.decoder.drop_env(torch.ones(self.feature_size).cuda())
batch = self.env.batch.copy()
speaker.env = self.env
insts = speaker.infer_batch(featdropmask=noise) # Use the same drop mask in speaker
# Create fake environments with the generated instruction
boss = np.ones((batch_size, 1), np.int64) * self.tok.word_to_index['<BOS>'] # First word is <BOS>
insts = np.concatenate((boss, insts), 1)
for i, (datum, inst) in enumerate(zip(batch, insts)):
if inst[-1] != self.tok.word_to_index['<PAD>']: # The inst is not ended!
inst[-1] = self.tok.word_to_index['<EOS>']
datum.pop('instructions')
datum.pop('instr_encoding')
datum['instructions'] = self.tok.decode_sentence(inst)
datum['instr_encoding'] = inst
obs = np.array(self.env.reset(batch))
# Reorder the language input for the encoder (do not ruin the original code)
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
ctx_mask = seq_mask
# Init the reward shaping
last_dist = np.zeros(batch_size, np.float32)
for i, ob in enumerate(perm_obs): # The init distance from the view point to the target
last_dist[i] = ob['distance']
# Record starting point
traj = [{
'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in perm_obs]
# For test result submission
visited = [set() for _ in perm_obs]
# Initialize the tracking state
ended = np.array([False] * batch_size) # Indices match permutation of the model, not env
# Init the logs
rewards = []
hidden_states = []
policy_log_probs = []
masks = []
entropys = []
ml_loss = 0.
h1 = h_t
for t in range(self.episode_len):
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
if speaker is not None: # Apply the env drop mask to the feat
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
h_t, c_t, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
already_dropfeat=(speaker is not None))
hidden_states.append(h_t)
# Mask outputs where agent can't move forward
candidate_mask = utils.length2mask(candidate_leng)
if args.submit:
for ob_id, ob in enumerate(perm_obs):
visited[ob_id].add(ob['viewpoint'])
for c_id, c in enumerate(ob['candidate']):
if c['viewpointId'] in visited[ob_id]:
candidate_mask[ob_id][c_id] = 1
logit.masked_fill_(candidate_mask, -float('inf'))
target = self._teacher_action(perm_obs, ended)
ml_loss += self.criterion(logit, target)
if self.feedback == 'teacher':
a_t = target
elif self.feedback == 'argmax':
_, a_t = logit.max(1)
a_t = a_t.detach()
log_probs = F.log_softmax(logit, 1)
policy_log_probs.append(log_probs.gather(1, a_t.unsqueeze(1)))
elif self.feedback == 'sample':
probs = F.softmax(logit, 1)
c = torch.distributions.Categorical(probs)
self.logs['entropy'].append(c.entropy().sum().item())
entropys.append(c.entropy())
a_t = c.sample().detach()
policy_log_probs.append(c.log_prob(a_t))
else:
print(self.feedback)
sys.exit('Invalid feedback option')
cpu_a_t = a_t.cpu().numpy()
for i, next_id in enumerate(cpu_a_t):
if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid or ended[i]:
cpu_a_t[i] = -1
self.make_equiv_action(cpu_a_t, perm_obs, perm_idx, traj)
obs = np.array(self.env._get_obs())
perm_obs = obs[perm_idx]
dist = np.zeros(batch_size, np.float32)
reward = np.zeros(batch_size, np.float32)
mask = np.ones(batch_size, np.float32)
for i, ob in enumerate(perm_obs):
dist[i] = ob['distance']
if ended[i]:
reward[i] = 0.
mask[i] = 0.
else:
action_idx = cpu_a_t[i]
if action_idx == -1:
if dist[i] < 3:
reward[i] = 2.
else:
reward[i] = -2.
else:
reward[i] = - (dist[i] - last_dist[i])
if reward[i] > 0:
reward[i] = 1
elif reward[i] < 0:
reward[i] = -1
else:
raise NameError("The action did not change the distance to the goal")
rewards.append(reward)
masks.append(mask)
last_dist[:] = dist
# Update the finished actions
# -1 means ended or ignored (already ended)
ended[:] = np.logical_or(ended, (cpu_a_t == -1))
# Early exit if all ended
if ended.all():
break
if train_rl:
# Last action in A2C
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
if speaker is not None:
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
last_h_, _, _, _ = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
speaker is not None)
rl_loss = 0.
# NOW, A2C!!!
# Calculate the final discounted reward
last_value__ = self.critic(last_h_).detach() # The value estimate of the last state; detach to stop gradients
discount_reward = np.zeros(batch_size, np.float32) # The initial reward is zero
for i in range(batch_size):
if not ended[i]: # If the action is not ended, use the value function as the last reward
discount_reward[i] = last_value__[i]
length = len(rewards)
total = 0
for t in range(length-1, -1, -1):
discount_reward = discount_reward * args.gamma + rewards[t] # If it ended, the reward will be 0
mask_ = Variable(torch.from_numpy(masks[t]), requires_grad=False).cuda()
clip_reward = discount_reward.copy()
r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda()
v_ = self.critic(hidden_states[t])
a_ = (r_ - v_).detach()
# r_: The higher, the better. -ln(p(action)) * (discount_reward - value)
rl_loss += (-policy_log_probs[t] * a_ * mask_).sum()
rl_loss += (((r_ - v_) ** 2) * mask_).sum() * 0.5 # 1/2 L2 loss
if self.feedback == 'sample':
rl_loss += (- 0.01 * entropys[t] * mask_).sum()
self.logs['critic_loss'].append((((r_ - v_) ** 2) * mask_).sum().item())
total = total + np.sum(masks[t])
self.logs['total'].append(total)
# Normalize the loss function
if args.normalize_loss == 'total':
rl_loss /= total
elif args.normalize_loss == 'batch':
rl_loss /= batch_size
else:
assert args.normalize_loss == 'none'
self.loss += rl_loss
if train_ml is not None:
self.loss += ml_loss * train_ml / batch_size
if type(self.loss) is int: # For safety, it will be activated if no losses are added
self.losses.append(0.)
else:
self.losses.append(self.loss.item() / self.episode_len) # Dividing by episode_len only rescales the logged value.
return traj
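# --- Illustrative sketch (not part of the original file) ---
# The reward shaping inside rollout: a +/-2 terminal reward for stopping
# within 3m of the goal, otherwise sign-of-progress (+/-1) per step. The
# zero-progress case, which the original treats as an error, is folded into
# -1 here for brevity:
def shaped_reward(stopped, dist, last_dist, success_radius=3.0):
    if stopped:
        return 2.0 if dist < success_radius else -2.0
    return 1.0 if last_dist - dist > 0 else -1.0

assert shaped_reward(True, 2.5, 4.0) == 2.0   # stopped near the goal
assert shaped_reward(False, 5.0, 4.0) == -1.0 # moved away from the goal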
def _dijkstra(self):
def make_state_id(viewpoint, action): # Make state id
return "%s_%s" % (viewpoint, str(action))
def decompose_state_id(state_id): # Decompose state id
viewpoint, action = state_id.split("_")
action = int(action)
return viewpoint, action
# Get first obs
obs = self.env._get_obs()
# Prepare the state id
batch_size = len(obs)
results = [{"scan": ob['scan'],
"instr_id": ob['instr_id'],
"instr_encoding": ob["instr_encoding"],
"dijk_path": [ob['viewpoint']],
"paths": []} for ob in obs]
# Encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
recover_idx = np.zeros_like(perm_idx)
for i, idx in enumerate(perm_idx):
recover_idx[idx] = i
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
ctx, h_t, c_t, ctx_mask = ctx[recover_idx], h_t[recover_idx], c_t[recover_idx], seq_mask[recover_idx] # Recover the original order
# Dijk Graph States:
id2state = [
{make_state_id(ob['viewpoint'], -95):
{"next_viewpoint": ob['viewpoint'],
"running_state": (h_t[i], h_t[i], c_t[i]),
"location": (ob['viewpoint'], ob['heading'], ob['elevation']),
"feature": None,
"from_state_id": None,
"score": 0,
"scores": [],
"actions": [],
}
}
for i, ob in enumerate(obs)
] # -95 is the start point
visited = [set() for _ in range(batch_size)]
finished = [set() for _ in range(batch_size)]
graphs = [utils.FloydGraph() for _ in range(batch_size)] # For the navigation path
ended = np.array([False] * batch_size)
# Dijk Algorithm
for _ in range(300):
# Get the state with the highest score for each batch
# If the batch is not ended, pick the best-scoring unvisited item.
# Else use an arbitrary item from the dict (it always exists)
smallest_idXstate = [
max(((state_id, state) for state_id, state in id2state[i].items() if state_id not in visited[i]),
key=lambda item: item[1]['score'])
if not ended[i]
else
next(iter(id2state[i].items()))
for i in range(batch_size)
]
# Set the visited and the end seqs
for i, (state_id, state) in enumerate(smallest_idXstate):
assert (ended[i]) or (state_id not in visited[i])
if not ended[i]:
viewpoint, action = decompose_state_id(state_id)
visited[i].add(state_id)
if action == -1:
finished[i].add(state_id)
if len(finished[i]) >= args.candidates: # Get enough candidates
ended[i] = True
# Gather the running state in the batch
h_ts, h1s, c_ts = zip(*(idXstate[1]['running_state'] for idXstate in smallest_idXstate))
h_t, h1, c_t = torch.stack(h_ts), torch.stack(h1s), torch.stack(c_ts)
# Recover the env and gather the feature
for i, (state_id, state) in enumerate(smallest_idXstate):
next_viewpoint = state['next_viewpoint']
scan = results[i]['scan']
from_viewpoint, heading, elevation = state['location']
self.env.env.sims[i].newEpisode(scan, next_viewpoint, heading, elevation) # Heading and elevation are not used in the panoramic setting
obs = self.env._get_obs()
# Update the floyd graph
# Only used to shorten the navigation length
# Will not affect the result
for i, ob in enumerate(obs):
viewpoint = ob['viewpoint']
if not graphs[i].visited(viewpoint): # Update the Graph
for c in ob['candidate']:
next_viewpoint = c['viewpointId']
dis = self.env.distances[ob['scan']][viewpoint][next_viewpoint]
graphs[i].add_edge(viewpoint, next_viewpoint, dis)
graphs[i].update(viewpoint)
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], viewpoint))
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(obs)
# Run one decoding step
h_t, c_t, alpha, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
False)
# Update the dijk graph's states with the newly visited viewpoint
candidate_mask = utils.length2mask(candidate_leng)
logit.masked_fill_(candidate_mask, -float('inf'))
log_probs = F.log_softmax(logit, 1)
_, max_act = log_probs.max(1)
for i, ob in enumerate(obs):
current_viewpoint = ob['viewpoint']
candidate = ob['candidate']
current_state_id, current_state = smallest_idXstate[i]
old_viewpoint, from_action = decompose_state_id(current_state_id)
assert ob['viewpoint'] == current_state['next_viewpoint']
if from_action == -1 or ended[i]:
continue
for j in range(len(ob['candidate']) + 1):
modified_log_prob = log_probs[i][j].detach().cpu().item()
new_score = current_state['score'] + modified_log_prob
if j < len(candidate):
next_id = make_state_id(current_viewpoint, j)
next_viewpoint = candidate[j]['viewpointId']
trg_point = candidate[j]['pointId']
heading = (trg_point % 12) * math.pi / 6
elevation = (trg_point // 12 - 1) * math.pi / 6
location = (next_viewpoint, heading, elevation)
else:
next_id = make_state_id(current_viewpoint, -1)
next_viewpoint = current_viewpoint
location = (current_viewpoint, ob['heading'], ob['elevation'])
if next_id not in id2state[i] or new_score > id2state[i][next_id]['score']:
id2state[i][next_id] = {
"next_viewpoint": next_viewpoint,
"location": location,
"running_state": (h_t[i], h1[i], c_t[i]),
"from_state_id": current_state_id,
"feature": (f_t[i].detach().cpu(), candidate_feat[i][j].detach().cpu()),
"score": new_score,
"scores": current_state['scores'] + [modified_log_prob],
"actions": current_state['actions'] + [len(candidate)+1],
}
for i in range(batch_size):
if len(visited[i]) == len(id2state[i]):
ended[i] = True
# End?
if ended.all():
break
# Move back to the start point
for i in range(batch_size):
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], results[i]['dijk_path'][0]))
# Gather the Path
for i, result in enumerate(results):
assert len(finished[i]) <= args.candidates
for state_id in finished[i]:
path_info = {
"trajectory": [],
"action": [],
"listener_scores": id2state[i][state_id]['scores'],
"listener_actions": id2state[i][state_id]['actions'],
"visual_feature": []
}
viewpoint, action = decompose_state_id(state_id)
while action != -95:
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
path_info['action'].append(action)
path_info['visual_feature'].append(state['feature'])
state_id = id2state[i][state_id]['from_state_id']
viewpoint, action = decompose_state_id(state_id)
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
for need_reverse_key in ["trajectory", "action", "visual_feature"]:
path_info[need_reverse_key] = path_info[need_reverse_key][::-1]
result['paths'].append(path_info)
return results
def beam_search(self, speaker):
self.env.reset()
results = self._dijkstra()
# Compute the speaker scores:
for result in results:
lengths = []
num_paths = len(result['paths'])
for path in result['paths']:
assert len(path['trajectory']) == (len(path['visual_feature']) + 1)
lengths.append(len(path['visual_feature']))
max_len = max(lengths)
img_feats = torch.zeros(num_paths, max_len, 36, self.feature_size + args.angle_feat_size)
can_feats = torch.zeros(num_paths, max_len, self.feature_size + args.angle_feat_size)
for j, path in enumerate(result['paths']):
for k, feat in enumerate(path['visual_feature']):
img_feat, can_feat = feat
img_feats[j][k] = img_feat
can_feats[j][k] = can_feat
img_feats, can_feats = img_feats.cuda(), can_feats.cuda()
features = ((img_feats, can_feats), lengths)
insts = np.array([result['instr_encoding'] for _ in range(num_paths)])
seq_lengths = np.argmax(insts == self.tok.word_to_index['<EOS>'], axis=1) # len(seq + 'BOS') == len(seq + 'EOS')
insts = torch.from_numpy(insts).cuda()
speaker_scores = speaker.teacher_forcing(train=True, features=features, insts=insts, for_listener=True)
for j, path in enumerate(result['paths']):
path.pop("visual_feature")
path['speaker_scores'] = -speaker_scores[j].detach().cpu().numpy()[:seq_lengths[j]]
return results
def beam_search_test(self, speaker):
self.encoder.eval()
self.decoder.eval()
self.critic.eval()
looped = False
self.results = {}
while True:
for traj in self.beam_search(speaker):
if traj['instr_id'] in self.results:
looped = True
else:
self.results[traj['instr_id']] = traj
if looped:
break
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, iters=None):
self.feedback = feedback
if use_dropout:
self.encoder.train()
self.decoder.train()
self.critic.train()
else:
self.encoder.eval()
self.decoder.eval()
self.critic.eval()
super(Seq2SeqAgent, self).test(iters)
def zero_grad(self):
self.loss = 0.
self.losses = []
for model, optimizer in zip(self.models, self.optimizers):
model.train()
optimizer.zero_grad()
def accumulate_gradient(self, feedback='teacher', **kwargs):
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)
elif feedback == 'sample':
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)
self.feedback = 'sample'
self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
def optim_step(self):
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.critic_optimizer.step()
def train(self, n_iters, feedback='teacher', **kwargs):
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.critic.train()
self.losses = []
for iter in tqdm(range(1, n_iters + 1)):
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
self.critic_optimizer.zero_grad()
self.loss = 0
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)
elif feedback == 'sample':
if args.ml_weight != 0:
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)
self.feedback = 'sample'
self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.critic_optimizer.step()
def save(self, epoch, path):
the_dir, _ = os.path.split(path)
os.makedirs(the_dir, exist_ok=True)
states = {}
def create_state(name, model, optimizer):
states[name] = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)]
for param in all_tuple:
create_state(*param)
torch.save(states, path)
def load(self, path):
states = torch.load(path)
def recover_state(name, model, optimizer):
state = model.state_dict()
model_keys = set(state.keys())
load_keys = set(states[name]['state_dict'].keys())
if model_keys != load_keys:
print("NOTICE: DIFFERENT KEYS IN THE LISTEREN")
state.update(states[name]['state_dict'])
model.load_state_dict(state)
if args.loadOptim:
optimizer.load_state_dict(states[name]['optimizer'])
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)]
for param in all_tuple:
recover_state(*param)
return states['encoder']['epoch'] - 1
| true
| true
|
790cc949e50ab6912df998fbdc372c01c447156f
| 1,934
|
py
|
Python
|
components/aws/sagemaker/tests/integration_tests/utils/kfp_client_utils.py
|
Intellicode/pipelines
|
f1d90407a8a2f56db11199c9c73e6df6c4a8b093
|
[
"Apache-2.0"
] | 1
|
2020-10-13T13:28:42.000Z
|
2020-10-13T13:28:42.000Z
|
components/aws/sagemaker/tests/integration_tests/utils/kfp_client_utils.py
|
Intellicode/pipelines
|
f1d90407a8a2f56db11199c9c73e6df6c4a8b093
|
[
"Apache-2.0"
] | null | null | null |
components/aws/sagemaker/tests/integration_tests/utils/kfp_client_utils.py
|
Intellicode/pipelines
|
f1d90407a8a2f56db11199c9c73e6df6c4a8b093
|
[
"Apache-2.0"
] | null | null | null |
import os
import utils
import pytest
from utils import argo_utils
def compile_and_run_pipeline(
client,
experiment_id,
pipeline_definition,
input_params,
output_file_dir,
pipeline_name,
):
pipeline_path = os.path.join(output_file_dir, pipeline_name)
utils.run_command(
f"dsl-compile --py {pipeline_definition} --output {pipeline_path}.yaml"
)
run = client.run_pipeline(
experiment_id, pipeline_name, f"{pipeline_path}.yaml", input_params
)
return run.id
def wait_for_job_status(client, run_id, timeout, status_to_check="succeeded"):
response = None
try:
response = client.wait_for_run_completion(run_id, timeout)
except TimeoutError:
print(f"run-id: {run_id} did not stop within specified timeout")
response = client.get_run(run_id)
status = False
if response and response.run.status:
status = response.run.status.lower() == status_to_check
return status
def get_workflow_json(client, run_id):
# API not in readthedocs
# Refer: https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/_client.py#L663
return client._get_workflow_json(run_id)
def compile_run_monitor_pipeline(
client,
experiment_id,
pipeline_definition,
input_params,
output_file_dir,
pipeline_name,
timeout,
status_to_check="succeeded",
check=True,
):
run_id = compile_and_run_pipeline(
client,
experiment_id,
pipeline_definition,
input_params,
output_file_dir,
pipeline_name,
)
status = wait_for_job_status(client, run_id, timeout, status_to_check)
workflow_json = get_workflow_json(client, run_id)
if check and not status:
argo_utils.print_workflow_logs(workflow_json["metadata"]["name"])
pytest.fail(f"Test Failed: {pipeline_name}. Run-id: {run_id}")
return run_id, status, workflow_json
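# --- Illustrative usage (not part of the original file) ---
# A typical call site; the experiment name, paths and params below are
# hypothetical, and kfp.Client() needs a reachable KFP deployment:
import kfp

client = kfp.Client()
experiment_id = client.create_experiment("sagemaker-tests").id
run_id, status, workflow_json = compile_run_monitor_pipeline(
    client,
    experiment_id,
    pipeline_definition="pipelines/train.py",  # hypothetical pipeline file
    input_params={"region": "us-east-1"},
    output_file_dir="/tmp",
    pipeline_name="train-test",
    timeout=1800,
)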
| 26.493151
| 93
| 0.701138
|
import os
import utils
import pytest
from utils import argo_utils
def compile_and_run_pipeline(
client,
experiment_id,
pipeline_definition,
input_params,
output_file_dir,
pipeline_name,
):
pipeline_path = os.path.join(output_file_dir, pipeline_name)
utils.run_command(
f"dsl-compile --py {pipeline_definition} --output {pipeline_path}.yaml"
)
run = client.run_pipeline(
experiment_id, pipeline_name, f"{pipeline_path}.yaml", input_params
)
return run.id
def wait_for_job_status(client, run_id, timeout, status_to_check="succeeded"):
response = None
try:
response = client.wait_for_run_completion(run_id, timeout)
except TimeoutError:
print(f"run-id: {run_id} did not stop within specified timeout")
response = client.get_run(run_id)
status = False
if response and response.run.status:
status = response.run.status.lower() == status_to_check
return status
def get_workflow_json(client, run_id):
return client._get_workflow_json(run_id)
def compile_run_monitor_pipeline(
client,
experiment_id,
pipeline_definition,
input_params,
output_file_dir,
pipeline_name,
timeout,
status_to_check="succeeded",
check=True,
):
run_id = compile_and_run_pipeline(
client,
experiment_id,
pipeline_definition,
input_params,
output_file_dir,
pipeline_name,
)
status = wait_for_job_status(client, run_id, timeout, status_to_check)
workflow_json = get_workflow_json(client, run_id)
if check and not status:
argo_utils.print_workflow_logs(workflow_json["metadata"]["name"])
pytest.fail(f"Test Failed: {pipeline_name}. Run-id: {run_id}")
return run_id, status, workflow_json
| true
| true
|
790cca98712755173d39f9bcd58d99751d1d3c8b
| 8,255
|
py
|
Python
|
app/resources/pymo/pymo/parsers.py
|
seanschneeweiss/RoSeMotion
|
4ef7997c8976a8489798a427c768af5114f6b31e
|
[
"MIT"
] | 11
|
2021-01-03T07:31:56.000Z
|
2022-03-26T20:21:25.000Z
|
app/resources/pymo/pymo/parsers.py
|
seanschneeweiss/RoSeMotion
|
4ef7997c8976a8489798a427c768af5114f6b31e
|
[
"MIT"
] | 5
|
2021-01-04T07:22:32.000Z
|
2022-02-01T00:38:52.000Z
|
app/resources/pymo/pymo/parsers.py
|
seanschneeweiss/RoSeMotion
|
4ef7997c8976a8489798a427c768af5114f6b31e
|
[
"MIT"
] | 3
|
2021-03-06T17:00:26.000Z
|
2022-01-18T01:37:43.000Z
|
'''
BVH Parser Class
By Omid Alemi
Created: June 12, 2017
Based on: https://gist.github.com/johnfredcee/2007503
'''
import re
import numpy as np
from data import Joint, MocapData
class BVHScanner:
'''
A wrapper class for re.Scanner
'''
def __init__(self):
def identifier(scanner, token):
return 'IDENT', token
def operator(scanner, token):
return 'OPERATOR', token
def digit(scanner, token):
return 'DIGIT', token
def open_brace(scanner, token):
return 'OPEN_BRACE', token
def close_brace(scanner, token):
return 'CLOSE_BRACE', token
self.scanner = re.Scanner([
(r'[a-zA-Z_]\w*', identifier),
#(r'-*[0-9]+(\.[0-9]+)?', digit), # won't work for .34
#(r'[-+]?[0-9]*\.?[0-9]+', digit), # won't work for 4.56e-2
#(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit),
(r'-*[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit),
(r'}', close_brace),
(r'{', open_brace),
(r':', None),
(r'\s+', None)
])
def scan(self, stuff):
return self.scanner.scan(stuff)
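# --- Illustrative sketch (not part of the original file) ---
# Why the final DIGIT pattern was chosen: it must accept '.34', '-17' and
# scientific notation like '4.56e-2', which the commented-out earlier attempts
# missed. Checking the standalone pattern:
import re

digit = re.compile(r'-*[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?')
for token in ['.34', '-17', '4.56e-2', '3.0']:
    assert digit.fullmatch(token), token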
class BVHParser():
'''
A class to parse a BVH file.
Extracts the skeleton and channel values
'''
def __init__(self, filename=None):
self.reset()
def reset(self):
self._skeleton = {}
self.bone_context = []
self._motion_channels = []
self._motions = []
self.current_token = 0
self.framerate = 0.0
self.root_name = ''
self.scanner = BVHScanner()
self.data = MocapData()
def parse(self, filename):
self.reset()
with open(filename, 'r') as bvh_file:
raw_contents = bvh_file.read()
tokens, remainder = self.scanner.scan(raw_contents)
self._parse_hierarchy(tokens)
self.current_token = self.current_token + 1
self._parse_motion(tokens)
self.data.skeleton = self._skeleton
self.data.channel_names = self._motion_channels
self.data.values = self._to_DataFrame()
self.data.root_name = self.root_name
self.data.framerate = self.framerate
return self.data
def _to_DataFrame(self):
'''Returns all of the channels parsed from the file as a pandas DataFrame'''
import pandas as pd
time_index = pd.to_timedelta([f[0] for f in self._motions], unit='s')
frames = [f[1] for f in self._motions]
channels = np.asarray([[channel[2] for channel in frame] for frame in frames])
column_names = ['%s_%s'%(c[0], c[1]) for c in self._motion_channels]
return pd.DataFrame(data=channels, index=time_index, columns=column_names)
def _new_bone(self, parent, name):
bone = {'parent': parent, 'channels': [], 'offsets': [],'children': []}
return bone
def _push_bone_context(self,name):
self.bone_context.append(name)
def _get_bone_context(self):
return self.bone_context[len(self.bone_context)-1]
def _pop_bone_context(self):
self.bone_context = self.bone_context[:-1]
return self.bone_context[len(self.bone_context)-1]
def _read_offset(self, bvh, token_index):
if bvh[token_index] != ('IDENT', 'OFFSET'):
return None, None
token_index = token_index + 1
offsets = [0.0] * 3
for i in range(3):
offsets[i] = float(bvh[token_index][1])
token_index = token_index + 1
return offsets, token_index
def _read_channels(self, bvh, token_index):
if bvh[token_index] != ('IDENT', 'CHANNELS'):
return None, None
token_index = token_index + 1
channel_count = int(bvh[token_index][1])
token_index = token_index + 1
channels = [""] * channel_count
for i in range(channel_count):
channels[i] = bvh[token_index][1]
token_index = token_index + 1
return channels, token_index
def _parse_joint(self, bvh, token_index):
end_site = False
joint_id = bvh[token_index][1]
token_index = token_index + 1
joint_name = bvh[token_index][1]
token_index = token_index + 1
parent_name = self._get_bone_context()
if (joint_id == "End"):
joint_name = parent_name+ '_Nub'
end_site = True
joint = self._new_bone(parent_name, joint_name)
if bvh[token_index][0] != 'OPEN_BRACE':
print('Was expecting brace, got ', bvh[token_index])
return None
token_index = token_index + 1
offsets, token_index = self._read_offset(bvh, token_index)
joint['offsets'] = offsets
if not end_site:
channels, token_index = self._read_channels(bvh, token_index)
joint['channels'] = channels
for channel in channels:
self._motion_channels.append((joint_name, channel))
self._skeleton[joint_name] = joint
self._skeleton[parent_name]['children'].append(joint_name)
while (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'JOINT') or (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'End'):
self._push_bone_context(joint_name)
token_index = self._parse_joint(bvh, token_index)
self._pop_bone_context()
if bvh[token_index][0] == 'CLOSE_BRACE':
return token_index + 1
print('Unexpected token ', bvh[token_index])
def _parse_hierarchy(self, bvh):
self.current_token = 0
if bvh[self.current_token] != ('IDENT', 'HIERARCHY'):
return None
self.current_token = self.current_token + 1
if bvh[self.current_token] != ('IDENT', 'ROOT'):
return None
self.current_token = self.current_token + 1
if bvh[self.current_token][0] != 'IDENT':
return None
root_name = bvh[self.current_token][1]
root_bone = self._new_bone(None, root_name)
self.current_token = self.current_token + 2 #skipping open brace
offsets, self.current_token = self._read_offset(bvh, self.current_token)
channels, self.current_token = self._read_channels(bvh, self.current_token)
root_bone['offsets'] = offsets
root_bone['channels'] = channels
self._skeleton[root_name] = root_bone
self._push_bone_context(root_name)
for channel in channels:
self._motion_channels.append((root_name, channel))
while bvh[self.current_token][1] == 'JOINT':
self.current_token = self._parse_joint(bvh, self.current_token)
self.root_name = root_name
def _parse_motion(self, bvh):
if bvh[self.current_token][0] != 'IDENT':
print('Unexpected text')
return None
if bvh[self.current_token][1] != 'MOTION':
print('No motion section')
return None
self.current_token = self.current_token + 1
if bvh[self.current_token][1] != 'Frames':
return None
self.current_token = self.current_token + 1
frame_count = int(bvh[self.current_token][1])
self.current_token = self.current_token + 1
if bvh[self.current_token][1] != 'Frame':
return None
self.current_token = self.current_token + 1
if bvh[self.current_token][1] != 'Time':
return None
self.current_token = self.current_token + 1
frame_rate = float(bvh[self.current_token][1])
self.framerate = frame_rate
self.current_token = self.current_token + 1
frame_time = 0.0
self._motions = [()] * frame_count
for i in range(frame_count):
channel_values = []
for channel in self._motion_channels:
channel_values.append((channel[0], channel[1], float(bvh[self.current_token][1])))
self.current_token = self.current_token + 1
self._motions[i] = (frame_time, channel_values)
frame_time = frame_time + frame_rate
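# --- Illustrative usage (not part of the original file) ---
# Minimal round trip through the parser; the .bvh path is hypothetical and
# _to_DataFrame requires pandas:
parser = BVHParser()
mocap = parser.parse('walk_cycle.bvh')
print(mocap.root_name, mocap.framerate)
print(mocap.values.head())  # one column per (joint, channel) pair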
| 33.831967
| 152
| 0.586796
|
import re
import numpy as np
from data import Joint, MocapData
class BVHScanner:
def __init__(self):
def identifier(scanner, token):
return 'IDENT', token
def operator(scanner, token):
return 'OPERATOR', token
def digit(scanner, token):
return 'DIGIT', token
def open_brace(scanner, token):
return 'OPEN_BRACE', token
def close_brace(scanner, token):
return 'CLOSE_BRACE', token
self.scanner = re.Scanner([
(r'[a-zA-Z_]\w*', identifier),
(r'-*[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit),
(r'}', close_brace),
(r'{', open_brace),
(r':', None),
(r'\s+', None)
])
def scan(self, stuff):
return self.scanner.scan(stuff)
class BVHParser():
def __init__(self, filename=None):
self.reset()
def reset(self):
self._skeleton = {}
self.bone_context = []
self._motion_channels = []
self._motions = []
self.current_token = 0
self.framerate = 0.0
self.root_name = ''
self.scanner = BVHScanner()
self.data = MocapData()
def parse(self, filename):
self.reset()
with open(filename, 'r') as bvh_file:
raw_contents = bvh_file.read()
tokens, remainder = self.scanner.scan(raw_contents)
self._parse_hierarchy(tokens)
self.current_token = self.current_token + 1
self._parse_motion(tokens)
self.data.skeleton = self._skeleton
self.data.channel_names = self._motion_channels
self.data.values = self._to_DataFrame()
self.data.root_name = self.root_name
self.data.framerate = self.framerate
return self.data
def _to_DataFrame(self):
import pandas as pd
time_index = pd.to_timedelta([f[0] for f in self._motions], unit='s')
frames = [f[1] for f in self._motions]
channels = np.asarray([[channel[2] for channel in frame] for frame in frames])
column_names = ['%s_%s'%(c[0], c[1]) for c in self._motion_channels]
return pd.DataFrame(data=channels, index=time_index, columns=column_names)
def _new_bone(self, parent, name):
bone = {'parent': parent, 'channels': [], 'offsets': [],'children': []}
return bone
def _push_bone_context(self,name):
self.bone_context.append(name)
def _get_bone_context(self):
return self.bone_context[len(self.bone_context)-1]
def _pop_bone_context(self):
self.bone_context = self.bone_context[:-1]
return self.bone_context[len(self.bone_context)-1]
def _read_offset(self, bvh, token_index):
if bvh[token_index] != ('IDENT', 'OFFSET'):
return None, None
token_index = token_index + 1
offsets = [0.0] * 3
for i in range(3):
offsets[i] = float(bvh[token_index][1])
token_index = token_index + 1
return offsets, token_index
def _read_channels(self, bvh, token_index):
if bvh[token_index] != ('IDENT', 'CHANNELS'):
return None, None
token_index = token_index + 1
channel_count = int(bvh[token_index][1])
token_index = token_index + 1
channels = [""] * channel_count
for i in range(channel_count):
channels[i] = bvh[token_index][1]
token_index = token_index + 1
return channels, token_index
def _parse_joint(self, bvh, token_index):
end_site = False
joint_id = bvh[token_index][1]
token_index = token_index + 1
joint_name = bvh[token_index][1]
token_index = token_index + 1
parent_name = self._get_bone_context()
if (joint_id == "End"):
joint_name = parent_name+ '_Nub'
end_site = True
joint = self._new_bone(parent_name, joint_name)
if bvh[token_index][0] != 'OPEN_BRACE':
print('Was expecting brace, got ', bvh[token_index])
return None
token_index = token_index + 1
offsets, token_index = self._read_offset(bvh, token_index)
joint['offsets'] = offsets
if not end_site:
channels, token_index = self._read_channels(bvh, token_index)
joint['channels'] = channels
for channel in channels:
self._motion_channels.append((joint_name, channel))
self._skeleton[joint_name] = joint
self._skeleton[parent_name]['children'].append(joint_name)
while (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'JOINT') or (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'End'):
self._push_bone_context(joint_name)
token_index = self._parse_joint(bvh, token_index)
self._pop_bone_context()
if bvh[token_index][0] == 'CLOSE_BRACE':
return token_index + 1
print('Unexpected token ', bvh[token_index])
def _parse_hierarchy(self, bvh):
self.current_token = 0
if bvh[self.current_token] != ('IDENT', 'HIERARCHY'):
return None
self.current_token = self.current_token + 1
if bvh[self.current_token] != ('IDENT', 'ROOT'):
return None
self.current_token = self.current_token + 1
if bvh[self.current_token][0] != 'IDENT':
return None
root_name = bvh[self.current_token][1]
root_bone = self._new_bone(None, root_name)
self.current_token = self.current_token + 2
offsets, self.current_token = self._read_offset(bvh, self.current_token)
channels, self.current_token = self._read_channels(bvh, self.current_token)
root_bone['offsets'] = offsets
root_bone['channels'] = channels
self._skeleton[root_name] = root_bone
self._push_bone_context(root_name)
for channel in channels:
self._motion_channels.append((root_name, channel))
while bvh[self.current_token][1] == 'JOINT':
self.current_token = self._parse_joint(bvh, self.current_token)
self.root_name = root_name
def _parse_motion(self, bvh):
if bvh[self.current_token][0] != 'IDENT':
print('Unexpected text')
return None
if bvh[self.current_token][1] != 'MOTION':
print('No motion section')
return None
self.current_token = self.current_token + 1
if bvh[self.current_token][1] != 'Frames':
return None
self.current_token = self.current_token + 1
frame_count = int(bvh[self.current_token][1])
self.current_token = self.current_token + 1
if bvh[self.current_token][1] != 'Frame':
return None
self.current_token = self.current_token + 1
if bvh[self.current_token][1] != 'Time':
return None
self.current_token = self.current_token + 1
frame_rate = float(bvh[self.current_token][1])
self.framerate = frame_rate
self.current_token = self.current_token + 1
frame_time = 0.0
self._motions = [()] * frame_count
for i in range(frame_count):
channel_values = []
for channel in self._motion_channels:
channel_values.append((channel[0], channel[1], float(bvh[self.current_token][1])))
self.current_token = self.current_token + 1
self._motions[i] = (frame_time, channel_values)
frame_time = frame_time + frame_rate
| true
| true
|
790ccaef13b8a06d76ca10c214ed5313b78c3fd5
| 8,543
|
py
|
Python
|
armi/utils/directoryChangers.py
|
wilcoxjd/armi
|
6de79e77bd2e58625efce8e9d9914cfd6cd3952a
|
[
"Apache-2.0"
] | null | null | null |
armi/utils/directoryChangers.py
|
wilcoxjd/armi
|
6de79e77bd2e58625efce8e9d9914cfd6cd3952a
|
[
"Apache-2.0"
] | null | null | null |
armi/utils/directoryChangers.py
|
wilcoxjd/armi
|
6de79e77bd2e58625efce8e9d9914cfd6cd3952a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
import string
import armi
from armi import runLog
from armi.utils import pathTools
def _changeDirectory(destination):
if os.path.exists(destination):
os.chdir(destination)
else:
raise IOError(
"Cannot change directory to non-existent location: {}".format(destination)
)
class DirectoryChanger(object):
"""
Utility to change directory.
Parameters
----------
destination : str
Path of directory to change into
filesToMove : list of str, optional
Filenames to bring from the CWD into the destination
filesToRetrieve : list of str, optional
Filenames to bring back from the destination to the cwd
dumpOnException : bool, optional
Flag to tell the system to retrieve the entire directory if an exception
is raised within the context manager.
Use with 'with' statements to execute code in a different dir, guaranteeing a clean
return to the original directory
>>> with DirectoryChanger('C:\\whatever')
... pass
"""
def __init__(
self, destination, filesToMove=None, filesToRetrieve=None, dumpOnException=True
):
"""Establish the new and return directories"""
self.initial = pathTools.armiAbsPath(os.getcwd())
self.destination = None
if destination is not None:
self.destination = pathTools.armiAbsPath(destination)
self._filesToMove = filesToMove or []
self._filesToRetrieve = filesToRetrieve or []
self._dumpOnException = dumpOnException
def __enter__(self):
"""At the inception of a with command, navigate to a new directory if one is supplied."""
runLog.debug("Changing directory to {}".format(self.destination))
self.moveFiles()
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""At the termination of a with command, navigate back to the original directory."""
runLog.debug("Returning to directory {}".format(self.initial))
if exc_type is not None and self._dumpOnException:
runLog.info(
"An exception was raised within a DirectoryChanger. "
"Retrieving entire folder for debugging."
)
self._retrieveEntireFolder()
else:
self.retrieveFiles()
self.close()
def __repr__(self):
"""Print the initial and destination paths"""
return "<{} {} to {}>".format(
self.__class__.__name__, self.initial, self.destination
)
def open(self):
"""
User-requested open, used to stall the close from a with statement.
This method exists for old uses of :code:`os.chdir()` and is not
recommended. Please use with statements instead.
"""
if self.destination:
_changeDirectory(self.destination)
def close(self):
"""User requested close."""
if self.initial != os.getcwd():
_changeDirectory(self.initial)
def moveFiles(self):
initialPath = self.initial
destinationPath = self.destination
self._transferFiles(initialPath, destinationPath, self._filesToMove)
def retrieveFiles(self):
"""Retrieve any desired files."""
initialPath = self.destination
destinationPath = self.initial
fileList = self._filesToRetrieve
self._transferFiles(initialPath, destinationPath, fileList)
def _retrieveEntireFolder(self):
"""Retrieve all files."""
initialPath = self.destination
destinationPath = self.initial
folderName = os.path.split(self.destination)[1]
destinationPath = os.path.join(destinationPath, f"dump-{folderName}")
fileList = os.listdir(self.destination)
self._transferFiles(initialPath, destinationPath, fileList)
@staticmethod
def _transferFiles(initialPath, destinationPath, fileList):
"""
Transfer files into or out of the directory.
.. warning:: On Windows the max number of characters in a path is 260.
If you exceed this you will see FileNotFound errors here.
"""
if not fileList:
return
if not os.path.exists(destinationPath):
os.mkdir(destinationPath)
for ff in fileList:
if isinstance(ff, tuple):
# allow renames in transit
fromName, destName = ff
else:
fromName, destName = ff, ff
fromPath = os.path.join(initialPath, fromName)
toPath = os.path.join(destinationPath, destName)
runLog.extra("Copying {} to {}".format(fromPath, toPath))
shutil.copy(fromPath, toPath)
class TemporaryDirectoryChanger(DirectoryChanger):
"""
    Creates a temporary directory, changes into it, and, if no error/exception
    is raised within the :code:`with` statement, deletes the directory on exit.
Notes
-----
If there is an error/exception generated while in a :code:`with` statement, the
temporary directory contents will be copied to the original directory and then the
temporary directory will be deleted.
"""
_home = armi.context.FAST_PATH
def __init__(
self, root=None, filesToMove=None, filesToRetrieve=None, dumpOnException=True
):
DirectoryChanger.__init__(
self, root, filesToMove, filesToRetrieve, dumpOnException
)
root = root or TemporaryDirectoryChanger._home
if not os.path.exists(root):
os.makedirs(root)
self.initial = os.path.abspath(os.getcwd())
self.destination = TemporaryDirectoryChanger.GetRandomDirectory(root)
while os.path.exists(self.destination):
self.destination = TemporaryDirectoryChanger.GetRandomDirectory(root)
@classmethod
def GetRandomDirectory(cls, root):
return os.path.join(
root,
"temp-"
+ "".join(
random.choice(string.ascii_letters + string.digits) for _ in range(10)
),
)
def __enter__(self):
os.mkdir(self.destination)
return DirectoryChanger.__enter__(self)
def __exit__(self, exc_type, exc_value, traceback):
DirectoryChanger.__exit__(self, exc_type, exc_value, traceback)
shutil.rmtree(self.destination)
class ForcedCreationDirectoryChanger(DirectoryChanger):
"""
    Creates the directory tree necessary to reach your desired destination.
Attributes
----------
clean : bool
if True and the directory exists, clear all contents on entry.
"""
def __init__(
self,
destination,
filesToMove=None,
filesToRetrieve=None,
dumpOnException=True,
clean=False,
):
DirectoryChanger.__init__(
self, destination, filesToMove, filesToRetrieve, dumpOnException
)
self.clean = clean
def __enter__(self):
if not os.path.exists(self.destination):
runLog.debug(f"Creating destination folder {self.destination}")
try:
os.makedirs(self.destination)
except OSError:
# even though we checked exists, this still fails
# sometimes when multiple MPI nodes try
# to make the dirs due to I/O delays
runLog.debug(f"Failed to make destination folder")
else:
runLog.debug(f"Destination folder already exists: {self.destination}")
DirectoryChanger.__enter__(self)
if self.clean:
shutil.rmtree(".", ignore_errors=True)
return self
def directoryChangerFactory():
if armi.MPI_SIZE > 1:
from .directoryChangersMpi import MpiDirectoryChanger
return MpiDirectoryChanger
else:
return DirectoryChanger
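# --- Editorial usage sketch (not part of the original module) ---
# Shows how the context managers above are typically combined. The file name
# below is hypothetical, and armi plus its dependencies must be importable.
if __name__ == "__main__":
    with TemporaryDirectoryChanger(filesToRetrieve=["results.csv"]):
        # CWD is now a fresh temporary directory; write outputs here.
        with open("results.csv", "w") as f:
            f.write("case,keff\n")
    # On a clean exit, results.csv is copied back to the original directory
    # and the temporary directory itself is removed.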
| 33.766798
| 97
| 0.64626
|
import os
import random
import shutil
import string
import armi
from armi import runLog
from armi.utils import pathTools
def _changeDirectory(destination):
if os.path.exists(destination):
os.chdir(destination)
else:
raise IOError(
"Cannot change directory to non-existent location: {}".format(destination)
)
class DirectoryChanger(object):
def __init__(
self, destination, filesToMove=None, filesToRetrieve=None, dumpOnException=True
):
self.initial = pathTools.armiAbsPath(os.getcwd())
self.destination = None
if destination is not None:
self.destination = pathTools.armiAbsPath(destination)
self._filesToMove = filesToMove or []
self._filesToRetrieve = filesToRetrieve or []
self._dumpOnException = dumpOnException
def __enter__(self):
runLog.debug("Changing directory to {}".format(self.destination))
self.moveFiles()
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
runLog.debug("Returning to directory {}".format(self.initial))
if exc_type is not None and self._dumpOnException:
runLog.info(
"An exception was raised within a DirectoryChanger. "
"Retrieving entire folder for debugging."
)
self._retrieveEntireFolder()
else:
self.retrieveFiles()
self.close()
def __repr__(self):
return "<{} {} to {}>".format(
self.__class__.__name__, self.initial, self.destination
)
def open(self):
if self.destination:
_changeDirectory(self.destination)
def close(self):
if self.initial != os.getcwd():
_changeDirectory(self.initial)
def moveFiles(self):
initialPath = self.initial
destinationPath = self.destination
self._transferFiles(initialPath, destinationPath, self._filesToMove)
def retrieveFiles(self):
initialPath = self.destination
destinationPath = self.initial
fileList = self._filesToRetrieve
self._transferFiles(initialPath, destinationPath, fileList)
def _retrieveEntireFolder(self):
initialPath = self.destination
destinationPath = self.initial
folderName = os.path.split(self.destination)[1]
destinationPath = os.path.join(destinationPath, f"dump-{folderName}")
fileList = os.listdir(self.destination)
self._transferFiles(initialPath, destinationPath, fileList)
@staticmethod
def _transferFiles(initialPath, destinationPath, fileList):
if not fileList:
return
if not os.path.exists(destinationPath):
os.mkdir(destinationPath)
for ff in fileList:
if isinstance(ff, tuple):
fromName, destName = ff
else:
fromName, destName = ff, ff
fromPath = os.path.join(initialPath, fromName)
toPath = os.path.join(destinationPath, destName)
runLog.extra("Copying {} to {}".format(fromPath, toPath))
shutil.copy(fromPath, toPath)
class TemporaryDirectoryChanger(DirectoryChanger):
_home = armi.context.FAST_PATH
def __init__(
self, root=None, filesToMove=None, filesToRetrieve=None, dumpOnException=True
):
DirectoryChanger.__init__(
self, root, filesToMove, filesToRetrieve, dumpOnException
)
root = root or TemporaryDirectoryChanger._home
if not os.path.exists(root):
os.makedirs(root)
self.initial = os.path.abspath(os.getcwd())
self.destination = TemporaryDirectoryChanger.GetRandomDirectory(root)
while os.path.exists(self.destination):
self.destination = TemporaryDirectoryChanger.GetRandomDirectory(root)
@classmethod
def GetRandomDirectory(cls, root):
return os.path.join(
root,
"temp-"
+ "".join(
random.choice(string.ascii_letters + string.digits) for _ in range(10)
),
)
def __enter__(self):
os.mkdir(self.destination)
return DirectoryChanger.__enter__(self)
def __exit__(self, exc_type, exc_value, traceback):
DirectoryChanger.__exit__(self, exc_type, exc_value, traceback)
shutil.rmtree(self.destination)
class ForcedCreationDirectoryChanger(DirectoryChanger):
def __init__(
self,
destination,
filesToMove=None,
filesToRetrieve=None,
dumpOnException=True,
clean=False,
):
DirectoryChanger.__init__(
self, destination, filesToMove, filesToRetrieve, dumpOnException
)
self.clean = clean
def __enter__(self):
if not os.path.exists(self.destination):
runLog.debug(f"Creating destination folder {self.destination}")
try:
os.makedirs(self.destination)
except OSError:
runLog.debug(f"Failed to make destination folder")
else:
runLog.debug(f"Destination folder already exists: {self.destination}")
DirectoryChanger.__enter__(self)
if self.clean:
shutil.rmtree(".", ignore_errors=True)
return self
def directoryChangerFactory():
if armi.MPI_SIZE > 1:
from .directoryChangersMpi import MpiDirectoryChanger
return MpiDirectoryChanger
else:
return DirectoryChanger
| true
| true
|
790ccb73ab0335237d3cdf89c049d0689a78a21f
| 4,741
|
py
|
Python
|
test/test_cdf.py
|
li012589/NeuralWavelet
|
6e593ded5cb4ae80579cbf56eb9c346d808669cb
|
[
"Apache-2.0"
] | 28
|
2021-01-27T00:41:40.000Z
|
2022-02-14T10:11:51.000Z
|
test/test_cdf.py
|
li012589/NeuralWavelet
|
6e593ded5cb4ae80579cbf56eb9c346d808669cb
|
[
"Apache-2.0"
] | null | null | null |
test/test_cdf.py
|
li012589/NeuralWavelet
|
6e593ded5cb4ae80579cbf56eb9c346d808669cb
|
[
"Apache-2.0"
] | 6
|
2021-02-03T01:42:08.000Z
|
2021-12-03T17:47:19.000Z
|
import os
import sys
sys.path.append(os.getcwd())
import numpy as np
import torch
import flow
from utils import cdfDiscreteLogitstic, cdfMixDiscreteLogistic
from utils import logDiscreteLogistic, logMixDiscreteLogistic
nbins = 4096
_bins = torch.arange(-nbins // 2, nbins // 2).reshape(-1, 1, 1, 1, 1)
decimal = flow.ScalingNshifting(256, -128)
def test_disLogisticCDF():
logscale = torch.tensor(
[[[[-3.6826, -3.0157, -3.6032],
[-3.7063, -3.0269, -3.5338],
[-3.5311, -2.9907, -3.3516],
[-3.9300, -3.3121, -3.8110]],
[[-3.1022, -3.0692, -3.2039],
[-2.9466, -3.0006, -3.2969],
[-2.7636, -2.5691, -2.9628],
[-3.3657, -3.2948, -3.5318]],
[[-3.9748, -3.0670, -3.2399],
[-3.9312, -3.0055, -3.1729],
[-3.8588, -2.9139, -3.1794],
[-4.1534, -3.2404, -3.5665]]]]
)
mean = torch.tensor(
[[[[ 0.0191, 0.0459, 0.0131],
[-0.0059, 0.0254, -0.0100],
[ 0.0359, 0.0406, 0.0242],
[ 0.0331, 0.0438, 0.0255]],
[[ 0.0214, 0.0502, 0.0622],
[ 0.0371, 0.0368, 0.0517],
[ 0.0217, 0.0855, 0.0874],
[ 0.0144, 0.0475, 0.0470]],
[[-0.0602, -0.0791, -0.0784],
[-0.0443, -0.0765, -0.0701],
[-0.0654, -0.0709, -0.0788],
[-0.0608, -0.0721, -0.0688]]]]
)
bins = _bins - 1 + torch.round(decimal.forward_(mean))
cdf = cdfDiscreteLogitstic(bins, mean, logscale, decimal=decimal).detach().numpy()
pList = []
for i in range(bins.shape[0]):
logp = logDiscreteLogistic(bins[i: i + 1], mean, logscale, decimal=decimal).detach().numpy()
pList.append(np.exp(logp).reshape(mean.shape))
pList = np.array(pList)
_cdf = np.cumsum(pList, 0)
assert np.allclose(cdf, _cdf)
def test_mixDixLogisticCDF():
mean = torch.tensor(
[[[[-0.2414, 0.2089, -0.0209, -0.1279]],
[[ 0.7791, 0.1031, 0.0940, 0.1678]],
[[ 0.0095, 0.0391, -0.0318, -0.2183]]],
[[[-0.1466, 0.2090, -0.0594, -0.0837]],
[[ 0.8711, 0.0540, 0.0940, 0.0859]],
[[-0.0683, -0.0204, -0.0340, -0.0587]]],
[[[-0.1994, -0.0442, -0.0307, -0.0823]],
[[ 1.0158, 0.0636, 0.0832, 0.0717]],
[[-0.1863, -0.0177, -0.0293, -0.0708]]],
[[[-0.3517, 0.1062, -0.0362, -0.1661]],
[[ 0.6567, 0.1452, 0.0294, 0.0864]],
[[-0.1384, -0.0171, -0.0195, -0.0710]]],
[[[-0.3158, 0.2068, 0.1114, -0.1251]],
[[ 0.5600, 0.1987, 0.1891, 0.1754]],
[[-0.2758, -0.1032, -0.0435, -0.1156]]]])
logscale = torch.tensor(
[[[[-3.1292, -4.0168, -3.2886, -2.5948]],
[[-2.8226, -2.3489, -2.8613, -2.3892]],
[[-3.3502, -3.4929, -2.9572, -2.7060]]],
[[[-3.4556, -4.0166, -2.7471, -3.1203]],
[[-2.6906, -3.6062, -2.8620, -3.0673]],
[[-3.2775, -3.3661, -3.2897, -4.0553]]],
[[[-3.4652, -3.3828, -3.3053, -3.6945]],
[[-2.7657, -2.9172, -3.4067, -3.7734]],
[[-3.4817, -3.0397, -2.8021, -3.1398]]],
[[[-2.7246, -3.7798, -4.1237, -2.8605]],
[[-3.0524, -2.6628, -2.4833, -3.0913]],
[[-4.0249, -3.8364, -3.7608, -2.7111]]],
[[[-3.5460, -4.0208, -2.9837, -3.1288]],
[[-3.2062, -2.1702, -2.2238, -2.6122]],
[[-3.1754, -3.0892, -2.3359, -2.4321]]]])
mixing = torch.tensor(
[[[[ 1.3161, 0.8664, 1.7648, -0.7598, -0.8658],
[-3.7472, -3.6553, 5.2783, 0.2242, -3.6304],
[-0.7378, 0.2730, 1.8044, 0.7450, -1.6218],
[-0.8105, 1.8833, 1.8243, -0.7879, -1.1211]]],
[[[ 1.3952, -0.8232, -1.0135, 1.8041, 0.9846],
[-0.4372, 1.1296, 1.5473, -0.0661, -0.5995],
[-0.5167, 1.5559, 1.2607, -0.3227, -0.8687],
[-0.6226, 1.5024, 1.4221, 1.4741, -0.4409]]],
[[[ 1.3045, 1.8551, 0.1755, -0.6253, -1.2045],
[-0.9858, 1.5529, -0.6332, 1.4569, -1.1089],
[-0.5954, 1.2305, 1.4068, 0.7919, -0.3811],
[-0.2997, 0.6804, 2.0660, 1.1353, -0.9155]]]])
bins = _bins - 1 + torch.round(decimal.forward_(mean.permute([1, 2, 3, 0])) * mixing).sum(-1).reshape(1, *mean.shape[1:])
cdf = cdfMixDiscreteLogistic(bins, mean, logscale, mixing, decimal=decimal)
pList = []
for i in range(bins.shape[0]):
logp = logMixDiscreteLogistic(bins[i: i + 1], mean, logscale, mixing, decimal=decimal).detach().numpy()
pList.append(np.exp(logp).reshape(logp.shape[1:]))
pList = np.array(pList)
_cdf = np.cumsum(pList, 0)
assert np.allclose(cdf, _cdf)
if __name__ == "__main__":
test_disLogisticCDF()
test_mixDixLogisticCDF()
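# --- Editorial note (not part of the original test file) ---
# Both tests assert the discrete identity CDF[k] = sum_{j <= k} P[j];
# a minimal standalone illustration of that identity with plain numpy:
def _editorial_cdf_identity_demo():
    p = np.array([0.1, 0.2, 0.3, 0.4])  # any normalized discrete PMF
    assert np.allclose(np.cumsum(p), [0.1, 0.3, 0.6, 1.0])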
| 36.19084
| 125
| 0.494832
|
import os
import sys
sys.path.append(os.getcwd())
import numpy as np
import torch
import flow
from utils import cdfDiscreteLogitstic, cdfMixDiscreteLogistic
from utils import logDiscreteLogistic, logMixDiscreteLogistic
nbins = 4096
_bins = torch.arange(-nbins // 2, nbins // 2).reshape(-1, 1, 1, 1, 1)
decimal = flow.ScalingNshifting(256, -128)
def test_disLogisticCDF():
logscale = torch.tensor(
[[[[-3.6826, -3.0157, -3.6032],
[-3.7063, -3.0269, -3.5338],
[-3.5311, -2.9907, -3.3516],
[-3.9300, -3.3121, -3.8110]],
[[-3.1022, -3.0692, -3.2039],
[-2.9466, -3.0006, -3.2969],
[-2.7636, -2.5691, -2.9628],
[-3.3657, -3.2948, -3.5318]],
[[-3.9748, -3.0670, -3.2399],
[-3.9312, -3.0055, -3.1729],
[-3.8588, -2.9139, -3.1794],
[-4.1534, -3.2404, -3.5665]]]]
)
mean = torch.tensor(
[[[[ 0.0191, 0.0459, 0.0131],
[-0.0059, 0.0254, -0.0100],
[ 0.0359, 0.0406, 0.0242],
[ 0.0331, 0.0438, 0.0255]],
[[ 0.0214, 0.0502, 0.0622],
[ 0.0371, 0.0368, 0.0517],
[ 0.0217, 0.0855, 0.0874],
[ 0.0144, 0.0475, 0.0470]],
[[-0.0602, -0.0791, -0.0784],
[-0.0443, -0.0765, -0.0701],
[-0.0654, -0.0709, -0.0788],
[-0.0608, -0.0721, -0.0688]]]]
)
bins = _bins - 1 + torch.round(decimal.forward_(mean))
cdf = cdfDiscreteLogitstic(bins, mean, logscale, decimal=decimal).detach().numpy()
pList = []
for i in range(bins.shape[0]):
logp = logDiscreteLogistic(bins[i: i + 1], mean, logscale, decimal=decimal).detach().numpy()
pList.append(np.exp(logp).reshape(mean.shape))
pList = np.array(pList)
_cdf = np.cumsum(pList, 0)
assert np.allclose(cdf, _cdf)
def test_mixDixLogisticCDF():
mean = torch.tensor(
[[[[-0.2414, 0.2089, -0.0209, -0.1279]],
[[ 0.7791, 0.1031, 0.0940, 0.1678]],
[[ 0.0095, 0.0391, -0.0318, -0.2183]]],
[[[-0.1466, 0.2090, -0.0594, -0.0837]],
[[ 0.8711, 0.0540, 0.0940, 0.0859]],
[[-0.0683, -0.0204, -0.0340, -0.0587]]],
[[[-0.1994, -0.0442, -0.0307, -0.0823]],
[[ 1.0158, 0.0636, 0.0832, 0.0717]],
[[-0.1863, -0.0177, -0.0293, -0.0708]]],
[[[-0.3517, 0.1062, -0.0362, -0.1661]],
[[ 0.6567, 0.1452, 0.0294, 0.0864]],
[[-0.1384, -0.0171, -0.0195, -0.0710]]],
[[[-0.3158, 0.2068, 0.1114, -0.1251]],
[[ 0.5600, 0.1987, 0.1891, 0.1754]],
[[-0.2758, -0.1032, -0.0435, -0.1156]]]])
logscale = torch.tensor(
[[[[-3.1292, -4.0168, -3.2886, -2.5948]],
[[-2.8226, -2.3489, -2.8613, -2.3892]],
[[-3.3502, -3.4929, -2.9572, -2.7060]]],
[[[-3.4556, -4.0166, -2.7471, -3.1203]],
[[-2.6906, -3.6062, -2.8620, -3.0673]],
[[-3.2775, -3.3661, -3.2897, -4.0553]]],
[[[-3.4652, -3.3828, -3.3053, -3.6945]],
[[-2.7657, -2.9172, -3.4067, -3.7734]],
[[-3.4817, -3.0397, -2.8021, -3.1398]]],
[[[-2.7246, -3.7798, -4.1237, -2.8605]],
[[-3.0524, -2.6628, -2.4833, -3.0913]],
[[-4.0249, -3.8364, -3.7608, -2.7111]]],
[[[-3.5460, -4.0208, -2.9837, -3.1288]],
[[-3.2062, -2.1702, -2.2238, -2.6122]],
[[-3.1754, -3.0892, -2.3359, -2.4321]]]])
mixing = torch.tensor(
[[[[ 1.3161, 0.8664, 1.7648, -0.7598, -0.8658],
[-3.7472, -3.6553, 5.2783, 0.2242, -3.6304],
[-0.7378, 0.2730, 1.8044, 0.7450, -1.6218],
[-0.8105, 1.8833, 1.8243, -0.7879, -1.1211]]],
[[[ 1.3952, -0.8232, -1.0135, 1.8041, 0.9846],
[-0.4372, 1.1296, 1.5473, -0.0661, -0.5995],
[-0.5167, 1.5559, 1.2607, -0.3227, -0.8687],
[-0.6226, 1.5024, 1.4221, 1.4741, -0.4409]]],
[[[ 1.3045, 1.8551, 0.1755, -0.6253, -1.2045],
[-0.9858, 1.5529, -0.6332, 1.4569, -1.1089],
[-0.5954, 1.2305, 1.4068, 0.7919, -0.3811],
[-0.2997, 0.6804, 2.0660, 1.1353, -0.9155]]]])
bins = _bins - 1 + torch.round(decimal.forward_(mean.permute([1, 2, 3, 0])) * mixing).sum(-1).reshape(1, *mean.shape[1:])
cdf = cdfMixDiscreteLogistic(bins, mean, logscale, mixing, decimal=decimal)
pList = []
for i in range(bins.shape[0]):
logp = logMixDiscreteLogistic(bins[i: i + 1], mean, logscale, mixing, decimal=decimal).detach().numpy()
pList.append(np.exp(logp).reshape(logp.shape[1:]))
pList = np.array(pList)
_cdf = np.cumsum(pList, 0)
assert np.allclose(cdf, _cdf)
if __name__ == "__main__":
test_disLogisticCDF()
test_mixDixLogisticCDF()
| true
| true
|
790ccbf04d0a0c8937f8e6013f1a21b3be29c911
| 8,889
|
py
|
Python
|
nixnet/database/_subframe.py
|
ni-ldp/nixnet-python
|
83f30c5b44098de0dc4828838e263b7be0866228
|
[
"MIT"
] | 16
|
2017-06-14T19:44:45.000Z
|
2022-02-06T15:14:52.000Z
|
nixnet/database/_subframe.py
|
ni-ldp/nixnet-python
|
83f30c5b44098de0dc4828838e263b7be0866228
|
[
"MIT"
] | 216
|
2017-06-15T16:41:10.000Z
|
2021-09-23T23:00:50.000Z
|
nixnet/database/_subframe.py
|
ni-ldp/nixnet-python
|
83f30c5b44098de0dc4828838e263b7be0866228
|
[
"MIT"
] | 23
|
2017-06-14T22:51:08.000Z
|
2022-03-03T03:04:40.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import typing # NOQA: F401
from nixnet import _cconsts
from nixnet import _errors
from nixnet import _props
from nixnet import constants
from nixnet.database import _collection
from nixnet.database import _database_object
from nixnet.database import _find_object
from nixnet.database import _frame
# workaround to avoid circular imports caused by mypy type annotations
MYPY = False
if MYPY:
from nixnet.database import _pdu # NOQA: F401
class SubFrame(_database_object.DatabaseObject):
"""Database subframe"""
def __init__(
self,
**kwargs # type: int
):
# type: (...) -> None
if not kwargs or '_handle' not in kwargs:
raise TypeError()
self._handle = kwargs['_handle']
from nixnet.database import _signal
self._dyn_signals = _collection.DbCollection(
self._handle, constants.ObjectClass.SIGNAL, _cconsts.NX_PROP_SUBFRM_DYN_SIG_REFS, _signal.Signal)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._handle == other._handle
else:
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
else:
return not result
def __hash__(self):
return hash(self._handle)
def __repr__(self):
return '{}(handle={})'.format(type(self).__name__, self._handle)
def check_config_status(self):
# type: () -> None
"""Check this subframe's configuration status.
By default, incorrectly configured subframes in the database are not returned from
:any:`Frame.mux_subframes` because they cannot be used in the bus communication.
You can change this behavior by setting :any:`Database.show_invalid_from_open` to `True`.
When a subframe configuration status becomes invalid after the database is opened,
the subframe still is returned from :any:`Frame.mux_subframes`
even if :any:`Database.show_invalid_from_open` is `False`.
Raises:
:any:`XnetError`: The subframe is incorrectly configured.
"""
status_code = _props.get_subframe_config_status(self._handle)
_errors.check_for_error(status_code)
def find(
self,
object_class, # type: typing.Type[_database_object.DatabaseObject]
object_name, # type: typing.Text
):
# type: (...) -> _database_object.DatabaseObject
"""Finds an object in the database.
This function finds a database object relative to this parent object.
This object may be a grandparent or great-grandparent.
If this object is a direct parent
(for example, :any:`Frame<_frame.Frame>` for :any:`Signal<_signal.Signal>`),
the ``object_name`` to search for can be short, and the search proceeds quickly.
If this object is not a direct parent
(for example, :any:`Database` for :any:`Signal<_signal.Signal>`),
the ``object_name`` to search for must be qualified such
that it is unique within the scope of this object.
For example, if the class of this object is :any:`Cluster`,
and ``object_class`` is :any:`Signal<_signal.Signal>`,
you can specify ``object_name`` of ``mySignal``,
assuming that signal name is unique to the cluster.
If not, you must include the :any:`Frame<_frame.Frame>` name as a prefix,
such as ``myFrameA.mySignal``.
NI-XNET supports the following subclasses of ``DatabaseObject`` as arguments for ``object_class``:
* :any:`nixnet.database.Cluster<Cluster>`
* :any:`nixnet.database.Frame<_frame.Frame>`
* :any:`nixnet.database.Pdu<Pdu>`
* :any:`nixnet.database.Signal<_signal.Signal>`
* :any:`nixnet.database.SubFrame<SubFrame>`
* :any:`nixnet.database.Ecu<Ecu>`
* :any:`nixnet.database.LinSched<LinSched>`
* :any:`nixnet.database.LinSchedEntry<LinSchedEntry>`
Args:
object_class(``DatabaseObject``): The class of the object to find.
object_name(str): The name of the object to find.
Returns:
An instance of the found object.
Raises:
ValueError: Unsupported value provided for argument ``object_class``.
:any:`XnetError`: The object is not found.
"""
return _find_object.find_object(self._handle, object_class, object_name)
@property
def dyn_signals(self):
# type: () -> _collection.DbCollection
""":any:`DbCollection`: Returns a collection of dynamic :any:`Signal<_signal.Signal>` objects in the subframe.
Those signals are transmitted when the multiplexer signal
in the frame has the multiplexer value defined in the subframe.
"""
return self._dyn_signals
@property
def frm(self):
# type: () -> _frame.Frame
""":any:`Frame<_frame.Frame>`: Returns the reference to the parent frame.
The parent frame is defined when the subframe is created,
and you cannot change it afterwards.
"""
handle = _props.get_subframe_frm_ref(self._handle)
return _frame.Frame(_handle=handle)
@property
def mux_value(self):
# type: () -> int
"""int: Get or set the multiplexer value for this subframe.
This property specifies the multiplexer signal value used when the
dynamic signals in this subframe are transmitted in the frame.
Only one subframe is transmitted at a time in the frame.
There also is a multiplexer value for a signal object as a read-only property.
It reflects the value set on the parent subframe object.
This property is required. If the property does not contain a valid value,
and you create an XNET session that uses this subframe,
the session returns an error.
To ensure that the property contains a valid value,
you can do one of the following:
* Use a database file (or alias) to create the session.
The file formats require a valid value in the text for this property.
* Set a value at runtime using this property.
This is needed when you create your own in-memory database (*:memory:*) rather than use a file.
The property does not contain a default in this case,
so you must set a valid value prior to creating a session.
"""
return _props.get_subframe_mux_value(self._handle)
@mux_value.setter
def mux_value(self, value):
# type: (int) -> None
_props.set_subframe_mux_value(self._handle, value)
@property
def name(self):
# type: () -> typing.Text
"""str: Get or set the name of the subframe object.
Lowercase letters, uppercase letters, numbers,
and the underscore (_) are valid characters for the short name.
The space ( ), period (.), and other special characters are not supported within the name.
The short name must begin with a letter (uppercase or lowercase) or underscore, and not a number.
The short name is limited to 128 characters.
A subframe name must be unique for all subframes in a frame.
This short name does not include qualifiers to ensure that it is unique,
such as the database, cluster, and frame name. It is for display purposes.
"""
return _props.get_subframe_name(self._handle)
@name.setter
def name(self, value):
# type: (typing.Text) -> None
_props.set_subframe_name(self._handle, value)
@property
def pdu(self):
# type: () -> _pdu.Pdu
""":any:`Pdu`: Returns the subframe's parent PDU.
This property returns the reference to the subframe's parent PDU.
The parent PDU is defined when the subframe object is created.
You cannot change it afterwards.
"""
from nixnet.database import _pdu # NOQA: F811
handle = _props.get_subframe_pdu_ref(self._handle)
return _pdu.Pdu(_handle=handle)
@property
def name_unique_to_cluster(self):
# type: () -> typing.Text
"""str: Returns a subframe name unique to the cluster that contains the subframe.
If the single name is not unique within the cluster, the name is <frame-name>.<subframe-name>.
        You can pass this name to the `find` function to retrieve a reference to the object;
        the short name alone is not guaranteed to succeed in `find`
        because it may not be unique within the cluster.
"""
return _props.get_subframe_name_unique_to_cluster(self._handle)
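# --- Editorial sketch (not part of the original module) ---
# Qualified-name lookup as described in SubFrame.find's docstring. The frame
# and signal names are hypothetical, and the cluster is assumed to come from
# an already-opened NI-XNET database.
def _editorial_find_demo(cluster):
    from nixnet import database
    # A short name works when unique; otherwise qualify with the frame name:
    return cluster.find(database.Signal, 'myFrameA.mySignal')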
| 38.647826
| 118
| 0.657779
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import typing
from nixnet import _cconsts
from nixnet import _errors
from nixnet import _props
from nixnet import constants
from nixnet.database import _collection
from nixnet.database import _database_object
from nixnet.database import _find_object
from nixnet.database import _frame
MYPY = False
if MYPY:
from nixnet.database import _pdu
class SubFrame(_database_object.DatabaseObject):
def __init__(
self,
**kwargs
):
if not kwargs or '_handle' not in kwargs:
raise TypeError()
self._handle = kwargs['_handle']
from nixnet.database import _signal
self._dyn_signals = _collection.DbCollection(
self._handle, constants.ObjectClass.SIGNAL, _cconsts.NX_PROP_SUBFRM_DYN_SIG_REFS, _signal.Signal)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._handle == other._handle
else:
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
else:
return not result
def __hash__(self):
return hash(self._handle)
def __repr__(self):
return '{}(handle={})'.format(type(self).__name__, self._handle)
def check_config_status(self):
status_code = _props.get_subframe_config_status(self._handle)
_errors.check_for_error(status_code)
def find(
self,
object_class,
object_name,
):
return _find_object.find_object(self._handle, object_class, object_name)
@property
def dyn_signals(self):
return self._dyn_signals
@property
def frm(self):
handle = _props.get_subframe_frm_ref(self._handle)
return _frame.Frame(_handle=handle)
@property
def mux_value(self):
return _props.get_subframe_mux_value(self._handle)
@mux_value.setter
def mux_value(self, value):
_props.set_subframe_mux_value(self._handle, value)
@property
def name(self):
return _props.get_subframe_name(self._handle)
@name.setter
def name(self, value):
_props.set_subframe_name(self._handle, value)
@property
def pdu(self):
from nixnet.database import _pdu
handle = _props.get_subframe_pdu_ref(self._handle)
return _pdu.Pdu(_handle=handle)
@property
def name_unique_to_cluster(self):
return _props.get_subframe_name_unique_to_cluster(self._handle)
| true
| true
|
790cccf2c4d720eb44d91f23e94c4eb73b9788d1
| 3,201
|
py
|
Python
|
chb/util/dotutil.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/util/dotutil.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/util/dotutil.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 Henny Sipma
# Copyright (c) 2021-2022 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""Utilities to print and save graphviz dot files."""
import os
import subprocess
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from chb.util.DotGraph import DotGraph
def print_dot(
path: str,
filename: str,
g: "DotGraph") -> str:
if not os.path.isabs(filename):
filename = os.path.join(path, filename)
dotfilename = filename + ".dot"
pdffilename = filename + ".pdf"
# write graph to dot format
with open(dotfilename, "w") as fp:
fp.write(str(g))
# convert dot file to pdf
cmd = ["dot", "-Tpdf", "-o", pdffilename, dotfilename]
try:
subprocess.call(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("Error in processing dot file: " + dotfilename)
print(e.output)
print(e.args)
exit(1)
return pdffilename
def save_dot(path: str, filename: str, g: "DotGraph") -> None:
if not os.path.isabs(filename):
filename = os.path.join(path, filename)
dotfilename = filename + ".dot"
with open(dotfilename, "w") as fp:
fp.write(str(g))
def save_svg(path: str, filename: str, g: "DotGraph") -> None:
if not os.path.isabs(filename):
filename = os.path.join(path, filename)
dotfilename = filename + ".dot"
svgfilename = filename + ".svg"
with open(dotfilename, "w") as fp:
fp.write(str(g))
cmd = ["dot", "-Tsvg", "-o", svgfilename, dotfilename]
try:
subprocess.call(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("Error in processing dot file: " + dotfilename)
print(e.output)
print(e.args)
exit(1)
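# --- Editorial usage sketch (not part of the original module) ---
# Typical round trip; assumes a DotGraph built elsewhere (its constructor is
# not shown here) and Graphviz's `dot` binary on the PATH.
def _editorial_dot_demo(g: "DotGraph") -> None:
    pdffilename = print_dot("/tmp", "cfg_example", g)  # writes .dot and .pdf
    save_svg("/tmp", "cfg_example", g)                 # writes .dot and .svg
    print("wrote", pdffilename)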
| 36.375
| 80
| 0.634177
|
import os
import subprocess
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from chb.util.DotGraph import DotGraph
def print_dot(
path: str,
filename: str,
g: "DotGraph") -> str:
if not os.path.isabs(filename):
filename = os.path.join(path, filename)
dotfilename = filename + ".dot"
pdffilename = filename + ".pdf"
with open(dotfilename, "w") as fp:
fp.write(str(g))
cmd = ["dot", "-Tpdf", "-o", pdffilename, dotfilename]
try:
subprocess.call(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("Error in processing dot file: " + dotfilename)
print(e.output)
print(e.args)
exit(1)
return pdffilename
def save_dot(path: str, filename: str, g: "DotGraph") -> None:
if not os.path.isabs(filename):
filename = os.path.join(path, filename)
dotfilename = filename + ".dot"
with open(dotfilename, "w") as fp:
fp.write(str(g))
def save_svg(path: str, filename: str, g: "DotGraph") -> None:
if not os.path.isabs(filename):
filename = os.path.join(path, filename)
dotfilename = filename + ".dot"
svgfilename = filename + ".svg"
with open(dotfilename, "w") as fp:
fp.write(str(g))
cmd = ["dot", "-Tsvg", "-o", svgfilename, dotfilename]
try:
subprocess.call(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("Error in processing dot file: " + dotfilename)
print(e.output)
print(e.args)
exit(1)
| true
| true
|
790ccddf625df7077e9f9cd8d1083aa2f99c21c0
| 1,286
|
py
|
Python
|
src/bos/operators/utils/clients/bos/__init__.py
|
Cray-HPE/bos
|
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
|
[
"MIT"
] | 1
|
2022-03-15T18:17:11.000Z
|
2022-03-15T18:17:11.000Z
|
src/bos/operators/utils/clients/bos/__init__.py
|
Cray-HPE/bos
|
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
|
[
"MIT"
] | null | null | null |
src/bos/operators/utils/clients/bos/__init__.py
|
Cray-HPE/bos
|
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
|
[
"MIT"
] | 1
|
2022-03-06T12:47:06.000Z
|
2022-03-06T12:47:06.000Z
|
# Copyright 2021 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# (MIT License)
from bos.operators.utils import PROTOCOL
API_VERSION = 'v1'
SERVICE_NAME = 'cray-bos'
ENDPOINT = "%s://%s/%s" % (PROTOCOL, SERVICE_NAME, API_VERSION)
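# --- Editorial note (not part of the original module) ---
# With PROTOCOL == 'http' (illustrative; the real value comes from
# bos.operators.utils), ENDPOINT resolves to 'http://cray-bos/v1'.
# A quick standalone check of the formatting:
assert "%s://%s/%s" % ("http", SERVICE_NAME, API_VERSION) == "http://cray-bos/v1"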
| 47.62963
| 76
| 0.773717
|
from bos.operators.utils import PROTOCOL
API_VERSION = 'v1'
SERVICE_NAME = 'cray-bos'
ENDPOINT = "%s://%s/%s" % (PROTOCOL, SERVICE_NAME, API_VERSION)
| true
| true
|
790ccece6af78a479088fbc4ad29bcc5905f31d8
| 352
|
py
|
Python
|
anaconda/6.00.1x.PSet1.P1.py
|
coshkun/6.00.1x-MITx-Course-Training-Lab-Notes
|
63e755dc81fd50a7b1372074a4a73e50021a233b
|
[
"MIT"
] | null | null | null |
anaconda/6.00.1x.PSet1.P1.py
|
coshkun/6.00.1x-MITx-Course-Training-Lab-Notes
|
63e755dc81fd50a7b1372074a4a73e50021a233b
|
[
"MIT"
] | null | null | null |
anaconda/6.00.1x.PSet1.P1.py
|
coshkun/6.00.1x-MITx-Course-Training-Lab-Notes
|
63e755dc81fd50a7b1372074a4a73e50021a233b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 12 11:56:36 2017
Problemset1 - Problem 1
Note:
's' is given by system like s = 'azcbobobegghakl'
@author: coskun
"""
s = 'azcbobobegghakl'
# Paste your code into this box
nvl=0
for c in s:
if c=='a' or c=='e' or c=='i' or c=='o' or c=='u':
nvl += 1
print("Number of vowels: " + str(nvl))
| 22
| 54
| 0.590909
|
s = 'azcbobobegghakl'
nvl=0
for c in s:
if c=='a' or c=='e' or c=='i' or c=='o' or c=='u':
nvl += 1
print("Number of vowels: " + str(nvl))
| true
| true
|
790ccf1f2beb415d25e395355cb51083863e7fb0
| 2,566
|
py
|
Python
|
source/FAST/Examples/Python/pyfast_and_pyside2_custom_window.py
|
andreped/FAST
|
361819190ea0ae5a2f068e7bd808a1c70af5a171
|
[
"BSD-2-Clause"
] | null | null | null |
source/FAST/Examples/Python/pyfast_and_pyside2_custom_window.py
|
andreped/FAST
|
361819190ea0ae5a2f068e7bd808a1c70af5a171
|
[
"BSD-2-Clause"
] | null | null | null |
source/FAST/Examples/Python/pyfast_and_pyside2_custom_window.py
|
andreped/FAST
|
361819190ea0ae5a2f068e7bd808a1c70af5a171
|
[
"BSD-2-Clause"
] | null | null | null |
## @example pyfast_and_pyside2_custom_window.py
# This example demonstrates how to use FAST in an existing PySide2 application.
#
# @m_class{m-block m-warning} @par PySide2 Qt Version
# @parblock
# For this example you <b>must</b> use the same Qt version of PySide2 as used in FAST (5.14.0)
# Do this with: <b>pip install pyside2==5.14.0</b>
# @endparblock
#
# @image html images/examples/python/pyfast_and_pyside_custom_window.jpg width=350px;
from PySide2.QtWidgets import *
from PySide2.QtOpenGL import QGLWidget
from PySide2.QtCore import Slot
import PySide2.QtSvg # Must import this before fast due to conflicting symbols
from shiboken2 import wrapInstance
import fast
import threading
import sys
#fast.Reporter.setGlobalReportMethod(fast.Reporter.COUT)
# Create a simple window widget with pyside2
class Window(QWidget):
def __init__(self):
super(Window, self).__init__()
self.setWindowTitle('pyFAST + PySide2')
# Create button
self.button = QPushButton("Restart FAST pipeline")
# Create FAST view
self.view = fast.View()
self.installEventFilter(wrapInstance(int(self.view.asQGLWidget()), QGLWidget))
self.view.set2DMode()
# Create layout and add widgets
layout = QVBoxLayout()
layout.addWidget(wrapInstance(int(self.view.asQGLWidget()), QGLWidget))
layout.addWidget(self.button)
self.setLayout(layout)
# Connect button click event
self.button.clicked.connect(self.restartPipeline)
self.resize(512, 512)
@Slot()
def restartPipeline(self):
# Create FAST computation thread
# This is needed to run computations smoothly in the background
# The computation thread must live in the object to avoid being destroyed when this function is done.
self.computationThread = fast.ComputationThread.create()
self.computationThread.addView(self.view)
# Setup a FAST pipeline
streamer = fast.ImageFileStreamer \
.create(fast.Config.getTestDataPath() + '/US/Heart/ApicalFourChamber/US-2D_#.mhd')
renderer = fast.ImageRenderer.create() \
.connect(streamer)
self.view.removeAllRenderers()
self.view.addRenderer(renderer)
self.view.reinitialize()
self.computationThread.start()
if __name__ == '__main__':
# Create the Qt Application
app = QApplication(sys.argv)
# Create and show the window
window = Window()
window.show()
# Run the main Qt loop
sys.exit(app.exec_())
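# --- Editorial usage note (not part of the original example) ---
# To run (assumes pyside2==5.14.0, pyFAST, and the FAST test data are
# installed/downloaded, as noted in the header comments):
#     python pyfast_and_pyside2_custom_window.py
# Click "Restart FAST pipeline" to (re)start the streaming ultrasound view.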
| 32.897436
| 109
| 0.693687
|
from PySide2.QtWidgets import *
from PySide2.QtOpenGL import QGLWidget
from PySide2.QtCore import Slot
import PySide2.QtSvg
from shiboken2 import wrapInstance
import fast
import threading
import sys
class Window(QWidget):
def __init__(self):
super(Window, self).__init__()
self.setWindowTitle('pyFAST + PySide2')
self.button = QPushButton("Restart FAST pipeline")
self.view = fast.View()
self.installEventFilter(wrapInstance(int(self.view.asQGLWidget()), QGLWidget))
self.view.set2DMode()
layout = QVBoxLayout()
layout.addWidget(wrapInstance(int(self.view.asQGLWidget()), QGLWidget))
layout.addWidget(self.button)
self.setLayout(layout)
self.button.clicked.connect(self.restartPipeline)
self.resize(512, 512)
@Slot()
def restartPipeline(self):
self.computationThread = fast.ComputationThread.create()
self.computationThread.addView(self.view)
streamer = fast.ImageFileStreamer \
.create(fast.Config.getTestDataPath() + '/US/Heart/ApicalFourChamber/US-2D_#.mhd')
renderer = fast.ImageRenderer.create() \
.connect(streamer)
self.view.removeAllRenderers()
self.view.addRenderer(renderer)
self.view.reinitialize()
self.computationThread.start()
if __name__ == '__main__':
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| true
| true
|
790cd075c19b673485ce18212759c2af68ca0bb6
| 10,825
|
py
|
Python
|
tvdb_api/models/movie.py
|
h3llrais3r/tvdbapi-v2-client
|
1210df9dd5869ccc5b63149b1b80630310a14f40
|
[
"MIT"
] | 2
|
2021-01-24T07:45:22.000Z
|
2021-11-15T11:29:25.000Z
|
tvdb_api/models/movie.py
|
h3llrais3r/tvdb_api_v2
|
1210df9dd5869ccc5b63149b1b80630310a14f40
|
[
"MIT"
] | null | null | null |
tvdb_api/models/movie.py
|
h3llrais3r/tvdb_api_v2
|
1210df9dd5869ccc5b63149b1b80630310a14f40
|
[
"MIT"
] | 1
|
2020-05-07T10:16:15.000Z
|
2020-05-07T10:16:15.000Z
|
# coding: utf-8
"""
TheTVDB API v2
API v3 targets v2 functionality with a few minor additions. The API is accessible via https://api.thetvdb.com and provides the following REST endpoints in JSON format. How to use this API documentation ---------------- You may browse the API routes without authentication, but if you wish to send requests to the API and see response data, then you must authenticate. 1. Obtain a JWT token by `POST`ing to the `/login` route in the `Authentication` section with your API key and credentials. 1. Paste the JWT token from the response into the \"JWT Token\" field at the top of the page and click the 'Add Token' button. You will now be able to use the remaining routes to send requests to the API and get a response. Language Selection ---------------- Language selection is done via the `Accept-Language` header. At the moment, you may only pass one language abbreviation in the header at a time. Valid language abbreviations can be found at the `/languages` route.. Authentication ---------------- Authentication to use the API is similar to the How-to section above. Users must `POST` to the `/login` route with their API key and credentials in the following format in order to obtain a JWT token. `{\"apikey\":\"APIKEY\",\"username\":\"USERNAME\",\"userkey\":\"USERKEY\"}` Note that the username and key are ONLY required for the `/user` routes. The user's key is labled `Account Identifier` in the account section of the main site. The token is then used in all subsequent requests by providing it in the `Authorization` header. The header will look like: `Authorization: Bearer <yourJWTtoken>`. Currently, the token expires after 24 hours. You can `GET` the `/refresh_token` route to extend that expiration date. Versioning ---------------- You may request a different version of the API by including an `Accept` header in your request with the following format: `Accept:application/vnd.thetvdb.v$VERSION`. This documentation automatically uses the version seen at the top and bottom of the page. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Movie(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'artworks': 'list[MovieArtwork]',
'genres': 'list[MovieGenre]',
'id': 'int',
'people': 'MoviePeople',
'release_dates': 'list[MovieReleaseDate]',
'remoteids': 'list[MovieRemoteId]',
'runtime': 'int',
'trailers': 'list[MovieTrailer]',
'translations': 'list[MovieTranslation]',
'url': 'str'
}
attribute_map = {
'artworks': 'artworks',
'genres': 'genres',
'id': 'id',
'people': 'people',
'release_dates': 'release_dates',
'remoteids': 'remoteids',
'runtime': 'runtime',
'trailers': 'trailers',
'translations': 'translations',
'url': 'url'
}
def __init__(self, artworks=None, genres=None, id=None, people=None, release_dates=None, remoteids=None, runtime=None, trailers=None, translations=None, url=None): # noqa: E501
"""Movie - a model defined in Swagger""" # noqa: E501
self._artworks = None
self._genres = None
self._id = None
self._people = None
self._release_dates = None
self._remoteids = None
self._runtime = None
self._trailers = None
self._translations = None
self._url = None
self.discriminator = None
if artworks is not None:
self.artworks = artworks
if genres is not None:
self.genres = genres
if id is not None:
self.id = id
if people is not None:
self.people = people
if release_dates is not None:
self.release_dates = release_dates
if remoteids is not None:
self.remoteids = remoteids
if runtime is not None:
self.runtime = runtime
if trailers is not None:
self.trailers = trailers
if translations is not None:
self.translations = translations
if url is not None:
self.url = url
@property
def artworks(self):
"""Gets the artworks of this Movie. # noqa: E501
:return: The artworks of this Movie. # noqa: E501
:rtype: list[MovieArtwork]
"""
return self._artworks
@artworks.setter
def artworks(self, artworks):
"""Sets the artworks of this Movie.
:param artworks: The artworks of this Movie. # noqa: E501
:type: list[MovieArtwork]
"""
self._artworks = artworks
@property
def genres(self):
"""Gets the genres of this Movie. # noqa: E501
:return: The genres of this Movie. # noqa: E501
:rtype: list[MovieGenre]
"""
return self._genres
@genres.setter
def genres(self, genres):
"""Sets the genres of this Movie.
:param genres: The genres of this Movie. # noqa: E501
:type: list[MovieGenre]
"""
self._genres = genres
@property
def id(self):
"""Gets the id of this Movie. # noqa: E501
:return: The id of this Movie. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Movie.
:param id: The id of this Movie. # noqa: E501
:type: int
"""
self._id = id
@property
def people(self):
"""Gets the people of this Movie. # noqa: E501
:return: The people of this Movie. # noqa: E501
:rtype: MoviePeople
"""
return self._people
@people.setter
def people(self, people):
"""Sets the people of this Movie.
:param people: The people of this Movie. # noqa: E501
:type: MoviePeople
"""
self._people = people
@property
def release_dates(self):
"""Gets the release_dates of this Movie. # noqa: E501
:return: The release_dates of this Movie. # noqa: E501
:rtype: list[MovieReleaseDate]
"""
return self._release_dates
@release_dates.setter
def release_dates(self, release_dates):
"""Sets the release_dates of this Movie.
:param release_dates: The release_dates of this Movie. # noqa: E501
:type: list[MovieReleaseDate]
"""
self._release_dates = release_dates
@property
def remoteids(self):
"""Gets the remoteids of this Movie. # noqa: E501
:return: The remoteids of this Movie. # noqa: E501
:rtype: list[MovieRemoteId]
"""
return self._remoteids
@remoteids.setter
def remoteids(self, remoteids):
"""Sets the remoteids of this Movie.
:param remoteids: The remoteids of this Movie. # noqa: E501
:type: list[MovieRemoteId]
"""
self._remoteids = remoteids
@property
def runtime(self):
"""Gets the runtime of this Movie. # noqa: E501
:return: The runtime of this Movie. # noqa: E501
:rtype: int
"""
return self._runtime
@runtime.setter
def runtime(self, runtime):
"""Sets the runtime of this Movie.
:param runtime: The runtime of this Movie. # noqa: E501
:type: int
"""
self._runtime = runtime
@property
def trailers(self):
"""Gets the trailers of this Movie. # noqa: E501
:return: The trailers of this Movie. # noqa: E501
:rtype: list[MovieTrailer]
"""
return self._trailers
@trailers.setter
def trailers(self, trailers):
"""Sets the trailers of this Movie.
:param trailers: The trailers of this Movie. # noqa: E501
:type: list[MovieTrailer]
"""
self._trailers = trailers
@property
def translations(self):
"""Gets the translations of this Movie. # noqa: E501
:return: The translations of this Movie. # noqa: E501
:rtype: list[MovieTranslation]
"""
return self._translations
@translations.setter
def translations(self, translations):
"""Sets the translations of this Movie.
:param translations: The translations of this Movie. # noqa: E501
:type: list[MovieTranslation]
"""
self._translations = translations
@property
def url(self):
"""Gets the url of this Movie. # noqa: E501
:return: The url of this Movie. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this Movie.
:param url: The url of this Movie. # noqa: E501
:type: str
"""
self._url = url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Movie, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Movie):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
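# --- Editorial example (not generated by swagger-codegen) ---
# Round-tripping a Movie through to_dict(); the field values are hypothetical.
if __name__ == '__main__':
    example = Movie(id=42, runtime=118, url='https://thetvdb.com/movies/example')
    assert example.to_dict()['runtime'] == 118
    assert example == Movie(id=42, runtime=118, url='https://thetvdb.com/movies/example')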
| 30.928571
| 2,040
| 0.591132
|
import pprint
import re
import six
class Movie(object):
swagger_types = {
'artworks': 'list[MovieArtwork]',
'genres': 'list[MovieGenre]',
'id': 'int',
'people': 'MoviePeople',
'release_dates': 'list[MovieReleaseDate]',
'remoteids': 'list[MovieRemoteId]',
'runtime': 'int',
'trailers': 'list[MovieTrailer]',
'translations': 'list[MovieTranslation]',
'url': 'str'
}
attribute_map = {
'artworks': 'artworks',
'genres': 'genres',
'id': 'id',
'people': 'people',
'release_dates': 'release_dates',
'remoteids': 'remoteids',
'runtime': 'runtime',
'trailers': 'trailers',
'translations': 'translations',
'url': 'url'
}
def __init__(self, artworks=None, genres=None, id=None, people=None, release_dates=None, remoteids=None, runtime=None, trailers=None, translations=None, url=None):
self._artworks = None
self._genres = None
self._id = None
self._people = None
self._release_dates = None
self._remoteids = None
self._runtime = None
self._trailers = None
self._translations = None
self._url = None
self.discriminator = None
if artworks is not None:
self.artworks = artworks
if genres is not None:
self.genres = genres
if id is not None:
self.id = id
if people is not None:
self.people = people
if release_dates is not None:
self.release_dates = release_dates
if remoteids is not None:
self.remoteids = remoteids
if runtime is not None:
self.runtime = runtime
if trailers is not None:
self.trailers = trailers
if translations is not None:
self.translations = translations
if url is not None:
self.url = url
@property
def artworks(self):
return self._artworks
@artworks.setter
def artworks(self, artworks):
self._artworks = artworks
@property
def genres(self):
return self._genres
@genres.setter
def genres(self, genres):
self._genres = genres
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def people(self):
return self._people
@people.setter
def people(self, people):
self._people = people
@property
def release_dates(self):
return self._release_dates
@release_dates.setter
def release_dates(self, release_dates):
self._release_dates = release_dates
@property
def remoteids(self):
return self._remoteids
@remoteids.setter
def remoteids(self, remoteids):
self._remoteids = remoteids
@property
def runtime(self):
return self._runtime
@runtime.setter
def runtime(self, runtime):
self._runtime = runtime
@property
def trailers(self):
return self._trailers
@trailers.setter
def trailers(self, trailers):
self._trailers = trailers
@property
def translations(self):
return self._translations
@translations.setter
def translations(self, translations):
self._translations = translations
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._url = url
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Movie, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Movie):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
790cd132c4483bca78043365a481b7ec7d11cbe9
| 2,367
|
py
|
Python
|
qiskit_aqua/algorithms/components/optimizers/nlopts/esch.py
|
msoeken/aqua
|
af6a459621bcee90ed832a644ef9220644b84b03
|
[
"Apache-2.0"
] | null | null | null |
qiskit_aqua/algorithms/components/optimizers/nlopts/esch.py
|
msoeken/aqua
|
af6a459621bcee90ed832a644ef9220644b84b03
|
[
"Apache-2.0"
] | null | null | null |
qiskit_aqua/algorithms/components/optimizers/nlopts/esch.py
|
msoeken/aqua
|
af6a459621bcee90ed832a644ef9220644b84b03
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from qiskit_aqua.algorithms.components.optimizers import Optimizer
from ._nloptimizer import minimize
import logging
try:
import nlopt
except ImportError:
raise ImportWarning('nlopt cannot be imported')
logger = logging.getLogger(__name__)
class ESCH(Optimizer):
"""ESCH (evolutionary algorithm)
NLopt global optimizer, derivative-free
http://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#esch-evolutionary-algorithm
"""
ESCH_CONFIGURATION = {
'name': 'ESCH',
'description': 'GN_ESCH Optimizer',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'esch_schema',
'type': 'object',
'properties': {
'max_evals': {
'type': 'integer',
'default': 1000
}
},
'additionalProperties': False
},
'support_level': {
'gradient': Optimizer.SupportLevel.ignored,
'bounds': Optimizer.SupportLevel.supported,
'initial_point': Optimizer.SupportLevel.required
},
'options': ['max_evals'],
'optimizer': ['global']
}
def __init__(self, configuration=None):
super().__init__(configuration or self.ESCH_CONFIGURATION.copy())
def init_args(self):
pass
def optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):
super().optimize(num_vars, objective_function, gradient_function, variable_bounds, initial_point)
return minimize(nlopt.GN_ESCH, objective_function, variable_bounds, initial_point, **self._options)
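# --- Editorial usage sketch (not part of the original module) ---
# Minimizing a 2-D quadratic with the wrapper above; requires nlopt and
# qiskit_aqua at runtime. Whether default options are populated outside the
# full Aqua pipeline is an assumption; bounds and starting point are
# illustrative.
def _editorial_esch_demo():
    optimizer = ESCH()
    return optimizer.optimize(
        num_vars=2,
        objective_function=lambda x: (x[0] - 1) ** 2 + (x[1] + 2) ** 2,
        variable_bounds=[(-5.0, 5.0), (-5.0, 5.0)],
        initial_point=[0.0, 0.0])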
| 32.875
| 119
| 0.636248
|
from qiskit_aqua.algorithms.components.optimizers import Optimizer
from ._nloptimizer import minimize
import logging
try:
import nlopt
except ImportError:
raise ImportWarning('nlopt cannot be imported')
logger = logging.getLogger(__name__)
class ESCH(Optimizer):
ESCH_CONFIGURATION = {
'name': 'ESCH',
'description': 'GN_ESCH Optimizer',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'esch_schema',
'type': 'object',
'properties': {
'max_evals': {
'type': 'integer',
'default': 1000
}
},
'additionalProperties': False
},
'support_level': {
'gradient': Optimizer.SupportLevel.ignored,
'bounds': Optimizer.SupportLevel.supported,
'initial_point': Optimizer.SupportLevel.required
},
'options': ['max_evals'],
'optimizer': ['global']
}
def __init__(self, configuration=None):
super().__init__(configuration or self.ESCH_CONFIGURATION.copy())
def init_args(self):
pass
def optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):
super().optimize(num_vars, objective_function, gradient_function, variable_bounds, initial_point)
return minimize(nlopt.GN_ESCH, objective_function, variable_bounds, initial_point, **self._options)
| true
| true
|
790cd19ca8b22937365bf24b6e40ed90c79ee12b
| 1,301
|
py
|
Python
|
tests/cache_tests.py
|
valhallasw/pywikibot-core
|
32a8c3c1298a5cb077381fe202daefde82c1c5d3
|
[
"MIT"
] | null | null | null |
tests/cache_tests.py
|
valhallasw/pywikibot-core
|
32a8c3c1298a5cb077381fe202daefde82c1c5d3
|
[
"MIT"
] | null | null | null |
tests/cache_tests.py
|
valhallasw/pywikibot-core
|
32a8c3c1298a5cb077381fe202daefde82c1c5d3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""API Request cache tests."""
#
# (C) Pywikibot team, 2012-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 790cd19ca8b22937365bf24b6e40ed90c79ee12b $'
#
from pywikibot.site import BaseSite
import scripts.maintenance.cache as cache
from tests import _cache_dir
from tests.aspects import unittest, TestCase
class RequestCacheTests(TestCase):
"""Validate cache entries."""
net = False
def _check_cache_entry(self, entry):
"""Assert validity of the cache entry."""
self.assertIsInstance(entry.site, BaseSite)
self.assertIsInstance(entry.site._loginstatus, int)
self.assertIsInstance(entry.site._username, list)
if entry.site._loginstatus >= 1:
self.assertIsNotNone(entry.site._username[0])
self.assertIsInstance(entry._params, dict)
self.assertIsNotNone(entry._params)
# TODO: more tests on entry._params, and possibly fixes needed
# to make it closely replicate the original object.
def test_cache(self):
"""Test the apicache by doing _check_cache_entry over each entry."""
cache.process_entries(_cache_dir, self._check_cache_entry)
if __name__ == '__main__':
unittest.main()
| 28.911111
| 76
| 0.707917
|
from __future__ import unicode_literals
__version__ = '$Id: 790cd19ca8b22937365bf24b6e40ed90c79ee12b $'
from pywikibot.site import BaseSite
import scripts.maintenance.cache as cache
from tests import _cache_dir
from tests.aspects import unittest, TestCase
class RequestCacheTests(TestCase):
net = False
def _check_cache_entry(self, entry):
self.assertIsInstance(entry.site, BaseSite)
self.assertIsInstance(entry.site._loginstatus, int)
self.assertIsInstance(entry.site._username, list)
if entry.site._loginstatus >= 1:
self.assertIsNotNone(entry.site._username[0])
self.assertIsInstance(entry._params, dict)
self.assertIsNotNone(entry._params)
def test_cache(self):
cache.process_entries(_cache_dir, self._check_cache_entry)
if __name__ == '__main__':
unittest.main()
| true
| true
|
790cd20955ec12b08cb4a8d15625f8fda87894b3
| 1,188
|
py
|
Python
|
floodsystem/flood.py
|
LakeeSiv/Flood
|
d6bc5bccb04711de99714ecb279d9896c47c4f07
|
[
"MIT"
] | null | null | null |
floodsystem/flood.py
|
LakeeSiv/Flood
|
d6bc5bccb04711de99714ecb279d9896c47c4f07
|
[
"MIT"
] | null | null | null |
floodsystem/flood.py
|
LakeeSiv/Flood
|
d6bc5bccb04711de99714ecb279d9896c47c4f07
|
[
"MIT"
] | null | null | null |
from .station import consistant_typical_range_stations
def stations_level_over_threshold(stations: list, tol: float) -> list:
"""function takes in stations and returns a list of tuples contating station and
relative water lever where the relative water level greater than tol """
stations = consistant_typical_range_stations(stations) # gets consistant stations
res_list = []
for station in stations:
rel_level = station.relative_water_level()
if rel_level is not None: # ensures water level is not None
if rel_level > tol:
res_list.append((station, rel_level))
return res_list
def stations_highest_rel_level(stations, N):
"""Returns a list of N MonitoringStation objects ordered from highest to lowest risk"""
stations = consistant_typical_range_stations(stations)
def key(x):
if x.relative_water_level() is not None:
return x.relative_water_level()
else:
return float(0)
    stationByHighestLevel = sorted(stations, key=key, reverse=True)  # sort by relative water level, highest first
NstationByLevel = stationByHighestLevel[:N]
return NstationByLevel
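# --- Editor's usage sketch (assumption: `build_station_list` lives in
# floodsystem.stationdata, as elsewhere in this project, and stations expose
# a `.name` attribute; adjust if the project differs).
if __name__ == "__main__":
    from floodsystem.stationdata import build_station_list
    stations = build_station_list()
    # stations currently above 0.8 of their typical range
    for station, level in stations_level_over_threshold(stations, tol=0.8):
        print(station.name, level)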
| 33
| 105
| 0.710438
|
from .station import consistant_typical_range_stations
def stations_level_over_threshold(stations: list, tol: float) -> list:
stations = consistant_typical_range_stations(stations)
res_list = []
for station in stations:
rel_level = station.relative_water_level()
if rel_level is not None:
if rel_level > tol:
res_list.append((station, rel_level))
return res_list
def stations_highest_rel_level(stations, N):
stations = consistant_typical_range_stations(stations)
def key(x):
if x.relative_water_level() is not None:
return x.relative_water_level()
else:
return float(0)
stationByHighestLevel = sorted(stations, key=key, reverse=True)
NstationByLevel = stationByHighestLevel[:N]
return NstationByLevel
| true
| true
|
790cd20c1eb080db8972ea17344464e89aba0020
| 4,856
|
py
|
Python
|
docs/source/conf.py
|
aditya-a-patil/FHash
|
1de0b6de02ac48c77c706b50a63cb160367791da
|
[
"Unlicense"
] | null | null | null |
docs/source/conf.py
|
aditya-a-patil/FHash
|
1de0b6de02ac48c77c706b50a63cb160367791da
|
[
"Unlicense"
] | null | null | null |
docs/source/conf.py
|
aditya-a-patil/FHash
|
1de0b6de02ac48c77c706b50a63cb160367791da
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# FHash documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 21 20:02:16 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sphinx_rtd_theme
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FHash'
copyright = u'2017, Aditya Patil'
author = u'Aditya Patil'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'FHashdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FHash.tex', u'FHash Documentation',
u'Aditya Patil', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fhash', u'FHash Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'FHash', u'FHash Documentation',
author, 'FHash', 'One line description of project.',
'Miscellaneous'),
]
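# --- Editor's note: with this configuration, a typical HTML build (assuming
# Sphinx and sphinx_rtd_theme are installed) is run from the repository root:
#
#     sphinx-build -b html docs/source docs/build/html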
| 29.609756
| 79
| 0.682043
|
import sphinx_rtd_theme
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'))
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'FHash'
copyright = u'2017, Aditya Patil'
author = u'Aditya Patil'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'FHashdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FHash.tex', u'FHash Documentation',
u'Aditya Patil', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fhash', u'FHash Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'FHash', u'FHash Documentation',
author, 'FHash', 'One line description of project.',
'Miscellaneous'),
]
| true
| true
|
790cd3fccecabdb2dd1cb0b0786fb112775bc3f4
| 8,093
|
py
|
Python
|
src/oci/devops/models/create_deployment_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/devops/models/create_deployment_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/devops/models/create_deployment_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateDeploymentDetails(object):
"""
The information about new deployment.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateDeploymentDetails object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operations then you should favor using a subclass over the base class:
* :class:`~oci.devops.models.CreateDeployPipelineRedeploymentDetails`
* :class:`~oci.devops.models.CreateDeployPipelineDeploymentDetails`
* :class:`~oci.devops.models.CreateSingleDeployStageDeploymentDetails`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param deploy_pipeline_id:
The value to assign to the deploy_pipeline_id property of this CreateDeploymentDetails.
:type deploy_pipeline_id: str
:param deployment_type:
The value to assign to the deployment_type property of this CreateDeploymentDetails.
:type deployment_type: str
:param display_name:
The value to assign to the display_name property of this CreateDeploymentDetails.
:type display_name: str
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateDeploymentDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this CreateDeploymentDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'deploy_pipeline_id': 'str',
'deployment_type': 'str',
'display_name': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'deploy_pipeline_id': 'deployPipelineId',
'deployment_type': 'deploymentType',
'display_name': 'displayName',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._deploy_pipeline_id = None
self._deployment_type = None
self._display_name = None
self._freeform_tags = None
self._defined_tags = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['deploymentType']
if type == 'PIPELINE_REDEPLOYMENT':
return 'CreateDeployPipelineRedeploymentDetails'
if type == 'PIPELINE_DEPLOYMENT':
return 'CreateDeployPipelineDeploymentDetails'
if type == 'SINGLE_STAGE_DEPLOYMENT':
return 'CreateSingleDeployStageDeploymentDetails'
else:
return 'CreateDeploymentDetails'
@property
def deploy_pipeline_id(self):
"""
**[Required]** Gets the deploy_pipeline_id of this CreateDeploymentDetails.
The OCID of a pipeline.
:return: The deploy_pipeline_id of this CreateDeploymentDetails.
:rtype: str
"""
return self._deploy_pipeline_id
@deploy_pipeline_id.setter
def deploy_pipeline_id(self, deploy_pipeline_id):
"""
Sets the deploy_pipeline_id of this CreateDeploymentDetails.
The OCID of a pipeline.
:param deploy_pipeline_id: The deploy_pipeline_id of this CreateDeploymentDetails.
:type: str
"""
self._deploy_pipeline_id = deploy_pipeline_id
@property
def deployment_type(self):
"""
**[Required]** Gets the deployment_type of this CreateDeploymentDetails.
Specifies type for this deployment.
:return: The deployment_type of this CreateDeploymentDetails.
:rtype: str
"""
return self._deployment_type
@deployment_type.setter
def deployment_type(self, deployment_type):
"""
Sets the deployment_type of this CreateDeploymentDetails.
Specifies type for this deployment.
:param deployment_type: The deployment_type of this CreateDeploymentDetails.
:type: str
"""
self._deployment_type = deployment_type
@property
def display_name(self):
"""
Gets the display_name of this CreateDeploymentDetails.
Deployment display name. Avoid entering confidential information.
:return: The display_name of this CreateDeploymentDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateDeploymentDetails.
Deployment display name. Avoid entering confidential information.
:param display_name: The display_name of this CreateDeploymentDetails.
:type: str
"""
self._display_name = display_name
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateDeploymentDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See `Resource Tags`__. Example: `{\"bar-key\": \"value\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this CreateDeploymentDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateDeploymentDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See `Resource Tags`__. Example: `{\"bar-key\": \"value\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this CreateDeploymentDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateDeploymentDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace. See `Resource Tags`__. Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this CreateDeploymentDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateDeploymentDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace. See `Resource Tags`__. Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this CreateDeploymentDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
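# --- Editor's usage sketch: placeholder values only (not real OCIDs); the
# `@init_model_state_from_kwargs` decorator is assumed to populate the matching
# properties from keyword arguments, as elsewhere in this SDK.
if __name__ == '__main__':
    details = CreateDeploymentDetails(
        deploy_pipeline_id='ocid1.devopsdeploypipeline.oc1..example',
        display_name='example-deployment',
        freeform_tags={'env': 'dev'},
    )
    print(repr(details))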
| 35.495614
| 245
| 0.66922
|
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateDeploymentDetails(object):
def __init__(self, **kwargs):
self.swagger_types = {
'deploy_pipeline_id': 'str',
'deployment_type': 'str',
'display_name': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'deploy_pipeline_id': 'deployPipelineId',
'deployment_type': 'deploymentType',
'display_name': 'displayName',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._deploy_pipeline_id = None
self._deployment_type = None
self._display_name = None
self._freeform_tags = None
self._defined_tags = None
@staticmethod
def get_subtype(object_dictionary):
type = object_dictionary['deploymentType']
if type == 'PIPELINE_REDEPLOYMENT':
return 'CreateDeployPipelineRedeploymentDetails'
if type == 'PIPELINE_DEPLOYMENT':
return 'CreateDeployPipelineDeploymentDetails'
if type == 'SINGLE_STAGE_DEPLOYMENT':
return 'CreateSingleDeployStageDeploymentDetails'
else:
return 'CreateDeploymentDetails'
@property
def deploy_pipeline_id(self):
return self._deploy_pipeline_id
@deploy_pipeline_id.setter
def deploy_pipeline_id(self, deploy_pipeline_id):
self._deploy_pipeline_id = deploy_pipeline_id
@property
def deployment_type(self):
return self._deployment_type
@deployment_type.setter
def deployment_type(self, deployment_type):
self._deployment_type = deployment_type
@property
def display_name(self):
return self._display_name
@display_name.setter
def display_name(self, display_name):
self._display_name = display_name
@property
def freeform_tags(self):
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
790cd474124af4aa4ad07f2cfbb6061ed2aed215
| 141
|
py
|
Python
|
cogwhitelist/__init__.py
|
notodinair/RedV3-Cogs
|
47747ccc33617dcaa3851ff12c6f95aee675d1e6
|
[
"MIT"
] | 1
|
2020-06-08T13:39:30.000Z
|
2020-06-08T13:39:30.000Z
|
cogwhitelist/__init__.py
|
Tominous/Swift-Cogs
|
47747ccc33617dcaa3851ff12c6f95aee675d1e6
|
[
"MIT"
] | null | null | null |
cogwhitelist/__init__.py
|
Tominous/Swift-Cogs
|
47747ccc33617dcaa3851ff12c6f95aee675d1e6
|
[
"MIT"
] | 1
|
2020-06-08T13:39:32.000Z
|
2020-06-08T13:39:32.000Z
|
from redbot.core.bot import Red
from cogwhitelist.cogwhitelist import CogWhitelist
def setup(bot: Red):
bot.add_cog(CogWhitelist(bot))
| 20.142857
| 50
| 0.787234
|
from redbot.core.bot import Red
from cogwhitelist.cogwhitelist import CogWhitelist
def setup(bot: Red):
bot.add_cog(CogWhitelist(bot))
| true
| true
|
790cd47f782ec185d034063836af6adfc3e82b78
| 849
|
py
|
Python
|
src/app/tests/owl/tests_owl_functional.py
|
denkasyanov/education-backend
|
c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5
|
[
"MIT"
] | 62
|
2021-09-22T18:38:26.000Z
|
2022-03-29T06:09:42.000Z
|
src/app/tests/owl/tests_owl_functional.py
|
denkasyanov/education-backend
|
c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5
|
[
"MIT"
] | 50
|
2021-09-16T07:17:31.000Z
|
2022-03-26T12:06:58.000Z
|
src/app/tests/owl/tests_owl_functional.py
|
denkasyanov/education-backend
|
c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5
|
[
"MIT"
] | 16
|
2021-10-17T17:43:31.000Z
|
2022-03-26T11:22:45.000Z
|
import pytest
from django.core import mail
from app.mail.owl import TemplOwl # type: ignore
pytestmark = [pytest.mark.django_db]
@pytest.fixture(autouse=True)
def _enable_email(settings):
settings.EMAIL_ENABLED = True
@pytest.fixture
def owl():
return TemplOwl(
to='f@f213.in',
template_id=100500,
)
def test_sending(owl):
owl.send()
assert len(mail.outbox) == 1
@pytest.mark.parametrize('switch', [
lambda settings: setattr(settings, 'EMAIL_ENABLED', False),
])
def test_kill_switch(owl, switch, settings):
switch(settings)
owl.send()
assert len(mail.outbox) == 0
def test_attaching(owl):
owl.attach(filename='testing_file_name_100500.txt', content=b'just testing')
assert len(owl.msg.attachments) == 1
assert 'testing_file_name_100500.txt' in owl.msg.attachments[0]
| 19.295455
| 80
| 0.699647
|
import pytest
from django.core import mail
from app.mail.owl import TemplOwl
pytestmark = [pytest.mark.django_db]
@pytest.fixture(autouse=True)
def _enable_email(settings):
settings.EMAIL_ENABLED = True
@pytest.fixture
def owl():
return TemplOwl(
to='f@f213.in',
template_id=100500,
)
def test_sending(owl):
owl.send()
assert len(mail.outbox) == 1
@pytest.mark.parametrize('switch', [
lambda settings: setattr(settings, 'EMAIL_ENABLED', False),
])
def test_kill_switch(owl, switch, settings):
switch(settings)
owl.send()
assert len(mail.outbox) == 0
def test_attaching(owl):
owl.attach(filename='testing_file_name_100500.txt', content=b'just testing')
assert len(owl.msg.attachments) == 1
assert 'testing_file_name_100500.txt' in owl.msg.attachments[0]
| true
| true
|
790cd5393642b080cb5c3a25937c16f011a6984c
| 5,321
|
py
|
Python
|
src/penn_chime/charts.py
|
nickcanz/chime
|
cb03218ee5cc71b92704c8be379924ac459259d7
|
[
"MIT"
] | 1
|
2020-05-09T14:43:53.000Z
|
2020-05-09T14:43:53.000Z
|
src/penn_chime/charts.py
|
nickcanz/chime
|
cb03218ee5cc71b92704c8be379924ac459259d7
|
[
"MIT"
] | null | null | null |
src/penn_chime/charts.py
|
nickcanz/chime
|
cb03218ee5cc71b92704c8be379924ac459259d7
|
[
"MIT"
] | null | null | null |
from math import ceil
import datetime
from altair import Chart # type: ignore
import pandas as pd # type: ignore
import numpy as np
from .parameters import Parameters
from .utils import add_date_column
from .presentation import DATE_FORMAT
def new_admissions_chart(
alt, projection_admits: pd.DataFrame, parameters: Parameters
) -> Chart:
"""docstring"""
plot_projection_days = parameters.n_days - 10
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
tooltip_dict = {False: "day", True: "date:T"}
if as_date:
projection_admits = add_date_column(projection_admits)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
# TODO fix the fold to allow any number of dispositions
ceiled_admits = projection_admits.copy()
ceiled_admits.hospitalized = np.ceil(ceiled_admits.hospitalized)
ceiled_admits.icu = np.ceil(ceiled_admits.icu)
ceiled_admits.ventilated = np.ceil(ceiled_admits.ventilated)
return (
alt.Chart(ceiled_admits.head(plot_projection_days))
.transform_fold(fold=["hospitalized", "icu", "ventilated"])
.mark_line(point=True)
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Daily admissions", scale=y_scale),
color="key:N",
tooltip=[
tooltip_dict[as_date],
alt.Tooltip("value:Q", format=".0f", title="Admissions"),
"key:N",
],
)
.interactive()
)
def admitted_patients_chart(
alt, census: pd.DataFrame, parameters: Parameters
) -> Chart:
"""docstring"""
plot_projection_days = parameters.n_days - 10
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
if as_date:
census = add_date_column(census)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
idx = "date:T"
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
idx = "day"
y_scale = alt.Scale()
if max_y_axis:
y_scale.domain = (0, max_y_axis)
# TODO fix the fold to allow any number of dispositions
return (
alt.Chart(census.head(plot_projection_days))
.transform_fold(fold=["hospitalized", "icu", "ventilated"])
.mark_line(point=True)
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Census", scale=y_scale),
color="key:N",
tooltip=[
idx,
alt.Tooltip("value:Q", format=".0f", title="Census"),
"key:N",
],
)
.interactive()
)
def additional_projections_chart(
alt, model, parameters
) -> Chart:
# TODO use subselect of df_raw instead of creating a new df
raw_df = model.raw_df
dat = pd.DataFrame({
"infected": raw_df.infected,
"recovered": raw_df.recovered
})
dat["day"] = dat.index
as_date = parameters.as_date
max_y_axis = parameters.max_y_axis
if as_date:
dat = add_date_column(dat)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
return (
alt.Chart(dat)
.transform_fold(fold=["infected", "recovered"])
.mark_line()
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Case Volume", scale=y_scale),
tooltip=["key:N", "value:Q"],
color="key:N",
)
.interactive()
)
def chart_descriptions(chart: Chart, labels, suffix: str = ""):
"""
:param chart: Chart: The alt chart to be used in finding max points
:param suffix: str: The assumption is that the charts have similar column names.
The census chart adds " Census" to the column names.
Make sure to include a space or underscore as appropriate
:return: str: Returns a multi-line string description of the results
"""
messages = []
cols = ["hospitalized", "icu", "ventilated"]
asterisk = False
day = "date" if "date" in chart.data.columns else "day"
for col in cols:
if chart.data[col].idxmax() + 1 == len(chart.data):
asterisk = True
on = chart.data[day][chart.data[col].idxmax()]
if day == "date":
on = datetime.datetime.strftime(on, "%b %d") # todo: bring this to an optional arg / i18n
else:
            on += 1  # "day" column is 0-indexed; report a 1-indexed day
messages.append(
"{}{} peaks at {:,} on day {}{}".format(
labels[col],
suffix,
ceil(chart.data[col].max()),
on,
"*" if asterisk else "",
)
)
if asterisk:
messages.append("_* The max is at the upper bound of the data, and therefore may not be the actual max_")
return "\n\n".join(messages)
| 29.893258
| 113
| 0.584852
|
from math import ceil
import datetime
from altair import Chart
import pandas as pd
import numpy as np
from .parameters import Parameters
from .utils import add_date_column
from .presentation import DATE_FORMAT
def new_admissions_chart(
alt, projection_admits: pd.DataFrame, parameters: Parameters
) -> Chart:
plot_projection_days = parameters.n_days - 10
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
tooltip_dict = {False: "day", True: "date:T"}
if as_date:
projection_admits = add_date_column(projection_admits)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
ceiled_admits = projection_admits.copy()
ceiled_admits.hospitalized = np.ceil(ceiled_admits.hospitalized)
ceiled_admits.icu = np.ceil(ceiled_admits.icu)
ceiled_admits.ventilated = np.ceil(ceiled_admits.ventilated)
return (
alt.Chart(ceiled_admits.head(plot_projection_days))
.transform_fold(fold=["hospitalized", "icu", "ventilated"])
.mark_line(point=True)
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Daily admissions", scale=y_scale),
color="key:N",
tooltip=[
tooltip_dict[as_date],
alt.Tooltip("value:Q", format=".0f", title="Admissions"),
"key:N",
],
)
.interactive()
)
def admitted_patients_chart(
alt, census: pd.DataFrame, parameters: Parameters
) -> Chart:
plot_projection_days = parameters.n_days - 10
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
if as_date:
census = add_date_column(census)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
idx = "date:T"
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
idx = "day"
y_scale = alt.Scale()
if max_y_axis:
y_scale.domain = (0, max_y_axis)
return (
alt.Chart(census.head(plot_projection_days))
.transform_fold(fold=["hospitalized", "icu", "ventilated"])
.mark_line(point=True)
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Census", scale=y_scale),
color="key:N",
tooltip=[
idx,
alt.Tooltip("value:Q", format=".0f", title="Census"),
"key:N",
],
)
.interactive()
)
def additional_projections_chart(
alt, model, parameters
) -> Chart:
raw_df = model.raw_df
dat = pd.DataFrame({
"infected": raw_df.infected,
"recovered": raw_df.recovered
})
dat["day"] = dat.index
as_date = parameters.as_date
max_y_axis = parameters.max_y_axis
if as_date:
dat = add_date_column(dat)
x_kwargs = {"shorthand": "date:T", "title": "Date", "axis": alt.Axis(format=(DATE_FORMAT))}
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
return (
alt.Chart(dat)
.transform_fold(fold=["infected", "recovered"])
.mark_line()
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Case Volume", scale=y_scale),
tooltip=["key:N", "value:Q"],
color="key:N",
)
.interactive()
)
def chart_descriptions(chart: Chart, labels, suffix: str = ""):
messages = []
cols = ["hospitalized", "icu", "ventilated"]
asterisk = False
day = "date" if "date" in chart.data.columns else "day"
for col in cols:
if chart.data[col].idxmax() + 1 == len(chart.data):
asterisk = True
on = chart.data[day][chart.data[col].idxmax()]
if day == "date":
on = datetime.datetime.strftime(on, "%b %d")
else:
on += 1
messages.append(
"{}{} peaks at {:,} on day {}{}".format(
labels[col],
suffix,
ceil(chart.data[col].max()),
on,
"*" if asterisk else "",
)
)
if asterisk:
messages.append("_* The max is at the upper bound of the data, and therefore may not be the actual max_")
return "\n\n".join(messages)
| true
| true
|
790cd558e9b23c59ef942ba9bd925d54f00ffdf7
| 973
|
py
|
Python
|
tests/test_simple.py
|
pooya/disco
|
e03a337b3b20e191459c74a367b9e89e873f71ff
|
[
"BSD-3-Clause"
] | 786
|
2015-01-01T12:35:40.000Z
|
2022-03-19T04:39:22.000Z
|
tests/test_simple.py
|
pooya/disco
|
e03a337b3b20e191459c74a367b9e89e873f71ff
|
[
"BSD-3-Clause"
] | 51
|
2015-01-19T20:07:01.000Z
|
2019-10-19T21:03:06.000Z
|
tests/test_simple.py
|
pooya/disco
|
e03a337b3b20e191459c74a367b9e89e873f71ff
|
[
"BSD-3-Clause"
] | 122
|
2015-01-05T18:16:03.000Z
|
2021-07-10T12:35:22.000Z
|
from disco.test import TestCase, TestJob
from disco.compat import bytes_to_str
class SimpleJob(TestJob):
@staticmethod
def map(e, params):
yield int(e), (bytes_to_str(e)).strip()
@staticmethod
def reduce(iter, out, params):
for k, v in sorted(iter):
out.add(k, v)
class SimplerJob(SimpleJob):
@staticmethod
def reduce(iter, params):
return sorted(iter)
class SimpleTestCase(TestCase):
input = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
def answers(self):
return ((i, str(i)) for i in self.input for x in range(10))
def serve(self, path):
return '\n'.join([path] * 10)
def test_simple(self):
self.job = SimpleJob().run(input=self.test_server.urls(self.input))
self.assertResults(self.job, self.answers())
def test_simpler(self):
self.job = SimplerJob().run(input=self.test_server.urls(self.input))
self.assertResults(self.job, self.answers())
| 27.8
| 76
| 0.634121
|
from disco.test import TestCase, TestJob
from disco.compat import bytes_to_str
class SimpleJob(TestJob):
@staticmethod
def map(e, params):
yield int(e), (bytes_to_str(e)).strip()
@staticmethod
def reduce(iter, out, params):
for k, v in sorted(iter):
out.add(k, v)
class SimplerJob(SimpleJob):
@staticmethod
def reduce(iter, params):
return sorted(iter)
class SimpleTestCase(TestCase):
input = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
def answers(self):
return ((i, str(i)) for i in self.input for x in range(10))
def serve(self, path):
return '\n'.join([path] * 10)
def test_simple(self):
self.job = SimpleJob().run(input=self.test_server.urls(self.input))
self.assertResults(self.job, self.answers())
def test_simpler(self):
self.job = SimplerJob().run(input=self.test_server.urls(self.input))
self.assertResults(self.job, self.answers())
| true
| true
|
790cd5f3efbb4e5b2afb556e2d0a477098397709
| 6,796
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py
|
OuyangChao/Paddle
|
cac9635a6733ffbbd816b33e21c3054e0cd81ab1
|
[
"Apache-2.0"
] | 3
|
2021-06-08T14:24:36.000Z
|
2021-06-08T14:24:38.000Z
|
python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py
|
chenyanlei1/Paddle
|
f249a5f05f0f5832279244d88c8cb4eaaad1fbd4
|
[
"Apache-2.0"
] | 1
|
2020-09-22T08:54:49.000Z
|
2020-09-22T11:44:09.000Z
|
python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py
|
chenyanlei1/Paddle
|
f249a5f05f0f5832279244d88c8cb4eaaad1fbd4
|
[
"Apache-2.0"
] | 1
|
2021-08-04T14:28:58.000Z
|
2021-08-04T14:28:58.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import paddle.fluid as fluid
import os
from fleet_meta_optimizer_base import TestFleetMetaOptimizer
from paddle.distributed.fleet.meta_optimizers import RecomputeOptimizer
paddle.enable_static()
class TestFleetRecomputeMetaOptimizer(TestFleetMetaOptimizer):
def test_recompute_optimizer_backward(self):
""" test recompute optimizer backward """
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
opt = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
opt = RecomputeOptimizer(opt)
opt.user_defined_strategy = strategy
params_grads = opt.backward(avg_cost, startup_prog)
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
def test_recompute_optimizer_backward_gradients(self):
""" test recompute optimizer backward + gradients """
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
opt = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
opt = RecomputeOptimizer(opt)
opt.user_defined_strategy = strategy
params_grads = opt.backward(avg_cost, startup_prog)
with fluid.program_guard(train_prog, startup_prog):
opt.apply_gradients(params_grads)
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
def test_recompute_optimizer_backward_optimize(self):
""" test recompute optimizer backward + optimize """
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
opt = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
opt = RecomputeOptimizer(opt)
opt.user_defined_strategy = strategy
params_grads = opt.backward(avg_cost, startup_prog)
opt.apply_optimize(avg_cost, startup_prog, params_grads)
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
def test_recompute_optimizer(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
def test_recompute_lars_optimizer(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
self.set_strategy(strategy, 'lars')
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
ops = [op.type for op in avg_cost.block.ops]
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
self.assertIn('lars_momentum', ops)
def test_recompute_lamb_optimizer(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
self.set_strategy(strategy, 'lamb')
self.optimizer(avg_cost, strategy, train_prog, startup_prog, 'adam')
ops = [op.type for op in avg_cost.block.ops]
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
self.assertIn('lamb', ops)
def test_recompute_offload(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute-offload')
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
ops = [op.type for op in avg_cost.block.ops]
outs = [
op.output('Out')[0] for op in avg_cost.block.ops
if op.type == 'memcpy'
]
self.assertIn('memcpy', ops)
self.assertIn('@Pinned', ''.join(outs))
self.assertIn('@Fetch', ''.join(outs))
if __name__ == "__main__":
unittest.main()
| 39.283237
| 80
| 0.657004
|
import unittest
import paddle
import paddle.fluid as fluid
import os
from fleet_meta_optimizer_base import TestFleetMetaOptimizer
from paddle.distributed.fleet.meta_optimizers import RecomputeOptimizer
paddle.enable_static()
class TestFleetRecomputeMetaOptimizer(TestFleetMetaOptimizer):
def test_recompute_optimizer_backward(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
opt = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
opt = RecomputeOptimizer(opt)
opt.user_defined_strategy = strategy
params_grads = opt.backward(avg_cost, startup_prog)
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
def test_recompute_optimizer_backward_gradients(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
opt = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
opt = RecomputeOptimizer(opt)
opt.user_defined_strategy = strategy
params_grads = opt.backward(avg_cost, startup_prog)
with fluid.program_guard(train_prog, startup_prog):
opt.apply_gradients(params_grads)
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
def test_recompute_optimizer_backward_optimize(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
opt = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
opt = RecomputeOptimizer(opt)
opt.user_defined_strategy = strategy
params_grads = opt.backward(avg_cost, startup_prog)
opt.apply_optimize(avg_cost, startup_prog, params_grads)
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
def test_recompute_optimizer(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
def test_recompute_lars_optimizer(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
self.set_strategy(strategy, 'lars')
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
ops = [op.type for op in avg_cost.block.ops]
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
self.assertIn('lars_momentum', ops)
def test_recompute_lamb_optimizer(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute')
self.set_strategy(strategy, 'lamb')
self.optimizer(avg_cost, strategy, train_prog, startup_prog, 'adam')
ops = [op.type for op in avg_cost.block.ops]
outs = [
op.output('Out')[0] for op in avg_cost.block.ops if op.type == 'mul'
]
self.assertIn('subprog', ''.join(outs))
self.assertIn('lamb', ops)
def test_recompute_offload(self):
train_prog, startup_prog = fluid.Program(), fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
self.set_strategy(strategy, 'recompute-offload')
self.optimizer(avg_cost, strategy, train_prog, startup_prog)
ops = [op.type for op in avg_cost.block.ops]
outs = [
op.output('Out')[0] for op in avg_cost.block.ops
if op.type == 'memcpy'
]
self.assertIn('memcpy', ops)
self.assertIn('@Pinned', ''.join(outs))
self.assertIn('@Fetch', ''.join(outs))
if __name__ == "__main__":
unittest.main()
| true
| true
|
790cd6097aeef92cfda5f0eb2ecabaee480a01dc
| 2,544
|
py
|
Python
|
neural_network_lrp.py
|
ahijevyc/NSC_objects
|
322728a71ec011b681b0038e9dcd86df1f73b2fd
|
[
"MIT"
] | null | null | null |
neural_network_lrp.py
|
ahijevyc/NSC_objects
|
322728a71ec011b681b0038e9dcd86df1f73b2fd
|
[
"MIT"
] | null | null | null |
neural_network_lrp.py
|
ahijevyc/NSC_objects
|
322728a71ec011b681b0038e9dcd86df1f73b2fd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import datetime as dt
import sys, os, pickle, time
from keras.models import Model, save_model, load_model
from keras.regularizers import l2
from keras.optimizers import SGD, Adam
import keras.backend as K
import tensorflow as tf
import pandas as pd
import innvestigate
import innvestigate.utils as iutils
from ml_functions import read_csv_files, normalize_multivariate_data, log, get_features
def brier_score_keras(obs, preds):
return K.mean((preds - obs) ** 2)
def brier_skill_score_keras(obs, preds):
    climo = K.mean((obs - K.mean(obs)) ** 2)
    bs = brier_score_keras(obs, preds)
    ratio = (bs / climo)
    return 1.0 - ratio  # BSS = 1 - BS/BS_climo; the original returned climo by mistake
def auc(obs, preds):
auc = tf.metrics.auc(obs, preds)[1]
K.get_session().run(tf.local_variables_initializer())
return auc
def log(msg):
print( time.ctime(time.time()), msg )
### NEURAL NETWORK PARAMETERS ###
nn_params = { 'num_layers': 1, 'num_neurons': [ 1024 ], 'dropout': 0.1, 'lr': 0.001, 'num_epochs': 30, \
'report_window_space':[ int(sys.argv[1]) ], 'report_window_time':[ int(sys.argv[2]) ] }
dataset = 'RT2020'
scaling_dataset = 'NSC3km-12sec'
scaling_file = '/glade/work/sobash/NSC_objects/scaling_values_all_%s.pk'%scaling_dataset
trained_models_dir = '/glade/work/sobash/NSC_objects/trained_models_paper'
sdate = dt.datetime(2020,5,1,0,0,0)
edate = dt.datetime(2020,5,10,0,0,0)
dateinc = dt.timedelta(days=1)
features = get_features('basic')
log('Reading Data')
# read data and reassign data types to float32 to save memory
type_dict = {}
for f in features: type_dict[f]='float32'
df, numfcsts = read_csv_files(sdate, edate, dataset)
print(numfcsts)
scaling_values = pickle.load(open(scaling_file, 'rb'))
norm_in_data, scaling_values = normalize_multivariate_data(df[features].values.astype(np.float32), features, scaling_values=scaling_values)
dense_model = None
model_fname = '%s/neural_network_2016_120km_2hr_nn%d_drop%.1f_basic.h5'%(trained_models_dir,nn_params['num_neurons'][0],nn_params['dropout'])
dense_model = load_model(model_fname, custom_objects={'brier_score_keras': brier_score_keras, 'brier_skill_score_keras':brier_skill_score_keras, 'auc':auc })
print(norm_in_data.shape)
analyzer = innvestigate.create_analyzer('lrp.alpha_2_beta_1', dense_model, neuron_selection_mode='index')
a = analyzer.analyze(norm_in_data, 0)
a /= np.max(np.abs(a))
a = a.reshape((36,1298,-1))
a = np.mean(a[24,:,:], axis=0)
print(a.shape)
for i,f in enumerate(features):
print(f, a[i])
log('Finished')
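# --- Editor's note: `a` above holds the mean LRP relevance per input feature
# for output neuron 0; a quick importance ranking (top-10, as an assumption):
#
#   order = np.argsort(-np.abs(a))
#   for i in order[:10]:
#       print(features[i], a[i])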
| 31.8
| 157
| 0.737421
|
import numpy as np
import datetime as dt
import sys, os, pickle, time
from keras.models import Model, save_model, load_model
from keras.regularizers import l2
from keras.optimizers import SGD, Adam
import keras.backend as K
import tensorflow as tf
import pandas as pd
import innvestigate
import innvestigate.utils as iutils
from ml_functions import read_csv_files, normalize_multivariate_data, log, get_features
def brier_score_keras(obs, preds):
return K.mean((preds - obs) ** 2)
def brier_skill_score_keras(obs, preds):
    climo = K.mean((obs - K.mean(obs)) ** 2)
    bs = brier_score_keras(obs, preds)
    ratio = (bs / climo)
    return 1.0 - ratio
def auc(obs, preds):
auc = tf.metrics.auc(obs, preds)[1]
K.get_session().run(tf.local_variables_initializer())
return auc
def log(msg):
print( time.ctime(time.time()), msg )
nn_params = { 'num_layers': 1, 'num_neurons': [ 1024 ], 'dropout': 0.1, 'lr': 0.001, 'num_epochs': 30, \
'report_window_space':[ int(sys.argv[1]) ], 'report_window_time':[ int(sys.argv[2]) ] }
dataset = 'RT2020'
scaling_dataset = 'NSC3km-12sec'
scaling_file = '/glade/work/sobash/NSC_objects/scaling_values_all_%s.pk'%scaling_dataset
trained_models_dir = '/glade/work/sobash/NSC_objects/trained_models_paper'
sdate = dt.datetime(2020,5,1,0,0,0)
edate = dt.datetime(2020,5,10,0,0,0)
dateinc = dt.timedelta(days=1)
features = get_features('basic')
log('Reading Data')
type_dict = {}
for f in features: type_dict[f]='float32'
df, numfcsts = read_csv_files(sdate, edate, dataset)
print(numfcsts)
scaling_values = pickle.load(open(scaling_file, 'rb'))
norm_in_data, scaling_values = normalize_multivariate_data(df[features].values.astype(np.float32), features, scaling_values=scaling_values)
dense_model = None
model_fname = '%s/neural_network_2016_120km_2hr_nn%d_drop%.1f_basic.h5'%(trained_models_dir,nn_params['num_neurons'][0],nn_params['dropout'])
dense_model = load_model(model_fname, custom_objects={'brier_score_keras': brier_score_keras, 'brier_skill_score_keras':brier_skill_score_keras, 'auc':auc })
print(norm_in_data.shape)
analyzer = innvestigate.create_analyzer('lrp.alpha_2_beta_1', dense_model, neuron_selection_mode='index')
a = analyzer.analyze(norm_in_data, 0)
a /= np.max(np.abs(a))
a = a.reshape((36,1298,-1))
a = np.mean(a[24,:,:], axis=0)
print(a.shape)
for i,f in enumerate(features):
print(f, a[i])
log('Finished')
| true
| true
|
790cd740173cdfadf13951914be2ab38241e3456
| 1,650
|
py
|
Python
|
SensorTile/STM32CubeFunctionPack_SENSING1_V4.0.2/Middlewares/ST/STM32_AI_AudioPreprocessing_Library/Python/MFCC.py
|
MahendraSondagar/STMicroelectronics
|
1b3cab9da8e9a23b2372573b08f6a55ea4424668
|
[
"MIT"
] | null | null | null |
SensorTile/STM32CubeFunctionPack_SENSING1_V4.0.2/Middlewares/ST/STM32_AI_AudioPreprocessing_Library/Python/MFCC.py
|
MahendraSondagar/STMicroelectronics
|
1b3cab9da8e9a23b2372573b08f6a55ea4424668
|
[
"MIT"
] | null | null | null |
SensorTile/STM32CubeFunctionPack_SENSING1_V4.0.2/Middlewares/ST/STM32_AI_AudioPreprocessing_Library/Python/MFCC.py
|
MahendraSondagar/STMicroelectronics
|
1b3cab9da8e9a23b2372573b08f6a55ea4424668
|
[
"MIT"
] | 1
|
2021-05-19T11:35:09.000Z
|
2021-05-19T11:35:09.000Z
|
#!/usr/bin/env python
# coding: utf-8
# This software component is licensed by ST under BSD 3-Clause license,
# the "License"; You may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
# https://opensource.org/licenses/BSD-3-Clause
"""KWS Feature Extraction example."""
import numpy as np
import librosa
import scipy.io.wavfile  # `import scipy` alone does not expose scipy.io.wavfile.read
from scipy.signal import hann
from scipy.fftpack import dct
def mfcc_col(buff_test):
window = 2048
half_window = int(window / 2)
n_mels = 128
n_coeff = 13
assert buff_test.shape == (window,)
hann_asym_f32 = hann(window, sym=False).astype('float32')
assert hann_asym_f32.shape == (window,), hann_asym_f32.shape
buff_hann = buff_test * hann_asym_f32
assert buff_hann.shape == (window,), buff_hann.shape
fft = np.fft.fft(buff_hann, window)[:half_window + 1]
assert fft.shape == (half_window + 1,), fft.shape
ps = np.abs(fft)**2
assert ps.shape == (half_window + 1,)
mel = librosa.filters.mel(sr, window, n_mels)
assert mel.shape == (n_mels, half_window + 1)
energy = np.dot(mel, ps)
assert energy.shape == (n_mels,)
logamplitude = 10 * np.log10(energy)
assert logamplitude.shape == (n_mels,)
dct_out = dct(logamplitude, type=3)
assert dct_out.shape == (n_mels,)
return(dct_out[1:(n_coeff + 1)])
# buffer_bus_01 is made of first 2048 samples of "bus.wav" file
sr, ys = scipy.io.wavfile.read("bus.wav")
buffer_01 = ys[0:2048]
coeffs = mfcc_col(buffer_01)  # avoid shadowing the function name with its result
print('mfcc = ', coeffs[:])
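# --- Editor's cross-check sketch (kept as comments): librosa's built-in MFCC
# uses log-mel + orthonormal DCT-II, so values will be close to, not identical
# to, the manual pipeline above.
#
#   mfcc_ref = librosa.feature.mfcc(y=ys[0:2048].astype('float32'), sr=sr,
#                                   n_mfcc=13, n_fft=2048, hop_length=2048,
#                                   n_mels=128)
#   print('librosa mfcc = ', mfcc_ref[:, 0])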
| 26.190476
| 75
| 0.643636
|
import numpy as np
import librosa
import scipy.io.wavfile
from scipy.signal import hann
from scipy.fftpack import dct
def mfcc_col(buff_test):
window = 2048
half_window = int(window / 2)
n_mels = 128
n_coeff = 13
assert buff_test.shape == (window,)
hann_asym_f32 = hann(window, sym=False).astype('float32')
assert hann_asym_f32.shape == (window,), hann_asym_f32.shape
buff_hann = buff_test * hann_asym_f32
assert buff_hann.shape == (window,), buff_hann.shape
fft = np.fft.fft(buff_hann, window)[:half_window + 1]
assert fft.shape == (half_window + 1,), fft.shape
ps = np.abs(fft)**2
assert ps.shape == (half_window + 1,)
mel = librosa.filters.mel(sr, window, n_mels)
assert mel.shape == (n_mels, half_window + 1)
energy = np.dot(mel, ps)
assert energy.shape == (n_mels,)
logamplitude = 10 * np.log10(energy)
assert logamplitude.shape == (n_mels,)
dct_out = dct(logamplitude, type=3)
assert dct_out.shape == (n_mels,)
return(dct_out[1:(n_coeff + 1)])
sr, ys = scipy.io.wavfile.read("bus.wav")
buffer_01 = ys[0:2048]
coeffs = mfcc_col(buffer_01)
print('mfcc = ', coeffs[:])
| true
| true
|
790cd82bd1fd5436917e52ba00b5728b6618f83e
| 402
|
py
|
Python
|
blog/migrations/0036_auto_20190503_1645.py
|
akindele214/181hub_2
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
[
"MIT"
] | 1
|
2020-05-20T08:42:49.000Z
|
2020-05-20T08:42:49.000Z
|
blog/migrations/0036_auto_20190503_1645.py
|
akindele214/181hub_2
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
[
"MIT"
] | 14
|
2020-03-24T17:31:08.000Z
|
2022-03-11T23:59:30.000Z
|
blog/migrations/0036_auto_20190503_1645.py
|
akindele214/181hub_2
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
[
"MIT"
] | 1
|
2020-04-13T12:37:37.000Z
|
2020-04-13T12:37:37.000Z
|
# Generated by Django 2.1.5 on 2019-05-03 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0035_post_video'),
]
operations = [
migrations.AlterField(
model_name='post',
name='video',
field=models.FileField(blank=True, null=True, upload_to='uploads/'),
),
]
| 21.157895
| 80
| 0.59204
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0035_post_video'),
]
operations = [
migrations.AlterField(
model_name='post',
name='video',
field=models.FileField(blank=True, null=True, upload_to='uploads/'),
),
]
| true
| true
|
790cd9c5dc8128e7a217f6660368342c46113ae7
| 2,829
|
py
|
Python
|
jiant/tasks/lib/wnli.py
|
yzpang/jiant
|
192d6b525c06f33010b59044df40cb86bbfba4ea
|
[
"MIT"
] | 1,108
|
2019-04-22T09:19:19.000Z
|
2022-03-31T13:23:51.000Z
|
jiant/tasks/lib/wnli.py
|
yzpang/jiant
|
192d6b525c06f33010b59044df40cb86bbfba4ea
|
[
"MIT"
] | 737
|
2019-04-22T14:30:36.000Z
|
2022-03-31T22:22:17.000Z
|
jiant/tasks/lib/wnli.py
|
yzpang/jiant
|
192d6b525c06f33010b59044df40cb86bbfba4ea
|
[
"MIT"
] | 273
|
2019-04-23T01:42:11.000Z
|
2022-03-25T15:59:38.000Z
|
import numpy as np
import torch
from dataclasses import dataclass
from typing import List
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
GlueMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import double_sentence_featurize, labels_to_bimap
from jiant.utils.python.io import read_jsonl
@dataclass
class Example(BaseExample):
guid: str
input_premise: str
input_hypothesis: str
label: str
def tokenize(self, tokenizer):
return TokenizedExample(
guid=self.guid,
input_premise=tokenizer.tokenize(self.input_premise),
input_hypothesis=tokenizer.tokenize(self.input_hypothesis),
label_id=WnliTask.LABEL_TO_ID[self.label],
)
@dataclass
class TokenizedExample(BaseTokenizedExample):
guid: str
input_premise: List
input_hypothesis: List
label_id: int
def featurize(self, tokenizer, feat_spec):
return double_sentence_featurize(
guid=self.guid,
input_tokens_a=self.input_premise,
input_tokens_b=self.input_hypothesis,
label_id=self.label_id,
tokenizer=tokenizer,
feat_spec=feat_spec,
data_row_class=DataRow,
)
@dataclass
class DataRow(BaseDataRow):
guid: str
input_ids: np.ndarray
input_mask: np.ndarray
segment_ids: np.ndarray
label_id: int
tokens: list
@dataclass
class Batch(BatchMixin):
input_ids: torch.LongTensor
input_mask: torch.LongTensor
segment_ids: torch.LongTensor
label_id: torch.LongTensor
tokens: list
class WnliTask(GlueMixin, Task):
Example = Example
TokenizedExample = Example
DataRow = DataRow
Batch = Batch
TASK_TYPE = TaskTypes.CLASSIFICATION
LABELS = ["0", "1"]
LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)
def get_train_examples(self):
return self._create_examples(lines=read_jsonl(self.train_path), set_type="train")
def get_val_examples(self):
return self._create_examples(lines=read_jsonl(self.val_path), set_type="val")
def get_test_examples(self):
return self._create_examples(lines=read_jsonl(self.test_path), set_type="test")
@classmethod
def _create_examples(cls, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
examples.append(
Example(
# NOTE: get_glue_preds() is dependent on this guid format.
guid="%s-%s" % (set_type, i),
input_premise=line["premise"],
input_hypothesis=line["hypothesis"],
label=line["label"] if set_type != "test" else cls.LABELS[-1],
)
)
return examples
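A minimal sketch of feeding the factory above by hand; the premise/hypothesis/label keys come from _create_examples, while the sentences themselves are invented for illustration:
lines = [
    {"premise": "The trophy would not fit in the suitcase.",
     "hypothesis": "The trophy was too large.", "label": "1"},
    {"premise": "The councilmen refused the demonstrators a permit.",
     "hypothesis": "The councilmen feared violence.", "label": "0"},
]
examples = WnliTask._create_examples(lines=lines, set_type="train")
print(examples[0].guid, examples[0].label)  # prints: train-0 1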
| 26.688679
| 89
| 0.655002
|
import numpy as np
import torch
from dataclasses import dataclass
from typing import List
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
GlueMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import double_sentence_featurize, labels_to_bimap
from jiant.utils.python.io import read_jsonl
@dataclass
class Example(BaseExample):
guid: str
input_premise: str
input_hypothesis: str
label: str
def tokenize(self, tokenizer):
return TokenizedExample(
guid=self.guid,
input_premise=tokenizer.tokenize(self.input_premise),
input_hypothesis=tokenizer.tokenize(self.input_hypothesis),
label_id=WnliTask.LABEL_TO_ID[self.label],
)
@dataclass
class TokenizedExample(BaseTokenizedExample):
guid: str
input_premise: List
input_hypothesis: List
label_id: int
def featurize(self, tokenizer, feat_spec):
return double_sentence_featurize(
guid=self.guid,
input_tokens_a=self.input_premise,
input_tokens_b=self.input_hypothesis,
label_id=self.label_id,
tokenizer=tokenizer,
feat_spec=feat_spec,
data_row_class=DataRow,
)
@dataclass
class DataRow(BaseDataRow):
guid: str
input_ids: np.ndarray
input_mask: np.ndarray
segment_ids: np.ndarray
label_id: int
tokens: list
@dataclass
class Batch(BatchMixin):
input_ids: torch.LongTensor
input_mask: torch.LongTensor
segment_ids: torch.LongTensor
label_id: torch.LongTensor
tokens: list
class WnliTask(GlueMixin, Task):
Example = Example
TokenizedExample = Example
DataRow = DataRow
Batch = Batch
TASK_TYPE = TaskTypes.CLASSIFICATION
LABELS = ["0", "1"]
LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)
def get_train_examples(self):
return self._create_examples(lines=read_jsonl(self.train_path), set_type="train")
def get_val_examples(self):
return self._create_examples(lines=read_jsonl(self.val_path), set_type="val")
def get_test_examples(self):
return self._create_examples(lines=read_jsonl(self.test_path), set_type="test")
@classmethod
def _create_examples(cls, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
examples.append(
Example(
guid="%s-%s" % (set_type, i),
input_premise=line["premise"],
input_hypothesis=line["hypothesis"],
label=line["label"] if set_type != "test" else cls.LABELS[-1],
)
)
return examples
| true
| true
|
790cda14ea0bb293e4b8bc537d023829f45b9266
| 12,231
|
py
|
Python
|
step_2/scripts/sample_subjectivity_tweets.py
|
chuajiesheng/twitter-sentiment-analysis
|
7617243c953a20c517a737c79fe0f54e55aef140
|
[
"Apache-2.0"
] | null | null | null |
step_2/scripts/sample_subjectivity_tweets.py
|
chuajiesheng/twitter-sentiment-analysis
|
7617243c953a20c517a737c79fe0f54e55aef140
|
[
"Apache-2.0"
] | null | null | null |
step_2/scripts/sample_subjectivity_tweets.py
|
chuajiesheng/twitter-sentiment-analysis
|
7617243c953a20c517a737c79fe0f54e55aef140
|
[
"Apache-2.0"
] | null | null | null |
import sys
import json
import hashlib
import gc
import os
from operator import *
import shlex
from pyspark import StorageLevel
from pyspark.sql import SQLContext
from pyspark.sql.functions import *
from pyspark.sql.types import *
import numpy as np
from subjectivity_clues import clues
def expect(name, var, expected, op=eq):
if op(var, expected):
log('[checkpoint] {} = {}'.format(name, expected))
else:
log('[error] {} = {}'.format(name, expected))
raise Exception(name)
def log(message):
log_file = 'sample_subjectivity_tweets.log'
with open(log_file, 'a') as f:
f.write(message)
f.write('\n')
f.flush()
f.close()
print message
def to_json(name, jsons):
filename = '{}.json'.format(name)
with open(filename, 'w') as f:
for j in jsons:
f.write(j)
f.write('\n')
def to_csv(name, jsons):
filename = '{}.csv'.format(name)
with open(filename, 'w') as f:
for tweet in jsons:
t = json.loads(tweet)
body = t['body'].replace('\n', ' ').replace('\r', '').replace('"', '""')
f.write('"{}",{},{},"{}"\n'.format(t['id'], t['verb'], t['postedTime'], body))
def sample(rdd, size, seed):
items = rdd.collect()
rand = np.random.RandomState(seed)
sampled = rand.choice(items, size=size, replace=False)
expect('sampled', len(set(sampled)), size)
return sampled.tolist()
def sha(name, ext='json'):
BUF_SIZE = 65536
filename = '{}.{}'.format(name, ext)
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def read_and_parse_clues():
DEFAULT_FILENAME = os.getcwd() + os.sep + 'subjectivity_clues' + os.sep + 'subjclueslen1-HLTEMNLP05.tff'
lines = None
with open(DEFAULT_FILENAME, 'r') as f:
lines = f.readlines()
clues = dict()
for l in lines:
clue = dict(token.split('=') for token in shlex.split(l))
word = clue['word1']
clues[word] = clue
return clues
def calculate_relevant(lexicons, sentence):
PRIORPOLARITY = {
'positive': 1,
'negative': -1,
'both': 0,
'neutral': 0
}
TYPE = {
'strongsubj': 2,
'weaksubj': 1
}
total_score = 0
for w in sentence.split(' '):
if w not in lexicons.keys():
continue
total_score += PRIORPOLARITY[lexicons[w]['priorpolarity']] * TYPE[lexicons[w]['type']]
return total_score
# Make sure Python uses UTF-8 as tweets contains emoticon and unicode
reload(sys)
sys.setdefaultencoding('utf-8')
# Use SQLContext for better support
sqlContext = SQLContext(sc)
# Define storage level
DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
MEMORY_AND_DISK = StorageLevel(True, True, False, False, 1)
# Read GNIP's JSON file
directory = "tweets"
datasets = sqlContext.read.json(directory)
log('# Completed reading JSON files')
# Check checksum count
file_count = datasets.where(datasets['verb'].isNull()).count()
expect('file_count', file_count, 21888)
# Check post count
all_posts = datasets.where(datasets['verb'] == 'post')
all_posts_count = all_posts.count()
expect('all_posts_count', all_posts_count, 1570398)
# Check share count
all_shares = datasets.where(datasets['verb'] == 'share')
all_shares_count = all_shares.count()
expect('all_shares_count', all_shares_count, 1112590)
# Check dataset count
info_dataset = datasets.select('info')
info_dataset.registerTempTable('info')
all_tweets_count = info_dataset.select('info.activity_count').groupBy().sum('activity_count').collect()[0][0]
expect('all_tweets_count', all_tweets_count, 2682988)
expect('all_tweets_count', all_tweets_count, all_posts_count + all_shares_count)
log('# Completed validating tweets count')
# Remove post authored by @ChipotleTweet and news agencies
chipotle_tweet = 'id:twitter.com:141341662'
users_to_remove = [chipotle_tweet, 'id:twitter.com:759251', 'id:twitter.com:91478624', 'id:twitter.com:28785486',
'id:twitter.com:1652541', 'id:twitter.com:51241574', 'id:twitter.com:807095',
'id:twitter.com:34713362', 'id:twitter.com:3090733766', 'id:twitter.com:1367531',
'id:twitter.com:14293310', 'id:twitter.com:3108351', 'id:twitter.com:14173315',
'id:twitter.com:292777349', 'id:twitter.com:428333', 'id:twitter.com:624413',
'id:twitter.com:20562637', 'id:twitter.com:13918492', 'id:twitter.com:16184358',
'id:twitter.com:625697849', 'id:twitter.com:2467791', 'id:twitter.com:9763482',
'id:twitter.com:14511951', 'id:twitter.com:6017542', 'id:twitter.com:26574283',
'id:twitter.com:115754870']
all_posts_wo_specific_users = all_posts.where(~ col('actor.id').isin(users_to_remove))
all_posts_w_specific_users = all_posts.where(col('actor.id').isin(users_to_remove)).count()
expect('all_posts_wo_specific_users', all_posts_wo_specific_users.count(), all_posts_count - all_posts_w_specific_users)
# Remove share retweet of tweet by @ChipotleTweet and news agencies
all_shares_wo_specific_users = all_shares.where(~ col('object.actor.id').isin(users_to_remove))
all_shares_w_specific_users = all_shares.where(col('object.actor.id').isin(users_to_remove)).count()
expect('all_shares_wo_specific_users', all_shares_wo_specific_users.count(), all_shares_count - all_shares_w_specific_users)
# Generate tweets pool with only English tweet
tweets_pool = all_posts_wo_specific_users.unionAll(all_shares_wo_specific_users).filter("twitter_lang = 'en'")
tweets_pool.persist(MEMORY_AND_DISK)
tweets_pool_count = tweets_pool.count()
# Adding all post to all share will be greater than tweet pool because of non-English tweet
expected_tweets_pool_count = all_posts_count - all_posts_w_specific_users + \
all_shares_count - all_shares_w_specific_users
expect('tweets_pool_count', tweets_pool_count, expected_tweets_pool_count, op=lt)
log('# Completed constructing tweets pool')
# Check language of tweets
languages = tweets_pool.select('twitter_lang').distinct()
languages_count = languages.count()
language_check = languages.first()['twitter_lang']
expect('languages_count', languages_count, 1)
expect('language_check', language_check, 'en')
log('# Completed validating language variety')
# Take top 80% of tweets by length
tweets_pool_str_lengths = tweets_pool.select(length('body').alias('length')).rdd.map(lambda x: x.length).collect()
lengths_np = np.array(tweets_pool_str_lengths)
p = np.percentile(lengths_np, 20)
final_tweets_pool = tweets_pool.filter(length('body') >= p)
final_tweets_pool.persist(MEMORY_AND_DISK)
tweets_pool.unpersist(blocking=True)
final_tweets_pool_count = final_tweets_pool.count()
percentage_kept = float(final_tweets_pool_count) / tweets_pool_count
expect('percentage_kept', percentage_kept, 0.8, op=gt)
log('# Completed sampling top 80% of tweets by body length')
# Sampling
final_tweets_ids = final_tweets_pool.select(final_tweets_pool['id']).rdd.sortBy(lambda x: x.id).map(lambda x: x.id)
# Development tweets
dev_seed = 10102016
number_of_dev_samples = 3000
dev_posts = sample(final_tweets_ids, number_of_dev_samples, dev_seed)
dev_posts_count = len(dev_posts)
expect('dev_posts_count', dev_posts_count, number_of_dev_samples)
log('# Completed sampling dev tweets')
dev_posts_file = "dev_posts"
dev_posts_jsons = final_tweets_pool[final_tweets_pool['id'].isin(dev_posts)].toJSON().collect()
to_json(dev_posts_file, dev_posts_jsons)
to_csv(dev_posts_file, dev_posts_jsons)
expect('dev_posts_file', sha(dev_posts_file), '74447296831c8e3061fc0ee739f549c5b08b85a3')
expect('dev_posts_file', sha(dev_posts_file, ext='csv'), '6acfd1f8d238bc5d25d97d2c9e6f6b177699389a')
log('Exporting dev post to {}'.format(dev_posts_file))
log('# Completed exporting dev tweets')
del dev_posts_jsons
gc.collect()
# Find distinct set of tweets (unique body text)
post_pool = final_tweets_pool.where(final_tweets_pool['verb'] == 'post')
post_pool.persist(MEMORY_AND_DISK)
post_pool_ids = post_pool.select(post_pool['id']).rdd.sortBy(lambda x: x.id).map(lambda x: x.id).collect()
expect('post_pool', post_pool.count(), 1124935)
share_pool = final_tweets_pool.where(final_tweets_pool['verb'] == 'share')
share_pool.persist(MEMORY_AND_DISK)
expect('share_pool', share_pool.count(), 846141)
broadcast_post_ids = sc.broadcast(set(post_pool_ids))
unique_share_ids = share_pool.select(share_pool['id'], share_pool['object.id'].alias('object_id')).rdd.filter(lambda row: row['object_id'] not in broadcast_post_ids.value).map(lambda row: row.id).collect()
expect('unique_share_pool', len(unique_share_ids), 193006)
log('# Completed finding unique share tweet')
# Constructing distinct tweet pool
broadcast_unique_share_ids = sc.broadcast(unique_share_ids)
distinct_tweets_pool = final_tweets_pool.\
select(final_tweets_pool['id'], final_tweets_pool['body']).\
rdd.\
filter(lambda row: row['id'] in broadcast_post_ids.value or row['id'] in broadcast_unique_share_ids.value)
distinct_tweets_pool.persist(MEMORY_AND_DISK)
distinct_tweets_count = distinct_tweets_pool.count()
expect('distinct_tweets_pool', distinct_tweets_count, 1124935 + 193006)
# Exclude development tweets
tweets_unsampled = distinct_tweets_pool.toDF().where(~ col('id').isin(dev_posts))
tweets_unsampled.persist(MEMORY_AND_DISK)
tweets_unsampled_count = tweets_unsampled.count()
# no. of dev intersect post pool: 1718, no. of share dev intersect unique share pool: 293
expect('tweets_unsampled', tweets_unsampled_count, 1124935 + 193006 - 1718 - 293)
log('# Completed constructing unsampled tweets')
# Calculate subjectivity
lexicons = read_and_parse_clues()
udfBodyToRelevant = udf(lambda body: calculate_relevant(lexicons, body), IntegerType())
tweets_lexicon = tweets_unsampled.withColumn('score', udfBodyToRelevant('body'))
tweets_lexicon.persist(MEMORY_AND_DISK)
log('# Completed constructing tweet lexicon')
# Take top and bottom
number_of_tweets_each = 1500
positive_tweets = tweets_lexicon.orderBy(desc('score')).take(number_of_tweets_each)
negative_tweets = tweets_lexicon.orderBy(asc('score')).take(number_of_tweets_each)
# Cut top and bottom via score for more deterministic sampling
min_positive_score = positive_tweets[-1]['score']
min_negative_score = negative_tweets[-1]['score']
expect('min_positive_score', min_positive_score, 7)
expect('min_negative_score', min_negative_score, -5)
positive_tweets = tweets_lexicon.filter('score > {}'.format(min_positive_score - 1)).orderBy(desc('score')).collect()
expect('positive_tweets', len(positive_tweets), 2012)
negative_tweets = tweets_lexicon.filter('score < {}'.format(min_negative_score + 1)).orderBy(asc('score')).collect()
expect('positive_tweets', len(negative_tweets), 1715)
positive_tweet_file = "positive_tweets"
positive_tweets_ids = map(lambda t: t['id'], positive_tweets)
positive_tweet_jsons = final_tweets_pool[final_tweets_pool['id'].isin(positive_tweets_ids)].toJSON().collect()
to_json(positive_tweet_file, positive_tweet_jsons)
to_csv(positive_tweet_file, positive_tweet_jsons)
log('Exporting positive tweets to {}'.format(positive_tweet_file))
log('# Completed exporting positive tweets')
expect('positive_tweet_file', sha(positive_tweet_file), 'cb2f8b691ccf3eae9846c67735f413a49befea28')
expect('positive_tweet_file', sha(positive_tweet_file, ext='csv'), 'd3d43ab4e03fdf106b9191f4e0161cfcde3f040e')
negative_tweet_file = "negative_tweets"
negative_tweet_ids = map(lambda t: t['id'], negative_tweets)
negative_tweet_jsons = final_tweets_pool[final_tweets_pool['id'].isin(negative_tweet_ids)].toJSON().collect()
to_json(negative_tweet_file, negative_tweet_jsons)
to_csv(negative_tweet_file, negative_tweet_jsons)
log('Exporting negative tweets to {}'.format(negative_tweet_file))
log('# Completed exporting negative tweets')
expect('negative_tweet_file', sha(negative_tweet_file), '086c43427078092e538a779b8b06a71341b8da48')
expect('negative_tweet_file', sha(negative_tweet_file, ext='csv'), 'd10a1a95156c28d844e9c4e668d766963c0636a4')
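To make the lexicon scoring concrete, here is a self-contained toy run of calculate_relevant with a hand-built two-word lexicon; the clue dicts are invented stand-ins for entries parsed from subjclueslen1-HLTEMNLP05.tff:
toy_lexicon = {
    'love': {'priorpolarity': 'positive', 'type': 'strongsubj'},
    'bad': {'priorpolarity': 'negative', 'type': 'weaksubj'},
}
score = calculate_relevant(toy_lexicon, 'i love this but the queue was bad')
print(score)  # (+1 * 2) + (-1 * 1) = 1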
| 39.711039
| 205
| 0.739024
|
import sys
import json
import hashlib
import gc
import os
from operator import *
import shlex
from pyspark import StorageLevel
from pyspark.sql import SQLContext
from pyspark.sql.functions import *
from pyspark.sql.types import *
import numpy as np
from subjectivity_clues import clues
def expect(name, var, expected, op=eq):
if op(var, expected):
log('[checkpoint] {} = {}'.format(name, expected))
else:
log('[error] {} = {}'.format(name, expected))
raise Exception(name)
def log(message):
log_file = 'sample_subjectivity_tweets.log'
with open(log_file, 'a') as f:
f.write(message)
f.write('\n')
f.flush()
f.close()
print message
def to_json(name, jsons):
filename = '{}.json'.format(name)
with open(filename, 'w') as f:
for j in jsons:
f.write(j)
f.write('\n')
def to_csv(name, jsons):
filename = '{}.csv'.format(name)
with open(filename, 'w') as f:
for tweet in jsons:
t = json.loads(tweet)
body = t['body'].replace('\n', ' ').replace('\r', '').replace('"', '""')
f.write('"{}",{},{},"{}"\n'.format(t['id'], t['verb'], t['postedTime'], body))
def sample(rdd, size, seed):
items = rdd.collect()
rand = np.random.RandomState(seed)
sampled = rand.choice(items, size=size, replace=False)
expect('sampled', len(set(sampled)), size)
return sampled.tolist()
def sha(name, ext='json'):
BUF_SIZE = 65536
filename = '{}.{}'.format(name, ext)
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def read_and_parse_clues():
DEFAULT_FILENAME = os.getcwd() + os.sep + 'subjectivity_clues' + os.sep + 'subjclueslen1-HLTEMNLP05.tff'
lines = None
with open(DEFAULT_FILENAME, 'r') as f:
lines = f.readlines()
clues = dict()
for l in lines:
clue = dict(token.split('=') for token in shlex.split(l))
word = clue['word1']
clues[word] = clue
return clues
def calculate_relevant(lexicons, sentence):
PRIORPOLARITY = {
'positive': 1,
'negative': -1,
'both': 0,
'neutral': 0
}
TYPE = {
'strongsubj': 2,
'weaksubj': 1
}
total_score = 0
for w in sentence.split(' '):
if w not in lexicons.keys():
continue
total_score += PRIORPOLARITY[lexicons[w]['priorpolarity']] * TYPE[lexicons[w]['type']]
return total_score
# Make sure Python uses UTF-8 as tweets contains emoticon and unicode
reload(sys)
sys.setdefaultencoding('utf-8')
# Use SQLContext for better support
sqlContext = SQLContext(sc)
# Define storage level
DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
MEMORY_AND_DISK = StorageLevel(True, True, False, False, 1)
# Read GNIP's JSON file
directory = "tweets"
datasets = sqlContext.read.json(directory)
log('# Completed reading JSON files')
# Check checksum count
file_count = datasets.where(datasets['verb'].isNull()).count()
expect('file_count', file_count, 21888)
# Check post count
all_posts = datasets.where(datasets['verb'] == 'post')
all_posts_count = all_posts.count()
expect('all_posts_count', all_posts_count, 1570398)
# Check share count
all_shares = datasets.where(datasets['verb'] == 'share')
all_shares_count = all_shares.count()
expect('all_shares_count', all_shares_count, 1112590)
# Check dataset count
info_dataset = datasets.select('info')
info_dataset.registerTempTable('info')
all_tweets_count = info_dataset.select('info.activity_count').groupBy().sum('activity_count').collect()[0][0]
expect('all_tweets_count', all_tweets_count, 2682988)
expect('all_tweets_count', all_tweets_count, all_posts_count + all_shares_count)
log('# Completed validating tweets count')
# Remove post authored by @ChipotleTweet and news agencies
chipotle_tweet = 'id:twitter.com:141341662'
users_to_remove = [chipotle_tweet, 'id:twitter.com:759251', 'id:twitter.com:91478624', 'id:twitter.com:28785486',
'id:twitter.com:1652541', 'id:twitter.com:51241574', 'id:twitter.com:807095',
'id:twitter.com:34713362', 'id:twitter.com:3090733766', 'id:twitter.com:1367531',
'id:twitter.com:14293310', 'id:twitter.com:3108351', 'id:twitter.com:14173315',
'id:twitter.com:292777349', 'id:twitter.com:428333', 'id:twitter.com:624413',
'id:twitter.com:20562637', 'id:twitter.com:13918492', 'id:twitter.com:16184358',
'id:twitter.com:625697849', 'id:twitter.com:2467791', 'id:twitter.com:9763482',
'id:twitter.com:14511951', 'id:twitter.com:6017542', 'id:twitter.com:26574283',
'id:twitter.com:115754870']
all_posts_wo_specific_users = all_posts.where(~ col('actor.id').isin(users_to_remove))
all_posts_w_specific_users = all_posts.where(col('actor.id').isin(users_to_remove)).count()
expect('all_posts_wo_specific_users', all_posts_wo_specific_users.count(), all_posts_count - all_posts_w_specific_users)
# Remove share retweet of tweet by @ChipotleTweet and news agencies
all_shares_wo_specific_users = all_shares.where(~ col('object.actor.id').isin(users_to_remove))
all_shares_w_specific_users = all_shares.where(col('object.actor.id').isin(users_to_remove)).count()
expect('all_shares_wo_specific_users', all_shares_wo_specific_users.count(), all_shares_count - all_shares_w_specific_users)
# Generate tweets pool with only English tweet
tweets_pool = all_posts_wo_specific_users.unionAll(all_shares_wo_specific_users).filter("twitter_lang = 'en'")
tweets_pool.persist(MEMORY_AND_DISK)
tweets_pool_count = tweets_pool.count()
# Adding all post to all share will be greater than tweet pool because of non-English tweet
expected_tweets_pool_count = all_posts_count - all_posts_w_specific_users + \
all_shares_count - all_shares_w_specific_users
expect('tweets_pool_count', tweets_pool_count, expected_tweets_pool_count, op=lt)
log('# Completed constructing tweets pool')
# Check language of tweets
languages = tweets_pool.select('twitter_lang').distinct()
languages_count = languages.count()
language_check = languages.first()['twitter_lang']
expect('languages_count', languages_count, 1)
expect('language_check', language_check, 'en')
log('# Completed validating language variety')
# Take top 80% of tweets by length
tweets_pool_str_lengths = tweets_pool.select(length('body').alias('length')).rdd.map(lambda x: x.length).collect()
lengths_np = np.array(tweets_pool_str_lengths)
p = np.percentile(lengths_np, 20)
final_tweets_pool = tweets_pool.filter(length('body') >= p)
final_tweets_pool.persist(MEMORY_AND_DISK)
tweets_pool.unpersist(blocking=True)
final_tweets_pool_count = final_tweets_pool.count()
percentage_kept = float(final_tweets_pool_count) / tweets_pool_count
expect('percentage_kept', percentage_kept, 0.8, op=gt)
log('# Completed sampling top 80% of tweets by body length')
# Sampling
final_tweets_ids = final_tweets_pool.select(final_tweets_pool['id']).rdd.sortBy(lambda x: x.id).map(lambda x: x.id)
# Development tweets
dev_seed = 10102016
number_of_dev_samples = 3000
dev_posts = sample(final_tweets_ids, number_of_dev_samples, dev_seed)
dev_posts_count = len(dev_posts)
expect('dev_posts_count', dev_posts_count, number_of_dev_samples)
log('# Completed sampling dev tweets')
dev_posts_file = "dev_posts"
dev_posts_jsons = final_tweets_pool[final_tweets_pool['id'].isin(dev_posts)].toJSON().collect()
to_json(dev_posts_file, dev_posts_jsons)
to_csv(dev_posts_file, dev_posts_jsons)
expect('dev_posts_file', sha(dev_posts_file), '74447296831c8e3061fc0ee739f549c5b08b85a3')
expect('dev_posts_file', sha(dev_posts_file, ext='csv'), '6acfd1f8d238bc5d25d97d2c9e6f6b177699389a')
log('Exporting dev post to {}'.format(dev_posts_file))
log('# Completed exporting dev tweets')
del dev_posts_jsons
gc.collect()
# Find distinct set of tweets (unique body text)
post_pool = final_tweets_pool.where(final_tweets_pool['verb'] == 'post')
post_pool.persist(MEMORY_AND_DISK)
post_pool_ids = post_pool.select(post_pool['id']).rdd.sortBy(lambda x: x.id).map(lambda x: x.id).collect()
expect('post_pool', post_pool.count(), 1124935)
share_pool = final_tweets_pool.where(final_tweets_pool['verb'] == 'share')
share_pool.persist(MEMORY_AND_DISK)
expect('share_pool', share_pool.count(), 846141)
broadcast_post_ids = sc.broadcast(set(post_pool_ids))
unique_share_ids = share_pool.select(share_pool['id'], share_pool['object.id'].alias('object_id')).rdd.filter(lambda row: row['object_id'] not in broadcast_post_ids.value).map(lambda row: row.id).collect()
expect('unique_share_pool', len(unique_share_ids), 193006)
log('# Completed finding unique share tweet')
# Constructing distinct tweet pool
broadcast_unique_share_ids = sc.broadcast(unique_share_ids)
distinct_tweets_pool = final_tweets_pool.\
select(final_tweets_pool['id'], final_tweets_pool['body']).\
rdd.\
filter(lambda row: row['id'] in broadcast_post_ids.value or row['id'] in broadcast_unique_share_ids.value)
distinct_tweets_pool.persist(MEMORY_AND_DISK)
distinct_tweets_count = distinct_tweets_pool.count()
expect('distinct_tweets_pool', distinct_tweets_count, 1124935 + 193006)
# Exclude development tweets
tweets_unsampled = distinct_tweets_pool.toDF().where(~ col('id').isin(dev_posts))
tweets_unsampled.persist(MEMORY_AND_DISK)
tweets_unsampled_count = tweets_unsampled.count()
# no. of dev intersect post pool: 1718, no. of share dev intersect unique share pool: 293
expect('tweets_unsampled', tweets_unsampled_count, 1124935 + 193006 - 1718 - 293)
log('# Completed constructing unsampled tweets')
# Calculate subjectivity
lexicons = read_and_parse_clues()
udfBodyToRelevant = udf(lambda body: calculate_relevant(lexicons, body), IntegerType())
tweets_lexicon = tweets_unsampled.withColumn('score', udfBodyToRelevant('body'))
tweets_lexicon.persist(MEMORY_AND_DISK)
log('# Completed constructing tweet lexicon')
# Take top and bottom
number_of_tweets_each = 1500
positive_tweets = tweets_lexicon.orderBy(desc('score')).take(number_of_tweets_each)
negative_tweets = tweets_lexicon.orderBy(asc('score')).take(number_of_tweets_each)
# Cut top and bottom via score for more deterministic sampling
min_positive_score = positive_tweets[-1]['score']
min_negative_score = negative_tweets[-1]['score']
expect('min_positive_score', min_positive_score, 7)
expect('min_negative_score', min_negative_score, -5)
positive_tweets = tweets_lexicon.filter('score > {}'.format(min_positive_score - 1)).orderBy(desc('score')).collect()
expect('positive_tweets', len(positive_tweets), 2012)
negative_tweets = tweets_lexicon.filter('score < {}'.format(min_negative_score + 1)).orderBy(asc('score')).collect()
expect('positive_tweets', len(negative_tweets), 1715)
positive_tweet_file = "positive_tweets"
positive_tweets_ids = map(lambda t: t['id'], positive_tweets)
positive_tweet_jsons = final_tweets_pool[final_tweets_pool['id'].isin(positive_tweets_ids)].toJSON().collect()
to_json(positive_tweet_file, positive_tweet_jsons)
to_csv(positive_tweet_file, positive_tweet_jsons)
log('Exporting positive tweets to {}'.format(positive_tweet_file))
log('# Completed exporting positive tweets')
expect('positive_tweet_file', sha(positive_tweet_file), 'cb2f8b691ccf3eae9846c67735f413a49befea28')
expect('positive_tweet_file', sha(positive_tweet_file, ext='csv'), 'd3d43ab4e03fdf106b9191f4e0161cfcde3f040e')
negative_tweet_file = "negative_tweets"
negative_tweet_ids = map(lambda t: t['id'], negative_tweets)
negative_tweet_jsons = final_tweets_pool[final_tweets_pool['id'].isin(negative_tweet_ids)].toJSON().collect()
to_json(negative_tweet_file, negative_tweet_jsons)
to_csv(negative_tweet_file, negative_tweet_jsons)
log('Exporting negative tweets to {}'.format(negative_tweet_file))
log('# Completed exporting negative tweets')
expect('negative_tweet_file', sha(negative_tweet_file), '086c43427078092e538a779b8b06a71341b8da48')
expect('negative_tweet_file', sha(negative_tweet_file, ext='csv'), 'd10a1a95156c28d844e9c4e668d766963c0636a4')
| false
| true
|
790cda4bf2faa9f70329ed0bb38027dd59328653
| 2,208
|
py
|
Python
|
resources/lib/xbmcswift2/cli/cli.py
|
liberty-developer/plugin.video.metalliq-forqed
|
5477783a00672c9ae315c7897617d7bba8d746fd
|
[
"Apache-2.0"
] | 2
|
2018-09-07T06:56:06.000Z
|
2021-03-18T05:18:22.000Z
|
resources/lib/xbmcswift2/cli/cli.py
|
liberty-developer/plugin.video.metalliq-forqed
|
5477783a00672c9ae315c7897617d7bba8d746fd
|
[
"Apache-2.0"
] | null | null | null |
resources/lib/xbmcswift2/cli/cli.py
|
liberty-developer/plugin.video.metalliq-forqed
|
5477783a00672c9ae315c7897617d7bba8d746fd
|
[
"Apache-2.0"
] | 2
|
2020-04-23T18:06:15.000Z
|
2021-03-18T05:18:25.000Z
|
'''
xbmcswift2.cli.cli
------------------
The main entry point for the xbmcswift2 console script. CLI commands can be
registered in this module.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
'''
import sys
from optparse import OptionParser
from xbmcswift2.cli.app import RunCommand
from xbmcswift2.cli.create import CreateCommand
# TODO: Make an ABC for Command
COMMANDS = {
RunCommand.command: RunCommand,
CreateCommand.command: CreateCommand,
}
# TODO: Make this usage dynamic based on COMMANDS dict
USAGE = '''%prog <command>
Commands:
create
Create a new plugin project.
run
Run an xbmcswift2 plugin from the command line.
Help:
To see options for a command, run `xbmcswift2 <command> -h`
'''
def main():
'''The entry point for the console script xbmcswift2.
    The 'xbmcswift2' script is command based, so the second argument is always
the command to execute. Each command has its own parser options and usages.
If no command is provided or the -h flag is used without any other
commands, the general help message is shown.
'''
parser = OptionParser()
if len(sys.argv) == 1:
parser.set_usage(USAGE)
parser.error('At least one command is required.')
# spy sys.argv[1] in order to use correct opts/args
command = sys.argv[1]
if command == '-h':
parser.set_usage(USAGE)
opts, args = parser.parse_args()
if command not in COMMANDS.keys():
parser.error('Invalid command')
# We have a proper command, set the usage and options list according to the
# specific command
manager = COMMANDS[command]
if hasattr(manager, 'option_list'):
for args, kwargs in manager.option_list:
parser.add_option(*args, **kwargs)
if hasattr(manager, 'usage'):
parser.set_usage(manager.usage)
opts, args = parser.parse_args()
    # Since we are calling a specific command's manager, we no longer need the
# actual command in sys.argv so we slice from position 1
manager.run(opts, args[1:])
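Registering a new CLI command amounts to adding an entry to the COMMANDS dict; the class below is a hypothetical sketch of the manager interface that main() relies on (command, optional usage/option_list, and run), not part of xbmcswift2:
class VersionCommand(object):
    '''Hypothetical command that prints a version string.'''
    command = 'version'
    usage = '%prog version'
    @staticmethod
    def run(opts, args):
        print('xbmcswift2 x.y.z')
COMMANDS[VersionCommand.command] = VersionCommand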
| 28.675325
| 80
| 0.652627
|
import sys
from optparse import OptionParser
from xbmcswift2.cli.app import RunCommand
from xbmcswift2.cli.create import CreateCommand
COMMANDS = {
RunCommand.command: RunCommand,
CreateCommand.command: CreateCommand,
}
USAGE = '''%prog <command>
Commands:
create
Create a new plugin project.
run
Run an xbmcswift2 plugin from the command line.
Help:
To see options for a command, run `xbmcswift2 <command> -h`
'''
def main():
parser = OptionParser()
if len(sys.argv) == 1:
parser.set_usage(USAGE)
parser.error('At least one command is required.')
command = sys.argv[1]
if command == '-h':
parser.set_usage(USAGE)
opts, args = parser.parse_args()
if command not in COMMANDS.keys():
parser.error('Invalid command')
manager = COMMANDS[command]
if hasattr(manager, 'option_list'):
for args, kwargs in manager.option_list:
parser.add_option(*args, **kwargs)
if hasattr(manager, 'usage'):
parser.set_usage(manager.usage)
opts, args = parser.parse_args()
    # Since we are calling a specific command's manager, we no longer need the
    # actual command in sys.argv so we slice from position 1
manager.run(opts, args[1:])
| true
| true
|
790cdbbe1658c1b58ae557798b2a62f86ce73895
| 630
|
py
|
Python
|
pincer/objects/message/reaction.py
|
mjneff2/Pincer
|
a11bc3e4bad319fdf927d913c58c933576ec7c99
|
[
"MIT"
] | null | null | null |
pincer/objects/message/reaction.py
|
mjneff2/Pincer
|
a11bc3e4bad319fdf927d913c58c933576ec7c99
|
[
"MIT"
] | null | null | null |
pincer/objects/message/reaction.py
|
mjneff2/Pincer
|
a11bc3e4bad319fdf927d913c58c933576ec7c99
|
[
"MIT"
] | null | null | null |
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from ...utils.api_object import APIObject
if TYPE_CHECKING:
from ..message.emoji import Emoji
@dataclass
class Reaction(APIObject):
"""
Represents a Discord Reaction object
:param count:
times this emoji has been used to react
:param me:
whether the current user reacted using this emoji
:param emoji:
emoji information
"""
count: int
me: bool
emoji: Emoji
| 19.6875
| 65
| 0.706349
|
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from ...utils.api_object import APIObject
if TYPE_CHECKING:
from ..message.emoji import Emoji
@dataclass
class Reaction(APIObject):
count: int
me: bool
emoji: Emoji
| true
| true
|
790cdc4d9b91f6b3c77afc2a43eaebd858044833
| 2,137
|
py
|
Python
|
Revision/vis3d/prepare.py
|
qai222/ATMOxide
|
42702c1ce299233569c8a3c0a9712b0e62ef6b16
|
[
"MIT"
] | null | null | null |
Revision/vis3d/prepare.py
|
qai222/ATMOxide
|
42702c1ce299233569c8a3c0a9712b0e62ef6b16
|
[
"MIT"
] | null | null | null |
Revision/vis3d/prepare.py
|
qai222/ATMOxide
|
42702c1ce299233569c8a3c0a9712b0e62ef6b16
|
[
"MIT"
] | null | null | null |
from rdkit import Chem
from AnalysisModule.routines.util import load_pkl
# logit_result = yaml_fileread("../logistic.yml")
logit_result = load_pkl("../clf3d/logistic.pkl")
"""
epg-string --> maxscore
--> [(f, s)] --> xx, yy, zz, [(x, y, d)] --> refcode, amine
"""
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
def moltosvg(mol, molSize=(450, 450), kekulize=True):
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0], molSize[1])
# drawer.DrawMolecule(mc, legend="lalala") # legend fontsize hardcoded, too small
drawer.DrawMolecule(mc, )
drawer.FinishDrawing()
svg = drawer.GetDrawingText()
# It seems that the svg renderer used doesn't quite hit the spec.
# Here are some fixes to make it work in the notebook, although I think
# the underlying issue needs to be resolved at the generation step
return svg.replace('svg:', '')
def plot_amine(smi):
m = Chem.MolFromSmiles(smi)
return moltosvg(m)
def insert_url(svg, n=12, url="https://www.google.com", urlname="ABCDEF"):
lines = svg.split("\n")
template = '<a xmlns="http://www.w3.org/2000/svg" xlink:href="{}" xmlns:xlink="http://www.w3.org/1999/xlink" target="__blank"><text x="150" y="400" font-size="4em" fill="black">{}</text></a>'.format(
url, urlname)
s = ""
for il, l in enumerate(lines):
if il == n:
s += template + "\n"
s += l + "\n"
return s
for epg, epginfo in logit_result.items():
if epginfo is None:
print(epg, "info is None")
continue
for i, refcode in enumerate(epginfo["refcodes"]):
a = epginfo["amines"][i]
svg = plot_amine(a)
url = "https://www.ccdc.cam.ac.uk/structures/Search?Ccdcid={}".format(refcode)
# svg = insert_url(svg, urlname=refcode, url=url)
with open("amines/{}.svg".format(refcode), "w") as f:
f.write(svg)
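The insert_url helper is defined above but its call site is commented out; a sketch of wiring it back in for a single structure, using placeholder values for the SMILES and refcode:
svg = plot_amine('NCCN')  # placeholder SMILES (ethylenediamine)
svg = insert_url(svg, urlname='ABCDEF',
                 url='https://www.ccdc.cam.ac.uk/structures/Search?Ccdcid=ABCDEF')
with open('amines/ABCDEF.svg', 'w') as f:
    f.write(svg)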
| 33.390625
| 203
| 0.621432
|
from rdkit import Chem
from AnalysisModule.routines.util import load_pkl
logit_result = load_pkl("../clf3d/logistic.pkl")
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
def moltosvg(mol, molSize=(450, 450), kekulize=True):
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0], molSize[1])
    drawer.DrawMolecule(mc, )
    drawer.FinishDrawing()
svg = drawer.GetDrawingText()
# Here are some fixes to make it work in the notebook, although I think
# the underlying issue needs to be resolved at the generation step
return svg.replace('svg:', '')
def plot_amine(smi):
m = Chem.MolFromSmiles(smi)
return moltosvg(m)
def insert_url(svg, n=12, url="https://www.google.com", urlname="ABCDEF"):
lines = svg.split("\n")
template = '<a xmlns="http://www.w3.org/2000/svg" xlink:href="{}" xmlns:xlink="http://www.w3.org/1999/xlink" target="__blank"><text x="150" y="400" font-size="4em" fill="black">{}</text></a>'.format(
url, urlname)
s = ""
for il, l in enumerate(lines):
if il == n:
s += template + "\n"
s += l + "\n"
return s
for epg, epginfo in logit_result.items():
if epginfo is None:
print(epg, "info is None")
continue
for i, refcode in enumerate(epginfo["refcodes"]):
a = epginfo["amines"][i]
svg = plot_amine(a)
url = "https://www.ccdc.cam.ac.uk/structures/Search?Ccdcid={}".format(refcode)
# svg = insert_url(svg, urlname=refcode, url=url)
with open("amines/{}.svg".format(refcode), "w") as f:
f.write(svg)
| true
| true
|
790cdc5677023f1caeda62f7c3d38c1e283ad395
| 1,256
|
bzl
|
Python
|
test/extra_exec_rustc_flags/defs.bzl
|
silas-enf/rules_rust
|
41b39f0c9951dfda3bd0a95df31695578dd3f5ea
|
[
"Apache-2.0"
] | 1
|
2017-06-12T02:10:48.000Z
|
2017-06-12T02:10:48.000Z
|
test/extra_exec_rustc_flags/defs.bzl
|
silas-enf/rules_rust
|
41b39f0c9951dfda3bd0a95df31695578dd3f5ea
|
[
"Apache-2.0"
] | null | null | null |
test/extra_exec_rustc_flags/defs.bzl
|
silas-enf/rules_rust
|
41b39f0c9951dfda3bd0a95df31695578dd3f5ea
|
[
"Apache-2.0"
] | null | null | null |
"""Test transitions to test extra_exec_rustc_flags."""
def _extra_exec_rustc_flags_transition_impl(_settings, attr):
return {
"//:extra_exec_rustc_flags": attr.extra_exec_rustc_flags,
}
_extra_exec_rustc_flags_transition = transition(
implementation = _extra_exec_rustc_flags_transition_impl,
inputs = [],
outputs = ["//:extra_exec_rustc_flags"],
)
def _with_extra_exec_rustc_flags_cfg_impl(ctx):
return [DefaultInfo(files = depset(ctx.files.srcs))]
with_extra_exec_rustc_flags_cfg = rule(
implementation = _with_extra_exec_rustc_flags_cfg_impl,
attrs = {
"extra_exec_rustc_flags": attr.string_list(
mandatory = True,
),
"srcs": attr.label_list(
allow_files = True,
cfg = _extra_exec_rustc_flags_transition,
),
"_allowlist_function_transition": attr.label(
default = Label("//tools/allowlists/function_transition_allowlist"),
),
},
)
def _with_exec_cfg_impl(ctx):
return [DefaultInfo(files = depset(ctx.files.srcs))]
with_exec_cfg = rule(
implementation = _with_exec_cfg_impl,
attrs = {
"srcs": attr.label_list(
allow_files = True,
cfg = "exec",
),
},
)
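A sketch of how these rules might be consumed from a BUILD file; the target names and the flag value are assumptions:
with_extra_exec_rustc_flags_cfg(
    name = "transitioned_srcs",
    srcs = [":some_rust_library"],
    extra_exec_rustc_flags = ["--cfg=exec_flag_set"],
)
with_exec_cfg(
    name = "exec_srcs",
    srcs = [":transitioned_srcs"],
)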
| 27.911111
| 80
| 0.663217
|
def _extra_exec_rustc_flags_transition_impl(_settings, attr):
return {
"//:extra_exec_rustc_flags": attr.extra_exec_rustc_flags,
}
_extra_exec_rustc_flags_transition = transition(
implementation = _extra_exec_rustc_flags_transition_impl,
inputs = [],
outputs = ["//:extra_exec_rustc_flags"],
)
def _with_extra_exec_rustc_flags_cfg_impl(ctx):
return [DefaultInfo(files = depset(ctx.files.srcs))]
with_extra_exec_rustc_flags_cfg = rule(
implementation = _with_extra_exec_rustc_flags_cfg_impl,
attrs = {
"extra_exec_rustc_flags": attr.string_list(
mandatory = True,
),
"srcs": attr.label_list(
allow_files = True,
cfg = _extra_exec_rustc_flags_transition,
),
"_allowlist_function_transition": attr.label(
default = Label("//tools/allowlists/function_transition_allowlist"),
),
},
)
def _with_exec_cfg_impl(ctx):
return [DefaultInfo(files = depset(ctx.files.srcs))]
with_exec_cfg = rule(
implementation = _with_exec_cfg_impl,
attrs = {
"srcs": attr.label_list(
allow_files = True,
cfg = "exec",
),
},
)
| true
| true
|
790cdc9973b62eeb3220bc841bae964fe6ed1651
| 6,863
|
py
|
Python
|
src/tissue_purifier/models/_optim_scheduler.py
|
broadinstitute/tissue_purifier
|
989ce9d58bba99a3f1c49743eed22dcc64e5f159
|
[
"Apache-2.0"
] | null | null | null |
src/tissue_purifier/models/_optim_scheduler.py
|
broadinstitute/tissue_purifier
|
989ce9d58bba99a3f1c49743eed22dcc64e5f159
|
[
"Apache-2.0"
] | null | null | null |
src/tissue_purifier/models/_optim_scheduler.py
|
broadinstitute/tissue_purifier
|
989ce9d58bba99a3f1c49743eed22dcc64e5f159
|
[
"Apache-2.0"
] | null | null | null |
from typing import Tuple
import math
import torch
from torch.optim.optimizer import Optimizer
def linear_warmup_and_cosine_protocol(
f_values: Tuple[float, float, float],
x_milestones: Tuple[int, int, int, int]):
"""
There are 5 regions:
1. constant at f0 for x < x0
2. linear increase from f0 to f1 for x0 < x < x1
3. constant at f1 for x1 < x < x2
4. cosine protocol from f1 to f2 for x2 < x < x3
5. constant at f2 for x > x3
If you want a linear_ramp followed by a cosine_decay only simply set:
1. x0=0 (to eliminate the first constant piece)
2. x2=x1 (to eliminate the second constant piece)
3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay)
"""
assert x_milestones[0] <= x_milestones[1] <= x_milestones[2] <= x_milestones[3]
def fn(step):
if step <= x_milestones[0]:
return float(f_values[0])
elif (step > x_milestones[0]) and (step <= x_milestones[1]):
m = float(f_values[1] - f_values[0]) / float(max(1, x_milestones[1] - x_milestones[0]))
return float(f_values[0]) + m * float(step - x_milestones[0])
elif (step > x_milestones[1]) and (step <= x_milestones[2]):
return float(f_values[1])
elif (step > x_milestones[2]) and (step <= x_milestones[3]):
progress = float(step - x_milestones[2]) / float(max(1, x_milestones[3] - x_milestones[2])) # in (0,1)
tmp = 0.5 * (1.0 + math.cos(math.pi * progress)) # in (1,0)
return float(f_values[2]) + tmp * float(f_values[1] - f_values[2])
else:
return float(f_values[2])
return fn
class LARS(Optimizer):
"""
Extends SGD in PyTorch with LARS scaling from the paper
'Large batch training of Convolutional Networks <https://arxiv.org/pdf/1708.03888.pdf>'_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
trust_coefficient (float, optional): trust coefficient for computing LR (default: 0.001)
eps (float, optional): eps for division denominator (default: 1e-8)
Example:
>>> model = torch.nn.Linear(10, 1)
>>> input = torch.Tensor(10)
>>> target = torch.Tensor([1.])
>>> loss_fn = lambda input, target: (input - target) ** 2
>>> #
>>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
Note:
The application of momentum in the SGD part is modified according to
the PyTorch standards. LARS scaling fits into the equation in the
following fashion.
    .. math::
        \begin{aligned}
        g_{t+1} & = \text{lars\_lr} * (\beta * p_{t} + g_{t+1}), \\
        v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
        p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
        \end{aligned}
    where :math:`p`, :math:`g`, :math:`v`, :math:`\mu` and :math:`\beta` denote the
    parameters, gradient, velocity, momentum, and weight decay respectively.
    The :math:`\text{lars\_lr}` is defined by Eq. 6 in the paper.
The Nesterov version is analogously modified.
.. warning::
Parameters with weight decay set to 0 will automatically be excluded from
layer-wise LR scaling. This is to ensure consistency with papers like SimCLR
and BYOL.
"""
def __init__(
self,
params,
lr=None,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
trust_coefficient=0.001,
eps=1e-8,
):
if lr is None or lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
trust_coefficient=trust_coefficient,
eps=eps,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# exclude scaling for params with 0 weight decay
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad
p_norm = torch.norm(p.data)
g_norm = torch.norm(p.grad.data)
# lars scaling + weight decay part
if weight_decay != 0:
if p_norm != 0 and g_norm != 0:
lars_lr = p_norm / (g_norm + p_norm * weight_decay + group["eps"])
lars_lr *= group["trust_coefficient"]
d_p = d_p.add(p, alpha=weight_decay)
d_p *= lars_lr
# sgd part
if momentum != 0:
param_state = self.state[p]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.clone(d_p).detach()
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=-group["lr"])
return loss
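A sketch of wiring the two pieces above together. Because linear_warmup_and_cosine_protocol returns absolute values, one option (an assumption, not the only possible wiring) is to give the optimizer a base lr of 1.0 so that torch's LambdaLR treats the protocol output as the effective learning rate:
import torch
model = torch.nn.Linear(10, 1)
optimizer = LARS(model.parameters(), lr=1.0, momentum=0.9, weight_decay=1e-6)
lr_fn = linear_warmup_and_cosine_protocol(
    f_values=(0.0, 0.1, 1e-4),         # start, peak, and final learning rate
    x_milestones=(0, 100, 100, 1000),  # ramp over steps 0-100, cosine decay 100-1000
)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_fn)
for step in range(5):
    optimizer.step()      # gradients are None here, so parameters are untouched
    scheduler.step()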
| 38.127778
| 115
| 0.560105
|
from typing import Tuple
import math
import torch
from torch.optim.optimizer import Optimizer
def linear_warmup_and_cosine_protocol(
f_values: Tuple[float, float, float],
x_milestones: Tuple[int, int, int, int]):
assert x_milestones[0] <= x_milestones[1] <= x_milestones[2] <= x_milestones[3]
def fn(step):
if step <= x_milestones[0]:
return float(f_values[0])
elif (step > x_milestones[0]) and (step <= x_milestones[1]):
m = float(f_values[1] - f_values[0]) / float(max(1, x_milestones[1] - x_milestones[0]))
return float(f_values[0]) + m * float(step - x_milestones[0])
elif (step > x_milestones[1]) and (step <= x_milestones[2]):
return float(f_values[1])
elif (step > x_milestones[2]) and (step <= x_milestones[3]):
progress = float(step - x_milestones[2]) / float(max(1, x_milestones[3] - x_milestones[2]))
tmp = 0.5 * (1.0 + math.cos(math.pi * progress))
return float(f_values[2]) + tmp * float(f_values[1] - f_values[2])
else:
return float(f_values[2])
return fn
class LARS(Optimizer):
def __init__(
self,
params,
lr=None,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
trust_coefficient=0.001,
eps=1e-8,
):
if lr is None or lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
trust_coefficient=trust_coefficient,
eps=eps,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad
p_norm = torch.norm(p.data)
g_norm = torch.norm(p.grad.data)
if weight_decay != 0:
if p_norm != 0 and g_norm != 0:
lars_lr = p_norm / (g_norm + p_norm * weight_decay + group["eps"])
lars_lr *= group["trust_coefficient"]
d_p = d_p.add(p, alpha=weight_decay)
d_p *= lars_lr
if momentum != 0:
param_state = self.state[p]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.clone(d_p).detach()
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=-group["lr"])
return loss
| true
| true
|
790cdcdc82117b3c9160fdd4b290fbace6a359f3
| 234
|
py
|
Python
|
Lib/site-packages/nbconvert/tests/files/hello.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | 1
|
2021-12-14T18:49:11.000Z
|
2021-12-14T18:49:11.000Z
|
Lib/site-packages/nbconvert/tests/files/hello.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/nbconvert/tests/files/hello.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
from nbconvert.writers.base import WriterBase
class HelloWriter(WriterBase):
def write(self, output, resources, notebook_name=None, **kw):
with open("hello.txt", "w") as outfile:
outfile.write("hello world")
| 29.25
| 65
| 0.683761
|
from nbconvert.writers.base import WriterBase
class HelloWriter(WriterBase):
def write(self, output, resources, notebook_name=None, **kw):
with open("hello.txt", "w") as outfile:
outfile.write("hello world")
| true
| true
|
790cdd1f25475d11a3eed1a60bb988ffcfb980d5
| 1,616
|
py
|
Python
|
tests/basetest.py
|
WolfgangFahl/pyOnlineSpreadSheetEditing
|
a941a6b0cd89297491d8d2b8fa3efc7a2993c132
|
[
"Apache-2.0"
] | null | null | null |
tests/basetest.py
|
WolfgangFahl/pyOnlineSpreadSheetEditing
|
a941a6b0cd89297491d8d2b8fa3efc7a2993c132
|
[
"Apache-2.0"
] | 26
|
2021-12-11T09:01:25.000Z
|
2022-03-25T09:05:19.000Z
|
tests/basetest.py
|
WolfgangFahl/pyOnlineSpreadSheetEditing
|
a941a6b0cd89297491d8d2b8fa3efc7a2993c132
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on 2021-08-19
@author: wf
'''
from unittest import TestCase
import time
import getpass
import os
class BaseTest(TestCase):
'''
base test case
'''
def setUp(self,debug=False,profile=True):
'''
setUp test environment
'''
TestCase.setUp(self)
self.debug=debug
self.profile=profile
msg=f"test {self._testMethodName}, debug={self.debug}"
self.profiler=Profiler(msg,profile=self.profile)
def tearDown(self):
TestCase.tearDown(self)
self.profiler.time()
@staticmethod
def inPublicCI():
'''
are we running in a public Continuous Integration Environment?
'''
publicCI=getpass.getuser() in ["travis", "runner"]
jenkins= "JENKINS_HOME" in os.environ;
return publicCI or jenkins
class Profiler:
'''
simple profiler
'''
def __init__(self,msg,profile=True):
'''
construct me with the given msg and profile active flag
Args:
msg(str): the message to show if profiling is active
profile(bool): True if messages should be shown
'''
self.msg=msg
self.profile=profile
self.starttime=time.time()
if profile:
print(f"Starting {msg} ...")
def time(self,extraMsg=""):
'''
time the action and print if profile is active
'''
elapsed=time.time()-self.starttime
if self.profile:
print(f"{self.msg}{extraMsg} took {elapsed:5.1f} s")
return elapsed
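A small usage sketch of the Profiler class on its own, outside the TestCase machinery:
profiler = Profiler("loading dataset", profile=True)
# ... timed work goes here ...
elapsed = profiler.time(extraMsg=" (cold cache)")
print(f"elapsed: {elapsed:5.1f} s")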
| 25.25
| 70
| 0.571782
|
from unittest import TestCase
import time
import getpass
import os
class BaseTest(TestCase):
def setUp(self,debug=False,profile=True):
TestCase.setUp(self)
self.debug=debug
self.profile=profile
msg=f"test {self._testMethodName}, debug={self.debug}"
self.profiler=Profiler(msg,profile=self.profile)
def tearDown(self):
TestCase.tearDown(self)
self.profiler.time()
@staticmethod
def inPublicCI():
publicCI=getpass.getuser() in ["travis", "runner"]
jenkins= "JENKINS_HOME" in os.environ;
return publicCI or jenkins
class Profiler:
def __init__(self,msg,profile=True):
self.msg=msg
self.profile=profile
self.starttime=time.time()
if profile:
print(f"Starting {msg} ...")
def time(self,extraMsg=""):
elapsed=time.time()-self.starttime
if self.profile:
print(f"{self.msg}{extraMsg} took {elapsed:5.1f} s")
return elapsed
| true
| true
|
790cddbbccae17b65b924bcd5b757e4998a93ad0
| 46
|
py
|
Python
|
grade/tests/constants.py
|
ProgrammingDaisukiClub/Orientation2015Problems
|
ea778f830b427980690b9bf36be27851cb05c584
|
[
"MIT"
] | 1
|
2017-10-05T09:26:45.000Z
|
2017-10-05T09:26:45.000Z
|
grade/tests/constants.py
|
ProgrammingDaisukiClub/Orientation2015Problems
|
ea778f830b427980690b9bf36be27851cb05c584
|
[
"MIT"
] | 1
|
2015-06-25T00:09:08.000Z
|
2015-06-25T00:09:08.000Z
|
grade/tests/constants.py
|
ProgrammingDaisukiClub/Orientation2015Problems
|
ea778f830b427980690b9bf36be27851cb05c584
|
[
"MIT"
] | 3
|
2015-06-24T13:21:30.000Z
|
2020-05-15T14:04:32.000Z
|
#!/usr/bin/python2
MIN = -10000
MAX = 10000
| 7.666667
| 18
| 0.630435
|
MIN = -10000
MAX = 10000
| true
| true
|
790cddf0a9b9395c3b600894c840659887dfacc3
| 773
|
py
|
Python
|
src/test/resources/simpleapp.py
|
shaipraj/databricks-client-java
|
720f680a3c7fd8cd4174aa412f2608de1816bec3
|
[
"Apache-2.0"
] | 8
|
2017-09-15T05:24:08.000Z
|
2021-03-24T14:36:34.000Z
|
src/test/resources/simpleapp.py
|
shaipraj/databricks-client-java
|
720f680a3c7fd8cd4174aa412f2608de1816bec3
|
[
"Apache-2.0"
] | 9
|
2018-07-09T17:39:26.000Z
|
2021-12-09T19:48:18.000Z
|
src/test/resources/simpleapp.py
|
shaipraj/databricks-client-java
|
720f680a3c7fd8cd4174aa412f2608de1816bec3
|
[
"Apache-2.0"
] | 5
|
2018-07-10T01:36:23.000Z
|
2019-12-02T17:39:52.000Z
|
from pyspark.sql import SparkSession
def get_spark():
return (SparkSession.builder
.appName("simpleapp")
.master("local")
.getOrCreate())
from pyspark import SparkConf, SparkContext
import sys
def main(sc, args):
print("SimpleApp Arguments")
for x in args:
        print(x)
simple_data = [
("Group A", "Section 1", 50),
("Group B", "Section 2", 75),
("Group A", "Section 1", 25),
("Group C", "section 2", 75)
]
simple_df = get_spark().createDataFrame(
simple_data,
["Group", "Section", "Amount"]
)
simple_df.show()
if __name__ == "__main__":
# Configure Spark
sc = get_spark()
# Execute Main functionality
main(sc, sys.argv)
| 20.891892
| 44
| 0.564036
|
from pyspark.sql import SparkSession
def get_spark():
return (SparkSession.builder
.appName("simpleapp")
.master("local")
.getOrCreate())
from pyspark import SparkConf, SparkContext
import sys
def main(sc, args):
print("SimpleApp Arguments")
for x in args:
        print(x)
simple_data = [
("Group A", "Section 1", 50),
("Group B", "Section 2", 75),
("Group A", "Section 1", 25),
("Group C", "section 2", 75)
]
simple_df = get_spark().createDataFrame(
simple_data,
["Group", "Section", "Amount"]
)
simple_df.show()
if __name__ == "__main__":
sc = get_spark()
main(sc, sys.argv)
| false
| true
|