hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e53df4421c0f110a8a5316f715c86d10dbca14f | 3,219 | py | Python | models/evaluate/image.py | wengithz/t-wji | 482c5a435a72dbf88fdfab385f74d4f42f4afec1 | [
"MIT"
] | 246 | 2019-07-03T12:03:33.000Z | 2022-03-19T08:43:38.000Z | models/evaluate/image.py | wengithz/t-wji | 482c5a435a72dbf88fdfab385f74d4f42f4afec1 | [
"MIT"
] | 3 | 2019-08-19T02:23:24.000Z | 2020-04-20T07:45:03.000Z | models/evaluate/image.py | wengithz/t-wji | 482c5a435a72dbf88fdfab385f74d4f42f4afec1 | [
"MIT"
] | 70 | 2019-07-03T13:29:11.000Z | 2022-03-01T06:49:08.000Z | '''
File: image.py
Project: evaluate
File Created: Wednesday, 28th November 2018 4:14:46 pm
Author: xiaofeng (sxf1052566766@163.com)
-----
Last Modified: Saturday, 22nd December 2018 4:00:03 pm
Modified By: xiaofeng (sxf1052566766@163.com>)
-----
2018.06 - 2018 Latex Math, Latex Math
'''
from os import listdir
from os.path import isfile, join
import distance
import numpy as np
from scipy.misc import imread
def get_files(dir_name):
    """Return the names of the regular files directly inside *dir_name*.

    Sub-directories are skipped; only plain files are listed.
    """
    return [entry for entry in listdir(dir_name)
            if isfile(join(dir_name, entry))]
def score_dirs(dir_ref, dir_hyp, prepro_img):
    """Returns scores from a dir with images

    Args:
        dir_ref: (string) directory of reference .png images
        dir_hyp: (string) directory of hypothesis .png images
        prepro_img: (lambda function) preprocessing applied to each loaded image

    Returns:
        scores: (dict) with keys "EM" (exact-match rate) and
            "Lev" (1 - normalized column-wise Levenshtein distance)
    """
    img_refs = [f for f in get_files(dir_ref) if f.split('.')[-1] == "png"]
    img_hyps = [f for f in get_files(dir_hyp) if f.split('.')[-1] == "png"]
    em_tot = l_dist_tot = length_tot = n_ex = 0
    for img_name in img_refs:
        # use join() so the directory argument works with or without a
        # trailing path separator (plain `dir_ref + img_name` broke otherwise)
        img_ref = imread(join(dir_ref, img_name))
        img_ref = prepro_img(img_ref)
        if img_name in img_hyps:
            img_hyp = imread(join(dir_hyp, img_name))
            img_hyp = prepro_img(img_hyp)
            l_dist, length = img_edit_distance(img_ref, img_hyp)
        else:
            # missing hypothesis image: count the whole reference width as errors
            l_dist = length = img_ref.shape[1]
        l_dist_tot += l_dist
        length_tot += length
        if l_dist < 1:
            em_tot += 1
        n_ex += 1
    # compute scores, guarding against empty directories
    scores = dict()
    scores["EM"] = em_tot / float(n_ex) if n_ex > 0 else 0
    scores["Lev"] = 1 - l_dist_tot / float(length_tot) if length_tot > 0 else 0
    return scores
def img_edit_distance(img1, img2):
    """Computes Levenshtein distance between two images.

    (From Harvard's NLP github.)  Slices the images into columns and
    considers one column as a character.

    Args:
        img1, img2: np arrays of shape (H, W, 1)

    Returns:
        column wise levenshtein distance
        max length of the two sequences (as a float)
    """
    # keep only the first channel, transpose so each row is an image column,
    # then binarize: dark pixels (<= 128) become 1
    cols1 = (np.transpose(img1[:, :, 0]) <= 128).astype(np.uint8)
    cols2 = (np.transpose(img2[:, :, 0]) <= 128).astype(np.uint8)
    h1 = cols1.shape[1]
    h2 = cols2.shape[1]
    # pad the shorter image's column bitstrings with trailing '0's so both
    # encodings share the same width (no-op when heights already match)
    target = max(h1, h2)
    pad1 = '0' * (target - h1)
    pad2 = '0' * (target - h2)
    seq1 = [''.join(str(bit) for bit in column) + pad1 for column in cols1]
    seq2 = [''.join(str(bit) for bit in column) + pad2 for column in cols2]
    # interpret each padded column bitstring as an integer "character"
    seq1_int = [int(code, 2) for code in seq1]
    seq2_int = [int(code, 2) for code in seq2]
    # distance between the two column sequences
    l_dist = distance.levenshtein(seq1_int, seq2_int)
    length = float(max(len(seq1_int), len(seq2_int)))
    return l_dist, length
| 27.279661 | 79 | 0.600186 |
28f89fb49764196e9ed155bc2dcf5107fabae670 | 8,135 | py | Python | alttprbot_discord/cogs/role.py | skyscooby/sahasrahbot | 16fce824bd024f6357a8f260e2447ba477dcdac2 | [
"MIT"
] | null | null | null | alttprbot_discord/cogs/role.py | skyscooby/sahasrahbot | 16fce824bd024f6357a8f260e2447ba477dcdac2 | [
"MIT"
] | null | null | null | alttprbot_discord/cogs/role.py | skyscooby/sahasrahbot | 16fce824bd024f6357a8f260e2447ba477dcdac2 | [
"MIT"
] | null | null | null |
import re
import csv
import io
import discord
from discord.ext import commands
from emoji import UNICODE_EMOJI
from alttprbot.database import role # TODO switch to ORM
from alttprbot.exceptions import SahasrahBotException
from ..util import embed_formatter
class Role(commands.Cog):
    """Reaction-role cog: members self-assign roles by reacting to menu messages.

    Role and group definitions are stored through the ``role`` database module;
    menu messages are (optionally) created and refreshed by the bot itself.
    """
    def __init__(self, bot):
        self.bot = bot
    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload):
        # Grant every role mapped to this (channel, message, emoji) tuple.
        emoji = str(payload.emoji)
        roles = await role.get_role_by_group_emoji(payload.channel_id, payload.message_id, emoji, payload.guild_id)
        if len(roles) == 0:
            return # we don't want to continue, as there isn't really anything more we need to do here
        guild = await self.bot.fetch_guild(payload.guild_id)
        member = await guild.fetch_member(payload.user_id)
        for roleids in roles:
            role_obj = guild.get_role(roleids['role_id'])
            if role_obj is None:
                # the stored role id no longer resolves on this guild; skip it
                continue
            else:
                await member.add_roles(role_obj, reason="Added by message reaction.")
    @commands.Cog.listener()
    async def on_raw_reaction_remove(self, payload):
        # Mirror of on_raw_reaction_add: revoke the mapped role(s).
        emoji = str(payload.emoji)
        roles = await role.get_role_by_group_emoji(payload.channel_id, payload.message_id, emoji, payload.guild_id)
        if len(roles) == 0:
            return # we don't want to continue, as there isn't really anything more we need to do here
        guild = await self.bot.fetch_guild(payload.guild_id)
        member = await guild.fetch_member(payload.user_id)
        for roleids in roles:
            role_obj = guild.get_role(roleids['role_id'])
            if role_obj is None:
                # the stored role id no longer resolves on this guild; skip it
                continue
            else:
                await member.remove_roles(role_obj, reason="Removed by message reaction.")
    @commands.group(aliases=['rr'])
    @commands.check_any(commands.has_permissions(manage_roles=True), commands.is_owner())
    async def reactionrole(self, ctx):
        # Parent command group for the reaction-role subcommands; no-op itself.
        pass
    @reactionrole.command(name='create', aliases=['c'])
    async def role_create(self, ctx, group_id: int, role_name: discord.Role, name, description, emoji, protect_mentions: bool = True):
        """Attach a new reaction role to an existing group and refresh its menu."""
        existing_roles = await role.get_group_roles(group_id, ctx.guild.id)
        if len(existing_roles) >= 20:
            # a single menu message cannot carry more than 20 reactions
            raise SahasrahBotException(
                'No more than 20 roles can be on a group. Please create a new group.')
        # if discord.utils.find(lambda e: str(e) == emoji, ctx.bot.emojis) is None and not is_emoji(emoji):
        #     raise SahasrahBotException(
        #         'Custom emoji is not available to this bot.')
        await role.create_role(ctx.guild.id, group_id, role_name.id, name, emoji, description, protect_mentions)
        await refresh_bot_message(ctx, group_id)
    @reactionrole.command(name='update', aliases=['u'])
    async def role_update(self, ctx, role_id: int, name, description, protect_mentions: bool = False):
        """Update a reaction role's metadata, then refresh its group's menu."""
        await role.update_role(ctx.guild.id, role_id, name, description, protect_mentions)
        groups = await role.get_role_group(role_id, ctx.guild.id)
        await refresh_bot_message(ctx, groups[0]['id'])
    # this is a whole pile of trash...
    @reactionrole.command(name='delete', aliases=['del'])
    async def role_delete(self, ctx, role_id: int):
        """Delete a reaction role, remove the bot's reaction, refresh the menu."""
        groups = await role.get_role_group(role_id, ctx.guild.id)
        channel = ctx.guild.get_channel(groups[0]['channel_id'])
        message = await channel.fetch_message(groups[0]['message_id'])
        await message.remove_reaction(strip_custom_emoji(groups[0]['emoji']), ctx.bot.user)
        await role.delete_role(ctx.guild.id, role_id)
        await refresh_bot_message(ctx, groups[0]['id'])
    @reactionrole.command(name='list', aliases=['l'])
    async def role_list(self, ctx, group_id: int):
        """Reply with an embed listing every role in the given group."""
        roles = await role.get_group_roles(group_id, ctx.guild.id)
        await ctx.reply(embed=embed_formatter.reaction_role_list(ctx, roles))
    @commands.group(aliases=['rg'])
    @commands.check_any(commands.has_permissions(manage_roles=True), commands.is_owner())
    async def reactiongroup(self, ctx):
        # Parent command group for the reaction-group subcommands; no-op itself.
        pass
    @reactiongroup.command(name='create', aliases=['c'])
    async def group_create(self, ctx, channel: discord.TextChannel, name, description=None, bot_managed: bool = True, message_id: int = None):
        """Create a group, sending a fresh menu message when bot-managed."""
        if bot_managed:
            message = await channel.send('temp message')
        else:
            # attach the group to an existing, caller-provided message
            message = await channel.fetch_message(message_id)
        await role.create_group(ctx.guild.id, channel.id, message.id, name, description, bot_managed)
    @reactiongroup.command(name='update', aliases=['u'])
    async def group_update(self, ctx, group_id: int, name, description):
        """Rename/redescribe a group and refresh its menu message."""
        await role.update_group(ctx.guild.id, group_id, name, description)
        await refresh_bot_message(ctx, group_id)
    @reactiongroup.command(name='refresh', aliases=['r'])
    async def group_refresh(self, ctx, group_id: int):
        """Force a refresh of the group's menu message."""
        await refresh_bot_message(ctx, group_id)
    @reactiongroup.command(name='delete', aliases=['d'])
    async def group_delete(self, ctx, group_id: int):
        """Delete the group record from the database."""
        await role.delete_group(ctx.guild.id, group_id)
    @reactiongroup.command(name='list', aliases=['l'])
    async def group_list(self, ctx, group_id: int = None):
        """List all groups on this guild, or a single group when an id is given."""
        if group_id is None:
            groups = await role.get_guild_groups(ctx.guild.id)
        else:
            groups = await role.get_guild_group_by_id(group_id, ctx.guild.id)
        await ctx.reply(embed=await embed_formatter.reaction_group_list(ctx, groups))
    @commands.command()
    @commands.check_any(commands.has_permissions(manage_roles=True), commands.is_owner())
    async def importroles(self, ctx, mode=None):
        """Bulk-assign roles from an attached CSV with 'role' and 'member' columns.

        Pass ``mode="dry"`` to validate the file without changing any roles.
        """
        if ctx.message.attachments:
            content = await ctx.message.attachments[0].read()
            role_import_list = csv.DictReader(
                io.StringIO(content.decode()))
            for i in role_import_list:
                try:
                    role_obj = await commands.RoleConverter().convert(ctx, i['role'])
                except commands.BadArgument:
                    await ctx.reply(f"Failed to find role identified by {i['role']}")
                    continue
                try:
                    member_obj = await commands.MemberConverter().convert(ctx, i['member'])
                except commands.BadArgument:
                    await ctx.reply(f"Failed to find member identified by {i['member']}")
                    continue
                if not mode == "dry":
                    await member_obj.add_roles(role_obj)
        else:
            raise SahasrahBotException("You must supply a valid csv file.")
async def refresh_bot_message(ctx, group_id):
    """Re-sync a group's menu message: add one reaction per configured role
    and, for bot-managed groups, regenerate the embed content."""
    groups = await role.get_guild_group_by_id(group_id, ctx.guild.id)
    group = groups[0]
    roles = await role.get_group_roles(group_id, ctx.guild.id)
    channel = ctx.guild.get_channel(group['channel_id'])
    message = await channel.fetch_message(group['message_id'])
    for item in roles:
        # try:
        await message.add_reaction(strip_custom_emoji(item['emoji']))
        # except discord.errors.HTTPException as err:
        #     if err.code == 10014:
        #         await ctx.reply("That emoji is unknown to this bot. It may be a subscriber-only or an emoji from a server this bot cannot access. Please manually add it to the role menu!\n\nPlease note that the emoji could not be displayed on the role menu.")
        #     else:
        #         raise
    if group['bot_managed']:
        # bot-managed menus get their embed regenerated from the database state
        embed = embed_formatter.reaction_menu(ctx, group, roles)
        await message.edit(content=None, embed=embed)
def strip_custom_emoji(emoji):
    """Strip one leading '<' and one trailing '>' from a custom emoji string.

    Turns a raw custom-emoji token like ``<:name:1234>`` into ``:name:1234``;
    plain unicode emoji pass through unchanged.
    """
    if emoji.startswith('<'):
        emoji = emoji[1:]
    if emoji.endswith('>'):
        emoji = emoji[:-1]
    return emoji
def is_emoji(s):
    """Return True if *s* is a unicode emoji known to the emoji package."""
    # membership test already yields a bool; no need for `True if ... else False`
    return s in UNICODE_EMOJI
def setup(bot):
    # discord.py extension entry point: register this cog on the bot
    bot.add_cog(Role(bot))
| 42.369792 | 263 | 0.642532 |
c57bbc744bba668df32a697ca949a6db0b58d6a7 | 2,111 | py | Python | ginkgo/async/eventlet.py | rlugojr/ginkgo | 440b75186506bf9a8badba038068dd97293ea4b8 | [
"MIT"
] | 28 | 2015-01-04T15:47:05.000Z | 2019-07-19T11:23:06.000Z | ginkgo/async/eventlet.py | inconshreveable/ginkgo | b4857f6ed493f4903a6de168de64e859c8606309 | [
"MIT"
] | null | null | null | ginkgo/async/eventlet.py | inconshreveable/ginkgo | b4857f6ed493f4903a6de168de64e859c8606309 | [
"MIT"
] | 14 | 2015-05-29T23:49:28.000Z | 2021-06-20T03:29:51.000Z | from __future__ import absolute_import
import eventlet
import eventlet.greenpool
import eventlet.greenthread
import eventlet.event
import eventlet.queue
import eventlet.timeout
import eventlet.semaphore
from ..core import BasicService, Service
from ..util import defaultproperty
class AsyncManager(BasicService):
    """Async primitives from eventlet"""
    # grace period (seconds) for running greenlets to finish before being killed
    stop_timeout = defaultproperty(int, 1)
    def __init__(self):
        self._greenlets = eventlet.greenpool.GreenPool()
    def do_stop(self):
        """Stop the pool: wait up to `stop_timeout`, then kill stragglers."""
        # If stop was requested from inside one of our own greenlets,
        # re-run it from a fresh greenlet so the caller does not kill itself.
        if eventlet.greenthread.getcurrent() in self._greenlets.coroutines_running:
            return eventlet.spawn(self.do_stop).join()
        if self._greenlets.running():
            with eventlet.timeout.Timeout(self.stop_timeout, False):
                self._greenlets.waitall() # put in timeout for stop_timeout
            # anything still alive after the grace period is forcibly killed
            for g in list(self._greenlets.coroutines_running):
                with eventlet.timeout.Timeout(1, False):
                    g.kill() # timeout of 1 sec?
    def spawn(self, func, *args, **kwargs):
        """Spawn a greenlet under this service"""
        return self._greenlets.spawn(func, *args, **kwargs)
    def spawn_later(self, seconds, func, *args, **kwargs):
        """Spawn a greenlet in the future under this service"""
        def spawner():
            # indirection keeps the delayed greenlet tracked by our pool
            self.spawn(func, *args, **kwargs)
        return eventlet.spawn_after(seconds, spawner)
    def sleep(self, seconds):
        """Cooperatively sleep for *seconds*."""
        return eventlet.sleep(seconds)
    def queue(self, *args, **kwargs):
        """Return a new eventlet queue."""
        return eventlet.queue.Queue(*args, **kwargs)
    def event(self, *args, **kwargs):
        """Return a new Event (the wrapper class defined in this module)."""
        return Event(*args, **kwargs)
    def lock(self, *args, **kwargs):
        """Return a new semaphore, usable as a lock."""
        return eventlet.semaphore.Semaphore(*args, **kwargs)
class Event(eventlet.event.Event):
    """eventlet Event extended with clear/set/wait-with-timeout methods."""
    def clear(self):
        # reset() is only legal once the event has fired, so guard on ready()
        if self.ready():
            self.reset()
    def set(self):
        self.send()
    def wait(self, timeout=None):
        if not timeout:
            # no timeout requested: block until the event fires
            super(Event, self).wait()
        else:
            # give up silently (Timeout(..., False)) once the timeout elapses
            with eventlet.timeout.Timeout(timeout, False):
                super(Event, self).wait()
| 31.044118 | 83 | 0.64235 |
c1901d0b0888d422a74117da135e1e6b87984b37 | 856 | py | Python | digital_forensic/follower.py | udhayprakash/python_for_security | 5db5d3efdd8349e94f89b176d0f8651c4a9a1136 | [
"Apache-2.0"
] | null | null | null | digital_forensic/follower.py | udhayprakash/python_for_security | 5db5d3efdd8349e94f89b176d0f8651c4a9a1136 | [
"Apache-2.0"
] | null | null | null | digital_forensic/follower.py | udhayprakash/python_for_security | 5db5d3efdd8349e94f89b176d0f8651c4a9a1136 | [
"Apache-2.0"
] | null | null | null | import tweepy
import time
# OAuth credentials for the Twitter application (redacted placeholders)
twitter_app_consumer_key = '**************************'
twitter_consumer_secret = '**************************'
twitter_access_token = '**************************'
twitter_access_secret = '**************************'
# build an authenticated API client from the credentials above
MyAuth = tweepy.auth.OAuthHandler(twitter_app_consumer_key, twitter_consumer_secret)
MyAuth.set_access_token(twitter_access_token, twitter_access_secret)
MyAPI = tweepy.API(MyAuth)
# output file that will receive one follower screen name per line
followerlist = open('followerslist.txt', 'w')
# verify_credentials() must be called (the bare attribute was always truthy)
if MyAPI.verify_credentials():
    # the original had a Python-2 style `print` split from its argument
    print('Connected to Twitter Server')

# iterate over every follower of the target account
# (the original built the cursor from an undefined `api` object and an
# invalid `twitter_screen_name` keyword, then looped over `twitteruser`,
# which was never defined)
twitteruser = tweepy.Cursor(MyAPI.followers, screen_name="gauravkumarin").items()
while True:
    try:
        u = next(twitteruser)
        followerlist.write(u.screen_name + ' \n')
    except StopIteration:
        # all followers written: stop so the file actually gets closed
        break
    except Exception:
        # most likely rate limited: wait out the 15-minute window, then retry
        time.sleep(15 * 60)
followerlist.close()
| 34.24 | 84 | 0.672897 |
78d00ed6376fa62df37925ef23044e0abcaf8ece | 139 | py | Python | urlsnap/about/views.py | x4dx48/urlsnap | 9fd0e5cf98bc3e22acd745b3fae4583e43b7a553 | [
"BSD-2-Clause"
] | 1 | 2018-07-05T15:34:33.000Z | 2018-07-05T15:34:33.000Z | urlsnap/about/views.py | x4dx48/urlsnap | 9fd0e5cf98bc3e22acd745b3fae4583e43b7a553 | [
"BSD-2-Clause"
] | null | null | null | urlsnap/about/views.py | x4dx48/urlsnap | 9fd0e5cf98bc3e22acd745b3fae4583e43b7a553 | [
"BSD-2-Clause"
] | null | null | null | from django.shortcuts import render
# Create your views here.
def about(request):
    """Render the static about page, flagging the template to show the logo."""
    context = {'logo': True}
    return render(request, "about.html", context)
1284dd0272996511e01c1ec23402131ac0618edd | 856 | py | Python | alpyro_msgs/smach_msgs/smachcontainerstructure.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | [
"MIT"
] | 1 | 2020-12-13T13:07:10.000Z | 2020-12-13T13:07:10.000Z | alpyro_msgs/smach_msgs/smachcontainerstructure.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | [
"MIT"
] | null | null | null | alpyro_msgs/smach_msgs/smachcontainerstructure.py | rho2/alpyro_msgs | b5a680976c40c83df70d61bb2db1de32a1cde8d3 | [
"MIT"
] | null | null | null | from typing import List
from typing_extensions import Annotated
from alpyro_msgs import RosMessage, string
from alpyro_msgs.std_msgs.header import Header
class SmachContainerStructure(RosMessage):
    """Binding for the ROS message type smach_msgs/SmachContainerStructure."""
    __msg_typ__ = "smach_msgs/SmachContainerStructure"
    # base64-encoded full text of the message definition
    __msg_def__ = "c3RkX21zZ3MvSGVhZGVyIGhlYWRlcgogIHVpbnQzMiBzZXEKICB0aW1lIHN0YW1wCiAgc3RyaW5nIGZyYW1lX2lkCnN0cmluZyBwYXRoCnN0cmluZ1tdIGNoaWxkcmVuCnN0cmluZ1tdIGludGVybmFsX291dGNvbWVzCnN0cmluZ1tdIG91dGNvbWVzX2Zyb20Kc3RyaW5nW10gb3V0Y29tZXNfdG8Kc3RyaW5nW10gY29udGFpbmVyX291dGNvbWVzCgo="
    # md5 checksum identifying this exact message definition
    __md5_sum__ = "3d3d1e0d0f99779ee9e58101a5dcf7ea"
    # message fields (names/types mirror the ROS definition above)
    header: Header
    path: string
    children: Annotated[List[string], 0, 0]
    internal_outcomes: Annotated[List[string], 0, 0]
    outcomes_from: Annotated[List[string], 0, 0]
    outcomes_to: Annotated[List[string], 0, 0]
    container_outcomes: Annotated[List[string], 0, 0]
| 45.052632 | 282 | 0.849299 |
7e94631bd095373535ee9d70792a80a563cf5784 | 1,463 | py | Python | Python/[6 kyu] pokemon damage calculator.py | KonstantinosAng/CodeWars | 9ec9da9ed95b47b9656a5ecf77f486230fd15e3a | [
"MIT"
] | null | null | null | Python/[6 kyu] pokemon damage calculator.py | KonstantinosAng/CodeWars | 9ec9da9ed95b47b9656a5ecf77f486230fd15e3a | [
"MIT"
] | null | null | null | Python/[6 kyu] pokemon damage calculator.py | KonstantinosAng/CodeWars | 9ec9da9ed95b47b9656a5ecf77f486230fd15e3a | [
"MIT"
] | null | null | null | # see https://www.codewars.com/kata/536e9a7973130a06eb000e9f/solutions/python
from TestFunction import Test
def calculate_damage(your_type, opponent_type, attack, defense):
    """Return the damage dealt: 50 * (attack / defense) * effectiveness.

    Effectiveness depends on the attacker's and defender's types.  Any
    attacker type other than fire/water/grass uses the electric row, and
    any defender type missing from a row falls back to that row's default.
    """
    # attacker type -> (multipliers per opponent type, fallback multiplier)
    type_chart = {
        'fire':  ({'grass': 2, 'water': 0.5, 'fire': 0.5}, 1),
        'water': ({'grass': 0.5, 'water': 0.5, 'fire': 2}, 0.5),
        'grass': ({'grass': 0.5, 'water': 2, 'fire': 0.5}, 1),
    }
    electric_row = ({'grass': 1, 'water': 2, 'fire': 1}, 0.5)
    row, fallback = type_chart.get(your_type, electric_row)
    eff = row.get(opponent_type, fallback)
    return 50 * (attack / defense) * eff
# smoke tests covering every attacker type (expected values from the kata)
Test = Test(None)
Test.assert_equals(calculate_damage("fire", "water", 100, 100), 25)
Test.assert_equals(calculate_damage("grass", "water", 100, 100), 100)
Test.assert_equals(calculate_damage("electric", "fire", 100, 100), 50)
Test.assert_equals(calculate_damage("grass", "electric", 57, 19), 150)
Test.assert_equals(calculate_damage("grass", "water", 40, 40), 100)
Test.assert_equals(calculate_damage("grass", "fire", 35, 5), 175)
Test.assert_equals(calculate_damage("fire", "electric", 10, 2), 250)
Test.assert_equals(calculate_damage("grass", "grass", 93, 31), 75)
| 39.540541 | 77 | 0.668489 |
2e0c4fc6ccf4bd62e90dc2038b7aca36cbc14ff0 | 949 | py | Python | fixture/application.py | rooksever/python_training_skiba | be861d72d6f07fa1565ed12b97f6d4f04e6be1dc | [
"Apache-2.0"
] | null | null | null | fixture/application.py | rooksever/python_training_skiba | be861d72d6f07fa1565ed12b97f6d4f04e6be1dc | [
"Apache-2.0"
] | null | null | null | fixture/application.py | rooksever/python_training_skiba | be861d72d6f07fa1565ed12b97f6d4f04e6be1dc | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
    """Test fixture facade: owns the WebDriver and the page-helper objects."""
    def __init__(self, browser, base_url):
        # start the requested browser driver
        if browser == "firefox":
            self.wd = webdriver.Firefox()
        elif browser == "chrome":
            self.wd = webdriver.Chrome()
        elif browser == "ie":
            self.wd = webdriver.Ie()
        else:
            raise ValueError("Unrecognized browser %s" % browser)
        # helper objects share this fixture (and therefore the driver)
        self.session = SessionHelper(self)
        self.group = GroupHelper(self)
        self.contact = ContactHelper(self)
        self.base_url = base_url
    def is_valid(self):
        """Return True if the driver session is still alive, None otherwise."""
        try:
            self.wd.current_url
            return True
        except Exception as e:
            # a dead/quit driver raises here; caller can recreate the fixture
            print(e)
    def open_home_page(self):
        """Navigate the browser to the application's base URL."""
        wd = self.wd
        wd.get(self.base_url)
    def destroy(self):
        """Quit the browser and end the WebDriver session."""
        self.wd.quit()
| 25.648649 | 65 | 0.601686 |
e14da82d479067be2c57213477c50e0b2684c348 | 490 | py | Python | src/Books/Order.py | Mimik1/stock_market_simulator_project | 4636557731f0373d86bc99bc392ac5f19a016041 | [
"CNRI-Python"
] | 2 | 2021-08-21T06:46:40.000Z | 2022-02-28T16:23:54.000Z | src/Books/Order.py | Mimik1/stock_market_simulator_project | 4636557731f0373d86bc99bc392ac5f19a016041 | [
"CNRI-Python"
] | null | null | null | src/Books/Order.py | Mimik1/stock_market_simulator_project | 4636557731f0373d86bc99bc392ac5f19a016041 | [
"CNRI-Python"
] | 1 | 2020-06-20T10:19:18.000Z | 2020-06-20T10:19:18.000Z | class Order:
def __init__(self, order_id, trader_id, quantity, price, timestamp):
self.orderID = order_id
self.traderID = trader_id
self.quantity = quantity
self.price = price
self.timestamp = timestamp
def getOrderID(self):
return self.orderID
def getQuantity(self):
return self.quantity
def changeQuantity(self, newQuantity):
self.quantity = newQuantity
def getPrice(self):
return self.price
| 24.5 | 72 | 0.642857 |
6a39bfebdfc16562752967ed728ae44ae3369097 | 641 | py | Python | setup.py | phuctu1901/aries-cloudagent-webhook-relay | 95c88e78b099d415029653ad8ce878c3be578977 | [
"Apache-2.0"
] | 4 | 2020-07-16T08:55:39.000Z | 2021-03-25T08:04:44.000Z | setup.py | phuctu1901/aries-cloudagent-webhook-relay | 95c88e78b099d415029653ad8ce878c3be578977 | [
"Apache-2.0"
] | 3 | 2020-11-20T16:39:54.000Z | 2022-01-24T18:11:32.000Z | setup.py | phuctu1901/aries-cloudagent-webhook-relay | 95c88e78b099d415029653ad8ce878c3be578977 | [
"Apache-2.0"
] | 6 | 2020-08-31T02:39:10.000Z | 2021-07-23T13:53:44.000Z | from setuptools import setup, find_packages
setup(
    # distribution metadata
    name='aries_cloudagent_webhook_relay',
    version='1.0',
    description='Collects and cache\'s aca-py webhook calls until requested by controller',
    author='Karim Stekelenburg',
    maintainer='Karim Stekelenbrug',
    author_email='karim.stekelenburg@me.com',
    maintainer_email='karim.stekelenburg@me.com',
    # runtime dependencies
    install_requires=[
        'aiohttp',
    ],
    # map package names to their source directories
    package_dir={
        'webhook_relay': 'webhook_relay',
        'webhook_relay.lib': 'webhook_relay/lib'
    },
    packages=['webhook_relay', 'webhook_relay.lib'],
    # console entry point: `webhook-relay` runs webhook_relay.main:main
    entry_points={
        'console_scripts': ['webhook-relay=webhook_relay.main:main']
    }
)
31a99d869f4e375ef39aeee9816ae013b8b987c1 | 2,667 | py | Python | jax/draw_ising_optim_ed.py | phyjoon/circuit_comparison | a5ee8d0acd3ecc0893cb088ea4b4692e5e83965d | [
"MIT"
] | null | null | null | jax/draw_ising_optim_ed.py | phyjoon/circuit_comparison | a5ee8d0acd3ecc0893cb088ea4b4692e5e83965d | [
"MIT"
] | null | null | null | jax/draw_ising_optim_ed.py | phyjoon/circuit_comparison | a5ee8d0acd3ecc0893cb088ea4b4692e5e83965d | [
"MIT"
] | null | null | null | import matplotlib
import matplotlib.pyplot as plt
import wandb
matplotlib.rcParams['mathtext.fontset'] = 'stix'  # STIX fonts for math text in labels
# fixed color cycle so each circuit depth keeps a consistent color across panels
color_list = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
def main():
    """Download finished Ising-model runs from W&B and plot energy error and
    ground-state fidelity versus optimization step for several depths L."""
    n_qubits = 8
    n_layers_list = [32, 64, 80, 96]
    project = 'IsingModel'
    # W&B run filter: fix every hyperparameter except the circuit depth
    target_cfgs = {
        'config.n_qubits': n_qubits,
        'config.n_layers': {"$in": n_layers_list},
        'config.g': 2,
        'config.h': 0,
        'config.lr': 0.05,
        'config.seed': 96,
        'config.scheduler_name': 'exponential_decay',
    }
    print(f'Downloading experiment results from {project}')
    print(f'| Target constraints: {target_cfgs}')
    api = wandb.Api()
    runs = api.runs(project, filters=target_cfgs)
    history = {}
    for run in runs:
        if run.state == 'finished':
            print(run.name)
            n_layers = run.config['n_layers']
            h = run.history()
            # Theoretically E(\theta) >= E_0 and fidelity <= 1.
            # If it is negative, it must be a precision error.
            h['loss'] = h['loss'].clip(lower=0.)
            h['fidelity/ground'] = h['fidelity/ground'].clip(upper=1.)
            history[n_layers] = h
    print('Download done')
    # every requested depth must appear among the finished runs
    assert set(history.keys()) == set(n_layers_list)
    linestyles = ['-', '-.', '--', ':']
    linewidths = [1.2, 1.2, 1.3, 1.4]
    xlim = 0, 500
    # top panel: energy error vs optimization step (log scale)
    plt.subplot(211)
    for i, n_layers in enumerate(n_layers_list):
        h = history[n_layers]
        plt.plot(h._step, h.loss, linestyles[i],
                 color=color_list[i],
                 linewidth=linewidths[i],
                 alpha=1.,
                 markersize=5,
                 label=f'L={n_layers}')
    plt.xlim(*xlim)
    plt.yscale('log')
    plt.ylabel(r'$E(\mathbf{\theta}) - E_0$', fontsize=13)
    plt.grid(True, c='0.5', ls=':', lw=0.5)
    # plt.legend(loc='upper right')
    # bottom panel: overlap with the exact ground state
    plt.subplot(212)
    for i, n_layers in enumerate(n_layers_list):
        h = history[n_layers]
        plt.plot(h._step, h['fidelity/ground'], linestyles[i],
                 color=color_list[i],
                 linewidth=linewidths[i],
                 alpha=1.,
                 markersize=5,
                 label=f'L={n_layers}')
    plt.xlim(*xlim)
    plt.xlabel('Optimization Steps', fontsize=13)
    plt.ylabel(r'$|\,\langle \psi(\mathbf{\theta^*})\, |\, \phi \rangle\, |^2$', fontsize=13)
    plt.grid(True, c='0.5', ls=':', lw=0.5)
    plt.legend(loc='lower right')
    plt.tight_layout()
    plt.savefig('fig/ising_optimization_ed.pdf', bbox_inches='tight')
    plt.show()
# standard script entry point guard
if __name__ == '__main__':
    main()
| 30.306818 | 123 | 0.551181 |
b5b552a7e14ea5ea9b6901a2158a7662fb1fbe3e | 549 | py | Python | fantasy_news/apps/index/migrations/0002_auto_20191101_1603.py | DooBeDooBa/RecNewsSys | 3da812dd881e67c600b6b7fbe96f507ce835de5b | [
"MIT"
] | 2 | 2019-11-08T12:44:59.000Z | 2019-11-08T12:59:45.000Z | fantasy_news/apps/index/migrations/0002_auto_20191101_1603.py | DooBeDooBa/RecNewsSys | 3da812dd881e67c600b6b7fbe96f507ce835de5b | [
"MIT"
] | 1 | 2019-11-08T13:08:24.000Z | 2019-11-08T13:08:24.000Z | fantasy_news/apps/index/migrations/0002_auto_20191101_1603.py | DooBeDooBa/RecNewsSys | 3da812dd881e67c600b6b7fbe96f507ce835de5b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-11-01 08:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: alter New.new_cate to a cascading FK defaulting to 1."""
    dependencies = [
        ('index', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='new',
            name='new_cate',
            # default category id 1; deleting a Cate cascades to its news rows
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='类别', to='index.Cate'),
        ),
    ]
| 24.954545 | 128 | 0.642987 |
c0886ca381bb6b8b11dbfe20fec08b490412497f | 1,793 | py | Python | Egitim.py | BatuhanYerinde/Gercek_Zamanl-_Goruntu_-sleme_-le_Otomatik_Kimlik_Kontrolu | 237bcf6ea909ddd7c07cf1f87f6cde087f947354 | [
"Apache-2.0"
] | null | null | null | Egitim.py | BatuhanYerinde/Gercek_Zamanl-_Goruntu_-sleme_-le_Otomatik_Kimlik_Kontrolu | 237bcf6ea909ddd7c07cf1f87f6cde087f947354 | [
"Apache-2.0"
] | null | null | null | Egitim.py | BatuhanYerinde/Gercek_Zamanl-_Goruntu_-sleme_-le_Otomatik_Kimlik_Kontrolu | 237bcf6ea909ddd7c07cf1f87f6cde087f947354 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
from PIL import Image
import os
# path of the directory that holds the training dataset images
path = 'dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create() # LBPH recognizer instance used for training
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml"); # Haar cascade classifier used to detect the frontal face region
# the function below collects the images and their ids (labels) from the dataset directory
def getImagesAndLabels(path):
    """Collect face crops and their numeric ids from the dataset directory.

    Each file name is expected to look like ``user.<id>.<n>.<ext>``; the id
    is parsed from the second dot-separated field of the file name.

    Returns:
        (faceSamples, ids): parallel lists of grayscale face regions
        (numpy arrays) and the integer id of the person in each region.
    """
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert('L')  # convert the image to grayscale
        img_numpy = np.array(PIL_img, 'uint8')  # store it as a numpy array
        # parse the person id from the file name (renamed from `id`,
        # which shadowed the builtin)
        face_id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)  # detect face bounding boxes
        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y+h, x:x+w])  # keep only the face region
            # record which person this face belongs to (the original line had
            # a Turkish comment without '#', which was a syntax error)
            ids.append(face_id)
    return faceSamples, ids
print ("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces,ids = getImagesAndLabels(path) # collect the face samples and their ids
recognizer.train(faces, np.array(ids)) # train the recognizer on the face crops and their ids
# Save the model into trainer/trainer.yml
recognizer.write('trainer/trainer.yml') # the trained model is stored under the trainer folder
# report how many distinct people (ids) were trained
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
| 42.690476 | 157 | 0.743447 |
4a3f5b1a15970b3333edb855373c5e469eb52b2d | 1,916 | py | Python | algorithm/k_means_image.py | AxelThevenot/K-Means | 4602792c269c909fb7b50a54700b7a7386a860db | [
"MIT"
] | 1 | 2019-03-31T20:28:31.000Z | 2019-03-31T20:28:31.000Z | algorithm/k_means_image.py | AxelThevenot/K-Means | 4602792c269c909fb7b50a54700b7a7386a860db | [
"MIT"
] | null | null | null | algorithm/k_means_image.py | AxelThevenot/K-Means | 4602792c269c909fb7b50a54700b7a7386a860db | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import k_means as km
IMAGE = mpimg.imread('image.jpg') # pick up the image as a matrix of pixel
pixels = np.concatenate(IMAGE[:][:]) # all the pixels of the image in an array
EPSILON = 1000 # convergence threshold on the change of the cost
K_TO_TEST = np.array([1, 2, 3, 4]) # k to test have to be at number of 4 with this script !!
fig = plt.figure(figsize=(8, 8))
print(np.unique(pixels, axis=0).shape[0]) # number of different colors on the image
for i, k in enumerate(K_TO_TEST):
    iteration = 0
    centroids = np.random.rand(k, 3) * 256  # random initial RGB centroids
    # Initialize a value to keep the last cost value to know when there is a convergence
    last_cost = 0
    cost = EPSILON + 1 # make sure to start the while loop
    # Update the centroid while not convergence
    while not abs(cost - last_cost) < EPSILON:
        print('iteration {0}... '.format(iteration + 1))
        # keep the current cost before the adjustments to know if there is a convergence
        last_cost = cost
        # pick up the nearest centroid indexes of each samples
        nearest = km.nearest_centroid(pixels, centroids)
        # adjust the current centroids
        # NOTE(review): the return value bound to k_centroids is never used;
        # convergence only happens if adjust_centroid mutates `centroids`
        # in place — confirm, otherwise this should read
        # `centroids = km.adjust_centroid(pixels, nearest, centroids)`.
        k_centroids = km.adjust_centroid(pixels, nearest, centroids)
        # calculation of the current cost
        cost = km.calculate_cost(pixels, centroids)
        print('cost : {0}'.format(cost))
        print('centroids : \n{0}'.format(centroids))
        iteration += 1
    # reassociate each color to its nearest centroid
    nearest = km.nearest_centroid(pixels, centroids)
    # create the pixel array
    new_image = np.array([centroids[number] for _, number in enumerate(nearest)])
    # reform the image
    new_image = new_image.reshape(IMAGE.shape).astype(int)
    # plot it
    fig.add_subplot(2, 2, i + 1)
    plt.imshow(new_image)
plt.show()
73ab7e1fd26d80801122255946130680a87b49f7 | 945 | py | Python | crusoe_observe/cve-connector/cve_connector/vendor_cve/implementation/vendors_storage_structures/apple.py | CSIRT-MU/CRUSOE | 73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b | [
"MIT"
] | 3 | 2021-11-09T09:55:17.000Z | 2022-02-19T02:58:27.000Z | crusoe_observe/cve-connector/cve_connector/vendor_cve/implementation/vendors_storage_structures/apple.py | CSIRT-MU/CRUSOE | 73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b | [
"MIT"
] | null | null | null | crusoe_observe/cve-connector/cve_connector/vendor_cve/implementation/vendors_storage_structures/apple.py | CSIRT-MU/CRUSOE | 73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b | [
"MIT"
] | null | null | null | """Module contains class for storing information about CVEs from vendor Apple."""
from cve_connector.vendor_cve.implementation.vendors_storage_structures.general_vendor import Vendor
from cve_connector.vendor_cve.implementation.utilities.check_correctness import is_correct_cve_id
class Apple(Vendor):
    """
    Storage structure for CVE entries published by vendor Apple.
    """
    def __init__(self, cve_id='', available_for='', impact='', description='',
                 published=None, descr_all=''):
        """Fill in the Apple-specific CVE fields on top of the generic Vendor state."""
        super().__init__()
        # NOTE(review): the short `description` argument is accepted but never
        # stored; the stored description deliberately mirrors `descr_all`
        # (same as the original implementation) — confirm this is intended.
        for attribute, value in (
                ('cve_id', cve_id),
                ('available_for', available_for),
                ('impact', impact),
                ('published', published),
                ('description', descr_all),
                ('patch_available', True)):
            setattr(self, attribute, value)
    def is_valid_entity(self):
        """
        Tests correctness of created instance of this class.
        :return: True if valid
        """
        return is_correct_cve_id(self.cve_id)
| 31.5 | 100 | 0.683598 |
091a8333c1fe8f0eee01877f3db44af48ac5984c | 3,969 | py | Python | lab-notebook/kchu/2019-01-22-KTC-initial_ICA_exploration.py | velexi-corporation/spectra-ml | 10fab9e72437e79b6f7ff5ae4b9592bc7c48f10d | [
"Apache-2.0"
] | null | null | null | lab-notebook/kchu/2019-01-22-KTC-initial_ICA_exploration.py | velexi-corporation/spectra-ml | 10fab9e72437e79b6f7ff5ae4b9592bc7c48f10d | [
"Apache-2.0"
] | null | null | null | lab-notebook/kchu/2019-01-22-KTC-initial_ICA_exploration.py | velexi-corporation/spectra-ml | 10fab9e72437e79b6f7ff5ae4b9592bc7c48f10d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# ## 2019-01-22: Initial ICA Exploration
#
# ### Authors
# * Kevin Chu (kevin@velexi.com)
#
# ### Notes
#
# * In theory, ICA should "learn" spectra features that are common across multiple materials. Projection of a new spectra onto these "component spectra" could be used to construct the input vector to a supervised learning system.
#
# * The independent components can be computed from the mixing matrix (FastICA.mixing_). Note that FastICA automatically "whitens" the training dataset, so the mean spectra (FastICA.mean_) needs to be added to each column of the mixing matrix.
#
# * To compute the representation (i.e., coefficients) of a spectra with respect to the independent components, multiply the spectra by the unmixing matrix (FastICA.components_).
# ## Preparations
# In[1]:
# --- Imports
# Standard libraries
import os
import re
# External packages
import matplotlib.pyplot as plt
import numpy
import pandas
from sklearn.decomposition import FastICA
# In[2]:
# --- Configuration Parameters
# Data directory (required environment variable; raises KeyError if unset)
data_dir = os.environ['DATA_DIR']
# Materials of interest mapped to their integer class labels
materials = {
    'actinolite': 0,
    'alunite': 1,
    'calcite': 2,
}
# ### Data Preparation
# In[3]:
# --- Load data from files
# Get file list (skip hidden files and subdirectories)
data_files = [os.path.join(data_dir, file_name) for file_name in os.listdir(data_dir)
              if not file_name.startswith('.') and
              os.path.isfile(os.path.join(data_dir, file_name))]
# Initialize spectra dataset (one column per spectrum)
spectra_data = pandas.DataFrame()
# Initialize material labels
class_labels = []
# Load data files
for file_name in data_files:
    # Read data into DataFrame
    raw_data = pandas.read_csv(file_name)
    # Clean up header: the first column name doubles as the spectrum id
    spectra_id = raw_data.columns[0].strip()
    raw_data.columns = [spectra_id]
    # Replace missing values (set to -1.23e+34 in raw data) with 0.
    # FIX: use .loc instead of chained indexing (`df[col][mask] = ...`),
    # which triggers SettingWithCopyWarning and may write to a copy.
    raw_data.loc[raw_data[spectra_id] < 0, spectra_id] = 0.0
    # Append spectra
    spectra_data[spectra_id] = raw_data[spectra_id]
    # Assign class label from the first material name found in the spectrum id
    for material, label in materials.items():
        if re.search(material, spectra_id, re.IGNORECASE):
            class_labels.append(label)
            break
# Calculate dataset parameters
spectrum_length, num_spectra = spectra_data.shape
# Convert labels to a (num_spectra, 1) numpy array
class_labels = numpy.array(class_labels)
class_labels.resize([num_spectra, 1])
# ## Data Exploration
# In[4]:
# --- Plot spectra by material
for material_name, material_id in materials.items():
    # Row indices of the spectra labeled with this material
    spectra_indices = numpy.argwhere(class_labels==material_id)[:, 0]
    # Plot this material's spectra in their own titled figure
    plt.figure()
    plt.title(material_name)
    plt.plot(spectra_data.iloc[:, spectra_indices])
# ## ICA Exploration
# In[5]:
# --- Generate ICA model
# ICA Parameters
num_components = 5
# Create FastICA object
ica = FastICA(n_components=num_components)
# Fit ICA model; samples must be rows, so transpose the (wavelength x sample) table
X = spectra_data.values.T
S = ica.fit_transform(X)
# Compute independent spectra components
# Note: mixing (not unmixing) matrix holds independent components;
# FastICA whitens the data, so the mean spectrum must be added back.
mean_spectra = ica.mean_.reshape([spectrum_length, 1])
spectra_components = ica.mixing_ + numpy.tile(mean_spectra, [1, num_components])
# Display results
print("Number of components generated:", num_components)
print("Number of fitting iterations:", ica.n_iter_)
# Display independent spectra components.
# BUG FIX: the figure was previously created *after* the title/plot calls, so
# component 0 was drawn onto the preceding figure and an empty trailing figure
# was left over. Create each figure first (matching the earlier plot loop).
for i in range(spectra_components.shape[1]):
    plt.figure()
    plt.title('Component {}'.format(i))
    plt.plot(spectra_components[:, i])
# In[6]:
# --- Compute representation for spectra
# Get unmixing matrix
unmixing_matrix = ica.components_
print("Coefficients from fit_transform():", S[0, :])
# Coefficients of spectrum 0 w.r.t. the components: unmix the centered sample.
coefficients = numpy.dot(unmixing_matrix,
                         X[0, :].reshape([spectrum_length, 1]) - mean_spectra).T
print("Coefficients from multiplying by unmixing matrix:", coefficients)
| 24.20122 | 243 | 0.71101 |
3fc53f53f301578651c2ea955018b0517b9268c0 | 18,541 | py | Python | src/plotman/interactive.py | lopesmcc/plotman | e4798f2bd69c246f6736df0d1e90dcda4032bb7a | [
"Apache-2.0"
] | null | null | null | src/plotman/interactive.py | lopesmcc/plotman | e4798f2bd69c246f6736df0d1e90dcda4032bb7a | [
"Apache-2.0"
] | null | null | null | src/plotman/interactive.py | lopesmcc/plotman | e4798f2bd69c246f6736df0d1e90dcda4032bb7a | [
"Apache-2.0"
] | null | null | null | import curses
import datetime
import locale
import math
import os
import subprocess
import shlex
import typing
import sys
from subprocess import DEVNULL, STDOUT, check_call, CalledProcessError
from plotman import archive, configuration, manager, reporting
from plotman.job import Job
from plotman.archive_job import EgressArchiveJob
# True when the interpreter exposes the built-in 'posix' module (i.e. a POSIX platform).
ON_POSIX = 'posix' in sys.builtin_module_names
class TerminalTooSmallError(Exception):
    """Raised when the curses UI cannot be initialized, typically because the
    terminal window is too small (see run_interactive)."""
    pass
class Log:
    """In-memory scrolling message log for the interactive curses UI.

    `entries` holds timestamped lines; `cur_pos` is the scroll position,
    expressed as the index one past the last visible entry.
    """
    entries: typing.List[str]
    cur_pos: int
    def __init__(self) -> None:
        self.entries = []
        self.cur_pos = 0
    # TODO: store timestamp as actual timestamp indexing the messages
    def log(self, msg: str) -> None:
        '''Log the message and scroll to the end of the log'''
        stamp = datetime.datetime.now().strftime('%m-%d %H:%M:%S')
        self.entries.append(f'{stamp} {msg}')
        self.cur_pos = len(self.entries)
    def tail(self, num_entries: int) -> typing.List[str]:
        '''Return the entries at the end of the log. Consider cur_slice() instead.'''
        return self.entries[-num_entries:]
    def shift_slice(self, offset: int) -> None:
        '''Positive shifts towards end, negative shifts towards beginning'''
        target = self.cur_pos + offset
        if target < 0:
            target = 0
        elif target > len(self.entries):
            target = len(self.entries)
        self.cur_pos = target
    def shift_slice_to_end(self) -> None:
        '''Jump the scroll position to the newest entry.'''
        self.cur_pos = len(self.entries)
    def get_cur_pos(self) -> int:
        '''Current scroll position (index one past the visible window).'''
        return self.cur_pos
    def cur_slice(self, num_entries: int) -> typing.List[str]:
        '''Return num_entries log entries up to the current slice position'''
        start = self.cur_pos - num_entries
        if start < 0:
            start = 0
        return self.entries[start:self.cur_pos]
    def fill_log(self) -> None:
        '''Add a bunch of stuff to the log. Useful for testing.'''
        for line_no in range(100):
            self.log('Log line %d' % line_no)
def plotting_status_msg(active: bool, status: str) -> str:
    """Prefix *status* with the plotting activity marker for the header line."""
    marker = '(active) ' if active else '(inactive) '
    return marker + status
def archiving_status_msg(configured: bool, active: bool, status: str) -> str:
    """Format the archiving status for the header; short-circuit when archiving
    is not configured at all."""
    if not configured:
        return '(not configured)'
    marker = '(active) ' if active else '(inactive) '
    return marker + status
# cmd_autostart_plotting is the (optional) argument passed from the command line. May be None
def curses_main(stdscr: typing.Any, cmd_autostart_plotting: typing.Optional[bool], cmd_autostart_archiving: typing.Optional[bool], cfg: configuration.PlotmanConfig) -> None:
    """Main loop of the interactive curses UI.

    Repeatedly refreshes job/dir/archive state (a cheap cached refresh each
    pass, a full re-read every cfg.scheduling.polling_time_s seconds), kicks
    new plot/archive processes when active, redraws the screen, and handles
    the p/a toggle, scroll and q(uit) keys. The command-line autostart flags
    override the config defaults when they are not None; external
    plotter/archiver control (if configured) disables the internal toggles.
    """
    log = Log()
    if should_use_external_plotting(cfg):
        plotting_active = False
    elif cmd_autostart_plotting is not None:
        plotting_active = cmd_autostart_plotting
    else:
        plotting_active = cfg.commands.interactive.autostart_plotting
    archiving_configured = cfg.archiving is not None
    if not archiving_configured or should_use_external_plotting(cfg):
        archiving_active = False
    elif cmd_autostart_archiving is not None:
        archiving_active = cmd_autostart_archiving
    else:
        archiving_active = cfg.commands.interactive.autostart_archiving
    plotting_status = '<startup>'  # todo rename these msg?
    archiving_status: typing.Union[bool, str, typing.Dict[str, object]] = '<startup>'
    curses.start_color()
    stdscr.nodelay(True)  # make getch() non-blocking
    stdscr.timeout(2000)
    # Create windows. We'll size them in the main loop when we have their content.
    header_win = curses.newwin(1, 1, 1, 0)
    log_win = curses.newwin(1, 1, 1, 0)
    jobs_win = curses.newwin(1, 1, 1, 0)
    dirs_win = curses.newwin(1, 1, 1, 0)
    jobs = Job.get_running_jobs(cfg.logging.plots)
    last_refresh = None
    pressed_key = ''  # For debugging
    archdir_freebytes = None
    aging_reason = None
    arch_jobs = None
    while True:
        # A full refresh scans for and reads info for running jobs from
        # scratch (i.e., reread their logfiles). Otherwise we'll only
        # initialize new jobs, and mostly rely on cached info.
        do_full_refresh = False
        elapsed = 0  # Time since last refresh, or zero if no prev. refresh
        if last_refresh is None:
            do_full_refresh = True
        else:
            elapsed = (datetime.datetime.now() - last_refresh).total_seconds()
            do_full_refresh = elapsed >= cfg.scheduling.polling_time_s
        if not do_full_refresh:
            jobs = Job.get_running_jobs(cfg.logging.plots, cached_jobs=jobs)
        else:
            last_refresh = datetime.datetime.now()
            jobs = Job.get_running_jobs(cfg.logging.plots)
            arch_jobs = EgressArchiveJob.get_archive_running_jobs(arch_cfg=cfg.archiving)
        # --- Possibly start a new plot job ---
        if plotting_active or is_external_plotting_active(cfg):
            (started, msg) = manager.maybe_start_new_plot(
                cfg.directories, cfg.scheduling, cfg.plotting, cfg.logging, should_use_external_plotting(cfg)
            )
            if (started):
                if not should_use_external_plotting(cfg):
                    # Flush the remembered "why was plotting delayed" reason first.
                    if aging_reason is not None:
                        log.log(aging_reason)
                        aging_reason = None
                    log.log(msg)
                    plotting_status = '<just started job>'
                    jobs = Job.get_running_jobs(cfg.logging.plots, cached_jobs=jobs)
            else:
                # If a plot is delayed for any reason other than stagger, log it
                if msg.find("stagger") < 0:
                    aging_reason = msg
                plotting_status = msg
        # --- Possibly start/refresh archiving ---
        if archiving_configured:
            if archiving_active or is_external_archiving_active(cfg):
                archiving_status, log_messages = archive.spawn_archive_process(cfg.directories, cfg.archiving, cfg.logging, jobs, should_use_external_archiver(cfg))
                for log_message in log_messages:
                    log.log(log_message)
            archdir_freebytes, log_messages = archive.get_archdir_freebytes(cfg.archiving)
            for log_message in log_messages:
                log.log(log_message)
        # Get terminal size. Recommended method is stdscr.getmaxyx(), but this
        # does not seem to work on some systems. It may be a bug in Python
        # curses, maybe having to do with registering sigwinch handlers in
        # multithreaded environments. See e.g.
        # https://stackoverflow.com/questions/33906183#33906270
        # Alternative option is to call out to `stty size`. For now, we
        # support both strategies, selected by a config option.
        # TODO: also try shutil.get_terminal_size()
        n_rows: int
        n_cols: int
        if cfg.user_interface.use_stty_size:
            completed_process = subprocess.run(
                ['stty', 'size'], check=True, encoding='utf-8', stdout=subprocess.PIPE
            )
            elements = completed_process.stdout.split()
            (n_rows, n_cols) = [int(v) for v in elements]
        else:
            (n_rows, n_cols) = map(int, stdscr.getmaxyx())
        stdscr.clear()
        stdscr.resize(n_rows, n_cols)
        curses.resize_term(n_rows, n_cols)
        #
        # Obtain and measure content
        #
        # Directory prefixes, for abbreviation
        tmp_prefix = os.path.commonpath(cfg.directories.tmp)
        dst_dir = cfg.directories.get_dst_directories()
        dst_prefix = os.path.commonpath(dst_dir)
        if archdir_freebytes is not None:
            archive_directories = list(archdir_freebytes.keys())
            if len(archive_directories) == 0:
                arch_prefix = ''
            else:
                arch_prefix = os.path.commonpath(archive_directories)
        n_tmpdirs = len(cfg.directories.tmp)
        # Directory reports.
        tmp_report = reporting.tmp_dir_report(
            jobs, cfg.directories, cfg.scheduling, n_cols, 0, n_tmpdirs, tmp_prefix)
        dst_report = reporting.dst_dir_report(
            jobs, dst_dir, n_cols, dst_prefix)
        hide_full = cfg.commands.interactive.hide_full_arch_dirs
        if archdir_freebytes is not None:
            arch_report = reporting.arch_dir_report(archdir_freebytes, n_cols, arch_prefix, hide_full)
            if not arch_report:
                arch_report = '<no archive dir info>'
        else:
            arch_report = '<archiving not configured>'
        #
        # Layout
        #
        tmp_h = len(tmp_report.splitlines())
        tmp_w = len(max(tmp_report.splitlines(), key=len)) + 1
        dst_h = len(dst_report.splitlines())
        dst_w = len(max(dst_report.splitlines(), key=len)) + 1
        arch_h = len(arch_report.splitlines()) + 1
        arch_w = n_cols
        arch_job_h = len(arch_jobs) + 2
        arch_job_w = n_cols
        header_h = 3
        dirs_h = max(tmp_h, dst_h) + arch_job_h + arch_h
        remainder = n_rows - (header_h + dirs_h)
        jobs_h = max(5, math.floor(remainder * 0.6))
        header_pos = 0
        jobs_pos = header_pos + header_h
        stdscr.resize(n_rows, n_cols)
        dirs_pos = jobs_pos + jobs_h
        logscreen_pos = dirs_pos + dirs_h
        linecap = n_cols - 1
        if cfg.commands.interactive.show_logs:
            logs_h = n_rows - (header_h + jobs_h + dirs_h)
        else:
            # No log pane: give the remaining rows back to the jobs window.
            logs_h = 0
            jobs_h = n_rows - (header_h + dirs_h)
            dirs_pos = jobs_pos + jobs_h
        try:
            header_win = curses.newwin(header_h, n_cols, header_pos, 0)
            if cfg.commands.interactive.show_logs:
                log_win = curses.newwin(logs_h, n_cols, logscreen_pos, 0)
            jobs_win = curses.newwin(jobs_h, n_cols, jobs_pos, 0)
            dirs_win = curses.newwin(dirs_h, n_cols, dirs_pos, 0)
        except Exception:
            raise Exception('Failed to initialize curses windows, try a larger '
                            'terminal window.')
        #
        # Write
        #
        # Header
        curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_RED)
        curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
        header_win.addnstr(0, 0, 'Plotman', linecap, curses.A_BOLD)
        timestamp = datetime.datetime.now().strftime("%H:%M:%S")
        refresh_msg = "now" if do_full_refresh else f"{int(elapsed)}s/{cfg.scheduling.polling_time_s}"
        header_win.addnstr(f" {timestamp} (refresh {refresh_msg})", linecap)
        header_win.addnstr(' | <P>lotting: ', linecap, curses.A_BOLD)
        if plotting_active or is_external_plotting_active(cfg):
            header_win.addnstr('(active)', linecap, curses.color_pair(2))
        else:
            header_win.addnstr('(inactive)', linecap, curses.color_pair(1) | curses.A_BOLD)
        header_win.addnstr(' ' + plotting_status, linecap)
        header_win.addnstr(' <A>rchival: ', linecap, curses.A_BOLD)
        if archiving_configured:
            if archiving_active or is_external_archiving_active(cfg):
                header_win.addnstr('(active)', linecap, curses.color_pair(2))
            else:
                header_win.addnstr('(inactive)', linecap, curses.color_pair(1) | curses.A_BOLD)
            header_win.addnstr(' ' + archiving_status, linecap)
        else:
            header_win.addnstr(' (not configured)', linecap)
        # Oneliner progress display
        header_win.addnstr(1, 0, 'Jobs (%d): ' % len(jobs), linecap)
        header_win.addnstr('[' + reporting.job_viz(jobs) + ']', linecap)
        # These are useful for debugging.
        # header_win.addnstr(' term size: (%d, %d)' % (n_rows, n_cols), linecap)  # Debuggin
        # if pressed_key:
        #     header_win.addnstr(' (keypress %s)' % str(pressed_key), linecap)
        header_win.addnstr(2, 0, 'Prefixes:', linecap, curses.A_BOLD)
        header_win.addnstr(' tmp=', linecap, curses.A_BOLD)
        header_win.addnstr(tmp_prefix, linecap)
        header_win.addnstr(' dst=', linecap, curses.A_BOLD)
        header_win.addnstr(dst_prefix, linecap)
        if archiving_configured:
            header_win.addnstr(' archive=', linecap, curses.A_BOLD)
            header_win.addnstr(arch_prefix, linecap)
            header_win.addnstr(' (remote)', linecap)
        # Jobs
        jobs_win.addstr(0, 0, reporting.status_report(jobs, n_cols, jobs_h,
                                                      tmp_prefix, dst_prefix))
        jobs_win.chgat(0, 0, curses.A_REVERSE)
        # Dirs
        tmpwin_dstwin_gutter = 6
        maxtd_h = max([tmp_h, dst_h])
        tmpwin = curses.newwin(
            tmp_h, tmp_w,
            dirs_pos + int(maxtd_h - tmp_h), 0)
        tmpwin.addstr(tmp_report)
        tmpwin.chgat(0, 0, curses.A_REVERSE)
        dstwin = curses.newwin(
            dst_h, dst_w,
            dirs_pos + int((maxtd_h - dst_h) / 2), tmp_w + tmpwin_dstwin_gutter)
        dstwin.addstr(dst_report)
        dstwin.chgat(0, 0, curses.A_REVERSE)
        archjobwin = None
        if arch_jobs:
            archjobwin = curses.newwin(arch_job_h, arch_job_w, dirs_pos + maxtd_h, 0)
            archjobwin.addstr(0, 0, 'Archive job', curses.A_REVERSE)
            archjobwin.addstr(1, 0, reporting.arch_job_report(arch_jobs, n_cols, arch_job_h - 1))
        archwin = curses.newwin(arch_h, arch_w, dirs_pos + maxtd_h + arch_job_h, 0)
        archwin.addstr(0, 0, 'Archive dirs free space', curses.A_REVERSE)
        archwin.addstr(1, 0, arch_report)
        if cfg.commands.interactive.show_logs:
            # Log. Could use a pad here instead of managing scrolling ourselves, but
            # this seems easier.
            log_win.addnstr(0, 0, ('Log: %d (<up>/<down>/<end> to scroll)\n' % log.get_cur_pos() ),
                            linecap, curses.A_REVERSE)
            for i, logline in enumerate(log.cur_slice(logs_h - 1)):
                log_win.addnstr(i + 1, 0, logline, linecap)
        # Batch the window refreshes and flush them in one screen update.
        stdscr.noutrefresh()
        header_win.noutrefresh()
        jobs_win.noutrefresh()
        tmpwin.noutrefresh()
        dstwin.noutrefresh()
        if archjobwin is not None:
            archjobwin.noutrefresh()
        archwin.noutrefresh()
        if cfg.commands.interactive.show_logs:
            log_win.noutrefresh()
        curses.doupdate()
        # --- Keyboard handling (getch blocks up to the 2s timeout set above) ---
        try:
            key = stdscr.getch()
        except KeyboardInterrupt:
            key = ord('q')
        if key == curses.KEY_UP:
            log.shift_slice(-1)
            pressed_key = 'up'
        elif key == curses.KEY_DOWN:
            log.shift_slice(1)
            pressed_key = 'dwn'
        elif key == curses.KEY_END:
            log.shift_slice_to_end()
            pressed_key = 'end'
        elif key == ord('p'):
            if should_use_external_plotting(cfg):
                toggle_external_plotter(cfg)
            else:
                plotting_active = not plotting_active
            pressed_key = 'p'
        elif key == ord('a'):
            if should_use_external_archiver(cfg):
                toggle_external_archiver(cfg)
            else:
                archiving_active = not archiving_active
            pressed_key = 'a'
        elif key == ord('q'):
            break
        else:
            pressed_key = key
def should_use_external_plotting(cfg):
    """Return True when all three external-plotter commands are configured.

    Raises Exception when only some of the three fields are set, since a
    partial configuration cannot drive the external plotter.
    """
    interactive_cfg = cfg.commands.interactive
    configured = [
        interactive_cfg.start_plotter_cmd is not None,
        interactive_cfg.stop_plotter_cmd is not None,
        interactive_cfg.is_plotter_active_cmd is not None,
    ]
    if all(configured):
        return True
    if any(configured):
        raise Exception('Invalid configuration for the UI external plotter control: '
                        'all 3 fields are required to enable it.')
    return False
def is_external_plotting_active(cfg):
    """Return True when the configured external plotter reports it is running.

    Runs cfg.commands.interactive.is_plotter_active_cmd; a zero exit status
    means active. Always False when external plotting is not configured.
    """
    if not should_use_external_plotting(cfg):
        return False
    cmd = shlex.split(cfg.commands.interactive.is_plotter_active_cmd)
    try:
        check_call(cmd, stdout=DEVNULL, stderr=STDOUT)
    except CalledProcessError:
        # Non-zero exit status: the external plotter is not active.
        # (Unused `as e` binding removed.)
        return False
    return True
def toggle_external_plotter(cfg):
    """Flip the external plotter: stop it when running, start it otherwise."""
    if is_external_plotting_active(cfg):
        command = cfg.commands.interactive.stop_plotter_cmd
    else:
        command = cfg.commands.interactive.start_plotter_cmd
    check_call(shlex.split(command), stdout=DEVNULL, stderr=STDOUT)
def should_use_external_archiver(cfg):
    """Return True when all three external-archiver commands are configured.

    Raises Exception when only some of the three fields are set, since a
    partial configuration cannot drive the external archiver.
    """
    interactive_cfg = cfg.commands.interactive
    configured = [
        interactive_cfg.start_archiver_cmd is not None,
        interactive_cfg.stop_archiver_cmd is not None,
        interactive_cfg.is_archiver_active_cmd is not None,
    ]
    if all(configured):
        return True
    if any(configured):
        raise Exception('Invalid configuration for the UI external archiver control: '
                        'all 3 fields are required to enable it.')
    return False
def is_external_archiving_active(cfg):
    """Return True when the configured external archiver reports it is running.

    Runs cfg.commands.interactive.is_archiver_active_cmd; a zero exit status
    means active. Always False when external archiving is not configured.
    """
    if not should_use_external_archiver(cfg):
        return False
    cmd = shlex.split(cfg.commands.interactive.is_archiver_active_cmd)
    try:
        check_call(cmd, stdout=DEVNULL, stderr=STDOUT)
    except CalledProcessError:
        # Non-zero exit status: the external archiver is not active.
        # (Unused `as e` binding removed.)
        return False
    return True
def toggle_external_archiver(cfg):
    """Flip the external archiver: stop it when running, start it otherwise."""
    if is_external_archiving_active(cfg):
        command = cfg.commands.interactive.stop_archiver_cmd
    else:
        command = cfg.commands.interactive.start_archiver_cmd
    check_call(shlex.split(command), stdout=DEVNULL, stderr=STDOUT)
def run_interactive(cfg: configuration.PlotmanConfig, autostart_plotting: typing.Optional[bool] = None, autostart_archiving: typing.Optional[bool] = None) -> None:
    """Launch the interactive curses UI.

    Sets the locale, then runs curses_main under curses.wrapper (which owns
    terminal setup/teardown). Any curses.error is re-raised as
    TerminalTooSmallError, since an undersized terminal is the usual cause.
    The autostart arguments are forwarded to curses_main and, when None,
    defer to the config defaults there.
    """
    locale.setlocale(locale.LC_ALL, '')
    code = locale.getpreferredencoding()
    # Then use code as the encoding for str.encode() calls.
    # NOTE(review): `code` is currently unused in this function.
    try:
        curses.wrapper(
            curses_main,
            cmd_autostart_plotting=autostart_plotting,
            cmd_autostart_archiving=autostart_archiving,
            cfg=cfg,
        )
    except curses.error as e:
        raise TerminalTooSmallError(
            "Your terminal may be too small, try making it bigger.",
        ) from e
| 39.198732 | 173 | 0.633892 |
1c3fa1697442963e61d94c3212fdaec06e8e6352 | 2,485 | py | Python | doctor/views.py | naitik2314/E-Health-Care | 246774d4abdc01d829effd58b6bebae947c9c9c5 | [
"MIT"
] | null | null | null | doctor/views.py | naitik2314/E-Health-Care | 246774d4abdc01d829effd58b6bebae947c9c9c5 | [
"MIT"
] | null | null | null | doctor/views.py | naitik2314/E-Health-Care | 246774d4abdc01d829effd58b6bebae947c9c9c5 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from doctor.models import DoctorInfo
from django.contrib import messages
from doctor.forms import UserForm
from django.db.models import Q
from django.contrib.auth.decorators import user_passes_test, login_required
from patient.models import Disease1, WhoPredictDisease
# Create your views here.
def doctor_login(request):
    """Authenticate a doctor; redirect to the dashboard on success, otherwise
    re-render the login form (with a message when credentials are invalid)."""
    if request.method != "POST":
        # Plain GET: just show the login form.
        return render(request, 'doctor/login.html')
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(request, username=username, password=password)
    if user is None:
        messages.info(request, "Please enter valid credentials")
        return render(request, 'doctor/login.html')
    login(request, user)
    # NOTE(review): an earlier revision gated this redirect on DOCTOR group
    # membership; currently any authenticated user reaches the dashboard.
    return redirect('dashboard_doctor')
def doctor_logout(request):
    """End the current session and return to the site root."""
    # Removed a stray debug print that leaked to stdout on every logout.
    logout(request)
    return redirect("/")
# Decorators to check whether a user is doctor or not to access his assigned features
def is_doctor(user):
    """Return True when *user* belongs to the DOCTOR auth group."""
    doctor_groups = user.groups.filter(name='DOCTOR')
    return doctor_groups.exists()
# def is_patient(user):
# return user.groups.filter(name='PATIENT').exists()
@login_required(login_url='doctor_login')
def dashboard_doctor(request):
    """Render the doctor's dashboard.

    Lists WhoPredictDisease rows whose predicted disease is one of the
    diseases assigned to the logged-in doctor, optionally narrowed by the
    ``term`` GET parameter (matched against the disease name or the
    predicting patient's name).
    """
    search_term = request.GET.get('term')
    if search_term is None:
        search_term = ""
    # Names of the diseases assigned to the current doctor.
    disease = [d.name for d in Disease1.objects.filter(doctor__id=request.user.id)]
    # NOTE(review): the original query OR-ed the identical
    # Q(predict_by__name__icontains=...) clause twice; the duplicate was a
    # no-op and has been removed (it may have been intended to search the
    # patient's address — confirm with the author).
    new_predictions = WhoPredictDisease.objects.filter(
        predicted_disease__in=disease).filter(
        Q(predicted_disease__icontains=search_term) | Q(predict_by__name__icontains=search_term))
    contex = {
        'predictions': new_predictions
    }
    return render(request, 'doctor/dashboard_doctor.html', contex)
| 35 | 180 | 0.696982 |
ab4a808659927131c59a39b256108d9e418783a4 | 449 | py | Python | run_crnn.py | hushukai/Chinese-ancient-book-recognition-HSK | de5b6474dc4346524d95b405223c721aae5b500b | [
"Apache-2.0"
] | 2 | 2020-04-12T08:33:50.000Z | 2020-07-03T09:15:56.000Z | run_crnn.py | yufish/Chinese-ancient-book-recognition-HSK | c7302fdd6e86b57223cfa1906e8bb365702c8240 | [
"Apache-2.0"
] | null | null | null | run_crnn.py | yufish/Chinese-ancient-book-recognition-HSK | c7302fdd6e86b57223cfa1906e8bb365702c8240 | [
"Apache-2.0"
] | 4 | 2020-07-03T09:15:58.000Z | 2020-07-17T09:24:08.000Z | # -*- encoding: utf-8 -*-
# Author: hushukai
from recog_with_crnn.train import train
from recog_with_crnn.predict import predict
from config import ONE_TEXT_LINE_IMGS_H, ONE_TEXT_LINE_IMGS_V
if __name__ == '__main__':
    # Training entry point (disabled); uncomment to train the vertical model.
    # train(num_epochs=200, start_epoch=0, model_type="vertical", model_struc="densenet_gru")
    # Recognize the vertical one-text-line images using the epoch-3 checkpoint
    # of the "densenet_gru" model structure.
    predict(imgs_dir=ONE_TEXT_LINE_IMGS_V, model_epoch=3, model_type="vertical", model_struc="densenet_gru")
    print("Done !")
| 32.071429 | 108 | 0.761693 |
a688e8bd95d73bf2264a8d96e19b1d3ada75edbd | 291 | py | Python | wordnet/__init__.py | InSanityHQ/inscriptio | 931fe575e6671b43a693a05a24d39fe492df9511 | [
"Unlicense"
] | null | null | null | wordnet/__init__.py | InSanityHQ/inscriptio | 931fe575e6671b43a693a05a24d39fe492df9511 | [
"Unlicense"
] | null | null | null | wordnet/__init__.py | InSanityHQ/inscriptio | 931fe575e6671b43a693a05a24d39fe492df9511 | [
"Unlicense"
] | 1 | 2021-05-10T04:45:08.000Z | 2021-05-10T04:45:08.000Z | import nltk
from nltk.corpus import wordnet as wn
import nltk
import ssl
# Workaround for environments where the NLTK downloader fails TLS certificate
# verification: fall back to an unverified SSL context when the private
# helper exists.
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # The helper is absent on some builds; keep default (verified) behaviour.
    pass
else:
    # SECURITY NOTE: this disables HTTPS certificate verification for every
    # default-context urllib download in this process, not just NLTK's fetch.
    ssl._create_default_https_context = _create_unverified_https_context
# Fetch the WordNet corpus at import time so the `wn` alias above is usable.
nltk.download('wordnet')
| 15.315789 | 72 | 0.80756 |
d5d4853dc0de109ba11a1da5b94752d625665798 | 543 | py | Python | LeetCode/Python/0797. All Paths From Source to Target.py | rayvantsahni/Competitive-Programming-Codes | 39ba91b69ad8ce7dce554f7817c2f0d5545ef471 | [
"MIT"
] | 1 | 2021-07-05T14:01:36.000Z | 2021-07-05T14:01:36.000Z | LeetCode/Python/0797. All Paths From Source to Target.py | rayvantsahni/Competitive-Programming-and-Interview-Prep | 39ba91b69ad8ce7dce554f7817c2f0d5545ef471 | [
"MIT"
] | null | null | null | LeetCode/Python/0797. All Paths From Source to Target.py | rayvantsahni/Competitive-Programming-and-Interview-Prep | 39ba91b69ad8ce7dce554f7817c2f0d5545ef471 | [
"MIT"
class Solution:
    """LeetCode 797 — All Paths From Source to Target (DAG path enumeration).

    (The class header line was extraction-garbled with dataset metadata in the
    dump; restored to valid Python here.)
    """
    def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
        """Return every path from node 0 to node len(graph)-1.

        graph[i] lists the nodes reachable from node i (input is a DAG).
        """
        found: List[List[int]] = []
        self._allPathsSourceTarget(graph, 0, [0], found)
        return found
    def _allPathsSourceTarget(self, graph, source, current, all_paths):
        """DFS helper: extend *current* from *source*; completed paths are
        appended to *all_paths* in discovery order."""
        target = len(graph) - 1
        if source == target:
            all_paths.append(current)
        else:
            for successor in graph[source]:
                # `current + [successor]` copies the path, so branches stay independent.
                self._allPathsSourceTarget(graph, successor, current + [successor], all_paths)
| 36.2 | 88 | 0.618785 |
4428b8219b3370c6cefee8f2fdbf5ea385139db5 | 647 | py | Python | fireworks/examples/custom_firetasks/hello_world/hello_world_run.py | water-e/fireworks | 5db359430adc138a313326de3049e6f89dec4cbc | [
"BSD-3-Clause-LBNL"
] | 2 | 2017-06-27T07:12:27.000Z | 2017-09-22T12:06:18.000Z | fireworks/examples/custom_firetasks/hello_world/hello_world_run.py | water-e/fireworks | 5db359430adc138a313326de3049e6f89dec4cbc | [
"BSD-3-Clause-LBNL"
] | null | null | null | fireworks/examples/custom_firetasks/hello_world/hello_world_run.py | water-e/fireworks | 5db359430adc138a313326de3049e6f89dec4cbc | [
"BSD-3-Clause-LBNL"
] | 1 | 2018-10-28T01:41:15.000Z | 2018-10-28T01:41:15.000Z | from fireworks import LaunchPad, Firework, Workflow
from fireworks.core.rocket_launcher import launch_rocket
from fireworks.examples.custom_firetasks.hello_world.hello_world_task import HelloTask
if __name__ == "__main__":
# initialize the database
lp = LaunchPad() # you might need to modify the connection settings here
# lp.reset() # uncomment this line and set the appropriate parameters if you want to reset the database
# create the workflow and store it in the database
my_fw = Firework([HelloTask()])
my_wflow = Workflow.from_Firework(my_fw)
lp.add_wf(my_wflow)
# run the workflow
launch_rocket(lp) | 40.4375 | 108 | 0.758887 |
c0c38d7a393970248dc6968c46bcc85587e12743 | 390 | py | Python | aliyun/api/rest/Slb20140515DeleteLoadBalancerListenerRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | aliyun/api/rest/Slb20140515DeleteLoadBalancerListenerRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | aliyun/api/rest/Slb20140515DeleteLoadBalancerListenerRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2015.01.23
'''
from aliyun.api.base import RestApi
class Slb20140515DeleteLoadBalancerListenerRequest(RestApi):
	"""Auto-generated request wrapper for the SLB DeleteLoadBalancerListener
	API, version 2014-05-15 (file created by auto_sdk)."""
	def __init__(self,domain='slb.aliyuncs.com',port=80):
		RestApi.__init__(self,domain, port)
		# Request parameters, left as None here; presumably filled in by the
		# caller before the request is issued — confirm against RestApi usage.
		self.ListenerPort = None
		self.LoadBalancerId = None
	def getapiname(self):
		"""Return the fully-qualified API name string for this request."""
		return 'slb.aliyuncs.com.DeleteLoadBalancerListener.2014-05-15'
| 30 | 66 | 0.758974 |
529bdc9d94dca41697e8277cd64e84d19ae138c2 | 9,844 | py | Python | test/unit/test_environment.py | uditbhatia/sagemaker-containers | 3c499c8a4e00c7ff7486a4632c9330b5ea2313d3 | [
"Apache-2.0"
] | null | null | null | test/unit/test_environment.py | uditbhatia/sagemaker-containers | 3c499c8a4e00c7ff7486a4632c9330b5ea2313d3 | [
"Apache-2.0"
] | null | null | null | test/unit/test_environment.py | uditbhatia/sagemaker-containers | 3c499c8a4e00c7ff7486a4632c9330b5ea2313d3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import itertools
import json
import logging
import os
import socket
from mock import Mock, patch
import pytest
import six
import sagemaker_containers
from sagemaker_containers import _env, _params
import test
# Dotted path of the builtin open(), differing between Python 2 and 3
# (used as a patch target in tests).
builtins_open = '__builtin__.open' if six.PY2 else 'builtins.open'
# Canned SageMaker resource configuration: current host plus the full host list.
RESOURCE_CONFIG = dict(current_host='algo-1', hosts=['algo-1', 'algo-2', 'algo-3'])
# Canned input-data configuration with two channels; 'train' additionally
# carries a ContentType.
INPUT_DATA_CONFIG = {
    'train': {
        'ContentType': 'trainingContentType',
        'TrainingInputMode': 'File',
        'S3DistributionType': 'FullyReplicated',
        'RecordWrapperType': 'None'
    },
    'validation': {
        'TrainingInputMode': 'File',
        'S3DistributionType': 'FullyReplicated',
        'RecordWrapperType': 'None'
    }
}
# Hyperparameters a user would pass through to their script.
USER_HYPERPARAMETERS = {
    'batch_size': 32,
    'learning_rate': .001,
    'hosts': ['algo-1', 'algo-2'],
}
# Framework-level hyperparameters injected by SageMaker (sagemaker_* keys).
SAGEMAKER_HYPERPARAMETERS = {
    'sagemaker_region': 'us-west-2',
    'default_user_module_name': 'net',
    'sagemaker_job_name': 'sagemaker-training-job',
    'sagemaker_program': 'main.py',
    'sagemaker_submit_directory': 'imagenet',
    'sagemaker_enable_cloudwatch_metrics': True,
    'sagemaker_container_log_level': logging.WARNING,
    '_tuning_objective_metric': 'loss:3.4',
    'sagemaker_parameter_server_num': 2,
    'sagemaker_s3_output': 's3://bucket'
}
# Union of user and SageMaker hyperparameters, as read_hyperparameters() returns.
ALL_HYPERPARAMETERS = dict(itertools.chain(USER_HYPERPARAMETERS.items(), SAGEMAKER_HYPERPARAMETERS.items()))
def test_read_hyperparameters():
    """Hyperparameters written as plain JSON round-trip unchanged."""
    test.write_json(ALL_HYPERPARAMETERS, _env.hyperparameters_file_dir)
    assert _env.read_hyperparameters() == ALL_HYPERPARAMETERS
def test_read_value_serialized_hyperparameters():
    """Values serialized individually as JSON strings are deserialized back."""
    serialized_hps = {k: json.dumps(v) for k, v in ALL_HYPERPARAMETERS.items()}
    test.write_json(serialized_hps, _env.hyperparameters_file_dir)
    assert _env.read_hyperparameters() == ALL_HYPERPARAMETERS
def test_read_value_serialized_and_non_value_serialized_hyperparameters():
    """A mix of value-serialized and raw values still reads as the full dict."""
    hyperparameters = {k: json.dumps(v) for k, v in SAGEMAKER_HYPERPARAMETERS.items()}
    hyperparameters.update(USER_HYPERPARAMETERS)
    test.write_json(hyperparameters, _env.hyperparameters_file_dir)
    assert _env.read_hyperparameters() == ALL_HYPERPARAMETERS
# When json.loads fails, read_hyperparameters should fall back to the raw
# dict returned by _read_json instead of raising.
@patch('sagemaker_containers._env._read_json', lambda x: {
    'a': 1})
@patch('json.loads')
def test_read_exception(loads):
    loads.side_effect = ValueError('Unable to read.')
    assert _env.read_hyperparameters() == {
        'a': 1}
def test_resource_config():
    """The resource config file round-trips through read_resource_config()."""
    test.write_json(RESOURCE_CONFIG, _env.resource_config_file_dir)
    assert _env.read_resource_config() == RESOURCE_CONFIG
def test_input_data_config():
    """The input-data config file round-trips through read_input_data_config()."""
    test.write_json(INPUT_DATA_CONFIG, _env.input_data_config_file_dir)
    assert _env.read_input_data_config() == INPUT_DATA_CONFIG
def test_channel_input_dirs():
input_data_path = _env._input_data_dir
assert _env.channel_path('evaluation') == os.path.join(input_data_path, 'evaluation')
assert _env.channel_path('training') == os.path.join(input_data_path, 'training')
@patch('subprocess.check_output', lambda s: b'GPU 0\nGPU 1')
def test_gpu_count_in_gpu_instance():
assert _env.num_gpus() == 2
@patch('subprocess.check_output', side_effect=OSError())
def test_gpu_count_in_cpu_instance(check_output):
assert _env.num_gpus() == 0
@patch('multiprocessing.cpu_count', lambda: 2)
def test_cpu_count():
assert _env.num_cpus() == 2
# Fixture: a TrainingEnv built from the canned configs above, with the
# hardware probes (num_cpus/num_gpus) mocked to fixed values.
@pytest.fixture(name='training_env')
def create_training_env():
    with patch('sagemaker_containers._env.read_resource_config', lambda: RESOURCE_CONFIG), \
            patch('sagemaker_containers._env.read_input_data_config', lambda: INPUT_DATA_CONFIG), \
            patch('sagemaker_containers._env.read_hyperparameters', lambda: ALL_HYPERPARAMETERS), \
            patch('sagemaker_containers._env.num_cpus', lambda: 8), \
            patch('sagemaker_containers._env.num_gpus', lambda: 4):
        session_mock = Mock()
        session_mock.region_name = 'us-west-2'
        # Save and restore os.environ so the job-name variable does not leak
        # into other tests.
        old_environ = os.environ.copy()
        os.environ[_params.TRAINING_JOB_ENV] = 'training-job-42'
        yield sagemaker_containers.training_env()
        os.environ = old_environ
# Fixture: a ServingEnv driven purely by environment variables.
@pytest.fixture(name='serving_env')
def create_serving_env():
    with patch('sagemaker_containers._env.num_cpus', lambda: 8), patch('sagemaker_containers._env.num_gpus', lambda: 4):
        old_environ = os.environ.copy()
        os.environ[_params.USE_NGINX_ENV] = 'false'
        os.environ[_params.MODEL_SERVER_TIMEOUT_ENV] = '20'
        os.environ[_params.CURRENT_HOST_ENV] = 'algo-1'
        os.environ[_params.USER_PROGRAM_ENV] = 'main.py'
        os.environ[_params.SUBMIT_DIR_ENV] = 'my_dir'
        os.environ[_params.ENABLE_METRICS_ENV] = 'true'
        os.environ[_params.REGION_NAME_ENV] = 'us-west-2'
        yield _env.ServingEnv()
        os.environ = old_environ
# With no config files on disk, training_env() still builds a usable default env.
def test_create_training_env_without_training_files_and_directories_should_not_fail():
    training_env = sagemaker_containers.training_env()
    hostname = socket.gethostname()
    assert training_env.current_host == hostname
    assert training_env.hosts == [hostname]
# The module-level path helpers all resolve under /opt/ml.
def test_env():
    assert _env.input_dir.endswith('/opt/ml/input')
    assert _env.input_config_dir.endswith('/opt/ml/input/config')
    assert _env.model_dir.endswith('/opt/ml/model')
    assert _env.output_dir.endswith('/opt/ml/output')
# Each TrainingEnv property reflects the mocked configs wired up by the fixture.
def test_training_env(training_env):
    assert training_env.num_gpus == 4
    assert training_env.num_cpus == 8
    assert training_env.input_dir.endswith('/opt/ml/input')
    assert training_env.input_config_dir.endswith('/opt/ml/input/config')
    assert training_env.model_dir.endswith('/opt/ml/model')
    assert training_env.output_dir.endswith('/opt/ml/output')
    assert training_env.hyperparameters == USER_HYPERPARAMETERS
    assert training_env.resource_config == RESOURCE_CONFIG
    assert training_env.input_data_config == INPUT_DATA_CONFIG
    assert training_env.output_data_dir.endswith('/opt/ml/output/data')
    assert training_env.hosts == RESOURCE_CONFIG['hosts']
    assert training_env.channel_input_dirs['train'].endswith('/opt/ml/input/data/train')
    assert training_env.channel_input_dirs['validation'].endswith('/opt/ml/input/data/validation')
    assert training_env.current_host == RESOURCE_CONFIG['current_host']
    assert training_env.module_name == 'main'
    assert training_env.user_entry_point == 'main.py'
    assert training_env.module_dir == 'imagenet'
    assert training_env.log_level == logging.WARNING
    assert training_env.network_interface_name == 'ethwe'
    assert training_env.job_name == 'training-job-42'
    assert training_env.additional_framework_parameters == {'sagemaker_parameter_server_num': 2}
# ServingEnv reads its settings from the environment variables set in the fixture.
def test_serving_env(serving_env):
    assert serving_env.num_gpus == 4
    assert serving_env.num_cpus == 8
    assert serving_env.use_nginx is False
    assert serving_env.model_server_timeout == 20
    assert serving_env.model_server_workers == 8
    assert serving_env.module_name == 'main'
    assert serving_env.user_entry_point == 'main.py'
    assert serving_env.framework_module is None
# TrainingEnv exposes exactly this set of mapping properties.
def test_env_mapping_properties(training_env):
    assert set(training_env.properties()) == {
        'additional_framework_parameters', 'channel_input_dirs', 'current_host', 'framework_module', 'hosts',
        'hyperparameters', 'input_config_dir', 'input_data_config', 'input_dir', 'log_level', 'model_dir',
        'module_dir', 'module_name', 'network_interface_name', 'num_cpus', 'num_gpus', 'output_data_dir',
        'output_dir', 'resource_config', 'user_entry_point', 'job_name', 'output_intermediate_dir'}
# ServingEnv exposes exactly this set of properties.
def test_serving_env_properties(serving_env):
    assert set(serving_env.properties()) == {
        'current_host', 'default_accept', 'framework_module', 'http_port', 'log_level', 'model_dir',
        'model_server_timeout', 'model_server_workers', 'module_dir', 'module_name', 'num_cpus',
        'num_gpus', 'safe_port_range', 'user_entry_point', 'use_nginx'}
# NOTE(review): identical assertion to test_serving_env_properties (only the
# literal ordering differs) — looks like a duplicated test; confirm intent.
def test_request_properties(serving_env):
    assert set(serving_env.properties()) == {
        'current_host', 'default_accept', 'framework_module', 'http_port', 'log_level', 'model_dir',
        'model_server_timeout', 'model_server_workers', 'module_dir', 'module_name', 'num_cpus',
        'num_gpus', 'user_entry_point', 'safe_port_range', 'use_nginx'}
# _Env behaves as a mapping: its length matches properties() and items are indexable.
@patch('sagemaker_containers._env.num_cpus', lambda: 8)
@patch('sagemaker_containers._env.num_gpus', lambda: 4)
def test_env_dictionary():
    session_mock = Mock()
    session_mock.region_name = 'us-west-2'
    os.environ[_params.USER_PROGRAM_ENV] = 'my_app.py'
    env = _env._Env()
    assert len(env) == len(env.properties())
    assert env['module_name'] == 'my_app'
    assert env['log_level'] == logging.INFO
# module_name strips a trailing '.py' from the configured user program name.
@pytest.mark.parametrize('sagemaker_program', ['program.py', 'program'])
def test_env_module_name(sagemaker_program):
    session_mock = Mock()
    session_mock.region_name = 'us-west-2'
    os.environ[_params.USER_PROGRAM_ENV] = sagemaker_program
    module_name = _env._Env().module_name
    del os.environ[_params.USER_PROGRAM_ENV]
    assert module_name == 'program'
| 37.572519 | 120 | 0.724197 |
699d7a81d491fcc8d5322c4b059de18560307681 | 625 | py | Python | playeragent.py | Thiele/dragster-bot | 49f0203d6e914c1ede5c192406faf8a5ef5cb2ca | [
"MIT"
] | null | null | null | playeragent.py | Thiele/dragster-bot | 49f0203d6e914c1ede5c192406faf8a5ef5cb2ca | [
"MIT"
] | null | null | null | playeragent.py | Thiele/dragster-bot | 49f0203d6e914c1ede5c192406faf8a5ef5cb2ca | [
"MIT"
class PlayerAgent:
    """Agent driven by a human: actions are chosen interactively on stdin.

    Fix: the class header line was fused into a corrupted metadata row;
    restored a clean definition. The menu loop now uses enumerate instead
    of indexing via range(len(...)). Behaviour is otherwise unchanged.
    """

    def __init__(self, actions):
        # Available actions; an action's index in this list is its id, and a
        # None entry marks the default action selected on empty input.
        self.actions = actions

    def act(self, observation):
        """Print the action menu, read the user's choice and return it as an int."""
        print("Please act with one of the following ints:")
        for idx, action in enumerate(self.actions):
            print(str(idx) + ": " + str(action))
        raw = input("Enter action...")
        if raw is None or raw == '':
            # Empty input falls back to the index of the None (no-op) action.
            raw = self.actions.index(None)
        return int(raw)

    def observe(self, terminal=False, reward=0):
        """Show the transition outcome; a human agent keeps no internal state."""
        print("Is terminal: " + str(terminal) + ", reward was: " + str(reward))
        return None

    def load(self):
        # Nothing to load for a human-controlled agent.
        pass

    def save_model(self):
        # Nothing to persist for a human-controlled agent.
        pass
| 27.173913 | 73 | 0.5584 |
8f603ee3b9042f688bae7510b8aaf9fa167a9e3d | 653 | py | Python | setup.py | agloks/Megahack_03_2020 | 7b04f606a59da84c91f811b53a694b6fead24205 | [
"MIT"
] | null | null | null | setup.py | agloks/Megahack_03_2020 | 7b04f606a59da84c91f811b53a694b6fead24205 | [
"MIT"
] | null | null | null | setup.py | agloks/Megahack_03_2020 | 7b04f606a59da84c91f811b53a694b6fead24205 | [
"MIT"
] | 1 | 2020-07-05T23:50:58.000Z | 2020-07-05T23:50:58.000Z | """
Hello World app for running Python apps on Bluemix
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Absolute directory containing this setup script.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Minimal package metadata; no packages/entry points are declared here.
setup(
    name='Meu Garçom',
    version='1.0.0',
    description='Meu Garçon app for running Python apps on Bluemix',
    long_description=long_description,
    # url='https://github.com/IBM-Bluemix/Meu_Garçom',
    license='Apache-2.0'
)
| 26.12 | 68 | 0.727412 |
27135163d6711a9eafd74c1a141ca1c1e0c595cf | 658 | py | Python | setup.py | jwergieluk/openfigi | db13524fb94ca9a4973b20cb9219ea3e2607b7f0 | [
"MIT"
] | 26 | 2017-01-25T03:34:56.000Z | 2021-12-01T11:52:18.000Z | setup.py | jwergieluk/openfigi | db13524fb94ca9a4973b20cb9219ea3e2607b7f0 | [
"MIT"
] | 2 | 2018-05-01T22:44:15.000Z | 2021-05-28T23:24:11.000Z | setup.py | jwergieluk/openfigi | db13524fb94ca9a4973b20cb9219ea3e2607b7f0 | [
"MIT"
] | 6 | 2017-01-07T18:01:06.000Z | 2019-11-15T03:39:30.000Z | from setuptools import setup, find_packages
# Read the license text into a local that does not shadow the `license`
# builtin (bug fix), decoding explicitly as UTF-8 so the build result does
# not depend on the machine's locale.
with open('LICENSE', encoding='utf-8') as f:
    license_text = f.read()
# Package metadata for the openfigi.com API wrapper; the `ofg` console
# script is the command-line entry point.
setup(
    name='openfigi',
    version='0.0.9',
    description='A simple wrapper for openfigi.com',
    author='Julian Wergieluk',
    author_email='julian@wergieluk.com',
    url='https://github.com/jwergieluk/openfigi',
    license=license_text,
    packages=find_packages(),
    install_requires=['requests', 'click'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
    entry_points={'console_scripts': ['ofg = openfigi.__main__:call_figi']},
)
| 28.608696 | 76 | 0.647416 |
9a2e79e4c0875fc5bfe25af4bf92052dbef4efa9 | 3,502 | py | Python | zinv/scripts/zinv_analyse.py | shane-breeze/zinv-analysis | 496abf9cb0e77831d580be417bcad7845c347704 | [
"MIT"
] | 1 | 2019-02-06T12:15:42.000Z | 2019-02-06T12:15:42.000Z | zinv/scripts/zinv_analyse.py | shane-breeze/zinv-analysis | 496abf9cb0e77831d580be417bcad7845c347704 | [
"MIT"
] | 12 | 2019-03-27T15:52:34.000Z | 2020-02-06T12:09:37.000Z | zinv/scripts/zinv_analyse.py | shane-breeze/zinv-analysis | 496abf9cb0e77831d580be417bcad7845c347704 | [
"MIT"
] | 1 | 2019-03-14T17:23:33.000Z | 2019-03-14T17:23:33.000Z | #!/usr/bin/env python
from zinv.modules import analyse
import warnings
# Silence all Python warnings for the whole run.
warnings.filterwarnings('ignore')
import logging
# Raise log levels and stop propagation so alphatwirl's progress reporting
# does not flood the console; this script's own INFO messages remain visible.
logging.getLogger(__name__).setLevel(logging.INFO)
logging.getLogger("alphatwirl").setLevel(logging.INFO)
logging.getLogger("alphatwirl.progressbar.ProgressReport").setLevel(logging.ERROR)
logging.getLogger(__name__).propagate = False
logging.getLogger("alphatwirl").propagate = False
logging.getLogger("atuproot.atuproot_main").propagate = False
logging.getLogger("alphatwirl.progressbar.ProgressReport").propagate = False
import argparse
def parse_args():
    """Declare and evaluate the command line interface of the analysis driver.

    Returns the argparse Namespace whose attributes are forwarded verbatim
    to `analyse` by the main guard.
    """
    cli = argparse.ArgumentParser()

    # Positional config files, in the order the pipeline consumes them.
    for cfg_name, cfg_help in (
        ("dataset_cfg", "Dataset config to run over"),
        ("sequence_cfg", "Config for how to process events"),
        ("event_selection_cfg", "Config for the event selection"),
        ("physics_object_cfg", "Config for the physics object selection"),
        ("trigger_cfg", "Config for the HLT trigger paths"),
        ("hdf_cfg", "Config for the output HDF files"),
    ):
        cli.add_argument(cfg_name, type=str, help=cfg_help)

    # Job identity and output locations.
    cli.add_argument("-n", "--name", default="zinv", type=str,
                     help="Name to pass to batch")
    cli.add_argument("-o", "--outdir", default="output", type=str,
                     help="Where to save the results")
    cli.add_argument("-t", "--tempdir", default="_ccsp_temp", type=str,
                     help="Where to store the temp directory")

    # Execution backend and resources.
    cli.add_argument("--mode", default="multiprocessing", type=str,
                     help="Which mode to run in (multiprocessing, htcondor, "
                          "sge)")
    cli.add_argument("--batch-opts", type=str,
                     default="-q hep.q -l h_rt=3:0:0 -l h_vmem=24G",
                     help="SGE options")
    cli.add_argument("--ncores", default=0, type=int,
                     help="Number of cores to run on")

    # Work-splitting granularity.
    cli.add_argument("--nblocks-per-dataset", default=-1, type=int,
                     help="Number of blocks per dataset")
    cli.add_argument("--nblocks-per-process", default=-1, type=int,
                     help="Number of blocks per process")
    cli.add_argument("--nfiles-per-dataset", default=-1, type=int,
                     help="Number of files per dataset")
    cli.add_argument("--nfiles-per-process", default=1, type=int,
                     help="Number of files per process")
    cli.add_argument("--blocksize", default=1000000, type=int,
                     help="Number of events per block")
    cli.add_argument("--cachesize", default=8*1024**3, type=int,
                     help="Branch cache size")

    # Behaviour switches.
    cli.add_argument("--quiet", default=False, action='store_true',
                     help="Keep progress report quiet")
    cli.add_argument("--dryrun", default=False, action='store_true',
                     help="Don't submit the jobs to a batch system")
    cli.add_argument("--sample", default=None, type=str,
                     help="Select some sample (comma delimited). Can "
                          "selected from (data, mc and more)")
    return cli.parse_args()
if __name__ == "__main__":
    # Entry point: forward all parsed CLI options to the analysis driver.
    analyse(**vars(parse_args()))
| 50.753623 | 82 | 0.613935 |
d7872f275f4baf0bb04bc1786443cdb33028a1a3 | 3,763 | py | Python | lib/galaxy/webapps/galaxy/controllers/openid.py | maikenp/galaxy | eb3f3c816f1f94bc328d092f30c8966d41a56a0d | [
"CC-BY-3.0"
] | 1 | 2021-10-08T02:14:24.000Z | 2021-10-08T02:14:24.000Z | lib/galaxy/webapps/galaxy/controllers/openid.py | maikenp/galaxy | eb3f3c816f1f94bc328d092f30c8966d41a56a0d | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/webapps/galaxy/controllers/openid.py | maikenp/galaxy | eb3f3c816f1f94bc328d092f30c8966d41a56a0d | [
"CC-BY-3.0"
] | null | null | null | """
Contains the OpenID interface in the Universe class
"""
import logging
from galaxy import web
from galaxy.openid.openid_manager import OpenIDManager
from galaxy.openid.providers import OpenIDProviders
from galaxy.structured_app import StructuredApp
from galaxy.util import unicodify
from galaxy.web import url_for
from galaxy.webapps.base.controller import BaseUIController
log = logging.getLogger(__name__)
class OpenID(BaseUIController):
    """Web controller exposing the OpenID login endpoints (begin + callback)."""

    def __init__(self, app: StructuredApp):
        super().__init__(app)
        if app.config.enable_openid:
            # The manager wraps the python-openid consumer and its on-disk cache.
            self.openid_manager = OpenIDManager(app.config.openid_consumer_cache_path)
            self.openid_providers = OpenIDProviders.from_file('lib/galaxy/openid/openid_conf.xml')

    @web.expose
    def openid_auth(self, trans, **kwd):
        '''Handles a user request to authenticate against an OpenID provider.'''
        if not trans.app.config.enable_openid:
            return trans.show_error_message("OpenID authentication is not enabled in this instance of Galaxy.")
        consumer = self.openid_manager.get_consumer(trans)
        # Resolve the provider chosen by the user to its configuration object.
        openid_provider = kwd.get('openid_provider')
        if openid_provider:
            openid_provider_obj = self.openid_providers.get(openid_provider)
        else:
            return trans.show_error_message("An OpenID provider was not specified.")
        if not openid_provider_obj:
            return trans.show_error_message("An OpenID provider is invalid.")
        # URL the provider redirects the user back to after authentication.
        process_url = trans.request.base.rstrip('/') + url_for(controller='openid', action='openid_process', openid_provider=openid_provider)
        request = None
        try:
            request = consumer.begin(openid_provider_obj.op_endpoint_url)
            if request is None:
                return trans.show_error_message("No OpenID services are available at %s." % openid_provider_obj.op_endpoint_url)
        except Exception as e:
            return trans.show_error_message("Failed to begin OpenID authentication: %s." % unicodify(e))
        if request is not None:
            # Request the simple-registration fields declared by the provider config.
            self.openid_manager.add_sreg(trans, request, required=openid_provider_obj.sreg_required, optional=openid_provider_obj.sreg_optional)
            if request.shouldSendRedirect():
                redirect_url = request.redirectURL(
                    trans.request.base, process_url)
                self.openid_manager.persist_session(trans, consumer)
                return trans.response.send_redirect(redirect_url)
            else:
                # Large requests go via an auto-submitting HTML form instead of a redirect.
                form = request.htmlMarkup(trans.request.base, process_url, form_tag_attrs={'id': 'openid_message', 'target': '_top'})
                self.openid_manager.persist_session(trans, consumer)
                return form
        return trans.show_error_message("OpenID request failed.")

    @web.expose
    def openid_process(self, trans, **kwd):
        '''Handles the callback response from an OpenID provider.'''
        return_link = "Click <a href='%s'>here</a> to return." % url_for("/")
        if not trans.app.config.enable_openid:
            return trans.show_error_message("OpenID authentication is not enabled in this instance of Galaxy. %s" % return_link)
        consumer = self.openid_manager.get_consumer(trans)
        # Complete the OpenID handshake using the provider's callback parameters.
        info = consumer.complete(kwd, trans.request.url)
        openid_provider = kwd.get('openid_provider', None)
        if info.status == self.openid_manager.SUCCESS:
            openid_provider_obj = self.openid_providers.get(openid_provider)
            # Let the provider-specific hook act on the verified identity.
            openid_provider_obj.post_authentication(trans, self.openid_manager, info)
            return trans.show_message("Processed OpenID authentication. %s" % return_link)
        else:
            return trans.show_error_message(f"Authentication via OpenID failed: {info.message}. {return_link}")
| 50.173333 | 144 | 0.69705 |
715da47b91be799efaad2830a92b8543759b36ac | 1,202 | py | Python | utils.py | GrzegorzMika/Towards-adaptivity-via-a-new-discrepancy-principle-for-Poisson-inverse-problems | 13f62a5fa2a446c48796e12536e61125302d638d | [
"MIT"
] | null | null | null | utils.py | GrzegorzMika/Towards-adaptivity-via-a-new-discrepancy-principle-for-Poisson-inverse-problems | 13f62a5fa2a446c48796e12536e61125302d638d | [
"MIT"
] | null | null | null | utils.py | GrzegorzMika/Towards-adaptivity-via-a-new-discrepancy-principle-for-Poisson-inverse-problems | 13f62a5fa2a446c48796e12536e61125302d638d | [
"MIT"
] | 1 | 2022-01-23T19:15:01.000Z | 2022-01-23T19:15:01.000Z | import os
from datetime import date
from io import StringIO
import pandas as pd
from google.cloud import storage
from tqdm import tqdm
def find(name, path):
    """Walk `path` recursively and return the full path of the first file
    named `name`; returns None when no such file exists."""
    for dirpath, _dirnames, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(dirpath, name)
def download():
    """Fetch every blob of the 'ip-free' bucket that is not already present
    in ./Simulations, showing progress with tqdm."""
    client = storage.Client.from_service_account_json(find('secretgc_ip.json', '/home'))
    already_present = os.listdir('./Simulations')
    for blob in tqdm(client.list_blobs('ip-free')):
        if blob.name not in already_present:
            blob.download_to_filename(os.path.join('Simulations', blob.name))
def upload():
    """Push every local ./Simulations CSV to the 'ip-free' bucket, appending
    today's date to each uploaded object name."""
    client = storage.Client.from_service_account_json(find('secretgc_ip.json', '/home'))
    bucket = client.bucket('ip-free')
    local_entries = os.listdir('./Simulations')
    csv_files = [entry for entry in local_entries if 'csv' in entry]
    for file in tqdm(csv_files):
        frame = pd.read_csv(os.path.join('Simulations', file))
        stem = file.split('.')[0]
        # Serialize in memory and rewind so the blob upload reads from the start.
        buffer = StringIO()
        frame.to_csv(buffer, index_label=False)
        buffer.seek(0)
        blob = bucket.blob(stem + '_' + str(date.today()) + '.csv')
        blob.upload_from_file(buffer, content_type='text/csv')
| 28.619048 | 96 | 0.650582 |
7abe5db38d8e298415ef5aa791edb49a7bd8a806 | 644 | py | Python | sensors/configure.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 70 | 2015-11-16T18:04:01.000Z | 2022-03-05T09:04:02.000Z | sensors/configure.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 1 | 2016-08-03T05:13:19.000Z | 2016-08-03T06:19:39.000Z | sensors/configure.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 34 | 2015-12-15T17:29:23.000Z | 2021-11-18T14:15:12.000Z | #!/usr/bin/env python3
from build import ninja_common
# Ninja build configuration for the CUAUV sensors module.
build = ninja_common.Build('sensors')
# Shared serial-communication library used by the sensor daemons.
build.build_shared('auvserial',
                   ['serial/serial.cpp'])
# build.build_cmd('auv-podd',
#                 ['power/podd/main.cpp'],
#                 auv_deps=['auvserial', 'shm'])
# build.build_cmd('auv-hydrod-ui',
#                 ['hydrod2/hydro_ui.cpp'],
#                 auv_deps=['shm'],
#                 deps=['ncurses'])
# Python daemons installed as executables on the target system.
build.install('auv-linearizerd', 'sensors/linearizer/auv-linearizerd.py')
build.install('auv-kalmand', 'sensors/kalman/auv-kalmand.py')
build.install('auv-zero-heading', 'sensors/kalman/set_zero_heading.py')
| 30.666667 | 73 | 0.618012 |
bf92d622e295aa061544959efd11d908255eb77e | 6,788 | py | Python | src/frozen_lake.py | moamenibrahim/reinforcement-learning | 2ea87eb5be4e6e9ae007affb7df17f1f3aacac95 | [
"Unlicense"
] | null | null | null | src/frozen_lake.py | moamenibrahim/reinforcement-learning | 2ea87eb5be4e6e9ae007affb7df17f1f3aacac95 | [
"Unlicense"
] | null | null | null | src/frozen_lake.py | moamenibrahim/reinforcement-learning | 2ea87eb5be4e6e9ae007affb7df17f1f3aacac95 | [
"Unlicense"
] | null | null | null | import gym
import numpy as np
# NOTE(review): this module-level env appears unused — the benchmark loop at
# the bottom constructs its own environment per solver; confirm before removing.
env = gym.make('FrozenLake-v0')
def policy_evaluation(policy, environment, discount_factor=1.0, theta=1e-9, max_iterations=1e9):
    """Evaluate `policy` with iterative Bellman expectation backups.

    Sweeps the state space, replacing each state value with the expected
    one-step return under the policy, until the largest per-sweep change
    drops below `theta`. Returns the value array on convergence; if the
    iteration budget runs out first, None is returned (matching the
    original behaviour).
    """
    sweep_count = 1  # mirrors the original 1-based iteration counter
    values = np.zeros(environment.nS)
    for _ in range(int(max_iterations)):
        largest_change = 0
        for s in range(environment.nS):
            # Expected return of following the policy for one step from s.
            backed_up = sum(
                p_action * p_trans * (reward + discount_factor * values[s_next])
                for action, p_action in enumerate(policy[s])
                for p_trans, s_next, reward, _done in environment.P[s][action]
            )
            largest_change = max(largest_change, np.abs(values[s] - backed_up))
            values[s] = backed_up
        sweep_count += 1
        if largest_change < theta:
            print(f'Policy evaluated in {sweep_count} iterations.')
            return values
def one_step_lookahead(environment, state, V, discount_factor):
    """Return the expected value of every action available in `state`,
    computed as a one-step backup over the transition model using V."""
    action_values = np.zeros(environment.nA)
    for action in range(environment.nA):
        expected = 0
        for prob, next_state, reward, _done in environment.P[state][action]:
            expected += prob * (reward + discount_factor * V[next_state])
        action_values[action] = expected
    return action_values
def policy_iteration(environment, discount_factor=1.0, max_iterations=1e9):
    """Find an optimal policy by alternating evaluation and greedy improvement.

    Bug fix: the original set ``stable_policy = True`` when an action
    *changed*, so the loop always terminated after a single improvement
    step. A changed greedy action now correctly marks the policy as
    unstable, and iteration continues until the greedy policy stops
    changing (standard policy iteration).

    Returns (policy, V). If the iteration budget is exhausted before
    convergence, the latest policy/value pair is returned instead of an
    implicit None.
    """
    # Start from the uniformly random policy: num_states x num_actions.
    policy = np.ones([environment.nS, environment.nA]) / environment.nA
    evaluated_policies = 1
    V = None
    for i in range(int(max_iterations)):
        stable_policy = True
        # Policy evaluation: value of the current policy.
        V = policy_evaluation(policy, environment,
                              discount_factor=discount_factor)
        # Policy improvement: greedily pick the best one-step action per state.
        for state in range(environment.nS):
            current_action = np.argmax(policy[state])
            action_value = one_step_lookahead(
                environment, state, V, discount_factor)
            best_action = np.argmax(action_value)
            if current_action != best_action:
                # The greedy action changed, so we have not converged yet.
                stable_policy = False
            # Greedy (deterministic) policy update.
            policy[state] = np.eye(environment.nA)[best_action]
        evaluated_policies += 1
        if stable_policy:
            print(f'Evaluated {evaluated_policies} policies.')
            return policy, V
    # Iteration budget exhausted without convergence.
    return policy, V
def value_iteration(environment, discount_factor=1.0, theta=1e-9, max_iterations=1e9):
    """Compute an optimal value function with Bellman optimality backups,
    then derive the corresponding deterministic greedy policy.

    Returns (policy, V)."""
    V = np.zeros(environment.nS)
    for i in range(int(max_iterations)):
        biggest_delta = 0
        for s in range(environment.nS):
            # Best achievable one-step value from state s.
            best_value = np.max(one_step_lookahead(environment, s, V, discount_factor))
            biggest_delta = max(biggest_delta, np.abs(V[s] - best_value))
            V[s] = best_value
        if biggest_delta < theta:
            print(f'Value-iteration converged at iteration#{i}.')
            break
    # Extract the greedy deterministic policy from the converged values.
    policy = np.zeros([environment.nS, environment.nA])
    for s in range(environment.nS):
        greedy_action = np.argmax(one_step_lookahead(environment, s, V, discount_factor))
        policy[s, greedy_action] = 1.0
    return policy, V
def play_episodes(environment, n_episodes, policy):
    """Run `n_episodes` greedy rollouts of `policy`.

    Returns (wins, total_reward, average_reward), where a win is an
    episode whose terminal step pays a reward of exactly 1.0."""
    wins = 0
    total_reward = 0
    for _episode in range(n_episodes):
        state = environment.reset()
        finished = False
        while not finished:
            # Greedy action under the (deterministic) policy row for this state.
            chosen = np.argmax(policy[state])
            state, reward, finished, _info = environment.step(chosen)
            total_reward += reward
            # A terminal reward of 1.0 counts as a successful episode.
            if finished and reward == 1.0:
                wins += 1
    average_reward = total_reward / n_episodes
    return wins, total_reward, average_reward
# Number of evaluation episodes played with each solver's policy.
n_episodes = 10000
# The two dynamic-programming solvers to benchmark side by side.
solvers = [('Policy Iteration', policy_iteration),
           ('Value Iteration', value_iteration)]
for iteration_name, iteration_func in solvers:
    # Load a fresh Frozen Lake environment for each solver.
    environment = gym.make('FrozenLake-v0')
    # Solve on the unwrapped env (environment.env), which exposes the model
    # attributes (nS/nA/P) the solvers read.
    policy, V = iteration_func(environment.env)
    # Roll out the learned policy on the wrapped environment.
    wins, total_reward, average_reward = play_episodes(
        environment, n_episodes, policy)
    print(f'{iteration_name} :: number of wins over {n_episodes} episodes = {wins}')
    print(f'{iteration_name} :: average reward over {n_episodes} episodes = {average_reward} \n\n')
| 41.390244 | 116 | 0.649234 |
1ba64780503ddc40c98a976e57a18363d8ae8ccc | 2,287 | py | Python | ivi/rigol/rigolDP832.py | lude-ma/python-ivi | f62907a2922d5fc98e0a524ef6ddbaa62791ff14 | [
"MIT"
] | 1 | 2017-09-09T06:04:14.000Z | 2017-09-09T06:04:14.000Z | ivi/rigol/rigolDP832.py | lude-ma/python-ivi | f62907a2922d5fc98e0a524ef6ddbaa62791ff14 | [
"MIT"
] | null | null | null | ivi/rigol/rigolDP832.py | lude-ma/python-ivi | f62907a2922d5fc98e0a524ef6ddbaa62791ff14 | [
"MIT"
] | null | null | null | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .rigolDP800 import *
class rigolDP832(rigolDP800):
    "Rigol DP832 IVI DC power supply driver"

    def __init__(self, *args, **kwargs):
        # Only set the instrument id if a subclass has not already done so.
        self.__dict__.setdefault('_instrument_id', 'DP832')
        super(rigolDP832, self).__init__(*args, **kwargs)
        # The DP832 provides three output channels: two 30 V / 3 A and one 5 V / 3 A.
        self._output_count = 3
        # Per-channel limits: 'range' maps a range name to (max volts, max amps);
        # ovp_max / ocp_max are the over-voltage / over-current protection ceilings.
        self._output_spec = [
            {
                'range': {
                    'P30V': (30.0, 3.0)
                },
                'ovp_max': 33.0,
                'ocp_max': 3.3,
                'voltage_max': 30.0,
                'current_max': 3.0
            },
            {
                'range': {
                    'P30V': (30.0, 3.0)
                },
                'ovp_max': 33.0,
                'ocp_max': 3.3,
                'voltage_max': 30.0,
                'current_max': 3.0
            },
            {
                'range': {
                    'P5V': (5.0, 3.0)
                },
                'ovp_max': 5.5,
                'ocp_max': 3.3,
                'voltage_max': 5.0,
                'current_max': 3.0
            }
        ]
        self._init_outputs()
| 31.763889 | 77 | 0.572366 |
805ee21bdb95f1b70519c7ebc1ecaf6f34afa885 | 10,788 | py | Python | petek.py | Yoav6/Petek | d37e59f929b7123235f1fca6dd65d805c56f3838 | [
"MIT"
] | null | null | null | petek.py | Yoav6/Petek | d37e59f929b7123235f1fca6dd65d805c56f3838 | [
"MIT"
] | null | null | null | petek.py | Yoav6/Petek | d37e59f929b7123235f1fca6dd65d805c56f3838 | [
"MIT"
] | null | null | null | import sys
from SQLper import *
#from PyQt5 import uic
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtCore as qtc
from PyQt5 import QtGui as qtg
from stylesheet import styleSheet
from main_window import Ui_MainWindow
from homepage_layout import Ui_homepage_layout
from tab_window import Ui_TabWindow
from add_person import Ui_add_person_popup
print('finished importing\nInitiating app...')
def set_search_box(parent):
    # Re-parent the app-wide shared search widget into the given container layout.
    parent.addWidget(searchbox.search_box)
def open_add_person_popup(first_name='', last_name='', parent=None):
    """Show the modal "add person" dialog, optionally pre-filled.

    When a parent widget is supplied, the dialog's person-type combo box is
    switched to its second entry (index 1) before the modal loop starts.
    """
    dialog = addPersonPopup(parent)
    dialog.show()
    if parent:
        dialog.ui.type_cbox.setCurrentIndex(1)
    if first_name:
        dialog.ui.first_name_field.setText(first_name)
    if last_name:
        dialog.ui.last_name_field.setText(last_name)
    dialog.exec()
def check_name_and_add_person(first_name, last_name, **kwargs):
    """Insert a new person, confirming with the user when the name already exists.

    If the name is found, a Yes/No message box (Hebrew UI text) asks whether
    to add another person with the same name. Returns the result of the
    insert, or None when the user declines.

    Fix: removed the leftover numeric debug prints (print(3)/print(4)).
    """
    instances = check_if_name_exists(first_name, last_name)
    if instances:
        full_name = first_name + ' ' + last_name
        messagebox = \
            qtw.QMessageBox.question(None, 'נמצא אדם נוסף במערכת עם אותו השם',
                                     f'נמצאו {len(instances)} מופעים של השם {full_name}. האם להוסיף אדם נוסף עם אותו השם?'
                                     f'', qtw.QMessageBox.No | qtw.QMessageBox.Yes, qtw.QMessageBox.Yes)
        if messagebox == qtw.QMessageBox.Yes:
            # User confirmed the duplicate; clear the flag so the insert proceeds.
            instances = False
    if not instances:
        return insert_row_to_table('people', first_name=first_name, last_name=last_name, **kwargs)
def split_full_name(name: str) -> tuple:
    """Split a full name into (first_name, last_name) on the first space.

    A name without a space yields an empty last name.

    Fix: catch only the ValueError raised by the failed tuple unpacking
    instead of a bare ``except``, which also swallowed unrelated errors
    (e.g. KeyboardInterrupt, attribute errors on non-string input).
    """
    try:
        first_name, last_name = name.split(' ', 1)
    except ValueError:
        first_name, last_name = name, ''
    return first_name, last_name
class appWindow(qtw.QMainWindow):
    """Top-level application window: hosts the toolbar and swaps central pages."""

    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        #self.setCentralWidget(homepage)
        #homepage.setParent(self.ui.window_frame)
        # Toolbar action labelled "home page" (Hebrew) returning to the homepage.
        self.home_btn = qtw.QAction(qtg.QIcon('home_btn.png'), 'דף בית', self)
        self.ui.toolBar.addAction(self.home_btn)
        self.home_btn.triggered.connect(self.go_to_home_page)
        # NOTE(review): only `styleSheet` is imported from the stylesheet module
        # at the top of the file; confirm the name `stylesheet` resolves here
        # (possibly via the SQLper star import).
        self.setStyleSheet(stylesheet.stylesheet)

    def go_to_home_page(self):
        # Swap the central widget back to the global homepage instance.
        print(homepage)
        self.setCentralWidget(homepage)
class searchBox(qtw.QWidget):
    """Editable combo box used as the app-wide autocomplete search field.

    Items are display strings paired with database ids; match_id() maps the
    currently selected index back to its id.
    """

    def __init__(self):
        super().__init__()
        self.search_box = qtw.QComboBox(self)
        self.search_box.setObjectName('search_box')
        size_policy = qtw.QSizePolicy(qtw.QSizePolicy.Preferred, qtw.QSizePolicy.Preferred)
        self.search_box.setSizePolicy(size_policy)
        # Placeholder first entry so that index 0 means "nothing selected".
        self.list = [""]
        self.edit = qtw.QLineEdit(self)
        self.search_box.setLineEdit(self.edit)
        self.line = self.search_box.lineEdit()
        # Hebrew placeholder text: "search".
        self.edit.setPlaceholderText('חיפוש')
        self.search_box.setLayoutDirection(qtc.Qt.RightToLeft)
        #self.line.setFocusPolicy(qtc.Qt.StrongFocus)
        # Typing must not insert new items; completion pops up matching entries.
        self.search_box.setInsertPolicy(self.search_box.NoInsert)
        self.search_box.completer().setCompletionMode(qtw.QCompleter.PopupCompletion)
        #self.search_box.currentIndexChanged.connect(self.match_id)

    def match_id(self):
        # Index 0 is the placeholder entry, so ids are offset by one.
        index = self.search_box.currentIndex()
        if index:
            id_ = self.id_list[index - 1]
            return id_

    def add_items(self, item_list):
        # Sort by display string (first tuple element) for a predictable dropdown order.
        def sort_first(val):
            return val[0]
        item_list.sort(key=sort_first)
        item_list, self.id_list = map(list, zip(*item_list))  # splits list of tuples into 2 lists
        self.item_list = self.list + item_list
        self.search_box.addItems(self.item_list)

    def set_list(self, func):
        # `func` is a provider returning (display, id) tuples, e.g. get_all_people.
        person_list = func()
        self.add_items(person_list)
class homepage_layout(qtw.QWidget):
    """Landing page: the global search box plus a button to add a new person."""

    def __init__(self):
        super().__init__()
        self.ui = Ui_homepage_layout()
        self.ui.setupUi(self)
        # Mount the shared search widget and populate it with all people.
        self.ui.searchbox_container.addWidget(searchbox.search_box)
        searchbox.set_list(get_all_people)
        searchbox.search_box.currentIndexChanged.connect(self.search_item_selected)
        self.ui.new_person_btn.clicked.connect(open_add_person_popup)

    def search_item_selected(self):
        # When a real entry (not the placeholder) is chosen, reset the search
        # box, switch the main window to the profile page and open a tab for
        # the selected person.
        id_ = searchbox.match_id()
        if id_:
            name = searchbox.edit.text()
            searchbox.search_box.clearFocus()
            searchbox.search_box.clearEditText()
            searchbox.search_box.setEditText('')
            searchbox.search_box.setCurrentIndex(0)
            window.setCentralWidget(profile_page_layout)
            set_search_box(profile_page_layout.ui.searchbox_container)
            profile_page_layout.add_tab(id_, name)
class profilePageLayout(qtw.QWidget):
    """Profile page: one closable tab per person, addressed by database id."""

    def __init__(self):
        super().__init__()
        self.ui = Ui_TabWindow()
        self.ui.setupUi(self)
        self.open_tabs = []  # IDs
        searchbox.search_box.currentIndexChanged.connect(self.search_item_selected)
        self.ui.tab_widget.tabCloseRequested.connect(self.close_tab)

    def add_tab(self, id_, title):
        # Create a new outer tab whose object name is the person's id so it
        # can be found again via findChild.
        print('id: ', id_)  # gets full name using id_
        self.open_tabs.append(id_)
        print(self.open_tabs)
        tab = qtw.QWidget()
        tab.setObjectName(str(id_))
        self.ui.tab_widget.addTab(tab, title)
        self.ui.tab_widget.setCurrentWidget(tab)
        #setting tab layout
        tab.setContentsMargins(0, 5, 0, 0)
        tab.setLayout(qtw.QGridLayout())
        tab.font().setPointSize(10)
        #setup inner tab
        inner_tab_widget = qtw.QTabWidget()
        inner_tab_widget.setParent(tab)
        inner_tab = qtw.QWidget()
        # First inner tab is labelled "main" (Hebrew).
        inner_tab_widget.addTab(inner_tab, 'ראשי')
        inner_tab_widget.setTabBarAutoHide(False)
        self.setup_profile(inner_tab_widget, id_)
        # Drop the local references; the widgets are owned by the tab widget now.
        tab = inner_tab_widget = inner_tab = None

    def setup_profile(self, tab_widget, id_):
        # Placeholder: profile-content population is not implemented yet.
        #tab = qtw.QWidget()
        #tab_widget.addTab(tab, 'ראשי')
        print(tab_widget, '| id: ', id_)

    def search_item_selected(self):
        # Open a tab for the selected person, or focus it if already open,
        # then reset the search box.
        id_ = searchbox.match_id()
        if id_:
            if id_ not in self.open_tabs:
                name = searchbox.edit.text()
                self.add_tab(id_, name)
            else:
                target_tab = self.ui.tab_widget.findChild(qtw.QWidget, str(id_))
                self.ui.tab_widget.setCurrentWidget(target_tab)
            searchbox.search_box.clearFocus()
            searchbox.search_box.clearEditText()
            searchbox.search_box.setCurrentIndex(0)

    def close_tab(self, tab_index):
        # Remove the tab and forget its id so the person can be reopened later.
        tab_object = self.ui.tab_widget.widget(tab_index)
        id_ = tab_object.objectName()
        #print('id: ', id_)
        self.open_tabs.remove(int(id_))
        #print(self.open_tabs)
        self.ui.tab_widget.removeTab(tab_index)
class addPersonPopup(qtw.QDialog):  # , qtc.Qt
    """Dialog for adding a person record (student / staff / other).

    The meaning of the 'mentor' field depends on ``self.mode``: a mentor
    search box for students, a phone-number line edit for staff.
    """
    def __init__(self, parent=None):
        super().__init__()
        self.ui = Ui_add_person_popup()
        self.ui.setupUi(self)
        #self.AA_DisableWindowContextHelpButton(True)
        self.mode = 'student'
        # NOTE(review): set_type() runs before self.mentor_field is created
        # below; it only touches self.ui.mentor_field (from the .ui file),
        # which is a distinct widget — confirm this is intended.
        self.set_type()
        plus_icon = qtg.QIcon()
        plus_icon.addPixmap(qtg.QPixmap('plus_btn.png'))
        self.ui.new_mentor_btn.setIcon(plus_icon)
        self.ui.new_mentor_btn.clicked.connect(self.new_mentor)
        self.ui.ok_btn.clicked.connect(self.ok)
        self.ui.cancel_btn.clicked.connect(self.cancel)
        self.ui.type_cbox.currentIndexChanged.connect(self.set_type)
        """self.phone_field = qtw.QLineEdit()
        self.phone_field.setParent(self.ui.mentor_cbox_container)"""
        if parent:
            # Opened from new_mentor(): lock the type so the new person
            # must be created as the current type.
            self.ui.type_cbox.setDisabled(True)
        self.mentor_field = searchBox()
        self.mentor_field.search_box.setParent(self.ui.mentor_cbox_container)
        self.mentor_field.set_list(get_all_staff)
        # self.mentor_field.search_box.setSizePolicy(
        #     qtw.QSizePolicy(qtw.QSizePolicy.Preferred, qtw.QSizePolicy.Preferred))
        self.mentor_field.edit.setPlaceholderText('')
    def set_type(self):
        """Switch the dialog's widgets according to the person-type combobox."""
        if self.ui.type_cbox.currentIndex() == 0:
            self.mode = 'student'
            self.ui.mentor_label.setText('חונך')
            self.ui.new_mentor_btn.setDisabled(False)
            self.ui.mentor_label.setDisabled(False)
            self.ui.mentor_field.setDisabled(False)
        elif self.ui.type_cbox.currentIndex() == 1:
            self.mode = 'staff'
            # NOTE(review): this replaces self.mentor_field (a searchBox) with
            # a plain QLineEdit and never restores the searchBox when the
            # user switches back to 'student' — confirm/fix.
            self.mentor_field = qtw.QLineEdit()
            self.mentor_field.setParent(self.ui.mentor_cbox_container)
            self.ui.mentor_label.setText('טלפון')
            self.ui.mentor_field.setDisabled(False)
            self.ui.new_mentor_btn.setDisabled(True)
            self.ui.mentor_label.setDisabled(False)
        else:
            self.mode = 'other'
            self.ui.mentor_label.setText('חונך')
            self.ui.mentor_label.setDisabled(True)
            self.ui.mentor_field.setDisabled(True)
            self.ui.new_mentor_btn.setDisabled(True)
    def ok(self):
        """Validate the form and persist the new person record."""
        first_name = self.ui.first_name_field.text()
        last_name = self.ui.last_name_field.text()
        if self.mode == 'student':
            # Index 0 is the placeholder row, i.e. no mentor chosen.
            if self.mentor_field.search_box.currentIndex() != 0:
                mentor_id = self.mentor_field.match_id()
                student_id = check_name_and_add_person(first_name, last_name, student=1, current_mentor=mentor_id)
                if student_id:
                    add_student_mentor_relation(student_id, mentor_id)
                else:
                    pass  # announce it
        elif self.mode == 'staff':
            # In staff mode mentor_field is the phone-number QLineEdit.
            check_name_and_add_person(first_name, last_name, staff=1, phone=self.mentor_field.edit.text())
        else:
            check_name_and_add_person(first_name, last_name)
    def cancel(self):
        """Dismiss the dialog without saving."""
        self.close()
    def new_mentor(self):
        """Open a nested popup pre-filled with the typed mentor name."""
        first_name, last_name = split_full_name(self.mentor_field.edit.text())
        open_add_person_popup(first_name=first_name, last_name=last_name, parent=self)
if __name__ == '__main__':
    # Creation order matters: the shared searchbox singleton must exist
    # before homepage_layout / profilePageLayout, which wire into it at
    # construction time.
    app = qtw.QApplication(sys.argv)
    stylesheet = styleSheet()
    searchbox = searchBox()
    homepage = homepage_layout()
    print('building main window...')
    window = appWindow()
    window.setCentralWidget(homepage)
    print('loading main window...')
    window.showMaximized()
    profile_page_layout = profilePageLayout()
    # Background socket listener; daemon=True so it dies with the GUI.
    socket_main_thread = threading.Thread(target=activate_socket, daemon=True)
    socket_main_thread.start()
    app.exec()
    # After the event loop exits, close the (module-level) DB connection.
    conn.close()
    sys.exit()
| 39.661765 | 119 | 0.641917 |
aa9853ce6132882b4783a995f79a95db9d4ba8f9 | 928 | py | Python | 79-desafio.py | SweydAbdul/EstudosPythonCeV | 5eb61d4e1d47b99d57de776c835aa9f3c2bcee3b | [
"MIT"
] | null | null | null | 79-desafio.py | SweydAbdul/EstudosPythonCeV | 5eb61d4e1d47b99d57de776c835aa9f3c2bcee3b | [
"MIT"
] | null | null | null | 79-desafio.py | SweydAbdul/EstudosPythonCeV | 5eb61d4e1d47b99d57de776c835aa9f3c2bcee3b | [
"MIT"
] | null | null | null | '''
n = []
nc = n[:]
c = 0
while True:
n.append(int(input('Digite um valor: ')))
if n[c] in nc:
print('Valor duplicado nao vou adicionar')
n.remove(n[c])
else:
nc.append(n[c])
print('Valor adicionado com sucesso...')
c+=1
while True:
q = str(input('Quer continuar? [S/N] ')).upper()
if q in 'SN':
if q == 'S':
break
elif q == 'N':
break
if q == 'N':
break
print('-='*30)
nc.sort()
print(f'Voce digitou os valores {nc}')
input()
'''
# Guanabara's solution: read integers, keep only unique values, show sorted.
numeros = list()
while True:
    n = int(input('Digite um valor: '))
    if n not in numeros:
        numeros.append(n)
    else:
        print('Valor duplicado! Nao vou adicionar...')
    r = str(input('Quer continuar? [S/N] '))
    # BUG FIX: the original used `if r in 'Nn'`, which is True for the empty
    # string as well ('' is a substring of every string), so just pressing
    # Enter silently ended the loop.  Compare explicitly instead; strip() and
    # upper() also tolerate stray whitespace and lowercase input.
    if r.strip().upper() == 'N':
        break
print('-='*30)
numeros.sort()
print(f'Voce digitou os valores {numeros}')
58d017b23f789dfda1a70cd834c6e7e40d9f84d8 | 4,177 | py | Python | main.py | AaronX121/Soft-Decision-Tree | 9b02e635a1265b62df2d831f7f15e1742b0d5002 | [
"BSD-3-Clause"
] | 46 | 2019-03-24T14:46:04.000Z | 2020-12-10T03:48:00.000Z | main.py | AaronX121/Soft-Decision-Tree | 9b02e635a1265b62df2d831f7f15e1742b0d5002 | [
"BSD-3-Clause"
] | 4 | 2019-06-18T09:48:42.000Z | 2020-05-05T14:29:07.000Z | main.py | AaronX121/Soft-Decision-Tree | 9b02e635a1265b62df2d831f7f15e1742b0d5002 | [
"BSD-3-Clause"
] | 10 | 2020-02-19T12:27:31.000Z | 2020-12-14T11:04:54.000Z | """Training and evaluating a soft decision tree on the MNIST dataset."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from SDT import SDT
def onehot_coding(target, device, output_dim):
    """Return a float32 one-hot matrix of shape (len(target), output_dim).

    Each row has a 1.0 in the column given by the corresponding class
    label in ``target``; the result lives on ``device``.
    """
    num_samples = target.size(0)
    encoded = torch.zeros(num_samples, output_dim, device=device)
    encoded.scatter_(1, target.view(-1, 1), 1.0)
    return encoded
if __name__ == "__main__":
    # Train a Soft Decision Tree on MNIST and report test accuracy per epoch.
    # Parameters
    input_dim = 28 * 28  # the number of input dimensions
    output_dim = 10  # the number of outputs (i.e., # classes on MNIST)
    depth = 5  # tree depth
    lamda = 1e-3  # coefficient of the regularization term
    lr = 1e-3  # learning rate
    weight_decaly = 5e-4  # weight decay
    batch_size = 128  # batch size
    epochs = 50  # the number of training epochs
    log_interval = 100  # the number of batches to wait before printing logs
    use_cuda = False  # whether to use GPU
    # Model and Optimizer
    tree = SDT(input_dim, output_dim, depth, lamda, use_cuda)
    optimizer = torch.optim.Adam(tree.parameters(),
                                 lr=lr,
                                 weight_decay=weight_decaly)
    # Load data
    data_dir = "../Dataset/mnist"
    transformer = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
    )
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir,
                       train=True,
                       download=True,
                       transform=transformer),
        batch_size=batch_size,
        shuffle=True,
    )
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(data_dir,
                       train=False,
                       transform=transformer),
        batch_size=batch_size,
        shuffle=True,
    )
    # Utils
    best_testing_acc = 0.0
    testing_acc_list = []
    training_loss_list = []
    criterion = nn.CrossEntropyLoss()
    device = torch.device("cuda" if use_cuda else "cpu")
    for epoch in range(epochs):
        # Training
        tree.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            # NOTE: rebinds the module-level batch_size to the actual size
            # of this batch (the last batch may be smaller).
            batch_size = data.size()[0]
            data, target = data.to(device), target.to(device)
            # NOTE(review): target_onehot is computed but never used below —
            # CrossEntropyLoss consumes integer class labels directly.
            target_onehot = onehot_coding(target, device, output_dim)
            # penalty is the tree's regularization term (weighted by lamda).
            output, penalty = tree.forward(data, is_training_data=True)
            loss = criterion(output, target.view(-1))
            loss += penalty
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Print training status
            if batch_idx % log_interval == 0:
                pred = output.data.max(1)[1]
                correct = pred.eq(target.view(-1).data).sum()
                msg = (
                    "Epoch: {:02d} | Batch: {:03d} | Loss: {:.5f} |"
                    " Correct: {:03d}/{:03d}"
                )
                print(msg.format(epoch, batch_idx, loss, correct, batch_size))
                training_loss_list.append(loss.cpu().data.numpy())
        # Evaluating
        tree.eval()
        correct = 0.
        for batch_idx, (data, target) in enumerate(test_loader):
            batch_size = data.size()[0]
            data, target = data.to(device), target.to(device)
            # At eval time forward() returns only the output logits.
            output = F.softmax(tree.forward(data), dim=1)
            pred = output.data.max(1)[1]
            correct += pred.eq(target.view(-1).data).sum()
        accuracy = 100.0 * float(correct) / len(test_loader.dataset)
        if accuracy > best_testing_acc:
            best_testing_acc = accuracy
        msg = (
            "\nEpoch: {:02d} | Testing Accuracy: {}/{} ({:.3f}%) |"
            " Historical Best: {:.3f}%\n"
        )
        print(
            msg.format(
                epoch, correct,
                len(test_loader.dataset),
                accuracy,
                best_testing_acc
            )
        )
        testing_acc_list.append(accuracy)
| 31.171642 | 79 | 0.555901 |
e4c4a02279c6f0d38fbd93c5746deaf45fa91165 | 10,687 | py | Python | src/scripts/segmentation/analysis/render_general.py | hendraet/IIC | a5bab915eda133b0ecfd42eaacd60c7b26807cb6 | [
"MIT"
] | null | null | null | src/scripts/segmentation/analysis/render_general.py | hendraet/IIC | a5bab915eda133b0ecfd42eaacd60c7b26807cb6 | [
"MIT"
] | null | null | null | src/scripts/segmentation/analysis/render_general.py | hendraet/IIC | a5bab915eda133b0ecfd42eaacd60c7b26807cb6 | [
"MIT"
] | null | null | null | import argparse
import os
import pickle
import sys
from datetime import datetime
import numpy as np
import torch
import src.archs as archs
from src.utils.cluster.cluster_eval import \
_get_assignment_data_matches
from src.utils.cluster.transforms import sobel_process
from src.utils.segmentation.data import make_Coco_dataloaders, \
make_Potsdam_dataloaders
from src.utils.segmentation.render import render
from src.utils.segmentation.segmentation_eval import \
_segmentation_get_data, segmentation_eval
# Render images for segmentation models
# Command-line interface for the segmentation rendering script.
parser = argparse.ArgumentParser()
parser.add_argument("--model_inds", type=int, nargs="+", default=[])
parser.add_argument("--net_name", type=str, default="best")
parser.add_argument("--imgs_dataloaders", type=str, nargs="+", default=["test"])
parser.add_argument("--num", type=int, default=100)
parser.add_argument("--reassess_acc", default=False, action="store_true")
parser.add_argument("--get_match_only", default=False, action="store_true")
args = parser.parse_args()

model_inds = args.model_inds
# BUG FIX: the original read `args.epochs`, but no "--epochs" option is ever
# registered above, so parse_args() returns a Namespace without that
# attribute and the script crashed with AttributeError on startup.  The
# value is never used below; keep the name defined (as None) for safety.
epochs = getattr(args, "epochs", None)
net_name_prefix = args.net_name
num = args.num
reassess_acc = args.reassess_acc

print("imgs_dataloaders passed:")
print(args.imgs_dataloaders)

out_root = "/scratch/shared/slow/xuji/iid_private"
# For each requested experiment id: reload the saved config and model,
# recompute the best-head cluster->ground-truth assignment, render
# qualitative segmentation images, and accumulate per-class IoU.
# (Python 2 code — note `xrange` below.)
for model_ind in model_inds:
  out_dir = os.path.join(out_root, str(model_ind))
  net_names = [net_name_prefix + "_net.pytorch"]
  reloaded_config_path = os.path.join(out_dir, "config.pickle")
  print("Loading restarting config from: %s" % reloaded_config_path)
  with open(reloaded_config_path, "rb") as config_f:
    config = pickle.load(config_f)
  assert (config.model_ind == model_ind)
  # Older saved configs predate this flag; default it for compatibility.
  if not hasattr(config, "use_doersch_datasets"):
    config.use_doersch_datasets = False
  if "Coco" in config.dataset:
    dataloaders_train, mapping_assignment_dataloader, mapping_test_dataloader \
      = make_Coco_dataloaders(config)
    all_label_names = [
      "sky-stuff",
      "plant-stuff",
      "ground-stuff",
    ]
    if config.include_things_labels:
      all_label_names += ["person-things"]
    if config.incl_animal_things:
      all_label_names += ["animal-things"]
  elif config.dataset == "Potsdam":
    dataloaders_train, mapping_assignment_dataloader, mapping_test_dataloader \
      = make_Potsdam_dataloaders(config)
    if config.use_coarse_labels:
      all_label_names = ["roads and cars",
                         "buildings and clutter",
                         "vegetation and trees"]
    else:
      all_label_names = ["roads",
                         "buildings",
                         "vegetation",
                         "trees",
                         "cars",
                         "clutter"]
  assert (len(all_label_names) == config.gt_k)
  print("dataloader sizes: %d %d %d" % (len(dataloaders_train[0]),
                                        len(mapping_assignment_dataloader),
                                        len(mapping_test_dataloader)))
  # ------------------------------
  for imgs_dataloader_name in args.imgs_dataloaders:
    for net_name in net_names:
      print("%s %s %s" % (
        config.out_dir, imgs_dataloader_name, net_name.split(".")[0]))
      net_name_outdir = os.path.join(config.out_dir,
                                     imgs_dataloader_name,
                                     net_name.split(".")[0])
      if not os.path.exists(net_name_outdir):
        os.makedirs(net_name_outdir)
      print("doing net_name %s to %s" % (net_name, net_name_outdir))
      sys.stdout.flush()
      # load model (weights saved on GPU are remapped to CPU, then moved)
      net = archs.__dict__[config.arch](config)
      model_path = os.path.join(config.out_dir, net_name)
      print("getting model path %s " % model_path)
      net.load_state_dict(
        torch.load(model_path, map_location=lambda storage, loc: storage))
      net.cuda()
      net = torch.nn.DataParallel(net)
      net.module.eval()
      if reassess_acc:
        # Accuracy-only mode: evaluate and skip rendering entirely.
        print("... reassessing acc %s" % datetime.now())
        sys.stdout.flush()
        stats_dict = segmentation_eval(config, net,
                                       mapping_assignment_dataloader,
                                       mapping_test_dataloader,
                                       sobel=(not config.no_sobel),
                                       return_only=True,
                                       verbose=0)
        acc = stats_dict["best"]
        print("... reassessment finished, got acc %f" % acc)
        sys.stdout.flush()
        continue
      print(
        "starting to run test data through for rendering %s" % datetime.now())
      all_matches, all_accs = _get_assignment_data_matches(net,
                                                           mapping_assignment_dataloader,
                                                           config, sobel=(not config.no_sobel),
                                                           using_IR=config.using_IR,
                                                           get_data_fn=_segmentation_get_data,
                                                           just_matches=False,
                                                           verbose=1)
      # Pick the output head with the best accuracy; `match` maps its
      # cluster indices to ground-truth label indices.
      head_i = np.argmax(all_accs)
      match = all_matches[head_i]
      print("got best head %d %s" % (head_i, datetime.now()))
      print("best match %s" % str(match))
      if args.get_match_only:
        exit(0)
      colour_map_raw = [(np.random.rand(3) * 255.).astype(np.uint8)
                        for _ in xrange(max(config.output_k, config.gt_k))]
      # coco: green (veg) (7, 130, 42), blue (sky) (39, 159, 216),
      # grey (road) (82, 91, 96), red (person - if used) (229, 57, 57)
      if "Coco" in config.dataset:
        colour_map_gt = [np.array([39, 159, 216], dtype=np.uint8),
                         np.array([7, 130, 42], dtype=np.uint8),
                         np.array([82, 91, 96], dtype=np.uint8),
                         np.array([229, 57, 57], dtype=np.uint8)
                         ]
      else:
        colour_map_gt = colour_map_raw
      # render first batch
      predicted_all = [0 for _ in xrange(config.gt_k)]
      correct_all = [0 for _ in xrange(config.gt_k)]
      all_all = [0 for _ in xrange(config.gt_k)]
      if imgs_dataloader_name == "test":
        imgs_dataloader = mapping_test_dataloader
      elif imgs_dataloader_name == "train":
        imgs_dataloader = mapping_assignment_dataloader
      else:
        assert (False)
      print("length of imgs_dataloader %d" % len(imgs_dataloader))
      next_img_ind = 0
      for b_i, batch in enumerate(imgs_dataloader):
        orig_imgs, flat_targets, mask = batch
        orig_imgs, flat_targets, mask = \
          orig_imgs.cuda(), flat_targets.numpy(), mask.numpy().astype(np.bool)
        if not config.no_sobel:
          imgs = sobel_process(orig_imgs, config.include_rgb,
                               using_IR=config.using_IR)
        else:
          imgs = orig_imgs
        with torch.no_grad():
          x_outs_all = net(imgs)
        x_outs = x_outs_all[head_i]
        x_outs = x_outs.cpu().numpy()
        flat_preds = np.argmax(x_outs, axis=1)
        n, h, w = flat_preds.shape
        num_imgs_curr = flat_preds.shape[0]
        reordered_preds = np.zeros((num_imgs_curr, h, w),
                                   dtype=flat_targets.dtype)
        # Relabel cluster predictions into ground-truth label space.
        for pred_i, target_i in match:
          reordered_preds[flat_preds == pred_i] = target_i
        assert (mask.shape == reordered_preds.shape)
        assert (flat_targets.shape == reordered_preds.shape)
        masked = np.logical_not(mask)
        reordered_preds[masked] = -1
        flat_targets[masked] = -1  # not in colourmaps, hence will be black
        assert (reordered_preds.max() < config.gt_k)
        assert (flat_targets.max() < config.gt_k)
        # print iou per class
        for c in xrange(config.gt_k):
          preds = (reordered_preds == c)
          targets = (flat_targets == c)
          predicted = preds.sum()
          correct = (preds * targets).sum()
          all = ((preds + targets) >= 1).sum()
          predicted_all[c] += predicted
          correct_all[c] += correct
          all_all[c] += all
        # Only render up to `num` images; IoU accumulation above still
        # runs over the whole dataloader.
        if next_img_ind >= num:
          print("not rendering batch")
          continue  # already rendered num
        elif next_img_ind + num_imgs_curr > num:
          relevant_inds = range(0, num - next_img_ind)
        else:
          relevant_inds = range(0, num_imgs_curr)
        orig_imgs = orig_imgs[relevant_inds, :, :, :]
        imgs = imgs[relevant_inds, :, :, :]
        flat_preds = flat_preds[relevant_inds, :, :]
        reordered_preds = reordered_preds[relevant_inds, :, :]
        flat_targets = flat_targets[relevant_inds, :, :]
        if "Coco" in config.dataset:
          # blue and red channels are swapped
          orig_imgs_swapped = torch.zeros(orig_imgs.shape,
                                          dtype=orig_imgs.dtype)
          orig_imgs_swapped[:, 0, :, :] = orig_imgs[:, 2, :, :]
          orig_imgs_swapped[:, 1, :, :] = orig_imgs[:, 1, :, :]
          orig_imgs_swapped[:, 2, :, :] = orig_imgs[:, 0, :, :]  # ignore others
          render(orig_imgs_swapped, mode="image", name=("%d_img" % model_ind),
                 offset=next_img_ind,
                 out_dir=net_name_outdir)
          render(imgs, mode="image_as_feat", name=("%d_img_feat" % model_ind),
                 offset=next_img_ind,
                 out_dir=net_name_outdir)
        elif "Potsdam" in config.dataset:
          render(orig_imgs, mode="image_ir", name=("%d_img" % model_ind),
                 offset=next_img_ind,
                 out_dir=net_name_outdir)
        render(flat_preds, mode="preds", name=("%d_raw_preds" % model_ind),
               offset=next_img_ind,
               colour_map=colour_map_raw,
               out_dir=net_name_outdir)
        render(reordered_preds, mode="preds",
               name=("%d_reordered_preds" % model_ind),
               offset=next_img_ind,
               colour_map=colour_map_gt,
               out_dir=net_name_outdir)
        render(flat_targets, mode="preds", name=("%d_targets" % model_ind),
               offset=next_img_ind,
               colour_map=colour_map_gt,
               out_dir=net_name_outdir)
        next_img_ind += num_imgs_curr
        print("... rendered batch %d, next_img_ind %d " % (b_i, next_img_ind))
        sys.stdout.flush()
      for c in xrange(config.gt_k):
        iou = correct_all[c] / float(all_all[c])
        print("class %d: name %s: pred %d correct %d all %d %f iou" %
              (c, all_label_names[c], predicted_all[c], correct_all[c],
               all_all[c], iou))
| 37.236934 | 87 | 0.582858 |
f33789cab3cb4886b5bb8b21580767b427b47501 | 11,673 | py | Python | Survey.py | ebellm/VolumetricSurveySpeed | cc255292592e47aa15d6dab328770950d17d362f | [
"BSD-3-Clause"
] | 1 | 2018-06-10T06:55:32.000Z | 2018-06-10T06:55:32.000Z | Survey.py | ebellm/VolumetricSurveySpeed | cc255292592e47aa15d6dab328770950d17d362f | [
"BSD-3-Clause"
] | null | null | null | Survey.py | ebellm/VolumetricSurveySpeed | cc255292592e47aa15d6dab328770950d17d362f | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division
import numpy as N
from Telescope import P48, Blanco
from Camera import PTF_cam, ZTF_cam
import cosmolopy as cp
import matplotlib.pyplot as plt
from scipy.integrate import quad
from scipy.optimize import golden
# Unit-conversion constants used throughout the survey-speed calculations.
SR_TO_SQ_DEG = 3282.8  # square degrees per steradian
SIDEREAL_DAY_SEC = 23.9344699 * 3600.  # sidereal day length in seconds
SEC2HR = 1. / 3600.
HR2SEC = 1. / SEC2HR
MIN2HR = 1. / 60.
DAY2HR = 24.
HR2DAY = 1. / DAY2HR
SEC2DAY = SEC2HR * HR2DAY
DAY2SEC = DAY2HR * HR2SEC
LUN2HR = 24 * 28.  # hours per lunation (approximated as 28 days)
YEAR2HR = 365.25 * 24.
def limiting_z(apparent_mag, absolute_mag, k_corr=None):
    """Redshift at which a source of ``absolute_mag`` reaches ``apparent_mag``.

    Uses the k-correction for an f_lambda standard,
    k = -2.5 log10(1/(1+z)) (Hogg 1999, eqn. 27), unless a custom
    ``k_corr(z)`` callable is supplied.  The redshift is found by
    golden-section minimisation of the absolute magnitude residual.
    """
    if k_corr is None:
        def k_corr(z):
            return -2.5 * N.log10(1. / (1. + z))

    def residual(z):
        # Guard against the minimiser probing non-physical redshifts.
        if z <= 0:
            return N.inf
        predicted = (absolute_mag +
                     cp.magnitudes.distance_modulus(z, **cp.fidcosmo) +
                     k_corr(z))
        # abs() lets us use a minimisation routine rather than root finding.
        return N.abs(predicted - apparent_mag)

    #res = brute(f, ((1e-8,10),), finish=fmin, full_output=True)
    return golden(residual)
def volumetric_survey_rate(absolute_mag,
                           snapshot_area_sqdeg, DIQ_fwhm_arcsec, slew_time=15., label=None,
                           sky_brightness=None, transmission=None, plot=True, readnoise=None,
                           telescope=P48, camera=ZTF_cam, filterkey='MouldR',
                           max_lim_mag=None, obstimes=None, k_corr=None, **kwargs):
    """calculate the volume/sec/snapshot in Mpc^3

    Sweeps a grid of integration times, converts each limiting magnitude
    into a comoving volume for a source of ``absolute_mag``, and returns
    (max survey rate, optimal integration time).  Optionally plots the
    full rate-vs-exposure curve; extra **kwargs go to plt.plot.
    """
    if obstimes is None:
        obstimes = N.logspace(0, 2, 100)  # seconds
        # obstimes = N.linspace(5,100,20) # seconds
        #obstimes = N.array([30,45,60,120,180,300,500])
    if transmission is not None:
        # NOTE(review): the assignment after this raise is unreachable.
        raise NotImplementedError(
            'check for correctness: varying camera obscuration now incorported in Camera.beam_obscuration')
        telescope.transmission = transmission
    if camera is not None:
        telescope.set_camera(camera)
    if readnoise is not None:
        telescope.Camera.Detector.readnoise = readnoise
    if sky_brightness is None:
        # half moon in both g' and r'
        sky_brightness = 19.9
    limiting_mags = N.array([telescope.limiting_mag(time, DIQ_fwhm_arcsec,
                                                    sky_brightness, airmass=1.15, filterkey=filterkey) for time in obstimes])
    if max_lim_mag is not None:
        # e.g. systematics floor: clip depths beyond the achievable limit.
        limiting_mags[limiting_mags >= max_lim_mag] = max_lim_mag
    # Total time per exposure includes the fixed slew/readout overhead.
    exptimes = obstimes + slew_time
    zs = [limiting_z(m, absolute_mag, k_corr=k_corr) for m in limiting_mags]
    com_volumes = cp.distance.comoving_volume(zs, **cp.fidcosmo)
    vol_survey_rate = com_volumes * \
        (snapshot_area_sqdeg / (4. * N.pi * SR_TO_SQ_DEG)) / exptimes
    if plot:
        plt.plot(obstimes, vol_survey_rate, label=label, **kwargs)
        plt.xlabel('Integration time (sec)')
        plt.ylabel('Volumetric Survey Rate per Exposure (Mpc$^3$ s$^{-1}$)')
        plt.xscale('log')
        plt.yscale('log')
        plt.xlim([obstimes.min(), obstimes.max()])
    if False:
        # Disabled diagnostic plot: limiting magnitude vs integration time.
        plt.plot(obstimes, limiting_mags, label=label)
        plt.xlabel('Integration time (sec)')
        plt.ylabel('Limiting Magnitude ({})'.format(filterkey))
        plt.xscale('log')
        plt.yscale('linear')
        plt.xlim([obstimes.min(), obstimes.max()])
    # print vol_survey_rate.max(), obstimes[vol_survey_rate.argmax()]
    return vol_survey_rate.max(), obstimes[vol_survey_rate.argmax()]
    # return vol_survey_rate, obstimes, limiting_mags
def spectroscopic_cost(z, absolute_mag):
    """Followup-classification cost, as a fraction of a night, at redshift z.

    Rough P200 scaling: a mag-20 target needs 20 minutes of integration,
    plus 5 minutes of magnitude-independent overhead.  Normalised by a
    6-hour (360-minute) night — the Palomar average, weather included.
    """
    mag = absolute_mag + cp.magnitudes.distance_modulus(z, **cp.fidcosmo)
    # Background-limited exposure time at constant S/N:
    #   t_exp(mag_ref) * 10**(0.8 * (mag - mag_ref)) + t_overhead
    reference_mag = 20
    reference_exposure_min = 20.
    overhead_min = 5.  # overhead / minimum exposure time, minutes
    total_min = reference_exposure_min * 10 ** (0.8 * (mag - reference_mag)) + overhead_min
    return total_min / 360.
def unweighted_survey_volume(absolute_mag, limiting_mag, k_corr=None):
    """Full-sky comoving volume (Mpc^3) within which a source of
    ``absolute_mag`` is brighter than ``limiting_mag``."""
    # TODO add saturation magnitude for a lower limit...
    # or just call with limiting_mag = saturation mag and subtract
    z_max = limiting_z(limiting_mag, absolute_mag, k_corr=k_corr)
    return cp.distance.comoving_volume(z_max, **cp.fidcosmo)
def unweighted_survey_speed(absolute_mag, limiting_mag, fov, time_per_obs,
                            k_corr=None):
    """Volume surveyed per unit time (Mpc^3 / s) for a given field of view."""
    sky_fraction = fov / (4. * N.pi * SR_TO_SQ_DEG)
    volume = unweighted_survey_volume(absolute_mag, limiting_mag, k_corr=k_corr)
    return volume * sky_fraction / time_per_obs
def fraction_spectroscopic_volume(absolute_mag, limiting_mag,
                                  spectroscopic_limit=21, k_corr=None):
    """Fraction of the survey volume reachable by followup spectroscopy.

    Capped at 1 (the spectroscopic limit may be deeper than the survey).
    """
    spec_volume = unweighted_survey_volume(absolute_mag, spectroscopic_limit,
                                           k_corr=k_corr)
    survey_volume = unweighted_survey_volume(absolute_mag, limiting_mag,
                                             k_corr=k_corr)
    return min(spec_volume / survey_volume, 1)
def weighted_survey_volume(absolute_mag, limiting_mag, k_corr=None):
    """Spectroscopy-weighted survey volume for sources of ``absolute_mag``.

    Each comoving volume element is weighted by the reciprocal of the
    spectroscopic followup cost (in fractions of a night) at its redshift,
    then integrated out to the survey's limiting redshift over the full sky.
    """
    # TODO add saturation magnitude for a lower limit...
    z_max = limiting_z(limiting_mag, absolute_mag, k_corr=k_corr)

    def weighted_shell(z):
        return (cp.distance.diff_comoving_volume(z, **cp.fidcosmo) /
                spectroscopic_cost(z, absolute_mag))

    return 4 * N.pi * quad(weighted_shell, 0, z_max)[0]
def compare_weighted_survey_speed(absolute_mag, limiting_mag, fov, time_per_obs):
    """Weighted survey speed relative to PTF (mlim 20.7, 7.26 sq deg, 106 s)."""
    steradian_norm = 4. * N.pi * SR_TO_SQ_DEG
    ptf_speed = (weighted_survey_volume(absolute_mag, 20.7) *
                 (7.26 / steradian_norm) / 106.)
    this_speed = (weighted_survey_volume(absolute_mag, limiting_mag) *
                  (fov / steradian_norm) / time_per_obs)
    return this_speed / ptf_speed
def wrap_survey_speeds(absolute_mag, fov, time_per_obs, limiting_mag,
                       ptfspeed=2790.):
    """Print one LaTeX table row of survey-comparison figures.

    NOTE(review): ``ptfspeed`` is accepted but never used in the body —
    the reference values are only recorded in the comments below.
    (Python 2 print statement at the end.)
    """
    # use for survey comparison tables
    # ptf speed = unweighted_survey_speed(-19,21,7.26,106) * 1.0 = 4001
    # ptf speed = unweighted_survey_speed(-19,20.7,7.26,106) * 1.0 = 2790
    # ptf speed = unweighted_survey_speed(-19,20.6,7.26,106) * 1.0 = 2473
    speed = unweighted_survey_speed(
        absolute_mag, limiting_mag, fov, time_per_obs)
    frac = fraction_spectroscopic_volume(absolute_mag, limiting_mag)
    # Areal survey rate in square degrees per hour.
    omega_dot = float(fov) / time_per_obs * 3600.
    nexps = n_exposures_per_field_per_year(camera_fov_sqdeg=fov,
                                           time_per_image_sec=time_per_obs)
    vdot = speed
    fspec = frac
    print "{:d} & {:d} & \\num{{ {:.1e} }} & {:.2f} \\\\".format(
        int(omega_dot), int(nexps), vdot, fspec)
def linear_control_time(absolute_mag, limiting_mag, tau_eff, z, k_corr=None):
    """Control time, in observer-frame days, from a linear light-curve model.

    ``absolute_mag`` is the peak absolute magnitude and ``tau_eff`` the
    number of days the source takes to fade by one magnitude (see 8/18/15
    notes).  Returns 0 when the source never rises above the limit.
    """
    if k_corr is None:
        # k-correction for an f_lambda standard: k = -2.5 log10(1/(1+z))
        # (Hogg 1999, eqn. 27)
        def k_corr(z):
            return -2.5 * N.log10(1. / (1. + z))
    peak_apparent = (absolute_mag +
                     cp.magnitudes.distance_modulus(z, **cp.fidcosmo) +
                     k_corr(z))
    # (1 + z) time dilation stretches the rest-frame fade into the
    # observer frame.
    control_days = (limiting_mag - peak_apparent) * tau_eff * (1. + z)
    return control_days if control_days > 0 else 0.
def sum_control_time_one_consecutive(obs_points_jd, control_time_days):
    """Total control time, in years, requiring a single detection.

    Following Zwicky (1942): gaps shorter than the (observer-frame)
    control time contribute their full length; longer gaps and the final
    epoch each contribute one full control time.
    """
    gaps = N.diff(obs_points_jd)
    short_gap_total = N.sum(gaps[gaps < control_time_days])
    n_long_gaps = N.count_nonzero(gaps >= control_time_days)
    total_days = (control_time_days + short_gap_total +
                  n_long_gaps * control_time_days)
    return total_days / 365.25
def sum_control_time_k_consecutive(obs_points_jd, control_time_days,
                                   k_consecutive):
    """Total control time, in years, requiring k consecutive detections.

    An event counts only if k consecutive epochs fit inside its
    (observer-frame) control time, so the relevant interval is between
    each observation and the (k-1)th one after it.
    """
    span = k_consecutive - 1
    windows = obs_points_jd[span:] - obs_points_jd[:-span]
    total = 0.
    previous = 0.
    for window in windows:
        if window > control_time_days:
            # k consecutive epochs don't fit: no sensitivity here.
            current = 0.
        elif previous == 0.:
            # First usable window of a run: credit the full control time.
            current = control_time_days
        else:
            # Continuation of a run: credit only the new interval.
            current = window
        total += current
        previous = current
    return total / 365.25
def sum_control_time(obs_points_jd, control_time_days, k_consecutive=1):
    """Total control time in years, dispatching on the detection requirement.

    ``control_time_days`` is in the observer frame; ``k_consecutive`` is
    the number of consecutive detections required to count an event.
    """
    if k_consecutive == 1:
        return sum_control_time_one_consecutive(obs_points_jd,
                                                control_time_days)
    return sum_control_time_k_consecutive(obs_points_jd, control_time_days,
                                          k_consecutive)
def n_transients_per_year(survey, absolute_mags, tau_effs, cadence_days,
                          rate_z=None, k_corr=None, max_mlim=None,
                          max_zenith_angle=66.4, k_consecutive=1):
    """Expected transient detections per year on an (M, tau_eff) grid.

    ``rate_z`` is the volumetric event rate, a function of z, in events
    Mpc^-3 yr^-1 (defaults to the Ia rate).  ``cadence_days`` must be a
    scalar for efficiency.  Returns an array of shape
    (len(absolute_mags), len(tau_effs)).
    """
    # rate a function of z in events Mpc^-3 yr^-1
    # for efficiency, require scalar cadences
    obs_points_jd = survey.yearly_cadence_points(cadence_days)
    if rate_z is None:
        # use the Ia rate by default (see LSST SB)
        rate_z = lambda z: 3.E-5
    doy = N.arange(365)
    n_events = N.zeros([absolute_mags.size, tau_effs.size])
    for i, absolute_mag in enumerate(absolute_mags):
        # we're being a little fast and loose here with the snapshot/all-sky
        # distinction
        area, vol, mlim = zip(*[survey.snapshot_size(cadence_days, doy=d, max_zenith_angle=max_zenith_angle,
                                                     absolute_mag=absolute_mag, max_mlim=max_mlim, k_corr=k_corr) for d in doy])
        # Average the snapshot footprint and depth over the year.
        snap_area = N.mean(area)
        limiting_mag = N.mean(mlim)
        z_limit = limiting_z(limiting_mag, absolute_mag, k_corr=k_corr)
        for j, tau_eff in enumerate(tau_effs):
            def integrand(z):
                # Rate density x time-dilation x control time x sky
                # fraction x differential comoving volume at this z.
                ctz = linear_control_time(absolute_mag, limiting_mag,
                                          tau_eff, z, k_corr=k_corr)
                return rate_z(z) / (1. + z) * \
                    sum_control_time(obs_points_jd, ctz,
                                     k_consecutive=k_consecutive) * \
                    snap_area / SR_TO_SQ_DEG * \
                    cp.distance.diff_comoving_volume(z, **cp.fidcosmo)
            n_events[i, j] = quad(integrand, 0, z_limit)[0]
            print absolute_mag, tau_eff, n_events[i, j]
    return n_events
| 36.823344 | 138 | 0.630858 |
67aa2fe5c9674002096c5061ace64c6a2defa06d | 22,705 | py | Python | sdk/python/pulumi_azure_native/cache/v20170201/redis.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/cache/v20170201/redis.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/cache/v20170201/redis.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RedisArgs', 'Redis']
@pulumi.input_type
class RedisArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
sku: pulumi.Input['SkuArgs'],
enable_non_ssl_port: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
redis_configuration: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
shard_count: Optional[pulumi.Input[int]] = None,
static_ip: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tenant_settings: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Redis resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input['SkuArgs'] sku: The SKU of the Redis cache to deploy.
:param pulumi.Input[bool] enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] name: The name of the Redis cache.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] redis_configuration: All Redis Settings. Few possible keys: rdb-backup-enabled,rdb-storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
:param pulumi.Input[int] shard_count: The number of shards to be created on a Premium Cluster Cache.
:param pulumi.Input[str] static_ip: Static IP address. Required when deploying a Redis cache inside an existing Azure Virtual Network.
:param pulumi.Input[str] subnet_id: The full resource ID of a subnet in a virtual network to deploy the Redis cache in. Example format: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tenant_settings: tenantSettings
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "sku", sku)
if enable_non_ssl_port is not None:
pulumi.set(__self__, "enable_non_ssl_port", enable_non_ssl_port)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if redis_configuration is not None:
pulumi.set(__self__, "redis_configuration", redis_configuration)
if shard_count is not None:
pulumi.set(__self__, "shard_count", shard_count)
if static_ip is not None:
pulumi.set(__self__, "static_ip", static_ip)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tenant_settings is not None:
pulumi.set(__self__, "tenant_settings", tenant_settings)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        # Required argument; read from the property bag populated via
        # pulumi.set (cloud-facing wire name "resourceGroupName").
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Input['SkuArgs']:
        """
        The SKU of the Redis cache to deploy.
        """
        # Required argument (see RedisArgs.__init__); always present.
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: pulumi.Input['SkuArgs']):
        pulumi.set(self, "sku", value)
    @property
    @pulumi.getter(name="enableNonSslPort")
    def enable_non_ssl_port(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the non-ssl Redis server port (6379) is enabled.
        """
        # Optional argument; unset unless provided to __init__
        # (wire name "enableNonSslPort").
        return pulumi.get(self, "enable_non_ssl_port")

    @enable_non_ssl_port.setter
    def enable_non_ssl_port(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_non_ssl_port", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The geo-location where the resource lives
        """
        # Optional argument; unset unless provided to __init__.
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Redis cache.
        """
        # Optional argument; unset unless provided to __init__.
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="redisConfiguration")
    def redis_configuration(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        All Redis Settings. Few possible keys: rdb-backup-enabled,rdb-storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
        """
        # Optional free-form string-to-string map; keys are interpreted by the
        # Redis service, not validated here (wire name "redisConfiguration").
        return pulumi.get(self, "redis_configuration")

    @redis_configuration.setter
    def redis_configuration(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "redis_configuration", value)
    @property
    @pulumi.getter(name="shardCount")
    def shard_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of shards to be created on a Premium Cluster Cache.
        """
        # Optional argument; unset unless provided to __init__
        # (wire name "shardCount").
        return pulumi.get(self, "shard_count")

    @shard_count.setter
    def shard_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "shard_count", value)
    @property
    @pulumi.getter(name="staticIP")
    def static_ip(self) -> Optional[pulumi.Input[str]]:
        """
        Static IP address. Required when deploying a Redis cache inside an existing Azure Virtual Network.
        """
        # Optional argument; note the wire name is "staticIP" (capital IP).
        return pulumi.get(self, "static_ip")

    @static_ip.setter
    def static_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "static_ip", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The full resource ID of a subnet in a virtual network to deploy the Redis cache in. Example format: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1
        """
        # Optional argument; unset unless provided to __init__
        # (wire name "subnetId").
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        # Optional argument; unset unless provided to __init__.
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tenantSettings")
    def tenant_settings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        tenantSettings
        """
        # Optional string-to-string map; semantics defined by the service
        # (wire name "tenantSettings").
        return pulumi.get(self, "tenant_settings")

    @tenant_settings.setter
    def tenant_settings(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tenant_settings", value)
class Redis(pulumi.CustomResource):
    """A single Redis item in List or Get Operation.

    Auto-generated Pulumi resource for Azure API version 2017-02-01.
    Construct it either with keyword arguments (first overload) or with a
    fully-populated ``RedisArgs`` (second overload); ``__init__`` dispatches
    between the two at runtime.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 enable_non_ssl_port: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 redis_configuration: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 shard_count: Optional[pulumi.Input[int]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 static_ip: Optional[pulumi.Input[str]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tenant_settings: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        A single Redis item in List or Get Operation.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
        :param pulumi.Input[str] location: The geo-location where the resource lives
        :param pulumi.Input[str] name: The name of the Redis cache.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] redis_configuration: All Redis Settings. Few possible keys: rdb-backup-enabled,rdb-storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[int] shard_count: The number of shards to be created on a Premium Cluster Cache.
        :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The SKU of the Redis cache to deploy.
        :param pulumi.Input[str] static_ip: Static IP address. Required when deploying a Redis cache inside an existing Azure Virtual Network.
        :param pulumi.Input[str] subnet_id: The full resource ID of a subnet in a virtual network to deploy the Redis cache in. Example format: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tenant_settings: tenantSettings
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RedisArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A single Redis item in List or Get Operation.

        :param str resource_name: The name of the resource.
        :param RedisArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two typing overloads above: unpacks a
        # RedisArgs instance into keyword arguments when one was supplied.
        resource_args, opts = _utilities.get_resource_args_opts(RedisArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 enable_non_ssl_port: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 redis_configuration: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 shard_count: Optional[pulumi.Input[int]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 static_ip: Optional[pulumi.Input[str]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tenant_settings: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        # Shared constructor body: validates options, builds the property bag,
        # and registers the resource with the engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: populate inputs and reserve output slots.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RedisArgs.__new__(RedisArgs)
            __props__.__dict__["enable_non_ssl_port"] = enable_non_ssl_port
            __props__.__dict__["location"] = location
            __props__.__dict__["name"] = name
            __props__.__dict__["redis_configuration"] = redis_configuration
            # resource_group_name and sku are required unless looking up by URN.
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["shard_count"] = shard_count
            if sku is None and not opts.urn:
                raise TypeError("Missing required property 'sku'")
            __props__.__dict__["sku"] = sku
            __props__.__dict__["static_ip"] = static_ip
            __props__.__dict__["subnet_id"] = subnet_id
            __props__.__dict__["tags"] = tags
            __props__.__dict__["tenant_settings"] = tenant_settings
            # Output-only properties (resolved by the provider after creation).
            __props__.__dict__["access_keys"] = None
            __props__.__dict__["host_name"] = None
            __props__.__dict__["linked_servers"] = None
            __props__.__dict__["port"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["redis_version"] = None
            __props__.__dict__["ssl_port"] = None
            __props__.__dict__["type"] = None
        # Aliases let state created under other API versions / provider names
        # adopt this resource type without replacement.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:cache/v20170201:Redis"), pulumi.Alias(type_="azure-native:cache:Redis"), pulumi.Alias(type_="azure-nextgen:cache:Redis"), pulumi.Alias(type_="azure-native:cache/v20150801:Redis"), pulumi.Alias(type_="azure-nextgen:cache/v20150801:Redis"), pulumi.Alias(type_="azure-native:cache/v20160401:Redis"), pulumi.Alias(type_="azure-nextgen:cache/v20160401:Redis"), pulumi.Alias(type_="azure-native:cache/v20171001:Redis"), pulumi.Alias(type_="azure-nextgen:cache/v20171001:Redis"), pulumi.Alias(type_="azure-native:cache/v20180301:Redis"), pulumi.Alias(type_="azure-nextgen:cache/v20180301:Redis"), pulumi.Alias(type_="azure-native:cache/v20190701:Redis"), pulumi.Alias(type_="azure-nextgen:cache/v20190701:Redis"), pulumi.Alias(type_="azure-native:cache/v20200601:Redis"), pulumi.Alias(type_="azure-nextgen:cache/v20200601:Redis")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Redis, __self__).__init__(
            'azure-native:cache/v20170201:Redis',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Redis':
        """
        Get an existing Redis resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from the
        # provider's recorded state for the given id.
        __props__ = RedisArgs.__new__(RedisArgs)
        __props__.__dict__["access_keys"] = None
        __props__.__dict__["enable_non_ssl_port"] = None
        __props__.__dict__["host_name"] = None
        __props__.__dict__["linked_servers"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["port"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["redis_configuration"] = None
        __props__.__dict__["redis_version"] = None
        __props__.__dict__["shard_count"] = None
        __props__.__dict__["sku"] = None
        __props__.__dict__["ssl_port"] = None
        __props__.__dict__["static_ip"] = None
        __props__.__dict__["subnet_id"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["tenant_settings"] = None
        __props__.__dict__["type"] = None
        return Redis(resource_name, opts=opts, __props__=__props__)
    # ------------------------------------------------------------------
    # Output properties (read-only views over the resolved resource state).
    # ------------------------------------------------------------------
    @property
    @pulumi.getter(name="accessKeys")
    def access_keys(self) -> pulumi.Output['outputs.RedisAccessKeysResponse']:
        """
        The keys of the Redis cache - not set if this object is not the response to Create or Update redis cache
        """
        return pulumi.get(self, "access_keys")
    @property
    @pulumi.getter(name="enableNonSslPort")
    def enable_non_ssl_port(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether the non-ssl Redis server port (6379) is enabled.
        """
        return pulumi.get(self, "enable_non_ssl_port")
    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> pulumi.Output[str]:
        """
        Redis host name.
        """
        return pulumi.get(self, "host_name")
    @property
    @pulumi.getter(name="linkedServers")
    def linked_servers(self) -> pulumi.Output['outputs.RedisLinkedServerListResponse']:
        """
        List of the linked servers associated with the cache
        """
        return pulumi.get(self, "linked_servers")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def port(self) -> pulumi.Output[int]:
        """
        Redis non-SSL port.
        """
        return pulumi.get(self, "port")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        Redis instance provisioning status.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="redisConfiguration")
    def redis_configuration(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        All Redis Settings. Few possible keys: rdb-backup-enabled,rdb-storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
        """
        return pulumi.get(self, "redis_configuration")
    @property
    @pulumi.getter(name="redisVersion")
    def redis_version(self) -> pulumi.Output[str]:
        """
        Redis version.
        """
        return pulumi.get(self, "redis_version")
    @property
    @pulumi.getter(name="shardCount")
    def shard_count(self) -> pulumi.Output[Optional[int]]:
        """
        The number of shards to be created on a Premium Cluster Cache.
        """
        return pulumi.get(self, "shard_count")
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
        """
        The SKU of the Redis cache to deploy.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter(name="sslPort")
    def ssl_port(self) -> pulumi.Output[int]:
        """
        Redis SSL port.
        """
        return pulumi.get(self, "ssl_port")
    @property
    @pulumi.getter(name="staticIP")
    def static_ip(self) -> pulumi.Output[Optional[str]]:
        """
        Static IP address. Required when deploying a Redis cache inside an existing Azure Virtual Network.
        """
        return pulumi.get(self, "static_ip")
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> pulumi.Output[Optional[str]]:
        """
        The full resource ID of a subnet in a virtual network to deploy the Redis cache in. Example format: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1
        """
        return pulumi.get(self, "subnet_id")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="tenantSettings")
    def tenant_settings(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        tenantSettings
        """
        return pulumi.get(self, "tenant_settings")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
| 46.336735 | 911 | 0.655803 |
1d29f3263832c4a4882ed628bca28c1872d1f65a | 21,127 | py | Python | sdks/python/apache_beam/coders/coders.py | ravwojdyla/beam | fbcde4cdc7d68de8734bf540c079b2747631a854 | [
"Apache-2.0"
] | 1 | 2020-07-14T16:30:12.000Z | 2020-07-14T16:30:12.000Z | sdks/python/apache_beam/coders/coders.py | kavyasmj/beam0.6 | d59dfeb339bd56feb7569531e5c421a297b0d3dc | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/coders/coders.py | kavyasmj/beam0.6 | d59dfeb339bd56feb7569531e5c421a297b0d3dc | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Collection of useful coders."""
import base64
import cPickle as pickle
import google.protobuf
from apache_beam.coders import coder_impl
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from stream import get_varint_size
except ImportError:
from slow_stream import get_varint_size
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
# pylint: disable=wrong-import-order, wrong-import-position
# Avoid dependencies on the full SDK.
try:
# Import dill from the pickler module to make sure our monkey-patching of dill
# occurs.
from apache_beam.internal.pickler import dill
except ImportError:
# We fall back to using the stock dill library in tests that don't use the
# full Python SDK.
import dill
def serialize_coder(coder):
  """Serialize a coder as '<ClassName>$<pickled payload>'.

  The class-name prefix exists only to keep serialized coders readable in
  job descriptions; deserialize_coder() discards it.
  """
  from apache_beam.internal import pickler
  class_name = coder.__class__.__name__
  payload = pickler.dumps(coder)
  return '%s$%s' % (class_name, payload)
def deserialize_coder(serialized):
  """Inverse of serialize_coder(): drop the name prefix, unpickle the rest."""
  from apache_beam.internal import pickler
  # Split on the first '$' only; the pickled payload may itself contain '$'.
  payload = serialized.split('$', 1)[1]
  return pickler.loads(payload)
# pylint: enable=wrong-import-order, wrong-import-position
class Coder(object):
  """Base class for coders."""

  def encode(self, value):
    """Encodes the given object into a byte string."""
    raise NotImplementedError('Encode not implemented: %s.' % self)

  def decode(self, encoded):
    """Decodes the given byte string into the corresponding object."""
    raise NotImplementedError('Decode not implemented: %s.' % self)

  def is_deterministic(self):
    """Whether this coder is guaranteed to encode values deterministically.

    A deterministic coder is required for key coders in GroupByKey operations
    to produce consistent results.

    For example, note that the default coder, the PickleCoder, is not
    deterministic: the ordering of picked entries in maps may vary across
    executions since there is no defined order, and such a coder is not in
    general suitable for usage as a key coder in GroupByKey operations, since
    each instance of the same key may be encoded differently.

    Returns:
      Whether coder is deterministic.
    """
    return False

  def estimate_size(self, value):
    """Estimates the encoded size of the given value, in bytes.

    Dataflow estimates the encoded size of a PCollection processed in a pipeline
    step by using the estimated size of a random sample of elements in that
    PCollection.

    The default implementation encodes the given value and returns its byte
    size. If a coder can provide a fast estimate of the encoded size of a value
    (e.g., if the encoding has a fixed size), it can provide its estimate here
    to improve performance.

    Arguments:
      value: the value whose encoded size is to be estimated.

    Returns:
      The estimated encoded size of the given value.
    """
    return len(self.encode(value))

  # ===========================================================================
  # Methods below are internal SDK details that don't need to be modified for
  # user-defined coders.
  # ===========================================================================

  def _create_impl(self):
    """Creates a CoderImpl to do the actual encoding and decoding.
    """
    return coder_impl.CallbackCoderImpl(self.encode, self.decode,
                                        self.estimate_size)

  def get_impl(self):
    # Lazily create and cache the CoderImpl; the cached instance is dropped
    # from pickled state (see __getstate__) and rebuilt on first use.
    if not hasattr(self, '_impl'):
      self._impl = self._create_impl()
      assert isinstance(self._impl, coder_impl.CoderImpl)
    return self._impl

  def __getstate__(self):
    return self._dict_without_impl()

  def _dict_without_impl(self):
    # The cached CoderImpl may not be picklable, so strip it before
    # serialization or equality comparison.
    if hasattr(self, '_impl'):
      d = dict(self.__dict__)
      del d['_impl']
      return d
    else:
      return self.__dict__

  @classmethod
  def from_type_hint(cls, unused_typehint, unused_registry):
    # If not overridden, just construct the coder without arguments.
    return cls()

  def is_kv_coder(self):
    return False

  def key_coder(self):
    if self.is_kv_coder():
      raise NotImplementedError('key_coder: %s' % self)
    else:
      raise ValueError('Not a KV coder: %s.' % self)

  def value_coder(self):
    if self.is_kv_coder():
      raise NotImplementedError('value_coder: %s' % self)
    else:
      raise ValueError('Not a KV coder: %s.' % self)

  def _get_component_coders(self):
    """Returns the internal component coders of this coder."""
    # This is an internal detail of the Coder API and does not need to be
    # refined in user-defined Coders.
    return []

  def as_cloud_object(self):
    """Returns Google Cloud Dataflow API description of this coder."""
    # This is an internal detail of the Coder API and does not need to be
    # refined in user-defined Coders.
    value = {
        # We pass coders in the form "<coder_name>$<pickled_data>" to make the
        # job description JSON more readable. Data before the $ is ignored by
        # the worker.
        '@type': serialize_coder(self),
        'component_encodings': list(
            component.as_cloud_object()
            for component in self._get_component_coders()
        ),
    }
    return value

  def __repr__(self):
    return self.__class__.__name__

  def __eq__(self, other):
    # pylint: disable=protected-access
    return (self.__class__ == other.__class__
            and self._dict_without_impl() == other._dict_without_impl())
    # pylint: enable=protected-access

  def __ne__(self, other):
    # BUG FIX: on Python 2 defining __eq__ does not provide __ne__, so
    # `coder_a != coder_b` previously fell back to identity comparison and
    # could disagree with __eq__. Define it explicitly in terms of __eq__.
    return not self == other
class StrUtf8Coder(Coder):
  """A coder used for reading and writing strings as UTF-8."""

  def encode(self, value):
    # value is expected to be a unicode string; result is UTF-8 bytes.
    return value.encode('utf-8')

  def decode(self, value):
    # Inverse of encode(): UTF-8 bytes back to a unicode string.
    return value.decode('utf-8')

  def is_deterministic(self):
    # A given string always yields the same UTF-8 bytes, so this coder is
    # safe to use for GroupByKey keys.
    return True
class ToStringCoder(Coder):
  """A default string coder used if no sink coder is specified."""

  def encode(self, value):
    # Unicode text is UTF-8 encoded, byte strings pass through untouched,
    # and anything else is rendered via str().
    if isinstance(value, unicode):
      return value.encode('utf-8')
    if isinstance(value, str):
      return value
    return str(value)

  def decode(self, _):
    # One-way coder: only encoding to strings is supported.
    raise NotImplementedError('ToStringCoder cannot be used for decoding.')

  def is_deterministic(self):
    return True
class FastCoder(Coder):
  """Coder subclass used when a (faster) CoderImpl is supplied directly.

  The Coder class defines _create_impl in terms of encode() and decode();
  this class inverts that by defining encode() and decode() in terms of
  _create_impl().
  """

  def encode(self, value):
    """Encodes the given object into a byte string."""
    return self.get_impl().encode(value)

  def decode(self, encoded):
    """Decodes the given byte string into the corresponding object."""
    return self.get_impl().decode(encoded)

  def estimate_size(self, value):
    # Delegates to the impl, which may estimate more cheaply than actually
    # encoding the value.
    return self.get_impl().estimate_size(value)

  def _create_impl(self):
    # Subclasses must supply the CoderImpl that does the real work.
    raise NotImplementedError
class BytesCoder(FastCoder):
  """Byte string coder."""

  def _create_impl(self):
    return coder_impl.BytesCoderImpl()

  def is_deterministic(self):
    # Byte strings are encoded as-is, so encoding is deterministic.
    return True
class VarIntCoder(FastCoder):
  """Variable-length integer coder."""

  def _create_impl(self):
    return coder_impl.VarIntCoderImpl()

  def is_deterministic(self):
    # Each integer has exactly one varint encoding.
    return True
class FloatCoder(FastCoder):
  """A coder used for floating-point values."""

  def _create_impl(self):
    return coder_impl.FloatCoderImpl()

  def is_deterministic(self):
    # Fixed binary representation per float value.
    return True
class TimestampCoder(FastCoder):
  """A coder used for timeutil.Timestamp values."""

  def _create_impl(self):
    return coder_impl.TimestampCoderImpl()

  def is_deterministic(self):
    return True
class SingletonCoder(FastCoder):
  """A coder that always encodes exactly one value."""

  def __init__(self, value):
    # The single value this coder can (de)serialize.
    self._value = value

  def _create_impl(self):
    return coder_impl.SingletonCoderImpl(self._value)

  def is_deterministic(self):
    # Only one possible value, hence only one possible encoding.
    return True
def maybe_dill_dumps(o):
  """Pickle using cPickle, falling back to the dill pickler.

  dill handles objects cPickle cannot, including, for example, instances
  whose state contains lambdas.
  """
  try:
    result = pickle.dumps(o)
  except Exception:  # pylint: disable=broad-except
    result = dill.dumps(o)
  return result
def maybe_dill_loads(o):
  """Unpickle using cPickle, falling back to the dill pickler."""
  try:
    result = pickle.loads(o)
  except Exception:  # pylint: disable=broad-except
    result = dill.loads(o)
  return result
class _PickleCoderBase(FastCoder):
  """Base class for pickling coders."""

  def is_deterministic(self):
    # Note that the default coder, the PickleCoder, is not deterministic (for
    # example, the ordering of picked entries in maps may vary across
    # executions), and so is not in general suitable for usage as a key coder
    # in GroupByKey operations.
    return False

  def as_cloud_object(self, is_pair_like=True):
    value = super(_PickleCoderBase, self).as_cloud_object()
    # We currently use this coder in places where we cannot infer the coder to
    # use for the value type in a more granular way. In places where the
    # service expects a pair, it checks for the "is_pair_like" key, in which
    # case we would fail without the hack below.
    if is_pair_like:
      value['is_pair_like'] = True
      # Recurse with is_pair_like=False so the nested encodings do not
      # themselves claim to be pairs (terminates the recursion).
      value['component_encodings'] = [
          self.as_cloud_object(is_pair_like=False),
          self.as_cloud_object(is_pair_like=False)
      ]

    return value

  # We allow .key_coder() and .value_coder() to be called on PickleCoder since
  # we can't always infer the return values of lambdas in ParDo operations, the
  # result of which may be used in a GroupBykey.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self
class PickleCoder(_PickleCoderBase):
  """Coder using Python's pickle functionality."""

  def _create_impl(self):
    # cPickle dumps/loads; fast but not deterministic — see
    # _PickleCoderBase.is_deterministic.
    return coder_impl.CallbackCoderImpl(pickle.dumps, pickle.loads)
class DillCoder(_PickleCoderBase):
  """Coder using dill's pickle functionality."""

  def _create_impl(self):
    # Tries cPickle first and falls back to dill for objects cPickle
    # cannot handle (e.g. those containing lambdas).
    return coder_impl.CallbackCoderImpl(maybe_dill_dumps, maybe_dill_loads)
class DeterministicFastPrimitivesCoder(FastCoder):
  """Throws runtime errors when encoding non-deterministic values."""

  def __init__(self, coder, step_label):
    # coder: the coder whose values must encode deterministically.
    # step_label: identifies the pipeline step in any error raised.
    self._underlying_coder = coder
    self._step_label = step_label

  def _create_impl(self):
    return coder_impl.DeterministicFastPrimitivesCoderImpl(
        self._underlying_coder.get_impl(), self._step_label)

  def is_deterministic(self):
    # True by construction: the impl raises at encode time for values it
    # cannot encode deterministically.
    return True

  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self
class FastPrimitivesCoder(FastCoder):
  """Encodes simple primitives (e.g. str, int) efficiently.

  For unknown types, falls back to another coder (e.g. PickleCoder).
  """

  def __init__(self, fallback_coder=PickleCoder()):
    # NOTE: the default fallback instance is created once at definition time
    # and shared by all default-constructed FastPrimitivesCoders; coders are
    # treated as immutable so this sharing is intentional.
    self._fallback_coder = fallback_coder

  def _create_impl(self):
    return coder_impl.FastPrimitivesCoderImpl(
        self._fallback_coder.get_impl())

  def is_deterministic(self):
    # Primitive encodings are deterministic; determinism therefore hinges on
    # the fallback coder used for unknown types.
    return self._fallback_coder.is_deterministic()

  def as_cloud_object(self, is_pair_like=True):
    # BUG FIX: previously `super(FastCoder, self)`, which names the wrong
    # class and only worked because FastCoder does not override
    # as_cloud_object. Naming this class keeps the MRO walk correct even if
    # FastCoder gains an override later; the resolved method (Coder's) is
    # unchanged today.
    value = super(FastPrimitivesCoder, self).as_cloud_object()
    # We currently use this coder in places where we cannot infer the coder to
    # use for the value type in a more granular way. In places where the
    # service expects a pair, it checks for the "is_pair_like" key, in which
    # case we would fail without the hack below.
    if is_pair_like:
      value['is_pair_like'] = True
      value['component_encodings'] = [
          self.as_cloud_object(is_pair_like=False),
          self.as_cloud_object(is_pair_like=False)
      ]

    return value

  # We allow .key_coder() and .value_coder() to be called on FastPrimitivesCoder
  # since we can't always infer the return values of lambdas in ParDo
  # operations, the result of which may be used in a GroupBykey.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self
class Base64PickleCoder(Coder):
  """Coder of objects by Python pickle, then base64 encoding."""
  # TODO(robertwb): Do base64 encoding where it's needed (e.g. in json) rather
  # than via a special Coder.

  def encode(self, value):
    pickled = pickle.dumps(value)
    return base64.b64encode(pickled)

  def decode(self, encoded):
    raw = base64.b64decode(encoded)
    return pickle.loads(raw)

  def is_deterministic(self):
    # Pickling is not deterministic (e.g. map entry ordering); see the
    # corresponding comments on PickleCoder.
    return False

  # Like PickleCoder, this coder may stand in for a KV coder when element
  # types cannot be inferred from lambdas in ParDo operations, so
  # key_coder()/value_coder() are permitted.
  #
  # TODO(ccy): this is currently only used for KV values from Create transforms.
  # Investigate a way to unify this with PickleCoder.
  def is_kv_coder(self):
    return True

  def key_coder(self):
    return self

  def value_coder(self):
    return self
class ProtoCoder(FastCoder):
  """A Coder for Google Protocol Buffers.

  It supports both Protocol Buffers syntax versions 2 and 3. However,
  the runtime version of the python protobuf library must exactly match the
  version of the protoc compiler what was used to generate the protobuf
  messages.

  ProtoCoder is registered in the global CoderRegistry as the default coder for
  any protobuf Message object.
  """

  def __init__(self, proto_message_type):
    # The generated Message subclass this coder serializes.
    self.proto_message_type = proto_message_type

  def _create_impl(self):
    return coder_impl.ProtoCoderImpl(self.proto_message_type)

  def is_deterministic(self):
    # TODO(vikasrk): A proto message can be deterministic if it does not contain
    # a Map.
    return False

  @staticmethod
  def from_type_hint(typehint, unused_registry):
    if issubclass(typehint, google.protobuf.message.Message):
      return ProtoCoder(typehint)
    else:
      raise ValueError(('Expected a subclass of google.protobuf.message.Message'
                        ', but got a %s' % typehint))
class TupleCoder(FastCoder):
  """Coder of tuple objects."""

  def __init__(self, components):
    # One coder per tuple position.
    self._coders = tuple(components)

  def _create_impl(self):
    return coder_impl.TupleCoderImpl([c.get_impl() for c in self._coders])

  def is_deterministic(self):
    # Deterministic only if every component coder is.
    return all(c.is_deterministic() for c in self._coders)

  @staticmethod
  def from_type_hint(typehint, registry):
    return TupleCoder([registry.get_coder(t) for t in typehint.tuple_types])

  def as_cloud_object(self):
    if self.is_kv_coder():
      # Two-component tuples are represented as key-value pairs on the
      # service side.
      return {
          '@type': 'kind:pair',
          'is_pair_like': True,
          'component_encodings': list(
              component.as_cloud_object()
              for component in self._get_component_coders()
          ),
      }
    return super(TupleCoder, self).as_cloud_object()

  def _get_component_coders(self):
    return self.coders()

  def coders(self):
    return self._coders

  def is_kv_coder(self):
    # A 2-tuple coder doubles as a KV coder.
    return len(self._coders) == 2

  def key_coder(self):
    if len(self._coders) != 2:
      raise ValueError('TupleCoder does not have exactly 2 components.')
    return self._coders[0]

  def value_coder(self):
    if len(self._coders) != 2:
      raise ValueError('TupleCoder does not have exactly 2 components.')
    return self._coders[1]

  def __repr__(self):
    return 'TupleCoder[%s]' % ', '.join(str(c) for c in self._coders)
class TupleSequenceCoder(FastCoder):
  """Coder of homogeneous tuple objects."""

  def __init__(self, elem_coder):
    # Coder applied to every element of the tuple.
    self._elem_coder = elem_coder

  def _create_impl(self):
    return coder_impl.TupleSequenceCoderImpl(self._elem_coder.get_impl())

  def is_deterministic(self):
    return self._elem_coder.is_deterministic()

  @staticmethod
  def from_type_hint(typehint, registry):
    return TupleSequenceCoder(registry.get_coder(typehint.inner_type))

  def _get_component_coders(self):
    return (self._elem_coder,)

  def __repr__(self):
    return 'TupleSequenceCoder[%r]' % self._elem_coder
class IterableCoder(FastCoder):
  """Coder of iterables of homogeneous objects."""

  def __init__(self, elem_coder):
    # Coder applied to every element of the iterable.
    self._elem_coder = elem_coder

  def _create_impl(self):
    return coder_impl.IterableCoderImpl(self._elem_coder.get_impl())

  def is_deterministic(self):
    return self._elem_coder.is_deterministic()

  def as_cloud_object(self):
    return {
        '@type': 'kind:stream',
        'is_stream_like': True,
        'component_encodings': [self._elem_coder.as_cloud_object()],
    }

  def value_coder(self):
    return self._elem_coder

  @staticmethod
  def from_type_hint(typehint, registry):
    return IterableCoder(registry.get_coder(typehint.inner_type))

  def _get_component_coders(self):
    return (self._elem_coder,)

  def __repr__(self):
    return 'IterableCoder[%r]' % self._elem_coder
class WindowCoder(PickleCoder):
  """Coder for windows in windowed values."""

  def _create_impl(self):
    # Windows are simply pickled; there is no compact custom encoding.
    return coder_impl.CallbackCoderImpl(pickle.dumps, pickle.loads)

  def is_deterministic(self):
    # Note that WindowCoder as implemented is not deterministic because the
    # implementation simply pickles windows. See the corresponding comments
    # on PickleCoder for more details.
    return False

  def as_cloud_object(self):
    # Windows are never used as keys, so advertise a non-pair encoding.
    return super(WindowCoder, self).as_cloud_object(is_pair_like=False)
class GlobalWindowCoder(SingletonCoder):
  """Coder for global windows.

  There is only one possible value (the global window), so this is a
  singleton coder around ``window.GlobalWindow()``.
  """

  def __init__(self):
    # Local import — presumably to avoid a circular import between the
    # coders and transforms modules; confirm before moving to module level.
    from apache_beam.transforms import window
    super(GlobalWindowCoder, self).__init__(window.GlobalWindow())

  def as_cloud_object(self):
    return {
        '@type': 'kind:global_window',
    }
class IntervalWindowCoder(FastCoder):
  """Coder for a window defined by a start timestamp and a duration."""

  def _create_impl(self):
    return coder_impl.IntervalWindowCoderImpl()

  def is_deterministic(self):
    # The binary interval encoding is stable, so it is key-safe.
    return True

  def as_cloud_object(self):
    return {
        '@type': 'kind:interval_window',
    }
class WindowedValueCoder(FastCoder):
  """Coder for windowed values.

  Combines a value coder, a timestamp coder and a window coder into the
  encoding of a full WindowedValue.
  """

  def __init__(self, wrapped_value_coder, window_coder=None):
    # Fall back to pickling windows when no window coder is supplied.
    self.wrapped_value_coder = wrapped_value_coder
    self.timestamp_coder = TimestampCoder()
    self.window_coder = window_coder if window_coder else PickleCoder()

  def _create_impl(self):
    return coder_impl.WindowedValueCoderImpl(
        self.wrapped_value_coder.get_impl(),
        self.timestamp_coder.get_impl(),
        self.window_coder.get_impl())

  def is_deterministic(self):
    # All three parts (value, timestamp, window) must be deterministic.
    return (self.wrapped_value_coder.is_deterministic()
            and self.timestamp_coder.is_deterministic()
            and self.window_coder.is_deterministic())

  def as_cloud_object(self):
    components = [c.as_cloud_object() for c in self._get_component_coders()]
    return {
        '@type': 'kind:windowed_value',
        'is_wrapper': True,
        'component_encodings': components,
    }

  def _get_component_coders(self):
    # The timestamp coder is implicit in the cloud encoding.
    return [self.wrapped_value_coder, self.window_coder]

  def is_kv_coder(self):
    return self.wrapped_value_coder.is_kv_coder()

  def key_coder(self):
    return self.wrapped_value_coder.key_coder()

  def value_coder(self):
    return self.wrapped_value_coder.value_coder()

  def __repr__(self):
    return 'WindowedValueCoder[%s]' % self.wrapped_value_coder
class LengthPrefixCoder(FastCoder):
  """Coder which prefixes the length of the encoded object in the stream."""

  def __init__(self, value_coder):
    self._value_coder = value_coder

  def _create_impl(self):
    # NOTE(review): unlike every sibling coder in this module, the raw
    # Coder (not self._value_coder.get_impl()) is handed to the impl —
    # confirm that LengthPrefixCoderImpl expects a Coder rather than a
    # CoderImpl here.
    return coder_impl.LengthPrefixCoderImpl(self._value_coder)

  def is_deterministic(self):
    # The length prefix adds no nondeterminism of its own.
    return self._value_coder.is_deterministic()

  def estimate_size(self, value):
    # Total size = varint-encoded length header + the payload itself.
    value_size = self._value_coder.estimate_size(value)
    return get_varint_size(value_size) + value_size

  def value_coder(self):
    return self._value_coder

  def as_cloud_object(self):
    return {
        '@type': 'kind:length_prefix',
        'component_encodings': [self._value_coder.as_cloud_object()],
    }

  def _get_component_coders(self):
    return (self._value_coder,)

  def __repr__(self):
    return 'LengthPrefixCoder[%r]' % self._value_coder
| 29.2213 | 80 | 0.707578 |
dc3a79cf095fb7cfc32a008341d3258781d41c4c | 938 | py | Python | melodic.py | boazbb/sight-music-generator | 84baa03c5483f3a02234b19a6207953d018047b9 | [
"MIT"
] | null | null | null | melodic.py | boazbb/sight-music-generator | 84baa03c5483f3a02234b19a6207953d018047b9 | [
"MIT"
] | null | null | null | melodic.py | boazbb/sight-music-generator | 84baa03c5483f3a02234b19a6207953d018047b9 | [
"MIT"
] | null | null | null | """
Writes the notes for melodic instruments (i.e. instruments that
play a single note at a time).
"""
import random
from note_data import *
# Probability of a rest is 1/REST_CHANCE per note slot (see writeBar).
REST_CHANCE = 5 # This means one in five notes will be rest
class Melodic:
    """Writes LilyPond notes for a melodic (single-note-at-a-time) instrument.

    Assumes ``getNotePool``/``getRhythmPattern`` are provided by note_data;
    ``file`` may be any object with a ``write`` method.
    """

    def __init__(self, file):
        self.file = file

    # Emit the requested number of bars into the output file.
    def writeBars(self, barNum):
        self.notePool = getNotePool("TWO_OCTAVES")
        self.rhythmType = 'QUARTERS'
        self.writeTreble(barNum)

    # Emit the treble (right-hand) staff wrapper plus its bars.
    def writeTreble(self, barNum):
        self.file.write('\t\\new Staff { \\time 4/4 ')
        for _ in range(barNum):
            self.writeBar('RIGHT')
        self.file.write('}\n')

    # Emit one bar: for each rhythm slot, either a rest or a random note.
    def writeBar(self, hand):
        rhythm = getRhythmPattern(self.rhythmType)
        for duration in rhythm:
            # randrange(REST_CHANCE) == 0 keeps the original 1-in-5 rest odds
            # (and consumes the RNG stream identically to randint(0, 4)).
            if random.randrange(REST_CHANCE) == 0:
                note = 'r'
            else:
                note = random.choice(self.notePool[hand])
            self.file.write(note + duration)
70909bb87e0d6f5abb7c53d6c49b9b9f2135411f | 870 | py | Python | pyQPanda/pyqpanda/TorchLayer/basic_eng.py | QianJianhua1/QPanda-2 | a13c7b733031b1d0007dceaf1dae6ad447bb969c | [
"Apache-2.0"
] | 631 | 2019-01-21T01:33:38.000Z | 2022-03-31T07:33:04.000Z | pyQPanda/pyqpanda/TorchLayer/basic_eng.py | QianJianhua1/QPanda-2 | a13c7b733031b1d0007dceaf1dae6ad447bb969c | [
"Apache-2.0"
] | 24 | 2019-02-01T10:12:45.000Z | 2021-12-02T01:49:57.000Z | pyQPanda/pyqpanda/TorchLayer/basic_eng.py | QianJianhua1/QPanda-2 | a13c7b733031b1d0007dceaf1dae6ad447bb969c | [
"Apache-2.0"
] | 80 | 2019-01-21T03:04:20.000Z | 2022-03-29T15:38:45.000Z | from pyqpanda import *
import numpy as np
def Generator_Weight_Circuit(Circuit, weights, qubits, rotation=None):
    """Append a parameterised rotation layer plus entangling CNOTs per row.

    :param Circuit: pyQPanda circuit the gates are appended to (via ``<<``)
    :param weights: 2-D array-like, shape (layers, len(qubits)); each row
        supplies one rotation angle per qubit
    :param qubits: qubit register the gates act on
    :param rotation: single-qubit rotation gate factory; defaults to RX
    :return: the same Circuit, for chaining
    :raises ValueError: if weights is not 2-D or its second dimension does
        not match the number of qubits
    """
    rotation = rotation or RX
    shape = np.shape(weights)
    if len(shape) != 2:
        raise ValueError(f"Weights tensor must be 2-dimensional; got shape {shape}")
    if shape[1] != len(qubits):
        raise ValueError(
            f"Weights tensor must have second dimension of length {len(qubits)}; got {shape[1]}"
        )
    num_qubits = shape[1]
    for row in weights:
        # One rotation per qubit, parameterised by this row's angles.
        for idx, angle in enumerate(row):
            Circuit << rotation(qubits[idx], angle)
        # Entangling layer: a ring of CNOTs for >2 qubits, one CNOT otherwise.
        if num_qubits > 2:
            for idx in range(num_qubits):
                Circuit << CNOT(qubits[idx], qubits[(idx + 1) % num_qubits])
        else:
            # NOTE(review): assumes at least two qubits; a single-qubit
            # register would raise IndexError here — confirm callers.
            Circuit << CNOT(qubits[0], qubits[1])
    return Circuit
| 33.461538 | 100 | 0.517241 |
3941200697be054e2559c5151ae113a8d3b197b2 | 19,862 | py | Python | fn_machine_learning/fn_machine_learning/bin/res_ml.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | fn_machine_learning/fn_machine_learning/bin/res_ml.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | fn_machine_learning/fn_machine_learning/bin/res_ml.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
#
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
#
"""
RES-ML
------
A command line tool to build machine learning model. It supports:
1. config. Generate a sample ml.config
2. download. Download incidents and save in CSV format.
3. build. Build machine model and save it into a file
4. count-value. Value count for a given field. Useful for discovering imbalanced dataset
5. view. View the summary of a saved model.
6. rebuild. Rebuild a saved model with latest data
Note the recommended steps to use our res-ml package are:
1. Use this command line tool to generate a sample ml.config
2. Use this command line tool to
a. download incidents
b. build and save a machine learning model
3. Use our function component to do prediction, by pointing to the saved model file
4. Rebuild the saved model periodically with updated incidents/samples.
"""
from __future__ import absolute_import
import argparse
import logging
import os, os.path
import resilient
import sys
from fn_machine_learning.lib.ml_model_common import MlModelCommon
from fn_machine_learning.lib.ml_config import MlConfig
import fn_machine_learning.lib.resilient_utils as resilient_utils
import fn_machine_learning.lib.model_utils as model_utils
from fn_machine_learning.lib.incident_time_filter import IncidentTimeFilter
import fn_machine_learning.lib.res_ml_config as res_ml_config
import requests
try:
# For all python < 3.2
import backports.configparser as configparser
except ImportError:
import configparser
if sys.version_info.major == 2:
from io import open
else:
unicode = str
# Module-level logger: INFO to the console by default; main() adds a file
# handler (LOG_FILE) and may raise verbosity to DEBUG via -v.
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
LOG.addHandler(logging.StreamHandler())
# app.config / ml.config section names.
RESILIENT_SECTION = "resilient"
MACHINE_LEARNING_SECTION = "machine_learning"
# Default CSV file used when incidents are downloaded implicitly by build.
SAMPLE_CSV_FILE = "resilient_incidents.csv"
LOG_FILE = "res-ml.log"
class OptParser(resilient.ArgumentParser):
    """
    This is a subclass of resilient.ArgumentParser. resilient.ArgumentParser takes care of both
    1. Reading app.config
    2. Validating required command line arguments.
    Here we just want app.config, we are parsing/validating commandline arguments in our main function.
    """
    def __init__(self, config_file=None):
        # Fall back to the standard Resilient config location when no
        # machine-learning-specific config file was supplied.
        self.config_file = config_file or resilient.get_config_file()
        super(OptParser, self).__init__(config_file=self.config_file)
        #
        # Note this is a trick used by resilient-circuits. resilient.ArgumentParser will
        # validate the arguments of the command line. Since we use command line
        # argument of input/output files, we don't want that validation, so we
        # erase them before we call parse_args(). So parse_args() only
        # reads from app.config
        #
        # NOTE(review): this mutates sys.argv for the whole process — any
        # later argument parsing must happen before an OptParser is built.
        sys.argv = sys.argv[0:1]
        self.opts = self.parse_args()
        if self.config:
            for section in self.config.sections():
                #
                # Handle sections other than [resilient] in app.config:
                # expose each section as a dict of lower-cased option names.
                #
                items = dict((item.lower(), self.config.get(section, item)) for item in self.config.options(section))
                self.opts.update({section: items})
        resilient.parse_parameters(self.opts)
def _build_arg_parser():
    """Construct the argparse parser with all six sub-commands.

    :return: a fully configured :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose", help="Print debug output", action="store_true")

    subparsers = parser.add_subparsers(title="subcommands",
                                       help="one of these options must be provided",
                                       description="valid subcommands",
                                       dest="cmd")
    subparsers.required = True

    config_parser = subparsers.add_parser("config",
                                          help="Generate a sample config file")
    build_parser = subparsers.add_parser("build",
                                         help="Build a machine model")
    rebuild_parser = subparsers.add_parser("rebuild",
                                           help="Rebuild an saved machine learning model")
    view_parser = subparsers.add_parser("view",
                                        help="View the summary of a saved machine learning model")
    download_parser = subparsers.add_parser("download",
                                            help="Download incidents and save into a CSV file")
    count_value_parser = subparsers.add_parser("count_value",
                                               help="Count value of a field")

    # 1. config: -o (optional) name of the sample config file (ml.config by default)
    config_parser.add_argument("-o", "--output",
                               help="Create sample config file as",
                               default=None)

    # 2. build: -c optional CSV of samples (otherwise incidents are downloaded),
    #    -f optional ml config file, -o required model output file
    build_parser.add_argument("-c", "--csv",
                              help="Use samples from CSV file",
                              default=None)
    build_parser.add_argument("-f", "--file_config",
                              help="Use config file",
                              default=None)
    build_parser.add_argument("-o", "--output",
                              help="Save model as",
                              required=True,
                              default=None)

    # 3. rebuild: -c optional CSV of samples, -i required saved model file,
    #    -f optional ml config file
    rebuild_parser.add_argument("-c", "--csv",
                                help="Use samples from CSV file",
                                default=None)
    rebuild_parser.add_argument("-i", "--input",
                                help="Model file to rebuild",
                                required=True,
                                default=None)
    rebuild_parser.add_argument("-f", "--file_config",
                                help="Use config file",
                                default=None)

    # 4. view: -i required saved model file to summarize
    view_parser.add_argument("-i", "--input",
                             help="Model file to rebuild",
                             required=True,
                             default=None)

    # 5. download: -o required CSV file to write, -f optional ml config file
    download_parser.add_argument("-o", "--output",
                                 help="CSV file to save samples",
                                 required=True,
                                 default=None)
    download_parser.add_argument("-f", "--file_config",
                                 help="Use config file",
                                 default=None)

    # 6. count_value: -i required CSV with samples, -f required field name.
    #    Useful to detect an imbalanced dataset for the prediction field.
    count_value_parser.add_argument("-i", "--input",
                                    help="CSV file with samples",
                                    required=True,
                                    default=None)
    count_value_parser.add_argument("-f", "--field",
                                    help="value of which field to count",
                                    required=True,
                                    default=None)
    return parser


def _configure_logging(verbose):
    """Attach the file handler (LOG_FILE) and honor the -v/--verbose flag.

    :param verbose: True to raise both the logger and the file handler to DEBUG
    """
    fh = logging.FileHandler(LOG_FILE)
    fh.setLevel(logging.INFO)
    if verbose:
        fh.setLevel(logging.DEBUG)
        LOG.info("Verbose Logging Enabled")
        LOG.setLevel(logging.DEBUG)
    LOG.addHandler(fh)


def _resolve_config_file(args):
    """Determine which machine-learning config file to read.

    Only download/build/rebuild accept -f/--file_config; when nothing is
    specified and ./ml.config exists, it is used as the default.

    :param args: parsed command line arguments
    :return: path to the config file, or None for the resilient default
    """
    config_file = None
    if args.cmd in ("download", "build", "rebuild"):
        config_file = args.file_config
    if config_file is None and os.path.isfile("./ml.config"):
        config_file = "./ml.config"
    return config_file


def main():
    """
    Entry point of the res-ml command line tool. Supported sub-commands:

    1. config: create a sample config file (-o optional file name)
    2. build: build a new model (-o model output file, -c optional CSV samples)
    3. rebuild: rebuild a saved model (-i model file, -c optional CSV samples)
    4. view: show summary of a saved model (-i model file)
    5. download: download incidents into a CSV file (-o CSV file)
    6. count_value: show value counts for a field (-i CSV file, -f field);
       useful to check whether the dataset is imbalanced for that field

    :return: None
    """
    # parse_known_args keeps the original behavior of ignoring unknown flags.
    args, unknown_args = _build_arg_parser().parse_known_args()

    _configure_logging(args.verbose)

    # OptParser reads app.config / ml.config (it also truncates sys.argv).
    opt_parser = OptParser(config_file=_resolve_config_file(args))

    if args.cmd == "config":
        create_sample_config(args)
    elif args.cmd == "build":
        build_new_model(args, opt_parser)
    elif args.cmd == "rebuild":
        rebuild_model(args, opt_parser)
    elif args.cmd == "view":
        view_model(args)
    elif args.cmd == "download":
        download_incidents_csv(opt_parser, args.output)
    elif args.cmd == "count_value":
        count_value(args)
    else:
        LOG.error("Unknown command: " + args.cmd)
def create_sample_config(args):
    """Write a sample machine-learning config file.

    The content comes from res_ml_config; the file name defaults to
    ml.config unless -o/--output was given. An existing file is never
    overwritten.

    :param args: parsed command line arguments (uses ``args.output``)
    :return: None
    """
    config_data = res_ml_config.get_config_data()

    # Default name unless the user supplied -o/--output.
    config_file = args.output if args.output is not None else "ml.config"

    # Refuse to overwrite an existing file.
    if os.path.isfile(config_file):
        LOG.info("{} already exists. Please use another file name.".format(config_file))
        return

    with open(config_file, "w") as outfile:
        outfile.write(config_data)
def count_value(args):
    """Log the value counts of one field in a CSV sample file.

    Handy for spotting imbalanced datasets before building a model.

    :param args: parsed command line arguments (uses ``args.input`` CSV file
        and ``args.field`` field name)
    :return: None
    """
    value_counts = model_utils.count_values(args.input, args.field)

    LOG.info("------------")
    LOG.info("Value Counts")
    LOG.info("------------")
    LOG.info("Value counts for {} in {}:".format(args.field, args.input))
    LOG.info("{}".format(value_counts))
def download_incidents_csv(opt_parser, csv_file):
    """
    Download incidents and convert json into CSV. Save the result to the csv_file.

    :param opt_parser: Options/configurations and command line parameters
    :param csv_file: CSV file to save samples/incidents to
    :return: Number of incidents saved to the CSV file (0 when any of
             host/org/email/password is missing from the [resilient] section)
    """
    # Guard against a missing [resilient] section (previously AttributeError).
    res_opt = opt_parser.opts.get(RESILIENT_SECTION) or {}
    host = res_opt.get("host", None)
    email = res_opt.get("email", None)
    password = res_opt.get("password", None)
    org = res_opt.get("org", None)

    num_inc = 0
    if host and org and email and password:
        url = "https://{}:443".format(host)

        # Decide how to verify the server certificate: default is full
        # verification; cafile may disable it ("false"/"False") or point
        # at a trusted CA bundle.
        verify = True
        try:
            cafile = opt_parser.getopt(RESILIENT_SECTION, "cafile")
            if cafile == "false" or cafile == "False":
                #
                # This is a security related feature. The user has to explicitly enter false or False to
                # turn it off. We don't accept anything else.
                #
                LOG.debug("HTTPS certificate validation has been turned off.")
                requests.packages.urllib3.disable_warnings()
                verify = False
            elif os.path.isfile(cafile):
                # User specified a cafile for trusted certificate.
                verify = cafile
        except Exception:
            # cafile setting missing or unreadable: fall back to standard
            # verification. (Narrowed from a bare "except:" so that
            # KeyboardInterrupt/SystemExit are no longer swallowed.)
            verify = True

        args = {"base_url": url,
                "verify": verify,
                "org_name": org}
        resilient_client = resilient.SimpleClient(**args)
        session = resilient_client.connect(email, password)

        # Optional cap on how many samples to convert.
        max_count = None
        if opt_parser.getopt(MACHINE_LEARNING_SECTION, "max_count"):
            max_count = int(opt_parser.getopt(MACHINE_LEARNING_SECTION, "max_count"))

        # Optional incident-creation time window.
        time_start = opt_parser.getopt(MACHINE_LEARNING_SECTION, "time_start")
        time_end = opt_parser.getopt(MACHINE_LEARNING_SECTION, "time_end")
        res_filter = IncidentTimeFilter(time_start=time_start,
                                        time_end=time_end,
                                        in_log=LOG)

        # get_incidents downloads all incidents through resilient_client; the
        # json-to-CSV conversion stops once max_count samples are written.
        num_inc = resilient_utils.get_incidents(res_client=resilient_client,
                                                filename=csv_file,
                                                filter=res_filter,
                                                max_count=max_count,
                                                in_log=LOG)
        LOG.info("Saved {} samples into {}".format(num_inc, csv_file))
    return num_inc
def build_model(model_file, opt_parser, csv_file=None, rebuilding=False):
    """
    Build a model and save it to model_file.

    :param model_file: Save built model to this file
    :param opt_parser: information from app.config / ml.config
    :param csv_file: CSV file with samples; when None, incidents are
        downloaded first and saved to SAMPLE_CSV_FILE
    :param rebuilding: True if rebuilding a saved model (settings are then
        taken from the saved model instead of the config file)
    :return: None
    """
    res_opt = opt_parser.opts.get(RESILIENT_SECTION)  # read but unused here
    ml_opt = opt_parser.opts.get(MACHINE_LEARNING_SECTION)
    mlconfig = MlConfig()
    if not csv_file:
        #
        # Users did not specify a CSV file with samples. So we
        # need to download incidents first.
        # Save them to SAMPLE_CSV_FLLE
        #
        num_inc = download_incidents_csv(opt_parser, SAMPLE_CSV_FILE)
        LOG.info("Download and save samples to " + SAMPLE_CSV_FILE)
        mlconfig.number_samples = num_inc
        csv_file = SAMPLE_CSV_FILE
    # Rebuild reuses the settings stored in the saved model; a fresh build
    # reads them from the [machine_learning] config section.
    if rebuilding:
        model_utils.update_config_from_saved_model(model_file, mlconfig)
    else:
        model_utils.update_config_from_app_config(ml_opt, mlconfig)
    model = resilient_utils.get_model(name=mlconfig.model_name,
                                      imbalance_upsampling=mlconfig.imbalance_upsampling,
                                      class_weight=mlconfig.class_weight,
                                      method=mlconfig.addition_method)
    # NOTE(review): these two assignments run before the None check below,
    # so a None return from get_model would already raise here — confirm
    # whether get_model can actually return None.
    model.log = LOG
    model.config.number_samples = mlconfig.number_samples
    if model is not None:
        model.build(csv_file=csv_file,
                    features=mlconfig.selected_features,
                    prediction=mlconfig.predict_field,
                    test_prediction=mlconfig.split_percentage,
                    unwanted_values=mlconfig.unwanted_values)
        # Output summary of build
        show_model_summary(model, os.path.abspath(model_file))
        # save the model
        model.save_to_file(os.path.abspath(model_file))
def build_new_model(args, opt_parser):
    """Handle the ``build`` sub-command: train and save a brand new model.

    :param args: parsed command line arguments (uses ``args.output`` and
        the optional ``args.csv`` samples file)
    :param opt_parser: app.config / ml.config information
    :return: None
    """
    LOG.debug("Building a new model: " + str(args))
    build_model(args.output, opt_parser, args.csv)
def rebuild_model(args, opt_parser):
    """Handle the ``rebuild`` sub-command: retrain a previously saved model.

    :param args: parsed command line arguments (uses ``args.input`` saved
        model file and the optional ``args.csv`` samples file)
    :param opt_parser: app.config / ml.config information
    :return: None
    """
    LOG.debug("Rebuilding a model: " + str(args))
    build_model(args.input, opt_parser, args.csv, True)
def show_model_summary(model, model_file):
    """
    Output summary of a given model.

    :param model: model re-constructed from a saved model file
    :param model_file: model file name (for display only)
    :return: None
    """
    # Older saved models may lack addition_method on their config.
    try:
        method_name = model.config.addition_method
    except Exception:
        method_name = "None"
    LOG.info("--------")
    LOG.info("Summary:")
    LOG.info("--------")
    LOG.info("File: {}".format(model_file))
    LOG.info("Build time: {}".format(model.config.build_time))
    LOG.info("Num_samples: {}".format(model.config.number_samples))
    LOG.info("Algorithm: {}".format(model.get_name()))
    LOG.info("Method: {}".format(method_name))
    LOG.info("Prediction: {}".format(model.config.predict_field))
    LOG.info("Features: {}".format(", ".join(model.config.selected_features)))
    LOG.info("Class weight: {}".format(str(model.config.class_weight)))
    LOG.info("Upsampling: {}".format(str(model.config.imbalance_upsampling)))
    if model.config.unwanted_values is not None:
        LOG.info("Unwanted Values: {}".format(", ".join(model.config.unwanted_values)))
    LOG.info("Accuracy: {}".format(model.config.accuracy))
    #
    #@TODO: Does customer care about precision and recall? F1 is enough?
    #
    # if model.config.precision is not None:
    #    LOG.info("Precision: {}".format(model.config.precision))
    # if model.config.recall is not None:
    #    LOG.info("Recall: }".format(model.config.recall))
    if model.config.f1 is not None:
        LOG.info("F1: {}".format(model.config.f1))
    if model.config.analysis:
        # Per-value accuracy breakdown for the prediction field.
        LOG.info(" Accuracy for {} value:".format(model.config.predict_field))
        # check Python version and use appropriate method to return iterable list
        if sys.version_info[0] < 3:
            items = model.config.analysis.iteritems()
        else:
            items = model.config.analysis.items()
        for key, value in items:
            LOG.info(" %-*s %s" % (12, key + ":", value))
def view_model(args):
    """Print the summary of a previously saved model file.

    Invoked by the ``view`` sub-command; ``args.input`` must point at a file
    produced by a prior build/rebuild run.

    :param args: parsed command line arguments
    :return: None
    """
    file_name = args.input

    # Guard clause: nothing to show if the file is missing.
    if not os.path.exists(file_name):
        LOG.error("Model file {} does not exist.".format(file_name))
        return

    # Deserialize the model and print the build information stored in it.
    model = MlModelCommon.load_from_file(file_name)
    show_model_summary(model, os.path.abspath(file_name))
# Script entry point.
if __name__ == "__main__":
    LOG.debug("Calling main")
    main()
| 36.244526 | 117 | 0.59274 |
a67656edd46734ac55e77bed5a82d2c6b88928a0 | 1,025 | py | Python | pundit/base.py | sachinvettithanam/rule_engine | 4d6299d07594e959367c4126a18a325f5d5cc5c8 | [
"MIT"
] | 2 | 2017-11-07T06:35:53.000Z | 2019-03-18T12:32:55.000Z | pundit/base.py | sachinvettithanam/rule_engine | 4d6299d07594e959367c4126a18a325f5d5cc5c8 | [
"MIT"
] | 4 | 2016-04-20T13:58:37.000Z | 2016-04-23T15:14:50.000Z | pundit/base.py | sachinvettithanam/rule_engine | 4d6299d07594e959367c4126a18a325f5d5cc5c8 | [
"MIT"
] | 2 | 2017-11-07T06:35:55.000Z | 2019-12-05T11:44:54.000Z | import json
'''
PunditBase Parent Class
Arguments : name
'''
class PunditBase():
def __init__(self):
self.mine = 'asd'
def input_preprocess(self, id, function):
def lower(self, id):
self.processed_input = []
for x in self.input_set:
if x['id'] == id:
self.processed_input.append({'id': x['id'],'value': x['value'].lower(), 'type': x['type']})
else:
self.processed_input.append(x)
return
def upper(self, id):
self.processed_input = []
for x in self.input_set:
if x['id'] == id:
self.processed_input.append({{'id': x['id'],'value': x['value'].upper(), 'type': x['type']}})
else:
self.processed_input.append(x)
return
if function == 'lower':
lower(self, id)
elif function == 'upper':
upper(self, id)
else:
pass
def add_structure(self, *arg):
self.structure = []
length_of_struct = len(arg)
for z in enumerate(arg):
self.structure.append({ z[1]:{}})
return
@property
def structure(self):
return self.structure
| 16.803279 | 98 | 0.610732 |
951c4b64ebd8cf9fd25b5a064a34230e1e60170d | 1,734 | py | Python | official/cv/psenet/export.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/cv/psenet/export.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/cv/psenet/export.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############export checkpoint file into air models#################
"""
import os
import numpy as np
import mindspore as ms
from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context
from src.model_utils.config import config
from src.PSENET.psenet import PSENet
from src.model_utils.moxing_adapter import moxing_wrapper
# Configure MindSpore globally: graph (compiled) mode on the device target
# chosen in the YAML-backed config (e.g. Ascend/GPU/CPU).
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
def modelarts_pre_process():
    # Place the exported file under the configured output directory.
    config.file_name = os.path.join(config.output_path, config.file_name)
    # NOTE(review): presumably only Ascend requires an explicit device id
    # here — confirm for other targets.
    if config.device_target == "Ascend":
        context.set_context(device_id=config.device_id)
@moxing_wrapper(pre_process=modelarts_pre_process)
def model_export():
    """Restore the PSENet checkpoint and export it in the configured format."""
    network = PSENet(config)
    load_param_into_net(network, load_checkpoint(config.ckpt))
    # Dummy NCHW input that fixes the exported graph's input shape.
    dummy = np.ones([config.batch_size, 3, config.INFER_LONG_SIZE, config.INFER_LONG_SIZE])
    export(network, Tensor(dummy, ms.float32),
           file_name=config.file_name, file_format=config.file_format)
# Run the export when invoked as a script.
if __name__ == '__main__':
    model_export()
| 34.68 | 115 | 0.732987 |
59a274412863f5f77740dd366416f46e4d35ddc4 | 4,607 | py | Python | myblog/myblog/settings.py | IAMJACKLiNOTBRUCELi/MyBlog | 6927b21873d1c866d79fe03b3afdbc6bd3812374 | [
"MIT"
] | 1 | 2019-02-17T07:53:13.000Z | 2019-02-17T07:53:13.000Z | myblog/myblog/settings.py | IAMJACKLiNOTBRUCELi/MyBlog | 6927b21873d1c866d79fe03b3afdbc6bd3812374 | [
"MIT"
] | null | null | null | myblog/myblog/settings.py | IAMJACKLiNOTBRUCELi/MyBlog | 6927b21873d1c866d79fe03b3afdbc6bd3812374 | [
"MIT"
] | null | null | null | """
Django settings for myblog project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(BASE_DIR)) if False else os.path.abspath(__file__))
f9d99507b671690f709fd2ac1da62d24a64e8f3e | 2,937 | py | Python | proj/fpga/zcu106/Vitis-AI-DPU_TRD-for-ZCU106/zcu106_dpu/Vitis-AI/alveo/examples/caffe/pix2pix/maps_AtoB/maps_AtoB_fpga.py | timebe00/Mercenary | 7762bad28e4f49b2ad84fb8abbd8056bd01f61d4 | [
"MIT"
] | 3 | 2020-10-29T15:00:30.000Z | 2021-10-21T08:09:34.000Z | proj/fpga/zcu106/Vitis-AI-DPU_TRD-for-ZCU106/zcu106_dpu/Vitis-AI/alveo/examples/caffe/pix2pix/maps_AtoB/maps_AtoB_fpga.py | timebe00/Mercenary | 7762bad28e4f49b2ad84fb8abbd8056bd01f61d4 | [
"MIT"
] | 20 | 2020-10-31T03:19:03.000Z | 2020-11-02T18:59:49.000Z | proj/fpga/zcu106/Vitis-AI-DPU_TRD-for-ZCU106/zcu106_dpu/Vitis-AI/alveo/examples/caffe/pix2pix/maps_AtoB/maps_AtoB_fpga.py | timebe00/Mercenary | 7762bad28e4f49b2ad84fb8abbd8056bd01f61d4 | [
"MIT"
] | 9 | 2020-10-14T02:04:10.000Z | 2020-12-01T08:23:02.000Z | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##pix2pix caffe interference
# maps A to B
#%% import package
import numpy as np
import cv2
import os
import caffe
import matplotlib.pyplot as plt
import skimage.io as io
import argparse
#%% define functions
def load_images(fn):
    """
    Read an image file and convert it to the generator's input layout.

    The image is resized to the fixed 256x256 resolution, converted from
    OpenCV's BGR channel order to RGB, scaled into [-1, 1], and transposed
    from HWC to CHW (Caffe's channel-first layout).
    """
    bgr = cv2.imread(fn)
    # Fixed input resolution expected by the pix2pix generator.
    resized = cv2.resize(bgr, (256, 256))
    # OpenCV loads BGR; reverse the channel axis to get RGB.
    rgb = resized[..., ::-1]
    # Map pixel values from [0, 255] into [-1, 1].
    scaled = rgb / 127.5 - 1
    # HWC -> CHW.
    return np.transpose(scaled, (2, 0, 1))
def norm_image(IMG):
    """
    Rescale an image array to uint8 in [0, 255] via min-max normalization.

    A constant image (max == min) would make the original formula divide by
    zero and emit NaNs; such input now maps to an all-zero image instead.

    :param IMG: numeric numpy array of any shape.
    :return: array of the same shape with dtype uint8.
    """
    lo = np.min(IMG)
    hi = np.max(IMG)
    span = hi - lo
    if span == 0:
        # Degenerate case: every pixel identical. The min-max formula would
        # compute 0/0 (NaN); return zeros of the same shape instead.
        return np.zeros_like(IMG, dtype='uint8')
    # output scale: [0,1]
    output = (IMG - lo) / span
    # normalize [0,255]
    output1 = output * 255
    # assure integer 8bit
    output1 = output1.astype('uint8')
    return output1
#%% main
if __name__ == "__main__":
    # Command-line interface: a single input image plus an output directory.
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', default="./test_output/", help='Optionally, save all generated outputs in specified folder')
    parser.add_argument('--image', default=None, help='User can provide an image to run')
    args = vars(parser.parse_args())

    # Root of the Vitis-AI Alveo examples tree; must be set in the environment.
    VAI_ALVEO_ROOT=os.environ["VAI_ALVEO_ROOT"]

    # Create the output directory on first use.
    if not os.path.isdir(args["output_path"]):
        os.mkdir(args["output_path"])

    # model configuration
    model_def = 'xfdnn_deploy.prototxt'
    model_weights = VAI_ALVEO_ROOT+'/examples/caffe/models/maps_AtoB/deploy.caffemodel'
    net = caffe.Net(model_def, model_weights, caffe.TEST)

    if args["image"]:
        fn = args["image"]
        # load image
        image = load_images(fn)
        ## preprocessing
        # add one dimension (batch of size 1: CHW -> NCHW)
        batch_A = np.expand_dims(image,0)
        ## net forward (feed into caffe network)
        net.blobs['input_3'].data[...] = batch_A
        net.forward()
        fake_B = net.blobs['activation_10'].data
        ## post processing
        # normalize output [0,255]; CHW -> HWC for image saving
        fake_B1 = norm_image(np.transpose(fake_B[0,:,:,:],(1,2,0)))
        # save the output image as file
        # NOTE(review): 'output_' + fn assumes fn is a bare filename; passing
        # a path would be concatenated into the output name — confirm.
        filename = 'output_'+fn
        io.imsave(args["output_path"]+filename,fake_B1)
        print('output file is saved in '+args["output_path"])
    else:
        print('Please provide input image as "--image filename"' )
| 27.194444 | 133 | 0.635683 |
855e427aaefe553362b140e3f24261768cb0fc85 | 22,338 | py | Python | TheFuzzer/lib/python2.7/site-packages/twisted/python/compat.py | akellermann97/college-dump | 5c82d93767038709ad71b8f212fdb6243eeb0aec | [
"MIT"
] | null | null | null | TheFuzzer/lib/python2.7/site-packages/twisted/python/compat.py | akellermann97/college-dump | 5c82d93767038709ad71b8f212fdb6243eeb0aec | [
"MIT"
] | null | null | null | TheFuzzer/lib/python2.7/site-packages/twisted/python/compat.py | akellermann97/college-dump | 5c82d93767038709ad71b8f212fdb6243eeb0aec | [
"MIT"
] | null | null | null | # -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
@var urllib_parse: a URL-parsing module (urlparse on Python 2, urllib.parse on
Python 3)
"""
from __future__ import absolute_import, division
import inspect
import os
import platform
import socket
import struct
import sys
import tokenize
from types import MethodType as _MethodType
from io import TextIOBase, IOBase
# Interpreter feature flags, computed once at import time.
_PY3 = sys.version_info >= (3, 0)
_PY35PLUS = sys.version_info >= (3, 5, 0)
_PY37PLUS = sys.version_info >= (3, 7, 0)
_PYPY = platform.python_implementation() == 'PyPy'


def _shouldEnableNewStyle():
    """
    Whether old-style classes should be automatically converted to
    new-style, controlled by the C{TWISTED_NEWSTYLE} environment variable.
    An empty string, C{no}, C{false}, C{False} or C{0} counts as falsey;
    every other value enables the conversion.

    @rtype: L{bool}
    """
    flag = os.environ.get('TWISTED_NEWSTYLE', '')
    return flag not in ('', 'no', 'false', 'False', '0')


_EXPECT_NEWSTYLE = _PY3 or _shouldEnableNewStyle()
def currentframe(n=0):
    """
    Return the stack frame C{n} levels above the caller.

    Python 3's L{inspect.currentframe} dropped the stack-level argument this
    helper restores, so callers need not walk C{f_back} themselves.

    @param n: The number of stack levels above the caller to walk.
    @type n: L{int}

    @return: a frame, n levels up the stack from the caller.
    @rtype: L{types.FrameType}
    """
    frame = inspect.currentframe()
    # Walk one extra level to skip this helper's own frame.
    for _ in range(n + 1):
        frame = frame.f_back
    return frame
def inet_pton(af, addr):
    """
    Emulator of L{socket.inet_pton}.

    @param af: An address family to parse; C{socket.AF_INET} or
        C{socket.AF_INET6}.
    @type af: L{int}

    @param addr: An address.
    @type addr: native L{str}

    @return: The binary packed version of the passed address.
    @rtype: L{bytes}
    """
    if not addr:
        raise ValueError("illegal IP address string passed to inet_pton")
    if af == socket.AF_INET:
        # IPv4: delegate to inet_aton, which is always available.
        return socket.inet_aton(addr)
    elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        # A zone index ('%scope') may appear at most once and never first.
        if '%' in addr and (addr.count('%') > 1 or addr.index("%") == 0):
            raise ValueError("illegal IP address string passed to inet_pton")
        addr = addr.split('%')[0]
        parts = addr.split(':')
        # Each '::' elision shows up as an empty string in the split.
        elided = parts.count('')
        # A trailing dotted quad ('x::1.2.3.4') consumes two 16-bit groups.
        ipv4Component = '.' in parts[-1]
        if len(parts) > (8 - ipv4Component) or elided > 3:
            raise ValueError("Syntactically invalid address")
        if elided == 3:
            # The address is exactly '::' (all zeroes).
            # NOTE(review): returns a native str; this emulator is only
            # installed when the platform socket lacks inet_pton (legacy
            # Python 2 platforms), where str is bytes — confirm if reused.
            return '\x00' * 16
        if elided:
            # Expand the '::' into the right number of zero groups.
            zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
            if addr.startswith('::'):
                parts[:2] = zeros
            elif addr.endswith('::'):
                parts[-2:] = zeros
            else:
                idx = parts.index('')
                parts[idx:idx+1] = zeros
            if len(parts) != 8 - ipv4Component:
                raise ValueError("Syntactically invalid address")
        else:
            if len(parts) != (8 - ipv4Component):
                raise ValueError("Syntactically invalid address")
        if ipv4Component:
            if parts[-1].count('.') != 3:
                raise ValueError("Syntactically invalid address")
            # Re-pack the dotted quad as two hexadecimal 16-bit groups.
            rawipv4 = socket.inet_aton(parts[-1])
            unpackedipv4 = struct.unpack('!HH', rawipv4)
            parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
        parts = [int(x, 16) for x in parts]
        return struct.pack('!8H', *parts)
    else:
        raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
    """
    Emulator of L{socket.inet_ntop} for platforms that lack it.

    @param af: An address family; C{socket.AF_INET} or C{socket.AF_INET6}.
    @type af: L{int}

    @param addr: A packed binary address: 4 bytes for IPv4, 16 for IPv6.
    @type addr: L{bytes}

    @return: The presentation (string) form of the address.  For IPv6 the
        longest run of zero groups is compressed to C{'::'}.
    @rtype: native L{str}

    @raise ValueError: An IPv6 address of the wrong length was passed.
    @raise socket.error: C{af} is not a supported address family.
    """
    if af == socket.AF_INET:
        return socket.inet_ntoa(addr)
    elif af == socket.AF_INET6:
        if len(addr) != 16:
            raise ValueError("address length incorrect")
        parts = struct.unpack('!8H', addr)
        # Find the longest run of zero groups so it can become '::'.
        curBase = bestBase = None
        bestLen = 0
        for i in range(8):
            if not parts[i]:
                if curBase is None:
                    curBase = i
                    curLen = 0
                curLen += 1
            else:
                if curBase is not None:
                    # A zero run just ended; keep it if it is the longest so
                    # far.  (Bug fix: ``bestLen`` was previously reset to
                    # None here, which raises TypeError on Python 3 and made
                    # the *last* run always win instead of the longest.)
                    if bestBase is None or curLen > bestLen:
                        bestBase = curBase
                        bestLen = curLen
                    curBase = None
        # Account for a zero run that extends to the end of the address.
        if curBase is not None and (bestBase is None or curLen > bestLen):
            bestBase = curBase
            bestLen = curLen
        parts = [hex(x)[2:] for x in parts]
        if bestBase is not None:
            # Collapse the chosen run; pad the ends so a leading/trailing
            # run still renders as '::' after the join.
            parts[bestBase:bestBase + bestLen] = ['']
            if parts[0] == '':
                parts.insert(0, '')
            if parts[-1] == '':
                parts.insert(len(parts) - 1, '')
        return ':'.join(parts)
    else:
        raise socket.error(97, 'Address family not supported by protocol')
# Ensure AF_INET6 exists even on socket modules built without IPv6 support;
# the string stand-in keeps the comparisons in inet_pton working.
try:
    socket.AF_INET6
except AttributeError:
    socket.AF_INET6 = 'AF_INET6'

# If the platform's inet_pton is missing or cannot parse IPv6, install the
# pure-Python emulators defined above.
try:
    socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
    socket.inet_pton = inet_pton
    socket.inet_ntop = inet_ntop

adict = dict

if _PY3:
    # These are actually useless in Python 2 as well, but we need to go
    # through deprecation process there (ticket #5895):
    del adict, inet_pton, inet_ntop

# Re-export the builtins so legacy code can import them from this module.
set = set
frozenset = frozenset

try:
    # Python 3: reduce moved into functools.
    from functools import reduce
except ImportError:
    reduce = reduce
def execfile(filename, globals, locals=None):
    """
    Execute a Python script file in the supplied namespaces.

    A replacement for the Python 2 ``execfile`` builtin, for Python 3
    porting.  Unlike the builtin, a globals namespace is mandatory; when no
    locals mapping is supplied, the globals mapping doubles as locals.
    """
    namespace = globals if locals is None else locals
    with open(filename, "rb") as script:
        source = script.read()
    # Compiling with the real filename keeps tracebacks pointing at it.
    exec(compile(source, filename, "exec"), globals, namespace)
try:
    # Python 2: re-export the builtin.
    cmp = cmp
except NameError:
    def cmp(a, b):
        """
        Three-way comparison of two objects, as in Python 2.

        @return: a negative number if C{a < b}, zero if they compare
            equal, and a positive number if C{a > b}.
        """
        if a == b:
            return 0
        if a < b:
            return -1
        return 1
def comparable(klass):
    """
    Class decorator that backfills the Python 3 rich comparison methods
    from a C{__cmp__} implementation on the decorated class.

    On Python 2 the class is returned untouched, since C{__cmp__} already
    drives comparisons there.  Each generated method forwards
    C{NotImplemented} unchanged so reflected comparisons still work.
    """
    if not _PY3:
        return klass

    def __eq__(self, other):
        outcome = self.__cmp__(other)
        return outcome if outcome is NotImplemented else outcome == 0

    def __ne__(self, other):
        outcome = self.__cmp__(other)
        return outcome if outcome is NotImplemented else outcome != 0

    def __lt__(self, other):
        outcome = self.__cmp__(other)
        return outcome if outcome is NotImplemented else outcome < 0

    def __le__(self, other):
        outcome = self.__cmp__(other)
        return outcome if outcome is NotImplemented else outcome <= 0

    def __gt__(self, other):
        outcome = self.__cmp__(other)
        return outcome if outcome is NotImplemented else outcome > 0

    def __ge__(self, other):
        outcome = self.__cmp__(other)
        return outcome if outcome is NotImplemented else outcome >= 0

    klass.__lt__ = __lt__
    klass.__gt__ = __gt__
    klass.__le__ = __le__
    klass.__ge__ = __ge__
    klass.__eq__ = __eq__
    klass.__ne__ = __ne__
    return klass
if _PY3:
    # Python 3: the sole text type is str and ints are unbounded.
    unicode = str
    long = int
else:
    # Python 2: re-export the builtins so they can be imported from here.
    unicode = unicode
    long = long
def ioType(fileIshObject, default=unicode):
    """
    Determine the type which will be returned from the given file object's
    read() and accepted by its write() method as an argument.

    In other words, determine whether the given file is 'opened in text mode'.

    @param fileIshObject: Any object, but ideally one which resembles a file.
    @type fileIshObject: L{object}

    @param default: A default value to return when the type of C{fileIshObject}
        cannot be determined.
    @type default: L{type}

    @return: There are 3 possible return values:

            1. L{unicode}, if the file is unambiguously opened in text mode.

            2. L{bytes}, if the file is unambiguously opened in binary mode.

            3. L{basestring}, if we are on python 2 (the L{basestring} type
               does not exist on python 3) and the file is opened in binary
               mode, but has an encoding and can therefore accept both bytes
               and text reliably for writing, but will return L{bytes} from
               read methods.

            4. The C{default} parameter, if the given type is not understood.
    @rtype: L{type}
    """
    if isinstance(fileIshObject, TextIOBase):
        # If it's for text I/O, then it's for text I/O.
        return unicode
    if isinstance(fileIshObject, IOBase):
        # If it's for I/O but it's _not_ for text I/O, it's for bytes I/O.
        return bytes
    encoding = getattr(fileIshObject, 'encoding', None)
    # Imported lazily: only needed when the object is not a plain io stream.
    import codecs
    if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
        # On StreamReaderWriter, the 'encoding' attribute has special meaning;
        # it is unambiguously unicode.
        if encoding:
            return unicode
        else:
            return bytes
    if not _PY3:
        # Special case: if we have an encoding file, we can *give* it unicode,
        # but we can't expect to *get* unicode.
        if isinstance(fileIshObject, file):
            if encoding is not None:
                return basestring
            else:
                return bytes
        from cStringIO import InputType, OutputType
        from StringIO import StringIO
        if isinstance(fileIshObject, (StringIO, InputType, OutputType)):
            return bytes
    return default
def nativeString(s):
    """
    Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
    encoding if conversion is necessary.

    @raise UnicodeError: The input string is not ASCII encodable/decodable.
    @raise TypeError: The input is neither C{bytes} nor C{unicode}.
    """
    if not isinstance(s, (bytes, unicode)):
        raise TypeError("%r is neither bytes nor unicode" % s)
    if _PY3:
        if isinstance(s, bytes):
            return s.decode("ascii")
        else:
            # Ensure we're limited to ASCII subset:
            s.encode("ascii")
    else:
        if isinstance(s, unicode):
            return s.encode("ascii")
        else:
            # Ensure we're limited to ASCII subset:
            s.decode("ascii")
    # Already the native type; the encode/decode above only validated ASCII.
    return s
def _matchingString(constantString, inputString):
    """
    Convert an ASCII-only string constant to the same string type
    (L{bytes} or L{unicode}) as a user-supplied string.

    Useful for functions such as C{os.path.join} that operate on string
    arguments which may be bytes or text and need constants (separators,
    prefixes) of the matching type.  The constant is encoded or decoded
    regardless of whether a conversion is needed, so non-ASCII constants
    always raise.

    @param constantString: A string literal used in processing.
    @type constantString: L{unicode} or L{bytes}

    @param inputString: A byte string or text string provided by the user.
    @type inputString: L{unicode} or L{bytes}

    @return: C{constantString} converted into the same type as
        C{inputString}
    @rtype: the type of C{inputString}
    """
    # Always round-trip through ASCII so invalid constants fail loudly even
    # when the types already match.
    if isinstance(constantString, bytes):
        converted = constantString.decode("ascii")
    else:
        converted = constantString.encode("ascii")
    if type(constantString) == type(inputString):
        return constantString
    return converted
if _PY3:
    def reraise(exception, traceback):
        raise exception.with_traceback(traceback)
else:
    # Python 2's three-argument raise is a syntax error on Python 3, so it
    # must be hidden inside exec() to keep this module importable on both.
    exec("""def reraise(exception, traceback):
    raise exception.__class__, exception, traceback""")

reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.

Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.

@param exception: The exception instance.
@param traceback: The traceback to use, or L{None} indicating a new traceback.
"""

if _PY3:
    from io import StringIO as NativeStringIO
else:
    # The native string type is bytes on Python 2.
    from io import BytesIO as NativeStringIO
# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
    def iterbytes(originalBytes):
        # Slicing (rather than indexing) yields length-1 bytes objects,
        # matching Python 2 iteration behavior instead of integers.
        for i in range(len(originalBytes)):
            yield originalBytes[i:i+1]


    def intToBytes(i):
        return ("%d" % i).encode("ascii")


    def lazyByteSlice(object, offset=0, size=None):
        """
        Return a copy of the given bytes-like object.

        If an offset is given, the copy starts at that offset. If a size is
        given, the copy will only be of that length.

        @param object: C{bytes} to be copied.

        @param offset: C{int}, starting index of copy.

        @param size: Optional, if an C{int} is given limit the length of copy
            to this size.
        """
        view = memoryview(object)
        if size is None:
            return view[offset:]
        else:
            return view[offset:(offset + size)]


    def networkString(s):
        if not isinstance(s, unicode):
            raise TypeError("Can only convert text to bytes on Python 3")
        return s.encode('ascii')
else:
    def iterbytes(originalBytes):
        # Iterating bytes (str) on Python 2 already yields 1-byte strings.
        return originalBytes


    def intToBytes(i):
        return b"%d" % i

    # buffer() provides a zero-copy slice on Python 2.
    lazyByteSlice = buffer

    def networkString(s):
        if not isinstance(s, str):
            raise TypeError("Can only pass-through bytes on Python 2")
        # Ensure we're limited to ASCII subset:
        s.decode('ascii')
        return s

iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.

In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).

@param originalBytes: A C{bytes} object that will be wrapped.
"""

intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as ASCII-encoded Arab numeral.

In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.

@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""

networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.

This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:

    networkString("Hello %d" % (n,))

@param s: A native string to convert to bytes if necessary.
@type s: C{str}

@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.

@rtype: C{bytes}
"""
try:
    # Python 2: the common base class of str and unicode.
    StringType = basestring
except NameError:
    # Python 3+
    StringType = str

try:
    from types import InstanceType
except ImportError:
    # Python 3+
    InstanceType = object

try:
    from types import FileType
except ImportError:
    # Python 3+
    FileType = IOBase

# URL/HTML helpers moved and were renamed between Python 2 and 3; expose
# them under uniform names.
if _PY3:
    import urllib.parse as urllib_parse
    from html import escape
    from urllib.parse import quote as urlquote
    from urllib.parse import unquote as urlunquote
    from http import cookiejar as cookielib
else:
    import urlparse as urllib_parse
    from cgi import escape
    from urllib import quote as urlquote
    from urllib import unquote as urlunquote
    import cookielib
# Dealing with the differences in items/iteritems
if _PY3:
    def iteritems(d):
        return d.items()


    def itervalues(d):
        return d.values()


    def items(d):
        return list(d.items())

    # range is already lazy on Python 3; izip is plain zip.
    range = range
    xrange = range
    izip = zip
else:
    def iteritems(d):
        return d.iteritems()


    def itervalues(d):
        return d.itervalues()


    def items(d):
        return d.items()

    range = xrange
    xrange = xrange
    from itertools import izip
    izip # shh pyflakes

iteritems.__doc__ = """
Return an iterable of the items of C{d}.

@type d: L{dict}
@rtype: iterable
"""

itervalues.__doc__ = """
Return an iterable of the values of C{d}.

@type d: L{dict}
@rtype: iterable
"""

items.__doc__ = """
Return a list of the items of C{d}.

@type d: L{dict}
@rtype: L{list}
"""


def _keys(d):
    """
    Return a list of the keys of C{d}.

    @type d: L{dict}
    @rtype: L{list}
    """
    if _PY3:
        return list(d.keys())
    else:
        return d.keys()
def bytesEnviron():
    """
    Return a L{dict} of L{os.environ} where all text-strings are encoded into
    L{bytes}.

    This function is POSIX only; environment variables are always text strings
    on Windows.
    """
    if not _PY3:
        # On py2, nothing to do.
        return dict(os.environ)

    target = dict()
    for x, y in os.environ.items():
        # os.environ.encodekey/encodevalue apply the interpreter's
        # filesystem encoding with surrogateescape.
        target[os.environ.encodekey(x)] = os.environ.encodevalue(y)

    return target
def _constructMethod(cls, name, self):
    """
    Construct a bound method.

    @param cls: The class that the method should be bound to.
    @type cls: L{types.ClassType} or L{type}.

    @param name: The name of the method.
    @type name: native L{str}

    @param self: The object that the method is bound to.
    @type self: any object

    @return: a bound method
    @rtype: L{types.MethodType}
    """
    # Fetch the raw function from the class dict, bypassing the descriptor
    # protocol so it can be re-bound explicitly below.
    func = cls.__dict__[name]
    if _PY3:
        return _MethodType(func, self)
    # Python 2's MethodType also takes the class.
    return _MethodType(func, self, cls)
from incremental import Version
from twisted.python.deprecate import deprecatedModuleAttribute

from collections import OrderedDict

# Importing OrderedDict from this module is deprecated; it has been in the
# standard library's collections module since Python 2.7.
deprecatedModuleAttribute(
    Version("Twisted", 15, 5, 0),
    "Use collections.OrderedDict instead.",
    "twisted.python.compat",
    "OrderedDict")

if _PY3:
    from base64 import encodebytes as _b64encodebytes
    from base64 import decodebytes as _b64decodebytes
else:
    # encodestring/decodestring are the legacy Python 2 spellings.
    from base64 import encodestring as _b64encodebytes
    from base64 import decodestring as _b64decodebytes


def _bytesChr(i):
    """
    Like L{chr} but always works on ASCII, returning L{bytes}.

    @param i: The ASCII code point to return.
    @type i: L{int}

    @rtype: L{bytes}
    """
    if _PY3:
        return bytes([i])
    else:
        return chr(i)


try:
    # Python 3: intern() moved into the sys module.
    from sys import intern
except ImportError:
    # Python 2: re-export the builtin.
    intern = intern
def _coercedUnicode(s):
    """
    Coerce ASCII-only byte strings into unicode for Python 2.

    In Python 2 C{unicode(b'bytes')} returns a unicode string C{'bytes'}. In
    Python 3, the equivalent C{str(b'bytes')} will return C{"b'bytes'"}
    instead. This function mimics the behavior for Python 2. It will decode the
    byte string as ASCII. In Python 3 it simply raises a L{TypeError} when
    passing a byte string. Unicode strings are returned as-is.

    @param s: The string to coerce.
    @type s: L{bytes} or L{unicode}

    @raise UnicodeError: The input L{bytes} is not ASCII decodable.
    @raise TypeError: The input is L{bytes} on Python 3.
    """
    if isinstance(s, bytes):
        if _PY3:
            raise TypeError("Expected str not %r (bytes)" % (s,))
        else:
            return s.decode('ascii')
    else:
        return s


if _PY3:
    # chr() is unicode-aware on Python 3; input() replaced raw_input().
    unichr = chr
    raw_input = input
else:
    unichr = unichr
    raw_input = raw_input


def _bytesRepr(bytestring):
    """
    Provide a repr for a byte string that begins with 'b' on both
    Python 2 and 3.

    @param bytestring: The string to repr.
    @type bytestring: L{bytes}

    @raise TypeError: The input is not L{bytes}.

    @return: The repr with a leading 'b'.
    @rtype: native L{str}
    """
    if not isinstance(bytestring, bytes):
        raise TypeError("Expected bytes not %r" % (bytestring,))

    if _PY3:
        return repr(bytestring)
    else:
        # Python 2 repr() omits the b prefix; add it for parity.
        return 'b' + repr(bytestring)
if _PY3:
    # tokenize.tokenize expects a bytes readline on Python 3;
    # generate_tokens is the str-based Python 2 equivalent.
    _tokenize = tokenize.tokenize
else:
    _tokenize = tokenize.generate_tokens

try:
    from collections.abc import Sequence
except ImportError:
    # Python 2: the ABCs still live directly in the collections module.
    from collections import Sequence

# Public re-exports of this compatibility module.
__all__ = [
    "reraise",
    "execfile",
    "frozenset",
    "reduce",
    "set",
    "cmp",
    "comparable",
    "OrderedDict",
    "nativeString",
    "NativeStringIO",
    "networkString",
    "unicode",
    "iterbytes",
    "intToBytes",
    "lazyByteSlice",
    "StringType",
    "InstanceType",
    "FileType",
    "items",
    "iteritems",
    "itervalues",
    "range",
    "xrange",
    "urllib_parse",
    "bytesEnviron",
    "escape",
    "urlquote",
    "urlunquote",
    "cookielib",
    "_keys",
    "_b64encodebytes",
    "_b64decodebytes",
    "_bytesChr",
    "_coercedUnicode",
    "_bytesRepr",
    "intern",
    "unichr",
    "raw_input",
    "_tokenize",
    "Sequence",
]
| 25.441913 | 79 | 0.628257 |
259c42caa9ae45055e76dddf22797f799986008a | 9,491 | py | Python | tf_agents/bandits/environments/classification_environment_test.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | 1 | 2021-09-22T12:04:03.000Z | 2021-09-22T12:04:03.000Z | tf_agents/bandits/environments/classification_environment_test.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | null | null | null | tf_agents/bandits/environments/classification_environment_test.py | FlorisHoogenboom/agents | 2cd5a61e1838b52012271f1fb8617c29a55279a9 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.environments.classification_environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from absl.testing.absltest import mock
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.environments import classification_environment as ce
# Shorthand for the TensorFlow Probability distributions namespace.
tfd = tfp.distributions


def deterministic_reward_distribution(reward_table):
  """Returns a deterministic distribution centered at `reward_table`."""
  # Independent reinterprets the last two batch dims as event dims, so each
  # sample is the entire reward table.
  return tfd.Independent(tfd.Deterministic(loc=reward_table),
                         reinterpreted_batch_ndims=2)
class ClassificationEnvironmentTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for ClassificationBanditEnvironment and its table-lookup helper."""

  @parameterized.named_parameters(
      dict(testcase_name='_3x2x3',
           tbl=[[[0, 1, 2],
                 [3, 4, 5]],
                [[6, 7, 8],
                 [9, 10, 11]],
                [[12, 13, 14],
                 [15, 16, 17]]],
           row=[0, 1, 1],
           col=[0, 2, 0],
           expected=[0, 11, 15]),
      )
  def testBatchedTableLookup(self, tbl, row, col, expected):
    """Per-batch (row, col) lookups in a batched table match `expected`."""
    actual = ce._batched_table_lookup(tbl, row, col)
    np.testing.assert_almost_equal(expected, self.evaluate(actual))

  @parameterized.named_parameters(
      dict(
          testcase_name='_scalar_batch_1',
          context=np.array([[0], [1]]),
          labels=np.array([0, 1]),
          batch_size=1),
      dict(
          testcase_name='_multi_dim_batch_23',
          context=np.arange(100).reshape(10, 10),
          labels=np.arange(10),
          batch_size=23),
      )
  def testObservationShapeAndValue(self, context, labels, batch_size):
    """Test that observations have correct shape and values from `context`."""
    dataset = (
        tf.data.Dataset.from_tensor_slices(
            (context, labels)).repeat().shuffle(4 * batch_size))
    # Rewards of 1. is given when action == label
    reward_distribution = deterministic_reward_distribution(
        tf.eye(len(set(labels))))
    env = ce.ClassificationBanditEnvironment(
        dataset, reward_distribution, batch_size)
    expected_observation_shape = [batch_size] + list(context.shape[1:])
    self.evaluate(tf.compat.v1.global_variables_initializer())
    for _ in range(100):
      observation = self.evaluate(env.reset().observation)
      np.testing.assert_array_equal(observation.shape,
                                    expected_observation_shape)
      # Every observed row must come from the original context table.
      for o in observation:
        self.assertIn(o, context)

  def testReturnsCorrectRewards(self):
    """Test that rewards are being returned correctly for a simple case."""
    # Reward of 1 is given if action == (context % 3)
    context = tf.reshape(tf.range(128), shape=[128, 1])
    labels = tf.math.mod(context, 3)
    batch_size = 32
    dataset = (
        tf.data.Dataset.from_tensor_slices(
            (context, labels)).repeat().shuffle(4 * batch_size))
    reward_distribution = deterministic_reward_distribution(tf.eye(3))
    env = ce.ClassificationBanditEnvironment(
        dataset, reward_distribution, batch_size)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    for _ in range(10):
      # Take the 'correct' action
      observation = env.reset().observation
      action = tf.math.mod(observation, 3)
      reward = env.step(action).reward
      np.testing.assert_almost_equal(self.evaluate(reward),
                                     self.evaluate(tf.ones_like(reward)))
    for _ in range(10):
      # Take the 'incorrect' action
      observation = env.reset().observation
      action = tf.math.mod(observation + 1, 3)
      reward = env.step(action).reward
      np.testing.assert_almost_equal(self.evaluate(reward),
                                     self.evaluate(tf.zeros_like(reward)))

  def testPreviousLabelIsSetCorrectly(self):
    """Test that the previous label is set correctly for a simple case."""
    # Reward of 1 is given if action == (context % 3)
    context = tf.reshape(tf.range(128), shape=[128, 1])
    labels = tf.math.mod(context, 3)
    batch_size = 4
    dataset = (
        tf.data.Dataset.from_tensor_slices(
            (context, labels)).repeat().shuffle(4 * batch_size))
    reward_distribution = deterministic_reward_distribution(tf.eye(3))
    env = ce.ClassificationBanditEnvironment(
        dataset, reward_distribution, batch_size)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    time_step = env.reset()
    time_step_label = tf.squeeze(tf.math.mod(time_step.observation, 3))
    action = tf.math.mod(time_step.observation, 3)
    next_time_step = env.step(action)
    next_time_step_label = tf.squeeze(
        tf.math.mod(next_time_step.observation, 3))
    if tf.executing_eagerly():
      np.testing.assert_almost_equal(
          self.evaluate(time_step_label),
          self.evaluate(env._previous_label))
      np.testing.assert_almost_equal(
          self.evaluate(next_time_step_label),
          self.evaluate(env._current_label))
    else:
      # Graph mode: both labels must be fetched in a single session.run so
      # they correspond to the same pass over the dataset.
      with self.cached_session() as sess:
        time_step_label_value, next_time_step_label_value = (
            sess.run([time_step_label, next_time_step_label]))
        previous_label_value = self.evaluate(env._previous_label)
        np.testing.assert_almost_equal(
            time_step_label_value, previous_label_value)
        current_label_value = self.evaluate(env._current_label)
        np.testing.assert_almost_equal(
            next_time_step_label_value,
            current_label_value)

  def testShuffle(self):
    """Test that dataset is being shuffled when asked."""
    # Reward of 1 is given if action == (context % 3)
    context = tf.reshape(tf.range(128), shape=[128, 1])
    labels = tf.math.mod(context, 3)
    batch_size = 32
    dataset = (
        tf.data.Dataset.from_tensor_slices(
            (context, labels)).repeat().shuffle(4 * batch_size))
    reward_distribution = deterministic_reward_distribution(tf.eye(3))
    # Note - shuffle should happen *first* in call chain, so this
    # test will fail if shuffle is called e.g. after batch or prefetch.
    dataset.shuffle = mock.Mock(spec=dataset.shuffle,
                                side_effect=dataset.shuffle)
    ce.ClassificationBanditEnvironment(
        dataset, reward_distribution, batch_size)
    dataset.shuffle.assert_not_called()
    ce.ClassificationBanditEnvironment(
        dataset, reward_distribution, batch_size, shuffle_buffer_size=3, seed=7)
    dataset.shuffle.assert_called_with(
        buffer_size=3, reshuffle_each_iteration=True, seed=7)

  @mock.patch('tf_agents.bandits.environments.classification_environment'+
              '.eager_utils.dataset_iterator')
  def testPrefetch(self, mock_dataset_iterator):
    """Test that dataset is being prefetched when asked."""
    mock_dataset_iterator.return_value = 'mock_iterator_result'
    # Reward of 1 is given if action == (context % 3)
    context = tf.reshape(tf.range(128), shape=[128, 1])
    labels = tf.math.mod(context, 3)
    batch_size = 32
    dataset = tf.data.Dataset.from_tensor_slices((context, labels))
    reward_distribution = deterministic_reward_distribution(tf.eye(3))
    # Operation order should be batch() then prefetch(), have to jump
    # through a couple hoops to get this sequence tested correctly.
    # Save dataset.prefetch in temp mock_prefetch, return batched dataset to
    # make down-stream logic work correctly with batch dimensions.
    batched_dataset = dataset.batch(batch_size)
    mock_prefetch = mock.Mock(spec=dataset.prefetch,
                              return_value=batched_dataset)
    # Replace dataset.batch with mock batch that returns original dataset,
    # in order to make mocking out it's prefetch call easier.
    dataset.batch = mock.Mock(spec=batched_dataset,
                              return_value=batched_dataset)
    # Replace dataset.prefetch with mock_prefetch.
    batched_dataset.prefetch = mock_prefetch
    env = ce.ClassificationBanditEnvironment(
        dataset, reward_distribution, batch_size, repeat_dataset=False)
    dataset.batch.assert_called_with(batch_size, drop_remainder=True)
    batched_dataset.prefetch.assert_not_called()
    mock_dataset_iterator.assert_called_with(batched_dataset)
    self.assertEqual(env._data_iterator, 'mock_iterator_result')
    env = ce.ClassificationBanditEnvironment(
        dataset, reward_distribution, batch_size, repeat_dataset=False,
        prefetch_size=3)
    dataset.batch.assert_called_with(batch_size, drop_remainder=True)
    batched_dataset.prefetch.assert_called_with(3)
    mock_dataset_iterator.assert_called_with(batched_dataset)
    self.assertEqual(env._data_iterator, 'mock_iterator_result')
if __name__ == '__main__':
  # Run all test cases via the TensorFlow test runner.
  tf.test.main()
| 42.370536 | 80 | 0.692867 |
f4dae3bff631b24d97823b8edb20c35064d230f4 | 790 | py | Python | rest/urls.py | betosales/django-rest-api-consume | 7a9e3cbae4e59b87883b4986ca8256331c909796 | [
"MIT"
] | null | null | null | rest/urls.py | betosales/django-rest-api-consume | 7a9e3cbae4e59b87883b4986ca8256331c909796 | [
"MIT"
] | null | null | null | rest/urls.py | betosales/django-rest-api-consume | 7a9e3cbae4e59b87883b4986ca8256331c909796 | [
"MIT"
] | null | null | null | """rest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    # Delegate all root URLs to the app's own URLconf.
    path('', include('app.urls')),
    # Django admin site.
    path('admin/', admin.site.urls),
]
| 34.347826 | 77 | 0.7 |
90c834230d1f4be57be4a5278225696b768c1c8d | 8,014 | py | Python | lambdas/dynamoDBToElasticSearch/lambda_function.py | zavier-sanders/serverless-media-library | 1f88f2845a2a50220ecdc797d319cefc42d814be | [
"MIT"
] | 1 | 2019-02-05T01:58:35.000Z | 2019-02-05T01:58:35.000Z | lambdas/dynamoDBToElasticSearch/lambda_function.py | zavier-sanders/serverless-media-library | 1f88f2845a2a50220ecdc797d319cefc42d814be | [
"MIT"
] | null | null | null | lambdas/dynamoDBToElasticSearch/lambda_function.py | zavier-sanders/serverless-media-library | 1f88f2845a2a50220ecdc797d319cefc42d814be | [
"MIT"
] | null | null | null | import base64
import datetime
import json
import logging
import os
import time
import traceback
import urllib
import urlparse
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import get_credentials
from botocore.endpoint import BotocoreHTTPSession
from botocore.session import Session
from boto3.dynamodb.types import TypeDeserializer
# The following parameters are required to configure the ES cluster
# NOTE(review): the endpoint is hard-coded to one specific cluster; consider
# reading it from an environment variable before reuse.
ES_ENDPOINT = 'https://search-gr-dam-dev-aghe545xvfzrol7bi4zb35vnpu.us-east-1.es.amazonaws.com/'

# The following parameters can be optionally customized
DOC_TABLE_FORMAT = 'assets'  # Python formatter to generate index name from the DynamoDB table name
DOC_TYPE_FORMAT = '_type'  # Python formatter to generate type name from the DynamoDB table name, default is to add '_type' suffix
ES_REGION = None  # If not set, use the runtime lambda region
ES_MAX_RETRIES = 3  # Max number of retries for exponential backoff
DEBUG = True  # Set verbose debugging information

# Executed once per Lambda container cold start.
print ("Streaming to ElasticSearch")
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if DEBUG else logging.INFO)
# Subclass of boto's TypeDeserializer for DynamoDB to adjust for DynamoDB Stream format.
class StreamTypeDeserializer(TypeDeserializer):
    """TypeDeserializer variant adapted to the DynamoDB Streams wire format."""

    def _deserialize_n(self, value):
        # Streams numbers arrive as strings; use float rather than Decimal
        # so the result is JSON-serializable for the ES bulk payload.
        return float(value)

    def _deserialize_b(self, value):
        return value  # Already in Base64
class ES_Exception(Exception):
    """Raised when the Elasticsearch endpoint answers with a non-2xx status.

    Carries the HTTP status code and the raw response body so callers can
    decide whether the failure is retryable (5xx) or fatal.
    """

    # Class-level defaults; __init__ shadows them on the instance.
    status_code = 0
    payload = ''

    def __init__(self, status_code, payload):
        self.status_code = status_code
        self.payload = payload
        message = 'ES_Exception: status_code={}, payload={}'.format(status_code, payload)
        Exception.__init__(self, message)
# Low-level POST data to Amazon Elasticsearch Service generating a Sigv4 signed request
def post_data_to_es(payload, region, creds, host, path, method='POST', proto='https://'):
    '''Post data to ES endpoint with SigV4 signed http headers

    Returns the raw response body on a 2xx status; raises ES_Exception
    (carrying status code and body) otherwise.
    NOTE(review): uses urllib.quote and botocore internals - Python 2 era code.
    '''
    req = AWSRequest(method=method, url=proto + host + urllib.quote(path), data=payload, headers={'Host': host, 'Content-Type' : 'application/json'})
    # Sign the request with AWS SigV4 for the 'es' service.
    SigV4Auth(creds, 'es', region).add_auth(req)
    http_session = BotocoreHTTPSession()
    res = http_session.send(req.prepare())
    if res.status_code >= 200 and res.status_code <= 299:
        # _content is a private botocore/requests attribute - fragile across versions.
        return res._content
    else:
        raise ES_Exception(res.status_code, res._content)
# High-level POST data to Amazon Elasticsearch Service with exponential backoff
# according to suggested algorithm: http://docs.aws.amazon.com/general/latest/gr/api-retries.html
def post_to_es(payload):
    '''Post data to ES cluster with exponential backoff

    Retries only on 5xx responses, sleeping 0.2s, 0.4s, ... between attempts
    (at most ES_MAX_RETRIES tries); any other error status is re-raised.
    Per-item bulk errors are logged but not retried.
    '''
    # Get aws_region and credentials to post signed URL to ES
    es_region = ES_REGION or os.environ['AWS_REGION']
    session = Session({'region': es_region})
    creds = get_credentials(session)
    es_url = urlparse.urlparse(ES_ENDPOINT)
    es_endpoint = es_url.netloc or es_url.path  # Extract the domain name in ES_ENDPOINT

    # Post data with exponential backoff
    retries = 0
    while retries < ES_MAX_RETRIES:
        if retries > 0:
            # 0.2s, 0.4s, 0.8s, ... before each retry.
            seconds = (2 ** retries) * .1
            time.sleep(seconds)

        try:
            es_ret_str = post_data_to_es(payload, es_region, creds, es_endpoint, '/_bulk')
            es_ret = json.loads(es_ret_str)

            if es_ret['errors']:
                logger.error('ES post unsuccessful, errors present, took=%sms', es_ret['took'])
                # Filter errors
                es_errors = [item for item in es_ret['items'] if item.get('index').get('error')]
                logger.error('List of items with errors: %s', json.dumps(es_errors))
            else:
                logger.info('ES post successful, took=%sms', es_ret['took'])
            break  # Sending to ES was ok, break retry loop
        except ES_Exception as e:
            if (e.status_code >= 500) and (e.status_code <= 599):
                retries += 1  # Candidate for retry
            else:
                raise  # Stop retrying, re-raise exception
# Extracts the DynamoDB table from an ARN
# ex: arn:aws:dynamodb:eu-west-1:123456789012:table/table-name/stream/2015-11-13T09:23:17.104 should return 'table-name'
def get_table_name_from_arn(arn):
    """Extract the DynamoDB table name from a table or stream ARN.

    e.g. 'arn:aws:dynamodb:eu-west-1:123456789012:table/table-name/stream/...'
    yields 'table-name'.
    """
    resource_part = arn.split(':')[5]
    return resource_part.split('/')[1]
# Compute a compound doc index from the key(s) of the object in lexicographic order: "k1=key_val1|k2=key_val2"
def compute_doc_index(keys_raw, deserializer):
    """Build a compound document id from the item's key attributes.

    Keys are joined in lexicographic order as "k1=v1|k2=v2", with each raw
    DynamoDB value decoded through *deserializer*.
    """
    parts = [
        '{}={}'.format(name, deserializer.deserialize(keys_raw[name]))
        for name in sorted(keys_raw)
    ]
    return '|'.join(parts)
def _lambda_handler(event, context):
    """Convert a batch of DynamoDB Stream (or replayed Kinesis) records into a
    single Elasticsearch bulk request and post it.

    Records from unknown event sources are skipped with an error log; items
    lacking NewImage on INSERT/MODIFY are skipped with a warning.
    """
    records = event['Records']
    now = datetime.datetime.utcnow()

    ddb_deserializer = StreamTypeDeserializer()
    es_actions = []  # Items to be added/updated/removed from ES - for bulk API
    cnt_insert = cnt_modify = cnt_remove = 0
    for record in records:
        # Handle both native DynamoDB Streams or Streams data from Kinesis (for manual replay)
        if record.get('eventSource') == 'aws:dynamodb':
            ddb = record['dynamodb']
            # NOTE(review): ddb_table_name is computed but never used below.
            ddb_table_name = get_table_name_from_arn(record['eventSourceARN'])
            doc_seq = ddb['SequenceNumber']
        elif record.get('eventSource') == 'aws:kinesis':
            # Replayed records carry the original stream payload base64-encoded.
            ddb = json.loads(base64.b64decode(record['kinesis']['data']))
            ddb_table_name = ddb['SourceTable']
            doc_seq = record['kinesis']['sequenceNumber']
        else:
            logger.error('Ignoring non-DynamoDB event sources: %s', record.get('eventSource'))
            continue

        # Compute DynamoDB table, type and index for item
        # NOTE(review): despite the *_FORMAT names, these are used as literal
        # strings here, not formatted with the table name.
        doc_table = DOC_TABLE_FORMAT  # Use formatter
        doc_type = DOC_TYPE_FORMAT  # Use formatter
        doc_index = compute_doc_index(ddb['Keys'], ddb_deserializer)

        # Dispatch according to event TYPE
        event_name = record['eventName'].upper()  # INSERT, MODIFY, REMOVE

        # Treat events from a Kinesis stream as INSERTs
        if event_name == 'AWS:KINESIS:RECORD':
            event_name = 'INSERT'

        # Update counters (currently only tallied, never reported).
        if event_name == 'INSERT':
            cnt_insert += 1
        elif event_name == 'MODIFY':
            cnt_modify += 1
        elif event_name == 'REMOVE':
            cnt_remove += 1
        else:
            logger.warning('Unsupported event_name: %s', event_name)

        # If DynamoDB INSERT or MODIFY, send 'index' to ES
        if (event_name == 'INSERT') or (event_name == 'MODIFY'):
            if 'NewImage' not in ddb:
                logger.warning('Cannot process stream if it does not contain NewImage')
                continue
            # Deserialize DynamoDB type to Python types
            doc_fields = ddb_deserializer.deserialize({'M': ddb['NewImage']})
            # Add metadata
            doc_fields['@timestamp'] = now.isoformat()
            doc_fields['@SequenceNumber'] = doc_seq

            # Generate JSON payload
            doc_json = json.dumps(doc_fields)

            # Generate ES payload for item (bulk API: action line + source line)
            action = {'index': {'_index': doc_table, '_type': doc_type, '_id': doc_index}}
            es_actions.append(json.dumps(action))  # Action line with 'index' directive
            es_actions.append(doc_json)  # Payload line

        # If DynamoDB REMOVE, send 'delete' to ES
        elif event_name == 'REMOVE':
            action = {'delete': {'_index': doc_table, '_type': doc_type, '_id': doc_index}}
            es_actions.append(json.dumps(action))

    # Prepare bulk payload
    es_actions.append('')  # Add one empty line to force final \n
    es_payload = '\n'.join(es_actions)

    post_to_es(es_payload)  # Post to ES with exponential backoff
# Global lambda handler - catches all exceptions to avoid dead letter in the DynamoDB Stream
def lambda_handler(event, context):
    """Lambda entry point.

    Deliberately swallows every exception (after logging the traceback) so a
    poison record never blocks or dead-letters the DynamoDB Stream.
    """
    try:
        return _lambda_handler(event, context)
    except Exception:
        logger.error(traceback.format_exc())
a2fcfb623573ab41ce92ae5c1872899437f2dad3 | 1,828 | py | Python | data/python/countRelativeWeights.py | turger/serious-spin | 8a10750c8a0fce3953ff89a4a89d0ca499b1c31d | [
"MIT"
] | 1 | 2019-10-20T18:37:07.000Z | 2019-10-20T18:37:07.000Z | data/python/countRelativeWeights.py | turger/serious-spin | 8a10750c8a0fce3953ff89a4a89d0ca499b1c31d | [
"MIT"
] | null | null | null | data/python/countRelativeWeights.py | turger/serious-spin | 8a10750c8a0fce3953ff89a4a89d0ca499b1c31d | [
"MIT"
] | null | null | null | import json, sys
from math import pow
# FILE HANDLING #
def writeJsonToFile(json_data, file_path):
    """Serialize json_data to file_path as JSON.

    Returns True on success; on any failure prints the error and returns False
    instead of raising.
    """
    try:
        with open(file_path, 'w') as handle:
            handle.write(json.dumps(json_data))
        return True
    except Exception as err:
        print(err)
        print('Failed to dump json to file ' + file_path)
        return False
def getJsonFromFile(file_path):
    """Load and return the JSON document stored at file_path.

    Returns the parsed object on success; on any failure prints the error and
    returns False instead of raising.
    """
    try:
        handle = open(file_path)
        try:
            return json.loads(handle.read())
        finally:
            handle.close()
    except Exception as err:
        print(err)
        print('Failed to get json from file ' + file_path)
        return False
# Require the fennica-all.json input path as the sole CLI argument.
if len(sys.argv) < 2:
    print("Usage: %s fennica-all.json"%sys.argv[0])
    sys.exit()

# Mapping of year -> {word: weight}, loaded at import/run time.
fennica_all = getJsonFromFile(sys.argv[1])

# NOTE(review): this constant is defined but the output filename below is
# hard-coded separately.
PATH_TO_FENNICA_ALL_JSON_FILE = './fennica-graph.json'
# DATA HANDLING #
def countMagicValue(this, mean, max):
    """Map a raw weight onto a 0-100 scale centred on the yearly mean.

    Values whose integer part equals the mean's score 50; values below/above
    the mean approach 0/100 asymptotically as they move away from it.
    Comparison is done on truncated (int) values, division on the floats.
    """
    this_i, mean_i = int(this), int(mean)
    if this_i < mean_i:
        ratio = 1 + (mean_i - this_i) / mean
        return int(50 - 50 * (1 - 1 / ratio))
    if this_i > mean_i:
        ratio = 1 + (this_i - mean_i) / (max - mean)
        return int(50 + 50 * (1 - 1 / ratio))
    # int(this) == int(mean): centre of the scale.
    return 50
def getMeanAndMaxOfYear(json_data, year):
    """Return (mean, max) of the word weights stored under *year*.

    The maximum starts from 0, matching the original behavior for
    non-negative weights. Raises ZeroDivisionError for an empty year.
    """
    weights = json_data[year].values()
    total = 0
    seen = 0
    largest = 0
    for weight in weights:
        seen += 1
        total += weight
        if weight > largest:
            largest = weight
    return float(total) / float(seen), float(largest)
def changeWordWeightsToRelativeOfMeanByYear(json_data, year):
    """Rescale every word weight of *year* in place onto the 0-100 scale."""
    mean, peak = getMeanAndMaxOfYear(json_data, year)
    year_words = json_data[year]
    for word in list(year_words):
        year_words[word] = countMagicValue(float(year_words[word]), mean, peak)
def changeWordWeightsToRelative(json_data):
    """Apply the per-year rescaling to every year and return the same dict."""
    for yr in json_data:
        changeWordWeightsToRelativeOfMeanByYear(json_data, yr)
    return json_data
# Rescale all weights in place and persist the result next to the script.
fennica_all_relative = changeWordWeightsToRelative(fennica_all)
writeJsonToFile(fennica_all_relative, 'fennica-graph.json')
| 26.492754 | 82 | 0.71116 |
1f89abeb08586de3715273f4a14705c875082653 | 3,951 | py | Python | aitextgen/utils.py | cdpierse/aitextgen | 64ca5234ba5a1e0136fc0a10ddbcc94226a51501 | [
"MIT"
] | 4 | 2020-07-10T09:42:35.000Z | 2020-09-27T17:19:49.000Z | aitextgen/utils.py | cdpierse/aitextgen | 64ca5234ba5a1e0136fc0a10ddbcc94226a51501 | [
"MIT"
] | 1 | 2020-10-01T20:44:13.000Z | 2020-10-05T17:50:04.000Z | aitextgen/utils.py | cdpierse/aitextgen | 64ca5234ba5a1e0136fc0a10ddbcc94226a51501 | [
"MIT"
] | null | null | null | import os
import requests
from tqdm.auto import tqdm
import torch
import numpy as np
import random
from transformers import GPT2Config
def download_gpt2(model_dir: str = "tf_model", model_name: str = "124M") -> None:
    """Download the TensorFlow GPT-2 checkpoint from Google Cloud Storage.

    Creates ``<model_dir>/<model_name>`` if needed and fetches each checkpoint
    file that is not already present. Substantially faster (and cheaper for
    HuggingFace) than the default model download when running on GCP, but the
    weights are TensorFlow-format and must be converted afterwards.

    Adapted from gpt-2-simple.
    """
    target_dir = os.path.join(model_dir, model_name)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Normalize separators so the path works in the download URL on Windows.
    target_dir = target_dir.replace("\\", "/")

    checkpoint_files = (
        "checkpoint",
        "hparams.json",
        "model.ckpt.data-00000-of-00001",
        "model.ckpt.index",
        "model.ckpt.meta",
    )
    for file_name in checkpoint_files:
        if os.path.isfile(os.path.join(target_dir, file_name)):
            continue  # already downloaded
        download_file_with_progress(
            url_base="https://storage.googleapis.com/gpt-2",
            sub_dir=target_dir,
            model_name=model_name,
            file_name=file_name,
        )
def download_file_with_progress(
    url_base: str, sub_dir: str, model_name: str, file_name: str
):
    """
    General utility for incrementally downloading files from the internet
    with progress bar.

    Streams ``<url_base>/models/<model_name>/<file_name>`` into
    ``<sub_dir>/<file_name>``.

    Adapted from gpt-2-simple.
    """

    # set to download 1MB at a time. This could be much larger with no issue
    DOWNLOAD_CHUNK_SIZE = 1024 * 1024
    # NOTE(review): os.path.join in a URL uses '\' on Windows — presumably
    # fine here because sub_dir was normalized by the caller; verify.
    r = requests.get(
        os.path.join(url_base, "models", model_name, file_name), stream=True
    )
    with open(os.path.join(sub_dir, file_name), "wb") as f:
        # Requires the server to send a content-length header.
        file_size = int(r.headers["content-length"])
        with tqdm(
            desc="Fetching " + file_name, total=file_size, unit_scale=True,
        ) as pbar:
            for chunk in r.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):
                f.write(chunk)
                # NOTE(review): updates by the full chunk size even for the
                # final partial chunk, so the bar can overshoot slightly.
                pbar.update(DOWNLOAD_CHUNK_SIZE)
def encode_text(text: str, tokenizer, device: str = "cpu"):
    """Tokenize *text* and return the ids as a 1 x n tensor on *device*."""
    token_ids = tokenizer.encode(text)
    ids_tensor = torch.tensor(token_ids, device=device)
    return ids_tensor.unsqueeze(0)
def set_seed(seed: int):
    """Seed every RNG used for generation: random, numpy, torch and CUDA."""
    assert isinstance(seed, int), "seed must be an integer."
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,  # no-op on CPU-only installs
    )
    for seeder in seeders:
        seeder(seed)
def reset_seed():
    """Re-seed random and numpy from OS entropy.

    The torch generators are intentionally left untouched (the original
    calls were disabled).
    """
    np.random.seed()
    random.seed()
    # torch.seed()
    # torch.cuda.seed_all()
def build_gpt2_config(
    vocab_size: int = 10000,
    bos_token_id: int = 0,
    eos_token_id: int = 0,
    max_length: int = 1024,
    dropout: float = 0.0,
    **kwargs
):
    """Build a custom GPT2Config with friendlier parameter aliases.

    *max_length* maps to both n_positions and n_ctx; *dropout* is applied to
    all dropout-type fields. Extra keyword args pass straight through to
    GPT2Config.
    """
    base = dict(
        vocab_size=vocab_size,
        n_positions=max_length,
        n_ctx=max_length,
        resid_pdrop=dropout,
        embd_pdrop=dropout,
        attn_pdrop=dropout,
        summary_first_dropout=dropout,
        bos_token_id=bos_token_id,
        eos_token_id=eos_token_id,
    )
    # Duplicate keys in kwargs raise TypeError, same as the original call.
    return GPT2Config(**base, **kwargs)
def GPT2ConfigCPU(
    vocab_size: int = 5000, bos_token_id: int = 0, eos_token_id: int = 0, **kwargs
):
    """
    Returns a GPT-2 config more suitable for training on a regular consumer CPU.

    Uses a deliberately tiny architecture: 64-token context, 128-dim
    embeddings, 4 layers and 4 attention heads.
    """
    return GPT2Config(
        vocab_size=vocab_size,
        n_positions=64,
        n_ctx=64,
        n_embd=128,
        n_layer=4,
        n_head=4,
        bos_token_id=bos_token_id,
        eos_token_id=eos_token_id,
        **kwargs,
    )
| 27.248276 | 82 | 0.638319 |
dcdebabbd1d5cb4fecc29188e3a59aae63816794 | 4,382 | py | Python | usr/share/pyshared/ajenti/middleware.py | lupyuen/RaspberryPiImage | 664e8a74b4628d710feab5582ef59b344b9ffddd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | usr/share/pyshared/ajenti/middleware.py | lupyuen/RaspberryPiImage | 664e8a74b4628d710feab5582ef59b344b9ffddd | [
"Apache-2.0"
] | null | null | null | usr/share/pyshared/ajenti/middleware.py | lupyuen/RaspberryPiImage | 664e8a74b4628d710feab5582ef59b344b9ffddd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | import hashlib
import time
import random
import gevent
import ajenti
from ajenti.api import *
from ajenti.cookies import Cookie, Cookies
from ajenti.plugins import manager
from ajenti.http import HttpHandler
from ajenti.users import UserManager
class Session(object):
    """In-memory store for one HTTP session's data and worker greenlets."""

    def __init__(self, manager, id):
        self.touch()
        self.id = id
        self.data = {}
        self.active = True
        self.manager = manager
        self.greenlets = []

    def destroy(self):
        """
        Marks this session as dead, kills its greenlets and asks the manager
        to reap it.
        """
        self.active = False
        for worker in self.greenlets:
            worker.kill()
        self.manager.vacuum()

    def touch(self):
        """
        Records the current time as the session's last-used timestamp.
        """
        self.timestamp = time.time()

    def spawn(self, *args, **kwargs):
        """
        Spawns a ``greenlet`` that will be stopped and garbage-collected when
        the session is destroyed.

        :params: Same as for :func:`gevent.spawn`
        """
        worker = gevent.spawn(*args, **kwargs)
        self.greenlets.append(worker)

    def is_dead(self):
        """True once destroyed or idle for more than one hour."""
        if not self.active:
            return True
        return (time.time() - self.timestamp) > 3600

    def set_cookie(self, context):
        """
        Adds headers to :class:`ajenti.http.HttpContext` that set the session
        cookie.
        """
        rendered = Cookie('session', self.id, path='/', httponly=True).render_response()
        context.add_header('Set-Cookie', rendered)
@plugin
@persistent
@rootcontext
class SessionMiddleware (HttpHandler):
    """HTTP handler that attaches a Session to every request context,
    creating one (and setting its cookie) when no live session matches the
    request's cookie."""

    def __init__(self):
        # session id -> Session
        self.sessions = {}

    def generate_session_id(self, context):
        # NOTE(review): random.random() is not cryptographically secure; a
        # hostile client observing ids could try to predict them. Consider
        # os.urandom/uuid4. Also sha1 over a str suggests Python 2 era code.
        hash = str(random.random())
        hash += context.env.get('REMOTE_ADDR', '')
        hash += context.env.get('REMOTE_HOST', '')
        hash += context.env.get('HTTP_USER_AGENT', '')
        hash += context.env.get('HTTP_HOST', '')
        return hashlib.sha1(hash).hexdigest()

    def vacuum(self):
        """
        Eliminates dead sessions
        """
        for session in [x for x in self.sessions.values() if x.is_dead()]:
            del self.sessions[session.id]

    def open_session(self, context):
        """
        Creates a new session for the :class:`ajenti.http.HttpContext`
        """
        session_id = self.generate_session_id(context)
        session = Session(self, session_id)
        self.sessions[session_id] = session
        return session

    def handle(self, context):
        # Drop expired sessions before matching the request's cookie.
        self.vacuum()
        cookie_str = context.env.get('HTTP_COOKIE', None)
        context.session = None
        if cookie_str:
            cookie = Cookies.from_request(
                cookie_str,
                ignore_bad_cookies=True,
            ).get('session', None)
            if cookie and cookie.value:
                if cookie.value in self.sessions:
                    # Session found
                    context.session = self.sessions[cookie.value]
                    if context.session.is_dead():
                        context.session = None
        if context.session is None:
            # No (live) session: start a fresh one and hand its cookie back.
            context.session = self.open_session(context)
            context.session.set_cookie(context)
        context.session.touch()
@plugin
@persistent
@rootcontext
class AuthenticationMiddleware (HttpHandler):
    """HTTP handler that maintains the session's authenticated identity and
    reports it back via X-Auth-* response headers."""

    def handle(self, context):
        if not hasattr(context.session, 'identity'):
            if ajenti.config.tree.authentication:
                # Authentication enabled: start anonymous.
                context.session.identity = None
            else:
                # Authentication disabled: everyone is implicitly root.
                context.session.identity = 'root'
                context.session.appcontext = AppContext(manager.context, context)

        if context.session.identity:
            context.add_header('X-Auth-Status', 'ok')
            context.add_header('X-Auth-Identity', str(context.session.identity))
        else:
            context.add_header('X-Auth-Status', 'none')

    def try_login(self, context, username, password, env=None):
        # Returns True and logs the user in when the credentials check out.
        if UserManager.get().check_password(username, password, env=env):
            self.login(context, username)
            return True
        return False

    def login(self, context, username):
        # Bind the identity and a fresh application context to the session.
        context.session.identity = username
        context.session.appcontext = AppContext(manager.context, context)

    def logout(self, context):
        context.session.identity = None
__all__ = ['Session', 'SessionMiddleware', 'AuthenticationMiddleware']
| 30.013699 | 111 | 0.60178 |
8d6c0732cbaa49ad1456361fb4e95e78eca40089 | 695 | py | Python | app.py | ap-t/yfinance-rest-api | df0f2d59a05637c9404740dc953bd546e5ee79bf | [
"MIT"
] | null | null | null | app.py | ap-t/yfinance-rest-api | df0f2d59a05637c9404740dc953bd546e5ee79bf | [
"MIT"
] | null | null | null | app.py | ap-t/yfinance-rest-api | df0f2d59a05637c9404740dc953bd546e5ee79bf | [
"MIT"
] | null | null | null | from flask import Flask
from flask_cors import CORS
from yfinancerestapi.home.routes import home
from yfinancerestapi.system.routes import system_api
from yfinancerestapi.finance.stocks.routes import stocks_api
from yfinancerestapi.finance.news.routes import news_api
def create_app():
    """Build the Flask application: enable CORS and mount all blueprints."""
    app = Flask(__name__)
    CORS(app)

    # Register blueprints (registration order preserved).
    blueprint_map = (
        (home, '/'),
        (system_api, '/api/v1/system'),
        (stocks_api, '/api/v1/finance/stocks'),
        (news_api, '/api/v1/finance/news'),
    )
    for blueprint, prefix in blueprint_map:
        app.register_blueprint(blueprint, url_prefix=prefix)

    return app
# Module-level app instance (used by WSGI servers importing this module).
app = create_app()

if __name__ == "__main__":
    # Development server only; use a WSGI server in production.
    app.run()
cfce448de4932fcdd0211b3f96ebcde12de01549 | 920 | py | Python | python/examples/_paramiko.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | null | null | null | python/examples/_paramiko.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | 1 | 2021-03-10T04:00:01.000Z | 2021-03-10T04:00:01.000Z | python/examples/_paramiko.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | null | null | null | import getpass
import paramiko
class SSHConnection(object):
    """Context manager that opens a paramiko SSH connection on entry and
    closes it on exit.

    Unknown host keys are auto-accepted (AutoAddPolicy), which is convenient
    but insecure on untrusted networks.
    """

    def __init__(self, host, username, password):
        self.host = host
        self.username = username
        self.password = password
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    def __enter__(self):
        self.ssh.connect(self.host,
                         username=self.username, password=self.password)
        return self.ssh

    def __exit__(self, exc_type, exc_value, traceback):
        # Bug fix: __exit__ must accept the exception triple; the previous
        # one-argument signature made every `with SSHConnection(...)` block
        # raise TypeError on exit. Returning None propagates any exception.
        self.ssh.close()
def hostname(host, username, password=None):
    """Run `hostname` on the remote host and print its stdout and stderr.

    If *password* is None the user is prompted at call time. (Previously the
    getpass prompt was a default-argument expression, so it fired once at
    import time; and the "error" loop re-iterated the already-exhausted
    stdout channel instead of reading stderr.)
    """
    if password is None:
        password = getpass.getpass("Enter pass: ")
    with SSHConnection(host, username, password) as ssh:
        stdin, stdout, stderr = ssh.exec_command('hostname')
        with stdout as out:
            for line in out:
                print(line)
        with stderr as error:
            for line in error:
                print(line)
hostname('localhost', '529567')
| 27.878788 | 72 | 0.619565 |
f08303f8b77e23cac257c905989876bf21a421ae | 1,359 | py | Python | ooobuild/dyn/drawing/x_layer_manager.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/drawing/x_layer_manager.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/drawing/x_layer_manager.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.drawing
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
# True only when running inside a live UNO/LibreOffice environment (and not
# during static type checking): then the real interface is imported.
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
    _DYNAMIC = True

if not TYPE_CHECKING and _DYNAMIC:
    # Runtime: use the real UNO interface and tag it with ooo metadata.
    from com.sun.star.drawing import XLayerManager as XLayerManager
    setattr(XLayerManager, '__ooo_ns__', 'com.sun.star.drawing')
    setattr(XLayerManager, '__ooo_full_ns__', 'com.sun.star.drawing.XLayerManager')
    setattr(XLayerManager, '__ooo_type_name__', 'interface')
else:
    # Static analysis / outside UNO: fall back to the generated stub.
    from ...lo.drawing.x_layer_manager import XLayerManager as XLayerManager

__all__ = ['XLayerManager']
| 36.72973 | 83 | 0.768212 |
fb2f11a3c6a7520d0d15251b9ada1b349595c07b | 56 | py | Python | robonet/video_prediction/testing/__init__.py | russellmendonca/RoboNet | de30fa069dacb2888e62bd239e7a3471ea3aaa9d | [
"MIT"
] | 140 | 2019-10-25T03:05:04.000Z | 2022-03-07T17:41:56.000Z | robonet/video_prediction/testing/__init__.py | russellmendonca/RoboNet | de30fa069dacb2888e62bd239e7a3471ea3aaa9d | [
"MIT"
] | 9 | 2019-12-22T20:52:47.000Z | 2022-02-22T07:56:43.000Z | robonet/video_prediction/testing/__init__.py | russellmendonca/RoboNet | de30fa069dacb2888e62bd239e7a3471ea3aaa9d | [
"MIT"
] | 26 | 2019-10-21T04:49:55.000Z | 2021-09-17T15:50:17.000Z | from .model_evaluation_interface import VPredEvaluation
| 28 | 55 | 0.910714 |
17603dd3de02f5cc4f2660c679c7a84203b650d5 | 2,473 | py | Python | bankruptcy/cases/migrations/0005_auto_20191201_0439.py | euirim/bankruptcy-db | 72f5eea8a78c7959845a4a21519ee2e4defd4be2 | [
"MIT"
] | 1 | 2021-01-04T20:26:56.000Z | 2021-01-04T20:26:56.000Z | bankruptcy/cases/migrations/0005_auto_20191201_0439.py | euirim/bankruptcy-db | 72f5eea8a78c7959845a4a21519ee2e4defd4be2 | [
"MIT"
] | 3 | 2021-03-09T23:52:42.000Z | 2022-02-10T20:17:24.000Z | bankruptcy/cases/migrations/0005_auto_20191201_0439.py | euirim/bankruptcy-db | 72f5eea8a78c7959845a4a21519ee2e4defd4be2 | [
"MIT"
] | 1 | 2021-01-06T04:52:21.000Z | 2021-01-06T04:52:21.000Z | # Generated by Django 2.2.6 on 2019-12-01 04:39
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
    """Auto-generated migration: adds the PersonTagged/OrgTagged through
    models and the corresponding taggit-managed people/organizations fields
    on Document. Do not hand-edit the operation bodies."""

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('taggit', '0003_taggeditem_add_unique_index'),
        ('cases', '0004_remove_case_preview'),
    ]

    operations = [
        # Through model linking tags to arbitrary objects for "people" tags.
        migrations.CreateModel(
            name='PersonTagged',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.IntegerField(db_index=True, verbose_name='Object id')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cases_persontagged_tagged_items', to='contenttypes.ContentType', verbose_name='Content type')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cases_persontagged_items', to='taggit.Tag')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Same shape for "organization" tags.
        migrations.CreateModel(
            name='OrgTagged',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.IntegerField(db_index=True, verbose_name='Object id')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cases_orgtagged_tagged_items', to='contenttypes.ContentType', verbose_name='Content type')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cases_orgtagged_items', to='taggit.Tag')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='document',
            name='organizations',
            field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', related_name='org_docs', through='cases.OrgTagged', to='taggit.Tag', verbose_name='Tags'),
        ),
        migrations.AddField(
            model_name='document',
            name='people',
            field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', related_name='person_docs', through='cases.PersonTagged', to='taggit.Tag', verbose_name='Tags'),
        ),
    ]
| 47.557692 | 205 | 0.632835 |
50f1443017b10a12d11ccde54f59c149c360b713 | 832 | py | Python | Python/BasicDS/LinkedListSet.py | i-love-linux/BasicDataStructure | 7853e14053d3425c836b6164cc1c78c9ee68213b | [
"MIT"
] | null | null | null | Python/BasicDS/LinkedListSet.py | i-love-linux/BasicDataStructure | 7853e14053d3425c836b6164cc1c78c9ee68213b | [
"MIT"
] | null | null | null | Python/BasicDS/LinkedListSet.py | i-love-linux/BasicDataStructure | 7853e14053d3425c836b6164cc1c78c9ee68213b | [
"MIT"
] | null | null | null | # coding=utf-8
# @Time : 2020/2/13
# @Author : Wang Xiaoxiao
# @University : Dalian University of Technology
# @FileName : LinkedListSet.py
# @Software : PyCharm
# @github : https://github.com/i-love-linux/BasicDataStructure
from BasicDS.LinkedList import LinkedList
class LinkedListSet:
    """Set implementation backed by a singly linked list.

    Average time complexity (n = number of elements):
        add: O(n), remove: O(n), contains: O(n)
    """

    def __init__(self):
        self.__list = LinkedList()

    def getSize(self):
        """Number of elements currently stored."""
        return self.__list.getSize()

    def isEmpty(self):
        """True when the set holds no elements."""
        return self.__list.isEmpty()

    def add(self, e):
        """Insert ``e`` unless it is already present (set semantics)."""
        if self.__list.contains(e):
            return
        self.__list.addLast(e)

    def contains(self, e):
        """Membership test via a linear scan of the backing list."""
        return self.__list.contains(e)

    def remove(self, e):
        """Delete ``e`` from the set if present."""
        self.__list.removeElement(e)
| 22.486486 | 66 | 0.600962 |
8c8f3cd2c9ff03898d82d36848578acd87312e1a | 255 | py | Python | covid/util/make_test_constants.py | grayfallstown/covid-blockchain | 194d5351c70d3ee5d928f767e21c7894cfbb59a7 | [
"Apache-2.0"
] | 14 | 2021-07-28T09:56:07.000Z | 2022-02-09T04:28:14.000Z | covid/util/make_test_constants.py | grayfallstown/covid-blockchain | 194d5351c70d3ee5d928f767e21c7894cfbb59a7 | [
"Apache-2.0"
] | 23 | 2021-07-28T10:16:56.000Z | 2022-03-26T10:43:53.000Z | covid/util/make_test_constants.py | grayfallstown/covid-blockchain | 194d5351c70d3ee5d928f767e21c7894cfbb59a7 | [
"Apache-2.0"
] | 9 | 2021-07-28T02:41:24.000Z | 2022-03-15T08:32:49.000Z | from typing import Dict
from covid.consensus.default_constants import DEFAULT_CONSTANTS, ConsensusConstants
def make_test_constants(test_constants_overrides: Dict) -> ConsensusConstants:
    """Return DEFAULT_CONSTANTS with the given fields overridden for tests."""
    overridden = DEFAULT_CONSTANTS.replace(**test_constants_overrides)
    return overridden
| 31.875 | 83 | 0.854902 |
89e093e6de97311051e91f1a3017f948907dd4fb | 71,223 | py | Python | tests/unit/gapic/dataproc_v1beta2/test_job_controller.py | stephaniewang526/python-dataproc | 66c7af157ca5f740ebfec95abb7267e361d855f6 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/dataproc_v1beta2/test_job_controller.py | stephaniewang526/python-dataproc | 66c7af157ca5f740ebfec95abb7267e361d855f6 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/dataproc_v1beta2/test_job_controller.py | stephaniewang526/python-dataproc | 66c7af157ca5f740ebfec95abb7267e361d855f6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async
from google.api_core import operations_v1
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dataproc_v1beta2.services.job_controller import (
JobControllerAsyncClient,
)
from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerClient
from google.cloud.dataproc_v1beta2.services.job_controller import pagers
from google.cloud.dataproc_v1beta2.services.job_controller import transports
from google.cloud.dataproc_v1beta2.types import jobs
from google.cloud.dataproc_v1beta2.types import jobs as gcd_jobs
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
def client_cert_source_callback():
    """Stand-in mTLS callback returning a fixed (cert, key) byte pair."""
    cert = b"cert bytes"
    key = b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return the client's default endpoint, replacing localhost defaults
    with a fake public hostname so mTLS-endpoint derivation can be tested."""
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint should insert '.mtls' into googleapis
    hostnames, pass through non-google domains, and be idempotent on
    already-mtls endpoints."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    # None stays None.
    assert JobControllerClient._get_default_mtls_endpoint(None) is None
    assert (
        JobControllerClient._get_default_mtls_endpoint(api_endpoint)
        == api_mtls_endpoint
    )
    assert (
        JobControllerClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        JobControllerClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        JobControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        JobControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
    )
@pytest.mark.parametrize(
    "client_class", [JobControllerClient, JobControllerAsyncClient]
)
def test_job_controller_client_from_service_account_file(client_class):
    """Both factory methods should load credentials from the given file and
    use the default dataproc endpoint."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client._transport._credentials == creds

        # from_service_account_json is an alias for the same code path.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client._transport._credentials == creds

        assert client._transport._host == "dataproc.googleapis.com:443"
def test_job_controller_client_get_transport_class():
    """gRPC is both the default transport and the one selected by name."""
    transport = JobControllerClient.get_transport_class()
    assert transport == transports.JobControllerGrpcTransport

    transport = JobControllerClient.get_transport_class("grpc")
    assert transport == transports.JobControllerGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"),
        (
            JobControllerAsyncClient,
            transports.JobControllerGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    JobControllerClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(JobControllerClient),
)
@mock.patch.object(
    JobControllerAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(JobControllerAsyncClient),
)
def test_job_controller_client_client_options(
    client_class, transport_class, transport_name
):
    """Exercise every client_options / GOOGLE_API_USE_MTLS combination.

    Each case patches the transport's ``__init__`` and asserts the exact
    keyword arguments the client constructor forwards to it (host /
    api_mtls_endpoint selection, client_cert_source, quota_project_id).
    The ``DEFAULT_ENDPOINT`` patches above make the endpoint predictable
    for both parametrized client classes.
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(JobControllerClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(JobControllerClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            api_mtls_endpoint="squid.clam.whelk",
            client_cert_source=None,
            quota_project_id=None,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                api_mtls_endpoint=client.DEFAULT_ENDPOINT,
                client_cert_source=None,
                quota_project_id=None,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
                client_cert_source=None,
                quota_project_id=None,
            )
    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
    # "auto", and client_cert_source is provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
                client_cert_source=client_cert_source_callback,
                quota_project_id=None,
            )
    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
    # "auto", and default_client_cert_source is provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_MTLS_ENDPOINT,
                    scopes=None,
                    api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
                    client_cert_source=None,
                    quota_project_id=None,
                )
    # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
    # "auto", but client_cert_source and default_client_cert_source are None.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    api_mtls_endpoint=client.DEFAULT_ENDPOINT,
                    client_cert_source=None,
                    quota_project_id=None,
                )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            api_mtls_endpoint=client.DEFAULT_ENDPOINT,
            client_cert_source=None,
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"),
        (
            JobControllerAsyncClient,
            transports.JobControllerGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_job_controller_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied through client_options are forwarded to the transport."""
    scoped_options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=scoped_options)
        # Everything except `scopes` keeps its default value.
        expected_kwargs = dict(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            api_mtls_endpoint=client.DEFAULT_ENDPOINT,
            client_cert_source=None,
            quota_project_id=None,
        )
        patched.assert_called_once_with(**expected_kwargs)
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"),
        (
            JobControllerAsyncClient,
            transports.JobControllerGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_job_controller_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials file named in client_options reaches the transport."""
    file_options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=file_options)
        # Only `credentials_file` deviates from the defaults.
        expected_kwargs = dict(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            api_mtls_endpoint=client.DEFAULT_ENDPOINT,
            client_cert_source=None,
            quota_project_id=None,
        )
        patched.assert_called_once_with(**expected_kwargs)
def test_job_controller_client_client_options_from_dict():
    """client_options may be given as a plain dict instead of ClientOptions."""
    transport_init_path = (
        "google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerGrpcTransport.__init__"
    )
    with mock.patch(transport_init_path) as grpc_transport:
        grpc_transport.return_value = None
        client = JobControllerClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        # The dict's api_endpoint must flow through exactly like a
        # ClientOptions instance would.
        expected_kwargs = dict(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            api_mtls_endpoint="squid.clam.whelk",
            client_cert_source=None,
            quota_project_id=None,
        )
        grpc_transport.assert_called_once_with(**expected_kwargs)
def test_submit_job(transport: str = "grpc", request_type=jobs.SubmitJobRequest):
    """submit_job forwards the request to the gRPC stub and returns a Job."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    with mock.patch.object(type(client._transport.submit_job), "__call__") as call:
        # Stub out the RPC with a fully-populated Job.
        call.return_value = jobs.Job(
            submitted_by="submitted_by_value",
            driver_output_resource_uri="driver_output_resource_uri_value",
            driver_control_files_uri="driver_control_files_uri_value",
            job_uuid="job_uuid_value",
            done=True,
            hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"),
        )
        response = client.submit_job(request)
    # Exactly one RPC, carrying an (empty) SubmitJobRequest.
    assert len(call.mock_calls) == 1
    _, call_args, _ = call.mock_calls[0]
    assert call_args[0] == jobs.SubmitJobRequest()
    # The stubbed Job round-trips unchanged.
    assert isinstance(response, jobs.Job)
    assert response.submitted_by == "submitted_by_value"
    assert response.driver_output_resource_uri == "driver_output_resource_uri_value"
    assert response.driver_control_files_uri == "driver_control_files_uri_value"
    assert response.job_uuid == "job_uuid_value"
    assert response.done is True


def test_submit_job_from_dict():
    # The same flow must accept a plain dict in place of the request proto.
    test_submit_job(request_type=dict)
@pytest.mark.asyncio
async def test_submit_job_async(transport: str = "grpc_asyncio"):
    """Async submit_job awaits the stub and returns the stubbed Job."""
    client = JobControllerAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request suffices: proto3 fields are optional, API is mocked.
    request = jobs.SubmitJobRequest()
    with mock.patch.object(
        type(client._client._transport.submit_job), "__call__"
    ) as call:
        # The async transport expects an awaitable wrapping the response.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            jobs.Job(
                submitted_by="submitted_by_value",
                driver_output_resource_uri="driver_output_resource_uri_value",
                driver_control_files_uri="driver_control_files_uri_value",
                job_uuid="job_uuid_value",
                done=True,
            )
        )
        response = await client.submit_job(request)
    # The stub saw exactly the request object we sent.
    assert len(call.mock_calls)
    _, call_args, _ = call.mock_calls[0]
    assert call_args[0] == request
    # The stubbed Job round-trips unchanged.
    assert isinstance(response, jobs.Job)
    assert response.submitted_by == "submitted_by_value"
    assert response.driver_output_resource_uri == "driver_output_resource_uri_value"
    assert response.driver_control_files_uri == "driver_control_files_uri_value"
    assert response.job_uuid == "job_uuid_value"
    assert response.done is True
def test_submit_job_flattened():
    """Flattened keyword arguments are packed into the request proto."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client._transport.submit_job), "__call__") as call:
        call.return_value = jobs.Job()
        # Invoke via flattened fields rather than a request object.
        client.submit_job(
            project_id="project_id_value",
            region="region_value",
            job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")),
        )
    # Each flattened kwarg must land on the corresponding request field.
    assert len(call.mock_calls) == 1
    _, call_args, _ = call.mock_calls[0]
    sent = call_args[0]
    assert sent.project_id == "project_id_value"
    assert sent.region == "region_value"
    assert sent.job == jobs.Job(
        reference=jobs.JobReference(project_id="project_id_value")
    )
def test_submit_job_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous and
    # therefore rejected.
    with pytest.raises(ValueError):
        client.submit_job(
            jobs.SubmitJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")),
        )
@pytest.mark.asyncio
async def test_submit_job_flattened_async():
    """Async: flattened kwargs are packed into the outgoing request proto."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.submit_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A dead
        # `call.return_value = jobs.Job()` that was immediately overwritten
        # by this awaitable has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.submit_job(
            project_id="project_id_value",
            region="region_value",
            job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")),
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0].project_id == "project_id_value"
    assert args[0].region == "region_value"
    assert args[0].job == jobs.Job(
        reference=jobs.JobReference(project_id="project_id_value")
    )
@pytest.mark.asyncio
async def test_submit_job_flattened_error_async():
    """Async: mixing a request object with flattened kwargs raises ValueError."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous and
    # therefore rejected.
    with pytest.raises(ValueError):
        await client.submit_job(
            jobs.SubmitJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")),
        )
def test_submit_job_as_operation(
    transport: str = "grpc", request_type=jobs.SubmitJobRequest
):
    """submit_job_as_operation wraps the RPC result in a long-running Future."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # An empty request is enough: proto3 fields are optional, API is mocked.
    request = request_type()
    with mock.patch.object(
        type(client._transport.submit_job_as_operation), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.submit_job_as_operation(request)
    # Exactly one stub invocation with an empty SubmitJobRequest.
    assert len(call.mock_calls) == 1
    _, call_args, _ = call.mock_calls[0]
    assert call_args[0] == jobs.SubmitJobRequest()
    # The operation is surfaced to the caller as a future.
    assert isinstance(response, future.Future)


def test_submit_job_as_operation_from_dict():
    # A plain dict must be accepted in place of the request proto.
    test_submit_job_as_operation(request_type=dict)
@pytest.mark.asyncio
async def test_submit_job_as_operation_async(transport: str = "grpc_asyncio"):
    """Async submit_job_as_operation yields a long-running Future."""
    client = JobControllerAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Empty request: proto3 fields are optional and the API is mocked.
    request = jobs.SubmitJobRequest()
    with mock.patch.object(
        type(client._client._transport.submit_job_as_operation), "__call__"
    ) as call:
        # The async transport expects an awaitable wrapping the Operation.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.submit_job_as_operation(request)
    # The stub saw exactly the request object we sent.
    assert len(call.mock_calls)
    _, call_args, _ = call.mock_calls[0]
    assert call_args[0] == request
    # The operation is surfaced to the caller as a future.
    assert isinstance(response, future.Future)
def test_submit_job_as_operation_flattened():
    """Flattened kwargs populate the request for the operation variant."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    with mock.patch.object(
        type(client._transport.submit_job_as_operation), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke via flattened fields rather than a request object.
        client.submit_job_as_operation(
            project_id="project_id_value",
            region="region_value",
            job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")),
        )
    # Each flattened kwarg must land on the corresponding request field.
    assert len(call.mock_calls) == 1
    _, call_args, _ = call.mock_calls[0]
    sent = call_args[0]
    assert sent.project_id == "project_id_value"
    assert sent.region == "region_value"
    assert sent.job == jobs.Job(
        reference=jobs.JobReference(project_id="project_id_value")
    )
def test_submit_job_as_operation_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous and
    # therefore rejected.
    with pytest.raises(ValueError):
        client.submit_job_as_operation(
            jobs.SubmitJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")),
        )
@pytest.mark.asyncio
async def test_submit_job_as_operation_flattened_async():
    """Async: flattened kwargs are packed into the outgoing request proto."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.submit_job_as_operation), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A dead
        # plain-Operation assignment that was immediately overwritten by
        # this awaitable has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.submit_job_as_operation(
            project_id="project_id_value",
            region="region_value",
            job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")),
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0].project_id == "project_id_value"
    assert args[0].region == "region_value"
    assert args[0].job == jobs.Job(
        reference=jobs.JobReference(project_id="project_id_value")
    )
@pytest.mark.asyncio
async def test_submit_job_as_operation_flattened_error_async():
    """Async: mixing a request object with flattened kwargs raises ValueError."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous and
    # therefore rejected.
    with pytest.raises(ValueError):
        await client.submit_job_as_operation(
            jobs.SubmitJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")),
        )
def test_get_job(transport: str = "grpc", request_type=jobs.GetJobRequest):
    """get_job forwards the request to the gRPC stub and returns the Job."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Empty request: proto3 fields are optional and the API is mocked.
    request = request_type()
    with mock.patch.object(type(client._transport.get_job), "__call__") as call:
        # Stub out the RPC with a fully-populated Job.
        call.return_value = jobs.Job(
            submitted_by="submitted_by_value",
            driver_output_resource_uri="driver_output_resource_uri_value",
            driver_control_files_uri="driver_control_files_uri_value",
            job_uuid="job_uuid_value",
            done=True,
            hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"),
        )
        response = client.get_job(request)
    # Exactly one RPC, carrying an (empty) GetJobRequest.
    assert len(call.mock_calls) == 1
    _, call_args, _ = call.mock_calls[0]
    assert call_args[0] == jobs.GetJobRequest()
    # The stubbed Job round-trips unchanged.
    assert isinstance(response, jobs.Job)
    assert response.submitted_by == "submitted_by_value"
    assert response.driver_output_resource_uri == "driver_output_resource_uri_value"
    assert response.driver_control_files_uri == "driver_control_files_uri_value"
    assert response.job_uuid == "job_uuid_value"
    assert response.done is True


def test_get_job_from_dict():
    # A plain dict must be accepted in place of the request proto.
    test_get_job(request_type=dict)
@pytest.mark.asyncio
async def test_get_job_async(transport: str = "grpc_asyncio"):
    """Async get_job awaits the stub and returns the stubbed Job."""
    client = JobControllerAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Empty request: proto3 fields are optional and the API is mocked.
    request = jobs.GetJobRequest()
    with mock.patch.object(type(client._client._transport.get_job), "__call__") as call:
        # The async transport expects an awaitable wrapping the response.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            jobs.Job(
                submitted_by="submitted_by_value",
                driver_output_resource_uri="driver_output_resource_uri_value",
                driver_control_files_uri="driver_control_files_uri_value",
                job_uuid="job_uuid_value",
                done=True,
            )
        )
        response = await client.get_job(request)
    # The stub saw exactly the request object we sent.
    assert len(call.mock_calls)
    _, call_args, _ = call.mock_calls[0]
    assert call_args[0] == request
    # The stubbed Job round-trips unchanged.
    assert isinstance(response, jobs.Job)
    assert response.submitted_by == "submitted_by_value"
    assert response.driver_output_resource_uri == "driver_output_resource_uri_value"
    assert response.driver_control_files_uri == "driver_control_files_uri_value"
    assert response.job_uuid == "job_uuid_value"
    assert response.done is True
def test_get_job_flattened():
    """Flattened kwargs (project, region, job_id) populate the request."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client._transport.get_job), "__call__") as call:
        call.return_value = jobs.Job()
        # Invoke via flattened fields rather than a request object.
        client.get_job(
            project_id="project_id_value", region="region_value", job_id="job_id_value",
        )
    # Each flattened kwarg must land on the corresponding request field.
    assert len(call.mock_calls) == 1
    _, call_args, _ = call.mock_calls[0]
    sent = call_args[0]
    assert sent.project_id == "project_id_value"
    assert sent.region == "region_value"
    assert sent.job_id == "job_id_value"
def test_get_job_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous and
    # therefore rejected.
    with pytest.raises(ValueError):
        client.get_job(
            jobs.GetJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job_id="job_id_value",
        )
@pytest.mark.asyncio
async def test_get_job_flattened_async():
    """Async: flattened kwargs are packed into the outgoing request proto."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._client._transport.get_job), "__call__") as call:
        # Designate an appropriate return value for the call.  (A dead
        # `call.return_value = jobs.Job()` that was immediately overwritten
        # by this awaitable has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_job(
            project_id="project_id_value", region="region_value", job_id="job_id_value",
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0].project_id == "project_id_value"
    assert args[0].region == "region_value"
    assert args[0].job_id == "job_id_value"
@pytest.mark.asyncio
async def test_get_job_flattened_error_async():
    """Async: mixing a request object with flattened kwargs raises ValueError."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous and
    # therefore rejected.
    with pytest.raises(ValueError):
        await client.get_job(
            jobs.GetJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job_id="job_id_value",
        )
def test_list_jobs(transport: str = "grpc", request_type=jobs.ListJobsRequest):
    """list_jobs forwards the request and wraps the response in a pager."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Empty request: proto3 fields are optional and the API is mocked.
    request = request_type()
    with mock.patch.object(type(client._transport.list_jobs), "__call__") as call:
        call.return_value = jobs.ListJobsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_jobs(request)
    # Exactly one stub invocation with an empty ListJobsRequest.
    assert len(call.mock_calls) == 1
    _, call_args, _ = call.mock_calls[0]
    assert call_args[0] == jobs.ListJobsRequest()
    # Responses are exposed through the synchronous pager.
    assert isinstance(response, pagers.ListJobsPager)
    assert response.next_page_token == "next_page_token_value"


def test_list_jobs_from_dict():
    # A plain dict must be accepted in place of the request proto.
    test_list_jobs(request_type=dict)
@pytest.mark.asyncio
async def test_list_jobs_async(transport: str = "grpc_asyncio"):
    """Async list_jobs returns an async pager wrapping the response."""
    client = JobControllerAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Empty request: proto3 fields are optional and the API is mocked.
    request = jobs.ListJobsRequest()
    with mock.patch.object(
        type(client._client._transport.list_jobs), "__call__"
    ) as call:
        # The async transport expects an awaitable wrapping the response.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            jobs.ListJobsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_jobs(request)
    # The stub saw exactly the request object we sent.
    assert len(call.mock_calls)
    _, call_args, _ = call.mock_calls[0]
    assert call_args[0] == request
    # Responses are exposed through the async pager.
    assert isinstance(response, pagers.ListJobsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_jobs_flattened():
    """Flattened kwargs (project, region, filter) populate the request."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client._transport.list_jobs), "__call__") as call:
        call.return_value = jobs.ListJobsResponse()
        # Invoke via flattened fields rather than a request object.
        client.list_jobs(
            project_id="project_id_value", region="region_value", filter="filter_value",
        )
    # Each flattened kwarg must land on the corresponding request field.
    assert len(call.mock_calls) == 1
    _, call_args, _ = call.mock_calls[0]
    sent = call_args[0]
    assert sent.project_id == "project_id_value"
    assert sent.region == "region_value"
    assert sent.filter == "filter_value"
def test_list_jobs_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous and
    # therefore rejected.
    with pytest.raises(ValueError):
        client.list_jobs(
            jobs.ListJobsRequest(),
            project_id="project_id_value",
            region="region_value",
            filter="filter_value",
        )
@pytest.mark.asyncio
async def test_list_jobs_flattened_async():
    """Async: flattened kwargs are packed into the outgoing request proto."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_jobs), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A dead
        # plain-ListJobsResponse assignment that was immediately overwritten
        # by this awaitable has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            jobs.ListJobsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_jobs(
            project_id="project_id_value", region="region_value", filter="filter_value",
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0].project_id == "project_id_value"
    assert args[0].region == "region_value"
    assert args[0].filter == "filter_value"
@pytest.mark.asyncio
async def test_list_jobs_flattened_error_async():
    """Async: mixing a request object with flattened kwargs raises ValueError."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Supplying both a request proto and flattened fields is ambiguous and
    # therefore rejected.
    with pytest.raises(ValueError):
        await client.list_jobs(
            jobs.ListJobsRequest(),
            project_id="project_id_value",
            region="region_value",
            filter="filter_value",
        )
def test_list_jobs_pager():
    """The sync pager walks all pages transparently, yielding every Job."""
    # Fix: pass a credentials *instance*, not the class object — consistent
    # with every other test in this file.
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.list_jobs), "__call__") as call:
        # Set the response to a series of pages (RuntimeError terminates the
        # side-effect sequence if the pager over-fetches).
        call.side_effect = (
            jobs.ListJobsResponse(
                jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc",
            ),
            jobs.ListJobsResponse(jobs=[], next_page_token="def",),
            jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",),
            jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],),
            RuntimeError,
        )
        metadata = ()
        pager = client.list_jobs(request={})
        assert pager._metadata == metadata
        # 3 + 0 + 1 + 2 jobs across the four pages.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, jobs.Job) for i in results)
def test_list_jobs_pages():
    """The sync pager exposes raw pages together with their page tokens."""
    # Fix: pass a credentials *instance*, not the class object — consistent
    # with every other test in this file.
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.list_jobs), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            jobs.ListJobsResponse(
                jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc",
            ),
            jobs.ListJobsResponse(jobs=[], next_page_token="def",),
            jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",),
            jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],),
            RuntimeError,
        )
        pages = list(client.list_jobs(request={}).pages)
        # The final page carries an empty token.
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_jobs_async_pager():
    """The async pager iterates Jobs transparently across all pages."""
    # Fix: pass a credentials *instance*, not the class object — consistent
    # with every other test in this file.
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_jobs),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            jobs.ListJobsResponse(
                jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc",
            ),
            jobs.ListJobsResponse(jobs=[], next_page_token="def",),
            jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",),
            jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],),
            RuntimeError,
        )
        async_pager = await client.list_jobs(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)

        # 3 + 0 + 1 + 2 jobs across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, jobs.Job) for i in responses)
@pytest.mark.asyncio
async def test_list_jobs_async_pages():
    """The async pager exposes raw pages together with their page tokens."""
    # Fix: pass a credentials *instance*, not the class object — consistent
    # with every other test in this file.
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_jobs),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            jobs.ListJobsResponse(
                jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc",
            ),
            jobs.ListJobsResponse(jobs=[], next_page_token="def",),
            jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",),
            jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],),
            RuntimeError,
        )
        pages = []
        async for page in (await client.list_jobs(request={})).pages:
            pages.append(page)
        # The final page carries an empty token.
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
def test_update_job(transport: str = "grpc", request_type=jobs.UpdateJobRequest):
    """update_job: the request is forwarded unchanged and the Job response is unpacked."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.update_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        # hadoop_job exercises the job_type oneof; it is not asserted below.
        call.return_value = jobs.Job(
            submitted_by="submitted_by_value",
            driver_output_resource_uri="driver_output_resource_uri_value",
            driver_control_files_uri="driver_control_files_uri_value",
            job_uuid="job_uuid_value",
            done=True,
            hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"),
        )
        response = client.update_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == jobs.UpdateJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, jobs.Job)
    assert response.submitted_by == "submitted_by_value"
    assert response.driver_output_resource_uri == "driver_output_resource_uri_value"
    assert response.driver_control_files_uri == "driver_control_files_uri_value"
    assert response.job_uuid == "job_uuid_value"
    assert response.done is True
def test_update_job_from_dict():
    """Same as test_update_job but with the request supplied as a plain dict."""
    test_update_job(request_type=dict)
@pytest.mark.asyncio
async def test_update_job_async(transport: str = "grpc_asyncio"):
    """Async variant of test_update_job over the grpc_asyncio transport."""
    client = JobControllerAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = jobs.UpdateJobRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.update_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            jobs.Job(
                submitted_by="submitted_by_value",
                driver_output_resource_uri="driver_output_resource_uri_value",
                driver_control_files_uri="driver_control_files_uri_value",
                job_uuid="job_uuid_value",
                done=True,
            )
        )
        response = await client.update_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, jobs.Job)
    assert response.submitted_by == "submitted_by_value"
    assert response.driver_output_resource_uri == "driver_output_resource_uri_value"
    assert response.driver_control_files_uri == "driver_control_files_uri_value"
    assert response.job_uuid == "job_uuid_value"
    assert response.done is True
def test_cancel_job(transport: str = "grpc", request_type=jobs.CancelJobRequest):
    """cancel_job: the request is forwarded unchanged and the Job response is unpacked."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.cancel_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        # hadoop_job exercises the job_type oneof; it is not asserted below.
        call.return_value = jobs.Job(
            submitted_by="submitted_by_value",
            driver_output_resource_uri="driver_output_resource_uri_value",
            driver_control_files_uri="driver_control_files_uri_value",
            job_uuid="job_uuid_value",
            done=True,
            hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"),
        )
        response = client.cancel_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == jobs.CancelJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, jobs.Job)
    assert response.submitted_by == "submitted_by_value"
    assert response.driver_output_resource_uri == "driver_output_resource_uri_value"
    assert response.driver_control_files_uri == "driver_control_files_uri_value"
    assert response.job_uuid == "job_uuid_value"
    assert response.done is True
def test_cancel_job_from_dict():
    """Same as test_cancel_job but with the request supplied as a plain dict."""
    test_cancel_job(request_type=dict)
@pytest.mark.asyncio
async def test_cancel_job_async(transport: str = "grpc_asyncio"):
    """Async variant of test_cancel_job over the grpc_asyncio transport."""
    client = JobControllerAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = jobs.CancelJobRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.cancel_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            jobs.Job(
                submitted_by="submitted_by_value",
                driver_output_resource_uri="driver_output_resource_uri_value",
                driver_control_files_uri="driver_control_files_uri_value",
                job_uuid="job_uuid_value",
                done=True,
            )
        )
        response = await client.cancel_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert isinstance(response, jobs.Job)
    assert response.submitted_by == "submitted_by_value"
    assert response.driver_output_resource_uri == "driver_output_resource_uri_value"
    assert response.driver_control_files_uri == "driver_control_files_uri_value"
    assert response.job_uuid == "job_uuid_value"
    assert response.done is True
def test_cancel_job_flattened():
    """Flattened kwargs must be packed into the CancelJobRequest sent to the stub."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.cancel_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = jobs.Job()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.cancel_job(
            project_id="project_id_value", region="region_value", job_id="job_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].project_id == "project_id_value"
        assert args[0].region == "region_value"
        assert args[0].job_id == "job_id_value"
def test_cancel_job_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.cancel_job(
            jobs.CancelJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job_id="job_id_value",
        )
@pytest.mark.asyncio
async def test_cancel_job_flattened_async():
    """Async: flattened kwargs must be packed into the CancelJobRequest sent to the stub."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.cancel_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # NOTE(fix): removed a dead `call.return_value = jobs.Job()` that was
        # immediately overwritten by the awaitable fake below.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.cancel_job(
            project_id="project_id_value", region="region_value", job_id="job_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].project_id == "project_id_value"
        assert args[0].region == "region_value"
        assert args[0].job_id == "job_id_value"
@pytest.mark.asyncio
async def test_cancel_job_flattened_error_async():
    """Async: mixing a request object with flattened kwargs must raise ValueError."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.cancel_job(
            jobs.CancelJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job_id="job_id_value",
        )
def test_delete_job(transport: str = "grpc", request_type=jobs.DeleteJobRequest):
    """delete_job: the request is forwarded unchanged and the empty response maps to None."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.delete_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == jobs.DeleteJobRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_job_from_dict():
    """Same as test_delete_job but with the request supplied as a plain dict."""
    test_delete_job(request_type=dict)
@pytest.mark.asyncio
async def test_delete_job_async(transport: str = "grpc_asyncio"):
    """Async variant of test_delete_job over the grpc_asyncio transport."""
    client = JobControllerAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = jobs.DeleteJobRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.delete_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_job_flattened():
    """Flattened kwargs must be packed into the DeleteJobRequest sent to the stub."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client._transport.delete_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_job(
            project_id="project_id_value", region="region_value", job_id="job_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].project_id == "project_id_value"
        assert args[0].region == "region_value"
        assert args[0].job_id == "job_id_value"
def test_delete_job_flattened_error():
    """Mixing a request object with flattened kwargs must raise ValueError."""
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_job(
            jobs.DeleteJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job_id="job_id_value",
        )
@pytest.mark.asyncio
async def test_delete_job_flattened_async():
    """Async: flattened kwargs must be packed into the DeleteJobRequest sent to the stub."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.delete_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # NOTE(fix): removed a dead `call.return_value = None` that was
        # immediately overwritten by the awaitable fake below.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_job(
            project_id="project_id_value", region="region_value", job_id="job_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].project_id == "project_id_value"
        assert args[0].region == "region_value"
        assert args[0].job_id == "job_id_value"
@pytest.mark.asyncio
async def test_delete_job_flattened_error_async():
    """Async: mixing a request object with flattened kwargs must raise ValueError."""
    client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_job(
            jobs.DeleteJobRequest(),
            project_id="project_id_value",
            region="region_value",
            job_id="job_id_value",
        )
def test_credentials_transport_error():
    """Supplying a transport instance together with credentials/credentials_file/scopes must fail."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.JobControllerGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = JobControllerClient(
            credentials=credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.JobControllerGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = JobControllerClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.JobControllerGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = JobControllerClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client instantiated with a custom transport must keep that exact instance."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.JobControllerGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    client = JobControllerClient(transport=transport)
    assert client._transport is transport
def test_transport_get_channel():
    """Both sync and asyncio transports must expose a usable grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.JobControllerGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.JobControllerGrpcAsyncIOTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
def test_transport_grpc_default():
    """When no transport is requested, the sync gRPC transport is the default."""
    # A client should use the gRPC transport by default.
    client = JobControllerClient(credentials=credentials.AnonymousCredentials(),)
    assert isinstance(client._transport, transports.JobControllerGrpcTransport,)
def test_job_controller_base_transport_error():
    """The base transport must reject credentials and credentials_file together."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(exceptions.DuplicateCredentialArgs):
        transport = transports.JobControllerTransport(
            credentials=credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_job_controller_base_transport():
    """Every RPC method and the LRO client on the abstract transport must raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.JobControllerTransport(
            credentials=credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "submit_job",
        "submit_job_as_operation",
        "get_job",
        "list_jobs",
        "update_job",
        "cancel_job",
        "delete_job",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_job_controller_base_transport_with_credentials_file():
    """A credentials file must be loaded with the cloud-platform scope and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        auth, "load_credentials_from_file"
    ) as load_creds, mock.patch(
        "google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (credentials.AnonymousCredentials(), None)
        transport = transports.JobControllerTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_job_controller_auth_adc():
    """With no explicit credentials the client must fall back to ADC with the cloud-platform scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        JobControllerClient()
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
def test_job_controller_transport_auth_adc():
    """Transport classes must also fall back to ADC when given only a host."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transports.JobControllerGrpcTransport(
            host="squid.clam.whelk", quota_project_id="octopus"
        )
        adc.assert_called_once_with(
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_job_controller_host_no_port():
    """An endpoint without a port must default to :443."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="dataproc.googleapis.com"
        ),
    )
    assert client._transport._host == "dataproc.googleapis.com:443"
def test_job_controller_host_with_port():
    """An explicitly supplied port must be preserved."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="dataproc.googleapis.com:8000"
        ),
    )
    assert client._transport._host == "dataproc.googleapis.com:8000"
def test_job_controller_grpc_transport_channel():
    """A pre-built channel must be used as-is; mTLS settings must be ignored."""
    channel = grpc.insecure_channel("http://localhost/")
    # Check that if channel is provided, mtls endpoint and client_cert_source
    # won't be used.
    callback = mock.MagicMock()
    transport = transports.JobControllerGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=callback,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert not callback.called
def test_job_controller_grpc_asyncio_transport_channel():
    """Asyncio counterpart: a pre-built channel wins over mTLS settings."""
    channel = aio.insecure_channel("http://localhost/")
    # Check that if channel is provided, mtls endpoint and client_cert_source
    # won't be used.
    callback = mock.MagicMock()
    transport = transports.JobControllerGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=callback,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert not callback.called
@mock.patch("grpc.ssl_channel_credentials", autospec=True)
@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
def test_job_controller_grpc_transport_channel_mtls_with_client_cert_source(
    grpc_create_channel, grpc_ssl_channel_cred
):
    """A client cert source plus mTLS endpoint must build an mTLS channel with those certs."""
    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
    # are provided, then a mTLS channel will be created.
    mock_cred = mock.Mock()
    mock_ssl_cred = mock.Mock()
    grpc_ssl_channel_cred.return_value = mock_ssl_cred
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    transport = transports.JobControllerGrpcTransport(
        host="squid.clam.whelk",
        credentials=mock_cred,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=client_cert_source_callback,
    )
    grpc_ssl_channel_cred.assert_called_once_with(
        certificate_chain=b"cert bytes", private_key=b"key bytes"
    )
    grpc_create_channel.assert_called_once_with(
        "mtls.squid.clam.whelk:443",
        credentials=mock_cred,
        credentials_file=None,
        scopes=("https://www.googleapis.com/auth/cloud-platform",),
        ssl_credentials=mock_ssl_cred,
        quota_project_id=None,
    )
    assert transport.grpc_channel == mock_grpc_channel
@mock.patch("grpc.ssl_channel_credentials", autospec=True)
@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
def test_job_controller_grpc_asyncio_transport_channel_mtls_with_client_cert_source(
    grpc_create_channel, grpc_ssl_channel_cred
):
    """Asyncio counterpart of the client-cert-source mTLS channel test."""
    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
    # are provided, then a mTLS channel will be created.
    mock_cred = mock.Mock()
    mock_ssl_cred = mock.Mock()
    grpc_ssl_channel_cred.return_value = mock_ssl_cred
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    transport = transports.JobControllerGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        credentials=mock_cred,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=client_cert_source_callback,
    )
    grpc_ssl_channel_cred.assert_called_once_with(
        certificate_chain=b"cert bytes", private_key=b"key bytes"
    )
    grpc_create_channel.assert_called_once_with(
        "mtls.squid.clam.whelk:443",
        credentials=mock_cred,
        credentials_file=None,
        scopes=("https://www.googleapis.com/auth/cloud-platform",),
        ssl_credentials=mock_ssl_cred,
        quota_project_id=None,
    )
    assert transport.grpc_channel == mock_grpc_channel
@pytest.mark.parametrize(
    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
)
@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
def test_job_controller_grpc_transport_channel_mtls_with_adc(
    grpc_create_channel, api_mtls_endpoint
):
    """With only an mTLS endpoint, SSL credentials must come from ADC."""
    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
    # is provided, then a mTLS channel will be created with SSL ADC.
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    # Mock google.auth.transport.grpc.SslCredentials class.
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        mock_cred = mock.Mock()
        transport = transports.JobControllerGrpcTransport(
            host="squid.clam.whelk",
            credentials=mock_cred,
            api_mtls_endpoint=api_mtls_endpoint,
            client_cert_source=None,
        )
        grpc_create_channel.assert_called_once_with(
            "mtls.squid.clam.whelk:443",
            credentials=mock_cred,
            credentials_file=None,
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            ssl_credentials=mock_ssl_cred,
            quota_project_id=None,
        )
        assert transport.grpc_channel == mock_grpc_channel
@pytest.mark.parametrize(
    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
)
@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
def test_job_controller_grpc_asyncio_transport_channel_mtls_with_adc(
    grpc_create_channel, api_mtls_endpoint
):
    """Asyncio counterpart of the SSL-ADC mTLS channel test."""
    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
    # is provided, then a mTLS channel will be created with SSL ADC.
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    # Mock google.auth.transport.grpc.SslCredentials class.
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        mock_cred = mock.Mock()
        transport = transports.JobControllerGrpcAsyncIOTransport(
            host="squid.clam.whelk",
            credentials=mock_cred,
            api_mtls_endpoint=api_mtls_endpoint,
            client_cert_source=None,
        )
        grpc_create_channel.assert_called_once_with(
            "mtls.squid.clam.whelk:443",
            credentials=mock_cred,
            credentials_file=None,
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
            ssl_credentials=mock_ssl_cred,
            quota_project_id=None,
        )
        assert transport.grpc_channel == mock_grpc_channel
def test_job_controller_grpc_lro_client():
    """The sync transport must lazily expose a cached OperationsClient."""
    client = JobControllerClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client._transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_job_controller_grpc_lro_async_client():
    """The asyncio transport must lazily expose a cached OperationsAsyncClient."""
    client = JobControllerAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client._client._transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
| 36.94139 | 120 | 0.689749 |
2d28c06c4cbb102f1cae1f21af9467cadf101ba7 | 996 | py | Python | lino_book/projects/min3/settings/__init__.py | khchine5/book | b6272d33d49d12335d25cf0a2660f7996680b1d1 | [
"BSD-2-Clause"
] | 1 | 2018-01-12T14:09:58.000Z | 2018-01-12T14:09:58.000Z | lino_book/projects/min3/settings/__init__.py | khchine5/book | b6272d33d49d12335d25cf0a2660f7996680b1d1 | [
"BSD-2-Clause"
] | 4 | 2018-02-06T19:53:10.000Z | 2019-08-01T21:47:44.000Z | lino_book/projects/min3/settings/__init__.py | khchine5/book | b6272d33d49d12335d25cf0a2660f7996680b1d1 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
# Copyright 2012-2017 Luc Saffre
# License: BSD (see file COPYING for details)
from lino.projects.std.settings import *
class Site(Site):
    # Site configuration for the "Lino Mini 3" demo project.
    title = "Lino Mini 3"
    demo_fixtures = 'std demo demo2'
    user_types_module = 'lino_xl.lib.xl.user_types'
    workflows_module = 'lino_xl.lib.cal.workflows.feedback'
    # NOTE(review): layouts_module points at a *workflows* module — presumably
    # intentional for this demo, but worth confirming against the Lino docs.
    layouts_module = 'lino_xl.lib.cal.workflows.feedback'
    use_experimental_features = True
    def setup_quicklinks(self, user, tb):
        """Add quick links for the Persons and Companies contact tables."""
        super(Site, self).setup_quicklinks(user, tb)
        tb.add_action(self.modules.contacts.Persons)
        tb.add_action(self.modules.contacts.Companies)
    def get_installed_apps(self):
        """Yield the plugins for this site, extending the parent's list."""
        yield super(Site, self).get_installed_apps()
        yield 'lino.modlib.system'
        yield 'lino.modlib.users'
        yield 'lino_book.projects.min3.lib.contacts'
        yield 'lino_xl.lib.cal'
        yield 'lino.modlib.export_excel'
        yield 'lino_xl.lib.phones'
        yield 'lino.modlib.comments'
110493cbbfe4c11f1fe73b331987a8afcf1074fe | 5,545 | py | Python | winstall.py | guilhermemaas/glassfish-winstall | 3373a2e4a91613c3fda6553b9ef2049d22d8e976 | [
"MIT"
] | 1 | 2020-04-23T19:20:18.000Z | 2020-04-23T19:20:18.000Z | winstall.py | guilhermemaas/glassfish-winstall | 3373a2e4a91613c3fda6553b9ef2049d22d8e976 | [
"MIT"
] | null | null | null | winstall.py | guilhermemaas/glassfish-winstall | 3373a2e4a91613c3fda6553b9ef2049d22d8e976 | [
"MIT"
] | null | null | null | import os
import urllib.request
import zipfile
import subprocess
from random import randint
import socket
import shutil
from print_g4wi import print_g4wi
from time import sleep
def tcp_port_check(ip: str, port: int) -> bool:
    """Return True if a TCP connection to (ip, port) succeeds, else False.

    The socket is always closed (via the context manager), and only
    socket-level errors are caught — the original bare ``except`` also
    swallowed unrelated bugs and leaked the socket on failure.
    """
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.connect((ip, int(port)))
            sock.shutdown(socket.SHUT_RDWR)
        return True
    except OSError:
        return False
def java_check() -> bool:
    """Return True when the installed ``java -version`` reports Java 1.7 or 1.8.

    Fixes in this revision:
    - the original tested ``'f{version_string}7' or ... in java_version`` — a
      non-empty plain string (the ``f`` prefix was missing), so the whole
      expression was always True regardless of the installed Java;
    - ``java -version`` prints to *stderr*, so stderr is now captured too;
    - check_output returns bytes, which are decoded before the substring test;
    - a missing ``java`` binary now yields False instead of raising.
    """
    try:
        output = subprocess.check_output(
            'java -version', shell=True, stderr=subprocess.STDOUT
        ).decode(errors='replace')
    except (subprocess.CalledProcessError, OSError):
        return False
    return 'version "1.7' in output or 'version "1.8' in output
def random_int() -> int:
    """Return a pseudo-random integer drawn uniformly from 1..99 inclusive."""
    low, high = 1, 99
    return randint(low, high)
def install_ID() -> str:
    """Return a fresh installation ID whose target directory does not exist yet.

    NOTE(fix): the original probed ``C:\\glassish{id}`` (typo) while the
    installer actually creates ``C:\\glassfish{id}``, so the collision check
    looked at the wrong directory.
    """
    id = random_int()
    while os.path.isdir(f'C:\\glassfish{id}'):
        id = random_int()
    return str(id)
def create_dir(path: str) -> bool:
    """Create *path* and return True on success, False on failure.

    NOTE(fix): the original returned None (falsy, but not the annotated bool)
    on failure and caught every Exception; OSError covers all mkdir failures.
    """
    try:
        os.mkdir(path)
        return True
    except OSError as err:
        print('Problem with creating directory: ', str(err))
        return False
def remove_dir(path: str) -> None:
    """Recursively delete *path*, reporting (not raising) any failure."""
    try:
        shutil.rmtree(path)
    except Exception as exc:
        message = str(exc)
        print('Error to remove directory.', message)
def download_glassfish(path: str, url: str) -> None:
    """Download GlassFish from Oracle official link."""
    # Saves the archive as <path>\glassfish-4.0.zip; failures are printed,
    # never raised, so callers must check the file exists before unpacking.
    try:
        urllib.request.urlretrieve(url, f'{path}\\glassfish-4.0.zip')
    except Exception as err:
        print('Problem with download Glassfish: ', str(err))
def descompact_zip(file_path: str, dest_path: str) -> None:
    """Extract the GlassFish .zip archive at *file_path* into *dest_path*.

    NOTE(fix): the original never closed the ZipFile; the ``with`` block
    guarantees the handle is released even when extraction fails.
    """
    try:
        with zipfile.ZipFile(file_path) as zip_file:
            zip_file.extractall(dest_path)
    except Exception as err:
        print('Error unzipping Glassfish: ', str(err))
def glassfish_create_service(asadmin_dir: str, asadmin_params: str) -> None:
    """Create GlassFish Windows Service(services.msc)."""
    # SECURITY NOTE(review): shell=True with interpolated paths is
    # shell-injection prone; both arguments are program-generated here, but
    # this should be revisited if they ever come from user input.
    subprocess.call(rf'{asadmin_dir} {asadmin_params}', shell=True)
def rename_windows_service_display(install_id: str) -> None:
    """Change the display name of the service shown in services.msc.

    NOTE(fix): build the command once so the echoed line matches exactly what
    is executed — the original printed a command with different spacing and a
    different display name ("GlassFish ID_x") than the one it actually ran
    ("GlassFish_ID_x").
    """
    command = f'sc config GlassFish_{install_id} DisplayName= "GlassFish_ID_{install_id}"'
    print(command)
    subprocess.call(command, shell=True)
def print_line() -> None:
    """Print a 100-character separator line.

    NOTE(fix): the original annotation said ``-> str`` but the function
    returns None (print's result is discarded; there is no return statement).
    """
    print('=' * 100)
#Preparing variables to install
# NOTE(review): this rebinding shadows the install_ID() function above — the
# function can no longer be called after this line. Works, but fragile.
install_ID = install_ID()
asadmin_params = f'create-service --name Glassfish_{install_ID}'
install_path = f'C:\\glassfish{install_ID}'
url = 'http://download.oracle.com/glassfish/4.0/release/glassfish-4.0.zip'
download_dir = f'{install_path}\\download'
descompact_file = f'{download_dir}\\glassfish-4.0.zip'
asadmin_dir = f'{install_path}\\glassfish4\\bin\\asadmin.bat'
tcp_port = 4848
ip = '127.0.0.1'
# Banner + installation summary printed at import time, before main() runs.
print_g4wi()
sleep(1)
print_line()
print(f'Install directory: {install_path}.')
print(f'Download URL: {url}.')
print(f'Download path: {download_dir}.')
print(f'Installation ID: {install_ID}.')
sleep(1)
print_line()
#Running functions:
def main() -> None:
    """Drive the full install: create the target dir, check prerequisites,
    download and unpack GlassFish, register the Windows service and report.

    Rewritten with guard clauses (early returns) instead of the original
    four-level nested ifs; the sequence of printed messages is preserved.
    """
    create_dir(install_path)
    if not os.path.isdir(install_path):
        print_line()
        print('Installation Error.')
        print_line()
        return
    print('Checking if Java 1.8 or 1.7 is installed...')
    if not java_check():
        print_line()
        print('Java not installed correctly. Reinstall or check JAVA_HOME environment variable.')
        print_line()
        return
    print(f'Verifying if port {tcp_port} is in use on {ip}...')
    if tcp_port_check(ip, tcp_port):
        print_line()
        print(f'TCP port {tcp_port} is not avaible in {ip}. Verify if any program or older Glassfish is using.')
        print_line()
        return
    # Happy path: install dir created, Java present, admin port free.
    print('TCP port is not in use... OK')
    print('Java version... OK')
    print(f'Directory created sucessfuly:{install_path}...')
    print('Creating download directory...')
    create_dir(download_dir)
    print(f'Download directory created sucessfuly: {download_dir}...')
    print('Starting GlassFish4 download...')
    download_glassfish(download_dir, url)
    print(f'Downloaded in: {download_dir}...\n Unpacking .zip...')
    descompact_zip(descompact_file, install_path)
    print(f'.zip unpacked: {descompact_file}...')
    print(f'Creating Windows Service... ')
    glassfish_create_service(asadmin_dir, asadmin_params)
    print(f'Changing service name to GlassFish ID_{install_ID}.')
    rename_windows_service_display(install_ID)
    print(f'Removing download directory: {download_dir}.')
    remove_dir(download_dir)
    print_line()
    print(f"""
            Finished! Glassfish4 is installed!
            Installation information for deploy:
            - Glassfish Admin Port: {tcp_port}.
            - GlassFish HTTP Listner-1: 8080.
            - Glassfish HTTP Listner-2: 8181.
            - JVM Options:
                - XX:MaxPermaSize=192mb.
                - Xmx512mb.
            """)
    print_line()
if __name__ == "__main__":
main()
| 33.403614 | 120 | 0.62615 |
441c8cc7994ce973fff77dd3e968670a91de9360 | 105 | py | Python | lib/routes.py | eniehack/CalAni | 2b0e9fd98cd4b9605c5318adb1a9696283213d3e | [
"MIT"
] | null | null | null | lib/routes.py | eniehack/CalAni | 2b0e9fd98cd4b9605c5318adb1a9696283213d3e | [
"MIT"
] | null | null | null | lib/routes.py | eniehack/CalAni | 2b0e9fd98cd4b9605c5318adb1a9696283213d3e | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return "Hello, World!"
| 11.666667 | 26 | 0.647619 |
7b302e56bedfab507cc5e59c8ceb549e3b73e48e | 4,928 | py | Python | custom/icds/tests/tasks/test_setup_ccz_file_for_hosting.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | 1 | 2020-07-14T13:00:23.000Z | 2020-07-14T13:00:23.000Z | custom/icds/tests/tasks/test_setup_ccz_file_for_hosting.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | 1 | 2021-06-02T04:45:16.000Z | 2021-06-02T04:45:16.000Z | custom/icds/tests/tasks/test_setup_ccz_file_for_hosting.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | null | null | null | import mock
from django.template.defaultfilters import linebreaksbr
from django.test import SimpleTestCase
from custom.icds.tasks.hosted_ccz import setup_ccz_file_for_hosting
from custom.icds.models import (
HostedCCZ,
HostedCCZLink,
)
@mock.patch('custom.icds.tasks.hosted_ccz.open')
@mock.patch('custom.icds.tasks.hosted_ccz.wrap_app')
@mock.patch('custom.icds.tasks.hosted_ccz.get_build_doc_by_version')
@mock.patch('custom.icds.tasks.hosted_ccz.create_files_for_ccz')
@mock.patch('custom.icds.tasks.hosted_ccz.HostedCCZ.objects.get')
@mock.patch('custom.icds.models.HostedCCZUtility')
@mock.patch('custom.icds.tasks.hosted_ccz.HostedCCZ.update_status')
class TestSetUpCCZFileForHosting(SimpleTestCase):
    """Unit tests for the setup_ccz_file_for_hosting task.

    The class decorators patch the task's collaborators; mock.patch applies
    bottom-up, so each test method receives the mocks in reverse decorator
    order: update_status, ccz_utility, objects.get, create_files_for_ccz,
    get_build_doc_by_version, wrap_app, open (unneeded trailing mocks are
    absorbed by *_).
    """
    def setUp(self):
        """Build an in-memory HostedCCZ (not saved — SimpleTestCase has no DB)."""
        super(TestSetUpCCZFileForHosting, self).setUp()
        self.link = HostedCCZLink(username="username", password="password", identifier="link1234", domain="test")
        self.hosted_ccz = HostedCCZ(link=self.link, app_id="dummy", version=12, profile_id="123456")
    def test_hosting_not_present(self, mock_update_status, mock_ccz_utility, mock_get, *_):
        """If the HostedCCZ record is missing, the task exits without touching storage or status."""
        mock_result = mock.MagicMock()
        mock_result.return_value = True
        mock_ccz_utility.return_value.file_exists = mock_result
        # Simulate the lookup failing inside the task.
        mock_get.side_effect = HostedCCZ.DoesNotExist
        setup_ccz_file_for_hosting(3)
        self.assertFalse(mock_result.called)
        self.assertFalse(mock_update_status.called)
    def test_ccz_already_present(self, mock_update_status, mock_ccz_utility, mock_get, mock_create_ccz, *_):
        """If the CCZ blob already exists, no new CCZ is built but status still completes."""
        mock_result = mock.MagicMock()
        mock_result.return_value = True
        mock_ccz_utility.return_value.file_exists = mock_result
        mock_get.return_value = self.hosted_ccz
        mock_result.return_value = True
        setup_ccz_file_for_hosting(3)
        self.assertTrue(mock_result.called)
        self.assertFalse(mock_create_ccz.called)
        # Status must move through 'building' before 'completed', in order.
        calls = [mock.call('building'), mock.call('completed')]
        mock_update_status.assert_has_calls(calls, any_order=False)
    def test_ccz_not_already_present(self, mock_update_status, mock_ccz_utility, mock_get, mock_create_ccz,
                                     mock_get_build, *_):
        """If no CCZ blob exists, the task fetches the build, creates files and stores the blob."""
        mock_get.return_value = self.hosted_ccz
        mock_result = mock.MagicMock()
        mock_result.return_value = False
        mock_ccz_utility.return_value.file_exists = mock_result
        setup_ccz_file_for_hosting(3)
        self.assertTrue(mock_result.called)
        mock_get_build.assert_called_with(self.hosted_ccz.domain, self.hosted_ccz.app_id,
                                          self.hosted_ccz.version)
        self.assertTrue(mock_create_ccz.called)
        self.assertTrue(mock_ccz_utility.return_value.store_file_in_blobdb.called)
        calls = [mock.call('building'), mock.call('completed')]
        mock_update_status.assert_has_calls(calls, any_order=False)
    @mock.patch('custom.icds.tasks.hosted_ccz.send_html_email_async.delay')
    def test_ccz_creation_fails(self, mock_email, mock_update_status, mock_ccz_utility, mock_get, mock_create_ccz,
                                mock_get_build, mock_wrapped_app, *_):
        """If storing the blob raises, the task re-raises, marks 'failed', cleans up and emails."""
        mock_wrapped_app.return_value.name = "My App"
        mock_get.return_value = self.hosted_ccz
        mock_result = mock.MagicMock()
        mock_result.return_value = False
        mock_ccz_utility.return_value.file_exists = mock_result
        mock_delete_ccz = mock.MagicMock()
        self.hosted_ccz.delete_ccz = mock_delete_ccz
        mock_delete_ccz.return_value = True
        mock_store = mock.MagicMock()
        mock_ccz_utility.return_value.store_file_in_blobdb = mock_store
        # Force the store step to blow up so the error path runs.
        mock_store.side_effect = Exception("Fail hard!")
        with self.assertRaisesMessage(Exception, "Fail hard!"):
            setup_ccz_file_for_hosting(3, user_email="batman@gotham.com")
        mock_get_build.assert_called_with(self.hosted_ccz.domain, self.hosted_ccz.app_id,
                                          self.hosted_ccz.version)
        self.assertTrue(mock_create_ccz.called)
        self.assertTrue(mock_ccz_utility.return_value.store_file_in_blobdb.called)
        calls = [mock.call('building'), mock.call('failed')]
        mock_update_status.assert_has_calls(calls, any_order=False)
        self.assertTrue(mock_delete_ccz.called)
        # Expected body of the failure notification email.
        content = "Hi,\n" \
                  "CCZ could not be created for the following request:\n" \
                  "App: {app}\n" \
                  "Version: {version}\n" \
                  "Profile: {profile}\n" \
                  "Link: {link}" \
                  "".format(app="My App", version=self.hosted_ccz.version, profile=None,
                            link=self.hosted_ccz.link.identifier)
        mock_email.assert_called_with(
            "CCZ Hosting setup failed for app My App in project test",
            "batman@gotham.com",
            linebreaksbr(content)
        )
| 47.384615 | 114 | 0.692776 |
5fd2b8c78c28e0a892f95db402546b3343914d00 | 1,601 | py | Python | sdk/python/pulumi_azure_nextgen/containerregistry/v20170601preview/__init__.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/containerregistry/v20170601preview/__init__.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/containerregistry/v20170601preview/__init__.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .get_registry import *
from .get_replication import *
from .get_webhook import *
from .get_webhook_callback_config import *
from .list_registry_credentials import *
from .list_webhook_events import *
from .registry import *
from .replication import *
from .webhook import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this package's resource types with the Pulumi runtime.

    Pulumi uses the registered ResourceModule to rehydrate resources from
    their URNs (e.g. during `pulumi refresh`).
    """
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Map fully-qualified type tokens to their resource classes.
            prefix = "azure-nextgen:containerregistry/v20170601preview:"
            constructors = {
                prefix + "Registry": Registry,
                prefix + "Replication": Replication,
                prefix + "Webhook": Webhook,
            }
            if typ not in constructors:
                raise Exception(f"unknown resource type {typ}")
            return constructors[typ](name, pulumi.ResourceOptions(urn=urn))

    pulumi.runtime.register_resource_module("azure-nextgen", "containerregistry/v20170601preview", Module())
_register_module()
| 35.577778 | 116 | 0.70331 |
89e99e552116eeb901256ab66f9a16e4dbd2b3df | 22,240 | py | Python | ah.py | robertpfeiffer/ah-game | 67be00df067fb166cc26507040ab490db7e71c77 | [
"BSD-3-Clause"
] | null | null | null | ah.py | robertpfeiffer/ah-game | 67be00df067fb166cc26507040ab490db7e71c77 | [
"BSD-3-Clause"
] | null | null | null | ah.py | robertpfeiffer/ah-game | 67be00df067fb166cc26507040ab490db7e71c77 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2020 Jani Tiainen
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import random
import json
import pygame as pg
import ptext
import sys
try:
import android
import android.storage
except:
android=None
from constants import SCREEN_WIDTH, SCREEN_HEIGHT, FPS, TITLE_SCREEN, GAME, GAME_OVER, GAME_COUNTDOWN, GAME_AREA, BLACK, \
AMBER, FONT_NAME, SONGS, GAME_OVER_SONG, HIGH_SCORES, HIGHSCORE_SCROLL_TOP_Y, HIGHSCORE_SCROLL_HEIGHT
# Initialize Pygame
pg.init()
pg.display.set_icon(pg.image.load("gfx/window-icon.png"))
# Scaled fullscreen keeps the fixed logical resolution on any display.
screen = pg.display.set_mode(
    (SCREEN_WIDTH, SCREEN_HEIGHT), pg.FULLSCREEN | pg.SCALED
)
# Show the icon as a splash while the rest of the assets load.
screen.fill(0)
screen.blit(pg.image.load("gfx/window-icon.png"), (0,0))
pg.display.flip()
ptext.DEFAULT_FONT_NAME = FONT_NAME
# Sound effects, keyed by event name.
SOUNDS = {
    "pick": pg.mixer.Sound("sfx/pick.ogg"),
    "bubble": pg.mixer.Sound("sfx/bubble.ogg"),
    "end": pg.mixer.Sound("sfx/end.ogg"),
    "player": pg.mixer.Sound("sfx/player.ogg"),
}
# Custom event posted when a music track finishes.
END_MUSIC = pg.USEREVENT + 2
# Sprite for regular score bubbles.
BUBBLE_IMAGE = pg.image.load("gfx/normal_ball.png").convert_alpha()
BUBBLE_IMAGE.set_colorkey(BLACK)
# Sprite for the time-bonus powerup bubble.
SPECIAL_IMAGE = pg.image.load("gfx/special.png").convert_alpha()
SPECIAL_IMAGE.set_colorkey(BLACK)
def vec_to_int(vector):
    """Return *vector*'s components truncated to ints, as a tuple."""
    return tuple(int(component) for component in vector)
class Bubble:
    """A spinning pickup that shrinks away over its lifetime."""
    def __init__(self, pos, lifetime):
        self.lifetime = lifetime
        self.liferemaining = lifetime
        self.image = BUBBLE_IMAGE
        self.rect = self.image.get_rect(center=pos)
        self.scaled_rect = self.rect.copy()
        # Keep these two random draws in this order so the RNG stream
        # matches the rest of the game.
        self.angle = random.uniform(0, 360)
        self.rotation = random.uniform(-2.0, 2.0)
    def update(self, delta_time):
        """Spin and age the bubble; returns False once it has expired."""
        self.angle += self.rotation
        self.liferemaining -= delta_time
        return self.liferemaining > 0
    def draw(self, surface):
        """Blit the bubble rotated and shrunk in proportion to remaining life."""
        shrink = self.liferemaining / self.lifetime
        rendered = pg.transform.rotozoom(self.image, self.angle, shrink)
        self.scaled_rect = rendered.get_rect(center=self.rect.center)
        surface.blit(rendered, self.scaled_rect)
    def play_sound(self):
        """Play the pickup sound effect."""
        SOUNDS["pick"].play()
    def do_action(self, context):
        """Award points: up to 20 for a fresh bubble, never less than 1."""
        freshness_bonus = int(self.liferemaining / self.lifetime * 20)
        context.score += max(freshness_bonus, 1)
    def check_collision(self, rect):
        """True when *rect* overlaps the bubble as it was last drawn."""
        return self.scaled_rect.colliderect(rect)
class Powerup(Bubble):
    """A special bubble that grants extra play time instead of points."""
    def __init__(self, pos, lifetime):
        super().__init__(pos, lifetime)
        self.image = SPECIAL_IMAGE
    def play_sound(self):
        """Same pickup sound as a normal bubble."""
        SOUNDS["pick"].play()
    def do_action(self, context):
        """Extend the clock by a random 5-15 seconds (time kept in ms)."""
        extra_seconds = random.randint(5, 15)
        context.time_remaining += extra_seconds * 1000
class Player:
    """The player's worm: a head segment trailed by four shrinking segments."""
    # Allowed distance range (pixels) between consecutive segments.
    MIN_DIST = 10
    MAX_DIST = 20
    def __init__(self):
        # Segment sprites, head (largest) first.
        self.images = [
            pg.image.load("gfx/slimeball_100.png").convert_alpha(),
            pg.image.load("gfx/slimeball_80.png").convert_alpha(),
            pg.image.load("gfx/slimeball_64.png").convert_alpha(),
            pg.image.load("gfx/slimeball_51.png").convert_alpha(),
            pg.image.load("gfx/slimeball_40.png").convert_alpha(),
        ]
        # One position per segment; pos[0] is the head.
        self.pos = [pg.Vector2() for _ in range(5)]
        self.vec = pg.Vector2()
        self.vec_dt = pg.Vector2()
    @property
    def rect(self):
        """Collision rectangle of the head segment."""
        return self.images[0].get_rect(center=(vec_to_int(self.pos[0])))
    def set_pos(self, x, y):
        """Place the head at (x, y) and lay the tail out to the right of it."""
        self.pos[0].xy = (x, y)
        self.pos[1].xy = (x + 18, y)
        self.pos[2].xy = (x + 28, y)
        self.pos[3].xy = (x + 38, y)
        self.pos[4].xy = (x + 48, y)
    def update(self, target_vec, speed):
        """Advance the head along *target_vec*, bounce off the play-field
        walls, then drag the tail segments behind the head.

        Returns the (possibly reflected) direction vector so the caller can
        reuse it on the next frame.
        """
        self.pos[0] += target_vec * speed
        rect = self.images[0].get_rect(center=vec_to_int(self.pos[0]))
        if rect.left < GAME_AREA.left + 2:
            # Reflect horizontally off the left wall.
            # BUGFIX: was `targt_vec.x = -target_vec.x` — a NameError
            # whenever the player hit the left edge.
            target_vec.x = -target_vec.x
            self.pos[0] += target_vec * speed
            # NOTE(review): self.vec is never updated anywhere, so this
            # recenters the (unused-afterwards) local rect on x=0;
            # presumably leftover code — TODO confirm.
            rect.centerx = int(self.vec.x)
        if rect.right > GAME_AREA.right - 2:
            # Reflect off the right wall.
            target_vec.x = -target_vec.x
            self.pos[0] += target_vec * speed
            rect.centerx = int(self.vec.x)
        if rect.top < GAME_AREA.top + 2:
            # Reflect off the top wall.
            target_vec.y = -target_vec.y
            self.pos[0] += target_vec * speed
            rect.centery = int(self.vec.y)
        if rect.bottom > GAME_AREA.bottom - 2:
            # Reflect off the bottom wall.
            target_vec.y = -target_vec.y
            self.pos[0] += target_vec * speed
            rect.centery = int(self.vec.y)
        # Each tail segment eases toward the one ahead of it, clamped to
        # stay within [MIN_DIST, MAX_DIST] of its leader.
        for i in range(1, 5):
            tgt = self.pos[i-1]
            src = self.pos[i]
            src = src.lerp(tgt, 0.1)
            dst = tgt - src
            length = dst.length() # Bad square root...
            if length > self.MAX_DIST:
                dst2 = pg.Vector2(dst)
                dst.scale_to_length(self.MAX_DIST)
                src += dst2 - dst
            elif length < self.MIN_DIST:
                dst2 = pg.Vector2(dst)
                dst.scale_to_length(self.MIN_DIST)
                src += dst2 - dst
            self.pos[i] = src
        return target_vec
    def draw(self, surface):
        """Draw segments tail-first so the head ends up on top."""
        rect = self.images[0].get_rect()
        for image, vec in zip(reversed(self.images), reversed(self.pos)):
            rect.center = vec_to_int(vec)
            surface.blit(image, rect)
class Context:
    """Simple attribute bag used to carry per-screen state."""
    def __init__(self, initial=None):
        # Copy any provided mapping into instance attributes.
        for key, value in (initial or {}).items():
            setattr(self, key, value)
class Game:
    """Top-level game object: owns the screen, the music rotation, the
    player, and a simple state machine.

    Each screen (title, countdown, game, game over) is a triple of
    (event_handler, update_handler, draw_handler) registered in
    self.game_state; *_start methods switch state and build a fresh
    Context. An update handler returns the next *_start method to
    request a state transition, or None to stay.
    Times throughout are in milliseconds.
    """
    def __init__(self, screen):
        self.screen = screen
        self.clock = pg.time.Clock()
        # Shuffled copy of the soundtrack; END_MUSIC advances it.
        self.songs = list(SONGS[:])
        random.shuffle(self.songs)
        self.song_index = 0
        pg.mixer.music.set_endevent(END_MUSIC)
        pg.mixer.music.load(self.songs[self.song_index])
        pg.mixer.music.play()
        pg.display.set_caption("ÄH!")
        self.player = Player()
        # State machine table: state id -> (event, update, draw) handlers.
        self.game_state = {
            TITLE_SCREEN: (self.title_event, self.title_update, self.title_draw,),
            GAME_COUNTDOWN: (
                self.countdown_event,
                self.countdown_update,
                self.countdown_draw,
            ),
            GAME: (self.game_event, self.game_update, self.game_draw,),
            GAME_OVER: (self.gameover_event, self.gameover_update, self.gameover_draw,),
        }
        self.high_scores = HIGH_SCORES
        self.load_highscores()
        self.state = None
        self.context = self.title_start(None)
    def load_highscores(self):
        """Load high scores from disk, creating the file with defaults first
        if it does not exist yet."""
        highscore_file = os.path.join(os.path.expanduser('~'), 'Saved Games', 'AH Game', "highscore.json")
        if android:
            highscore_file = os.path.join(android.storage.app_storage_path(), "Saved Games", "highscore.json")
        if not os.path.isfile(highscore_file):
            self.save_highscores()
        with open(highscore_file, "rt") as f:
            self.high_scores = json.loads(f.read())
    def save_highscores(self):
        """Write the current high-score table to the platform save location."""
        save_path = os.path.join(os.path.expanduser('~'), 'Saved Games', 'AH Game')
        if android:
            save_path = os.path.join(android.storage.app_storage_path(), "Saved Games")
        save_file = os.path.join(save_path, "highscore.json")
        os.makedirs(save_path, exist_ok=True)
        with open(save_file, "wt+") as f:
            f.write(json.dumps(self.high_scores, indent=4))
    # Title screen
    def title_start(self, old_context):
        """Enter the title screen; pre-renders the big title text."""
        self.state = TITLE_SCREEN
        context = Context()
        context.done = False
        context.name, context.name_pos = ptext.draw("ÄH!", midtop=(SCREEN_WIDTH // 2, 20), color=AMBER, fontsize=150, surf=None)
        return context
    def title_event(self, context, event):
        """Any mouse click leaves the title screen."""
        if event.type == pg.MOUSEBUTTONDOWN:
            context.done = True
    def title_update(self, context, delta_time):
        """Transition to the countdown once a click was registered."""
        if context.done:
            context.done = False
            return self.countdown_start
        return None
    def title_draw(self, context, surface):
        """Draw title, start prompt and instructions."""
        surface.blit(context.name, context.name_pos)
        ptext.draw(
            "CLICK MOUSE BUTTON\nTO BEGIN",
            center=(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2),
            color=AMBER,
            fontsize=40,
        )
        ptext.draw(
            "You are the green worm trying to catch the appearing\n"
            + "bubbles by clicking towards them with your mouse.\n"
            + "The faster you click, the faster your worm moves.\n"
            + "Be quick, you have only 30 seconds.\n\n"
            + "Press ESC to quit.",
            midbottom=(SCREEN_WIDTH // 2, SCREEN_HEIGHT - 5),
            color=AMBER,
            fontsize=18,
            align="left",
        )
    # Countdown screen
    def countdown_start(self, old_context):
        """Enter the 3-2-1-GO! countdown (4000 ms total)."""
        self.state = GAME_COUNTDOWN
        context = Context()
        context.count = 4000
        return context
    def countdown_event(self, context, event):
        """Countdown ignores all input."""
        pass
    def countdown_update(self, context, delta_time):
        """Tick the countdown; shows "GO!" for the final second, then starts the game."""
        context.count -= delta_time
        count = context.count // 1000
        context.text = f"{count}" if count else "GO!"
        if context.count < 0:
            return self.game_start
    def countdown_draw(self, context, surface):
        ptext.draw(
            context.text,
            center=(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2),
            color=AMBER,
            fontsize=40,
        )
    # Game screen
    def game_start(self, old_context):
        """Enter gameplay: place the player randomly and reset score/clock."""
        self.state = GAME
        context = Context()
        context.player = self.player
        initial_pos = (random.randint(90, 550), random.randint(70, 410))
        context.src_vec = pg.Vector2(initial_pos)
        context.player.set_pos(*initial_pos)
        context.score = 0
        context.bubbles = []
        # ms until the first bubble spawns.
        context.next_bubble = random.randint(1500, 5000)
        context.time_remaining = 30000
        context.speed_factor = 0.98
        context.tgt_vec = pg.Vector2()
        context.speed = 0.0
        context.old_speed = 0.0
        context.dst_vec = pg.Vector2()
        pg.key.stop_text_input()
        return context
    def game_event(self, context, event):
        """Each click aims the worm at the cursor and adds speed (capped)."""
        if event.type == pg.MOUSEBUTTONDOWN:
            # Move player towards clicked place
            context.dst_vec = pg.Vector2(event.pos)
            tgt_vec = context.dst_vec - context.src_vec
            tgt_vec.normalize_ip()
            context.tgt_vec = tgt_vec
            context.speed_factor = 0.98
            if context.speed <= 5.0:
                context.speed += 0.8
    def game_update(self, context, delta_time):
        """One gameplay tick: spawn bubbles, move the worm, resolve pickups,
        manage movement sound, and end the round when time runs out."""
        context.next_bubble -= delta_time
        if context.next_bubble <= 0:
            context.next_bubble = random.randint(500, 2000)
            # Spawn a new bubble
            # Make sure that new bubble doesn't overlap existing
            # bubbles and is not near vicinity of the player
            bubble_class = Bubble
            if random.randint(0, 10) == 0:
                bubble_class = Powerup
            accepted = False
            x, y = 0, 0
            while not accepted:
                accepted = True
                x = random.randint(50, 590)
                y = random.randint(50, 430)
                new_vec = pg.Vector2((x, y))
                # Check distance other bubbles
                for bubble in context.bubbles:
                    bubble_vec = pg.Vector2(bubble.rect.center)
                    if new_vec.distance_squared_to(bubble_vec) < 1600:
                        # Bubble too close to another bubble
                        accepted = False
                        break
                # NOTE(review): player.vec appears never to be updated
                # (always (0, 0)); presumably this was meant to use
                # player.pos[0] — TODO confirm.
                if new_vec.distance_squared_to(context.player.vec) < 3600:
                    accepted = False
            new_bubble = bubble_class((x, y), random.randint(1000, 7000))
            context.bubbles.append(new_bubble)
            SOUNDS["bubble"].play()
        context.src_vec += context.tgt_vec * context.speed
        context.tgt_vec = context.player.update(context.tgt_vec, context.speed)
        cur_vec = context.player.pos[0]
        dist_squared = context.dst_vec.distance_squared_to(cur_vec)
        if dist_squared <= 2:
            # Near the click target: decay speed faster.
            context.speed_factor = 0.9
        if context.speed > 0:
            context.speed *= context.speed_factor
            if context.speed < 0.06:
                context.speed = 0
        # Iterate over a copy so we can remove picked/expired bubbles.
        for bubble in context.bubbles[:]:
            if bubble.check_collision(context.player.rect):
                # Player hit the bubble
                bubble.play_sound()
                bubble.do_action(context)
                context.bubbles.remove(bubble)
                continue
            if not bubble.update(delta_time):
                # Bubble died
                context.bubbles.remove(bubble)
                continue
        # Player movement sound
        if context.speed > 0:
            if context.old_speed == 0:
                # Movement started
                SOUNDS["player"].play(-1)
            SOUNDS["player"].set_volume(context.speed / 5.0)
        else:
            # Movement stopped
            SOUNDS["player"].stop()
        context.time_remaining -= delta_time
        if context.time_remaining <= 0:
            SOUNDS["player"].stop()
            return self.gameover_start
        context.old_speed = context.speed
    def game_draw(self, context, surface):
        """Draw play-field border, bubbles, HUD, player and speed meter."""
        pg.draw.rect(surface, AMBER, GAME_AREA, width=2)
        for bubble in context.bubbles:
            bubble.draw(surface)
        ptext.draw(
            f"SCORE: {context.score:05}", topleft=(5, 5), color=AMBER, fontsize=18,
        )
        ptext.draw(
            f"TIME LEFT: {context.time_remaining // 1000}",
            topleft=(500, 5),
            fontsize=18,
            color=AMBER,
        )
        context.player.draw(surface)
        #surface.blit(context.player, context.player_rect)
        # Speedmeter
        spd = int(630 * context.speed / 5.0)
        speed_meter = pg.Rect((5, 445), (spd, 20))
        surface.fill(AMBER, speed_meter)
    # Game over screen
    def gameover_start(self, old_context):
        """Enter game over: schedule jingle/music, decide high-score entry."""
        self.state = GAME_OVER
        context = Context()
        context.count = 60000
        context.score = old_context.score
        # Jingle starts 250 ms in; game-over song starts when it ends.
        context.end_jingle_start = context.count - 250
        context.end_jingle_stop = 60000 - SOUNDS["end"].get_length() * 1000
        context.played_fanfare = False
        pg.mixer.music.set_endevent()
        pg.mixer.music.fadeout(250)
        context.is_high_score = context.score >= self.high_scores[-1][0]
        context.high_score_name = ""
        if not context.is_high_score:
            self.gameover_highscores(context)
        else:
            pg.key.start_text_input()
        return context
    def gameover_highscores(self, context):
        """Pre-render the scrolling high-score list plus its fade-in/out strips."""
        txt = ""
        for score, name in self.high_scores:
            txt += f"{score:04} {name}\n"
        tmp_img, _ = ptext.draw(
            txt, topleft=(0, 0), fontsize=18, color=AMBER, surf=None
        )
        size = tmp_img.get_size()
        size = (size[0], size[1] + 159) # This needs to be one pixel less to avoid small glitch
        # Stack the list twice so the scroll can wrap seamlessly.
        highscore_img = pg.Surface(size)
        rect = tmp_img.get_rect()
        context.highscore_height = rect.height
        highscore_img.blit(tmp_img, dest=rect)
        rect.y = rect.height
        rect.height = 159
        highscore_img.blit(tmp_img, dest=rect)
        context.highscore_img = highscore_img
        context.highscore_rect = pg.Rect((0, 0), (rect.width, HIGHSCORE_SCROLL_HEIGHT))
        context.highscore_top = 0
        # Highscore faders
        out_fader = pg.Surface((rect.width, 20), pg.SRCALPHA)
        for f in range(20, 0, -1):
            out_fader.fill((0, 0, 0, f * (255 / 20)), ((0, 20 - f), (rect.width, 1)))
        in_fader = pg.transform.flip(out_fader, False, True)
        context.out_fader = out_fader
        context.out_fader_rect = out_fader.get_rect()
        context.out_fader_rect.midtop = (SCREEN_WIDTH // 2, HIGHSCORE_SCROLL_TOP_Y)
        context.in_fader = in_fader
        context.in_fader_rect = in_fader.get_rect()
        context.in_fader_rect.midbottom = (SCREEN_WIDTH // 2, HIGHSCORE_SCROLL_TOP_Y + HIGHSCORE_SCROLL_HEIGHT)
    def gameover_event(self, context, event):
        """Handle restart clicks and, on a new high score, name entry."""
        if context.count < 50000 and event.type == pg.MOUSEBUTTONDOWN:
            context.count = 0
        if context.is_high_score and event.type == pg.KEYDOWN:
            if event.key == pg.K_BACKSPACE:
                context.high_score_name = context.high_score_name[:-1]
                return
            if event.key == pg.K_RETURN:
                # Commit the entry, keep the table sorted and capped.
                pg.key.stop_text_input()
                self.high_scores.append((context.score, context.high_score_name))
                self.high_scores.sort(key=lambda x: x[0], reverse=True)
                self.high_scores = self.high_scores[:-1]
                context.is_high_score = False
                self.save_highscores()
                self.gameover_highscores(context)
                return
        # if event.unicode.isalnum() and len(context.high_score_name) < 8:
        #     context.high_score_name += event.unicode.upper()
        if context.is_high_score and event.type == pg.TEXTINPUT:
            context.high_score_name += event.text
    def gameover_update(self, context, delta_time):
        """Advance the scroll, trigger jingle/music at their marks, and return
        to the title screen once the countdown expires."""
        context.count -= delta_time
        if not context.is_high_score:
            context.highscore_top += 0.5
            context.highscore_rect.top = int(context.highscore_top)
            if context.highscore_rect.top >= context.highscore_height:
                context.highscore_top = 0
        if context.count < context.end_jingle_start:
            # -9999 marks the one-shot as already fired.
            context.end_jingle_start = -9999
            SOUNDS["end"].play()
        if context.count < context.end_jingle_stop:
            context.end_jingle_stop = -9999
            pg.mixer.music.load(GAME_OVER_SONG)
            pg.mixer.music.play()
            pg.mixer.music.set_endevent(END_MUSIC)
        if context.count <= 0:
            pg.mixer.music.fadeout(500)
            return self.title_start
    def gameover_draw(self, context, surface):
        """Draw GAME OVER, score, and either the name prompt or the scroller."""
        ptext.draw(
            "GAME OVER",
            center=(SCREEN_WIDTH // 2, 60),
            color=AMBER,
            fontsize=60,
        )
        surf, pos = ptext.draw(
            f"SCORE: {context.score:05}", midtop=(SCREEN_WIDTH // 2, 150), fontsize=18, color=AMBER,
        )
        if context.is_high_score:
            ptext.draw("YOU MADE HIGH SCORE!\nENTER YOUR NAME BELOW:", midtop=(SCREEN_WIDTH // 2, 100), fontsize=18,
                       color=AMBER)
            rect = surf.get_rect(topleft=pos)
            rect.right += 10
            # \u258E renders a text-cursor block after the typed name.
            ptext.draw(
                f"{context.high_score_name}\u258E", topleft=rect.topright, fontsize=18, color=AMBER
            )
        else:
            surface.blit(context.highscore_img, dest=(SCREEN_WIDTH // 2 - context.highscore_rect.width // 2, HIGHSCORE_SCROLL_TOP_Y), area=context.highscore_rect)
            surface.blit(context.out_fader, dest=context.out_fader_rect)
            surface.blit(context.in_fader, dest=context.in_fader_rect)
        if context.count < 50000:
            ptext.draw("PRESS MOUSE BUTTON TO RESTART", midbottom=(SCREEN_WIDTH // 2, SCREEN_HEIGHT - 5), fontsize=18, color=AMBER)
    def game_loop(self):
        """Main loop: dispatch events/update/draw for the current state at FPS,
        and keep the shuffled soundtrack rotating on END_MUSIC events."""
        while True:
            delta_time = self.clock.tick(FPS)
            self.screen.fill(BLACK)
            event_handler, update_handler, draw_handler = self.game_state[self.state]
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    sys.exit()
                if event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE:
                    sys.exit()
                if event.type == END_MUSIC:
                    self.song_index += 1
                    if self.song_index == len(self.songs):
                        # Reshuffle, but keep the last-played song away from
                        # the ends so it does not repeat immediately.
                        last_song = self.songs[-1]
                        self.songs = self.songs[:-1]
                        random.shuffle(self.songs)
                        self.songs.insert(
                            random.randint(
                                len(self.songs) // 4,
                                len(self.songs) - len(self.songs) // 4 - 1,
                            ),
                            last_song,
                        )
                        self.song_index = 0
                    pg.mixer.music.load(SONGS[self.song_index])
                    pg.mixer.music.play()
                event_handler(self.context, event)
            next_state = update_handler(self.context, delta_time)
            if next_state:
                self.context = next_state(self.context)
                continue # Restart gameloop
            draw_handler(self.context, self.screen)
            pg.display.flip()
if __name__ == "__main__":
    # Run the game against the module-level display surface until quit.
    Game(screen=screen).game_loop()
| 36.821192 | 162 | 0.591817 |
4157d767bfd8aec02bb83c3f46d58ec2578fabed | 17,030 | py | Python | python/openag_micro.py | ferguman/OpenAg-MVP-II | 600ce329f373ef3dc867163cdd09a424b49cd007 | [
"MIT"
] | 2 | 2019-03-18T05:47:55.000Z | 2019-05-30T13:08:13.000Z | python/openag_micro.py | ferguman/OpenAg-MVP-II | 600ce329f373ef3dc867163cdd09a424b49cd007 | [
"MIT"
] | 14 | 2018-06-27T14:02:23.000Z | 2020-02-16T19:47:43.000Z | python/openag_micro.py | ferguman/OpenAg-MVP-II | 600ce329f373ef3dc867163cdd09a424b49cd007 | [
"MIT"
] | null | null | null | #
import re
import serial
from sys import exc_info
from threading import Lock
from time import sleep, time
from python.logger import get_sub_logger
from python.LogFileEntryTable import LogFileEntryTable
# Module logger, namespaced under the package's logging hierarchy.
logger = get_sub_logger(__name__)
# TODO - make the log file interval a configuration file parameter.
# Rate-limits repeated identical log entries to once per hour.
log_entry_table = LogFileEntryTable(60 * 60)
# All the micro-controller sensor names will be put in the reading_names dictionary
reading_names = {}
def make_get(vals, reading_names: dict) -> 'func':
    """Build a getter closure over *vals*.

    reading_names maps a friendly sensor name (e.g. 'air_temp') to its
    index in *vals*; unknown names are logged and yield None.
    """
    def get(value_name):
        try:
            reading_index = reading_names[value_name]
        except KeyError:
            log_entry_table.add_log_entry(logger.error,
                'illegal value_name. Please specify one of {}.'.format(reading_names))
            return None
        return vals[reading_index]
    return get
# Sensor readings are defined in the configuration file
#
# Provide a lock so that multiple threads are forced to wait for commands that
# use the Arduino serial interface
#
serial_interface_lock = Lock()
# target_indexes and cur_command will be filled based upon the configuration setting.
# target_indexes: actuator name -> position in cur_command.
target_indexes = []
# cur_command: per-actuator desired state (0 = off, 1 = on).
cur_command = []
# Last command string sent to the micro-controller, and its predecessor.
cur_mc_cmd_str = None
old_mc_cmd_str = None
# Latest micro-controller response lines, and the previous batch.
cur_mc_response = None
old_mc_response = None
# Create a command string for the Arduino -> b'0,false,true,...false\n'
def make_fc_cmd(mc_state):
    """Build the Arduino actuator command string (e.g. b'0,true,false\\n')
    from the module-level cur_command bits, applying camera-pose overrides
    from *mc_state* when a picture is being taken.

    Returns b'0' (no trailing newline) on any malformed command value.
    """
    # first build an array that holds all the arduino commands
    cmds = []
    # scan the cur_command bits
    for v in cur_command:
        if v == 0:
            cmds.append(False)
        elif v == 1:
            cmds.append(True)
        else:
            # BUGFIX: previously formatted undefined name `b` (NameError);
            # log the offending value `v` instead.
            logger.error('bad command value: {}'.format(v))
            # dump and run. this is bad!
            return b'0'
    # if the system is in camera pose mode then override the light commands
    # in order to give good lighting for the camera.
    #- if mc_state['camera_pose']:
    if mc_state['camera']['pose'] == True:
        for pc in mc_state['camera']['camera_pose_cmds']:
            cmds[target_indexes[pc['command']]] = pc['value']
        #- cmds[target_indexes['grow_light']] = False
        #- cmds[target_indexes['chamber_lights']] = True
    # walk the command array and build the arduino command
    #
    cmd = b'0'
    for b in cmds:
        if b == False:
            cmd = cmd + b',false'
        elif b == True:
            cmd = cmd + b',true'
        else:
            logger.error('bad command boolean: {}'.format(b))
            # dump and run. this is bad!
            return b'0'
    return cmd + b'\n'
def extract_sensor_values(mc_response, vals):
    """Parse sensor readings out of the micro-controller's response lines.

    mc_response -- list of response strings; a line starting with '0'
                   carries a status code followed by one number per sensor.
    vals        -- list of per-sensor dicts (one per entry in the module
                   global reading_names); each gets a 'ts' timestamp and,
                   on success, a 'value' string (None on failure).
    """
    # Note these globals -> global old_mc_cmd_str, cur_mc_cmd_str, old_mc_response, cur_mc_response
    # TBD: Maybe the thing to do is to pull the timestamp through from the arduiono
    # if the time stamp does not move forward then detect this and blank out the
    # sensor readings.
    ts = time()
    for r in vals:
        r['ts'] = ts
    readings_found = False
    # BUGFIX: the pattern previously had a capture group, (\d+\.\d+)|\d+,
    # so findall() returned '' for integer-only readings. A group-free
    # alternation returns the matched text itself. Compiled once, outside
    # the loop.
    number_pattern = re.compile(r'\d+\.\d+|\d+')
    for msg in mc_response:
        if msg[0:1] == '0':
            values = number_pattern.findall(msg)
            # Look for the a status code followed by the readings.
            #- if len(values) == 11:
            if len(values) == len(reading_names) + 1:
                readings_found = True
                # Save each reading with a timestamp.
                # TBD: Think about converting to the "native" values (e.g. int, float, etc) here.
                for i in range (1, len(reading_names) + 1):
                    vals[i-1]['value'] = values[i]
    if not readings_found:
        # when the arduino encounters one or more sensor errors it sends a line for each
        # failed sensor.  The format of the each line is:
        # status_level, sensor_name, status_code, status_msg
        # status_level is code of 0, 1, or 2 which decode to OK, WARNING, or ERROR
        # status_code is a whole number that gives sensor specific satus or error info
        # status_msg is a human readable description of what the status code means.
        #
        log_entry_table.add_log_entry(
            logger.error, 'Error reading fopd microconroller sensors. Micro returned: {}'.format(mc_response))
        for r in vals:
            r['value'] = None
def make_help(args):
    """Return a closure that renders the CLI help text for this device.

    args -- dict containing at least 'name', the command prefix shown
            in every example line.
    """
    def help():
        prefix = args['name']
        lines = [
            '{}.help() - Displays this help page.\n'.format(prefix),
            "{}.cmd('camera_pose' | 'cp', action) - if action = 'on' then Actuate the grow chamber lights for a picture,\n".format(prefix),
            " - if action = 'off' then return the grow lights to the current state\n",
            "{}.cmd('on':'off', target) - Turn an actuator on or off. Targets:\n".format(prefix),
            " Run {}.cmd('st') to see the possible values for the target argument\n".format(prefix),
            "{}.cmd('show_targets'|'st') - Show all the available target values\n".format(prefix),
            '{}.get(value_name) - Get value such as air temperature.\n'.format(prefix),
            ' The following value names are recognized:\n',
            ' humidity, air_temp, TBD add other available options to this help message.\n',
            "{0}.mc_cmd(mc_cmd_str) - Micro-controller command. Try {0}.uc_cmd('(help)') to get started.\n".format(prefix),
            " mc_cmd_str is specified as a string -> {0}.mc_cmd(\"(help)\") or {0}.mc_cmd('(help)')\n".format(prefix),
            " Embed quotes (\") by using the \\ character -> {0}.mc_cmd(\"(c 'co2 'ser ".format(prefix) + r'\"Z\")")' + '\n',
            '{}.state() - Show sensor readings and actuator state.\n'.format(prefix),
            "{}['sensor_readings'][index] - Returns the sensor reading referenced by index.\n".format(prefix),
            " 0: air humidity\n",
            " 1: air temperature\n",
        ]
        return ''.join(lines)
    return help
def get(value_name):
    """Stub getter; always returns 'OK'.

    NOTE(review): ignores value_name entirely — presumably a placeholder
    superseded by the closures from make_get(); confirm before removing.
    """
    return 'OK'
def make_cmd(mc_state, ser):
    '''
    This grow device hardware supports the following commands:
    'circ_fan -> on or off.
    '''
    # Returns a closure implementing the actuator command interface:
    #   cmd('show_targets'|'st')          -> comma-separated target names
    #   cmd('on'|'off', target)           -> set a bit in module-global cur_command
    #   cmd('camera_pose'|'cp', 'on'|'off') -> toggle picture-pose lighting
    def cmd(*args):
        cmd= args[0]
        # is this a show_target command
        if cmd == 'show_targets' or cmd == 'st':
            # Build a ', '-joined list of the known actuator names.
            s = None
            for t in target_indexes:
                if s == None:
                    s = t
                else:
                    s = s + ', ' + t
            return s
        # is this an on or off command?
        elif cmd == 'on' or cmd == 'off':
            target = args[1]
            if target in target_indexes:
                target_index = target_indexes[target]
                # cur_command is shared with the serial-writer loop.
                global cur_command
                if cmd == 'on':
                    # Only log/flip when the state actually changes.
                    if cur_command[target_index] == 0:
                        logger.info('Received {0} on command. Will turn {0} on.'.format(target))
                        cur_command[target_index] = 1
                    return 'OK'
                elif cmd == 'off':
                    if cur_command[target_index] == 1:
                        logger.info('Received {0} off command. Will turn {0} off.'.format(target))
                        cur_command[target_index] = 0
                    return 'OK'
                else:
                    logger.error('Unknown on/off command action received: {}'.format(target))
            return 'unknown target.'
        # is this an on or off command?
        elif cmd == 'camera_pose' or cmd == 'cp':
            if args[1] == 'on':
                #- mc_state['camera_pose'] = True
                mc_state['camera']['pose'] = True
                # send a command to the arduino now so the lights go into pose mode ASAP
                send_mc_cmd(ser, make_fc_cmd(mc_state))
                logger.info('posing for a picture')
                return 'OK'
            elif args[1] == 'off':
                mc_state['camera']['pose'] = None
                logger.info('will stop posing for a picture')
                return 'OK'
            else:
                logger.error('Unknown pose command action {}'.format(args[1]))
                return 'Unknown pose command action {}'.format(args[1])
        logger.error('unknown command received: {}'.format(cmd))
        return "unknown cmd. Specify 'on' or 'off'"
    return cmd
def make_mc_cmd(ser):
    """Create a closure that sends a raw command string to the micro-controller.

    The closure serializes port access through serial_interface_lock and
    returns the decoded response text (everything up to the trailing 'OK').
    """
    def mc_cmd(cmd_str):
        result = None
        # Wait until the serial interface is free.
        serial_interface_lock.acquire()
        try:
            payload = bytes(cmd_str, "ascii") + b'\n'
            ser.write(payload)
            result = ser.read_until(b'OK\r\n').rstrip().decode('utf-8')
            ser.reset_input_buffer()
        finally:
            serial_interface_lock.release()
        return result
    return mc_cmd
def cur_mc_response_as_str():
    """Render the most recent micro-controller response as printable text.

    Reads the module-level ``cur_mc_response`` (a list of response lines,
    or None before any command has been sent).
    """
    # Identity check: 'is None' is the idiomatic None test; '== None' invokes
    # __eq__ and is flagged by linters (PEP 8 E711).
    if cur_mc_response is None:
        return 'None'
    return '\n'.join(cur_mc_response)
# TBD - make a long and short form of this command. The long form would be used by local console
# for debuging. The short form would be used by MQTT to get the state of the arduiono.
# show_state('long' | 'short')
#
def show_state():
    """Report the last command string sent to the micro-controller and its reply.

    Reads the module-level cur_mc_cmd_str and the tokenized response history.
    """
    parts = [
        'current micro-controller string: {}\n'.format(cur_mc_cmd_str),
        'current micro-controller response: {}\n'.format(cur_mc_response_as_str()),
    ]
    return ''.join(parts)
def log_mc_response(response):
    """Route each micro-controller response line to the appropriate log level.

    Lines are tagged by their first character: '0' = sensor readings,
    '1' = warning, '2' = error. A reset salutation is logged specially;
    anything else is logged as plain info.
    """
    for msg in response:
        if msg.startswith('0'):
            logger.info('sensor readings: {}'.format(msg))
        elif msg.startswith('1'):
            logger.warning('micro warning: {}'.format(msg))
        elif msg.startswith('2'):
            log_entry_table.add_log_entry(logger.error, 'micro error: {}'.format(msg))
        elif msg.startswith('OpenAg Serial Monitor Starting'):
            logger.info('micro reset detected: {}'.format(msg))
        else:
            logger.info('micro response: {}'.format(msg))
def log_cmd_changes():
    """Log deltas in the Arduino command string / response since the last send.

    Uses the module-level cur_mc_cmd_str, old_mc_cmd_str, cur_mc_response and
    old_mc_response history variables.
    """
    dump_response = False
    if cur_mc_cmd_str != old_mc_cmd_str:
        logger.info('Arduino command change old: {}'.format(old_mc_cmd_str))
        logger.info('                       new: {}'.format(cur_mc_cmd_str))
        dump_response = True
    if old_mc_response is None or len(cur_mc_response) != len(old_mc_response):
        logger.info('Arduino response (i.e. # of lines) changed')
        dump_response = True
    if dump_response:
        log_mc_response(cur_mc_response)
def tokenize_mc_response(mc_response):
    """Decode a raw micro-controller reply and split it into lines.

    Drops the trailing six-character terminator ("\\r\\nOK\\r\\n") before
    splitting the remaining text on CRLF.
    """
    text = mc_response.decode('utf-8')
    return text[:-6].split('\r\n')
# The micro-controller responds to food computer commands as follows:
# If any module (a sensor or an actuator) has a warning or failure then a message line is returned
# for each such failing module. The format of these message lines
# is "status level, module name, status code, status message".
# If any sensor has a warning or failure then no sensor readings are returned. If sensor
# readings are returned then they are sent on a line formatted as:
# "0,x1,x2, ... xn" where xn is either an integer (e.g. 20) or a float (e.g. 20.5).
# Lastly the string "OK\r\n" is returned to mark the end of the micro-controller's response
# to the command.
#
def send_mc_cmd(ser, cmd_str):
    """Send a command to the micro-controller and return the tokenized response.

    Guards the serial port with serial_interface_lock and updates the
    module-level command/response history consumed by log_cmd_changes().

    Args:
        ser: the open serial connection to the micro-controller.
        cmd_str: the command, already encoded as bytes.

    Returns:
        The micro-controller's response as a list of lines; a single empty
        line when the serial exchange failed.
    """
    # Initialize the response so a serial failure below still yields a
    # well-formed (empty) response instead of raising UnboundLocalError at
    # the tokenize call after the lock is released.
    mc_response = b''
    serial_interface_lock.acquire()
    try:
        # Update current state - So logger routines can intelligently log changes
        global old_mc_cmd_str, cur_mc_cmd_str, old_mc_response, cur_mc_response
        old_mc_cmd_str = cur_mc_cmd_str
        cur_mc_cmd_str = cmd_str
        old_mc_response = cur_mc_response
        logger.debug('arduino command: {}'.format(cmd_str))
        ser.write(cmd_str)
        mc_response = ser.read_until(b'OK\r\n')
        logger.debug('arduino response {}'.format(mc_response))
        ser.reset_input_buffer()
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # not swallowed.
        logger.error('serial interface error {}, {}'.format(exc_info()[0], exc_info()[1]))
    finally:
        serial_interface_lock.release()
    cur_mc_response = tokenize_mc_response(mc_response)
    log_cmd_changes()
    return cur_mc_response
# TBD: check on the fc and see if it is ok
# run unit tests and report failure in the log
# TBD:if the unit tests fail then print a log message and exit the program!
#
def start_serial_connection(args):
    """Open the serial connection to the micro-controller.

    Note that opening the serial port resets the Arduino; the startup
    salutation it prints is consumed and logged here.

    Args:
        args: configuration dict with 'log_level', 'serial_port',
              'baud_rate' and 'serial_timeout' keys.

    Returns:
        The open serial.Serial object, or None when the connection fails.
    """
    logger.setLevel(args['log_level'])
    logger.info('starting openag microcontroller monitor for food computer version 1')
    try:
        # Starting the serial port resets the Arduino.
        ser = serial.Serial(args['serial_port'], args['baud_rate'], timeout=args['serial_timeout'])
        # The Arduino should respond with the serial monitor salutation (i.e.
        # "OpenAg Serial Monitor Starting" and any warnings or errors generated by the modules during
        # the invokation of their begin methods.
        # TBD - Add checking for failed startup messages.
        log_mc_response(tokenize_mc_response(ser.read_until(b'OK\r\n')))
        ser.reset_input_buffer()
        return ser
    except Exception:
        # Narrowed from a bare 'except:' so Ctrl-C still interrupts startup.
        logger.error('unable to start serial connection to micro-controller: {}, {}'.format(exc_info()[0], exc_info()[1]))
        return None
def initialize_fc(mc_state, ser, vals, iterations):
    """Switch the food computer's control loop on and let it settle.

    Drives ``iterations`` update cycles, one per second, so slow sensors can
    start producing readings before the main loop takes over.
    """
    logger.info("asking the food computer if it is on.")
    log_mc_response(send_mc_cmd(ser, b"(fc 'read)\n"))
    logger.info("regardless of response tell fc to turn on.")
    send_mc_cmd(ser, b"(fc 'on)\n")
    log_mc_response(send_mc_cmd(ser, b"(fc 'read)\n"))
    # Ping the micro-controller so it runs its update loop a few times.
    for _ in range(iterations):
        log_mc_response(send_mc_cmd(ser, make_fc_cmd(mc_state)))
        sleep(1)
def start(app_state, args, b):
    """Thread entry point for the food computer micro-controller interface.

    Opens the serial link, registers the command/help/get callables into
    ``app_state[args['name']]``, waits on the barrier ``b``, then loops
    sending actuation commands and extracting sensor readings once a second
    until ``app_state['stop']`` becomes true.
    """
    logger.info('fopd microcontroller interface thread starting.')
    # Initialize the reading (i.e. sensor outputs) and target (i.e. actuator
    # inputs) information.
    global reading_names
    reading_names = args['sensor_reading_names']
    # Start a serial connection with the Arduino - Note that this resets the Arduino.
    ser = start_serial_connection(args)
    if not ser:
        # if no serial connection can be made then tell the system to stop.
        app_state['stop'] = True
    # We have one state variable (i.e. camera pose) so no need of a state structure
    mc_state = {}
    mc_state['camera'] = {'pose': None, 'camera_pose_cmds': args['camera_pose_cmds']}
    # Initialize the actuators: one on/off slot per command target.
    global target_indexes, cur_command
    target_indexes = args['command_set']
    cur_command = [0] * len(target_indexes)
    # Inject your commands into app_state.
    app_state[args['name']] = {}
    app_state[args['name']]['help'] = make_help(args)
    app_state[args['name']]['cmd'] = make_cmd(mc_state, ser)
    app_state[args['name']]['mc_cmd'] = make_mc_cmd(ser)
    app_state[args['name']]['state'] = show_state
    vals = app_state[args['name']]['sensor_readings'] = args['sensor_readings']
    app_state[args['name']]['get'] = make_get(args['sensor_readings'], args['sensor_reading_names'])
    if ser:
        # Start the fc loop and let it run for args['mc_start_delay'] seconds.
        # 10 is recommended for the fc version 1 in order to wait for the
        # co2 reading to be accurate. TBD: There are more sophisticated ways - such as making the co2
        # reading "unavailable" until it is available.
        initialize_fc(mc_state, ser, vals, args['mc_start_delay'])
        # Take the first set of sensor readings
        extract_sensor_values(send_mc_cmd(ser, make_fc_cmd(mc_state)), vals)
    # Let the system know that you are good to go.
    try:
        b.wait()
    except Exception as err:
        # assume a broken barrier
        logger.error('barrier error: {}'.format(str(err)))
        app_state['stop'] = True
    while not app_state['stop']:
        # Send a command string to the Arduino that actuates as per the current controller state.
        cur_mc_response = send_mc_cmd(ser, make_fc_cmd(mc_state))
        # Look for a set of sensor readings and extract them if you find one.
        extract_sensor_values(cur_mc_response, vals)
        sleep(1)
    logger.info('fopd microcontroller interface thread stopping.')
| 37.346491 | 165 | 0.607634 |
bfae87825b011c6112f3cd048dcfd1135bb49296 | 885 | py | Python | op_builder/transformer_inference.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | 1 | 2022-03-15T07:00:38.000Z | 2022-03-15T07:00:38.000Z | op_builder/transformer_inference.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | null | null | null | op_builder/transformer_inference.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | null | null | null | from .builder import CUDAOpBuilder
class InferenceBuilder(CUDAOpBuilder):
    """Op builder for the DeepSpeed transformer-inference CUDA extension."""

    BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE"
    NAME = "transformer_inference"

    def __init__(self, name=None):
        super().__init__(name=self.NAME if name is None else name)

    def absolute_name(self):
        return f'deepspeed.ops.transformer.inference.{self.NAME}_op'

    def sources(self):
        # All kernel/binding sources live under a single csrc directory.
        root = 'csrc/transformer/inference/csrc'
        files = [
            'pt_binding.cpp',
            'gelu.cu',
            'normalize.cu',
            'softmax.cu',
            'dequantize.cu',
            'apply_rotary_pos_emb.cu',
        ]
        return ['{}/{}'.format(root, f) for f in files]

    def include_paths(self):
        return ['csrc/transformer/inference/includes']
| 32.777778 | 70 | 0.670056 |
1a2c2a574803b96f3ed3798b51d60e23883938e6 | 247 | py | Python | cursoemvideo/modulos/ex107/ex107.py | mrqssjeff/project-python | b3b08f2acfe825640a5ee92cf9d6fa45ab580384 | [
"MIT"
] | null | null | null | cursoemvideo/modulos/ex107/ex107.py | mrqssjeff/project-python | b3b08f2acfe825640a5ee92cf9d6fa45ab580384 | [
"MIT"
] | null | null | null | cursoemvideo/modulos/ex107/ex107.py | mrqssjeff/project-python | b3b08f2acfe825640a5ee92cf9d6fa45ab580384 | [
"MIT"
] | null | null | null | import moeda
preço = float(input('Digite o preço: '))
print(f'''A metade de {preço} é {moeda.metade(preço)},
O dobro de {preço} é {moeda.dobro(preço)},
Aumentado 15%, temos {moeda.aumentar(preço)}
Reduzindo 23%, temos {moeda.diminuir(preço)}''') | 35.285714 | 54 | 0.696356 |
b90bce6b6451b67e0af9b46d713dd3d3a3b462e5 | 3,675 | py | Python | examples/request_init_listener.py | clohfink/python-driver | 30a0e27cd1b8999267c146f0a93adf962a50790b | [
"Apache-2.0"
] | 1,163 | 2015-01-01T03:02:05.000Z | 2022-03-22T13:04:00.000Z | examples/request_init_listener.py | clohfink/python-driver | 30a0e27cd1b8999267c146f0a93adf962a50790b | [
"Apache-2.0"
] | 556 | 2015-01-05T16:39:29.000Z | 2022-03-26T20:51:36.000Z | examples/request_init_listener.py | clohfink/python-driver | 30a0e27cd1b8999267c146f0a93adf962a50790b | [
"Apache-2.0"
] | 449 | 2015-01-05T10:28:59.000Z | 2022-03-14T23:15:32.000Z | #!/usr/bin/env python
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script shows an example "request init listener" which can be registered to track certain request metrics
# for a session. In this case we're just accumulating total request and error counts, as well as some statistics
# about the encoded request size. Note that the counts would be available using the internal 'metrics' tracking --
# this is just demonstrating a way to track a few custom attributes.
from __future__ import print_function
from cassandra.cluster import Cluster
from greplin import scales
import pprint
pp = pprint.PrettyPrinter(indent=2)
class RequestAnalyzer(object):
    """
    Class used to track request and error counts for a Session.
    Also computes statistics on encoded request size.
    """

    # scales stats are declared at class level and bound to this instance's
    # stat path by scales.init() in __init__.
    requests = scales.PmfStat('request size')
    errors = scales.IntStat('errors')

    def __init__(self, session):
        scales.init(self, '/cassandra')
        # each instance will be registered with a session, and receive a callback for each request generated
        session.add_request_init_listener(self.on_request)

    def on_request(self, rf):
        # This callback is invoked each time a request is created, on the thread creating the request.
        # We can use this to count events, or add callbacks
        rf.add_callbacks(self.on_success, self.on_error, callback_args=(rf,), errback_args=(rf,))

    def on_success(self, _, response_future):
        # future callback on a successful request; just record the size
        self.requests.addValue(response_future.request_encoded_size)

    def on_error(self, _, response_future):
        # future callback for failed; record size and increment errors
        self.requests.addValue(response_future.request_encoded_size)
        self.errors += 1

    def __str__(self):
        # just extracting request count from the size stats (which are recorded on all requests)
        request_sizes = dict(self.requests)
        count = request_sizes.pop('count')
        return "%d requests (%d errors)\nRequest size statistics:\n%s" % (count, self.errors, pp.pformat(request_sizes))
# connect a session
session = Cluster().connect()

# attach a listener to this session
ra = RequestAnalyzer(session)

session.execute("SELECT release_version FROM system.local")
session.execute("SELECT release_version FROM system.local")

print(ra)
# 2 requests (0 errors)
# Request size statistics:
# { '75percentile': 74,
#   '95percentile': 74,
#   '98percentile': 74,
#   '999percentile': 74,
#   '99percentile': 74,
#   'max': 74,
#   'mean': 74.0,
#   'median': 74.0,
#   'min': 74,
#   'stddev': 0.0}

try:
    # intentional error to show that count increase
    session.execute("syntax err")
except Exception as e:
    # swallowed on purpose: the point is only to bump the error counter
    pass

print()
print(ra)  # note: the counts are updated, but the stats are not because scales only updates every 20s
# 3 requests (1 errors)
# Request size statistics:
# { '75percentile': 74,
#   '95percentile': 74,
#   '98percentile': 74,
#   '999percentile': 74,
#   '99percentile': 74,
#   'max': 74,
#   'mean': 74.0,
#   'median': 74.0,
#   'min': 74,
#   'stddev': 0.0}
| 34.027778 | 120 | 0.711293 |
71dbe5894d912ac12f9454e16d19d991f9f76e6c | 16,329 | py | Python | dace/transformation/dataflow/strip_mining.py | targetsm/dace | 297b12804a334df8cc6fad5250d5fb0cce20dc6e | [
"BSD-3-Clause"
] | null | null | null | dace/transformation/dataflow/strip_mining.py | targetsm/dace | 297b12804a334df8cc6fad5250d5fb0cce20dc6e | [
"BSD-3-Clause"
] | null | null | null | dace/transformation/dataflow/strip_mining.py | targetsm/dace | 297b12804a334df8cc6fad5250d5fb0cce20dc6e | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
""" This module contains classes and functions that implement the strip-mining
transformation."""
import dace
from copy import deepcopy as dcpy
from dace import dtypes, registry, subsets, symbolic
from dace.sdfg import SDFG, SDFGState
from dace.properties import make_properties, Property
from dace.sdfg import nodes
from dace.sdfg import utils as sdutil
from dace.symbolic import issymbolic, overapproximate, SymExpr
from dace.transformation import pattern_matching
import sympy
def calc_set_image_index(map_idx, map_set, array_idx):
    """Substitute map parameters into an Indices subset, producing a Range.

    For each index expression, every map parameter is replaced by the
    exact/over-approximated bounds of its map range, yielding a
    (begin, end, step=1) interval per dimension.
    """
    image = []
    for a_idx in array_idx.indices:
        # A single index i becomes the degenerate range [i, i] with step 1.
        new_range = [a_idx, a_idx, SymExpr(1, 1)]
        for m_idx, m_range in zip(map_idx, map_set):
            symbol = symbolic.pystr_to_symbolic(m_idx)
            # Only begin (0) and end (1) need substitution; step stays 1.
            for i in range(2):
                # Track the exact and over-approximated expressions separately.
                if isinstance(m_range[i], SymExpr):
                    exact = m_range[i].expr
                    approx = m_range[i].approx
                else:
                    exact = m_range[i]
                    approx = overapproximate(m_range[i])
                if isinstance(new_range[i], SymExpr):
                    new_range[i] = SymExpr(
                        new_range[i].expr.subs([(symbol, exact)]),
                        new_range[i].approx.subs([(symbol, approx)]))
                elif issymbolic(new_range[i]):
                    new_range[i] = SymExpr(
                        new_range[i].subs([(symbol, exact)]),
                        new_range[i].subs([(symbol, approx)]))
                else:
                    # Constant index: wrap it so later passes see a SymExpr.
                    new_range[i] = SymExpr(new_range[i], new_range[i])
        image.append(new_range)
    return subsets.Range(image)
def calc_set_image_range(map_idx, map_set, array_range):
    """Substitute map parameters into a Range subset.

    Same substitution as calc_set_image_index, but operating on full
    (begin, end, step) triples, so all three components are rewritten.
    """
    image = []
    for a_range in array_range:
        new_range = list(a_range)
        for m_idx, m_range in zip(map_idx, map_set):
            symbol = symbolic.pystr_to_symbolic(m_idx)
            # Substitute into begin (0), end (1) and step (2).
            for i in range(3):
                # Track the exact and over-approximated expressions separately.
                if isinstance(m_range[i], SymExpr):
                    exact = m_range[i].expr
                    approx = m_range[i].approx
                else:
                    exact = m_range[i]
                    approx = overapproximate(m_range[i])
                if isinstance(new_range[i], SymExpr):
                    new_range[i] = SymExpr(
                        new_range[i].expr.subs([(symbol, exact)]),
                        new_range[i].approx.subs([(symbol, approx)]))
                elif issymbolic(new_range[i]):
                    new_range[i] = SymExpr(
                        new_range[i].subs([(symbol, exact)]),
                        new_range[i].subs([(symbol, approx)]))
                else:
                    # Constant component: wrap it so later passes see a SymExpr.
                    new_range[i] = SymExpr(new_range[i], new_range[i])
        image.append(new_range)
    return subsets.Range(image)
def calc_set_image(map_idx, map_set, array_set):
    """Dispatch set-image computation based on the subset type.

    Args:
        map_idx: list of map parameter names.
        map_set: the map's range.
        array_set: the accessed subset (Range or Indices).

    Returns:
        A Range over-approximating the accessed set.

    Raises:
        TypeError: if array_set is neither a Range nor an Indices subset.
    """
    if isinstance(array_set, subsets.Range):
        return calc_set_image_range(map_idx, map_set, array_set)
    if isinstance(array_set, subsets.Indices):
        return calc_set_image_index(map_idx, map_set, array_set)
    # Previously this fell through and returned None silently, deferring the
    # failure to the caller; fail fast with a clear message instead.
    raise TypeError('Expected a Range or Indices subset, got %s' %
                    type(array_set).__name__)
def calc_set_union(set_a, set_b):
    """Compute a per-dimension bounding union of two Range subsets.

    For each dimension, takes the minimum of the begins and steps and the
    maximum of the ends, keeping exact and over-approximated expressions
    separate.

    Raises:
        NotImplementedError: if either argument is an Indices subset.
        TypeError: if the arguments are not both Range subsets.
        ValueError: if the ranges have different dimensionality.
    """
    if isinstance(set_a, subsets.Indices) or isinstance(set_b, subsets.Indices):
        raise NotImplementedError('Set union with indices is not implemented.')
    if not (isinstance(set_a, subsets.Range)
            and isinstance(set_b, subsets.Range)):
        raise TypeError('Can only compute the union of ranges.')
    if len(set_a) != len(set_b):
        raise ValueError('Range dimensions do not match')
    union = []
    for range_a, range_b in zip(set_a, set_b):
        r_union = []
        for i in range(3):
            # Split each bound into its exact and over-approximated parts.
            if isinstance(range_a[i], SymExpr):
                a_exact = range_a[i].expr
                a_approx = range_a[i].approx
            else:
                a_exact = range_a[i]
                a_approx = range_a[i]
            if isinstance(range_b[i], SymExpr):
                b_exact = range_b[i].expr
                b_approx = range_b[i].approx
            else:
                b_exact = range_b[i]
                b_approx = range_b[i]
            if i in {0, 2}:
                # Begin (0) and step (2): take the minimum of both ranges.
                r_union.append(
                    SymExpr(sympy.Min(a_exact, b_exact),
                            sympy.Min(a_approx, b_approx)))
            else:
                # End (1): take the maximum of both ranges.
                r_union.append(
                    SymExpr(sympy.Max(a_exact, b_exact),
                            sympy.Max(a_approx, b_approx)))
        union.append(r_union)
    return subsets.Range(union)
@registry.autoregister_params(singlestate=True)
@make_properties
class StripMining(pattern_matching.Transformation):
    """ Implements the strip-mining transformation.

        Strip-mining takes as input a map dimension and splits it into
        two dimensions. The new dimension iterates over the range of
        the original one with a parameterizable step, called the tile
        size. The original dimension is changed to iterate over the
        range of the tile size, with the same step as before.
    """

    _map_entry = nodes.MapEntry(nodes.Map("", [], []))

    # Properties
    dim_idx = Property(dtype=int,
                       default=-1,
                       desc="Index of dimension to be strip-mined")
    new_dim_prefix = Property(dtype=str,
                              default="tile",
                              desc="Prefix for new dimension name")
    tile_size = Property(dtype=str,
                         default="64",
                         desc="Tile size of strip-mined dimension")
    tile_stride = Property(dtype=str,
                           default="",
                           desc="Stride between two tiles of the "
                           "strip-mined dimension")
    divides_evenly = Property(dtype=bool,
                              default=False,
                              desc="Tile size divides dimension range evenly?")
    strided = Property(
        dtype=bool,
        default=False,
        desc="Continuous (false) or strided (true) elements in tile")

    @staticmethod
    def annotates_memlets():
        # This transformation recomputes memlets itself (set images/unions).
        return True

    @staticmethod
    def expressions():
        return [sdutil.node_path_graph(StripMining._map_entry)]

    @staticmethod
    def can_be_applied(graph, candidate, expr_index, sdfg, strict=False):
        # Strip-mining is always applicable to a matched map.
        return True

    @staticmethod
    def match_to_str(graph, candidate):
        map_entry = graph.nodes()[candidate[StripMining._map_entry]]
        return map_entry.map.label + ': ' + str(map_entry.map.params)

    def apply(self, sdfg):
        """Strip-mine the selected dimension of the matched map entry."""
        graph = sdfg.nodes()[self.state_id]
        # Strip-mine selected dimension.
        _, _, new_map = self._stripmine(sdfg, graph, self.subgraph)
        return new_map

    def __init__(self, *args, **kwargs):
        self._entry = nodes.EntryNode()
        self._tasklet = nodes.Tasklet('_')
        self._exit = nodes.ExitNode()
        super().__init__(*args, **kwargs)

    @property
    def entry(self):
        return self._entry

    @property
    def exit(self):
        return self._exit

    @property
    def tasklet(self):
        return self._tasklet

    def print_match_pattern(self, candidate):
        gentry = candidate[self.entry]
        return str(gentry.map.params[-1])

    def modifies_graph(self):
        return True

    def _find_new_dim(self, sdfg: SDFG, state: SDFGState, entry: nodes.MapEntry,
                      prefix: str, target_dim: str):
        """ Finds a variable that is not already defined in scope. """
        stree = state.scope_tree()
        if len(prefix) == 0:
            return target_dim
        candidate = '%s_%s' % (prefix, target_dim)
        index = 1
        # Keep appending a counter until the name is unique in this scope.
        while candidate in map(str, stree[entry].defined_vars):
            candidate = '%s%d_%s' % (prefix, index, target_dim)
            index += 1
        return candidate

    def _stripmine(self, sdfg, graph, candidate):
        """Perform the strip-mining and rewire/merge the surrounding memlets.

        Returns:
            (target_dim, new_dim, new_map): the strip-mined parameter name,
            the newly introduced tile parameter name, and the new outer Map.
        """
        # Retrieve map entry and exit nodes.
        map_entry = graph.nodes()[candidate[StripMining._map_entry]]
        map_exit = graph.exit_node(map_entry)

        # Retrieve transformation properties.
        dim_idx = self.dim_idx
        new_dim_prefix = self.new_dim_prefix
        tile_size = self.tile_size
        divides_evenly = self.divides_evenly
        strided = self.strided

        tile_stride = self.tile_stride
        if tile_stride is None or len(tile_stride) == 0:
            tile_stride = tile_size

        # Retrieve parameter and range of dimension to be strip-mined.
        target_dim = map_entry.map.params[dim_idx]
        td_from, td_to, td_step = map_entry.map.range[dim_idx]

        # Create the new (outer) tile map.
        new_dim = self._find_new_dim(sdfg, graph, map_entry, new_dim_prefix,
                                     target_dim)
        nd_from = 0
        if symbolic.pystr_to_symbolic(tile_stride) == 1:
            nd_to = td_to
        else:
            nd_to = symbolic.pystr_to_symbolic(
                'int_ceil(%s + 1 - %s, %s) - 1' %
                (symbolic.symstr(td_to), symbolic.symstr(td_from), tile_stride))
        nd_step = 1
        new_dim_range = (nd_from, nd_to, nd_step)
        new_map = nodes.Map(new_dim + '_' + map_entry.map.label, [new_dim],
                            subsets.Range([new_dim_range]))
        new_map_entry = nodes.MapEntry(new_map)
        new_map_exit = nodes.MapExit(new_map)

        # Change the range of the selected dimension to iterate over a single
        # tile.
        if strided:
            td_from_new = symbolic.pystr_to_symbolic(new_dim)
            td_to_new_approx = td_to
            td_step = symbolic.pystr_to_symbolic(tile_size)
        else:
            td_from_new = symbolic.pystr_to_symbolic(
                '%s + %s * %s' %
                (symbolic.symstr(td_from), str(new_dim), tile_stride))
            td_to_new_exact = symbolic.pystr_to_symbolic(
                'min(%s + 1, %s + %s * %s + %s) - 1' %
                (symbolic.symstr(td_to), symbolic.symstr(td_from), tile_stride,
                 str(new_dim), tile_size))
            td_to_new_approx = symbolic.pystr_to_symbolic(
                '%s + %s * %s + %s - 1' %
                (symbolic.symstr(td_from), tile_stride, str(new_dim),
                 tile_size))
        if divides_evenly or strided:
            td_to_new = td_to_new_approx
        else:
            td_to_new = dace.symbolic.SymExpr(td_to_new_exact, td_to_new_approx)

        # Special case: If range is 1 and no prefix was specified, skip range
        if td_from_new == td_to_new_approx and target_dim == new_dim:
            map_entry.map.range = subsets.Range(
                [r for i, r in enumerate(map_entry.map.range) if i != dim_idx])
            map_entry.map.params = [
                p for i, p in enumerate(map_entry.map.params) if i != dim_idx
            ]
            if len(map_entry.map.params) == 0:
                raise ValueError('Strip-mining all dimensions of the map with '
                                 'empty tiles is disallowed')
        else:
            map_entry.map.range[dim_idx] = (td_from_new, td_to_new, td_step)

        # Make internal map's schedule to "not parallel"
        new_map.schedule = map_entry.map.schedule
        map_entry.map.schedule = dtypes.ScheduleType.Sequential

        # Redirect edges so the new outer map wraps the original one.
        new_map_entry.in_connectors = dcpy(map_entry.in_connectors)
        sdutil.change_edge_dest(graph, map_entry, new_map_entry)
        new_map_exit.out_connectors = dcpy(map_exit.out_connectors)
        sdutil.change_edge_src(graph, map_exit, new_map_exit)

        # Create new entry edges (outer entry -> inner entry), merging memlets
        # of edges that refer to the same array/connector via set union.
        new_in_edges = dict()
        entry_in_conn = {}
        entry_out_conn = {}
        for _src, src_conn, _dst, _, memlet in graph.out_edges(map_entry):
            if (src_conn is not None and src_conn[:4] == 'OUT_' and
                    not isinstance(sdfg.arrays[memlet.data], dace.data.Scalar)):
                new_subset = calc_set_image(
                    map_entry.map.params,
                    map_entry.map.range,
                    memlet.subset,
                )
                conn = src_conn[4:]
                key = (memlet.data, 'IN_' + conn, 'OUT_' + conn)
                if key in new_in_edges.keys():
                    old_subset = new_in_edges[key].subset
                    new_in_edges[key].subset = calc_set_union(
                        old_subset, new_subset)
                else:
                    entry_in_conn['IN_' + conn] = None
                    entry_out_conn['OUT_' + conn] = None
                    new_memlet = dcpy(memlet)
                    new_memlet.subset = new_subset
                    if memlet.dynamic:
                        new_memlet.num_accesses = memlet.num_accesses
                    else:
                        new_memlet.num_accesses = new_memlet.num_elements()
                    new_in_edges[key] = new_memlet
            else:
                # Scalars and non-standard connectors pass through unchanged.
                if src_conn is not None and src_conn[:4] == 'OUT_':
                    conn = src_conn[4:]
                    in_conn = 'IN_' + conn
                    out_conn = 'OUT_' + conn
                else:
                    in_conn = src_conn
                    out_conn = src_conn
                if in_conn:
                    entry_in_conn[in_conn] = None
                if out_conn:
                    entry_out_conn[out_conn] = None
                new_in_edges[(memlet.data, in_conn, out_conn)] = dcpy(memlet)
        new_map_entry.out_connectors = entry_out_conn
        map_entry.in_connectors = entry_in_conn
        for (_, in_conn, out_conn), memlet in new_in_edges.items():
            graph.add_edge(new_map_entry, out_conn, map_entry, in_conn, memlet)

        # Create new exit edges (inner exit -> outer exit), mirroring the
        # entry-edge handling above.
        new_out_edges = dict()
        exit_in_conn = {}
        exit_out_conn = {}
        for _src, _, _dst, dst_conn, memlet in graph.in_edges(map_exit):
            if (dst_conn is not None and dst_conn[:3] == 'IN_' and
                    not isinstance(sdfg.arrays[memlet.data], dace.data.Scalar)):
                new_subset = calc_set_image(
                    map_entry.map.params,
                    map_entry.map.range,
                    memlet.subset,
                )
                conn = dst_conn[3:]
                key = (memlet.data, 'IN_' + conn, 'OUT_' + conn)
                if key in new_out_edges.keys():
                    old_subset = new_out_edges[key].subset
                    new_out_edges[key].subset = calc_set_union(
                        old_subset, new_subset)
                else:
                    exit_in_conn['IN_' + conn] = None
                    exit_out_conn['OUT_' + conn] = None
                    new_memlet = dcpy(memlet)
                    new_memlet.subset = new_subset
                    if memlet.dynamic:
                        new_memlet.num_accesses = memlet.num_accesses
                    else:
                        new_memlet.num_accesses = new_memlet.num_elements()
                    new_out_edges[key] = new_memlet
            else:
                # BUGFIX: this branch previously read the stale `src_conn`
                # variable (left over from the entry-edge loop) and stored the
                # memlet into `new_in_edges`, dropping the edge from the exit
                # reconnection below. Use `dst_conn` and `new_out_edges`.
                if dst_conn is not None and dst_conn[:3] == 'IN_':
                    conn = dst_conn[3:]
                    in_conn = 'IN_' + conn
                    out_conn = 'OUT_' + conn
                else:
                    in_conn = dst_conn
                    out_conn = dst_conn
                if in_conn:
                    exit_in_conn[in_conn] = None
                if out_conn:
                    exit_out_conn[out_conn] = None
                new_out_edges[(memlet.data, in_conn, out_conn)] = dcpy(memlet)
        new_map_exit.in_connectors = exit_in_conn
        map_exit.out_connectors = exit_out_conn
        for (_, in_conn, out_conn), memlet in new_out_edges.items():
            graph.add_edge(map_exit, out_conn, new_map_exit, in_conn, memlet)

        # Return strip-mined dimension.
        return target_dim, new_dim, new_map
| 41.027638 | 80 | 0.558271 |
2e849ed6cf235668c888ffdd31c05b414db710cf | 4,962 | py | Python | data_loader/batch_loader.py | SigureMo/shoeprint-recognition | fe9288938827497c8b555f4fea98e96487943d44 | [
"MIT"
] | 1 | 2020-04-06T05:37:03.000Z | 2020-04-06T05:37:03.000Z | data_loader/batch_loader.py | cattidea/shoeprint-recognition | fe9288938827497c8b555f4fea98e96487943d44 | [
"MIT"
] | 2 | 2019-12-16T23:43:38.000Z | 2020-02-01T07:01:39.000Z | data_loader/batch_loader.py | cattidea/shoeprint-recognition | fe9288938827497c8b555f4fea98e96487943d44 | [
"MIT"
] | 1 | 2019-11-29T16:41:28.000Z | 2019-11-29T16:41:28.000Z | import numpy as np
from config_parser.config import MARGIN
class BatchLoader():
    """Triplet selector for training (FaceNet-style online triplet mining).

    Iterating over an instance yields ``(shadow_index, triplets)`` tuples
    until one full pass over the class indices has been made.
    """
    def __init__(self, model, indices, class_per_batch, shoe_per_class, img_per_shoe,
                 img_arrays, sess):
        # model: provides compute_embeddings(); sess: TF session it runs in.
        self.model = model
        self.indices = indices
        self.class_per_batch = class_per_batch
        self.shoe_per_class = shoe_per_class
        self.img_per_shoe = img_per_shoe
        self.img_arrays = img_arrays
        self.sess = sess
        # Triplet margin from configuration.
        self.alpha = MARGIN
        # start_index walks the shuffled class list; shadow_index remembers
        # where the current epoch started so iteration stops after one pass.
        self.start_index = 0
        self.shadow_index = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.start_index >= self.shadow_index:
            self.shadow_index = self.start_index
            # Sample shoeprint indices, embed them, then mine triplets.
            shoeprints, nrof_shoes_per_class, self.start_index = self.sample_shoeprint(
                self.indices, self.start_index, self.class_per_batch,
                self.shoe_per_class, self.img_per_shoe)
            embeddings = self.model.compute_embeddings(self.img_arrays[shoeprints], self.sess)
            triplets = self.select_triplets(
                embeddings, shoeprints, nrof_shoes_per_class,
                self.class_per_batch, self.img_per_shoe, self.alpha)
            return self.shadow_index, triplets
        else:
            # start_index wrapped around past the epoch start: epoch done.
            raise StopIteration

    @staticmethod
    def sample_shoeprint(data_set, start_index, class_per_batch, shoe_per_class, img_per_shoe):
        """Sample the shoeprint image indices needed for one batch.

        Returns a flat index array of shape (nrof_shoes * img_per_shoe,),
        the number of shoes drawn per class, and the updated start_index.
        ``` python
        [
            <idx01>, <idx02>, ...
        ]
        ```
        """
        nrof_shoes = class_per_batch * shoe_per_class
        nrof_classes = len(data_set)
        img_per_shoe_origin = len(data_set[0][0])
        class_indices = np.arange(nrof_classes)
        np.random.shuffle(class_indices)
        shoeprints = []
        nrof_shoes_per_class = []
        while len(shoeprints) < nrof_shoes:
            class_index = class_indices[start_index]
            # Total number of shoeprints in this class.
            nrof_shoes_in_class = len(data_set[class_index])
            # Classes with a single shoe cannot supply positive pairs.
            if nrof_shoes_in_class > 1:
                shoe_indices = np.arange(nrof_shoes_in_class)
                np.random.shuffle(shoe_indices)
                # Number of shoeprints to draw from this class.
                nrof_shoes_from_class = min(nrof_shoes_in_class, shoe_per_class, nrof_shoes-len(shoeprints))
                idx = shoe_indices[: nrof_shoes_from_class]
                # Randomly choose a subset of the augmented images per shoe.
                img_indices = np.random.choice(img_per_shoe_origin, img_per_shoe, replace=False)
                shoeprints += [np.array(data_set[class_index][i])[img_indices] for i in idx]
                nrof_shoes_per_class.append(nrof_shoes_from_class)
            start_index += 1
            start_index %= nrof_classes
        assert len(shoeprints) == nrof_shoes
        return np.reshape(shoeprints, (nrof_shoes * img_per_shoe, )), nrof_shoes_per_class, start_index

    @staticmethod
    def select_triplets(embeddings, shoeprints, nrof_shoes_per_class, class_per_batch, img_per_shoe, alpha):
        """Mine (anchor, positive, negative) index triplets from embeddings.

        Follows FaceNet semi-hard mining: for each anchor/positive pair, a
        negative is chosen at random among those violating the margin alpha.
        """
        emb_start_idx = 0
        triplets = []
        for i in range(len(nrof_shoes_per_class)):
            nrof_shoes = int(nrof_shoes_per_class[i])
            if nrof_shoes <= 1:
                continue
            # Iterate over every shoe of this class (anchor candidates).
            for j in range(0, nrof_shoes*img_per_shoe, img_per_shoe):
                a_offset = np.random.randint(img_per_shoe)  # offset within this shoe's images
                a_idx = emb_start_idx + j + a_offset
                neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), axis=-1)
                # Set intra-class distances to infinity so same-class samples
                # are never selected as negatives.
                neg_dists_sqr[emb_start_idx: emb_start_idx+nrof_shoes*img_per_shoe] = np.inf
                for k in range(j+img_per_shoe, nrof_shoes*img_per_shoe, img_per_shoe):
                    p_offset = np.random.randint(img_per_shoe)
                    p_idx = emb_start_idx + k + p_offset
                    pos_dist_sqr = np.sum(np.square(embeddings[a_idx] - embeddings[p_idx]))
                    all_neg = np.where(neg_dists_sqr-pos_dist_sqr < alpha)[0]
                    nrof_random_negs = all_neg.shape[0]
                    if nrof_random_negs > 0:
                        # If any negatives violate the margin, pick one at random.
                        rnd_idx = np.random.randint(nrof_random_negs)
                        n_idx = all_neg[rnd_idx]
                        triplets.append((shoeprints[a_idx], shoeprints[p_idx], shoeprints[n_idx]))
            emb_start_idx += nrof_shoes * img_per_shoe
        np.random.shuffle(triplets)
        return triplets
| 41.697479 | 180 | 0.606207 |
0697d3306aeb7dc5c8a2b89fe58669c8eefc47b3 | 481 | py | Python | static_model/models/DemoModel.py | 12860/dlflow | 6fb974fd800649af82b20c5f4e40aea123559d10 | [
"Apache-2.0"
] | 156 | 2020-04-22T10:59:26.000Z | 2022-02-28T09:09:01.000Z | static_model/models/DemoModel.py | 12860/dlflow | 6fb974fd800649af82b20c5f4e40aea123559d10 | [
"Apache-2.0"
] | 5 | 2020-07-10T05:39:48.000Z | 2022-03-15T14:38:23.000Z | static_model/models/DemoModel.py | 12860/dlflow | 6fb974fd800649af82b20c5f4e40aea123559d10 | [
"Apache-2.0"
] | 31 | 2020-04-22T12:51:32.000Z | 2022-03-15T07:02:05.000Z | from dlflow.mgr import model, config
from dlflow.models import ModelBase
@model.reg("model register name")
class DemoModel(ModelBase):
    """Skeleton model demonstrating registration with the dlflow model manager."""

    # Declarative configuration: exposes "DemoParam" with a default value.
    cfg = config.setting(
        config.opt("DemoParam", "DemoDefaultValue")
    )

    def __init__(self, fmap):
        super(DemoModel, self).__init__(fmap)

    def build(self):
        # Construct the model graph here (intentionally unimplemented in demo).
        ...

    def train(self, feature, label):
        # Run one training step (intentionally unimplemented in demo).
        ...

    def evaluate(self, feature, label):
        # Run evaluation (intentionally unimplemented in demo).
        ...

    def predict(self, feature):
        # Run inference (intentionally unimplemented in demo).
        ...
| 18.5 | 51 | 0.607069 |
403e8b9ea30bd8b7115f761ab9cf7dae194aebf0 | 1,332 | py | Python | recommendation/urls.py | Zeble1603/cv-django | 329d8d471c92dc0ce5f4bfb2bb5212fc1c8c34b4 | [
"MIT"
] | 1 | 2021-10-19T21:22:38.000Z | 2021-10-19T21:22:38.000Z | recommendation/urls.py | Zeble1603/cv-django | 329d8d471c92dc0ce5f4bfb2bb5212fc1c8c34b4 | [
"MIT"
] | null | null | null | recommendation/urls.py | Zeble1603/cv-django | 329d8d471c92dc0ce5f4bfb2bb5212fc1c8c34b4 | [
"MIT"
] | null | null | null | """cv URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
# URL namespace for the recommendation app (used as "reco:<name>" in reverses).
app_name = "reco"

# Route table.  Literal paths ("list", "new", "thanks") are listed before the
# "<pk>" routes so the primary-key matcher cannot shadow them.
urlpatterns = [
    path("", views.ValidRecoListView.as_view(), name="validreco_list"),
    path("list", views.RecoListView.as_view(), name="reco_list"),
    path("new", views.RecoCreateView.as_view(), name="reco_create"),
    path("thanks", views.ThanksTemplateView.as_view(), name="thanks"),
    path("<pk>", views.RecoDetailView.as_view(), name="reco_detail"),
    path("<pk>/update", views.RecoUpdateView.as_view(), name="reco_update"),
    path("<pk>/delete", views.RecoDeleteView.as_view(), name="reco_delete"),
    path("<pk>/publish", views.reco_publish, name="reco_publish"),
]
e18b69cdc5b3a11e45b3092f395fd2cfffcd06b3 | 2,956 | py | Python | h/schemas/base.py | Manuelinux/kubeh | a549f0d1c09619843290f9b78bce7668ed90853a | [
"BSD-2-Clause"
] | null | null | null | h/schemas/base.py | Manuelinux/kubeh | a549f0d1c09619843290f9b78bce7668ed90853a | [
"BSD-2-Clause"
] | 4 | 2020-03-24T17:38:24.000Z | 2022-03-02T05:45:01.000Z | h/schemas/base.py | Manuelinux/kubeh | a549f0d1c09619843290f9b78bce7668ed90853a | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Shared functionality for schemas."""
from __future__ import unicode_literals
import copy
import colander
import deform
import jsonschema
from pyramid.csrf import check_csrf_token, get_csrf_token
from pyramid import httpexceptions
@colander.deferred
def deferred_csrf_token(node, kw):
    # Lazily resolve the CSRF token from the request found in the schema
    # bindings, at bind time rather than at schema definition time.
    return get_csrf_token(kw.get("request"))
class ValidationError(httpexceptions.HTTPBadRequest):
    # Raised when submitted data fails schema validation; as an
    # HTTPBadRequest subclass it maps directly to an HTTP 400 response.
    pass
class CSRFSchema(colander.Schema):
    """
    A CSRFSchema backward-compatible with the one from the hem module.

    Unlike hem, this doesn't require that the csrf_token appear in the
    serialized appstruct.
    """

    # Hidden form field carrying the CSRF token; its default is resolved
    # lazily from the bound request via deferred_csrf_token.
    csrf_token = colander.SchemaNode(
        colander.String(),
        widget=deform.widget.HiddenWidget(),
        default=deferred_csrf_token,
        missing=None,
    )

    def validator(self, form, value):
        # Whole-form validator: reject the submission if the request's
        # CSRF check fails (check_csrf_token raises on mismatch).
        request = form.bindings["request"]
        check_csrf_token(request)
class JSONSchema:
    """
    Validate data according to a Draft 4 JSON Schema.

    Inherit from this class and override the `schema` class property with a
    valid JSON schema.
    """

    # Subclasses override this with a Draft 4 JSON schema dict.
    schema = {}

    def __init__(self):
        # Build the validator once per instance; the format checker enables
        # validation of "format" keywords (e.g. email, uri).
        format_checker = jsonschema.FormatChecker()
        self.validator = jsonschema.Draft4Validator(
            self.schema, format_checker=format_checker
        )

    def validate(self, data):
        """
        Validate `data` according to the current schema.

        :param data: The data to be validated
        :returns: valid data
        :raises ~h.schemas.ValidationError: if the data is invalid
        """
        # Take a copy to ensure we don't modify what we were passed.
        appstruct = copy.deepcopy(data)

        # Collect *all* schema violations and report them in one message.
        errors = list(self.validator.iter_errors(appstruct))
        if errors:
            msg = ", ".join([_format_jsonschema_error(e) for e in errors])
            raise ValidationError(msg)

        return appstruct
def enum_type(enum_cls):
    """
    Return a `colander.Type` implementation for a field with a given enum type.

    :param enum_cls: The enum class
    :type enum_cls: enum.Enum
    """

    class EnumType(colander.SchemaType):
        def deserialize(self, node, cstruct):
            # Absent values deserialize to None.
            if cstruct == colander.null:
                return None

            try:
                member = enum_cls[cstruct]
            except KeyError:
                msg = '"{}" is not a known value'.format(cstruct)
                raise colander.Invalid(node, msg)
            return member

        def serialize(self, node, appstruct):
            # Falsy appstructs serialize to the empty string.
            return appstruct.name if appstruct else ""

    return EnumType
def _format_jsonschema_error(error):
"""Format a :py:class:`jsonschema.ValidationError` as a string."""
if error.path:
dotted_path = ".".join([str(c) for c in error.path])
return "{path}: {message}".format(path=dotted_path, message=error.message)
return error.message
| 26.159292 | 82 | 0.643099 |
85e27ad4631d2971e58e8422494e986a7b4d903d | 58,601 | py | Python | Lib/test/test_xmlrpc.py | techkang/cpython | d0fb3cec282fa31223f7002c2e7841a3cc9cc14a | [
"0BSD"
] | 4 | 2020-03-04T06:35:24.000Z | 2021-09-20T12:22:45.000Z | Lib/test/test_xmlrpc.py | techkang/cpython | d0fb3cec282fa31223f7002c2e7841a3cc9cc14a | [
"0BSD"
] | 148 | 2020-02-26T01:08:34.000Z | 2022-03-01T15:00:59.000Z | Lib/test/test_xmlrpc.py | jab/cpython | a856364cc920d8b16750fd1fadc902efb509754c | [
"0BSD"
] | 1 | 2019-09-02T00:51:59.000Z | 2019-09-02T00:51:59.000Z | import base64
import datetime
import decimal
import sys
import time
import unittest
from unittest import mock
import xmlrpc.client as xmlrpclib
import xmlrpc.server
import http.client
import http, http.server
import socket
import threading
import re
import io
import contextlib
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
try:
import gzip
except ImportError:
gzip = None
# Round-trip fixture covering every marshallable XML-RPC value type:
# strings, floats, ints, nested lists, binary wrappers (three spellings),
# booleans, non-ASCII text and keys, and all three DateTime constructors.
alist = [{'astring': 'foo@bar.baz.spam',
          'afloat': 7283.43,
          'anint': 2**20,
          'ashortlong': 2,
          'anotherlist': ['.zyx.41'],
          'abase64': xmlrpclib.Binary(b"my dog has fleas"),
          'b64bytes': b"my dog has fleas",
          'b64bytearray': bytearray(b"my dog has fleas"),
          'boolean': False,
          'unicode': '\u4000\u6000\u8000',
          'ukey\u4000': 'regular value',
          'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
          'datetime2': xmlrpclib.DateTime(
              (2005, 2, 10, 11, 41, 23, 0, 1, -1)),
          'datetime3': xmlrpclib.DateTime(
              datetime.datetime(2005, 2, 10, 11, 41, 23)),
          }]
class XMLRPCTestCase(unittest.TestCase):
    """Serialization tests for xmlrpc.client: dumps()/loads() round-trips,
    marshalling limits, extension types, and a couple of transport checks."""

    def test_dump_load(self):
        # Full round-trip of the module-level fixture.
        dump = xmlrpclib.dumps((alist,))
        load = xmlrpclib.loads(dump)
        self.assertEqual(alist, load[0][0])

    def test_dump_bare_datetime(self):
        # This checks that an unwrapped datetime.date object can be handled
        # by the marshalling code. This can't be done via test_dump_load()
        # since with use_builtin_types set to 1 the unmarshaller would create
        # datetime objects for the 'datetime[123]' keys as well
        dt = datetime.datetime(2005, 2, 10, 11, 41, 23)
        self.assertEqual(dt, xmlrpclib.DateTime('20050210T11:41:23'))
        s = xmlrpclib.dumps((dt,))

        result, m = xmlrpclib.loads(s, use_builtin_types=True)
        (newdt,) = result
        self.assertEqual(newdt, dt)
        self.assertIs(type(newdt), datetime.datetime)
        self.assertIsNone(m)

        result, m = xmlrpclib.loads(s, use_builtin_types=False)
        (newdt,) = result
        self.assertEqual(newdt, dt)
        self.assertIs(type(newdt), xmlrpclib.DateTime)
        self.assertIsNone(m)

        # use_datetime is the legacy spelling of use_builtin_types.
        result, m = xmlrpclib.loads(s, use_datetime=True)
        (newdt,) = result
        self.assertEqual(newdt, dt)
        self.assertIs(type(newdt), datetime.datetime)
        self.assertIsNone(m)

        result, m = xmlrpclib.loads(s, use_datetime=False)
        (newdt,) = result
        self.assertEqual(newdt, dt)
        self.assertIs(type(newdt), xmlrpclib.DateTime)
        self.assertIsNone(m)

    def test_datetime_before_1900(self):
        # same as before but with a date before 1900
        dt = datetime.datetime(1, 2, 10, 11, 41, 23)
        self.assertEqual(dt, xmlrpclib.DateTime('00010210T11:41:23'))
        s = xmlrpclib.dumps((dt,))

        result, m = xmlrpclib.loads(s, use_builtin_types=True)
        (newdt,) = result
        self.assertEqual(newdt, dt)
        self.assertIs(type(newdt), datetime.datetime)
        self.assertIsNone(m)

        result, m = xmlrpclib.loads(s, use_builtin_types=False)
        (newdt,) = result
        self.assertEqual(newdt, dt)
        self.assertIs(type(newdt), xmlrpclib.DateTime)
        self.assertIsNone(m)

    def test_bug_1164912 (self):
        # DateTime round-trip through a methodresponse keeps its str value.
        d = xmlrpclib.DateTime()
        ((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
                                            methodresponse=True))
        self.assertIsInstance(new_d.value, str)

        # Check that the output of dumps() is still an 8-bit string
        s = xmlrpclib.dumps((new_d,), methodresponse=True)
        self.assertIsInstance(s, str)

    def test_newstyle_class(self):
        # An arbitrary object marshals as its instance __dict__.
        class T(object):
            pass
        t = T()
        t.x = 100
        t.y = "Hello"
        ((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
        self.assertEqual(t2, t.__dict__)

    def test_dump_big_long(self):
        # Values outside the 32-bit XML-RPC int range must be rejected.
        self.assertRaises(OverflowError, xmlrpclib.dumps, (2**99,))

    def test_dump_bad_dict(self):
        # Only string keys are marshallable.
        self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))

    def test_dump_recursive_seq(self):
        # Self-referential containers cannot be marshalled.
        l = [1,2,3]
        t = [3,4,5,l]
        l.append(t)
        self.assertRaises(TypeError, xmlrpclib.dumps, (l,))

    def test_dump_recursive_dict(self):
        d = {'1':1, '2':1}
        t = {'3':3, 'd':d}
        d['t'] = t
        self.assertRaises(TypeError, xmlrpclib.dumps, (d,))

    def test_dump_big_int(self):
        # MAXINT/MININT are the inclusive 32-bit bounds; one past either
        # end overflows, both at the dumps() and Marshaller level.
        if sys.maxsize > 2**31-1:
            self.assertRaises(OverflowError, xmlrpclib.dumps,
                              (int(2**34),))

        xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
        self.assertRaises(OverflowError, xmlrpclib.dumps,
                          (xmlrpclib.MAXINT+1,))
        self.assertRaises(OverflowError, xmlrpclib.dumps,
                          (xmlrpclib.MININT-1,))

        def dummy_write(s):
            pass

        m = xmlrpclib.Marshaller()
        m.dump_int(xmlrpclib.MAXINT, dummy_write)
        m.dump_int(xmlrpclib.MININT, dummy_write)
        self.assertRaises(OverflowError, m.dump_int,
                          xmlrpclib.MAXINT+1, dummy_write)
        self.assertRaises(OverflowError, m.dump_int,
                          xmlrpclib.MININT-1, dummy_write)

    def test_dump_double(self):
        # Doubles have no 32-bit restriction, unlike ints.
        xmlrpclib.dumps((float(2 ** 34),))
        xmlrpclib.dumps((float(xmlrpclib.MAXINT),
                         float(xmlrpclib.MININT)))
        xmlrpclib.dumps((float(xmlrpclib.MAXINT + 42),
                         float(xmlrpclib.MININT - 42)))

        def dummy_write(s):
            pass

        m = xmlrpclib.Marshaller()
        m.dump_double(xmlrpclib.MAXINT, dummy_write)
        m.dump_double(xmlrpclib.MININT, dummy_write)
        m.dump_double(xmlrpclib.MAXINT + 42, dummy_write)
        m.dump_double(xmlrpclib.MININT - 42, dummy_write)

    def test_dump_none(self):
        # None round-trips only when allow_none=True; otherwise TypeError.
        value = alist + [None]
        arg1 = (alist + [None],)
        strg = xmlrpclib.dumps(arg1, allow_none=True)
        self.assertEqual(value,
                         xmlrpclib.loads(strg)[0][0])
        self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))

    def test_dump_encoding(self):
        # Non-UTF-8 encodings must survive params, methodresponse and
        # methodname serialization, both as str and as encoded bytes.
        value = {'key\u20ac\xa4':
                 'value\u20ac\xa4'}
        strg = xmlrpclib.dumps((value,), encoding='iso-8859-15')
        strg = "<?xml version='1.0' encoding='iso-8859-15'?>" + strg
        self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
        strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
        self.assertEqual(xmlrpclib.loads(strg)[0][0], value)

        strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
                               methodresponse=True)
        self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
        strg = strg.encode('iso-8859-15', 'xmlcharrefreplace')
        self.assertEqual(xmlrpclib.loads(strg)[0][0], value)

        methodname = 'method\u20ac\xa4'
        strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
                               methodname=methodname)
        self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
        self.assertEqual(xmlrpclib.loads(strg)[1], methodname)

    def test_dump_bytes(self):
        # bytes, bytearray and Binary all marshal identically; the
        # unmarshalled type depends on use_builtin_types.
        sample = b"my dog has fleas"
        self.assertEqual(sample, xmlrpclib.Binary(sample))
        for type_ in bytes, bytearray, xmlrpclib.Binary:
            value = type_(sample)
            s = xmlrpclib.dumps((value,))

            result, m = xmlrpclib.loads(s, use_builtin_types=True)
            (newvalue,) = result
            self.assertEqual(newvalue, sample)
            self.assertIs(type(newvalue), bytes)
            self.assertIsNone(m)

            result, m = xmlrpclib.loads(s, use_builtin_types=False)
            (newvalue,) = result
            self.assertEqual(newvalue, sample)
            self.assertIs(type(newvalue), xmlrpclib.Binary)
            self.assertIsNone(m)

    def test_loads_unsupported(self):
        # Unknown element tags raise ResponseError at every nesting level.
        ResponseError = xmlrpclib.ResponseError
        data = '<params><param><value><spam/></value></param></params>'
        self.assertRaises(ResponseError, xmlrpclib.loads, data)
        data = ('<params><param><value><array>'
                '<value><spam/></value>'
                '</array></value></param></params>')
        self.assertRaises(ResponseError, xmlrpclib.loads, data)
        data = ('<params><param><value><struct>'
                '<member><name>a</name><value><spam/></value></member>'
                '<member><name>b</name><value><spam/></value></member>'
                '</struct></value></param></params>')
        self.assertRaises(ResponseError, xmlrpclib.loads, data)

    def check_loads(self, s, value, **kwargs):
        # Helper: wrap *s* in a params envelope and assert loads() yields
        # *value* with the exact same type.
        dump = '<params><param><value>%s</value></param></params>' % s
        result, m = xmlrpclib.loads(dump, **kwargs)
        (newvalue,) = result
        self.assertEqual(newvalue, value)
        self.assertIs(type(newvalue), type(value))
        self.assertIsNone(m)

    def test_load_standard_types(self):
        check = self.check_loads
        check('string', 'string')
        check('<string>string</string>', 'string')
        check('<string>𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string</string>', '𝔘𝔫𝔦𝔠𝔬𝔡𝔢 string')
        check('<int>2056183947</int>', 2056183947)
        check('<int>-2056183947</int>', -2056183947)
        check('<i4>2056183947</i4>', 2056183947)
        check('<double>46093.78125</double>', 46093.78125)
        check('<boolean>0</boolean>', False)
        check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
              xmlrpclib.Binary(b'\x00byte string\xff'))
        check('<base64>AGJ5dGUgc3RyaW5n/w==</base64>',
              b'\x00byte string\xff', use_builtin_types=True)
        check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
              xmlrpclib.DateTime('20050210T11:41:23'))
        check('<dateTime.iso8601>20050210T11:41:23</dateTime.iso8601>',
              datetime.datetime(2005, 2, 10, 11, 41, 23),
              use_builtin_types=True)
        check('<array><data>'
              '<value><int>1</int></value><value><int>2</int></value>'
              '</data></array>', [1, 2])
        check('<struct>'
              '<member><name>b</name><value><int>2</int></value></member>'
              '<member><name>a</name><value><int>1</int></value></member>'
              '</struct>', {'a': 1, 'b': 2})

    def test_load_extension_types(self):
        # Non-standard but commonly emitted extension tags.
        check = self.check_loads
        check('<nil/>', None)
        check('<ex:nil/>', None)
        check('<i1>205</i1>', 205)
        check('<i2>20561</i2>', 20561)
        check('<i8>9876543210</i8>', 9876543210)
        check('<biginteger>98765432100123456789</biginteger>',
              98765432100123456789)
        check('<float>93.78125</float>', 93.78125)
        check('<bigdecimal>9876543210.0123456789</bigdecimal>',
              decimal.Decimal('9876543210.0123456789'))

    def test_get_host_info(self):
        # see bug #3613, this raised a TypeError
        transp = xmlrpc.client.Transport()
        self.assertEqual(transp.get_host_info("user@host.tld"),
                         ('host.tld',
                          [('Authorization', 'Basic dXNlcg==')], {}))

    def test_ssl_presence(self):
        try:
            import ssl
        except ImportError:
            has_ssl = False
        else:
            has_ssl = True
        try:
            xmlrpc.client.ServerProxy('https://localhost:9999').bad_function()
        except NotImplementedError:
            self.assertFalse(has_ssl, "xmlrpc client's error with SSL support")
        except OSError:
            self.assertTrue(has_ssl)

    def test_keepalive_disconnect(self):
        # A server dropping a keep-alive connection mid-session must be
        # transparently retried by the client transport.
        class RequestHandler(http.server.BaseHTTPRequestHandler):
            protocol_version = "HTTP/1.1"
            handled = False

            def do_POST(self):
                length = int(self.headers.get("Content-Length"))
                self.rfile.read(length)
                if self.handled:
                    # Second request on this connection: drop it to force
                    # the client to reconnect and retry.
                    self.close_connection = True
                    return
                response = xmlrpclib.dumps((5,), methodresponse=True)
                response = response.encode()
                self.send_response(http.HTTPStatus.OK)
                self.send_header("Content-Length", len(response))
                self.end_headers()
                self.wfile.write(response)
                self.handled = True
                self.close_connection = False

            def log_message(self, format, *args):
                # don't clobber sys.stderr
                pass

        def run_server():
            server.socket.settimeout(float(1)) # Don't hang if client fails
            server.handle_request() # First request and attempt at second
            server.handle_request() # Retried second request

        server = http.server.HTTPServer((socket_helper.HOST, 0), RequestHandler)
        self.addCleanup(server.server_close)
        thread = threading.Thread(target=run_server)
        thread.start()
        self.addCleanup(thread.join)
        url = "http://{}:{}/".format(*server.server_address)
        with xmlrpclib.ServerProxy(url) as p:
            self.assertEqual(p.method(), 5)
            self.assertEqual(p.method(), 5)
class SimpleXMLRPCDispatcherTestCase(unittest.TestCase):
    """Tests for SimpleXMLRPCDispatcher._dispatch: exceptions raised inside
    dispatched callables must propagate without any implicit chaining."""

    class DispatchExc(Exception):
        """Raised inside the dispatched functions when checking for
        chained exceptions"""

    def test_call_registered_func(self):
        """Calls explicitly registered function"""
        # Makes sure any exception raised inside the function has no other
        # exception chained to it

        exp_params = 1, 2, 3

        def dispatched_func(*params):
            raise self.DispatchExc(params)

        dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
        dispatcher.register_function(dispatched_func)
        with self.assertRaises(self.DispatchExc) as exc_ctx:
            dispatcher._dispatch('dispatched_func', exp_params)
        self.assertEqual(exc_ctx.exception.args, (exp_params,))
        self.assertIsNone(exc_ctx.exception.__cause__)
        self.assertIsNone(exc_ctx.exception.__context__)

    def test_call_instance_func(self):
        """Calls a registered instance attribute as a function"""
        # Makes sure any exception raised inside the function has no other
        # exception chained to it

        exp_params = 1, 2, 3

        class DispatchedClass:
            def dispatched_func(self, *params):
                raise SimpleXMLRPCDispatcherTestCase.DispatchExc(params)

        dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
        dispatcher.register_instance(DispatchedClass())
        with self.assertRaises(self.DispatchExc) as exc_ctx:
            dispatcher._dispatch('dispatched_func', exp_params)
        self.assertEqual(exc_ctx.exception.args, (exp_params,))
        self.assertIsNone(exc_ctx.exception.__cause__)
        self.assertIsNone(exc_ctx.exception.__context__)

    def test_call_dispatch_func(self):
        """Calls the registered instance's `_dispatch` function"""
        # Makes sure any exception raised inside the function has no other
        # exception chained to it

        exp_method = 'method'
        exp_params = 1, 2, 3

        class TestInstance:
            def _dispatch(self, method, params):
                raise SimpleXMLRPCDispatcherTestCase.DispatchExc(
                    method, params)

        dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
        dispatcher.register_instance(TestInstance())
        with self.assertRaises(self.DispatchExc) as exc_ctx:
            dispatcher._dispatch(exp_method, exp_params)
        self.assertEqual(exc_ctx.exception.args, (exp_method, exp_params))
        self.assertIsNone(exc_ctx.exception.__cause__)
        self.assertIsNone(exc_ctx.exception.__context__)

    def test_registered_func_is_none(self):
        """Calls explicitly registered function which is None"""

        dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
        dispatcher.register_function(None, name='method')
        with self.assertRaisesRegex(Exception, 'method'):
            dispatcher._dispatch('method', ('param',))

    def test_instance_has_no_func(self):
        """Attempts to call nonexistent function on a registered instance"""

        dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
        dispatcher.register_instance(object())
        with self.assertRaisesRegex(Exception, 'method'):
            dispatcher._dispatch('method', ('param',))

    def test_cannot_locate_func(self):
        """Calls a function that the dispatcher cannot locate"""

        dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher()
        with self.assertRaisesRegex(Exception, 'method'):
            dispatcher._dispatch('method', ('param',))
class HelperTestCase(unittest.TestCase):
    """Tests for the xmlrpclib.escape() helper."""

    def test_escape(self):
        # Each XML special character must be replaced by its entity.
        for raw, expected in (("a&b", "a&amp;b"),
                              ("a<b", "a&lt;b"),
                              ("a>b", "a&gt;b")):
            self.assertEqual(xmlrpclib.escape(raw), expected)
class FaultTestCase(unittest.TestCase):
    """Tests for xmlrpclib.Fault: repr, marshalling, and the dotted-name
    resolution helper used by the server dispatcher."""

    def test_repr(self):
        f = xmlrpclib.Fault(42, 'Test Fault')
        self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
        self.assertEqual(repr(f), str(f))

    def test_dump_fault(self):
        # A dumped Fault loads back as a plain dict; loading a fault
        # *response* (Marshaller output) re-raises it as Fault.
        f = xmlrpclib.Fault(42, 'Test Fault')
        s = xmlrpclib.dumps((f,))
        (newf,), m = xmlrpclib.loads(s)
        self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
        self.assertEqual(m, None)

        s = xmlrpclib.Marshaller().dumps(f)
        self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)

    def test_dotted_attribute(self):
        # this will raise AttributeError because code don't want us to use
        # private methods
        self.assertRaises(AttributeError,
                          xmlrpc.server.resolve_dotted_attribute, str, '__add')
        self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
class DateTimeTestCase(unittest.TestCase):
    """Tests for xmlrpclib.DateTime: all constructor forms, repr/str,
    decoding, and the full rich-comparison protocol."""

    def test_default(self):
        # Patch time.localtime so the no-argument constructor is
        # deterministic.
        with mock.patch('time.localtime') as localtime_mock:
            time_struct = time.struct_time(
                [2013, 7, 15, 0, 24, 49, 0, 196, 0])
            localtime_mock.return_value = time_struct
            localtime = time.localtime()
            t = xmlrpclib.DateTime()
            self.assertEqual(str(t),
                             time.strftime("%Y%m%dT%H:%M:%S", localtime))

    def test_time(self):
        # Construction from a POSIX timestamp.
        d = 1181399930.036952
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t),
                         time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))

    def test_time_tuple(self):
        d = (2007,6,9,10,38,50,5,160,0)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), '20070609T10:38:50')

    def test_time_struct(self):
        d = time.localtime(1181399930.036952)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))

    def test_datetime_datetime(self):
        d = datetime.datetime(2007,1,2,3,4,5)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), '20070102T03:04:05')

    def test_repr(self):
        d = datetime.datetime(2007,1,2,3,4,5)
        t = xmlrpclib.DateTime(d)
        val ="<DateTime '20070102T03:04:05' at %#x>" % id(t)
        self.assertEqual(repr(t), val)

    def test_decode(self):
        # decode() must tolerate surrounding whitespace.
        d = ' 20070908T07:11:13 '
        t1 = xmlrpclib.DateTime()
        t1.decode(d)
        tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
        self.assertEqual(t1, tref)
        t2 = xmlrpclib._datetime(d)
        self.assertEqual(t2, tref)

    def test_comparison(self):
        now = datetime.datetime.now()
        dtime = xmlrpclib.DateTime(now.timetuple())

        # datetime vs. DateTime
        self.assertTrue(dtime == now)
        self.assertTrue(now == dtime)
        then = now + datetime.timedelta(seconds=4)
        self.assertTrue(then >= dtime)
        self.assertTrue(dtime < then)

        # str vs. DateTime
        dstr = now.strftime("%Y%m%dT%H:%M:%S")
        self.assertTrue(dtime == dstr)
        self.assertTrue(dstr == dtime)
        dtime_then = xmlrpclib.DateTime(then.timetuple())
        self.assertTrue(dtime_then >= dstr)
        self.assertTrue(dstr < dtime_then)

        # some other types
        dbytes = dstr.encode('ascii')
        dtuple = now.timetuple()
        self.assertFalse(dtime == 1970)
        self.assertTrue(dtime != dbytes)
        self.assertFalse(dtime == bytearray(dbytes))
        self.assertTrue(dtime != dtuple)
        with self.assertRaises(TypeError):
            dtime < float(1970)
        with self.assertRaises(TypeError):
            dtime > dbytes
        with self.assertRaises(TypeError):
            dtime <= bytearray(dbytes)
        with self.assertRaises(TypeError):
            dtime >= dtuple

        # Objects with custom comparison behaviour (from test.support).
        self.assertTrue(dtime == ALWAYS_EQ)
        self.assertFalse(dtime != ALWAYS_EQ)
        self.assertTrue(dtime < LARGEST)
        self.assertFalse(dtime > LARGEST)
        self.assertTrue(dtime <= LARGEST)
        self.assertFalse(dtime >= LARGEST)
        self.assertFalse(dtime < SMALLEST)
        self.assertTrue(dtime > SMALLEST)
        self.assertFalse(dtime <= SMALLEST)
        self.assertTrue(dtime >= SMALLEST)
class BinaryTestCase(unittest.TestCase):
    # XXX What should str(Binary(b"\xff")) return?  For now the answer is
    # "\xff" (the payload is interpreted as Latin-1-encoded text), but that
    # feels unsatisfactory.  Perhaps only repr() should be defined,
    # returning r"Binary(b'\xff')" instead?

    def test_default(self):
        # An argument-less Binary holds no data at all.
        self.assertEqual(str(xmlrpclib.Binary()), '')

    def test_string(self):
        payload = b'\x01\x02\x03abc123\xff\xfe'
        wrapper = xmlrpclib.Binary(payload)
        self.assertEqual(str(wrapper), str(payload, "latin-1"))

    def test_decode(self):
        # Both the decode() method and the _binary() helper must accept
        # base64 input and recover the original payload.
        payload = b'\x01\x02\x03abc123\xff\xfe'
        encoded = base64.encodebytes(payload)
        expected = str(payload, "latin-1")
        via_method = xmlrpclib.Binary()
        via_method.decode(encoded)
        self.assertEqual(str(via_method), expected)
        self.assertEqual(str(xmlrpclib._binary(encoded)), expected)
# Shared server endpoint, filled in by http_server()/http_multi_server()
# once the listening socket is bound; PORT is reset to None on shutdown.
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests, requestHandler=None, encoding=None):
    """Run a SimpleXMLRPCServer in this thread, serving *numrequests*
    requests, then shut down.

    Sets the module globals ADDR/PORT/URL once bound, and sets *evt* twice:
    when ready to serve and again after shutdown (see comment above).
    """

    class TestInstanceClass:
        def div(self, x, y):
            return x // y

        def _methodHelp(self, name):
            if name == 'div':
                return 'This is the div function'

        class Fixture:
            @staticmethod
            def getData():
                return '42'

    class MyXMLRPCServer(xmlrpc.server.SimpleXMLRPCServer):
        def get_request(self):
            # Ensure the socket is always non-blocking. On Linux, socket
            # attributes are not inherited like they are on *BSD and Windows.
            s, port = self.socket.accept()
            s.setblocking(True)
            return s, port

    if not requestHandler:
        requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
    serv = MyXMLRPCServer(("localhost", 0), requestHandler,
                          encoding=encoding,
                          logRequests=False, bind_and_activate=False)
    try:
        serv.server_bind()
        global ADDR, PORT, URL
        ADDR, PORT = serv.socket.getsockname()
        #connect to IP address directly. This avoids socket.create_connection()
        #trying to connect to "localhost" using all address families, which
        #causes slowdown e.g. on vista which supports AF_INET6. The server listens
        #on AF_INET only.
        URL = "http://%s:%d"%(ADDR, PORT)
        serv.server_activate()
        serv.register_introspection_functions()
        serv.register_multicall_functions()
        serv.register_function(pow)
        serv.register_function(lambda x: x, 'têšt')

        @serv.register_function
        def my_function():
            '''This is my function'''
            return True

        @serv.register_function(name='add')
        def _(x, y):
            return x + y

        testInstance = TestInstanceClass()
        serv.register_instance(testInstance, allow_dotted_names=True)
        evt.set()

        # handle up to 'numrequests' requests
        while numrequests > 0:
            serv.handle_request()
            numrequests -= 1

    except TimeoutError:
        pass
    finally:
        serv.socket.close()
        PORT = None
        evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
    """Run a MultiPathXMLRPCServer with several dispatchers mounted on
    different paths (plus one deliberately broken dispatcher), serving
    *numrequests* requests.  Sets ADDR/PORT/URL and *evt* like http_server().
    """

    class TestInstanceClass:
        def div(self, x, y):
            return x // y

        def _methodHelp(self, name):
            if name == 'div':
                return 'This is the div function'

    def my_function():
        '''This is my function'''
        return True

    class MyXMLRPCServer(xmlrpc.server.MultiPathXMLRPCServer):
        def get_request(self):
            # Ensure the socket is always non-blocking. On Linux, socket
            # attributes are not inherited like they are on *BSD and Windows.
            s, port = self.socket.accept()
            s.setblocking(True)
            return s, port

    if not requestHandler:
        requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler

    class MyRequestHandler(requestHandler):
        # Accept any path; dispatch is decided by the multi-path server.
        rpc_paths = []

    class BrokenDispatcher:
        def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
            raise RuntimeError("broken dispatcher")

    serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
                          logRequests=False, bind_and_activate=False)
    serv.socket.settimeout(3)
    serv.server_bind()
    try:
        global ADDR, PORT, URL
        ADDR, PORT = serv.socket.getsockname()
        #connect to IP address directly. This avoids socket.create_connection()
        #trying to connect to "localhost" using all address families, which
        #causes slowdown e.g. on vista which supports AF_INET6. The server listens
        #on AF_INET only.
        URL = "http://%s:%d"%(ADDR, PORT)
        serv.server_activate()
        paths = [
            "/foo", "/foo/bar",
            "/foo?k=v", "/foo#frag", "/foo?k=v#frag",
            "", "/", "/RPC2", "?k=v", "#frag",
        ]
        for path in paths:
            d = serv.add_dispatcher(path, xmlrpc.server.SimpleXMLRPCDispatcher())
            d.register_introspection_functions()
            d.register_multicall_functions()
            d.register_function(lambda p=path: p, 'test')
        serv.get_dispatcher(paths[0]).register_function(pow)
        serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
        serv.add_dispatcher("/is/broken", BrokenDispatcher())
        evt.set()

        # handle up to 'numrequests' requests
        while numrequests > 0:
            serv.handle_request()
            numrequests -= 1

    except TimeoutError:
        pass
    finally:
        serv.socket.close()
        PORT = None
        evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
    """Return True if *e* is the product of a server-side exception caused
    by the 'temporarily unavailable' response sometimes given by operations
    on non-blocking sockets, else False.

    :param e: a ProtocolError (or any exception raised by the client).
    """
    # sometimes we get a -1 error code and/or empty headers
    try:
        if e.errcode == -1 or e.headers is None:
            return True
        exc_mess = e.headers.get('X-exception')
    except AttributeError:
        # Not a ProtocolError (e.g. a plain OSError): fall back to the
        # exception's own message.
        exc_mess = str(e)

    if exc_mess and 'temporarily unavailable' in exc_mess.lower():
        return True
    # Previously fell through and returned None implicitly; make the
    # boolean contract explicit (None and False are both falsy to callers).
    return False
def make_request_and_skipIf(condition, reason):
    """Like unittest.skipIf, but the replacement test still issues one
    request to the server at URL before skipping."""
    # If we skip the test, we have to make a request because
    # the server created in setUp blocks expecting one to come in.
    if not condition:
        return lambda func: func
    def decorator(func):
        def make_request_and_skip(self):
            try:
                xmlrpclib.ServerProxy(URL).my_function()
            except (xmlrpclib.ProtocolError, OSError) as e:
                # Transient 'temporarily unavailable' errors are expected.
                if not is_unavailable_exception(e):
                    raise
            raise unittest.SkipTest(reason)
        return make_request_and_skip
    return decorator
class BaseServerTestCase(unittest.TestCase):
    """Base fixture: runs an XML-RPC server (threadFunc) in a background
    thread for request_count requests, synchronized via the shared event."""

    # Subclass knobs: request handler class, number of requests the server
    # will serve, and which server function to run in the thread.
    requestHandler = None
    request_count = 1
    threadFunc = staticmethod(http_server)

    def setUp(self):
        # enable traceback reporting
        xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True

        self.evt = threading.Event()
        # start server thread to handle requests
        serv_args = (self.evt, self.request_count, self.requestHandler)
        thread = threading.Thread(target=self.threadFunc, args=serv_args)
        thread.start()
        self.addCleanup(thread.join)

        # wait for the server to be ready
        self.evt.wait()
        self.evt.clear()

    def tearDown(self):
        # wait on the server thread to terminate
        self.evt.wait()

        # disable traceback reporting
        xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii(self):
start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_client_encoding(self):
start_string = '\u20ac'
end_string = '\xa4'
try:
p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii_methodname(self):
try:
p = xmlrpclib.ServerProxy(URL, encoding='ascii')
self.assertEqual(p.têšt(42), 42)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_404(self):
# send POST with http.client, it should return 404 header and
# 'Not Found' message.
with contextlib.closing(http.client.HTTPConnection(ADDR, PORT)) as conn:
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
expected_methods = set(['pow', 'div', 'my_function', 'add', 'têšt',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall',
'Fixture'])
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
    def test_multicall(self):
        """MultiCall must batch several method calls into a single
        system.multicall round-trip and yield each result in order."""
        try:
            p = xmlrpclib.ServerProxy(URL)
            multicall = xmlrpclib.MultiCall(p)
            # Queue three calls; nothing is sent until multicall() below.
            multicall.add(2,3)
            multicall.pow(6,8)
            multicall.div(127,42)
            add_result, pow_result, div_result = multicall()
            self.assertEqual(add_result, 2+3)
            self.assertEqual(pow_result, 6**8)
            self.assertEqual(div_result, 127//42)
        except (xmlrpclib.ProtocolError, OSError) as e:
            # ignore failures due to non-blocking socket 'unavailable' errors
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains;
# [{'faultCode': 1, 'faultString': '<class \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<class \'Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
def test_allow_dotted_names_true(self):
# XXX also need allow_dotted_names_false test.
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
data = server.Fixture.getData()
self.assertEqual(data, '42')
def test_unicode_host(self):
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
self.assertEqual(server.add("a", "\xe9"), "a\xe9")
    def test_partial_post(self):
        # Check that a partial POST doesn't make the server loop: issue #14001.
        with contextlib.closing(socket.create_connection((ADDR, PORT))) as conn:
            # Note: the adjacent string literals below (including the
            # f-string) are implicitly concatenated into ONE string first,
            # and .encode('ascii') then applies to the whole request.
            # The request advertises Content-Length: 100 but the connection
            # closes early, so the server must not spin waiting for the body.
            conn.send('POST /RPC2 HTTP/1.0\r\n'
                      'Content-Length: 100\r\n\r\n'
                      'bye HTTP/1.1\r\n'
                      f'Host: {ADDR}:{PORT}\r\n'
                      'Accept-Encoding: identity\r\n'
                      'Content-Length: 0\r\n\r\n'.encode('ascii'))
def test_context_manager(self):
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, 3)
self.assertNotEqual(server('transport')._connection,
(None, None))
self.assertEqual(server('transport')._connection,
(None, None))
def test_context_manager_method_error(self):
try:
with xmlrpclib.ServerProxy(URL) as server:
server.add(2, "a")
except xmlrpclib.Fault:
pass
self.assertEqual(server('transport')._connection,
(None, None))
class SimpleServerEncodingTestCase(BaseServerTestCase):
    """Run the test server with an iso-8859-15 response encoding and check
    that non-ASCII text survives a round trip."""
    @staticmethod
    def threadFunc(evt, numrequests, requestHandler=None, encoding=None):
        # Override the base server thread to force the iso-8859-15 codec.
        http_server(evt, numrequests, requestHandler, 'iso-8859-15')
    def test_server_encoding(self):
        # U+20AC (euro sign) is representable in iso-8859-15; 0xA4 is the
        # legacy currency-sign position it replaced.
        start_string = '\u20ac'
        end_string = '\xa4'
        try:
            p = xmlrpclib.ServerProxy(URL)
            self.assertEqual(p.add(start_string, end_string),
                             start_string + end_string)
        except (xmlrpclib.ProtocolError, socket.error) as e:
            # ignore failures due to non-blocking socket unavailable errors.
            if not is_unavailable_exception(e):
                # protocol error; provide additional information in test output
                self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
def test_path3(self):
p = xmlrpclib.ServerProxy(URL+"/is/broken")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_invalid_path(self):
p = xmlrpclib.ServerProxy(URL+"/invalid")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path_query_fragment(self):
p = xmlrpclib.ServerProxy(URL+"/foo?k=v#frag")
self.assertEqual(p.test(), "/foo?k=v#frag")
def test_path_fragment(self):
p = xmlrpclib.ServerProxy(URL+"/foo#frag")
self.assertEqual(p.test(), "/foo#frag")
def test_path_query(self):
p = xmlrpclib.ServerProxy(URL+"/foo?k=v")
self.assertEqual(p.test(), "/foo?k=v")
def test_empty_path(self):
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.test(), "/RPC2")
def test_root_path(self):
p = xmlrpclib.ServerProxy(URL + "/")
self.assertEqual(p.test(), "/")
def test_empty_path_query(self):
p = xmlrpclib.ServerProxy(URL + "?k=v")
self.assertEqual(p.test(), "?k=v")
def test_empty_path_fragment(self):
p = xmlrpclib.ServerProxy(URL + "#frag")
self.assertEqual(p.test(), "#frag")
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
#check that we did at least two (the third may be pending append
#due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
#they should have all been two request handlers, each having logged at least
#two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
p("close")()
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegex(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
p("close")()
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
@unittest.skipIf(gzip is None, 'requires gzip')
class GzipUtilTestCase(unittest.TestCase):
    """Exercise xmlrpclib.gzip_encode/gzip_decode directly (no server)."""
    def test_gzip_decode_limit(self):
        # gzip_decode refuses payloads that inflate beyond 20 MiB by
        # default (decompression-bomb protection).
        max_gzip_decode = 20 * 1024 * 1024
        data = b'\0' * max_gzip_decode
        encoded = xmlrpclib.gzip_encode(data)
        decoded = xmlrpclib.gzip_decode(encoded)
        self.assertEqual(len(decoded), max_gzip_decode)
        # One byte over the limit must be rejected...
        data = b'\0' * (max_gzip_decode + 1)
        encoded = xmlrpclib.gzip_encode(data)
        with self.assertRaisesRegex(ValueError,
                                    "max gzipped payload length exceeded"):
            xmlrpclib.gzip_decode(encoded)
        # ...unless the caller explicitly disables the limit.
        xmlrpclib.gzip_decode(encoded, max_decode=-1)
class HeadersServerTestCase(BaseServerTestCase):
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
test_headers = None
def do_POST(self):
self.__class__.test_headers = self.headers
return super().do_POST()
requestHandler = RequestHandler
standard_headers = [
'Host', 'Accept-Encoding', 'Content-Type', 'User-Agent',
'Content-Length']
def setUp(self):
self.RequestHandler.test_headers = None
return super().setUp()
def assertContainsAdditionalHeaders(self, headers, additional):
expected_keys = sorted(self.standard_headers + list(additional.keys()))
self.assertListEqual(sorted(headers.keys()), expected_keys)
for key, value in additional.items():
self.assertEqual(headers.get(key), value)
def test_header(self):
p = xmlrpclib.ServerProxy(URL, headers=[('X-Test', 'foo')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_many(self):
p = xmlrpclib.ServerProxy(
URL, headers=[('X-Test', 'foo'), ('X-Test-Second', 'bar')])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(
headers, {'X-Test': 'foo', 'X-Test-Second': 'bar'})
def test_header_empty(self):
p = xmlrpclib.ServerProxy(URL, headers=[])
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {})
def test_header_tuple(self):
p = xmlrpclib.ServerProxy(URL, headers=(('X-Test', 'foo'),))
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
def test_header_items(self):
p = xmlrpclib.ServerProxy(URL, headers={'X-Test': 'foo'}.items())
self.assertEqual(p.pow(6, 8), 6**8)
headers = self.RequestHandler.test_headers
self.assertContainsAdditionalHeaders(headers, {'X-Test': 'foo'})
# Exercise the ServerProxy "magic" __call__ attributes; no network I/O occurs.
class ServerProxyTestCase(unittest.TestCase):
    def setUp(self):
        super().setUp()
        # Any syntactically valid URL will do -- no connection is opened.
        self.url = 'http://fake.localhost'

    def test_close(self):
        proxy = xmlrpclib.ServerProxy(self.url)
        # proxy('close')() shuts down the (never-opened) transport
        # and returns None.
        self.assertIsNone(proxy('close')())

    def test_transport(self):
        transport = xmlrpclib.Transport()
        proxy = xmlrpclib.ServerProxy(self.url, transport=transport)
        # proxy('transport') must expose the very transport passed in.
        self.assertIs(proxy('transport'), transport)
# A deliberately broken HTTPMessage: it reports a non-numeric Content-Length,
# so the server-side int() conversion fails -- used to exercise the
# _send_traceback_header flag on the server.
class FailingMessageClass(http.client.HTTPMessage):
    def get(self, key, failobj=None):
        lowered = key.lower()
        if lowered == 'content-length':
            # Force int('I am broken') to raise ValueError on the server.
            return 'I am broken'
        return super().get(lowered, failobj)
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
thread = threading.Thread(target=http_server, args=serv_args)
thread.start()
self.addCleanup(thread.join)
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
default_class = http.client.HTTPMessage
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = default_class
def test_basic(self):
# check that flag is false by default
flagval = xmlrpc.server.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, OSError) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("X-exception"), expected_err)
self.assertTrue(e.headers.get("X-traceback") is not None)
else:
self.fail('ProtocolError not raised')
@contextlib.contextmanager
def captured_stdout(encoding='utf-8'):
    """Temporarily replace sys.stdout with an in-memory text stream.

    Unlike support.captured_stdout(), the replacement wraps a BytesIO, so
    it exposes a real ``buffer`` attribute just like the genuine sys.stdout.
    """
    saved = sys.stdout
    replacement = io.TextIOWrapper(io.BytesIO(), encoding=encoding)
    sys.stdout = replacement
    try:
        yield replacement
    finally:
        # Always restore the original stream, even if the body raised.
        sys.stdout = saved
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = xmlrpc.server.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with os_helper.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
# get sysout output
with captured_stdout(encoding=self.cgi.encoding) as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with os_helper.EnvironmentVarGuard() as env, \
captured_stdout(encoding=self.cgi.encoding) as data_out, \
support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
# will respond exception, if so, our goal is achieved ;)
handle = data_out.read()
# start with 44th char so as not to get http header, we just
# need only xml
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
# Also test the content-length returned by handle_request
# Using the same test method inorder to avoid all the datapassing
# boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEqual(
int(re.search(r'Content-Length: (\d+)', handle).group(1)),
len(content))
class UseBuiltinTypesTestCase(unittest.TestCase):

    def test_use_builtin_types(self):
        """SimpleXMLRPCDispatcher(use_builtin_types=True) must hand the
        dispatched function bytes (not Binary) and datetime.datetime
        (not xmlrpclib.DateTime)."""
        received = []
        raw_bytes = b"my dog has fleas"
        raw_date = datetime.datetime(2008, 5, 26, 18, 25, 12)
        request = xmlrpclib.dumps((raw_bytes, raw_date), 'foobar')

        def collect(*args):
            received.extend(args)

        dispatcher = xmlrpc.server.SimpleXMLRPCDispatcher(
            allow_none=True, encoding=None, use_builtin_types=True)
        # Register under the method name used in the marshaled request.
        dispatcher.register_function(collect, 'foobar')
        dispatcher._marshaled_dispatch(request)

        self.assertEqual(received, [raw_bytes, raw_date])
        self.assertIs(type(received[0]), bytes)
        self.assertIs(type(received[1]), datetime.datetime)

    def test_cgihandler_has_use_builtin_types_flag(self):
        # The CGI handler must forward the flag to its dispatcher base.
        cgi_handler = xmlrpc.server.CGIXMLRPCRequestHandler(
            use_builtin_types=True)
        self.assertTrue(cgi_handler.use_builtin_types)

    def test_xmlrpcserver_has_use_builtin_types_flag(self):
        # The socket server must forward the flag as well.
        server = xmlrpc.server.SimpleXMLRPCServer(("localhost", 0),
                                                  use_builtin_types=True)
        server.server_close()
        self.assertTrue(server.use_builtin_types)
def setUpModule():
    """Snapshot live threads before the module runs; the registered module
    cleanup verifies they are all gone again afterwards."""
    thread_info = threading_helper.threading_setup()
    unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| 38.706077 | 87 | 0.618061 |
f5681f28bdac502eec8d802e14bd9cd3c3b2a996 | 1,679 | py | Python | flask_app/users/forms.py | julien-bonnefoy/website | a00d70697cc3a367dcdb32ca62ed29493029cf91 | [
"Apache-2.0"
] | null | null | null | flask_app/users/forms.py | julien-bonnefoy/website | a00d70697cc3a367dcdb32ca62ed29493029cf91 | [
"Apache-2.0"
] | null | null | null | flask_app/users/forms.py | julien-bonnefoy/website | a00d70697cc3a367dcdb32ca62ed29493029cf91 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
user forms: EditProfileForm, EmptyForm, SearchForm, MessageForm
"""
from flask import request
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from flask_babel import _, lazy_gettext as _l
from wtforms.validators import ValidationError, DataRequired, Length
from .models import User
class EditProfileForm(FlaskForm):
    """Profile-editing form: change the username and the "about me" bio."""

    username = StringField(_l('Username'), validators=[DataRequired()])
    about_me = TextAreaField(_l('About me'),
                             validators=[Length(min=0, max=140)])
    submit = SubmitField(_l('Submit'))

    def __init__(self, original_username, *args, **kwargs):
        """Remember the user's current username so keeping it unchanged
        is not rejected as a duplicate during validation."""
        super().__init__(*args, **kwargs)
        self.original_username = original_username

    def validate_username(self, username):
        """WTForms inline validator: reject a username already taken by
        another account.  Only hits the database when the name changed."""
        if username.data != self.original_username:
            # Use the validator's own field argument consistently (it is
            # the same object as self.username; the original mixed both).
            user = User.query.filter_by(username=username.data).first()
            if user is not None:
                raise ValidationError(_('Please use a different username.'))
class EmptyForm(FlaskForm):
    """Minimal form with only a submit button; FlaskForm supplies the CSRF
    token, so this suits POST-only actions that need no user input."""
    submit = SubmitField('Submit')
class SearchForm(FlaskForm):
    """Search box form, populated from the URL query string (GET)."""
    q = StringField(_l('Search'), validators=[DataRequired()])
    def __init__(self, *args, **kwargs):
        # Search is submitted via GET, so read field data from
        # request.args instead of the default POST form body.
        if 'formdata' not in kwargs:
            kwargs['formdata'] = request.args
        # GET requests carry no CSRF token, so CSRF checking is disabled.
        # NOTE(review): `csrf_enabled` was deprecated and later removed in
        # Flask-WTF (>= 0.14 uses meta={'csrf': False}); confirm the pinned
        # Flask-WTF version still honors this keyword.
        if 'csrf_enabled' not in kwargs:
            kwargs['csrf_enabled'] = False
        super(SearchForm, self).__init__(*args, **kwargs)
class MessageForm(FlaskForm):
    """Private-message composer: a required 1-140 character message body."""
    message = TextAreaField(_l('Message'), validators=[
        DataRequired(), Length(min=1, max=140)])
    submit = SubmitField(_l('Submit'))
| 32.288462 | 76 | 0.674806 |
329c363f98d3f85f976dbc461f37571fb10caf4e | 393 | py | Python | t/web/web_view_home_test.py | jrmsdev/pysadm | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | [
"BSD-3-Clause"
] | 1 | 2019-10-15T08:37:56.000Z | 2019-10-15T08:37:56.000Z | t/web/web_view_home_test.py | jrmsdev/pysadm | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | [
"BSD-3-Clause"
] | null | null | null | t/web/web_view_home_test.py | jrmsdev/pysadm | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
from _sadm.web.view import home
def test_index(testing_webapp):
wapp = testing_webapp('view')
with wapp.mock() as ctx:
d = home.index()
ctx.wapp.route.assert_any_call('/')
ctx.view.assert_any_call('index.html')
ctx.tpl.data.assert_any_call('home')
assert sorted(d.keys()) == ['cfg', 'cfgfile', 'user']
| 28.071429 | 56 | 0.707379 |
1e5d3ee304490e9de10e23cda4fb4eb2a76983e7 | 897 | py | Python | DeepLearning/DeepLearning/09_Deep_SongJW/garbageCan/minimini_network.py | ghost9023/DeepLearningPythonStudy | 4d319c8729472cc5f490935854441a2d4b4e8818 | [
"MIT"
] | 1 | 2019-06-27T04:05:59.000Z | 2019-06-27T04:05:59.000Z | DeepLearning/DeepLearning/09_Deep_SongJW/garbageCan/minimini_network.py | ghost9023/DeepLearningPythonStudy | 4d319c8729472cc5f490935854441a2d4b4e8818 | [
"MIT"
] | null | null | null | DeepLearning/DeepLearning/09_Deep_SongJW/garbageCan/minimini_network.py | ghost9023/DeepLearningPythonStudy | 4d319c8729472cc5f490935854441a2d4b4e8818 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import mnist_example_2layers_p73 as me73
# Build a 10-input, 5-hidden, 2-output network from the companion module.
network=me73.MyTwoLayerNet(10, 5, 2)
# Five hand-made training samples, 10 features each.
input_x=np.array([
[1,2,3,4,5,6,7,8,9,10],
[3,2,5,3,1,6,4,2,5,2],
[5,2,1,3,5,3,2,3,5,10],
[5,2,6,7,3,2,4,1,1,2],
[7,5,4,5,2,2,1,5,3,1]
])
# One-hot labels for the two output classes.
label_x=np.array([
[0,1],
[0,1],
[1,0],
[0,1],
[1,0]
])
itersNum=1000
learningRate=0.01
trainLossList=[]
# Remember one weight before training to show it changes (printed below).
temp=network.params['W1'][2,2]
plt.ion()
# Plain gradient descent driven by numerical gradients (slow but simple).
for i in range(itersNum):
    grad=network.numericalGradient(input_x, label_x)
    for key in ('W1', 'W2', 'b1', 'b2'):
        network.params[key]-=learningRate*grad[key]
    loss=network.loss(input_x, label_x)
    trainLossList.append(loss)
    if i%10==0:
        # Live-plot the loss every 10 iterations.
        plt.scatter(i, loss, color='r')
        plt.pause(0.01)
        print('iteration', i, ':', loss)
print(temp)
print(network.params['W1'][2,2])
# Keep the interactive plot window alive after training finishes.
while True :
    plt.pause(1)
| 19.085106 | 52 | 0.595318 |
2ff9e7cfdd4a7f85b604d07711e9b54312dd41c0 | 2,406 | py | Python | z2/part2/batch/jm/parser_errors_2/168772915.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 1 | 2020-04-16T12:13:47.000Z | 2020-04-16T12:13:47.000Z | z2/part2/batch/jm/parser_errors_2/168772915.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:50:15.000Z | 2020-05-19T14:58:30.000Z | z2/part2/batch/jm/parser_errors_2/168772915.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:45:13.000Z | 2020-06-09T19:18:31.000Z | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 168772915
"""
"""
random actions, total chaos
"""
# Presumably gamma_new(width, height, players, max_areas) -- confirm
# against part1's API; a None return would mean invalid parameters.
board = gamma_new(3, 5, 3, 1)
assert board is not None
# x=3 appears to be out of range on a width-3 board (columns 0-2), so the
# move is rejected -- consistent with every later move at x=3 returning 0.
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_move(board, 2, 1, 2) == 1
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_busy_fields(board, 2) == 1
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_free_fields(board, 1) == 3
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_free_fields(board, 1) == 3
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_busy_fields(board, 2) == 3
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_busy_fields(board, 3) == 1
assert gamma_move(board, 1, 4, 2) == 0
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 2, 2, 3) == 1
assert gamma_free_fields(board, 2) == 5
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 1, 3) == 0
assert gamma_busy_fields(board, 3) == 1
gamma_delete(board)
| 31.246753 | 44 | 0.650873 |
e0bf2872cbdc6bb7ec5a2d61edd2a6c4a6912063 | 1,561 | py | Python | notebooks/utils.py | mikelkl/APTOS2019 | fc99c889b09e3cd9d8b2c03bcc6557df017a94ce | [
"MIT"
] | 51 | 2019-09-08T06:58:52.000Z | 2021-06-26T16:24:37.000Z | notebooks/utils.py | mikelkl/APTOS2019 | fc99c889b09e3cd9d8b2c03bcc6557df017a94ce | [
"MIT"
] | null | null | null | notebooks/utils.py | mikelkl/APTOS2019 | fc99c889b09e3cd9d8b2c03bcc6557df017a94ce | [
"MIT"
] | 21 | 2019-09-17T13:29:34.000Z | 2021-06-26T16:25:03.000Z | import itertools
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta, timezone
def get_BJ_time():
    """Return the current Beijing time (UTC+8) as a '%m%d_%H-%M-%S' string.

    Handy for timestamping run/file names; note the format omits the year.
    """
    # datetime.now(tz) yields an aware UTC timestamp directly; the older
    # utcnow().replace(tzinfo=...) two-step is deprecated since Python 3.12.
    utc_dt = datetime.now(timezone.utc)
    # Convert to Beijing time, a fixed UTC+8 offset (China observes no DST).
    bj_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
    return bj_dt.strftime('%m%d_%H-%M-%S')
# Plot a confusion matrix
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    Input
    - cm : the computed confusion-matrix values
    - classes : the class labels for the rows/columns of the matrix
    - normalize : True shows row-wise percentages, False shows raw counts
    """
    if normalize:
        # Divide each row by its total so every true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate every cell; white text on dark (above-midpoint) cells.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
plt.xlabel('Predicted label') | 33.934783 | 101 | 0.660474 |
9d60a6c6461a38838ee26c03f043c252fe8903f0 | 465 | py | Python | config.py | VitorHaselein/Trabalho2 | cd18abaf6e9a70c3e704b6682e7372d9b6fc0448 | [
"bzip2-1.0.6"
] | 2 | 2019-06-05T21:00:50.000Z | 2019-06-08T20:24:37.000Z | config.py | VitorHaselein/Trabalho2 | cd18abaf6e9a70c3e704b6682e7372d9b6fc0448 | [
"bzip2-1.0.6"
] | 5 | 2020-07-17T04:44:19.000Z | 2022-02-17T23:56:10.000Z | config.py | VitorHaselein/Trabalho2 | cd18abaf6e9a70c3e704b6682e7372d9b6fc0448 | [
"bzip2-1.0.6"
] | null | null | null | DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///storage.db'
SQLALCHEMY_TRACK_MODIFICATIONS = True
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
    """Flask configuration object.

    NOTE(review): these values duplicate the module-level settings above;
    confirm which of the two mechanisms the app factory actually loads.
    """
    SQLALCHEMY_DATABASE_URI = 'sqlite:///storage.db'
    SQLALCHEMY_TRACK_MODIFICATIONS = True # ...
    # Commented-out alternative: prefer DATABASE_URL from the environment,
    # falling back to an sqlite file next to this module.
    # SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
    # 'sqlite:///' + os.path.join(basedir, 'storage.db')
# SQLALCHEMY_TRACK_MODIFICATIONS = False | 33.214286 | 67 | 0.72043 |
0f5ae82dd3fa15aa91d2dcd2daff1a2936868bc4 | 2,027 | py | Python | source/contest/bounce_kpoppenhaeger.py | python4astronomers/python4astronomers | 397d0241169483e00c336d7b09299e1633b7a330 | [
"CC-BY-3.0"
] | 46 | 2015-01-20T21:09:01.000Z | 2022-01-31T04:21:35.000Z | source/contest/bounce_kpoppenhaeger.py | python4astronomers/python4astronomers | 397d0241169483e00c336d7b09299e1633b7a330 | [
"CC-BY-3.0"
] | 9 | 2015-02-08T14:39:40.000Z | 2017-09-14T10:51:54.000Z | source/contest/bounce_kpoppenhaeger.py | python4astronomers/python4astronomers | 397d0241169483e00c336d7b09299e1633b7a330 | [
"CC-BY-3.0"
] | 18 | 2015-05-15T21:35:13.000Z | 2021-12-06T00:48:41.000Z | figure(1)
clf()
size = 15
axis([-size, size, -size, size])
# Define properties
n = 10
pos1 = (np.linspace(10,10,20)).reshape(n, 2) # the bubbles
pos2 = (np.linspace(-10,-10,20)).reshape(n, 2) # the thorns
vel1 = (0.2 * normal(size=n*2)).reshape(n, 2)
vel2 = (0.5 * normal(size=n*2)).reshape(n, 2)
sizes1 = 500 * random_sample(n) + 150
sizes2 = ones(n) * 50
# Colors where each row is (Red, Green, Blue, Alpha). Each can go
# from 0 to 1. Alpha is the transparency.
colors1 = random_sample([n, 4])
colors2 = random_sample([n, 4])
# Draw all the circles and return an object ``circles`` that allows
# manipulation of the plotted circles.
circles = scatter(pos1[:,0], pos1[:,1], marker='o', s=sizes1, c=colors1)
triangles = scatter(pos2[:,0], pos2[:,1], marker='^', s=sizes2, c=colors2)
boom = np.array([False] * n)
gone = 0.
angle = 0.
while gone < 10:
pos1 = pos1 + vel1
pos2 = pos2 + vel2
bounce1 = abs(pos1) > size # Find objects that are outside walls
bounce2 = abs(pos2) > size
vel1[bounce1] = -vel1[bounce1] # Bounce if outside the walls
vel2[bounce2] = -vel2[bounce2]
position = [[size+5], [size+5]]
for j in np.arange(0,n): # Check if target has been hit
boom_new = np.sqrt( (pos1[j,0] - pos2[:,0])**2 + (pos1[j,1] - pos2[:,1])**2 ) < (sizes1[j]/240)
if np.sum(boom_new) == 1:
position = [pos1[j,0], pos1[j,1]] # remember position where it was hit
boom[j] = True # and remember which bubble was hit
sizes1[boom] = 0 # If target was hit, let it vanish
plt.plot(position[0], position[1], 'o', color='r', markeredgecolor='r', markersize=5) # draw red dot at position of collision
gone = np.sum(boom) # How many bubbles have been hit so far
circles.set_offsets(pos1) # Change the positions
triangles.set_offsets(pos2)
angle = angle + 20
triangles.set_transform(matplotlib.transforms.Affine2D().rotate_deg(angle)) # Let thorns spin
draw()
| 37.537037 | 131 | 0.616675 |
76de009476f540aa66eb65c347aa38dd3aa0efb3 | 990 | py | Python | alert_grabbing.py | shlokie1999/KRYPTO-CODING-TASK | 7acb5d88a5ace507c000feedd7fd1bb5fe3f5dc2 | [
"Apache-2.0"
] | null | null | null | alert_grabbing.py | shlokie1999/KRYPTO-CODING-TASK | 7acb5d88a5ace507c000feedd7fd1bb5fe3f5dc2 | [
"Apache-2.0"
] | null | null | null | alert_grabbing.py | shlokie1999/KRYPTO-CODING-TASK | 7acb5d88a5ace507c000feedd7fd1bb5fe3f5dc2 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_restful import Resource, Api
# Flask application plus REST, ORM and serialisation layers bound to it.
app = Flask(__name__)
api = Api(app)
# NOTE(review): 'postgres:///Alerts.db' mixes a PostgreSQL URI scheme with an
# SQLite-style file name, and SQLAlchemy >= 1.4 rejects 'postgres://' (the
# scheme must be 'postgresql://') -- confirm which database is intended.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres:///Alerts.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
dbice = SQLAlchemy(app)
ma = Marshmallow(app)
def fetchAllAlerts():
    """Return all alerts as JSON, or a single alert when ``alertid`` is given.

    Fixes over the original: removed the ``@staticmethod`` decorator (this is
    a module-level function, not a class method), renamed the local so it no
    longer shadows the builtin ``id``, and made the variable names consistent
    (the original dumped an undefined name ``alerts`` and queried a ``User``
    model on the all-alerts path).
    """
    # Missing query parameter simply means "return everything".
    try:
        alert_id = request.args['alertid']
    except Exception:
        alert_id = None
    if not alert_id:
        alerts = Alerts.query.all()
        return jsonify(users_schema.dump(alerts))
    alert = Alerts.query.get(alert_id)
    return jsonify(user_schema.dump(alert))
def fetchAlertsByStatus():
    """Return alerts filtered by the ``status`` query parameter as JSON.

    Fixes over the original: removed the ``@staticmethod`` decorator (this is
    a module-level function), and replaced ``if not id:`` -- which tested the
    *builtin* ``id`` (always truthy), making the all-alerts branch dead code --
    with a test of the actual ``status`` value.  Also made variable names
    consistent (the original dumped an undefined ``alerts`` and queried a
    ``User`` model).
    """
    try:
        status = request.args['status']
    except Exception:
        status = None
    if not status:
        alerts = Alerts.query.all()
        return jsonify(users_schema.dump(alerts))
    # NOTE(review): ``query.get(status)`` looks up by primary key, not by a
    # status column -- presumably ``filter_by(status=status)`` was intended;
    # kept as-is pending confirmation of the Alerts model.
    alert = Alerts.query.get(status)
    return jsonify(user_schema.dump(alert))
8dd79c537b817feb49545d28d901becd248cc3ac | 530 | py | Python | webapp/app/migrations/0016_auto_20150828_0735.py | jacyn/burst | a3a655fbffa7f19197eb05ecb07b5fe05f6171b0 | [
"MIT"
] | null | null | null | webapp/app/migrations/0016_auto_20150828_0735.py | jacyn/burst | a3a655fbffa7f19197eb05ecb07b5fe05f6171b0 | [
"MIT"
] | null | null | null | webapp/app/migrations/0016_auto_20150828_0735.py | jacyn/burst | a3a655fbffa7f19197eb05ecb07b5fe05f6171b0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration redefining ``Object.text_align`` as a
    # CharField with left/right/center choices.  Migrations are part of the
    # applied schema history -- do not edit by hand once deployed.
    dependencies = [
        ('app', '0015_auto_20150828_0654'),
    ]
    operations = [
        migrations.AlterField(
            model_name='object',
            name='text_align',
            # bytes literals (b'...') indicate a Python 2-era Django project.
            field=models.CharField(default=b'left', max_length=64, choices=[(b'left', b'Left'), (b'right', b'Right'), (b'center', b'Center')]),
            preserve_default=True,
        ),
    ]
| 25.238095 | 143 | 0.6 |
2c3a3f342b79b164bb476033b2290280ce7d1ea4 | 1,842 | py | Python | train/build-AMP-table.py | celiosantosjr/macrel | b29985c282dfc4243d441f88dfc9be590a8b4fec | [
"MIT"
] | null | null | null | train/build-AMP-table.py | celiosantosjr/macrel | b29985c282dfc4243d441f88dfc9be590a8b4fec | [
"MIT"
] | null | null | null | train/build-AMP-table.py | celiosantosjr/macrel | b29985c282dfc4243d441f88dfc9be590a8b4fec | [
"MIT"
] | null | null | null | from macrel.fasta import fasta_iter
from macrel.AMP_features import fasta_features
from os import makedirs
makedirs('preproc/', exist_ok=True)
# --- Training set -----------------------------------------------------------
normalized_fname = 'preproc/AMP_NAMP.train.faa'
# The AmPEP data has duplicates! The exact same sequences appear in both
# the positive and negative classes, so NAMP entries already seen as AMP
# are dropped while writing the merged FASTA below.
seen = set()
with open(normalized_fname, 'wt') as output:
    for i, (_, seq) in enumerate(fasta_iter('data/M_model_train_AMP_sequence.fasta')):
        output.write(f">AMP_{i}\n{seq}\n")
        seen.add(seq)
    for i, (_, seq) in enumerate(fasta_iter('data/M_model_train_nonAMP_sequence.fasta')):
        if seq in seen: continue
        output.write(f">NAMP_{i}\n{seq}\n")
        seen.add(seq)
# Header prefix (AMP_/NAMP_) doubles as the class label in the 'group' column.
fs = fasta_features(normalized_fname)
fs['group'] = fs.index.map(lambda ix: ix.split('_')[0])
fs.to_csv('preproc/AMP.train.tsv', sep='\t')
# --- Test set (Supp-S2) -----------------------------------------------------
# NOTE(review): no de-duplication is applied to the test/bench sets below --
# presumably intentional, but worth confirming against the training data.
normalized_fname_test = 'preproc/AMP_NAMP.test.faa'
with open(normalized_fname_test, 'wt') as output:
    for i, (_, seq) in enumerate(fasta_iter('data/Supp-S2_AMP.faa')):
        output.write(f">AMP_{i}\n{seq}\n")
    for i, (_, seq) in enumerate(fasta_iter('data/Supp-S2_NAMP.faa')):
        output.write(f">NAMP_{i}\n{seq}\n")
fs_t = fasta_features(normalized_fname_test)
fs_t['group'] = fs_t.index.map(lambda ix: ix.split('_')[0])
fs_t.to_csv('preproc/AMP.test.tsv', sep='\t')
# --- Training benchmark set (Supp-S1) ---------------------------------------
normalized_fname_test = 'preproc/AMP_NAMP.train.bench.faa'
with open(normalized_fname_test, 'wt') as output:
    for i, (_, seq) in enumerate(fasta_iter('data/Supp-S1_AMP.faa')):
        output.write(f">AMP_{i}\n{seq}\n")
    for i, (_, seq) in enumerate(fasta_iter('data/Supp-S1_NAMP.faa')):
        output.write(f">NAMP_{i}\n{seq}\n")
fs_bench = fasta_features(normalized_fname_test)
fs_bench['group'] = fs_bench.index.map(lambda ix: ix.split('_')[0])
fs_bench.to_csv('preproc/AMP.train_bench.tsv', sep='\t')
| 41.863636 | 89 | 0.698697 |
dae2167473d19c021bdf76a4655faa6080f6bd5e | 295 | py | Python | creational/factory_method/data/truck.py | Kozak24/Patterns | 351d5c11f7c64ce5d58db37b6715fc8f7d31945a | [
"MIT"
] | null | null | null | creational/factory_method/data/truck.py | Kozak24/Patterns | 351d5c11f7c64ce5d58db37b6715fc8f7d31945a | [
"MIT"
] | null | null | null | creational/factory_method/data/truck.py | Kozak24/Patterns | 351d5c11f7c64ce5d58db37b6715fc8f7d31945a | [
"MIT"
] | null | null | null | from creational.factory_method.data import Transport
class Truck(Transport):
    """Road-based concrete Transport for the factory-method example."""

    def load(self) -> None:
        """Announce that the parcel has been loaded onto this truck."""
        announcement = f"Parcel '{self.parcel}' is loaded into {Truck.__name__} {self}"
        print(announcement)

    def ship(self) -> None:
        """Announce that the parcel is being shipped by this truck."""
        announcement = f"Parcel '{self.parcel}' is shipped by {Truck.__name__} {self}"
        print(announcement)
| 29.5 | 79 | 0.667797 |
4256feb9b7729fb929693e49842224fea49f5299 | 1,916 | py | Python | airbus_plugins/airbus_plugin_node_manager/src/airbus_plugin_node_manager/plugin.py | ipa320/airbus_coop | 974564807ba5d24096e237a9991311608a390da1 | [
"Apache-2.0"
] | 4 | 2017-10-15T23:32:24.000Z | 2019-12-26T12:31:53.000Z | airbus_plugins/airbus_plugin_node_manager/src/airbus_plugin_node_manager/plugin.py | ipa320/airbus_coop | 974564807ba5d24096e237a9991311608a390da1 | [
"Apache-2.0"
] | 6 | 2017-09-05T13:52:00.000Z | 2017-12-01T14:18:27.000Z | airbus_plugins/airbus_plugin_node_manager/src/airbus_plugin_node_manager/plugin.py | ipa320/airbus_coop | 974564807ba5d24096e237a9991311608a390da1 | [
"Apache-2.0"
] | 4 | 2017-09-04T08:14:36.000Z | 2017-09-18T07:22:21.000Z | #!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import time
import os
import re
import subprocess
import rosnode
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from python_qt_binding import loadUi
from table_monitoring_nodes import TableMonitoringNodes
# from table_launch_nodes import TableLaunchNodes
from airbus_plugin_node_manager.res import R
from airbus_cobot_gui import Plugin, ControlMode
class PluginNodeManager(Plugin):
    """Cobot GUI plugin embedding the ROS node monitoring table.

    Implements the ``airbus_cobot_gui.Plugin`` callback interface; hooks
    with no body are intentionally left as no-ops.
    """
    def __init__(self, context):
        Plugin.__init__(self, context)
    def onCreate(self, param):
        # Extend the widget with all attributes and children from UI file
        loadUi(R.layouts.mainwindow, self)
        # Build the node-monitoring table and start it (see
        # TableMonitoringNodes.onStart for what "start" entails).
        self.monitoring = TableMonitoringNodes(self)
        self.monitoring.onStart()
    def onPause(self):
        # Nothing to do when the plugin is paused.
        pass
    def onResume(self):
        pass
    def onControlModeChanged(self, mode):
        pass
    def onUserChanged(self, user_info):
        pass
    def onTranslate(self, lng):
        # Forward language changes so the table can re-translate its labels.
        self.monitoring.translate(lng)
    def onEmergencyStop(self, state):
        pass
    def onDestroy(self):
        # Shut the monitoring table down when the plugin is torn down.
        self.monitoring.onClose()
#End of file
| 25.891892 | 88 | 0.713466 |
ee5e85dd72c1141740123c606ba9e885cb1a923d | 77,547 | py | Python | phonopy/cui/settings.py | fidanyan/phonopy | 560ee340c4ae337dfac8018119ed129ae3b0c8b1 | [
"BSD-3-Clause"
] | null | null | null | phonopy/cui/settings.py | fidanyan/phonopy | 560ee340c4ae337dfac8018119ed129ae3b0c8b1 | [
"BSD-3-Clause"
] | null | null | null | phonopy/cui/settings.py | fidanyan/phonopy | 560ee340c4ae337dfac8018119ed129ae3b0c8b1 | [
"BSD-3-Clause"
] | 1 | 2021-09-17T08:21:30.000Z | 2021-09-17T08:21:30.000Z | # Copyright (C) 2011 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import numpy as np
def fracval(frac):
    """Convert a string such as '3', '0.5' or '1/3' to a float.

    A value containing '/' is interpreted as numerator/denominator;
    anything else is parsed directly by ``float``.
    """
    if '/' not in frac:
        return float(frac)
    parts = frac.split('/')
    return float(parts[0]) / float(parts[1])
class Settings(object):
    """Value container for all phonopy run-time options.

    Each option lives in a private attribute mirrored by a Java-style
    ``set_*``/``get_*`` accessor pair.  The accessors perform no
    validation or conversion; values are stored exactly as handed in
    (typically by ``ConfParser.set_settings``).
    """
    def __init__(self):
        # Every option starts at its default; ``None`` means "not set".
        self._band_indices = None
        self._band_paths = None
        self._band_points = None
        self._cell_filename = None
        self._chemical_symbols = None
        self._cutoff_frequency = None
        self._displacement_distance = None
        self._dm_decimals = None
        self._fc_decimals = None
        self._fc_symmetry = False
        self._fpitch = None
        self._frequency_conversion_factor = None
        self._frequency_scale_factor = None
        self._gv_delta_q = None
        self._is_diagonal_displacement = True
        self._is_eigenvectors = False
        self._is_mesh_symmetry = True
        self._is_nac = False
        self._is_rotational_invariance = False
        # 'auto' defers the plus/minus decision to the displacement generator.
        self._is_plusminus_displacement = 'auto'
        self._is_symmetry = True
        self._is_tetrahedron_method = False
        self._is_time_reversal_symmetry = True
        self._is_trigonal_displacement = False
        self._magmoms = None
        self._masses = None
        self._mesh = None
        self._mesh_shift = None
        self._nac_method = None
        self._nac_q_direction = None
        self._num_frequency_points = None
        self._primitive_matrix = None
        self._qpoints = None
        self._read_qpoints = False
        self._sigma = None
        self._supercell_matrix = None
        # Temperature sweep: tmin..tmax stepped by tstep (presumably kelvin
        # -- confirm against phonopy's thermal-property code).
        self._tmax = 1000
        self._tmin = 0
        self._tstep = 10
        self._use_alm = False
        self._yaml_mode = False
    # Trivial accessors: every setter stores its argument unchanged and
    # every getter returns the stored value unchanged.
    def set_band_paths(self, band_paths):
        self._band_paths = band_paths
    def get_band_paths(self):
        return self._band_paths
    def set_band_points(self, band_points):
        self._band_points = band_points
    def get_band_points(self):
        return self._band_points
    def set_band_indices(self, band_indices):
        self._band_indices = band_indices
    def get_band_indices(self):
        return self._band_indices
    def set_cell_filename(self, cell_filename):
        self._cell_filename = cell_filename
    def get_cell_filename(self):
        return self._cell_filename
    def set_chemical_symbols(self, symbols):
        self._chemical_symbols = symbols
    def get_chemical_symbols(self):
        return self._chemical_symbols
    def set_cutoff_frequency(self, cutoff_frequency):
        self._cutoff_frequency = cutoff_frequency
    def get_cutoff_frequency(self):
        return self._cutoff_frequency
    def set_dm_decimals(self, decimals):
        self._dm_decimals = decimals
    def get_dm_decimals(self):
        return self._dm_decimals
    def set_displacement_distance(self, distance):
        self._displacement_distance = distance
    def get_displacement_distance(self):
        return self._displacement_distance
    def set_fc_symmetry(self, fc_symmetry):
        self._fc_symmetry = fc_symmetry
    def get_fc_symmetry(self):
        return self._fc_symmetry
    def set_fc_decimals(self, decimals):
        self._fc_decimals = decimals
    def get_fc_decimals(self):
        return self._fc_decimals
    def set_frequency_conversion_factor(self, frequency_conversion_factor):
        self._frequency_conversion_factor = frequency_conversion_factor
    def get_frequency_conversion_factor(self):
        return self._frequency_conversion_factor
    def set_frequency_pitch(self, fpitch):
        self._fpitch = fpitch
    def get_frequency_pitch(self):
        return self._fpitch
    def set_frequency_scale_factor(self, frequency_scale_factor):
        self._frequency_scale_factor = frequency_scale_factor
    def get_frequency_scale_factor(self):
        return self._frequency_scale_factor
    def set_num_frequency_points(self, num_frequency_points):
        self._num_frequency_points = num_frequency_points
    def get_num_frequency_points(self):
        return self._num_frequency_points
    def set_group_velocity_delta_q(self, gv_delta_q):
        self._gv_delta_q = gv_delta_q
    def get_group_velocity_delta_q(self):
        return self._gv_delta_q
    def set_is_diagonal_displacement(self, is_diag):
        self._is_diagonal_displacement = is_diag
    def get_is_diagonal_displacement(self):
        return self._is_diagonal_displacement
    def set_is_eigenvectors(self, is_eigenvectors):
        self._is_eigenvectors = is_eigenvectors
    def get_is_eigenvectors(self):
        return self._is_eigenvectors
    def set_is_mesh_symmetry(self, is_mesh_symmetry):
        self._is_mesh_symmetry = is_mesh_symmetry
    def get_is_mesh_symmetry(self):
        return self._is_mesh_symmetry
    def set_is_nac(self, is_nac):
        self._is_nac = is_nac
    def get_is_nac(self):
        return self._is_nac
    def set_is_plusminus_displacement(self, is_pm):
        self._is_plusminus_displacement = is_pm
    def get_is_plusminus_displacement(self):
        return self._is_plusminus_displacement
    def set_is_rotational_invariance(self, is_rotational_invariance):
        self._is_rotational_invariance = is_rotational_invariance
    def get_is_rotational_invariance(self):
        return self._is_rotational_invariance
    def set_is_tetrahedron_method(self, is_thm):
        self._is_tetrahedron_method = is_thm
    def get_is_tetrahedron_method(self):
        return self._is_tetrahedron_method
    def set_is_trigonal_displacement(self, is_trigonal):
        self._is_trigonal_displacement = is_trigonal
    def get_is_trigonal_displacement(self):
        return self._is_trigonal_displacement
    def set_is_symmetry(self, is_symmetry):
        self._is_symmetry = is_symmetry
    def get_is_symmetry(self):
        return self._is_symmetry
    def set_magnetic_moments(self, magmoms):
        self._magmoms = magmoms
    def get_magnetic_moments(self):
        return self._magmoms
    def set_masses(self, masses):
        self._masses = masses
    def get_masses(self):
        return self._masses
    def set_max_temperature(self, tmax):
        self._tmax = tmax
    def get_max_temperature(self):
        return self._tmax
    def set_mesh_numbers(self, mesh):
        self._mesh = mesh
    def get_mesh_numbers(self):
        return self._mesh
    def set_mesh_shift(self, mesh_shift):
        self._mesh_shift = mesh_shift
    def get_mesh_shift(self):
        return self._mesh_shift
    def set_min_temperature(self, tmin):
        self._tmin = tmin
    def get_min_temperature(self):
        return self._tmin
    def set_nac_method(self, nac_method):
        self._nac_method = nac_method
    def get_nac_method(self):
        return self._nac_method
    def set_nac_q_direction(self, nac_q_direction):
        self._nac_q_direction = nac_q_direction
    def get_nac_q_direction(self):
        return self._nac_q_direction
    def set_primitive_matrix(self, primitive_matrix):
        self._primitive_matrix = primitive_matrix
    def get_primitive_matrix(self):
        return self._primitive_matrix
    def set_qpoints(self, qpoints):
        self._qpoints = qpoints
    def get_qpoints(self):
        return self._qpoints
    def set_read_qpoints(self, read_qpoints):
        self._read_qpoints = read_qpoints
    def get_read_qpoints(self):
        return self._read_qpoints
    def set_sigma(self, sigma):
        self._sigma = sigma
    def get_sigma(self):
        return self._sigma
    def set_supercell_matrix(self, matrix):
        self._supercell_matrix = matrix
    def get_supercell_matrix(self):
        return self._supercell_matrix
    def set_temperature_step(self, tstep):
        self._tstep = tstep
    def get_temperature_step(self):
        return self._tstep
    def set_time_reversal_symmetry(self, time_reversal_symmetry=True):
        self._is_time_reversal_symmetry = time_reversal_symmetry
    def get_time_reversal_symmetry(self):
        return self._is_time_reversal_symmetry
    def set_use_alm(self, use_alm):
        self._use_alm = use_alm
    def get_use_alm(self):
        return self._use_alm
    def set_yaml_mode(self, yaml_mode):
        self._yaml_mode = yaml_mode
    def get_yaml_mode(self):
        return self._yaml_mode
# Parse phonopy setting file
class ConfParser(object):
def __init__(self, filename=None, args=None):
self._confs = {}
self._parameters = {}
self._args = args
self._filename = filename
    def get_configures(self):
        """Return the raw tag -> string dict gathered from file/options."""
        return self._confs
    def get_settings(self):
        # Returns the populated settings object.  ``self._settings`` is not
        # assigned in this class's visible code -- presumably created by a
        # subclass before this is called; confirm against the callers.
        return self._settings
def setting_error(self, message):
print(message)
print("Please check the setting tags and options.")
sys.exit(1)
def read_file(self):
file = open(self._filename, 'r')
is_continue = False
left = None
for line in file:
if line.strip() == '':
is_continue = False
continue
if line.strip()[0] == '#':
is_continue = False
continue
if is_continue and left is not None:
self._confs[left] += line.strip()
self._confs[left] = self._confs[left].replace('+++', ' ')
is_continue = False
if line.find('=') != -1:
left, right = [x.strip() for x in line.split('=')]
self._confs[left.lower()] = right
if line.find('+++') != -1:
is_continue = True
    def read_options(self):
        """Transfer parsed command-line options (``self._args``) into the
        ``self._confs`` dict using the same string form as tag files.

        Only attributes present in the argparse namespace are considered,
        so this parser can serve scripts exposing different option
        subsets.  Boolean flags become '.true.'/'.false.' strings and
        list values are joined into space-separated strings, matching
        what ``parse_conf`` expects.
        """
        arg_list = vars(self._args)
        if 'band_indices' in arg_list:
            band_indices = self._args.band_indices
            if band_indices is not None:
                if type(band_indices) is list:
                    self._confs['band_indices'] = " ".join(band_indices)
                else:
                    self._confs['band_indices'] = band_indices
        if 'band_paths' in arg_list:
            if self._args.band_paths is not None:
                if type(self._args.band_paths) is list:
                    self._confs['band'] = " ".join(self._args.band_paths)
                else:
                    self._confs['band'] = self._args.band_paths
        if 'band_points' in arg_list:
            if self._args.band_points is not None:
                self._confs['band_points'] = self._args.band_points
        if 'cell_filename' in arg_list:
            if self._args.cell_filename is not None:
                self._confs['cell_filename'] = self._args.cell_filename
        if 'cutoff_frequency' in arg_list:
            if self._args.cutoff_frequency:
                self._confs['cutoff_frequency'] = self._args.cutoff_frequency
        if 'displacement_distance' in arg_list:
            if self._args.displacement_distance:
                self._confs['displacement_distance'] = \
                    self._args.displacement_distance
        if 'dynamical_matrix_decimals' in arg_list:
            if self._args.dynamical_matrix_decimals:
                self._confs['dm_decimals'] = \
                    self._args.dynamical_matrix_decimals
        if 'fc_symmetry' in arg_list:
            if self._args.fc_symmetry:
                self._confs['fc_symmetry'] = '.true.'
        if 'force_constants_decimals' in arg_list:
            if self._args.force_constants_decimals:
                self._confs['fc_decimals'] = \
                    self._args.force_constants_decimals
        if 'fpitch' in arg_list:
            if self._args.fpitch:
                self._confs['fpitch'] = self._args.fpitch
        if 'frequency_conversion_factor' in arg_list:
            freq_factor = self._args.frequency_conversion_factor
            if freq_factor:
                self._confs['frequency_conversion_factor'] = freq_factor
        # NOTE(review): this one tests ``in self._args`` (Namespace
        # __contains__) rather than ``in arg_list`` like its neighbours --
        # equivalent in effect, but inconsistent.
        if 'frequency_scale_factor' in self._args:
            freq_scale = self._args.frequency_scale_factor
            if freq_scale is not None:
                self._confs['frequency_scale_factor'] = freq_scale
        if 'gv_delta_q' in arg_list:
            if self._args.gv_delta_q:
                self._confs['gv_delta_q'] = self._args.gv_delta_q
        if 'is_eigenvectors' in arg_list:
            if self._args.is_eigenvectors:
                self._confs['eigenvectors'] = '.true.'
        if 'is_nac' in arg_list:
            if self._args.is_nac:
                self._confs['nac'] = '.true.'
        if 'is_nodiag' in arg_list:
            if self._args.is_nodiag:
                self._confs['diag'] = '.false.'
        if 'is_nomeshsym' in arg_list:
            if self._args.is_nomeshsym:
                self._confs['mesh_symmetry'] = '.false.'
        if 'is_nosym' in arg_list:
            if self._args.is_nosym:
                self._confs['symmetry'] = '.false.'
        if 'is_plusminus_displacements' in arg_list:
            if self._args.is_plusminus_displacements:
                self._confs['pm'] = '.true.'
        if 'is_tetrahedron_method' in arg_list:
            if self._args.is_tetrahedron_method:
                self._confs['tetrahedron'] = '.true.'
        if 'is_trigonal_displacements' in arg_list:
            if self._args.is_trigonal_displacements:
                self._confs['trigonal'] = '.true.'
        if 'masses' in arg_list:
            if self._args.masses is not None:
                if type(self._args.masses) is list:
                    self._confs['mass'] = " ".join(self._args.masses)
                else:
                    self._confs['mass'] = self._args.masses
        if 'magmoms' in arg_list:
            if self._args.magmoms is not None:
                if type(self._args.magmoms) is list:
                    self._confs['magmom'] = " ".join(self._args.magmoms)
                else:
                    self._confs['magmom'] = self._args.magmoms
        if 'mesh_numbers' in arg_list:
            mesh = self._args.mesh_numbers
            if mesh is not None:
                if type(mesh) is list:
                    self._confs['mesh_numbers'] = " ".join(mesh)
                else:
                    self._confs['mesh_numbers'] = mesh
        if 'num_frequency_points' in arg_list:
            opt_num_freqs = self._args.num_frequency_points
            if opt_num_freqs:
                self._confs['num_frequency_points'] = opt_num_freqs
        # For backward compatibility: old scripts exposed 'primitive_axis'.
        if 'primitive_axis' in arg_list:
            if self._args.primitive_axis is not None:
                if type(self._args.primitive_axis) is list:
                    primitive_axes = " ".join(self._args.primitive_axis)
                    self._confs['primitive_axes'] = primitive_axes
                else:
                    self._confs['primitive_axes'] = self._args.primitive_axis
        if 'primitive_axes' in arg_list:
            if self._args.primitive_axes:
                if type(self._args.primitive_axes) is list:
                    primitive_axes = " ".join(self._args.primitive_axes)
                    self._confs['primitive_axes'] = primitive_axes
                else:
                    self._confs['primitive_axes'] = self._args.primitive_axes
        if 'supercell_dimension' in arg_list:
            dim = self._args.supercell_dimension
            if dim is not None:
                if type(dim) is list:
                    self._confs['dim'] = " ".join(dim)
                else:
                    self._confs['dim'] = dim
        if 'qpoints' in arg_list:
            if self._args.qpoints is not None:
                if type(self._args.qpoints) is list:
                    self._confs['qpoints'] = " ".join(self._args.qpoints)
                else:
                    self._confs['qpoints'] = self._args.qpoints
        if 'nac_q_direction' in arg_list:
            q_dir = self._args.nac_q_direction
            if q_dir is not None:
                if type(q_dir) is list:
                    self._confs['q_direction'] = " ".join(q_dir)
                else:
                    self._confs['q_direction'] = q_dir
        if 'nac_method' in arg_list:
            if self._args.nac_method is not None:
                self._confs['nac_method'] = self._args.nac_method
        if 'read_qpoints' in arg_list:
            if self._args.read_qpoints:
                self._confs['read_qpoints'] = '.true.'
        if 'sigma' in arg_list:
            if self._args.sigma is not None:
                if type(self._args.sigma) is list:
                    self._confs['sigma'] = " ".join(self._args.sigma)
                else:
                    self._confs['sigma'] = self._args.sigma
        if 'tmax' in arg_list:
            if self._args.tmax:
                self._confs['tmax'] = self._args.tmax
        if 'tmin' in arg_list:
            if self._args.tmin:
                self._confs['tmin'] = self._args.tmin
        if 'tstep' in arg_list:
            if self._args.tstep:
                self._confs['tstep'] = self._args.tstep
        if 'use_alm' in arg_list:
            if self._args.use_alm:
                self._confs['alm'] = '.true.'
        if 'yaml_mode' in arg_list:
            if self._args.yaml_mode:
                self._confs['yaml_mode'] = '.true.'
    def parse_conf(self):
        """Convert raw tag strings in ``self._confs`` into typed values.

        Each recognised tag is validated, converted (int/float/fraction
        via ``fracval``/numpy array) and stored with ``set_parameter``;
        malformed values abort the program through ``setting_error``.
        """
        confs = self._confs
        for conf_key in confs.keys():
            if conf_key == 'band_indices':
                # Comma-separated groups of 1-based band numbers -> lists of
                # 0-based indices.
                vals = []
                for sum_set in confs['band_indices'].split(','):
                    vals.append([int(x) - 1 for x in sum_set.split()])
                self.set_parameter('band_indices', vals)
            if conf_key == 'cell_filename':
                self.set_parameter('cell_filename', confs['cell_filename'])
            if conf_key == 'dim':
                # DIM accepts 3 diagonal elements or a full 3x3 matrix.
                matrix = [int(x) for x in confs['dim'].split()]
                if len(matrix) == 9:
                    matrix = np.array(matrix).reshape(3, 3)
                elif len(matrix) == 3:
                    matrix = np.diag(matrix)
                else:
                    self.setting_error(
                        "Number of elements of DIM tag has to be 3 or 9.")
                if matrix.shape == (3, 3):
                    if np.linalg.det(matrix) < 1:
                        self.setting_error(
                            'Determinant of supercell matrix has to be '
                            'positive.')
                    else:
                        self.set_parameter('supercell_matrix', matrix)
            if conf_key in ('primitive_axis', 'primitive_axes'):
                if confs[conf_key].strip().lower() == 'auto':
                    self.set_parameter('primitive_axes', 'auto')
                elif not len(confs[conf_key].split()) == 9:
                    self.setting_error(
                        "Number of elements in %s has to be 9." %
                        conf_key.upper())
                else:
                    p_axis = []
                    for x in confs[conf_key].split():
                        p_axis.append(fracval(x))
                    p_axis = np.array(p_axis).reshape(3, 3)
                    if np.linalg.det(p_axis) < 1e-8:
                        self.setting_error(
                            "%s has to have positive determinant." %
                            conf_key.upper())
                    self.set_parameter('primitive_axes', p_axis)
            if conf_key == 'mass':
                self.set_parameter(
                    'mass',
                    [float(x) for x in confs['mass'].split()])
            if conf_key == 'magmom':
                self.set_parameter(
                    'magmom',
                    [float(x) for x in confs['magmom'].split()])
            if conf_key == 'atom_name':
                self.set_parameter(
                    'atom_name',
                    [x.capitalize() for x in confs['atom_name'].split()])
            if conf_key == 'displacement_distance':
                self.set_parameter('displacement_distance',
                                   float(confs['displacement_distance']))
            # The tags below are booleans written as '.true.'/'.false.'.
            if conf_key == 'diag':
                if confs['diag'].lower() == '.false.':
                    self.set_parameter('diag', False)
                elif confs['diag'].lower() == '.true.':
                    self.set_parameter('diag', True)
            if conf_key == 'pm':
                if confs['pm'].lower() == '.false.':
                    self.set_parameter('pm_displacement', False)
                elif confs['pm'].lower() == '.true.':
                    self.set_parameter('pm_displacement', True)
            if conf_key == 'trigonal':
                if confs['trigonal'].lower() == '.false.':
                    self.set_parameter('is_trigonal_displacement', False)
                elif confs['trigonal'].lower() == '.true.':
                    self.set_parameter('is_trigonal_displacement', True)
            if conf_key == 'eigenvectors':
                if confs['eigenvectors'].lower() == '.false.':
                    self.set_parameter('is_eigenvectors', False)
                elif confs['eigenvectors'].lower() == '.true.':
                    self.set_parameter('is_eigenvectors', True)
            if conf_key == 'nac':
                if confs['nac'].lower() == '.false.':
                    self.set_parameter('is_nac', False)
                elif confs['nac'].lower() == '.true.':
                    self.set_parameter('is_nac', True)
            if conf_key == 'symmetry':
                # Disabling symmetry also disables mesh symmetry.
                if confs['symmetry'].lower() == '.false.':
                    self.set_parameter('is_symmetry', False)
                    self.set_parameter('is_mesh_symmetry', False)
                elif confs['symmetry'].lower() == '.true.':
                    self.set_parameter('is_symmetry', True)
            if conf_key == 'mesh_symmetry':
                if confs['mesh_symmetry'].lower() == '.false.':
                    self.set_parameter('is_mesh_symmetry', False)
                elif confs['mesh_symmetry'].lower() == '.true.':
                    self.set_parameter('is_mesh_symmetry', True)
            if conf_key == 'rotational':
                if confs['rotational'].lower() == '.false.':
                    self.set_parameter('is_rotational', False)
                elif confs['rotational'].lower() == '.true.':
                    self.set_parameter('is_rotational', True)
            if conf_key == 'fc_symmetry':
                if confs['fc_symmetry'].lower() == '.false.':
                    self.set_parameter('fc_symmetry', False)
                elif confs['fc_symmetry'].lower() == '.true.':
                    self.set_parameter('fc_symmetry', True)
                else:
                    self.setting_error(
                        "FC_SYMMETRY has to be specified by .TRUE. or .FALSE.")
            if conf_key == 'fc_decimals':
                self.set_parameter('fc_decimals', confs['fc_decimals'])
            if conf_key == 'dm_decimals':
                self.set_parameter('dm_decimals', confs['dm_decimals'])
            if conf_key in ['mesh_numbers', 'mp', 'mesh']:
                vals = [int(x) for x in confs[conf_key].split()]
                if len(vals) < 3:
                    self.setting_error("Mesh numbers are incorrectly set.")
                self.set_parameter('mesh_numbers', vals[:3])
            if conf_key == 'band_points':
                self.set_parameter('band_points', int(confs['band_points']))
            if conf_key == 'band':
                # Comma-separated band-path sections; each section is a flat
                # list of fractional coordinates reshaped to (npoints, 3).
                bands = []
                if confs['band'].strip().lower() == 'auto':
                    self.set_parameter('band_paths', 'auto')
                else:
                    for section in confs['band'].split(','):
                        points = [fracval(x) for x in section.split()]
                        if len(points) % 3 != 0 or len(points) < 6:
                            self.setting_error("BAND is incorrectly set.")
                            break
                        bands.append(np.array(points).reshape(-1, 3))
                    self.set_parameter('band_paths', bands)
            if conf_key == 'qpoints':
                # QPOINTS doubles as a boolean ("read QPOINTS file") or an
                # inline list of fractional q-point coordinates.
                if confs['qpoints'].lower() == '.true.':
                    self.set_parameter('read_qpoints', True)
                elif confs['qpoints'].lower() == '.false.':
                    self.set_parameter('read_qpoints', False)
                else:
                    vals = [fracval(x) for x in confs['qpoints'].split()]
                    if len(vals) == 0 or len(vals) % 3 != 0:
                        self.setting_error("Q-points are incorrectly set.")
                    else:
                        self.set_parameter('qpoints',
                                           list(np.reshape(vals, (-1, 3))))
            if conf_key == 'read_qpoints':
                if confs['read_qpoints'].lower() == '.false.':
                    self.set_parameter('read_qpoints', False)
                elif confs['read_qpoints'].lower() == '.true.':
                    self.set_parameter('read_qpoints', True)
            if conf_key == 'nac_method':
                self.set_parameter('nac_method', confs['nac_method'].lower())
            if conf_key == 'q_direction':
                q_direction = [fracval(x)
                               for x in confs['q_direction'].split()]
                if len(q_direction) < 3:
                    self.setting_error("Number of elements of q_direction "
                                       "is less than 3")
                else:
                    self.set_parameter('nac_q_direction', q_direction)
            if conf_key == 'frequency_conversion_factor':
                val = float(confs['frequency_conversion_factor'])
                self.set_parameter('frequency_conversion_factor', val)
            if conf_key == 'frequency_scale_factor':
                self.set_parameter('frequency_scale_factor',
                                   float(confs['frequency_scale_factor']))
            if conf_key == 'fpitch':
                val = float(confs['fpitch'])
                self.set_parameter('fpitch', val)
            if conf_key == 'num_frequency_points':
                val = int(confs['num_frequency_points'])
                self.set_parameter('num_frequency_points', val)
            if conf_key == 'cutoff_frequency':
                val = float(confs['cutoff_frequency'])
                self.set_parameter('cutoff_frequency', val)
            if conf_key == 'sigma':
                # One sigma -> scalar; several -> list of floats.
                vals = [float(x) for x in str(confs['sigma']).split()]
                if len(vals) == 1:
                    self.set_parameter('sigma', vals[0])
                else:
                    self.set_parameter('sigma', vals)
            if conf_key == 'tetrahedron':
                if confs['tetrahedron'].lower() == '.false.':
                    self.set_parameter('is_tetrahedron_method', False)
                if confs['tetrahedron'].lower() == '.true.':
                    self.set_parameter('is_tetrahedron_method', True)
            if conf_key == 'tmin':
                val = float(confs['tmin'])
                self.set_parameter('tmin', val)
            if conf_key == 'tmax':
                val = float(confs['tmax'])
                self.set_parameter('tmax', val)
            if conf_key == 'tstep':
                val = float(confs['tstep'])
                self.set_parameter('tstep', val)
            # Group velocity finite difference
            if conf_key == 'gv_delta_q':
                self.set_parameter('gv_delta_q', float(confs['gv_delta_q']))
            # Use ALM for generating force constants
            if conf_key == 'alm':
                if confs['alm'].lower() == '.true.':
                    self.set_parameter('alm', True)
            # Phonopy YAML mode
            if conf_key == 'yaml_mode':
                if confs['yaml_mode'].lower() == '.true.':
                    self.set_parameter('yaml_mode', True)
def set_parameter(self, key, val):
self._parameters[key] = val
    def set_settings(self):
        """Copy parsed parameters into the underlying Settings object.

        Each recognized key in ``self._parameters`` is forwarded to the
        matching setter of ``self._settings``; keys that are absent leave
        the corresponding Settings defaults untouched.
        """
        params = self._parameters
        # Chemical symbols
        if 'atom_name' in params:
            self._settings.set_chemical_symbols(params['atom_name'])
        # Sets of band indices that are summed
        if 'band_indices' in params:
            self._settings.set_band_indices(params['band_indices'])
        # Filename of input unit cell
        if 'cell_filename' in params:
            self._settings.set_cell_filename(params['cell_filename'])
        # Cutoff frequency
        if 'cutoff_frequency' in params:
            self._settings.set_cutoff_frequency(params['cutoff_frequency'])
        # Diagonal displacement
        if 'diag' in params:
            self._settings.set_is_diagonal_displacement(params['diag'])
        # Distance of finite displacements introduced
        if 'displacement_distance' in params:
            self._settings.set_displacement_distance(
                params['displacement_distance'])
        # Decimals of values of dynamical matrix
        if 'dm_decimals' in params:
            self._settings.set_dm_decimals(int(params['dm_decimals']))
        # Decimals of values of force constants
        if 'fc_decimals' in params:
            self._settings.set_fc_decimals(int(params['fc_decimals']))
        # Enforce translational invariance and index permutation symmetry
        # to force constants?
        if 'fc_symmetry' in params:
            self._settings.set_fc_symmetry(params['fc_symmetry'])
        # Frequency unit conversion factor
        if 'frequency_conversion_factor' in params:
            self._settings.set_frequency_conversion_factor(
                params['frequency_conversion_factor'])
        # This scale factor is multiplied to force constants by
        # fc * scale_factor ** 2, therefore only changes
        # frequencies but does not change NAC part.
        if 'frequency_scale_factor' in params:
            self._settings.set_frequency_scale_factor(
                params['frequency_scale_factor'])
        # Spectrum drawing step
        if 'fpitch' in params:
            self._settings.set_frequency_pitch(params['fpitch'])
        # Number of sampling points for spectrum drawing
        if 'num_frequency_points' in params:
            self._settings.set_num_frequency_points(params['num_frequency_points'])
        # Group velocity finite difference
        if 'gv_delta_q' in params:
            self._settings.set_group_velocity_delta_q(params['gv_delta_q'])
        # Mesh sampling numbers
        if 'mesh_numbers' in params:
            self._settings.set_mesh_numbers(params['mesh_numbers'])
        # Is getting eigenvectors?
        if 'is_eigenvectors' in params:
            self._settings.set_is_eigenvectors(params['is_eigenvectors'])
        # Is reciprocal mesh symmetry searched?
        if 'is_mesh_symmetry' in params:
            self._settings.set_is_mesh_symmetry(params['is_mesh_symmetry'])
        # Non analytical term correction?
        if 'is_nac' in params:
            self._settings.set_is_nac(params['is_nac'])
        # Is rotational invariance ?
        if 'is_rotational' in params:
            self._settings.set_is_rotational_invariance(params['is_rotational'])
        # Is crystal symmetry searched?
        if 'is_symmetry' in params:
            self._settings.set_is_symmetry(params['is_symmetry'])
        # Tetrahedron method
        if 'is_tetrahedron_method' in params:
            self._settings.set_is_tetrahedron_method(
                params['is_tetrahedron_method'])
        # Trigonal displacement
        if 'is_trigonal_displacement' in params:
            self._settings.set_is_trigonal_displacement(
                params['is_trigonal_displacement'])
        # Magnetic moments
        if 'magmom' in params:
            self._settings.set_magnetic_moments(params['magmom'])
        # Atomic mass
        if 'mass' in params:
            self._settings.set_masses(params['mass'])
        # Plus minus displacement
        if 'pm_displacement' in params:
            self._settings.set_is_plusminus_displacement(
                params['pm_displacement'])
        # Primitive cell shape
        if 'primitive_axes' in params:
            self._settings.set_primitive_matrix(params['primitive_axes'])
        # Q-points mode
        if 'qpoints' in params:
            self._settings.set_qpoints(params['qpoints'])
        if 'read_qpoints' in params:
            if params['read_qpoints']:
                self._settings.set_read_qpoints(params['read_qpoints'])
        # non analytical term correction method
        if 'nac_method' in params:
            self._settings.set_nac_method(params['nac_method'])
        # q-direction for non analytical term correction
        if 'nac_q_direction' in params:
            self._settings.set_nac_q_direction(params['nac_q_direction'])
        # Smearing width
        if 'sigma' in params:
            self._settings.set_sigma(params['sigma'])
        # Supercell size
        if 'supercell_matrix' in params:
            self._settings.set_supercell_matrix(params['supercell_matrix'])
        # Temperature range
        if 'tmax' in params:
            self._settings.set_max_temperature(params['tmax'])
        if 'tmin' in params:
            self._settings.set_min_temperature(params['tmin'])
        if 'tstep' in params:
            self._settings.set_temperature_step(params['tstep'])
        # Band paths
        # BAND = 0.0 0.0 0.0 0.5 0.0 0.0 0.5 0.5 0.0 0.0 0.0 0.0 0.5 0.5 0.5
        # [array([[ 0. , 0. , 0. ],
        # [ 0.5, 0. , 0. ],
        # [ 0.5, 0.5, 0. ],
        # [ 0. , 0. , 0. ],
        # [ 0.5, 0.5, 0.5]])]
        #
        # BAND = 0.0 0.0 0.0 0.5 0.0 0.0, 0.5 0.5 0.0 0.0 0.0 0.0 0.5 0.5 0.5
        # [array([[ 0. , 0. , 0. ],
        # [ 0.5, 0. , 0. ]]),
        # array([[ 0.5, 0.5, 0. ],
        # [ 0. , 0. , 0. ],
        # [ 0.5, 0.5, 0.5]])]
        # or
        # BAND = AUTO
        if 'band_paths' in params:
            self._settings.set_band_paths(params['band_paths'])
        # This number includes end points
        if 'band_points' in params:
            self._settings.set_band_points(params['band_points'])
        # Use ALM to generating force constants
        if 'alm' in params:
            self._settings.set_use_alm(params['alm'])
        # Activate phonopy YAML mode
        if 'yaml_mode' in params:
            self._settings.set_yaml_mode(params['yaml_mode'])
#
# For phonopy
#
class PhonopySettings(Settings):
    """Phonopy-specific settings container.

    Extends the common ``Settings`` base with options used only by
    phonopy (animation, modulation, irreps, DOS/PDOS, thermal
    properties, force-constant I/O formats, ...).  It is a plain value
    holder: every option is a private attribute exposed through a
    set/get method pair, filled in by ``PhonopyConfParser``.
    """

    def __init__(self):
        Settings.__init__(self)
        self._anime_band_index = None
        self._anime_amplitude = None
        self._anime_division = None
        self._anime_qpoint = None
        self._anime_shift = None
        self._anime_type = 'v_sim'
        self._band_format = 'yaml'
        self._band_labels = None
        self._band_connection = False
        self._cutoff_radius = None
        self._dos = None
        self._fc_spg_symmetry = False
        self._fits_Debye_model = False
        self._fmax = None
        self._fmin = None
        self._irreps_q_point = None
        self._irreps_tolerance = 1e-5
        self._is_dos_mode = False
        self._is_full_fc = False
        self._is_group_velocity = False
        self._is_gamma_center = False
        self._is_hdf5 = False
        self._is_little_cogroup = False
        self._is_moment = False
        self._is_plusminus_displacement = 'auto'
        self._is_thermal_displacements = False
        self._is_thermal_displacement_matrices = False
        self._is_thermal_distances = False
        self._is_thermal_properties = False
        self._is_projected_thermal_properties = False
        self._lapack_solver = False
        self._mesh_format = 'yaml'
        self._modulation = None
        self._moment_order = None
        self._pdos_indices = None
        self._pretend_real = False
        self._projection_direction = None
        self._qpoints_format = 'yaml'
        self._read_force_constants = False
        self._readfc_format = 'text'
        self._run_mode = None
        self._show_irreps = False
        self._thermal_atom_pairs = None
        # Fixed spelling of this private attribute (was "...temperatue");
        # it is only used inside this class, so the rename is safe.
        self._thermal_displacement_matrix_temperature = None
        self._write_dynamical_matrices = False
        self._write_mesh = True
        self._write_force_constants = False
        self._writefc_format = 'text'
        self._xyz_projection = False

    def set_anime_band_index(self, band_index):
        self._anime_band_index = band_index

    def get_anime_band_index(self):
        return self._anime_band_index

    def set_anime_amplitude(self, amplitude):
        self._anime_amplitude = amplitude

    def get_anime_amplitude(self):
        return self._anime_amplitude

    def set_anime_division(self, division):
        self._anime_division = division

    def get_anime_division(self):
        return self._anime_division

    def set_anime_qpoint(self, qpoint):
        self._anime_qpoint = qpoint

    def get_anime_qpoint(self):
        return self._anime_qpoint

    def set_anime_shift(self, shift):
        self._anime_shift = shift

    def get_anime_shift(self):
        return self._anime_shift

    def set_anime_type(self, anime_type):
        self._anime_type = anime_type

    def get_anime_type(self):
        return self._anime_type

    def set_band_format(self, band_format):
        self._band_format = band_format

    def get_band_format(self):
        return self._band_format

    def set_band_labels(self, labels):
        self._band_labels = labels

    def get_band_labels(self):
        return self._band_labels

    def set_cutoff_radius(self, cutoff_radius):
        self._cutoff_radius = cutoff_radius

    def get_cutoff_radius(self):
        return self._cutoff_radius

    def set_fc_spg_symmetry(self, fc_spg_symmetry):
        self._fc_spg_symmetry = fc_spg_symmetry

    def get_fc_spg_symmetry(self):
        return self._fc_spg_symmetry

    def set_fits_Debye_model(self, fits_Debye_model):
        self._fits_Debye_model = fits_Debye_model

    def get_fits_Debye_model(self):
        return self._fits_Debye_model

    def set_max_frequency(self, fmax):
        self._fmax = fmax

    def get_max_frequency(self):
        return self._fmax

    def set_min_frequency(self, fmin):
        self._fmin = fmin

    def get_min_frequency(self):
        return self._fmin

    def set_irreps_q_point(self, q_point):
        self._irreps_q_point = q_point

    def get_irreps_q_point(self):
        return self._irreps_q_point

    def set_irreps_tolerance(self, tolerance):
        self._irreps_tolerance = tolerance

    def get_irreps_tolerance(self):
        return self._irreps_tolerance

    def set_is_band_connection(self, band_connection):
        self._band_connection = band_connection

    def get_is_band_connection(self):
        return self._band_connection

    def set_is_dos_mode(self, is_dos_mode):
        self._is_dos_mode = is_dos_mode

    def get_is_dos_mode(self):
        return self._is_dos_mode

    def set_is_full_fc(self, is_full_fc):
        self._is_full_fc = is_full_fc

    def get_is_full_fc(self):
        return self._is_full_fc

    def set_is_gamma_center(self, is_gamma_center):
        self._is_gamma_center = is_gamma_center

    def get_is_gamma_center(self):
        return self._is_gamma_center

    def set_is_group_velocity(self, is_group_velocity):
        self._is_group_velocity = is_group_velocity

    def get_is_group_velocity(self):
        return self._is_group_velocity

    def set_is_hdf5(self, is_hdf5):
        self._is_hdf5 = is_hdf5

    def get_is_hdf5(self):
        return self._is_hdf5

    def set_is_little_cogroup(self, is_little_cogroup):
        self._is_little_cogroup = is_little_cogroup

    def get_is_little_cogroup(self):
        return self._is_little_cogroup

    def set_is_moment(self, is_moment):
        self._is_moment = is_moment

    def get_is_moment(self):
        return self._is_moment

    def set_is_projected_thermal_properties(self, is_ptp):
        self._is_projected_thermal_properties = is_ptp

    def get_is_projected_thermal_properties(self):
        return self._is_projected_thermal_properties

    def set_is_thermal_displacements(self, is_thermal_displacements):
        self._is_thermal_displacements = is_thermal_displacements

    def get_is_thermal_displacements(self):
        return self._is_thermal_displacements

    def set_is_thermal_displacement_matrices(self, is_displacement_matrices):
        self._is_thermal_displacement_matrices = is_displacement_matrices

    def get_is_thermal_displacement_matrices(self):
        return self._is_thermal_displacement_matrices

    def set_is_thermal_distances(self, is_thermal_distances):
        self._is_thermal_distances = is_thermal_distances

    def get_is_thermal_distances(self):
        return self._is_thermal_distances

    def set_is_thermal_properties(self, is_thermal_properties):
        self._is_thermal_properties = is_thermal_properties

    def get_is_thermal_properties(self):
        return self._is_thermal_properties

    def set_lapack_solver(self, lapack_solver):
        self._lapack_solver = lapack_solver

    def get_lapack_solver(self):
        return self._lapack_solver

    def set_mesh(self,
                 mesh,
                 mesh_shift=None,
                 is_time_reversal_symmetry=True,
                 is_mesh_symmetry=True,
                 is_gamma_center=False):
        """Store mesh sampling parameters; shift defaults to no shift."""
        if mesh_shift is None:
            mesh_shift = [0., 0., 0.]
        self._mesh = mesh
        self._mesh_shift = mesh_shift
        self._is_time_reversal_symmetry = is_time_reversal_symmetry
        self._is_mesh_symmetry = is_mesh_symmetry
        self._is_gamma_center = is_gamma_center

    def get_mesh(self):
        return (self._mesh,
                self._mesh_shift,
                self._is_time_reversal_symmetry,
                self._is_mesh_symmetry,
                self._is_gamma_center)

    def set_mesh_format(self, mesh_format):
        self._mesh_format = mesh_format

    def get_mesh_format(self):
        return self._mesh_format

    def set_modulation(self, modulation):
        self._modulation = modulation

    def get_modulation(self):
        return self._modulation

    def set_moment_order(self, moment_order):
        self._moment_order = moment_order

    def get_moment_order(self):
        return self._moment_order

    def set_pdos_indices(self, indices):
        self._pdos_indices = indices

    def get_pdos_indices(self):
        return self._pdos_indices

    def set_pretend_real(self, pretend_real):
        self._pretend_real = pretend_real

    def get_pretend_real(self):
        return self._pretend_real

    def set_projection_direction(self, direction):
        self._projection_direction = direction

    def get_projection_direction(self):
        return self._projection_direction

    def set_qpoints_format(self, qpoints_format):
        self._qpoints_format = qpoints_format

    def get_qpoints_format(self):
        return self._qpoints_format

    def set_read_force_constants(self, read_force_constants):
        self._read_force_constants = read_force_constants

    def get_read_force_constants(self):
        return self._read_force_constants

    def set_readfc_format(self, readfc_format):
        self._readfc_format = readfc_format

    def get_readfc_format(self):
        return self._readfc_format

    def set_run_mode(self, run_mode):
        """Set the run mode; unknown mode names are silently ignored."""
        modes = ['qpoints',
                 'mesh',
                 'band',
                 'band_mesh',
                 'anime',
                 'modulation',
                 'displacements',
                 'irreps']
        for mode in modes:
            if run_mode.lower() == mode:
                self._run_mode = run_mode

    def get_run_mode(self):
        return self._run_mode

    def set_thermal_property_range(self, tmin, tmax, tstep):
        self._tmax = tmax
        self._tmin = tmin
        self._tstep = tstep

    def get_thermal_property_range(self):
        return {'min': self._tmin,
                'max': self._tmax,
                'step': self._tstep}

    def set_thermal_atom_pairs(self, atom_pairs):
        self._thermal_atom_pairs = atom_pairs

    def get_thermal_atom_pairs(self):
        return self._thermal_atom_pairs

    def set_thermal_displacement_matrix_temperature(self, t):
        self._thermal_displacement_matrix_temperature = t

    def get_thermal_displacement_matrix_temperature(self):
        return self._thermal_displacement_matrix_temperature

    def set_show_irreps(self, show_irreps):
        self._show_irreps = show_irreps

    def get_show_irreps(self):
        return self._show_irreps

    def set_write_dynamical_matrices(self, write_dynamical_matrices):
        self._write_dynamical_matrices = write_dynamical_matrices

    def get_write_dynamical_matrices(self):
        return self._write_dynamical_matrices

    def set_write_force_constants(self, write_force_constants):
        self._write_force_constants = write_force_constants

    def get_write_force_constants(self):
        return self._write_force_constants

    def set_write_mesh(self, write_mesh):
        self._write_mesh = write_mesh

    def get_write_mesh(self):
        return self._write_mesh

    def set_writefc_format(self, writefc_format):
        self._writefc_format = writefc_format

    def get_writefc_format(self):
        return self._writefc_format

    def set_xyz_projection(self, xyz_projection):
        self._xyz_projection = xyz_projection

    def get_xyz_projection(self):
        return self._xyz_projection
class PhonopyConfParser(ConfParser):
def __init__(self, filename=None, args=None):
self._settings = PhonopySettings()
confs = {}
if filename is not None:
ConfParser.__init__(self, filename=filename)
self.read_file() # store .conf file setting in self._confs
self._parse_conf() # self.parameters[key] = val
self._set_settings() # self.parameters -> PhonopySettings
confs.update(self._confs)
if args is not None:
# To invoke ConfParser.__init__() to flush variables.
ConfParser.__init__(self, args=args)
self._read_options() # store options in self._confs
self._parse_conf() # self.parameters[key] = val
self._set_settings() # self.parameters -> PhonopySettings
confs.update(self._confs)
self._confs = confs
def _read_options(self):
self.read_options() # store data in self._confs
arg_list = vars(self._args)
if 'band_format' in arg_list:
if self._args.band_format:
self._confs['band_format'] = self._args.band_format
if 'band_labels' in arg_list:
if self._args.band_labels is not None:
self._confs['band_labels'] = " ".join(self._args.band_labels)
if 'is_displacement' in arg_list:
if self._args.is_displacement:
self._confs['create_displacements'] = '.true.'
if 'is_gamma_center' in arg_list:
if self._args.is_gamma_center:
self._confs['gamma_center'] = '.true.'
if 'is_dos_mode' in arg_list:
if self._args.is_dos_mode:
self._confs['dos'] = '.true.'
if 'pdos' in arg_list:
if self._args.pdos is not None:
self._confs['pdos'] = " ".join(self._args.pdos)
if 'xyz_projection' in arg_list:
if self._args.xyz_projection:
self._confs['xyz_projection'] = '.true.'
if 'fc_spg_symmetry' in arg_list:
if self._args.fc_spg_symmetry:
self._confs['fc_spg_symmetry'] = '.true.'
if 'is_full_fc' in arg_list:
if self._args.is_full_fc:
self._confs['full_force_constants'] = '.true.'
if 'fits_debye_model' in arg_list:
if self._args.fits_debye_model:
self._confs['debye_model'] = '.true.'
if 'fmax' in arg_list:
if self._args.fmax:
self._confs['fmax'] = self._args.fmax
if 'fmin' in arg_list:
if self._args.fmin:
self._confs['fmin'] = self._args.fmin
if 'is_thermal_properties' in arg_list:
if self._args.is_thermal_properties:
self._confs['tprop'] = '.true.'
if 'pretend_real' in arg_list:
if self._args.pretend_real:
self._confs['pretend_real'] = '.true.'
if 'is_projected_thermal_properties' in arg_list:
if self._args.is_projected_thermal_properties:
self._confs['ptprop'] = '.true.'
if 'is_thermal_displacements' in arg_list:
if self._args.is_thermal_displacements:
self._confs['tdisp'] = '.true.'
if 'is_thermal_displacement_matrices' in arg_list:
if self._args.is_thermal_displacement_matrices:
self._confs['tdispmat'] = '.true.'
if 'thermal_displacement_matrices_cif' in arg_list:
opt_tdm_cif = self._args.thermal_displacement_matrices_cif
if opt_tdm_cif:
self._confs['tdispmat_cif'] = opt_tdm_cif
if 'projection_direction' in arg_list:
opt_proj_dir = self._args.projection_direction
if opt_proj_dir is not None:
self._confs['projection_direction'] = " ".join(opt_proj_dir)
if 'read_force_constants' in arg_list:
if self._args.read_force_constants:
self._confs['read_force_constants'] = '.true.'
if 'write_force_constants' in arg_list:
if self._args.write_force_constants:
self._confs['write_force_constants'] = '.true.'
if 'readfc_format' in arg_list:
if self._args.readfc_format:
self._confs['readfc_format'] = self._args.readfc_format
if 'writefc_format' in arg_list:
if self._args.writefc_format:
self._confs['writefc_format'] = self._args.writefc_format
if 'fc_format' in arg_list:
if self._args.fc_format:
self._confs['fc_format'] = self._args.fc_format
if 'is_hdf5' in arg_list:
if self._args.is_hdf5:
self._confs['hdf5'] = '.true.'
if 'write_dynamical_matrices' in arg_list:
if self._args.write_dynamical_matrices:
self._confs['writedm'] = '.true.'
if 'write_mesh' in arg_list:
if not self._args.write_mesh:
self._confs['write_mesh'] = '.false.'
if 'mesh_format' in arg_list:
if self._args.mesh_format:
self._confs['mesh_format'] = self._args.mesh_format
if 'qpoints_format' in arg_list:
if self._args.qpoints_format:
self._confs['qpoints_format'] = self._args.qpoints_format
if 'irreps_qpoint' in arg_list:
if self._args.irreps_qpoint is not None:
self._confs['irreps'] = " ".join(self._args.irreps_qpoint)
if 'show_irreps' in arg_list:
if self._args.show_irreps:
self._confs['show_irreps'] = '.true.'
if 'is_little_cogroup' in arg_list:
if self._args.is_little_cogroup:
self._confs['little_cogroup'] = '.true.'
if 'is_band_connection' in arg_list:
if self._args.is_band_connection:
self._confs['band_connection'] = '.true.'
if 'cutoff_radius' in arg_list:
if self._args.cutoff_radius:
self._confs['cutoff_radius'] = self._args.cutoff_radius
if 'modulation' in arg_list:
if self._args.modulation:
self._confs['modulation'] = " ".join(self._args.modulation)
if 'anime' in arg_list:
if self._args.anime:
self._confs['anime'] = " ".join(self._args.anime)
if 'is_group_velocity' in arg_list:
if self._args.is_group_velocity:
self._confs['group_velocity'] = '.true.'
if 'is_moment' in arg_list:
if self._args.is_moment:
self._confs['moment'] = '.true.'
if 'moment_order' in arg_list:
if self._args.moment_order:
self._confs['moment_order'] = self._args.moment_order
# Overwrite
if 'is_check_symmetry' in arg_list:
if self._args.is_check_symmetry:
# Dummy 'dim' setting for sym-check
self._confs['dim'] = '1 1 1'
if 'lapack_solver' in arg_list:
if self._args.lapack_solver:
self._confs['lapack_solver'] = '.true.'
def _parse_conf(self):
self.parse_conf()
confs = self._confs
for conf_key in confs.keys():
if conf_key == 'create_displacements':
if confs['create_displacements'].lower() == '.true.':
self.set_parameter('create_displacements', True)
if conf_key == 'band_format':
self.set_parameter('band_format', confs['band_format'].lower())
if conf_key == 'band_labels':
labels = [x for x in confs['band_labels'].split()]
self.set_parameter('band_labels', labels)
if conf_key == 'band_connection':
if confs['band_connection'].lower() == '.true.':
self.set_parameter('band_connection', True)
if conf_key == 'force_constants':
self.set_parameter('force_constants',
confs['force_constants'].lower())
if conf_key == 'read_force_constants':
if confs['read_force_constants'].lower() == '.true.':
self.set_parameter('read_force_constants', True)
if conf_key == 'write_force_constants':
if confs['write_force_constants'].lower() == '.true.':
self.set_parameter('write_force_constants', True)
if conf_key == 'full_force_constants':
if confs['full_force_constants'].lower() == '.true.':
self.set_parameter('is_full_fc', True)
if conf_key == 'cutoff_radius':
val = float(confs['cutoff_radius'])
self.set_parameter('cutoff_radius', val)
if conf_key == 'writedm':
if confs['writedm'].lower() == '.true.':
self.set_parameter('write_dynamical_matrices', True)
if conf_key == 'write_mesh':
if confs['write_mesh'].lower() == '.false.':
self.set_parameter('write_mesh', False)
if conf_key == 'hdf5':
if confs['hdf5'].lower() == '.true.':
self.set_parameter('hdf5', True)
if conf_key == 'mp_shift':
vals = [fracval(x) for x in confs['mp_shift'].split()]
if len(vals) < 3:
self.setting_error("MP_SHIFT is incorrectly set.")
self.set_parameter('mp_shift', vals[:3])
if conf_key == 'mesh_format':
self.set_parameter('mesh_format', confs['mesh_format'].lower())
if conf_key == 'qpoints_format':
self.set_parameter('qpoints_format',
confs['qpoints_format'].lower())
if conf_key == 'time_reversal_symmetry':
if confs['time_reversal_symmetry'].lower() == '.false.':
self.set_parameter('is_time_reversal_symmetry', False)
if conf_key == 'gamma_center':
if confs['gamma_center'].lower() == '.true.':
self.set_parameter('is_gamma_center', True)
if conf_key == 'fc_spg_symmetry':
if confs['fc_spg_symmetry'].lower() == '.true.':
self.set_parameter('fc_spg_symmetry', True)
if conf_key == 'readfc_format':
self.set_parameter('readfc_format', confs['readfc_format'].lower())
if conf_key == 'writefc_format':
self.set_parameter('writefc_format', confs['writefc_format'].lower())
if conf_key == 'fc_format':
self.set_parameter('readfc_format', confs['fc_format'].lower())
self.set_parameter('writefc_format', confs['fc_format'].lower())
# Animation
if conf_key == 'anime':
vals = []
data = confs['anime'].split()
if len(data) < 3:
self.setting_error("ANIME is incorrectly set.")
else:
self.set_parameter('anime', data)
if conf_key == 'anime_type':
anime_type = confs['anime_type'].lower()
if anime_type in ('arc', 'v_sim', 'poscar', 'xyz', 'jmol'):
self.set_parameter('anime_type', anime_type)
else:
self.setting_error("%s is not available for ANIME_TYPE tag."
% confs['anime_type'])
# Modulation
if conf_key == 'modulation':
self._parse_conf_modulation(confs['modulation'])
# Character table
if conf_key == 'irreps':
vals = [fracval(x) for x in confs['irreps'].split()]
if len(vals) == 3 or len(vals) == 4:
self.set_parameter('irreps_qpoint', vals)
else:
self.setting_error("IRREPS is incorrectly set.")
if conf_key == 'show_irreps':
if confs['show_irreps'].lower() == '.true.':
self.set_parameter('show_irreps', True)
if conf_key == 'little_cogroup':
if confs['little_cogroup'].lower() == '.true.':
self.set_parameter('little_cogroup', True)
# DOS
if conf_key == 'pdos':
vals = []
for index_set in confs['pdos'].split(','):
vals.append([int(x) - 1 for x in index_set.split()])
self.set_parameter('pdos', vals)
if conf_key == 'xyz_projection':
if confs['xyz_projection'].lower() == '.true.':
self.set_parameter('xyz_projection', True)
if conf_key == 'dos':
if confs['dos'].lower() == '.true.':
self.set_parameter('dos', True)
if conf_key == 'debye_model':
if confs['debye_model'].lower() == '.true.':
self.set_parameter('fits_debye_model', True)
if conf_key == 'dos_range':
vals = [float(x) for x in confs['dos_range'].split()]
self.set_parameter('dos_range', vals)
if conf_key == 'fmax':
self.set_parameter('fmax', float(confs['fmax']))
if conf_key == 'fmin':
self.set_parameter('fmin', float(confs['fmin']))
# Thermal properties
if conf_key == 'tprop':
if confs['tprop'].lower() == '.true.':
self.set_parameter('tprop', True)
# Projected thermal properties
if conf_key == 'ptprop':
if confs['ptprop'].lower() == '.true.':
self.set_parameter('ptprop', True)
# Use imaginary frequency as real for thermal property calculation
if conf_key == 'pretend_real':
if confs['pretend_real'].lower() == '.true.':
self.set_parameter('pretend_real', True)
# Thermal displacement
if conf_key == 'tdisp':
if confs['tdisp'].lower() == '.true.':
self.set_parameter('tdisp', True)
# Thermal displacement matrices
if conf_key == 'tdispmat':
if confs['tdispmat'].lower() == '.true.':
self.set_parameter('tdispmat', True)
# Write thermal displacement matrices to cif file,
# for which the temperature to execute is stored.
if conf_key == 'tdispmat_cif':
self.set_parameter('tdispmat_cif', float(confs['tdispmat_cif']))
# Thermal distance
if conf_key == 'tdistance':
atom_pairs = []
for atoms in confs['tdistance'].split(','):
pair = [int(x) - 1 for x in atoms.split()]
if len(pair) == 2:
atom_pairs.append(pair)
else:
self.setting_error(
"TDISTANCE is incorrectly specified.")
if len(atom_pairs) > 0:
self.set_parameter('tdistance', atom_pairs)
# Projection direction used for thermal displacements and PDOS
if conf_key == 'projection_direction':
vals = [float(x) for x in confs['projection_direction'].split()]
if len(vals) < 3:
self.setting_error(
"PROJECTION_DIRECTION (--pd) is incorrectly specified.")
else:
self.set_parameter('projection_direction', vals)
# Group velocity
if conf_key == 'group_velocity':
if confs['group_velocity'].lower() == '.true.':
self.set_parameter('is_group_velocity', True)
# Moment of phonon states distribution
if conf_key == 'moment':
if confs['moment'].lower() == '.true.':
self.set_parameter('moment', True)
if conf_key == 'moment_order':
self.set_parameter('moment_order', int(confs['moment_order']))
# Use Lapack solver via Lapacke
if conf_key == 'lapack_solver':
if confs['lapack_solver'].lower() == '.true.':
self.set_parameter('lapack_solver', True)
def _parse_conf_modulation(self, conf_modulation):
modulation = {}
modulation['dimension'] = [1, 1, 1]
modulation['order'] = None
mod_list = conf_modulation.split(',')
header = mod_list[0].split()
if len(header) > 2 and len(mod_list) > 1:
if len(header) > 8:
dimension = [int(x) for x in header[:9]]
modulation['dimension'] = dimension
if len(header) > 11:
delta_q = [float(x) for x in header[9:12]]
modulation['delta_q'] = delta_q
if len(header) == 13:
modulation['order'] = int(header[12])
else:
dimension = [int(x) for x in header[:3]]
modulation['dimension'] = dimension
if len(header) > 3:
delta_q = [float(x) for x in header[3:6]]
modulation['delta_q'] = delta_q
if len(header) == 7:
modulation['order'] = int(header[6])
vals = []
for phonon_mode in mod_list[1:]:
mode_conf = [x for x in phonon_mode.split()]
if len(mode_conf) < 4 or len(mode_conf) > 6:
self.setting_error("MODULATION tag is wrongly set.")
break
else:
q = [fracval(x) for x in mode_conf[:3]]
if len(mode_conf) == 4:
vals.append([q, int(mode_conf[3]) - 1, 1.0, 0])
elif len(mode_conf) == 5:
vals.append([q,
int(mode_conf[3]) - 1,
float(mode_conf[4]),
0])
else:
vals.append([q,
int(mode_conf[3]) - 1,
float(mode_conf[4]),
float(mode_conf[5])])
modulation['modulations'] = vals
self.set_parameter('modulation', modulation)
else:
self.setting_error("MODULATION tag is wrongly set.")
def _set_settings(self):
self.set_settings()
params = self._parameters
# Is getting least displacements?
if 'create_displacements' in params:
if params['create_displacements']:
self._settings.set_run_mode('displacements')
# Is force constants written or read?
if 'force_constants' in params:
if params['force_constants'] == 'write':
self._settings.set_write_force_constants(True)
elif params['force_constants'] == 'read':
self._settings.set_read_force_constants(True)
if 'read_force_constants' in params:
self._settings.set_read_force_constants(
params['read_force_constants'])
if 'write_force_constants' in params:
self._settings.set_write_force_constants(
params['write_force_constants'])
if 'is_full_fc' in params:
self._settings.set_is_full_fc(params['is_full_fc'])
# Enforce space group symmetyr to force constants?
if 'fc_spg_symmetry' in params:
self._settings.set_fc_spg_symmetry(params['fc_spg_symmetry'])
if 'readfc_format' in params:
self._settings.set_readfc_format(params['readfc_format'])
if 'writefc_format' in params:
self._settings.set_writefc_format(params['writefc_format'])
# Use hdf5?
if 'hdf5' in params:
self._settings.set_is_hdf5(params['hdf5'])
# Cutoff radius of force constants
if 'cutoff_radius' in params:
self._settings.set_cutoff_radius(params['cutoff_radius'])
# band & mesh mode
# This has to come before 'mesh_numbers' and 'band_paths'
if 'mesh_numbers' in params and 'band_paths' in params:
self._settings.set_run_mode('band_mesh')
# Mesh
if 'mesh_numbers' in params:
if self._settings.get_run_mode() != 'band_mesh':
self._settings.set_run_mode('mesh')
self._settings.set_mesh_numbers(params['mesh_numbers'])
if (self._settings.get_run_mode() == 'mesh' or
self._settings.get_run_mode() == 'band_mesh'):
if 'mp_shift' in params:
shift = params['mp_shift']
else:
shift = [0.,0.,0.]
self._settings.set_mesh_shift(shift)
if 'is_time_reversal_symmetry' in params:
if not params['is_time_reversal_symmetry']:
self._settings.set_time_reversal_symmetry(False)
if 'is_mesh_symmetry' in params:
if not params['is_mesh_symmetry']:
self._settings.set_is_mesh_symmetry(False)
if 'is_gamma_center' in params:
if params['is_gamma_center']:
self._settings.set_is_gamma_center(True)
if 'mesh_format' in params:
self._settings.set_mesh_format(params['mesh_format'])
# band mode
if 'band_paths' in params:
if self._settings.get_run_mode() != 'band_mesh':
self._settings.set_run_mode('band')
if (self._settings.get_run_mode() == 'band' or
self._settings.get_run_mode() == 'band_mesh'):
if 'band_format' in params:
self._settings.set_band_format(params['band_format'])
if 'band_labels' in params:
self._settings.set_band_labels(params['band_labels'])
if 'band_connection' in params:
self._settings.set_is_band_connection(params['band_connection'])
# Q-points mode
if 'qpoints' in params or 'read_qpoints' in params:
self._settings.set_run_mode('qpoints')
if self._settings.get_run_mode() == 'qpoints':
if 'qpoints_format' in params:
self._settings.set_qpoints_format(params['qpoints_format'])
# Whether write out dynamical matrices or not
if 'write_dynamical_matrices' in params:
if params['write_dynamical_matrices']:
self._settings.set_write_dynamical_matrices(True)
# Whether write out mesh.yaml or mesh.hdf5
if 'write_mesh' in params:
self._settings.set_write_mesh(params['write_mesh'])
# Anime mode
if 'anime_type' in params:
self._settings.set_anime_type(params['anime_type'])
if 'anime' in params:
self._settings.set_run_mode('anime')
anime_type = self._settings.get_anime_type()
if anime_type == 'v_sim':
qpoints = [fracval(x) for x in params['anime'][0:3]]
self._settings.set_anime_qpoint(qpoints)
if len(params['anime']) > 3:
self._settings.set_anime_amplitude(float(params['anime'][3]))
else:
self._settings.set_anime_band_index(int(params['anime'][0]))
self._settings.set_anime_amplitude(float(params['anime'][1]))
self._settings.set_anime_division(int(params['anime'][2]))
if len(params['anime']) == 6:
self._settings.set_anime_shift(
[fracval(x) for x in params['anime'][3:6]])
# Modulation mode
if 'modulation' in params:
self._settings.set_run_mode('modulation')
self._settings.set_modulation(params['modulation'])
# Character table mode
if 'irreps_qpoint' in params:
self._settings.set_run_mode('irreps')
self._settings.set_irreps_q_point(
params['irreps_qpoint'][:3])
if len(params['irreps_qpoint']) == 4:
self._settings.set_irreps_tolerance(params['irreps_qpoint'][3])
if self._settings.get_run_mode() == 'irreps':
if 'show_irreps' in params:
self._settings.set_show_irreps(params['show_irreps'])
if 'little_cogroup' in params:
self._settings.set_is_little_cogroup(params['little_cogroup'])
# DOS
if 'dos_range' in params:
fmin = params['dos_range'][0]
fmax = params['dos_range'][1]
fpitch = params['dos_range'][2]
self._settings.set_min_frequency(fmin)
self._settings.set_max_frequency(fmax)
self._settings.set_frequency_pitch(fpitch)
if 'dos' in params:
self._settings.set_is_dos_mode(params['dos'])
if 'fits_debye_model' in params:
self._settings.set_fits_Debye_model(params['fits_debye_model'])
if 'fmax' in params:
self._settings.set_max_frequency(params['fmax'])
if 'fmin' in params:
self._settings.set_min_frequency(params['fmin'])
# Project PDOS x, y, z directions in Cartesian coordinates
if 'xyz_projection' in params:
self._settings.set_xyz_projection(params['xyz_projection'])
if ('pdos' not in params and
self._settings.get_pdos_indices() is None):
self.set_parameter('pdos', [])
if 'pdos' in params:
self._settings.set_pdos_indices(params['pdos'])
self._settings.set_is_eigenvectors(True)
self._settings.set_is_dos_mode(True)
self._settings.set_is_mesh_symmetry(False)
if ('projection_direction' in params and
not self._settings.get_xyz_projection()):
self._settings.set_projection_direction(
params['projection_direction'])
self._settings.set_is_eigenvectors(True)
self._settings.set_is_dos_mode(True)
self._settings.set_is_mesh_symmetry(False)
# Thermal properties
if 'tprop' in params:
self._settings.set_is_thermal_properties(params['tprop'])
# Exclusive conditions
self._settings.set_is_thermal_displacements(False)
self._settings.set_is_thermal_displacement_matrices(False)
self._settings.set_is_thermal_distances(False)
# Projected thermal properties
if 'ptprop' in params and params['ptprop']:
self._settings.set_is_thermal_properties(True)
self._settings.set_is_projected_thermal_properties(True)
self._settings.set_is_eigenvectors(True)
self._settings.set_is_mesh_symmetry(False)
# Exclusive conditions
self._settings.set_is_thermal_displacements(False)
self._settings.set_is_thermal_displacement_matrices(False)
self._settings.set_is_thermal_distances(False)
# Use imaginary frequency as real for thermal property calculation
if 'pretend_real' in params:
self._settings.set_pretend_real(params['pretend_real'])
# Thermal displacements
if 'tdisp' in params and params['tdisp']:
self._settings.set_is_thermal_displacements(True)
self._settings.set_is_eigenvectors(True)
self._settings.set_is_mesh_symmetry(False)
# Exclusive conditions
self._settings.set_is_thermal_properties(False)
self._settings.set_is_thermal_displacement_matrices(False)
self._settings.set_is_thermal_distances(True)
# Thermal displacement matrices
if ('tdispmat' in params and params['tdispmat'] or
'tdispmat_cif' in params):
self._settings.set_is_thermal_displacement_matrices(True)
self._settings.set_is_eigenvectors(True)
self._settings.set_is_mesh_symmetry(False)
# Exclusive conditions
self._settings.set_is_thermal_properties(False)
self._settings.set_is_thermal_displacements(False)
self._settings.set_is_thermal_distances(False)
# Temperature used to calculate thermal displacement matrix
# to write aniso_U to cif
if 'tdispmat_cif' in params:
self._settings.set_thermal_displacement_matrix_temperature(
params['tdispmat_cif'])
# Thermal distances
if 'tdistance' in params:
self._settings.set_is_thermal_distances(True)
self._settings.set_is_eigenvectors(True)
self._settings.set_is_mesh_symmetry(False)
self._settings.set_thermal_atom_pairs(params['tdistance'])
# Exclusive conditions
self._settings.set_is_thermal_properties(False)
self._settings.set_is_thermal_displacements(False)
self._settings.set_is_thermal_displacement_matrices(False)
# Group velocity
if 'is_group_velocity' in params:
self._settings.set_is_group_velocity(params['is_group_velocity'])
# Moment mode
if 'moment' in params:
self._settings.set_is_moment(params['moment'])
self._settings.set_is_eigenvectors(True)
self._settings.set_is_mesh_symmetry(False)
if self._settings.get_is_moment():
if 'moment_order' in params:
self._settings.set_moment_order(params['moment_order'])
# Use Lapack solver via Lapacke
if 'lapack_solver' in params:
self._settings.set_lapack_solver(params['lapack_solver'])
| 36.682592 | 85 | 0.588056 |
c31eba42bd4053cb78786f57a0aa915e07c1a06a | 1,594 | py | Python | clai/emulator/emulator_docker_log_connector.py | emishulovin/clai | 9121241ef036e8482e6883ae7a337ff16397c54e | [
"MIT"
] | 391 | 2019-12-08T03:34:39.000Z | 2022-03-04T12:14:01.000Z | clai/emulator/emulator_docker_log_connector.py | Pycomet/clai | 4d8e661f1335ce35fd077ad812b56da361565d57 | [
"MIT"
] | 74 | 2020-01-28T16:53:00.000Z | 2022-03-12T00:48:26.000Z | clai/emulator/emulator_docker_log_connector.py | Pycomet/clai | 4d8e661f1335ce35fd077ad812b56da361565d57 | [
"MIT"
] | 73 | 2020-02-06T14:46:13.000Z | 2022-03-04T12:46:29.000Z | import docker
from pytest_docker_tools.wrappers import Container
from clai.emulator.docker_message import DockerMessage, DockerReply
from clai.tools.docker_utils import wait_server_is_started, read
# pylint: disable=too-few-public-methods,protected-access
class EmulatorDockerLogConnector:
def __init__(self, pool, log_queue, queue_out):
self.pool_log = pool
self.consumer_log = None
self.log_queue = log_queue
self.queue_out = queue_out
def start(self):
self.consumer_log = self.pool_log.map_async(__log_consumer__, ((self.log_queue, self.queue_out),))
def __log_consumer__(args):
queue, queue_out = args
my_clai: Container = None
socket = None
print('starting reading the log queue')
while True:
docker_message: DockerMessage = queue.get()
if docker_message.docker_command == 'start_logger':
docker_client = docker.from_env()
docker_container = docker_client.containers.get(
docker_message.message)
my_clai = Container(docker_container)
if my_clai:
if not socket:
socket = my_clai.exec_run(cmd="bash -l", stdin=True, tty=True,
privileged=True, socket=True)
wait_server_is_started()
socket.output._sock.send('clai "none" tail -f /var/tmp/app.log\n'.encode())
read(socket, lambda chunk: queue_out.put(DockerReply(docker_reply='log', message=chunk)))
queue.task_done()
queue.put(DockerMessage(docker_command='log'))
| 36.227273 | 106 | 0.663739 |
5a3180376fd31ac3f8f767077d0e2d388c7a4464 | 7,786 | py | Python | deepreg/train.py | YipengHu/DeepReg | 6c610a29c813448be25d384555f5b9bbb6a3bd2a | [
"Apache-2.0"
] | null | null | null | deepreg/train.py | YipengHu/DeepReg | 6c610a29c813448be25d384555f5b9bbb6a3bd2a | [
"Apache-2.0"
] | null | null | null | deepreg/train.py | YipengHu/DeepReg | 6c610a29c813448be25d384555f5b9bbb6a3bd2a | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""
Module to train a network using init files and a CLI.
"""
import argparse
import os
from typing import Dict, List, Tuple, Union
import tensorflow as tf
import deepreg.config.parser as config_parser
import deepreg.model.optimizer as opt
from deepreg.callback import build_checkpoint_callback
from deepreg.registry import REGISTRY
from deepreg.util import build_dataset, build_log_dir
def build_config(
    config_path: Union[str, List[str]],
    log_dir: str,
    exp_name: str,
    ckpt_path: str,
    max_epochs: int = -1,
) -> Tuple[Dict, str, str]:
    """
    Initialise the log directory and parse the configuration for training.

    :param config_path: str or list of str, path(s) to config file(s)
    :param log_dir: path of the log root directory
    :param exp_name: name of the experiment
    :param ckpt_path: path where the model checkpoint is stored
    :param max_epochs: if max_epochs > 0, use it to overwrite the configuration
    :return: - config: a dictionary saving the configuration
             - log_dir: the path of the directory to save logs
             - ckpt_path: the checkpoint path with ``~`` expanded
    """
    # init log directory
    log_dir = build_log_dir(log_dir=log_dir, exp_name=exp_name)

    # load config
    config = config_parser.load_configs(config_path)

    # replace the ~ with user home path
    ckpt_path = os.path.expanduser(ckpt_path)

    # overwrite epochs and save_period if necessary
    if max_epochs > 0:
        config["train"]["epochs"] = max_epochs
        config["train"]["save_period"] = min(max_epochs, config["train"]["save_period"])

    # backup config
    config_parser.save(config=config, out_dir=log_dir)

    # batch_size in original config corresponds to batch_size per GPU
    gpus = tf.config.experimental.list_physical_devices("GPU")
    config["train"]["preprocess"]["batch_size"] *= max(len(gpus), 1)

    return config, log_dir, ckpt_path
def train(
    gpu: str,
    config_path: Union[str, List[str]],
    gpu_allow_growth: bool,
    ckpt_path: str,
    exp_name: str = "",
    log_dir: str = "logs",
    max_epochs: int = -1,
):
    """
    Function to train a model.

    :param gpu: which local gpu(s) to use to train.
    :param config_path: path to configuration set up.
    :param gpu_allow_growth: whether to allocate whole GPU memory for training.
    :param ckpt_path: where to store training checkpoints.
    :param log_dir: path of the log directory.
    :param exp_name: experiment name.
    :param max_epochs: if max_epochs > 0, will use it to overwrite the configuration.
    """
    # set env variables before TensorFlow touches the devices
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" if gpu_allow_growth else "false"

    # load config
    config, log_dir, ckpt_path = build_config(
        config_path=config_path,
        log_dir=log_dir,
        exp_name=exp_name,
        ckpt_path=ckpt_path,
        max_epochs=max_epochs,
    )

    # build datasets; training data is mandatory, validation is optional
    data_loader_train, dataset_train, steps_per_epoch_train = build_dataset(
        dataset_config=config["dataset"],
        preprocess_config=config["train"]["preprocess"],
        mode="train",
        training=True,
        repeat=True,
    )
    assert data_loader_train is not None  # train data should not be None
    data_loader_val, dataset_val, steps_per_epoch_val = build_dataset(
        dataset_config=config["dataset"],
        preprocess_config=config["train"]["preprocess"],
        mode="valid",
        training=False,
        repeat=True,
    )

    # use strategy to support multiple GPUs
    # the network is mirrored in each GPU so that we can use larger batch size
    # https://www.tensorflow.org/guide/distributed_training
    # only model, optimizer and metrics need to be defined inside the strategy
    num_devices = max(len(tf.config.list_physical_devices("GPU")), 1)
    if num_devices > 1:
        strategy = tf.distribute.MirroredStrategy()  # pragma: no cover
    else:
        strategy = tf.distribute.get_strategy()
    with strategy.scope():
        model: tf.keras.Model = REGISTRY.build_model(
            config=dict(
                name=config["train"]["method"],
                moving_image_size=data_loader_train.moving_image_shape,
                fixed_image_size=data_loader_train.fixed_image_shape,
                index_size=data_loader_train.num_indices,
                labeled=config["dataset"]["labeled"],
                batch_size=config["train"]["preprocess"]["batch_size"],
                config=config["train"],
                num_devices=num_devices,
            )
        )
        optimizer = opt.build_optimizer(optimizer_config=config["train"]["optimizer"])

        # compile (inside the strategy scope so variables are mirrored)
        model.compile(optimizer=optimizer)
        model.plot_model(output_dir=log_dir)

    # build callbacks
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=log_dir, histogram_freq=config["train"]["save_period"]
    )
    ckpt_callback, initial_epoch = build_checkpoint_callback(
        model=model,
        dataset=dataset_train,
        log_dir=log_dir,
        save_period=config["train"]["save_period"],
        ckpt_path=ckpt_path,
    )
    callbacks = [tensorboard_callback, ckpt_callback]

    # train
    # it's necessary to define the steps_per_epoch
    # and validation_steps to prevent errors like
    # BaseCollectiveExecutor::StartAbort Out of range: End of sequence
    model.fit(
        x=dataset_train,
        steps_per_epoch=steps_per_epoch_train,
        initial_epoch=initial_epoch,
        epochs=config["train"]["epochs"],
        validation_data=dataset_val,
        validation_steps=steps_per_epoch_val,
        callbacks=callbacks,
    )

    # close file loaders in data loaders after training
    data_loader_train.close()
    if data_loader_val is not None:
        data_loader_val.close()
def main(args=None):
    """
    Entry point for the train script: parse CLI arguments and run training.

    :param args: arguments (list of str); defaults to ``sys.argv[1:]``
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--gpu",
        "-g",
        help="GPU index for training."
        '-g "" for using CPU'
        '-g "0" for using GPU 0'
        '-g "0,1" for using GPU 0 and 1.',
        type=str,
        required=True,
    )
    parser.add_argument(
        "--gpu_allow_growth",
        "-gr",
        help="Prevent TensorFlow from reserving all available GPU memory",
        default=False,
    )
    parser.add_argument(
        "--ckpt_path",
        "-k",
        help="Path of the saved model checkpoint to load."
        "No need to provide if start training from scratch.",
        default="",
        type=str,
        required=False,
    )
    parser.add_argument(
        "--log_dir", help="Path of log directory.", default="logs", type=str
    )
    parser.add_argument(
        "--exp_name",
        "-l",
        help="Name of log directory."
        "The directory is under log root, e.g. logs/ by default."
        "If not provided, a timestamp based folder will be created.",
        default="",
        type=str,
    )
    parser.add_argument(
        "--config_path",
        "-c",
        help="Path of config, must end with .yaml. Can pass multiple paths.",
        type=str,
        nargs="+",
        required=True,
    )
    parser.add_argument(
        "--max_epochs",
        help="The maximum number of epochs, -1 means following configuration.",
        type=int,
        default=-1,
    )
    args = parser.parse_args(args)

    train(
        gpu=args.gpu,
        config_path=args.config_path,
        gpu_allow_growth=args.gpu_allow_growth,
        ckpt_path=args.ckpt_path,
        log_dir=args.log_dir,
        exp_name=args.exp_name,
        max_epochs=args.max_epochs,
    )


if __name__ == "__main__":
    main()  # pragma: no cover
| 30.29572 | 88 | 0.647958 |
7461d656b5c7044a627be87d084619ce6f33d3e2 | 9,457 | py | Python | spfeas/spfeas/helpers/other/progressbar/progressbar.py | siu-panh/mapeo-uso-del-suelo | f7081a4e6784281eddceaa1a6087e0d972c92820 | [
"Apache-2.0"
] | 26 | 2017-12-07T06:38:46.000Z | 2021-10-01T18:24:47.000Z | spfeas/spfeas/helpers/other/progressbar/progressbar.py | siu-panh/mapeo-uso-del-suelo | f7081a4e6784281eddceaa1a6087e0d972c92820 | [
"Apache-2.0"
] | 21 | 2018-03-01T15:08:49.000Z | 2019-03-11T15:53:13.000Z | spfeas/spfeas/helpers/other/progressbar/progressbar.py | siu-panh/mapeo-uso-del-suelo | f7081a4e6784281eddceaa1a6087e0d972c92820 | [
"Apache-2.0"
] | 10 | 2018-03-20T22:27:43.000Z | 2020-09-07T00:27:41.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# progressbar - Text progress bar library for Python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Main ProgressBar class."""
from __future__ import division
from builtins import int
import math
import os
import signal
import sys
import time
try:
from fcntl import ioctl
from array import array
import termios
except ImportError:
pass
from compat import * # for: any, next
import widgets
# Sentinel class used in place of maxval when the total length is unknown.
class UnknownLength: pass
class ProgressBar(object):
    """The ProgressBar class which updates and prints the bar.

    A common way of using it is like:

    >>> pbar = ProgressBar().start()
    >>> for i in range(100):
    ...     # do something
    ...     pbar.update(i+1)
    ...
    >>> pbar.finish()

    You can also use a ProgressBar as an iterator:

    >>> progress = ProgressBar()
    >>> for i in progress(some_iterable):
    ...     # do something
    ...

    Since the progress bar is incredibly customizable you can specify
    different widgets of any type in any order. You can even write your own
    widgets! However, since there are already a good number of widgets you
    should probably play around with them before moving on to create your own
    widgets.

    The term_width parameter represents the current terminal width. If the
    parameter is set to an integer then the progress bar will use that,
    otherwise it will attempt to determine the terminal width falling back to
    80 columns if the width cannot be determined.

    When implementing a widget's update method you are passed a reference to
    the current progress bar. As a result, you have access to the
    ProgressBar's methods and attributes. Although there is nothing preventing
    you from changing the ProgressBar you should treat it as read only.

    Useful methods and attributes include (Public API):
     - currval: current progress (0 <= currval <= maxval)
     - maxval: maximum (and final) value
     - finished: True if the bar has finished (reached 100%)
     - start_time: the time when start() method of ProgressBar was called
     - seconds_elapsed: seconds elapsed since start_time and last call to
       update
     - percentage(): progress in percent [0..100]
    """

    __slots__ = ('currval', 'fd', 'finished', 'last_update_time',
                 'left_justify', 'maxval', 'next_update', 'num_intervals',
                 'poll', 'seconds_elapsed', 'signal_set', 'start_time',
                 'term_width', 'update_interval', 'widgets', '_time_sensitive',
                 '__iterable')

    _DEFAULT_MAXVAL = 100
    _DEFAULT_TERMSIZE = 80
    _DEFAULT_WIDGETS = [widgets.Percentage(), ' ', widgets.Bar()]

    def __init__(self, maxval=None, widgets=None, term_width=None, poll=1,
                 left_justify=True, fd=sys.stderr):
        """Initializes a progress bar with sane defaults."""
        # Don't share a reference with any other progress bars
        if widgets is None:
            widgets = list(self._DEFAULT_WIDGETS)

        self.maxval = maxval
        self.widgets = widgets
        self.fd = fd
        self.left_justify = left_justify

        self.signal_set = False
        if term_width is not None:
            self.term_width = term_width
        else:
            try:
                # Query the terminal size and track resizes via SIGWINCH.
                self._handle_resize()
                signal.signal(signal.SIGWINCH, self._handle_resize)
                self.signal_set = True
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                # No tty / no termios support: fall back to the environment.
                self.term_width = self._env_size()

        self.__iterable = None
        self._update_widgets()

        self.currval = 0
        self.finished = False
        self.last_update_time = None
        self.poll = poll
        self.seconds_elapsed = 0
        self.start_time = None
        self.update_interval = 1
        self.next_update = 0

    def __call__(self, iterable):
        """Use a ProgressBar to iterate through an iterable."""
        try:
            self.maxval = len(iterable)
        except:
            # Length-less iterables get an UnknownLength maxval.
            if self.maxval is None:
                self.maxval = UnknownLength

        self.__iterable = iter(iterable)
        return self

    def __iter__(self):
        return self

    def __next__(self):
        try:
            value = next(self.__iterable)
            if self.start_time is None:
                self.start()
            else:
                self.update(self.currval + 1)
            return value
        except StopIteration:
            if self.start_time is None:
                self.start()
            self.finish()
            raise

    # Create an alias so that Python 2.x won't complain about not being
    # an iterator.
    next = __next__

    def _env_size(self):
        """Tries to find the term_width from the environment."""
        return int(os.environ.get('COLUMNS', self._DEFAULT_TERMSIZE)) - 1

    def _handle_resize(self, signum=None, frame=None):
        """Tries to catch resize signals sent from the terminal."""
        h, w = array('h', ioctl(self.fd, termios.TIOCGWINSZ, '\0' * 8))[:2]
        self.term_width = w

    def percentage(self):
        """Returns the progress as a percentage."""
        if self.currval >= self.maxval:
            return 100.0
        return self.currval * 100.0 / self.maxval

    percent = property(percentage)

    def _format_widgets(self):
        result = []
        expanding = []
        width = self.term_width

        # First pass: render fixed-width widgets, remember expanding ones.
        for index, widget in enumerate(self.widgets):
            if isinstance(widget, widgets.WidgetHFill):
                result.append(widget)
                expanding.insert(0, index)
            else:
                widget = widgets.format_updatable(widget, self)
                result.append(widget)
                width -= len(widget)

        # Second pass: share the remaining width among expanding widgets.
        count = len(expanding)
        while count:
            portion = max(int(math.ceil(width * 1. / count)), 0)
            index = expanding.pop()
            count -= 1

            widget = result[index].update(self, portion)
            width -= len(widget)
            result[index] = widget

        return result

    def _format_line(self):
        """Joins the widgets and justifies the line."""
        # Local is named `line` to avoid shadowing the `widgets` module.
        line = ''.join(self._format_widgets())

        if self.left_justify:
            return line.ljust(self.term_width)
        else:
            return line.rjust(self.term_width)

    def _need_update(self):
        """Returns whether the ProgressBar should redraw the line."""
        if self.currval >= self.next_update or self.finished:
            return True

        delta = time.time() - self.last_update_time
        return self._time_sensitive and delta > self.poll

    def _update_widgets(self):
        """Checks all widgets for the time sensitive bit."""
        self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False)
                                   for w in self.widgets)

    def update(self, value=None):
        """Updates the ProgressBar to a new value."""
        if value is not None and value is not UnknownLength:
            if (self.maxval is not UnknownLength
                    and not 0 <= value <= self.maxval):
                raise ValueError('Value out of range')

            self.currval = value

        if not self._need_update():
            return
        if self.start_time is None:
            raise RuntimeError('You must call "start" before calling "update"')

        now = time.time()
        self.seconds_elapsed = now - self.start_time
        self.next_update = self.currval + self.update_interval
        self.fd.write(self._format_line() + '\r')
        self.last_update_time = now

    def start(self):
        """Starts measuring time, and prints the bar at 0%.

        It returns self so you can use it like this:

        >>> pbar = ProgressBar().start()
        >>> for i in range(100):
        ...     # do something
        ...     pbar.update(i+1)
        ...
        >>> pbar.finish()
        """
        if self.maxval is None:
            self.maxval = self._DEFAULT_MAXVAL

        self.num_intervals = max(100, self.term_width)
        self.next_update = 0

        if self.maxval is not UnknownLength:
            if self.maxval < 0:
                raise ValueError('Value out of range')
            self.update_interval = self.maxval / self.num_intervals

        self.start_time = self.last_update_time = time.time()
        self.update(0)

        return self

    def finish(self):
        """Puts the ProgressBar bar in the finished state."""
        if self.finished:
            return
        self.finished = True
        self.update(self.maxval)
        self.fd.write('\n')

        if self.signal_set:
            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
| 30.704545 | 79 | 0.616369 |
e65c345f781ff124e930a7ff1d05fe2974919925 | 1,624 | py | Python | userbot/client/client_list.py | sensiherme/SensiAnubis | 046c40efbe41f5a0aa2d468df39b028f89eb1da5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/client/client_list.py | sensiherme/SensiAnubis | 046c40efbe41f5a0aa2d468df39b028f89eb1da5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/client/client_list.py | sensiherme/SensiAnubis | 046c40efbe41f5a0aa2d468df39b028f89eb1da5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Sensi - UserBot
# Copyright (c) 2022 Sensi-Userbot
# Credits: @indraudah || https://github.com/annubishermes/
#
# This file is a part of < https://github.com/annubishermes/hermesubot>
#
from base64 import b64decode
import telethon.utils
from telethon.tl.functions.users import GetFullUserRequest
async def clients_list(SUDO_USERS, bot, MAN2, MAN3, MAN4, MAN5):
    """Collect the Telegram user ids of every configured client session.

    Returns a list containing the sudo user ids, the main *bot* client's
    own id, and the id of each optional extra client (MAN2..MAN5) that is
    configured and reachable.

    :param SUDO_USERS: iterable of sudo user ids (may be empty)
    :param bot: the main client; must be connected
    :param MAN2: optional extra client, or None
    :param MAN3: optional extra client, or None
    :param MAN4: optional extra client, or None
    :param MAN5: optional extra client, or None
    :return: list of user ids
    """
    user_ids = list(SUDO_USERS)
    main_id = await bot.get_me()
    user_ids.append(main_id.id)
    # The optional clients may be configured but not connected; any failure
    # while querying one is ignored so the remaining ids are still returned
    # (preserves the original broad except-BaseException behaviour).
    for client in (MAN2, MAN3, MAN4, MAN5):
        if client is None:
            continue
        try:
            me = await client.get_me()
            user_ids.append(me.id)
        except BaseException:
            pass
    return user_ids
# Hard-coded developer id list, stored base64-encoded
# ("ODQ0NDMyMjIw" decodes to b"844432220").
ITSME = list(map(int, b64decode("ODQ0NDMyMjIw").split()))
async def client_id(event, botid=None):
    """Resolve the owner id, first name and a markdown mention string.

    If *botid* is given, the user is looked up via ``GetFullUserRequest``;
    otherwise the identity of the event's own client is used.

    :param event: a telethon event whose ``client`` is used for lookups
    :param botid: optional user/bot id to resolve instead of the client itself
    :return: tuple of (owner id, first name, markdown mention string)
    """
    if botid is not None:
        uid = await event.client(GetFullUserRequest(botid))
        OWNER_ID = uid.user.id
        MAN_USER = uid.user.first_name
    else:
        client = await event.client.get_me()
        uid = telethon.utils.get_peer_id(client)
        OWNER_ID = uid
        MAN_USER = client.first_name
    man_mention = f"[{MAN_USER}](tg://user?id={OWNER_ID})"
    return OWNER_ID, MAN_USER, man_mention
| 24.984615 | 71 | 0.625 |
b48d406b7889ab2f342ed33d69612aeb76c58614 | 1,344 | py | Python | google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition",
manifest={"AutoMlTextExtraction", "AutoMlTextExtractionInputs",},
)
class AutoMlTextExtraction(proto.Message):
    r"""A TrainingJob that trains and uploads an AutoML Text
    Extraction Model.

    Attributes:
        inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextExtractionInputs):
            The input parameters of this TrainingJob.
    """

    inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTextExtractionInputs",)
class AutoMlTextExtractionInputs(proto.Message):
    r"""Input parameters for AutoML Text Extraction training.

    This message currently declares no fields; it is intentionally empty.
    """
__all__ = tuple(sorted(__protobuf__.manifest))
| 31.255814 | 120 | 0.744048 |
6108ffdaa895632b9cbf29c0574904bb66d5272d | 615 | py | Python | sdk/identity/azure-identity/azure/identity/_internal/__init__.py | yanfa317/azure-sdk-for-python | 5aeebe33ad61fe9da5e7b0314e24a8332c061e3d | [
"MIT"
] | null | null | null | sdk/identity/azure-identity/azure/identity/_internal/__init__.py | yanfa317/azure-sdk-for-python | 5aeebe33ad61fe9da5e7b0314e24a8332c061e3d | [
"MIT"
] | null | null | null | sdk/identity/azure-identity/azure/identity/_internal/__init__.py | yanfa317/azure-sdk-for-python | 5aeebe33ad61fe9da5e7b0314e24a8332c061e3d | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from .auth_code_redirect_handler import AuthCodeRedirectServer
from .exception_wrapper import wrap_exceptions
from .msal_credentials import ConfidentialClientCredential, PublicClientCredential
from .msal_transport_adapter import MsalTransportAdapter, MsalTransportResponse
__all__ = [
"AuthCodeRedirectServer",
"ConfidentialClientCredential",
"MsalTransportAdapter",
"MsalTransportResponse",
"PublicClientCredential",
"wrap_exceptions",
]
| 34.166667 | 82 | 0.715447 |
0183b3deef7466c8c394b8848a1328cdb4d467ce | 414 | py | Python | auctions/migrations/0011_auto_20201030_1225.py | huutrungrimp/commerce | 8a22ea44bfca69f96d721a3ebefd7729487db3cf | [
"MIT"
] | null | null | null | auctions/migrations/0011_auto_20201030_1225.py | huutrungrimp/commerce | 8a22ea44bfca69f96d721a3ebefd7729487db3cf | [
"MIT"
] | null | null | null | auctions/migrations/0011_auto_20201030_1225.py | huutrungrimp/commerce | 8a22ea44bfca69f96d721a3ebefd7729487db3cf | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-30 16:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen ``BidListing.bidprice`` to DecimalField(10, 2)."""

    dependencies = [
        ('auctions', '0010_auto_20201030_1223'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bidlisting',
            name='bidprice',
            field=models.DecimalField(decimal_places=2, max_digits=10),
        ),
    ]
| 21.789474 | 71 | 0.618357 |
ed8d7b4385c0efbaa405bd2a81481e1d8078a07a | 2,041 | py | Python | setup.py | UAL-RE/figshare | a364a662ffecdfd29cce595003b6f2d2fb1ce767 | [
"BSD-3-Clause"
] | null | null | null | setup.py | UAL-RE/figshare | a364a662ffecdfd29cce595003b6f2d2fb1ce767 | [
"BSD-3-Clause"
] | 2 | 2020-03-27T00:04:46.000Z | 2020-07-15T16:43:47.000Z | setup.py | UAL-RE/figshare | a364a662ffecdfd29cce595003b6f2d2fb1ce767 | [
"BSD-3-Clause"
] | null | null | null | import setuptools
setuptools.setup(
    name='figshare',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.3.5',

    description='Figshare client for Project Cognoma',

    # The project's main homepage.
    url='https://github.com/cognoma',

    # Author details
    author='Project Cognoma',

    # Choose your license
    license='BSD 3-Clause',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Bio-Informatics',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: BSD License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.5',
    ],

    # What does your project relate to?
    keywords='cognoma machine learning cancer figshare',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=setuptools.find_packages(),

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[
        'requests == 2.20.0',
        'pytest == 3.0.4',
    ],

    # pytest integration
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
| 32.396825 | 79 | 0.651641 |
13e6c18fd85a33758645cefa9b276c034ecb6c9c | 1,000 | py | Python | tests/sentry/api/serializers/test_incident_activity.py | 0x11-dev/sentry | b3e9486b91ba272a65261ae6e29970a006e7d9a5 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/api/serializers/test_incident_activity.py | 0x11-dev/sentry | b3e9486b91ba272a65261ae6e29970a006e7d9a5 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/api/serializers/test_incident_activity.py | 0x11-dev/sentry | b3e9486b91ba272a65261ae6e29970a006e7d9a5 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.incidents.models import IncidentActivityType
from sentry.incidents.logic import create_incident_activity
from sentry.testutils import TestCase
class IncidentSerializerTest(TestCase):
    def test_simple(self):
        """Serializing a comment activity exposes ids as strings and passes
        type/comment through unchanged, with value fields defaulting to None."""
        activity = create_incident_activity(
            incident=self.create_incident(),
            activity_type=IncidentActivityType.COMMENT,
            user=self.user,
            comment='hello',
        )
        result = serialize(activity)

        assert result['id'] == six.text_type(activity.id)
        assert result['incidentIdentifier'] == six.text_type(activity.incident.identifier)
        assert result['userId'] == six.text_type(activity.user_id)
        assert result['type'] == activity.type
        assert result['value'] is None
        assert result['previousValue'] is None
        assert result['comment'] == activity.comment
| 32.258065 | 90 | 0.695 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.