hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b65708f79844663d5227eae40e33fe52930ab933 | 18,721 | py | Python | sourcecode/System.py | rlp81/CoalOS | f5d2d9bdae8f17ef43b905a9628dd91ef70e0ae5 | [
"MIT"
] | 3 | 2021-03-17T22:31:08.000Z | 2022-03-30T12:46:39.000Z | sourcecode/System.py | rlp81/CoalOS | f5d2d9bdae8f17ef43b905a9628dd91ef70e0ae5 | [
"MIT"
] | 1 | 2022-03-30T19:14:21.000Z | 2022-03-30T19:40:38.000Z | sourcecode/System.py | rlp81/CoalOS | f5d2d9bdae8f17ef43b905a9628dd91ef70e0ae5 | [
"MIT"
] | 3 | 2021-03-17T22:31:10.000Z | 2021-06-17T13:44:38.000Z | from logging import fatal
import time
import json
# Menu-state flags: each nested REPL loop below runs while its flag is True.
sysopen = False
Topen = False
Fopen = False
Adpan = False
# Persistent state lives in JSON files next to the script:
# first.json -> first-run flag, permissions.json -> per-user permissions,
# name.json -> configurable terminal display name.
with open("first.json", "r") as f:
first = json.load(f)
with open("permissions.json", 'r') as f:
per = json.load(f)
with open("name.json", "r") as f:
Termname = json.load(f)
# Outermost command loop; runs until the user types Quit.
while True:
# One-time hint about the built-in admin credentials, then persist the flag.
if first["First"] == "True":
print("To access the admin account login with User: Admin and Password 4670")
first["First"] = "False"
with open("first.json", "w") as f:
json.dump(first, f)
# Top-level menu: only "Open System" actually does anything useful.
print("Open: Classified, System")
command = input("")
if command == ("Open Classified"):
print("sorry that's illegal")
if command == ("Open System"):
# Boot splash for the simulated terminal computer.
print("Starting System")
print("Created by Coal#7238")
time.sleep(5)
print(Termname["TerminalName"])
time.sleep(.5)
print("All rights reserved")
time.sleep(3)
sysopen = True
# Login/exit loop for the booted system.
while sysopen == True:
print("Please Login by typing: Login Or type: Exit to exit the terminal computer")
print("You can also make an account by typing CreateAcc")
syscommand = input("")
if syscommand == "Exit":
sysopen = False
# Login flow: credentials are read from users.json (plaintext passwords).
if syscommand == "Login":
with open("users.json", 'r') as f:
users = json.load(f)
user = input("Username: ")
if str(user) in users:
passw = input("Password: ")
if users[str(user)] == str(passw):
Topen = True
print("Hello and Welcome to The Terminal computer")
print("What application would you like too open? To list the applications type list.")
print("To exit the computer or any application type: Exit")
# Application chooser loop for the logged-in user.
while Topen == True:
syscommand = input('')
if syscommand == ("list"):
# "AP" permission gates visibility of the Admin Panel.
if per[str(user)]["AP"] == "True":
print("Applications: File Explorer, Admin Panel")
else:
print("Applications: File Explorer")
if syscommand == ("Exit"):
Topen = False
if syscommand == ("File Explorer"):
# Re-read permissions so changes made via the Admin Panel apply immediately.
with open("permissions.json", 'r') as f:
per = json.load(f)
# "EF" permission gates file creation/browsing.
if per[str(user)]["EF"] == "True":
print("To create a file type: CreateFile")
print("To open files type: Open {File Name}")
print("files: Cake.txt, Custom Files")
with open("cfiles.json", "r") as f:
cfile = json.load(f)
syscommand = input()
if syscommand == "Open Custom Files":
print("What is the name of the custom file?")
file = input()
if str(file) in cfile:
print("File contents:")
print(cfile[str(file)])
if not str(file) in cfile:
print("File doesn't exist.")
# NOTE(review): this compares `file`, which is only assigned inside the
# "Open Custom Files" branch above -- almost certainly should compare
# `syscommand` instead, so CreateFile looks unreachable as written.
if file == "CreateFile":
with open("cfiles.json", "r") as f:
cfile = json.load(f)
print("What is the name of the file?")
file = input()
print("What will it contain?")
file1 = input()
cfile[file] = file1
with open("cfiles.json", "w") as f:
# NOTE(review): json.dump returns None, so this rebinds cfile to None;
# the assignment target looks unintended.
cfile = json.dump(cfile, f, indent=4)
# NOTE(review): like CreateFile above, this tests `file` where `syscommand`
# was presumably intended. The body is a single hard-coded text "file"
# (the Portal 2 cake recipe) printed verbatim; do not insert anything
# inside the triple-quoted string below.
if file == "Open Cake.txt":
print("""To make the Cake:
One 18.25 ounce package chocolate cake mix.
One can prepared coconut pecan frosting.
Three slash four cup vegetable oil.
Four large eggs. One cup semi-sweet chocolate chips.
Three slash four cups butter or margarine.
One and two third cups granulated sugar.
Two cups all purpose flower.
Don't forget garnishes such as:
Fish shaped crackers.
Fish shaped candies.
Fish shaped solid waste.
Fish shaped dirt.
Fish shaped ethyl benzene.
print("Pull and peel licorice.
Fish shaped volatile organic compounds and sediment shaped sediment.
Candy coated peanut butter pieces. Shaped like fish.
One cup lemon juice.
Alpha resins.
Unsaturated polyester resin.
Fiberglass surface resins.
And volatile malted milk impoundments.
Nine large egg yolks.
Twelve medium geosynthetic membranes.
One cup granulated sugar.
An entry called 'how to kill someone with your bare hands'.
Two cups rhubarb, sliced.
Two slash three cups granulated rhubarb.
One tablespoon all-purpose rhubarb.
One teaspoon grated orange rhubarb.
Three tablespoons rhubarb, on fire.
One large rhubarb.
One cross borehole electro-magnetic imaging rhubarb.
Two tablespoons rhubarb juice.
Adjustable aluminum head positioner.
Slaughter electric needle injector.
Cordless electric needle injector.
Injector needle driver.
Injector needle gun.
Cranial caps.
And it contains proven preservatives, deep penetration agents, and gas and odor control chemicals.
That will deodorize and preserve putrid tissue.""")
# Admin Panel: only reachable when the logged-in user has the "AP" permission.
if per[str(user)]["AP"] == "True":
if syscommand == "Admin Panel":
print("To view commands type: list")
print("To exit type: Exit")
Adpan = True
# Admin Panel command loop.
while Adpan == True:
syscommand = input()
if syscommand == 'list':
# NOTE(review): "DelApp" is advertised here but never handled below.
print("DelApp")
print("DelUser")
print("ManageSystem")
if syscommand == "Exit":
Adpan = False
if syscommand == "ManageSystem":
print("For a list of commands type: list")
print("To exit type: Exit")
ms = True
# ManageSystem sub-loop: terminal name, permissions, password changes.
while ms == True:
syscommand = input()
if syscommand == "list":
print("ChangeTerminalName")
print("Permissions")
print("ChangePass")
if syscommand == "ChangePass":
with open("users.json", 'r') as f:
users = json.load(f)
print("What User's password do you want to change?")
user = input("User:")
if str(user) in users:
print("What is the new password?")
passw = input("Password:")
users[str(user)] = passw
with open("users.json", "w") as f:
json.dump(users, f, indent=4)
if not str(user) in users:
print("Error no such user.")
if syscommand == "Exit":
ms = False
print("To view commands type: list")
print("To exit type: Exit")
if syscommand == "Permissions":
perms = True
# NOTE(review): `perms` is never set to False and there is no break,
# so this loop can only be left by exhausting the program (Ctrl+C).
while perms == True:
print("What user would you like to change permissions for?")
user = input("User:")
if user == "Admin":
# NOTE(review): prints an error but does not skip the rest of the
# iteration, so Admin's permissions can still be edited below.
print("Error you cannot change permissions for this account.")
with open('users.json', 'r') as f:
users = json.load(f)
if not str(user) in users:
print("Error this User does not exist.")
if str(user) in users:
with open("permissions.json", "r") as f:
per = json.load(f)
print("To see permission commands type: list")
perw = True
# NOTE(review): no Exit handler sets `perw = False`; loop never ends.
while perw == True:
syscommand = input()
# Toggle the "EF" (Edit Files) permission for the selected user.
if syscommand == "Edit Files":
print("Type True or False")
access = input("")
if access == "True":
per[str(user)]["EF"] = "True"
print(f"User {user} can now create files.")
with open("permissions.json", 'w') as f:
json.dump(per, f, indent=4)
if access == "False":
per[str(user)]["EF"] = "False"
print(f"User {user} can not create files.")
with open("permissions.json", 'w') as f:
json.dump(per, f, indent=4)
# Toggle the "AP" (Admin Panel) permission for the selected user.
if syscommand == "Access AP":
print("Type True or False")
access = input("")
if access == "True":
per[str(user)]["AP"] = "True"
print(f"User {user} can now access the Admin Panel.")
with open("permissions.json", 'w') as f:
json.dump(per, f, indent=4)
if access == "False":
per[str(user)]["AP"] = "False"
print(f"User {user} can not access the Admin Panel.")
with open("permissions.json", 'w') as f:
json.dump(per, f, indent=4)
if syscommand == "list":
print("Access AP(Admin Panel)")
print("Edit Files(In File Explorer)")
if syscommand == "ChangeTerminalName":
print("What would you like to change the terminal name to?")
with open('name.json', 'r') as f:
Tname = json.load(f)
NewName = input("Name:")
Tname["TerminalName"] = NewName
with open('name.json', 'w') as f:
json.dump(Tname, f)
print("For a list of commands type: list")
print("To exit type: Exit")
if syscommand == "DelUser":
delu = True
print("Type the user you want to delete.")
print("Type Exit to exit DelUser.")
# DelUser sub-loop: remove accounts from users.json until Exit.
while delu == True:
user = input("User:")
with open("users.json", 'r') as f:
users = json.load(f)
if user == "Exit":
delu = False
print("To view commands type: list")
print("To exit type: Exit")
if str(user) in users:
users.pop(str(user))
with open("users.json", "w") as f:
json.dump(users, f)
if syscommand == ("Exit"):
print("Hello and Welcome to The Terminal computer")
print("What application would you like too open? To list the applications type list.")
print("To exit the computer or any application type: Exit")
else:
print("Incorrect PassWord")
else:
print("Incorrect Username")
# Account creation: appends a new user to users.json and default
# permissions to permissions.json.
if syscommand == "CreateAcc":
user = input("NewUsername:")
passw = input("NewPassword:")
with open('users.json', 'r') as f:
users = json.load(f)
with open("permissions.json", "r") as p:
perm = json.load(p)
if str(user) in users:
# NOTE(review): only warns ("usernamme" typo in the message); execution
# falls through and overwrites the existing account anyway.
print("Error this usernamme already exists.")
users[user] = passw
perm[user] = {}
# NOTE(review): the key written here is 'AD', but every permission check
# elsewhere in this file reads 'AP' -- new accounts will raise KeyError
# when 'AP' is looked up. Likely a typo for 'AP'.
perm[user]['AD'] = "False"
perm[user]['EF'] = "False"
with open('users.json', 'w') as f:
json.dump(users, f, indent = 4)
with open("permissions.json", "w") as p:
json.dump(perm, p, indent = 4)
if command == ("Quit"):
quit()
| 65.003472 | 135 | 0.307302 |
b3f6a564d9fa3457391d3026baa153bd68aa4d78 | 13,071 | py | Python | allel/stats/window.py | petrelharp/scikit-allel | 3ad21209c9ad9d7914761be7e04cb6a67fb02024 | [
"MIT"
] | null | null | null | allel/stats/window.py | petrelharp/scikit-allel | 3ad21209c9ad9d7914761be7e04cb6a67fb02024 | [
"MIT"
] | null | null | null | allel/stats/window.py | petrelharp/scikit-allel | 3ad21209c9ad9d7914761be7e04cb6a67fb02024 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import numpy as np
from allel.model.ndarray import SortedIndex
from allel.util import asarray_ndim, ignore_invalid, check_equal_length
def moving_statistic(values, statistic, size, start=0, stop=None, step=None):
    """Apply `statistic` to successive fixed-size windows over `values`.

    Parameters
    ----------
    values : array_like
        The data to summarise.
    statistic : function
        Function applied to the slice of values within each window.
    size : int
        Number of values per window.
    start : int, optional
        Index at which to start.
    stop : int, optional
        Index at which to stop.
    step : int, optional
        Offset between window start indices; defaults to `size`
        (i.e. non-overlapping windows).

    Returns
    -------
    out : ndarray, shape (n_windows,)

    Examples
    --------
    >>> import allel
    >>> allel.stats.moving_statistic([2, 5, 8, 16], np.sum, size=2)
    array([ 7, 24])
    """
    results = [statistic(values[lo:hi])
               for lo, hi in index_windows(values, size, start, stop, step)]
    return np.array(results)
def moving_mean(values, size, start=0, stop=None, step=None):
    """Mean of `values` within each moving window of `size` items."""
    return moving_statistic(values, np.mean, size,
                            start=start, stop=stop, step=step)
def moving_std(values, size, start=0, stop=None, step=None):
    """Standard deviation of `values` within each moving window."""
    return moving_statistic(values, np.std, size,
                            start=start, stop=stop, step=step)
def moving_midpoint(values, size, start=0, stop=None, step=None):
    """Midpoint (mean of first and last value) of each moving window."""
    def midpoint(window):
        return (window[0] + window[-1]) / 2
    return moving_statistic(values, midpoint, size,
                            start=start, stop=stop, step=step)
def index_windows(values, size, start, stop, step):
    """Yield (start, stop) index pairs delimiting the windows used by
    :func:`moving_statistic`.
    """
    if stop is None:
        stop = len(values)
    if step is None:
        step = size  # non-overlapping windows by default
    for lo in range(start, stop, step):
        hi = lo + size
        if hi > stop:
            # drop the ragged tail so every window is equal sized
            break
        yield (lo, hi)
def position_windows(pos, size, start, stop, step):
    """Build genome-position windows for :func:`windowed_statistic` and
    :func:`windowed_count`.

    Positions are 1-based and window stops are inclusive; the final window
    is truncated at `stop`.
    """
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]
    if step is None:
        step = size  # non-overlapping windows by default
    windows = []
    for wstart in range(start, stop, step):
        wstop = wstart + size
        if wstop >= stop:
            # final (possibly truncated) window
            windows.append([wstart, stop])
            break
        windows.append([wstart, wstop - 1])
    return np.asarray(windows)
def window_locations(pos, windows):
    """Map each window's (start, stop) positions onto index ranges into
    the sorted position array `pos`.
    """
    starts = np.searchsorted(pos, windows[:, 0])
    # side='right' makes the stop position inclusive
    stops = np.searchsorted(pos, windows[:, 1], side='right')
    return np.column_stack((starts, stops))
def windowed_count(pos, size=None, start=None, stop=None, step=None,
                   windows=None):
    """Count the number of items per window over a single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Item positions in ascending order, 1-based.
    size : int, optional
        Window size (number of bases).
    start, stop : int, optional
        Position range (1-based) over which windows are generated.
    step : int, optional
        Distance between window starts; defaults to `size`
        (non-overlapping windows).
    windows : array_like, int, shape (n_windows, 2), optional
        Explicit (window_start, window_stop) positions, 1-based with
        inclusive stops; overrides the size/start/stop/step parameters.

    Returns
    -------
    counts : ndarray, int, shape (n_windows,)
        Number of items in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as (window_start, window_stop) positions.

    Notes
    -----
    Window stop positions are inclusive. The final window is truncated at
    the requested stop position and so may be smaller than the others.
    """
    # positions must behave like a SortedIndex for searchsorted lookups
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)

    if windows is None:
        windows = position_windows(pos, size, start, stop, step)
    else:
        windows = asarray_ndim(windows, 2)

    locs = window_locations(pos, windows)

    # items per window = stop index minus start index
    counts = np.diff(locs, axis=1).reshape(-1)

    return counts, windows
def windowed_statistic(pos, values, statistic, size=None, start=None,
                       stop=None, step=None, windows=None, fill=np.nan):
    """Compute a statistic from the items falling within each window over a
    single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Item positions in ascending order, 1-based.
    values : array_like, int, shape (n_items,)
        Values to summarise. May also be a tuple of arrays, in which case
        each array is sliced per window and passed to `statistic` as a
        separate positional argument.
    statistic : function
        The statistic to compute.
    size : int, optional
        Window size (number of bases).
    start, stop : int, optional
        Position range (1-based) over which windows are generated.
    step : int, optional
        Distance between window starts; defaults to `size`
        (non-overlapping windows).
    windows : array_like, int, shape (n_windows, 2), optional
        Explicit (window_start, window_stop) positions, 1-based with
        inclusive stops; overrides the size/start/stop/step parameters.
    fill : object, optional
        Value used for windows containing no items.

    Returns
    -------
    out : ndarray, shape (n_windows,)
        Value of the statistic for each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as (window_start, window_stop) positions.
    counts : ndarray, int, shape (n_windows,)
        Number of items in each window.

    Notes
    -----
    Window stop positions are inclusive. The final window is truncated at
    the requested stop position and so may be smaller than the others.
    """
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)

    # positions and values must line up one-to-one
    if isinstance(values, tuple):
        check_equal_length(pos, *values)
    else:
        check_equal_length(pos, values)

    if windows is None:
        windows = position_windows(pos, size, start, stop, step)
    else:
        windows = asarray_ndim(windows, 2)

    locs = window_locations(pos, windows)

    out = []
    counts = []
    for lo, hi in locs:
        n = hi - lo
        if n == 0:
            # empty window
            out.append(fill)
        elif isinstance(values, tuple):
            # multiple value arrays: slice each, pass as separate args
            out.append(statistic(*(v[lo:hi] for v in values)))
        else:
            out.append(statistic(values[lo:hi]))
        counts.append(n)

    return np.asarray(out), windows, np.asarray(counts)
def per_base(x, windows, is_accessible=None, fill=np.nan):
    """Normalise a windowed statistic by the number of (accessible) bases
    in each window.

    Parameters
    ----------
    x : array_like, shape (n_windows,)
        The statistic to average per-base.
    windows : array_like, int, shape (n_windows, 2)
        (window_start, window_stop) positions, 1-based with inclusive stops.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Accessibility status for every position in the chromosome/contig.
    fill : object, optional
        Value used where a window has no accessible bases.

    Returns
    -------
    y : ndarray, float, shape (n_windows,)
        `x` divided by the number of (accessible) bases per window.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    """
    if is_accessible is None:
        # stops are inclusive, hence the +1
        n_bases = np.diff(windows, axis=1).reshape(-1) + 1
    else:
        n_bases = np.array([np.count_nonzero(is_accessible[lo - 1:hi])
                            for lo, hi in windows])

    # support a 2-dimensional statistic by broadcasting over columns
    if x.ndim == 2:
        n_bases = n_bases[:, None]
    elif x.ndim != 1:
        raise NotImplementedError('only arrays of 1 or 2 dimensions supported')

    with ignore_invalid():
        y = np.where(n_bases > 0, x / n_bases, fill)

    # restore n_bases to 1 dimension for the caller
    if n_bases.ndim > 1:
        n_bases = n_bases.reshape(-1)

    return y, n_bases
def equally_accessible_windows(is_accessible, size):
    """Create windows that each contain the same number of accessible bases.

    Parameters
    ----------
    is_accessible : array_like, bool, shape (n_bases,)
        Accessibility status of every base on a contig/chromosome.
    size : int
        Number of accessible bases per window.

    Returns
    -------
    windows : ndarray, int, shape (n_windows, 2)
        Window start/stop positions (1-based).
    """
    # 1-based positions of the accessible bases
    accessible_pos = np.nonzero(is_accessible)[0] + 1
    # first and last accessible position within each block of `size`
    return moving_statistic(accessible_pos,
                            lambda w: [w[0], w[-1]],
                            size=size)
| 28.918142 | 79 | 0.60026 |
9b090affac3ba6390bd54044bec85e12bb7138e3 | 924 | py | Python | scripts/stl/udp_1pkt_dns.py | ajitkhaparde/trex-core | 1834ebd49112af0731a819056612dde832f7a94e | [
"Apache-2.0"
] | 956 | 2015-06-24T15:04:55.000Z | 2022-03-30T06:25:04.000Z | scripts/stl/udp_1pkt_dns.py | hjat2005/trex-core | 400f03c86c844a0096dff3f6b13e58a808aaefff | [
"Apache-2.0"
] | 782 | 2015-09-20T15:19:00.000Z | 2022-03-31T23:52:05.000Z | scripts/stl/udp_1pkt_dns.py | hjat2005/trex-core | 400f03c86c844a0096dff3f6b13e58a808aaefff | [
"Apache-2.0"
] | 429 | 2015-06-27T19:34:21.000Z | 2022-03-23T11:02:51.000Z | from trex_stl_lib.api import *
from scapy.layers.dns import * # import from layers. in default only ipv4/ipv6 are imported for speedup
import argparse
class STLS1(object):
    """TRex stateless profile: a single-burst DNS-over-UDP stream."""

    def __init__ (self):
        pass;

    def create_stream (self):
        """Build one stream: a 17-packet burst of DNS queries at 1 pps."""
        pkt = Ether() / IP(src="16.0.0.1", dst="48.0.0.1") / UDP(sport=1025) / DNS()
        return STLStream(packet=STLPktBuilder(pkt=pkt, vm=[]),
                         mode=STLTXSingleBurst(pps=1, total_pkts=17))

    def get_streams (self, tunables, **kwargs):
        """Profile entry point: parse tunables, return the stream list."""
        parser = argparse.ArgumentParser(
            description='Argparser for {}'.format(os.path.basename(__file__)),
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        args = parser.parse_args(tunables)
        # this profile defines a single stream
        return [self.create_stream()]
# Entry point used by the TRex console to obtain this traffic profile.
def register():
return STLS1()
f563662358baa7bb20d3105d63e8091fce5fb999 | 861 | py | Python | CppSimShared/Python/test_phase_noise_plot.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | [
"MIT"
] | 1 | 2021-05-30T13:27:33.000Z | 2021-05-30T13:27:33.000Z | CppSimShared/Python/test_phase_noise_plot.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | [
"MIT"
] | null | null | null | CppSimShared/Python/test_phase_noise_plot.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division
# import cppsimdata module
import os
import sys
# Locate the CppSim shared directory: prefer the CPPSIMSHAREDHOME
# environment variable, otherwise fall back to ~/CppSim/CppSimShared.
cppsimsharedhome = os.getenv("CPPSIMSHAREDHOME")
if cppsimsharedhome != None:
CPPSIMSHARED_PATH = '%s' % cppsimsharedhome
else:
home_dir = os.getenv("HOME")
CPPSIMSHARED_PATH = '%s/CppSim/CppSimShared' % home_dir
sys.path.append(CPPSIMSHARED_PATH + '/Python')
from cppsimdata import *
# import pylab package
from pylab import *
# Load the simulated noise output and time base from the CppSim run.
data = CppSimData('test_noise.tr0')
noiseout = data.evalsig('noiseout')
t = data.evalsig('TIME')
# Sample period taken from two adjacent time points.
Ts = t[20]-t[19]
# Plot limits: 10 kHz to 30 MHz offset from the carrier.
f_low = 10e3
f_high = 30e6
f, Pxx_db = calc_pll_phasenoise(noiseout,Ts)
# Semilog phase-noise plot, L(f) in dBc/Hz vs offset frequency.
fig = figure(1)
fig.clf()
semilogx(f,Pxx_db)
axis([f_low, f_high, min(Pxx_db), max(Pxx_db)])
xlabel('Frequency Offset from Carrier (Hz)')
ylabel('L(f) (dBc/Hz)')
title('Simulated Phase Noise')
grid(True,which='both')
fig.show()
fe3aa6d2476a508dff0d5b3f049c27e23699cab9 | 455 | py | Python | data/scripts/templates/object/building/poi/shared_corellia_meatlump_small1.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/building/poi/shared_corellia_meatlump_small1.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/building/poi/shared_corellia_meatlump_small1.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
# Factory invoked by the SWGANH template loader: builds the POI building
# object for this .iff template. The file header says it is autogenerated,
# so only the marked MODIFICATIONS section should be hand-edited.
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_corellia_meatlump_small1.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
return result | 26.764706 | 76 | 0.731868 |
2db0af3716a8e7ccb2c2e64743701936f747da92 | 411 | py | Python | userpwd.py | jacksonkirka/bristosoftcontacts | 9e4b12cb45b115336af9988c1267a52e4793220d | [
"MIT"
] | null | null | null | userpwd.py | jacksonkirka/bristosoftcontacts | 9e4b12cb45b115336af9988c1267a52e4793220d | [
"MIT"
] | null | null | null | userpwd.py | jacksonkirka/bristosoftcontacts | 9e4b12cb45b115336af9988c1267a52e4793220d | [
"MIT"
] | null | null | null | #!/usr/bin/python
import uuid
import hashlib
def hashpwd(_pwd):
    '''
    Hash a password with SHA-256, salted with a random uuid4 hex string.
    Returns "<hexdigest>:<salt>" so the salt can be recovered later to
    verify a password attempt.
    '''
    salt = uuid.uuid4().hex
    digest = hashlib.sha256((salt + _pwd).encode()).hexdigest()
    return digest + ':' + salt
# Simple CLI demo: echo the typed password and print its salted hash.
passwd = input('Please enter password to hash: ')
print(passwd)
print(hashpwd(passwd))
| 20.55 | 61 | 0.647202 |
9dad849c0e21778ff5b58c1c14549afea542c081 | 2,292 | py | Python | salt/utils/virt.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 12 | 2015-01-21T00:18:25.000Z | 2021-07-11T07:35:26.000Z | salt/utils/virt.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | salt/utils/virt.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 12 | 2015-01-05T09:50:42.000Z | 2019-08-19T01:43:40.000Z | # -*- coding: utf-8 -*-
'''
This module contains routines shared by the virt system.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import time
import logging
# Import salt libs
import salt.utils.files
log = logging.getLogger(__name__)
class VirtKey(object):
'''
Used to manage key signing requests.
'''
def __init__(self, hyper, id_, opts):
# hyper: hypervisor name; id_: minion id; opts: master opts (needs 'pki_dir').
self.opts = opts
self.hyper = hyper
self.id = id_
# Authorization tokens live under <pki_dir>/virtkeys/<hyper>/<id_>.
path = os.path.join(self.opts['pki_dir'], 'virtkeys', hyper)
if not os.path.isdir(path):
os.makedirs(path)
self.path = os.path.join(path, id_)
def accept(self, pub):
'''
Accept the provided key
'''
# The token file holds the epoch time at which authorize() was called;
# a missing or unreadable file means no authorization was granted.
try:
with salt.utils.files.fopen(self.path, 'r') as fp_:
expiry = int(fp_.read())
except (OSError, IOError):
log.error(
'Request to sign key for minion \'%s\' on hyper \'%s\' '
'denied: no authorization', self.id, self.hyper
)
return False
except ValueError:
# token file exists but does not contain an integer timestamp
log.error('Invalid expiry data in %s', self.path)
return False
# Limit acceptance window to 10 minutes
# TODO: Move this value to the master config file
if (time.time() - expiry) > 600:
log.warning(
'Request to sign key for minion "%s" on hyper "%s" denied: '
'authorization expired', self.id, self.hyper
)
return False
# Write the accepted public key where the master expects minion keys.
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
self.id)
with salt.utils.files.fopen(pubfn, 'w+') as fp_:
fp_.write(pub)
# One-shot authorization: consume the token on success.
self.void()
return True
def authorize(self):
'''
Prepare the master to expect a signing request
'''
# Store the current epoch time; accept() honours it for 10 minutes.
with salt.utils.files.fopen(self.path, 'w+') as fp_:
fp_.write(str(int(time.time()))) # future lint: disable=blacklisted-function
return True
def void(self):
'''
Invalidate any existing authorization
'''
# Returns False when there was no token to remove.
try:
os.unlink(self.path)
return True
except OSError:
return False
| 27.614458 | 89 | 0.54712 |
e7a0e933aeb33d0c713b2a951badbb4a2e4906fd | 3,830 | py | Python | aesara/d3viz/d3viz.py | danhphan/aesara | 5a0fb0e731358d54648823170acd911cc1534d6a | [
"BSD-3-Clause"
] | null | null | null | aesara/d3viz/d3viz.py | danhphan/aesara | 5a0fb0e731358d54648823170acd911cc1534d6a | [
"BSD-3-Clause"
] | null | null | null | aesara/d3viz/d3viz.py | danhphan/aesara | 5a0fb0e731358d54648823170acd911cc1534d6a | [
"BSD-3-Clause"
] | null | null | null | """Dynamic visualization of Aesara graphs.
Author: Christof Angermueller <cangermueller@gmail.com>
"""
import json
import os
import shutil
from aesara.d3viz.formatting import PyDotFormatter
__path__ = os.path.dirname(os.path.realpath(__file__))
def replace_patterns(x, replace):
    """Return `x` with each key of `replace` substituted by its value.

    Parameters
    ----------
    x : str
        String to transform.
    replace : dict
        Mapping of substring to replacement. Keys and values are coerced
        to str and applied with plain (non-regex) ``str.replace``.
    """
    for old, new in replace.items():
        x = x.replace(str(old), str(new))
    return x
def safe_json(obj):
    """JSON-encode `obj` so it can be embedded safely inside HTML.

    '<' is escaped to its unicode form so the serialized payload cannot
    close a surrounding <script> tag or open a new element.
    """
    encoded = json.dumps(obj)
    return encoded.replace("<", "\\u003c")
def d3viz(fct, outfile, copy_deps=True, *args, **kwargs):
    """Create an HTML file with a dynamic visualization of an Aesara graph.

    In the HTML file the whole graph or single nodes can be moved by drag
    and drop, zoomed via the mouse wheel, and inspected via mouse-over.
    Node labels can be edited from the context menu. Input nodes are
    green, output nodes blue; apply nodes are ellipses coloured by
    operation. Edges are black, except blue for a returned view of an
    input and red for a destroyed input.

    Parameters
    ----------
    fct : aesara.compile.function.types.Function
        A compiled Aesara function, variable, apply or a list of variables.
    outfile : str
        Path of the HTML file to write.
    copy_deps : bool, optional
        Copy javascript and CSS dependencies next to the output file.

    Notes
    -----
    Extra parameters are forwarded to
    :class:`aesara.d3viz.formatting.PyDotFormatter`.
    """
    # Render the graph to DOT text
    formatter = PyDotFormatter(*args, **kwargs)
    dot_graph = formatter(fct).create_dot().decode("utf8")

    # Ensure the output directory exists
    outdir = os.path.dirname(outfile)
    if outdir != "" and not os.path.exists(outdir):
        os.makedirs(outdir)

    # Load the HTML template shipped with the package
    template_file = os.path.join(__path__, "html", "template.html")
    with open(template_file) as f:
        template = f.read()

    # Optionally copy the js/css assets next to the output file
    src_deps = __path__
    if copy_deps:
        dst_deps = "d3viz"
        for d in ("js", "css"):
            dep = os.path.join(outdir, dst_deps, d)
            if not os.path.exists(dep):
                shutil.copytree(os.path.join(src_deps, d), dep)
    else:
        dst_deps = src_deps

    # Fill the template placeholders and write the result
    html = replace_patterns(template, {
        "%% JS_DIR %%": os.path.join(dst_deps, "js"),
        "%% CSS_DIR %%": os.path.join(dst_deps, "css"),
        "%% DOT_GRAPH %%": safe_json(dot_graph),
    })
    with open(outfile, "w") as f:
        f.write(html)
def d3write(fct, path, *args, **kwargs):
    """Convert an Aesara graph to a pydot graph and write it to a dot file.

    Parameters
    ----------
    fct : aesara.compile.function.types.Function
        A compiled Aesara function, variable, apply or a list of variables.
    path : str
        Path of the dot file to write.

    Notes
    -----
    Extra parameters are forwarded to
    :class:`aesara.d3viz.formatting.PyDotFormatter`.
    """
    formatter = PyDotFormatter(*args, **kwargs)
    formatter(fct).write_dot(path)
| 28.37037 | 78 | 0.649086 |
77958837464b76d1be8bb9627011a10165b72bff | 1,784 | py | Python | commands/prefix.py | denzven/Denzven-Graphing-Api-Bot | 1865fc4e38d363eaef6a6509aaaf0bec9176910a | [
"MIT"
] | 2 | 2021-06-29T06:30:32.000Z | 2021-08-08T07:34:42.000Z | commands/prefix.py | denzven/Denzven-Graphing-Api-Bot | 1865fc4e38d363eaef6a6509aaaf0bec9176910a | [
"MIT"
] | 6 | 2021-08-30T12:09:23.000Z | 2021-12-08T06:33:44.000Z | commands/prefix.py | denzven/Denzven-Graphing-Api-Bot | 1865fc4e38d363eaef6a6509aaaf0bec9176910a | [
"MIT"
] | 5 | 2021-08-16T12:29:24.000Z | 2021-12-07T23:56:54.000Z | # this deals with setting a custom prefix and writing it to a json file
# (not the best way, but works)
# Imports
from discord.ext import commands
import discord
import json
# config
from config import DEFAULT_PREFIX
# cog class
class Prefix(commands.Cog):
    """Cog that lets server moderators view or change the bot's command
    prefix for their guild.  Prefixes are persisted in ``prefixes.json``
    keyed by guild id (stringified)."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(
        help='Set a Custom prefix for the bot in this guild',
        name='Prefix',
        description='Set a Custom prefix for the bot in this guild',
    )
    @commands.has_permissions(manage_messages=True)
    async def prefix(self, ctx, prefix: str = None):
        # No argument: just report the current prefix (cached, falling back
        # to the global default).
        if prefix is None:
            await ctx.reply(
                f'My prefix for this server is `{self.bot.prefixes_cache.get(str(ctx.guild.id), DEFAULT_PREFIX)}`',
                allowed_mentions=discord.AllowedMentions.none()
            )
        else:
            with open("prefixes.json", "r") as f:
                current_prefixes: dict = json.load(f)
            if prefix != DEFAULT_PREFIX:
                # Refuse prefixes that look like a mention (e.g. "@user").
                if prefix.startswith("@") and prefix != "@":
                    await ctx.reply(f"The Prefix of the bot cannot be sent to a mention, it is recommended not to do so.")
                else:
                    current_prefixes[str(ctx.guild.id)] = prefix
            else:
                # Resetting to the default: drop the per-guild override.
                # The second argument is only a pop() default to avoid KeyError.
                current_prefixes.pop(str(ctx.guild.id), 'amogus')
            # Mention-like prefixes were rejected above, so skip persisting them.
            if prefix.startswith("@") and prefix != "@":
                print("mention prefix")
            else:
                with open("prefixes.json", "w") as f:
                    json.dump(current_prefixes, f)
                await ctx.reply(f'The Prefix has been set to `{prefix}`', allowed_mentions=discord.AllowedMentions.none())
def setup(bot):
    # discord.py extension entry point: register the cog when the
    # extension is loaded via bot.load_extension().
    bot.add_cog(Prefix(bot))
| 35.68 | 130 | 0.580717 |
0e64cb1242e2e5d104f3dbd0f0f7bfd353f7b1b3 | 6,163 | py | Python | fullnode.py | nicoloridulfo/PyChain | d0969abedb6e51f8822c90e4bd74d6ce1c4576e6 | [
"MIT"
] | null | null | null | fullnode.py | nicoloridulfo/PyChain | d0969abedb6e51f8822c90e4bd74d6ce1c4576e6 | [
"MIT"
] | null | null | null | fullnode.py | nicoloridulfo/PyChain | d0969abedb6e51f8822c90e4bd74d6ce1c4576e6 | [
"MIT"
] | null | null | null | #!env/bin/python
import json
from operator import truediv
import socketserver
import sys
import threading
import time
import logging
from PyChain import Blockchain, request
from PyChain.protocol import recv_msg, send_msg
"""
A PyChain full node
This network communicated through sockets.
Messages are encoded using JSON.
The fields are:
- request: the type of request
- body (optional, depends on request): the body of the request
- time
A full node's job is to keep track of the blockchain,
by receiving blocks, verifying them and finally adding them to the blockchain.
They also answer to requests from other participants of the blockchain.
"""
logging.basicConfig(level=logging.DEBUG)

# Node-local copy of the chain, seeded with the genesis block.
blockchain = Blockchain()
blockchain.import_chain([
    Blockchain.encode_block(0, b"", 0, "Genesis block")])
def get_peers(old_peers: list):
    """Return *old_peers* merged with the peer lists advertised by the
    statically configured peers.

    Peers that fail to answer are silently skipped (best-effort
    discovery); the result is returned as a list.
    """
    new_peers = set(old_peers)
    for peer in static_peers:
        try:
            response = request(peer, "get_peers")
            advertised = response["response"]
            logging.info(f"Got {len(advertised)} peers from {peer}")
            # BUG FIX: set.union() returns a NEW set and does not mutate
            # `new_peers`, so every advertised peer was being discarded.
            # update() mutates in place, which is what was intended.
            new_peers.update(advertised)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
            logging.debug("Could not fetch peers from %s", peer)
    return list(new_peers)
def check_peers(peers: list):
    """Return the subset of *peers* that answer a ping.

    Each live peer is also asked to register this node in its own peer
    list.  Unreachable peers are skipped (best effort).
    """
    alive_peers = []
    for peer in peers:
        try:
            response = request(peer, "ping")
            if response["response"] == "pong":
                alive_peers.append(peer)
                # BUG FIX: the server's request handler only understands
                # "add_peer" (see RequestHandler.handle); "add_me" hit the
                # default "Unknown request" branch, so this node was never
                # registered with its peers.
                request(peer, "add_peer", f"{HOST}:{PORT}")
        except Exception:
            # Narrowed from a bare `except:`; dead peers are expected.
            logging.debug("Peer %s did not respond to ping", peer)
    return alive_peers
def longest_chain(peers: list):
    """Return the longest *valid* chain held by any peer, or None if no
    peer has a chain longer than the local one.

    Peers are queried for their chain length first, then visited longest
    first; the first peer whose full chain verifies wins.
    """
    peer_length = {}
    for peer in peers:
        try:
            response = request(peer, "get_length")
            peer_length[peer] = response["response"]
        except Exception:
            # Consistency fix: module uses `logging`, not bare print(e).
            logging.exception("Failed to query chain length from %s", peer)

    # Visit peers longest-chain-first.
    for peer, length in sorted(peer_length.items(), key=lambda item: -item[1]):
        # Remaining peers are no longer than the local chain: nothing to adopt.
        if length <= len(blockchain.blocks):
            break
        response = request(peer, "get_blocks")
        blocks = response["response"]
        # Robustness fix: an `assert` here would raise on a lying peer and
        # kill the polling thread; skip the peer instead.
        if len(blocks) != length:
            logging.warning(
                "Peer %s advertised %d blocks but sent %d", peer, length, len(blocks))
            continue
        # Rebuild the chain locally from the genesis block and verify it.
        chain = Blockchain()
        chain.import_chain([
            Blockchain.encode_block(0, b"", 0, "Genesis block")])
        for block in blocks[1:]:
            chain.blocks.append(Blockchain.dict_to_block(block))
        valid, reason = chain.verify_chain()
        if valid:
            return chain
    return None
class RequestHandler(socketserver.BaseRequestHandler):
    """Handles one incoming JSON-over-socket request per connection.

    The wire protocol is a length-prefixed JSON object (see
    PyChain.protocol.send_msg/recv_msg) with at least a "request" key and
    an optional "body".  Responses carry "response", "time" and
    "http_code".  NOTE: the protocol keyword "recieve_block" is
    misspelled on the wire; it must stay as-is for compatibility.
    """

    @staticmethod
    def create_response(response: str | dict, http_code: int):
        # Uniform response envelope shared by all handlers below.
        return {"response": response,
                "time": time.time(),
                "http_code": http_code}

    """
    Here come request handling functions
    """

    def get_blocks(self):
        # Full chain as a list of plain dicts.
        return self.create_response([Blockchain.block_to_dict(block)
                                     for block in blockchain.blocks], 200)

    def get_block(self, index: int):
        # Single block by index; an out-of-range index will raise here.
        return self.create_response(Blockchain.block_to_dict(blockchain.blocks[index]), 200)

    def get_blochchain_length(self):
        return self.create_response(len(blockchain.blocks), 200)

    def get_peers(self):
        # `peers` is the module-level peer list built in __main__.
        return self.create_response(peers, 200)

    def recieve_block(self, block: dict):
        # Optimistically append, then roll back if the chain no longer verifies.
        blockchain.blocks.append(Blockchain.dict_to_block(block))
        if not blockchain.verify_chain()[0]:
            blockchain.blocks.pop()
            return self.create_response("Invalid chain", 400)
        return self.create_response("OK, block added", 200)

    def add_peer(self, host: str):
        if host in peers:
            return self.create_response("Already in peers", 400)
        peers.append(host)
        return self.create_response("OK", 200)

    def handle(self):
        """
        This method is called when a request is received.
        It checks the request type and returns a response.
        """
        host, port = self.client_address
        data = recv_msg(self.request).decode()
        # NOTE: this local shadows the module-level `request()` helper.
        request = json.loads(data)
        logging.info(f"{host}:{port} requested {request['request']}")
        # Dispatch on the protocol verb (requires Python 3.10+).
        match request['request']:
            case 'get_blocks':
                response = self.get_blocks()
            case 'get_block':
                response = self.get_block(request['body'])
            case "ping":
                response = self.create_response("pong", 200)
            case "recieve_block":
                response = self.recieve_block(request["body"])
            case "get_peers":
                response = self.get_peers()
            case "get_length":
                response = self.get_blochchain_length()
            case "add_peer":
                response = self.add_peer(request["body"])
            case _:
                response = self.create_response("Unknown request", 400)
        send_msg(self.request, json.dumps(response).encode())
def poll_peers_thread():
    """Background loop: every 5 seconds ask the known peers for a longer
    valid chain and adopt it if found (longest-chain rule).

    Runs forever; intended to be started as a daemon-style worker thread.
    """
    # `blockchain` is reassigned below, so the global declaration is required.
    # (Removed a dead `global is_on_server` — that name was never used here.)
    global blockchain
    logging.info("Polling peers has started")
    while True:
        candidate = longest_chain(peers)
        if candidate:
            logging.info(
                f"New longest chain of length {len(candidate.blocks)} found.")
            blockchain = candidate
        time.sleep(5)
if __name__ == '__main__':
    is_on_server = True
    # Port is the single CLI argument; host is fixed to localhost.
    HOST, PORT = 'localhost', int(sys.argv[1])
    # Bootstrap peer list from peers.txt (one "host:port" per line).
    static_peers = [line for line in open(
        'peers.txt', 'r').read().split('\n') if line != '']
    peers = get_peers(check_peers(static_peers))
    socketserver.TCPServer.allow_reuse_address = True
    # Start the longest-chain polling loop in the background.
    polling_thread = threading.Thread(target=poll_peers_thread)
    polling_thread.start()
    with socketserver.ThreadingTCPServer((HOST, PORT), RequestHandler) as server:
        logging.info("Starting server on {}:{}".format(HOST, PORT))
        server.serve_forever()
        logging.info("Stopping server")
| 32.781915 | 99 | 0.625994 |
c91fabb4ca29da3e1b016198247429a6981837d6 | 2,117 | py | Python | sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: AutoRest-generated model — regeneration will overwrite manual edits.
class AclCreateOrUpdateParameters(Model):
    """The parameters used to create or update an access control list (ACL) entry.

    All required parameters must be populated in order to send to Azure.

    :param ace_type: Required. the access control list (ACL) entry type.
     UserObj and GroupObj denote the owning user and group, respectively.
     Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group'
    :type ace_type: str or
     ~azure.mgmt.datalake.analytics.catalog.models.AclType
    :param principal_id: Required. the Azure AD object ID of the user or group
     being specified in the access control list (ACL) entry.
    :type principal_id: str
    :param permission: Required. the permission type of the access control
     list (ACL) entry. Possible values include: 'None', 'Use', 'Create',
     'Drop', 'Alter', 'Write', 'All'
    :type permission: str or
     ~azure.mgmt.datalake.analytics.catalog.models.PermissionType
    """

    # msrest validation rules: all three fields are mandatory.
    _validation = {
        'ace_type': {'required': True},
        'principal_id': {'required': True},
        'permission': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and types.
    _attribute_map = {
        'ace_type': {'key': 'aceType', 'type': 'str'},
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'permission': {'key': 'permission', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AclCreateOrUpdateParameters, self).__init__(**kwargs)
        self.ace_type = kwargs.get('ace_type', None)
        self.principal_id = kwargs.get('principal_id', None)
        self.permission = kwargs.get('permission', None)
| 40.711538 | 82 | 0.629192 |
f6561d062f07184331a63131600c946dd72ec179 | 1,896 | py | Python | test/test_api_alert_profile_pager_duty_settings.py | hi-artem/twistlock-py | 9888e905f5b9d3cc00f9b84244588c0992f8e4f4 | [
"RSA-MD"
] | null | null | null | test/test_api_alert_profile_pager_duty_settings.py | hi-artem/twistlock-py | 9888e905f5b9d3cc00f9b84244588c0992f8e4f4 | [
"RSA-MD"
] | null | null | null | test/test_api_alert_profile_pager_duty_settings.py | hi-artem/twistlock-py | 9888e905f5b9d3cc00f9b84244588c0992f8e4f4 | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.api_alert_profile_pager_duty_settings import ApiAlertProfilePagerDutySettings # noqa: E501
from openapi_client.rest import ApiException
class TestApiAlertProfilePagerDutySettings(unittest.TestCase):
    """ApiAlertProfilePagerDutySettings unit test stubs (OpenAPI-generated)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a test ApiAlertProfilePagerDutySettings instance.

        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included.
        """
        # model = openapi_client.models.api_alert_profile_pager_duty_settings.ApiAlertProfilePagerDutySettings()  # noqa: E501
        if include_optional :
            # NOTE(review): `openapi_client.models.common/secret.common.Secret(...)`
            # parses as a DIVISION of `models.common` by a Secret instance — a
            # generator artifact that will raise at runtime.  Confirm the
            # intended module path (likely models.common.Secret) before enabling.
            return ApiAlertProfilePagerDutySettings(
                enabled = True,
                routing_key = openapi_client.models.common/secret.common.Secret(
                    encrypted = '',
                    plain = '', ),
                severity = '[\"critical\",\"error\",\"warning\",\"info\"]',
                summary = ''
            )
        else :
            return ApiAlertProfilePagerDutySettings(
        )

    def testApiAlertProfilePagerDutySettings(self):
        """Test ApiAlertProfilePagerDutySettings"""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)


if __name__ == '__main__':
    unittest.main()
| 33.263158 | 126 | 0.679325 |
a8553a1c0e43ae6e5f6cf0b51b3ad365653d0d66 | 8,103 | py | Python | review/review-dev.py | radish608/graduationProject_DL4WebSecurity | 1bafeca95d8c02be438b79e8192cae3f624879c9 | [
"MIT"
] | 1 | 2020-09-15T01:44:21.000Z | 2020-09-15T01:44:21.000Z | review/review-dev.py | radish608/graduationProject_DL4WebSecurity | 1bafeca95d8c02be438b79e8192cae3f624879c9 | [
"MIT"
] | null | null | null | review/review-dev.py | radish608/graduationProject_DL4WebSecurity | 1bafeca95d8c02be438b79e8192cae3f624879c9 | [
"MIT"
] | null | null | null | from sklearn.feature_extraction.text import CountVectorizer
import os
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn import metrics
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm
from sklearn.feature_extraction.text import TfidfTransformer
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_1d, global_max_pool
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression
from tflearn.data_utils import to_categorical, pad_sequences
from sklearn.neural_network import MLPClassifier
from tflearn.layers.normalization import local_response_normalization
from tensorflow.contrib import learn
# Hyper-parameters shared by the feature extractors below:
# vocabulary cap for bag-of-words, max token length for sequence models.
max_features=5000
max_document_length=1000
vocabulary=None
def load_one_file(filename):
    """Read a text file and return its contents as a single string with
    all newline ('\n') and carriage-return ('\r') characters stripped
    from the edges of each line."""
    x = ""
    with open(filename) as f:
        for line in f:
            # strip('\n') then strip('\r') handles both Unix and DOS endings.
            x += line.strip('\n').strip('\r')
    # Removed a redundant f.close() that sat INSIDE the `with` block;
    # the context manager already closes the file on exit.
    return x
def load_files_from_dir(rootdir):
    """Load every regular file directly under *rootdir* (os.listdir
    order) and return their contents as a list of strings."""
    x = []
    # Idiom fix: iterate listdir directly instead of `list = os.listdir(...)`
    # + range(len(...)), which also shadowed the builtin `list`.
    for entry in os.listdir(rootdir):
        path = os.path.join(rootdir, entry)
        if os.path.isfile(path):
            x.append(load_one_file(path))
    return x
def load_all_files():
    """Load the aclImdb train/test splits.

    Returns (x_train, x_test, y_train, y_test) where label 0 marks a
    positive review and label 1 a negative review.
    """
    x_train=[]
    y_train=[]
    x_test=[]
    y_test=[]
    path="review/aclImdb/train/pos/"
    print "Load %s" % path
    x_train=load_files_from_dir(path)
    y_train=[0]*len(x_train)
    path="review/aclImdb/train/neg/"
    print "Load %s" % path
    tmp=load_files_from_dir(path)
    y_train+=[1]*len(tmp)
    x_train+=tmp
    path="review/aclImdb/test/pos/"
    print "Load %s" % path
    x_test=load_files_from_dir(path)
    y_test=[0]*len(x_test)
    path="review/aclImdb/test/neg/"
    print "Load %s" % path
    tmp=load_files_from_dir(path)
    y_test+=[1]*len(tmp)
    x_test+=tmp
    return x_train, x_test, y_train, y_test
def get_features_by_wordbag():
    """Vectorize the IMDB corpus as bag-of-words count matrices.

    The vocabulary is fitted on the training split only, then reused for
    the test split so both share the same feature space.
    Returns (x_train, x_test, y_train, y_test) as dense arrays / lists.
    """
    global max_features
    x_train, x_test, y_train, y_test=load_all_files()

    vectorizer = CountVectorizer(
                                 decode_error='ignore',
                                 strip_accents='ascii',
                                 max_features=max_features,
                                 stop_words='english',
                                 max_df=1.0,
                                 min_df=1 )
    print vectorizer
    x_train=vectorizer.fit_transform(x_train)
    x_train=x_train.toarray()
    # Reuse the training vocabulary for the test set.
    vocabulary=vectorizer.vocabulary_

    vectorizer = CountVectorizer(
                                 decode_error='ignore',
                                 strip_accents='ascii',
                                 vocabulary=vocabulary,
                                 stop_words='english',
                                 max_df=1.0,
                                 min_df=1 )
    print vectorizer
    x_test=vectorizer.fit_transform(x_test)
    x_test=x_test.toarray()

    return x_train, x_test, y_train, y_test
def show_diffrent_max_features():
    """Plot NB accuracy as a function of the bag-of-words vocabulary size
    (max_features swept from 1000 to 19000 in steps of 2000)."""
    global max_features
    a=[]
    b=[]
    for i in range(1000,20000,2000):
        max_features=i
        print "max_features=%d" % i
        # NOTE(review): get_features_by_wordbag() returns a 4-tuple
        # (x_train, x_test, y_train, y_test) — this 2-target unpack will
        # raise ValueError.  Looks like stale code from an older signature;
        # confirm before use.
        x, y = get_features_by_wordbag()
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=0)
        gnb = GaussianNB()
        gnb.fit(x_train, y_train)
        y_pred = gnb.predict(x_test)
        score=metrics.accuracy_score(y_test, y_pred)
        a.append(max_features)
        b.append(score)
    plt.plot(a, b, 'r')
    plt.xlabel("max_features")
    plt.ylabel("metrics.accuracy_score")
    plt.title("metrics.accuracy_score VS max_features")
    plt.legend()
    plt.show()
def nb_wordbag(x_train, x_test, y_train, y_test):
    """Train Gaussian Naive Bayes on bag-of-words features and print the
    test-set accuracy and confusion matrix."""
    print "NB and wordbag"
    gnb = GaussianNB()
    gnb.fit(x_train,y_train)
    y_pred=gnb.predict(x_test)
    print metrics.accuracy_score(y_test, y_pred)
    print metrics.confusion_matrix(y_test, y_pred)
def get_features_by_wordbag_tfidf():
    """Vectorize the IMDB corpus as binary bag-of-words, then re-weight
    with TF-IDF.  The vocabulary and the IDF weights are fitted on the
    training split and applied to the test split.
    Returns (x_train, x_test, y_train, y_test) as dense arrays / lists.
    """
    global max_features
    x_train, x_test, y_train, y_test=load_all_files()

    vectorizer = CountVectorizer(
                                 decode_error='ignore',
                                 strip_accents='ascii',
                                 max_features=max_features,
                                 stop_words='english',
                                 max_df=1.0,
                                 min_df=1,
                                 binary=True)
    print vectorizer
    x_train=vectorizer.fit_transform(x_train)
    x_train=x_train.toarray()
    # Reuse the training vocabulary for the test set.
    vocabulary=vectorizer.vocabulary_

    vectorizer = CountVectorizer(
                                 decode_error='ignore',
                                 strip_accents='ascii',
                                 vocabulary=vocabulary,
                                 stop_words='english',
                                 max_df=1.0,binary=True,
                                 min_df=1 )
    print vectorizer
    x_test=vectorizer.fit_transform(x_test)
    x_test=x_test.toarray()

    # IDF is fitted on the training counts and then applied to the test set.
    transformer = TfidfTransformer(smooth_idf=False)
    x_train=transformer.fit_transform(x_train)
    x_train=x_train.toarray()
    x_test=transformer.transform(x_test)
    x_test=x_test.toarray()

    return x_train, x_test, y_train, y_test
def rnn_wordbag(trainX, testX, trainY, testY):
    """Train an LSTM classifier (tflearn) on padded word-id sequences.

    Inputs are padded/truncated to max_document_length; labels are
    one-hot encoded for the 2-class softmax output.
    """
    global max_document_length
    print "RNN and wordbag"

    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Network building: embedding -> LSTM -> softmax.
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10,run_id="review",n_epoch=5)
def dnn_wordbag(x_train, x_test, y_train, y_test):
    """Train a small MLP (two hidden layers: 5 and 2 units) on
    bag-of-words features and print accuracy and confusion matrix."""
    print "MLP and wordbag"

    clf = MLPClassifier(solver='lbfgs',
                        alpha=1e-5,
                        hidden_layer_sizes = (5, 2),
                        random_state = 1)
    print  clf
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    print metrics.accuracy_score(y_test, y_pred)
    print metrics.confusion_matrix(y_test, y_pred)
def get_features_by_tf():
    """Vectorize the IMDB corpus as padded word-id sequences using
    tflearn's VocabularyProcessor (vocabulary fitted on the training
    split, reused for the test split)."""
    global max_document_length
    x_train, x_test, y_train, y_test = load_all_files()

    processor = tflearn.data_utils.VocabularyProcessor(
        max_document_length=max_document_length,
        min_frequency=0,
        vocabulary=None,
        tokenizer_fn=None)

    # fit_transform/transform yield generators of id arrays; materialize them.
    x_train = np.array(list(processor.fit_transform(x_train, unused_y=None)))
    x_test = np.array(list(processor.transform(x_test)))

    return x_train, x_test, y_train, y_test
if __name__ == "__main__":
    # Alternative feature pipelines kept for experimentation; only the
    # sequence features + RNN path is active.
    #print "get_features_by_wordbag_tfidf"
    #x_train, x_test, y_train, y_test=get_features_by_wordbag_tfidf()
    print "get_features_by_tf"
    x_train, x_test, y_train, y_test=get_features_by_tf()
    #print "get_features_by_wordbag_tfidf"
    #x,y=get_features_by_wordbag_tfidf()
    #x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.4, random_state = 0)

    #show_diffrent_max_features()
    #dnn_wordbag(x_train, x_test, y_train, y_test)

    #print "get_features_by_tf"
    #x,y=get_features_by_tf()
    #x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.4, random_state = 0)
    #RNN
    rnn_wordbag(x_train, x_test, y_train, y_test)
c196fa8ba12f2d139b4f3b7233fd4f0bdf53fa7a | 391 | py | Python | category/migrations/0003_remove_category_slug.py | Moisestuli/karrata | 962ce0c573214bfc83720727c9cacae823a8c372 | [
"MIT"
] | null | null | null | category/migrations/0003_remove_category_slug.py | Moisestuli/karrata | 962ce0c573214bfc83720727c9cacae823a8c372 | [
"MIT"
] | null | null | null | category/migrations/0003_remove_category_slug.py | Moisestuli/karrata | 962ce0c573214bfc83720727c9cacae823a8c372 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-09 09:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the `slug` field from Category."""

    # Must be applied after the migration that added Category.upload.
    dependencies = [
        ('category', '0002_category_upload'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='category',
            name='slug',
        ),
    ]
| 19.55 | 48 | 0.611253 |
0b23ee06bb7ff048a89fc89f4563d10a300f6873 | 37,242 | py | Python | tests/installation/test_installer.py | zymergen-luke/poetry | 69a6bbe90630b4f1920dc6677771f92f05f564ac | [
"MIT"
] | null | null | null | tests/installation/test_installer.py | zymergen-luke/poetry | 69a6bbe90630b4f1920dc6677771f92f05f564ac | [
"MIT"
] | null | null | null | tests/installation/test_installer.py | zymergen-luke/poetry | 69a6bbe90630b4f1920dc6677771f92f05f564ac | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import sys
import pytest
from poetry.installation import Installer as BaseInstaller
from poetry.installation.noop_installer import NoopInstaller
from poetry.io import NullIO
from poetry.packages import Locker as BaseLocker
from poetry.packages import ProjectPackage
from poetry.repositories import Pool
from poetry.repositories import Repository
from poetry.repositories.installed_repository import InstalledRepository
from poetry.utils._compat import Path
from poetry.utils._compat import PY2
from poetry.utils.toml_file import TomlFile
from poetry.utils.env import NullEnv
from tests.helpers import get_dependency
from tests.helpers import get_package
from tests.repositories.test_pypi_repository import MockRepository
class Installer(BaseInstaller):
    # Test double: records install/update/remove operations without
    # touching a real environment.
    def _get_installer(self):
        return NoopInstaller()
class CustomInstalledRepository(InstalledRepository):
    # Test double: starts empty instead of scanning the active environment.
    @classmethod
    def load(cls, env):
        return cls()
class Locker(BaseLocker):
    """Test double for the lock file: captures what would be written in
    `written_data` instead of touching disk, and lets tests inject lock
    data via `mock_lock_data()`."""

    def __init__(self):
        self._written_data = None
        self._locked = False
        self._content_hash = self._get_content_hash()

    @property
    def written_data(self):
        return self._written_data

    def locked(self, is_locked=True):
        self._locked = is_locked

        return self

    def mock_lock_data(self, data):
        self._lock_data = data

    def is_locked(self):
        return self._locked

    def is_fresh(self):
        # Always report the lock as matching pyproject.toml.
        return True

    def _get_content_hash(self):
        # Fixed hash so tests are deterministic.
        return "123456789"

    def _write_lock_data(self, data):
        # Normalize values so the captured data compares equal to the
        # text fixtures on both Python 2 and 3.
        for package in data["package"]:
            python_versions = str(package["python-versions"])
            if PY2:
                python_versions = python_versions.decode()
                if "requirements" in package:
                    requirements = {}
                    for key, value in package["requirements"].items():
                        requirements[key.decode()] = value.decode()

                    package["requirements"] = requirements

            package["python-versions"] = python_versions

        self._written_data = data
@pytest.fixture(autouse=True)
def setup():
    # Mock python version and platform to get reliable tests
    original_platform = sys.platform
    sys.platform = "darwin"

    yield

    # Restore the real platform after each test.
    sys.platform = original_platform
# --- pytest fixtures wiring the Installer test doubles together ---


@pytest.fixture()
def package():
    # Root project package under test.
    return ProjectPackage("root", "1.0")


@pytest.fixture()
def repo():
    # In-memory package repository the resolver pulls from.
    return Repository()


@pytest.fixture()
def pool(repo):
    pool = Pool()
    pool.add_repository(repo)

    return pool


@pytest.fixture()
def installed():
    # Starts empty; tests add packages to simulate an existing install.
    return CustomInstalledRepository()


@pytest.fixture()
def locker():
    return Locker()


@pytest.fixture()
def env():
    return NullEnv()


@pytest.fixture()
def installer(package, pool, locker, env, installed):
    return Installer(NullIO(), env, package, locker, pool, installed=installed)
def fixture(name):
    """Read and parse the TOML lock-file fixture called *name*."""
    path = Path(__file__).parent / "fixtures" / ("%s.test" % name)
    return TomlFile(path).read()
def test_run_no_dependencies(installer, locker):
    """An empty project locks to the `no-dependencies` fixture."""
    installer.run()
    expected = fixture("no-dependencies")

    assert locker.written_data == expected
def test_run_with_dependencies(installer, locker, repo, package):
    """Two simple dependencies resolve and lock as expected."""
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    repo.add_package(package_a)
    repo.add_package(package_b)

    package.add_dependency("A", "~1.0")
    package.add_dependency("B", "^1.0")

    installer.run()
    expected = fixture("with-dependencies")

    assert locker.written_data == expected
def test_run_update_after_removing_dependencies(
    installer, locker, repo, package, installed
):
    """Updating after a dependency was removed from pyproject removes the
    now-orphaned package (C) and leaves A/B untouched."""
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
                {
                    "name": "B",
                    "version": "1.1",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
                {
                    "name": "C",
                    "version": "1.2",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": [], "B": [], "C": []},
            },
        }
    )
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    package_c = get_package("C", "1.2")
    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c)

    # All three packages are currently installed...
    installed.add_package(package_a)
    installed.add_package(package_b)
    installed.add_package(package_c)

    # ...but the project now only depends on A and B.
    package.add_dependency("A", "~1.0")
    package.add_dependency("B", "~1.1")

    installer.update(True)
    installer.run()
    expected = fixture("with-dependencies")

    assert locker.written_data == expected

    installs = installer.installer.installs
    assert len(installs) == 0

    updates = installer.installer.updates
    assert len(updates) == 0

    removals = installer.installer.removals
    assert len(removals) == 1
def test_run_install_no_dev(installer, locker, repo, package, installed):
    """With dev_mode off, an installed dev-category package (C) is removed."""
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
                {
                    "name": "B",
                    "version": "1.1",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
                {
                    "name": "C",
                    "version": "1.2",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": [], "B": [], "C": []},
            },
        }
    )
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    package_c = get_package("C", "1.2")
    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c)

    installed.add_package(package_a)
    installed.add_package(package_b)
    installed.add_package(package_c)

    package.add_dependency("A", "~1.0")
    package.add_dependency("B", "~1.1")
    package.add_dependency("C", "~1.2", category="dev")

    installer.dev_mode(False)
    installer.run()

    installs = installer.installer.installs
    assert len(installs) == 0

    updates = installer.installer.updates
    assert len(updates) == 0

    removals = installer.installer.removals
    assert len(removals) == 1
def test_run_whitelist_add(installer, locker, repo, package):
    """Whitelisting only B keeps locked A at 1.0 even though 1.1 exists."""
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                }
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": []},
            },
        }
    )
    package_a = get_package("A", "1.0")
    package_a_new = get_package("A", "1.1")
    package_b = get_package("B", "1.1")
    repo.add_package(package_a)
    repo.add_package(package_a_new)
    repo.add_package(package_b)

    package.add_dependency("A", "~1.0")
    package.add_dependency("B", "^1.0")

    installer.update(True)
    installer.whitelist(["B"])

    installer.run()
    expected = fixture("with-dependencies")

    assert locker.written_data == expected
def test_run_whitelist_remove(installer, locker, repo, package):
    """Whitelisting a package that is no longer required removes it from
    the lock."""
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
                {
                    "name": "B",
                    "version": "1.1",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": [], "B": []},
            },
        }
    )
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    repo.add_package(package_a)
    repo.add_package(package_b)

    # Only A is still a project dependency.
    package.add_dependency("A", "~1.0")

    installer.update(True)
    installer.whitelist(["B"])

    installer.run()
    expected = fixture("remove")

    assert locker.written_data == expected
def test_add_with_sub_dependencies(installer, locker, repo, package):
    """Transitive dependencies (A->D, B->C) are resolved and locked."""
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    package_c = get_package("C", "1.2")
    package_d = get_package("D", "1.3")
    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c)
    repo.add_package(package_d)

    package.add_dependency("A", "~1.0")
    package.add_dependency("B", "^1.0")

    package_a.add_dependency("D", "^1.0")
    package_b.add_dependency("C", "~1.2")

    installer.run()
    expected = fixture("with-sub-dependencies")

    assert locker.written_data == expected
def test_run_with_python_versions(installer, locker, repo, package):
    """The resolver honors per-package python-version constraints:
    C 1.3 (py ~3.3) is incompatible with the project, so C 1.2 is chosen."""
    package.python_versions = "~2.7 || ^3.4"

    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    package_c12 = get_package("C", "1.2")
    package_c12.python_versions = "~2.7 || ^3.3"
    package_c13 = get_package("C", "1.3")
    package_c13.python_versions = "~3.3"

    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c12)
    repo.add_package(package_c13)

    package.add_dependency("A", "~1.0")
    package.add_dependency("B", "^1.0")
    package.add_dependency("C", "^1.0")

    installer.run()
    expected = fixture("with-python-versions")

    assert locker.written_data == expected
def test_run_with_optional_and_python_restricted_dependencies(
    installer, locker, repo, package
):
    """Optional deps are locked but not installed; python-restricted deps
    incompatible with the project are skipped at install time."""
    package.python_versions = "~2.7 || ^3.4"

    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    package_c12 = get_package("C", "1.2")
    package_c13 = get_package("C", "1.3")
    package_d = get_package("D", "1.4")
    package_c13.add_dependency("D", "^1.2")

    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c12)
    repo.add_package(package_c13)
    repo.add_package(package_d)

    package.extras = {"foo": [get_dependency("A", "~1.0")]}
    package.add_dependency("A", {"version": "~1.0", "optional": True})
    package.add_dependency("B", {"version": "^1.0", "python": "~2.4"})
    package.add_dependency("C", {"version": "^1.0", "python": "~2.7 || ^3.4"})

    installer.run()
    expected = fixture("with-optional-dependencies")

    assert locker.written_data == expected

    installer = installer.installer
    # We should only have 2 installs:
    # C,D since python version is not compatible
    # with B's python constraint and A is optional
    assert len(installer.installs) == 2
    assert installer.installs[0].name == "d"
    assert installer.installs[1].name == "c"
def test_run_with_optional_and_platform_restricted_dependencies(
    installer, locker, repo, package
):
    """Same as the python-restricted case, but with platform markers
    (the autouse `setup` fixture pins sys.platform to "darwin")."""
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    package_c12 = get_package("C", "1.2")
    package_c13 = get_package("C", "1.3")
    package_d = get_package("D", "1.4")
    package_c13.add_dependency("D", "^1.2")

    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c12)
    repo.add_package(package_c13)
    repo.add_package(package_d)

    package.extras = {"foo": [get_dependency("A", "~1.0")]}
    package.add_dependency("A", {"version": "~1.0", "optional": True})
    package.add_dependency("B", {"version": "^1.0", "platform": "custom"})
    package.add_dependency("C", {"version": "^1.0", "platform": "darwin"})

    installer.run()
    expected = fixture("with-platform-dependencies")

    assert locker.written_data == expected

    installer = installer.installer
    # We should only have 2 installs:
    # C,D since the mocked platform is not compatible
    # with B's platform constraint and A is optional
    assert len(installer.installs) == 2
    assert installer.installs[0].name == "d"
    assert installer.installs[1].name == "c"
def test_run_with_dependencies_extras(installer, locker, repo, package):
    """Requesting a dependency's extra (B[foo]) pulls in its optional dep C."""
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.0")
    package_c = get_package("C", "1.0")

    package_b.extras = {"foo": [get_dependency("C", "^1.0")]}
    package_b.add_dependency("C", {"version": "^1.0", "optional": True})

    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c)

    package.add_dependency("A", "^1.0")
    package.add_dependency("B", {"version": "^1.0", "extras": ["foo"]})

    installer.run()
    expected = fixture("with-dependencies-extras")

    assert locker.written_data == expected
def test_run_does_not_install_extras_if_not_requested(installer, locker, repo, package):
    """Extras are pinned in the lock file but not installed unless asked for."""
    package.extras["foo"] = [get_dependency("D")]
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.0")
    package_c = get_package("C", "1.0")
    package_d = get_package("D", "1.1")

    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c)
    repo.add_package(package_d)

    package.add_dependency("A", "^1.0")
    package.add_dependency("B", "^1.0")
    package.add_dependency("C", "^1.0")
    package.add_dependency("D", {"version": "^1.0", "optional": True})

    installer.run()
    expected = fixture("extras")

    # Extras are pinned in lock
    assert locker.written_data == expected

    # But should not be installed
    installer = installer.installer
    assert len(installer.installs) == 3  # A, B, C
def test_run_installs_extras_if_requested(installer, locker, repo, package):
    """Requesting the 'foo' extra makes the optional package D install too."""
    package.extras["foo"] = [get_dependency("D")]
    for name, version in (("A", "1.0"), ("B", "1.0"), ("C", "1.0"), ("D", "1.1")):
        repo.add_package(get_package(name, version))
    package.add_dependency("A", "^1.0")
    package.add_dependency("B", "^1.0")
    package.add_dependency("C", "^1.0")
    package.add_dependency("D", {"version": "^1.0", "optional": True})
    installer.extras(["foo"])
    installer.run()
    # Extras are pinned in the lock file...
    assert locker.written_data == fixture("extras")
    # ...and since "foo" was requested, D is installed as well.
    assert len(installer.installer.installs) == 4  # A, B, C, D
def test_run_installs_extras_with_deps_if_requested(installer, locker, repo, package):
    """An extra's package (C) drags its own dependency (D) into the install."""
    package.extras["foo"] = [get_dependency("C")]
    pkg_c = get_package("C", "1.0")
    # C itself depends on D, so requesting the extra must install both.
    pkg_c.add_dependency("D", "^1.0")
    for pkg in (get_package("A", "1.0"), get_package("B", "1.0"), pkg_c, get_package("D", "1.1")):
        repo.add_package(pkg)
    package.add_dependency("A", "^1.0")
    package.add_dependency("B", "^1.0")
    package.add_dependency("C", {"version": "^1.0", "optional": True})
    installer.extras(["foo"])
    installer.run()
    # Extras are pinned in the lock file...
    assert locker.written_data == fixture("extras-with-dependencies")
    # ...and C plus its transitive dependency D are installed.
    assert len(installer.installer.installs) == 4  # A, B, C, D
def test_run_installs_extras_with_deps_if_requested_locked(
    installer, locker, repo, package
):
    """Extras with transitive deps install fully when resolving from a lock file."""
    locker.locked(True)
    locker.mock_lock_data(fixture("extras-with-dependencies"))
    package.extras["foo"] = [get_dependency("C")]
    pkg_c = get_package("C", "1.0")
    pkg_c.add_dependency("D", "^1.0")
    for pkg in (get_package("A", "1.0"), get_package("B", "1.0"), pkg_c, get_package("D", "1.1")):
        repo.add_package(pkg)
    package.add_dependency("A", "^1.0")
    package.add_dependency("B", "^1.0")
    package.add_dependency("C", {"version": "^1.0", "optional": True})
    installer.extras(["foo"])
    installer.run()
    # All four packages, including the extra and its dependency, are installed.
    assert len(installer.installer.installs) == 4  # A, B, C, D
def test_installer_with_pypi_repository(package, locker, installed):
    """End-to-end run against the mocked PyPI repository."""
    pool = Pool()
    pool.add_repository(MockRepository())
    installer = Installer(
        NullIO(), NullEnv(), package, locker, pool, installed=installed
    )
    package.add_dependency("pytest", "^3.5", category="dev")
    installer.run()
    assert locker.written_data == fixture("with-pypi-repository")
def test_run_installs_with_local_file(installer, locker, repo, package):
    """A file: dependency (a local wheel) is locked and installed with its deps."""
    wheel = Path("tests/fixtures/distributions/demo-0.1.0-py2.py3-none-any.whl")
    package.add_dependency("demo", {"file": str(wheel)})
    repo.add_package(get_package("pendulum", "1.4.4"))
    installer.run()
    assert locker.written_data == fixture("with-file-dependency")
    # demo itself plus its pendulum requirement
    assert len(installer.installer.installs) == 2
def test_run_installs_with_local_poetry_directory_and_extras(
    installer, locker, repo, package, tmpdir
):
    """A path: dependency on a poetry project honours the requested extras."""
    project_dir = Path("tests/fixtures/project_with_extras")
    package.add_dependency(
        "project-with-extras", {"path": str(project_dir), "extras": ["extras_a"]}
    )
    repo.add_package(get_package("pendulum", "1.4.4"))
    installer.run()
    assert locker.written_data == fixture("with-directory-dependency-poetry")
    assert len(installer.installer.installs) == 2
def test_run_installs_with_local_poetry_directory_transitive(
    installer, locker, repo, package, tmpdir
):
    """Directory dependencies of directory dependencies are resolved too."""
    project_dir = Path(
        "tests/fixtures/directory/project_with_transitive_directory_dependencies/"
    )
    package.add_dependency(
        "project-with-transitive-directory-dependencies", {"path": str(project_dir)}
    )
    repo.add_package(get_package("pendulum", "1.4.4"))
    repo.add_package(get_package("cachy", "0.2.0"))
    installer.run()
    assert locker.written_data == fixture("with-directory-dependency-poetry-transitive")
    assert len(installer.installer.installs) == 2
def test_run_installs_with_local_setuptools_directory(
    installer, locker, repo, package, tmpdir
):
    """A path: dependency on a setup.py-based project installs with its deps."""
    project_dir = Path("tests/fixtures/project_with_setup/")
    package.add_dependency("my-package", {"path": str(project_dir)})
    repo.add_package(get_package("pendulum", "1.4.4"))
    repo.add_package(get_package("cachy", "0.2.0"))
    installer.run()
    assert locker.written_data == fixture("with-directory-dependency-setuptools")
    assert len(installer.installer.installs) == 3
def test_run_with_prereleases(installer, locker, repo, package):
    # Whitelisting only B for update must keep A at its locked pre-release,
    # which stays selectable because "allows-prereleases" is set on the
    # dependency.
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0a2",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                }
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": []},
            },
        }
    )
    package_a = get_package("A", "1.0a2")
    package_b = get_package("B", "1.1")
    repo.add_package(package_a)
    repo.add_package(package_b)
    package.add_dependency("A", {"version": "*", "allows-prereleases": True})
    package.add_dependency("B", "^1.1")
    # Only B is whitelisted for the update; A stays at the locked 1.0a2.
    installer.update(True)
    installer.whitelist({"B": "^1.1"})
    installer.run()
    expected = fixture("with-prereleases")
    assert locker.written_data == expected
def test_run_changes_category_if_needed(installer, locker, repo, package):
    # A is locked as an optional "dev" dependency, but the new B 1.1 requires
    # A at runtime; updating B must rewrite A's category in the lock file.
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "dev",
                    "optional": True,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                }
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": []},
            },
        }
    )
    package_a = get_package("A", "1.0")
    package_b = get_package("B", "1.1")
    # B pulls A in as a regular (main) dependency.
    package_b.add_dependency("A", "^1.0")
    repo.add_package(package_a)
    repo.add_package(package_b)
    package.add_dependency("A", {"version": "^1.0", "optional": True}, category="dev")
    package.add_dependency("B", "^1.1")
    installer.update(True)
    installer.whitelist(["B"])
    installer.run()
    expected = fixture("with-category-change")
    assert locker.written_data == expected
def test_run_update_all_with_lock(installer, locker, repo, package):
    # A full update (no whitelist) should move A from the locked 1.0 to the
    # newer 1.1 available in the repository.
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "dev",
                    "optional": True,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                }
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": []},
            },
        }
    )
    package_a = get_package("A", "1.1")
    repo.add_package(get_package("A", "1.0"))
    repo.add_package(package_a)
    package.add_dependency("A")
    installer.update(True)
    installer.run()
    expected = fixture("update-with-lock")
    assert locker.written_data == expected
def test_run_update_with_locked_extras(installer, locker, repo, package):
    # Only D is whitelisted for update; the locked extra wiring of A (extra
    # "foo" -> B) and the python-marked C must be preserved in the new lock.
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                    "dependencies": {"B": "^1.0", "C": "^1.0"},
                },
                {
                    "name": "B",
                    "version": "1.0",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
                {
                    "name": "C",
                    "version": "1.1",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                    "requirements": {"python": "~2.7"},
                },
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": [], "B": [], "C": []},
            },
        }
    )
    package_a = get_package("A", "1.0")
    # B is only required via A's "foo" extra; C carries a python marker.
    package_a.extras["foo"] = [get_dependency("B")]
    b_dependency = get_dependency("B", "^1.0", optional=True)
    b_dependency.in_extras.append("foo")
    c_dependency = get_dependency("C", "^1.0")
    c_dependency.python_versions = "~2.7"
    package_a.requires.append(b_dependency)
    package_a.requires.append(c_dependency)
    repo.add_package(package_a)
    repo.add_package(get_package("B", "1.0"))
    repo.add_package(get_package("C", "1.1"))
    repo.add_package(get_package("D", "1.1"))
    package.add_dependency("A", {"version": "^1.0", "extras": ["foo"]})
    package.add_dependency("D", "^1.0")
    installer.update(True)
    installer.whitelist("D")
    installer.run()
    expected = fixture("update-with-locked-extras")
    assert locker.written_data == expected
def test_run_install_duplicate_dependencies_different_constraints(
    installer, locker, repo, package
):
    """A requires B twice with disjoint python markers; only one branch applies."""
    package.add_dependency("A")
    pkg_a = get_package("A", "1.0")
    # Two mutually exclusive constraints on B, split on the python version.
    pkg_a.add_dependency("B", {"version": "^1.0", "python": "<4.0"})
    pkg_a.add_dependency("B", {"version": "^2.0", "python": ">=4.0"})
    pkg_b10 = get_package("B", "1.0")
    pkg_b20 = get_package("B", "2.0")
    pkg_b10.add_dependency("C", "1.2")
    pkg_b20.add_dependency("C", "1.5")
    pkg_c12 = get_package("C", "1.2")
    pkg_c15 = get_package("C", "1.5")
    for pkg in (pkg_a, pkg_b10, pkg_b20, pkg_c12, pkg_c15):
        repo.add_package(pkg)
    installer.run()
    assert locker.written_data == fixture("with-duplicate-dependencies")
    mock = installer.installer
    # Only the "<4.0" branch matches the current interpreter: C 1.2, B 1.0, A.
    assert list(mock.installs) == [pkg_c12, pkg_b10, pkg_a]
    assert len(mock.updates) == 0
    assert len(mock.removals) == 0
def test_run_install_duplicate_dependencies_different_constraints_with_lock(
    installer, locker, repo, package
):
    # Updating against a lock that already contains the marker-split duplicate
    # B entries (B 1.0 for python<4.0, B 2.0 for python>=4.0) must reproduce
    # the same lock and install only the branch matching the interpreter.
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                    "dependencies": {
                        "B": [
                            {"version": "^1.0", "python": "<4.0"},
                            {"version": "^2.0", "python": ">=4.0"},
                        ]
                    },
                },
                {
                    "name": "B",
                    "version": "1.0",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                    "dependencies": {"C": "1.2"},
                    "requirements": {"python": "<4.0"},
                },
                {
                    "name": "B",
                    "version": "2.0",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                    "dependencies": {"C": "1.5"},
                    "requirements": {"python": ">=4.0"},
                },
                {
                    "name": "C",
                    "version": "1.2",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
                {
                    "name": "C",
                    "version": "1.5",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": [], "B": [], "C": []},
            },
        }
    )
    package.add_dependency("A")
    package_a = get_package("A", "1.0")
    package_a.add_dependency("B", {"version": "^1.0", "python": "<4.0"})
    package_a.add_dependency("B", {"version": "^2.0", "python": ">=4.0"})
    package_b10 = get_package("B", "1.0")
    package_b20 = get_package("B", "2.0")
    package_b10.add_dependency("C", "1.2")
    package_b20.add_dependency("C", "1.5")
    package_c12 = get_package("C", "1.2")
    package_c15 = get_package("C", "1.5")
    repo.add_package(package_a)
    repo.add_package(package_b10)
    repo.add_package(package_b20)
    repo.add_package(package_c12)
    repo.add_package(package_c15)
    installer.update(True)
    installer.run()
    expected = fixture("with-duplicate-dependencies")
    assert locker.written_data == expected
    # Only the branch matching the current python is installed: C, B, A.
    installs = installer.installer.installs
    assert len(installs) == 3
    updates = installer.installer.updates
    assert len(updates) == 0
    removals = installer.installer.removals
    assert len(removals) == 0
def test_run_update_uninstalls_after_removal_transient_dependency(
    installer, locker, repo, package, installed
):
    # B is locked and installed, but its marker (python < 2.0) no longer
    # matches; after the update B must be removed and nothing (re)installed.
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                    "dependencies": {"B": {"version": "^1.0", "python": "<2.0"}},
                },
                {
                    "name": "B",
                    "version": "1.0",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": [], "B": []},
            },
        }
    )
    package.add_dependency("A")
    package_a = get_package("A", "1.0")
    package_a.add_dependency("B", {"version": "^1.0", "python": "<2.0"})
    package_b10 = get_package("B", "1.0")
    repo.add_package(package_a)
    repo.add_package(package_b10)
    # Both packages are currently present in the environment.
    installed.add_package(get_package("A", "1.0"))
    installed.add_package(get_package("B", "1.0"))
    installer.update(True)
    installer.run()
    installs = installer.installer.installs
    assert len(installs) == 0
    updates = installer.installer.updates
    assert len(updates) == 0
    # Only B, whose marker no longer applies, is uninstalled.
    removals = installer.installer.removals
    assert len(removals) == 1
def test_run_install_duplicate_dependencies_different_constraints_with_lock_update(
    installer, locker, repo, package, installed
):
    # The lock holds marker-split duplicate B entries; updating A to 1.1
    # (which requires plain B ^2.0) collapses the duplicates. Expect an
    # update of A, installs for the new B/C pair, and no removals.
    locker.locked(True)
    locker.mock_lock_data(
        {
            "package": [
                {
                    "name": "A",
                    "version": "1.0",
                    "category": "main",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                    "dependencies": {
                        "B": [
                            {"version": "^1.0", "python": "<4.0"},
                            {"version": "^2.0", "python": ">=4.0"},
                        ]
                    },
                },
                {
                    "name": "B",
                    "version": "1.0",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                    "dependencies": {"C": "1.2"},
                    "requirements": {"python": "<4.0"},
                },
                {
                    "name": "B",
                    "version": "2.0",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                    "dependencies": {"C": "1.5"},
                    "requirements": {"python": ">=4.0"},
                },
                {
                    "name": "C",
                    "version": "1.2",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
                {
                    "name": "C",
                    "version": "1.5",
                    "category": "dev",
                    "optional": False,
                    "platform": "*",
                    "python-versions": "*",
                    "checksum": [],
                },
            ],
            "metadata": {
                "python-versions": "*",
                "platform": "*",
                "content-hash": "123456789",
                "hashes": {"A": [], "B": [], "C": []},
            },
        }
    )
    package.add_dependency("A")
    package_a = get_package("A", "1.1")
    # The new A release requires B ^2.0 unconditionally.
    package_a.add_dependency("B", "^2.0")
    package_b10 = get_package("B", "1.0")
    package_b20 = get_package("B", "2.0")
    package_b10.add_dependency("C", "1.2")
    package_b20.add_dependency("C", "1.5")
    package_c12 = get_package("C", "1.2")
    package_c15 = get_package("C", "1.5")
    repo.add_package(package_a)
    repo.add_package(package_b10)
    repo.add_package(package_b20)
    repo.add_package(package_c12)
    repo.add_package(package_c15)
    # Only A 1.0 is currently installed.
    installed.add_package(get_package("A", "1.0"))
    installer.update(True)
    installer.whitelist(["A"])
    installer.run()
    expected = fixture("with-duplicate-dependencies-update")
    assert locker.written_data == expected
    installs = installer.installer.installs
    assert len(installs) == 2
    updates = installer.installer.updates
    assert len(updates) == 1
    removals = installer.installer.removals
    assert len(removals) == 0
@pytest.mark.skip(
    "This is not working at the moment due to limitations in the resolver"
)
def test_installer_test_solver_finds_compatible_package_for_dependency_python_not_fully_compatible_with_package_python(
    installer, locker, repo, package, installed
):
    # The dependency's python marker (^3.5) is narrower than the package's
    # supported pythons (~2.7 || ^3.4); the solver should still lock an A
    # release compatible with the marker.
    package.python_versions = "~2.7 || ^3.4"
    package.add_dependency("A", {"version": "^1.0", "python": "^3.5"})
    package_a101 = get_package("A", "1.0.1")
    package_a101.python_versions = ">=3.6"
    package_a100 = get_package("A", "1.0.0")
    package_a100.python_versions = ">=3.5"
    repo.add_package(package_a100)
    repo.add_package(package_a101)
    installer.run()
    expected = fixture("with-conditional-dependency")
    assert locker.written_data == expected
    installs = installer.installer.installs
    # A is only installed when the running interpreter satisfies its marker.
    if sys.version_info >= (3, 5, 0):
        assert len(installs) == 1
    else:
        assert len(installs) == 0
| 29.393844 | 119 | 0.531846 |
5dbcb3675aafe65972fc85e355fcfd1e5c1bfdad | 732 | py | Python | THUMT-master/thumt/models/__init__.py | shawnkx/Fast-MoS | af54093c7368da34d6b709ab8aaf4fd2218348d8 | [
"MIT"
] | 3 | 2019-07-17T19:44:26.000Z | 2020-01-28T04:39:08.000Z | THUMT-master/thumt/models/__init__.py | shawnkx/Fast-MoS | af54093c7368da34d6b709ab8aaf4fd2218348d8 | [
"MIT"
] | null | null | null | THUMT-master/thumt/models/__init__.py | shawnkx/Fast-MoS | af54093c7368da34d6b709ab8aaf4fd2218348d8 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import thumt.models.seq2seq
import thumt.models.rnnsearch
import thumt.models.transformer
import thumt.models.transformer_relpos
def get_model(name):
name = name.lower()
if name == "rnnsearch":
return thumt.models.rnnsearch.RNNsearch
elif name == "seq2seq":
return thumt.models.seq2seq.Seq2Seq
elif name == "transformer":
return thumt.models.transformer.Transformer
elif name == "transformer_relpos":
return thumt.models.transformer_relpos.Transformer
else:
raise LookupError("Unknown model %s" % name)
| 27.111111 | 59 | 0.737705 |
2a3fd7db75759685bae966f662f407e3f5697745 | 13,038 | py | Python | tests/torch/nas/test_scheduler.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
] | null | null | null | tests/torch/nas/test_scheduler.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
] | null | null | null | tests/torch/nas/test_scheduler.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from functools import partial
from typing import List
import pytest
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.base_handler import SingleElasticityHandler
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_depth import ElasticDepthHandler
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_width import ElasticWidthHandler
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elasticity_dim import ElasticityDim
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.multi_elasticity_handler import MultiElasticityHandler
from nncf.experimental.torch.nas.bootstrapNAS.training.base_training import BNASTrainingAlgorithm
from nncf.experimental.torch.nas.bootstrapNAS.training.progressive_shrinking_builder import ProgressiveShrinkingBuilder
from nncf.experimental.torch.nas.bootstrapNAS.training.progressive_shrinking_controller import \
ProgressiveShrinkingController
from nncf.experimental.torch.nas.bootstrapNAS.training.scheduler import BootstrapNASScheduler
from nncf.experimental.torch.nas.bootstrapNAS.training.scheduler import NASSchedulerParams
from nncf.experimental.torch.nas.bootstrapNAS.training.stage_descriptor import StageDescriptor
from nncf.torch.nncf_network import NNCFNetwork
from tests.torch.helpers import MockModel
# Per-stage elasticity dims: kernel first, then width is added, then depth.
LIST_STAGES__K_KW_KWD = [
    [ElasticityDim.KERNEL],
    [ElasticityDim.KERNEL, ElasticityDim.WIDTH],
    [ElasticityDim.KERNEL, ElasticityDim.WIDTH, ElasticityDim.DEPTH]
]
# Same idea, but depth is activated before width.
LIST_STAGES__K_KD_KDW = [
    [ElasticityDim.KERNEL],
    [ElasticityDim.KERNEL, ElasticityDim.DEPTH],
    [ElasticityDim.KERNEL, ElasticityDim.DEPTH, ElasticityDim.WIDTH]
]
# Five one-epoch stages: kernel -> +depth (indicator 1, then 2) ->
# +width (indicator 2, then 3) with weight reorganization.
SIMPLE_LIST_STAGE_DESCRIPTORS = [
    StageDescriptor(train_dims=[ElasticityDim.KERNEL],
                    epochs=1),
    StageDescriptor(train_dims=[ElasticityDim.KERNEL, ElasticityDim.DEPTH],
                    epochs=1, depth_indicator=1),
    StageDescriptor(train_dims=[ElasticityDim.KERNEL, ElasticityDim.DEPTH],
                    epochs=1, depth_indicator=2),
    StageDescriptor(train_dims=[ElasticityDim.KERNEL, ElasticityDim.DEPTH, ElasticityDim.WIDTH],
                    epochs=1, depth_indicator=2, reorg_weights=True, width_indicator=2),
    StageDescriptor(train_dims=[ElasticityDim.KERNEL, ElasticityDim.DEPTH, ElasticityDim.WIDTH],
                    epochs=1, depth_indicator=2, reorg_weights=True, width_indicator=3),
]
@pytest.fixture(name='schedule_params', params=[SIMPLE_LIST_STAGE_DESCRIPTORS], ids=['simple_desc'])
def fixture_schedule_params(request):
    """Wrap the parametrized list of stage descriptors into scheduler params."""
    return NASSchedulerParams(request.param)
# Full dim list used by most tests as both progressivity order and
# available-dims set.
LIST_DIMS__KDW = [ElasticityDim.KERNEL, ElasticityDim.DEPTH, ElasticityDim.WIDTH]
class TestScheduler:
    """Unit tests for BootstrapNASScheduler stage progression."""
    def test_get_stage(self, schedule_params: NASSchedulerParams, mocker):
        """Stage descriptor and index must follow the schedule on epoch_step."""
        training_ctrl_mock = mocker.MagicMock(spec=BNASTrainingAlgorithm)
        scheduler = BootstrapNASScheduler(training_ctrl_mock, schedule_params, LIST_DIMS__KDW, LIST_DIMS__KDW)
        # Epoch 0: kernel-only stage.
        scheduler.epoch_step()
        ref_desc = StageDescriptor(train_dims=[ElasticityDim.KERNEL])
        act_desc, act_idx = scheduler.get_current_stage_desc()
        assert ref_desc == act_desc
        assert act_idx == 0
        # Jump to epoch 2: kernel + depth with depth_indicator=2.
        scheduler.epoch_step(next_epoch=2)
        ref_desc.train_dims.append(ElasticityDim.DEPTH)
        ref_desc.depth_indicator = 2
        act_desc, act_idx = scheduler.get_current_stage_desc()
        assert ref_desc == act_desc
        assert act_idx == 2
        # Next epoch adds width (indicator 2) with weight reorganization.
        scheduler.epoch_step()
        ref_desc.train_dims.append(ElasticityDim.WIDTH)
        ref_desc.reorg_weights = True
        ref_desc.width_indicator = 2
        act_desc, act_idx = scheduler.get_current_stage_desc()
        assert ref_desc == act_desc
        assert act_idx == 3
        # Final stage only bumps the width indicator to 3.
        scheduler.epoch_step()
        ref_desc.width_indicator = 3
        act_desc, act_idx = scheduler.get_current_stage_desc()
        assert ref_desc == act_desc
        assert act_idx == 4
    def test_epoch_step(self, schedule_params, mocker):
        """epoch_step must enable handlers and set indicators per the schedule."""
        mock_model = MockModel()
        mock_nncf_network = mocker.MagicMock(spec=NNCFNetwork)
        mock_width_handler = mocker.MagicMock(spec=ElasticWidthHandler)
        mock_depth_handler = mocker.MagicMock(spec=ElasticDepthHandler)
        mock_kernel_handler = mocker.MagicMock(spec=SingleElasticityHandler)
        handlers = OrderedDict({
            ElasticityDim.WIDTH: mock_width_handler,
            ElasticityDim.KERNEL: mock_kernel_handler,
            ElasticityDim.DEPTH: mock_depth_handler,
        })
        mock_handler = MultiElasticityHandler(handlers, mock_nncf_network)
        # pylint:disable=protected-access
        is_handler_enabled_map = mock_handler._is_handler_enabled_map
        mock_elasticity_ctrl = mocker.stub()
        mock_elasticity_ctrl.multi_elasticity_handler = mock_handler
        training_algo = ProgressiveShrinkingController(mock_model, mock_elasticity_ctrl, mocker.stub(),
                                                      ProgressiveShrinkingBuilder.DEFAULT_PROGRESSIVITY,
                                                      schedule_params)
        scheduler = training_algo.scheduler
        # Stage 1: kernel only.
        scheduler.epoch_step()
        assert is_handler_enabled_map == {
            ElasticityDim.WIDTH: False,
            ElasticityDim.DEPTH: False,
            ElasticityDim.KERNEL: True
        }
        # Stage 2: depth joins with depth_indicator=1.
        scheduler.epoch_step()
        assert is_handler_enabled_map == {
            ElasticityDim.WIDTH: False,
            ElasticityDim.DEPTH: True,
            ElasticityDim.KERNEL: True
        }
        assert mock_depth_handler.depth_indicator == 1
        # Stage 3: same dims, depth_indicator bumped to 2.
        scheduler.epoch_step()
        assert is_handler_enabled_map == {
            ElasticityDim.WIDTH: False,
            ElasticityDim.DEPTH: True,
            ElasticityDim.KERNEL: True
        }
        assert mock_depth_handler.depth_indicator == 2
        # Stage 4: width joins with reorganization and indicator 2.
        scheduler.epoch_step()
        assert is_handler_enabled_map == {
            ElasticityDim.WIDTH: True,
            ElasticityDim.DEPTH: True,
            ElasticityDim.KERNEL: True
        }
        mock_width_handler.reorganize_weights.assert_called()
        assert mock_width_handler.width_num_params_indicator == 2
        # Stage 5: width indicator bumped to 3.
        scheduler.epoch_step()
        assert is_handler_enabled_map == {
            ElasticityDim.WIDTH: True,
            ElasticityDim.DEPTH: True,
            ElasticityDim.KERNEL: True
        }
        mock_width_handler.reorganize_weights.assert_called()
        assert mock_width_handler.width_num_params_indicator == 3
    def test_get_total_training_epochs(self, schedule_params, mocker):
        """Total training epochs is the sum over the five one-epoch stages."""
        scheduler = BootstrapNASScheduler(mocker.stub(), schedule_params,
                                          available_elasticity_dims=LIST_DIMS__KDW,
                                          progressivity_of_elasticity=LIST_DIMS__KDW)
        assert scheduler.get_total_training_epochs() == 5
class SchedulerTestDesc:
    """Description of one scheduler/builder consistency scenario.

    Bundles the per-stage elasticity dims, the declared progressivity order,
    the dims actually available, and flags saying whether the builder and/or
    scheduler is expected to reject the combination.
    """
    def __init__(self, list_stage_dims: List[List[ElasticityDim]],
                 progressivity_of_elasticity: List[ElasticityDim],
                 available_elasticity_dims: List[ElasticityDim],
                 name: str = '',
                 error_in_scheduler: bool = False,
                 error_in_builder: bool = False):
        self.name = name
        self.list_stage_dims = list_stage_dims
        self.progressivity_of_elasticity = progressivity_of_elasticity
        self.available_elasticity_dims = available_elasticity_dims
        self.error_in_scheduler = error_in_scheduler
        self.error_in_builder = error_in_builder
    def __str__(self):
        # Used by pytest as the parametrization id.
        return self.name
    @property
    def scheduler_params(self) -> NASSchedulerParams:
        """Build NASSchedulerParams from the raw per-stage dim lists."""
        stage_descriptions = [
            {"train_dims": [dim.value for dim in stage_dims]}
            for stage_dims in self.list_stage_dims
        ]
        return NASSchedulerParams.from_config({"list_stage_descriptions": stage_descriptions})
# Scenarios for TestElasticityConsistency: each desc pairs a per-stage dim
# schedule with a progressivity order and an available-dims set, plus flags
# for whether the builder and/or the scheduler should raise ValueError.
LIST_SCHEDULER_DESCS = [
    SchedulerTestDesc(
        name='default',
        list_stage_dims=LIST_STAGES__K_KD_KDW,
        progressivity_of_elasticity=LIST_DIMS__KDW,
        available_elasticity_dims=LIST_DIMS__KDW,
    ),
    SchedulerTestDesc(
        name='wrong order in progressivity',
        list_stage_dims=LIST_STAGES__K_KW_KWD,
        progressivity_of_elasticity=LIST_DIMS__KDW,
        available_elasticity_dims=LIST_DIMS__KDW,
        error_in_scheduler=True
    ),
    SchedulerTestDesc(
        name='limited progressivity',
        list_stage_dims=LIST_STAGES__K_KW_KWD,
        progressivity_of_elasticity=[ElasticityDim.KERNEL],
        available_elasticity_dims=LIST_DIMS__KDW,
        error_in_builder=True,
        error_in_scheduler=True,
    ),
    SchedulerTestDesc(
        name='limited enabled dims',
        list_stage_dims=LIST_STAGES__K_KW_KWD,
        progressivity_of_elasticity=LIST_DIMS__KDW,
        available_elasticity_dims=[ElasticityDim.KERNEL],
        error_in_scheduler=True
    ),
    SchedulerTestDesc(
        name='limited progressivity and enabled dims',
        list_stage_dims=LIST_STAGES__K_KW_KWD,
        progressivity_of_elasticity=[ElasticityDim.KERNEL],
        available_elasticity_dims=[ElasticityDim.KERNEL],
        error_in_scheduler=True,
    ),
    SchedulerTestDesc(
        name='limited list stages',
        list_stage_dims=[[ElasticityDim.KERNEL]],
        progressivity_of_elasticity=LIST_DIMS__KDW,
        available_elasticity_dims=LIST_DIMS__KDW,
    ),
    SchedulerTestDesc(
        name='violated progressivity',
        list_stage_dims=LIST_STAGES__K_KW_KWD,
        progressivity_of_elasticity=[ElasticityDim.KERNEL, ElasticityDim.DEPTH, ElasticityDim.WIDTH],
        available_elasticity_dims=LIST_DIMS__KDW,
        error_in_scheduler=True,
    ),
    SchedulerTestDesc(
        name='order within stage doesn\'t matter',
        list_stage_dims=[
            [ElasticityDim.KERNEL],
            [ElasticityDim.DEPTH, ElasticityDim.KERNEL],
            [ElasticityDim.DEPTH, ElasticityDim.WIDTH, ElasticityDim.KERNEL]
        ],
        progressivity_of_elasticity=LIST_DIMS__KDW,
        available_elasticity_dims=LIST_DIMS__KDW,
    ),
    SchedulerTestDesc(
        name='new single dim on each stage',
        list_stage_dims=[
            [ElasticityDim.KERNEL],
            [ElasticityDim.DEPTH],
            [ElasticityDim.WIDTH],
        ],
        progressivity_of_elasticity=LIST_DIMS__KDW,
        available_elasticity_dims=LIST_DIMS__KDW,
        error_in_scheduler=True,
    ),
    SchedulerTestDesc(
        name='intermediate dim is not enabled',
        list_stage_dims=[
            [ElasticityDim.KERNEL],
            [ElasticityDim.DEPTH, ElasticityDim.KERNEL],
        ],
        progressivity_of_elasticity=[ElasticityDim.KERNEL, ElasticityDim.WIDTH, ElasticityDim.DEPTH],
        available_elasticity_dims=[ElasticityDim.KERNEL, ElasticityDim.DEPTH],
    ),
    SchedulerTestDesc(
        name='limited list stages started from intermediate',
        list_stage_dims=[
            [ElasticityDim.DEPTH],
            [ElasticityDim.DEPTH, ElasticityDim.WIDTH]
        ],
        progressivity_of_elasticity=LIST_DIMS__KDW,
        available_elasticity_dims=LIST_DIMS__KDW,
    ),
]
@pytest.mark.parametrize('desc', LIST_SCHEDULER_DESCS, ids=map(str, LIST_SCHEDULER_DESCS))
class TestElasticityConsistency:
    """Inconsistent elasticity configurations must be rejected with ValueError."""
    def test_checks_on_scheduler_init(self, mocker, desc: SchedulerTestDesc):
        """Scheduler validation fires (or not) when descriptors are accessed."""
        scheduler = BootstrapNASScheduler(
            mocker.stub(),
            desc.scheduler_params,
            progressivity_of_elasticity=desc.progressivity_of_elasticity,
            available_elasticity_dims=desc.available_elasticity_dims)
        if desc.error_in_scheduler:
            with pytest.raises(ValueError):
                _ = scheduler.list_stage_descriptors
        else:
            _ = scheduler.list_stage_descriptors
    def test_progressivity_vs_enabled_dims(self, desc: SchedulerTestDesc):
        """Builder-side consistency check between available dims and progressivity."""
        check = ProgressiveShrinkingBuilder.check_elasticity_dims_consistency
        if desc.error_in_builder:
            with pytest.raises(ValueError):
                check(desc.available_elasticity_dims, desc.progressivity_of_elasticity)
        else:
            check(desc.available_elasticity_dims, desc.progressivity_of_elasticity)
| 42.469055 | 119 | 0.702102 |
7a906e32f4b30f84c3a04de8b9a2af3f823f61a0 | 742 | py | Python | molsysmt/item/nglview_NGLWidget/to_string_pdb_text.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/item/nglview_NGLWidget/to_string_pdb_text.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/item/nglview_NGLWidget/to_string_pdb_text.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | from molsysmt._private.exceptions import *
from molsysmt._private.digestion import *
def to_string_pdb_text(item, atom_indices='all', structure_indices='all', check=True):
if check:
digest_item(item, 'string:pdb_text')
atom_indices = digest_atom_indices(atom_indices)
structure_indices = digest_structure_indices(structure_indices)
from ..string_pdb_text import extract
try:
tmp_item = item.component_0.get_structure_string()
except:
tmp_item = item.get_state()['_ngl_msg_archive'][0]['args'][0]['data']
tmp_item = extract(tmp_item, atom_indices=atom_indices, structure_indices=structure_indices,
copy_if_all=False, check=False)
return tmp_item
| 28.538462 | 96 | 0.712938 |
245e7d2a9a6161e3df1470f6cd09f03a985feed3 | 16,789 | py | Python | synapseclient/multipart_upload.py | pcstout/synapsePythonClient | 25ec524dd6acde1fe66224fba04abb3a549faa9e | [
"Apache-2.0"
] | null | null | null | synapseclient/multipart_upload.py | pcstout/synapsePythonClient | 25ec524dd6acde1fe66224fba04abb3a549faa9e | [
"Apache-2.0"
] | null | null | null | synapseclient/multipart_upload.py | pcstout/synapsePythonClient | 25ec524dd6acde1fe66224fba04abb3a549faa9e | [
"Apache-2.0"
] | null | null | null | """
************************
Synapse Multipart Upload
************************
Implements the client side of `Synapse multipart upload`_, which provides a robust means of uploading large files (into
the 10s of GB). End users should not need to call any of these functions directly.
.. _Synapse multipart upload:
http://docs.synapse.org/rest/index.html#org.sagebionetworks.file.controller.UploadController
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import hashlib
import json
import math
import mimetypes
import os
import requests
import time
import warnings
from ctypes import c_bool
import concurrent.futures
try:
from urllib.parse import urlparse
from urllib.parse import parse_qs
except ImportError:
from urlparse import urlparse
from urlparse import parse_qs
from . import exceptions
from .utils import printTransferProgress, md5_for_file, MB
from .dict_object import DictObject
from .exceptions import SynapseError
from .exceptions import SynapseHTTPError
from .utils import threadsafe_generator
# Hard cap on the number of parts in one multipart upload.
MAX_NUMBER_OF_PARTS = 10000
# Smallest part size this client will use (the service minimum is lower;
# see calculate_part_size's docstring, which cites a 5 MB floor).
MIN_PART_SIZE = 8*MB
# Maximum number of passes over the remaining parts before giving up.
MAX_RETRIES = 7
def find_parts_to_upload(part_status):
    """Return the 1-based part numbers that are not yet uploaded.

    ``part_status`` is a string such as "1001110" where '1' marks a completed
    part and '0' a pending one.
    """
    pending = []
    for position, flag in enumerate(part_status, start=1):
        if flag == '0':
            pending.append(position)
    return pending
def count_completed_parts(part_status):
    """Return the number of parts already uploaded.

    ``part_status`` is a string such as "1001110" where '1' marks a completed
    part and '0' a pending one.
    """
    # str.count does the tally in one C-level pass instead of materializing
    # an intermediate list just to take its length.
    return part_status.count('1')
def calculate_part_size(fileSize, partSize=None, min_part_size=MIN_PART_SIZE, max_parts=MAX_NUMBER_OF_PARTS):
    """
    Pick (or validate) a part size for a multipart upload: parts must be at
    least 5 MB (``min_part_size``) and the file must fit into at most
    10,000 (``max_parts``) parts.
    """
    if partSize is None:
        # Smallest allowed size that still keeps the part count under the cap.
        partSize = max(min_part_size, int(math.ceil(fileSize / float(max_parts))))
    if partSize < min_part_size:
        raise ValueError('Minimum part size is %d MB.' % (min_part_size/MB))
    num_parts = int(math.ceil(float(fileSize) / partSize))
    if num_parts > max_parts:
        raise ValueError('A part size of %0.1f MB results in too many parts (%d).'
                         % (float(partSize)/MB, num_parts))
    return partSize
def get_file_chunk(filepath, n, chunksize=8*MB):
    """
    Read the nth chunk (1-based) of ``chunksize`` bytes from the file at
    ``filepath`` and return it as bytes.
    """
    offset = (n - 1) * chunksize
    with open(filepath, 'rb') as handle:
        handle.seek(offset)
        return handle.read(chunksize)
def get_data_chunk(data, n, chunksize=8*MB):
    """
    Return the nth chunk (1-based) of ``chunksize`` bytes from an in-memory
    buffer.
    """
    start = (n - 1) * chunksize
    return data[start: start + chunksize]
def _start_multipart_upload(syn, filename, md5, fileSize, partSize, contentType, preview=True, storageLocationId=None,
                            forceRestart=False):
    """
    Initiate (or resume) a multipart upload with the Synapse service.

    :param syn: a Synapse object
    :param filename: the name the file will be stored under
    :param md5: MD5 of the whole file, as hex
    :param fileSize: total size in bytes
    :param partSize: size of each part in bytes
    :param contentType: MIME type of the file
    :param preview: whether Synapse should generate a preview
    :param storageLocationId: id indicating where the file should be stored
    :param forceRestart: if True, discard any partially-completed upload state
    :returns: A `MultipartUploadStatus`_
    .. _MultipartUploadStatus:
     http://docs.synapse.org/rest/org/sagebionetworks/repo/model/file/MultipartUploadStatus.html
    """
    upload_request = {
        'contentMD5Hex': md5,
        'fileName': filename,
        'generatePreview': preview,
        'contentType': contentType,
        'partSizeBytes': partSize,
        'fileSizeBytes': fileSize,
        'storageLocationId': storageLocationId
    }
    # Re-POSTing the same request returns the existing upload's status
    # (including partsState), which is how interrupted uploads are resumed.
    return DictObject(**syn.restPOST(uri='/file/multipart?forceRestart=%s' % forceRestart,
                                     body=json.dumps(upload_request),
                                     endpoint=syn.fileHandleEndpoint))
@threadsafe_generator
def _get_presigned_urls(syn, uploadId, parts_to_upload):
    """Generator yielding the entries of a BatchPresignedUploadUrlResponse_,
    one dict (with 'partNumber' and 'uploadPresignedUrl') per part to upload.

    :param syn: a Synapse object
    :param uploadId: The id of the multipart upload
    :param parts_to_upload: A list of integers corresponding to the parts that need to be uploaded
    :returns: A BatchPresignedUploadUrlResponse_.
    .. BatchPresignedUploadUrlResponse:
     http://docs.synapse.org/rest/POST/file/multipart/uploadId/presigned/url/batch.html
    """
    # Nothing left to upload -> empty generator.
    if len(parts_to_upload) == 0:
        return
    presigned_url_request = {'uploadId': uploadId}
    uri = '/file/multipart/{uploadId}/presigned/url/batch'.format(uploadId=uploadId)
    presigned_url_request['partNumbers'] = parts_to_upload
    presigned_url_batch = syn.restPOST(uri, body=json.dumps(presigned_url_request),
                                       endpoint=syn.fileHandleEndpoint)
    for part in presigned_url_batch['partPresignedUrls']:
        yield part
def _add_part(syn, uploadId, partNumber, partMD5Hex):
    """
    Tell Synapse that a part has been uploaded to its pre-signed URL.

    :param syn: a Synapse object
    :param uploadId: id of the multipart upload
    :param partNumber: 1-based index of the part that was uploaded
    :param partMD5Hex: MD5 of the uploaded part, as hex
    :returns: An AddPartResponse_ with fields for an errorMessage and addPartState containing either 'ADD_SUCCESS' or
     'ADD_FAILED'.
    .. AddPartResponse: http://docs.synapse.org/rest/org/sagebionetworks/repo/model/file/AddPartResponse.html
    """
    # locals() supplies uploadId/partNumber/partMD5Hex to the format call.
    uri = '/file/multipart/{uploadId}/add/{partNumber}?partMD5Hex={partMD5Hex}'.format(**locals())
    return DictObject(**syn.restPUT(uri, endpoint=syn.fileHandleEndpoint))
def _complete_multipart_upload(syn, uploadId):
    """
    Ask Synapse to finalize the multipart upload once all parts are added.

    :param syn: a Synapse object
    :param uploadId: id of the multipart upload
    :returns: A MultipartUploadStatus_.
    .. MultipartUploadStatus:
     http://docs.synapse.org/rest/org/sagebionetworks/repo/model/file/MultipartUploadStatus.html
    """
    uri = '/file/multipart/{uploadId}/complete'.format(uploadId=uploadId)
    return DictObject(**syn.restPUT(uri, endpoint=syn.fileHandleEndpoint))
def _put_chunk(url, chunk, verbose=False):
    """
    PUT one chunk of bytes to its pre-signed URL, raising on HTTP errors.

    :param url: pre-signed upload URL for this part
    :param chunk: bytes to upload
    :param verbose: passed through to the error formatter
    """
    response = requests.put(url, data=chunk)
    try:
        # Make sure requests closes response stream?:
        # see: http://docs.python-requests.org/en/latest/user/advanced/#keep-alive
        if response is not None:
            response.content
    except Exception as ex:
        # Best-effort drain of the response body; failure here is non-fatal.
        warnings.warn('error reading response: '+str(ex))
    exceptions._raise_for_status(response, verbose=verbose)
def multipart_upload(syn, filepath, filename=None, contentType=None, storageLocationId=None, **kwargs):
    """
    Upload a file to a Synapse upload destination in chunks.

    :param syn: a Synapse object
    :param filepath: the file to upload
    :param filename: upload as a different filename
    :param contentType: `contentType`_
    :param partSize: number of bytes per part. Minimum 5MB.
    :param storageLocationId: a id indicating where the file should be stored.
        Retrieved from Synapse's UploadDestination
    :return: a File Handle ID
    Keyword arguments are passed down to :py:func:`_multipart_upload` and :py:func:`_start_multipart_upload`.
    .. _contentType: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17
    """
    # Validate the local file before contacting the service.
    if not os.path.exists(filepath):
        raise IOError('File "%s" not found.' % filepath)
    if os.path.isdir(filepath):
        raise IOError('File "%s" is a directory.' % filepath)
    fileSize = os.path.getsize(filepath)
    if not filename:
        filename = os.path.basename(filepath)
    md5 = md5_for_file(filepath).hexdigest()
    if contentType is None:
        # Fall back to a generic binary type when the extension is unknown.
        (mimetype, enc) = mimetypes.guess_type(filepath, strict=False)
        if not mimetype:
            mimetype = "application/octet-stream"
        contentType = mimetype
    syn.logger.debug("Initiating multi-part upload for file: [{path}] size={size} md5={md5}, contentType={contentType}"
                     .format(path=filepath, size=fileSize, md5=md5, contentType=contentType))
    # Chunks are read lazily from disk, one part at a time.
    def get_chunk_function(n, partSize): return get_file_chunk(filepath, n, partSize)
    status = _multipart_upload(syn, filename, contentType,
                               get_chunk_function=get_chunk_function,
                               md5=md5,
                               fileSize=fileSize,
                               storageLocationId=storageLocationId,
                               **kwargs)
    syn.logger.debug("Completed multi-part upload. Result:%s" % status)
    return status["resultFileHandleId"]
def multipart_upload_string(syn, text, filename=None, contentType=None, storageLocationId=None, **kwargs):
    """
    Upload a string using the multipart file upload.

    :param syn: a Synapse object
    :param text: a string to upload as a file.
    :param filename: a string containing the base filename
    :param contentType: `contentType`_
    :param partSize: number of bytes per part. Minimum 5MB.
    :param storageLocationId: a id indicating where the text should be stored.
        Retrieved from Synapse's UploadDestination
    :return: a File Handle ID
    Keyword arguments are passed down to :py:func:`_multipart_upload` and :py:func:`_start_multipart_upload`.
    .. _contentType: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17
    """
    # Uploads are byte-oriented; encode once and hash the same bytes.
    data = text.encode('utf-8')
    fileSize = len(data)
    md5 = hashlib.md5(data).hexdigest()
    if not filename:
        filename = 'message.txt'
    if not contentType:
        contentType = "text/plain; charset=utf-8"
    # Chunks are sliced out of the in-memory buffer.
    def get_chunk_function(n, partSize): return get_data_chunk(data, n, partSize)
    status = _multipart_upload(syn, filename, contentType,
                               get_chunk_function=get_chunk_function,
                               md5=md5,
                               fileSize=fileSize,
                               storageLocationId=storageLocationId,
                               **kwargs)
    return status["resultFileHandleId"]
def _upload_chunk(part, status, syn, filename, get_chunk_function, fileSize, partSize):
    """
    Upload a single part to its pre-signed URL and confirm it with Synapse.

    :param part: dict with 'partNumber' and 'uploadPresignedUrl'
    :param status: the current MultipartUploadStatus (provides uploadId)
    :return: number of bytes uploaded on success, 0 on failure (the caller's
        retry loop treats 0 as "retry this part")
    """
    partNumber = part["partNumber"]
    url = part["uploadPresignedUrl"]
    syn.logger.debug("uploading this part of the upload: %s" % part)
    completed = 0
    try:
        chunk = get_chunk_function(partNumber, partSize)
        syn.logger.debug("start upload part %s" % partNumber)
        _put_chunk(url, chunk, syn.debug)
        syn.logger.debug("PUT upload of part %s complete" % partNumber)
        # compute the MD5 for the chunk
        md5 = hashlib.md5()
        md5.update(chunk)
        # confirm that part got uploaded
        syn.logger.debug("contacting Synapse to complete part %s" % partNumber)
        add_part_response = _add_part(syn, uploadId=status.uploadId,
                                      partNumber=partNumber, partMD5Hex=md5.hexdigest())
        # if part was successfully uploaded, increment progress
        if add_part_response["addPartState"] == "ADD_SUCCESS":
            syn.logger.debug("finished contacting Synapse about adding part %s" % partNumber)
            completed = len(chunk)
        else:
            syn.logger.debug("did not successfully add part %s" % partNumber)
    except Exception as ex1:
        # An expired pre-signed URL (403) means the whole batch must be
        # refreshed; report failure so the outer loop restarts.
        if isinstance(ex1, SynapseHTTPError) and ex1.response.status_code == 403:
            syn.logger.debug("The pre-signed upload URL for part %s has expired. Restarting upload...\n" % partNumber)
            warnings.warn("The pre-signed upload URL has expired. Restarting upload...\n")
            return 0
        # If we are not in verbose debug mode we will swallow the error and retry.
        else:
            syn.logger.debug("Encountered an exception: %s. Retrying...\n" % str(type(ex1)), exc_info=True)
    return completed
def _multipart_upload(syn, filename, contentType, get_chunk_function, md5, fileSize,
                      partSize=None, storageLocationId=None, **kwargs):
    """
    Multipart Upload.

    :param syn: a Synapse object
    :param filename: a string containing the base filename
    :param contentType: contentType_
    :param get_chunk_function: a function that takes a part number and size and returns the bytes of that chunk of the
        file
    :param md5: the part's MD5 as hex.
    :param fileSize: total number of bytes
    :param partSize: number of bytes per part. Minimum 5MB.
    :param storageLocationId: a id indicating where the file should be stored. retrieved from Synapse's
        UploadDestination
    :return: a MultipartUploadStatus_ object
    Keyword arguments are passed down to :py:func:`_start_multipart_upload`.
    .. MultipartUploadStatus:
     http://docs.synapse.org/rest/org/sagebionetworks/repo/model/file/MultipartUploadStatus.html
    .. contentType: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17
    """
    partSize = calculate_part_size(fileSize, partSize, MIN_PART_SIZE, MAX_NUMBER_OF_PARTS)
    # Starting (or resuming) the upload yields partsState, a '0'/'1' string
    # telling us which parts were already uploaded in a previous attempt.
    status = _start_multipart_upload(syn, filename, md5, fileSize, partSize, contentType,
                                     storageLocationId=storageLocationId, **kwargs)
    # only force restart once
    kwargs['forceRestart'] = False
    completedParts = count_completed_parts(status.partsState)
    # bytes that were previously uploaded before the current upload began. This variable is set only once
    previously_completed_bytes = min(completedParts * partSize, fileSize)
    syn.logger.debug("file partitioned into size: %s" % partSize)
    syn.logger.debug("current multipart-upload status: %s" % status)
    syn.logger.debug("previously completed %d parts, estimated %d bytes" % (completedParts, previously_completed_bytes))
    time_upload_started = time.time()
    retries = 0
    # Outer loop: each pass uploads all remaining parts in parallel, then
    # re-queries the status. A pass that completes no new part counts as a
    # retry; MAX_RETRIES no-progress passes aborts the upload.
    while retries < MAX_RETRIES:
        syn.logger.debug("Started retry loop for multipart_upload. Currently %d/%d retries"
                         % (retries, MAX_RETRIES))
        # keep track of the number of bytes uploaded so far
        completed = min(completedParts * partSize, fileSize)
        printTransferProgress(completed, fileSize, prefix='Uploading', postfix=filename)
        def chunk_upload(part): return _upload_chunk(part, status=status,
                                                     syn=syn, filename=filename,
                                                     get_chunk_function=get_chunk_function,
                                                     fileSize=fileSize, partSize=partSize)
        syn.logger.debug("fetching pre-signed urls and mapping to Pool")
        presigned_urls = _get_presigned_urls(syn, status.uploadId, find_parts_to_upload(status.partsState))
        with concurrent.futures.ThreadPoolExecutor() as executor:
            # NOTE: each "url" here is actually a part descriptor dict yielded
            # by _get_presigned_urls (partNumber + uploadPresignedUrl).
            futures = { executor.submit(chunk_upload, url): url for url in presigned_urls }
            for future in concurrent.futures.as_completed(futures):
                completed_amount = future.result()
                # If an upload fails then cancel everything and retry.
                if completed_amount < 1:
                    syn.logger.debug("upload failed, aborting other uploads")
                    for f in futures: f.cancel()
                else:
                    completed += completed_amount
                    printTransferProgress(completed, fileSize, prefix='Uploading',
                                          postfix=filename, dt=time.time()-time_upload_started,
                                          previouslyTransferred=previously_completed_bytes)
        syn.logger.debug("completed pooled upload")
        # Check if there are still parts
        status = _start_multipart_upload(syn, filename, md5, fileSize, partSize, contentType,
                                         storageLocationId=storageLocationId, **kwargs)
        oldCompletedParts, completedParts = completedParts, count_completed_parts(status.partsState)
        progress = (completedParts > oldCompletedParts)
        retries = retries+1 if not progress else retries
        syn.logger.debug("progress made in this loop? %s" % progress)
        # Are we done, yet?
        if completed >= fileSize:
            try:
                syn.logger.debug("attempting to finalize multipart upload because completed >= filesize"
                                 " ({completed} >= {size})".format(completed=completed, size=fileSize))
                status = _complete_multipart_upload(syn, status.uploadId)
                if status.state == "COMPLETED":
                    break
            except Exception as ex1:
                # Finalization failure is non-fatal here; the loop re-checks
                # the status and may try again until retries are exhausted.
                syn.logger.error("Attempt to complete the multipart upload failed with exception %s %s"
                                 % (type(ex1), ex1))
                syn.logger.debug("multipart upload failed:", exc_info=True)
    if status["state"] != "COMPLETED":
        raise SynapseError("Upload {id} did not complete. Try again.".format(id=status["uploadId"]))
    return status
| 41.763682 | 120 | 0.649532 |
899ce4370a4f0d892b25e759a7fa45c1adb242c1 | 4,592 | py | Python | denise.alana.py | dtepie1/pythonteachingcode | d6fe56ff52a089a8a38f3d2182f40ddf98ff9cac | [
"MIT"
] | null | null | null | denise.alana.py | dtepie1/pythonteachingcode | d6fe56ff52a089a8a38f3d2182f40ddf98ff9cac | [
"MIT"
] | null | null | null | denise.alana.py | dtepie1/pythonteachingcode | d6fe56ff52a089a8a38f3d2182f40ddf98ff9cac | [
"MIT"
] | null | null | null | #vfebfe
import cv2.cv as cv
from datetime import datetime
import time
class MotionDetectorInstantaneous():
    """Webcam motion detector built on the legacy OpenCV 1.x ``cv`` API.

    Compares each frame against the previous one (frame differencing); when
    the fraction of changed pixels exceeds a user-tunable threshold, motion
    is reported and (optionally) a 10-second video clip is recorded.
    """
    def onChange(self, val): #callback when the user change the detection threshold
        self.threshold = val
    def __init__(self,threshold=8, doRecord=True, showWindows=True):
        """Open the webcam, grab an initial frame and allocate work buffers.

        :param threshold: percent of changed pixels that counts as motion
        :param doRecord: record a video clip whenever motion is detected
        :param showWindows: display the live image and the diff result
        """
        self.writer = None
        self.font = None
        self.doRecord=doRecord #Either or not record the moving object
        self.show = showWindows #Either or not show the 2 windows
        self.frame = None
        self.capture=cv.CaptureFromCAM(0)
        self.frame = cv.QueryFrame(self.capture) #Take a frame to init recorder
        if doRecord:
            self.initRecorder()
        self.frame1gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U) #Gray frame at t-1
        cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)
        #Will hold the thresholded result
        self.res = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)
        self.frame2gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U) #Gray frame at t
        self.width = self.frame.width
        self.height = self.frame.height
        self.nb_pixels = self.width * self.height
        self.threshold = threshold
        self.isRecording = False
        self.trigger_time = 0 #Hold timestamp of the last detection
        if showWindows:
            cv.NamedWindow("Image")
            cv.CreateTrackbar("Detection treshold: ", "Image", self.threshold, 100, self.onChange)
    def initRecorder(self): #Create the recorder
        """Create the MJPG video writer and the font used to stamp frames."""
        codec = cv.CV_FOURCC('M', 'J', 'P', 'G') #('W', 'M', 'V', '2')
        self.writer=cv.CreateVideoWriter(datetime.now().strftime("%b-%d_%H_%M_%S")+".wmv", codec, 5, cv.GetSize(self.frame), 1)
        #FPS set to 5 because it seems to be the fps of my cam but should be ajusted to your needs
        self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8) #Creates a font
    def run(self):
        """Main capture loop: detect motion, record clips, refresh windows.

        Exits when the user presses Esc (27) or Enter (10) in the window.
        """
        started = time.time()
        while True:
            curframe = cv.QueryFrame(self.capture)
            instant = time.time() #Get timestamp o the frame
            self.processImage(curframe) #Process the image
            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant #Update the trigger_time
                    if instant > started +5:#Wait 5 second after the webcam start for luminosity adjusting etc..
                        print (datetime.now().strftime("%b:%d %H:%M:%S"), "Something is moving !")
                        if self.doRecord: #set isRecording=True only if we record a video
                            self.isRecording = True
            else:
                if instant >= self.trigger_time +10: #Record during 10 seconds
                    print (datetime.now().strftime("%b %d, %H:%M:%S"), "Stop recording")
                    self.isRecording = False
                else:
                    cv.PutText(curframe,datetime.now().strftime("%b %d, %H:%M:%S"), (25,30),self.font, 0) #Put date on the frame
                    cv.WriteFrame(self.writer, curframe) #Write the frame
            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)
            # Current gray frame becomes the "previous" frame for the next pass.
            cv.Copy(self.frame2gray, self.frame1gray)
            c=cv.WaitKey(1) % 0x100
            if c==27 or c == 10: #Break if user enters 'Esc'.
                break
    def processImage(self, frame):
        """Compute self.res: a denoised, binarized diff of frame vs previous."""
        cv.CvtColor(frame, self.frame2gray, cv.CV_RGB2GRAY)
        #Absdiff to get the difference between to the frames
        cv.AbsDiff(self.frame1gray, self.frame2gray, self.res)
        #Remove the noise and do the threshold
        cv.Smooth(self.res, self.res, cv.CV_BLUR, 5,5)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_OPEN)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_CLOSE)
        # Inverted binary threshold: changed pixels come out black (0).
        cv.Threshold(self.res, self.res, 10, 255, cv.CV_THRESH_BINARY_INV)
    def somethingHasMoved(self):
        """Return True when the share of changed (black) pixels in self.res
        exceeds the configured percentage threshold."""
        nb=0 #Will hold the number of black pixels
        min_threshold = (self.nb_pixels/100) * self.threshold #Number of pixels for current threshold
        nb = self.nb_pixels - cv.CountNonZero(self.res)
        if (nb) > min_threshold:
            return True
        else:
            return False
# Entry point: start the detector with clip recording enabled.
if __name__=="__main__":
    detect = MotionDetectorInstantaneous(doRecord=True)
    detect.run()
| 44.153846 | 128 | 0.592334 |
aa66e9aca2a2c13aee71aa08797c01cdf4f72cc8 | 7,611 | py | Python | pyrice/utils.py | SouthGreenPlatform/PyRice | ca34acf65099db0c906cb9453a1b39c46b2263ee | [
"MIT"
] | 14 | 2020-04-22T01:17:25.000Z | 2021-07-01T13:17:01.000Z | pyrice/utils.py | SouthGreenPlatform/PyRice | ca34acf65099db0c906cb9453a1b39c46b2263ee | [
"MIT"
] | 10 | 2019-10-14T09:07:41.000Z | 2021-06-11T06:17:11.000Z | pyrice/utils.py | SouthGreenPlatform/PyRice | ca34acf65099db0c906cb9453a1b39c46b2263ee | [
"MIT"
] | 2 | 2020-11-23T01:11:09.000Z | 2021-02-03T11:23:06.000Z | # -*- coding: utf-8 -*-
import os
import requests
import urllib3
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import TimeoutException,WebDriverException
from time import sleep
# Path to a chromedriver binary; empty string means connection_error falls
# back to a headless Chrome discovered on PATH.
chrome_path = ""
dir_path = os.path.dirname(os.path.realpath(__file__))
# Directory where selenium-triggered downloads are saved.
download_dir = os.path.join(dir_path,"support/download/")
def connection_error(link, data = "", type = None, db = None, gene_id=None):
    """
    Fetch a result either with a plain HTTP request (POST when ``data`` is
    non-empty, GET otherwise) or, for JavaScript-driven pages, by steering a
    Chrome/selenium session that clicks a download control.

    :param link: (str) url
    :param data: (str/dict) form data for POST, or (javascript mode) the
        XPath of the element to click
    :param type: (str) "javascript" selects the selenium path
    :param db: (str) database name - only "gwas_atlas" gets special handling
    :param gene_id: (str) gene id - currently unused here
    :return: requests response object (HTTP path), 1 on javascript-download
        success, or None on failure
    """
    if type =="javascript":
        options = webdriver.ChromeOptions()
        profile = {"plugins.plugins_list": [{"enabled": False, "name": "Chrome PDF Viewer"}],
            # Disable Chrome's PDF Viewer
            "download.default_directory": download_dir, "download.extensions_to_open": "applications/pdf"}
        options.add_experimental_option("prefs", profile)
        try:
            if (os.path.exists(chrome_path)):
                driver = webdriver.Chrome(chrome_path, chrome_options=options)
            else:
                # No explicit chromedriver configured: run headless from PATH.
                options = webdriver.ChromeOptions()
                options.add_argument('--headless')
                options.add_argument('--no-sandbox')
                options.add_argument('--disable-dev-shm-usage')
                driver = webdriver.Chrome(options=options)
            driver.get(link)
            wait = WebDriverWait(driver, 5)
            # `data` carries the XPath of the download control in this mode.
            men_menu = wait.until(ec.visibility_of_element_located((By.XPATH, data)))
            button = driver.find_elements_by_xpath(data)[0]
            ActionChains(driver).move_to_element(men_menu).click(button).perform()
            sleep(1)
            if db == "gwas_atlas":
                # gwas_atlas needs a second click on the CSV entry of the
                # dropdown that the first click opened.
                csv_button = driver.find_elements_by_xpath("//ul[@class='dropdown-menu'and @role='menu']/li[2]/a")[0]
                ActionChains(driver).move_to_element(csv_button).click().perform()
                sleep(2)
            driver.close()
            driver.quit()
            return 1
        except (TimeoutException, WebDriverException) as e:
            print(e)
            return None
        # finally:
        #     driver.close()
        #     driver.quit()
    else:
        try:
            urllib3.disable_warnings()
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0'}
            #print(link)
            if data!= "":
                res = requests.post(link, data=data, headers=headers,verify=False)
            else:
                res = requests.get(link, allow_redirects=False,stream=True,verify=False)
            # Non-200 responses are reported but still returned to the caller.
            if res.status_code != 200:
                print('Server Error: ' + str(res.status_code) + '\n' + 'For url:' + link)
            return res
        except requests.exceptions.RequestException as error:
            print("Can't connect: {} - Eror: {}".format(link,error))
            return None
def _compose_query_string(fields, qfields):
    """Build the 'field<op>value&...' query text from the XML field specs.

    The first (controller) field has empty text and contributes 'value?';
    every other field contributes 'name<op>value&'. The trailing separator
    is stripped.
    """
    query_string = ""
    for i, field in enumerate(fields):
        # NOTE(review): bs4 Tag membership test -- presumably meant to check
        # for a "lowercase" attribute; as written it only prints. Confirm.
        if "lowercase" in field:
            print(qfields[i].lower())
        if field.text == "":
            query_string += qfields[i] + "?"
        else:
            query_string += field.text + field["op"] + qfields[i] + "&"
    return query_string[:-1]
def execute_query(db, qfields=[], verbose=False):
    """
    Build the query URL/form for one database described by its XML spec and
    fetch the result via :func:`connection_error`.

    :param db: parsed XML spec (BeautifulSoup result set); db[0] carries the
        "method"/"type"/"dbname" attributes and <field>/<link> children
    :param qfields: (list) query values (loc, id, ...) matched positionally
        against the <field> elements (read-only, so the mutable default is
        harmless)
    :param verbose: (bool) if True print the compiled URL for debugging
    :return: a requests response (HTTP), 1/None (javascript download), an
        open file object (non-HTTP link), or None when nothing matched
    """
    # Get query qfields list
    fields = db[0].find_all("field")
    # Prepare URL
    link = db[0].find_all("link")[0]["stern"]
    # Compile URL
    if link[:4] == 'http':
        if db[0]["method"] == "POST":
            # Accumulate every field into one form payload. (Previously
            # `data` was rebound each pass, so only the last field was sent.)
            data = {}
            for i, field in enumerate(fields):
                data[field.text] = qfields[i]
            return connection_error(link, data)
        elif db[0]["method"] == "GET":
            if db[0]["type"] == "javascript":
                # Build an XPath for the page's download control: the first
                # <div>/<button>/<input> described in the spec, matching all
                # of its attributes.
                div = db[0].find_all("div")
                button = db[0].find_all("button")
                inputs = db[0].find_all("input")
                if len(div) > 0:
                    download_button = "//div["
                    download_source = div
                elif len(button) > 0:
                    download_button = "//button["
                    download_source = button
                elif len(inputs) > 0:
                    download_button = "//input["
                    download_source = inputs
                for key, value in download_source[0].attrs.items():
                    if type(value) is list:
                        # Multi-valued attributes (e.g. class) are rejoined
                        # with spaces.
                        download_button += "@" + key + " = '"
                        for v in value:
                            download_button += v + ' '
                        download_button = download_button[:-1]
                        download_button += "' and "
                    else:
                        download_button += "@" + key + " = '" + value + "' and "
                download_button = download_button[:-5] + "]"
                link += _compose_query_string(fields, qfields) + db[0].find_all("link")[0]["aft"]
                if verbose:
                    print(link)
                return connection_error(link, download_button, type='javascript', db=db[0]["dbname"])
            if db[0]["type"] != "text/csv":
                link += _compose_query_string(fields, qfields) + db[0].find_all("link")[0]["aft"]
                if verbose:
                    print(link)
                return connection_error(link)
    else:
        # Non-HTTP "links" point at a local file.
        return open(link)
def search(df, text):
    """
    Search every column of a result table for rows containing ``text``.

    :param df: (dataframe) pandas dataframe of query results
    :param text: (str) pattern to look for; interpreted by pandas as a
        regular expression (the ``Series.str.contains`` default)
    :return: a dataframe (all values cast to str) of the rows in which any
        column matched
    """
    df = df.astype(str)
    matched = set()
    for column in df.columns:
        # Vectorized match instead of a Python loop over positions.
        mask = df[column].str.contains(text)
        matched.update(df.index[mask])
    # .loc does not accept a set indexer; use a deterministic, sorted list.
    return df.loc[sorted(matched)]
| 40.269841 | 117 | 0.532913 |
32f6b710df361011584e669f0ad3cbad5746a62a | 1,835 | py | Python | checkers.py | pikulak/xoxo | 2c2aec2cc890907593d3837da2b5942b87993072 | [
"Apache-2.0"
] | 1 | 2017-02-26T11:08:19.000Z | 2017-02-26T11:08:19.000Z | checkers.py | pikulak/xoxo | 2c2aec2cc890907593d3837da2b5942b87993072 | [
"Apache-2.0"
] | null | null | null | checkers.py | pikulak/xoxo | 2c2aec2cc890907593d3837da2b5942b87993072 | [
"Apache-2.0"
] | null | null | null | from statics import BOARD_SIDE_LENGTH
class WinConditionChecker:
    """Checks a board for a winning row, column or diagonal.

    A win condition is the exact line content (e.g. ``['X', 'X', 'X']``)
    that a player must produce. Within each line, player 1's condition is
    checked before player 2's, matching the original scan order.
    """
    def __init__(self, board, player_1_win_condition, player_2_win_condition):
        self._board = board
        self._winner = None  # "player_1"/"player_2" once a win is detected
        self._player_1_win_condition = player_1_win_condition
        self._player_2_win_condition = player_2_win_condition
        # 1-based row/column indices, matching the board accessors.
        self.seq = range(1, BOARD_SIDE_LENGTH + 1)
    def _match(self, line):
        """Record and report whether `line` equals either win condition."""
        if line == self._player_1_win_condition:
            self._winner = "player_1"
            return True
        if line == self._player_2_win_condition:
            self._winner = "player_2"
            return True
        return False
    def win_by_row(self):
        """True if any row matches a win condition (sets the winner)."""
        return any(self._match(self._board.get_row(x)) for x in self.seq)
    def win_by_column(self):
        """True if any column matches a win condition (sets the winner)."""
        return any(self._match(self._board.get_column(y)) for y in self.seq)
    def win_by_diagonal(self):
        """True if either diagonal matches a win condition (sets the winner).

        Player 1's condition is checked against both diagonals before
        player 2's, preserving the original priority.
        """
        diagonals = (self._board.get_first_diagonal(),
                     self._board.get_second_diagonal())
        for winner, condition in (("player_1", self._player_1_win_condition),
                                  ("player_2", self._player_2_win_condition)):
            if any(diagonal == condition for diagonal in diagonals):
                self._winner = winner
                return True
        return False
    def checkout(self):
        """Return the winner's name if any line wins, otherwise False."""
        if self.win_by_column() or self.win_by_diagonal() or self.win_by_row():
            return self._winner
        else:
            return False
| 28.671875 | 79 | 0.60436 |
0899554bda452527d8c14117cc198012bee9877d | 5,095 | py | Python | gae/layers.py | JiaxiangBU/link-prediction | 8fd569dae07cc4fc2972e2fb97cce0fb00875111 | [
"MIT"
] | 309 | 2018-02-01T22:31:23.000Z | 2022-03-13T21:18:49.000Z | gae/layers.py | JiaxiangBU/link-prediction | 8fd569dae07cc4fc2972e2fb97cce0fb00875111 | [
"MIT"
] | null | null | null | gae/layers.py | JiaxiangBU/link-prediction | 8fd569dae07cc4fc2972e2fb97cce0fb00875111 | [
"MIT"
] | 125 | 2018-01-31T10:33:08.000Z | 2022-03-31T07:53:33.000Z | from gae.initializations import *
import tensorflow as tf
# Global command-line flag registry shared across modules (TF1-style).
flags = tf.app.flags
FLAGS = flags.FLAGS
# Global registry of per-layer-name counters used for unique name assignment.
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
    """Return the next unique id (1-based) for the given layer name."""
    uid = _LAYER_UIDS.get(layer_name, 0) + 1
    _LAYER_UIDS[layer_name] = uid
    return uid
def dropout_sparse(x, keep_prob, num_nonzero_elems, dtype=tf.float32):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)

    :param x: a tf.SparseTensor
    :param keep_prob: probability of keeping each non-zero element
    :param num_nonzero_elems: number of non-zero entries in x (mask length)
    :param dtype: float dtype used for the random mask and rescaled output
    """
    noise_shape = [num_nonzero_elems]
    # keep_prob + U[0,1), floored, yields a Bernoulli(keep_prob) 0/1 mask.
    random_tensor = tf.cast(keep_prob, dtype=dtype)
    random_tensor += tf.random_uniform(noise_shape, dtype=dtype)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    # Rescale surviving entries by 1/keep_prob (inverted dropout).
    return tf.cast(pre_out, dtype) * tf.cast((1./keep_prob), dtype)
class Layer(object):
    """Base layer class. Defines basic API for all layer objects.
    # Properties
        name: String, defines the variable scope of the layer.
    # Methods
        _call(inputs): Defines computation graph of layer
            (i.e. takes input, returns output)
        __call__(inputs): Wrapper for _call()
    """
    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging'}
        for key in kwargs:
            assert key in allowed_kwargs, 'Invalid keyword argument: ' + key
        name = kwargs.get('name')
        if not name:
            # Auto-generate a unique name such as "graphconvolution_1".
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(get_layer_uid(prefix))
        self.name = name
        self.vars = {}
        self.logging = kwargs.get('logging', False)
        self.issparse = False
    def _call(self, inputs):
        # Identity by default; subclasses override with their computation.
        return inputs
    def __call__(self, inputs):
        # Run the layer's computation inside its own name scope.
        with tf.name_scope(self.name):
            return self._call(inputs)
class GraphConvolution(Layer):
    """Basic graph convolution layer for undirected graph without edge labels.

    :param input_dim: number of input features per node
    :param output_dim: number of output features per node
    :param adj: (normalized) adjacency matrix; densified if sparse
    :param dropout: dropout rate applied to the inputs
    :param act: activation function
    :param dtype: floating-point dtype of weights and computation
    """
    def __init__(self, input_dim, output_dim, adj, dropout=0., act=tf.nn.relu, dtype=tf.float32, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, dtype=dtype, name="weights")
        self.dropout = dropout
        self.adj = adj
        if type(self.adj) == tf.SparseTensor: # convert to dense if necessary
            self.adj = tf.sparse_tensor_to_dense(self.adj, validate_indices=False)
        self.act = act
        self.dtype=dtype
    # Apply Graph Convolution operation:
    #   H_1 = activation(A_norm * X * W)
    def _call(self, inputs):
        x = tf.cast(inputs, self.dtype)
        if type(x) == tf.SparseTensor: # convert to dense if necessary
            x = tf.sparse_tensor_to_dense(x, validate_indices=False)
        # Inverted dropout on the node features (keep prob = 1 - dropout).
        x = tf.nn.dropout(x, tf.cast(1-self.dropout, self.dtype))
        x = tf.matmul(x, self.vars['weights'])
        x = tf.matmul(self.adj, x)
        outputs = self.act(x)
        return outputs
class GraphConvolutionSparse(Layer):
    """Graph convolution layer for sparse inputs.

    Like GraphConvolution, but the input features (and adjacency) stay
    sparse and are multiplied with sparse-dense matmuls.

    :param features_nonzero: number of non-zero input entries, needed to
        size the sparse dropout mask
    """
    def __init__(self, input_dim, output_dim, adj, features_nonzero, dropout=0., act=tf.nn.relu, dtype=tf.float32, **kwargs):
        super(GraphConvolutionSparse, self).__init__(**kwargs)
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, dtype=dtype, name="weights")
        self.dropout = dropout
        self.adj = adj
        self.act = act
        self.issparse = True
        self.features_nonzero = features_nonzero
        self.dtype=dtype
    # Apply Graph Convolution operation:
    #   H_1 = activation(A_norm * X * W)
    def _call(self, inputs):
        x = inputs
        # if self.dropout > 0:
        x = dropout_sparse(x, 1-self.dropout, self.features_nonzero, dtype=self.dtype)
        # Sparse features times dense weights, then adjacency aggregation;
        # both matmuls are performed in float32 and cast back afterwards.
        x = tf.sparse_tensor_dense_matmul(tf.cast(x, tf.float32), tf.cast(self.vars['weights'], tf.float32))
        x = tf.sparse_tensor_dense_matmul(tf.cast(self.adj, tf.float32), tf.cast(x, tf.float32))
        outputs = tf.cast(self.act(x), self.dtype)
        return outputs
class InnerProductDecoder(Layer):
    """Decoder model layer for link prediction.

    :param input_dim: accepted for API symmetry with the other layers but
        not used by this decoder
    :param dropout: dropout rate applied to the embeddings
    :param act: activation applied to the reconstructed scores
    :param flatten: if True, return the scores as a flat vector
    """
    def __init__(self, input_dim, dropout=0., act=tf.nn.sigmoid, flatten=True, **kwargs):
        super(InnerProductDecoder, self).__init__(**kwargs)
        self.dropout = dropout
        self.act = act
        self.flatten = flatten
    # Reconstruct adjacency matrix from node embeddings:
    #   A_pred = activation(Z*Z^T)
    # Simple inner product
    def _call(self, inputs):
        inputs = tf.nn.dropout(inputs, 1-self.dropout)
        x = tf.transpose(inputs)
        x = tf.matmul(inputs, x)
        if self.flatten == True:
            x = tf.reshape(x, [-1])
        outputs = self.act(x)
        return outputs
| 37.189781 | 125 | 0.641609 |
142f7fef75795d7dd77c82472da63fb1dc8fcf7a | 4,471 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations_async/_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations_async/_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations_async/_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operation group; paging is implemented via the
# AsyncItemPaged callback pair (get_next fetches a page, extract_data
# deserializes it and yields the continuation link).
class Operations:
    """Operations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["models.OperationListResult"]:
        """Lists all of the available Network Rest API operations.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.OperationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.OperationListResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        # Build the HTTP request for the first page (no next_link) or a
        # continuation page (next_link is a full URL from the service).
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page into (continuation link, async list of items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('OperationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch one page through the pipeline and validate the status code.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/providers/Microsoft.Network/operations'}  # type: ignore
7cab2274365aa17353f1332b7bc0a3c98a7ba89e | 76,034 | py | Python | ciftify/bin/ciftify_recon_all.py | helloTC/ciftify | ca6b83c8d40cd384de54269d7c62281552b91e21 | [
"MIT"
] | null | null | null | ciftify/bin/ciftify_recon_all.py | helloTC/ciftify | ca6b83c8d40cd384de54269d7c62281552b91e21 | [
"MIT"
] | null | null | null | ciftify/bin/ciftify_recon_all.py | helloTC/ciftify | ca6b83c8d40cd384de54269d7c62281552b91e21 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Converts a freesurfer recon-all output to a working directory
Usage:
ciftify_recon_all [options] <Subject>
Arguments:
<Subject> The Subject ID in the HCP data folder
Options:
--ciftify-work-dir PATH The directory for HCP subjects (overrides
CIFTIFY_WORKDIR/ HCP_DATA enivironment variables)
--fs-subjects-dir PATH Path to the freesurfer SUBJECTS_DIR directory
(overides the SUBJECTS_DIR environment variable)
--resample-to-T1w32k Resample the Meshes to 32k Native (T1w) Space
--surf-reg REGNAME Registration sphere prefix [default: MSMSulc]
--no-symlinks Will not create symbolic links to the zz_templates folder
--fs-license FILE Path to the freesurfer license file
--read-non-lin-xfm PATH EXPERT OPTION, read this FSL format warp to MNI space
instead of generating it from the inputs.
Must be an FSL transform (warp) file.
--read-lin-premat PATH EXPERT OPTION, read this FSL format warp linear (premat)
transform to MNI space instead of generating it.
Must be an an FSL transform (warp) file.
--MSM-config PATH EXPERT OPTION. The path to the configuration file to use for
MSMSulc mode. By default, the configuration file
is ciftify/data/hcp_config/MSMSulcStrainFinalconf
This setting is ignored when not running MSMSulc mode.
--ciftify-conf YAML EXPERT OPTION. Path to a yaml configuration file. Overrides
the default settings in
ciftify/data/ciftify_workflow_settings.yaml
--hcp-data-dir PATH DEPRECATED, use --ciftify-work-dir instead
--n_cpus INT Number of cpu's available. Defaults to the value
of the OMP_NUM_THREADS environment variable
-v,--verbose Verbose logging
--debug Debug logging in Erin's very verbose style
-n,--dry-run Dry run
-h,--help Print help
DETAILS
Adapted from the PostFreeSurferPipeline module of the Human Connectome
Project's minimal proprocessing pipeline. Please cite:
Glasser MF, Sotiropoulos SN, Wilson JA, Coalson TS, Fischl B, Andersson JL, Xu J,
Jbabdi S, Webster M, Polimeni JR, Van Essen DC, Jenkinson M, WU-Minn HCP Consortium.
The minimal preprocessing pipelines for the Human Connectome Project. Neuroimage. 2013 Oct 15;80:105-24.
PubMed PMID: 23668970; PubMed Central PMCID: PMC3720813.
The default outputs are condensed to include in 4 mesh "spaces" in the following directories:
+ T1w/Native: The freesurfer "native" output meshes
+ MNINonLinear/Native: The T1w/Native mesh warped to MNINonLinear
+ MNINonLinear/fsaverage_LR32k
+ the surface registered space used for fMRI and multi-modal analysis
+ This 32k mesh has approx 2mm vertex spacing
+ MNINonLinear_164k_fs_LR (in the MNINonLinear folder):
+ the surface registered space used for HCP's anatomical analysis
+ This 164k mesh has approx 0.9mm vertex spacing
In addition, the optional flag '--resample-to-T1w32k' can be used to output an
additional T1w/fsaverage_LR32k folder that occur in the HCP Consortium Projects.
Note: the '--resample-to-T1w32k' can be called on a a completed ciftify output (missing
the T1w/fsaverage_LR32k folder). In this case the process will only run the T1w32k resampling step.
Any other call to ciftify on a incomplete output will lead to a failure.
(ciftify will not clobber old outputs by default)
By default, some to the template files needed for resampling surfaces and viewing
flatmaps will be symbolic links from a folder ($CIFTIFY_WORKDIR/zz_templates) to the
subject's output folder. If the '--no-symlinks' flag is indicated, these files will be
copied into the subject folder insteadself.
Written by Erin W Dickie
"""
import os
import sys
import math
import datetime
import tempfile
import shutil
import subprocess
import logging
import yaml
from docopt import docopt
import ciftify
from ciftify.utils import WorkFlowSettings, get_stdout, cd, section_header, has_ciftify_recon_all_run
from ciftify.filenames import *
logger = logging.getLogger('ciftify')
logger.setLevel(logging.DEBUG)
DRYRUN = False
N_CPUS = 1
FS_LICENSE = None
def run_ciftify_recon_all(temp_dir, settings):
    '''Top-level workflow: convert one freesurfer recon-all output into a
    ciftify (HCP-style) folder, optionally adding the T1w/fsaverage_LR32k
    meshes.

    Arguments:
        temp_dir    Scratch directory for intermediate files
        settings    A fully-initialized Settings object

    Returns 0 on success (helper functions call sys.exit on failure).
    '''
    subject = settings.subject
    log_inputs(settings.fs_root_dir, settings.work_dir, subject.id,
        settings.registration, settings.msm_config)
    log_build_environment(settings)
    # freesurfer version decides which label atlases we expect (see define_expected_labels)
    fs_version = pars_recon_all_logs(subject.fs_folder)
    logger.debug("Defining Settings")
    ## the Meshes Dict contains file paths and naming conventions specific to
    ## all output meshes
    meshes = define_meshes(subject.path, temp_dir,
        high_res_mesh = settings.high_res,
        low_res_meshes = settings.low_res,
        make_low_res = settings.resample)
    expected_labels = define_expected_labels(fs_version)
    # skip_main_wf is only True when a *complete* previous run was found;
    # an incomplete previous output already aborted during Settings init
    if settings.skip_main_wf:
        logger.info("Found completed ciftify output, only resampling to T1w/fsaverage_LR32k")
    else:
        run_default_workflow(temp_dir, settings, meshes, expected_labels, fs_version)
    if settings.resample:
        resampling_to_t1w_32k(temp_dir, settings, meshes, expected_labels)
    # exit successfully
    logger.info(section_header('Done'))
    return 0
def run_default_workflow(temp_dir, settings, meshes, expected_labels, fs_version):
    '''The main anatomical workflow: volume conversion/registration, surface
    conversion, surface registration, then resampling to the output meshes.
    Steps are order-dependent and must run to completion.

    NOTE(review): fs_version is currently unused here (labels were already
    resolved into expected_labels by the caller) — kept for interface stability.
    '''
    subject = settings.subject
    #Make some folders for this and later scripts
    create_output_directories(meshes, settings.registration['xfms_dir'],
        os.path.join(subject.atlas_space_dir, 'ROIs'),
        os.path.join(subject.atlas_space_dir, 'Results'))
    T1w_nii = os.path.join(subject.T1w_dir, settings.registration['T1wImage'])
    wmparc = os.path.join(subject.T1w_dir, 'wmparc.nii.gz')
    convert_T1_and_freesurfer_inputs(T1w_nii, subject,
        settings.ciftify_data_dir, T2_raw=settings.use_T2)
    prepare_T1_image(wmparc, T1w_nii, settings.registration)
    convert_inputs_to_MNI_space(settings.registration, settings.ciftify_data_dir,
        temp_dir, use_T2=settings.use_T2)
    #Create Spec Files including the T1w files
    add_anat_images_to_spec_files(meshes, subject.id)
    if settings.use_T2:
        add_anat_images_to_spec_files(meshes, subject.id, img_type='T2wImage')
    # Import Subcortical ROIs and resample to the Grayordinate Resolution
    create_cifti_subcortical_ROIs(subject.atlas_space_dir, settings, temp_dir)
    convert_FS_surfaces_to_gifti(subject.id, subject.fs_folder, meshes,
        settings.registration, temp_dir)
    process_native_meshes(subject, meshes, settings.dscalars, expected_labels)
    ## copy the HighResMesh medialwall roi and the sphere mesh from the
    ## templates
    copy_atlas_roi_from_template(settings, meshes['HighResMesh'])
    copy_sphere_mesh_from_template(settings, meshes['HighResMesh'])
    # surface registration (FS or MSMSulc depending on settings.reg_name)
    reg_sphere = create_reg_sphere(settings, subject.id, meshes)
    logger.info(section_header("Importing HighRes Template Sphere and Medial "
        "Wall ROI"))
    ## incorporate the atlasroi boundries into the native space roi
    merge_subject_medial_wall_with_atlas_template(subject.id, settings.high_res,
        meshes, reg_sphere, temp_dir)
    ## remask the thickness and curvature data with the redefined medial wall roi
    dilate_and_mask_metric(subject.id, meshes['AtlasSpaceNative'],
        settings.dscalars)
    logger.info(section_header("Creating Native Space Dense Maps"))
    make_dense_map(subject.id, meshes['AtlasSpaceNative'],
        settings.dscalars, expected_labels)
    add_dense_maps_to_spec_file(subject.id, meshes['T1wNative'],
        settings.dscalars.keys(), expected_labels)
    #Populate Highres fs_LR spec file.
    logger.info(section_header('Resampling data from Native to {}'
        ''.format(meshes['HighResMesh']['meshname'])))
    copy_colin_flat_and_add_to_spec(subject.id, settings, meshes['HighResMesh'])
    deform_to_native(meshes['AtlasSpaceNative'], meshes['HighResMesh'],
        settings.dscalars, expected_labels, subject.id, sphere=reg_sphere)
    # Populate LowRes fs_LR spec file.
    for res in settings.low_res:
        low_res_name = '{}k_fs_LR'.format(res)
        logger.info(section_header('Resampling data from Native to '
            '{}'.format(low_res_name)))
        populate_low_res_spec_file(meshes['AtlasSpaceNative'],
            meshes[low_res_name], subject, settings, reg_sphere, expected_labels)
def resampling_to_t1w_32k(temp_dir, settings, meshes, expected_labels):
    '''Populate the T1w/fsaverage_LR32k spec file(s).
    This can be run as an add-on to a completed original workflow; it requires
    that the registration sphere from that run already exists.

    NOTE(review): temp_dir is currently unused here — kept for interface
    consistency with the other workflow stages.
    '''
    subject = settings.subject
    # pick the registration sphere matching the surf-reg mode used originally
    FS_reg_sphere_name, MSMSulc_reg_sphere_name = get_reg_sphere_names()
    if settings.reg_name == 'MSMSulc':
        reg_sphere = MSMSulc_reg_sphere_name
    else :
        reg_sphere = FS_reg_sphere_name
    ## double check that the registration sphere has already been created
    reg_sphere_file = surf_file(subject.id, reg_sphere, 'L',
        meshes['AtlasSpaceNative'])
    if not os.path.exists(reg_sphere_file):
        logger.critical('Cannot find registration sphere {}, exiting'.format(reg_sphere_file))
        sys.exit(1)
    for res in settings.low_res:
        low_res_name = '{}k_fs_LR'.format(res)
        logger.info(section_header('Resampling data from Native to T1w -'
            '{}'.format(low_res_name)))
        dest_mesh_name = 'Native{}k_fs_LR'.format(res)
        # make the folder if it does not exist
        if not os.path.exists(meshes[dest_mesh_name]['Folder']):
            ciftify.utils.make_dir(meshes[dest_mesh_name]['Folder'], DRYRUN)
        ## add the anat images to the spec
        add_anat_images_to_spec_files({'mesh':meshes[dest_mesh_name]},
            subject.id)
        ## then resample the surfaces and link to the dense files and labels
        resample_to_native(meshes['T1wNative'], meshes[dest_mesh_name],
            settings, subject.id, reg_sphere, expected_labels,
            reg_sphere_mesh = meshes['AtlasSpaceNative'])
def run(cmd, dryrun = False, suppress_stdout = False, suppress_stderr = False):
    '''Thin wrapper around ciftify.utils.run.

    Honours the module-level DRYRUN flag, forwards OMP_NUM_THREADS (and
    FS_LICENSE when set) to the child process, and exits the program with
    status 1 on any non-zero return code.
    '''
    run_env = {"OMP_NUM_THREADS": str(N_CPUS)}
    if FS_LICENSE:
        run_env["FS_LICENSE"] = FS_LICENSE
    returncode = ciftify.utils.run(
        cmd,
        dryrun = DRYRUN or dryrun,
        suppress_stdout = suppress_stdout,
        suppress_stderr = suppress_stderr,
        env = run_env)
    if returncode:
        sys.exit(1)
    return returncode
class Settings(WorkFlowSettings):
    '''Parses and validates everything needed to run ciftify_recon_all for one
    subject: paths, surface-registration mode, dscalar maps and the FSL
    registration configuration.

    Fixes over the previous version:
      - check_msm_config no longer leaks the open config file handle
      - __has_been_run_before no longer uses a bare except
    '''
    def __init__(self, arguments):
        WorkFlowSettings.__init__(self, arguments)
        self.reg_name = self.__set_registration_mode(arguments)
        self.resample = arguments['--resample-to-T1w32k']
        self.no_symlinks = arguments['--no-symlinks']
        self.fs_root_dir = self.__set_fs_subjects_dir(arguments)
        self.subject = self.__get_subject(arguments)
        self.ciftify_data_dir = ciftify.config.find_ciftify_global()
        self.fs_license = self.__get_freesurfer_license(arguments['--fs-license'])
        self.use_T2 = self.__get_T2(arguments, self.subject) # T2 runs only using freesurfer not recommended
        self.dscalars = self.__define_dscalars()
        self.registration = self.__define_registration_settings(
            arguments['--read-non-lin-xfm'], arguments['--read-lin-premat'])
        self.skip_main_wf = self.__has_been_run_before()
    def __set_registration_mode(self, arguments):
        """Resolve the surface registration mode ('FS' or 'MSMSulc') and, for
        MSMSulc, locate and validate the MSM configuration file.

        Must be set after ciftify_data_dir is set, since it requires this
        for MSMSulc config.
        """
        surf_reg = ciftify.utils.get_registration_mode(arguments)
        if surf_reg == "MSMSulc":
            ciftify.config.verify_msm_available()
            user_config = arguments['--MSM-config']
            if not user_config:
                # fall back to the config shipped with ciftify
                self.msm_config = os.path.join(ciftify.config.find_ciftify_global(),
                    'hcp_config', 'MSMSulcStrainFinalconf')
            elif not os.path.exists(user_config):
                logger.error("MSM config file {} does not exist".format(user_config))
                sys.exit(1)
            else:
                self.msm_config = user_config
            if not self.check_msm_config():
                logger.error("Running version of MSM does not match config {}".format(self.msm_config))
                sys.exit(1)
        else:
            self.msm_config = None
        return surf_reg
    def check_msm_config(self):
        '''Return True if every option named in the MSM config file is
        recognized by the installed msm binary ('--dopt' is always tolerated).

        The config file is opened with a context manager so the handle is
        always closed (the previous version leaked it).
        '''
        with open(self.msm_config) as msm_fp:
            # each config line looks like '--option=value'; keep the option part
            arg_list = [line[0:line.rfind('=')] for line in msm_fp]
        msm_options = subprocess.Popen(['msm', '--printoptions'], stderr=subprocess.PIPE)
        out, err = msm_options.communicate()
        err = err.decode('utf-8') # for python 3 compatible
        return all((arg in err or arg == '--dopt') for arg in arg_list)
    def __get_freesurfer_license(self, fs_license_arg):
        '''check that freesurfer license is readable, falling back to the
        FS_LICENSE environment variable when no argument was given'''
        fs_license_file = fs_license_arg
        if fs_license_file:
            ciftify.utils.check_input_readable(fs_license_file)
        else:
            # if not set, search the env to see if it was set in the shell
            fs_license_file = os.environ.get('FS_LICENSE')
        return fs_license_file
    def __set_fs_subjects_dir(self, arguments):
        '''resolve the freesurfer SUBJECTS_DIR from the CLI or the environment'''
        fs_root_dir = arguments['--fs-subjects-dir']
        if fs_root_dir:
            return fs_root_dir
        fs_root_dir = ciftify.config.find_freesurfer_data()
        if fs_root_dir is None:
            logger.error("Cannot find freesurfer subjects dir, exiting.")
            sys.exit(1)
        return fs_root_dir
    def __get_subject(self, arguments):
        subject_id = arguments['<Subject>']
        return Subject(self.work_dir, self.fs_root_dir, subject_id, self.resample)
    def __has_been_run_before(self):
        '''do all the checking to determine if it is ok to run
        several options:
            nothing exists - ok to start running (creates the subject dir)
            completed exists - ok to run resampling only
            incomplete exists - abort (ciftify will not clobber old outputs)
        '''
        if os.path.exists(self.subject.path):
            if self.resample:
                if has_ciftify_recon_all_run(self.work_dir, self.subject.id):
                    logger.info("Found completed ciftify output, only resampling to T1w/fsaverage_LR32k")
                    return True
                else:
                    logger.error("Found incomplete ciftify output at {}/{}, an error may have occurred - aborting".format(self.work_dir, self.subject.id))
                    sys.exit(1)
            else:
                logger.error('Subject output {} already exists.'
                    'If you wish to re-run, you must first delete old outputs.'
                    ''.format(self.subject.path))
                sys.exit(1)
        else:
            try:
                os.makedirs(self.subject.path)
            except OSError:
                logger.error("Cannot make subject path {}, exiting"
                    "".format(self.subject.path))
                sys.exit(1)
        return False
    def __define_dscalars(self):
        '''read the dscalar map definitions, dropping the MSMSulc distortion
        maps when MSMSulc registration is not being run'''
        dscalars_config = WorkFlowSettings.get_config_entry(self, 'dscalars')
        if self.reg_name != 'MSMSulc':
            try:
                del dscalars_config['ArealDistortion_MSMSulc']
                del dscalars_config['EdgeDistortion_MSMSulc']
            except KeyError:
                # do nothing, MSMSulc options not defined anyway
                pass
        return dscalars_config
    def __define_registration_settings(self, read_nonlin_xfm, read_lin_xfm, method='FSL_fnirt',
            standard_res='2mm'):
        '''assemble the volume-registration settings dict, resolving the
        src/dest/xfms subfolders to absolute paths and wiring in user-supplied
        transforms when both linear and non-linear files are given'''
        registration_config = self.get_config_entry('registration')
        for key in ['src_dir', 'dest_dir', 'xfms_dir']:
            try:
                subfolders = registration_config[key]
            except KeyError:
                logger.critical("registration config does not contain expected"
                    "key {}".format(key))
                sys.exit(1)
            registration_config[key] = os.path.join(self.subject.path, subfolders)
        resolution_config = WorkFlowSettings.get_resolution_config(self, method, standard_res)
        registration_config.update(resolution_config)
        if any([read_nonlin_xfm, read_lin_xfm]):
            if all([read_nonlin_xfm, read_lin_xfm]):
                ciftify.utils.check_input_readable(read_nonlin_xfm)
                registration_config['User_AtlasTransform_NonLinear'] = read_nonlin_xfm
                ciftify.utils.check_input_readable(read_lin_xfm)
                registration_config['User_AtlasTransform_Linear'] = read_lin_xfm
            else:
                logger.critical("if inputing user transforms, both linear and non-linear input files are needed")
                sys.exit(1)
        else:
            registration_config['User_AtlasTransform_NonLinear'] = False
            registration_config['User_AtlasTransform_Linear'] = False
        return registration_config
    def __get_T2(self, arguments, subject):
        '''always returns None: T2 processing using only freesurfer outputs is
        not recommended (use the HCPPipelines instead)'''
        return None
        # if not arguments['--T2']:
        #     return None
        # raw_T2 = os.path.join(subject.fs_folder, 'mri/orig/T2raw.mgz')
        # if not os.path.exists(raw_T2):
        #     return None
        # return raw_T2
class Subject:
    '''Holds the per-subject paths: the freesurfer input folder and the
    ciftify output folder layout (T1w/, MNINonLinear/, log file).'''
    def __init__(self, work_dir, fs_root_dir, subject_id, resample_to_T1w32k):
        self.id = subject_id
        self.fs_folder = self.__set_fs_folder(fs_root_dir)
        self.path = self.__set_path(work_dir, resample_to_T1w32k)
        self.T1w_dir = os.path.join(self.path, 'T1w')
        self.atlas_space_dir = os.path.join(self.path, 'MNINonLinear')
        self.log = os.path.join(self.path, 'cifti_recon_all.log')
    def __set_fs_folder(self, fs_root_dir):
        '''return the subject's freesurfer folder, exiting if it is missing'''
        fs_path = os.path.join(fs_root_dir, self.id)
        if not os.path.exists(fs_path):
            logger.error("{} freesurfer folder does not exist, exiting."
                "".format(self.id))
            sys.exit(1)
        return fs_path
    def __set_path(self, work_dir, resample_to_T1w32k):
        '''return the subject's ciftify output path.
        NOTE(review): resample_to_T1w32k is currently unused — existence
        checking now happens in Settings.__has_been_run_before
        '''
        path = os.path.join(work_dir, self.id)
        return path
    def get_subject_log_handler(self, formatter):
        '''build a per-subject INFO-level file log handler'''
        fh = logging.FileHandler(self.log)
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        return fh
############ Step 0: Settings and Logging #############################
def log_inputs(fs_dir, work_dir, subject_id, registration_config, msm_config=None):
    '''record the main run arguments (and any user-supplied transforms) to the log'''
    logger.info("Arguments: ")
    logger.info('    freesurfer SUBJECTS_DIR: {}'.format(fs_dir))
    logger.info('    CIFTIFY_WORKDIR directory: {}'.format(work_dir))
    logger.info('    Subject: {}'.format(subject_id))
    if msm_config:
        logger.info('    MSM config file: {}'.format(msm_config))
    if registration_config['User_AtlasTransform_NonLinear']:
        logger.info('User given transforms (to be copied to MNINonLinear/xfm):')
        logger.info('    User given linear tranform: {}'.format(registration_config['User_AtlasTransform_Linear']))
        logger.info('    User given non-linear tranform: {}'.format(registration_config['User_AtlasTransform_NonLinear']))
def log_build_environment(settings):
    '''print the running environment info to the logs (info):
    user, system, and the versions of ciftify, wb_command, freesurfer and FSL'''
    logger.info("{}---### Environment Settings ###---".format(os.linesep))
    logger.info("Username: {}".format(get_stdout(['whoami'],
        echo=False).replace(os.linesep,'')))
    logger.info(ciftify.config.system_info())
    logger.info(ciftify.config.ciftify_version(os.path.basename(__file__)))
    logger.info(ciftify.config.wb_command_version())
    logger.info(ciftify.config.freesurfer_version())
    logger.info(ciftify.config.fsl_version())
    # msm version logging disabled; settings is currently otherwise unused here
    # if settings.msm_config: logger.info(ciftify.config.msm_version())
    logger.info("---### End of Environment Settings ###---{}".format(os.linesep))
def pars_recon_all_logs(fs_folder):
    '''prints recon_all run settings to the log and returns the parsed
    freesurfer version string (consumed by define_expected_labels).
    NOTE(review): name kept as "pars" (sic) for backwards compatibility.'''
    fslog = ciftify.config.FSLog(fs_folder)
    sep = '{}    '.format(os.linesep)
    freesurfer_info = "recon_all was run {1} with settings:{0}Build Stamp: "\
        "{2}{0}Version parsed as: {3}{0}CMD args: {4}{0}".format(
        sep, fslog.start, fslog.build, fslog.version, fslog.cmdargs)
    logger.info(freesurfer_info)
    # a non-empty status means recon-all may not have finished cleanly
    if len(fslog.status) > 0:
        logger.warning(fslog.status)
    return fslog.version
def define_expected_labels(fs_version):
    '''Return the list of freesurfer label atlases to convert, trimmed
    according to the freesurfer version that produced the recon-all output.

    Arguments:
        fs_version   Version string as parsed from the recon-all logs
    '''
    labels = ['aparc', 'aparc.a2009s', 'BA', 'aparc.DKTatlas', 'BA_exvivo']
    if 'v6.' in fs_version:
        # the plain 'BA' annot is not expected from freesurfer 6 output
        labels = [name for name in labels if name != 'BA']
    if 'v5.' in fs_version:
        # these two atlases are not expected from freesurfer 5 output
        labels = [name for name in labels
                  if name not in ('aparc.DKTatlas', 'BA_exvivo')]
    return labels
def create_output_directories(meshes, xfms_dir, rois_dir, results_dir):
    '''make the output folders (and temp folders) for every mesh, plus the
    xfms/ROIs/Results folders; pre-existing mesh folders are tolerated'''
    for mesh in meshes.values():
        ciftify.utils.make_dir(mesh['Folder'], DRYRUN, suppress_exists_error = True)
        ciftify.utils.make_dir(mesh['tmpdir'], DRYRUN, suppress_exists_error = True)
    ciftify.utils.make_dir(xfms_dir, DRYRUN)
    ciftify.utils.make_dir(rois_dir, DRYRUN)
    ciftify.utils.make_dir(results_dir, DRYRUN)
def link_to_template_file(settings, subject_file, global_file, via_file):
    '''
    The original hcp pipelines would copy atlas files into each subject's
    directory, which had the benefit of making the atlas files easier to find
    and copy across systems but created many redundant files.
    This function instead will copy the atlas files into a templates directory
    in the CIFTIFY_WORKDIR Folder and then link from each subject's individual
    directory to this file

    Arguments:
        settings      Settings object (no_symlinks and work_dir are read)
        subject_file  Destination path inside the subject folder
        global_file   The atlas file in the ciftify data directory
        via_file      Filename to use inside the shared zz_templates folder
    '''
    if settings.no_symlinks:
        run(['cp', global_file, subject_file], dryrun=DRYRUN)
    else:
        ## copy from ciftify template to the HCP_DATA if via_file does not exist
        via_folder = os.path.join(settings.work_dir, 'zz_templates')
        via_path = os.path.join(via_folder, via_file)
        if not os.path.isfile(via_path):
            if not os.path.exists(via_folder):
                run(['mkdir','-p',via_folder], dryrun=DRYRUN)
            run(['cp', global_file, via_path], dryrun=DRYRUN)
        ## link the subject_file to via_file (relative link so the tree is movable)
        # NOTE(review): raises if subject_file already exists — assumed fresh output dir
        if not DRYRUN:
            os.symlink(os.path.relpath(via_path, os.path.dirname(subject_file)),
                subject_file)
## Step 1: Conversion from Freesurfer Format ######################
## Step 1.0: Conversion of Freesurfer Volumes #####################
def convert_T1_and_freesurfer_inputs(T1w_nii, subject, hcp_templates,
        T2_raw=None):
    '''convert the freesurfer T1 and segmentation volumes to nifti in the T1w
    folder; if a raw T2 is given, resample it into the same space as well'''
    logger.info(section_header("Converting T1wImage and Segmentations from "
        "freesurfer"))
    ###### convert the mgz T1w and put in T1w folder
    convert_freesurfer_T1(subject.fs_folder, T1w_nii)
    #Convert FreeSurfer Volumes and import the label metadata
    for image in ['wmparc', 'aparc.a2009s+aseg', 'aparc+aseg']:
        convert_freesurfer_mgz(image, T1w_nii, hcp_templates, subject.fs_folder,
            subject.T1w_dir)
    if T2_raw:
        T2w_nii = os.path.join(subject.T1w_dir, 'T2w.nii.gz')
        resample_freesurfer_mgz(T1w_nii, T2_raw, T2w_nii)
def convert_freesurfer_T1(fs_folder, T1w_nii):
    '''
    Convert T1w from freesurfer(mgz) to nifti format and run fslreorient2std

    Arguments:
        fs_folder   Path to the subject's freesurfer output
        T1w_nii     Path to T1wImage to with desired output orientation
    '''
    fs_T1 = os.path.join(fs_folder, 'mri', 'T1.mgz')
    if not os.path.exists(fs_T1):
        logger.error("Cannot find freesurfer T1 {}, exiting".format(fs_T1))
        sys.exit(1)
    run(['mri_convert', fs_T1, T1w_nii], dryrun=DRYRUN)
    # reorient to match the standard (MNI) axis ordering
    run(['fslreorient2std', T1w_nii, T1w_nii], dryrun=DRYRUN)
def convert_freesurfer_mgz(image_name, T1w_nii, hcp_templates,
        freesurfer_folder, out_dir):
    ''' convert image from freesurfer(mgz) to nifti format,
    realigned to the specified T1wImage, and import labels

    A missing wmparc is fatal; any other missing volume only warns.
    BUGFIX: the previous check compared the full mgz *path* against the bare
    filename 'wmparc.mgz', so it could never match and a missing wmparc did
    not abort as intended.

    Arguments:
        image_name          Name of Image to Convert
        T1w_nii             Path to T1wImage to with desired output
                            orientation
        hcp_templates       The path to the hcp templates, as defined by
                            the shell variable CIFTIFY_TEMPLATES
        freesurfer_folder   Path the to subjects freesurfer output
        out_dir             Output Directory for converted Image
    '''
    freesurfer_mgz = os.path.join(freesurfer_folder, 'mri',
        '{}.mgz'.format(image_name))
    if not os.path.isfile(freesurfer_mgz):
        if image_name == 'wmparc':
            logger.error("{} not found, exiting.".format(freesurfer_mgz))
            sys.exit(1)
        else:
            logger.warning("{} not found".format(freesurfer_mgz))
    else:
        image_nii = os.path.join(out_dir, '{}.nii.gz'.format(image_name))
        resample_freesurfer_mgz(T1w_nii, freesurfer_mgz, image_nii)
        # attach the freesurfer label names/colors to the converted volume
        run(['wb_command', '-logging', 'SEVERE','-volume-label-import', image_nii,
            os.path.join(hcp_templates, 'hcp_config', 'FreeSurferAllLut.txt'),
            image_nii, '-drop-unused-labels'], dryrun=DRYRUN)
def resample_freesurfer_mgz(T1w_nii, freesurfer_mgz, image_nii):
    '''convert an mgz volume to nifti, resliced like T1w_nii with
    nearest-neighbour interpolation (preserves label values).

    BUGFIX: '-nc' (no change) is a standalone mri_convert flag, but it was
    previously placed where '-rl' expects its reslice-template argument, so
    the intended template (T1w_nii) was not consumed by '-rl'.
    '''
    run(['mri_convert', '-rt', 'nearest', '-nc', '-rl', T1w_nii,
        freesurfer_mgz, image_nii], dryrun=DRYRUN)
## Step 1.1: Creating Brainmask from wmparc #######################
def prepare_T1_image(wmparc, T1w_nii, reg_settings):
    '''derive a brain mask from the wmparc segmentation and apply it to the
    T1w image, producing the brain-extracted T1w used for registration'''
    T1w_brain_mask = os.path.join(reg_settings['src_dir'],
        reg_settings['BrainMask'])
    T1w_brain_nii = os.path.join(reg_settings['src_dir'],
        reg_settings['T1wBrain'])
    logger.info(section_header('Creating brainmask from freesurfer wmparc '
        'segmentation'))
    make_brain_mask_from_wmparc(wmparc, T1w_brain_mask)
    ## apply brain mask to the T1wImage
    mask_T1w_image(T1w_nii, T1w_brain_mask, T1w_brain_nii)
def make_brain_mask_from_wmparc(wmparc_nii, brain_mask):
    '''
    Will create a brainmask_nii image out of the wmparc ROIs nifti converted
    from freesurfer
    '''
    ## Create FreeSurfer Brain Mask skipping 1mm version...
    # binarize, then dilate x3 / erode x2 to close gaps in the segmentation
    run(['fslmaths', wmparc_nii,
        '-bin', '-dilD', '-dilD', '-dilD', '-ero', '-ero',
        brain_mask], dryrun=DRYRUN)
    # fill any remaining internal holes and re-binarize
    run(['wb_command', '-volume-fill-holes', brain_mask, brain_mask])
    run(['fslmaths', brain_mask, '-bin', brain_mask])
def mask_T1w_image(T1w_image, brain_mask, T1w_brain):
    '''mask the T1w Image with the brain_mask to create the T1w_brain image'''
    run(['fslmaths', T1w_image, '-mul', brain_mask, T1w_brain])
## Step 1.2: running FSL registration #############################
def convert_inputs_to_MNI_space(reg_settings, hcp_templates, temp_dir,
        use_T2=None):
    '''register the T1w image to the MNI template with FNIRT, then apply the
    resulting warp to the segmentation volumes and the brain mask
    (and the T2w, when present)'''
    logger.info(section_header("Registering T1wImage to MNI template using FSL "
        "FNIRT"))
    run_T1_FNIRT_registration(reg_settings, temp_dir)
    # convert FreeSurfer Segmentations and brainmask to MNI space
    logger.info(section_header("Applying MNI transform to label files"))
    for image in ['wmparc', 'aparc.a2009s+aseg', 'aparc+aseg']:
        apply_nonlinear_warp_to_nifti_rois(image, reg_settings, hcp_templates)
    # also transform the brain mask to MNI space (intensity image: no labels)
    apply_nonlinear_warp_to_nifti_rois('brainmask_fs', reg_settings,
        hcp_templates, import_labels=False)
    if use_T2:
        # Transform T2 to MNI space too
        apply_nonlinear_warp_to_nifti_rois('T2w', reg_settings, hcp_templates,
            import_labels=False)
def run_T1_FNIRT_registration(reg_settings, temp_dir):
    '''
    Run the registration from T1w to MNINonLinear space using FSL's fnirt
    registration settings and file paths are read from reg_settings

    When the user supplied both transforms they are copied into xfms_dir
    instead of being computed; the non-linear warp, its inverse, and the
    warped T1w image are always produced.
    '''
    src_dir = reg_settings['src_dir']
    T1wBrain = reg_settings['T1wBrain']
    standard_T1wBrain = reg_settings['standard_T1wBrain']
    xfms_dir = reg_settings['xfms_dir']
    AtlasTransform_Linear = reg_settings['AtlasTransform_Linear']
    standard_BrainMask = reg_settings['standard_BrainMask']
    AtlasTransform_NonLinear = reg_settings['AtlasTransform_NonLinear']
    FNIRTConfig = reg_settings['FNIRTConfig']
    InverseAtlasTransform_NonLinear = reg_settings['InverseAtlasTransform_NonLinear']
    standard_T1wImage = reg_settings['standard_T1wImage']
    T1wImage = reg_settings['T1wImage']
    dest_dir = reg_settings['dest_dir']
    User_AtlasTransform_Linear = reg_settings['User_AtlasTransform_Linear']
    User_AtlasTransform_NonLinear = reg_settings['User_AtlasTransform_NonLinear']
    ## Linear then non-linear registration to MNI
    T1w2_standard_linear = os.path.join(temp_dir,
        'T1w2StandardLinearImage.nii.gz')
    if User_AtlasTransform_Linear:
        # user supplied both transforms - copy them in rather than computing
        run(['cp', User_AtlasTransform_Linear, os.path.join(xfms_dir,AtlasTransform_Linear)])
        run(['cp', User_AtlasTransform_NonLinear, os.path.join(xfms_dir,AtlasTransform_NonLinear)])
    else:
        # 12-dof affine of the brain-extracted T1w to the standard brain
        run(['flirt', '-interp', 'spline', '-dof', '12',
            '-in', os.path.join(src_dir, T1wBrain), '-ref', standard_T1wBrain,
            '-omat', os.path.join(xfms_dir, AtlasTransform_Linear),
            '-o', T1w2_standard_linear], dryrun=DRYRUN)
        ## calculate the just the warp for the surface transform - need it because
        ## sometimes the brain is outside the bounding box of warfield
        run(['fnirt','--in={}'.format(T1w2_standard_linear),
            '--ref={}'.format(standard_T1wImage),
            '--refmask={}'.format(standard_BrainMask),
            '--fout={}'.format(os.path.join(xfms_dir, AtlasTransform_NonLinear)),
            '--logout={}'.format(os.path.join(xfms_dir, 'NonlinearReg_fromlinear.log')),
            '--config={}'.format(FNIRTConfig)], dryrun=DRYRUN)
    ## also inverse the non-prelinear warp - we will need it for the surface
    ## transforms
    run(['invwarp', '-w', os.path.join(xfms_dir, AtlasTransform_NonLinear),
        '-o', os.path.join(xfms_dir,InverseAtlasTransform_NonLinear),
        '-r', standard_T1wImage], dryrun=DRYRUN)
    ##T1w set of warped outputs (brain/whole-head + restored/orig)
    run(['applywarp', '--rel', '--interp=trilinear',
        '-i', os.path.join(src_dir, T1wImage),
        '-r', standard_T1wImage, '-w', os.path.join(xfms_dir, AtlasTransform_NonLinear),
        '--premat={}'.format(os.path.join(xfms_dir,AtlasTransform_Linear)),
        '-o', os.path.join(dest_dir, T1wImage)], dryrun=DRYRUN)
def apply_nonlinear_warp_to_nifti_rois(image, reg_settings, hcp_templates,
        import_labels=True):
    '''
    Apply a non-linear warp to nifti image of ROI labels. Reads registration
    settings from reg_settings. Set import_labels=False for plain intensity
    images (e.g. brain mask, T2w) that have no label table.
    Silently does nothing if the source image does not exist.
    '''
    image_src = os.path.join(reg_settings['src_dir'], '{}.nii.gz'.format(image))
    fs_labels = os.path.join(hcp_templates, 'hcp_config',
        'FreeSurferAllLut.txt')
    if os.path.isfile(image_src):
        image_dest = os.path.join(reg_settings['dest_dir'],
            '{}.nii.gz'.format(image))
        # nearest-neighbour so label values are not blended at edges
        run(['applywarp', '--rel', '--interp=nn',
            '-i', image_src,
            '-r', os.path.join(reg_settings['dest_dir'],
                reg_settings['T1wImage']),
            '-w', os.path.join(reg_settings['xfms_dir'],
                reg_settings['AtlasTransform_NonLinear']),
            '--premat={}'.format(os.path.join(reg_settings['xfms_dir'],
                reg_settings['AtlasTransform_Linear'])),
            '-o', image_dest], dryrun=DRYRUN)
        if import_labels:
            run(['wb_command', '-volume-label-import', '-logging', 'SEVERE',
                image_dest, fs_labels, image_dest, '-drop-unused-labels'],
                dryrun=DRYRUN)
def add_anat_images_to_spec_files(meshes, subject_id, img_type='T1wImage'):
    '''add all the T1wImages (or T2wImages via img_type) to their associated
    spec_files; 'INVALID' is the wb_command structure token for volumes'''
    for mesh in meshes.values():
        run(['wb_command', '-add-to-spec-file',
            os.path.realpath(spec_file(subject_id, mesh)),
            'INVALID', os.path.realpath(mesh[img_type])], dryrun=DRYRUN)
## Step 1.5 Create Subcortical ROIs ###########################
def create_cifti_subcortical_ROIs(atlas_space_folder, settings, temp_dir):
    '''
    defines the subcortical ROI labels for cifti files combines a template ROI
    masks with the participants freesurfer wmparc output to do so
    '''
    # The template files required for this section
    freesurfer_labels = os.path.join(settings.ciftify_data_dir, 'hcp_config',
        'FreeSurferAllLut.txt')
    grayord_space_dir = os.path.join(settings.ciftify_data_dir, '91282_Greyordinates')
    subcortical_gray_labels = os.path.join(settings.ciftify_data_dir, 'hcp_config',
        'FreeSurferSubcorticalLabelTableLut.txt')
    # NOTE(review): avg_wmparc is unused; kept for the commented-out commands below
    avg_wmparc = os.path.join(settings.ciftify_data_dir, 'standard_mesh_atlases',
        'Avgwmparc.nii.gz')
    ## right now we only have a template for the 2mm greyordinate space..
    for grayord_res in settings.grayord_res:
        ## The outputs of this sections
        atlas_ROIs = os.path.join(atlas_space_folder, 'ROIs',
            'Atlas_ROIs.{}.nii.gz'.format(grayord_res))
        wmparc_ROIs = os.path.join(temp_dir,
            'wmparc.{}.nii.gz'.format(grayord_res))
        # NOTE(review): wmparc_atlas_ROIs is unused; kept for the commented-out commands below
        wmparc_atlas_ROIs = os.path.join(temp_dir,
            'Atlas_wmparc.{}.nii.gz'.format(grayord_res))
        ROIs_nii = os.path.join(atlas_space_folder, 'ROIs',
            'ROIs.{}.nii.gz'.format(grayord_res))
        ## linking this file into the subjects folder because func2hcp needs it
        link_to_template_file(settings, atlas_ROIs,
            os.path.join(grayord_space_dir,
                'Atlas_ROIs.{}.nii.gz'.format(grayord_res)),
            via_file='Atlas_ROIs.{}.nii.gz'.format(grayord_res))
        ## the analysis steps - resample the participants wmparc output the
        ## greyordinate resolution
        run(['applywarp', '--interp=nn', '-i', os.path.join(atlas_space_folder,
            'wmparc.nii.gz'), '-r', atlas_ROIs, '-o', wmparc_ROIs], dryrun=DRYRUN)
        ## import the label metadata
        run(['wb_command', '-logging', 'SEVERE', '-volume-label-import', wmparc_ROIs,
            freesurfer_labels, wmparc_ROIs, '-drop-unused-labels'], dryrun=DRYRUN)
        ## These commands were used in the original fs2hcp script, Erin
        ## discovered they are probably not being used. Leaving these commands
        ## here, though, just in case
        # run(['applywarp', '--interp=nn', '-i', Avgwmparc, '-r', Atlas_ROIs,
        #     '-o', wmparcAtlas_ROIs])
        # run(['wb_command', '-volume-label-import',
        #     wmparcAtlas_ROIs, FreeSurferLabels, wmparcAtlas_ROIs,
        #     '-drop-unused-labels'])
        # keep only the subcortical gray labels for the final cifti ROI volume
        run(['wb_command', '-logging', 'SEVERE', '-volume-label-import', wmparc_ROIs,
            subcortical_gray_labels, ROIs_nii,'-discard-others'], dryrun=DRYRUN)
## Step 1.4 Conversion of other formats ###########################
def convert_FS_surfaces_to_gifti(subject_id, freesurfer_subject_dir, meshes,
        reg_settings, temp_dir):
    '''Convert the freesurfer anatomical and spherical surfaces to gifti.'''
    logger.info(section_header("Converting freesurfer surfaces to gifti"))
    # The c_ras offset between the FreeSurfer surface and volume is written
    # to an affine matrix file, applied to the surfaces during conversion
    offset_mat = os.path.join(temp_dir, 'cras.mat')
    write_cras_file(freesurfer_subject_dir, offset_mat)
    for surf_name, secondary in (('white', 'GRAY_WHITE'), ('pial', 'PIAL')):
        ## convert the surface from freesurfer into the T1w Native directory
        convert_freesurfer_surface(subject_id, surf_name, 'ANATOMICAL',
                freesurfer_subject_dir, meshes['T1wNative'],
                surface_secondary_type=secondary, cras_mat=offset_mat)
        ## MNI transform the surface into the MNINonLinear/Native folder
        apply_nonlinear_warp_to_surface(subject_id, surf_name, reg_settings,
                meshes)
    # Convert the original and registered spherical surfaces; only the
    # unregistered sphere is added to the nonlinear spec file
    convert_freesurfer_surface(subject_id, 'sphere', 'SPHERICAL',
            freesurfer_subject_dir, meshes['AtlasSpaceNative'])
    convert_freesurfer_surface(subject_id, 'sphere.reg', 'SPHERICAL',
            freesurfer_subject_dir, meshes['AtlasSpaceNative'],
            add_to_spec=False)
def write_cras_file(freesurfer_folder, cras_mat):
    '''Read the c_ras offset of the FreeSurfer surfaces from mri_info output
    for brain.finalsurfs.mgz and write it out as a 4x4 translation matrix
    (FSL text format).

    Arguments:
        freesurfer_folder    The subject's freesurfer output folder
        cras_mat             Path of the matrix file to write

    Raises:
        ValueError if the c_ras offsets cannot be parsed from mri_info.
    '''
    mri_info = get_stdout(['mri_info', os.path.join(freesurfer_folder, 'mri',
            'brain.finalsurfs.mgz')])
    matrix_x = matrix_y = matrix_z = None
    for line in mri_info.split(os.linesep):
        # each c_(r|a|s) line ends in '= <value>'; take the last '=' field
        if 'c_r' in line:
            bitscr = line.split('=')[4]
            matrix_x = bitscr.replace(' ','')
        elif 'c_a' in line:
            bitsca = line.split('=')[4]
            matrix_y = bitsca.replace(' ','')
        elif 'c_s' in line:
            bitscs = line.split('=')[4]
            matrix_z = bitscs.replace(' ','')
    if None in (matrix_x, matrix_y, matrix_z):
        # previously this fell through to a NameError; fail with a clear message
        raise ValueError('Could not parse c_ras offsets from mri_info output '
                'for freesurfer folder {}'.format(freesurfer_folder))
    with open(cras_mat, 'w') as cfile:
        cfile.write('1 0 0 {}\n'.format(matrix_x))
        cfile.write('0 1 0 {}\n'.format(matrix_y))
        cfile.write('0 0 1 {}\n'.format(matrix_z))
        # bug fix: the original wrote a literal '{}' into the last matrix row
        cfile.write('0 0 0 1\n')
def convert_freesurfer_annot(subject_id, label_name, fs_folder,
        dest_mesh_settings):
    ''' convert a freesurfer annot to a gifti label and set metadata'''
    for hemi, structure in (('L', 'CORTEX_LEFT'), ('R', 'CORTEX_RIGHT')):
        fs_annot = os.path.join(fs_folder, 'label',
                '{}h.{}.annot'.format(hemi.lower(), label_name))
        if not os.path.exists(fs_annot):
            # not every annot is produced by all freesurfer versions
            continue
        label_gii = label_file(subject_id, label_name, hemi,
                dest_mesh_settings)
        white_surf = os.path.join(fs_folder, 'surf',
                '{}h.white'.format(hemi.lower()))
        ## convert the annot to gifti, sampled on the white surface
        run(['mris_convert', '--annot', fs_annot, white_surf, label_gii],
            suppress_stderr = True, dryrun=DRYRUN)
        ## set structure and map-name metadata on the gifti label
        run(['wb_command', '-set-structure', label_gii, structure],
            dryrun=DRYRUN)
        run(['wb_command', '-set-map-names', label_gii,
            '-map', '1', '{}_{}_{}'.format(subject_id, hemi, label_name)],
            dryrun=DRYRUN)
        run(['wb_command', '-gifti-label-add-prefix',
            label_gii, '{}_'.format(hemi), label_gii], dryrun=DRYRUN)
def apply_nonlinear_warp_to_surface(subject_id, surface, reg_settings, meshes):
    '''
    Apply the linear and non-linear warps to a surface file and add
    the warped surface output to its spec file.

    Arguments:
        subject_id          The id of the subject being worked on
        surface             The surface to transform (i.e. 'white', 'pial')
        reg_settings        A dictionary of settings (i.e. paths, filenames)
                            related to the warp.
        meshes              A dictionary of settings (i.e. naming conventions)
                            related to surfaces
    '''
    src_mesh_settings = meshes[reg_settings['src_mesh']]
    dest_mesh_settings = meshes[reg_settings['dest_mesh']]
    xfms_dir = reg_settings['xfms_dir']
    for hemisphere, structure in [('L','CORTEX_LEFT'), ('R','CORTEX_RIGHT')]:
        surf_src = surf_file(subject_id, surface, hemisphere, src_mesh_settings)
        surf_dest = surf_file(subject_id, surface, hemisphere, dest_mesh_settings)
        ## apply the linear (affine, flirt-format) part of the MNI transform
        ## consistency fix: these runs now respect the global DRYRUN flag,
        ## matching every other run() call in this file
        run(['wb_command', '-surface-apply-affine', surf_src,
            os.path.join(xfms_dir, reg_settings['AtlasTransform_Linear']),
            surf_dest, '-flirt', src_mesh_settings['T1wImage'],
            reg_settings['standard_T1wImage']], dryrun=DRYRUN)
        ## then apply the non-linear (fnirt-format) warpfield in place
        run(['wb_command', '-surface-apply-warpfield', surf_dest,
            os.path.join(xfms_dir, reg_settings['InverseAtlasTransform_NonLinear']),
            surf_dest, '-fnirt', os.path.join(xfms_dir,
            reg_settings['AtlasTransform_NonLinear'])], dryrun=DRYRUN)
        ## add the warped surface to the destination spec file
        run(['wb_command', '-add-to-spec-file', spec_file(subject_id,
            dest_mesh_settings), structure, surf_dest], dryrun=DRYRUN)
def convert_freesurfer_surface(subject_id, surface, surface_type, fs_subject_dir,
        dest_mesh_settings, surface_secondary_type=None, cras_mat=None,
        add_to_spec=True):
    '''
    Convert a freesurfer surface to gifti surface files (one per hemisphere).

    Arguments:
        surface                  Surface name
        surface_type             Surface type to add to the metadata
        surface_secondary_type   Type that will be added to gifti metadata
        fs_subject_dir           The subject freesurfer output folder
        dest_mesh_settings       Dictionary of settings with naming
                                 conventions for the gifti files
        cras_mat                 Path to the freesurfer affine matrix
        add_to_spec              Whether to add the gifti file to the spec file
    '''
    for hemi, structure in (('L', 'CORTEX_LEFT'), ('R', 'CORTEX_RIGHT')):
        fs_surface = os.path.join(fs_subject_dir, 'surf',
                '{}h.{}'.format(hemi.lower(), surface))
        gifti_surface = surf_file(subject_id, surface, hemi,
                dest_mesh_settings)
        ## convert the surface into the destination folder
        run(['mris_convert', fs_surface, gifti_surface], dryrun=DRYRUN)
        structure_cmd = ['wb_command', '-set-structure', gifti_surface,
                structure, '-surface-type', surface_type]
        if surface_secondary_type:
            structure_cmd = structure_cmd + ['-surface-secondary-type',
                    surface_secondary_type]
        run(structure_cmd, dryrun=DRYRUN)
        if cras_mat:
            # shift by the c_ras offset so the surface aligns with the volume
            run(['wb_command', '-surface-apply-affine', gifti_surface,
                cras_mat, gifti_surface], dryrun=DRYRUN)
        if add_to_spec:
            run(['wb_command', '-add-to-spec-file', spec_file(subject_id,
                dest_mesh_settings), structure, gifti_surface], dryrun=DRYRUN)
def convert_freesurfer_maps(subject_id, map_dict, fs_folder,
        dest_mesh_settings):
    ''' Convert freesurfer data (thickness, curv, sulc) to a gifti metric
    and set its metadata'''
    for hemi, structure in (('L', 'CORTEX_LEFT'), ('R', 'CORTEX_RIGHT')):
        map_gii = metric_file(subject_id, map_dict['mapname'], hemi,
                dest_mesh_settings)
        fs_map = os.path.join(fs_folder, 'surf',
                '{}h.{}'.format(hemi.lower(), map_dict['fsname']))
        white_surf = os.path.join(fs_folder, 'surf',
                '{}h.white'.format(hemi.lower()))
        ## convert the freesurfer curvature file to gifti
        run(['mris_convert', '-c', fs_map, white_surf, map_gii],
            dryrun=DRYRUN)
        ## set metadata and flip the sign of the values
        run(['wb_command', '-set-structure', map_gii, structure], dryrun=DRYRUN)
        run(['wb_command', '-metric-math', '"(var * -1)"',
            map_gii, '-var', 'var', map_gii], dryrun=DRYRUN)
        run(['wb_command', '-set-map-names', map_gii,
            '-map', '1', '{}_{}{}'.format(subject_id, hemi,
            map_dict['map_postfix'])], dryrun=DRYRUN)
        if map_dict['mapname'] != 'thickness':
            continue
        ## thickness only: take the absolute value, then set palette metadata
        run(['wb_command', '-metric-math', '"(abs(thickness))"',
            map_gii, '-var', 'thickness', map_gii], dryrun=DRYRUN)
        run(['wb_command', '-metric-palette', map_gii, map_dict['palette_mode'],
            map_dict['palette_options']], dryrun=DRYRUN)
## Step 2.0 Fucntions Called Multiple times ##############################
def make_midthickness_surfaces(subject_id, mesh_settings):
    '''
    Average the white and pial surfaces of a mesh into a midthickness
    surface, set its metadata and add it to the spec_file.
    '''
    for hemi, structure in (('L', 'CORTEX_LEFT'), ('R', 'CORTEX_RIGHT')):
        white_surf = surf_file(subject_id, 'white', hemi, mesh_settings)
        pial_surf = surf_file(subject_id, 'pial', hemi, mesh_settings)
        mid_surf = surf_file(subject_id, 'midthickness', hemi, mesh_settings)
        # midthickness is the vertex-wise average of white and pial
        run(['wb_command', '-surface-average', mid_surf,
            '-surf', white_surf, '-surf', pial_surf], dryrun=DRYRUN)
        run(['wb_command', '-set-structure', mid_surf, structure,
            '-surface-type', 'ANATOMICAL', '-surface-secondary-type',
            'MIDTHICKNESS'], dryrun=DRYRUN)
        run(['wb_command', '-add-to-spec-file', spec_file(subject_id,
            mesh_settings), structure, mid_surf], dryrun=DRYRUN)
def make_inflated_surfaces(subject_id, mesh_settings, iterations_scale=2.5):
    '''
    Generate inflated and very_inflated surfaces from the midthickness
    surface of the specified mesh, then add both to the spec_file.
    '''
    for hemi, structure in (('L', 'CORTEX_LEFT'), ('R', 'CORTEX_RIGHT')):
        mid_surf = surf_file(subject_id, 'midthickness', hemi, mesh_settings)
        inflated = surf_file(subject_id, 'inflated', hemi, mesh_settings)
        very_inflated = surf_file(subject_id, 'very_inflated', hemi,
                mesh_settings)
        # one wb_command call writes both inflation levels
        run(['wb_command', '-surface-generate-inflated', mid_surf,
            inflated, very_inflated, '-iterations-scale',
            str(iterations_scale)], dryrun=DRYRUN)
        for new_surf in (inflated, very_inflated):
            run(['wb_command', '-add-to-spec-file', spec_file(subject_id,
                mesh_settings), structure, new_surf], dryrun=DRYRUN)
def create_dscalar(subject_id, mesh_settings, dscalar_entry):
    '''
    Combine the left and right metric files into a dense scalar (dscalar)
    cifti file, then set its map-name and palette metadata. Important
    options are read from two dictionaries.
        mesh_settings   Contains settings for this Mesh
        dscalar_entry   Contains settings for this type of dscalar
                        (i.e. palette settings)
    '''
    dscalar_file = os.path.join(mesh_settings['Folder'],
        '{}.{}.{}.dscalar.nii'.format(subject_id, dscalar_entry['mapname'],
        mesh_settings['meshname']))
    left_metric = metric_file(subject_id, dscalar_entry['mapname'], 'L',
            mesh_settings)
    right_metric = metric_file(subject_id, dscalar_entry['mapname'], 'R',
            mesh_settings)
    mask_rois = dscalar_entry['mask_medialwall']
    ## build the creation command, optionally restricting each hemisphere
    ## to its medial wall roi
    create_cmd = ['wb_command', '-cifti-create-dense-scalar', dscalar_file,
            '-left-metric', left_metric]
    if mask_rois:
        create_cmd += ['-roi-left',
                medial_wall_roi_file(subject_id, 'L', mesh_settings)]
    create_cmd += ['-right-metric', right_metric]
    if mask_rois:
        create_cmd += ['-roi-right',
                medial_wall_roi_file(subject_id, 'R', mesh_settings)]
    run(create_cmd, dryrun=DRYRUN)
    ## set the dscalar file metadata
    run(['wb_command', '-set-map-names', dscalar_file,
        '-map', '1', "{}{}".format(subject_id, dscalar_entry['map_postfix'])],
        dryrun=DRYRUN)
    run(['wb_command', '-cifti-palette', dscalar_file,
        dscalar_entry['palette_mode'], dscalar_file,
        dscalar_entry['palette_options']], dryrun=DRYRUN)
def create_dlabel(subject_id, mesh_settings, label_name):
    '''
    Create the dense labels that combine the two surfaces, set the meta-data and
    add them to the spec_file. They read the important options for the mesh
    from the mesh_settings
        mesh_settings   Contains settings for this Mesh
        label_name      Contains the name of the label to combine
    '''
    dlabel_file = os.path.join(mesh_settings['Folder'],
        '{}.{}.{}.dlabel.nii'.format(subject_id, label_name,
        mesh_settings['meshname']))
    left_label = label_file(subject_id, label_name, 'L', mesh_settings)
    right_label = label_file(subject_id, label_name, 'R', mesh_settings)
    # robustness fix: the original only checked the left label, so a missing
    # right label would crash wb_command; check both hemispheres
    for gii_label in (left_label, right_label):
        if not os.path.exists(gii_label):
            logger.warning("label file {} does not exist. Skipping dlabel creation."
                    "".format(gii_label))
            return
    ## combine left and right labels into a dlabel file, masked by the
    ## medial wall rois
    run(['wb_command', '-cifti-create-label', dlabel_file,
        '-left-label', left_label,'-roi-left',
        medial_wall_roi_file(subject_id, 'L', mesh_settings),
        '-right-label', right_label,'-roi-right',
        medial_wall_roi_file(subject_id, 'R', mesh_settings)], dryrun=DRYRUN)
    ## set the dlabel file metadata
    run(['wb_command', '-set-map-names', dlabel_file, '-map', '1',
        "{}_{}".format(subject_id, label_name)], dryrun=DRYRUN)
def add_dense_maps_to_spec_file(subject_id, mesh_settings,
        dscalar_types, expected_labels):
    '''add all the dlabels and the dscalars to the spec file'''
    # some meshes keep their dense maps in a shared folder
    maps_folder = mesh_settings.get('DenseMapsFolder', mesh_settings['Folder'])
    this_spec = os.path.realpath(spec_file(subject_id, mesh_settings))
    mesh_name = mesh_settings['meshname']
    for dscalar in dscalar_types:
        dscalar_path = os.path.realpath(os.path.join(maps_folder,
                '{}.{}.{}.dscalar.nii'.format(subject_id, dscalar, mesh_name)))
        run(['wb_command', '-add-to-spec-file', this_spec, 'INVALID',
            dscalar_path], dryrun=DRYRUN)
    for label_name in expected_labels:
        dlabel_file = os.path.realpath(os.path.join(maps_folder,
                "{}.{}.{}.dlabel.nii".format(subject_id, label_name,
                mesh_name)))
        # not all expected labels are created for every freesurfer version
        if not os.path.exists(dlabel_file):
            logger.debug("dlabel file {} does not exist, skipping".format(
                dlabel_file))
            continue
        run(['wb_command', '-add-to-spec-file', this_spec, 'INVALID',
            dlabel_file], dryrun=DRYRUN)
def copy_colin_flat_and_add_to_spec(subject_id, settings, mesh_settings):
    ''' Copy the colin flat atlas out of the templates folder and add it to
    the spec file, when a flat atlas exists for this mesh. '''
    for hemi, structure in (('L', 'CORTEX_LEFT'), ('R', 'CORTEX_RIGHT')):
        colin_src = os.path.join(settings.ciftify_data_dir,
                'standard_mesh_atlases',
                'colin.cerebral.{}.flat.{}.surf.gii'.format(hemi,
                mesh_settings['meshname']))
        if not os.path.exists(colin_src):
            continue
        colin_dest = surf_file(subject_id, 'flat', hemi, mesh_settings)
        link_to_template_file(settings, colin_dest, colin_src,
                os.path.basename(colin_src))
        run(['wb_command', '-add-to-spec-file', spec_file(subject_id,
            mesh_settings), structure, colin_dest], dryrun=DRYRUN)
def make_dense_map(subject_id, mesh, dscalars, expected_labels):
    '''combine per-hemisphere gifti files into cifti dense maps for one mesh
    and register everything in the mesh's spec file'''
    ## combine L and R labels into dlabel files
    for map_type in expected_labels:
        create_dlabel(subject_id, mesh, map_type)
    ## combine L and R metrics into dscalar files
    for map_name in dscalars.keys():
        create_dscalar(subject_id, mesh, dscalars[map_name])
    ## add all the dscalar and dlabel files to the spec file
    add_dense_maps_to_spec_file(subject_id, mesh,
            dscalars.keys(), expected_labels)
## Step 2.1 Working with Native Mesh #################
def copy_sphere_mesh_from_template(settings, mesh_settings):
    '''Copy the sphere of specific mesh settings out of the template and into
    subjects folder'''
    mesh_name = mesh_settings['meshname']
    for hemi, structure in (('L', 'CORTEX_LEFT'), ('R', 'CORTEX_RIGHT')):
        # the 164k template sphere uses a different naming scheme
        if mesh_name == '164k_fs_LR':
            sphere_basename = 'fsaverage.{}_LR.spherical_std.{}.' \
                'surf.gii'.format(hemi, mesh_name)
        else:
            sphere_basename = '{}.sphere.{}.surf.gii'.format(hemi, mesh_name)
        sphere_src = os.path.join(settings.ciftify_data_dir,
                'standard_mesh_atlases', sphere_basename)
        sphere_dest = surf_file(settings.subject.id, 'sphere', hemi,
                mesh_settings)
        link_to_template_file(settings, sphere_dest, sphere_src,
                sphere_basename)
        run(['wb_command', '-add-to-spec-file', spec_file(settings.subject.id,
            mesh_settings), structure, sphere_dest], dryrun=DRYRUN)
def copy_atlas_roi_from_template(settings, mesh_settings):
    '''Copy the atlas roi (roi of medial wall) for a specific mesh out of
    templates, when one exists'''
    for hemi in ('L', 'R'):
        roi_basename = '{}.atlasroi.{}.shape.gii'.format(hemi,
                mesh_settings['meshname'])
        roi_src = os.path.join(settings.ciftify_data_dir,
                'standard_mesh_atlases', roi_basename)
        if not os.path.exists(roi_src):
            continue
        ## link the template roi into the subject's folder
        roi_dest = medial_wall_roi_file(settings.subject.id, hemi,
                mesh_settings)
        link_to_template_file(settings, roi_dest, roi_src, roi_basename)
def process_native_meshes(subject, meshes, dscalars, expected_labels):
    '''build the derived surfaces and gifti maps for the native meshes'''
    logger.info(section_header("Creating midthickness, inflated and "
        "very_inflated surfaces"))
    for mesh_name in ('T1wNative', 'AtlasSpaceNative'):
        native_mesh = meshes[mesh_name]
        ## build midthickness out of the white and pial, then inflate it
        make_midthickness_surfaces(subject.id, native_mesh)
        make_inflated_surfaces(subject.id, native_mesh)
    # Convert freesurfer annotation to gifti labels and set meta-data
    logger.info(section_header("Converting Freesurfer measures to gifti"))
    for label_name in expected_labels:
        convert_freesurfer_annot(subject.id, label_name, subject.fs_folder,
            meshes['AtlasSpaceNative'])
    # Convert the other FreeSurfer surface data (sulc, curv, thickness)
    # to metric/GIFTI files; entries without 'fsname' have no freesurfer source
    for map_dict in dscalars.values():
        if 'fsname' in map_dict.keys():
            convert_freesurfer_maps(subject.id, map_dict, subject.fs_folder,
                meshes['AtlasSpaceNative'])
    medial_wall_rois_from_thickness_maps(subject.id, meshes['AtlasSpaceNative'])
def medial_wall_rois_from_thickness_maps(subject_id, mesh_settings):
    '''create an roi file by thresholding the thickness surfaces'''
    for hemi, structure in (('L', 'CORTEX_LEFT'), ('R', 'CORTEX_RIGHT')):
        roi_out = medial_wall_roi_file(subject_id, hemi, mesh_settings)
        mid_surf = surf_file(subject_id, 'midthickness', hemi, mesh_settings)
        thickness_gii = metric_file(subject_id, 'thickness', hemi,
                mesh_settings)
        ## cortex is wherever thickness is non-zero
        run(['wb_command', '-metric-math', '"(thickness > 0)"', roi_out,
            '-var', 'thickness', thickness_gii], dryrun=DRYRUN)
        ## clean the roi up by filling holes and removing islands
        run(['wb_command', '-metric-fill-holes', mid_surf, roi_out,
            roi_out], dryrun=DRYRUN)
        run(['wb_command', '-metric-remove-islands', mid_surf,
            roi_out, roi_out], dryrun=DRYRUN)
        run(['wb_command', '-set-map-names', roi_out, '-map', '1',
            '{}_{}_ROI'.format(subject_id, hemi)], dryrun=DRYRUN)
## Step 3.0 Surface Registration ##################################
def create_reg_sphere(settings, subject_id, meshes):
    '''run the surface registration(s) and return the name of the sphere
    to use for downstream resampling'''
    fs_sphere, msm_sphere = get_reg_sphere_names()
    # the freesurfer-based fs_LR registration is always run
    run_fs_reg_LR(subject_id, settings.ciftify_data_dir, settings.high_res,
        fs_sphere, meshes['AtlasSpaceNative'])
    if settings.reg_name != 'MSMSulc':
        return fs_sphere
    # MSMSulc refines the freesurfer registration when requested
    run_MSMSulc_registration(subject_id, settings.ciftify_data_dir,
            meshes, msm_sphere, fs_sphere, settings.msm_config)
    return msm_sphere
def get_reg_sphere_names():
    '''return the fixed (FS, MSMSulc) registration sphere names'''
    return 'sphere.reg.reg_LR', 'sphere.MSMSulc'
def run_fs_reg_LR(subject_id, ciftify_data_dir, high_res_mesh, reg_sphere,
        native_mesh_settings):
    ''' Copy all the template files and do the FS left to right registration

    Arguments:
        subject_id              The id of the subject being worked on
        ciftify_data_dir        Folder holding the standard_mesh_atlases
        high_res_mesh           Resolution (in k) of the high res template mesh
        reg_sphere              Name to give the concatenated output sphere
        native_mesh_settings    Mesh settings dict for the native space
    '''
    logger.info(section_header("Concatenating Freesurfer Reg with template to "
        "get fs_LR reg"))
    surface_atlas_dir = os.path.join(ciftify_data_dir, 'standard_mesh_atlases')
    for hemisphere in ['L', 'R']:
        #Concatenate FS registration to FS --> FS_LR registration
        fs_reg_sphere = surf_file(subject_id, reg_sphere, hemisphere,
                native_mesh_settings)
        # project-unproject chains the subject's sphere.reg (native -> fsaverage)
        # through the template fsaverage -> fs_LR sphere to get native -> fs_LR
        run(['wb_command', '-surface-sphere-project-unproject',
            surf_file(subject_id, 'sphere.reg', hemisphere,
                    native_mesh_settings),
            os.path.join(surface_atlas_dir, 'fs_{}'.format(hemisphere),
                'fsaverage.{0}.sphere.{1}k_fs_{0}.surf.gii'.format(
                    hemisphere, high_res_mesh)),
            os.path.join(surface_atlas_dir, 'fs_{}'.format(hemisphere),
                'fs_{0}-to-fs_LR_fsaverage.{0}_LR.spherical_std.' \
                '{1}k_fs_{0}.surf.gii'.format(hemisphere, high_res_mesh)),
            fs_reg_sphere], dryrun=DRYRUN)
        #Make FreeSurfer Registration Areal Distortion Maps
        # (compares vertex areas of the unregistered vs registered sphere)
        calc_areal_distortion_gii(
                surf_file(subject_id, 'sphere', hemisphere,
                        native_mesh_settings),
                fs_reg_sphere,
                metric_file(subject_id, 'ArealDistortion_FS', hemisphere,
                        native_mesh_settings),
                '{}_{}'.format(subject_id, hemisphere), 'FS')
def run_MSMSulc_registration(subject, ciftify_data_dir, mesh_settings,
        reg_sphere_name, FS_reg_sphere, msm_config):
    '''Run the MSMSulc surface registration, starting from the freesurfer
    fs_LR registration output.

    Arguments:
        subject             The id of the subject being worked on
        ciftify_data_dir    Folder holding the standard_mesh_atlases
        mesh_settings       Dictionary of all mesh settings dicts
        reg_sphere_name     Name to give the MSMSulc output sphere
        FS_reg_sphere       Name of the existing freesurfer-registered sphere
        msm_config          Path to the msm configuration file to use
    '''
    logger.info(section_header("Running MSMSulc surface registration"))
    native_settings = mesh_settings['AtlasSpaceNative']
    highres_settings = mesh_settings['HighResMesh']
    ## define and create a folder to hold MSMSulc reg related files.
    MSMSulc_dir = os.path.join(native_settings['Folder'], 'MSMSulc')
    ciftify.utils.make_dir(MSMSulc_dir, DRYRUN)
    for hemisphere, structure in [('L', 'CORTEX_LEFT'), ('R', 'CORTEX_RIGHT')]:
        ## prepare data for MSMSulc registration
        ## calculate and affine surface registration to FS mesh
        native_sphere = surf_file(subject, 'sphere', hemisphere, native_settings)
        fs_LR_sphere = surf_file(subject, FS_reg_sphere, hemisphere, native_settings)
        affine_mat = os.path.join(MSMSulc_dir, '{}.mat'.format(hemisphere))
        affine_rot_gii = os.path.join(MSMSulc_dir, '{}.sphere_rot.surf.gii'.format(hemisphere))
        # fit a rotation-only affine approximating the FS registration,
        # apply it, then re-normalize the result back onto a sphere
        run(['wb_command', '-surface-affine-regression',
            native_sphere, fs_LR_sphere,  affine_mat], dryrun=DRYRUN)
        run(['wb_command', '-surface-apply-affine',
            native_sphere, affine_mat, affine_rot_gii], dryrun=DRYRUN)
        run(['wb_command', '-surface-modify-sphere', '-logging', 'SEVERE',
            affine_rot_gii, "100", affine_rot_gii], dryrun=DRYRUN)
        ## run MSM with affine rotated surf at start point
        native_rot_sphere = surf_file(subject, 'sphere.rot', hemisphere, native_settings)
        refsulc_metric = os.path.join(ciftify_data_dir,
                'standard_mesh_atlases',
                '{}.refsulc.{}.shape.gii'.format(hemisphere,
                highres_settings['meshname']))
        run(['cp', affine_rot_gii, native_rot_sphere], dryrun=DRYRUN)
        # msm writes its outputs relative to the working directory, so cd in
        if not DRYRUN:
            with cd(MSMSulc_dir):
                run(['msm', '--conf={}'.format(msm_config),
                    '--inmesh={}'.format(native_rot_sphere),
                    '--refmesh={}'.format(surf_file(subject, 'sphere', hemisphere,
                            highres_settings)),
                    '--indata={}'.format(metric_file(subject, 'sulc', hemisphere,
                            native_settings)),
                    '--refdata={}'.format(refsulc_metric),
                    '--out={}'.format(os.path.join(MSMSulc_dir,
                            '{}.'.format(hemisphere)))], dryrun=DRYRUN)
        # keep a copy of the config used alongside the msm log
        conf_log = os.path.join(MSMSulc_dir, '{}.logdir'.format(hemisphere),'conf')
        run(['cp', msm_config, conf_log], dryrun=DRYRUN)
        #copy the MSMSulc outputs into Native folder and calculate Distortion
        MSMsulc_sphere = surf_file(subject, reg_sphere_name, hemisphere, native_settings)
        run(['cp', os.path.join(MSMSulc_dir, '{}.sphere.reg.surf.gii'.format(hemisphere)),
            MSMsulc_sphere], dryrun=DRYRUN)
        run(['wb_command', '-set-structure', MSMsulc_sphere, structure], dryrun=DRYRUN)
        #Make MSMSulc Registration Areal Distortion Maps
        calc_areal_distortion_gii(native_sphere, MSMsulc_sphere,
            metric_file(subject, 'ArealDistortion_MSMSulc', hemisphere, native_settings),
            '{}_{}_'.format(subject, hemisphere), '_MSMSulc')
        # also record edge distortion introduced by the registration
        run(['wb_command', '-surface-distortion',
            native_sphere, MSMsulc_sphere,
            metric_file(subject, 'EdgeDistortion_MSMSulc',hemisphere, native_settings),
            '-edge-method'], dryrun=DRYRUN)
def calc_areal_distortion_gii(sphere_pre, sphere_reg, AD_gii_out, map_prefix,
        map_postfix):
    ''' calculate Areal Distortion Map (gifti) after registration

    The map is log2 of the ratio of post- to pre-registration vertex areas.

    Arguments:
        sphere_pre    Path to the pre registration sphere (gifti)
        sphere_reg    Path to the post registration sphere (gifti)
        AD_gii_out    Path to the Areal Distortion gifti output
        map_prefix    Prefix added to the map-name meta-data
        map_postfix   Postfix added to the map-name meta-data
    '''
    with ciftify.utils.TempDir() as va_tmpdir:
        pre_va = os.path.join(va_tmpdir, 'sphere_pre_va.shape.gii')
        reg_va = os.path.join(va_tmpdir, 'sphere_reg_va.shape.gii')
        ## calculate surface vertex areas from pre and post files
        run(['wb_command', '-surface-vertex-areas', sphere_pre, pre_va],
            dryrun=DRYRUN)
        run(['wb_command', '-surface-vertex-areas', sphere_reg, reg_va],
            dryrun=DRYRUN)
        ## calculate Areal Distortion using the vertex areas:
        ## log2(registered area / original area) per vertex
        run(['wb_command', '-metric-math', '"(ln(spherereg / sphere) / ln(2))"',
            AD_gii_out, '-var', 'sphere', pre_va, '-var', 'spherereg', reg_va],
            dryrun=DRYRUN)
        ## set meta-data for the ArealDistortion files
        run(['wb_command', '-set-map-names', AD_gii_out,
            '-map', '1', '{}_Areal_Distortion_{}'.format(map_prefix,
            map_postfix)], dryrun=DRYRUN)
        run(['wb_command', '-metric-palette', AD_gii_out, 'MODE_AUTO_SCALE',
            '-palette-name', 'ROY-BIG-BL', '-thresholding',
            'THRESHOLD_TYPE_NORMAL', 'THRESHOLD_TEST_SHOW_OUTSIDE', '-1', '1'],
            dryrun=DRYRUN)
## Step 4.0 Post Registration Native Mesh #######################
def merge_subject_medial_wall_with_atlas_template(subject_id, high_res_mesh,
        meshes, reg_sphere, temp_dir):
    '''resample the atlas medial wall roi into subjects native space then
    merge with native roi'''
    native_settings = meshes['AtlasSpaceNative']
    high_res_settings = meshes['HighResMesh']
    for hemisphere in ['L', 'R']:
        ## note this roi is a temp file so I'm not using the roi_file function
        atlas_roi_native_gii = metric_file(subject_id, 'atlasroi', hemisphere,
                native_settings)
        native_roi = medial_wall_roi_file(subject_id, hemisphere,
                native_settings)
        #Ensures no zeros in atlas medial wall ROI
        ## resample the atlas roi to the native mesh; '-largest' keeps it binary
        ## (consistency fix: these runs now respect the global DRYRUN flag)
        run(['wb_command', '-metric-resample',
            medial_wall_roi_file(subject_id, hemisphere, high_res_settings),
            surf_file(subject_id, 'sphere', hemisphere, high_res_settings),
            surf_file(subject_id, reg_sphere, hemisphere, native_settings),
            'BARYCENTRIC', atlas_roi_native_gii,'-largest'], dryrun=DRYRUN)
        ## merge (logical OR) the resampled atlas roi with the subject's own
        run(['wb_command', '-metric-math', '"(atlas + individual) > 0"',
            native_roi, '-var', 'atlas', atlas_roi_native_gii, '-var',
            'individual', native_roi], dryrun=DRYRUN)
def dilate_and_mask_metric(subject_id, native_mesh_settings, dscalars):
    ''' Dilate and mask gifti metric data... done after refining the medial
    roi mask'''
    ## remask the thickness and curvature data with the redefined medial wall roi
    for map_name in dscalars.keys():
        if not dscalars[map_name]['mask_medialwall']:
            continue
        for hemisphere in ['L', 'R']:
            ## dilate the thickness and curvature file by 10mm
            metric_map = metric_file(subject_id, map_name, hemisphere,
                    native_mesh_settings)
            ## consistency fix: this run now respects the global DRYRUN flag
            run(['wb_command', '-metric-dilate', metric_map,
                surf_file(subject_id, 'midthickness',hemisphere,
                        native_mesh_settings),
                '10', metric_map,'-nearest'], dryrun=DRYRUN)
            ## apply the medial wall roi to the thickness and curvature files
            run(['wb_command', '-metric-mask', metric_map,
                medial_wall_roi_file(subject_id, hemisphere,
                        native_mesh_settings),
                metric_map], dryrun=DRYRUN)
## Step 4.1 Resampling Mesh to other Spaces #######################
def populate_low_res_spec_file(source_mesh, dest_mesh, subject, settings,
        sphere, expected_labels):
    '''copy the template pieces for a low-res mesh, then resample the
    subject's surfaces and maps into it'''
    # bring in the template roi, sphere and flat atlas first
    copy_atlas_roi_from_template(settings, dest_mesh)
    copy_sphere_mesh_from_template(settings, dest_mesh)
    copy_colin_flat_and_add_to_spec(subject.id, settings, dest_mesh)
    # then resample everything from the source mesh into this space
    deform_to_native(source_mesh, dest_mesh, settings.dscalars,
            expected_labels, subject.id, sphere, scale=0.75)
def deform_to_native(native_mesh, dest_mesh, dscalars, expected_labels,
        subject_id, sphere='sphere', scale=2.5):
    '''resample the surfaces, scalars and labels to a new mesh, then build
    that mesh's dense cifti maps'''
    # surfaces first: resampled anatomical surfaces plus fresh inflations
    resample_surfs_and_add_to_spec(subject_id, native_mesh, dest_mesh,
            current_sphere=sphere)
    make_inflated_surfaces(subject_id, dest_mesh, iterations_scale=scale)
    # then the per-hemisphere data, combined into dense maps at the end
    resample_metric_and_label(subject_id, dscalars, expected_labels,
            native_mesh, dest_mesh, sphere)
    make_dense_map(subject_id, dest_mesh, dscalars, expected_labels)
def resample_surfs_and_add_to_spec(subject_id, source_mesh, dest_mesh,
        current_sphere='sphere', dest_sphere='sphere', current_sphere_mesh=None):
    '''
    Resample surface files and add them to the resampled spaces spec file
    uses wb_command -surface-resample with BARYCENTRIC method

    Arguments:
        source_mesh           Dictionary of Settings for current mesh
        dest_mesh             Dictionary of Settings for destination (output) mesh
        current_sphere        Name of the registration sphere in the source space
        dest_sphere           Name of the registration sphere in the dest space
        current_sphere_mesh   Mesh settings to read the current sphere from
                              (defaults to source_mesh)
    '''
    if not current_sphere_mesh:
        current_sphere_mesh = source_mesh
    for surface in ['white', 'midthickness', 'pial']:
        for hemisphere, structure in [('L','CORTEX_LEFT'), ('R','CORTEX_RIGHT')]:
            surf_in = surf_file(subject_id, surface, hemisphere, source_mesh)
            surf_out = surf_file(subject_id, surface, hemisphere, dest_mesh)
            current_sphere_surf = surf_file(subject_id, current_sphere,
                    hemisphere, current_sphere_mesh)
            dest_sphere_surf = surf_file(subject_id, dest_sphere, hemisphere,
                    dest_mesh)
            ## consistency fix: these runs now respect the global DRYRUN flag,
            ## matching every other run() call in this file
            run(['wb_command', '-surface-resample', surf_in,
                current_sphere_surf, dest_sphere_surf, 'BARYCENTRIC',
                surf_out], dryrun=DRYRUN)
            run(['wb_command', '-add-to-spec-file',
                spec_file(subject_id, dest_mesh), structure, surf_out],
                dryrun=DRYRUN)
def resample_and_mask_metric(subject_id, dscalar, hemisphere, source_mesh,
        dest_mesh, current_sphere='sphere', dest_sphere='sphere'):
    '''
    Resample the metric files to a different mesh and then mask out the medial
    wall. Uses wb_command -metric-resample with 'ADAP_BARY_AREA' method.
    To remove masking steps the roi can be set to None

    Arguments:
        dscalar           Dscalar specific settings (e.g. 'sulc',
                          'thickness', etc.)
        source_mesh       Settings for current mesh
        dest_mesh         Settings for destination (output) mesh
    '''
    map_name = dscalar['mapname']
    metric_in = metric_file(subject_id, map_name, hemisphere, source_mesh)
    metric_out = metric_file(subject_id, map_name, hemisphere, dest_mesh)
    current_midthickness = surf_file(subject_id, 'midthickness', hemisphere,
            source_mesh)
    new_midthickness = surf_file(subject_id, 'midthickness', hemisphere,
            dest_mesh)
    current_sphere_surf = surf_file(subject_id, current_sphere, hemisphere,
            source_mesh)
    dest_sphere_surf = surf_file(subject_id, dest_sphere, hemisphere,
            dest_mesh)
    ## consistency fix: the resample runs now respect the global DRYRUN flag,
    ## matching every other run() call in this file
    if dscalar['mask_medialwall']:
        ## resample within the medial wall roi, then re-mask the output
        run(['wb_command', '-metric-resample', metric_in, current_sphere_surf,
            dest_sphere_surf, 'ADAP_BARY_AREA', metric_out,
            '-area-surfs', current_midthickness, new_midthickness,
            '-current-roi', medial_wall_roi_file(subject_id, hemisphere,
            source_mesh)], dryrun=DRYRUN)
        run(['wb_command', '-metric-mask', metric_out,
            medial_wall_roi_file(subject_id, hemisphere, dest_mesh), metric_out],
            dryrun=DRYRUN)
    else:
        run(['wb_command', '-metric-resample', metric_in, current_sphere_surf,
            dest_sphere_surf, 'ADAP_BARY_AREA', metric_out,
            '-area-surfs', current_midthickness, new_midthickness],
            dryrun=DRYRUN)
def resample_label(subject_id, label_name, hemisphere, source_mesh, dest_mesh,
        current_sphere='sphere', dest_sphere='sphere'):
    '''
    Resample label files if they exist. Uses wb_command -label-resample with
    BARYCENTRIC method

    Arguments:
        label_name        Name of label to resample (i.e 'aparc')
        hemisphere        hemisphere of label to resample ('L' or 'R')
        source_mesh       Settings for current mesh
        dest_mesh         Settings for destination (output) mesh
        current_sphere    The name (default 'sphere') of the current
                          registration surface
        dest_sphere       The name (default 'sphere') of the dest
                          registration surface
    '''
    label_in = label_file(subject_id, label_name, hemisphere, source_mesh)
    if os.path.exists(label_in):
        ## consistency fix: this run now respects the global DRYRUN flag
        run(['wb_command', '-label-resample', label_in,
            surf_file(subject_id, current_sphere, hemisphere, source_mesh),
            surf_file(subject_id, dest_sphere, hemisphere, dest_mesh),
            'BARYCENTRIC',
            label_file(subject_id, label_name, hemisphere, dest_mesh),
            '-largest'], dryrun=DRYRUN)
def resample_to_native(native_mesh, dest_mesh, settings, subject_id,
        sphere, expected_labels, reg_sphere_mesh):
    '''resample the anatomical surfaces into the destination mesh and
    register its surfaces and dense maps in the spec file'''
    copy_sphere_mesh_from_template(settings, dest_mesh)
    # resample surfaces using the registration sphere from reg_sphere_mesh
    resample_surfs_and_add_to_spec(subject_id, native_mesh, dest_mesh,
            current_sphere=sphere, current_sphere_mesh=reg_sphere_mesh)
    make_inflated_surfaces(subject_id, dest_mesh, iterations_scale=0.75)
    # the dense maps already exist elsewhere; just add them to the spec file
    add_dense_maps_to_spec_file(subject_id, dest_mesh,
            settings.dscalars.keys(), expected_labels)
def resample_metric_and_label(subject_id, dscalars, expected_labels,
        source_mesh, dest_mesh, current_sphere):
    '''resample all the metric (dscalar) and label data to the new mesh'''
    for hemi in ('L', 'R'):
        ## resample the metric data to the new mesh
        for dscalar_entry in dscalars.values():
            resample_and_mask_metric(subject_id, dscalar_entry, hemi,
                    source_mesh, dest_mesh, current_sphere=current_sphere)
        ## resample all the label data to the new mesh
        for label_name in expected_labels:
            resample_label(subject_id, label_name, hemi, source_mesh,
                    dest_mesh, current_sphere=current_sphere)
## The main function ################################################
def main():
    """Parse CLI arguments, configure logging, then run ciftify_recon_all."""
    global DRYRUN
    arguments = docopt(__doc__)
    verbose = arguments['--verbose']
    debug = arguments['--debug']
    DRYRUN = arguments['--dry-run']
    global N_CPUS
    global FS_LICENSE
    # Console handler: warnings by default, raised to INFO/DEBUG by flags.
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    if verbose:
        ch.setLevel(logging.INFO)
    if debug:
        ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # Get settings, and add an extra handler for the subject log
    settings = Settings(arguments)
    fh = settings.subject.get_subject_log_handler(formatter)
    logger.addHandler(fh)
    # 2018-04 commenting out T2 settings as T2 output from freesurfer are much poorer than HCPPipelines
    # if arguments['--T2'] and not settings.use_T2:
    #     logger.error("Cannot locate T2 for {} in freesurfer "
    #         "outputs".format(settings.subject.id))
    # Publish run-wide settings through the module-level globals.
    N_CPUS = settings.n_cpus
    FS_LICENSE = settings.fs_license
    logger.info(ciftify.utils.ciftify_logo())
    logger.info(section_header("Starting cifti_recon_all"))
    # Intermediate files live in a temp dir that is cleaned up on exit.
    with ciftify.utils.TempDir() as tmpdir:
        logger.info('Creating tempdir:{} on host:{}'.format(tmpdir,
                    os.uname()[1]))
        run_ciftify_recon_all(tmpdir, settings)
# Standard entry-point guard: run main() only when executed as a script.
if __name__ == '__main__':
    main()
| 48.398472 | 154 | 0.652235 |
1663f37ef1696bbc15c2e8e5ed865d76748de6f0 | 1,452 | py | Python | aliyun-python-sdk-vcs/aliyunsdkvcs/request/v20200515/GetInventoryRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-vcs/aliyunsdkvcs/request/v20200515/GetInventoryRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-vcs/aliyunsdkvcs/request/v20200515/GetInventoryRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvcs.endpoint import endpoint_data
class GetInventoryRequest(RpcRequest):
	"""RPC request wrapper for the Vcs ``GetInventory`` API (version 2020-05-15)."""

	def __init__(self):
		RpcRequest.__init__(self, 'Vcs', '2020-05-15', 'GetInventory','vcs')
		self.set_method('POST')
		# Fill endpoint data only when the base class exposes these hooks.
		if hasattr(self, "endpoint_map"):
			self.endpoint_map = endpoint_data.getEndpointMap()
		if hasattr(self, "endpoint_regional"):
			self.endpoint_regional = endpoint_data.getEndpointRegional()

	def get_CommodityCode(self):
		"""Return the CommodityCode body parameter (None when unset)."""
		body = self.get_body_params()
		return body.get('CommodityCode')

	def set_CommodityCode(self,CommodityCode):
		"""Set the CommodityCode body parameter."""
		self.add_body_params('CommodityCode', CommodityCode)
89eb84c329b74fc4079dba02f2ea62458e13d5c2 | 620 | py | Python | setup.py | techlib/sklady | 11475005f2544d3d0e0159546eb73d9cc650394d | [
"MIT"
] | null | null | null | setup.py | techlib/sklady | 11475005f2544d3d0e0159546eb73d9cc650394d | [
"MIT"
] | null | null | null | setup.py | techlib/sklady | 11475005f2544d3d0e0159546eb73d9cc650394d | [
"MIT"
] | null | null | null | #!/usr/bin/python3 -tt
from setuptools import setup
import os.path
setup(
    name = 'sklady',
    version = '1',
    author = 'NTK',
    description = ('Tool for getting status of supplies in storage'),
    license = 'MIT',
    keywords = 'storage supplies database',
    url = 'http://github.com/techlib/sklady',
    include_package_data = True,
    package_data = {
        '': ['*.png', '*.js', '*.html'],
    },
    packages = [
        'sklady',
    ],
    classifiers = [
        'License :: OSI Approved :: MIT License',
    ],
    # `scripts` must be a list of paths; a bare string would be treated by
    # setuptools as an iterable of single-character script names.
    scripts = ['sklady-daemon']
)
# vim:set sw=4 ts=4 et:
# -*- coding: utf-8 -*-
| 20.666667 | 69 | 0.556452 |
ac9617ef101e6d7fd4af667ded03231e2650e2e8 | 240 | py | Python | unidesign/spatial/workflow/__init__.py | unidesigner/unidesign | ee24a7152d937d1b95c2bb67b3f050966850d50f | [
"BSD-3-Clause"
] | 4 | 2015-02-26T20:08:42.000Z | 2019-12-28T09:08:19.000Z | unidesign/spatial/workflow/__init__.py | unidesigner/unidesign | ee24a7152d937d1b95c2bb67b3f050966850d50f | [
"BSD-3-Clause"
] | null | null | null | unidesign/spatial/workflow/__init__.py | unidesigner/unidesign | ee24a7152d937d1b95c2bb67b3f050966850d50f | [
"BSD-3-Clause"
] | 1 | 2019-12-28T09:08:23.000Z | 2019-12-28T09:08:23.000Z | """
Workflows
---------
Basic units are the neuronal morphologies
1) Compute statistical measures
2) Create a partition of the data
3) Identify the partitions (e.g. cell classes, species etc.)
4) Classify a new neuron based on database
""" | 24 | 60 | 0.7375 |
0a7316c1d847f519e58bfe05b125aefd7acac85c | 614 | py | Python | metaci/build/migrations/0007_auto_20170505_1607.py | abhishekalgo/metaci | cd62473b3fb85fb0f39623f9fb2850993ff708a5 | [
"BSD-3-Clause"
] | 48 | 2018-10-24T14:52:06.000Z | 2022-03-25T21:14:50.000Z | metaci/build/migrations/0007_auto_20170505_1607.py | abhishekalgo/metaci | cd62473b3fb85fb0f39623f9fb2850993ff708a5 | [
"BSD-3-Clause"
] | 2,034 | 2018-10-31T20:59:16.000Z | 2022-03-22T21:38:03.000Z | metaci/build/migrations/0007_auto_20170505_1607.py | abhishekalgo/metaci | cd62473b3fb85fb0f39623f9fb2850993ff708a5 | [
"BSD-3-Clause"
] | 27 | 2018-12-24T18:16:23.000Z | 2021-12-15T17:57:27.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-05-05 16:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('build', '0006_build_commit_message'),
]
operations = [
migrations.AlterField(
model_name='rebuild',
name='status',
field=models.CharField(choices=[('queued', 'Queued'), ('waiting', 'Waiting'), ('running', 'Running'), ('success', 'Success'), ('error', 'Error'), ('fail', 'Failed')], default='queued', max_length=16),
),
]
| 29.238095 | 212 | 0.607492 |
49777aa673e7e25c3512efb1f43265cd93785620 | 233 | py | Python | ex052.py | jgabriel1607/Python | d6b75519eb8f0d4fef944e1690ba8914d81a5d16 | [
"MIT"
] | null | null | null | ex052.py | jgabriel1607/Python | d6b75519eb8f0d4fef944e1690ba8914d81a5d16 | [
"MIT"
] | null | null | null | ex052.py | jgabriel1607/Python | d6b75519eb8f0d4fef944e1690ba8914d81a5d16 | [
"MIT"
] | null | null | null | print('Vamos verificar se um número é primo: ')
n = int(input('Digite um número: '))
# A prime has exactly two divisors: 1 and itself. The previous test
# (n % 1 == 0 and n % n == 0) was always true, so every number was
# reported as prime; count the divisors instead.
divisores = 0
for d in range(1, n + 1):
    if n % d == 0:
        divisores += 1
if n > 1 and divisores == 2:
    print('O número {} é um número primo.'.format(n))
else:
    print('O número {} não é um número primo.'.format(n))
| 33.285714 | 57 | 0.609442 |
c1be9f62b9d0cd913d47ce5f36e5eb61f1c9ad62 | 1,801 | py | Python | adctest/pages/base_landing.py | pysalt/e2e-test | 692339dee485df330f623aa0c05954494586c5ad | [
"MIT"
] | null | null | null | adctest/pages/base_landing.py | pysalt/e2e-test | 692339dee485df330f623aa0c05954494586c5ad | [
"MIT"
] | null | null | null | adctest/pages/base_landing.py | pysalt/e2e-test | 692339dee485df330f623aa0c05954494586c5ad | [
"MIT"
] | null | null | null | from adctest.helpers.exceptions import PageNotOpened
from adctest.helpers.utils import get_param_from_url
from adctest.pages.base_abstract import AbstractBasePage
class BaseLandingPage(AbstractBasePage):
    """Page object for a landing / pre-landing page opened by URL."""

    # URL the landing page is opened at.
    page_url = None
    """Адрес, на котором открывается лендинг"""
    # Query-string parameter name used to verify the right page was opened.
    check_param = None
    """Имя параметра, по которому проверяем, что оказались на правильной странице"""

    def __init__(self, page_url: str, fresh_session: bool = False, open_page: bool = True):
        """
        :param page_url: URL of the landing / pre-landing page
        :param fresh_session: whether to reset the current browser session (delete cookies etc.)
        :param open_page: if False, the object is created without opening the page in the browser
        """
        super().__init__(fresh_session=fresh_session)
        self.page_url = page_url
        if open_page:
            self.open()

    def open(self):
        # Navigate to page_url, then verify the expected page actually opened.
        self._open(url=self.page_url)
        self.check_opened()

    def check_opened(self):
        # Compare the check_param value of the URL actually opened with the
        # expected one from page_url; raise when they differ or are missing.
        # NOTE(review): check_param defaults to None — presumably subclasses
        # set it before open() is called; confirm against subclass usage.
        param = get_param_from_url(self.opened_url, self.check_param)
        expected_param = get_param_from_url(self.page_url, self.check_param)
        if param and expected_param and param[0] == expected_param[0]:
            return
        raise PageNotOpened(f'Get attr of {type(self).__name__}, but current url: {self.opened_url}')

    def wait_page_loaded(self):
        # Landing pages have no loader; overridden to do nothing.
        pass

    def wait_loader_not_visible(self):
        # No loader element on landing pages; intentionally a no-op.
        pass

    def wait_tableloader_not_visible(self):
        # No table loader element on landing pages; intentionally a no-op.
        pass

    def validate_domain(self, name: str):
        """
        Check that the current page is opened on domain *name*.

        :param name: domain name (substring match against the opened URL)
        :return: None; raises PageNotOpened on mismatch
        """
        if name not in self.opened_url:
            raise PageNotOpened(f'Expect domain {name}, but current url: {self.opened_url}')
288dca996bf06376b2988dac21f41cd817709da4 | 4,611 | py | Python | tests/test_clusters/test_serializers.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | tests/test_clusters/test_serializers.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | tests/test_clusters/test_serializers.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | import pytest
from api.clusters.serializers import ClusterSerializer
from api.nodes.serializers import ClusterNodeDetailSerializer, ClusterNodeSerializer, GPUSerializer
from db.models.clusters import Cluster
from db.models.nodes import ClusterNode, NodeGPU
from factories.factory_clusters import ClusterNodeFactory, GPUFactory
from tests.utils import BaseTest
@pytest.mark.clusters_mark
class TestGPUSerializer(BaseTest):
    """Checks GPUSerializer output for single and multiple NodeGPU objects."""
    serializer_class = GPUSerializer
    model_class = NodeGPU
    factory_class = GPUFactory

    expected_keys = {'uuid', 'cluster_node', 'serial', 'name', 'index', 'memory', 'updated_at',
                     'created_at', }

    def setUp(self):
        super().setUp()
        node = ClusterNodeFactory(cluster=Cluster.load())
        self.obj1 = self.factory_class(cluster_node=node)
        self.obj2 = self.factory_class(cluster_node=node)

    def test_serialize_one(self):
        serialized = self.serializer_class(self.obj1).data
        assert set(serialized.keys()) == self.expected_keys
        # uuid fields are rendered as hex strings
        assert serialized.pop('uuid') == self.obj1.uuid.hex
        assert serialized.pop('cluster_node') == self.obj1.cluster_node.uuid.hex
        # timestamps are not compared directly
        serialized.pop('created_at')
        serialized.pop('updated_at')
        # everything left mirrors a model attribute of the same name
        for field_name, field_value in serialized.items():
            assert getattr(self.obj1, field_name) == field_value

    def test_serialize_many(self):
        serialized = self.serializer_class(self.model_class.objects.all(), many=True).data
        assert len(serialized) == 2
        assert all(set(item.keys()) == self.expected_keys for item in serialized)
@pytest.mark.clusters_mark
class TestClusterNodeSerializer(BaseTest):
    """Checks ClusterNodeSerializer output for single and multiple nodes."""
    serializer_class = ClusterNodeSerializer
    model_class = ClusterNode
    factory_class = ClusterNodeFactory

    expected_keys = {'uuid', 'sequence', 'name', 'hostname', 'role', 'memory', 'cpu', 'n_gpus', }

    def setUp(self):
        super().setUp()
        self.obj1 = self.factory_class(cluster=Cluster.load())
        self.obj2 = self.factory_class(cluster=Cluster.load())

    def test_serialize_one(self):
        serialized = self.serializer_class(self.obj1).data
        assert set(serialized.keys()) == self.expected_keys
        # uuid is rendered as a hex string
        assert serialized.pop('uuid') == self.obj1.uuid.hex
        # every remaining key mirrors a model attribute of the same name
        for field_name, field_value in serialized.items():
            assert getattr(self.obj1, field_name) == field_value

    def test_serialize_many(self):
        serialized = self.serializer_class(self.model_class.objects.all(), many=True).data
        assert len(serialized) == 2
        assert all(set(item.keys()) == self.expected_keys for item in serialized)
@pytest.mark.clusters_mark
class TestClusterNodeDetailsSerializer(BaseTest):
    """Tests for ClusterNodeDetailSerializer, including nested GPU data."""
    serializer_class = ClusterNodeDetailSerializer
    model_class = ClusterNode

    # Full key set expected in the detail serializer output.
    expected_keys = {'uuid', 'name', 'hostname', 'role', 'docker_version',
                     'kubelet_version', 'os_image', 'kernel_version',
                     'schedulable_taints', 'schedulable_state', 'is_current',
                     'memory', 'cpu', 'n_gpus', 'status', 'gpus', 'sequence'}

    def setUp(self):
        super().setUp()
        # Two nodes, each with exactly one GPU attached.
        self.cluster = Cluster.load()
        self.obj1 = ClusterNodeFactory(cluster=self.cluster)
        self.obj2 = ClusterNodeFactory(cluster=self.cluster)
        self.gpu_obj1 = GPUFactory(cluster_node=self.obj1)
        self.gpu_obj2 = GPUFactory(cluster_node=self.obj2)

    def test_serialize_one(self):
        data = self.serializer_class(self.obj1).data
        assert set(data.keys()) == self.expected_keys
        # uuid is serialized as a hex string; 'gpus' nests the one GPU.
        assert data.pop('uuid') == self.obj1.uuid.hex
        assert len(data.pop('gpus')) == 1
        # Remaining keys mirror model attributes of the same name.
        for k, v in data.items():
            assert getattr(self.obj1, k) == v

    def test_serialize_many(self):
        data = self.serializer_class(self.model_class.objects.all(), many=True).data
        assert len(data) == 2
        for d in data:
            assert set(d.keys()) == self.expected_keys
@pytest.mark.clusters_mark
class TestClusterDetailSerializer(BaseTest):
    """Tests for ClusterSerializer, including its nested node list."""
    serializer_class = ClusterSerializer
    model_class = Cluster

    expected_keys = {'uuid', 'version_api', 'created_at', 'updated_at', 'nodes', }

    def setUp(self):
        super().setUp()
        # One cluster with two attached nodes.
        self.cluster = Cluster.load()
        ClusterNodeFactory(cluster=self.cluster)
        ClusterNodeFactory(cluster=self.cluster)

    def test_serialize_one(self):
        data = self.serializer_class(self.cluster).data
        assert set(data.keys()) == self.expected_keys
        # 'nodes' nests both attached nodes; uuid is a hex string.
        assert len(data.pop('nodes')) == 2
        assert data.pop('uuid') == self.cluster.uuid.hex
        # Timestamps are not compared directly.
        data.pop('created_at')
        data.pop('updated_at')
        # Remaining keys mirror model attributes of the same name.
        for k, v in data.items():
            assert getattr(self.cluster, k) == v
| 35.744186 | 99 | 0.655823 |
f7fd7cd4894f14adfd39d9d3acfd511035572d38 | 27,769 | py | Python | examples/inc/pytorch/multiple-choice/run_swag.py | michaelbenayoun/optimum | 21c5809577e2ef5687f293d31d1d3e28288e1bb7 | [
"Apache-2.0"
] | null | null | null | examples/inc/pytorch/multiple-choice/run_swag.py | michaelbenayoun/optimum | 21c5809577e2ef5687f293d31d1d3e28288e1bb7 | [
"Apache-2.0"
] | null | null | null | examples/inc/pytorch/multiple-choice/run_swag.py | michaelbenayoun/optimum | 21c5809577e2ef5687f293d31d1d3e28288e1bb7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for multiple choice.
"""
# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional, Union
import datasets
import numpy as np
import torch
import transformers
from datasets import load_dataset
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.fx import symbolic_trace
import yaml
from optimum.intel.neural_compressor import (
IncOptimizer,
IncPruner,
IncPruningConfig,
IncQuantizationConfig,
IncQuantizationMode,
IncQuantizer,
IncTrainer,
)
from optimum.intel.neural_compressor.quantization import IncQuantizedModelForMultipleChoice
from optimum.intel.neural_compressor.utils import CONFIG_NAME
# Hide all CUDA devices so the script runs on CPU.
os.environ["CUDA_VISIBLE_DEVICES"] = ""

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.12.0")

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # The only required field: everything else defaults to the model's values.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
@dataclass
class OptimizationArguments:
    """
    Arguments pertaining to what type of optimization we are going to apply on the model.
    """

    quantize: bool = field(
        default=False,
        metadata={"help": "Whether or not to apply quantization."},
    )
    quantization_approach: Optional[str] = field(
        default=None,
        metadata={"help": "Quantization approach. Supported approach are static, dynamic and aware_training."},
    )
    prune: bool = field(
        default=False,
        metadata={"help": "Whether or not to apply pruning."},
    )
    target_sparsity: Optional[float] = field(
        default=None,
        metadata={"help": "Targeted sparsity when pruning the model."},
    )
    # When None, a default YAML shipped with the examples is used instead.
    quantization_config: Optional[str] = field(
        default=None,
        metadata={
            "help": "Path to the directory containing the YAML configuration file used to control the quantization and "
            "tuning behavior."
        },
    )
    pruning_config: Optional[str] = field(
        default=None,
        metadata={
            "help": "Path to the directory containing the YAML configuration file used to control the pruning behavior."
        },
    )
    tune_metric: str = field(
        default="eval_accuracy",
        metadata={"help": "Metric used for the tuning strategy."},
    )
    perf_tol: Optional[float] = field(
        default=None,
        metadata={"help": "Performance tolerance when optimizing the model."},
    )
    verify_loading: bool = field(
        default=False,
        metadata={"help": "Whether or not to verify the loading of the quantized model."},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "The maximum total input sequence length after tokenization. If passed, sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to the maximum sentence length. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
            "efficient on GPU but very bad for TPU."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )

    def __post_init__(self):
        # Validate file extensions eagerly so a bad path fails at argument
        # parsing time. Raise ValueError instead of `assert`, which would be
        # silently stripped when Python runs with -O.
        for arg_name, file_path in (("train_file", self.train_file),
                                    ("validation_file", self.validation_file)):
            if file_path is not None:
                extension = file_path.split(".")[-1]
                if extension not in ["csv", "json"]:
                    raise ValueError(f"`{arg_name}` should be a csv or a json file.")
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads multiple-choice inputs.

    Each incoming feature holds ``num_choices`` tokenized sequences plus a
    label. The collator flattens every (example, choice) pair into one list,
    pads them together with ``tokenizer.pad``, reshapes the result back to
    ``(batch_size, num_choices, seq_len)``, and re-attaches the labels.

    Args:
        tokenizer: the tokenizer used for encoding/padding the data.
        padding: padding strategy forwarded to ``tokenizer.pad`` (bool,
            string, or :class:`~transformers.file_utils.PaddingStrategy`).
        max_length: optional maximum length used when padding to a fixed size.
        pad_to_multiple_of: optional multiple to pad sequence lengths to
            (useful for Tensor Cores on NVIDIA hardware).
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_key = "label" if "label" in features[0].keys() else "labels"
        labels = [example.pop(label_key) for example in features]
        n_examples = len(features)
        n_choices = len(features[0]["input_ids"])
        # Flatten every (example, choice) pair into a single list of encodings.
        flat = []
        for example in features:
            for choice_idx in range(n_choices):
                flat.append({name: values[choice_idx] for name, values in example.items()})
        padded = self.tokenizer.pad(
            flat,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Restore the (batch, choice, seq) layout and re-attach the labels.
        padded = {name: tensor.view(n_examples, n_choices, -1) for name, tensor in padded.items()}
        padded["labels"] = torch.tensor(labels, dtype=torch.int64)
        return padded
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, optim_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args, optim_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset("swag", "regular", cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
# Flatten out
first_sentences = sum(first_sentences, [])
second_sentences = sum(second_sentences, [])
# Tokenize
tokenized_examples = tokenizer(
first_sentences,
second_sentences,
truncation=True,
max_length=max_seq_length,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Metric
def compute_metrics(eval_predictions):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1)
return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
trainer = IncTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
eval_dataloader = trainer.get_eval_dataloader()
it = iter(eval_dataloader)
try:
input_names = next(it).keys()
except StopIteration:
input_names = None
logger.warning(
"Unable to determine the names of the inputs of the model to trace, input_names is set to None and "
"model.dummy_inputs().keys() will be used instead."
)
resume_from_checkpoint = training_args.resume_from_checkpoint
metric_name = optim_args.tune_metric
def take_eval_steps(model, trainer, metric_name, save_metrics=False):
trainer.model = model
metrics = trainer.evaluate()
if save_metrics:
trainer.save_metrics("eval", metrics)
logger.info("{}: {}".format(metric_name, metrics.get(metric_name)))
logger.info("Throughput: {} samples/sec".format(metrics.get("eval_samples_per_second")))
return metrics.get(metric_name)
def eval_func(model):
return take_eval_steps(model, trainer, metric_name)
def take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint):
trainer.model_wrapped = model
trainer.model = model
checkpoint = None
if resume_from_checkpoint is not None:
checkpoint = resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(pruner, resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
def train_func(model):
    """Training callback handed to IncQuantizer/IncPruner.

    Closes over `trainer`, `resume_from_checkpoint` and `last_checkpoint`
    (the latter is defined earlier in the enclosing function).
    """
    return take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint)
quantizer = None
pruner = None
num_choices = len(eval_dataset[0]["input_ids"])
if not optim_args.quantize and not optim_args.prune:
raise ValueError("quantize and prune are both set to False.")
result_baseline_model = take_eval_steps(model, trainer, metric_name)
default_config = os.path.join(os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir)), "config")
if optim_args.quantize:
if not training_args.do_eval:
raise ValueError("do_eval must be set to True for quantization.")
q8_config = IncQuantizationConfig.from_pretrained(
optim_args.quantization_config if optim_args.quantization_config is not None else default_config,
config_file_name="quantization.yml",
cache_dir=model_args.cache_dir,
)
# Set metric tolerance if specified
if optim_args.perf_tol is not None:
q8_config.set_tolerance(optim_args.perf_tol)
# Set quantization approach if specified
if optim_args.quantization_approach is not None:
supported_approach = {"static", "dynamic", "aware_training"}
if optim_args.quantization_approach not in supported_approach:
raise ValueError(
"Unknown quantization approach. Supported approach are " + ", ".join(supported_approach)
)
quant_approach = getattr(IncQuantizationMode, optim_args.quantization_approach.upper()).value
q8_config.set_config("quantization.approach", quant_approach)
# torch FX used for post-training quantization and quantization aware training
# dynamic quantization will be added when torch FX is more mature
if q8_config.get_config("quantization.approach") != IncQuantizationMode.DYNAMIC.value:
if not training_args.do_train:
raise ValueError("do_train must be set to True for static and aware training quantization.")
# TODO : Remove when dynamic axes support
if (
not training_args.dataloader_drop_last
and eval_dataset.shape[0] % training_args.per_device_eval_batch_size != 0
):
raise ValueError(
"The number of samples of the dataset is not a multiple of the batch size."
"Use --dataloader_drop_last to overcome."
)
if not data_args.pad_to_max_length:
raise ValueError(
"All the samples must have the same sequence length, use --pad_to_max_length to overcome."
)
q8_config.set_config("model.framework", "pytorch_fx")
model.config.save_pretrained(training_args.output_dir)
model = symbolic_trace(
model,
input_names=input_names,
batch_size=training_args.per_device_eval_batch_size,
sequence_length=max_seq_length,
num_choices=num_choices,
)
calib_dataloader = trainer.get_train_dataloader()
inc_quantizer = IncQuantizer(
model, q8_config, eval_func=eval_func, train_func=train_func, calib_dataloader=calib_dataloader
)
quantizer = inc_quantizer.fit()
if optim_args.prune:
if not training_args.do_train:
raise ValueError("do_train must be set to True for pruning.")
pruning_config = IncPruningConfig.from_pretrained(
optim_args.pruning_config if optim_args.pruning_config is not None else default_config,
config_file_name="prune.yml",
cache_dir=model_args.cache_dir,
)
# Set targeted sparsity if specified
if optim_args.target_sparsity is not None:
pruning_config.set_config(
"pruning.approach.weight_compression.target_sparsity", optim_args.target_sparsity
)
pruning_start_epoch = pruning_config.get_config("pruning.approach.weight_compression.start_epoch")
pruning_end_epoch = pruning_config.get_config("pruning.approach.weight_compression.end_epoch")
if pruning_start_epoch > training_args.num_train_epochs - 1:
logger.warning(
f"Pruning end epoch {pruning_start_epoch} is higher than the total number of training epoch "
f"{training_args.num_train_epochs}. No pruning will be applied."
)
if pruning_end_epoch > training_args.num_train_epochs - 1:
logger.warning(
f"Pruning end epoch {pruning_end_epoch} is higher than the total number of training epoch "
f"{training_args.num_train_epochs}. The target sparsity will not be reached."
)
inc_pruner = IncPruner(model, pruning_config, eval_func=eval_func, train_func=train_func)
# Creation Pruning object used for IncTrainer training loop
pruner = inc_pruner.fit()
inc_optimizer = IncOptimizer(model, quantizer=quantizer, pruner=pruner)
opt_model = inc_optimizer.fit()
_, sparsity = opt_model.report_sparsity()
result_opt_model = take_eval_steps(opt_model.model, trainer, metric_name, save_metrics=True)
trainer.save_model(training_args.output_dir)
with open(os.path.join(training_args.output_dir, CONFIG_NAME), "w") as f:
yaml.dump(opt_model.tune_cfg, f, default_flow_style=False)
logger.info(
f"Optimized model with final sparsity of {sparsity} and {metric_name} of {result_opt_model} saved to: "
f"{training_args.output_dir}. Original model had an {metric_name} of {result_baseline_model}"
)
if optim_args.quantize and optim_args.verify_loading:
# Load the model obtained after Intel Neural Compressor (INC) quantization
loaded_model = IncQuantizedModelForMultipleChoice.from_pretrained(
training_args.output_dir,
input_names=input_names,
batch_size=training_args.per_device_eval_batch_size,
sequence_length=max_seq_length,
num_choices=num_choices,
)
loaded_model.eval()
result_loaded_model = take_eval_steps(loaded_model, trainer, metric_name)
if result_loaded_model != result_opt_model:
raise ValueError("The quantized model was not successfully loaded.")
else:
logger.info(f"The quantized model was successfully loaded.")
def _mp_fn(index):
    """Entry point for xla_spawn (TPUs). `index` is required by the spawn API
    but unused here."""
    # For xla_spawn (TPUs)
    main()
# Standard script entry point.
if __name__ == "__main__":
    main()
| 41.94713 | 132 | 0.674961 |
e0300a1be9c4173ce9e38cda9d4fd3b9b19b661f | 2,655 | py | Python | labs/lab3/udt.py | jamestiotio/networks | 8967ee34c423989ff68eec650ba6ebb492499cb4 | [
"MIT"
] | null | null | null | labs/lab3/udt.py | jamestiotio/networks | 8967ee34c423989ff68eec650ba6ebb492499cb4 | [
"MIT"
] | null | null | null | labs/lab3/udt.py | jamestiotio/networks | 8967ee34c423989ff68eec650ba6ebb492499cb4 | [
"MIT"
] | null | null | null | import collections
import config
import random
import socket
import threading
import time
class NetworkLayer:
    """Simulated unreliable network layer over a local UDP socket.

    Outgoing messages may be corrupted (one byte fully bit-flipped) or
    dropped entirely, with probabilities taken from `config`. Incoming
    packets are buffered and handed to the transport layer for delivery.
    """

    def __init__(self, local_port, remote_port, transport_layer):
        # Ports used to receive and send packets.
        self.local_port = local_port
        self.remote_port = remote_port
        # UDP socket listening on local_port for incoming packets.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.s.bind(("localhost", local_port))
        # self.s.setblocking(False)
        self.s.settimeout(0.5)  # seconds.
        # Transport layer object used for message demultiplexing.
        self.transport_layer = transport_layer
        # Bounded buffer of messages waiting to reach the transport layer.
        self.msg_buffer = collections.deque(maxlen=8)
        self.buffer_lock = threading.Lock()
        # Background thread that keeps draining the socket.
        self.stop_accept_pkt = False
        threading.Thread(target=self._packet_reader).start()

    def shutdown(self):
        """Signal the packet-reader thread to stop."""
        self.stop_accept_pkt = True

    def send(self, msg):
        """Send `msg` (bytes), possibly corrupting or dropping it first."""
        # Two independent random draws, in this order: corruption, then loss.
        if random.random() < config.BIT_ERROR_PROB:
            msg = self._random_bit_error(msg)
        if random.random() < config.MSG_LOST_PROB:
            return
        self.s.sendto(msg, ("localhost", self.remote_port))

    def recv(self):
        """Pop and return the oldest buffered message, or "" if none."""
        with self.buffer_lock:
            if self.msg_buffer:
                return self.msg_buffer.popleft()
        return ""

    def _packet_reader(self):
        """Thread body: notify the transport layer of pending messages and
        keep reading packets from the socket into the buffer."""
        while not self.stop_accept_pkt:
            # If a message is already buffered, notify the transport layer
            # instead of blocking on the socket.
            with self.buffer_lock:
                has_msg = len(self.msg_buffer) > 0
            if has_msg:
                self.transport_layer.handle_arrival_msg()
                continue
            try:
                msg, _addr = self.s.recvfrom(config.MAX_SEGMENT_SIZE)
            except socket.timeout:
                # On timeout just loop around and re-check the stop flag.
                continue
            with self.buffer_lock:
                # Drop the packet if the bounded buffer is already full.
                if len(self.msg_buffer) < self.msg_buffer.maxlen:
                    self.msg_buffer.append(msg)

    def _random_bit_error(self, msg):
        """Return a copy of `msg` with one random byte's bits all flipped."""
        idx = random.randrange(len(msg))
        flipped = bytes([msg[idx] ^ 0xFF])
        return msg[:idx] + flipped + msg[idx + 1:]
| 34.934211 | 74 | 0.60565 |
732477e67d5ecb64b0dc4a66c995c25c0c8900b0 | 6,320 | py | Python | ml/rl/workflow/ddpg_workflow.py | ccphillippi/Horizon | a18d8941f663eea55488781c804e6305a36f1b58 | [
"BSD-3-Clause"
] | null | null | null | ml/rl/workflow/ddpg_workflow.py | ccphillippi/Horizon | a18d8941f663eea55488781c804e6305a36f1b58 | [
"BSD-3-Clause"
] | null | null | null | ml/rl/workflow/ddpg_workflow.py | ccphillippi/Horizon | a18d8941f663eea55488781c804e6305a36f1b58 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import sys
from typing import Dict
from ml.rl.evaluation.evaluator import Evaluator
from ml.rl.preprocessing.normalization import (
construct_action_scale_tensor,
get_num_output_features,
)
from ml.rl.preprocessing.preprocessor import Preprocessor
from ml.rl.preprocessing.sparse_to_dense import PandasSparseToDenseProcessor
from ml.rl.readers.json_dataset_reader import JSONDatasetReader
from ml.rl.tensorboardX import summary_writer_context
from ml.rl.thrift.core.ttypes import (
ContinuousActionModelParameters,
DDPGModelParameters,
DDPGNetworkParameters,
DDPGTrainingParameters,
NormalizationParameters,
RLParameters,
)
from ml.rl.training.ddpg_trainer import ActorNetModel, CriticNetModel, DDPGTrainer
from ml.rl.training.rl_exporter import ActorExporter
from ml.rl.workflow.base_workflow import BaseWorkflow
from ml.rl.workflow.helpers import (
export_trainer_and_predictor,
minibatch_size_multiplier,
parse_args,
update_model_for_warm_start,
)
from ml.rl.workflow.preprocess_handler import (
ContinuousPreprocessHandler,
PreprocessHandler,
)
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
class ContinuousWorkflow(BaseWorkflow):
    """Workflow that builds and trains a DDPG (continuous-action) model.

    Constructs actor/critic networks from `model_params`, wraps them in a
    DDPGTrainer (with optional warm start), builds an Evaluator, and hands
    everything to the BaseWorkflow constructor.
    """

    def __init__(
        self,
        model_params: ContinuousActionModelParameters,
        preprocess_handler: PreprocessHandler,
        state_normalization: Dict[int, NormalizationParameters],
        action_normalization: Dict[int, NormalizationParameters],
        use_gpu: bool,
        use_all_avail_gpus: bool,
    ):
        logger.info("Running continuous workflow with params:")
        logger.info(model_params)
        # (Removed a dead `model_params = model_params` self-assignment.)

        # Tensors used to rescale actions between model and serving ranges.
        min_action_range_tensor_serving, max_action_range_tensor_serving = construct_action_scale_tensor(
            action_normalization, model_params.action_rescale_map
        )

        state_dim = get_num_output_features(state_normalization)
        action_dim = get_num_output_features(action_normalization)

        # Build Actor Network: hidden layers come from config; input/output
        # sizes are forced to the normalized state/action dimensions.
        actor_network = ActorNetModel(
            layers=(
                [state_dim] + model_params.actor_training.layers[1:-1] + [action_dim]
            ),
            activations=model_params.actor_training.activations,
            fl_init=model_params.shared_training.final_layer_init,
            state_dim=state_dim,
            action_dim=action_dim,
        )

        # Build Critic Network
        critic_network = CriticNetModel(
            # Ensure dims match input state and scalar output
            layers=[state_dim] + model_params.critic_training.layers[1:-1] + [1],
            activations=model_params.critic_training.activations,
            fl_init=model_params.shared_training.final_layer_init,
            state_dim=state_dim,
            action_dim=action_dim,
        )

        trainer = DDPGTrainer(
            actor_network,
            critic_network,
            model_params,
            state_normalization,
            action_normalization,
            min_action_range_tensor_serving,
            max_action_range_tensor_serving,
            use_gpu=use_gpu,
            use_all_avail_gpus=use_all_avail_gpus,
        )
        # Optionally restore from a previous run; the warm start must not
        # change the trainer type.
        trainer = update_model_for_warm_start(trainer)
        assert type(trainer) == DDPGTrainer, "Warm started wrong model type: " + str(
            type(trainer)
        )

        evaluator = Evaluator(
            None,
            model_params.rl.gamma,
            trainer,
            metrics_to_score=trainer.metrics_to_score,
        )

        super().__init__(
            preprocess_handler,
            trainer,
            evaluator,
            model_params.shared_training.minibatch_size,
        )
def _get_actor_exporter(trainer, state_normalization, action_normalization):
    """Build an ActorExporter for `trainer.actor` using the given
    state/action normalization parameters."""
    return ActorExporter.from_state_action_normalization(
        trainer.actor,
        state_normalization=state_normalization,
        action_normalization=action_normalization,
    )
def main(params):
    """End-to-end DDPG training entry point.

    Builds the model parameters and workflow from the `params` dict, trains
    on the JSON dataset, and exports the trained actor as a predictor.

    Args:
        params: configuration dict produced by parse_args (paths, epochs,
            GPU flags, and rl/actor/critic/shared training sections).

    Returns:
        The value returned by export_trainer_and_predictor.
    """
    # Set minibatch size based on # of devices being used to train
    params["shared_training"]["minibatch_size"] *= minibatch_size_multiplier(
        params["use_gpu"], params["use_all_avail_gpus"]
    )

    rl_parameters = RLParameters(**params["rl"])
    training_parameters = DDPGTrainingParameters(**params["shared_training"])
    actor_parameters = DDPGNetworkParameters(**params["actor_training"])
    critic_parameters = DDPGNetworkParameters(**params["critic_training"])

    model_params = DDPGModelParameters(
        rl=rl_parameters,
        shared_training=training_parameters,
        actor_training=actor_parameters,
        critic_training=critic_parameters,
    )

    # Normalization metadata computed offline for states and actions.
    state_normalization = BaseWorkflow.read_norm_file(params["state_norm_data_path"])
    action_normalization = BaseWorkflow.read_norm_file(params["action_norm_data_path"])

    writer = SummaryWriter(log_dir=params["model_output_path"])
    logger.info("TensorBoard logging location is: {}".format(writer.log_dir))

    preprocess_handler = ContinuousPreprocessHandler(
        Preprocessor(state_normalization, False),
        Preprocessor(action_normalization, False),
        PandasSparseToDenseProcessor(),
    )

    workflow = ContinuousWorkflow(
        model_params,
        preprocess_handler,
        state_normalization,
        action_normalization,
        params["use_gpu"],
        params["use_all_avail_gpus"],
    )

    train_dataset = JSONDatasetReader(
        params["training_data_path"], batch_size=training_parameters.minibatch_size
    )
    # NOTE(review): eval batch size is hard-coded to 16 — confirm intended.
    eval_dataset = JSONDatasetReader(params["eval_data_path"], batch_size=16)

    # Training logs emitted inside this context go to TensorBoard.
    with summary_writer_context(writer):
        workflow.train_network(train_dataset, eval_dataset, int(params["epochs"]))

    return export_trainer_and_predictor(
        workflow.trainer,
        params["model_output_path"],
        exporter=_get_actor_exporter(
            trainer=workflow.trainer,
            state_normalization=state_normalization,
            action_normalization=action_normalization,
        ),
    )  # noqa
# Script entry point: log at INFO to stdout and run with CLI-provided params.
if __name__ == "__main__":
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    params = parse_args(sys.argv)
    main(params)
| 33.978495 | 105 | 0.706171 |
65a04bd9b0c136710ee0a831b381a04b49f9b18c | 5,996 | py | Python | metamapper/wasm_util.py | rdaly525/MetaMapper | 7f8278a012435d5d05ff6b388b7c1146061d6f1c | [
"BSD-3-Clause"
] | 3 | 2019-04-09T22:58:21.000Z | 2019-08-16T18:18:12.000Z | metamapper/wasm_util.py | rdaly525/MetaMapper | 7f8278a012435d5d05ff6b388b7c1146061d6f1c | [
"BSD-3-Clause"
] | 22 | 2019-03-13T19:42:35.000Z | 2022-03-29T19:49:33.000Z | metamapper/wasm_util.py | rdaly525/MetaMapper | 7f8278a012435d5d05ff6b388b7c1146061d6f1c | [
"BSD-3-Clause"
] | 1 | 2019-04-09T05:35:52.000Z | 2019-04-09T05:35:52.000Z | from .node import Dag, Nodes, DagNode, Input, Constant, Select, Output
from .irs.wasm import gen_WasmNodes
import metamapper.wasm.interp.convention as C
import metamapper.wasm.interp as interp
from metamapper.wasm.interp.structure import Instruction
import typing as tp
import delegator
WasmNodes = gen_WasmNodes()
class Stack:
def __init__(self):
self.data = []
def __repr__(self):
return self.data.__repr__()
def add(self, e):
self.data.append(e)
def ext(self, e: tp.List):
for i in e:
self.add(i)
def pop(self):
return self.data.pop()
def len(self):
return len(self.data)
def top(self):
return self.data[-1]
def wasm_to_dag(file, fun_name):
    """Load function `fun_name` from the wasm module `file` and convert its
    instruction list into a Dag."""
    ilist, num_args = wasm_file_to_ilist(file, fun_name)
    return ilist_to_dag(num_args, ilist)
def wasm_file_to_ilist(file, fun_name):
    """Return (instruction_list, num_args) for `fun_name` in wasm file `file`.

    Loads the module with the bundled interpreter, resolves the function
    address, and extracts its raw instruction list and argument count.
    """
    vm = interp.load(file)
    func_addr = vm.func_addr(fun_name)
    func = vm.store.funcs[vm.module_instance.funcaddrs[func_addr]]
    expr_list = func.code.expr.data
    num_args = len(func.functype.args)
    return expr_list, num_args
from hwtypes import Product
from .family import fam
def ilist_to_dag(num_args, ilist: tp.List[Instruction]):
    """Symbolically execute a straight-line WASM i32 instruction list,
    producing a dataflow Dag.

    Args:
        num_args: number of i32 function arguments (become Input selects).
        ilist: flat instruction list (no control flow is modeled; `end`
            instructions are ignored).

    Returns:
        A Dag with one Input source and one Output sink ("out").

    Raises:
        ValueError: if an undefined local slot is read.
        NotImplementedError: for unsupported opcodes.
    """
    BV32 = fam().PyFamily().BitVector[32]

    def make_const(val):
        # Wrap a Python int as a constant 32-bit dag node.
        return Constant(value=BV32(val), type=BV32)

    input_t = Product.from_fields("Input", {f"in{i}": BV32 for i in range(num_args)})
    output_t = Product.from_fields("Output", {"out": BV32})
    input_node = Input(type=input_t)

    # WASM locals: the first num_args slots are the function arguments; two
    # extra scratch slots start undefined and must be written (via tee_local)
    # before being read.
    local_vars = [input_node.select(f"in{i}") for i in range(num_args)]
    local_vars.extend([None, None])

    stack = Stack()
    for inst in ilist:
        opcode = inst.code
        if opcode == C.drop:
            stack.pop()
        elif opcode == C.select:
            # WASM select: pred != 0 ? in1 : in0, built out of bit masks.
            pred = stack.pop()
            in1 = stack.pop()
            in0 = stack.pop()
            gt0 = WasmNodes.dag_nodes["i32.gt_u"](pred, make_const(0)).select("out")
            mask = WasmNodes.dag_nodes["i32.sub"](make_const(0), gt0).select("out")
            mask_n = WasmNodes.dag_nodes["i32.xor"](make_const(-1), mask).select("out")
            in1_mask = WasmNodes.dag_nodes["i32.and_"](mask, in1).select("out")
            in0_mask = WasmNodes.dag_nodes["i32.and_"](mask_n, in0).select("out")
            res = WasmNodes.dag_nodes["i32.or_"](in1_mask, in0_mask).select("out")
            stack.add(res)
        elif opcode == C.get_local:
            if local_vars[inst.immediate_arguments] is None:
                raise ValueError("Need more locals")
            stack.add(local_vars[inst.immediate_arguments])
        elif opcode == C.i32_const:
            stack.add(Constant(value=BV32(inst.immediate_arguments), type=BV32))
        elif opcode in UnaryOps:
            node = WasmNodes.dag_nodes[UnaryOps[opcode]]
            in0 = stack.pop()
            stack.add(node(in0).select("out"))
        elif opcode in BinaryOps or opcode in CompOps:
            # Comparison ops also produce an i32 result, so they are handled
            # exactly like binary arithmetic/bitwise ops.
            table = BinaryOps if opcode in BinaryOps else CompOps
            node = WasmNodes.dag_nodes[table[opcode]]
            in1 = stack.pop()
            in0 = stack.pop()
            stack.add(node(in0, in1).select("out"))
        elif opcode == C.tee_local:
            # tee_local stores the top of stack into a local and leaves the
            # value on the stack, so only the local slot is written here.
            # (Fix: removed a stray push of a node built from stale variables
            # of a previous loop iteration.)
            local_vars[inst.immediate_arguments] = stack.top()
        elif opcode == C.end:
            # Control flow would pop off the label; nothing to do here.
            # (Fix: removed a duplicate, unreachable `end` branch.)
            pass
        else:
            raise NotImplementedError(C.op_name(opcode))

    ret = stack.pop()
    assert stack.len() == 0
    output_node = Output(ret, type=output_t)
    return Dag(sources=[input_node], sinks=[output_node])
# Opcode -> WasmNodes dag-node name for one-operand i32 instructions.
UnaryOps = {
    C.i32_clz: "i32.clz",
    C.i32_ctz: "i32.ctz",
    C.i32_popcnt: "i32.popcnt",
    C.i32_eqz: "i32.eqz",
}

# Opcode -> dag-node name for two-operand i32 arithmetic/bitwise instructions.
BinaryOps = {
    C.i32_add: "i32.add",
    C.i32_sub: "i32.sub",
    C.i32_mul: "i32.mul",
    C.i32_div_s: "i32.div_s",
    C.i32_div_u: "i32.div_u",
    C.i32_rem_s: "i32.rem_s",
    C.i32_rem_u: "i32.rem_u",
    C.i32_and: "i32.and_",
    C.i32_or: "i32.or_",
    C.i32_xor: "i32.xor",
    C.i32_shl: "i32.shl",
    C.i32_shr_s: "i32.shr_s",
    C.i32_shr_u: "i32.shr_u",
    C.i32_rotl: "i32.rotl",
    C.i32_rotr: "i32.rotr",
}

#These always return i32
# Opcode -> dag-node name for two-operand i32 comparison instructions.
CompOps = {
    C.i32_eq: "i32.eq",
    C.i32_ne: "i32.ne",
    C.i32_lt_s: "i32.lt_s",
    C.i32_lt_u: "i32.lt_u",
    C.i32_le_s: "i32.le_s",
    C.i32_le_u: "i32.le_u",
    C.i32_gt_s: "i32.gt_s",
    C.i32_gt_u: "i32.gt_u",
    C.i32_ge_s: "i32.ge_s",
    C.i32_ge_u: "i32.ge_u",
}
def compile_c_to_wasm(file_name, cpath="./examples/wasm/c/", build_path="./examples/wasm/build", fname=None):
    """Compile a C source file to a wasm module via emcc/wasm2wat/wat2wasm.

    Args:
        file_name: base name (no extension) of the C file under `cpath`.
        cpath: directory containing the C source.
        build_path: directory where .wasm/.wat artifacts are written.
        fname: exported function name; defaults to "_<file_name>".

    Returns:
        Path to the generated .wasm file.
    """
    if fname is None:
        fname = f"_{file_name}"
    source_file = f"{cpath}/{file_name}.c"
    wasm_file = f"{build_path}/{file_name}.wasm"
    wat_file = f"{build_path}/{file_name}.wat"
    from sys import platform
    # BSD sed (macOS) requires an explicit (empty) backup suffix for -i.
    sed_by_platform = {"linux": "sed -i", "linux2": "sed -i", "darwin": "sed -i ''"}
    if platform not in sed_by_platform:
        raise NotImplementedError(platform)
    sed = sed_by_platform[platform]
    commands = [
        f'emcc -Os -s EXPORTED_FUNCTIONS="[\'{fname}\']" -o {wasm_file} {source_file}',
        f'wasm2wat {wasm_file} -o {wat_file}',
        # Replace any "(data ...)" entries in the wat with ")" before
        # re-assembling it.
        f'{sed} "s/[(]data.*[)]/)/g" {wat_file}',
        f'wat2wasm {wat_file} -o {wasm_file}',
    ]
    for command in commands:
        result = delegator.run(command)
        assert not result.return_code, result.out + result.err
    return wasm_file
#def peak_to_wasm_dag(WasmNodes: Nodes, CoreIRNodes: Nodes, peak_fc) -> Dag:
# raise NotImplementedError()
#
#
#def rr_from_node(nodes: Nodes, name):
# node = nodes.dag_nodes[name]
# peak_fc = nodes.peak_nodes[name]
# replace = peak_to_wasm_dag(peak_fc)
| 31.067358 | 109 | 0.600734 |
c51ee692b6d01d388ca4f039fca4c9cd5c0765fe | 787 | py | Python | Notes.py | Youngl42/model-building-lab | 36a350e6d1340b094133f5ab3cceb2443d4a86bb | [
"Apache-2.0"
] | null | null | null | Notes.py | Youngl42/model-building-lab | 36a350e6d1340b094133f5ab3cceb2443d4a86bb | [
"Apache-2.0"
] | null | null | null | Notes.py | Youngl42/model-building-lab | 36a350e6d1340b094133f5ab3cceb2443d4a86bb | [
"Apache-2.0"
] | null | null | null | =================================================================
PEP8 Coding
=================================================================
module_name, package_name, ClassName, method_name, ExceptionName, function_name, GLOBAL_CONSTANT_NAME, global_var_name, instance_var_name, function_parameter_name, local_var_name
=================================================================
Uploading to TestPyPI
=================================================================
twine upload --repository-url https://test.pypi.org/legacy/ dist/*
=================================================================
Installing from TestPyPI
=================================================================
pip install --index-url https://test.pypi.org/simple/ model-building-lab --upgrade | 49.1875 | 178 | 0.409149 |
5eb86af73d1637854c03a8fbe3bbdca4b1d5fc95 | 8,253 | py | Python | lib/xos-config/tests/test_config.py | iecedge/xos | 566617f676fedcb2602266191c755d191b37018a | [
"Apache-2.0"
] | null | null | null | lib/xos-config/tests/test_config.py | iecedge/xos | 566617f676fedcb2602266191c755d191b37018a | [
"Apache-2.0"
] | null | null | null | lib/xos-config/tests/test_config.py | iecedge/xos | 566617f676fedcb2602266191c755d191b37018a | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from xosconfig import Config
from xosconfig import Config as Config2
# Paths to the YAML fixtures used by the tests below, resolved relative to
# this file so the tests work from any working directory.
basic_conf = os.path.abspath(
    os.path.dirname(os.path.realpath(__file__)) + "/confs/basic_conf.yaml"
)
yaml_not_valid = os.path.abspath(
    os.path.dirname(os.path.realpath(__file__)) + "/confs/yaml_not_valid.yaml"
)
invalid_format = os.path.abspath(
    os.path.dirname(os.path.realpath(__file__)) + "/confs/invalid_format.yaml"
)
sample_conf = os.path.abspath(
    os.path.dirname(os.path.realpath(__file__)) + "/confs/sample_conf.yaml"
)
override_conf = os.path.abspath(
    os.path.dirname(os.path.realpath(__file__)) + "/confs/override_conf.yaml"
)
extend_conf = os.path.abspath(
    os.path.dirname(os.path.realpath(__file__)) + "/confs/extend_conf.yaml"
)
small_schema = os.path.abspath(
    os.path.dirname(os.path.realpath(__file__)) + "/schemas/small_schema.yaml"
)

# Sample service-registry fixture data (not referenced by the tests in this
# chunk; presumably used elsewhere — confirm before removing).
services_list = {"xos-ws": [], "xos-db": []}
db_service = [
    {
        "ModifyIndex": 6,
        "CreateIndex": 6,
        "Node": "0152982c3159",
        "Address": "172.19.0.2",
        "ServiceID": "0d53ce210785:frontend_xos_db_1:5432",
        "ServiceName": "xos-db",
        "ServiceTags": [],
        "ServiceAddress": "172.18.0.4",
        "ServicePort": 5432,
        "ServiceEnableTagOverride": "false",
    }
]
class XOSConfigTest(unittest.TestCase):
    """
    Testing the XOS Config Module
    """

    def setUp(self):
        # In case some other testcase in nose has left config in an unclean state
        Config.clear()

    def tearDown(self):
        # NOTE clear the config after each test
        Config.clear()

    def test_initialize_only_once(self):
        """
        [XOS-Config] Raise if initialized twice
        """
        with self.assertRaises(Exception) as e:
            Config.init(sample_conf)
            Config2.init(sample_conf)
        # str(e.exception) is used instead of e.exception.message throughout
        # this class: BaseException.message was removed in Python 3, while
        # str() of a single-argument exception is equivalent on both 2 and 3.
        self.assertEqual(str(e.exception), "[XOS-Config] Module already initialized")

    def test_config_not_initialized(self):
        """
        [XOS-Config] Raise if accessing properties without initialization
        """
        with self.assertRaises(Exception) as e:
            Config.get("database")
        self.assertEqual(
            str(e.exception), "[XOS-Config] Module has not been initialized"
        )

    def test_missing_file_exception(self):
        """
        [XOS-Config] Raise if file not found
        """
        with self.assertRaises(Exception) as e:
            Config.init("missing_conf")
        self.assertEqual(
            str(e.exception), "[XOS-Config] Config file not found at: missing_conf"
        )

    def test_yaml_not_valid(self):
        """
        [XOS-Config] Raise if yaml is not valid
        """
        with self.assertRaises(Exception) as e:
            Config.init(yaml_not_valid)
        self.assertTrue(
            str(e.exception).startswith("[XOS-Config] The config format is wrong:")
        )

    def test_invalid_format(self):
        """
        [XOS-Config] Raise if format is not valid (we expect a dictionary)
        """
        with self.assertRaises(Exception) as e:
            Config.init(invalid_format)
        self.assertEqual(
            str(e.exception),
            (
                "[XOS-Config] The config format is wrong: Schema validation failed:\n"
                " - Value '['I am', 'a yaml', 'but the', 'format is not', 'correct']' is not a dict. Value path: ''."
            ),
        )

    def test_env_override(self):
        """
        [XOS-Config] the XOS_CONFIG_FILE environment variable should override the config_file
        """
        os.environ["XOS_CONFIG_FILE"] = "env.yaml"
        with self.assertRaises(Exception) as e:
            Config.init("missing_conf")
        self.assertEqual(
            str(e.exception), "[XOS-Config] Config file not found at: env.yaml"
        )
        del os.environ["XOS_CONFIG_FILE"]

    def test_schema_override(self):
        """
        [XOS-Config] the XOS_CONFIG_SCHEMA environment variable should override the config_schema
        """
        os.environ["XOS_CONFIG_SCHEMA"] = "env-schema.yaml"
        with self.assertRaises(Exception) as e:
            Config.init(basic_conf)
        # TODO: assertRegexpMatches is a deprecated alias (removed in
        # Python 3.12); switch to assertRegex once Python 2 support is dropped.
        self.assertRegexpMatches(
            str(e.exception),
            r"\[XOS\-Config\] Config schema not found at: (.+)env-schema\.yaml",
        )
        del os.environ["XOS_CONFIG_SCHEMA"]

    def test_schema_override_usage(self):
        """
        [XOS-Config] the XOS_CONFIG_SCHEMA should be used to validate a config
        """
        os.environ["XOS_CONFIG_SCHEMA"] = small_schema
        with self.assertRaises(Exception) as e:
            Config.init(basic_conf)
        self.assertEqual(
            str(e.exception),
            (
                "[XOS-Config] The config format is wrong: Schema validation failed:\n"
                " - Key 'database' was not defined. Path: ''."
            ),
        )
        del os.environ["XOS_CONFIG_SCHEMA"]

    def test_get_cli_param(self):
        """
        [XOS-Config] Should read CLI -C param
        """
        args = ["-A", "Foo", "-c", "Bar", "-C", "config.yaml"]
        res = Config.get_cli_param(args)
        self.assertEqual(res, "config.yaml")

    def test_get_default_val_for_missing_param(self):
        """
        [XOS-Config] Should get the default value if nothing is specified
        """
        Config.init(basic_conf)
        # Local renamed from `dir` to avoid shadowing the builtin.
        xos_dir = Config.get("xos_dir")
        self.assertEqual(xos_dir, "/opt/xos")

    def test_get_config_file(self):
        """
        [XOS-Config] Should return the config file in use
        """
        Config.init(sample_conf)
        res = Config.get_config_file()
        self.assertEqual(res, sample_conf)

    def test_get_missing_param(self):
        """
        [XOS-Config] Should return None reading a missing param
        """
        Config.init(sample_conf)
        res = Config.get("foo")
        self.assertEqual(res, None)

    def test_get_first_level(self):
        """
        [XOS-Config] Should return a first level param
        """
        Config.init(sample_conf)
        # NOTE we are using Config2 here to be sure that the configuration is readable from any import,
        # not only from the one that has been used to initialize it
        res = Config2.get("database")
        self.assertEqual(res, {"name": "xos", "username": "test", "password": "safe"})

    def test_get_child_level(self):
        """
        [XOS-Config] Should return a child level param
        """
        Config.init(sample_conf)
        res = Config.get("database.name")
        self.assertEqual(res, "xos")

    def test_config_override(self):
        """
        [XOS-Config] If an override is provided for the config, it should return the overridden value
        """
        Config.init(sample_conf, "xos-config-schema.yaml", override_conf)
        res = Config.get("logging.level")
        self.assertEqual(res, "info")
        res = Config.get("database.password")
        self.assertEqual(res, "overridden_password")

    def test_config_extend(self):
        """
        [XOS-Config] If an override is provided for the config, it should
        return the overridden value (also if not defined in the base one)
        """
        Config.init(sample_conf, "xos-config-schema.yaml", extend_conf)
        res = Config.get("xos_dir")
        self.assertEqual(res, "/opt/xos")
        res = Config.get("database.password")
        self.assertEqual(res, "safe")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 33.685714 | 117 | 0.619775 |
0ee0cf0d09ee46434526f2421d655ff288b05f9d | 28,250 | py | Python | python/ee/image.py | KMarkert/earthengine-api | 761c088ca2a79860903f0af1ffea7d4165de88db | [
"Apache-2.0"
] | null | null | null | python/ee/image.py | KMarkert/earthengine-api | 761c088ca2a79860903f0af1ffea7d4165de88db | [
"Apache-2.0"
] | null | null | null | python/ee/image.py | KMarkert/earthengine-api | 761c088ca2a79860903f0af1ffea7d4165de88db | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""A representation of an Earth Engine image.
See: https://sites.google.com/site/earthengineapidocs for more details.
"""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# Disable lint messages caused by Python 3 compatibility changes.
# pylint: disable=superfluous-parens
import json
from . import apifunction
from . import computedobject
from . import data
from . import deprecation
from . import ee_exception
from . import ee_types
from . import element
from . import function
from . import geometry
import six
class Image(element.Element):
"""An object to represent an Earth Engine image."""
_initialized = False
def __init__(self, args=None, version=None):
    """Constructs an Earth Engine image.

    Args:
      args: This constructor accepts a variety of arguments:
          - A string - an EarthEngine asset id,
          - A string and a number - an EarthEngine asset id and version,
          - A number - creates a constant image,
          - An EEArray - creates a constant array image,
          - A list - creates an image out of each element of the array and
            combines them into a single image,
          - An ee.Image - returns the argument,
          - Nothing - results in an empty transparent image.
      version: An optional asset version.

    Raises:
      EEException: if passed something other than the above.
    """
    # Make sure the server API functions are imported onto this class first.
    self.initialize()

    if version is not None:
        # A version is only valid together with a string asset id.
        if ee_types.isString(args) and ee_types.isNumber(version):
            # An ID and version.
            super(Image, self).__init__(
                apifunction.ApiFunction.lookup('Image.load'),
                {'id': args, 'version': version})
        else:
            raise ee_exception.EEException(
                'If version is specified, the arg to Image() must be a string. '
                'Received: %s' % (args,))
        return

    # NOTE: the order of these checks is significant (e.g. the number check
    # runs before the string check, and ComputedObject before None).
    if ee_types.isNumber(args):
        # A constant image.
        super(Image, self).__init__(
            apifunction.ApiFunction.lookup('Image.constant'), {'value': args})
    elif ee_types.isString(args):
        # An ID.
        super(Image, self).__init__(
            apifunction.ApiFunction.lookup('Image.load'), {'id': args})
    elif isinstance(args, (list, tuple)):
        # Make an image out of each element.
        image = Image.combine_([Image(i) for i in args])
        super(Image, self).__init__(image.func, image.args)
    elif isinstance(args, computedobject.ComputedObject):
        if args.name() == 'Array':
            # A constant array image.
            super(Image, self).__init__(
                apifunction.ApiFunction.lookup('Image.constant'), {'value': args})
        else:
            # A custom object to reinterpret as an Image.
            super(Image, self).__init__(args.func, args.args, args.varName)
    elif args is None:
        # No argument: a constant image with everything masked out.
        super(Image, self).__init__(
            apifunction.ApiFunction.lookup('Image.mask'),
            {'image': Image(0), 'mask': Image(0)})
    else:
        raise ee_exception.EEException(
            'Unrecognized argument type to convert to an Image: %s' % args)
@classmethod
def initialize(cls):
    """Imports API functions to this class. Idempotent."""
    if cls._initialized:
        return
    apifunction.ApiFunction.importApi(cls, 'Image', 'Image')
    # Window functions are imported with the 'focal_' prefix argument.
    apifunction.ApiFunction.importApi(cls, 'Window', 'Image', 'focal_')
    cls._initialized = True
@classmethod
def reset(cls):
    """Removes imported API functions from this class.

    Clears what initialize() added and marks the class uninitialized so a
    later initialize() call re-imports the API.
    """
    apifunction.ApiFunction.clearApi(cls)
    cls._initialized = False
def getInfo(self):
    """Fetch and return information about this image.

    Delegates to the superclass (Element) implementation.

    Returns:
      The return contents vary but will include at least:
          bands - Array containing metadata about the bands in the image,
          properties - Dictionary containing the image's metadata properties.
    """
    return super(Image, self).getInfo()
def getMapId(self, vis_params=None):
    """Fetch and return a map ID dictionary, suitable for use in a Map overlay.

    Args:
      vis_params: The visualization parameters. See ee.data.getMapId.

    Returns:
      A map ID dictionary as described in ee.data.getMapId.
    """
    visual, params = self._apply_visualization(vis_params)
    params['image'] = visual
    map_id = data.getMapId(params)
    # Attach the source image so callers can tell what the map shows.
    map_id['image'] = self
    return map_id
def _apply_crs_and_affine(self, params):
    """Applies any CRS and affine parameters to an image.

    Wraps the image in a call to Reproject() if the request includes
    specifying a CRS and affine transformation.

    Args:
      params: the request parameters.

    Returns:
      A tuple containing:
      - the result of applying the projection parameters to this image
      - any remaining parameters.
    """
    # Split params into projection-related keys and everything else.
    keys_to_extract = set(['crs', 'crs_transform', 'crsTransform'])
    request = {}
    reprojection_params = {}
    if params:
        for key in params:
            if key in keys_to_extract:
                reprojection_params[key] = params[key]
            else:
                request[key] = params[key]
    image = self
    if reprojection_params:
        # Normalize the two accepted spellings to 'crs_transform';
        # supplying both is ambiguous and rejected.
        if 'crsTransform' in reprojection_params:
            if 'crs_transform' in reprojection_params:
                raise ee_exception.EEException(
                    'Both "crs_transform" and "crsTransform" are specified.')
            reprojection_params['crs_transform'] = reprojection_params.pop(
                'crsTransform')
        # A transform without a CRS is meaningless.
        if 'crs' not in reprojection_params:
            raise ee_exception.EEException(
                'Must specify "crs" if "crs_transform" is specified.')
        crs = reprojection_params['crs']
        if 'crs_transform' in reprojection_params:
            crs_transform = reprojection_params['crs_transform']
            # crs_transform can come in a bewildering variety of shapes: a list of
            # numbers, an ee.List of possibly computed values, or even a
            # comma-separated list of numbers, potentially wrapped in square
            # brackets. Parameter coercion takes care of the first two, but we need
            # to deal with the third.
            if isinstance(crs_transform, six.string_types):
                crs_transform = [
                    float(x) for x in crs_transform.lstrip('[').rstrip(']').split(',')
                ]
            image = image.reproject(crs, crsTransform=crs_transform)
            # Special case here: If they specified "crs", "crs_transform", and a
            # two-element "dimensions", but not a region or other parameters such
            # as "scale", then the desired operation is to extract an exact
            # rectangle in that exact projection, not what we'd otherwise
            # interpret this as ("reproject to that projection, then resize to
            # those dimensions"). Detect this and convert the dimensions to a
            # Geometry: a Rectangle in that Projection.
            if ('dimensions' in request and 'region' not in request and
                'scale' not in request):
                dimensions = _parse_dimensions(params['dimensions'])
                if len(dimensions) == 2:
                    del request['dimensions']
                    desired_rectangle = geometry.Geometry.Rectangle(
                        [0, 0, dimensions[0], dimensions[1]],
                        proj=image.projection(),
                        geodesic=False,
                        evenOdd=True)
                    # This will take effect in _apply_selection_and_scale. The
                    # combination reprojection and clipping will result in the exact
                    # desired rectangle.
                    request['region'] = desired_rectangle
        else:
            # CRS but no CRS transform means that we reproject to that CRS using a
            # default transform (with the Y coordinate flipped as we usually do) but
            # don't resample after the reprojection, so that later operations can
            # alter the image scale.
            image = image.setDefaultProjection(
                crs, crsTransform=[1, 0, 0, 0, -1, 0])
    return image, request
def _apply_selection_and_scale(self, params):
    """Applies region selection and scaling parameters to an image.

    Wraps the image in a call to clipToBoundsAndScale() if there are any
    recognized region selection and scale parameters present.

    Args:
      params: the request parameters.

    Returns:
      A tuple containing:
      - the result of applying the selection and scale parameters to this
        image
      - any remaining (non-selection/scale) parameters.
    """
    keys_to_extract = set(['region', 'dimensions', 'scale'])
    # Presence of any of these in selection_params triggers scaling.
    scale_keys = ['maxDimension', 'height', 'width', 'scale']
    request = {}
    selection_params = {}
    if params:
        for key in params:
            if key not in keys_to_extract:
                request[key] = params[key]
            else:
                if key == 'dimensions':
                    # One number -> max dimension; two -> exact width/height.
                    dimensions = _parse_dimensions(params['dimensions'])
                    if len(dimensions) == 1:
                        selection_params['maxDimension'] = dimensions[0]
                    elif len(dimensions) == 2:
                        selection_params['width'] = dimensions[0]
                        selection_params['height'] = dimensions[1]
                elif key == 'region':
                    # Could be a Geometry, a GeoJSON struct, or a GeoJSON string.
                    # Geometry's constructor knows how to handle the first two.
                    region = params[key]
                    # If given a Geometry object, just use the client's Geometry.
                    if isinstance(region, geometry.Geometry):
                        selection_params['geometry'] = region
                        continue
                    # Otherwise, we may be given a GeoJSON object or string.
                    if isinstance(region, six.string_types):
                        region = json.loads(region)
                    # By default the Geometry should be planar.
                    if isinstance(region, list):
                        # A 2-element list or a flat list of numbers is taken as
                        # rectangle bounds; otherwise it is polygon coordinates.
                        if (len(region) == 2
                            or all(isinstance(e, (int, float)) for e in region)):
                            selection_params['geometry'] = geometry.Geometry.Rectangle(
                                region, None, geodesic=False)
                        else:
                            selection_params['geometry'] = geometry.Geometry.Polygon(
                                region, None, geodesic=False)
                        continue
                    selection_params['geometry'] = geometry.Geometry(
                        region, opt_proj=None, opt_geodesic=False)
                else:
                    selection_params[key] = params[key]
    image = self
    if selection_params:
        selection_params['input'] = image
        if any(key in selection_params for key in scale_keys):
            image = apifunction.ApiFunction.apply_(
                'Image.clipToBoundsAndScale', selection_params)
        else:
            # Region only: a plain clip with no rescaling.
            clip_params = {
                'input': image,
                'geometry': selection_params.get('geometry')
            }
            image = apifunction.ApiFunction.apply_('Image.clip', clip_params)
    return image, request
def _apply_visualization(self, params):
    """Applies visualization parameters to an image.

    Wraps the image in a call to visualize() if there are any recognized
    visualization parameters present.

    Args:
      params: the request parameters.

    Returns:
      A tuple containing:
      - the result of applying the visualization parameters to this image
      - any remaining (non-visualization) parameters.
    """
    # Keys handled by Image.visualize(); anything else is passed through.
    vis_keys = set(['bands', 'gain', 'bias', 'min', 'max',
                    'gamma', 'palette', 'opacity', 'forceRgbOutput'])
    remaining = {}
    vis_params = {}
    for key in (params or {}):
        bucket = vis_params if key in vis_keys else remaining
        bucket[key] = params[key]
    image = self
    if vis_params:
        vis_params['image'] = image
        image = apifunction.ApiFunction.apply_('Image.visualize', vis_params)
    return image, remaining
def _build_download_id_image(self, params):
    """Processes the getDownloadId parameters and returns the built image.

    Given transformation parameters (crs, crs_transform, dimensions, scale, and
    region), constructs an image per band. Band level parameters override the
    parameters specified in the top level. If dimensions and scale parameters
    are both specifed, the scale parameter is ignored.

    Image transformations will be applied on a per band basis if the
    format parameter is ZIPPED_GEO_TIFF_PER_BAND and there are bands in the
    bands list. Otherwise, the transformations will be applied on the entire
    image.

    Args:
      params: The getDownloadId parameters.

    Returns:
      The image filtered to the given bands and the associated transformations
      applied.
    """
    # Work on a copy so the caller's dict is never mutated.
    params = params.copy()
    def _extract_and_validate_transforms(obj):
        """Takes a parameter dictionary and extracts the transformation keys."""
        extracted = {}
        for key in ['crs', 'crs_transform', 'dimensions', 'region']:
            if key in obj:
                extracted[key] = obj[key]
        # Since dimensions and scale are mutually exclusive, we ignore scale
        # if dimensions are specified.
        if 'scale' in obj and 'dimensions' not in obj:
            extracted['scale'] = obj['scale']
        return extracted
    def _build_image_per_band(band_params):
        """Takes a band dictionary and builds an image for it."""
        if 'id' not in band_params:
            raise ee_exception.EEException('Each band dictionary must have an id.')
        band_id = band_params['id']
        band_image = self.select(band_id)
        # Override the existing top level params with the band level params.
        copy_params = _extract_and_validate_transforms(params)
        band_params = _extract_and_validate_transforms(band_params)
        copy_params.update(band_params)
        # Re-extract so the scale/dimensions exclusivity rule is applied to
        # the merged dictionary as well.
        band_params = _extract_and_validate_transforms(copy_params)
        # pylint: disable=protected-access
        band_image, band_params = band_image._apply_crs_and_affine(band_params)
        band_image, _ = band_image._apply_selection_and_scale(band_params)
        # pylint: enable=protected-access
        return band_image
    if params['format'] == 'ZIPPED_GEO_TIFF_PER_BAND' and params.get(
        'bands') and len(params.get('bands')):
        # Build a new image based on the constituent band images.
        image = Image.combine_(
            [_build_image_per_band(band) for band in params['bands']])
    else:
        # Apply transformations directly onto the image, ignoring any band params.
        copy_params = _extract_and_validate_transforms(params)
        image, copy_params = self._apply_crs_and_affine(copy_params)  # pylint: disable=protected-access
        image, _ = image._apply_selection_and_scale(copy_params)  # pylint: disable=protected-access
    return image
def prepare_for_export(self, params):
    """Applies all relevant export parameters to an image.

    Args:
      params: the export request parameters.

    Returns:
      A tuple containing:
      - an image that has had many of the request parameters applied to it
      - any remaining parameters.
    """
    # Without the Cloud API we must stay bug-for-bug compatible with the
    # legacy behaviour, which is to pass everything through untouched.
    if not data._use_cloud_api:  # pylint: disable=protected-access
        return self, params
    image, remaining = self._apply_crs_and_affine(params)  # pylint: disable=protected-access
    image, remaining = image._apply_selection_and_scale(remaining)  # pylint: disable=protected-access
    return image, remaining
def getDownloadURL(self, params=None):
    """Get a download URL for this image.

    Args:
      params: An object containing download options with the following
        possible values:
          name - a base name to use when constructing filenames.
          bands - a description of the bands to download. Must be an array of
            dictionaries, each with the following keys:
              id - the name of the band, a string, required.
              crs - an optional CRS string defining the band projection.
              crs_transform - an optional array of 6 numbers specifying an
                affine transform from the specified CRS, in the order: xScale,
                yShearing, xShearing, yScale, xTranslation and yTranslation.
              dimensions - an optional array of two integers defining the
                width and height to which the band is cropped.
              scale - an optional number, specifying the scale in meters of
                the band; ignored if crs and crs_transform is specified.
          crs - a default CRS string to use for any bands that do not
            explicitly specify one.
          crs_transform - a default affine transform to use for any bands that
            do not specify one, of the same format as the crs_transform of
            bands.
          dimensions - default image cropping dimensions to use for any bands
            that do not specify them.
          scale - a default scale to use for any bands that do not specify
            one; ignored if crs and crs_transform is specified.
          region - a polygon specifying a region to download; ignored if crs
            and crs_transform is specified.
          filePerBand - whether to produce a different GeoTIFF per band
            (boolean). Defaults to true. If false, a single GeoTIFF is
            produced and all band-level transformations will be ignored.

    Returns:
      A URL to download the specified image.
    """
    request = params or {}
    request['image'] = self
    return data.makeDownloadUrl(data.getDownloadId(request))
def getThumbId(self, params):
    """Applies transformations and returns the thumbId.

    Args:
      params: Parameters identical to getMapId, plus, optionally:
        dimensions - (a number or pair of numbers in format WIDTHxHEIGHT)
          Max dimensions of the thumbnail to render, in pixels. If only one
          number is passed, it is used as the maximum, and the other
          dimension is computed by proportional scaling.
        region - (E,S,W,N or GeoJSON) Geospatial region of the image to
          render. By default, the whole image.
        format - (string) Either 'png' or 'jpg'.

    Returns:
      A thumbId for the created thumbnail.

    Raises:
      EEException: If the region parameter is not an array or GeoJSON object.
    """
    # Peel off projection, selection/scale, and visualization parameters in
    # turn; each step consumes its keys and forwards the rest.
    # pylint: disable=protected-access
    image, remaining = self._apply_crs_and_affine(params)
    image, remaining = image._apply_selection_and_scale(remaining)
    image, remaining = image._apply_visualization(remaining)
    # pylint: enable=protected-access
    remaining['image'] = image
    return data.getThumbId(remaining)
def getThumbURL(self, params=None):
    """Get a thumbnail URL for this image.

    Args:
      params: Parameters identical to getMapId, plus, optionally:
        dimensions - (a number or pair of numbers in format WIDTHxHEIGHT) Max
          dimensions of the thumbnail to render, in pixels. If only one number
          is passed, it is used as the maximum, and the other dimension is
          computed by proportional scaling.
        region - (ee.Geometry, GeoJSON, list of numbers, list of points)
          Geospatial region of the image to render. By default, the whole
          image. If given a list of min lon, min lat, max lon, max lat,
          a planar rectangle is created. If given a list of points a
          planar polygon is created.
        format - (string) Either 'png' or 'jpg'.

    Returns:
      A URL to download a thumbnail the specified image.

    Raises:
      EEException: If the region parameter is not an array or GeoJSON object.
    """
    # If the Cloud API is enabled, we can do cleaner handling of the parameters.
    # If it isn't enabled, we have to be bug-for-bug compatible with current
    # behaviour.
    if data._use_cloud_api:  # pylint: disable=protected-access
        return data.makeThumbUrl(self.getThumbId(params))
    image, params = self._apply_visualization(params)
    params['image'] = image
    if 'region' in params:
        if isinstance(params['region'], (dict, list)):
            params['region'] = json.dumps(params['region'])
        # Fix: use six.string_types (not bare str) so py2 unicode region
        # strings are accepted, consistent with the rest of this module.
        elif not isinstance(params['region'], six.string_types):
            raise ee_exception.EEException(
                'The region parameter must be an array or a GeoJSON object.')
    return data.makeThumbUrl(data.getThumbId(params))
# Deprecated spellings to match the JS library.
# These emit a deprecation warning and forward to the canonical names.
getDownloadUrl = deprecation.Deprecated('Use getDownloadURL().')(
    getDownloadURL)
getThumbUrl = deprecation.Deprecated('Use getThumbURL().')(getThumbURL)
###################################################
# Static methods.
###################################################
@staticmethod
def rgb(r, g, b):
    """Create a 3-band image.

    This creates a 3-band image specifically for visualization using
    the first band in each image.

    Args:
      r: The red image.
      g: The green image.
      b: The blue image.

    Returns:
      The combined image.
    """
    band_names = ['vis-red', 'vis-green', 'vis-blue']
    return Image.combine_([r, g, b], band_names)
@staticmethod
def cat(*args):
    """Concatenate the given images together into a single image."""
    # combine_() accepts any sequence of images; pass the varargs through.
    return Image.combine_(list(args))
@staticmethod
def combine_(images, names=None):
    """Combine all the bands from the given images into a single image.

    Args:
      images: The images to be combined.
      names: An array of names for the output bands.

    Returns:
      The combined image.
    """
    if not images:
        raise ee_exception.EEException("Can't combine 0 images.")
    # Fold every image's bands onto the first one.
    result = Image(images[0])
    for extra in images[1:]:
        result = apifunction.ApiFunction.call_('Image.addBands', result, extra)
    # Optionally, rename the resulting bands.
    if names:
        result = result.select(['.*'], names)
    return result
def select(self, opt_selectors=None, opt_names=None, *args):
    """Selects bands from an image.

    Can be called in one of two ways:
      - Passed any number of non-list arguments. All of these will be
        interpreted as band selectors. These can be band names, regexes, or
        numeric indices. E.g.
        selected = image.select('a', 'b', 3, 'd');
      - Passed two lists. The first will be used as band selectors and the
        second as new names for the selected bands. The number of new names
        must match the number of selected bands. E.g.
        selected = image.select(['a', 4], ['newA', 'newB']);

    Args:
      opt_selectors: An array of names, regexes or numeric indices specifying
        the bands to select.
      opt_names: An array of strings specifying the new names for the
        selected bands.
      *args: Selector elements as varargs.

    Returns:
      An image with the selected bands.
    """
    # Normalize: fold the two named arguments onto the front of args so the
    # logic below only has to reason about a single positional list.
    if opt_selectors is not None:
        args = list(args)
        if opt_names is not None:
            args.insert(0, opt_names)
        args.insert(0, opt_selectors)
    algorithm_args = {
        'input': self,
        'bandSelectors': args[0] if args else [],
    }
    if args:
        # If the user didn't pass an array as the first argument, assume
        # that everything in the arguments array is actually a selector.
        if (len(args) > 2 or
            ee_types.isString(args[0]) or
            ee_types.isNumber(args[0])):
            # Varargs inputs.
            selectors = args
            # Verify we didn't get anything unexpected.
            for selector in selectors:
                if (not ee_types.isString(selector) and
                    not ee_types.isNumber(selector) and
                    not isinstance(selector, computedobject.ComputedObject)):
                    raise ee_exception.EEException(
                        'Illegal argument to select(): ' + selector)
            algorithm_args['bandSelectors'] = selectors
        elif len(args) > 1:
            # Two-list form: second list is the new band names.
            algorithm_args['newNames'] = args[1]
    return apifunction.ApiFunction.apply_('Image.select', algorithm_args)
def expression(self, expression, opt_map=None):
    """Evaluates an arithmetic expression on an image or images.

    The bands of the primary input image are available using the built-in
    function b(), as b(0) or b('band_name').

    Variables in the expression are interpreted as additional image parameters
    which must be supplied in opt_map. The bands of each such image can be
    accessed like image.band_name or image[0].

    Both b() and image[] allow multiple arguments, to specify multiple bands,
    such as b(1, 'name', 3). Calling b() with no arguments, or using a variable
    by itself, returns all bands of the image.

    Args:
      expression: The expression to evaluate.
      opt_map: An optional map of input images available by name.

    Returns:
      The image computed by the provided expression.
    """
    arg_name = 'DEFAULT_EXPRESSION_IMAGE'
    all_vars = [arg_name]
    args = {arg_name: self}
    # Add custom arguments, promoting them to Images manually.
    if opt_map:
        for name, value in opt_map.items():
            all_vars.append(name)
            args[name] = Image(value)
    body = apifunction.ApiFunction.call_(
        'Image.parseExpression', expression, arg_name, all_vars)
    # Like Spot the zebra, Image.parseExpression is not like all the others.
    # It's an Algorithm whose output (in "body" here) is another Algorithm, one
    # that takes a set of Images and produces an Image. We need to make an
    # ee.Function to wrap it properly: encoding and specification of input and
    # output types.
    class ReinterpretedFunction(function.Function):
        """A function that executes the result of a function."""
        def encode_invocation(self, encoder):
            # Legacy encoding: serialize the inner algorithm directly.
            return body.encode(encoder)
        def encode_cloud_invocation(self, encoder):
            # Cloud API encoding: reference the inner algorithm by value.
            return {'functionReference': encoder(body)}
        def getSignature(self):
            # Every variable (including the default image) is a required
            # Image argument; the result is always an Image.
            return {
                'name': '',
                'args': [{'name': name, 'type': 'Image', 'optional': False}
                         for name in all_vars],
                'returns': 'Image'
            }
    # Perform the call to the result of Image.parseExpression
    return ReinterpretedFunction().apply(args)
def clip(self, clip_geometry):
    """Clips an image to a Geometry or Feature.

    The output bands correspond exactly the input bands, except data not
    covered by the geometry is masked. The output image retains the
    metadata of the input image.

    Use clipToCollection to clip an image to a FeatureCollection.

    Args:
      clip_geometry: The Geometry or Feature to clip to.

    Returns:
      The clipped image.
    """
    # Manually promote GeoJSON to a Geometry: the server signature leaves
    # the type unspecified, so automatic promotion does not happen. If the
    # value is neither a Geometry nor GeoJSON, forward it unchanged.
    try:
        promoted = geometry.Geometry(clip_geometry)
    except ee_exception.EEException:
        promoted = clip_geometry
    return apifunction.ApiFunction.call_('Image.clip', self, promoted)
def rename(self, names, *args):
    """Rename the bands of an image.

    Can be called with either a list of strings or any number of strings.

    Args:
      names: An array of strings specifying the new names for the
        bands. Must exactly match the number of bands in the image.
      *args: Band names as varargs.

    Returns:
      An image with the renamed bands.
    """
    # Varargs form: collapse the names into a single list. Anything else
    # (lists, computed objects) is passed through for the server to handle.
    if args or ee_types.isString(names):
        names = [names] + list(args)
    return apifunction.ApiFunction.apply_(
        'Image.rename', {'input': self, 'names': names})
@staticmethod
def name():
    """Returns the name of this class, for promotion and error messages."""
    return 'Image'
def _parse_dimensions(dimensions):
    """Parses a dimensions specification into a one or two element list.

    Accepts a single number, a 'WIDTHxHEIGHT' string, or a list/tuple of
    one or two elements; anything else raises EEException.
    """
    if ee_types.isNumber(dimensions):
        return [dimensions]
    if isinstance(dimensions, six.string_types):
        # Unpack 'WIDTHxHEIGHT' into [WIDTH, HEIGHT].
        return [int(part) for part in dimensions.split('x')]
    if isinstance(dimensions, (list, tuple)) and 1 <= len(dimensions) <= 2:
        return dimensions
    raise ee_exception.EEException(
        'Invalid dimensions {}.'.format(dimensions))
| 38.435374 | 102 | 0.655398 |
514b706bf9a5c8b9edd166ed310ceeb6e17adbca | 3,590 | py | Python | tests/search/test_arc_concistency.py | GeekLiB/simpleai-lee | dd49778d3521b725fa1666c6e64b48c986528250 | [
"MIT"
] | null | null | null | tests/search/test_arc_concistency.py | GeekLiB/simpleai-lee | dd49778d3521b725fa1666c6e64b48c986528250 | [
"MIT"
] | null | null | null | tests/search/test_arc_concistency.py | GeekLiB/simpleai-lee | dd49778d3521b725fa1666c6e64b48c986528250 | [
"MIT"
] | 2 | 2020-01-03T06:28:23.000Z | 2020-02-23T16:01:42.000Z | # coding=utf-8
import unittest
from operator import itemgetter
from simpleai.search.arc import all_arcs, revise, arc_consistency_3
first = itemgetter(0)
class TestAllArcs(unittest.TestCase):
def setUp(self):
self.constraint = lambda variables, values: False
def test_adds_pairs_in_both_directions(self):
constraints = [(('A', 'B'), self.constraint)]
arcs_result = all_arcs(constraints)
arcs_expected = set([('A', 'B'),
('B', 'A')])
self.assertEqual(arcs_result, arcs_expected)
def test_constraints_with_more_than_2_neighbors_arent_added(self):
constraints = [(('A', 'B', 'C'), self.constraint)]
arcs_result = all_arcs(constraints)
arcs_expected = set()
self.assertEqual(arcs_result, arcs_expected)
def is_square(variables, values):
    """Binary constraint: the second value must be the square of the first."""
    base, target = values[0], values[1]
    return base ** 2 == target
class TestReviseDomain(unittest.TestCase):
    """Tests for revise(): pruning domain values with no supporting match.

    Note: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced with ``assertEqual``.
    """

    def revise(self, domain_a, domain_b, duplicate_constraints=False):
        """Run revise() over the arc A->B with the given domains.

        Returns a tuple of (was_revised, domains) so tests can check both
        the return flag and the possibly-mutated domains.
        """
        domains = {'A': domain_a, 'B': domain_b}
        constraints = [(('A', 'B'), is_square)]
        if duplicate_constraints:
            constraints = constraints * 2
        return revise(domains, ('A', 'B'), constraints), domains

    def test_if_all_values_have_possible_match_the_domain_is_untouched(self):
        result, domains = self.revise([1, 2, 3], [1, 4, 9])
        self.assertFalse(result)
        self.assertEqual(domains['A'], [1, 2, 3])

    def test_if_a_value_has_no_possible_match_remove_it_from_domain(self):
        # 3 has no square in B's domain, so it must be pruned from A.
        result, domains = self.revise([1, 2, 3], [1, 4])
        self.assertTrue(result)
        self.assertEqual(domains['A'], [1, 2])

    def test_if_multiple_constraints_dont_fail_removing_twice(self):
        # There was a bug when two constraints tried to remove the same value.
        result, domains = self.revise([1, 2, 3], [1, 4], True)
        self.assertTrue(result)
        self.assertEqual(domains['A'], [1, 2])
class TestAC3(unittest.TestCase):
    """Tests for arc_consistency_3(): full AC-3 propagation over two+ arcs."""

    def ac3(self, domain_a, domain_b):
        # Run AC-3 over a single is_square constraint between A and B.
        domains = {'A': domain_a, 'B': domain_b}
        constraints = [(('A', 'B'), is_square)]
        return arc_consistency_3(domains, constraints), domains

    def test_values_available_for_all_returns_true(self):
        result, domains = self.ac3([1, 2, 3], [1, 4, 9])
        self.assertTrue(result)

    def test_if_variable_has_no_domain_left_returns_false(self):
        # No element of [1, 2, 3] has its square in [2, 3, 6], so A's
        # domain is emptied and AC-3 reports failure.
        result, domains = self.ac3([1, 2, 3], [2, 3, 6])
        self.assertFalse(result)

    def test_chained_revise_calls_remove_non_obvious_problems(self):
        # if A, B, C must be all different, with domains [1, 1], [1, 2], [2, 2] you
        # can't find a solution, but it requires several chained calls to
        # revise:
        # revise(A, B) -> ok! [1, 1] [1, 2] [2, 2]
        # revise(A, C) -> ok! [1, 1] [1, 2] [2, 2]
        # revise(B, C) -> fail, remove 2 from B [1, 1] [1] [2, 2]
        # and re-revise A, B and C, B
        # revise(A, B) -> fail, remove 1 from A [] [1] [2, 2]
        # and re-revise ...
        # here A has no more values, ac3 returns a failure
        domains = {'A': [1, 1],
                   'B': [1, 2],
                   'C': [2, 2]}
        different = lambda variables, values: len(set(values)) == len(variables)
        constraints = [(('A', 'B'), different),
                       (('A', 'C'), different),
                       (('B', 'C'), different)]
        result = arc_consistency_3(domains, constraints)
        self.assertFalse(result)
| 35.196078 | 83 | 0.593593 |
ab459cbec25c6756ef7836cb28fa13bc276e92b7 | 3,555 | py | Python | loc/helper/messages.py | guluc3m/loc-server | b25b6b2deec5d27c840d60f33e5aa33bd56ba08a | [
"MIT"
] | null | null | null | loc/helper/messages.py | guluc3m/loc-server | b25b6b2deec5d27c840d60f33e5aa33bd56ba08a | [
"MIT"
] | 7 | 2017-12-10T17:12:04.000Z | 2017-12-29T12:23:18.000Z | loc/helper/messages.py | guluc3m/loc-server | b25b6b2deec5d27c840d60f33e5aa33bd56ba08a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# League of Code server implementation
# https://github.com/guluc3m/loc-server
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Grupo de Usuarios de Linux UC3M <http://gul.es>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Status messages."""
from flask_babel import lazy_gettext as t
# All messages use flask-babel's lazy_gettext (aliased as t) so they are
# translated at render time, in the request's locale.

# JWT
JWT_ERROR = t('Error in JWT coding')
JWT_EXPIRED = t('JWT has expired')
JWT_MISSING = t('Request was missing the JWT token')
# Matches
MATCH_EXISTS = t('Match already exists')
MATCH_NOT_FOUND = t('Match was not found')
MATCH_ALREADY_STARTED = t('Match has already started')
MATCH_ALREADY_FINISHED = t('Match has already finished')
MATCH_NOT_STARTED = t('Match has not started')
MATCH_NOT_FINISHED = t('Match not finished')
ALREADY_PARTICIPATING = t('You are already participating in the match')
NOT_PARTICIPATING = t('You are not participating in the match')
# Takes a 'slug' keyword when rendered, e.g. SLUG_EXISTS % {'slug': slug}.
SLUG_EXISTS = t('The slug %(slug)s already exists')
# Parameters
CHECK_DATA = t('Check field for errors')
FIELD_MISSING = t('Field was missing in request')
INVALID_TYPE = t('Data type was not valid')
INVALID_VALUE = t('Data value was not valid')
# Party
PARTY_NOT_EMPTY = t('Your party is not empty')
PARTY_NOT_FOUND = t('Could not find party')
PARTY_CANNOT_JOIN = t('The party cannot accept any more members')
PARTY_FULL = t('The party is full')
PARTY_LEADER = t('You are the leader of the party')
ALREADY_IN_PARTY = t('You are already in a party')
NOT_LEADER = t('You are not the leader of the party')
CANNOT_KICK = t('You are the leader of the party and cannot kick yourself')
# Record
RECORD_CREATE_ERROR = t('Error creating record')
RECORD_DELETE_ERROR = t('Error deleting record')
RECORD_UPDATE_ERROR = t('Error updating record')
RECORD_NOT_FOUND = t('Record not found')
ALREADY_DELETED = t('The record was already deleted')
NOT_DELETED = t('The record was not deleted')
# User
USER_EXISTS = t('A user with the specified details already exists')
USER_NOT_FOUND = t('User was not found')
ALREADY_FOLLOWING = t('Already following that user')
NOT_FOLLOWING = t('Not following the user')
CANNOT_FOLLOW_YOURSELF = t('You cannot follow yourself')
ROLE_MISSING = t('Missing required role')
EMAIL_NOT_VALID = t('Email address is not valid')
INVALID_PASSWORD = t('Invalid password')
PASSWORD_LENGTH = t('Password should be at least 8 characters long')
PASSWORD_NO_MATCH = t('Password does not match')
# Other
OP_NOT_PERMITTED = t('Operation not permitted')
INVALID_TOKEN = t('Token was not found or has expired')
PAGE_INVALID = t('Not a valid page number')
4078ad71972d9b2e1877ce6fe5c344aa65123d55 | 4,044 | py | Python | Examples/Text Record/add_text_record/add_text_record_page.py | kemori-bc/gateway-workflows | 5aa1e3492b0c0b4ec23a6247ca92861cc77f2187 | [
"Apache-2.0"
] | null | null | null | Examples/Text Record/add_text_record/add_text_record_page.py | kemori-bc/gateway-workflows | 5aa1e3492b0c0b4ec23a6247ca92861cc77f2187 | [
"Apache-2.0"
] | null | null | null | Examples/Text Record/add_text_record/add_text_record_page.py | kemori-bc/gateway-workflows | 5aa1e3492b0c0b4ec23a6247ca92861cc77f2187 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 BlueCat Networks (USA) Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# By: BlueCat Networks
# Date: 2021-08-23
# Gateway Version: 20.12.1
# Description: Example Gateway workflow
"""
Add text record page
"""
import os
from flask import url_for, redirect, render_template, flash, g, request
from bluecat import route, util
import config.default_config as config
from main_app import app
from .add_text_record_form import GenericFormTemplate
def module_path():
    """Return the absolute directory containing this module.

    Used to locate workflow resources (e.g. localized text files) relative
    to this file.
    """
    here = os.path.abspath(str(__file__))
    return os.path.dirname(here)
# The workflow name must be the first part of any endpoints defined in this file.
# If you break this rule, you will trip up on other people's endpoint names and
# chaos will ensue.
@route(app, "/add_text_record/add_text_record_endpoint")
@util.workflow_permission_required("add_text_record_page")
@util.exception_catcher
def add_text_record_add_text_record_page():
    """
    Renders the form the user would first see when selecting the workflow.

    :return: the rendered workflow page.
    """
    form = GenericFormTemplate()
    return render_template(
        "add_text_record_page.html",
        form=form,
        text=util.get_text(module_path(), config.language),
        # Fix: was "uoptions=..."; both other render_template calls in this
        # module pass the user options under the "options" keyword, which is
        # what the shared template expects.
        options=g.user.get_options(),
    )
@route(app, "/add_text_record/form", methods=["POST"])
@util.workflow_permission_required("add_text_record_page")
@util.exception_catcher
def add_text_record_add_text_record_page_form():
    """
    Processes the final form after the user has input all the required data.

    On success, adds a TXT record via the BlueCat API and redirects back to
    the workflow page; on any failure, flashes the error and re-renders the
    form.

    :return: a redirect on success, or the rendered workflow page.
    """
    # pylint: disable=broad-except
    form = GenericFormTemplate()
    # Remove this line if your workflow does not need to select a configuration
    form.configuration.choices = util.get_configurations(default_val=True)
    if form.validate_on_submit():
        try:
            # Retrieve configuration, view, and absolute name
            configuration = g.user.get_api().get_entity_by_id(form.configuration.data)
            view = configuration.get_view(request.form["view"])
            # Absolute name = <record name>.<selected zone>.
            absolute_name = form.name.data + "." + request.form.get("zone", "")
            # Attempt to add the text record
            text_record = view.add_text_record(absolute_name, form.text.data)
            g.user.logger.info(
                "Success-Text Record "
                + text_record.get_property("absoluteName")
                + " Added with Object ID: "
                + str(text_record.get_id())
            )
            flash(
                "Success - Text Record "
                + text_record.get_property("absoluteName")
                + " Added with Object ID: "
                + str(text_record.get_id()),
                "succeed",
            )
            # NOTE(review): the endpoint name concatenates the workflow name
            # and function name with no separator — this looks like Gateway's
            # route() registration convention, but confirm against the
            # registered endpoint names.
            return redirect(url_for("add_text_recordadd_text_record_add_text_record_page"))
        except Exception as e:
            flash(str(e))
            # Log error and render workflow page
            g.user.logger.warning(f"EXCEPTION THROWN: {e}")
            return render_template(
                "add_text_record_page.html",
                form=form,
                text=util.get_text(module_path(), config.language),
                options=g.user.get_options(),
            )
    else:
        # Validation failed: re-render with the form's field errors.
        g.user.logger.info("Form data was not valid.")
        return render_template(
            "add_text_record_page.html",
            form=form,
            text=util.get_text(module_path(), config.language),
            options=g.user.get_options(),
        )
1df9763f39c2d56b4bf5a843fea19659d3e66919 | 3,345 | py | Python | ActorNetwork2.py | yuxinma4life/gym_torcs_vision | 41bf2f4c4322f8c455fbd46b1b80629ac4a7eea5 | [
"MIT"
] | 1 | 2018-11-12T12:11:40.000Z | 2018-11-12T12:11:40.000Z | ActorNetwork2.py | yuxinma4life/gym_torcs_vision | 41bf2f4c4322f8c455fbd46b1b80629ac4a7eea5 | [
"MIT"
] | 1 | 2018-11-17T15:02:06.000Z | 2018-11-17T15:02:06.000Z | ActorNetwork2.py | yuxinma4life/gym_torcs_vision | 41bf2f4c4322f8c455fbd46b1b80629ac4a7eea5 | [
"MIT"
] | null | null | null | import numpy as np
import math
from keras.initializations import normal, identity
from keras.models import model_from_json
from keras.models import Sequential, Model
#from keras.engine.training import collect_trainable_weights
from keras.layers import Dense, Flatten, Input, merge, Lambda, Reshape, Convolution2D, MaxPooling2D, BatchNormalization, Flatten
from keras.optimizers import Adam
import tensorflow as tf
import keras.backend as K
K.set_learning_phase(1)
# K._LEARNING_PHASE = tf.constant(0)
HIDDEN1_UNITS = 300
HIDDEN2_UNITS = 600
class ActorNetwork(object):
    """Actor network with a soft-updated target copy (Keras 1.x / TF1 API).

    Builds two identical networks (``model`` and ``target_model``) that map an
    image state to a 2-dimensional action (steering, acceleration), and wires a
    TF1 graph op that applies policy gradients to the online network's weights.
    """

    def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):
        """
        :param sess: TensorFlow session used for all graph executions.
        :param state_size: input shape tuple passed to ``Input(shape=...)``.
        :param action_size: number of action dimensions (placeholder width).
        :param BATCH_SIZE: stored batch size (not used directly in this class).
        :param TAU: soft-update mixing factor for the target network.
        :param LEARNING_RATE: Adam learning rate for the actor update.
        """
        self.sess = sess
        self.BATCH_SIZE = BATCH_SIZE
        self.TAU = TAU
        self.LEARNING_RATE = LEARNING_RATE

        K.set_session(sess)

        #Now create the model
        self.model , self.weights, self.state = self.create_actor_network(state_size, action_size)
        self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
        # Placeholder for dQ/da supplied by the critic at training time.
        self.action_gradient = tf.placeholder(tf.float32,[None, action_size])
        # Gradient of the actor output w.r.t. its weights, scaled by the
        # negated action gradient (gradient *ascent* on expected return).
        self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)
        # Must run after the full graph is built so every variable exists.
        self.sess.run(tf.global_variables_initializer())

    def train(self, states, action_grads):
        """Apply one actor update using the critic-provided action gradients."""
        self.sess.run(self.optimize, feed_dict={
            self.state: states,
            self.action_gradient: action_grads
        })

    def target_train(self):
        """Soft-update the target network: target = TAU*online + (1-TAU)*target."""
        actor_weights = self.model.get_weights()
        actor_target_weights = self.target_model.get_weights()
        for i in range(len(actor_weights)):
            actor_target_weights[i] = self.TAU * actor_weights[i] + (1 - self.TAU)* actor_target_weights[i]
        self.target_model.set_weights(actor_target_weights)

    def create_actor_network(self, state_size, action_dim):
        """Build the conv+dense actor graph.

        Returns ``(model, trainable_weights, input_tensor)``.
        NOTE(review): uses the deprecated Keras 1.x API (``nb_row``,
        ``subsample``, ``init``, ``merge``) -- assumes Keras < 2, confirm the
        pinned dependency before touching these calls.
        """
        print("Actor model: state_size", state_size, "action_dim", action_dim)
        S = Input(shape = state_size)
        # S_in = Lambda(lambda img: img / 255.0)(S)
        #S1 = Lambda(lambda img: img[:,-64*64*3:])(S)
        #S_in = Reshape((64,64,3))(S1)
        # Three conv blocks, each followed by batch normalization.
        batch_norm0 = BatchNormalization()(S)
        conv1 = Convolution2D(16, nb_row=4, nb_col=4, subsample=(4,4), activation='relu')(batch_norm0)
        batch_norm1 = BatchNormalization()(conv1)
        conv2 = Convolution2D(32, nb_row=4, nb_col=4, subsample=(2,2), activation='relu')(batch_norm1)
        batch_norm2 = BatchNormalization()(conv2)
        conv3 = Convolution2D(32, nb_row=4, nb_col=4, subsample=(2,2), activation = 'relu')(batch_norm2)
        batch_norm3 = BatchNormalization()(conv3)
        flat = Flatten()(batch_norm3)
        h0 = Dense(HIDDEN1_UNITS, activation='relu')(flat)
        h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)
        # Two tanh heads (range [-1, 1]) initialized with small random weights.
        Steering = Dense(1,activation='tanh',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
        Acceleration = Dense(1,activation='tanh',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)
        V = merge([Steering,Acceleration],mode='concat')
        model = Model(input=S,output=V)
        return model, model.trainable_weights, S
| 46.458333 | 128 | 0.692676 |
38e13637448dc2ba63d6d78313afc0cdc3405ffc | 7,406 | py | Python | GOTool/AnnotationParser.py | alfonsoeromero/S2F | fccb741b15acfdeb02ca0de411eb4b00ae73be85 | [
"MIT"
] | 9 | 2019-10-24T18:46:46.000Z | 2022-03-23T13:21:45.000Z | GOTool/AnnotationParser.py | alfonsoeromero/S2F | fccb741b15acfdeb02ca0de411eb4b00ae73be85 | [
"MIT"
] | 5 | 2022-01-26T18:00:01.000Z | 2022-02-08T14:09:42.000Z | GOTool/AnnotationParser.py | alfonsoeromero/S2F | fccb741b15acfdeb02ca0de411eb4b00ae73be85 | [
"MIT"
] | 2 | 2022-01-27T12:52:32.000Z | 2022-01-29T12:08:26.000Z |
"""
A higher level Gene Ontology representation in Python
=======
License
=======
Copyright (c) 2009 Tamas Nepusz <tamas@cs.rhul.ac.uk>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = "Tamas Nepusz"
__email__ = "tamas@cs.rhul.ac.uk"
__copyright__ = "Copyright (c) 2009, Tamas Nepusz"
__license__ = "MIT"
__version__ = "0.1"
__all__ = ["Annotation", "AnnotationFile"]
class Annotation(object):
    """Class representing a GO annotation (possibly parsed from an
    annotation file).

    The class has the following attributes (corresponding to the
    columns of a GO annotation file):

    - ``db``: refers to the database from which the identifier
      in the next column (``db_object_id``) is drawn
    - ``db_object_id``: a unique identifier in ``db`` for the
      item being annotated.
    - ``db_object_symbol``: a unique and valid symbol to which
      ``db_object_id`` is matched. Usually a symbol that means
      something to a biologist.
    - ``qualifiers``: a list of flags that modify the interpretation
      of the annotation (e.g., ``NOT``). Note that this is always
      a list, even when no qualifier exists.
    - ``go_id``: the GO identifier for the term attributed to
      ``db_object_id``.
    - ``db_references``: a list of unique identifiers for a single
      source cited as an authority for the attribution of the
      ``go_id`` to the ``db_object_id``. May be a literature or
      a database record.
    - ``evidence_code``: the GO evidence code
    - ``with``: required for some evidence codes. Holds an additional
      identifier for certain evidence codes.
    - ``aspect``: one of ``P`` (biological process), ``F``
      (molecular function) or ``C`` (cellular compartment).
    - ``db_object_name``: name of gene or gene product
    - ``db_object_synonyms``: a gene symbol or some other human-readable text
    - ``db_object_type``: what kind of thing is being annotated.
      This is either gene (``SO:0000704``), transcript (``SO:0000673``),
      protein (``SO:0000358``), ``protein_structure`` or ``complex``.
    - ``taxons``: taxonomic identifiers (at most two, at least 1). This is
      always a list
    - ``date``: date on which the annotation was made
    - ``assigned_by``: the database which made the annotation.
    - ``organism_name``: the organism in question. This is a modification
      of the original code, motivated by the need to annotate the same tree
      with a bunch of organisms.
    """

    __slots__ = ["db", "db_object_id", "db_object_symbol",
                 "qualifiers", "go_id", "db_references",
                 "evidence_code", "with", "aspect",
                 "db_object_name", "db_object_synonyms",
                 "db_object_type", "taxons", "date",
                 "assigned_by", "organism_name"]

    def __init__(self, *args, **kwds):
        """Constructs an annotation. Use keyword arguments to specify the
        values of the different attributes. If you use positional arguments,
        the order of the arguments must be the same as they are in the GO
        annotation file. No syntax checking is done on the values entered,
        but attributes with a maximum cardinality more than one are converted
        to lists automatically. (If you specify a string with vertical bar
        separators as they are in the input file, the string will be
        splitted appropriately)."""
        # A single positional string is treated as a raw tab-separated
        # annotation line and split into its columns.
        if len(args) == 1 and not kwds:
            args = args[0].strip().split("\t")
        for (name, value) in zip(self.__slots__, args):
            setattr(self, name, value)
        for name, value in kwds.items():
            # BUG FIX: this previously read ``setattr(self, name, kwds[value])``,
            # which used the argument *value* as a lookup key and raised
            # KeyError for any keyword argument.
            setattr(self, name, value)
        # Fill in any attribute that was not provided with an empty string;
        # _polish_attributes() then converts the multi-valued ones to lists.
        for name in self.__slots__:
            if not hasattr(self, name):
                setattr(self, name, "")
        self._polish_attributes()

    def _polish_attributes(self):
        """Ensures that the attributes are of the right type"""
        self._ensure_list("qualifiers")
        self._ensure_list("db_references")
        self._ensure_list("with")
        self._ensure_list("db_object_synonyms")
        self._ensure_list("taxons")

    def _ensure_list(self, attr):
        """Ensures that a given attribute is a list and not a string"""
        value = getattr(self, attr)
        if not isinstance(value, list):
            if value == "":
                setattr(self, attr, [])
            else:
                # Multi-valued columns use '|' as separator in the GAF format.
                setattr(self, attr, value.split("|"))

    def __repr__(self):
        params = ",".join("%s=%r" % (name, getattr(self, name))
                          for name in self.__slots__)
        return "%s(%s)" % (self.__class__.__name__, params)
class AnnotationFile(object):
    """A parser class that processes GO annotation files"""

    def __init__(self, fp, organism_name):
        """Creates an annotation file parser that reads the given file-like
        object. You can also specify filenames. If the filename ends in ``.gz``
        the file is assumed to contain gzipped data and it will be unzipped
        on the fly. Example:

        >>> import gene_ontology as go
        >>> parser = go.AnnotationFile("gene_association.sgd.gz")

        To read the annotations in the file, you must iterate over the parser
        as if it were a list. The iterator yields `Annotation` objects.

        :param fp: a filename string or an iterable of annotation lines.
        :param organism_name: name appended to every parsed line so each
            annotation records which organism it belongs to.
        """
        if isinstance(fp, str):
            if fp.endswith(".gz"):
                # Transparently decompress gzipped annotation files.
                from gzip import GzipFile
                self.fp = GzipFile(fp)
            else:
                self.fp = open(fp)
        else:
            self.fp = fp
        self.lineno = 0
        self.organism_name = organism_name

    def annotations(self):
        """Iterates over the annotations in this annotation file,
        yielding an `Annotation` object for each annotation."""
        for line in self.fp:
            self.lineno += 1
            if not line or line[0] == '!':
                # '!' marks a comment line in the GAF format.
                continue
            try:
                # Append the organism name in place of the line's trailing
                # characters.  NOTE(review): this assumes every data line
                # ends with exactly two surplus characters (the original
                # comment mentions a newline plus trailing tabs) -- confirm
                # against the input files actually used.
                line = line[0:-2] + self.organism_name
                yield Annotation(line)
            except TypeError as ex:
                # Chain the original TypeError so the root cause is visible.
                raise SyntaxError("cannot parse annotation", self.lineno) from ex

    def __iter__(self):
        return self.annotations()
| 41.144444 | 79 | 0.641237 |
1b8f02b94df397477315decefe42dcbaf5141d78 | 1,639 | py | Python | doc/integrations/pytorch/tests/nightly/gpu/test_bert.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | [
"Apache-2.0"
] | 1 | 2020-09-27T05:00:06.000Z | 2020-09-27T05:00:06.000Z | doc/integrations/pytorch/tests/nightly/gpu/test_bert.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | [
"Apache-2.0"
] | 1 | 2021-08-04T11:17:39.000Z | 2021-08-04T11:17:39.000Z | doc/integrations/pytorch/tests/nightly/gpu/test_bert.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | [
"Apache-2.0"
] | 1 | 2021-05-03T13:27:14.000Z | 2021-05-03T13:27:14.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import parlai.utils.testing as testing_utils
@testing_utils.skipUnlessGPU
class TestBertModel(unittest.TestCase):
    """
    Tests for the BERT bi-encoder and cross-encoder rankers.

    Each test trains the model for up to 500 steps on the
    ``integration_tests:overfit`` task and checks that a minimum test
    accuracy is reached.
    """

    def test_biencoder(self):
        opt = {
            'task': 'integration_tests:overfit',
            'model': 'bert_ranker/bi_encoder_ranker',
            'max_train_steps': 500,
            'batchsize': 2,
            'candidates': 'inline',
            'gradient_clip': 1.0,
            'learningrate': 1e-3,
            'text_truncate': 8,
        }
        _valid_report, test_report = testing_utils.train_model(opt)
        self.assertGreaterEqual(test_report['accuracy'], 0.5)

    def test_crossencoder(self):
        opt = {
            'task': 'integration_tests:overfit',
            'model': 'bert_ranker/cross_encoder_ranker',
            'max_train_steps': 500,
            'batchsize': 2,
            'learningrate': 1e-3,
            'gradient_clip': 1.0,
            'type_optimization': 'all_encoder_layers',
            'text_truncate': 8,
            'label_truncate': 8,
        }
        _valid_report, test_report = testing_utils.train_model(opt)
        self.assertGreaterEqual(test_report['accuracy'], 0.8)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 30.351852 | 85 | 0.57108 |
c73b6c895a4875f67c286403d7044f82e93f802f | 10,459 | py | Python | certbot-ci/certbot_integration_tests/utils/acme_server.py | mikma/certbot | ac8798e8185699cd2b7ddc904bccd978968e8e2f | [
"Apache-2.0"
] | 5 | 2021-01-26T08:47:29.000Z | 2021-01-30T00:42:12.000Z | certbot-ci/certbot_integration_tests/utils/acme_server.py | mikma/certbot | ac8798e8185699cd2b7ddc904bccd978968e8e2f | [
"Apache-2.0"
] | null | null | null | certbot-ci/certbot_integration_tests/utils/acme_server.py | mikma/certbot | ac8798e8185699cd2b7ddc904bccd978968e8e2f | [
"Apache-2.0"
] | 1 | 2020-10-28T05:49:43.000Z | 2020-10-28T05:49:43.000Z | #!/usr/bin/env python
"""Module to setup an ACME CA server environment able to run multiple tests in parallel"""
from __future__ import print_function
import errno
import json
import os
from os.path import join
import shutil
import subprocess
import sys
import tempfile
import time
import requests
from certbot_integration_tests.utils import misc
from certbot_integration_tests.utils import pebble_artifacts
from certbot_integration_tests.utils import proxy
from certbot_integration_tests.utils.constants import *
class ACMEServer(object):
    """
    ACMEServer configures and handles the lifecycle of an ACME CA server and an HTTP reverse proxy
    instance, to allow parallel execution of integration tests against the unique http-01 port
    expected by the ACME CA server.
    Typically all pytest integration tests will be executed in this context.
    ACMEServer gives access the acme_xdist parameter, listing the ports and directory url to use
    for each pytest node. It exposes also start and stop methods in order to start the stack, and
    stop it with proper resources cleanup.
    ACMEServer is also a context manager, and so can be used to ensure ACME server is started/stopped
    upon context enter/exit.
    """
    def __init__(self, acme_server, nodes, http_proxy=True, stdout=False):
        """
        Create an ACMEServer instance.
        :param str acme_server: the type of acme server used (boulder-v1, boulder-v2 or pebble)
        :param list nodes: list of node names that will be setup by pytest xdist
        :param bool http_proxy: if False do not start the HTTP proxy
        :param bool stdout: if True stream all subprocesses stdout to standard stdout
        """
        self._construct_acme_xdist(acme_server, nodes)

        # Anything that is not 'pebble' is a Boulder flavor (v1 or v2).
        self._acme_type = 'pebble' if acme_server == 'pebble' else 'boulder'
        self._proxy = http_proxy
        self._workspace = tempfile.mkdtemp()
        self._processes = []
        # When output is not streamed, subprocess output is discarded into a
        # devnull handle that stop() closes later.
        self._stdout = sys.stdout if stdout else open(os.devnull, 'w')

    def start(self):
        """Start the test stack"""
        try:
            if self._proxy:
                self._prepare_http_proxy()
            if self._acme_type == 'pebble':
                self._prepare_pebble_server()
            if self._acme_type == 'boulder':
                self._prepare_boulder_server()
        except BaseException as e:
            # Partial startup must not leak processes or the workspace.
            self.stop()
            raise e

    def stop(self):
        """Stop the test stack, and clean its resources"""
        print('=> Tear down the test infrastructure...')
        try:
            for process in self._processes:
                try:
                    process.terminate()
                except OSError as e:
                    # Process may be not started yet, so no PID and terminate fails.
                    # Then the process never started, and the situation is acceptable.
                    if e.errno != errno.ESRCH:
                        raise
            # Terminate first, then wait, so all children shut down in parallel.
            for process in self._processes:
                process.wait()

            if os.path.exists(os.path.join(self._workspace, 'boulder')):
                # Boulder docker generates build artifacts owned by root with 0o744 permissions.
                # If we started the acme server from a normal user that has access to the Docker
                # daemon, this user will not be able to delete these artifacts from the host.
                # We need to do it through a docker.
                process = self._launch_process(['docker', 'run', '--rm', '-v',
                                                '{0}:/workspace'.format(self._workspace),
                                                'alpine', 'rm', '-rf', '/workspace/boulder'])
                process.wait()
        finally:
            # Always remove the workspace and close the devnull handle, even if
            # a child process could not be stopped cleanly.
            if os.path.exists(self._workspace):
                shutil.rmtree(self._workspace)
            if self._stdout != sys.stdout:
                self._stdout.close()
            print('=> Test infrastructure stopped and cleaned up.')

    def __enter__(self):
        self.start()
        return self.acme_xdist

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def _construct_acme_xdist(self, acme_server, nodes):
        """Generate and return the acme_xdist dict"""
        acme_xdist = {'acme_server': acme_server, 'challtestsrv_port': CHALLTESTSRV_PORT}

        # Directory and ACME port are set implicitly in the docker-compose.yml files of Boulder/Pebble.
        if acme_server == 'pebble':
            acme_xdist['directory_url'] = PEBBLE_DIRECTORY_URL
        else:  # boulder
            acme_xdist['directory_url'] = BOULDER_V2_DIRECTORY_URL \
                if acme_server == 'boulder-v2' else BOULDER_V1_DIRECTORY_URL

        # Each pytest-xdist node gets its own triple of ports so tests can run
        # in parallel without colliding.
        acme_xdist['http_port'] = {node: port for (node, port)
                                   in zip(nodes, range(5200, 5200 + len(nodes)))}
        acme_xdist['https_port'] = {node: port for (node, port)
                                    in zip(nodes, range(5100, 5100 + len(nodes)))}
        acme_xdist['other_port'] = {node: port for (node, port)
                                    in zip(nodes, range(5300, 5300 + len(nodes)))}

        self.acme_xdist = acme_xdist

    def _prepare_pebble_server(self):
        """Configure and launch the Pebble server"""
        print('=> Starting pebble instance deployment...')
        pebble_path, challtestsrv_path, pebble_config_path = pebble_artifacts.fetch(self._workspace)

        # Configure Pebble at full speed (PEBBLE_VA_NOSLEEP=1) and not randomly refusing valid
        # nonce (PEBBLE_WFE_NONCEREJECT=0) to have a stable test environment.
        environ = os.environ.copy()
        environ['PEBBLE_VA_NOSLEEP'] = '1'
        environ['PEBBLE_WFE_NONCEREJECT'] = '0'
        environ['PEBBLE_AUTHZREUSE'] = '100'
        environ['PEBBLE_ALTERNATE_ROOTS'] = str(PEBBLE_ALTERNATE_ROOTS)

        self._launch_process(
            [pebble_path, '-config', pebble_config_path, '-dnsserver', '127.0.0.1:8053', '-strict'],
            env=environ)

        self._launch_process(
            [challtestsrv_path, '-management', ':{0}'.format(CHALLTESTSRV_PORT), '-defaultIPv6', '""',
             '-defaultIPv4', '127.0.0.1', '-http01', '""', '-tlsalpn01', '""', '-https01', '""'])

        # pebble_ocsp_server is imported here and not at the top of module in order to avoid a useless
        # ImportError, in the case where cryptography dependency is too old to support ocsp, but
        # Boulder is used instead of Pebble, so pebble_ocsp_server is not used. This is the typical
        # situation of integration-certbot-oldest tox testenv.
        from certbot_integration_tests.utils import pebble_ocsp_server
        self._launch_process([sys.executable, pebble_ocsp_server.__file__])

        # Wait for the ACME CA server to be up.
        print('=> Waiting for pebble instance to respond...')
        misc.check_until_timeout(self.acme_xdist['directory_url'])

        print('=> Finished pebble instance deployment.')

    def _prepare_boulder_server(self):
        """Configure and launch the Boulder server"""
        print('=> Starting boulder instance deployment...')
        instance_path = join(self._workspace, 'boulder')

        # Load Boulder from git, that includes a docker-compose.yml ready for production.
        process = self._launch_process(['git', 'clone', 'https://github.com/letsencrypt/boulder',
                                        '--single-branch', '--depth=1', instance_path])
        process.wait()

        # Allow Boulder to ignore usual limit rate policies, useful for tests.
        os.rename(join(instance_path, 'test/rate-limit-policies-b.yml'),
                  join(instance_path, 'test/rate-limit-policies.yml'))

        try:
            # Launch the Boulder server
            self._launch_process(['docker-compose', 'up', '--force-recreate'], cwd=instance_path)

            # Wait for the ACME CA server to be up.
            print('=> Waiting for boulder instance to respond...')
            misc.check_until_timeout(self.acme_xdist['directory_url'], attempts=300)

            # Configure challtestsrv to answer any A record request with ip of the docker host.
            response = requests.post('http://localhost:{0}/set-default-ipv4'.format(CHALLTESTSRV_PORT),
                                     json={'ip': '10.77.77.1'})
            response.raise_for_status()
        except BaseException:
            # If we failed to set up boulder, print its logs.
            print('=> Boulder setup failed. Boulder logs are:')
            process = self._launch_process(['docker-compose', 'logs'], cwd=instance_path, force_stderr=True)
            process.wait()
            raise

        print('=> Finished boulder instance deployment.')

    def _prepare_http_proxy(self):
        """Configure and launch an HTTP proxy"""
        print('=> Configuring the HTTP proxy...')
        # Route each node's wildcard test domain to that node's http port.
        mapping = {r'.+\.{0}\.wtf'.format(node): 'http://127.0.0.1:{0}'.format(port)
                   for node, port in self.acme_xdist['http_port'].items()}
        command = [sys.executable, proxy.__file__, str(HTTP_01_PORT), json.dumps(mapping)]
        self._launch_process(command)
        print('=> Finished configuring the HTTP proxy.')

    def _launch_process(self, command, cwd=os.getcwd(), env=None, force_stderr=False):
        """Launch silently a subprocess OS command

        NOTE(review): the ``cwd=os.getcwd()`` default is evaluated once at
        function definition time; if the process working directory changes
        afterwards, the default is stale. Callers here always pass an explicit
        cwd when it matters, but confirm before reusing this helper elsewhere.
        """
        if not env:
            env = os.environ
        stdout = sys.stderr if force_stderr else self._stdout
        process = subprocess.Popen(command, stdout=stdout, stderr=subprocess.STDOUT, cwd=cwd, env=env)
        # Track the child so stop() can terminate and reap it later.
        self._processes.append(process)
        return process
def main():
    """Command-line entry point: run a standalone ACME server until Ctrl+C.

    Usage: ``acme_server.py [pebble|boulder-v1|boulder-v2]`` (defaults to
    ``pebble``). Raises ValueError for any other server type.
    """
    args = sys.argv[1:]
    server_type = args[0] if args else 'pebble'
    possible_values = ('pebble', 'boulder-v1', 'boulder-v2')
    if server_type not in possible_values:
        raise ValueError('Invalid server value {0}, should be one of {1}'
                         .format(server_type, possible_values))

    acme_server = ACMEServer(server_type, [], http_proxy=False, stdout=True)
    try:
        with acme_server as acme_xdist:
            # BUG FIX: the original format string used placeholder {0} twice
            # with a single argument, so the directory URL was printed in both
            # slots instead of the server type.
            print('--> Instance of {0} is running, directory URL is {1}'
                  .format(server_type, acme_xdist['directory_url']))
            print('--> Press CTRL+C to stop the ACME server.')
            while True:
                time.sleep(3600)
    except KeyboardInterrupt:
        # Normal shutdown path: the context manager stops and cleans the stack.
        pass
# Allow running this module as a standalone script.
if __name__ == '__main__':
    main()
| 44.696581 | 108 | 0.625394 |
1273b620e276796c28d3d6ee287408bc1467646c | 291 | py | Python | vmraid/patches/v7_0/set_user_fullname.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | vmraid/patches/v7_0/set_user_fullname.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | vmraid/patches/v7_0/set_user_fullname.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import vmraid
def execute():
    """Patch: recompute and persist ``full_name`` for every User document."""
    vmraid.reload_doc("Core", "DocType", "User")
    for user_row in vmraid.db.get_all('User'):
        user_doc = vmraid.get_doc('User', user_row.name)
        user_doc.set_full_name()
        # Persist without bumping the document's "modified" timestamp.
        user_doc.db_set('full_name', user_doc.full_name, update_modified=False)
036ea907f39f81ac07aa19830525ffa9a7450677 | 7,838 | py | Python | tests/system/base.py | rpatil524/mlrun | bb2259a959f871d7a479834ddc55ad1470e6c2c0 | [
"Apache-2.0"
] | null | null | null | tests/system/base.py | rpatil524/mlrun | bb2259a959f871d7a479834ddc55ad1470e6c2c0 | [
"Apache-2.0"
] | null | null | null | tests/system/base.py | rpatil524/mlrun | bb2259a959f871d7a479834ddc55ad1470e6c2c0 | [
"Apache-2.0"
] | 1 | 2019-12-10T01:54:27.000Z | 2019-12-10T01:54:27.000Z | import os
import pathlib
import sys
import typing
import pytest
import yaml
import mlrun.api.schemas
from mlrun import get_run_db, mlconf, set_environment
from mlrun.utils import create_logger
# Module-level logger shared by all system tests defined in this module.
logger = create_logger(level="debug", name="test-system")
class TestMLRunSystem:
    """Base class for MLRun system tests.

    Loads mandatory environment variables from ``tests/system/env.yml``,
    points the MLRun client at the configured API before each test, and
    deletes the test project and restores the environment afterwards.
    Subclasses may override :meth:`custom_setup` / :meth:`custom_teardown`.
    """

    project_name = "system-test-project"
    root_path = pathlib.Path(__file__).absolute().parent.parent.parent
    env_file_path = root_path / "tests" / "system" / "env.yml"
    results_path = root_path / "tests" / "test_results" / "system"
    enterprise_marker_name = "enterprise"
    mandatory_env_vars = [
        "MLRUN_DBPATH",
    ]
    # Tests marked "enterprise" additionally need V3IO connection details.
    mandatory_enterprise_env_vars = mandatory_env_vars + [
        "V3IO_API",
        "V3IO_FRAMESD",
        "V3IO_USERNAME",
        "V3IO_ACCESS_KEY",
    ]

    def setup_method(self, method):
        """Prepare environment, DB connection, and project before each test."""
        self._logger = logger
        self._logger.info(
            f"Setting up test {self.__class__.__name__}::{method.__name__}"
        )

        self._test_env = {}
        self._old_env = {}
        self._setup_env(self._get_env_from_file())

        self._run_db = get_run_db()

        # the dbpath is already configured on the test startup before this stage
        # so even though we set the env var, we still need to directly configure
        # it in mlconf.
        mlconf.dbpath = self._test_env["MLRUN_DBPATH"]

        set_environment(
            artifact_path="/User/data", project=self.project_name,
        )

        self.custom_setup()

        self._logger.info(
            f"Finished setting up test {self.__class__.__name__}::{method.__name__}"
        )

    def teardown_method(self, method):
        """Delete the test project and restore the environment after each test."""
        self._logger.info(
            f"Tearing down test {self.__class__.__name__}::{method.__name__}"
        )

        self.custom_teardown()

        self._logger.debug("Removing test data from database")
        self._run_db.delete_project(
            self.project_name,
            deletion_strategy=mlrun.api.schemas.DeletionStrategy.cascade,
        )

        self._teardown_env()
        self._logger.info(
            f"Finished tearing down test {self.__class__.__name__}::{method.__name__}"
        )

    def custom_setup(self):
        """Hook for subclasses; runs at the end of setup_method."""
        pass

    def custom_teardown(self):
        """Hook for subclasses; runs at the start of teardown_method."""
        pass

    @classmethod
    def skip_test_if_env_not_configured(cls, test):
        """Decorator: skip the test unless all mandatory env vars are set."""
        mandatory_env_vars = (
            cls.mandatory_enterprise_env_vars
            if cls._has_marker(test, cls.enterprise_marker_name)
            else cls.mandatory_env_vars
        )
        configured = True
        try:
            env = cls._get_env_from_file()
        except FileNotFoundError:
            configured = False
        else:
            for env_var in mandatory_env_vars:
                if env_var not in env or env[env_var] is None:
                    configured = False

        return pytest.mark.skipif(
            not configured,
            reason=f"This is a system test, add the needed environment variables {*mandatory_env_vars,} "
            "in tests/system/env.yml to run it",
        )(test)

    @property
    def assets_path(self):
        """Path to the ``assets`` directory next to the concrete test module."""
        return (
            pathlib.Path(sys.modules[self.__module__].__file__).absolute().parent
            / "assets"
        )

    @classmethod
    def _get_env_from_file(cls) -> dict:
        """Load the env var mapping from tests/system/env.yml."""
        with cls.env_file_path.open() as f:
            return yaml.safe_load(f)

    def _setup_env(self, env: dict):
        """Apply the given env vars, remembering previous values for teardown."""
        self._logger.debug("Setting up test environment")
        self._test_env.update(env)

        # save old env vars for returning them on teardown
        for env_var, value in env.items():
            if env_var in os.environ:
                self._old_env[env_var] = os.environ[env_var]

            if value:
                os.environ[env_var] = value

        # reload the config so changes to the env vars will take affect
        mlrun.config.config.reload()

    def _teardown_env(self):
        """Remove test env vars and restore the previously saved values."""
        self._logger.debug("Tearing down test environment")
        for env_var in self._test_env:
            if env_var in os.environ:
                del os.environ[env_var]
        os.environ.update(self._old_env)

        # reload the config so changes to the env vars will take affect
        mlrun.config.config.reload()

    def _get_v3io_user_store_path(self, path: pathlib.Path, remote: bool = True) -> str:
        """Return the given path prefixed by the user's V3IO store location."""
        v3io_user = self._test_env["V3IO_USERNAME"]
        prefixes = {
            "remote": f"v3io:///users/{v3io_user}",
            "local": "/User",
        }
        prefix = prefixes["remote"] if remote else prefixes["local"]
        return prefix + str(path)

    def _verify_run_spec(
        self,
        run_spec,
        parameters: dict = None,
        inputs: dict = None,
        outputs: list = None,
        output_path: str = None,
        function: str = None,
        secret_sources: list = None,
        data_stores: list = None,
        scrape_metrics: bool = None,
    ):
        """Assert that the given run spec matches every provided expectation."""
        self._logger.debug("Verifying run spec", spec=run_spec)
        if parameters:
            assert run_spec["parameters"] == parameters
        if inputs:
            assert run_spec["inputs"] == inputs
        if outputs:
            assert run_spec["outputs"] == outputs
        if output_path:
            assert run_spec["output_path"] == output_path
        if function:
            assert run_spec["function"] == function
        if secret_sources:
            assert run_spec["secret_sources"] == secret_sources
        if data_stores:
            assert run_spec["data_stores"] == data_stores
        if scrape_metrics is not None:
            assert run_spec["scrape_metrics"] == scrape_metrics

    def _verify_run_metadata(
        self,
        run_metadata,
        uid: str = None,
        name: str = None,
        project: str = None,
        labels: dict = None,
        iteration: int = None,
    ):
        """Assert that the given run metadata matches every provided expectation."""
        self._logger.debug("Verifying run metadata", spec=run_metadata)
        if uid:
            assert run_metadata["uid"] == uid
        if name:
            assert run_metadata["name"] == name
        if project:
            assert run_metadata["project"] == project
        if iteration:
            # BUG FIX: this previously compared the metadata iteration against
            # ``project`` (a copy-paste error), not against ``iteration``.
            assert run_metadata["iteration"] == iteration
        if labels:
            for label, label_value in labels.items():
                assert label in run_metadata["labels"]
                assert run_metadata["labels"][label] == label_value

    def _verify_run_outputs(
        self,
        run_outputs,
        uid: str,
        name: str,
        project: str,
        output_path: pathlib.Path,
        accuracy: int = None,
        loss: int = None,
        best_iteration: int = None,
        iteration_results: bool = False,
    ):
        """Assert that the run produced the expected artifacts and metrics."""
        self._logger.debug("Verifying run outputs", spec=run_outputs)
        iterpath = str(best_iteration) if best_iteration else ""
        assert run_outputs["model"] == str(output_path / iterpath / "model.txt")
        assert run_outputs["html_result"] == str(output_path / iterpath / "result.html")
        assert run_outputs["chart"] == str(output_path / iterpath / "chart.html")
        assert run_outputs["mydf"] == f"store://artifacts/{project}/{name}_mydf:{uid}"
        if accuracy:
            assert run_outputs["accuracy"] == accuracy
        if loss:
            assert run_outputs["loss"] == loss
        if best_iteration:
            assert run_outputs["best_iteration"] == best_iteration
        if iteration_results:
            assert run_outputs["iteration_results"] == str(
                output_path / "iteration_results.csv"
            )

    @staticmethod
    def _has_marker(test: typing.Callable, marker_name: str) -> bool:
        """Return True if the test function carries the given pytest marker."""
        try:
            return (
                len([mark for mark in test.pytestmark if mark.name == marker_name]) > 0
            )
        except AttributeError:
            # The test has no pytestmark attribute at all.
            return False
| 32.658333 | 105 | 0.598495 |
1d3ae7389b399516939759b9563c7b1b62287e4c | 4,476 | py | Python | src/m2_functions.py | mccurrec/02-ObjectsFunctionsAndMethods | f1662a0c1493c80294859c5385c24ef6042b3fae | [
"MIT"
] | null | null | null | src/m2_functions.py | mccurrec/02-ObjectsFunctionsAndMethods | f1662a0c1493c80294859c5385c24ef6042b3fae | [
"MIT"
] | null | null | null | src/m2_functions.py | mccurrec/02-ObjectsFunctionsAndMethods | f1662a0c1493c80294859c5385c24ef6042b3fae | [
"MIT"
] | null | null | null | """
Practice DEFINING and CALLING
FUNCTIONS
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Ezrie McCurry.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# DONE: 2.
# Allow this module to use the rosegraphics.py module by marking the
# src
# folder in this project as a "Sources Root", as follows:
#
# In the Project window (to the left), right click on the src folder,
# then select Mark Directory As ~ Sources Root.
#
###############################################################################
import rosegraphics as rg
import math
def main():
    """
    TESTS the functions that you will write below.

    Prints the hypotenuse of a 3-4 right triangle, then calls turtle_draw
    twice with different colors and pen thicknesses.
    """
    # BUG FIX: the docstring was originally placed AFTER these statements,
    # where it is just a no-op string expression; a docstring must be the
    # first statement of the function to count as documentation.
    print(hypotenuse(3, 4))
    turtle_draw('pink', 10)
    turtle_draw('red', 25)
###############################################################################
# DONE: 3a. Define a function immediately below this _TODO_.
# It takes two arguments that denote, for a right triangle,
# the lengths of the two sides adjacent to its right angle,
# and it returns the length of the hypotenuse of that triangle.
# HINT: Apply the Pythagorean theorem.
#
# You may name the function and its parameters whatever you wish.
#
# DONE: 3b. In main, CALL your function and print the returned value,
# to test whether you defined the function correctly.
#
###############################################################################
def hypotenuse(a, b):
    """
    Returns the length of the hypotenuse of a right triangle whose two
    sides adjacent to the right angle have lengths a and b, per the
    Pythagorean theorem.
    """
    sum_of_squares = a ** 2 + b ** 2
    return math.sqrt(sum_of_squares)
###############################################################################
# DONE: 4a. Define a function immediately below this _TODO_.
# It takes two arguments:
# -- a string that represents a color (e.g. 'red')
# -- a positive integer that represents the thickness of a Pen.
#
# The function should do the following (in the order listed):
# a. Constructs a TurtleWindow.
# b. Constructs two SimpleTurtles, where:
# - one has a Pen whose color is "green" and has the GIVEN thickness
# - - the other has a Pen whose color is the GIVEN color
# and whose thickness is 5
#
# Note: the "GIVEN" color means the PARAMETER that represents a color.
# Likewise, the "GIVEN" thickness means the PARAMETER for thickness.
#
# c. Makes the first (green) SimpleTurtle move FORWARD 100 pixels, and
# makes the other (thickness 5) SimpleTurtle move BACKWARD 100 pixels.
#
# d. Tells the TurtleWindow to wait until the mouse is clicked.
#
# You may name the function and its parameters whatever you wish.
#
# DONE: 4b. In main, CALL your function at least TWICE (with different values
# for the arguments) to test whether you defined the function correctly.
#
###############################################################################
def turtle_draw(color: str, size: int) -> None:
    """
    Open a TurtleWindow and draw with two SimpleTurtles:
      - ``turtleA`` has a green Pen of the GIVEN thickness and moves
        FORWARD 100 pixels;
      - ``turtleB`` has a Pen of the GIVEN color with thickness 5 and
        moves BACKWARD 100 pixels.
    The window then waits for a mouse click before closing.

    :param color: Pen color for the second turtle (e.g. 'red').
    :param size: Pen thickness (positive integer) for the first turtle.
    """
    window = rg.TurtleWindow()
    turtleA = rg.SimpleTurtle('turtle')
    turtleA.pen = rg.Pen('green', size)
    turtleB = rg.SimpleTurtle('turtle')
    turtleB.pen = rg.Pen(color, 5)
    turtleA.forward(100)
    turtleB.backward(100)
    window.close_on_mouse_click()
###############################################################################
# DONE: 5.
# COMMIT-and-PUSH your work (after changing this TO-DO to DONE).
#
# As a reminder, here is how you should do so:
# 1. Select VCS from the menu bar (above).
# 2. Choose Commit from the pull-down menu that appears.
# 3. In the Commit Changes window that pops up,
# press the Commit and Push button.
# Note: If you see only a Commit button:
# - HOVER over the Commit button
# (in the lower-right corner of the window)
# - CLICK on Commit and Push.
#
# COMMIT adds the changed work to the version control on your computer.
# PUSH adds the changed work into your Github repository in the "cloud".
#
# COMMIT-and-PUSH your work as often as you want, but at the least, commit
# and push after you have tested a module and believe that it is correct.
#
###############################################################################
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 37.932203 | 79 | 0.557194 |
611149c794c223bdb644f790d781937a96a12fa5 | 8,148 | py | Python | hexa/plugins/connector_s3/schema.py | hastakecorp/openhexa-app | dde38f14701c3bfe7fb51f29d73b35e6b8d45b7b | [
"MIT"
] | null | null | null | hexa/plugins/connector_s3/schema.py | hastakecorp/openhexa-app | dde38f14701c3bfe7fb51f29d73b35e6b8d45b7b | [
"MIT"
] | 1 | 2021-09-07T08:10:18.000Z | 2021-09-07T08:10:18.000Z | hexa/plugins/connector_s3/schema.py | hastakecorp/openhexa-app | dde38f14701c3bfe7fb51f29d73b35e6b8d45b7b | [
"MIT"
] | null | null | null | from ariadne import convert_kwargs_to_snake_case, QueryType, ObjectType, MutationType
from django.http import HttpRequest
from django.template.defaultfilters import filesizeformat
from django.templatetags.static import static
from django.utils.translation import gettext_lazy as trans
from hexa.core.resolvers import resolve_tags
from hexa.core.graphql import result_page
from hexa.plugins.connector_s3.models import Bucket, Object
s3_type_defs = """
extend type Query {
s3Bucket(id: String, s3Name: String): S3Bucket
s3Object(id: String, bucketS3Name: String, s3Key: String): S3Object
s3Objects(
bucketS3Name: String!,
parentS3Key: String,
page: Int!,
perPage: Int
): S3ObjectPage!
}
type S3Bucket {
id: String!
contentType: String!
name: String!
s3Name: String!
shortName: String!
description: String!
url: String!
contentSummary: String!
countries: [Country!]
owner: Organization
lastSyncedAt: DateTime
tags: [CatalogTag!]
icon: String!
objects(
page: Int!,
perPage: Int
): S3ObjectPage!
}
type S3Object {
# Base
id: String!
createdAt: DateTime!
updatedAt: DateTime!
# RichContent
owner: Organization
shortName: String!
description: String!
countries: [Country!]
locale: String!
# Content
tags: [CatalogTag!]
# S3Object
bucket: S3Bucket
"Path of the object within the bucket"
s3Key: String
s3Size: Int
s3Type: ObjectS3Type
s3LastModified: DateTime
s3Extension: String
objects(
page: Int!,
perPage: Int
): S3ObjectPage!
name: String!
typeDescription: String!
sizeDescription: String!
detailUrl: String!
}
enum ObjectS3Type {
file
directory
}
type S3ObjectPage {
pageNumber: Int!
totalPages: Int!
totalItems: Int!
items: [S3Object!]!
}
input S3BucketInput {
name: String
shortName: String
countries: [CountryInput!]
tags: [CatalogTagInput!]
url: String
description: String
owner: OrganizationInput
s3Name: String
}
extend type Mutation {
s3BucketUpdate(id: String!, input: S3BucketInput!): S3Bucket!
}
type S3BucketUpdateResult {
bucket: S3Bucket
errors: [FormError!]
}
type FormError {
field: String
message: String
code: String
}
"""
s3_query = QueryType()
@s3_query.field("s3Bucket")
def resolve_s3_bucket(_, info, **kwargs):
    """Look up a single bucket — by its S3 name or by primary key —
    restricted to buckets the requesting user may see. Returns None when
    neither lookup argument is supplied."""
    request: HttpRequest = info.context["request"]
    if "s3Name" in kwargs:
        return Bucket.objects.filter_for_user(request.user).get(name=kwargs["s3Name"])
    if "id" in kwargs:
        return Bucket.objects.filter_for_user(request.user).get(pk=kwargs["id"])
    return None
@s3_query.field("s3Object")
def resolve_s3_object(_, info, **kwargs):
    """Look up a single object either by (bucket S3 name, key) or by
    primary key, restricted to what the requesting user may see.

    BUG FIX: the condition previously read
    ``if "bucketS3Name" and "s3Key" in kwargs`` — the first operand is a
    non-empty string literal (always truthy), so only ``s3Key`` was
    actually tested and a query passing ``s3Key`` without ``bucketS3Name``
    raised ``KeyError`` instead of falling through to the ``id`` branch.
    """
    request: HttpRequest = info.context["request"]
    if "bucketS3Name" in kwargs and "s3Key" in kwargs:
        # Stored keys are prefixed with the bucket name.
        return Object.objects.filter_for_user(request.user).get(
            bucket__name=kwargs["bucketS3Name"],
            key=f"{kwargs['bucketS3Name']}/{kwargs['s3Key']}",
        )
    elif "id" in kwargs:
        return Object.objects.filter_for_user(request.user).get(pk=kwargs["id"])
    return None
@s3_query.field("s3Objects")
def resolve_s3_objects(_, info, **kwargs):
    """Paginated listing of objects directly under ``parentS3Key`` (or the
    bucket root when no parent key is given)."""
    request: HttpRequest = info.context["request"]
    bucket_name = kwargs["bucketS3Name"]
    queryset = Object.objects.filter_for_user(request.user).filter(
        bucket__name=bucket_name
    )
    # Stored parent keys carry the bucket-name prefix.
    if "parentS3Key" in kwargs:
        parent_key = f"{bucket_name}/{kwargs['parentS3Key']}"
    else:
        parent_key = f"{bucket_name}/"
    queryset = queryset.filter(parent_key=parent_key)
    return result_page(queryset=queryset, page=kwargs["page"])
bucket = ObjectType("S3Bucket")
bucket.set_field("tags", resolve_tags)
bucket.set_alias("s3Name", "name")
@bucket.field("icon")
def resolve_icon(obj: Bucket, info):
    """Absolute URL of the static S3 symbol used as the bucket icon."""
    request: HttpRequest = info.context["request"]
    # Plain string literal: the former f-string had no placeholders.
    return request.build_absolute_uri(static("connector_s3/img/symbol.svg"))
@bucket.field("contentType")
def resolve_content_type(obj: Bucket, info):
    """Translatable, human-readable content-type label for the catalog."""
    return trans("S3 Bucket")
@bucket.field("objects")
@convert_kwargs_to_snake_case
def resolve_objects(obj: Bucket, info, page, per_page=None):
    """Paginated objects living at the bucket root (parent key is the
    bucket-name prefix)."""
    top_level = obj.object_set.filter(parent_key=f"{obj.name}/")
    return result_page(top_level, page, per_page)
s3_object = ObjectType("S3Object")
s3_object.set_alias("s3Key", "key")
s3_object.set_alias("s3Size", "size")
s3_object.set_alias("s3Type", "type")
s3_object.set_alias("s3LastModified", "last_modified")
s3_object.set_alias("s3Extension", "extension")
s3_object.set_field("tags", resolve_tags)
@s3_object.field("objects")
@convert_kwargs_to_snake_case
def resolve_S3_objects_on_object(obj: Object, info, page, per_page=None):
    """Paginated direct children of this (directory) object."""
    children = Object.objects.filter(parent_key=obj.key)
    return result_page(children, page, per_page)
s3_mutation = MutationType()
@s3_object.field("name")
def resolve_file_name(obj: Object, *_):  # TODO: proper method or property on model
    """Basename of the object's key; directories keep a trailing slash."""
    if obj.type == "directory":
        return obj.key.rstrip("/").rpartition("/")[2] + "/"
    return obj.key.rpartition("/")[2]
@s3_object.field("typeDescription")
def resolve_object_type(obj: Object, *_):
    """Human-readable, translatable type label for an S3 object, derived
    from its type and file extension."""
    if obj.type == "directory":
        return trans("Directory")
    descriptions = {
        "xlsx": "Excel file",
        "md": "Markdown document",
        "ipynb": "Jupyter Notebook",
        "csv": "CSV file",
    }
    return trans(descriptions.get(obj.extension, "File"))
@s3_object.field("sizeDescription")
def resolve_file_size_display(obj: Object, *_):
    """Human-readable size, or a dash for zero-sized entries."""
    if obj.size > 0:
        return filesizeformat(obj.size)
    return "-"
@s3_object.field("detailUrl")
def resolve_detail_url(obj: Object, *_):
    """Relative URL of the object's detail page."""
    return "/s3/{}".format(obj.key)
@s3_mutation.field("s3BucketUpdate")
def resolve_s3_bucket_update(_, info, **kwargs):
    """Apply a partial update to a Bucket from the GraphQL input payload
    and return the saved instance. Only keys present in the input are
    touched."""
    # Obviously we need some kind of serializer here
    bucket_to_update = Bucket.objects.get(id=kwargs["id"])
    bucket_data = kwargs["input"]
    # Plain scalar fields: input key -> model attribute.
    scalar_fields = (
        ("name", "name"),
        ("shortName", "short_name"),
        ("url", "url"),
        ("description", "description"),
    )
    for input_key, attr in scalar_fields:
        if input_key in bucket_data:
            setattr(bucket_to_update, attr, bucket_data[input_key])
    if "countries" in bucket_data:
        bucket_to_update.countries = [
            country["code"] for country in bucket_data["countries"]
        ]
    if "tags" in bucket_data:
        bucket_to_update.tags.set([tag["id"] for tag in bucket_data["tags"]])
    if "owner" in bucket_data:
        bucket_to_update.owner_id = bucket_data["owner"]["id"]
    bucket_to_update.save()
    return bucket_to_update
# class BucketForm(GraphQLModelForm):
# name = forms.CharField(required=False, min_length=3, empty_value=EmptyValue())
# short_name = forms.CharField(required=False)
# tags = GraphQLModelMultipleChoiceField(queryset=Tag.objects.all(), required=False)
# countries = GraphQLMultipleChoiceField(
# required=False, key_name="code", choices=dict(countries).items()
# )
# owner = GraphQLModelChoiceField(queryset=Organization.objects.all(), required=False)
# url = forms.URLField(required=False)
# description = forms.CharField(required=False)
#
#
# @s3_mutation.field("s3BucketUpdate")
# def resolve_s3_bucket_update(_, info, **kwargs):
# bucket = Bucket.objects.get(id=kwargs["id"])
# form = BucketForm(kwargs["input"], instance=bucket)
# if form.is_valid():
# return form.save()
# else:
# return form.graphql_errors
s3_bindables = [s3_query, s3_mutation, bucket, s3_object]
| 28 | 90 | 0.652062 |
b84ae3516df453378e341d9a3e5d9e99b0c3ef93 | 5,679 | py | Python | pettingzoo/classic/backgammon/backgammon_env.py | rodrigodelazcano/PettingZoo | 41fe43c7da2fd92fa8c6aa5a5a28083664092aa5 | [
"Apache-2.0"
] | null | null | null | pettingzoo/classic/backgammon/backgammon_env.py | rodrigodelazcano/PettingZoo | 41fe43c7da2fd92fa8c6aa5a5a28083664092aa5 | [
"Apache-2.0"
] | null | null | null | pettingzoo/classic/backgammon/backgammon_env.py | rodrigodelazcano/PettingZoo | 41fe43c7da2fd92fa8c6aa5a5a28083664092aa5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from gym import spaces
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector, wrappers
from . import bg_utils
from .backgammon import BLACK, COLORS, WHITE
from .backgammon import Backgammon as Game
def env(**kwargs):
    """Build the backgammon environment wrapped in the standard PettingZoo
    utility wrappers: stdout capture, termination on illegal moves (reward
    -1), out-of-bounds assertions, and call-order enforcement."""
    wrapped = raw_env(**kwargs)
    wrapped = wrappers.CaptureStdoutWrapper(wrapped)
    wrapped = wrappers.TerminateIllegalWrapper(wrapped, illegal_reward=-1)
    wrapped = wrappers.AssertOutOfBoundsWrapper(wrapped)
    wrapped = wrappers.OrderEnforcingWrapper(wrapped)
    return wrapped
class raw_env(AECEnv):
    """Two-player backgammon as a turn-based (AEC) PettingZoo environment.

    Observations are a 198-feature board encoding; the discrete action
    space indexes encoded move pairs, with ``26 * 26 * 2`` reserved as the
    explicit "no legal move" action.

    BUG FIX: dice were previously drawn with ``np_random.randint(1, 6)``.
    NumPy's ``randint`` excludes the upper bound, so a 6 could never be
    rolled. All dice rolls now go through ``_roll_dice`` which uses
    ``randint(1, 7)``.
    """

    metadata = {"render.modes": ["human"], "name": "backgammon_v3"}

    def __init__(self):
        super().__init__()
        self.game = Game()
        self.seed()
        self.agents = [f"player_{i}" for i in range(2)]
        self.possible_agents = self.agents[:]
        self._agent_order = list(self.agents)
        self._agent_selector = agent_selector(self._agent_order)
        self.infos = {i: {} for i in self.agents}
        self.action_spaces = {
            name: spaces.Discrete(26 * 26 * 2 + 1) for name in self.agents
        }
        # Observation bounds: most features are in [0, 1]; every fourth
        # feature encodes a checker count and can reach 6, and the two
        # borne-off features can reach 7.5.
        low = np.zeros((198,))
        high = np.ones((198,))
        for i in range(3, 97, 4):
            high[i] = 6.0
        high[96] = 7.5
        for i in range(101, 195, 4):
            high[i] = 6.0
        high[194] = 7.5
        self.observation_spaces = {
            i: spaces.Dict(
                {
                    "observation": spaces.Box(
                        low=np.float32(low), high=np.float32(high), dtype=np.float32
                    ),
                    "action_mask": spaces.Box(
                        low=0, high=1, shape=(1353,), dtype=np.int8
                    ),
                }
            )
            for i in self.agents
        }
        # Counts remaining extra play phases granted by rolling doubles.
        self.double_roll = 0

    def seed(self, seed=None):
        """Seed the dice RNG."""
        self.np_random = np.random.RandomState(seed)

    def _roll_dice(self):
        """Return a pair of fair six-sided dice values in 1..6 inclusive.

        ``randint``'s upper bound is exclusive, hence ``(1, 7)``.
        """
        return self.np_random.randint(1, 7), self.np_random.randint(1, 7)

    def step(self, action):
        """Apply ``action`` for the current agent, assign terminal rewards
        when the game ends, and roll fresh dice when the turn passes."""
        if self.dones[self.agent_selection]:
            return self._was_done_step(action)
        if action != 26 ** 2 * 2:  # 26**2 * 2 is the dedicated no-op action
            action = bg_utils.to_bg_format(action, self.roll)
            self.game.execute_play(self.colors[self.agent_selection], action)
        winner = self.game.get_winner()
        if winner is not None:
            opp_agent = bg_utils.opp_agent(self, self.agent_selection)
            if winner == self.colors[self.agent_selection]:
                self.rewards[self.agent_selection] = 1
                self.rewards[opp_agent] = -1
            else:
                self.rewards[self.agent_selection] = -1
                self.rewards[opp_agent] = 1
            self.dones = {i: True for i in self.agents}
        else:
            self._clear_rewards()
        if self.double_roll == 0:
            # Turn passes: advance to the next agent and roll new dice.
            self.agent_selection = self._agent_selector.next()
            roll = self._roll_dice()
            if roll[0] == roll[1]:
                # Doubles grant two extra play phases.
                self.double_roll = 2
            if self.colors[self.agent_selection] == WHITE:
                # WHITE moves in the negative direction, so negate the dice.
                roll = (-roll[0], -roll[1])
            self.roll = roll
        self._accumulate_rewards()

    def observe(self, agent):
        """Return the board features and legal-action mask for ``agent``."""
        action_mask = np.zeros(1353, int)
        observation = np.array(
            self.game.get_board_features(agent), dtype=np.float32
        ).reshape(198,)
        # only current agent can make legal moves
        if agent == self.agent_selection:
            valid_moves = bg_utils.get_valid_actions(self, self.roll)
            if self.double_roll > 0:
                self.handle_double_roll()
                valid_moves = bg_utils.double_roll(valid_moves)
                self.double_roll -= 1
            legal_moves = bg_utils.to_gym_format(valid_moves, self.roll)
            if len(legal_moves) == 0:
                # No legal move: only the dedicated no-op action is allowed.
                legal_moves = [26 ** 2 * 2]
        else:
            legal_moves = []
        for i in legal_moves:
            action_mask[i] = 1
        return {"observation": observation, "action_mask": action_mask}

    def reset(self):
        """Start a new game: reset bookkeeping, roll for the opening move,
        and assign colors (the higher opening die plays WHITE)."""
        self.agents = self.possible_agents[:]
        self.dones = {i: False for i in self.agents}
        self.infos = {i: {"legal_moves": []} for i in self.agents}
        self._agent_order = list(self.agents)
        self._agent_selector.reinit(self._agent_order)
        self.agent_selection = self._agent_selector.reset()
        self.rewards = {i: 0 for i in self.agents}
        self._cumulative_rewards = {i: 0 for i in self.agents}
        self.colors = {}
        self.double_roll = 0
        self.game = Game()
        opp_agent = bg_utils.opp_agent(self, self.agent_selection)
        # Opening roll: ties are re-rolled.
        roll = self._roll_dice()
        while roll[0] == roll[1]:
            roll = self._roll_dice()
        if roll[0] > roll[1]:
            self.colors[self.agent_selection] = WHITE
            self.colors[opp_agent] = BLACK
            # WHITE moves in the negative direction, so negate the dice.
            roll = (-roll[0], -roll[1])
        else:
            self.colors[self.agent_selection] = BLACK
            self.colors[opp_agent] = WHITE
        self.roll = roll

    def render(self, mode="human"):
        # The assert message calls print(mode); it is only evaluated when
        # the assertion fails (quirky, but preserved).
        assert mode in ["human"], print(mode)
        if mode == "human":
            self.game.render()
        return True

    def close(self):
        pass

    def handle_double_roll(self):
        """On the first extra phase of a double, swap the agent order so the
        same player acts twice in a row."""
        if self.double_roll == 1:
            a = self._agent_order[0]
            self._agent_order[0] = self._agent_order[1]
            self._agent_order[1] = a
            self._agent_selector.reinit(self._agent_order)
            if self.agent_selection == self._agent_order[0]:
                self._agent_selector.next()
| 33.60355 | 84 | 0.564184 |
6f013dc09e14747419a9c36d1fc6eb291ac64ff5 | 12,394 | py | Python | chemdataextractor_batteries/chemdataextractor/parse/previous_parsers/battery_voltage.py | ShuHuang/batterydatabase | 10a94617c00ddca6e5e382f50bfe0b6079457166 | [
"MIT"
] | 26 | 2020-08-06T13:40:58.000Z | 2022-03-23T13:34:45.000Z | chemdataextractor_batteries/chemdataextractor/parse/previous_parsers/battery_voltage.py | ShuHuang/batterydatabase | 10a94617c00ddca6e5e382f50bfe0b6079457166 | [
"MIT"
] | 3 | 2020-08-07T10:54:37.000Z | 2021-07-13T20:42:21.000Z | chemdataextractor_batteries/chemdataextractor/parse/previous_parsers/battery_voltage.py | ShuHuang/batterydatabase | 10a94617c00ddca6e5e382f50bfe0b6079457166 | [
"MIT"
] | 8 | 2020-09-15T14:48:12.000Z | 2022-01-29T05:54:24.000Z | # -*- coding: utf-8 -*-
"""
chemdataextractor.parse.battery_voltage.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parser for voltage.
"""
import logging
from lxml import etree
import traceback
from . import R, I, W, Optional, merge, join
from .base import BaseSentenceParser
from ..utils import first
from .cem import cem, chemical_label, lenient_chemical_label, solvent_name
from .common import lbrct, dt, rbrct, comma
from .elements import W, I, R, T, Optional, Any, OneOrMore, Not, ZeroOrMore, SkipTo
log = logging.getLogger(__name__)
# Punctuation that may separate a reported value from surrounding text.
delim = R(r'^[:;\.,]$')
# Voltage unit token: "V" or "mV", optionally followed by a reference-
# electrode qualifier such as "(vs. Li)"; the trailing Not(...) rejects
# rate-like continuations ("V/s").
units = (R('^m?V$') +
         Optional(Optional(lbrct).hide() +
                  R('^v(s(.)?|ersus)$') +
                  Optional(R('Li|Na|Ag|K')) +
                  Optional(rbrct).hide()) +
         Not(Optional(R('/')) +
             R('s')))('units').add_action(merge)
# A numeric range written as a single token, e.g. "1.2-3.4" or "0.5~1.5".
# BUG FIX: the first decimal part previously read "(\.\\d+)?" (literal dot
# then a literal backslash followed by "d"s) and spaced_range's read
# "(\.d+)?" (dot then literal "d"s), so decimals were never matched;
# corrected to "(\.\d+)?" to agree with every other numeric pattern here.
joined_range = R(
    r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?[\-––-−~∼˜]\d+(\.\d+)?(\(\d\))?$')('value').add_action(join)
# A range split across tokens, e.g. "1.2 V - 3.4" or "1.2 ± 0.1".
spaced_range = (R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$') + Optional(units).hide() + (R(r'^[\-±–−~∼˜]$') + R(
    r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$') | R(r'^[\+\-–−]\d+(\.\d+)?(\(\d\))?$')))('value').add_action(join)
to_range = (
ZeroOrMore(
R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$') +
Optional(units).hide()) +
Optional(
I('to')) +
R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$'))('value').add_action(join)
and_range = (
ZeroOrMore(
R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$') +
Optional(units).hide() +
Optional(comma)) +
Optional(
I('and') | comma) +
R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$'))('value').add_action(join)
range = (Optional(R(r'^[\-–−]$')) + (and_range | to_range |
spaced_range | joined_range)).add_action(join)
value = (Optional(R(r'^[\-–−]$')) +
Optional(R(r'^[~∼˜\<\>\≤\≥]$')) +
Optional(R(r'^[\-\–\–\−±∓⨤⨦±]$')) +
R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$')).add_action(join)
ordinal = T('JJ').add_action(join)
power = (Optional((range | value) + R('×')) + (R('10') + W('−') + \
R(r'\d') | R(r'^10[\-–−]?\d+$'))).add_action(join)
capa = (power | range | value | ordinal)('value')
cem_prefix = (
Optional('oxidized') +
cem('cem') +
Optional(
I('battery')) +
Optional(delim).hide())
multi_cem = ZeroOrMore(cem_prefix + Optional(comma).hide()) + \
Optional(I('and') | comma).hide() + cem_prefix
capa_specifier = (Optional(I('electronic') | I('electrical')) +
(I('voltage') | I('potential')))('specifier')
prefix = (
Optional(
I('the') | I('a') | I('an') | I('its') | I('with')).hide() +
Optional(
I('inherently')).hide() +
Optional(
I('excellent') | I('high') | I('low') | I('stable') | I('superior') | I('maximum') | I('highest')).hide() +
Optional(
I('initial')).hide() +
capa_specifier +
Optional(
I('varies') +
I('from')).hide() +
Optional(
W('=') | W('~') | W('≈') | W('≃') | I('of') | I('was') | I('is') | I('at') | I('as') | I('near') | I('above') | I('below')).hide() +
Optional(
I('reported') | I('determined') | I('measured') | I('calculated') | I('known')).hide() +
Optional(
I('as') | (
I('to') +
I('be'))).hide() +
Optional(
I('in') +
I('the') +
I('range') | I('ranging')).hide() +
Optional(
I('of')).hide() +
Optional(
I('about') | I('from') | I('approximately') | I('around') | (
I('high') +
I('as')) | (
I('higher') | I('lower') +
I('than')) | (
I('up') +
I('to') | I('in') +
I('excess') +
I('of'))).hide())
capa_and_units = (
Optional(lbrct).hide() +
capa +
units +
Optional(rbrct).hide())('volt')
capa_specifier_and_value = Optional(prefix) + (Optional(delim).hide() + Optional(
lbrct | I('[')).hide() + capa + units + Optional(rbrct | I(']')).hide())('volt')
prefix_cem_value = (
prefix +
Optional(
I('the') | I('a') | I('an') | I('these') | I('those') | I('this') | I('that')).hide() +
Optional(
multi_cem | cem_prefix | lenient_chemical_label) +
Optional(
lbrct +
Optional(
cem_prefix | lenient_chemical_label | multi_cem) +
rbrct) +
Optional(
I('is') | I('was') | I('were') | I('occurs') | I('of') | I('could') | I('can') | I('remained') | (
I('can') +
I('be') +
I('assigned') +
Optional(
I('at') | I('to')))).hide() +
Optional(
I('reach') | I('reaching') | I('observed') | I('determined') | I('measured') | I('calculated') | I('found') | I('increased') | I('expected')).hide() +
Optional(
I('in') +
I('the') +
I('range') +
I('of') | I('ranging') +
I('from') | I('as') | I('to') | I('to') +
I('be') | I('about') | I('over') | (
I('higher') | I('lower')) +
I('than') | I('above')).hide() +
Optional(lbrct).hide() +
(
capa_specifier_and_value | capa_and_units) +
Optional(rbrct).hide())('volt_phrase')
cem_prefix_value = (
(Optional(multi_cem | cem_prefix | lenient_chemical_label))
+ Optional(delim).hide()
+ Optional(I('that') | I('which') | I('was') | I('since') | I('the') | I('resulting') + I('in')).hide()
+ Optional(I('typically') | I('also')).hide()
+ Optional(prefix)
+ Optional(I('display') | I('displays') | I('exhibit') | I('exhibited') | I('exhibits') | I('exhibiting') | I('shows') | I('show') | I('showed') | I('gave') | I('demonstrate') | I('demonstrates') | I('are') | I('remains') | I('maintains') | I('delivered') | I('provided') |
I('undergo') | I('undergoes') | I('has') | I('have') | I('having') | I('determined') | I('with') | I('where') | I('orders') | I('were') | (I('is') + Optional(I('classified') + I('as')))).hide()
+ Optional((I('reported') + I('to') + I('have')) | I('at') | I('with')).hide()
+ Optional(lbrct).hide() + (capa_specifier_and_value | capa_and_units) + Optional(rbrct).hide()
+ Optional(I('can') + I('be') + I('achieved'))
)('volt_phrase')
prefix_value_cem = (
Optional(
I('below') | I('at')).hide() +
Optional(prefix) +
Optional(
I('is') | I('were') | I('was') | I('are')).hide() +
(
capa_specifier_and_value | capa_and_units) +
Optional(
Optional(
I('has') +
I('been') +
I('found')) +
Optional(
I('is') | I('were') | I('was') | I('are')) +
Optional(
I('observed') | I('determined') | I('measured') | I('calculated') | I('reported'))).hide() +
Optional(
capa_specifier_and_value | capa_and_units) +
Optional(
I('in') | I('for') | I('of')).hide() +
Optional(
I('the')).hide() +
Optional(
R('^[:;,]$')).hide() +
Optional(lbrct).hide() +
Optional(
I('of')).hide() +
Optional(
multi_cem | cem_prefix | lenient_chemical_label) +
Optional(rbrct).hide())('volt_phrase')
value_prefix_cem = (Optional(I('of')) +
(capa_specifier_and_value | capa_and_units) +
Optional(delim).hide() +
Optional(I('which') | I('that')).hide() +
Optional(I('has') +
I('been') | I('was') | I('is') | I('were')).hide() +
Optional(I('found') | I('observed') | I('measured') | I('calculated') | I('determined')).hide() +
Optional(I('likely') | I('close') | (I('can') +
I('be'))).hide() +
Optional(I('corresponds') | I('associated')).hide() +
Optional(I('to') +
I('be') | I('with') | I('is') | I('as')).hide() +
Optional(I('the')).hide() +
capa_specifier +
Optional(I('of') | I('in')).hide() +
(multi_cem | cem_prefix | lenient_chemical_label))('volt_phrase')
cem_value_prefix = ((multi_cem | cem_prefix | lenient_chemical_label)
+ Optional((I('is') | I('was') | I('were')) + Optional(I('reported') | I('found') | I('calculate') | I('measured') | I('shown') | I('found')) + Optional(I('to'))).hide()
+ Optional(I('display') | I('displays') | I('exhibit') | I('exhibits') | I('exhibiting') | I('shows') | I('show') | I('demonstrate') | I('demonstrates') |
I('undergo') | I('undergoes') | I('has') | I('have') | I('having') | I('determined') | I('with') | I('where') | I('orders') | (I('is') + Optional(I('classified') + I('as')))).hide()
+ Optional(I('the') | I('a') | I('an')).hide()
+ Optional(I('value') | I('values')).hide()
+ Optional(I('varies') + I('from')).hide()
+ Optional(W('=') | W('~') | W('≈') | W('≃') | I('was') | I('is') | I('at') | I('as') | I('near') | I('above') | I('below')).hide()
+ Optional(I('in') + I('the') + I('range') | I('ranging')).hide()
+ Optional(I('of') | I('about') | I('from') | I('approximately') | I('around') | (I('high') + I('as')) | (I('higher') | I('lower') + I('than'))).hide()
+ (capa_specifier_and_value | capa_and_units)
+ Optional(I('as') | I('of') | I('for')).hide()
+ Optional(I('its') | I('their') | I('the')).hide() + capa_specifier)('volt_phrase')
bc = (
prefix_value_cem
| prefix_cem_value
| value_prefix_cem
| cem_value_prefix
| cem_prefix_value
)
def print_tree(trees):
    """Debug helper: print a parse-result object and, when possible, its
    XML serialization.

    Catches ``Exception`` (narrowed from the original ``BaseException``)
    so serialization failures are reported as 'no tree' without swallowing
    ``KeyboardInterrupt``/``SystemExit``.
    """
    print(trees)
    try:
        print(etree.tostring(trees))
    except Exception:
        print('no tree')
class VoltageParser(BaseSentenceParser):
    """Sentence-level parser extracting battery-voltage records using the
    ``bc`` grammar defined above."""
    root = bc

    def interpret(self, result, start, end):
        """Convert a grammar match into a populated voltage model (yielded).

        :param result: lxml element tree produced by the ``bc`` grammar.
        :param start: start token index of the match (unused here).
        :param end: end token index of the match (unused here).
        """
        # try:
        compound = self.model.fields['compound'].model_class()
        raw_value = first(result.xpath('./volt/value/text()'))
        raw_units = first(result.xpath('./volt/units/text()'))
        try:
            # The specifier element may be absent, in which case first(...)
            # returns None and .itertext() raises; fall back to ''.
            specifier = ' '.join(
                [i for i in (first(result.xpath('./specifier'))).itertext()])
        except BaseException:
            specifier = ''
        # print_tree(first(result.xpath('.')))
        battery_capacity = self.model(raw_value=raw_value,
                                      raw_units=raw_units,
                                      specifier=specifier,
                                      value=self.extract_value(raw_value),
                                      error=self.extract_error(raw_value),
                                      units=self.extract_units(raw_units),
                                      )
        cem_lists = []
        for cem_el in result.xpath('./cem'):
            print_tree(cem_el)
            if cem_el is not None:
                log.debug(etree.tostring(cem_el))
                cem_lists.append(''.join(cem_el.xpath('./names/text()')))
        battery_capacity.compound = compound
        battery_capacity.compound.names = cem_lists
        # NOTE(review): ``cem_el`` here is the loop variable from above — if
        # the match contains no ./cem elements this raises NameError;
        # presumably every grammar match has at least one cem. Confirm.
        battery_capacity.compound.labels = cem_el.xpath('./labels/text()')
        log.debug(battery_capacity.serialize())
        yield battery_capacity
        # except TypeError as e:
        # print('==========Error===============')
        # traceback.print_exc()
        # log.debug(e)
| 43.487719 | 277 | 0.43634 |
01d44cafcfb81f8eba56ee090428ddd16d8380de | 1,562 | py | Python | modules/test/test_info.py | amuritna/phenny | c01f409f41db125fe3f50093ed1ec3454f95a529 | [
"EFL-2.0"
] | 7 | 2018-10-29T18:01:47.000Z | 2022-01-21T04:13:46.000Z | modules/test/test_info.py | amuritna/phenny | c01f409f41db125fe3f50093ed1ec3454f95a529 | [
"EFL-2.0"
] | 225 | 2018-03-08T10:41:50.000Z | 2021-11-01T19:51:17.000Z | modules/test/test_info.py | amuritna/phenny | c01f409f41db125fe3f50093ed1ec3454f95a529 | [
"EFL-2.0"
] | 44 | 2018-03-19T15:30:15.000Z | 2020-07-29T08:47:45.000Z | """
test_info.py - tests for the info module
author: nu11us <work.willeggleston@gmail.com>
"""
import re
import unittest
from mock import MagicMock
from modules import info
from web import catch_timeout
class TestInfo(unittest.TestCase):
    """Tests for the info module's .help and .stats commands."""

    def setUp(self):
        self.phenny = MagicMock()
        self.input = MagicMock()

    def test_help_invalid(self):
        # group(1) is the (unknown) command name passed to .help.
        self.input.group = lambda x: [None, 'notacommand'][x]
        self.input.sender = "user"
        info.help(self.phenny, self.input)
        reply = self.phenny.say.call_args[0][0]
        self.assertTrue("Sorry, I don't know that command." in reply)

    def test_help_channel(self):
        # A bare ".help" issued from a channel points at the help page.
        self.input.group = lambda x: [None, ''][x]
        self.input.sender = "#channel"
        info.help(self.phenny, self.input)
        reply = self.phenny.say.call_args[0][0]
        expected = ("Hey there, I'm a friendly bot for #apertium. Say \".help\" "
                    "to me in private for a list of my commands or check out my "
                    "help page at")
        self.assertTrue(expected in reply)

    def test_help_pm(self):
        # A bare ".help" in a private message yields the three-line intro.
        self.input.sender = "username"
        self.input.channels = []
        self.input.group = lambda x: [None, False][x]
        info.help(self.phenny, self.input)
        # calls for 'hey there..', 'for help with...', 'talk to my owner
        self.assertTrue(self.phenny.say.call_count == 3)

    def test_stats(self):
        info.stats(self.phenny, self.input)
        # calls for most-used, power-users, power-chans
        self.assertTrue(self.phenny.say.call_count == 3)
| 35.5 | 154 | 0.643406 |
3688f280234700471cdc0c0ccb6f82858e2914a2 | 4,126 | py | Python | dolweb/downloads/views.py | Armada651/www | 829a1bc3f578b92b3ad7aa646a451775ef02c3b3 | [
"MIT"
] | null | null | null | dolweb/downloads/views.py | Armada651/www | 829a1bc3f578b92b3ad7aa646a451775ef02c3b3 | [
"MIT"
] | null | null | null | dolweb/downloads/views.py | Armada651/www | 829a1bc3f578b92b3ad7aa646a451775ef02c3b3 | [
"MIT"
] | null | null | null | from annoying.decorators import render_to
from django.conf import settings
from django.core.paginator import EmptyPage
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from dolweb.downloads.diggpaginator import DiggPaginator
from dolweb.downloads.models import BranchInfo, DevVersion, ReleaseVersion
import hashlib
import hmac
import multiprocessing
_addbuild_lock = multiprocessing.Lock()
@render_to('downloads-index.html')
def index(request):
    """Displays the downloads index: all releases plus the ten newest
    master builds."""
    recent_masters = DevVersion.objects.filter(branch='master').order_by('-date')[:10]
    newest_master = recent_masters[0] if recent_masters else None
    return {
        'releases': ReleaseVersion.objects.order_by('-date'),
        'master_builds': recent_masters,
        'last_master': newest_master,
    }
@render_to('downloads-branches.html')
def branches(request):
    """Displays all the visible branches, each with its five newest builds."""
    visible_branches = BranchInfo.objects.filter(visible=True).order_by('name')
    branches = [
        (branch_info.name,
         DevVersion.objects.filter(branch=branch_info.name).order_by('-date')[:5])
        for branch_info in visible_branches
    ]
    return {'branches': branches}
@render_to('downloads-view-devrel.html')
def view_dev_release(request, hash):
    """Detail page for a single development build, looked up by hash."""
    version = get_object_or_404(DevVersion, hash=hash)
    return {'ver': version}
@render_to('downloads-list.html')
def list(request, branch, page):
    """Paginated listing of the dev builds on one branch.

    NOTE: the view name shadows the ``list`` builtin; it is kept because
    the URL configuration refers to it by this name.
    """
    page = 1 if page is None else page
    builds = DevVersion.objects.filter(branch=branch).order_by('-date')
    if not builds and branch != 'master':
        # 404 unless the branch at least exists in BranchInfo.
        get_object_or_404(BranchInfo, name=branch)
    pagi = DiggPaginator(builds, 20, body=9, tail=2)
    try:
        page_obj = pagi.page(page)
    except EmptyPage:
        raise Http404
    return {'branch': branch, 'page': page, 'page_obj': page_obj,
            'pagi': pagi}
@csrf_exempt
def new(request):
    """Callback used by the buildbot to register a new build.

    Expects a POST whose fields are authenticated by an HMAC-SHA1 over a
    length-prefixed concatenation of the fields. On success the build URL
    is attached to the (possibly pre-existing) DevVersion row for the
    commit hash.

    Improvements over the original: the signature check uses
    ``hmac.compare_digest`` (constant-time, avoids leaking the digest via
    timing on this untrusted input), and the multiprocessing lock is held
    via a ``with`` block instead of try/finally.
    """
    if request.method != 'POST':
        raise Http404

    branch = request.POST['branch']
    shortrev = request.POST['shortrev']
    hash = request.POST['hash']  # shadows builtin; kept: protocol field name
    author = request.POST['author']
    description = request.POST['description']
    build_type = request.POST['build_type']
    build_url = request.POST['build_url']

    # Recompute the expected signature from the length-prefixed fields.
    msg = "%d|%d|%d|%d|%d|%d|%d|%s|%s|%s|%s|%s|%s|%s" % (
        len(branch), len(shortrev), len(hash), len(author), len(description),
        len(build_type), len(build_url),
        branch, shortrev, hash, author, description, build_type, build_url
    )
    hm = hmac.new(settings.DOWNLOADS_CREATE_KEY, msg, hashlib.sha1)
    if not hmac.compare_digest(hm.hexdigest(), request.POST['hmac']):
        return HttpResponse('Invalid HMAC', status=403)

    # Lock to avoid race conditions between concurrent build callbacks.
    with _addbuild_lock:
        # Check if we already have a commit with the same hash.
        try:
            build_obj = DevVersion.objects.get(hash=hash)
        except DevVersion.DoesNotExist:
            build_obj = DevVersion()
            build_obj.branch = branch
            build_obj.shortrev = shortrev
            build_obj.hash = hash
            build_obj.author = author
            build_obj.description = description
        if build_type == 'win32':
            build_obj.win32_url = build_url
        elif build_type == 'win64':
            build_obj.win64_url = build_url
        elif build_type == 'osx':
            build_obj.osx_url = build_url
        elif build_type == 'ubu':
            build_obj.ubu_url = build_url
        else:
            return HttpResponse('Wrong build type', status=400)
        build_obj.save()
        return HttpResponse('OK')
def get_latest(request, branch):
    """Callback used by the emulator to get the latest version on a branch."""
    builds = DevVersion.objects.filter(branch=branch).order_by('-date')
    if not builds:
        raise Http404
    return HttpResponse(builds[0].hash)
| 32.234375 | 85 | 0.660446 |
89a486952b4b0a943cfaf7dfdcaf06f87aa6848a | 1,507 | py | Python | lists/migrations/0001_initial.py | brendanodwyer/python-tdd-book | ff3a8a8254a3112937ce9924dfa05ba52069c8bf | [
"Apache-2.0"
] | null | null | null | lists/migrations/0001_initial.py | brendanodwyer/python-tdd-book | ff3a8a8254a3112937ce9924dfa05ba52069c8bf | [
"Apache-2.0"
] | null | null | null | lists/migrations/0001_initial.py | brendanodwyer/python-tdd-book | ff3a8a8254a3112937ce9924dfa05ba52069c8bf | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.6 on 2021-07-19 18:28
import django.db.models.deletion
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; initial schema for the
    # lists app: a bare List model, and an Item model holding text that
    # belongs to a List (unique per (list, text), ordered by id).

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="List",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Item",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("text", models.TextField(default="")),
                (
                    "list",
                    models.ForeignKey(
                        default=None,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="lists.list",
                    ),
                ),
            ],
            options={
                "ordering": ("id",),
                "unique_together": {("list", "text")},
            },
        ),
    ]
| 26.910714 | 68 | 0.363636 |
f5516d9eb1c4f6d1f921ae8e1fc78d1636b967c3 | 872 | py | Python | b_cfn_custom_userpool_authorizer_test/integration/tests/test_authorizer_allow.py | simonasvaitkus/B.CfnCustomUserPoolAuthorizer | 6532665d3b29031f86c19205cd58b0bcd9e4f827 | [
"Apache-2.0"
] | null | null | null | b_cfn_custom_userpool_authorizer_test/integration/tests/test_authorizer_allow.py | simonasvaitkus/B.CfnCustomUserPoolAuthorizer | 6532665d3b29031f86c19205cd58b0bcd9e4f827 | [
"Apache-2.0"
] | null | null | null | b_cfn_custom_userpool_authorizer_test/integration/tests/test_authorizer_allow.py | simonasvaitkus/B.CfnCustomUserPoolAuthorizer | 6532665d3b29031f86c19205cd58b0bcd9e4f827 | [
"Apache-2.0"
] | 1 | 2022-01-04T07:46:14.000Z | 2022-01-04T07:46:14.000Z | import urllib3
from urllib3 import HTTPResponse
from b_cfn_custom_userpool_authorizer_test.integration.infrastructure.main_stack import MainStack
def test_authorizer_allow(access_token: str) -> None:
    """Verify that a request carrying a valid access token is let through
    by the custom authorizer and reaches the backend.

    :param access_token: (Fixture) valid access token.

    :return: No return.
    """
    api_url = MainStack.get_output(MainStack.API_ENDPOINT_KEY)

    pool = urllib3.PoolManager()
    response: HTTPResponse = pool.request(
        url=api_url,
        method='GET',
        headers={'Authorization': access_token},
    )

    # The authorizer must not have rejected the call.
    assert response.status == 200

    body = response.data.decode()
    # The dummy lambda behind the API (defined in the infrastructure main
    # stack) always answers with this fixed string.
    assert body == 'Hello World!'
3d8a58ea8c4dcecec6cf8d9a6280ec5c177f56c5 | 226,439 | py | Python | nova/tests/unit/api/openstack/compute/test_serversV21.py | Juniper/nova | f7876474846be4b32070cc33c29535a4174aca7f | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/test_serversV21.py | Juniper/nova | f7876474846be4b32070cc33c29535a4174aca7f | [
"Apache-2.0"
] | 2 | 2015-02-03T06:25:24.000Z | 2015-02-04T10:10:36.000Z | nova/tests/unit/api/openstack/compute/test_serversV21.py | Juniper/nova | f7876474846be4b32070cc33c29535a4174aca7f | [
"Apache-2.0"
] | 7 | 2015-01-20T10:30:08.000Z | 2020-02-05T10:29:05.000Z | # Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import datetime
import ddt
import uuid
import fixtures
import iso8601
import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import base64
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
import testtools
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack import compute
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute import ips
from nova.api.openstack.compute import servers
from nova.api.openstack.compute import views
from nova.api.openstack import wsgi as os_wsgi
from nova import availability_zones
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import context
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import models
from nova import exception
from nova.image import glance
from nova.network import manager
from nova import objects
from nova.objects import instance as instance_obj
from nova.objects import tag
from nova.policies import servers as server_policies
from nova import policy
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova.tests import uuidsentinel as uuids
from nova import utils as nova_utils
CONF = nova.conf.CONF
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
def fake_gen_uuid():
    # Deterministic stand-in for uuid generation: always the fixture UUID.
    return FAKE_UUID
def return_servers_empty(context, *args, **kwargs):
    # Stub for compute_api.API.get_all that reports no instances at all.
    return objects.InstanceList(objects=[])
def instance_update_and_get_original(context, instance_uuid, values,
                                     columns_to_join=None,
                                     ):
    """Stub for db.instance_update_and_get_original.

    Builds a stub instance, overlays *values* on top of it and returns the
    (old, new) pair the real DB API promises.  Both members share the same
    updated dict here, which is enough for these tests.
    """
    display_name = values.get('display_name')
    stub = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
                               name=display_name)
    updated = dict(stub, **values)
    return (updated, updated)
def instance_update(context, instance_uuid, values):
    """Stub for db.instance_update: a stub instance with *values* merged in."""
    display_name = values.get('display_name')
    stub = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
                               name=display_name)
    return dict(stub, **values)
def fake_compute_api(cls, req, id):
    """Stubbed compute API action that unconditionally reports success."""
    return True
def fake_start_stop_not_ready(self, context, instance):
    # Stub start/stop that simulates an instance that is not ready yet.
    raise exception.InstanceNotReady(instance_id=instance["uuid"])
def fake_start_stop_invalid_state(self, context, instance):
    # Stub start/stop that simulates an instance in a state that does not
    # allow the requested action.
    raise exception.InstanceInvalidState(
        instance_uuid=instance['uuid'], attr='fake_attr',
        method='fake_method', state='fake_state')
def fake_instance_get_by_uuid_not_found(context, uuid,
                                        columns_to_join, use_slave=False):
    # Stub DB lookup that pretends the requested instance does not exist.
    raise exception.InstanceNotFound(instance_id=uuid)
def fake_instance_get_all_with_locked(context, list_locked, **kwargs):
    """Return an InstanceList with one stub server per entry in *list_locked*.

    Each entry is either the literal string 'not_locked' (the stub then gets
    locked_by=None) or the name of the locker, passed through as locked_by.
    """
    servers = []
    for idx, locked in enumerate(list_locked, start=1):
        kwargs['locked_by'] = None if locked == 'not_locked' else locked
        servers.append(fakes.stub_instance_obj(
            context, id=idx, uuid=fakes.get_fake_uuid(locked), **kwargs))
    return objects.InstanceList(objects=servers)
def fake_instance_get_all_with_description(context, list_desc, **kwargs):
    """Return an InstanceList with one stub server per entry in *list_desc*,
    each carrying the corresponding display_description.
    """
    servers = []
    for idx, desc in enumerate(list_desc, start=1):
        kwargs['display_description'] = desc
        servers.append(fakes.stub_instance_obj(
            context, id=idx, uuid=fakes.get_fake_uuid(desc), **kwargs))
    return objects.InstanceList(objects=servers)
class MockSetAdminPassword(object):
    """Callable stand-in for the set-admin-password API.

    Records the instance id and password it was last invoked with so a
    test can assert on them afterwards.
    """

    def __init__(self):
        # Nothing has been recorded until the first call.
        self.instance_id = None
        self.password = None

    def __call__(self, context, instance_id, password):
        # Remember the arguments; the context is deliberately ignored.
        self.instance_id = instance_id
        self.password = password
class ControllerTest(test.TestCase):
    """Common fixture for servers-controller tests.

    Stubs out image/keypair services, the compute API get/get_all calls and
    the DB update call, then builds the v2.1 servers and ips controllers.
    """

    def setUp(self):
        super(ControllerTest, self).setUp()
        self.flags(use_ipv6=False)
        fakes.stub_out_key_pair_funcs(self)
        fake.stub_out_image_service(self)
        return_server = fakes.fake_compute_get()
        return_servers = fakes.fake_compute_get_all()
        # Server sort keys extension is enabled in v21 so sort data is passed
        # to the instance API and the sorted DB API is invoked
        self.stubs.Set(compute_api.API, 'get_all',
                       lambda api, *a, **k: return_servers(*a, **k))
        self.stubs.Set(compute_api.API, 'get',
                       lambda api, *a, **k: return_server(*a, **k))
        self.stub_out('nova.db.instance_update_and_get_original',
                      instance_update_and_get_original)
        self.flags(group='glance', api_servers=['http://localhost:9292'])
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        self.ips_controller = ips.IPsController()
        # Start every test from a clean policy state.
        policy.reset()
        policy.init()
        fake_network.stub_out_nw_api_get_instance_nw_info(self)
class ServersControllerTest(ControllerTest):
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
def req(self, url, use_admin_context=False):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
version=self.wsgi_api_version)
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
    def test_cellsv1_instance_lookup_no_target(self, mock_get_im,
                                               mock_get_inst):
        """With cells v1 enabled, _get_instance must not consult the
        instance mapping nor target a cell database connection.
        """
        self.flags(enable=True, group='cells')
        ctxt = context.RequestContext('fake', 'fake')
        self.controller._get_instance(ctxt, 'foo')
        self.assertFalse(mock_get_im.called)
        self.assertIsNone(ctxt.db_connection)
    @mock.patch('nova.objects.Instance.get_by_uuid')
    @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
    def test_instance_lookup_targets(self, mock_get_im, mock_get_inst):
        """Without cells v1, _get_instance looks up the instance mapping and
        targets the mapped cell's database connection on the context.
        """
        ctxt = context.RequestContext('fake', 'fake')
        mock_get_im.return_value.cell_mapping.database_connection = uuids.cell1
        self.controller._get_instance(ctxt, 'foo')
        mock_get_im.assert_called_once_with(ctxt, 'foo')
        self.assertIsNotNone(ctxt.db_connection)
    def test_requested_networks_prefix(self):
        # A 'br-' prefixed uuid must be passed through untouched when
        # neutron is in use.
        self.flags(use_neutron=True)
        uuid = 'br-00000000-0000-0000-0000-000000000000'
        requested_networks = [{'uuid': uuid}]
        res = self.controller._get_requested_networks(requested_networks)
        self.assertIn((uuid, None, None, None), res.as_tuples())
def test_requested_networks_neutronv2_enabled_with_port(self):
self.flags(use_neutron=True)
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_requested_networks_neutronv2_enabled_with_network(self):
self.flags(use_neutron=True)
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(network, None, None, None)], res.as_tuples())
def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
self.flags(use_neutron=True)
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_requested_networks_with_duplicate_networks_nova_net(self):
# duplicate networks are allowed only for nova neutron v2.0
self.flags(use_neutron=False)
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}, {'uuid': network}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_with_neutronv2_and_duplicate_networks(self):
# duplicate networks are allowed only for nova neutron v2.0
self.flags(use_neutron=True)
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}, {'uuid': network}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(network, None, None, None),
(network, None, None, None)], res.as_tuples())
def test_requested_networks_neutronv2_enabled_conflict_on_fixed_ip(self):
self.flags(use_neutron=True)
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
addr = '10.0.0.1'
requested_networks = [{'uuid': network,
'fixed_ip': addr,
'port': port}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_neutronv2_disabled_with_port(self):
self.flags(use_neutron=False)
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_api_enabled_with_v2_subclass(self):
self.flags(use_neutron=True)
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
    def test_get_server_by_uuid(self):
        """GET /servers/<uuid> returns the server with that id."""
        req = self.req('/fake/servers/%s' % FAKE_UUID)
        res_dict = self.controller.show(req, FAKE_UUID)
        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
def test_get_server_joins(self):
def fake_get(_self, *args, **kwargs):
expected_attrs = kwargs['expected_attrs']
self.assertEqual(['flavor', 'info_cache', 'metadata',
'numa_topology'], expected_attrs)
ctxt = context.RequestContext('fake', 'fake')
return fake_instance.fake_instance_obj(
ctxt, expected_attrs=expected_attrs)
self.stubs.Set(compute_api.API, 'get', fake_get)
req = self.req('/fake/servers/%s' % FAKE_UUID)
self.controller.show(req, FAKE_UUID)
    def test_unique_host_id(self):
        """Create two servers with the same host and different
        project_ids and check that the host_id's are unique.
        """
        def return_instance_with_host(context, *args, **kwargs):
            # Fresh project_id on every call so the derived hostId differs.
            project_id = uuidutils.generate_uuid()
            return fakes.stub_instance_obj(context, id=1, uuid=FAKE_UUID,
                                           project_id=project_id,
                                           host='fake_host')

        # NOTE(review): this stubs.Set is immediately shadowed by the
        # mock.patch.object below and appears redundant — confirm and clean
        # up in a follow-up.
        self.stubs.Set(compute_api.API, 'get',
                       return_instance_with_host)
        req = self.req('/fake/servers/%s' % FAKE_UUID)

        with mock.patch.object(compute_api.API, 'get') as mock_get:
            mock_get.side_effect = return_instance_with_host
            server1 = self.controller.show(req, FAKE_UUID)
            server2 = self.controller.show(req, FAKE_UUID)

            self.assertNotEqual(server1['server']['hostId'],
                                server2['server']['hostId'])
    def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
                              status="ACTIVE", progress=100):
        """Build the fully expanded 'server' response body expected for the
        stubbed instance, with the given uuid, bookmark links, status and
        progress spliced in.
        """
        return {
            "server": {
                "id": uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": progress,
                "name": "server2",
                "status": status,
                "hostId": '',
                "image": {
                    "id": "10",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "2",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
                    ]
                },
                "metadata": {
                    "seq": "2",
                },
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v2/fake/servers/%s" % uuid,
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/fake/servers/%s" % uuid,
                    },
                ],
                "OS-DCF:diskConfig": "MANUAL",
                "accessIPv4": '',
                "accessIPv6": '',
            }
        }
def test_get_server_by_id(self):
self.flags(use_ipv6=True)
image_bookmark = "http://localhost/fake/images/10"
flavor_bookmark = "http://localhost/fake/flavors/2"
uuid = FAKE_UUID
req = self.req('/v2/fake/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
progress=0)
expected_server['server']['name'] = 'server1'
expected_server['server']['metadata']['seq'] = '1'
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_active_status_by_id(self):
image_bookmark = "http://localhost/fake/images/10"
flavor_bookmark = "http://localhost/fake/flavors/2"
new_return_server = fakes.fake_compute_get(
id=2, vm_state=vm_states.ACTIVE, progress=100)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: new_return_server(*a, **k))
uuid = FAKE_UUID
req = self.req('/fake/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_id_image_ref_by_id(self):
image_ref = "10"
image_bookmark = "http://localhost/fake/images/10"
flavor_id = "1"
flavor_bookmark = "http://localhost/fake/flavors/2"
new_return_server = fakes.fake_compute_get(
id=2, vm_state=vm_states.ACTIVE, image_ref=image_ref,
flavor_id=flavor_id, progress=100)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: new_return_server(*a, **k))
uuid = FAKE_UUID
req = self.req('/fake/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_addresses_from_cache(self):
pub0 = ('172.19.0.1', '172.19.0.2',)
pub1 = ('1.2.3.4',)
pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
priv0 = ('192.168.0.3', '192.168.0.4',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'public',
'subnets': [{'cidr': '172.19.0.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': '1.2.3.0/16',
'ips': [_ip(ip) for ip in pub1]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub2]}]}},
{'address': 'bb:bb:bb:bb:bb:bb',
'id': 2,
'network': {'bridge': 'br1',
'id': 2,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip(ip) for ip in priv0]}]}}]
return_server = fakes.fake_compute_get(nw_cache=nw_cache)
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: return_server(*a, **k))
req = self.req('/fake/servers/%s/ips' % FAKE_UUID)
res_dict = self.ips_controller.index(req, FAKE_UUID)
expected = {
'addresses': {
'private': [
{'version': 4, 'addr': '192.168.0.3'},
{'version': 4, 'addr': '192.168.0.4'},
],
'public': [
{'version': 4, 'addr': '172.19.0.1'},
{'version': 4, 'addr': '172.19.0.2'},
{'version': 4, 'addr': '1.2.3.4'},
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
],
},
}
self.assertThat(res_dict, matchers.DictMatches(expected))
# Make sure we kept the addresses in order
self.assertIsInstance(res_dict['addresses'], collections.OrderedDict)
labels = [vif['network']['label'] for vif in nw_cache]
for index, label in enumerate(res_dict['addresses'].keys()):
self.assertEqual(label, labels[index])
def test_get_server_addresses_nonexistent_network(self):
url = '/v2/fake/servers/%s/ips/network_0' % FAKE_UUID
req = self.req(url)
self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
req, FAKE_UUID, 'network_0')
def test_get_server_addresses_nonexistent_server(self):
def fake_instance_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute_api.API, 'get', fake_instance_get)
server_id = uuids.fake
req = self.req('/fake/servers/%s/ips' % server_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.ips_controller.index, req, server_id)
    def test_get_server_list_empty(self):
        # With no instances in the "DB", the index view returns an empty
        # server list rather than erroring.
        self.stubs.Set(compute_api.API, 'get_all',
                       return_servers_empty)

        req = self.req('/fake/servers')
        res_dict = self.controller.index(req)

        num_servers = len(res_dict['servers'])
        self.assertEqual(0, num_servers)
def test_get_server_list_with_reservation_id(self):
req = self.req('/fake/servers?reservation_id=foo')
res_dict = self.controller.index(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
    def test_get_server_list_with_reservation_id_empty(self):
        # NOTE(review): despite the "_empty" suffix this body is identical
        # to test_get_server_list_with_reservation_id_details below —
        # likely a copy/paste leftover; confirm intent before deduplicating.
        req = self.req('/fake/servers/detail?'
                       'reservation_id=foo')
        res_dict = self.controller.detail(req)

        i = 0
        for s in res_dict['servers']:
            self.assertEqual(s.get('name'), 'server%d' % (i + 1))
            i += 1
def test_get_server_list_with_reservation_id_details(self):
req = self.req('/fake/servers/detail?'
'reservation_id=foo')
res_dict = self.controller.detail(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list(self):
req = self.req('/fake/servers')
res_dict = self.controller.index(req)
self.assertEqual(len(res_dict['servers']), 5)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['name'], 'server%d' % (i + 1))
self.assertIsNone(s.get('image', None))
expected_links = [
{
"rel": "self",
"href": "http://localhost/v2/fake/servers/%s" % s['id'],
},
{
"rel": "bookmark",
"href": "http://localhost/fake/servers/%s" % s['id'],
},
]
self.assertEqual(s['links'], expected_links)
def test_get_servers_with_limit(self):
req = self.req('/fake/servers?limit=3')
res_dict = self.controller.index(req)
servers = res_dict['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res_dict['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v2/fake/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected_params = {'limit': ['3'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected_params))
def test_get_servers_with_limit_bad_value(self):
req = self.req('/fake/servers?limit=aaa')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_server_details_empty(self):
self.stubs.Set(compute_api.API, 'get_all',
return_servers_empty)
req = self.req('/fake/servers/detail')
res_dict = self.controller.detail(req)
num_servers = len(res_dict['servers'])
self.assertEqual(0, num_servers)
def test_get_server_details_with_bad_name(self):
req = self.req('/fake/servers/detail?name=%2Binstance')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_server_details_with_limit(self):
req = self.req('/fake/servers/detail?limit=3')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v2/fake/servers/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_server_details_with_limit_bad_value(self):
req = self.req('/fake/servers/detail?limit=aaa')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_get_server_details_with_limit_and_other_params(self):
req = self.req('/fake/servers/detail'
'?limit=3&blah=2:t'
'&sort_key=uuid&sort_dir=asc')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v2/fake/servers/detail', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'],
'sort_key': ['uuid'], 'sort_dir': ['asc'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_servers_with_too_big_limit(self):
req = self.req('/fake/servers?limit=30')
res_dict = self.controller.index(req)
self.assertNotIn('servers_links', res_dict)
def test_get_servers_with_bad_limit(self):
req = self.req('/fake/servers?limit=asdf')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_servers_with_marker(self):
url = '/v2/fake/servers?marker=%s' % fakes.get_fake_uuid(2)
req = self.req(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
def test_get_servers_with_limit_and_marker(self):
url = ('/v2/fake/servers?limit=2&marker=%s' %
fakes.get_fake_uuid(1))
req = self.req(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
def test_get_servers_with_bad_marker(self):
req = self.req('/fake/servers?limit=2&marker=asdf')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_invalid_filter_param(self):
req = self.req('/fake/servers?info_cache=asdf',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
req = self.req('/fake/servers?__foo__=asdf',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_invalid_regex_filter_param(self):
req = self.req('/fake/servers?flavor=[[[',
use_admin_context=True)
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_servers_invalid_sort_key(self):
req = self.req('/fake/servers?sort_key=foo&sort_dir=desc')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
@mock.patch.object(compute_api.API, 'get_all')
def test_get_servers_ignore_sort_key(self, mock_get):
req = self.req('/fake/servers?sort_key=vcpus&sort_dir=asc')
self.controller.index(req)
mock_get.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=[], sort_dirs=[])
@mock.patch.object(compute_api.API, 'get_all')
def test_get_servers_ignore_sort_key_only_one_dir(self, mock_get):
req = self.req(
'/fake/servers?sort_key=user_id&sort_key=vcpus&sort_dir=asc')
self.controller.index(req)
mock_get.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=['user_id'],
sort_dirs=['asc'])
@mock.patch.object(compute_api.API, 'get_all')
def test_get_servers_ignore_sort_key_with_no_sort_dir(self, mock_get):
req = self.req('/fake/servers?sort_key=vcpus&sort_key=user_id')
self.controller.index(req)
mock_get.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=['user_id'], sort_dirs=[])
@mock.patch.object(compute_api.API, 'get_all')
def test_get_servers_ignore_sort_key_with_bad_sort_dir(self, mock_get):
req = self.req('/fake/servers?sort_key=vcpus&sort_dir=bad_dir')
self.controller.index(req)
mock_get.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=[], sort_dirs=[])
def test_get_servers_non_admin_with_admin_only_sort_key(self):
req = self.req('/fake/servers?sort_key=host&sort_dir=desc')
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.index, req)
@mock.patch.object(compute_api.API, 'get_all')
def test_get_servers_admin_with_admin_only_sort_key(self, mock_get):
req = self.req('/fake/servers?sort_key=node&sort_dir=desc',
use_admin_context=True)
self.controller.detail(req)
mock_get.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=['node'], sort_dirs=['desc'])
def test_get_servers_with_bad_option(self):
server_uuid = uuids.fake
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None):
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?unknownoption=whee')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_image(self):
server_uuid = uuids.fake
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIsNotNone(search_opts)
self.assertIn('image', search_opts)
self.assertEqual(search_opts['image'], '12345')
db_list = [fakes.stub_instance(100, uuid=server_uuid)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?image=12345')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
def test_tenant_id_filter_no_admin_context(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertNotIn('tenant_id', search_opts)
self.assertEqual(search_opts['project_id'], 'fake')
return [fakes.stub_instance_obj(100)]
req = self.req('/fake/servers?tenant_id=newfake')
with mock.patch.object(compute_api.API, 'get_all') as mock_get:
mock_get.side_effect = fake_get_all
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_tenant_id_filter_admin_context(self):
""""Test tenant_id search opt is dropped if all_tenants is not set."""
def fake_get_all(context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertNotIn('tenant_id', search_opts)
self.assertEqual('fake', search_opts['project_id'])
return [fakes.stub_instance_obj(100)]
req = self.req('/fake/servers?tenant_id=newfake',
use_admin_context=True)
with mock.patch.object(compute_api.API, 'get_all') as mock_get:
mock_get.side_effect = fake_get_all
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_normal(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertNotIn('project_id', search_opts)
return [fakes.stub_instance_obj(100)]
req = self.req('/fake/servers?all_tenants',
use_admin_context=True)
with mock.patch.object(compute_api.API, 'get_all') as mock_get:
mock_get.side_effect = fake_get_all
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_one(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertNotIn('project_id', search_opts)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=1',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_zero(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertNotIn('all_tenants', search_opts)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=0',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_false(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertNotIn('all_tenants', search_opts)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=false',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_invalid(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertNotIn('all_tenants', search_opts)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=xxx',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_admin_restricted_tenant(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertEqual(search_opts['project_id'], 'fake')
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_pass_policy(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertNotIn('project_id', search_opts)
self.assertTrue(context.is_admin)
return [fakes.stub_instance_obj(100)]
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
rules = {
"os_compute_api:servers:index": "project_id:fake",
"os_compute_api:servers:index:get_all_tenants": "project_id:fake"
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
req = self.req('/fake/servers?all_tenants=1')
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_fail_policy(self):
def fake_get_all(api, context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
return [fakes.stub_instance_obj(100)]
rules = {
"os_compute_api:servers:index:get_all_tenants":
"project_id:non_fake",
"os_compute_api:servers:get_all": "project_id:fake",
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers?all_tenants=1')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
    def test_get_servers_allows_flavor(self):
        """The flavor query parameter is passed through to get_all."""
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('flavor', search_opts)
            # flavor is an integer ID
            self.assertEqual(search_opts['flavor'], '12345')
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?flavor=12345')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_with_bad_flavor(self):
        """A bogus flavor filter returns an empty index listing."""
        req = self.req('/fake/servers?flavor=abcde')
        with mock.patch.object(compute_api.API, 'get_all') as mock_get:
            mock_get.return_value = objects.InstanceList(objects=[])
            servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 0)

    def test_get_server_details_with_bad_flavor(self):
        """A bogus flavor filter returns an empty detail listing."""
        req = self.req('/fake/servers?flavor=abcde')
        with mock.patch.object(compute_api.API, 'get_all') as mock_get:
            mock_get.return_value = objects.InstanceList(objects=[])
            servers = self.controller.detail(req)['servers']

        self.assertThat(servers, testtools.matchers.HasLength(0))
    def test_get_servers_allows_status(self):
        """?status=active is translated into a vm_state filter."""
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('vm_state', search_opts)
            self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?status=active')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_allows_task_status(self):
        """?status=reboot maps to the set of reboot task_states."""
        server_uuid = uuids.fake
        task_state = task_states.REBOOTING

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('task_state', search_opts)
            self.assertEqual([task_states.REBOOT_PENDING,
                              task_states.REBOOT_STARTED,
                              task_states.REBOOTING],
                             search_opts['task_state'])
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid,
                                                 task_state=task_state)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?status=reboot')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_resize_status(self):
        # Test when resize status, it maps list of vm states.
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIn('vm_state', search_opts)
            self.assertEqual(search_opts['vm_state'],
                             [vm_states.ACTIVE, vm_states.STOPPED])
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?status=resize')
        servers = self.controller.detail(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_invalid_status(self):
        # Test getting servers by invalid status.
        req = self.req('/fake/servers?status=baloney',
                       use_admin_context=False)
        servers = self.controller.index(req)['servers']
        # An unrecognized status simply matches nothing for non-admins.
        self.assertEqual(len(servers), 0)

    def test_get_servers_deleted_status_as_user(self):
        """Non-admins may not list deleted servers."""
        req = self.req('/fake/servers?status=deleted',
                       use_admin_context=False)
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.detail, req)
    def test_get_servers_deleted_status_as_admin(self):
        """Admins may filter by ?status=deleted (mapped to vm_state)."""
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIn('vm_state', search_opts)
            self.assertEqual(search_opts['vm_state'], ['deleted'])
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?status=deleted',
                       use_admin_context=True)
        servers = self.controller.detail(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    @mock.patch.object(compute_api.API, 'get_all')
    def test_get_servers_deleted_filter_str_to_bool(self, mock_get_all):
        """?deleted=true is coerced to boolean True for get_all."""
        server_uuid = uuids.fake
        db_list = objects.InstanceList(
            objects=[fakes.stub_instance_obj(100, uuid=server_uuid,
                                             vm_state='deleted')])
        mock_get_all.return_value = db_list

        req = self.req('/fake/servers?deleted=true',
                       use_admin_context=True)
        servers = self.controller.detail(req)['servers']
        self.assertEqual(1, len(servers))
        self.assertEqual(server_uuid, servers[0]['id'])

        # Assert that 'deleted' filter value is converted to boolean
        # while calling get_all() method.
        expected_search_opts = {'deleted': True, 'project_id': 'fake'}
        self.assertEqual(expected_search_opts,
                         mock_get_all.call_args[1]['search_opts'])

    @mock.patch.object(compute_api.API, 'get_all')
    def test_get_servers_deleted_filter_invalid_str(self, mock_get_all):
        """An unparseable ?deleted= value is coerced to False."""
        server_uuid = uuids.fake
        db_list = objects.InstanceList(
            objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
        mock_get_all.return_value = db_list

        # NOTE(review): uses fakes.HTTPRequest.blank directly rather than
        # self.req like sibling tests — presumably equivalent; confirm.
        req = fakes.HTTPRequest.blank('/fake/servers?deleted=abc',
                                      use_admin_context=True)
        servers = self.controller.detail(req)['servers']
        self.assertEqual(1, len(servers))
        self.assertEqual(server_uuid, servers[0]['id'])

        # Assert that invalid 'deleted' filter value is converted to boolean
        # False while calling get_all() method.
        expected_search_opts = {'deleted': False, 'project_id': 'fake'}
        self.assertEqual(expected_search_opts,
                         mock_get_all.call_args[1]['search_opts'])
    def test_get_servers_allows_name(self):
        """The name regex filter is passed through; index requests no
        extra joined attributes.
        """
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('name', search_opts)
            self.assertEqual(search_opts['name'], 'whee.*')
            # index (unlike detail) does not need joined attributes.
            self.assertEqual([], expected_attrs)
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?name=whee.*')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    @mock.patch.object(compute_api.API, 'get_all')
    def test_get_servers_flavor_not_found(self, get_all_mock):
        """FlavorNotFound from get_all yields an empty listing, not 404."""
        get_all_mock.side_effect = exception.FlavorNotFound(flavor_id=1)

        req = fakes.HTTPRequest.blank(
            '/fake/servers?status=active&flavor=abc')
        servers = self.controller.index(req)['servers']

        self.assertEqual(0, len(servers))
    def test_get_servers_allows_changes_since(self):
        """changes-since is parsed to an aware datetime; the implicit
        deleted filter is not added alongside it.
        """
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('changes-since', search_opts)
            changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
                                              tzinfo=iso8601.iso8601.UTC)
            self.assertEqual(search_opts['changes-since'], changes_since)
            self.assertNotIn('deleted', search_opts)
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        params = 'changes-since=2011-01-24T17:08:01Z'
        req = self.req('/fake/servers?%s' % params)
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_allows_changes_since_bad_value(self):
        """A malformed changes-since value fails request validation."""
        params = 'changes-since=asdf'
        req = self.req('/fake/servers?%s' % params)
        self.assertRaises(exception.ValidationError, self.controller.index,
                          req)
    def test_get_servers_admin_filters_as_user(self):
        """Test getting servers by admin-only or unknown options when
        context is not admin. Make sure the admin and unknown options
        are stripped before they get to compute_api.get_all()
        """
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            # Allowed by user
            self.assertIn('name', search_opts)
            self.assertIn('ip', search_opts)
            # OSAPI converts status to vm_state
            self.assertIn('vm_state', search_opts)
            # Unknown options are stripped before reaching compute_api.
            self.assertNotIn('unknown_option', search_opts)
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        # NOTE(review): uses fakes.HTTPRequest.blank directly rather than
        # self.req like sibling tests — presumably equivalent; confirm.
        query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
        req = fakes.HTTPRequest.blank('/fake/servers?%s' % query_str)
        res = self.controller.index(req)

        servers = res['servers']
        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_admin_options_as_admin(self):
        """Test getting servers by admin-only or unknown options when
        context is admin. All options should be passed
        """
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            # Allowed by user
            self.assertIn('name', search_opts)
            self.assertIn('terminated_at', search_opts)
            # OSAPI converts status to vm_state
            self.assertIn('vm_state', search_opts)
            # Allowed only by admins with admin API on
            self.assertIn('ip', search_opts)
            # Truly unknown options are still stripped, even for admins.
            self.assertNotIn('unknown_option', search_opts)
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        query_str = ("name=foo&ip=10.*&status=active&unknown_option=meow&"
                     "terminated_at=^2016-02-01.*")
        req = self.req('/fake/servers?%s' % query_str,
                       use_admin_context=True)
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)
    def test_get_servers_allows_ip(self):
        """Test getting servers by ip."""
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('ip', search_opts)
            self.assertEqual(search_opts['ip'], '10\..*')
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?ip=10\..*')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_admin_allows_ip6(self):
        """Test getting servers by ip6 with admin_api enabled and
        admin context
        """
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('ip6', search_opts)
            self.assertEqual(search_opts['ip6'], 'ffff.*')
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?ip6=ffff.*',
                       use_admin_context=True)
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_allows_ip6_with_new_version(self):
        """Test getting servers by ip6 with new version requested
        and no admin context
        """
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('ip6', search_opts)
            self.assertEqual(search_opts['ip6'], 'ffff.*')
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?ip6=ffff.*')
        # ip6 became user-visible at microversion 2.5.
        req.api_version_request = api_version_request.APIVersionRequest('2.5')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_admin_allows_access_ip_v4(self):
        """Test getting servers by access_ip_v4 with admin_api enabled and
        admin context
        """
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('access_ip_v4', search_opts)
            self.assertEqual(search_opts['access_ip_v4'], 'ffff.*')
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?access_ip_v4=ffff.*',
                       use_admin_context=True)
        servers = self.controller.index(req)['servers']

        self.assertEqual(1, len(servers))
        self.assertEqual(server_uuid, servers[0]['id'])

    def test_get_servers_admin_allows_access_ip_v6(self):
        """Test getting servers by access_ip_v6 with admin_api enabled and
        admin context
        """
        server_uuid = uuids.fake

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None,
                         expected_attrs=None, sort_keys=None, sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('access_ip_v6', search_opts)
            self.assertEqual(search_opts['access_ip_v6'], 'ffff.*')
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?access_ip_v6=ffff.*',
                       use_admin_context=True)
        servers = self.controller.index(req)['servers']

        self.assertEqual(1, len(servers))
        self.assertEqual(server_uuid, servers[0]['id'])
    def test_get_all_server_details(self):
        """Detail listing renders image/flavor as id+bookmark link dicts
        and the expected per-server fields.
        """
        expected_flavor = {
            "id": "2",
            "links": [
                {
                    "rel": "bookmark",
                    "href": 'http://localhost/fake/flavors/2',
                },
            ],
        }
        expected_image = {
            "id": "10",
            "links": [
                {
                    "rel": "bookmark",
                    "href": 'http://localhost/fake/images/10',
                },
            ],
        }
        req = self.req('/fake/servers/detail')
        res_dict = self.controller.detail(req)

        for i, s in enumerate(res_dict['servers']):
            self.assertEqual(s['id'], fakes.get_fake_uuid(i))
            self.assertEqual(s['hostId'], '')
            self.assertEqual(s['name'], 'server%d' % (i + 1))
            self.assertEqual(s['image'], expected_image)
            self.assertEqual(s['flavor'], expected_flavor)
            self.assertEqual(s['status'], 'ACTIVE')
            self.assertEqual(s['metadata']['seq'], str(i + 1))

    def test_get_all_server_details_with_host(self):
        """We want to make sure that if two instances are on the same host,
        then they return the same hostId. If two instances are on different
        hosts, they should return different hostIds. In this test,
        there are 5 instances - 2 on one host and 3 on another.
        """
        def return_servers_with_host(*args, **kwargs):
            # Alternate instances between two hosts via i % 2.
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(None,
                                                 id=i + 1,
                                                 user_id='fake',
                                                 project_id='fake',
                                                 host=i % 2,
                                                 uuid=fakes.get_fake_uuid(i))
                         for i in range(5)])

        self.stubs.Set(compute_api.API, 'get_all', return_servers_with_host)

        req = self.req('/fake/servers/detail')
        res_dict = self.controller.detail(req)

        server_list = res_dict['servers']
        host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
        # Both hostIds are non-empty and distinct across the two hosts.
        self.assertTrue(host_ids[0] and host_ids[1])
        self.assertNotEqual(host_ids[0], host_ids[1])

        for i, s in enumerate(server_list):
            self.assertEqual(s['id'], fakes.get_fake_uuid(i))
            self.assertEqual(s['hostId'], host_ids[i % 2])
            self.assertEqual(s['name'], 'server%d' % (i + 1))
def test_get_servers_joins_services(self):
def fake_get_all(compute_self, context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None):
self.assertIn('services', expected_attrs)
return objects.InstanceList()
self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
req = self.req('/fake/servers/detail', use_admin_context=True)
self.assertIn('servers', self.controller.detail(req))
class ServersControllerTestV29(ServersControllerTest):
    """Re-runs the base controller tests at microversion 2.9, which adds
    the 'locked' field to server representations.
    """

    wsgi_api_version = '2.9'

    def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
                              status="ACTIVE", progress=100):
        """Extend the expected server dict with the 2.9 'locked' field."""
        server_dict = super(ServersControllerTestV29,
                            self)._get_server_data_dict(uuid,
                                                        image_bookmark,
                                                        flavor_bookmark,
                                                        status,
                                                        progress)
        server_dict['server']['locked'] = False
        return server_dict

    @mock.patch.object(compute_api.API, 'get')
    def _test_get_server_with_lock(self, locked_by, get_mock):
        """show() reports locked=True iff the instance has a locker.

        :param locked_by: 'admin', 'owner' or None
        :returns: the response dict for further assertions
        """
        image_bookmark = "http://localhost/fake/images/10"
        flavor_bookmark = "http://localhost/fake/flavors/2"
        uuid = FAKE_UUID
        get_mock.side_effect = fakes.fake_compute_get(id=2,
                                                      locked_by=locked_by,
                                                      uuid=uuid)

        req = self.req('/fake/servers/%s' % uuid)
        res_dict = self.controller.show(req, uuid)

        expected_server = self._get_server_data_dict(uuid,
                                                     image_bookmark,
                                                     flavor_bookmark,
                                                     progress=0)
        expected_server['server']['locked'] = True if locked_by else False
        self.assertThat(res_dict, matchers.DictMatches(expected_server))
        return res_dict

    def test_get_server_with_locked_by_admin(self):
        res_dict = self._test_get_server_with_lock('admin')
        self.assertTrue(res_dict['server']['locked'])

    def test_get_server_with_locked_by_owner(self):
        res_dict = self._test_get_server_with_lock('owner')
        self.assertTrue(res_dict['server']['locked'])

    def test_get_server_not_locked(self):
        res_dict = self._test_get_server_with_lock(None)
        self.assertFalse(res_dict['server']['locked'])

    @mock.patch.object(compute_api.API, 'get_all')
    def _test_list_server_detail_with_lock(self,
                                           s1_locked,
                                           s2_locked,
                                           get_all_mock):
        """detail() reflects each server's lock state independently."""
        get_all_mock.return_value = fake_instance_get_all_with_locked(
            context, [s1_locked, s2_locked])

        req = self.req('/fake/servers/detail')
        servers_list = self.controller.detail(req)
        # Check that each returned server has the same 'locked' value
        # and 'id' as they were created.
        for locked in [s1_locked, s2_locked]:
            server = next(server for server in servers_list['servers']
                          if (server['id'] == fakes.get_fake_uuid(locked)))
            expected = False if locked == 'not_locked' else True
            self.assertEqual(expected, server['locked'])

    def test_list_server_detail_with_locked_s1_admin_s2_owner(self):
        self._test_list_server_detail_with_lock('admin', 'owner')

    def test_list_server_detail_with_locked_s1_owner_s2_admin(self):
        self._test_list_server_detail_with_lock('owner', 'admin')

    def test_list_server_detail_with_locked_s1_admin_s2_admin(self):
        self._test_list_server_detail_with_lock('admin', 'admin')

    def test_list_server_detail_with_locked_s1_admin_s2_not_locked(self):
        self._test_list_server_detail_with_lock('admin', 'not_locked')

    def test_list_server_detail_with_locked_s1_s2_not_locked(self):
        self._test_list_server_detail_with_lock('not_locked',
                                                'not_locked')

    @mock.patch.object(compute_api.API, 'get_all')
    def test_get_servers_remove_non_search_options(self, get_all_mock):
        """Paging/sorting params must not leak into search_opts."""
        req = fakes.HTTPRequestV21.blank('/servers'
                                         '?sort_key=uuid&sort_dir=asc'
                                         '&sort_key=user_id&sort_dir=desc'
                                         '&limit=1&marker=123',
                                         use_admin_context=True)
        self.controller.index(req)
        kwargs = get_all_mock.call_args[1]
        search_opts = kwargs['search_opts']
        for key in ('sort_key', 'sort_dir', 'limit', 'marker'):
            self.assertNotIn(key, search_opts)
class ServersControllerTestV219(ServersControllerTest):
    """Re-runs the base controller tests at microversion 2.19, which adds
    the 'description' field to server representations.
    """

    wsgi_api_version = '2.19'

    def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
                              status="ACTIVE", progress=100, description=None):
        """Extend the expected server dict with 'locked' and 'description'."""
        server_dict = super(ServersControllerTestV219,
                            self)._get_server_data_dict(uuid,
                                                        image_bookmark,
                                                        flavor_bookmark,
                                                        status,
                                                        progress)
        server_dict['server']['locked'] = False
        server_dict['server']['description'] = description
        return server_dict

    @mock.patch.object(compute_api.API, 'get')
    def _test_get_server_with_description(self, description, get_mock):
        """show() surfaces the instance display_description."""
        image_bookmark = "http://localhost/fake/images/10"
        flavor_bookmark = "http://localhost/fake/flavors/2"
        uuid = FAKE_UUID
        get_mock.side_effect = fakes.fake_compute_get(id=2,
                                            display_description=description,
                                            uuid=uuid)

        req = self.req('/fake/servers/%s' % uuid)
        res_dict = self.controller.show(req, uuid)

        expected_server = self._get_server_data_dict(uuid,
                                                     image_bookmark,
                                                     flavor_bookmark,
                                                     progress=0,
                                                     description=description)
        self.assertThat(res_dict, matchers.DictMatches(expected_server))
        return res_dict

    @mock.patch.object(compute_api.API, 'get_all')
    def _test_list_server_detail_with_descriptions(self,
                                                   s1_desc,
                                                   s2_desc,
                                                   get_all_mock):
        """detail() returns each server's own description."""
        get_all_mock.return_value = fake_instance_get_all_with_description(
            context, [s1_desc, s2_desc])

        req = self.req('/fake/servers/detail')
        servers_list = self.controller.detail(req)
        # Check that each returned server has the same 'description' value
        # and 'id' as they were created.
        for desc in [s1_desc, s2_desc]:
            server = next(server for server in servers_list['servers']
                          if (server['id'] == fakes.get_fake_uuid(desc)))
            expected = desc
            self.assertEqual(expected, server['description'])

    def test_get_server_with_description(self):
        self._test_get_server_with_description('test desc')

    def test_list_server_detail_with_descriptions(self):
        self._test_list_server_detail_with_descriptions('desc1', 'desc2')
class ServersControllerTestV226(ControllerTest):
    """Tests for microversion 2.26: server tags in show() and the
    tags/tags-any/not-tags/not-tags-any list filters.
    """

    wsgi_api_version = '2.26'

    @mock.patch.object(compute_api.API, 'get')
    def test_get_server_with_tags_by_id(self, mock_get):
        """show() requests the 'tags' join and renders the tag list."""
        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID,
                                      version=self.wsgi_api_version)
        ctxt = req.environ['nova.context']
        tags = ['tag1', 'tag2']

        def fake_get(_self, *args, **kwargs):
            self.assertIn('tags', kwargs['expected_attrs'])
            fake_server = fakes.stub_instance_obj(
                ctxt, id=2, vm_state=vm_states.ACTIVE, progress=100)

            tag_list = objects.TagList(objects=[
                objects.Tag(resource_id=FAKE_UUID, tag=tag)
                for tag in tags])

            fake_server.tags = tag_list
            return fake_server

        mock_get.side_effect = fake_get

        res_dict = self.controller.show(req, FAKE_UUID)

        self.assertIn('tags', res_dict['server'])
        self.assertEqual(res_dict['server']['tags'], tags)

    @mock.patch.object(compute_api.API, 'get_all')
    def _test_get_servers_allows_tag_filters(self, filter_name, mock_get_all):
        """The comma-separated tag filter is split into a list for
        get_all.
        """
        server_uuid = uuids.fake
        req = fakes.HTTPRequest.blank('/fake/servers?%s=t1,t2' % filter_name,
                                      version=self.wsgi_api_version)
        ctxt = req.environ['nova.context']

        def fake_get_all(*a, **kw):
            self.assertIsNotNone(kw['search_opts'])
            self.assertIn(filter_name, kw['search_opts'])
            self.assertEqual(kw['search_opts'][filter_name], ['t1', 't2'])
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(ctxt, uuid=server_uuid)])

        mock_get_all.side_effect = fake_get_all

        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_allows_tags_filter(self):
        self._test_get_servers_allows_tag_filters('tags')

    def test_get_servers_allows_tags_any_filter(self):
        self._test_get_servers_allows_tag_filters('tags-any')

    def test_get_servers_allows_not_tags_filter(self):
        self._test_get_servers_allows_tag_filters('not-tags')

    def test_get_servers_allows_not_tags_any_filter(self):
        self._test_get_servers_allows_tag_filters('not-tags-any')
class ServerControllerTestV238(ControllerTest):
    """From microversion 2.38, an unrecognized status filter is a 400
    for admins and non-admins alike.
    """

    wsgi_api_version = '2.38'

    def _test_invalid_status(self, is_admin):
        """Listing details with ?status=invalid must raise HTTP 400."""
        request = fakes.HTTPRequest.blank(
            '/fake/servers/detail?status=invalid',
            version=self.wsgi_api_version,
            use_admin_context=is_admin)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.detail, request)

    def test_list_servers_detail_invalid_status_for_admin(self):
        self._test_invalid_status(True)

    def test_list_servers_detail_invalid_status_for_non_admin(self):
        self._test_invalid_status(False)
class ServerControllerTestV247(ControllerTest):
    """Server controller test for microversion 2.47

    The intent here is simply to verify that when showing server details
    after microversion 2.47 that the flavor is shown as a dict of flavor
    information rather than as dict of id/links. The existence of the
    'extra_specs' key is controlled by policy.
    """
    wsgi_api_version = '2.47'

    @mock.patch.object(objects.TagList, 'get_by_resource_id')
    def test_get_all_server_details(self, mock_get_by_resource_id):
        """With extra-specs policy allowed, the flavor dict includes
        'extra_specs'.
        """
        # Fake out tags on the instances
        mock_get_by_resource_id.return_value = objects.TagList()
        expected_flavor = {
            'disk': 20,
            'ephemeral': 0,
            'extra_specs': {},
            'original_name': u'm1.small',
            'ram': 2048,
            'swap': 0,
            'vcpus': 1}
        req = fakes.HTTPRequest.blank('/fake/servers/detail',
                                      version=self.wsgi_api_version)
        res_dict = self.controller.detail(req)
        for i, s in enumerate(res_dict['servers']):
            self.assertEqual(s['flavor'], expected_flavor)

    @mock.patch.object(objects.TagList, 'get_by_resource_id')
    def test_get_all_server_details_no_extra_spec(self,
                                                  mock_get_by_resource_id):
        """Without extra-specs policy, the flavor dict omits
        'extra_specs' but detail still succeeds.
        """
        # Fake out tags on the instances
        mock_get_by_resource_id.return_value = objects.TagList()
        # Set the policy so we don't have permission to index
        # flavor extra-specs but are able to get server details.
        servers_rule = 'os_compute_api:servers:detail'
        extraspec_rule = 'os_compute_api:os-flavor-extra-specs:index'
        self.policy.set_rules({
            extraspec_rule: 'rule:admin_api',
            servers_rule: '@'})
        expected_flavor = {
            'disk': 20,
            'ephemeral': 0,
            'original_name': u'm1.small',
            'ram': 2048,
            'swap': 0,
            'vcpus': 1}
        req = fakes.HTTPRequest.blank('/fake/servers/detail',
                                      version=self.wsgi_api_version)
        res_dict = self.controller.detail(req)
        for i, s in enumerate(res_dict['servers']):
            self.assertEqual(s['flavor'], expected_flavor)
class ServersControllerDeleteTest(ControllerTest):
    """Tests for the DELETE /servers/{id} controller path."""

    def setUp(self):
        super(ServersControllerDeleteTest, self).setUp()
        # Flag flipped by the fake delete so tests can assert it was called.
        self.server_delete_called = False

        def fake_delete(api, context, instance):
            if instance.uuid == uuids.non_existent_uuid:
                raise exception.InstanceNotFound(instance_id=instance.uuid)
            self.server_delete_called = True

        self.stubs.Set(compute_api.API, 'delete', fake_delete)

    def _create_delete_request(self, uuid):
        """Build a DELETE request and stub API.get to return an ACTIVE
        instance with the request's project/user.
        """
        fakes.stub_out_instance_quota(self, 0, 10)
        req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % uuid)
        req.method = 'DELETE'

        fake_get = fakes.fake_compute_get(
            uuid=uuid,
            vm_state=vm_states.ACTIVE,
            project_id=req.environ['nova.context'].project_id,
            user_id=req.environ['nova.context'].user_id)
        self.stub_out('nova.compute.api.API.get',
                      lambda api, *a, **k: fake_get(*a, **k))

        return req

    def _delete_server_instance(self, uuid=FAKE_UUID):
        req = self._create_delete_request(uuid)
        self.controller.delete(req, uuid)

    def test_delete_server_instance(self):
        self._delete_server_instance()
        self.assertTrue(self.server_delete_called)

    def test_delete_server_instance_not_found(self):
        """Deleting an unknown instance maps InstanceNotFound to 404."""
        self.assertRaises(webob.exc.HTTPNotFound,
                          self._delete_server_instance,
                          uuid=uuids.non_existent_uuid)

    def test_delete_server_instance_while_building(self):
        req = self._create_delete_request(FAKE_UUID)
        self.controller.delete(req, FAKE_UUID)

        self.assertTrue(self.server_delete_called)

    def test_delete_locked_server(self):
        """Deleting a locked server maps the lock failure to 409."""
        req = self._create_delete_request(FAKE_UUID)
        self.stubs.Set(compute_api.API, 'soft_delete',
                       fakes.fake_actions_to_locked_server)
        self.stubs.Set(compute_api.API, 'delete',
                       fakes.fake_actions_to_locked_server)

        self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
                          req, FAKE_UUID)

    def test_delete_server_instance_while_resize(self):
        """Delete succeeds even during an in-progress resize."""
        req = self._create_delete_request(FAKE_UUID)
        fake_get = fakes.fake_compute_get(
            vm_state=vm_states.ACTIVE,
            task_state=task_states.RESIZE_PREP,
            project_id=req.environ['nova.context'].project_id,
            user_id=req.environ['nova.context'].user_id)
        self.stubs.Set(compute_api.API, 'get',
                       lambda api, *a, **k: fake_get(*a, **k))

        self.controller.delete(req, FAKE_UUID)

    def test_delete_server_instance_if_not_launched(self):
        self.flags(reclaim_instance_interval=3600)
        req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID)
        req.method = 'DELETE'

        self.server_delete_called = False

        fake_get = fakes.fake_compute_get(
            launched_at=None,
            project_id=req.environ['nova.context'].project_id,
            user_id=req.environ['nova.context'].user_id)
        self.stubs.Set(compute_api.API, 'get',
                       lambda api, *a, **k: fake_get(*a, **k))

        def instance_destroy_mock(*args, **kwargs):
            self.server_delete_called = True
            deleted_at = timeutils.utcnow()
            return fake_instance.fake_db_instance(deleted_at=deleted_at)
        self.stub_out('nova.db.instance_destroy', instance_destroy_mock)

        self.controller.delete(req, FAKE_UUID)
        # delete() should be called for instance which has never been active,
        # even if reclaim_instance_interval has been set.
        self.assertTrue(self.server_delete_called)
class ServersControllerRebuildInstanceTest(ControllerTest):
    """Tests for the rebuild server action (class continues below)."""

    # Image used as the rebuild target throughout these tests.
    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'

    def setUp(self):
        super(ServersControllerRebuildInstanceTest, self).setUp()
        self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
        self.req.method = 'POST'
        self.req.headers["content-type"] = "application/json"
        self.req_user_id = self.req.environ['nova.context'].user_id
        self.req_project_id = self.req.environ['nova.context'].project_id
        self.useFixture(nova_fixtures.SingleCellSimple())

        def fake_get(ctrl, ctxt, uuid):
            # Sentinel uuid used by tests that need a 404 path.
            if uuid == 'test_inst':
                raise webob.exc.HTTPNotFound(explanation='fakeout')
            return fakes.stub_instance_obj(None,
                                           vm_state=vm_states.ACTIVE,
                                           project_id=self.req_project_id,
                                           user_id=self.req_user_id)

        self.useFixture(
            fixtures.MonkeyPatch('nova.api.openstack.compute.servers.'
                                 'ServersController._get_instance',
                                 fake_get))

        fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
                                          project_id=self.req_project_id,
                                          user_id=self.req_user_id)
        self.stubs.Set(compute_api.API, 'get',
                       lambda api, *a, **k: fake_get(*a, **k))

        # Baseline valid rebuild body; tests mutate it per case.
        self.body = {
            'rebuild': {
                'name': 'new_name',
                'imageRef': self.image_uuid,
                'metadata': {
                    'open': 'stack',
                },
            },
        }
def test_rebuild_server_with_image_not_uuid(self):
self.body['rebuild']['imageRef'] = 'not-uuid'
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID,
body=self.body)
def test_rebuild_server_with_image_as_full_url(self):
image_href = ('http://localhost/v2/fake/images/'
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
self.body['rebuild']['imageRef'] = image_href
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID,
body=self.body)
def test_rebuild_server_with_image_as_empty_string(self):
self.body['rebuild']['imageRef'] = ''
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID,
body=self.body)
    def test_rebuild_instance_name_with_spaces_in_the_middle(self):
        """Interior spaces in the new name are accepted."""
        self.body['rebuild']['name'] = 'abc   def'
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.controller._action_rebuild(self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_name_with_leading_trailing_spaces(self):
        """Leading/trailing spaces in the new name are rejected."""
        self.body['rebuild']['name'] = '  abc   def  '
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_name_with_leading_trailing_spaces_compat_mode(
            self):
        """In legacy v2 compat mode the name is trimmed instead of
        rejected.
        """
        self.body['rebuild']['name'] = '  abc  def  '
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.req.set_legacy_v2()

        def fake_rebuild(*args, **kwargs):
            self.assertEqual('abc  def', kwargs['display_name'])

        with mock.patch.object(compute_api.API, 'rebuild') as mock_rebuild:
            mock_rebuild.side_effect = fake_rebuild
            self.controller._action_rebuild(self.req, FAKE_UUID,
                                            body=self.body)
def test_rebuild_instance_with_blank_metadata_key(self):
self.body['rebuild']['metadata'][''] = 'world'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_key_too_long(self):
self.body['rebuild']['metadata'][('a' * 260)] = 'world'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_value_too_long(self):
self.body['rebuild']['metadata']['key1'] = ('a' * 260)
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_value_not_string(self):
self.body['rebuild']['metadata']['key1'] = 1
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
    def test_rebuild_instance_fails_when_min_ram_too_small(self):
        """Rebuild is a 400 when the image min_ram exceeds the flavor's RAM."""
        # make min_ram larger than our instance ram size
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='active', properties={'key1': 'value1'},
                        min_ram="4096", min_disk="10")

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)
    def test_rebuild_instance_fails_when_min_disk_too_small(self):
        """Rebuild is a 400 when the image min_disk exceeds the flavor's disk."""
        # make min_disk larger than our instance disk size
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='active', properties={'key1': 'value1'},
                        min_ram="128", min_disk="100000")

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild, self.req,
                          FAKE_UUID, body=self.body)
    def test_rebuild_instance_image_too_large(self):
        """Rebuild is a 400 when the image itself is larger than the disk."""
        # make image size larger than our instance disk size
        size = str(1000 * (1024 ** 3))

        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='active', size=size)

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)
    def test_rebuild_instance_name_all_blank(self):
        """A name made only of spaces is rejected by schema validation."""
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True, status='active')

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
        self.body['rebuild']['name'] = ' '
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)
    def test_rebuild_instance_with_deleted_image(self):
        """Rebuilding from a DELETED image is a 400."""
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='DELETED')

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)
    def test_rebuild_instance_onset_file_limit_over_quota(self):
        """OnsetFileLimitExceeded from the compute API maps to HTTP 403."""
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True, status='active')

        # Patch the image lookup to succeed and the rebuild call to raise
        # the quota error, so only the error translation is exercised.
        with test.nested(
            mock.patch.object(fake._FakeImageService, 'show',
                              side_effect=fake_get_image),
            mock.patch.object(self.controller.compute_api, 'rebuild',
                              side_effect=exception.OnsetFileLimitExceeded)
        ) as (
            show_mock, rebuild_mock
        ):
            self.req.body = jsonutils.dump_as_bytes(self.body)
            self.assertRaises(webob.exc.HTTPForbidden,
                              self.controller._action_rebuild,
                              self.req, FAKE_UUID, body=self.body)
    def test_rebuild_bad_personality(self):
        """Non-base64 personality file contents fail schema validation."""
        body = {
            "rebuild": {
                "imageRef": self.image_uuid,
                "personality": [{
                    "path": "/path/to/file",
                    "contents": "INVALID b64",
                }]
            },
        }

        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=body)
    def test_rebuild_personality(self):
        """Valid personality files are accepted but not echoed in the reply."""
        body = {
            "rebuild": {
                "imageRef": self.image_uuid,
                "personality": [{
                    "path": "/path/to/file",
                    "contents": base64.encode_as_text("Test String"),
                }]
            },
        }

        body = self.controller._action_rebuild(self.req, FAKE_UUID,
                                               body=body).obj

        # 'personality' is write-only; it must never appear in the response.
        self.assertNotIn('personality', body['server'])
@mock.patch.object(compute_api.API, 'start')
def test_start(self, mock_start):
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.controller._start_server(req, FAKE_UUID, body)
mock_start.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch.object(compute_api.API, 'start', fake_start_stop_not_ready)
def test_start_not_ready(self):
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
    @mock.patch.object(
        compute_api.API, 'start', fakes.fake_actions_to_locked_server)
    def test_start_locked_server(self):
        """Starting a locked server is translated to HTTP 409 Conflict."""
        req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
        body = dict(start="")
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._start_server, req, FAKE_UUID, body)
    @mock.patch.object(compute_api.API, 'start', fake_start_stop_invalid_state)
    def test_start_invalid(self):
        """Starting a server in an invalid state is HTTP 409 Conflict."""
        req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
        body = dict(start="")
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._start_server, req, FAKE_UUID, body)
@mock.patch.object(compute_api.API, 'stop')
def test_stop(self, mock_stop):
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(stop="")
self.controller._stop_server(req, FAKE_UUID, body)
mock_stop.assert_called_once_with(mock.ANY, mock.ANY)
    @mock.patch.object(compute_api.API, 'stop', fake_start_stop_not_ready)
    def test_stop_not_ready(self):
        """Stopping a not-ready server is translated to HTTP 409 Conflict."""
        req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
        body = dict(stop="")
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._stop_server, req, FAKE_UUID, body)
    @mock.patch.object(
        compute_api.API, 'stop', fakes.fake_actions_to_locked_server)
    def test_stop_locked_server(self):
        """Stopping a locked server is translated to HTTP 409 Conflict."""
        req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
        body = dict(stop="")
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._stop_server, req, FAKE_UUID, body)
@mock.patch.object(compute_api.API, 'stop', fake_start_stop_invalid_state)
def test_stop_invalid_state(self):
req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
    @mock.patch(
        'nova.db.instance_get_by_uuid', fake_instance_get_by_uuid_not_found)
    def test_start_with_bogus_id(self):
        """Starting an unknown instance id is HTTP 404 Not Found."""
        req = fakes.HTTPRequestV21.blank('/fake/servers/test_inst/action')
        body = dict(start="")
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._start_server, req, 'test_inst', body)
    @mock.patch(
        'nova.db.instance_get_by_uuid', fake_instance_get_by_uuid_not_found)
    def test_stop_with_bogus_id(self):
        """Stopping an unknown instance id is HTTP 404 Not Found."""
        req = fakes.HTTPRequestV21.blank('/fake/servers/test_inst/action')
        body = dict(stop="")
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._stop_server, req, 'test_inst', body)
class ServersControllerRebuildTestV219(ServersControllerRebuildInstanceTest):
    """Rebuild tests for API microversion 2.19 (server 'description')."""

    def setUp(self):
        super(ServersControllerRebuildTestV219, self).setUp()
        # Pin the request at 2.19 so the description field is honoured.
        self.req.api_version_request = \
            api_version_request.APIVersionRequest('2.19')

    def _rebuild_server(self, set_desc, desc):
        """Rebuild FAKE_UUID, optionally sending ``desc``, and verify the
        response carries the expected id and description.
        """
        fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
                                          display_description=desc,
                                          project_id=self.req_project_id,
                                          user_id=self.req_user_id)
        self.stubs.Set(compute_api.API, 'get',
                       lambda api, *a, **k: fake_get(*a, **k))

        if set_desc:
            self.body['rebuild']['description'] = desc
        self.req.body = jsonutils.dump_as_bytes(self.body)
        server = self.controller._action_rebuild(self.req, FAKE_UUID,
                                                 body=self.body).obj['server']
        self.assertEqual(server['id'], FAKE_UUID)
        self.assertEqual(server['description'], desc)

    def test_rebuild_server_with_description(self):
        """A non-empty description round-trips through rebuild."""
        self._rebuild_server(True, 'server desc')

    def test_rebuild_server_empty_description(self):
        """An explicitly empty description is preserved."""
        self._rebuild_server(True, '')

    def test_rebuild_server_without_description(self):
        """Omitting description leaves it unchanged (empty here)."""
        self._rebuild_server(False, '')

    def test_rebuild_server_remove_description(self):
        """Sending description=None clears it."""
        self._rebuild_server(True, None)

    def test_rebuild_server_description_too_long(self):
        """A 256-char description (over the 255 limit) fails validation."""
        self.body['rebuild']['description'] = 'x' * 256
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_server_description_invalid(self):
        # Invalid non-printable control char in the desc.
        self.body['rebuild']['description'] = "123\0d456"
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)
class ServersControllerUpdateTest(ControllerTest):
    """Tests for PUT /servers/{id} (server name update) on the v2.1 API."""

    def _get_request(self, body=None):
        """Build a PUT request with ``body`` and stub the instance lookup."""
        req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID)
        req.method = 'PUT'
        req.content_type = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        fake_get = fakes.fake_compute_get(
            project_id=req.environ['nova.context'].project_id,
            user_id=req.environ['nova.context'].user_id)
        self.stub_out('nova.compute.api.API.get',
                      lambda api, *a, **k: fake_get(*a, **k))
        return req

    def test_update_server_all_attributes(self):
        """Updating every supported attribute (name only at this version)."""
        body = {'server': {
                  'name': 'server_test',
               }}
        req = self._get_request(body)
        res_dict = self.controller.update(req, FAKE_UUID, body=body)

        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
        self.assertEqual(res_dict['server']['name'], 'server_test')

    def test_update_server_name(self):
        """A plain name update is reflected in the response."""
        body = {'server': {'name': 'server_test'}}
        req = self._get_request(body)
        res_dict = self.controller.update(req, FAKE_UUID, body=body)

        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
        self.assertEqual(res_dict['server']['name'], 'server_test')

    def test_update_server_name_too_long(self):
        """A 256-char name (over the 255 limit) fails validation."""
        body = {'server': {'name': 'x' * 256}}
        req = self._get_request(body)
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, FAKE_UUID, body=body)

    def test_update_server_name_all_blank_spaces(self):
        """A name made only of spaces fails validation."""
        self.stub_out('nova.db.instance_get',
                fakes.fake_instance_get(name='server_test'))
        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
        req.method = 'PUT'
        req.content_type = 'application/json'
        body = {'server': {'name': ' ' * 64}}
        req.body = jsonutils.dump_as_bytes(body)
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, FAKE_UUID, body=body)

    def test_update_server_name_with_spaces_in_the_middle(self):
        """Internal whitespace in a name is allowed."""
        body = {'server': {'name': 'abc   def'}}
        req = self._get_request(body)
        self.controller.update(req, FAKE_UUID, body=body)

    def test_update_server_name_with_leading_trailing_spaces(self):
        """Leading/trailing whitespace in a name fails strict validation."""
        self.stub_out('nova.db.instance_get',
                fakes.fake_instance_get(name='server_test'))
        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
        req.method = 'PUT'
        req.content_type = 'application/json'
        body = {'server': {'name': ' abc   def '}}
        req.body = jsonutils.dump_as_bytes(body)
        self.assertRaises(exception.ValidationError,
                          self.controller.update, req, FAKE_UUID, body=body)

    def test_update_server_name_with_leading_trailing_spaces_compat_mode(self):
        """Legacy v2 compat mode tolerates padded names."""
        body = {'server': {'name': ' abc   def '}}
        req = self._get_request(body)
        req.set_legacy_v2()
        self.controller.update(req, FAKE_UUID, body=body)

    def test_update_server_admin_password_extra_arg(self):
        """Unknown attributes such as admin_password are rejected."""
        inst_dict = dict(name='server_test', admin_password='bacon')
        body = dict(server=inst_dict)

        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
        req.method = 'PUT'
        req.content_type = "application/json"
        req.body = jsonutils.dump_as_bytes(body)
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, FAKE_UUID, body=body)

    def test_update_server_host_id(self):
        """Read-only attributes such as host_id are rejected."""
        inst_dict = dict(host_id='123')
        body = dict(server=inst_dict)

        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
        req.method = 'PUT'
        req.content_type = "application/json"
        req.body = jsonutils.dump_as_bytes(body)
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, FAKE_UUID, body=body)

    def test_update_server_not_found(self):
        """An unknown instance on lookup maps to HTTP 404."""
        def fake_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')

        self.stubs.Set(compute_api.API, 'get', fake_get)
        body = {'server': {'name': 'server_test'}}
        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
        req.method = 'PUT'
        req.content_type = "application/json"
        req.body = jsonutils.dump_as_bytes(body)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          req, FAKE_UUID, body=body)

    @mock.patch.object(compute_api.API, 'update_instance')
    def test_update_server_not_found_on_update(self, mock_update_instance):
        """An instance vanishing during the update itself maps to 404."""
        def fake_update(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')

        mock_update_instance.side_effect = fake_update
        body = {'server': {'name': 'server_test'}}
        req = self._get_request(body)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          req, FAKE_UUID, body=body)

    def test_update_server_policy_fail(self):
        """A policy rule denying update raises PolicyNotAuthorized."""
        rule = {'compute:update': 'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(rule))
        body = {'server': {'name': 'server_test'}}
        req = self._get_request(body)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.update, req, FAKE_UUID, body=body)
class ServersControllerTriggerCrashDumpTest(ControllerTest):
    """Tests for the trigger_crash_dump server action (microversion 2.17)."""

    def setUp(self):
        super(ServersControllerTriggerCrashDumpTest, self).setUp()

        self.instance = fakes.stub_instance_obj(None,
                                                vm_state=vm_states.ACTIVE,
                                                project_id='fake')

        def fake_get(ctrl, ctxt, uuid):
            # Only FAKE_UUID resolves; anything else is a 404 from lookup.
            if uuid != FAKE_UUID:
                raise webob.exc.HTTPNotFound(explanation='fakeout')
            return self.instance

        self.useFixture(
            fixtures.MonkeyPatch('nova.api.openstack.compute.servers.'
                                 'ServersController._get_instance',
                                 fake_get))

        self.req = fakes.HTTPRequest.blank('/servers/%s/action' % FAKE_UUID)
        self.req.api_version_request =\
            api_version_request.APIVersionRequest('2.17')
        self.body = dict(trigger_crash_dump=None)

    @mock.patch.object(compute_api.API, 'trigger_crash_dump')
    def test_trigger_crash_dump(self, mock_trigger_crash_dump):
        """The action delegates to compute_api with the looked-up instance."""
        ctxt = self.req.environ['nova.context']
        self.controller._action_trigger_crash_dump(self.req, FAKE_UUID,
                                                   body=self.body)
        mock_trigger_crash_dump.assert_called_with(ctxt, self.instance)

    def test_trigger_crash_dump_policy_failed(self):
        """A denying policy rule surfaces in the error message."""
        rule_name = "os_compute_api:servers:trigger_crash_dump"
        self.policy.set_rules({rule_name: "project_id:non_fake"})
        exc = self.assertRaises(exception.PolicyNotAuthorized,
                                self.controller._action_trigger_crash_dump,
                                self.req, FAKE_UUID, body=self.body)
        self.assertIn("os_compute_api:servers:trigger_crash_dump",
                      exc.format_message())

    @mock.patch.object(compute_api.API, 'trigger_crash_dump',
                       fake_start_stop_not_ready)
    def test_trigger_crash_dump_not_ready(self):
        """A not-ready instance maps to HTTP 409 Conflict."""
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._action_trigger_crash_dump,
                          self.req, FAKE_UUID, body=self.body)

    @mock.patch.object(compute_api.API, 'trigger_crash_dump',
                       fakes.fake_actions_to_locked_server)
    def test_trigger_crash_dump_locked_server(self):
        """A locked instance maps to HTTP 409 Conflict."""
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._action_trigger_crash_dump,
                          self.req, FAKE_UUID, body=self.body)

    @mock.patch.object(compute_api.API, 'trigger_crash_dump',
                       fake_start_stop_invalid_state)
    def test_trigger_crash_dump_invalid_state(self):
        """An invalid instance state maps to HTTP 409 Conflict."""
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._action_trigger_crash_dump,
                          self.req, FAKE_UUID, body=self.body)

    def test_trigger_crash_dump_with_bogus_id(self):
        """An unknown instance id maps to HTTP 404 Not Found."""
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._action_trigger_crash_dump,
                          self.req, 'test_inst', body=self.body)

    def test_trigger_crash_dump_schema_invalid_type(self):
        """The action body must be null; a string fails validation."""
        self.body['trigger_crash_dump'] = 'not null'
        self.assertRaises(exception.ValidationError,
                          self.controller._action_trigger_crash_dump,
                          self.req, FAKE_UUID, body=self.body)

    def test_trigger_crash_dump_schema_extra_property(self):
        """Additional top-level properties are rejected by the schema."""
        self.body['extra_property'] = 'extra'
        self.assertRaises(exception.ValidationError,
                          self.controller._action_trigger_crash_dump,
                          self.req, FAKE_UUID, body=self.body)

    @mock.patch.object(compute_api.API, 'trigger_crash_dump',
                       side_effect=exception.TriggerCrashDumpNotSupported)
    def test_trigger_crash_dump_not_supported(self, mock_trigger_crash_dump):
        """An unsupported driver maps to HTTP 400 Bad Request."""
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_trigger_crash_dump,
                          self.req, FAKE_UUID, body=self.body)
class ServersControllerUpdateTestV219(ServersControllerUpdateTest):
    """Server update tests for microversion 2.19 (adds 'description')."""

    def _get_request(self, body=None):
        req = super(ServersControllerUpdateTestV219, self)._get_request(
            body=body)
        # Pin the request at 2.19 so the description field is honoured.
        req.api_version_request = api_version_request.APIVersionRequest('2.19')
        return req

    def _update_server_desc(self, set_desc, desc=None):
        """PUT a server update, optionally carrying a description."""
        body = {'server': {}}
        if set_desc:
            body['server']['description'] = desc
        req = self._get_request()
        res_dict = self.controller.update(req, FAKE_UUID, body=body)
        return res_dict

    def test_update_server_description(self):
        """A non-empty description round-trips through update."""
        res_dict = self._update_server_desc(True, 'server_desc')
        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
        self.assertEqual(res_dict['server']['description'], 'server_desc')

    def test_update_server_empty_description(self):
        """An explicitly empty description is preserved."""
        res_dict = self._update_server_desc(True, '')
        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
        self.assertEqual(res_dict['server']['description'], '')

    def test_update_server_without_description(self):
        """Omitting description yields None in the response."""
        res_dict = self._update_server_desc(False)
        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
        self.assertIsNone(res_dict['server']['description'])

    def test_update_server_remove_description(self):
        """Sending description=None clears it."""
        res_dict = self._update_server_desc(True)
        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
        self.assertIsNone(res_dict['server']['description'])

    def test_update_server_all_attributes(self):
        """Name and description can be updated together."""
        body = {'server': {
                  'name': 'server_test',
                  'description': 'server_desc'
               }}
        req = self._get_request(body)
        res_dict = self.controller.update(req, FAKE_UUID, body=body)

        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
        self.assertEqual(res_dict['server']['name'], 'server_test')
        self.assertEqual(res_dict['server']['description'], 'server_desc')

    def test_update_server_description_too_long(self):
        """A 256-char description (over the 255 limit) fails validation."""
        body = {'server': {'description': 'x' * 256}}
        req = self._get_request(body)
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, FAKE_UUID, body=body)

    def test_update_server_description_invalid(self):
        # Invalid non-printable control char in the desc.
        body = {'server': {'description': "123\0d456"}}
        req = self._get_request(body)
        self.assertRaises(exception.ValidationError, self.controller.update,
                          req, FAKE_UUID, body=body)
class ServerStatusTest(test.TestCase):
    """Verify the vm_state/task_state -> API 'status' mapping on show()."""

    def setUp(self):
        super(ServerStatusTest, self).setUp()
        fakes.stub_out_nw_api(self)

        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)

    def _get_with_state(self, vm_state, task_state=None):
        """Stub the DB lookup with the given states and GET the server."""
        self.stub_out('nova.db.instance_get_by_uuid',
                      fakes.fake_instance_get(vm_state=vm_state,
                                              task_state=task_state))

        request = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID)
        return self.controller.show(request, FAKE_UUID)

    def test_active(self):
        response = self._get_with_state(vm_states.ACTIVE)
        self.assertEqual(response['server']['status'], 'ACTIVE')

    def test_reboot(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.REBOOTING)
        self.assertEqual(response['server']['status'], 'REBOOT')

    def test_reboot_hard(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.REBOOTING_HARD)
        self.assertEqual(response['server']['status'], 'HARD_REBOOT')

    def test_reboot_resize_policy_fail(self):
        """A denying reboot policy raises PolicyNotAuthorized."""
        def fake_get_server(context, req, id):
            return fakes.stub_instance(id)

        self.stubs.Set(self.controller, '_get_server', fake_get_server)

        rule = {'compute:reboot': 'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(rule))
        req = fakes.HTTPRequestV21.blank('/fake/servers/1234/action')
        self.assertRaises(exception.PolicyNotAuthorized,
                self.controller._action_reboot, req, '1234',
                body={'reboot': {'type': 'HARD'}})

    def test_rebuild(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.REBUILDING)
        self.assertEqual(response['server']['status'], 'REBUILD')

    def test_rebuild_error(self):
        response = self._get_with_state(vm_states.ERROR)
        self.assertEqual(response['server']['status'], 'ERROR')

    def test_resize(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.RESIZE_PREP)
        self.assertEqual(response['server']['status'], 'RESIZE')

    def test_confirm_resize_policy_fail(self):
        """A denying confirm_resize policy raises PolicyNotAuthorized."""
        def fake_get_server(context, req, id):
            return fakes.stub_instance(id)

        self.stubs.Set(self.controller, '_get_server', fake_get_server)

        rule = {'compute:confirm_resize': 'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(rule))
        req = fakes.HTTPRequestV21.blank('/fake/servers/1234/action')
        self.assertRaises(exception.PolicyNotAuthorized,
                self.controller._action_confirm_resize, req, '1234', {})

    def test_verify_resize(self):
        response = self._get_with_state(vm_states.RESIZED, None)
        self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')

    def test_revert_resize(self):
        response = self._get_with_state(vm_states.RESIZED,
                                        task_states.RESIZE_REVERTING)
        self.assertEqual(response['server']['status'], 'REVERT_RESIZE')

    def test_revert_resize_policy_fail(self):
        """A denying revert_resize policy raises PolicyNotAuthorized."""
        def fake_get_server(context, req, id):
            return fakes.stub_instance(id)

        self.stubs.Set(self.controller, '_get_server', fake_get_server)

        rule = {'compute:revert_resize': 'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(rule))
        req = fakes.HTTPRequestV21.blank('/fake/servers/1234/action')
        self.assertRaises(exception.PolicyNotAuthorized,
                self.controller._action_revert_resize, req, '1234', {})

    def test_password_update(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.UPDATING_PASSWORD)
        self.assertEqual(response['server']['status'], 'PASSWORD')

    def test_stopped(self):
        response = self._get_with_state(vm_states.STOPPED)
        self.assertEqual(response['server']['status'], 'SHUTOFF')
class ServersControllerCreateTest(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTest, self).setUp()

        self.flags(enable_instance_password=True, group='api')
        self.instance_cache_num = 0
        # Caches mimic the DB layer: same fake instance reachable by id/uuid.
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}

        fakes.stub_out_nw_api(self)

        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)

        def instance_create(context, inst):
            # Fabricate a DB row for the created instance and remember it in
            # both caches so later get/update stubs can find it.
            inst_type = flavors.get_flavor_by_flavor_id(3)
            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
            def_image_ref = 'http://localhost/fake/images/%s' % image_uuid
            self.instance_cache_num += 1
            instance = fake_instance.fake_db_instance(**{
                'id': self.instance_cache_num,
                'display_name': inst['display_name'] or 'test',
                'display_description': inst['display_description'] or '',
                'uuid': FAKE_UUID,
                'instance_type': inst_type,
                'image_ref': inst.get('image_ref', def_image_ref),
                'user_id': 'fake',
                'project_id': 'fake',
                'reservation_id': inst['reservation_id'],
                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
                "config_drive": None,
                "progress": 0,
                "fixed_ips": [],
                "task_state": "",
                "vm_state": "",
                "root_device_name": inst.get('root_device_name', 'vda'),
            })

            self.instance_cache_by_id[instance['id']] = instance
            self.instance_cache_by_uuid[instance['uuid']] = instance
            return instance

        def instance_get(context, instance_id):
            """Stub for compute/api create() pulling in instance after
            scheduling
            """
            return self.instance_cache_by_id[instance_id]

        def instance_update(context, uuid, values):
            instance = self.instance_cache_by_uuid[uuid]
            instance.update(values)
            return instance

        def server_update_and_get_original(
                context, instance_uuid, params, columns_to_join=None):
            inst = self.instance_cache_by_uuid[instance_uuid]
            inst.update(params)
            return (inst, inst)

        def fake_method(*args, **kwargs):
            pass

        def project_get_networks(context, user_id):
            return dict(id='1', host='localhost')

        fakes.stub_out_key_pair_funcs(self)
        fake.stub_out_image_service(self)
        self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
        self.stub_out('nova.db.project_get_networks', project_get_networks)
        self.stub_out('nova.db.instance_create', instance_create)
        self.stub_out('nova.db.instance_system_metadata_update', fake_method)
        self.stub_out('nova.db.instance_get', instance_get)
        self.stub_out('nova.db.instance_update', instance_update)
        self.stub_out('nova.db.instance_update_and_get_original',
                      server_update_and_get_original)
        self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
                       fake_method)

        # Baseline request body; individual tests tweak pieces of it.
        self.body = {
            'server': {
                'name': 'server_test',
                'imageRef': self.image_uuid,
                'flavorRef': self.flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                'personality': [
                    {
                        "path": "/etc/banner.txt",
                        "contents": "MQ==",
                    },
                ],
            },
        }
        self.bdm = [{'delete_on_termination': 1,
                     'device_name': 123,
                     'volume_size': 1,
                     'volume_id': '11111111-1111-1111-1111-111111111111'}]

        self.req = fakes.HTTPRequest.blank('/fake/servers')
        self.req.method = 'POST'
        self.req.headers["content-type"] = "application/json"
    def _check_admin_password_len(self, server_dict):
        """utility function - check server_dict for admin_password length."""
        # The generated password must match the configured length exactly.
        self.assertEqual(CONF.password_length,
                         len(server_dict["adminPass"]))
    def _check_admin_password_missing(self, server_dict):
        """utility function - check server_dict for admin_password absence."""
        # Used when enable_instance_password is off: no adminPass in reply.
        self.assertNotIn("adminPass", server_dict)
    def _test_create_instance(self, flavor=2):
        """Create a server from the baseline body and verify id/password."""
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.body['server']['imageRef'] = image_uuid
        self.body['server']['flavorRef'] = flavor
        self.req.body = jsonutils.dump_as_bytes(self.body)
        server = self.controller.create(self.req, body=self.body).obj['server']
        self._check_admin_password_len(server)
        self.assertEqual(FAKE_UUID, server['id'])
    def test_create_instance_with_none_value_port(self):
        """A network entry with port=None plus a uuid is accepted."""
        # Replaces the whole server dict; the helper re-adds image/flavor.
        self.body['server'] = {'networks': [{'port': None, 'uuid': FAKE_UUID}]}
        self.body['server']['name'] = 'test'
        self._test_create_instance()
    def test_create_instance_private_flavor(self):
        """Booting with a private (is_public=False) flavor is a 400."""
        values = {
            'name': 'fake_name',
            'memory_mb': 512,
            'vcpus': 1,
            'root_gb': 10,
            'ephemeral_gb': 10,
            'flavorid': '1324',
            'swap': 0,
            'rxtx_factor': 0.5,
            'vcpu_weight': 1,
            'disabled': False,
            'is_public': False,
        }
        db.flavor_create(context.get_admin_context(), values)
        self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
                          flavor=1324)
def test_create_server_bad_image_uuid(self):
self.body['server']['min_count'] = 1
self.body['server']['imageRef'] = 1,
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-networks extension tests
# def test_create_server_with_invalid_networks_parameter(self):
# self.ext_mgr.extensions = {'os-networks': 'fake'}
# image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# flavor_ref = 'http://localhost/123/flavors/3'
# body = {
# 'server': {
# 'name': 'server_test',
# 'imageRef': image_href,
# 'flavorRef': flavor_ref,
# 'networks': {'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'},
# }
# }
# req = fakes.HTTPRequest.blank('/fake/servers')
# req.method = 'POST'
# req.body = jsonutils.dump_as_bytes(body)
# req.headers["content-type"] = "application/json"
# self.assertRaises(webob.exc.HTTPBadRequest,
# self.controller.create,
# req,
# body)
    def test_create_server_with_deleted_image(self):
        """Booting from a DELETED image is a 400 with a clear message."""
        # Get the fake image service so we can set the status to deleted
        (image_service, image_id) = glance.get_remote_image_service(
            context, '')
        image_service.update(context, self.image_uuid, {'status': 'DELETED'})
        self.addCleanup(image_service.update, context, self.image_uuid,
                        {'status': 'active'})

        self.body['server']['flavorRef'] = 2
        self.req.body = jsonutils.dump_as_bytes(self.body)
        with testtools.ExpectedException(
                webob.exc.HTTPBadRequest,
                'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
            self.controller.create(self.req, body=self.body)
    def test_create_server_image_too_large(self):
        """Booting an image larger than the flavor's disk is a 400."""
        # Get the fake image service so we can update the size of the image
        (image_service, image_id) = glance.get_remote_image_service(
            context, self.image_uuid)

        image = image_service.show(context, image_id)

        orig_size = image['size']
        new_size = str(1000 * (1024 ** 3))
        image_service.update(context, self.image_uuid, {'size': new_size})

        self.addCleanup(image_service.update, context, self.image_uuid,
                        {'size': orig_size})

        self.body['server']['flavorRef'] = 2
        self.req.body = jsonutils.dump_as_bytes(self.body)

        with testtools.ExpectedException(
                webob.exc.HTTPBadRequest,
                "Flavor's disk is too small for requested image."):
            self.controller.create(self.req, body=self.body)
    def test_create_instance_with_image_non_uuid(self):
        """A non-UUID string imageRef fails schema validation."""
        self.body['server']['imageRef'] = 'not-uuid'
        self.assertRaises(exception.ValidationError,
                          self.controller.create,
                          self.req, body=self.body)
    def test_create_instance_with_image_as_full_url(self):
        """A full image URL (instead of a bare UUID) fails validation."""
        image_href = ('http://localhost/v2/fake/images/'
                      '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
        self.body['server']['imageRef'] = image_href
        self.assertRaises(exception.ValidationError,
                          self.controller.create,
                          self.req, body=self.body)
    def test_create_instance_with_image_as_empty_string(self):
        """An empty imageRef string is a 400 Bad Request."""
        self.body['server']['imageRef'] = ''
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          self.req, body=self.body)
    def test_create_instance_no_key_pair(self):
        """Creation succeeds even when the user has no keypairs."""
        fakes.stub_out_key_pair_funcs(self, have_key_pair=False)
        self._test_create_instance()
    def _test_create_extra(self, params, no_image=False):
        """Create a server with extra ``params`` merged into the body.

        With no_image=True the imageRef key is removed (boot-from-volume
        style requests).
        """
        self.body['server']['flavorRef'] = 2
        if no_image:
            self.body['server'].pop('imageRef', None)
        self.body['server'].update(params)
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.req.headers["content-type"] = "application/json"
        self.controller.create(self.req, body=self.body).obj['server']
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-keypairs extension tests
# def test_create_instance_with_keypairs_enabled(self):
# self.ext_mgr.extensions = {'os-keypairs': 'fake'}
# key_name = 'green'
#
# params = {'key_name': key_name}
# old_create = compute_api.API.create
#
# # NOTE(sdague): key pair goes back to the database,
# # so we need to stub it out for tests
# def key_pair_get(context, user_id, name):
# return {'public_key': 'FAKE_KEY',
# 'fingerprint': 'FAKE_FINGERPRINT',
# 'name': name}
#
# def create(*args, **kwargs):
# self.assertEqual(kwargs['key_name'], key_name)
# return old_create(*args, **kwargs)
#
# self.stub_out('nova.db.key_pair_get', key_pair_get)
# self.stubs.Set(compute_api.API, 'create', create)
# self._test_create_extra(params)
#
# TODO(cyeoh): bp-v3-api-unittests
# This needs to be ported to the os-networks extension tests
# def test_create_instance_with_networks_enabled(self):
# self.ext_mgr.extensions = {'os-networks': 'fake'}
# net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
# requested_networks = [{'uuid': net_uuid}]
# params = {'networks': requested_networks}
# old_create = compute_api.API.create
# def create(*args, **kwargs):
# result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
# self.assertEqual(kwargs['requested_networks'], result)
# return old_create(*args, **kwargs)
# self.stubs.Set(compute_api.API, 'create', create)
# self._test_create_extra(params)
    def test_create_instance_with_port_with_no_fixed_ips(self):
        """PortRequiresFixedIP from the compute API maps to HTTP 400."""
        port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'port': port_id}]
        params = {'networks': requested_networks}

        def fake_create(*args, **kwargs):
            raise exception.PortRequiresFixedIP(port_id=port_id)

        self.stubs.Set(compute_api.API, 'create', fake_create)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)
    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_raise_user_data_too_large(self, mock_create):
        """InstanceUserDataTooLarge from the compute API maps to HTTP 400."""
        mock_create.side_effect = exception.InstanceUserDataTooLarge(
            maxsize=1, length=2)

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          self.req, body=self.body)
    def test_create_instance_with_network_with_no_subnet(self):
        """NetworkRequiresSubnet from the compute API maps to HTTP 400."""
        network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'uuid': network}]
        params = {'networks': requested_networks}

        def fake_create(*args, **kwargs):
            raise exception.NetworkRequiresSubnet(network_uuid=network)

        self.stubs.Set(compute_api.API, 'create', fake_create)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)
def test_create_instance_with_non_unique_secgroup_name(self):
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks,
'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}
def fake_create(*args, **kwargs):
raise exception.NoUniqueMatch("No Unique match found for ...")
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, params)
def test_create_instance_secgroup_leading_trailing_spaces(self):
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks,
'security_groups': [{'name': ' sg '}]}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
def test_create_instance_secgroup_leading_trailing_spaces_compat_mode(
self):
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks,
'security_groups': [{'name': ' sg '}]}
def fake_create(*args, **kwargs):
self.assertEqual([' sg '], kwargs['security_groups'])
return (objects.InstanceList(objects=[fakes.stub_instance_obj(
self.req.environ['nova.context'])]), None)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.req.set_legacy_v2()
self._test_create_extra(params)
def test_create_instance_with_networks_disabled_neutronv2(self):
self.flags(use_neutron=True)
net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
requested_networks = [{'uuid': net_uuid}]
params = {'networks': requested_networks}
old_create = compute_api.API.create
def create(*args, **kwargs):
result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
None, None)]
self.assertEqual(result, kwargs['requested_networks'].as_tuples())
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_pass_disabled(self):
# test with admin passwords disabled See lp bug 921814
self.flags(enable_instance_password=False, group='api')
self.flags(enable_instance_password=False, group='api')
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_name_too_long(self):
self.body['server']['name'] = 'X' * 256
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError, self.controller.create,
self.req, body=self.body)
def test_create_instance_name_with_spaces_in_the_middle(self):
self.body['server']['name'] = 'abc def'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.controller.create(self.req, body=self.body)
def test_create_instance_name_with_leading_trailing_spaces(self):
self.body['server']['name'] = ' abc def '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_name_with_leading_trailing_spaces_in_compat_mode(
self):
self.body['server']['name'] = ' abc def '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.set_legacy_v2()
self.controller.create(self.req, body=self.body)
def test_create_instance_name_all_blank_spaces(self):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/fake/flavors/3'
body = {
'server': {
'name': ' ' * 64,
'imageRef': image_uuid,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_az_with_leading_trailing_spaces(self):
self.body['server']['availability_zone'] = ' zone1 '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_az_with_leading_trailing_spaces_in_compat_mode(
self):
self.body['server']['name'] = ' abc def '
self.body['server']['availability_zones'] = ' zone1 '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.set_legacy_v2()
with mock.patch.object(availability_zones, 'get_availability_zones',
return_value=[' zone1 ']):
self.controller.create(self.req, body=self.body)
def test_create_instance(self):
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_extension_create_exception(self):
def fake_keypair_server_create(server_dict,
create_kwargs, body_deprecated_param):
raise KeyError
self.controller.server_create_func_list.append(
fake_keypair_server_create)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'name': 'server_test',
'imageRef': image_uuid,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
req = fakes.HTTPRequestV21.blank('/fake/servers')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.create, req, body=body)
self.controller.server_create_func_list.remove(
fake_keypair_server_create)
def test_create_instance_pass_disabled(self):
self.flags(enable_instance_password=False, group='api')
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
@mock.patch('nova.virt.hardware.numa_get_constraints')
def _test_create_instance_numa_topology_wrong(self, exc,
numa_constraints_mock):
numa_constraints_mock.side_effect = exc(**{'name': None,
'cpunum': 0,
'cpumax': 0,
'cpuset': None,
'memsize': 0,
'memtotal': 0})
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_numa_topology_wrong(self):
for exc in [exception.ImageNUMATopologyIncomplete,
exception.ImageNUMATopologyForbidden,
exception.ImageNUMATopologyAsymmetric,
exception.ImageNUMATopologyCPUOutOfRange,
exception.ImageNUMATopologyCPUDuplicates,
exception.ImageNUMATopologyCPUsUnassigned,
exception.ImageNUMATopologyMemoryOutOfRange]:
self._test_create_instance_numa_topology_wrong(exc)
def test_create_instance_too_much_metadata(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata']['vote'] = 'fiddletown'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_too_long(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {('a' * 260): '12345'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_value_too_long(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {'key1': ('a' * 260)}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_blank(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {'': 'abcd'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_not_dict(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = 'string'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_not_string(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {1: 'test'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_value_not_string(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {'test': ['a', 'list']}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_user_data_malformed_bad_request(self):
params = {'user_data': 'u1234'}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
def test_create_instance_invalid_key_name(self):
self.body['server']['key_name'] = 'nonexistentkey'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_valid_key_name(self):
self.body['server']['key_name'] = 'key'
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_len(res["server"])
def test_create_instance_invalid_flavor_href(self):
flavor_ref = 'http://localhost/v2/flavors/asdf'
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_invalid_flavor_id_int(self):
flavor_ref = -1
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
@mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
return_value=objects.Flavor())
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_non_existing_snapshot_id(
self, mock_create,
mock_get_flavor_by_flavor_id):
mock_create.side_effect = exception.SnapshotNotFound(snapshot_id='123')
self.body['server'] = {'name': 'server_test',
'flavorRef': self.flavor_ref,
'block_device_mapping_v2':
[{'source_type': 'snapshot',
'uuid': '123'}]}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_invalid_flavor_id_empty(self):
flavor_ref = ""
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_bad_flavor_href(self):
flavor_ref = 'http://localhost/v2/flavors/17'
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_local_href(self):
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_admin_password(self):
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = 'testpass'
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(server['adminPass'],
self.body['server']['adminPass'])
def test_create_instance_admin_password_pass_disabled(self):
self.flags(enable_instance_password=False, group='api')
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = 'testpass'
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
self.assertIn('server', res)
self.assertIn('adminPass', self.body['server'])
def test_create_instance_admin_password_empty(self):
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = ''
self.req.body = jsonutils.dump_as_bytes(self.body)
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body)
def test_create_location(self):
selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
self.req.body = jsonutils.dump_as_bytes(self.body)
robj = self.controller.create(self.req, body=self.body)
self.assertEqual(encodeutils.safe_decode(robj['Location']), selfhref)
    @mock.patch('nova.objects.Quotas.get_all_by_project')
    @mock.patch('nova.objects.Quotas.get_all_by_project_and_user')
    @mock.patch('nova.objects.Quotas.count_as_dict')
    def _do_test_create_instance_above_quota(self, resource, allowed,
            quota, expected_msg, mock_count, mock_get_all_pu,
            mock_get_all_p):
        # Helper: simulate that ``resource`` (one of 'instances', 'ram',
        # 'cores') is already consumed up to ``quota - allowed``, then
        # attempt a server create and assert it fails with a 403 whose
        # explanation is exactly ``expected_msg``.
        count = {'project': {}, 'user': {}}
        for res in ('instances', 'ram', 'cores'):
            if res == resource:
                # Leave only ``allowed`` headroom for the resource under
                # test; all other resources report zero usage.
                value = quota - allowed
                count['project'][res] = count['user'][res] = value
            else:
                count['project'][res] = count['user'][res] = 0
        mock_count.return_value = count
        mock_get_all_p.return_value = {'project_id': 'fake'}
        mock_get_all_pu.return_value = {'project_id': 'fake',
                                        'user_id': 'fake_user'}
        # Per-project quotas are stored separately from per-user ones;
        # attach the limit to whichever store applies to this resource.
        if resource in db_api.PER_PROJECT_QUOTAS:
            mock_get_all_p.return_value[resource] = quota
        else:
            mock_get_all_pu.return_value[resource] = quota
        fakes.stub_out_instance_quota(self, allowed, quota, resource)
        self.body['server']['flavorRef'] = 3
        self.req.body = jsonutils.dump_as_bytes(self.body)
        try:
            self.controller.create(self.req, body=self.body).obj['server']
            self.fail('expected quota to be exceeded')
        except webob.exc.HTTPForbidden as e:
            # The over-quota message must match verbatim, including the
            # requested/used/limit numbers.
            self.assertEqual(e.explanation, expected_msg)
def test_create_instance_above_quota_instances(self):
msg = ('Quota exceeded for instances: Requested 1, but'
' already used 10 of 10 instances')
self._do_test_create_instance_above_quota('instances', 0, 10, msg)
def test_create_instance_above_quota_ram(self):
msg = ('Quota exceeded for ram: Requested 4096, but'
' already used 8192 of 10240 ram')
self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
def test_create_instance_above_quota_cores(self):
msg = ('Quota exceeded for cores: Requested 2, but'
' already used 9 of 10 cores')
self._do_test_create_instance_above_quota('cores', 1, 10, msg)
    def test_create_instance_above_quota_server_group_members(self):
        # Exceeding the server_group_members quota while booting into a
        # scheduler-hint group must fail with a 403 and a group-specific
        # message.
        ctxt = self.req.environ['nova.context']
        fake_group = objects.InstanceGroup(ctxt)
        fake_group.project_id = ctxt.project_id
        fake_group.user_id = ctxt.user_id
        fake_group.create()
        # Keep a reference to the real counter so non-group resources
        # are still counted normally.
        real_count = fakes.QUOTAS.count_as_dict
        def fake_count(context, name, group, user_id):
            if name == 'server_group_members':
                # Sanity-check that the quota engine is asked about the
                # right group and user, then report the group as full.
                self.assertEqual(group.uuid, fake_group.uuid)
                self.assertEqual(user_id,
                                 self.req.environ['nova.context'].user_id)
                return {'user': {'server_group_members': 10}}
            else:
                return real_count(context, name, group, user_id)
        def fake_limit_check(context, **kwargs):
            # Only the server_group_members check trips over-quota.
            if 'server_group_members' in kwargs:
                raise exception.OverQuota(overs={})
        def fake_instance_destroy(context, uuid, constraint):
            return fakes.stub_instance(1)
        self.stubs.Set(fakes.QUOTAS, 'count_as_dict', fake_count)
        self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
        self.stub_out('nova.db.instance_destroy', fake_instance_destroy)
        self.body['os:scheduler_hints'] = {'group': fake_group.uuid}
        self.req.body = jsonutils.dump_as_bytes(self.body)
        expected_msg = "Quota exceeded, too many servers in group"
        try:
            self.controller.create(self.req, body=self.body).obj
            self.fail('expected quota to be exceeded')
        except webob.exc.HTTPForbidden as e:
            self.assertEqual(e.explanation, expected_msg)
def test_create_instance_with_group_hint(self):
ctxt = self.req.environ['nova.context']
test_group = objects.InstanceGroup(ctxt)
test_group.project_id = ctxt.project_id
test_group.user_id = ctxt.user_id
test_group.create()
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
self.stub_out('nova.db.instance_destroy', fake_instance_destroy)
self.body['os:scheduler_hints'] = {'group': test_group.uuid}
self.req.body = jsonutils.dump_as_bytes(self.body)
server = self.controller.create(self.req, body=self.body).obj['server']
test_group = objects.InstanceGroup.get_by_uuid(ctxt, test_group.uuid)
self.assertIn(server['id'], test_group.members)
def test_create_instance_with_group_hint_group_not_found(self):
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
self.stub_out('nova.db.instance_destroy', fake_instance_destroy)
self.body['os:scheduler_hints'] = {
'group': '5b674f73-c8cf-40ef-9965-3b6fe4b304b1'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_with_group_hint_wrong_uuid_format(self):
self.body['os:scheduler_hints'] = {
'group': 'non-uuid'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_with_neutronv2_port_in_use(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.PortInUse(port_id=port)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_public_network_non_admin(self, mock_create):
public_network_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
params = {'networks': [{'uuid': public_network_uuid}]}
self.req.body = jsonutils.dump_as_bytes(self.body)
mock_create.side_effect = exception.ExternalNetworkAttachForbidden(
network_uuid=public_network_uuid)
self.assertRaises(webob.exc.HTTPForbidden,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_multiple_instance_with_specified_ip_neutronv2(self,
_api_mock):
_api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
reason="")
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
address = '10.0.0.1'
requested_networks = [{'uuid': network, 'fixed_ip': address,
'port': port}]
params = {'networks': requested_networks}
self.body['server']['max_count'] = 2
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_multiple_instance_with_neutronv2_port(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
self.body['server']['max_count'] = 2
def fake_create(*args, **kwargs):
msg = ("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_instance_with_neutronv2_not_found_network(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.NetworkNotFound(network_id=network)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_instance_with_neutronv2_port_not_found(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
params = {'networks': requested_networks}
def fake_create(*args, **kwargs):
raise exception.PortNotFound(port_id=port)
self.stubs.Set(compute_api.API, 'create', fake_create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_network_ambiguous(self, mock_create):
mock_create.side_effect = exception.NetworkAmbiguous()
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, {})
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.UnableToAutoAllocateNetwork(
project_id=FAKE_UUID))
def test_create_instance_with_unable_to_auto_allocate_network(self,
mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, {})
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.ImageNotAuthorized(
image_id=FAKE_UUID))
def test_create_instance_with_image_not_authorized(self,
mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, {})
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InstanceExists(
name='instance-name'))
def test_create_instance_raise_instance_exists(self, mock_create):
self.assertRaises(webob.exc.HTTPConflict,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDMEphemeralSize)
def test_create_instance_raise_invalid_bdm_ephsize(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidNUMANodesNumber(
nodes='-1'))
def test_create_instance_raise_invalid_numa_nodes(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDMFormat(details=''))
def test_create_instance_raise_invalid_bdm_format(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDMSwapSize)
def test_create_instance_raise_invalid_bdm_swapsize(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDM)
def test_create_instance_raise_invalid_bdm(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.ImageBadRequest(
image_id='dummy', response='dummy'))
def test_create_instance_raise_image_bad_request(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
def test_create_instance_invalid_availability_zone(self):
self.body['server']['availability_zone'] = 'invalid::::zone'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.FixedIpNotFoundForAddress(
address='dummy'))
def test_create_instance_raise_fixed_ip_not_found_bad_request(self,
mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.CPUThreadPolicyConfigurationInvalid())
def test_create_instance_raise_cpu_thread_policy_configuration_invalid(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.ImageCPUPinningForbidden())
def test_create_instance_raise_image_cpu_pinning_forbidden(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.ImageCPUThreadPolicyForbidden())
def test_create_instance_raise_image_cpu_thread_policy_forbidden(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.MemoryPageSizeInvalid(pagesize='-1'))
def test_create_instance_raise_memory_page_size_invalid(self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.MemoryPageSizeForbidden(pagesize='1',
against='2'))
def test_create_instance_raise_memory_page_size_forbidden(self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.RealtimeConfigurationInvalid())
def test_create_instance_raise_realtime_configuration_invalid(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.RealtimeMaskNotFoundOrInvalid())
def test_create_instance_raise_realtime_mask_not_found_or_invalid(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_invalid_personality(self, mock_create):
codec = 'utf8'
content = encodeutils.safe_encode(
'b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==')
start_position = 19
end_position = 20
msg = 'invalid start byte'
mock_create.side_effect = UnicodeDecodeError(codec, content,
start_position,
end_position, msg)
self.body['server']['personality'] = [
{
"path": "/etc/banner.txt",
"contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==",
},
]
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_without_personality_should_get_empty_list(self):
old_create = compute_api.API.create
del self.body['server']['personality']
def create(*args, **kwargs):
self.assertEqual([], kwargs['injected_files'])
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_instance()
def test_create_instance_with_extra_personality_arg(self):
self.body['server']['personality'] = [
{
"path": "/etc/banner.txt",
"contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==",
"extra_arg": "extra value"
},
]
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.PciRequestAliasNotDefined(
alias='fake_name'))
def test_create_instance_pci_alias_not_defined(self, mock_create):
# Tests that PciRequestAliasNotDefined is translated to a 400 error.
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, {})
class ServersControllerCreateTestV219(ServersControllerCreateTest):
    """Server create tests for the 2.19 microversion.

    2.19 added an optional 'description' field to server create; these
    tests cover present/absent/None/empty descriptions and the schema
    limits (max length 255, no NUL characters).
    """
    def _create_instance_req(self, set_desc, desc=None):
        # Optionally inject a description into the request body and pin
        # the request to microversion 2.19.
        if set_desc:
            self.body['server']['description'] = desc
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.req.api_version_request = \
            api_version_request.APIVersionRequest('2.19')
    def test_create_instance_with_description(self):
        self._create_instance_req(True, 'server_desc')
        # The fact that the action doesn't raise is enough validation
        self.controller.create(self.req, body=self.body).obj
    def test_create_instance_with_none_description(self):
        self._create_instance_req(True)
        # The fact that the action doesn't raise is enough validation
        self.controller.create(self.req, body=self.body).obj
    def test_create_instance_with_empty_description(self):
        self._create_instance_req(True, '')
        # The fact that the action doesn't raise is enough validation
        self.controller.create(self.req, body=self.body).obj
    def test_create_instance_without_description(self):
        self._create_instance_req(False)
        # The fact that the action doesn't raise is enough validation
        self.controller.create(self.req, body=self.body).obj
    def test_create_instance_description_too_long(self):
        # 256 characters exceeds the 255-character schema limit.
        self._create_instance_req(True, 'X' * 256)
        self.assertRaises(exception.ValidationError, self.controller.create,
                          self.req, body=self.body)
    def test_create_instance_description_invalid(self):
        # Embedded NUL bytes are rejected by the description schema.
        self._create_instance_req(True, "abc\0ddef")
        self.assertRaises(exception.ValidationError, self.controller.create,
                          self.req, body=self.body)
class ServersControllerCreateTestV232(test.NoDBTestCase):
    """Server create tests for the 2.32 microversion (device tagging).

    2.32 allows 'tag' attributes on requested networks and block device
    mappings, but only if all computes are new enough (service version
    >= 14); older computes must cause a 400 for tagged requests.
    """
    def setUp(self):
        super(ServersControllerCreateTestV232, self).setUp()
        self.flags(use_neutron=True)
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        # Baseline request body with one network and one boot-from-volume
        # BDM; individual tests add 'tag' keys to these entries.
        self.body = {
            'server': {
                'name': 'device-tagging-server',
                'imageRef': '6b0edabb-8cde-4684-a3f4-978960a51378',
                'flavorRef': '2',
                'networks': [{
                    'uuid': 'ff608d40-75e9-48cb-b745-77bb55b5eaf2'
                }],
                'block_device_mapping_v2': [{
                    'uuid': '70a599e0-31e7-49b7-b260-868f441e862b',
                    'source_type': 'image',
                    'destination_type': 'volume',
                    'boot_index': 0,
                    'volume_size': '1'
                }]
            }
        }
        self.req = fakes.HTTPRequestV21.blank('/fake/servers', version='2.32')
        self.req.method = 'POST'
        self.req.headers['content-type'] = 'application/json'
    def _create_server(self):
        # Serialize the current body and issue the create call.
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.controller.create(self.req, body=self.body)
    def test_create_server_no_tags_old_compute(self):
        # Untagged requests are fine even when computes are old (v13).
        with test.nested(
            mock.patch('nova.objects.service.get_minimum_version_all_cells',
                       return_value=13),
            mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
                              return_value=objects.Flavor()),
            mock.patch.object(
                compute_api.API, 'create',
                return_value=(
                    [{'uuid': 'f60012d9-5ba4-4547-ab48-f94ff7e62d4e'}],
                    1)),
        ):
            self._create_server()
    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=13)
    def test_create_server_tagged_nic_old_compute_fails(self, get_min_ver):
        # Tagged NIC + old compute (v13) -> 400.
        self.body['server']['networks'][0]['tag'] = 'foo'
        self.assertRaises(webob.exc.HTTPBadRequest, self._create_server)
    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=13)
    def test_create_server_tagged_bdm_old_compute_fails(self, get_min_ver):
        # Tagged BDM + old compute (v13) -> 400.
        self.body['server']['block_device_mapping_v2'][0]['tag'] = 'foo'
        self.assertRaises(webob.exc.HTTPBadRequest, self._create_server)
    def test_create_server_tagged_nic_new_compute(self):
        # Tagged NIC succeeds once all computes report version >= 14.
        with test.nested(
            mock.patch('nova.objects.service.get_minimum_version_all_cells',
                       return_value=14),
            mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
                              return_value=objects.Flavor()),
            mock.patch.object(
                compute_api.API, 'create',
                return_value=(
                    [{'uuid': 'f60012d9-5ba4-4547-ab48-f94ff7e62d4e'}],
                    1)),
        ):
            self.body['server']['networks'][0]['tag'] = 'foo'
            self._create_server()
    def test_create_server_tagged_bdm_new_compute(self):
        # Tagged BDM succeeds once all computes report version >= 14.
        with test.nested(
            mock.patch('nova.objects.service.get_minimum_version_all_cells',
                       return_value=14),
            mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
                              return_value=objects.Flavor()),
            mock.patch.object(
                compute_api.API, 'create',
                return_value=(
                    [{'uuid': 'f60012d9-5ba4-4547-ab48-f94ff7e62d4e'}],
                    1)),
        ):
            self.body['server']['block_device_mapping_v2'][0]['tag'] = 'foo'
            self._create_server()
class ServersControllerCreateTestV237(test.NoDBTestCase):
    """Tests server create scenarios with the v2.37 microversion.

    These tests are mostly about testing the validation on the 2.37
    server create request with emphasis on negative scenarios.
    """

    def setUp(self):
        super(ServersControllerCreateTestV237, self).setUp()
        # Set the use_neutron flag to process requested networks.
        self.flags(use_neutron=True)
        # Create the server controller.
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        # Define a basic server create request body which tests can customize.
        self.body = {
            'server': {
                'name': 'auto-allocate-test',
                'imageRef': '6b0edabb-8cde-4684-a3f4-978960a51378',
                'flavorRef': '2',
            },
        }
        # Create a fake request using the 2.37 microversion.
        self.req = fakes.HTTPRequestV21.blank('/fake/servers', version='2.37')
        self.req.method = 'POST'
        self.req.headers['content-type'] = 'application/json'

    def _create_server(self, networks):
        # Helper: inject the requested *networks* value into the otherwise
        # valid body and POST the create request.
        self.body['server']['networks'] = networks
        self.req.body = jsonutils.dump_as_bytes(self.body)
        return self.controller.create(self.req, body=self.body).obj['server']

    def test_create_server_auth_pre_2_37_fails(self):
        """Negative test to make sure you can't pass 'auto' before 2.37"""
        # NOTE(review): 'auth' in the method name looks like a typo for
        # 'auto' given the docstring and the value passed below; the name
        # is kept as-is to avoid churning test identifiers.
        self.req.api_version_request = \
            api_version_request.APIVersionRequest('2.36')
        self.assertRaises(exception.ValidationError, self._create_server,
                          'auto')

    def test_create_server_no_requested_networks_fails(self):
        """Negative test for a server create request with no networks requested
        which should fail with the v2.37 schema validation.
        """
        self.assertRaises(exception.ValidationError, self._create_server, None)

    def test_create_server_network_id_not_uuid_fails(self):
        """Negative test for a server create request where the requested
        network id is not one of the auto/none enums.
        """
        self.assertRaises(exception.ValidationError, self._create_server,
                          'not-auto-or-none')

    def test_create_server_network_id_empty_string_fails(self):
        """Negative test for a server create request where the requested
        network id is the empty string.
        """
        self.assertRaises(exception.ValidationError, self._create_server, '')

    @mock.patch.object(context.RequestContext, 'can')
    def test_create_server_networks_none_skip_policy(self, context_can):
        """Test to ensure skip checking policy rule create:attach_network,
        when networks is 'none' which means no network will be allocated.
        """
        with test.nested(
            mock.patch('nova.objects.service.get_minimum_version_all_cells',
                       return_value=14),
            mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
                              return_value=objects.Flavor()),
            mock.patch.object(
                compute_api.API, 'create',
                return_value=(
                    [{'uuid': 'f9bccadf-5ab1-4a56-9156-c00c178fe5f5'}],
                    1)),
        ):
            network_policy = server_policies.SERVERS % 'create:attach_network'
            self._create_server('none')
            # The attach_network policy must never have been evaluated.
            call_list = [c for c in context_can.call_args_list
                         if c[0][0] == network_policy]
            self.assertEqual(0, len(call_list))

    @mock.patch.object(objects.Flavor, 'get_by_flavor_id',
                       side_effect=exception.FlavorNotFound(flavor_id='2'))
    def test_create_server_auto_flavornotfound(self, get_flavor):
        """Tests that requesting auto networking is OK. This test
        short-circuits on a FlavorNotFound error.
        """
        self.useFixture(nova_fixtures.AllServicesCurrent())
        ex = self.assertRaises(
            webob.exc.HTTPBadRequest, self._create_server, 'auto')
        # make sure it was a flavor not found error and not something else
        self.assertIn('Flavor 2 could not be found', six.text_type(ex))

    @mock.patch.object(objects.Flavor, 'get_by_flavor_id',
                       side_effect=exception.FlavorNotFound(flavor_id='2'))
    def test_create_server_none_flavornotfound(self, get_flavor):
        """Tests that requesting none for networking is OK. This test
        short-circuits on a FlavorNotFound error.
        """
        self.useFixture(nova_fixtures.AllServicesCurrent())
        ex = self.assertRaises(
            webob.exc.HTTPBadRequest, self._create_server, 'none')
        # make sure it was a flavor not found error and not something else
        self.assertIn('Flavor 2 could not be found', six.text_type(ex))

    @mock.patch.object(objects.Flavor, 'get_by_flavor_id',
                       side_effect=exception.FlavorNotFound(flavor_id='2'))
    def test_create_server_multiple_specific_nics_flavornotfound(self,
                                                                 get_flavor):
        """Tests that requesting multiple specific network IDs is OK. This test
        short-circuits on a FlavorNotFound error.
        """
        self.useFixture(nova_fixtures.AllServicesCurrent())
        ex = self.assertRaises(
            webob.exc.HTTPBadRequest, self._create_server,
            [{'uuid': 'e3b686a8-b91d-4a61-a3fc-1b74bb619ddb'},
             {'uuid': 'e0f00941-f85f-46ec-9315-96ded58c2f14'}])
        # make sure it was a flavor not found error and not something else
        self.assertIn('Flavor 2 could not be found', six.text_type(ex))

    def test_create_server_legacy_neutron_network_id_fails(self):
        """Tests that we no longer support the legacy br-<uuid> format for
        a network id.
        """
        uuid = 'br-00000000-0000-0000-0000-000000000000'
        self.assertRaises(exception.ValidationError, self._create_server,
                          [{'uuid': uuid}])
@ddt.ddt
class ServersControllerCreateTestV252(test.NoDBTestCase):
    """Validation tests for server tags on create with microversion 2.52."""

    def setUp(self):
        super(ServersControllerCreateTestV252, self).setUp()
        self.controller = servers.ServersController()
        # Base request body; individual tests add a 'tags' key.
        self.body = {
            'server': {
                'name': 'device-tagging-server',
                'imageRef': '6b0edabb-8cde-4684-a3f4-978960a51378',
                'flavorRef': '2',
                'networks': [{
                    'uuid': 'ff608d40-75e9-48cb-b745-77bb55b5eaf2'
                }]
            }
        }
        self.req = fakes.HTTPRequestV21.blank('/fake/servers', version='2.52')
        self.req.method = 'POST'
        self.req.headers['content-type'] = 'application/json'

    def _create_server(self, tags):
        # Inject *tags* into the request body and POST the create request.
        self.body['server']['tags'] = tags
        self.req.body = jsonutils.dump_as_bytes(self.body)
        response = self.controller.create(self.req, body=self.body)
        return response.obj['server']

    def test_create_server_with_tags_pre_2_52_fails(self):
        """Negative test to make sure you can't pass 'tags' before 2.52"""
        self.req.api_version_request = (
            api_version_request.APIVersionRequest('2.51'))
        self.assertRaises(
            exception.ValidationError, self._create_server, ['tag1'])

    @ddt.data([','],
              ['/'],
              ['a' * (tag.MAX_TAG_LENGTH + 1)],
              ['a'] * (instance_obj.MAX_TAG_COUNT + 1),
              [''],
              [1, 2, 3],
              {'tag': 'tag'})
    def test_create_server_with_tags_incorrect_tags(self, tags):
        """Negative test that malformed tag lists fail schema validation."""
        self.req.api_version_request = (
            api_version_request.APIVersionRequest('2.52'))
        self.assertRaises(
            exception.ValidationError, self._create_server, tags)
class ServersControllerCreateTestWithMock(test.TestCase):
    """Server create tests that drive the controller with a mocked
    compute API rather than the full fake compute stack.
    """

    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
    flavor_ref = 'http://localhost/123/flavors/3'

    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTestWithMock, self).setUp()
        self.flags(enable_instance_password=True, group='api')
        self.instance_cache_num = 0
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        self.body = {
            'server': {
                'name': 'server_test',
                'imageRef': self.image_uuid,
                'flavorRef': self.flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
            },
        }
        self.req = fakes.HTTPRequest.blank('/fake/servers')
        self.req.method = 'POST'
        self.req.headers["content-type"] = "application/json"

    def _test_create_extra(self, params, no_image=False):
        # Merge *params* into the base body (optionally dropping the image
        # reference) and POST the create request.
        self.body['server']['flavorRef'] = 2
        if no_image:
            self.body['server'].pop('imageRef', None)
        self.body['server'].update(params)
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.req.headers["content-type"] = "application/json"
        self.controller.create(self.req, body=self.body).obj['server']

    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
            create_mock):
        """FixedIpAlreadyInUse from the compute API maps to a 400."""
        net_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        fixed_ip = '10.0.2.3'
        params = {'networks': [{'uuid': net_id, 'fixed_ip': fixed_ip}]}
        create_mock.side_effect = exception.FixedIpAlreadyInUse(
            address=fixed_ip,
            instance_uuid=net_id)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)
        # The compute API must have been called exactly once.
        self.assertEqual(1, len(create_mock.call_args_list))

    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_with_neutronv2_invalid_fixed_ip(self,
            create_mock):
        """A malformed fixed IP fails schema validation before the
        compute API is ever called.
        """
        self.flags(use_neutron=True)
        net_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        bad_ip = '999.0.2.3'
        params = {'networks': [{'uuid': net_id, 'fixed_ip': bad_ip}]}
        self.assertRaises(exception.ValidationError,
                          self._test_create_extra, params)
        self.assertFalse(create_mock.called)

    @mock.patch.object(compute_api.API, 'create',
                       side_effect=exception.InvalidVolume(reason='error'))
    def test_create_instance_with_invalid_volume_error(self, create_mock):
        # Tests that InvalidVolume is translated to a 400 error.
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, {})
class ServersViewBuilderTest(test.TestCase):
    """Tests for the servers API view builder.

    Exercises both the 'basic' view (id/name/links) and the detailed
    'show' view, including fault handling, against a stubbed instance
    with a fixed network info cache.
    """

    def setUp(self):
        super(ServersViewBuilderTest, self).setUp()
        self.flags(use_ipv6=True)
        self.flags(group='glance', api_servers=['http://localhost:9292'])
        nw_cache_info = self._generate_nw_cache_info()
        db_inst = fakes.stub_instance(
            id=1,
            image_ref="5",
            uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
            display_name="test_server",
            include_fake_metadata=False,
            nw_cache=nw_cache_info)

        privates = ['172.19.0.1']
        publics = ['192.168.0.3']
        public6s = ['b33f::fdee:ddff:fecc:bbaa']

        def nw_info(*args, **kwargs):
            # Fake network info: a 'public' label with one v4 and one v6
            # address, and a 'private' label with one v4 address.
            return [(None, {'label': 'public',
                            'ips': [dict(ip=ip) for ip in publics],
                            'ip6s': [dict(ip=ip) for ip in public6s]}),
                    (None, {'label': 'private',
                            'ips': [dict(ip=ip) for ip in privates]})]

        fakes.stub_out_nw_api_get_instance_nw_info(self, nw_info)

        self.uuid = db_inst['uuid']
        self.view_builder = views.servers.ViewBuilder()
        self.request = fakes.HTTPRequestV21.blank("/fake")
        self.request.context = context.RequestContext('fake', 'fake')
        self.instance = fake_instance.fake_instance_obj(
                    self.request.context,
                    expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
                    **db_inst)
        self.self_link = "http://localhost/v2/fake/servers/%s" % self.uuid
        self.bookmark_link = "http://localhost/fake/servers/%s" % self.uuid

    def _generate_nw_cache_info(self):
        # Build a network info cache with three VIFs: two ports on network
        # 'test1' (the first one dual-stack) and one port on 'test2'.
        fixed_ipv4 = ('192.168.1.100', '192.168.2.100', '192.168.3.100')
        fixed_ipv6 = ('2001:db8:0:1::1',)

        def _ip(ip):
            return {'address': ip, 'type': 'fixed'}

        nw_cache = [
            {'address': 'aa:aa:aa:aa:aa:aa',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'test1',
                         'subnets': [{'cidr': '192.168.1.0/24',
                                      'ips': [_ip(fixed_ipv4[0])]},
                                     {'cidr': 'b33f::/64',
                                      'ips': [_ip(fixed_ipv6[0])]}]}},
            {'address': 'bb:bb:bb:bb:bb:bb',
             'id': 2,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'test1',
                         'subnets': [{'cidr': '192.168.2.0/24',
                                      'ips': [_ip(fixed_ipv4[1])]}]}},
            {'address': 'cc:cc:cc:cc:cc:cc',
             'id': 3,
             'network': {'bridge': 'br0',
                         'id': 2,
                         'label': 'test2',
                         'subnets': [{'cidr': '192.168.3.0/24',
                                      'ips': [_ip(fixed_ipv4[2])]}]}}]
        return nw_cache

    def test_get_flavor_valid_instance_type(self):
        """_get_flavor returns the flavor id plus a bookmark link."""
        flavor_bookmark = "http://localhost/fake/flavors/1"
        expected = {"id": "1",
                    "links": [{"rel": "bookmark",
                               "href": flavor_bookmark}]}
        result = self.view_builder._get_flavor(self.request, self.instance)
        self.assertEqual(result, expected)

    def test_build_server(self):
        """The basic view contains only id, name and links."""
        expected_server = {
            "server": {
                "id": self.uuid,
                "name": "test_server",
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
            }
        }
        output = self.view_builder.basic(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_with_project_id(self):
        # NOTE(review): this test is currently byte-identical to
        # test_build_server -- whatever project-id-specific setup or
        # assertion was intended appears to be missing. TODO: confirm.
        expected_server = {
            "server": {
                "id": self.uuid,
                "name": "test_server",
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
            }
        }
        output = self.view_builder.basic(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_detail(self):
        """The detailed view includes status, image, flavor, addresses
        and metadata for an ACTIVE server.
        """
        image_bookmark = "http://localhost/fake/images/5"
        flavor_bookmark = "http://localhost/fake/flavors/1"
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "status": "ACTIVE",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 4, 'addr': '192.168.2.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
                    ],
                    'test2': [
                        {'version': 4, 'addr': '192.168.3.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
                "OS-DCF:diskConfig": "MANUAL",
                "accessIPv4": '',
                "accessIPv6": '',
            }
        }
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_detail_with_fault(self):
        """An ERROR server with a fault exposes a 'fault' section
        (including details) and no 'progress' key.
        """
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                     self.request.context, self.uuid)

        image_bookmark = "http://localhost/fake/images/5"
        flavor_bookmark = "http://localhost/fake/flavors/1"
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "name": "test_server",
                "status": "ERROR",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 4, 'addr': '192.168.2.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
                    ],
                    'test2': [
                        {'version': 4, 'addr': '192.168.3.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
                "fault": {
                    "code": 404,
                    "created": "2010-10-10T12:00:00Z",
                    "message": "HTTPNotFound",
                    "details": "Stock details for test",
                },
                "OS-DCF:diskConfig": "MANUAL",
                "accessIPv4": '',
                "accessIPv6": '',
            }
        }

        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_detail_with_fault_that_has_been_deleted(self):
        """A deleted instance reports DELETED status regardless of
        vm_state, and the fault omits details for non-admin context.
        """
        self.instance['deleted'] = 1
        self.instance['vm_state'] = vm_states.ERROR
        fault = fake_instance.fake_fault_obj(self.request.context,
                                             self.uuid, code=500,
                                             message="No valid host was found")
        self.instance['fault'] = fault

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "No valid host was found"}

        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        # Regardless of vm_state deleted servers should be DELETED
        self.assertEqual("DELETED", output['server']['status'])
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
    def test_build_server_detail_with_fault_no_instance_mapping(self,
                                                                mock_im):
        """show() tolerates InstanceMappingNotFound while looking up the
        fault for an ERROR instance.
        """
        self.instance['vm_state'] = vm_states.ERROR

        mock_im.side_effect = exception.InstanceMappingNotFound(uuid='foo')

        self.request.context = context.RequestContext('fake', 'fake')
        self.view_builder.show(self.request, self.instance)
        mock_im.assert_called_once_with(mock.ANY, self.uuid)

    @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
    def test_build_server_detail_with_fault_loaded(self, mock_im):
        """No instance-mapping lookup happens when the fault is already
        loaded on the instance.
        """
        self.instance['vm_state'] = vm_states.ERROR
        fault = fake_instance.fake_fault_obj(self.request.context,
                                             self.uuid, code=500,
                                             message="No valid host was found")
        self.instance['fault'] = fault

        self.request.context = context.RequestContext('fake', 'fake')
        self.view_builder.show(self.request, self.instance)
        self.assertFalse(mock_im.called)

    def test_build_server_detail_with_fault_no_details_not_admin(self):
        """Non-admin users never see fault details."""
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                                   self.request.context,
                                                   self.uuid,
                                                   code=500,
                                                   message='Error')

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "Error"}

        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_admin(self):
        """Admin users see the fault details."""
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                                   self.request.context,
                                                   self.uuid,
                                                   code=500,
                                                   message='Error')

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "Error",
                          'details': 'Stock details for test'}

        self.request.environ['nova.context'].is_admin = True
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_no_details_admin(self):
        """Even for admins, an empty details string is omitted."""
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                                   self.request.context,
                                                   self.uuid,
                                                   code=500,
                                                   message='Error',
                                                   details='')

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "Error"}

        self.request.environ['nova.context'].is_admin = True
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_but_active(self):
        """A fault on an ACTIVE instance is not shown."""
        self.instance['vm_state'] = vm_states.ACTIVE
        self.instance['progress'] = 100
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                     self.request.context, self.uuid)

        output = self.view_builder.show(self.request, self.instance)
        self.assertNotIn('fault', output['server'])

    def test_build_server_detail_active_status(self):
        # set the power state of the instance to running
        self.instance['vm_state'] = vm_states.ACTIVE
        self.instance['progress'] = 100
        image_bookmark = "http://localhost/fake/images/5"
        flavor_bookmark = "http://localhost/fake/flavors/1"
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 100,
                "name": "test_server",
                "status": "ACTIVE",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 4, 'addr': '192.168.2.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
                    ],
                    'test2': [
                        {'version': 4, 'addr': '192.168.3.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
                "OS-DCF:diskConfig": "MANUAL",
                "accessIPv4": '',
                "accessIPv6": '',
            }
        }

        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_detail_with_metadata(self):
        """Instance metadata rows appear in the view's 'metadata' dict."""
        metadata = []
        metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
        metadata = nova_utils.metadata_to_dict(metadata)
        self.instance['metadata'] = metadata

        image_bookmark = "http://localhost/fake/images/5"
        flavor_bookmark = "http://localhost/fake/flavors/1"
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "status": "ACTIVE",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 4, 'addr': '192.168.2.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
                    ],
                    'test2': [
                        {'version': 4, 'addr': '192.168.3.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
                    ]
                },
                "metadata": {"Open": "Stack"},
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
                "OS-DCF:diskConfig": "MANUAL",
                "accessIPv4": '',
                "accessIPv6": '',
            }
        }

        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))
class ServersAllExtensionsTestCase(test.TestCase):
    """Servers tests using default API router with all extensions enabled.

    The intent here is to catch cases where extensions end up throwing
    an exception because of a malformed request before the core API
    gets a chance to validate the request and return a 422 response.

    For example, AccessIPsController extends servers.Controller::

        | @wsgi.extends
        | def create(self, req, resp_obj, body):
        |     context = req.environ['nova.context']
        |     if authorize(context) and 'server' in resp_obj.obj:
        |         resp_obj.attach(xml=AccessIPTemplate())
        |         server = resp_obj.obj['server']
        |         self._extend_server(req, server)

    we want to ensure that the extension isn't barfing on an invalid
    body.
    """

    def setUp(self):
        super(ServersAllExtensionsTestCase, self).setUp()
        self.app = compute.APIRouterV21()

    def _malformed_request(self, path, method):
        # Build a request whose body lacks the required 'server' key.
        req = fakes.HTTPRequestV21.blank(path)
        req.method = method
        req.content_type = 'application/json'
        req.body = jsonutils.dump_as_bytes({'foo': {'a': 'b'}})
        return req

    def test_create_missing_server(self):
        # Test create with malformed body.
        def fake_create(*args, **kwargs):
            raise test.TestingException("Should not reach the compute API.")

        self.stubs.Set(compute_api.API, 'create', fake_create)
        req = self._malformed_request('/fake/servers', 'POST')
        res = req.get_response(self.app)
        self.assertEqual(400, res.status_int)

    def test_update_missing_server(self):
        # Test update with malformed body.
        req = self._malformed_request('/fake/servers/1', 'PUT')
        with mock.patch('nova.objects.Instance.save') as mock_save:
            res = req.get_response(self.app)
            self.assertFalse(mock_save.called)
            self.assertEqual(400, res.status_int)
class ServersInvalidRequestTestCase(test.TestCase):
    """Tests of places we throw 400 Bad Request from."""

    def setUp(self):
        super(ServersInvalidRequestTestCase, self).setUp()
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)

    def _invalid_server_create(self, body):
        # POST the malformed *body* and expect schema validation to
        # reject it.
        req = fakes.HTTPRequestV21.blank('/fake/servers')
        req.method = 'POST'

        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)

    def test_create_server_no_body(self):
        self._invalid_server_create(body=None)

    def test_create_server_missing_server(self):
        body = {'foo': {'a': 'b'}}
        self._invalid_server_create(body=body)

    def test_create_server_malformed_entity(self):
        body = {'server': 'string'}
        self._invalid_server_create(body=body)

    def _unprocessable_server_update(self, body):
        # NOTE(review): this helper appears to be dead code -- the
        # test_update_* methods below call _invalid_server_create
        # instead, so the controller's update path is never exercised by
        # this class. TODO: confirm whether they were meant to call this
        # helper (and whether update raises HTTPBadRequest or
        # ValidationError for these bodies) before "fixing" them.
        req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID)
        req.method = 'PUT'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, FAKE_UUID, body=body)

    def test_update_server_no_body(self):
        # NOTE(review): exercises the create path, not update -- see
        # _unprocessable_server_update above.
        self._invalid_server_create(body=None)

    def test_update_server_missing_server(self):
        # NOTE(review): exercises the create path, not update -- see
        # _unprocessable_server_update above.
        body = {'foo': {'a': 'b'}}
        self._invalid_server_create(body=body)

    def test_create_update_malformed_entity(self):
        body = {'server': 'string'}
        self._invalid_server_create(body=body)
# TODO(alex_xu): There isn't a dedicated file for the ips extension. Most of
# the unit tests related to the ips extension live in this file, so keep the
# ips policy enforcement tests here until a dedicated file exists.
class IPsPolicyEnforcementV21(test.NoDBTestCase):
    """Policy enforcement checks for the ips (server addresses) API."""

    def setUp(self):
        super(IPsPolicyEnforcementV21, self).setUp()
        self.controller = ips.IPsController()
        self.req = fakes.HTTPRequest.blank("/v2/fake")

    def _assert_policy_blocks(self, rule_name, func, *args):
        # Deny *rule_name* for the request's project and verify the call
        # fails with the standard policy error message.
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *args)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_index_policy_failed(self):
        self._assert_policy_blocks(
            "os_compute_api:ips:index",
            self.controller.index, self.req, fakes.FAKE_UUID)

    def test_show_policy_failed(self):
        self._assert_policy_blocks(
            "os_compute_api:ips:show",
            self.controller.show, self.req, fakes.FAKE_UUID,
            fakes.FAKE_UUID)
class ServersPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(ServersPolicyEnforcementV21, self).setUp()
self.useFixture(nova_fixtures.AllServicesCurrent())
ext_info = extension_info.LoadedExtensionInfo()
ext_info.extensions.update({'os-networks': 'fake'})
self.controller = servers.ServersController(extension_info=ext_info)
self.req = fakes.HTTPRequest.blank('')
self.image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg):
self.policy.set_rules(rules)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
    @mock.patch.object(servers.ServersController, '_get_instance')
    def test_start_policy_failed(self, _get_instance_mock):
        """Starting a server is rejected when policy denies the project."""
        _get_instance_mock.return_value = None
        rule_name = "os_compute_api:servers:start"
        rule = {rule_name: "project:non_fake"}
        self._common_policy_check(
            rule, rule_name, self.controller._start_server,
            self.req, FAKE_UUID, body={})
    @mock.patch.object(servers.ServersController, '_get_instance')
    def test_trigger_crash_dump_policy_failed_with_other_project(
            self, _get_instance_mock):
        """trigger_crash_dump is rejected when the request's project does
        not match the project_id the rule requires.
        """
        _get_instance_mock.return_value = fake_instance.fake_instance_obj(
            self.req.environ['nova.context'])
        rule_name = "os_compute_api:servers:trigger_crash_dump"
        rule = {rule_name: "project_id:%(project_id)s"}
        self.req.api_version_request =\
            api_version_request.APIVersionRequest('2.17')
        # Change the project_id in request context.
        self.req.environ['nova.context'].project_id = 'other-project'
        self._common_policy_check(
            rule, rule_name, self.controller._action_trigger_crash_dump,
            self.req, FAKE_UUID, body={'trigger_crash_dump': None})
    @mock.patch('nova.compute.api.API.trigger_crash_dump')
    @mock.patch.object(servers.ServersController, '_get_instance')
    def test_trigger_crash_dump_overridden_policy_pass_with_same_project(
            self, _get_instance_mock, trigger_crash_dump_mock):
        """trigger_crash_dump succeeds when the instance belongs to the
        requesting project and reaches the compute API exactly once.
        """
        instance = fake_instance.fake_instance_obj(
            self.req.environ['nova.context'],
            project_id=self.req.environ['nova.context'].project_id)
        _get_instance_mock.return_value = instance
        rule_name = "os_compute_api:servers:trigger_crash_dump"
        self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
        self.req.api_version_request = (
            api_version_request.APIVersionRequest('2.17'))
        self.controller._action_trigger_crash_dump(
            self.req, fakes.FAKE_UUID, body={'trigger_crash_dump': None})
        trigger_crash_dump_mock.assert_called_once_with(
            self.req.environ['nova.context'], instance)
    @mock.patch.object(servers.ServersController, '_get_instance')
    def test_trigger_crash_dump_overridden_policy_failed_with_other_user(
            self, _get_instance_mock):
        """trigger_crash_dump is rejected by a user_id-scoped rule when
        the request context carries a different user.
        """
        _get_instance_mock.return_value = (
            fake_instance.fake_instance_obj(self.req.environ['nova.context']))
        rule_name = "os_compute_api:servers:trigger_crash_dump"
        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
        # Change the user_id in request context.
        self.req.environ['nova.context'].user_id = 'other-user'
        self.req.api_version_request = (
            api_version_request.APIVersionRequest('2.17'))
        exc = self.assertRaises(exception.PolicyNotAuthorized,
                                self.controller._action_trigger_crash_dump,
                                self.req,
                                fakes.FAKE_UUID,
                                body={'trigger_crash_dump': None})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
    @mock.patch('nova.compute.api.API.trigger_crash_dump')
    @mock.patch.object(servers.ServersController, '_get_instance')
    def test_trigger_crash_dump_overridden_policy_pass_with_same_user(
            self, _get_instance_mock, trigger_crash_dump_mock):
        """trigger_crash_dump succeeds under a user_id-scoped rule when
        the instance belongs to the requesting user.
        """
        instance = fake_instance.fake_instance_obj(
            self.req.environ['nova.context'],
            user_id=self.req.environ['nova.context'].user_id)
        _get_instance_mock.return_value = instance
        rule_name = "os_compute_api:servers:trigger_crash_dump"
        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
        self.req.api_version_request = (
            api_version_request.APIVersionRequest('2.17'))
        self.controller._action_trigger_crash_dump(
            self.req, fakes.FAKE_UUID, body={'trigger_crash_dump': None})
        trigger_crash_dump_mock.assert_called_once_with(
            self.req.environ['nova.context'], instance)
def test_index_policy_failed(self):
rule_name = "os_compute_api:servers:index"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller.index, self.req)
def test_detail_policy_failed(self):
rule_name = "os_compute_api:servers:detail"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller.detail, self.req)
def test_detail_get_tenants_policy_failed(self):
req = fakes.HTTPRequest.blank('')
req.GET["all_tenants"] = "True"
rule_name = "os_compute_api:servers:detail:get_all_tenants"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller._get_servers, req, True)
def test_index_get_tenants_policy_failed(self):
req = fakes.HTTPRequest.blank('')
req.GET["all_tenants"] = "True"
rule_name = "os_compute_api:servers:index:get_all_tenants"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller._get_servers, req, False)
    @mock.patch.object(common, 'get_instance')
    def test_show_policy_failed(self, get_instance_mock):
        """Showing a server is rejected when policy denies the project."""
        get_instance_mock.return_value = None
        rule_name = "os_compute_api:servers:show"
        rule = {rule_name: "project:non_fake"}
        self._common_policy_check(
            rule, rule_name, self.controller.show, self.req, FAKE_UUID)
    @mock.patch.object(common, 'get_instance')
    def test_delete_policy_failed_with_other_project(self, get_instance_mock):
        """Delete is rejected by a project_id-scoped rule when the
        request context carries a different project.
        """
        get_instance_mock.return_value = fake_instance.fake_instance_obj(
            self.req.environ['nova.context'])
        rule_name = "os_compute_api:servers:delete"
        rule = {rule_name: "project_id:%(project_id)s"}
        # Change the project_id in request context.
        self.req.environ['nova.context'].project_id = 'other-project'
        self._common_policy_check(
            rule, rule_name, self.controller.delete, self.req, FAKE_UUID)
    @mock.patch('nova.compute.api.API.soft_delete')
    @mock.patch('nova.api.openstack.common.get_instance')
    def test_delete_overridden_policy_pass_with_same_project(self,
                                                         get_instance_mock,
                                                         soft_delete_mock):
        """Delete succeeds under a project_id-scoped rule when the
        instance belongs to the requesting project; with a reclaim
        interval configured it goes through soft_delete.
        """
        self.flags(reclaim_instance_interval=3600)
        instance = fake_instance.fake_instance_obj(
            self.req.environ['nova.context'],
            project_id=self.req.environ['nova.context'].project_id)
        get_instance_mock.return_value = instance
        rule_name = "os_compute_api:servers:delete"
        self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
        self.controller.delete(self.req, fakes.FAKE_UUID)
        soft_delete_mock.assert_called_once_with(
            self.req.environ['nova.context'], instance)
    @mock.patch('nova.api.openstack.common.get_instance')
    def test_delete_overridden_policy_failed_with_other_user_in_same_project(
            self, get_instance_mock):
        """Delete is rejected by a user_id-scoped rule when the request
        context carries a different user.
        """
        get_instance_mock.return_value = (
            fake_instance.fake_instance_obj(self.req.environ['nova.context']))
        rule_name = "os_compute_api:servers:delete"
        rule = {rule_name: "user_id:%(user_id)s"}
        # Change the user_id in request context.
        self.req.environ['nova.context'].user_id = 'other-user'
        self._common_policy_check(
            rule, rule_name, self.controller.delete, self.req, FAKE_UUID)
    @mock.patch('nova.compute.api.API.soft_delete')
    @mock.patch('nova.api.openstack.common.get_instance')
    def test_delete_overridden_policy_pass_with_same_user(self,
                                                      get_instance_mock,
                                                      soft_delete_mock):
        """Delete succeeds under a user_id-scoped rule when the instance
        belongs to the requesting user; with a reclaim interval
        configured it goes through soft_delete.
        """
        self.flags(reclaim_instance_interval=3600)
        instance = fake_instance.fake_instance_obj(
            self.req.environ['nova.context'],
            user_id=self.req.environ['nova.context'].user_id)
        get_instance_mock.return_value = instance
        rule_name = "os_compute_api:servers:delete"
        self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
        self.controller.delete(self.req, fakes.FAKE_UUID)
        soft_delete_mock.assert_called_once_with(
            self.req.environ['nova.context'], instance)
@mock.patch.object(common, 'get_instance')
def test_update_policy_failed_with_other_project(self, get_instance_mock):
    """Server update is rejected when the request context is a different project."""
    get_instance_mock.return_value = fake_instance.fake_instance_obj(
        self.req.environ['nova.context'])
    rule_name = "os_compute_api:servers:update"
    rule = {rule_name: "project_id:%(project_id)s"}
    body = {'server': {'name': 'server_test'}}
    # Change the project_id in request context.
    self.req.environ['nova.context'].project_id = 'other-project'
    self._common_policy_check(
        rule, rule_name, self.controller.update, self.req,
        FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.compute.views.servers.ViewBuilder.show')
@mock.patch.object(compute_api.API, 'update_instance')
@mock.patch.object(common, 'get_instance')
def test_update_overridden_policy_pass_with_same_project(
        self, get_instance_mock, update_instance_mock, view_show_mock):
    """Update succeeds under a project-scoped rule when the project matches."""
    instance = fake_instance.fake_instance_obj(
        self.req.environ['nova.context'],
        project_id=self.req.environ['nova.context'].project_id)
    get_instance_mock.return_value = instance
    rule_name = "os_compute_api:servers:update"
    self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
    body = {'server': {'name': 'server_test'}}
    self.controller.update(self.req, fakes.FAKE_UUID, body=body)
@mock.patch.object(common, 'get_instance')
def test_update_overridden_policy_failed_with_other_user_in_same_project(
        self, get_instance_mock):
    """Update is rejected under a user-scoped rule when the user differs."""
    get_instance_mock.return_value = (
        fake_instance.fake_instance_obj(self.req.environ['nova.context']))
    rule_name = "os_compute_api:servers:update"
    rule = {rule_name: "user_id:%(user_id)s"}
    # Change the user_id in request context.
    self.req.environ['nova.context'].user_id = 'other-user'
    body = {'server': {'name': 'server_test'}}
    self._common_policy_check(
        rule, rule_name, self.controller.update, self.req,
        FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.compute.views.servers.ViewBuilder.show')
@mock.patch.object(compute_api.API, 'update_instance')
@mock.patch.object(common, 'get_instance')
def test_update_overridden_policy_pass_with_same_user(self,
                                                      get_instance_mock,
                                                      update_instance_mock,
                                                      view_show_mock):
    """Update succeeds under a user-scoped rule when the user matches."""
    instance = fake_instance.fake_instance_obj(
        self.req.environ['nova.context'],
        user_id=self.req.environ['nova.context'].user_id)
    get_instance_mock.return_value = instance
    rule_name = "os_compute_api:servers:update"
    self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
    body = {'server': {'name': 'server_test'}}
    self.controller.update(self.req, fakes.FAKE_UUID, body=body)
def test_confirm_resize_policy_failed(self):
    """Confirming a resize is rejected when policy denies the rule."""
    policy_rule = "os_compute_api:servers:confirm_resize"
    self._common_policy_check(
        {policy_rule: "project:non_fake"},
        policy_rule,
        self.controller._action_confirm_resize,
        self.req,
        FAKE_UUID,
        body={'server': {'name': 'server_test'}})
def test_revert_resize_policy_failed(self):
    """Reverting a resize is rejected when policy denies the rule."""
    policy_rule = "os_compute_api:servers:revert_resize"
    self._common_policy_check(
        {policy_rule: "project:non_fake"},
        policy_rule,
        self.controller._action_revert_resize,
        self.req,
        FAKE_UUID,
        body={'server': {'name': 'server_test'}})
def test_reboot_policy_failed(self):
    """Rebooting a server is rejected when policy denies the rule."""
    policy_rule = "os_compute_api:servers:reboot"
    self._common_policy_check(
        {policy_rule: "project:non_fake"},
        policy_rule,
        self.controller._action_reboot,
        self.req,
        FAKE_UUID,
        body={'reboot': {'type': 'HARD'}})
@mock.patch('nova.api.openstack.common.get_instance')
def test_resize_policy_failed_with_other_project(self, get_instance_mock):
    """Resize is rejected when the request context is a different project."""
    get_instance_mock.return_value = (
        fake_instance.fake_instance_obj(self.req.environ['nova.context']))
    rule_name = "os_compute_api:servers:resize"
    rule = {rule_name: "project_id:%(project_id)s"}
    body = {'resize': {'flavorRef': '1'}}
    # Change the project_id in request context.
    self.req.environ['nova.context'].project_id = 'other-project'
    self._common_policy_check(
        rule, rule_name, self.controller._action_resize, self.req,
        FAKE_UUID, body=body)
@mock.patch('nova.compute.api.API.resize')
@mock.patch('nova.api.openstack.common.get_instance')
def test_resize_overridden_policy_pass_with_same_project(self,
                                                         get_instance_mock,
                                                         resize_mock):
    """Resize succeeds under a project-scoped rule when the project matches."""
    instance = fake_instance.fake_instance_obj(
        self.req.environ['nova.context'],
        project_id=self.req.environ['nova.context'].project_id)
    get_instance_mock.return_value = instance
    rule_name = "os_compute_api:servers:resize"
    self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
    body = {'resize': {'flavorRef': '1'}}
    self.controller._action_resize(self.req, fakes.FAKE_UUID, body=body)
    resize_mock.assert_called_once_with(self.req.environ['nova.context'],
                                        instance, '1')
@mock.patch('nova.api.openstack.common.get_instance')
def test_resize_overridden_policy_failed_with_other_user_in_same_project(
        self, get_instance_mock):
    """Resize is rejected under a user-scoped rule when the user differs."""
    get_instance_mock.return_value = (
        fake_instance.fake_instance_obj(self.req.environ['nova.context']))
    rule_name = "os_compute_api:servers:resize"
    rule = {rule_name: "user_id:%(user_id)s"}
    # Change the user_id in request context.
    self.req.environ['nova.context'].user_id = 'other-user'
    body = {'resize': {'flavorRef': '1'}}
    self._common_policy_check(
        rule, rule_name, self.controller._action_resize, self.req,
        FAKE_UUID, body=body)
@mock.patch('nova.compute.api.API.resize')
@mock.patch('nova.api.openstack.common.get_instance')
def test_resize_overridden_policy_pass_with_same_user(self,
                                                      get_instance_mock,
                                                      resize_mock):
    """Resize succeeds under a user-scoped rule when the user matches."""
    instance = fake_instance.fake_instance_obj(
        self.req.environ['nova.context'],
        user_id=self.req.environ['nova.context'].user_id)
    get_instance_mock.return_value = instance
    rule_name = "os_compute_api:servers:resize"
    self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
    body = {'resize': {'flavorRef': '1'}}
    self.controller._action_resize(self.req, fakes.FAKE_UUID, body=body)
    resize_mock.assert_called_once_with(self.req.environ['nova.context'],
                                        instance, '1')
@mock.patch('nova.api.openstack.common.get_instance')
def test_rebuild_policy_failed_with_other_project(self, get_instance_mock):
    """Rebuild is rejected when the request context is a different project."""
    get_instance_mock.return_value = fake_instance.fake_instance_obj(
        self.req.environ['nova.context'],
        project_id=self.req.environ['nova.context'].project_id)
    rule_name = "os_compute_api:servers:rebuild"
    rule = {rule_name: "project_id:%(project_id)s"}
    body = {'rebuild': {'imageRef': self.image_uuid}}
    # Change the project_id in request context.
    self.req.environ['nova.context'].project_id = 'other-project'
    self._common_policy_check(
        rule, rule_name, self.controller._action_rebuild,
        self.req, FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.common.get_instance')
def test_rebuild_overridden_policy_failed_with_other_user_in_same_project(
        self, get_instance_mock):
    """Rebuild is rejected under a user-scoped rule when the user differs."""
    get_instance_mock.return_value = (
        fake_instance.fake_instance_obj(self.req.environ['nova.context']))
    rule_name = "os_compute_api:servers:rebuild"
    rule = {rule_name: "user_id:%(user_id)s"}
    body = {'rebuild': {'imageRef': self.image_uuid}}
    # Change the user_id in request context.
    self.req.environ['nova.context'].user_id = 'other-user'
    self._common_policy_check(
        rule, rule_name, self.controller._action_rebuild,
        self.req, FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.compute.views.servers.ViewBuilder.show')
@mock.patch('nova.compute.api.API.rebuild')
@mock.patch('nova.api.openstack.common.get_instance')
def test_rebuild_overridden_policy_pass_with_same_user(self,
                                                       get_instance_mock,
                                                       rebuild_mock,
                                                       view_show_mock):
    """Rebuild succeeds under a user-scoped rule when the user matches."""
    instance = fake_instance.fake_instance_obj(
        self.req.environ['nova.context'],
        user_id=self.req.environ['nova.context'].user_id)
    get_instance_mock.return_value = instance
    rule_name = "os_compute_api:servers:rebuild"
    self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
    body = {'rebuild': {'imageRef': self.image_uuid,
                        'adminPass': 'dumpy_password'}}
    self.controller._action_rebuild(self.req, fakes.FAKE_UUID, body=body)
    rebuild_mock.assert_called_once_with(self.req.environ['nova.context'],
                                         instance,
                                         self.image_uuid,
                                         'dumpy_password')
def test_create_image_policy_failed(self):
    """Snapshot creation is rejected when policy denies the createImage rule."""
    policy_rule = "os_compute_api:servers:create_image"
    snapshot_body = {'createImage': {'name': 'Snapshot 1'}}
    self._common_policy_check(
        {policy_rule: "project:non_fake"},
        policy_rule,
        self.controller._action_create_image,
        self.req,
        FAKE_UUID,
        body=snapshot_body)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
            return_value=True)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(servers.ServersController, '_get_server')
def test_create_vol_backed_img_snapshotting_policy_blocks_project(self,
                                                                  mock_get_server,
                                                                  mock_get_uuidi,
                                                                  mock_is_vol_back):
    """Don't permit a snapshot of a volume backed instance if configured
    not to based on project
    """
    rule_name = "os_compute_api:servers:create_image:allow_volume_backed"
    rules = {
        rule_name: "project:non_fake",
        # the base create_image rule must pass so the sub-rule is what fails
        "os_compute_api:servers:create_image": "",
    }
    body = {
        'createImage': {
            'name': 'Snapshot 1',
        },
    }
    self._common_policy_check(
        rules, rule_name, self.controller._action_create_image,
        self.req, FAKE_UUID, body=body)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
            return_value=True)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(servers.ServersController, '_get_server')
def test_create_vol_backed_img_snapshotting_policy_blocks_role(self,
                                                               mock_get_server,
                                                               mock_get_uuidi,
                                                               mock_is_vol_back):
    """Don't permit a snapshot of a volume backed instance if configured
    not to based on role
    """
    rule_name = "os_compute_api:servers:create_image:allow_volume_backed"
    rules = {
        rule_name: "role:non_fake",
        # the base create_image rule must pass so the sub-rule is what fails
        "os_compute_api:servers:create_image": "",
    }
    body = {
        'createImage': {
            'name': 'Snapshot 1',
        },
    }
    self._common_policy_check(
        rules, rule_name, self.controller._action_create_image,
        self.req, FAKE_UUID, body=body)
def _create_policy_check(self, rules, rule_name):
    """Issue a representative server-create request and expect the given
    policy rule to reject it."""
    request_body = {
        'server': {
            'name': 'server_test',
            'imageRef': self.image_uuid,
            'flavorRef': 'http://localhost/123/flavors/3',
            'availability_zone': "zone1:host1:node1",
            'block_device_mapping': [{'device_name': "/dev/sda1"}],
            'networks': [{'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}],
            'metadata': {
                'hello': 'world',
                'open': 'stack',
            },
        },
    }
    self._common_policy_check(
        rules, rule_name, self.controller.create, self.req,
        body=request_body)
def test_create_policy_failed(self):
    """Server create is rejected when the base create rule denies access."""
    policy_rule = "os_compute_api:servers:create"
    self._create_policy_check({policy_rule: "project:non_fake"}, policy_rule)
def test_create_forced_host_policy_failed(self):
    """Create with a forced host fails when only that sub-rule denies access."""
    policy_rule = "os_compute_api:servers:create:forced_host"
    self._create_policy_check(
        {"os_compute_api:servers:create": "@",
         policy_rule: "project:non_fake"},
        policy_rule)
def test_create_attach_volume_policy_failed(self):
    """Create with an attached volume fails when only that sub-rule denies access."""
    policy_rule = "os_compute_api:servers:create:attach_volume"
    self._create_policy_check(
        {"os_compute_api:servers:create": "@",
         "os_compute_api:servers:create:forced_host": "@",
         policy_rule: "project:non_fake"},
        policy_rule)
def test_create_attach_attach_network_policy_failed(self):
    """Create with a requested network fails when only that sub-rule denies access."""
    policy_rule = "os_compute_api:servers:create:attach_network"
    self._create_policy_check(
        {"os_compute_api:servers:create": "@",
         "os_compute_api:servers:create:forced_host": "@",
         "os_compute_api:servers:create:attach_volume": "@",
         policy_rule: "project:non_fake"},
        policy_rule)
class ServersActionsJsonTestV239(test.NoDBTestCase):
    """Tests for server-action behaviour introduced in API microversion 2.39."""
    def setUp(self):
        super(ServersActionsJsonTestV239, self).setUp()
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        # Requests are issued against microversion 2.39.
        self.req = fakes.HTTPRequest.blank('', version='2.39')
    @mock.patch.object(common, 'check_img_metadata_properties_quota')
    @mock.patch.object(common, 'get_instance')
    def test_server_create_image_no_quota_checks(self, mock_get_instance,
                                                 mock_check_quotas):
        """createImage performs no image-metadata quota check from 2.39 on."""
        # 'mock_get_instance' helps to skip the whole logic of the action,
        # but to make the test
        mock_get_instance.side_effect = webob.exc.HTTPNotFound
        body = {
            'createImage': {
                'name': 'Snapshot 1',
            },
        }
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._action_create_image, self.req,
                          FAKE_UUID, body=body)
        # starting from version 2.39 no quota checks on Nova side are performed
        # for 'createImage' action after removing 'image-metadata' proxy API
        mock_check_quotas.assert_not_called()
| 43.705655 | 79 | 0.594889 |
037fb7e08e6a587159778909b6f4172506336054 | 828 | py | Python | tests/contrib/pyramid/app/app.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 308 | 2016-12-07T16:49:27.000Z | 2022-03-15T10:06:45.000Z | tests/contrib/pyramid/app/app.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1,928 | 2016-11-28T17:13:18.000Z | 2022-03-31T21:43:19.000Z | tests/contrib/pyramid/app/app.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 311 | 2016-11-27T03:01:49.000Z | 2022-03-18T21:34:03.000Z | from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from ddtrace import tracer
from tests.webclient import PingFilter
tracer.configure(
settings={
"FILTERS": [PingFilter()],
}
)
def hello_world(request):
return Response("Hello World!")
def tracer_shutdown(request):
tracer.shutdown()
return Response("shutdown")
if __name__ == "__main__":
with Configurator() as config:
config.add_route("hello", "/")
config.add_view(hello_world, route_name="hello")
config.add_route("tracer-shutdown", "/shutdown-tracer")
config.add_view(tracer_shutdown, route_name="tracer-shutdown")
app = config.make_wsgi_app()
server = make_server("0.0.0.0", 8000, app)
server.serve_forever()
| 22.378378 | 70 | 0.699275 |
c42d6b61f8fc8519d496e503e142e2c2e9d84cc7 | 692 | py | Python | expenses/urls.py | georgiawang5332/meatFoodManager | 204b5b7aa2a3c5a6c9cc38077a10a72c0c69f140 | [
"MIT"
] | null | null | null | expenses/urls.py | georgiawang5332/meatFoodManager | 204b5b7aa2a3c5a6c9cc38077a10a72c0c69f140 | [
"MIT"
] | null | null | null | expenses/urls.py | georgiawang5332/meatFoodManager | 204b5b7aa2a3c5a6c9cc38077a10a72c0c69f140 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
from django.views.decorators.csrf import csrf_exempt
# Create your urls here.
app_name = 'expenses'
urlpatterns = [
path('', views.index, name="expenses"),
path('add-expense/', views.add_expense, name="add-expenses"),
path('edit-expense/<int:id>/', views.expense_edit, name="expense-edit"),
path('expense-delete/<int:id>/', views.delete_expense, name="expense-delete"),
path('search-expenses/', csrf_exempt(views.search_expenses),
name="search_expenses"),
path('expense_category_summary/', views.expense_category_summary,
name="expense_category_summary"),
path('stats/', views.stats_view,
name="stats")
]
| 32.952381 | 80 | 0.718208 |
0f401b2f6cdd5d9a443bafe8eba58ace8bf74dc0 | 38,757 | py | Python | bundle/sagemaker_rl_agent/lib/python3.5/site-packages/markov/multi_agent_coach/multi_agent_graph_manager.py | Asdafers/deepracer-simapp | 539ee72942c18c453c65fb7300beb586dd440690 | [
"MIT"
] | null | null | null | bundle/sagemaker_rl_agent/lib/python3.5/site-packages/markov/multi_agent_coach/multi_agent_graph_manager.py | Asdafers/deepracer-simapp | 539ee72942c18c453c65fb7300beb586dd440690 | [
"MIT"
] | null | null | null | bundle/sagemaker_rl_agent/lib/python3.5/site-packages/markov/multi_agent_coach/multi_agent_graph_manager.py | Asdafers/deepracer-simapp | 539ee72942c18c453c65fb7300beb586dd440690 | [
"MIT"
] | null | null | null | import copy
import glob
import os
import time
from collections import OrderedDict
from distutils.dir_util import copy_tree, remove_tree
from typing import Dict, List, Tuple
import contextlib
from rl_coach.base_parameters import iterable_to_items, TaskParameters, DistributedTaskParameters, Frameworks, \
VisualizationParameters, PresetValidationParameters, RunType, AgentParameters
from rl_coach.checkpoint import CheckpointStateUpdater, get_checkpoint_state, SingleCheckpoint, CheckpointState
from rl_coach.core_types import TotalStepsCounter, RunPhase, PlayingStepsType, TrainingSteps, EnvironmentEpisodes, \
EnvironmentSteps, StepMethod, Transition, TimeTypes
from rl_coach.environments.environment import Environment, EnvironmentParameters
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.logger import screen, Logger
from rl_coach.saver import SaverCollection
from rl_coach.utils import set_cpu, short_dynamic_import
from rl_coach.data_stores.data_store_impl import get_data_store as data_store_creator
from rl_coach.memories.backend.memory_impl import get_memory_backend
from rl_coach.data_stores.data_store import SyncFiles
from rl_coach.checkpoint import CheckpointStateReader
import markov.deepracer_memory as deepracer_memory
from markov.multi_agent_coach.multi_agent_level_manager import MultiAgentLevelManager
class MultiAgentGraphManager(object):
"""
A simple multi-agent graph manager and a single environment which is interacted with.
"""
def __init__(self, agents_params: List[AgentParameters], env_params: EnvironmentParameters,
             schedule_params: ScheduleParameters,
             vis_params: VisualizationParameters = VisualizationParameters(),
             preset_validation_params: PresetValidationParameters = PresetValidationParameters(),
             done_condition=any):
    """
    :param agents_params: parameters of every agent sharing the single environment
    :param env_params: parameters of the shared environment
    :param schedule_params: heatup / training / evaluation schedule
    :param vis_params: visualization parameters, copied onto each agent
    :param preset_validation_params: parameters used when validating a preset
    :param done_condition: aggregation over per-agent dones (e.g. ``any``/``all``)
        deciding when the shared episode terminates
    """
    self.done_condition = done_condition
    self.sess = {agent_params.name: None for agent_params in agents_params}
    self.level_managers = []  # type: List[MultiAgentLevelManager]
    self.top_level_manager = None
    self.environments = []
    self.set_schedule_params(schedule_params)
    self.visualization_parameters = vis_params
    self.name = 'multi_agent_graph'
    self.task_parameters = None
    self._phase = self.phase = RunPhase.UNDEFINED
    self.preset_validation_params = preset_validation_params
    self.reset_required = False
    self.num_checkpoints_to_keep = 4  # TODO: make this a parameter
    # timers
    self.graph_creation_time = None
    self.last_checkpoint_saving_time = time.time()
    # counters
    self.total_steps_counters = {
        RunPhase.HEATUP: TotalStepsCounter(),
        RunPhase.TRAIN: TotalStepsCounter(),
        RunPhase.TEST: TotalStepsCounter()
    }
    self.checkpoint_id = 0
    self.checkpoint_saver = {agent_params.name: None for agent_params in agents_params}
    self.checkpoint_state_updater = None
    self.graph_logger = Logger()
    self.data_store = None
    self.is_batch_rl = False
    self.time_metric = TimeTypes.EpisodeNumber
    self.env_params = env_params
    self.agents_params = agents_params
    self.agent_params = agents_params[0]  # ...(find a better way)...
    # Give each agent a unique name and its own copies of the shared
    # visualization parameters and of the environment's default filters.
    for agent_index, agent_params in enumerate(agents_params):
        if len(agents_params) == 1:
            agent_params.name = "agent"
        else:
            agent_params.name = "agent_{}".format(agent_index)
        agent_params.visualization = copy.copy(vis_params)
        if agent_params.input_filter is None:
            agent_params.input_filter = copy.copy(env_params.default_input_filter())
        if agent_params.output_filter is None:
            agent_params.output_filter = copy.copy(env_params.default_output_filter())
def create_graph(self, task_parameters=TaskParameters(),
                 stop_physics=None, start_physics=None, empty_service_call=None):
    """
    Build the full graph: level managers, environment and per-agent sessions.

    :param task_parameters: run-time task parameters (possibly distributed)
    :param start_physics: optional callable used to resume simulator physics
        before the graph modules are created
    :param stop_physics: optional callable used to pause simulator physics
        again once creation is done
    :param empty_service_call: factory producing the (empty) request object
        passed to the physics callables
    :return: self, to allow chaining
    """
    self.graph_creation_time = time.time()
    self.task_parameters = task_parameters
    if isinstance(task_parameters, DistributedTaskParameters):
        screen.log_title("Creating graph - name: {} task id: {} type: {}".format(self.__class__.__name__,
                                                                                task_parameters.task_index,
                                                                                task_parameters.job_type))
    else:
        screen.log_title("Creating graph - name: {}".format(self.__class__.__name__))
    # "hide" the gpu if necessary
    if task_parameters.use_cpu:
        set_cpu()
    # create a target server for the worker and a device
    if isinstance(task_parameters, DistributedTaskParameters):
        task_parameters.worker_target, task_parameters.device = \
            self.create_worker_or_parameters_server(task_parameters=task_parameters)
    # If necessary start the physics and then stop it after agent creation
    screen.log_title("Start physics before creating graph")
    if start_physics and empty_service_call:
        start_physics(empty_service_call())
    # create the graph modules
    screen.log_title("Create graph")
    self.level_managers, self.environments = self._create_graph(task_parameters)
    screen.log_title("Stop physics after creating graph")
    if stop_physics and empty_service_call:
        stop_physics(empty_service_call())
    # set self as the parent of all the level managers
    self.top_level_manager = self.level_managers[0]
    for level_manager in self.level_managers:
        level_manager.parent_graph_manager = self
    # create a session (it needs to be created after all the graph ops were created)
    self.sess = {agent_params.name: None for agent_params in self.agents_params}
    screen.log_title("Creating session")
    self.create_session(task_parameters=task_parameters)
    self._phase = self.phase = RunPhase.UNDEFINED
    self.setup_logger()
    return self
def _create_graph(self, task_parameters: TaskParameters) -> Tuple[List[MultiAgentLevelManager], List[Environment]]:
    """
    Instantiate the environment and all agents and wrap them in a single
    level manager.

    :param task_parameters: run-time task parameters (supplies seed and
        experiment path to the environment)
    :return: ([level manager], [environment])
    """
    # environment loading
    self.env_params.seed = task_parameters.seed
    self.env_params.experiment_path = task_parameters.experiment_path
    env = short_dynamic_import(self.env_params.path)(**self.env_params.__dict__,
                                                     visualization_parameters=self.visualization_parameters)
    # agent loading
    agents = OrderedDict()
    for agent_params in self.agents_params:
        agent_params.task_parameters = copy.copy(task_parameters)
        agent = short_dynamic_import(agent_params.path)(agent_params)
        agents[agent_params.name] = agent
        screen.log_title("Created agent: {}".format(agent_params.name))
        # Rollout workers publish experience through the DeepRacer memory
        # backend instead of keeping it for local training.
        if hasattr(self, 'memory_backend_params') and \
                self.memory_backend_params.run_type == str(RunType.ROLLOUT_WORKER):
            agent.memory.memory_backend = deepracer_memory.DeepRacerRolloutBackEnd(self.memory_backend_params,
                                                                                   agent_params.algorithm.num_consecutive_playing_steps,
                                                                                   agent_params.name)
    # set level manager
    level_manager = MultiAgentLevelManager(agents=agents, environment=env, name="main_level", done_condition=self.done_condition)
    return [level_manager], [env]
@staticmethod
def _create_worker_or_parameters_server_tf(task_parameters: DistributedTaskParameters):
    """
    Distributed-TF setup: either start a parameter server (a non-returning
    call) or create a worker server, returning its (target, device) pair.

    :param task_parameters: distributed task parameters holding the cluster layout
    :raises ValueError: if ``job_type`` is neither 'ps' nor 'worker'
    """
    import tensorflow as tf
    config = tf.ConfigProto()
    config.allow_soft_placement = True  # allow placing ops on cpu if they are not fit for gpu
    config.gpu_options.allow_growth = True  # allow the gpu memory allocated for the worker to grow if needed
    config.gpu_options.per_process_gpu_memory_fraction = 0.2
    config.intra_op_parallelism_threads = 1
    config.inter_op_parallelism_threads = 1
    from rl_coach.architectures.tensorflow_components.distributed_tf_utils import \
        create_and_start_parameters_server, \
        create_cluster_spec, create_worker_server_and_device
    # create cluster spec
    cluster_spec = create_cluster_spec(parameters_server=task_parameters.parameters_server_hosts,
                                       workers=task_parameters.worker_hosts)
    # create and start parameters server (non-returning function) or create a worker and a device setter
    if task_parameters.job_type == "ps":
        create_and_start_parameters_server(cluster_spec=cluster_spec,
                                           config=config)
    elif task_parameters.job_type == "worker":
        return create_worker_server_and_device(cluster_spec=cluster_spec,
                                               task_index=task_parameters.task_index,
                                               use_cpu=task_parameters.use_cpu,
                                               config=config)
    else:
        raise ValueError("The job type should be either ps or worker and not {}"
                         .format(task_parameters.job_type))
@staticmethod
def create_worker_or_parameters_server(task_parameters: DistributedTaskParameters):
    """
    Create a distributed worker (returning its target and device) or start a
    parameter server, depending on ``task_parameters.job_type``.

    :param task_parameters: distributed task parameters with the cluster layout
    :raises NotImplementedError: for the MXNet framework
    :raises ValueError: for any other unsupported framework
    """
    if task_parameters.framework_type == Frameworks.tensorflow:
        # Bug fix: this previously referenced the undefined name
        # ``GraphManager`` (not imported in this module), which raised a
        # NameError at runtime. The helper lives on this class.
        return MultiAgentGraphManager._create_worker_or_parameters_server_tf(task_parameters)
    elif task_parameters.framework_type == Frameworks.mxnet:
        raise NotImplementedError('Distributed training not implemented for MXNet')
    else:
        raise ValueError('Invalid framework {}'.format(task_parameters.framework_type))
def _create_session_tf(self, task_parameters: TaskParameters):
    """
    Create the TensorFlow session(s): a monitored session for a distributed
    worker, or one plain session per agent in the single-process case, then
    hand them to all graph modules.
    """
    import tensorflow as tf
    config = tf.ConfigProto()
    config.allow_soft_placement = True  # allow placing ops on cpu if they are not fit for gpu
    config.gpu_options.allow_growth = True  # allow the gpu memory allocated for the worker to grow if needed
    # config.gpu_options.per_process_gpu_memory_fraction = 0.2
    config.intra_op_parallelism_threads = 1
    config.inter_op_parallelism_threads = 1
    if isinstance(task_parameters, DistributedTaskParameters):
        # the distributed tensorflow setting
        from rl_coach.architectures.tensorflow_components.distributed_tf_utils import create_monitored_session
        if hasattr(self.task_parameters, 'checkpoint_restore_path') and self.task_parameters.checkpoint_restore_path:
            checkpoint_dir = os.path.join(task_parameters.experiment_path, 'checkpoint')
            if os.path.exists(checkpoint_dir):
                remove_tree(checkpoint_dir)
            # in the locally distributed case, checkpoints are always restored from a directory (and not from a
            # file)
            copy_tree(task_parameters.checkpoint_restore_path, checkpoint_dir)
        else:
            checkpoint_dir = task_parameters.checkpoint_save_dir
        self.sess = create_monitored_session(target=task_parameters.worker_target,
                                             task_index=task_parameters.task_index,
                                             checkpoint_dir=checkpoint_dir,
                                             checkpoint_save_secs=task_parameters.checkpoint_save_secs,
                                             config=config)
        # set the session for all the modules
        self.set_session(self.sess)
    else:
        # regular session
        print("Creating regular session")
        self.sess = {agent_params.name: tf.Session(config=config) for agent_params in self.agents_params}
        # set the session for all the modules
        self.set_session(self.sess)
    # the TF graph is static, and therefore is saved once - in the beginning of the experiment
    if hasattr(self.task_parameters, 'checkpoint_save_dir') and self.task_parameters.checkpoint_save_dir:
        self.save_graph()
def _create_session_mx(self):
    """MXNet has no session object; just initialize all modules via set_session."""
    # Passing sess=None initializes parameters; the checkpoint saver is
    # constructed afterwards by create_session.
    self.set_session(sess=None)
def create_session(self, task_parameters: TaskParameters):
    """
    Create the framework session(s), build the per-agent checkpoint savers,
    and restore from a checkpoint if one was given.

    :param task_parameters: run-time task parameters
    :raises ValueError: for unsupported framework types
    """
    if task_parameters.framework_type == Frameworks.tensorflow:
        self._create_session_tf(task_parameters)
    elif task_parameters.framework_type == Frameworks.mxnet:
        self._create_session_mx()
    else:
        raise ValueError('Invalid framework {}'.format(task_parameters.framework_type))
    # Create parameter saver
    self.checkpoint_saver = {agent_params.name: SaverCollection() for agent_params in self.agents_params}
    for level in self.level_managers:
        for agent_params in self.agents_params:
            self.checkpoint_saver[agent_params.name].update(level.collect_savers(agent_params.name))
    # restore from checkpoint if given
    self.restore_checkpoint()
def save_graph(self) -> None:
    """
    Save the TF graph to a protobuf description file in the experiment directory
    :return: None
    """
    import tensorflow as tf
    # write graph as a binary protobuf named graphdef.pb under the checkpoint dir
    tf.train.write_graph(tf.get_default_graph(),
                         logdir=self.task_parameters.checkpoint_save_dir,
                         name='graphdef.pb',
                         as_text=False)
def _save_onnx_graph_tf(self) -> None:
    """
    Save the tensorflow graph as an ONNX graph.
    This requires the graph and the weights checkpoint to be stored in the experiment directory.
    It then freezes the graph (merging the graph and weights checkpoint), and converts it to ONNX.
    :return: None
    """
    # collect input and output nodes
    input_nodes = []
    output_nodes = []
    for level in self.level_managers:
        for agent in level.agents.values():
            for network in agent.networks.values():
                for input_key, input in network.online_network.inputs.items():
                    # NOTE(review): "output_*" inputs are skipped — presumably
                    # they are training-target placeholders rather than real
                    # inference inputs; confirm against the network builder.
                    if not input_key.startswith("output_"):
                        input_nodes.append(input.name)
                for output in network.online_network.outputs:
                    output_nodes.append(output.name)
    from rl_coach.architectures.tensorflow_components.architecture import save_onnx_graph
    save_onnx_graph(input_nodes, output_nodes, self.task_parameters.checkpoint_save_dir)
def save_onnx_graph(self) -> None:
    """
    Export the stored graph plus its weights checkpoint as an ONNX model.
    Requires both to already exist in the experiment directory; the graph is
    frozen (graph merged with weights) and then converted to ONNX.
    """
    if self.task_parameters.framework_type != Frameworks.tensorflow:
        # ONNX export is only implemented for the TensorFlow backend.
        return
    self._save_onnx_graph_tf()
def setup_logger(self) -> None:
    """Configure the graph-level logger, then each level manager's logger."""
    # dump documentation
    logger_prefix = "{graph_name}".format(graph_name=self.name)
    self.graph_logger.set_logger_filenames(self.task_parameters.experiment_path, logger_prefix=logger_prefix,
                                           add_timestamp=True, task_id=self.task_parameters.task_index)
    if self.visualization_parameters.dump_parameters_documentation:
        self.graph_logger.dump_documentation(str(self))
    [manager.setup_logger() for manager in self.level_managers]
@property
def phase(self) -> RunPhase:
    """
    Get the phase of the graph
    :return: the current RunPhase
    """
    return self._phase
@phase.setter
def phase(self, val: RunPhase):
    """
    Change the phase of the graph and all the hierarchy levels below it
    :param val: the new phase
    :return: None
    """
    self._phase = val
    for level_manager in self.level_managers:
        level_manager.phase = val
    for environment in self.environments:
        environment.phase = val
        # also fire the environment's phase-change hook
        environment._notify_phase(val)
@property
def current_step_counter(self) -> TotalStepsCounter:
    """The steps counter associated with the currently active phase."""
    return self.total_steps_counters[self.phase]
@contextlib.contextmanager
def phase_context(self, phase):
    """
    Context manager which temporarily sets the phase to the provided phase.
    The previous phase is restored afterwards, even if the body raises.
    """
    old_phase = self.phase
    self.phase = phase
    try:
        yield
    finally:
        # Bug fix: without try/finally, an exception raised inside the
        # ``with`` body skipped this line and left the graph stuck in the
        # temporary phase, contradicting the documented contract.
        self.phase = old_phase
def set_session(self, sess) -> None:
    """Propagate the deep learning framework session to every level manager."""
    for level_manager in self.level_managers:
        level_manager.set_session(sess)
def heatup(self, steps: PlayingStepsType) -> None:
    """
    Perform heatup for several steps, which means taking random actions and storing the results in memory
    :param steps: the number of steps as a tuple of steps time and steps count
    :return: None
    """
    self.verify_graph_was_created()
    if steps.num_steps > 0:
        with self.phase_context(RunPhase.HEATUP):
            screen.log_title("{}: Starting heatup".format(self.name))
            # reset all the levels before starting to heatup
            self.reset_internal_state(force_environment_reset=True)
            # act for at least steps, though don't interrupt an episode
            count_end = self.current_step_counter + steps
            while self.current_step_counter < count_end:
                # one whole episode per iteration, so episodes are never cut short
                self.act(EnvironmentEpisodes(1))
def handle_episode_ended(self) -> None:
    """Close out the current episode: bump the episode counter and notify the environments."""
    self.current_step_counter[EnvironmentEpisodes] += 1
    for environment in self.environments:
        environment.handle_episode_ended()
def train(self) -> None:
    """Run a single training iteration on every level of the hierarchy."""
    self.verify_graph_was_created()
    with self.phase_context(RunPhase.TRAIN):
        self.current_step_counter[TrainingSteps] += 1
        for level_manager in self.level_managers:
            level_manager.train()
def reset_internal_state(self, force_environment_reset=False) -> None:
    """
    Reset the episode for all environments and all level managers.

    :param force_environment_reset: force the environment to reset the episode
        even if it has conditions that tell it not to (e.g. an ALE life was
        lost but more lives remain).
    """
    self.verify_graph_was_created()
    self.reset_required = False
    for environment in self.environments:
        environment.reset_internal_state(force_environment_reset)
    for level_manager in self.level_managers:
        level_manager.reset_internal_state()
def act(self, steps: PlayingStepsType, wait_for_full_episodes=False) -> None:
    """
    Do several steps of acting on the environment
    :param wait_for_full_episodes: if set, act for at least `steps`, but make sure that the last episode is complete
    :param steps: the number of steps as a tuple of steps time and steps count
    """
    self.verify_graph_was_created()

    # perform several steps of playing
    count_end = self.current_step_counter + steps
    done = False
    while self.current_step_counter < count_end or (wait_for_full_episodes and not done):
        # reset the environment if the previous episode was terminated
        if self.reset_required:
            self.reset_internal_state()

        # The step delta is measured on the first environment; we count actual
        # environment steps rather than calls into the hierarchy.
        steps_begin = self.environments[0].total_steps_counter
        done = self.top_level_manager.step(None)
        steps_end = self.environments[0].total_steps_counter

        if done:
            self.handle_episode_ended()
            self.reset_required = True

        self.current_step_counter[EnvironmentSteps] += (steps_end - steps_begin)

        # if no steps were made (can happen when no actions are taken while in the TRAIN phase, either in batch RL
        # or in imitation learning), we force end the loop, so that it will not continue forever.
        if (steps_end - steps_begin) == 0:
            break
def train_and_act(self, steps: StepMethod) -> None:
    """
    Train the agent by doing several acting steps followed by several training steps continually
    :param steps: the number of steps as a tuple of steps time and steps count
    :return: None
    """
    self.verify_graph_was_created()

    # perform several steps of training interleaved with acting
    if steps.num_steps > 0:
        with self.phase_context(RunPhase.TRAIN):
            self.reset_internal_state(force_environment_reset=True)

            count_end = self.current_step_counter + steps
            while self.current_step_counter < count_end:
                # The actual number of steps being done on the environment
                # is decided by the agent, though this inner loop always
                # takes at least one step in the environment (at the GraphManager level).
                # The agent might also decide to skip acting altogether.
                # Depending on internal counters and parameters, it doesn't always train or save checkpoints.
                self.act(EnvironmentSteps(1))
                self.train()
                self.occasionally_save_checkpoint()
def sync(self) -> None:
    """
    Sync the global network parameters to the graph
    :return: None
    """
    [manager.sync() for manager in self.level_managers]
def evaluate(self, steps: PlayingStepsType) -> bool:
    """
    Perform evaluation for several steps
    :param steps: the number of steps as a tuple of steps time and steps count
    :return: bool, True if the target reward and target success has been reached
    """
    self.verify_graph_was_created()

    if steps.num_steps > 0:
        with self.phase_context(RunPhase.TEST):
            # reset all the levels before starting to evaluate
            self.reset_internal_state(force_environment_reset=True)
            self.sync()

            # act for at least `steps`, though don't interrupt an episode;
            # re-syncing after each episode keeps evaluation on fresh weights.
            count_end = self.current_step_counter + steps
            while self.current_step_counter < count_end:
                self.act(EnvironmentEpisodes(1))
                self.sync()

    # NOTE(review): indentation reconstructed - the stop check is assumed to
    # run after the evaluation block, regardless of whether steps were taken.
    if self.should_stop():
        self.flush_finished()
        screen.success("Reached required success rate. Exiting.")
        return True
    return False
def improve(self):
    """
    The main loop of the run.
    Defined in the following steps:
    1. Heatup
    2. Repeat:
        2.1. Repeat:
            2.1.1. Act
            2.1.2. Train
            2.1.3. Possibly save checkpoint
        2.2. Evaluate
    :return: None
    """
    self.verify_graph_was_created()

    # initialize the network parameters from the global network
    self.sync()

    # heatup
    self.heatup(self.heatup_steps)

    # improve
    if self.task_parameters.task_index is not None:
        screen.log_title("Starting to improve {} task index {}".format(self.name, self.task_parameters.task_index))
    else:
        screen.log_title("Starting to improve {}".format(self.name))

    # Evaluation may signal early success and break out before the full
    # training budget is spent.
    count_end = self.total_steps_counters[RunPhase.TRAIN] + self.improve_steps
    while self.total_steps_counters[RunPhase.TRAIN] < count_end:
        self.train_and_act(self.steps_between_evaluation_periods)
        if self.evaluate(self.evaluation_steps):
            break
def restore_checkpoint(self):
    """Restore agent networks from the configured checkpoint path.

    The path may be a checkpoint directory (the latest checkpoint is picked
    via its checkpoint-state file) or, for TensorFlow only, a direct
    checkpoint file. With multiple agents, each agent restores from its own
    sub-directory named after the agent.
    """
    self.verify_graph_was_created()

    # TODO: find better way to load checkpoints that were saved with a global network into the online network
    if self.task_parameters.checkpoint_restore_path:
        restored_checkpoint_paths = []
        for agent_params in self.agents_params:
            if len(self.agents_params) == 1:
                agent_checkpoint_restore_path = self.task_parameters.checkpoint_restore_path
            else:
                agent_checkpoint_restore_path = os.path.join(self.task_parameters.checkpoint_restore_path,
                                                             agent_params.name)
            if os.path.isdir(agent_checkpoint_restore_path):
                # a checkpoint dir
                if self.task_parameters.framework_type == Frameworks.tensorflow and\
                        'checkpoint' in os.listdir(agent_checkpoint_restore_path):
                    # TODO-fixme checkpointing
                    # MonitoredTrainingSession manages save/restore checkpoints autonomously. Doing so,
                    # it creates it own names for the saved checkpoints, which do not match the "{}_Step-{}.ckpt"
                    # filename pattern. The names used are maintained in a CheckpointState protobuf file named
                    # 'checkpoint'. Using Coach's '.coach_checkpoint' protobuf file, results in an error when trying to
                    # restore the model, as the checkpoint names defined do not match the actual checkpoint names.
                    raise NotImplementedError('Checkpointing not implemented for TF monitored training session')
                else:
                    checkpoint = get_checkpoint_state(agent_checkpoint_restore_path, all_checkpoints=True)
                    if checkpoint is None:
                        raise ValueError("No checkpoint to restore in: {}".format(agent_checkpoint_restore_path))
                    model_checkpoint_path = checkpoint.model_checkpoint_path
                    checkpoint_restore_dir = self.task_parameters.checkpoint_restore_path
                    restored_checkpoint_paths.append(model_checkpoint_path)

                    # Set the last checkpoint ID - only in the case of the path being a dir
                    chkpt_state_reader = CheckpointStateReader(agent_checkpoint_restore_path,
                                                               checkpoint_state_optional=False)
                    self.checkpoint_id = chkpt_state_reader.get_latest().num + 1
            else:
                # a checkpoint file
                if self.task_parameters.framework_type == Frameworks.tensorflow:
                    model_checkpoint_path = agent_checkpoint_restore_path
                    checkpoint_restore_dir = os.path.dirname(model_checkpoint_path)
                    restored_checkpoint_paths.append(model_checkpoint_path)
                else:
                    raise ValueError("Currently restoring a checkpoint using the --checkpoint_restore_file argument is"
                                     " only supported when with tensorflow.")

            try:
                self.checkpoint_saver[agent_params.name].restore(self.sess[agent_params.name],
                                                                 model_checkpoint_path)
            except Exception as ex:
                raise ValueError("Failed to restore {}'s checkpoint: {}".format(agent_params.name, ex))

            # Prune checkpoints beyond the retention window.
            # NOTE(review): `checkpoint` is only bound on the directory branch;
            # reaching this via the single-file branch would raise NameError -
            # confirm intended placement against the upstream source.
            all_checkpoints = sorted(list(set([c.name for c in checkpoint.all_checkpoints])))  # remove duplicates :-(
            if self.num_checkpoints_to_keep < len(all_checkpoints):
                checkpoint_to_delete = all_checkpoints[-self.num_checkpoints_to_keep - 1]
                agent_checkpoint_to_delete = os.path.join(agent_checkpoint_restore_path, checkpoint_to_delete)
                for file in glob.glob("{}*".format(agent_checkpoint_to_delete)):
                    os.remove(file)

        [manager.restore_checkpoint(checkpoint_restore_dir) for manager in self.level_managers]
        [manager.post_training_commands() for manager in self.level_managers]

        screen.log_dict(
            OrderedDict([
                ("Restoring from path", restored_checkpoint_path) for restored_checkpoint_path in restored_checkpoint_paths
            ]),
            prefix="Checkpoint"
        )
def _get_checkpoint_state_tf(self, checkpoint_restore_dir):
    """Return TensorFlow's CheckpointState for *checkpoint_restore_dir*.

    TF is imported lazily so non-TF configurations never pay the import cost.
    """
    import tensorflow as tf
    return tf.train.get_checkpoint_state(checkpoint_restore_dir)
def occasionally_save_checkpoint(self):
    """Save a checkpoint when the configured save interval has elapsed.

    Only the chief process writes checkpoints: task_index == 0 in a
    distributed run, or task_index is None in a single-worker run. When
    checkpoint_save_secs is falsy, periodic checkpointing is disabled.
    """
    interval = self.task_parameters.checkpoint_save_secs
    is_chief = self.task_parameters.task_index in (0, None)
    if interval and is_chief and time.time() - self.last_checkpoint_saving_time >= interval:
        self.save_checkpoint()
def save_checkpoint(self):
    """Write a checkpoint for every agent and run all post-save hooks.

    Side effects beyond the checkpoint files themselves: prunes checkpoints
    past the retention window, optionally purges the Redis memory backend,
    optionally exports an ONNX graph, updates the checkpoint-state file, and
    pushes everything to the configured data store.
    """
    # create current session's checkpoint directory
    if self.task_parameters.checkpoint_save_dir is None:
        self.task_parameters.checkpoint_save_dir = os.path.join(self.task_parameters.experiment_path, 'checkpoint')
    if not os.path.exists(self.task_parameters.checkpoint_save_dir):
        os.mkdir(self.task_parameters.checkpoint_save_dir)  # Create directory structure

    if self.checkpoint_state_updater is None:
        self.checkpoint_state_updater = CheckpointStateUpdater(self.task_parameters.checkpoint_save_dir)

    checkpoint_name = "{}_Step-{}.ckpt".format(
        self.checkpoint_id, self.total_steps_counters[RunPhase.TRAIN][EnvironmentSteps])

    saved_checkpoint_paths = []
    for agent_params in self.agents_params:
        # Single-agent runs write directly into the save dir; multi-agent runs
        # get one sub-directory per agent.
        if len(self.agents_params) == 1:
            agent_checkpoint_save_dir = self.task_parameters.checkpoint_save_dir
        else:
            agent_checkpoint_save_dir = os.path.join(self.task_parameters.checkpoint_save_dir, agent_params.name)
            if not os.path.exists(agent_checkpoint_save_dir):
                os.mkdir(agent_checkpoint_save_dir)

        agent_checkpoint_path = os.path.join(agent_checkpoint_save_dir, checkpoint_name)
        if not isinstance(self.task_parameters, DistributedTaskParameters):
            saved_checkpoint_paths.append(self.checkpoint_saver[agent_params.name].save(self.sess[agent_params.name], agent_checkpoint_path))
        else:
            # In distributed mode the session saves autonomously; only the
            # intended path is recorded here.
            saved_checkpoint_paths.append(agent_checkpoint_path)

        # Delete the oldest checkpoint files once the retention window is full.
        if self.num_checkpoints_to_keep < len(self.checkpoint_state_updater.all_checkpoints):
            checkpoint_to_delete = self.checkpoint_state_updater.all_checkpoints[-self.num_checkpoints_to_keep - 1]
            agent_checkpoint_to_delete = os.path.join(agent_checkpoint_save_dir, checkpoint_to_delete.name)
            for file in glob.glob("{}*".format(agent_checkpoint_to_delete)):
                os.remove(file)

    # this is required in order for agents to save additional information like a DND for example
    [manager.save_checkpoint(checkpoint_name) for manager in self.level_managers]

    # Purge Redis memory after saving the checkpoint as Transitions are no longer needed at this point.
    if hasattr(self, 'memory_backend'):
        self.memory_backend.memory_purge()

    # the ONNX graph will be stored only if checkpoints are stored and the -onnx flag is used
    if self.task_parameters.export_onnx_graph:
        self.save_onnx_graph()

    # write the new checkpoint name to a file to signal this checkpoint has been fully saved
    self.checkpoint_state_updater.update(SingleCheckpoint(self.checkpoint_id, checkpoint_name))

    screen.log_dict(
        OrderedDict([
            ("Saving in path", saved_checkpoint_path) for saved_checkpoint_path in saved_checkpoint_paths
        ]),
        prefix="Checkpoint"
    )

    self.checkpoint_id += 1
    self.last_checkpoint_saving_time = time.time()

    if hasattr(self, 'data_store_params'):
        data_store = self.get_data_store(self.data_store_params)
        data_store.save_to_store()
def verify_graph_was_created(self):
    """
    Verify that the graph was already created; if not, create it with the
    default task parameters.
    :return: None
    """
    # graph_creation_time doubles as the "graph exists" flag.
    if self.graph_creation_time is not None:
        return
    self.create_graph()
def __str__(self):
    """Render every attribute of the manager for debugging.

    Containers (list/dict/OrderedDict) are expanded one "key: value" pair per
    line; all other values are printed via their default string conversion.
    """
    result = ""
    for key, val in self.__dict__.items():
        params = ""
        if isinstance(val, list) or isinstance(val, dict) or isinstance(val, OrderedDict):
            items = iterable_to_items(val)
            for k, v in items:
                params += "{}: {}\n".format(k, v)
        else:
            params = val
        result += "{}: \n{}\n".format(key, params)
    return result
def should_train(self) -> bool:
    """Return True if at least one level manager wants to train now."""
    # Every manager is polled (no short-circuiting) so that any bookkeeping
    # inside should_train() runs for all of them, exactly as before.
    votes = [manager.should_train() for manager in self.level_managers]
    return any(votes)
# TODO-remove - this is a temporary flow, used by the trainer worker, duplicated from observe() - need to create
# an external trainer flow reusing the existing flow and methods [e.g. observe(), step(), act()]
def emulate_act_on_trainer(self, steps: PlayingStepsType, transitions: Dict[str, Transition]) -> None:
    """
    This emulates the act using the transition obtained from the rollout worker on the training worker
    in case of distributed training.
    Do several steps of acting on the environment
    :param steps: the number of steps as a tuple of steps time and steps count
    :param transitions: per-agent transitions fetched from the rollout worker
    """
    self.verify_graph_was_created()

    # perform several steps of playing
    count_end = self.current_step_counter + steps
    while self.current_step_counter < count_end:
        # reset the environment if the previous episode was terminated
        if self.reset_required:
            self.reset_internal_state()

        steps_begin = self.environments[0].total_steps_counter
        done = self.top_level_manager.emulate_step_on_trainer(transitions)
        steps_end = self.environments[0].total_steps_counter

        # add the diff between the total steps before and after stepping, such that environment initialization steps
        # (like in Atari) will not be counted.
        # We add at least one step so that even if no steps were made (in case no actions are taken in the training
        # phase), the loop will end eventually.
        self.current_step_counter[EnvironmentSteps] += max(1, steps_end - steps_begin)

        if done:
            self.handle_episode_ended()
            self.reset_required = True
def fetch_from_worker(self, num_consecutive_playing_steps=None):
    """Pull rollout transitions from the memory backend and replay them on the trainer.

    Each fetched batch is fed through emulate_act_on_trainer(); when a sample
    collector is configured, the batch is also recorded for dataset export.
    """
    if hasattr(self, 'memory_backend'):
        for transitions in self.memory_backend.fetch(num_consecutive_playing_steps):
            self.emulate_act_on_trainer(EnvironmentSteps(1), transitions)
            # NOTE(review): indentation reconstructed - the sample collector is
            # assumed to run once per fetched batch, inside the loop.
            if hasattr(self, 'sample_collector'):
                self.sample_collector.sample(transitions)
def setup_memory_backend(self) -> None:
    """Instantiate the DeepRacer trainer-side memory backend, if backend params were configured."""
    if hasattr(self, 'memory_backend_params'):
        self.memory_backend = deepracer_memory.DeepRacerTrainerBackEnd(self.memory_backend_params, self.agents_params)


def should_stop(self) -> bool:
    """Return True when early stopping is enabled and every level manager agrees to stop."""
    # NOTE: if apply_stop_condition is falsy, its value (not strictly False)
    # is returned; callers use the result in a boolean context.
    return self.task_parameters.apply_stop_condition and all([manager.should_stop() for manager in self.level_managers])


def get_data_store(self, param):
    """Return the injected data store if one was set, otherwise build one from *param*."""
    if self.data_store:
        return self.data_store
    return data_store_creator(param)


def signal_ready(self):
    """Write the TRAINER_READY marker file into the checkpoint dir and sync it to the data store (if any)."""
    if self.task_parameters.checkpoint_save_dir and os.path.exists(self.task_parameters.checkpoint_save_dir):
        # Touch the marker; its presence (not its content) is the signal.
        open(os.path.join(self.task_parameters.checkpoint_save_dir, SyncFiles.TRAINER_READY.value), 'w').close()
    if hasattr(self, 'data_store_params'):
        data_store = self.get_data_store(self.data_store_params)
        data_store.save_to_store()
def close(self) -> None:
    """
    Close every environment owned by this graph manager, releasing its resources.
    :return: None
    """
    for environment in self.environments:
        environment.close()
def get_current_episodes_count(self):
    """
    Returns the current EnvironmentEpisodes counter
    """
    return self.current_step_counter[EnvironmentEpisodes]
def flush_finished(self):
    """
    To indicate the training has finished, writes a `.finished` file to the checkpoint directory and calls
    the data store to upload that file.
    """
    if self.task_parameters.checkpoint_save_dir and os.path.exists(self.task_parameters.checkpoint_save_dir):
        # Touch the marker file; its presence (not its content) is the signal.
        open(os.path.join(self.task_parameters.checkpoint_save_dir, SyncFiles.FINISHED.value), 'w').close()
    if hasattr(self, 'data_store_params'):
        data_store = self.get_data_store(self.data_store_params)
        data_store.save_to_store()
def set_schedule_params(self, schedule_params: ScheduleParameters):
    """
    Set schedule parameters for the graph.
    :param schedule_params: the schedule params to set.
    """
    # Copied onto the manager so the improve() loop can consume them directly.
    self.heatup_steps = schedule_params.heatup_steps
    self.evaluation_steps = schedule_params.evaluation_steps
    self.steps_between_evaluation_periods = schedule_params.steps_between_evaluation_periods
    self.improve_steps = schedule_params.improve_steps
| 48.567669 | 145 | 0.649689 |
67dc6fac67595026ac5eec16595785c6091aa0ba | 182 | py | Python | carto/master/config.py | wallarelvo/SmallCartography | 007e621386eb86d904fefef3f518b1d5f1dc7fe6 | [
"Apache-2.0"
] | null | null | null | carto/master/config.py | wallarelvo/SmallCartography | 007e621386eb86d904fefef3f518b1d5f1dc7fe6 | [
"Apache-2.0"
] | null | null | null | carto/master/config.py | wallarelvo/SmallCartography | 007e621386eb86d904fefef3f518b1d5f1dc7fe6 | [
"Apache-2.0"
] | null | null | null |
from flask import Flask

# Flask application; configuration is loaded from this module's own
# top-level attributes via from_object.
app = Flask(__name__)
app.config.from_object(__name__)

# Shared mutable registries populated elsewhere at runtime.
mappers = []
reducers = []
workers = {}

# in seconds
max_time = 3

# NOTE(review): semantics of OMISSION_PROB are not visible here - presumably
# a probability threshold used by consumers; confirm before documenting.
OMISSION_PROB = 0.1
| 12.133333 | 32 | 0.71978 |
edeb32e1062d26d4c53c45814eb9589b94062bf4 | 1,200 | py | Python | setup.py | jjaakola/bang-a-gong | d30f889c18eeaff3d62d47cd02e93516e4d24dd7 | [
"MIT"
] | null | null | null | setup.py | jjaakola/bang-a-gong | d30f889c18eeaff3d62d47cd02e93516e4d24dd7 | [
"MIT"
] | null | null | null | setup.py | jjaakola/bang-a-gong | d30f889c18eeaff3d62d47cd02e93516e4d24dd7 | [
"MIT"
] | null | null | null | import setuptools
install_requires = [
"kafka-python==2.0.2",
"psycopg2-binary==2.8.6",
"aiohttp==3.7.4.post0",
"docopt==0.6.2",
"PyYAML==5.4.1",
"jsonpath-ng==1.5.2"
]
development_requirements = {
"dev" : ["coverage==5.5",
"pylint==2.8.2",
"pycodestyle==2.7.0",
"aioresponses==0.7.2",
"docker-compose==1.29.1",
"build==0.3.1.post1"]
}
setuptools.setup(
name="api-status-monitor",
version="0.0.1",
author="jjaakola",
description="Distributed API status monitor system.",
url="https://github.com/jjaakola/bang-a-gong",
project_urls={
"Bug Tracker": "https://github.com/jjaakola/bang-a-gong/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Linux",
],
entry_points = {
'console_scripts': ['statusmonitor=api_status_monitor.statusmonitor:main'],
},
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
install_requires=install_requires,
extras_require=development_requirements
)
| 26.666667 | 83 | 0.591667 |
4abdae46be3bb6c2b4fe0fa6769ee70c1ae12df7 | 3,359 | py | Python | tests/unit/test_sentry.py | fairhopeweb/warehouse | 7d8ef742e8fe6b401190c28ce56761848041c89f | [
"Apache-2.0"
] | 3,103 | 2015-01-30T00:24:10.000Z | 2022-03-31T23:21:39.000Z | tests/unit/test_sentry.py | fairhopeweb/warehouse | 7d8ef742e8fe6b401190c28ce56761848041c89f | [
"Apache-2.0"
] | 6,709 | 2015-01-05T01:23:20.000Z | 2022-03-31T14:49:46.000Z | tests/unit/test_sentry.py | fairhopeweb/warehouse | 7d8ef742e8fe6b401190c28ce56761848041c89f | [
"Apache-2.0"
] | 959 | 2015-01-12T22:22:40.000Z | 2022-03-31T22:21:51.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
import sentry_sdk
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
from warehouse import sentry
def test_sentry_request_method():
    """_sentry() should hand back the SDK object stashed in the pyramid registry."""
    sentry_sdk = pretend.stub()
    request = pretend.stub(registry={"sentry": sentry_sdk}, sentry=sentry)
    assert sentry._sentry(request) is sentry_sdk
class TestSentryBeforeSend:
    """before_send() filters events before they are shipped to Sentry."""

    def test_ignore_exception(self):
        # SystemExit must be suppressed: before_send returns None to drop
        # the event.
        hint = {"exc_info": (SystemExit, SystemExit(), "tracebk")}
        assert sentry.before_send(pretend.stub(), hint) is None

    @pytest.mark.parametrize(
        "hint",
        [
            {"exc_info": (ConnectionError, ConnectionError(), "tracebk")},
            {"event_info": "This is a random event."},
        ],
    )
    def test_report_event(self, hint):
        # Other exceptions and non-exception events pass through unchanged.
        event = pretend.stub()
        assert sentry.before_send(event, hint) is event
def test_includeme(monkeypatch):
    """includeme() should initialise the SDK with the configured settings and
    wire the request method plus the WSGI middleware into pyramid."""

    # Minimal stand-in for pyramid's registry: a dict with a .settings attr.
    class Registry(dict):
        def __init__(self):
            self.settings = {}

    # Record every integration constructor and the sdk init call; each stub
    # returns a distinct marker so the integrations list can be asserted.
    init_obj = pretend.call_recorder(lambda *a, **kw: "1")
    pyramid_obj = pretend.call_recorder(lambda *a, **kw: "2")
    celery_obj = pretend.call_recorder(lambda *a, **kw: "3")
    sql_obj = pretend.call_recorder(lambda *a, **kw: "4")
    log_obj = pretend.call_recorder(lambda *a, **kw: "5")

    monkeypatch.setattr(sentry_sdk, "init", init_obj)
    monkeypatch.setattr("warehouse.sentry.PyramidIntegration", pyramid_obj)
    monkeypatch.setattr("warehouse.sentry.CeleryIntegration", celery_obj)
    monkeypatch.setattr("warehouse.sentry.SqlalchemyIntegration", sql_obj)
    monkeypatch.setattr("warehouse.sentry.LoggingIntegration", log_obj)

    config = pretend.stub(
        registry=Registry(),
        add_request_method=pretend.call_recorder(lambda *a, **kw: None),
        add_wsgi_middleware=pretend.call_recorder(lambda *a, **kw: None),
    )
    config.registry.settings.update(
        {
            "warehouse.commit": "rand3rfgkn3424",
            "sentry.dsn": "test_dsn",
            "sentry.transport": "proxy_transport",
        }
    )

    sentry.includeme(config)

    assert init_obj.calls == [
        pretend.call(
            dsn="test_dsn",
            release="rand3rfgkn3424",
            transport="proxy_transport",
            before_send=sentry.before_send,
            attach_stacktrace=True,
            integrations=["2", "3", "4", "5"],
        )
    ]
    assert pyramid_obj.calls == [pretend.call()]
    assert celery_obj.calls == [pretend.call()]
    assert sql_obj.calls == [pretend.call()]
    assert log_obj.calls == [pretend.call()]
    assert config.registry["sentry"] is sentry_sdk
    assert config.add_request_method.calls == [
        pretend.call(sentry._sentry, name="sentry", reify=True)
    ]
    assert config.add_wsgi_middleware.calls == [pretend.call(SentryWsgiMiddleware)]
dbb3f4ca5db1c6da44cd9516ce73e3f67be85635 | 2,283 | py | Python | lldb/test/API/python_api/sbplatform/TestSBPlatform.py | ornata/llvm-project | 494913b8b4e4bce0b3525e5569d8e486e82b9a52 | [
"Apache-2.0"
] | null | null | null | lldb/test/API/python_api/sbplatform/TestSBPlatform.py | ornata/llvm-project | 494913b8b4e4bce0b3525e5569d8e486e82b9a52 | [
"Apache-2.0"
] | null | null | null | lldb/test/API/python_api/sbplatform/TestSBPlatform.py | ornata/llvm-project | 494913b8b4e4bce0b3525e5569d8e486e82b9a52 | [
"Apache-2.0"
] | null | null | null | """Test the SBPlatform APIs."""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
class SBPlatformAPICase(TestBase):
    """Exercises the SBPlatform Python API: running shell commands, setting an
    SDK root, and name-based platform selection on the debugger."""

    mydir = TestBase.compute_mydir(__file__)
    NO_DEBUG_INFO_TESTCASE = True

    @skipIfRemote  # Remote environment not supported.
    def test_run(self):
        """SBPlatform.Run executes a command which inherits the host environment."""
        self.build()
        plat = lldb.SBPlatform.GetHostPlatform()

        os.environ["MY_TEST_ENV_VAR"] = "SBPlatformAPICase.test_run"

        def cleanup():
            # Make sure the environment variable does not leak into other tests.
            del os.environ["MY_TEST_ENV_VAR"]

        self.addTearDownHook(cleanup)
        cmd = lldb.SBPlatformShellCommand(self.getBuildArtifact("a.out"))
        self.assertSuccess(plat.Run(cmd))
        self.assertIn("MY_TEST_ENV_VAR=SBPlatformAPICase.test_run", cmd.GetOutput())

    def test_SetSDKRoot(self):
        """SetSDKRoot should be reflected as the sysroot in `platform status`."""
        plat = lldb.SBPlatform("remote-linux")  # arbitrary choice
        self.assertTrue(plat)
        plat.SetSDKRoot(self.getBuildDir())
        self.dbg.SetSelectedPlatform(plat)
        self.expect("platform status",
                    substrs=["Sysroot:", self.getBuildDir()])

    def test_SetCurrentPlatform_floating(self):
        # floating platforms cannot be referenced by name until they are
        # associated with a debugger
        floating_platform = lldb.SBPlatform("remote-netbsd")
        floating_platform.SetWorkingDirectory(self.getBuildDir())
        self.assertSuccess(self.dbg.SetCurrentPlatform("remote-netbsd"))
        dbg_platform = self.dbg.GetSelectedPlatform()
        self.assertEqual(dbg_platform.GetName(), "remote-netbsd")
        # A fresh platform was created by name, not the floating one, so the
        # working directory set above must not be visible here.
        self.assertIsNone(dbg_platform.GetWorkingDirectory())

    def test_SetCurrentPlatform_associated(self):
        # associated platforms are found by name-based lookup
        floating_platform = lldb.SBPlatform("remote-netbsd")
        floating_platform.SetWorkingDirectory(self.getBuildDir())

        # Selecting the platform once (then restoring) associates it with the
        # debugger, making it resolvable by name afterwards.
        orig_platform = self.dbg.GetSelectedPlatform()
        self.dbg.SetSelectedPlatform(floating_platform)
        self.dbg.SetSelectedPlatform(orig_platform)

        self.assertSuccess(self.dbg.SetCurrentPlatform("remote-netbsd"))
        dbg_platform = self.dbg.GetSelectedPlatform()
        self.assertEqual(dbg_platform.GetName(), "remote-netbsd")
        self.assertEqual(dbg_platform.GetWorkingDirectory(), self.getBuildDir())
2a5674ee0772db84380338ab9a382f5236c83c3e | 7,231 | py | Python | electrum_scribe/crypto.py | scribenetwork/scribe-electrum-desktop | b0507a8df1a88038e200905070524fded0a00978 | [
"MIT"
] | null | null | null | electrum_scribe/crypto.py | scribenetwork/scribe-electrum-desktop | b0507a8df1a88038e200905070524fded0a00978 | [
"MIT"
] | null | null | null | electrum_scribe/crypto.py | scribenetwork/scribe-electrum-desktop | b0507a8df1a88038e200905070524fded0a00978 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import os
import hashlib
import hmac
import lyra2re2_hash
from typing import Union
import pyaes
from .util import assert_bytes, InvalidPassword, to_bytes, to_string, WalletFileException
from .i18n import _
try:
from Cryptodome.Cipher import AES
except:
AES = None
class InvalidPadding(Exception):
    """Raised when the PKCS#7 padding of a decrypted blob is malformed."""
def append_PKCS7_padding(data: bytes) -> bytes:
    """Return *data* extended with PKCS#7 padding to a 16-byte boundary.

    Always appends at least one byte (a full block when the input is already
    block-aligned), so the padding is always removable.
    """
    assert_bytes(data)
    pad_length = 16 - len(data) % 16
    return data + bytes([pad_length] * pad_length)


def strip_PKCS7_padding(data: bytes) -> bytes:
    """Remove and validate PKCS#7 padding, raising InvalidPadding if malformed."""
    assert_bytes(data)
    if not data or len(data) % 16:
        raise InvalidPadding("invalid length")
    pad_length = data[-1]
    if not (0 < pad_length <= 16):
        raise InvalidPadding("invalid padding byte (out of range)")
    # Every padding byte must equal the pad length itself.
    if any(b != pad_length for b in data[-pad_length:]):
        raise InvalidPadding("invalid padding byte (inconsistent)")
    return data[:-pad_length]
def aes_encrypt_with_iv(key: bytes, iv: bytes, data: bytes) -> bytes:
    """AES-CBC encrypt *data* under *key*/*iv*, PKCS#7-padding it first.

    Uses pycryptodome's C implementation when importable, otherwise falls
    back to the pure-python pyaes library; both paths produce identical
    ciphertext.
    """
    assert_bytes(key, iv, data)
    data = append_PKCS7_padding(data)
    if AES:
        e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
    else:
        aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
        aes = pyaes.Encrypter(aes_cbc, padding=pyaes.PADDING_NONE)
        e = aes.feed(data) + aes.feed()  # empty aes.feed() flushes buffer
    return e


def aes_decrypt_with_iv(key: bytes, iv: bytes, data: bytes) -> bytes:
    """AES-CBC decrypt *data* and strip its PKCS#7 padding.

    Broken padding is reported as InvalidPassword: for password-encrypted
    payloads a wrong key almost always surfaces as invalid padding.
    """
    assert_bytes(key, iv, data)
    if AES:
        cipher = AES.new(key, AES.MODE_CBC, iv)
        data = cipher.decrypt(data)
    else:
        aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
        aes = pyaes.Decrypter(aes_cbc, padding=pyaes.PADDING_NONE)
        data = aes.feed(data) + aes.feed()  # empty aes.feed() flushes buffer
    try:
        return strip_PKCS7_padding(data)
    except InvalidPadding:
        raise InvalidPassword()
def EncodeAES_base64(secret: bytes, msg: bytes) -> bytes:
    """Encrypt *msg* under *secret* and return the result base64-encoded."""
    return base64.b64encode(EncodeAES_bytes(secret, msg))


def EncodeAES_bytes(secret: bytes, msg: bytes) -> bytes:
    """Encrypt *msg*: a fresh random 16-byte IV is generated and prepended
    to the ciphertext so decryption can recover it."""
    assert_bytes(msg)
    iv = bytes(os.urandom(16))
    return iv + aes_encrypt_with_iv(secret, iv, msg)


def DecodeAES_base64(secret: bytes, ciphertext_b64: Union[bytes, str]) -> bytes:
    """Base64-decode *ciphertext_b64* and decrypt it."""
    return DecodeAES_bytes(secret, bytes(base64.b64decode(ciphertext_b64)))


def DecodeAES_bytes(secret: bytes, ciphertext: bytes) -> bytes:
    """Split off the 16-byte IV prefix and decrypt the remaining payload."""
    assert_bytes(ciphertext)
    iv, payload = ciphertext[:16], ciphertext[16:]
    return aes_decrypt_with_iv(secret, iv, payload)
# Version tags for the password-to-key derivation scheme used by
# pw_encode/pw_decode: KNOWN lists every version this code can recognise,
# SUPPORTED the subset it can actually compute.
PW_HASH_VERSION_LATEST = 1
KNOWN_PW_HASH_VERSIONS = (1, 2, )
SUPPORTED_PW_HASH_VERSIONS = (1, )
# Sanity: the latest version must always be both known and supported.
assert PW_HASH_VERSION_LATEST in KNOWN_PW_HASH_VERSIONS
assert PW_HASH_VERSION_LATEST in SUPPORTED_PW_HASH_VERSIONS
class UnexpectedPasswordHashVersion(InvalidPassword, WalletFileException):
    """The wallet declares a password-hash version this build does not know about."""

    def __init__(self, version):
        self.version = version

    def __str__(self):
        unexpected = _("Unexpected password hash version")
        instruction = _('You are most likely using an outdated version of Electrum. Please update.')
        return "{unexpected}: {version}\n{instruction}".format(
            unexpected=unexpected,
            version=self.version,
            instruction=instruction)


class UnsupportedPasswordHashVersion(InvalidPassword, WalletFileException):
    """The wallet declares a known password-hash version that this build cannot compute."""

    def __init__(self, version):
        self.version = version

    def __str__(self):
        instruction = (f"To open this wallet, try 'git checkout password_v{self.version}'.\n"
                       "Alternatively, restore from seed.")
        return "{unsupported}: {version}\n{instruction}".format(
            unsupported=_("Unsupported password hash version"),
            version=self.version,
            instruction=instruction)
def _hash_password(password: Union[bytes, str], *, version: int) -> bytes:
    """Derive a fixed-length AES key from *password* for the given scheme version.

    Version 1 (the only supported scheme) is double-SHA256 of the UTF-8
    password bytes.
    """
    pw = to_bytes(password, 'utf8')
    if version not in SUPPORTED_PW_HASH_VERSIONS:
        raise UnsupportedPasswordHashVersion(version)
    if version == 1:
        return sha256d(pw)
    else:
        # Unreachable while SUPPORTED == {1}; guards against a version being
        # marked supported without a matching branch being added here.
        assert version not in KNOWN_PW_HASH_VERSIONS
        raise UnexpectedPasswordHashVersion(version)
def pw_encode(data: str, password: Union[bytes, str, None], *, version: int) -> str:
    """Encrypt *data* (UTF-8 text) under *password*; returns base64 text.

    A falsy password disables encryption entirely: *data* is returned as-is.
    """
    if not password:
        return data
    if version not in KNOWN_PW_HASH_VERSIONS:
        raise UnexpectedPasswordHashVersion(version)
    # derive key from password
    secret = _hash_password(password, version=version)
    # encrypt given data
    ciphertext = EncodeAES_bytes(secret, to_bytes(data, "utf8"))
    ciphertext_b64 = base64.b64encode(ciphertext)
    return ciphertext_b64.decode('utf8')


def pw_decode(data: str, password: Union[bytes, str, None], *, version: int) -> str:
    """Inverse of pw_encode; a wrong password surfaces as InvalidPassword.

    Note the asymmetry with pw_encode: here only ``password is None`` skips
    decryption - an empty string counts as a real password.
    """
    if password is None:
        return data
    if version not in KNOWN_PW_HASH_VERSIONS:
        raise UnexpectedPasswordHashVersion(version)
    data_bytes = bytes(base64.b64decode(data))
    # derive key from password
    secret = _hash_password(password, version=version)
    # decrypt given data
    try:
        d = to_string(DecodeAES_bytes(secret, data_bytes), "utf8")
    except Exception as e:
        raise InvalidPassword() from e
    return d
def sha256(x: Union[bytes, str]) -> bytes:
    """Single SHA-256 digest of *x* (str input is UTF-8 encoded first)."""
    return bytes(hashlib.sha256(to_bytes(x, 'utf8')).digest())


def sha256d(x: Union[bytes, str]) -> bytes:
    """Double SHA-256 (Bitcoin-style) digest of *x*."""
    return bytes(sha256(sha256(x)))


def lyra2re2(x: Union[bytes, str]) -> bytes:
    """Lyra2RE2 proof-of-work hash of *x*."""
    return bytes(lyra2re2_hash.getPoWHash(to_bytes(x, 'utf8')))
def hash_160(x: bytes) -> bytes:
    """RIPEMD160(SHA256(x)) - Bitcoin's HASH160, used for address derivation."""
    return ripemd(sha256(x))


def ripemd(x):
    """RIPEMD-160 digest of *x*.

    Prefers hashlib's (OpenSSL-backed) implementation; some builds ship
    without ripemd160, in which case the bundled pure-python fallback is used.
    """
    try:
        md = hashlib.new('ripemd160')
        md.update(x)
        return md.digest()
    except BaseException:
        from . import ripemd
        md = ripemd.new(x)
        return md.digest()
def hmac_oneshot(key: bytes, msg: bytes, digest) -> bytes:
    """Compute an HMAC in a single call.

    Prefers ``hmac.digest`` (Python 3.7+), which avoids constructing an HMAC
    object; otherwise falls back to the classic ``hmac.new(...).digest()``
    API. Both paths yield identical output.
    """
    if hasattr(hmac, 'digest'):
        # requires python 3.7+; faster
        return hmac.digest(key, msg, digest)
    return hmac.new(key, msg, digest).digest()
3996bb1e863a29130d89ec78075d97a321df5b32 | 1,705 | py | Python | src/waldur_slurm/executors.py | virtengine/ve-waldur-v2 | c9d90bc659171de678bd552e92cfc59cffb6fc3a | [
"MIT"
] | 2 | 2018-08-16T14:42:24.000Z | 2019-07-20T03:36:59.000Z | src/waldur_slurm/executors.py | virtengine/ve-waldur-v2 | c9d90bc659171de678bd552e92cfc59cffb6fc3a | [
"MIT"
] | null | null | null | src/waldur_slurm/executors.py | virtengine/ve-waldur-v2 | c9d90bc659171de678bd552e92cfc59cffb6fc3a | [
"MIT"
] | 3 | 2019-02-27T19:17:49.000Z | 2019-07-25T21:40:01.000Z | from waldur_core.core import executors as core_executors
from waldur_core.core import tasks as core_tasks
from waldur_core.structure import executors as structure_executors
from . import models
## Class to create Allocation
class AllocationCreateExecutor(core_executors.CreateExecutor):
@classmethod
def get_task_signature(cls, volume, serialized_allocation, **kwargs):
return core_tasks.BackendMethodTask().si(
serialized_allocation,
'create_allocation',
state_transition='begin_creating',
)
class AllocationSetLimitsExecutor(core_executors.ActionExecutor):
@classmethod
def get_task_signature(cls, allocation, serialized_allocation, **kwargs):
return core_tasks.BackendMethodTask().si(
serialized_allocation,
'set_resource_limits',
state_transition='begin_updating',
)
class AllocationPullExecutor(core_executors.ActionExecutor):
action = 'Pull'
@classmethod
def get_task_signature(cls, volume, serialized_volume, **kwargs):
return core_tasks.BackendMethodTask().si(
serialized_volume, 'pull_allocation', state_transition='begin_updating'
)
## Class to delete allocation
class AllocationDeleteExecutor(core_executors.DeleteExecutor):
@classmethod
def get_task_signature(cls, volume, serialized_allocation, **kwargs):
return core_tasks.BackendMethodTask().si(
serialized_allocation,
'delete_allocation',
state_transition='begin_deleting',
)
class SlurmCleanupExecutor(structure_executors.BaseCleanupExecutor):
executors = ((models.Allocation, AllocationDeleteExecutor),)
| 34.795918 | 83 | 0.730205 |
3641178c663da592e82db814c4e99731afe8e199 | 1,072 | py | Python | heat/core/constants.py | shssf/heat | 9db0a936c92491fa5aa862f558cb385c9916216b | [
"MIT"
] | 105 | 2018-05-18T11:34:03.000Z | 2022-03-29T06:37:23.000Z | heat/core/constants.py | shssf/heat | 9db0a936c92491fa5aa862f558cb385c9916216b | [
"MIT"
] | 909 | 2018-05-18T07:50:26.000Z | 2022-03-31T20:16:30.000Z | heat/core/constants.py | shssf/heat | 9db0a936c92491fa5aa862f558cb385c9916216b | [
"MIT"
] | 28 | 2018-05-24T14:39:18.000Z | 2022-03-31T19:18:47.000Z | """
This module defines constants used in HeAT.
"""
import torch
__all__ = ["e", "Euler", "inf", "Inf", "Infty", "Infinity", "nan", "NaN", "pi"]
# infinity
INF = float("inf")
# Not a number
NAN = float("nan")
# Negative infinity
NINF = -float("inf")
# Archimedes' constant
PI = 3.141592653589793
# Euler's number
E = 2.718281828459045
# aliases
inf = INF
"""IEEE 754 floating point representation of (positive) infinity (:math:`\\infty`)."""
Inf = INF
"""IEEE 754 floating point representation of (positive) infinity (:math:`\\infty`)."""
Infty = INF
"""IEEE 754 floating point representation of (positive) infinity (:math:`\\infty`)."""
Infinity = INF
"""IEEE 754 floating point representation of (positive) infinity (:math:`\\infty`)."""
nan = NAN
"""IEEE 754 floating point representation of Not a Number (NaN)."""
NaN = NAN
"""IEEE 754 floating point representation of Not a Number (NaN)."""
pi = PI
"""Archimedes' constant (:math:`\\pi`)."""
e = E
"""Euler's number, Euler's constant (:math:`e`)."""
Euler = E
"""Euler's number, Euler's constant (:math:`e`)."""
| 26.8 | 86 | 0.662313 |
56999720f058de647229bc7fab8fae7d063fb302 | 1,117 | py | Python | src/python/dart/engine/emr/steps/python/fix_partition_folder_names.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 18 | 2016-03-03T19:10:21.000Z | 2021-07-14T22:37:35.000Z | src/python/dart/engine/emr/steps/python/fix_partition_folder_names.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 62 | 2016-04-11T15:17:23.000Z | 2017-09-08T17:18:53.000Z | src/python/dart/engine/emr/steps/python/fix_partition_folder_names.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 15 | 2016-03-03T15:38:34.000Z | 2019-03-27T19:33:08.000Z | #!/usr/bin/env python27
import sys
import subprocess
__author__ = 'dmcpherson'
def call(cmd):
try:
print cmd
result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print result
return result
except subprocess.CalledProcessError as e:
print e.output
raise e
hdfs_root = sys.argv[1].rstrip('/')
partition_names = sys.argv[2].split(',')
def rename_from_root(root, partitions):
if not partitions:
return
dir_lines = call('hdfs dfs -ls %s' % root)
for line in dir_lines.split('\n'):
parts = line.split()
# skip lines without real file/dir data
if len(parts) < 8:
continue
if line[0] == 'd':
file_path = parts[-1]
dir_name = file_path.split('/')[-1]
prefix = file_path.rsplit(dir_name, 1)[0].rstrip('/')
new_path = prefix + '/' + partitions[0] + '=' + dir_name
call('hdfs dfs -mv %s %s' % (file_path, new_path))
rename_from_root(new_path, partitions[1:])
rename_from_root(hdfs_root, partition_names)
| 25.976744 | 83 | 0.59624 |
03fb52da97ccf19bfd928b7b1f078ce6cd273bc1 | 249 | py | Python | WeatherSpider/WeatherSpider/spiders/SeniverseSpider15d.py | superlova/WeatherSpider | fa8ed79f780d8ef065a41208ed7e80f94581fac4 | [
"MIT"
] | 1 | 2020-05-26T01:51:13.000Z | 2020-05-26T01:51:13.000Z | WeatherSpider/WeatherSpider/spiders/SeniverseSpider15d.py | superlova/WeatherSpider | fa8ed79f780d8ef065a41208ed7e80f94581fac4 | [
"MIT"
] | null | null | null | WeatherSpider/WeatherSpider/spiders/SeniverseSpider15d.py | superlova/WeatherSpider | fa8ed79f780d8ef065a41208ed7e80f94581fac4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
class Seniversespider15dSpider(scrapy.Spider):
name = 'SeniverseSpider15d'
allowed_domains = ['seniverse.comm']
start_urls = ['http://seniverse.comm/']
def parse(self, response):
pass
| 20.75 | 46 | 0.658635 |
b012a1549e2b6536258719ad0bf53ce95a30453f | 906 | py | Python | test/com/facebook/buck/features/python/testdata/python_binary/external_sources/wheel_package/setup.py | isfaaghyth/buck | cad8f7ac2de2c9a4f08ce66180db677e44d61aee | [
"Apache-2.0"
] | 2 | 2019-09-22T05:33:37.000Z | 2019-09-22T06:36:24.000Z | test/com/facebook/buck/features/python/testdata/python_binary/external_sources/wheel_package/setup.py | isfaaghyth/buck | cad8f7ac2de2c9a4f08ce66180db677e44d61aee | [
"Apache-2.0"
] | null | null | null | test/com/facebook/buck/features/python/testdata/python_binary/external_sources/wheel_package/setup.py | isfaaghyth/buck | cad8f7ac2de2c9a4f08ce66180db677e44d61aee | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages, setup
setup(
name="wheel_package",
version="0.0.1",
description="A sample Python project",
long_description="A sample Python project",
url="https://buck.build",
author="Buck",
license="Apache License 2.0",
packages=["wheel_package"],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: Apache Software License"
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
],
data_files=[
("lib", ["lib/__init__.py"]),
("lib/foo", ["lib/foo/bar.py", "lib/foo/__init__.py"]),
("lib/foobar", ["lib/foobar/baz.py", "lib/foobar/__init__.py"]),
],
)
| 33.555556 | 72 | 0.610375 |
00f84a6f5d032cdf2143ec342ebca9a83d7c1fbc | 1,236 | py | Python | arjuna/interact/http/model/internal/processor/text/store.py | bhargavkumar-65/arjuna | 400dfd598096199e89d64eb6e8d2932892d37f6d | [
"Apache-2.0"
] | 13 | 2020-05-12T06:32:51.000Z | 2022-01-24T18:21:19.000Z | arjuna/interact/http/model/internal/processor/text/store.py | bhargavkumar-65/arjuna | 400dfd598096199e89d64eb6e8d2932892d37f6d | [
"Apache-2.0"
] | 5 | 2020-02-14T12:51:07.000Z | 2021-12-01T10:39:51.000Z | arjuna/interact/http/model/internal/processor/text/store.py | bhargavkumar-65/arjuna | 400dfd598096199e89d64eb6e8d2932892d37f6d | [
"Apache-2.0"
] | 25 | 2020-01-16T10:44:25.000Z | 2022-02-24T13:22:22.000Z | # This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna.tpi.helper.arjtype import NotFound
class TextExtractor:
def __init__(self, response):
self.__response = response
@property
def response(self):
return self.__response
def store(self, name, regex, strict):
try:
value = self.response.text.find(regex)
except Exception:
if not strict:
value = NotFound()
else:
raise Exception(f"Issue in extracting value for >{name}< as no element was found using regex >{regex}<.")
self.response.store[name] = value
| 30.9 | 121 | 0.68123 |
1198d083d0a1f87bffc27dd0a2f4a1798f7f9e80 | 8,794 | py | Python | ag/forest/bin/tree_cli.py | justyre/jus | 1339c010ac4499c253061d2cce5e638ec06062bd | [
"MIT"
] | null | null | null | ag/forest/bin/tree_cli.py | justyre/jus | 1339c010ac4499c253061d2cce5e638ec06062bd | [
"MIT"
] | null | null | null | ag/forest/bin/tree_cli.py | justyre/jus | 1339c010ac4499c253061d2cce5e638ec06062bd | [
"MIT"
] | null | null | null | # Licensed under MIT License.
# See LICENSE in the project root for license information.
"""A simple CLI application."""
import cmd
from typing import Optional
from forest import tree_exceptions
from forest.binary_trees import avl_tree
from forest.binary_trees import binary_search_tree
from forest.binary_trees import binary_tree
from forest.binary_trees import red_black_tree
from forest.binary_trees import single_threaded_binary_trees
from forest.binary_trees import double_threaded_binary_trees
from forest.binary_trees import traversal
class cli(cmd.Cmd):
"""A CLI for operating tree data structures."""
intro = "Welcome to the Tree CLI. Type help or ? to list available commands.\n"
prompt = "tree> "
def __init__(self) -> None:
cmd.Cmd.__init__(self)
self._tree: Optional[binary_tree.BinaryTree] = None
def do_build(self, line):
"""Build a binary tree.
Options: avl, bst, rb, threaded
Example
-------
tree> build avl
"""
try:
if self._tree is not None:
print(f"ERROR: A tree of type {type(self._tree)} already exists.")
return
tree_type = self._get_single_arg(line=line).lower()
if tree_type == "avl":
self._tree = avl_tree.AVLTree()
elif tree_type == "bst":
self._tree == binary_search_tree.BinarySearchTree()
elif tree_type == "rb":
self._tree == red_black_tree.RBTree()
elif tree_type == "threaded":
threaded_type = input(
"Please input threaded BST type (left, right, double): "
).lower()
if threaded_type == "left":
self._tree = single_threaded_binary_trees.LeftThreadedBinaryTree()
elif threaded_type == "right":
self._tree = single_threaded_binary_trees.RightThreadedBinaryTree()
elif threaded_type == "double":
self._tree = double_threaded_binary_trees.DoubleThreadedBinaryTree()
else:
print(f"ERROR: {threaded_type} is an invalid threaded type.")
else:
print(f"ERROR: {tree_type} is an invalid tree type.")
except KeyError as error:
print(error)
def do_search(self, line):
"""Search data by a given key.
Example
-------
tree> search 3
"""
try:
key = self._get_key(line=line)
output = self._tree.search(key=key)
if output is None:
print(f"ERROR: A node with key {key} does not exist.")
else:
print(output.key, output.data)
except KeyError as error:
print(error)
def do_insert(self, line):
"""Insert a (key, data) pair. The key must be an integer.
Example
-------
tree> insert 7 data
"""
args = line.split()
# Note: the `insert` is not included in `args`.
if len(args) != 2:
print("ERROR: Invalid number of arguments: Two expected.")
return
try:
key = self._get_key(line=line)
self._tree.insert(key=key, data=args[1])
print(f"(key, data) = ({args[0]}, {args[1]}) has been inserted.")
except tree_exceptions.DuplicateKeyError:
print(f"ERROR: A node with {key} already exists.")
except KeyError as error:
print(error)
def do_delete(self, line):
"""Delete an item by the given key.
Example
-------
tree> delete 5
"""
try:
key = self._get_key(line=line)
self._tree.delete(key=key)
print(f"Key {key} has been removed.")
except KeyError as error:
print(error)
def do_traverse(self, line):
"""Traverse the binary tree.
Options: pre, in, post, reverse
Example
-------
tree> traverse pre
"""
try:
arg = self._get_single_arg(line=line).lower()
if isinstance(
self._tree, single_threaded_binary_trees.LeftThreadedBinaryTree
):
if arg == "reverse":
for item in self._tree.reverse_inorder_traverse():
print(item)
else:
print(f"ERROR: {arg} is an invalid traversal type for this tree.")
elif isinstance(
self._tree, single_threaded_binary_trees.RightThreadedBinaryTree
):
if arg == "pre":
for item in self._tree.preorder_traverse():
print(item)
elif arg == "in":
for item in self._tree.inorder_traverse():
print(item)
else:
print(f"ERROR: {arg} is an invalid traversal type for this tree.")
elif isinstance(
self._tree, double_threaded_binary_trees.DoubleThreadedBinaryTree
):
if arg == "pre":
for item in self._tree.preorder_traverse():
print(item)
elif arg == "in":
for item in self._tree.inorder_traverse():
print(item)
elif arg == "reverse":
for item in self._tree.reverse_inorder_traverse():
print(item)
else:
print(f"ERROR: {arg} is an invalid traversal type for this tree.")
elif isinstance(self._tree, red_black_tree.RBTree):
if arg == "pre":
for item in self._tree.preorder_traverse():
print(item)
elif arg == "in":
for item in self._tree.inorder_traverse():
print(item)
elif arg == "post":
for item in self._tree.postorder_traverse():
print(item)
else:
print(f"ERROR: {arg} is an invalid traversal type for this tree.")
else:
# For avl and bst
if arg == "pre":
for item in traversal.preorder_traverse(tree=self._tree):
print(item)
elif arg == "in":
for item in traversal.inorder_traverse(tree=self._tree):
print(item)
elif arg == "post":
for item in traversal.postorder_traverse(tree=self._tree):
print(item)
elif arg == "reverse":
for item in traversal.reverse_inorder_traverse(tree=self._tree):
print(item)
else:
print(f"ERROR: {arg} is an invalid traversal type.")
except KeyError as error:
print(error)
def do_display(self, line):
"""Display the tree."""
if isinstance(self._tree.root, binary_tree.Node):
if isinstance(
self._tree, single_threaded_binary_trees.LeftThreadedBinaryTree
) or isinstance(
self._tree, single_threaded_binary_trees.RightThreadedBinaryTree
) or isinstance(
self._tree, double_threaded_binary_trees.DoubleThreadedBinaryTree
):
self._tree.root.display_keys(self._tree)
else:
self._tree.root.display_keys()
def do_destroy(self, line):
"""Destroy the existing tree."""
self._tree = None
print("The tree has been destroyed.")
def do_exit(self, line):
"""Exit the application."""
print("Bye!")
raise SystemExit()
def _get_single_arg(self, line):
# Get only the only argument from the line of input.
arg = line.split()
if len(arg) > 1:
raise KeyError("Too many arguments! Only one expected.")
return arg[0]
def _get_key(self, line):
# Get the key of a node from the line of input.
arg = line.split()
if len(arg) == 0:
raise KeyError("ERROR: No argument provided!")
# str.isdigit() checks if str is made ONLY of digits
if not arg[0].isdigit():
raise KeyError("ERROR: The key must be an integer!")
else:
return int(arg[0])
def main():
"""Entry point for the tree CLI."""
cli().cmdloop() | 36.641667 | 88 | 0.521947 |
dd1e46c4fa1beff069538ad42e9264635b06a278 | 345 | py | Python | game/game_stats.py | ArturYazykov/alien-invasion | eb5728cdbecbe34a53d96bb16aea2e60a59aa9c0 | [
"MIT"
] | null | null | null | game/game_stats.py | ArturYazykov/alien-invasion | eb5728cdbecbe34a53d96bb16aea2e60a59aa9c0 | [
"MIT"
] | null | null | null | game/game_stats.py | ArturYazykov/alien-invasion | eb5728cdbecbe34a53d96bb16aea2e60a59aa9c0 | [
"MIT"
] | null | null | null | class GameStats:
def __init__(self, ai_game):
self.settings = ai_game.settings
self.reset_stats()
self.game_active = False
#
self.high_score = 0
def reset_stats(self):
self.ships_left = self.settings.ship_limit
self.score = 0
self.level = 1
print(self.ships_left)
| 24.642857 | 50 | 0.597101 |
dcc68a8988bad400edd820c3ec55859e92f2e2e1 | 1,687 | py | Python | ivi/agilent/agilentMSO7104B.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 161 | 2015-01-23T17:43:01.000Z | 2022-03-29T14:42:42.000Z | ivi/agilent/agilentMSO7104B.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 45 | 2015-01-15T13:35:04.000Z | 2021-06-03T01:58:55.000Z | ivi/agilent/agilentMSO7104B.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 87 | 2015-01-31T10:55:23.000Z | 2022-03-17T08:18:47.000Z | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent7000B import *
class agilentMSO7104B(agilent7000B):
"Agilent InfiniiVision MSO7104B IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO7104B')
super(agilentMSO7104B, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 1e9
self._init_channels()
| 37.488889 | 86 | 0.755187 |
0b23072cc6482f8a6601120fdf7bfbc7effedcb3 | 1,516 | py | Python | twittertail/http_server.py | ndavison/twitter-demo | 9f753c6211c3388db6816f6c7d1915b734d89f66 | [
"MIT"
] | null | null | null | twittertail/http_server.py | ndavison/twitter-demo | 9f753c6211c3388db6816f6c7d1915b734d89f66 | [
"MIT"
] | 1 | 2021-03-13T03:35:42.000Z | 2021-03-13T03:35:42.000Z | twittertail/http_server.py | ndavison/twitter-demo | 9f753c6211c3388db6816f6c7d1915b734d89f66 | [
"MIT"
] | null | null | null | from aiohttp import web
import json
class HTTPServer:
def start_server(self, loop, interface, port, tweets):
'''
Starts the aiohttp server on the supplied interface and port.
Arguments:
loop: the asyncio event loop.
interface (string): the interface to listen on.
port (int): the port to listen on.
tweets (list): the list of tweets to return.
'''
runner = self.___http_server(tweets)
loop.run_until_complete(runner.setup())
server = web.TCPSite(runner, interface, int(port))
loop.run_until_complete(server.start())
def ___http_server(self, tweets):
'''
Establishes the endponts and HTTP server configuration.
Arguments:
tweets (list): the list of tweets to return.
Returns:
runner: the app runner for asyncio.
'''
async def http_tweets(request):
tweets_json = list(
{
'created_at': x[0],
'full_text': x[1],
'id': x[2]
}
for x in tweets
)
return web.Response(
body=json.dumps({'tweets': tweets_json}),
headers={
'content-type': 'application/json'
}
)
app = web.Application()
app.router.add_get('/tweets', http_tweets)
runner = web.AppRunner(app)
return runner
| 29.153846 | 69 | 0.523087 |
48873997e74133ebc841372457358b9a373f7f1c | 730 | py | Python | Extracting_the_Data/recipe_3_collecting_data_from_word_file/main.py | Rezwanul-Haque/NLP-Project | 50555c740a66e229dc723b448456c6534fd17707 | [
"MIT"
] | null | null | null | Extracting_the_Data/recipe_3_collecting_data_from_word_file/main.py | Rezwanul-Haque/NLP-Project | 50555c740a66e229dc723b448456c6534fd17707 | [
"MIT"
] | null | null | null | Extracting_the_Data/recipe_3_collecting_data_from_word_file/main.py | Rezwanul-Haque/NLP-Project | 50555c740a66e229dc723b448456c6534fd17707 | [
"MIT"
] | null | null | null | from docx import Document
# Creating a word file object
doc = open('data/file.docx', 'rb')
# creating word reader object
document = Document(doc)
# create an empty string and call this document. This document
# variable store each paragraph in the Word document.We then
# create a for loop that goes through each paragraph in the Word
# document and appends the paragraph.
docu = ""
for paragraph in document.paragraphs:
docu += paragraph.text
# To see the output call docu
print(docu)
# doc.close()
# or
with open('data/file.docx', 'rb') as word:
document = Document(doc)
docu = ""
for paragraph in document.paragraphs:
docu += paragraph.text
# To see the output call docu
print(docu)
| 20.857143 | 64 | 0.70411 |
70d0b07fba9de9e0e8e7ab6b56ca5f07e148caf7 | 7,171 | py | Python | python/mead/trainer.py | amyhemmeter/baseline | 101a393398570747d14a32eb3af72664e2774c8b | [
"Apache-2.0"
] | null | null | null | python/mead/trainer.py | amyhemmeter/baseline | 101a393398570747d14a32eb3af72664e2774c8b | [
"Apache-2.0"
] | null | null | null | python/mead/trainer.py | amyhemmeter/baseline | 101a393398570747d14a32eb3af72664e2774c8b | [
"Apache-2.0"
] | null | null | null | import os
import logging
import argparse
import copy
from itertools import chain
from baseline.utils import read_config_stream, normalize_backend
import mead
from mead.utils import convert_path, parse_extra_args, configure_logger
DEFAULT_SETTINGS_LOC = 'config/mead-settings.json'
DEFAULT_DATASETS_LOC = 'config/datasets.json'
DEFAULT_LOGGING_LOC = 'config/logging.json'
DEFAULT_EMBEDDINGS_LOC = 'config/embeddings.json'
logger = logging.getLogger('mead')
def update_datasets(datasets_config, config_params, train, valid, test):
"""Take an existing datasets index file and update to include a record with train/valid/test overrides
If the label provided in the dataset is found in the dataset index, it will use that as a template and
individually override the provided `train`, `valid` and `test` params to update that record.
If the label does not exist, it creates a dummy record and augments that record with the provided `train`,
`valid`, and optionally, the `test`
:param datasets_config: The datasets config to update
:param config_params: The mead config
:param train: (`str`) An override train set or `None`. If `dataset` key doesnt exist, cannot be `None`
:param valid: (`str`) An override valid set or `None` If `dataset` key doesnt exist, cannot be `None`
:param test: (`str`) An override test set or None
:return: None
"""
for file_name in [train, valid, test]:
if not os.path.exists(train):
raise Exception('No such file exists for override: {}'.format(file_name))
original_dataset_label = config_params['dataset']
original_record = [entry for entry in datasets_config if entry['label'] == original_dataset_label]
if not original_record:
if not train or not valid:
raise Exception('No template label provided, so you must provide at least train and valid params!')
updated_record = {'label': original_record, 'train_file': None, 'valid_file': None, 'test_file': None}
else:
if len(original_record) != 1:
logger.warning('Warning: multiple templates found for dataset override, using first!')
updated_record = copy.deepcopy(original_record[0])
if 'sha1' in updated_record:
logger.info('Ignoring SHA1 due to user override')
del updated_record['sha1']
if 'download' in updated_record:
if not train or not valid:
raise Exception('Cannot override downloadable dataset without providing file '
'locations for both training and validation')
if not test and 'test_file' in updated_record:
del updated_record['test_file']
del updated_record['download']
new_dataset_label = '{}.{}'.format(original_dataset_label, os.getpid())
updated_record['label'] = new_dataset_label
if train:
updated_record['train_file'] = train
if valid:
updated_record['valid_file'] = valid
if test:
updated_record['test_file'] = test
logger.warning(updated_record)
config_params['dataset'] = new_dataset_label
logger.info("The dataset key for this override is {}".format(new_dataset_label))
datasets_config.append(updated_record)
def main():
parser = argparse.ArgumentParser(description='Train a text classifier')
parser.add_argument('--config', help='configuration for an experiment', type=convert_path, default="$MEAD_CONFIG")
parser.add_argument('--settings', help='configuration for mead', default=DEFAULT_SETTINGS_LOC, type=convert_path)
parser.add_argument('--datasets', help='index of dataset labels', type=convert_path)
parser.add_argument('--modules', help='modules to load', default=[], nargs='+', required=False)
parser.add_argument('--mod_train_file', help='override the training set')
parser.add_argument('--mod_valid_file', help='override the validation set')
parser.add_argument('--mod_test_file', help='override the test set')
parser.add_argument('--embeddings', help='index of embeddings', type=convert_path)
parser.add_argument('--logging', help='config file for logging', default=DEFAULT_LOGGING_LOC, type=convert_path)
parser.add_argument('--task', help='task to run', choices=['classify', 'tagger', 'seq2seq', 'lm'])
parser.add_argument('--gpus', help='Number of GPUs (defaults to number available)', type=int, default=-1)
parser.add_argument('--basedir', help='Override the base directory where models are stored', type=str)
parser.add_argument('--reporting', help='reporting hooks', nargs='+')
parser.add_argument('--backend', help='The deep learning backend to use')
parser.add_argument('--checkpoint', help='Restart training from this checkpoint')
args, reporting_args = parser.parse_known_args()
config_params = read_config_stream(args.config)
if args.basedir is not None:
config_params['basedir'] = args.basedir
task_name = config_params.get('task', 'classify') if args.task is None else args.task
args.logging = read_config_stream(args.logging)
configure_logger(args.logging, config_params.get('basedir', './{}'.format(task_name)))
try:
args.settings = read_config_stream(args.settings)
except:
logger.warning('Warning: no mead-settings file was found at [{}]'.format(args.settings))
args.settings = {}
args.datasets = args.datasets if args.datasets else args.settings.get('datasets', convert_path(DEFAULT_DATASETS_LOC))
args.datasets = read_config_stream(args.datasets)
if args.mod_train_file or args.mod_valid_file or args.mod_test_file:
logging.warning('Warning: overriding the training/valid/test data with user-specified files'
' different from what was specified in the dataset index. Creating a new key for this entry')
update_datasets(args.datasets, config_params, args.mod_train_file, args.mod_valid_file, args.mod_test_file)
args.embeddings = args.embeddings if args.embeddings else args.settings.get('embeddings', convert_path(DEFAULT_EMBEDDINGS_LOC))
args.embeddings = read_config_stream(args.embeddings)
if args.gpus is not None:
config_params['model']['gpus'] = args.gpus
if args.backend is None and 'backend' in args.settings:
args.backend = args.settings['backend']
if args.backend is not None:
config_params['backend'] = normalize_backend(args.backend)
config_params['modules'] = list(set(chain(config_params.get('modules', []), args.modules)))
cmd_hooks = args.reporting if args.reporting is not None else []
config_hooks = config_params.get('reporting') if config_params.get('reporting') is not None else []
reporting = parse_extra_args(set(chain(cmd_hooks, config_hooks)), reporting_args)
config_params['reporting'] = reporting
logger.info('Task: [{}]'.format(task_name))
task = mead.Task.get_task_specific(task_name, args.settings)
task.read_config(config_params, args.datasets, reporting_args=reporting_args)
task.initialize(args.embeddings)
task.train(args.checkpoint)
if __name__ == "__main__":
main()
| 49.798611 | 131 | 0.714266 |
20fe887f1b9f2a764d2d5da035684fb1fde6e624 | 8,201 | py | Python | tests/bundle_download/test_download.py | reuterbal/ecbundle | 94118ffa4384795ceed9d54cf886c975db4af9af | [
"Apache-2.0"
] | null | null | null | tests/bundle_download/test_download.py | reuterbal/ecbundle | 94118ffa4384795ceed9d54cf886c975db4af9af | [
"Apache-2.0"
] | 2 | 2022-01-12T10:02:57.000Z | 2022-02-22T21:11:29.000Z | tests/bundle_download/test_download.py | reuterbal/ecbundle | 94118ffa4384795ceed9d54cf886c975db4af9af | [
"Apache-2.0"
] | 1 | 2022-02-22T20:39:14.000Z | 2022-02-22T20:39:14.000Z | # (C) Copyright 2020- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import os
from pathlib import Path
from shutil import rmtree
import pytest
from conftest import Watcher
from ecbundle import BundleDownloader, Git, logger
os.environ["BITBUCKET"] = "ssh://git@git.ecmwf.int"
@pytest.fixture
def watcher():
return Watcher(logger=logger)
@pytest.fixture
def here():
return Path(__file__).parent.resolve()
@pytest.fixture
def project1_dir(here):
"""
Create empty source/project1 directory
"""
project_dir = here / "source/project1"
if project_dir.exists():
project_dir.rmdir()
project_dir.mkdir(parents=True)
yield
# Clean up after us
rmtree(here / "source")
@pytest.fixture
def args(here):
return {
"no_colour": True,
"verbose": False,
"dryrun": True,
"dry_run": True,
"bundle": "%s" % here,
"src_dir": "%s" % (here / "source"),
"update": False,
"forced_update": False,
"threads": 1,
"shallow": False,
}
class CleanRepo(Git):
"""
Custom Git object to mimick existing git repos
"""
@classmethod
def is_dirty(cls, *args, **kwargs):
return False
@classmethod
def is_remote(cls, *args, **kwargs):
return True
@classmethod
def is_branch(cls, *args, **kwargs):
return False
@classmethod
def is_tag(cls, *args, **kwargs):
return False
@classmethod
def is_commit(cls, *args, **kwargs):
return False
@classmethod
def remote_url(cls, src_dir, origin, dryrun):
return "ssh://git@git.ecmwf.int/user/" + Path(src_dir).name
def test_download_simple(args, here, watcher):
"""
Simple bundle creation test that git clones a single project.
"""
with watcher:
BundleDownloader(**args).download()
assert (
"git clone -o ec-user ssh://git@git.ecmwf.int/user/project1" in watcher.output
)
assert "git -c advice.detachedHead=false checkout 0.0.1" in watcher.output
def test_download_shallow(args, here, watcher):
"""
Simple bundle creation test that shallow clones a single project.
"""
args["bundle"] = "%s" % (here / "bundle.yml")
args["shallow"] = True
with watcher:
BundleDownloader(**args).download()
expected = "git -c advice.detachedHead=false clone -o ec-user -b 0.0.1 "
"ssh://git@git.ecmwf.int/user/project1 %s/project1 --depth=1" % args["src_dir"]
assert expected in watcher.output
def test_download_multi(args, here, watcher):
"""
Simple bundle creation test with multiple projects.
"""
args["bundle"] = "%s" % (here / "bundle_multi.yml")
with watcher:
BundleDownloader(**args).download()
assert (
"git clone -o ec-user ssh://git@git.ecmwf.int/user/project1" in watcher.output
)
assert "git -c advice.detachedHead=false checkout 0.0.1" in watcher.output
assert (
"git clone -o ec-user ssh://git@git.ecmwf.int/user/project2" in watcher.output
)
assert "git -c advice.detachedHead=false checkout 0.0.2" in watcher.output
def test_download_update_fetch(args, here, project1_dir, watcher):
"""
Test repository update for existing project (fetch and checkout)
"""
args["update"] = True
args["bundle"] = "%s" % (here / "bundle.yml")
with watcher:
BundleDownloader(git=CleanRepo, **args).download()
# Expect a clean new fetch
assert "git fetch --tags ec-user" in watcher.output
assert "git checkout -b 0.0.1 ec-user/0.0.1" in watcher.output
def test_download_update_branch(args, here, project1_dir, watcher):
"""
Test repository branch update for existing project (checkout and pull)
"""
args["update"] = True
args["bundle"] = "%s" % (here / "bundle.yml")
class CleanBranchRepo(CleanRepo):
@classmethod
def branch_upstream(cls, *args, **kwargs):
return "ec-user/0.0.1"
@classmethod
def is_branch(cls, *args, **kwargs):
return True
@classmethod
def remote_url(cls, src_dir, origin, dryrun):
return "ssh://git@git.ecmwf.int/user/" + Path(src_dir).name
with watcher:
BundleDownloader(git=CleanBranchRepo, **args).download()
assert "git -c advice.detachedHead=false checkout 0.0.1" in watcher.output
assert "git pull ec-user 0.0.1" in watcher.output
def test_download_force_update_branch(args, here, project1_dir, watcher):
    """
    Test forced-update for existing project branch (fetch and hard reset)
    """
    args["bundle"] = "%s" % (here / "bundle.yml")
    args["update"] = True
    args["forced_update"] = True

    class _BranchRepo(CleanRepo):
        # Simulate a repo whose checked-out ref is a branch tracking the remote
        @classmethod
        def branch_upstream(cls, *args, **kwargs):
            return "ec-user/0.0.1"

        @classmethod
        def is_branch(cls, *args, **kwargs):
            return True

        @classmethod
        def remote_url(cls, src_dir, origin, dryrun):
            return "ssh://git@git.ecmwf.int/user/" + Path(src_dir).name

    with watcher:
        BundleDownloader(git=_BranchRepo, **args).download()
    # A forced update checks out the branch, fetches and hard-resets to upstream
    for expected_command in (
        "git -c advice.detachedHead=false checkout 0.0.1",
        "git fetch --tags ec-user",
        "git reset --hard ec-user/0.0.1",
    ):
        assert expected_command in watcher.output
def test_download_submodules(args, here, watcher):
    """
    Simple bundle creation test with multiple projects that have submodules.
    """
    args["bundle"] = "%s" % (here / "bundle_submodules.yml")
    with watcher:
        BundleDownloader(**args).download()
    # Each project is cloned and pinned to its tag or branch
    for project, revision in (
        ("project1", "0.0.1"),
        ("project2", "0.0.2"),
        ("project3", "develop"),
    ):
        assert (
            "git clone -o ec-user ssh://git@git.ecmwf.int/user/%s" % project
            in watcher.output
        )
        assert "git -c advice.detachedHead=false checkout %s" % revision in watcher.output
    # Exactly two of the projects are expected to trigger a submodule update
    assert watcher.output.count("git submodule update --init --recursive") == 2
def test_download_submodules_shallow(args, here, watcher):
    """
    Simple bundle creation test using shallow clones with multiple projects that have submodules.
    """
    args["bundle"] = "%s" % (here / "bundle_submodules.yml")
    args["shallow"] = True
    with watcher:
        BundleDownloader(**args).download()
    # Shallow mode clones the wanted revision directly (-b <rev> ... --depth=1)
    for project, revision in (
        ("project1", "0.0.1"),
        ("project2", "0.0.2"),
        ("project3", "develop"),
    ):
        expected = (
            "git -c advice.detachedHead=false clone -o ec-user "
            f"-b {revision} ssh://git@git.ecmwf.int/user/{project} "
            f"{here / 'source/' / project} --depth=1"
        )
        assert expected in watcher.output
    # Exactly two of the projects are expected to trigger a submodule update
    assert watcher.output.count("git submodule update --init --recursive") == 2
def test_download_fail_optional(args, here, watcher):
    """
    Test download of an optional project that fails
    """
    # Disable both dry-run flags so the clone is actually attempted (and fails)
    args["dryrun"] = False
    args["dry_run"] = False
    args["bundle"] = "%s" % (here / "bundle_optional.yml")
    with watcher:
        BundleDownloader(**args).download()
    # The failure must be reported but tolerated since the project is optional
    expected_messages = (
        "git clone -o ec-user ssh://git@git.ecmwf.int/user/project1",
        "Could not download or update optional project project1",
        "Following projects failed to download but are marked optional:",
        " - project1",
    )
    for message in expected_messages:
        assert message in watcher.output
| 29.289286 | 97 | 0.646385 |
540030a6387198673a55191608cfe8eef7659e2d | 701 | py | Python | binary_search.py | Kabongosalomon/Sorting-Algorithm | 6e8fa49accb20582390ae2eff64b21645d9c1a8a | [
"Apache-2.0"
] | null | null | null | binary_search.py | Kabongosalomon/Sorting-Algorithm | 6e8fa49accb20582390ae2eff64b21645d9c1a8a | [
"Apache-2.0"
] | null | null | null | binary_search.py | Kabongosalomon/Sorting-Algorithm | 6e8fa49accb20582390ae2eff64b21645d9c1a8a | [
"Apache-2.0"
] | null | null | null | def binary_search(input_array, value):
"""this function take an input array, and return the index of value
if present in the input array or -1 if not"""
min_index = 0
max_index = len(input_array)-1
while (min_index<=max_index):
midle = (min_index+max_index)//2
if input_array[midle]==value:
return midle
elif value > input_array[midle]:
min_index = midle+1
else :
max_index = max_index-1
return -1
# Quick manual check: expected output is 6 for the present value (29)
# and -1 for the two absent values (25 and -7).
sample = [1, 3, 9, 11, 15, 19, 29]
for target in (25, 29, -7):
    print(binary_search(sample, target))
| 28.04 | 72 | 0.631954 |
328298e8cd9e52e5550a235a30949d5de8a58cea | 39,783 | py | Python | tests/integration-tests/tests/schedulers/test_slurm.py | enrico-usai/aws-parallelcluster | caed724204c7db00424fabd803aa07d9fac2d962 | [
"Apache-2.0"
] | null | null | null | tests/integration-tests/tests/schedulers/test_slurm.py | enrico-usai/aws-parallelcluster | caed724204c7db00424fabd803aa07d9fac2d962 | [
"Apache-2.0"
] | 14 | 2022-03-11T10:26:58.000Z | 2022-03-28T10:40:43.000Z | tests/integration-tests/tests/schedulers/test_slurm.py | enrico-usai/aws-parallelcluster | caed724204c7db00424fabd803aa07d9fac2d962 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import time
from datetime import datetime, timezone
import boto3
import pytest
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutionError, RemoteCommandExecutor
from retrying import retry
from time_utils import minutes, seconds
from utils import InstanceTypesData, get_compute_nodes_instance_ids
from tests.common.assertions import (
assert_errors_in_logs,
assert_no_errors_in_logs,
assert_no_msg_in_logs,
assert_no_node_in_ec2,
assert_num_instances_constant,
assert_num_instances_in_cluster,
assert_scaling_worked,
wait_for_num_instances_in_cluster,
)
from tests.common.hit_common import (
assert_compute_node_states,
assert_initial_conditions,
assert_num_nodes_in_scheduler,
submit_initial_job,
wait_for_num_nodes_in_scheduler,
)
from tests.common.mpi_common import compile_mpi_ring
from tests.common.schedulers_common import SlurmCommands, TorqueCommands, get_scheduler_commands
@pytest.mark.regions(["us-east-2"])
@pytest.mark.instances(["c5.xlarge"])
@pytest.mark.schedulers(["slurm"])
@pytest.mark.usefixtures("instance", "scheduler", "os")
def test_slurm(region, pcluster_config_reader, clusters_factory, test_datadir, architecture):
    """
    Test all AWS Slurm related features.
    Grouped all tests in a single function so that cluster can be reused for all of them.
    """
    scaledown_idletime = 3
    gpu_instance_type = "g3.4xlarge"
    gpu_instance_type_info = InstanceTypesData.get_instance_info(gpu_instance_type, region)
    # For OSs running _test_mpi_job_termination, spin up 2 compute nodes at cluster creation to run test
    # Else do not spin up compute node and start running regular slurm tests
    supports_impi = architecture == "x86_64"  # the IntelMPI test only runs on x86_64
    cluster_config = pcluster_config_reader(scaledown_idletime=scaledown_idletime, gpu_instance_type=gpu_instance_type)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    slurm_commands = SlurmCommands(remote_command_executor)
    _test_slurm_version(remote_command_executor)
    if supports_impi:
        _test_mpi_job_termination(remote_command_executor, test_datadir)
    # Ensure no compute node is left before starting the scheduling tests
    _assert_no_node_in_cluster(region, cluster.cfn_name, slurm_commands)
    _test_job_dependencies(slurm_commands, region, cluster.cfn_name, scaledown_idletime)
    _test_job_arrays_and_parallel_jobs(
        slurm_commands,
        region,
        cluster.cfn_name,
        scaledown_idletime,
        partition="ondemand",
        instance_type="c5.xlarge",
        cpu_per_instance=4,
    )
    _gpu_resource_check(
        slurm_commands, partition="gpu", instance_type=gpu_instance_type, instance_type_info=gpu_instance_type_info
    )
    _test_cluster_limits(
        slurm_commands, partition="ondemand", instance_type="c5.xlarge", max_count=5, cpu_per_instance=4
    )
    _test_cluster_gpu_limits(
        slurm_commands,
        partition="gpu",
        instance_type=gpu_instance_type,
        max_count=5,
        gpu_per_instance=_get_num_gpus_on_instance(gpu_instance_type_info),
        gpu_type="m60",  # GPU model exposed by g3 instances as a Slurm GRES type
    )
    # Test torque command wrapper
    _test_torque_job_submit(remote_command_executor, test_datadir)
    assert_no_errors_in_logs(remote_command_executor, "slurm")
@pytest.mark.regions(["eu-west-1"])
@pytest.mark.instances(["c5.xlarge", "m6g.xlarge"])
@pytest.mark.schedulers(["slurm"])
@pytest.mark.usefixtures("os", "instance", "scheduler")
def test_slurm_pmix(pcluster_config_reader, clusters_factory):
    """Test interactive job submission using PMIx."""
    num_computes = 2
    cluster_config = pcluster_config_reader(queue_size=num_computes)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    # Ensure the expected PMIx version is listed when running `srun --mpi=list`.
    # Since we're installing PMIx v3.1.5, we expect to see pmix and pmix_v3 in the output.
    # Sample output:
    # [ec2-user@ip-172-31-33-187 ~]$ srun 2>&1 --mpi=list
    # srun: MPI types are...
    # srun: none
    # srun: openmpi
    # srun: pmi2
    # srun: pmix
    # srun: pmix_v3
    mpi_list_output = remote_command_executor.run_remote_command("srun 2>&1 --mpi=list").stdout
    assert_that(mpi_list_output).matches(r"\s+pmix($|\s+)")
    assert_that(mpi_list_output).matches(r"\s+pmix_v3($|\s+)")
    # Compile and run an MPI program interactively
    mpi_module = "openmpi"
    binary_path = "/shared/ring"
    compile_mpi_ring(mpi_module, remote_command_executor, binary_path=binary_path)
    # Launch across all compute nodes through srun's pmix plugin to verify end-to-end startup
    interactive_command = f"module load {mpi_module} && srun --mpi=pmix -N {num_computes} {binary_path}"
    remote_command_executor.run_remote_command(interactive_command)
@pytest.mark.usefixtures("region", "os", "instance", "scheduler")
@pytest.mark.slurm_scaling
def test_slurm_scaling(scheduler, region, instance, pcluster_config_reader, clusters_factory, test_datadir):
    """Test that slurm-specific scaling logic is behaving as expected for normal actions and failures."""
    cluster_config = pcluster_config_reader(scaledown_idletime=3)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    # Baseline expected from the config template: 20 "dummy" c5l nodes, 20 nodes of the
    # test instance type, 4 idle static and 1 idle dynamic node -- TODO confirm vs template
    _assert_cluster_initial_conditions(scheduler_commands, instance, 20, 20, 4, 1)
    _test_partition_states(
        scheduler_commands,
        cluster.cfn_name,
        region,
        active_partition="ondemand1",
        inactive_partition="ondemand2",
        num_static_nodes=2,
        num_dynamic_nodes=3,
        dynamic_instance_type=instance,
    )
    _test_reset_terminated_nodes(
        scheduler_commands,
        cluster.cfn_name,
        region,
        partition="ondemand1",
        num_static_nodes=2,
        num_dynamic_nodes=3,
        dynamic_instance_type=instance,
    )
    _test_replace_down_nodes(
        remote_command_executor,
        scheduler_commands,
        test_datadir,
        cluster.cfn_name,
        region,
        partition="ondemand1",
        num_static_nodes=2,
        num_dynamic_nodes=3,
        dynamic_instance_type=instance,
    )
    _test_keep_or_replace_suspended_nodes(
        scheduler_commands,
        cluster.cfn_name,
        region,
        partition="ondemand1",
        num_static_nodes=2,
        num_dynamic_nodes=3,
        dynamic_instance_type=instance,
    )
    assert_no_errors_in_logs(remote_command_executor, scheduler)
@pytest.mark.usefixtures("region", "os", "instance", "scheduler")
@pytest.mark.slurm_error_handling
def test_error_handling(scheduler, region, instance, pcluster_config_reader, clusters_factory, test_datadir):
    """Test that slurm-specific scaling logic can handle rare failures."""
    cluster_config = pcluster_config_reader(scaledown_idletime=3)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    # Baseline expected from the config template: 10 "dummy" c5l nodes, 10 nodes of the
    # test instance type, 1 idle static and 1 idle dynamic node -- TODO confirm vs template
    _assert_cluster_initial_conditions(scheduler_commands, instance, 10, 10, 1, 1)
    _test_cloud_node_health_check(
        remote_command_executor,
        scheduler_commands,
        cluster.cfn_name,
        region,
        partition="ondemand1",
        num_static_nodes=1,
        # Test only works with num_dynamic = 1
        num_dynamic_nodes=1,
        dynamic_instance_type=instance,
    )
    _test_ec2_status_check_replacement(
        remote_command_executor,
        scheduler_commands,
        cluster.cfn_name,
        region,
        partition="ondemand1",
        num_static_nodes=1,
    )
    # Next test will introduce error in logs, assert no error now
    assert_no_errors_in_logs(remote_command_executor, scheduler)
    _test_clustermgtd_down_logic(
        remote_command_executor,
        scheduler_commands,
        cluster.cfn_name,
        region,
        test_datadir,
        partition="ondemand1",
        num_static_nodes=1,
        num_dynamic_nodes=1,
        dynamic_instance_type=instance,
    )
def _assert_cluster_initial_conditions(
    scheduler_commands,
    instance,
    expected_num_dummy,
    expected_num_instance_node,
    expected_num_static,
    expected_num_dynamic,
):
    """Assert that expected nodes are in cluster."""
    node_states = scheduler_commands.get_nodes_status()
    logging.info(node_states)
    # Short instance tag embedded in nodenames, e.g. "c5.xlarge" -> "c5x"
    instance_tag = instance[: instance.index(".") + 2].replace(".", "")
    num_dummy = sum(1 for nodename in node_states if "c5l" in nodename)
    num_instance_node = sum(1 for nodename in node_states if instance_tag in nodename)
    # Static/dynamic counts only consider nodes currently idle
    num_static = sum(
        1 for nodename, state in node_states.items() if state == "idle" and "-st-" in nodename
    )
    num_dynamic = sum(
        1 for nodename, state in node_states.items() if state == "idle" and "-dy-" in nodename
    )
    assert_that(num_dummy).is_equal_to(expected_num_dummy)
    assert_that(num_instance_node).is_equal_to(expected_num_instance_node)
    assert_that(num_static).is_equal_to(expected_num_static)
    assert_that(num_dynamic).is_equal_to(expected_num_dynamic)
def _test_partition_states(
    scheduler_commands,
    cluster_name,
    region,
    active_partition,
    inactive_partition,
    num_static_nodes,
    num_dynamic_nodes,
    dynamic_instance_type,
):
    """Test that partition states INACTIVE and UP are processed.

    Making a partition INACTIVE must terminate its instances and remove its nodes
    while leaving active_partition untouched; setting it back UP must restore its
    static nodes. The partition is left INACTIVE at the end to save resources.
    """
    # Fix: log message typo ("INACTIVE partiton are") corrected
    logging.info("Testing that INACTIVE partition is cleaned up")
    # submit job to inactive partition to scale up some dynamic nodes
    init_job_id = submit_initial_job(
        scheduler_commands,
        "sleep 300",
        inactive_partition,
        dynamic_instance_type,
        num_dynamic_nodes,
        other_options="--no-requeue",
    )
    assert_initial_conditions(
        scheduler_commands, num_static_nodes, num_dynamic_nodes, partition=inactive_partition, cancel_job_id=init_job_id
    )
    # set partition to inactive and wait for instances/node to terminate
    scheduler_commands.set_partition_state(inactive_partition, "inactive")
    # wait for all instances from inactive_partition to terminate;
    # only the active partition's static instances should remain
    # (generalized: was hard-coded to 2, i.e. the caller's num_static_nodes)
    wait_for_num_instances_in_cluster(cluster_name, region, num_static_nodes)
    # Assert no nodes in inactive partition
    wait_for_num_nodes_in_scheduler(scheduler_commands, desired=0, filter_by_partition=inactive_partition)
    # Assert active partition is not affected
    assert_num_nodes_in_scheduler(scheduler_commands, desired=num_static_nodes, filter_by_partition=active_partition)
    # set inactive partition back to active and wait for nodes to spin up
    scheduler_commands.set_partition_state(inactive_partition, "up")
    wait_for_num_nodes_in_scheduler(
        scheduler_commands, desired=num_static_nodes, filter_by_partition=inactive_partition
    )
    # set inactive partition to inactive to save resources, this partition will not be used for later tests
    scheduler_commands.set_partition_state(inactive_partition, "inactive")
def _test_reset_terminated_nodes(
    scheduler_commands, cluster_name, region, partition, num_static_nodes, num_dynamic_nodes, dynamic_instance_type
):
    """
    Test that slurm nodes are reset if instances are terminated manually.
    Static capacity should be replaced and dynamic capacity power saved.
    """
    logging.info("Testing that nodes are reset when instances are terminated manually")
    # Long sleep job keeps dynamic nodes alive long enough to be terminated mid-run
    init_job_id = submit_initial_job(
        scheduler_commands,
        "sleep 300",
        partition,
        dynamic_instance_type,
        num_dynamic_nodes,
        other_options="--no-requeue",
    )
    static_nodes, dynamic_nodes = assert_initial_conditions(
        scheduler_commands, num_static_nodes, num_dynamic_nodes, partition, cancel_job_id=init_job_id
    )
    instance_ids = get_compute_nodes_instance_ids(cluster_name, region)
    # terminate all instances manually
    _terminate_nodes_manually(instance_ids, region)
    # Assert that cluster replaced static node and reset dynamic nodes
    _wait_for_node_reset(scheduler_commands, static_nodes, dynamic_nodes)
    # Only the replaced static capacity should remain running in EC2
    assert_num_instances_in_cluster(cluster_name, region, len(static_nodes))
def _test_replace_down_nodes(
    remote_command_executor,
    scheduler_commands,
    test_datadir,
    cluster_name,
    region,
    partition,
    num_static_nodes,
    num_dynamic_nodes,
    dynamic_instance_type,
):
    """Test that slurm nodes are replaced if nodes are marked DOWN."""
    logging.info("Testing that nodes replaced when set to down state")
    init_job_id = submit_initial_job(
        scheduler_commands,
        "sleep 300",
        partition,
        dynamic_instance_type,
        num_dynamic_nodes,
        other_options="--no-requeue",
    )
    static_nodes, dynamic_nodes = assert_initial_conditions(
        scheduler_commands, num_static_nodes, num_dynamic_nodes, partition, cancel_job_id=init_job_id
    )
    # kill slurmd on static nodes, these nodes will be in down*
    for node in static_nodes:
        remote_command_executor.run_remote_script(str(test_datadir / "slurm_kill_slurmd_job.sh"), args=[node])
    # set dynamic to down manually
    _set_nodes_to_down_manually(scheduler_commands, dynamic_nodes)
    # Static nodes must be replaced, dynamic nodes must be power saved
    _wait_for_node_reset(scheduler_commands, static_nodes, dynamic_nodes)
    assert_num_instances_in_cluster(cluster_name, region, len(static_nodes))
def _test_keep_or_replace_suspended_nodes(
    scheduler_commands, cluster_name, region, partition, num_static_nodes, num_dynamic_nodes, dynamic_instance_type
):
    """Test keep DRAIN nodes if there is job running, or terminate if no job is running."""
    logging.info(
        "Testing that nodes are NOT terminated when set to suspend state and there is job running on the nodes"
    )
    # Job long enough (500s) to still be running while nodes get drained below
    job_id = submit_initial_job(
        scheduler_commands,
        "sleep 500",
        partition,
        dynamic_instance_type,
        num_dynamic_nodes,
        other_options="--no-requeue",
    )
    static_nodes, dynamic_nodes = assert_initial_conditions(
        scheduler_commands, num_static_nodes, num_dynamic_nodes, partition
    )
    # Set all nodes to drain, static should be in DRAINED and dynamic in DRAINING
    _set_nodes_to_suspend_state_manually(scheduler_commands, static_nodes + dynamic_nodes)
    # Static nodes in DRAINED are immediately replaced
    _wait_for_node_reset(scheduler_commands, static_nodes=static_nodes, dynamic_nodes=[])
    # Assert dynamic nodes in DRAINING are not terminated during job run
    _assert_nodes_not_terminated(scheduler_commands, dynamic_nodes)
    # wait until the job is completed and check that the DRAINING dynamic nodes are then terminated
    scheduler_commands.wait_job_completed(job_id)
    scheduler_commands.assert_job_succeeded(job_id)
    _wait_for_node_reset(scheduler_commands, static_nodes=[], dynamic_nodes=dynamic_nodes)
    assert_num_instances_in_cluster(cluster_name, region, len(static_nodes))
def _test_cloud_node_health_check(
    remote_command_executor,
    scheduler_commands,
    cluster_name,
    region,
    partition,
    num_static_nodes,
    num_dynamic_nodes,
    dynamic_instance_type,
):
    """
    Test nodes with networking failure are correctly replaced.
    This will test if slurm is performing health check on CLOUD nodes correctly.
    """
    logging.info("Testing that nodes with networking failure fails slurm health check and replaced")
    job_id = submit_initial_job(
        scheduler_commands,
        "sleep 500",
        partition,
        dynamic_instance_type,
        num_dynamic_nodes,
        other_options="--no-requeue",
    )
    static_nodes, dynamic_nodes = assert_initial_conditions(
        scheduler_commands, num_static_nodes, num_dynamic_nodes, partition, job_id
    )
    # Assert that the default SlurmdTimeout=180 is in effect
    _assert_slurmd_timeout(remote_command_executor, timeout=180)
    # Nodes with networking failures should fail slurm health check before failing ec2_status_check
    # Test on freshly launched dynamic nodes
    kill_job_id = _submit_kill_networking_job(
        remote_command_executor, scheduler_commands, partition, node_type="dynamic", num_nodes=num_dynamic_nodes
    )
    # Sleep for a bit so the command to detach network interface can be run
    time.sleep(15)
    # Job will hang, cancel it manually to avoid waiting for job failing
    scheduler_commands.cancel_job(kill_job_id)
    # Assert nodes are put into DOWN for not responding
    # TO-DO: this test only works with num_dynamic = 1 because slurm will record this error in nodelist format
    # i.e. error: Nodes q2-st-t2large-[1-2] not responding, setting DOWN
    # To support multiple nodes, need to convert list of node into nodelist format string
    retry(wait_fixed=seconds(20), stop_max_delay=minutes(5))(assert_errors_in_logs)(
        remote_command_executor,
        ["/var/log/slurmctld.log"],
        ["Nodes {} not responding, setting DOWN".format(",".join(dynamic_nodes))],
    )
    # Assert dynamic nodes are reset
    _wait_for_node_reset(scheduler_commands, static_nodes=[], dynamic_nodes=dynamic_nodes)
    assert_num_instances_in_cluster(cluster_name, region, len(static_nodes))
    # Assert ec2_status_check code path is not triggered
    # (slurm's own health check must have caught the failure first)
    assert_no_msg_in_logs(
        remote_command_executor,
        ["/var/log/parallelcluster/clustermgtd"],
        ["Setting nodes failing health check type ec2_health_check to DRAIN"],
    )
def _test_ec2_status_check_replacement(
    remote_command_executor,
    scheduler_commands,
    cluster_name,
    region,
    partition,
    num_static_nodes,
):
    """Test nodes with failing ec2 status checks are correctly replaced."""
    logging.info("Testing that nodes with failing ec2 status checks are correctly replaced")
    static_nodes, _ = assert_initial_conditions(scheduler_commands, num_static_nodes, 0, partition)
    # Can take up to 15 mins for ec2_status_check to show
    # Need to increase SlurmdTimeout to avoid slurm health check and trigger ec2_status_check code path
    _set_slurmd_timeout(remote_command_executor, timeout=10000)
    kill_job_id = _submit_kill_networking_job(
        remote_command_executor, scheduler_commands, partition, node_type="static", num_nodes=num_static_nodes
    )
    # Assert ec2_status_check code path is triggered
    retry(wait_fixed=seconds(20), stop_max_delay=minutes(15))(assert_errors_in_logs)(
        remote_command_executor,
        ["/var/log/parallelcluster/clustermgtd"],
        ["Setting nodes failing health check type ec2_health_check to DRAIN"],
    )
    scheduler_commands.cancel_job(kill_job_id)
    # Assert static nodes are reset
    _wait_for_node_reset(scheduler_commands, static_nodes=static_nodes, dynamic_nodes=[])
    assert_num_instances_in_cluster(cluster_name, region, len(static_nodes))
    # Reset SlurmdTimeout to 180s
    _set_slurmd_timeout(remote_command_executor, timeout=180)
def _test_clustermgtd_down_logic(
    remote_command_executor,
    scheduler_commands,
    cluster_name,
    region,
    test_datadir,
    partition,
    num_static_nodes,
    num_dynamic_nodes,
    dynamic_instance_type,
):
    """Test that computemgtd is able to shut nodes down when clustermgtd and slurmctld are offline."""
    logging.info("Testing cluster protection logic when clustermgtd is down.")
    submit_initial_job(
        scheduler_commands,
        "sleep infinity",
        partition,
        dynamic_instance_type,
        num_dynamic_nodes,
        other_options="--no-requeue",
    )
    static_nodes, dynamic_nodes = assert_initial_conditions(
        scheduler_commands, num_static_nodes, num_dynamic_nodes, partition
    )
    logging.info("Killing clustermgtd and rewriting timestamp file to trigger timeout.")
    remote_command_executor.run_remote_script(str(test_datadir / "slurm_kill_clustermgtd.sh"), run_as_root=True)
    # Overwrite clusterctld heartbeat to trigger timeout path
    timestamp_format = "%Y-%m-%d %H:%M:%S.%f%z"
    # A heartbeat dated 2020-01-01 UTC is far enough in the past to exceed any timeout
    overwrite_time_str = datetime(2020, 1, 1, tzinfo=timezone.utc).strftime(timestamp_format)
    remote_command_executor.run_remote_command(
        "echo -n '{}' | sudo tee /opt/slurm/etc/pcluster/.slurm_plugin/clustermgtd_heartbeat".format(overwrite_time_str)
    )
    # Test that computemgtd will terminate compute nodes that are down or in power_save
    # Put first static node and first dynamic node into DOWN
    # Put rest of dynamic nodes into POWER_DOWN
    logging.info("Asserting that computemgtd will terminate nodes in DOWN or POWER_SAVE")
    _set_nodes_to_down_manually(scheduler_commands, static_nodes[:1] + dynamic_nodes[:1])
    _set_nodes_to_power_down_manually(scheduler_commands, dynamic_nodes[1:])
    wait_for_num_instances_in_cluster(cluster_name, region, num_static_nodes - 1)
    logging.info("Testing that ResumeProgram launches no instance when clustermgtd is down")
    submit_initial_job(
        scheduler_commands,
        "sleep infinity",
        partition,
        dynamic_instance_type,
        num_dynamic_nodes,
    )
    logging.info("Asserting that computemgtd is not self-terminating when slurmctld is up")
    assert_num_instances_constant(cluster_name, region, desired=num_static_nodes - 1, timeout=2)
    logging.info("Killing slurmctld")
    remote_command_executor.run_remote_script(str(test_datadir / "slurm_kill_slurmctld.sh"), run_as_root=True)
    logging.info("Waiting for computemgtd to self-terminate all instances")
    wait_for_num_instances_in_cluster(cluster_name, region, 0)
    # ResumeProgram must have refused to launch because the heartbeat is stale
    assert_errors_in_logs(
        remote_command_executor,
        ["/var/log/parallelcluster/slurm_resume.log"],
        ["No valid clustermgtd heartbeat detected"],
    )
def _wait_for_node_reset(scheduler_commands, static_nodes, dynamic_nodes):
    """Block until static nodes are replaced and dynamic nodes are power saved/reset."""
    if static_nodes:
        # Static nodes first transition through DOWN/DRAINED while being replaced
        # ("drained" covers the DRAIN+DOWN combination)
        logging.info("Assert static nodes are placed in DOWN during replacement")
        _wait_for_compute_nodes_states(
            scheduler_commands, static_nodes, expected_states=["down", "down*", "drained", "drained*"]
        )
        logging.info("Assert static nodes are replaced")
        _wait_for_compute_nodes_states(scheduler_commands, static_nodes, expected_states=["idle"])
    if dynamic_nodes:
        # Dynamic nodes are power saved (idle~) after SuspendTimeout;
        # static nodes must be checked first
        logging.info("Assert dynamic nodes are power saved")
        _wait_for_compute_nodes_states(scheduler_commands, dynamic_nodes, expected_states=["idle~"])
        _assert_node_addr_host_reset(scheduler_commands.get_node_addr_host(), dynamic_nodes)
def _assert_node_addr_host_reset(addr_host_list, nodes):
    """Assert that NodeAddr and NodeHostname are reset (back to the node name itself)."""
    for node in nodes:
        expected_entry = "{0} {0} {0}".format(node)
        assert_that(addr_host_list).contains(expected_entry)
def _assert_nodes_not_terminated(scheduler_commands, nodes, timeout=5):
    """Poll for `timeout` minutes, asserting `nodes` remain known to the scheduler."""
    logging.info("Waiting for cluster daemon action")
    deadline = time.time() + 60 * timeout
    while time.time() < deadline:
        assert_that(set(nodes) <= set(scheduler_commands.get_compute_nodes())).is_true()
        time.sleep(20)
def _set_nodes_to_suspend_state_manually(scheduler_commands, compute_nodes):
    """Place compute_nodes in DRAIN and assert the resulting scheduler state."""
    scheduler_commands.set_nodes_state(compute_nodes, state="drain")
    # draining means that there is job currently running on the node
    # drained would mean we placed node in drain when there is no job running on the node
    assert_compute_node_states(scheduler_commands, compute_nodes, expected_states=["draining", "drained"])
def _set_nodes_to_down_manually(scheduler_commands, compute_nodes):
    """Place compute_nodes in DOWN and assert the resulting scheduler state."""
    scheduler_commands.set_nodes_state(compute_nodes, state="down")
    assert_compute_node_states(scheduler_commands, compute_nodes, expected_states=["down"])
def _set_nodes_to_power_down_manually(scheduler_commands, compute_nodes):
    """Power down compute_nodes and assert they end up in power-saving state (idle~)."""
    scheduler_commands.set_nodes_state(compute_nodes, state="power_down")
    # Give slurm a moment to process power_down before resuming the node record
    time.sleep(5)
    scheduler_commands.set_nodes_state(compute_nodes, state="resume")
    assert_compute_node_states(scheduler_commands, compute_nodes, expected_states=["idle~"])
@retry(wait_fixed=seconds(20), stop_max_delay=minutes(5))
def _wait_for_compute_nodes_states(scheduler_commands, compute_nodes, expected_states):
    """Retry for up to 5 minutes until all compute_nodes reach one of expected_states."""
    assert_compute_node_states(scheduler_commands, compute_nodes, expected_states)
def _terminate_nodes_manually(instance_ids, region):
    """Terminate the given EC2 instances directly, bypassing the scheduler."""
    ec2 = boto3.client("ec2", region_name=region)
    for instance_id in instance_ids:
        termination = ec2.terminate_instances(InstanceIds=[instance_id]).get("TerminatingInstances")[0]
        # Each call terminates a single instance, so only one entry is expected
        assert_that(termination.get("InstanceId")).is_equal_to(instance_id)
        assert_that(termination.get("CurrentState").get("Name")).is_in("shutting-down", "terminated")
    logging.info("Terminated nodes: {}".format(instance_ids))
def _test_mpi_job_termination(remote_command_executor, test_datadir):
    """
    Test canceling mpirun job will not leave stray processes.
    IntelMPI is known to leave stray processes after job termination if slurm process tracking is not setup correctly,
    i.e. using ProctrackType=proctrack/pgid
    Test IntelMPI script to make sure no stray processes after the job is cancelled
    This bug cannot be reproduced using OpenMPI
    """
    logging.info("Testing no stray process left behind after mpirun job is terminated")
    slurm_commands = SlurmCommands(remote_command_executor)
    # Assert initial condition
    assert_that(slurm_commands.compute_nodes_count()).is_equal_to(2)
    # Submit mpi_job, which runs Intel MPI benchmarks with intelmpi
    # Leaving 1 vcpu on each node idle so that the process check job can run while mpi_job is running
    result = slurm_commands.submit_script(str(test_datadir / "mpi_job.sh"))
    job_id = slurm_commands.assert_job_submitted(result.stdout)
    # Check that mpi processes are started
    _assert_job_state(slurm_commands, job_id, job_state="RUNNING")
    _check_mpi_process(remote_command_executor, slurm_commands, test_datadir, num_nodes=2, after_completion=False)
    slurm_commands.cancel_job(job_id)
    # Make sure mpirun job is cancelled
    _assert_job_state(slurm_commands, job_id, job_state="CANCELLED")
    # Check that mpi processes are terminated
    _check_mpi_process(remote_command_executor, slurm_commands, test_datadir, num_nodes=2, after_completion=True)
@retry(wait_fixed=seconds(10), stop_max_attempt_number=4)
def _check_mpi_process(remote_command_executor, slurm_commands, test_datadir, num_nodes, after_completion):
    """Submit script and check for MPI processes.

    When after_completion is True, assert no IMB-MPI1 process survives;
    otherwise assert the benchmark processes are running.
    """
    # Clean up old datafiles
    remote_command_executor.run_remote_command("rm -f /shared/check_proc.out")
    # Run the process check across all nodes; each node appends its matches to the shared file
    result = slurm_commands.submit_command("ps aux | grep IMB | grep MPI >> /shared/check_proc.out", nodes=num_nodes)
    job_id = slurm_commands.assert_job_submitted(result.stdout)
    slurm_commands.wait_job_completed(job_id)
    proc_track_result = remote_command_executor.run_remote_command("cat /shared/check_proc.out")
    if after_completion:
        assert_that(proc_track_result.stdout).does_not_contain("IMB-MPI1")
    else:
        assert_that(proc_track_result.stdout).contains("IMB-MPI1")
def _test_cluster_gpu_limits(slurm_commands, partition, instance_type, max_count, gpu_per_instance, gpu_type):
    """Test edge cases regarding the number of GPUs."""
    logging.info("Testing scheduler does not accept jobs when requesting for more GPUs than available")
    # Expect commands below to fail with exit 1
    _submit_command_and_assert_job_rejected(
        slurm_commands,
        submit_command_args={
            "command": "sleep 1",
            "partition": partition,
            "constraint": instance_type,
            "other_options": "--gpus-per-task {0}".format(gpu_per_instance + 1),
            "raise_on_error": False,
        },
    )
    _submit_command_and_assert_job_rejected(
        slurm_commands,
        submit_command_args={
            "command": "sleep 1",
            "partition": partition,
            "constraint": instance_type,
            "other_options": "--gres=gpu:{0}".format(gpu_per_instance + 1),
            "raise_on_error": False,
        },
    )
    _submit_command_and_assert_job_rejected(
        slurm_commands,
        submit_command_args={
            "command": "sleep 1",
            "partition": partition,
            "constraint": instance_type,
            # NOTE(review): "-G:{0}" looks like a typo for "-G {0}" (sbatch takes
            # "-G <count>"); either form is rejected here, but confirm the intent
            "other_options": "-G:{0}".format(gpu_per_instance * max_count + 1),
            "raise_on_error": False,
        },
    )
    logging.info("Testing scheduler does not accept jobs when requesting job containing conflicting options")
    _submit_command_and_assert_job_rejected(
        slurm_commands,
        submit_command_args={
            "command": "sleep 1",
            "partition": partition,
            "constraint": instance_type,
            "other_options": "-G 1 --cpus-per-gpu 32 --cpus-per-task 20",
            "raise_on_error": False,
        },
    )
    # Commands below should be correctly submitted
    slurm_commands.submit_command_and_assert_job_accepted(
        submit_command_args={
            "command": "sleep 1",
            "partition": partition,
            "constraint": instance_type,
            "slots": gpu_per_instance,
            "other_options": "-G {0}:{1} --gpus-per-task={0}:1".format(gpu_type, gpu_per_instance),
        }
    )
    slurm_commands.submit_command_and_assert_job_accepted(
        submit_command_args={
            "command": "sleep 1",
            "partition": partition,
            "constraint": instance_type,
            "other_options": "--gres=gpu:{0}:{1}".format(gpu_type, gpu_per_instance),
        }
    )
    # Submit job without '-N' option(nodes=-1)
    slurm_commands.submit_command_and_assert_job_accepted(
        submit_command_args={
            "command": "sleep 1",
            "partition": partition,
            "constraint": instance_type,
            "nodes": -1,
            "other_options": "-G {0} --gpus-per-node={1}".format(gpu_per_instance * max_count, gpu_per_instance),
        }
    )
def _test_cluster_limits(slurm_commands, partition, instance_type, max_count, cpu_per_instance):
    """Verify jobs demanding more nodes or CPUs than the cluster can provide are rejected at submission."""
    logging.info("Testing scheduler rejects jobs that require a capacity that is higher than the max available")

    # One over-limit request per resource dimension: node count, then per-task CPUs.
    over_limit_requests = (
        {"nodes": (max_count + 1)},
        {"other_options": "--cpus-per-task {0}".format(cpu_per_instance + 1)},
    )
    for extra_args in over_limit_requests:
        submit_args = {
            "command": "sleep 1",
            "partition": partition,
            "constraint": instance_type,
            "raise_on_error": False,
        }
        submit_args.update(extra_args)
        _submit_command_and_assert_job_rejected(slurm_commands, submit_command_args=submit_args)
def _submit_command_and_assert_job_rejected(slurm_commands, submit_command_args):
    """Submit a limit-violating job and assert the job is failed at submission."""
    submission_result = slurm_commands.submit_command(**submit_command_args)
    expected_error = "sbatch: error: Batch job submission failed:"
    assert_that(submission_result.stdout).contains(expected_error)
def _gpu_resource_check(slurm_commands, partition, instance_type, instance_type_info):
    """Test GPU related resources are correctly allocated."""
    logging.info("Testing number of GPU/CPU resources allocated to job")

    def _submit_and_get_job_info(gpu_options):
        # Submit a trivial job with the given GPU options and return its scontrol info.
        job_id = slurm_commands.submit_command_and_assert_job_accepted(
            submit_command_args={
                "command": "sleep 1",
                "partition": partition,
                "constraint": instance_type,
                "other_options": gpu_options,
            }
        )
        return slurm_commands.get_job_info(job_id)

    cpus_per_gpu = min(5, instance_type_info.get("VCpuInfo").get("DefaultCores"))

    # Per-job GPU request: 1 GPU with a fixed CPU-per-GPU ratio.
    job_info = _submit_and_get_job_info(f"-G 1 --cpus-per-gpu {cpus_per_gpu}")
    assert_that(job_info).contains("TresPerJob=gpu:1", f"CpusPerTres=gpu:{cpus_per_gpu}")

    # Per-node gres request: all GPUs of the instance.
    gpus_per_instance = _get_num_gpus_on_instance(instance_type_info)
    job_info = _submit_and_get_job_info(f"--gres=gpu:{gpus_per_instance} --cpus-per-gpu {cpus_per_gpu}")
    assert_that(job_info).contains(f"TresPerNode=gpu:{gpus_per_instance}", f"CpusPerTres=gpu:{cpus_per_gpu}")
def _test_slurm_version(remote_command_executor):
    """Assert the installed Slurm release matches the version this suite was written against."""
    logging.info("Testing Slurm Version")
    sinfo_output = remote_command_executor.run_remote_command("sinfo -V").stdout
    assert_that(sinfo_output).is_equal_to("slurm 20.11.7")
def _test_job_dependencies(slurm_commands, region, stack_name, scaledown_idletime):
    """Check a dependent job stays pending until its parent succeeds, then both complete."""
    logging.info("Testing cluster doesn't scale when job dependencies are not satisfied")
    parent_job_id = slurm_commands.submit_command_and_assert_job_accepted(
        submit_command_args={"command": "sleep 60", "nodes": 1}
    )
    child_job_id = slurm_commands.submit_command_and_assert_job_accepted(
        submit_command_args={"command": "sleep 1", "nodes": 1, "after_ok": parent_job_id}
    )

    # Give the scheduler a moment to compute the pending reason.
    time.sleep(3)

    # Parent should be powering up nodes (CONFIGURING); child must wait on the dependency.
    assert_that(slurm_commands.get_job_info(parent_job_id)).contains("JobState=CONFIGURING")
    assert_that(slurm_commands.get_job_info(child_job_id)).contains("JobState=PENDING Reason=Dependency")

    assert_scaling_worked(slurm_commands, region, stack_name, scaledown_idletime, expected_max=1, expected_final=0)
    # Both jobs must have run to completion once scaling settled.
    _assert_job_completed(slurm_commands, parent_job_id)
    _assert_job_completed(slurm_commands, child_job_id)
def _test_job_arrays_and_parallel_jobs(
    slurm_commands, region, stack_name, scaledown_idletime, partition, instance_type, cpu_per_instance
):
    """Verify scaling with an array job plus a parallel job that together need 3 nodes."""
    logging.info("Testing cluster scales correctly with array jobs and parallel jobs")

    def _submit(job_specific_args):
        # Merge per-job options into the shared submission arguments.
        submit_args = {
            "command": "sleep 1",
            "nodes": -1,
            "partition": partition,
            "constraint": instance_type,
        }
        submit_args.update(job_specific_args)
        return slurm_commands.submit_command_and_assert_job_accepted(submit_command_args=submit_args)

    # Following 2 jobs requires total of 3 nodes
    array_job_id = _submit({"other_options": "-a 1-{0}".format(cpu_per_instance + 1)})
    parallel_job_id = _submit({"slots": 2, "other_options": "-c {0}".format(cpu_per_instance - 1)})

    # Assert scaling worked as expected
    assert_scaling_worked(slurm_commands, region, stack_name, scaledown_idletime, expected_max=3, expected_final=0)
    # Assert jobs were completed
    _assert_job_completed(slurm_commands, array_job_id)
    _assert_job_completed(slurm_commands, parallel_job_id)
@retry(wait_fixed=seconds(20), stop_max_delay=minutes(7))
def _assert_no_node_in_cluster(region, stack_name, scheduler_commands, partition=None):
    """Retry until both the scheduler and EC2 report zero compute nodes."""
    compute_node_count = scheduler_commands.compute_nodes_count(filter_by_partition=partition)
    assert_that(compute_node_count).is_equal_to(0)
    assert_no_node_in_ec2(region, stack_name)
def _assert_job_completed(slurm_commands, job_id):
    """Assert that the given job reached the COMPLETED state."""
    _assert_job_state(slurm_commands, job_id, "COMPLETED")
@retry(wait_fixed=seconds(3), stop_max_delay=seconds(15))
def _assert_job_state(slurm_commands, job_id, job_state):
    """Retry until the job reports the expected state, or it already aged out of history."""
    try:
        job_info = slurm_commands.get_job_info(job_id)
    except RemoteCommandExecutionError as e:
        # Handle the case when job is deleted from history
        assert_that(e.result.stdout).contains("slurm_load_jobs error: Invalid job id specified")
    else:
        assert_that(job_info).contains("JobState={}".format(job_state))
def _test_torque_job_submit(remote_command_executor, test_datadir):
    """Test torque job submit command in slurm cluster."""
    logging.info("Testing cluster submits job by torque command")
    torque = TorqueCommands(remote_command_executor)
    submission = torque.submit_script(str(test_datadir / "torque_job.sh"))
    torque.assert_job_submitted(submission.stdout)
def _submit_kill_networking_job(remote_command_executor, scheduler_commands, partition, node_type, num_nodes):
    """Submit job that will detach network interface on compute."""
    # Get network interface name from Head node, assuming Head node and Compute are of the same instance type
    interface_name = remote_command_executor.run_remote_command(
        "nmcli device status | grep ether | awk '{print $1}'"
    ).stdout
    logging.info("Detaching network interface {} on {} Compute nodes".format(interface_name, node_type))
    # One array task per node, exclusive and non-requeueable so every targeted
    # compute node runs exactly one copy of the interface-down command.
    submit_args = {
        "command": "sudo ifconfig {} down && sleep 600".format(interface_name),
        "partition": partition,
        "constraint": "{}".format(node_type),
        "other_options": "-a 1-{} --exclusive --no-requeue".format(num_nodes),
    }
    return scheduler_commands.submit_command_and_assert_job_accepted(submit_command_args=submit_args)
def _set_slurmd_timeout(remote_command_executor, timeout):
    """Set SlurmdTimeout in slurm.conf."""
    sed_command = "sudo sed -i '/SlurmdTimeout/s/=.*/={0}/' /opt/slurm/etc/slurm.conf".format(timeout)
    remote_command_executor.run_remote_command(sed_command)
    # Apply the new value without restarting the daemon, then verify it took effect.
    remote_command_executor.run_remote_command("sudo /opt/slurm/bin/scontrol reconfigure")
    _assert_slurmd_timeout(remote_command_executor, timeout)
def _assert_slurmd_timeout(remote_command_executor, timeout):
    """Assert that SlurmdTimeout is correctly set."""
    show_config_command = 'scontrol show config | grep -oP "^SlurmdTimeout\\s*\\=\\s*\\K(.+)"'
    configured_timeout = remote_command_executor.run_remote_command(show_config_command).stdout
    expected_value = "{0} sec".format(timeout)
    assert_that(configured_timeout).is_equal_to(expected_value)
def _get_num_gpus_on_instance(instance_type_info):
"""
Return the number of GPUs attached to the instance type.
instance_type_info is expected to be as returned by DescribeInstanceTypes:
{
...,
"GpuInfo": {
"Gpus": [
{
"Name": "M60",
"Manufacturer": "NVIDIA",
"Count": 2,
"MemoryInfo": {
"SizeInMiB": 8192
}
}
],
}
...
}
"""
return sum([gpu_type.get("Count") for gpu_type in instance_type_info.get("GpuInfo").get("Gpus")])
| 42.232484 | 120 | 0.72619 |
e1fb5cb431a5f274e61cd65b4964c085b1f6715e | 2,502 | py | Python | examples/issue_asset.py | Shaptic/py-stellar-base | f5fa47f4d96f215889d99249fb25c7be002f5cf3 | [
"Apache-2.0"
] | null | null | null | examples/issue_asset.py | Shaptic/py-stellar-base | f5fa47f4d96f215889d99249fb25c7be002f5cf3 | [
"Apache-2.0"
] | 27 | 2022-01-12T10:55:38.000Z | 2022-03-28T01:38:24.000Z | examples/issue_asset.py | Shaptic/py-stellar-base | f5fa47f4d96f215889d99249fb25c7be002f5cf3 | [
"Apache-2.0"
] | null | null | null | """
This example shows how to issue assets on the Stellar network.
# See: https://developers.stellar.org/docs/issuing-assets/
"""
from stellar_sdk.asset import Asset
from stellar_sdk.keypair import Keypair
from stellar_sdk.network import Network
from stellar_sdk.server import Server
from stellar_sdk.transaction_builder import TransactionBuilder
# Configure StellarSdk to talk to the horizon instance hosted by Stellar.org.
# To use the live network, set the hostname to 'horizon.stellar.org'.
server = Server(horizon_url="https://horizon-testnet.stellar.org")
# Keys for the two roles: the issuing account creates the asset, the
# distributor account receives and hands it out. (Testnet demo secrets only —
# never embed real secret seeds in source code.)
issuing_keypair = Keypair.from_secret(
    "SCBHQEGSNBTT4S7Y73YAF3M3JSVSTSNBGAVU5M4XVFGUF7664EUXQHFU"
)
issuing_public = issuing_keypair.public_key
distributor_keypair = Keypair.from_secret(
    "SB6MJ6M3BPJZUGFP2QCODUIKWQWF6AIN4Z6L3J6PWL3QGDW4L6YR3QIU"
)
distributor_public = distributor_keypair.public_key
# Transactions require a valid sequence number that is specific to this account.
# We can fetch the current sequence number for the source account from Horizon.
distributor_account = server.load_account(distributor_public)
# Create an object to represent the new asset (code + issuing account).
hello_asset = Asset("Hello", issuing_public)
# First, the receiving (distributor) account must establish a trustline for the
# asset before it can hold any of it.
trust_transaction = (
    TransactionBuilder(
        source_account=distributor_account,
        network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
        base_fee=100,
    )
    .append_change_trust_op(asset=hello_asset)
    .set_timeout(30)
    .build()
)
trust_transaction.sign(distributor_keypair)
resp = server.submit_transaction(trust_transaction)
print(f"Change Trust Op Resp:\n{resp}")
print("-" * 32)
issuing_account = server.load_account(issuing_public)
# Second, the issuing account actually sends a payment using the asset.
# Best practice: keep the issuing account locked down and use the distribution
# account to hand the asset out; any other recipient must likewise add a
# trustline before it can receive the asset.
payment_transaction = (
    TransactionBuilder(
        source_account=issuing_account,
        network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
        base_fee=100,
    )
    .append_payment_op(destination=distributor_public, amount="1000", asset=hello_asset)
    .set_timeout(30)
    .build()
)
payment_transaction.sign(issuing_keypair)
resp = server.submit_transaction(payment_transaction)
print(f"Payment Op Resp:\n{resp}")
| 35.742857 | 88 | 0.792566 |
7a20713bf2afd7c8056561ad8e08e1c01878e9c4 | 1,896 | py | Python | test/test_caper_uri.py | guma44/caper | 1fe8c6bfa23045f798e27fd083187beb6d6ddf85 | [
"MIT"
] | null | null | null | test/test_caper_uri.py | guma44/caper | 1fe8c6bfa23045f798e27fd083187beb6d6ddf85 | [
"MIT"
] | null | null | null | test/test_caper_uri.py | guma44/caper | 1fe8c6bfa23045f798e27fd083187beb6d6ddf85 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Tester for CaperURI
Author:
Jin Lee (leepc12@gmail.com) at ENCODE-DCC
"""
import unittest
import os
import json
# Prefer an installed caper package; fall back to the repository checkout one
# directory up from this test file.
try:
    import caper
except ImportError:  # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
    import sys, os
    script_path = os.path.dirname(os.path.realpath(__file__))
    sys.path.append(os.path.join(script_path, '../'))
    import caper
from caper import caper_uri
from caper.caper_uri import CaperURI, URI_GCS, URI_LOCAL
class TestCaperURI(unittest.TestCase):
    # Integration-style test: exercises CaperURI.deepcopy against real cloud
    # buckets, so it needs credentials and network access to run.
    def __init__(self, *args, **kwargs):
        # Initialize the global caper_uri state once per test instance so that
        # URI conversions know which tmp dir / buckets to use.
        super(TestCaperURI, self).__init__(*args, **kwargs)
        caper_uri.init_caper_uri(
            tmp_dir='~/.caper/test/tmp_dir',
            tmp_s3_bucket='s3://encode-pipeline-test-runs/caper_tmp',
            tmp_gcs_bucket='gs://encode-pipeline-test-runs/caper_tmp',
            verbose=True)
    def test_deepcopy(self):
        # Write a small JSON file locally, then deep-copy it (and any .tsv/.json
        # files it references) to GCS.
        tmp_json = {
            # 'file1' : 'gs://encode-pipeline-genome-data/hg38_chr19_chrM_caper.tsv',
            'file2' : 'string',
            # NOTE(review): duplicate 'file3' key below silently overwrites the
            # 'gs://xxx' entry — confirm which value was intended.
            'file3' : 'gs://xxx',
            'file3' : '~/.bashrc',
            'file4' : 's3://encode-pipeline-genome-data/hg38_chr19_chrM_aws.tsv',
        }
        tmp_json_file = os.path.expanduser('~/.caper/test/tmp.json')
        with open(tmp_json_file, 'w') as fp:
            fp.write(json.dumps(tmp_json, indent=4))
        f, _ = CaperURI(tmp_json_file).deepcopy(URI_GCS, uri_exts=('.tsv','.json'))
        print(f)
        # c.get_local_file()
        # c = CaperURI('gs://encode-pipeline-genome-data/hg38_chr19_chrM_caper.tsv').deepcopy(URI_LOCAL, uri_exts=('.tsv'))
        # c = CaperURI('https://storage.googleapis.com/encode-pipeline-genome-data/hg38_chr19_chrM_caper.tsv').deepcopy(URI_GCS, uri_exts=('.tsv'))
        # c = CaperURI('https://storage.googleapis.com/encode-pipeline-genome-data/hg38_chr19_chrM_caper.tsv').deepcopy(URI_GCS, uri_exts=('.tsv'))
# Allow running this test module directly: `python test_caper_uri.py`.
if __name__ == '__main__':
    unittest.main()
| 35.773585 | 147 | 0.643987 |
9580157dfdb2795a929ca230ea79da98d81ccd04 | 11,099 | py | Python | custom_components/xiaomi_cloud_map_extractor/common/image_handler.py | GuyKh/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | 65e0a905fdb6048facdb34cbec40b7ece4fef991 | [
"MIT"
] | 697 | 2020-09-30T08:35:58.000Z | 2022-03-31T17:14:20.000Z | custom_components/xiaomi_cloud_map_extractor/common/image_handler.py | Neonox31/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | 7bc868278f74fdaba475987dd5fdf485e430fe53 | [
"MIT"
] | 216 | 2020-10-01T12:05:24.000Z | 2022-03-31T11:35:46.000Z | custom_components/xiaomi_cloud_map_extractor/common/image_handler.py | Neonox31/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor | 7bc868278f74fdaba475987dd5fdf485e430fe53 | [
"MIT"
] | 92 | 2020-09-30T18:10:19.000Z | 2022-03-24T12:15:18.000Z | import logging
from typing import Callable
from PIL import Image, ImageDraw, ImageFont
from PIL.Image import Image as ImageType
from custom_components.xiaomi_cloud_map_extractor.common.map_data import ImageData
from custom_components.xiaomi_cloud_map_extractor.const import *
_LOGGER = logging.getLogger(__name__)
class ImageHandler:
    """Static helpers that render vacuum map features (rooms, paths, zones, text)
    onto a PIL RGBA image wrapped in an ImageData object.

    All drawing entry points mutate ``image.data`` in place by alpha-compositing
    a freshly drawn transparent layer on top of it.
    """

    # Default palette: RGB or RGBA tuples keyed by the COLOR_* constants
    # (star-imported from the component's const module). Entries with a 4th
    # value use it as the alpha channel. User-supplied palettes passed as the
    # `colors` argument override these via __get_color__.
    COLORS = {
        COLOR_MAP_INSIDE: (32, 115, 185),
        COLOR_MAP_OUTSIDE: (19, 87, 148),
        COLOR_MAP_WALL: (100, 196, 254),
        COLOR_MAP_WALL_V2: (93, 109, 126),
        COLOR_GREY_WALL: (93, 109, 126),
        COLOR_CLEANED_AREA: (127, 127, 127, 127),
        COLOR_PATH: (147, 194, 238),
        COLOR_GOTO_PATH: (0, 255, 0),
        COLOR_PREDICTED_PATH: (255, 255, 0),
        COLOR_ZONES: (0xAD, 0xD8, 0xFF, 0x8F),
        COLOR_ZONES_OUTLINE: (0xAD, 0xD8, 0xFF),
        COLOR_VIRTUAL_WALLS: (255, 0, 0),
        COLOR_NEW_DISCOVERED_AREA: (64, 64, 64),
        COLOR_NO_GO_ZONES: (255, 33, 55, 127),
        COLOR_NO_GO_ZONES_OUTLINE: (255, 0, 0),
        COLOR_NO_MOPPING_ZONES: (163, 130, 211, 127),
        COLOR_NO_MOPPING_ZONES_OUTLINE: (163, 130, 211),
        COLOR_CHARGER: (0x66, 0xfe, 0xda, 0x7f),
        COLOR_ROBO: (75, 235, 149),
        COLOR_ROOM_NAMES: (0, 0, 0),
        COLOR_OBSTACLE: (0, 0, 0, 128),
        COLOR_IGNORED_OBSTACLE: (0, 0, 0, 128),
        COLOR_OBSTACLE_WITH_PHOTO: (0, 0, 0, 128),
        COLOR_IGNORED_OBSTACLE_WITH_PHOTO: (0, 0, 0, 128),
        COLOR_UNKNOWN: (0, 0, 0),
        COLOR_SCAN: (0xDF, 0xDF, 0xDF),
        COLOR_ROOM_1: (240, 178, 122),
        COLOR_ROOM_2: (133, 193, 233),
        COLOR_ROOM_3: (217, 136, 128),
        COLOR_ROOM_4: (52, 152, 219),
        COLOR_ROOM_5: (205, 97, 85),
        COLOR_ROOM_6: (243, 156, 18),
        COLOR_ROOM_7: (88, 214, 141),
        COLOR_ROOM_8: (245, 176, 65),
        COLOR_ROOM_9: (252, 212, 81),
        COLOR_ROOM_10: (72, 201, 176),
        COLOR_ROOM_11: (84, 153, 199),
        COLOR_ROOM_12: (133, 193, 233),
        COLOR_ROOM_13: (245, 176, 65),
        COLOR_ROOM_14: (82, 190, 128),
        COLOR_ROOM_15: (72, 201, 176),
        COLOR_ROOM_16: (165, 105, 189)
    }
    # Ordered list of the 16 per-room color keys, indexable by room number.
    ROOM_COLORS = [COLOR_ROOM_1, COLOR_ROOM_2, COLOR_ROOM_3, COLOR_ROOM_4, COLOR_ROOM_5, COLOR_ROOM_6, COLOR_ROOM_7,
                   COLOR_ROOM_8, COLOR_ROOM_9, COLOR_ROOM_10, COLOR_ROOM_11, COLOR_ROOM_12, COLOR_ROOM_13,
                   COLOR_ROOM_14, COLOR_ROOM_15, COLOR_ROOM_16]

    @staticmethod
    def create_empty_map_image(colors, text="NO MAP") -> ImageType:
        """Return a 300x200 placeholder image with *text* centered on it."""
        color = ImageHandler.__get_color__(COLOR_MAP_OUTSIDE, colors)
        image = Image.new('RGBA', (300, 200), color=color)
        # Pick black text on light backgrounds (RGB sum > 382, i.e. half of 765),
        # white text otherwise.
        if sum(color[0:3]) > 382:
            text_color = (0, 0, 0)
        else:
            text_color = (255, 255, 255)
        draw = ImageDraw.Draw(image, "RGBA")
        # NOTE(review): ImageDraw.textsize was removed in Pillow 10; this code
        # assumes an older Pillow — confirm the pinned dependency version.
        w, h = draw.textsize(text)
        draw.text(((image.size[0] - w) / 2, (image.size[1] - h) / 2), text, fill=text_color)
        return image

    @staticmethod
    def draw_path(image: ImageData, path, colors, scale):
        """Draw the vacuum's cleaning path."""
        ImageHandler.__draw_path__(image, path, ImageHandler.__get_color__(COLOR_PATH, colors), scale)

    @staticmethod
    def draw_goto_path(image: ImageData, path, colors, scale):
        """Draw the go-to target path."""
        ImageHandler.__draw_path__(image, path, ImageHandler.__get_color__(COLOR_GOTO_PATH, colors), scale)

    @staticmethod
    def draw_predicted_path(image: ImageData, path, colors, scale):
        """Draw the scheduler-predicted path."""
        ImageHandler.__draw_path__(image, path, ImageHandler.__get_color__(COLOR_PREDICTED_PATH, colors), scale)

    @staticmethod
    def draw_no_go_areas(image: ImageData, areas, colors):
        """Draw filled no-go zones with their outline color."""
        ImageHandler.__draw_areas__(image, areas,
                                    ImageHandler.__get_color__(COLOR_NO_GO_ZONES, colors),
                                    ImageHandler.__get_color__(COLOR_NO_GO_ZONES_OUTLINE, colors))

    @staticmethod
    def draw_no_mopping_areas(image: ImageData, areas, colors):
        """Draw filled no-mopping zones with their outline color."""
        ImageHandler.__draw_areas__(image, areas,
                                    ImageHandler.__get_color__(COLOR_NO_MOPPING_ZONES, colors),
                                    ImageHandler.__get_color__(COLOR_NO_MOPPING_ZONES_OUTLINE, colors))

    @staticmethod
    def draw_walls(image: ImageData, walls, colors):
        """Draw virtual walls as 2px lines directly onto the map image."""
        draw = ImageDraw.Draw(image.data, 'RGBA')
        for wall in walls:
            draw.line(wall.to_img(image.dimensions).as_list(),
                      ImageHandler.__get_color__(COLOR_VIRTUAL_WALLS, colors), width=2)

    @staticmethod
    def draw_zones(image: ImageData, zones, colors):
        """Draw cleaning zones as filled rectangles with outline."""
        areas = list(map(lambda z: z.as_area(), zones))
        ImageHandler.__draw_areas__(image, areas,
                                    ImageHandler.__get_color__(COLOR_ZONES, colors),
                                    ImageHandler.__get_color__(COLOR_ZONES_OUTLINE, colors))

    @staticmethod
    def draw_charger(image: ImageData, charger, sizes, colors):
        """Draw the charger as a filled circle at its map position."""
        color = ImageHandler.__get_color__(COLOR_CHARGER, colors)
        radius = sizes[CONF_SIZE_CHARGER_RADIUS]
        ImageHandler.__draw_circle__(image, charger, radius, color, color)

    @staticmethod
    def draw_obstacles(image: ImageData, obstacles, sizes, colors):
        """Draw detected obstacles."""
        color = ImageHandler.__get_color__(COLOR_OBSTACLE, colors)
        radius = sizes[CONF_SIZE_OBSTACLE_RADIUS]
        ImageHandler.draw_all_obstacles(image, obstacles, radius, color)

    @staticmethod
    def draw_ignored_obstacles(image: ImageData, obstacles, sizes, colors):
        """Draw obstacles the vacuum decided to ignore."""
        color = ImageHandler.__get_color__(COLOR_IGNORED_OBSTACLE, colors)
        radius = sizes[CONF_SIZE_IGNORED_OBSTACLE_RADIUS]
        ImageHandler.draw_all_obstacles(image, obstacles, radius, color)

    @staticmethod
    def draw_obstacles_with_photo(image: ImageData, obstacles, sizes, colors):
        """Draw obstacles that have an associated photo."""
        color = ImageHandler.__get_color__(COLOR_OBSTACLE_WITH_PHOTO, colors)
        radius = sizes[CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS]
        ImageHandler.draw_all_obstacles(image, obstacles, radius, color)

    @staticmethod
    def draw_ignored_obstacles_with_photo(image: ImageData, obstacles, sizes, colors):
        """Draw ignored obstacles that have an associated photo."""
        color = ImageHandler.__get_color__(COLOR_IGNORED_OBSTACLE_WITH_PHOTO, colors)
        radius = sizes[CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS]
        ImageHandler.draw_all_obstacles(image, obstacles, radius, color)

    @staticmethod
    def draw_all_obstacles(image: ImageData, obstacles, radius, color):
        """Draw every obstacle as a filled circle of the given radius/color."""
        for obstacle in obstacles:
            ImageHandler.__draw_circle__(image, obstacle, radius, color, color)

    @staticmethod
    def draw_vacuum_position(image: ImageData, vacuum_position, sizes, colors):
        """Draw the vacuum itself as a filled circle."""
        color = ImageHandler.__get_color__(COLOR_ROBO, colors)
        radius = sizes[CONF_SIZE_VACUUM_RADIUS]
        ImageHandler.__draw_circle__(image, vacuum_position, radius, color, color)

    @staticmethod
    def draw_room_names(image: ImageData, rooms, colors):
        """Draw each room's name at the room's anchor point (if it has one)."""
        color = ImageHandler.__get_color__(COLOR_ROOM_NAMES, colors)
        for room in rooms.values():
            p = room.point()
            if p is not None:
                point = p.to_img(image.dimensions)
                ImageHandler.__draw_text__(image, room.name, point.x, point.y, color)

    @staticmethod
    def rotate(image: ImageData):
        """Rotate the image in place; only 90/180/270 degrees are handled."""
        if image.dimensions.rotation == 90:
            image.data = image.data.transpose(Image.ROTATE_90)
        if image.dimensions.rotation == 180:
            image.data = image.data.transpose(Image.ROTATE_180)
        if image.dimensions.rotation == 270:
            image.data = image.data.transpose(Image.ROTATE_270)

    @staticmethod
    def draw_texts(image: ImageData, texts):
        """Draw user-configured text overlays; x/y are percentages of image size."""
        for text_config in texts:
            x = text_config[CONF_X] * image.data.size[0] / 100
            y = text_config[CONF_Y] * image.data.size[1] / 100
            ImageHandler.__draw_text__(image, text_config[CONF_TEXT], x, y, text_config[CONF_COLOR],
                                       text_config[CONF_FONT], text_config[CONF_FONT_SIZE])

    @staticmethod
    def draw_layer(image: ImageData, layer_name):
        """Composite a pre-rendered additional layer onto the map."""
        ImageHandler.__draw_layer__(image, image.additional_layers[layer_name])

    @staticmethod
    def __draw_circle__(image: ImageData, center, r, outline, fill):
        """Draw a circle of radius *r* around *center* on a fresh layer."""
        def draw_func(draw: ImageDraw):
            point = center.to_img(image.dimensions)
            coords = [point.x - r, point.y - r, point.x + r, point.y + r]
            draw.ellipse(coords, outline=outline, fill=fill)

        ImageHandler.__draw_on_new_layer__(image, draw_func)

    @staticmethod
    def __draw_areas__(image: ImageData, areas, fill, outline):
        """Draw each area polygon on its own layer (so fills alpha-blend once)."""
        if len(areas) == 0:
            return
        for area in areas:
            def draw_func(draw: ImageDraw):
                draw.polygon(area.to_img(image.dimensions).as_list(), fill, outline)

            ImageHandler.__draw_on_new_layer__(image, draw_func)

    @staticmethod
    def __draw_path__(image: ImageData, path, color, scale):
        """Draw a polyline through the path's points, supersampled by *scale*."""
        if len(path.path) < 2:
            return

        def draw_func(draw: ImageDraw):
            s = path.path[0].to_img(image.dimensions)
            for point in path.path[1:]:
                e = point.to_img(image.dimensions)
                draw.line([s.x * scale, s.y * scale, e.x * scale, e.y * scale], width=int(scale), fill=color)
                s = e

        ImageHandler.__draw_on_new_layer__(image, draw_func, scale)

    @staticmethod
    def __draw_text__(image: ImageData, text, x, y, color, font_file=None, font_size=None):
        """Draw *text* centered at (x, y); falls back to PIL's default font."""
        def draw_func(draw: ImageDraw):
            font = ImageFont.load_default()
            try:
                if font_file is not None and font_size > 0:
                    font = ImageFont.truetype(font_file, font_size)
            except OSError:
                _LOGGER.warning("Unable to find font file: %s", font_file)
            except ImportError:
                _LOGGER.warning("Unable to open font: %s", font_file)
            finally:
                # NOTE(review): textsize was removed in Pillow 10 — confirm the
                # pinned Pillow version supports it.
                w, h = draw.textsize(text, font)
                draw.text((x - w / 2, y - h / 2), text, font=font, fill=color)

        ImageHandler.__draw_on_new_layer__(image, draw_func)

    @staticmethod
    def __get_color__(name, colors, default_name=None):
        """Resolve a color: user palette first, then the class defaults
        (under *default_name* if given)."""
        if name in colors:
            return colors[name]
        if default_name is None:
            return ImageHandler.COLORS[name]
        return ImageHandler.COLORS[default_name]

    @staticmethod
    def __draw_on_new_layer__(image: ImageData, draw_function: Callable, scale=1):
        """Run *draw_function* on a transparent layer (optionally supersampled
        by *scale*, then box-downsampled) and composite it onto the map."""
        if scale == 1:
            size = image.data.size
        else:
            size = [int(image.data.size[0] * scale), int(image.data.size[1] * scale)]
        layer = Image.new("RGBA", size, (255, 255, 255, 0))
        draw = ImageDraw.Draw(layer, "RGBA")
        draw_function(draw)
        if scale != 1:
            layer = layer.resize(image.data.size, resample=Image.BOX)
        ImageHandler.__draw_layer__(image, layer)

    @staticmethod
    def __draw_layer__(image: ImageData, layer: ImageType):
        """Alpha-composite *layer* over the current map image in place."""
        image.data = Image.alpha_composite(image.data, layer)
| 43.01938 | 116 | 0.643662 |
75d545bc583d1f422e15b338241928ccf02ba440 | 797 | py | Python | tests/test_help.py | rzr/mopidy | f6556ffafce34aebbc43ca266f69ac0068edc31d | [
"Apache-2.0"
] | 2 | 2015-07-09T09:36:26.000Z | 2019-10-05T04:13:19.000Z | tests/test_help.py | rzr/mopidy | f6556ffafce34aebbc43ca266f69ac0068edc31d | [
"Apache-2.0"
] | null | null | null | tests/test_help.py | rzr/mopidy | f6556ffafce34aebbc43ca266f69ac0068edc31d | [
"Apache-2.0"
] | 1 | 2019-10-05T04:13:10.000Z | 2019-10-05T04:13:10.000Z | from __future__ import unicode_literals
import os
import subprocess
import sys
import unittest
import mopidy
class HelpTest(unittest.TestCase):
    def test_help_has_mopidy_options(self):
        """`mopidy --help` must document every top-level command line option."""
        mopidy_dir = os.path.dirname(mopidy.__file__)
        args = [sys.executable, mopidy_dir, '--help']
        process = subprocess.Popen(
            args,
            env={'PYTHONPATH': os.path.join(mopidy_dir, '..')},
            stdout=subprocess.PIPE)
        output = process.communicate()[0]
        expected_flags = (
            '--version',
            '--help',
            '--quiet',
            '--verbose',
            '--save-debug-log',
            '--config',
            '--option',
        )
        for flag in expected_flags:
            self.assertIn(flag, output)
| 29.518519 | 63 | 0.632371 |
ba564fa1b1319e1cdfd4519052afe820daa16c1a | 6,163 | py | Python | sacred/arg_parser.py | ahallermed/sacred | 5a25c89aae75192a52dce8772ed0979104627fed | [
"MIT"
] | 3,895 | 2015-03-16T18:52:44.000Z | 2022-03-31T01:43:56.000Z | sacred/arg_parser.py | ahallermed/sacred | 5a25c89aae75192a52dce8772ed0979104627fed | [
"MIT"
] | 710 | 2015-03-26T11:45:42.000Z | 2022-03-31T21:51:08.000Z | sacred/arg_parser.py | ahallermed/sacred | 5a25c89aae75192a52dce8772ed0979104627fed | [
"MIT"
] | 401 | 2015-03-18T14:34:42.000Z | 2022-03-05T23:26:50.000Z | """
Contains the command-line parsing and help for experiments.
The command-line interface of sacred is built on top of ``docopt``, which
constructs a command-line parser from a usage text. Curiously in sacred we
first programmatically generate a usage text and then parse it with ``docopt``.
"""
import ast
import textwrap
import inspect
from shlex import quote
from sacred.serializer import restore
from sacred.settings import SETTINGS
from sacred.utils import set_by_dotted_path
from sacred.commandline_options import CLIOption
__all__ = ("get_config_updates", "format_usage")
USAGE_TEMPLATE = """Usage:
{program_name} [(with UPDATE...)] [options]
{program_name} help [COMMAND]
{program_name} (-h | --help)
{program_name} COMMAND [(with UPDATE...)] [options]
{description}
Options:
{options}
Arguments:
COMMAND Name of command to run (see below for list of commands)
UPDATE Configuration assignments of the form foo.bar=17
{arguments}
{commands}"""
def get_config_updates(updates):
    """
    Parse the UPDATES given on the commandline.

    Parameters
    ----------
    updates (list[str]):
        list of update-strings of the form NAME=LITERAL or just NAME.

    Returns
    -------
    (dict, list):
        Config updates and named configs to use
    """
    config_updates = {}
    named_configs = []
    for update in updates or []:
        if update == "":
            continue
        key, assignment, raw_value = update.partition("=")
        if assignment == "=":
            # NAME=LITERAL form: strip surrounding whitespace and store the
            # parsed value under its dotted path.
            set_by_dotted_path(config_updates, key.strip(), _convert_value(raw_value.strip()))
        else:
            # Bare NAME form: refers to a named config.
            named_configs.append(key)
    return config_updates, named_configs
def _format_options_usage(options):
    """
    Format the Options-part of the usage text.

    Parameters
    ----------
    options : list[sacred.commandline_options.CommandLineOption]
        A list of all supported commandline options.

    Returns
    -------
    str
        Text formatted as a description for the commandline options
    """
    usage_lines = []
    for op in options:
        short, long = op.get_flags()
        if op.arg:
            flag = "{short} {arg} {long}={arg}".format(
                short=short, long=long, arg=op.arg
            )
        else:
            flag = "{short} {long}".format(short=short, long=long)

        # CLIOption carries its own description; legacy option classes fall
        # back to their (cleaned-up) docstring.
        if isinstance(op, CLIOption):
            doc = op.get_description()
        else:
            # legacy
            doc = inspect.cleandoc(op.__doc__)
        wrapped_description = "\n".join(
            textwrap.wrap(doc, width=79, initial_indent=" " * 32, subsequent_indent=" " * 32)
        ).strip()
        usage_lines.append(" {:28} {}\n".format(flag, wrapped_description))
    return "".join(usage_lines)
def _format_arguments_usage(options):
"""
Construct the Arguments-part of the usage text.
Parameters
----------
options : list[sacred.commandline_options.CommandLineOption]
A list of all supported commandline options.
Returns
-------
str
Text formatted as a description of the arguments supported by the
commandline options.
"""
argument_usage = ""
for op in options:
if op.arg and op.arg_description:
wrapped_description = textwrap.wrap(
op.arg_description,
width=79,
initial_indent=" " * 12,
subsequent_indent=" " * 12,
)
wrapped_description = "\n".join(wrapped_description).strip()
argument_usage += " {:8} {}\n".format(op.arg, wrapped_description)
return argument_usage
def _format_command_usage(commands):
    """
    Construct the Commands-part of the usage text.

    Parameters
    ----------
    commands : dict[str, func]
        dictionary of supported commands.
        Each entry should be a tuple of (name, function).

    Returns
    -------
    str
        Text formatted as a description of the commands.
    """
    if not commands:
        return ""
    # Pad command names to the longest name, with a minimum width of 8.
    cmd_len = max([len(c) for c in commands] + [8])
    row_template = " {:%d} {}\n" % cmd_len
    lines = ["\nCommands:\n"]
    for cmd_name, cmd_func in commands.items():
        lines.append(row_template.format(cmd_name, _get_first_line_of_docstring(cmd_func)))
    return "".join(lines)
def format_usage(program_name, description, commands=None, options=()):
    """
    Construct the usage text.

    Parameters
    ----------
    program_name : str
        Usually the name of the python file that contains the experiment.
    description : str
        description of this experiment (usually the docstring).
    commands : dict[str, func]
        Dictionary of supported commands.
        Each entry should be a tuple of (name, function).
    options : list[sacred.commandline_options.CommandLineOption]
        A list of all supported commandline options.

    Returns
    -------
    str
        The complete formatted usage text for this experiment.
        It adheres to the structure required by ``docopt``.
    """
    cleaned_description = description.strip() if description else ""
    # Fill every section of the docopt template from its dedicated formatter.
    return USAGE_TEMPLATE.format(
        program_name=quote(program_name),
        description=cleaned_description,
        options=_format_options_usage(options),
        arguments=_format_arguments_usage(options),
        commands=_format_command_usage(commands),
    )
def _get_first_line_of_docstring(func):
return textwrap.dedent(func.__doc__ or "").strip().split("\n")[0]
def _convert_value(value):
    """Parse string as python literal if possible and fallback to string."""
    try:
        # restore() stays inside the try: a ValueError it raises must also
        # fall through to the string fallback.
        return restore(ast.literal_eval(value))
    except (ValueError, SyntaxError):
        if SETTINGS.COMMAND_LINE.STRICT_PARSING:
            raise
        # Not a valid Python literal: keep the raw string.
        return value
| 28.665116 | 80 | 0.623722 |
7aa79d299fc604f84a7d006f2f2aebb8de01e309 | 20,697 | py | Python | keras/applications/mobilenet_v2.py | quantumalaviya/keras | 8d874de12ed2e199d9528bfff891f4f60ee2a636 | [
"Apache-2.0"
] | 1 | 2021-09-11T21:25:20.000Z | 2021-09-11T21:25:20.000Z | keras/applications/mobilenet_v2.py | quantumalaviya/keras | 8d874de12ed2e199d9528bfff891f4f60ee2a636 | [
"Apache-2.0"
] | null | null | null | keras/applications/mobilenet_v2.py | quantumalaviya/keras | 8d874de12ed2e199d9528bfff891f4f60ee2a636 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v2 models for Keras.
MobileNetV2 is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNetV2 is very similar to the original MobileNet,
except that it uses inverted residual blocks with
bottlenecking features. It has a drastically lower
parameter count than the original MobileNet.
MobileNets support any input size greater
than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 22 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.35, 0.5, 0.75, 1.0, 1.3, and 1.4
For each of these `alpha` values, weights for 5 different input image sizes
are provided (224, 192, 160, 128, and 96).
The following table describes the performance of
MobileNet on various input sizes:
------------------------------------------------------------------------
MACs stands for Multiply Adds
Classification Checkpoint|MACs (M)|Parameters (M)|Top 1 Accuracy|Top 5 Accuracy
--------------------------|------------|---------------|---------|----|---------
| [mobilenet_v2_1.4_224] | 582 | 6.06 | 75.0 | 92.5 |
| [mobilenet_v2_1.3_224] | 509 | 5.34 | 74.4 | 92.1 |
| [mobilenet_v2_1.0_224] | 300 | 3.47 | 71.8 | 91.0 |
| [mobilenet_v2_1.0_192] | 221 | 3.47 | 70.7 | 90.1 |
| [mobilenet_v2_1.0_160] | 154 | 3.47 | 68.8 | 89.0 |
| [mobilenet_v2_1.0_128] | 99 | 3.47 | 65.3 | 86.9 |
| [mobilenet_v2_1.0_96] | 56 | 3.47 | 60.3 | 83.2 |
| [mobilenet_v2_0.75_224] | 209 | 2.61 | 69.8 | 89.6 |
| [mobilenet_v2_0.75_192] | 153 | 2.61 | 68.7 | 88.9 |
| [mobilenet_v2_0.75_160] | 107 | 2.61 | 66.4 | 87.3 |
| [mobilenet_v2_0.75_128] | 69 | 2.61 | 63.2 | 85.3 |
| [mobilenet_v2_0.75_96] | 39 | 2.61 | 58.8 | 81.6 |
| [mobilenet_v2_0.5_224] | 97 | 1.95 | 65.4 | 86.4 |
| [mobilenet_v2_0.5_192] | 71 | 1.95 | 63.9 | 85.4 |
| [mobilenet_v2_0.5_160] | 50 | 1.95 | 61.0 | 83.2 |
| [mobilenet_v2_0.5_128] | 32 | 1.95 | 57.7 | 80.8 |
| [mobilenet_v2_0.5_96] | 18 | 1.95 | 51.2 | 75.8 |
| [mobilenet_v2_0.35_224] | 59 | 1.66 | 60.3 | 82.9 |
| [mobilenet_v2_0.35_192] | 43 | 1.66 | 58.2 | 81.2 |
| [mobilenet_v2_0.35_160] | 30 | 1.66 | 55.7 | 79.1 |
| [mobilenet_v2_0.35_128] | 20 | 1.66 | 50.8 | 75.0 |
| [mobilenet_v2_0.35_96] | 11 | 1.66 | 45.5 | 70.4 |
Reference:
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
https://arxiv.org/abs/1801.04381) (CVPR 2018)
"""
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet_v2/')
layers = None
@keras_export('keras.applications.mobilenet_v2.MobileNetV2',
              'keras.applications.MobileNetV2')
def MobileNetV2(input_shape=None,
                alpha=1.0,
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                pooling=None,
                classes=1000,
                classifier_activation='softmax',
                **kwargs):
  """Instantiates the MobileNetV2 architecture.

  MobileNetV2 is very similar to the original MobileNet,
  except that it uses inverted residual blocks with
  bottlenecking features. It has a drastically lower
  parameter count than the original MobileNet.
  MobileNets support any input size greater
  than 32 x 32, with larger image sizes
  offering better performance.

  Reference:
  - [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
      https://arxiv.org/abs/1801.04381) (CVPR 2018)

  This function returns a Keras image classification model,
  optionally loaded with weights pre-trained on ImageNet.

  For image classification use cases, see
  [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).

  For transfer learning use cases, make sure to read the
  [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).

  Note: each Keras Application expects a specific kind of input preprocessing.
  For MobileNetV2, call `tf.keras.applications.mobilenet_v2.preprocess_input`
  on your inputs before passing them to the model.
  `mobilenet_v2.preprocess_input` will scale input pixels between -1 and 1.

  Args:
    input_shape: Optional shape tuple, to be specified if you would
      like to use a model with an input image resolution that is not
      (224, 224, 3).
      It should have exactly 3 inputs channels (224, 224, 3).
      You can also omit this option if you would like
      to infer input_shape from an input_tensor.
      If you choose to include both input_tensor and input_shape then
      input_shape will be used if they match, if the shapes
      do not match then we will throw an error.
      E.g. `(160, 160, 3)` would be one valid value.
    alpha: Float, larger than zero, controls the width of the network. This is
      known as the width multiplier in the MobileNetV2 paper, but the name is
      kept for consistency with `applications.MobileNetV1` model in Keras.
      - If `alpha` < 1.0, proportionally decreases the number
        of filters in each layer.
      - If `alpha` > 1.0, proportionally increases the number
        of filters in each layer.
      - If `alpha` = 1.0, default number of filters from the paper
        are used at each layer.
    include_top: Boolean, whether to include the fully-connected layer at the
      top of the network. Defaults to `True`.
    weights: String, one of `None` (random initialization), 'imagenet'
      (pre-training on ImageNet), or the path to the weights file to be loaded.
    input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
      to use as image input for the model.
    pooling: String, optional pooling mode for feature extraction when
      `include_top` is `False`.
      - `None` means that the output of the model
        will be the 4D tensor output of the
        last convolutional block.
      - `avg` means that global average pooling
        will be applied to the output of the
        last convolutional block, and thus
        the output of the model will be a
        2D tensor.
      - `max` means that global max pooling will
        be applied.
    classes: Optional integer number of classes to classify images into, only to
      be specified if `include_top` is True, and if no `weights` argument is
      specified.
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
      When loading pretrained weights, `classifier_activation` can only
      be `None` or `"softmax"`.
    **kwargs: For backwards compatibility only.

  Returns:
    A `keras.Model` instance.
  """
  global layers
  # `layers` may be injected for backwards compatibility; otherwise use the
  # version-aware layer factory.
  if 'layers' in kwargs:
    layers = kwargs.pop('layers')
  else:
    layers = VersionAwareLayers()
  if kwargs:
    raise ValueError(f'Unknown argument(s): {kwargs}')
  if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded. '
                     f'Received `weights={weights}`')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError(
        'If using `weights` as `"imagenet"` with `include_top` '
        f'as true, `classes` should be 1000. Received `classes={classes}`')

  # Determine proper input shape and default size.
  # If both input_shape and input_tensor are used, they should match
  if input_shape is not None and input_tensor is not None:
    try:
      is_input_t_tensor = backend.is_keras_tensor(input_tensor)
    except ValueError:
      try:
        is_input_t_tensor = backend.is_keras_tensor(
            layer_utils.get_source_inputs(input_tensor))
      except ValueError:
        raise ValueError(
            f'input_tensor: {input_tensor}'
            'is not type input_tensor. '
            f'Received `type(input_tensor)={type(input_tensor)}`'
        )
    if is_input_t_tensor:
      if backend.image_data_format() == 'channels_first':
        if backend.int_shape(input_tensor)[1] != input_shape[1]:
          raise ValueError('input_shape[1] must equal shape(input_tensor)[1] '
                           'when `image_data_format` is `channels_first`; '
                           'Received `input_tensor.shape='
                           f'{input_tensor.shape}`'
                           f', `input_shape={input_shape}`')
      else:
        if backend.int_shape(input_tensor)[2] != input_shape[1]:
          raise ValueError(
              'input_tensor.shape[2] must equal input_shape[1]; '
              'Received `input_tensor.shape='
              f'{input_tensor.shape}`, '
              f'`input_shape={input_shape}`')
    else:
      raise ValueError('input_tensor is not a Keras tensor; '
                       f'Received `input_tensor={input_tensor}`')

  # If input_shape is None, infer shape from input_tensor.
  if input_shape is None and input_tensor is not None:
    try:
      backend.is_keras_tensor(input_tensor)
    except ValueError:
      raise ValueError('input_tensor must be a valid Keras tensor type; '
                       f'Received {input_tensor} of type {type(input_tensor)}')

    if input_shape is None and not backend.is_keras_tensor(input_tensor):
      default_size = 224
    elif input_shape is None and backend.is_keras_tensor(input_tensor):
      if backend.image_data_format() == 'channels_first':
        rows = backend.int_shape(input_tensor)[2]
        cols = backend.int_shape(input_tensor)[3]
      else:
        rows = backend.int_shape(input_tensor)[1]
        cols = backend.int_shape(input_tensor)[2]

      # Only square inputs with a known pretrained size keep their size.
      if rows == cols and rows in [96, 128, 160, 192, 224]:
        default_size = rows
      else:
        default_size = 224

  # If input_shape is None and no input_tensor
  elif input_shape is None:
    default_size = 224

  # If input_shape is not None, assume default size.
  else:
    if backend.image_data_format() == 'channels_first':
      rows = input_shape[1]
      cols = input_shape[2]
    else:
      rows = input_shape[0]
      cols = input_shape[1]

    if rows == cols and rows in [96, 128, 160, 192, 224]:
      default_size = rows
    else:
      default_size = 224

  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=default_size,
      min_size=32,
      data_format=backend.image_data_format(),
      require_flatten=include_top,
      weights=weights)

  if backend.image_data_format() == 'channels_last':
    row_axis, col_axis = (0, 1)
  else:
    row_axis, col_axis = (1, 2)
  rows = input_shape[row_axis]
  cols = input_shape[col_axis]

  if weights == 'imagenet':
    if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
      raise ValueError('If imagenet weights are being loaded, '
                       'alpha must be one of `0.35`, `0.50`, `0.75`, '
                       '`1.0`, `1.3` or `1.4` only;'
                       f' Received `alpha={alpha}`')

    if rows != cols or rows not in [96, 128, 160, 192, 224]:
      # Fall back to the 224x224 checkpoint; `rows` is reused below when
      # picking the weight file name.
      rows = 224
      logging.warning('`input_shape` is undefined or non-square, '
                      'or `rows` is not in [96, 128, 160, 192, 224]. '
                      'Weights for input shape (224, 224) will be '
                      'loaded as the default.')

  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  else:
    if not backend.is_keras_tensor(input_tensor):
      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1

  # Stem: strided 3x3 convolution, filters scaled by `alpha`.
  first_block_filters = _make_divisible(32 * alpha, 8)
  x = layers.Conv2D(
      first_block_filters,
      kernel_size=3,
      strides=(2, 2),
      padding='same',
      use_bias=False,
      name='Conv1')(img_input)
  x = layers.BatchNormalization(
      axis=channel_axis, epsilon=1e-3, momentum=0.999, name='bn_Conv1')(
          x)
  x = layers.ReLU(6., name='Conv1_relu')(x)

  # 17 inverted residual blocks (ids 0-16); stride-2 blocks downsample.
  x = _inverted_res_block(
      x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0)

  x = _inverted_res_block(
      x, filters=24, alpha=alpha, stride=2, expansion=6, block_id=1)
  x = _inverted_res_block(
      x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2)

  x = _inverted_res_block(
      x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3)
  x = _inverted_res_block(
      x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4)
  x = _inverted_res_block(
      x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5)

  x = _inverted_res_block(
      x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6)
  x = _inverted_res_block(
      x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7)
  x = _inverted_res_block(
      x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8)
  x = _inverted_res_block(
      x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9)

  x = _inverted_res_block(
      x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10)
  x = _inverted_res_block(
      x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11)
  x = _inverted_res_block(
      x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12)

  x = _inverted_res_block(
      x, filters=160, alpha=alpha, stride=2, expansion=6, block_id=13)
  x = _inverted_res_block(
      x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14)
  x = _inverted_res_block(
      x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15)

  x = _inverted_res_block(
      x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16)

  # no alpha applied to last conv as stated in the paper:
  # if the width multiplier is greater than 1 we increase the number of output
  # channels.
  if alpha > 1.0:
    last_block_filters = _make_divisible(1280 * alpha, 8)
  else:
    last_block_filters = 1280

  x = layers.Conv2D(
      last_block_filters, kernel_size=1, use_bias=False, name='Conv_1')(
          x)
  x = layers.BatchNormalization(
      axis=channel_axis, epsilon=1e-3, momentum=0.999, name='Conv_1_bn')(
          x)
  x = layers.ReLU(6., name='out_relu')(x)

  if include_top:
    x = layers.GlobalAveragePooling2D()(x)
    imagenet_utils.validate_activation(classifier_activation, weights)
    x = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)

  else:
    if pooling == 'avg':
      x = layers.GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = layers.GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account any potential predecessors of
  # `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input

  # Create model.
  model = training.Model(inputs, x, name='mobilenetv2_%0.2f_%s' % (alpha, rows))

  # Load weights.
  if weights == 'imagenet':
    if include_top:
      model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
                    str(float(alpha)) + '_' + str(rows) + '.h5')
      weight_path = BASE_WEIGHT_PATH + model_name
      weights_path = data_utils.get_file(
          model_name, weight_path, cache_subdir='models')
    else:
      model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
                    str(float(alpha)) + '_' + str(rows) + '_no_top' + '.h5')
      weight_path = BASE_WEIGHT_PATH + model_name
      weights_path = data_utils.get_file(
          model_name, weight_path, cache_subdir='models')
    model.load_weights(weights_path)
  elif weights is not None:
    # A filesystem path to a custom weights file.
    model.load_weights(weights)

  return model
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
  """Inverted residual block: expand (1x1) -> depthwise (3x3) -> project (1x1).

  Adds a residual shortcut when the block keeps both resolution (stride 1)
  and channel count. `block_id == 0` skips the expansion stage.
  """
  channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1

  in_channels = backend.int_shape(inputs)[channel_axis]
  pointwise_conv_filters = int(filters * alpha)
  # Ensure the number of filters on the last 1x1 convolution is divisible by 8.
  pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
  x = inputs
  prefix = 'block_{}_'.format(block_id)

  if block_id:
    # Expand with a pointwise 1x1 convolution.
    x = layers.Conv2D(
        expansion * in_channels,
        kernel_size=1,
        padding='same',
        use_bias=False,
        activation=None,
        name=prefix + 'expand')(
            x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=prefix + 'expand_BN')(
            x)
    x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
  else:
    # First block has no expansion; matches the checkpoint naming scheme.
    prefix = 'expanded_conv_'

  # Depthwise 3x3 convolution.
  if stride == 2:
    # Explicit zero padding so the 'valid' strided conv mimics 'same'.
    x = layers.ZeroPadding2D(
        padding=imagenet_utils.correct_pad(x, 3),
        name=prefix + 'pad')(x)
  x = layers.DepthwiseConv2D(
      kernel_size=3,
      strides=stride,
      activation=None,
      use_bias=False,
      padding='same' if stride == 1 else 'valid',
      name=prefix + 'depthwise')(
          x)
  x = layers.BatchNormalization(
      axis=channel_axis,
      epsilon=1e-3,
      momentum=0.999,
      name=prefix + 'depthwise_BN')(
          x)

  x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

  # Project with a pointwise 1x1 convolution (linear bottleneck: no ReLU).
  x = layers.Conv2D(
      pointwise_filters,
      kernel_size=1,
      padding='same',
      use_bias=False,
      activation=None,
      name=prefix + 'project')(
          x)
  x = layers.BatchNormalization(
      axis=channel_axis,
      epsilon=1e-3,
      momentum=0.999,
      name=prefix + 'project_BN')(
          x)

  if in_channels == pointwise_filters and stride == 1:
    return layers.Add(name=prefix + 'add')([inputs, x])
  return x
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
# Thin wrapper over the shared Keras preprocessing helper; mode='tf' scales
# pixels to [-1, 1]. Its __doc__ is attached below from the shared template.
@keras_export('keras.applications.mobilenet_v2.preprocess_input')
def preprocess_input(x, data_format=None):
  return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
# Thin wrapper over the shared ImageNet class-decoding helper; its __doc__ is
# attached below from `imagenet_utils.decode_predictions`.
@keras_export('keras.applications.mobilenet_v2.decode_predictions')
def decode_predictions(preds, top=5):
  return imagenet_utils.decode_predictions(preds, top=top)
# Attach the shared Keras-applications docstrings to the wrappers above.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode='',
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| 39.050943 | 87 | 0.65594 |
32cf29818e06947aa32ddfbd02d85ab6e1772325 | 3,862 | py | Python | pandas/tests/extension/base/missing.py | mapehe/pandas | 8ddc0fd801d794fcd7735816790dff66d1c678e2 | [
"BSD-3-Clause"
] | 3 | 2019-04-01T11:03:04.000Z | 2019-12-31T02:17:15.000Z | pandas/tests/extension/base/missing.py | mapehe/pandas | 8ddc0fd801d794fcd7735816790dff66d1c678e2 | [
"BSD-3-Clause"
] | 1 | 2021-04-15T18:46:45.000Z | 2021-04-15T18:46:45.000Z | pandas/tests/extension/base/missing.py | mapehe/pandas | 8ddc0fd801d794fcd7735816790dff66d1c678e2 | [
"BSD-3-Clause"
] | 1 | 2020-06-18T15:56:53.000Z | 2020-06-18T15:56:53.000Z | import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseMissingTests(BaseExtensionTests):
    """Shared missing-value tests for pandas ExtensionArray implementations.

    Each test receives pytest fixtures supplied by the concrete subclass:
    `data_missing` is a length-2 array whose first element is NA and whose
    second element is valid; `data` is a fully valid array.
    """

    def test_isna(self, data_missing):
        # Array-level isna, then the same through a Series.
        expected = np.array([True, False])

        result = pd.isna(data_missing)
        tm.assert_numpy_array_equal(result, expected)

        result = pd.Series(data_missing).isna()
        expected = pd.Series(expected)
        self.assert_series_equal(result, expected)

    def test_dropna_series(self, data_missing):
        ser = pd.Series(data_missing)
        result = ser.dropna()
        # Only the valid second element survives.
        expected = ser.iloc[[1]]
        self.assert_series_equal(result, expected)

    def test_dropna_frame(self, data_missing):
        df = pd.DataFrame({"A": data_missing})

        # defaults
        result = df.dropna()
        expected = df.iloc[[1]]
        self.assert_frame_equal(result, expected)

        # axis = 1
        result = df.dropna(axis='columns')
        expected = pd.DataFrame(index=[0, 1])
        self.assert_frame_equal(result, expected)

        # multiple
        df = pd.DataFrame({"A": data_missing,
                           "B": [1, np.nan]})
        result = df.dropna()
        expected = df.iloc[:0]
        self.assert_frame_equal(result, expected)

    def test_fillna_scalar(self, data_missing):
        valid = data_missing[1]
        result = data_missing.fillna(valid)
        expected = data_missing.fillna(valid)
        self.assert_extension_array_equal(result, expected)

    def test_fillna_limit_pad(self, data_missing):
        # Forward-fill with limit=2: only the first two NAs after a valid
        # value are filled.
        arr = data_missing.take([1, 0, 0, 0, 1])
        result = pd.Series(arr).fillna(method='ffill', limit=2)
        expected = pd.Series(data_missing.take([1, 1, 1, 0, 1]))
        self.assert_series_equal(result, expected)

    def test_fillna_limit_backfill(self, data_missing):
        arr = data_missing.take([1, 0, 0, 0, 1])
        result = pd.Series(arr).fillna(method='backfill', limit=2)
        expected = pd.Series(data_missing.take([1, 0, 1, 1, 1]))
        self.assert_series_equal(result, expected)

    def test_fillna_series(self, data_missing):
        fill_value = data_missing[1]
        ser = pd.Series(data_missing)

        result = ser.fillna(fill_value)
        expected = pd.Series(
            data_missing._from_sequence([fill_value, fill_value]))
        self.assert_series_equal(result, expected)

        # Fill with a series
        result = ser.fillna(expected)
        self.assert_series_equal(result, expected)

        # Fill with a series not affecting the missing values
        result = ser.fillna(ser)
        self.assert_series_equal(result, ser)

    @pytest.mark.parametrize('method', ['ffill', 'bfill'])
    def test_fillna_series_method(self, data_missing, method):
        fill_value = data_missing[1]

        if method == 'ffill':
            # Reverse so the valid value precedes the NA for forward fill.
            data_missing = type(data_missing)(data_missing[::-1])

        result = pd.Series(data_missing).fillna(method=method)
        expected = pd.Series(
            data_missing._from_sequence([fill_value, fill_value]))

        self.assert_series_equal(result, expected)

    def test_fillna_frame(self, data_missing):
        fill_value = data_missing[1]

        result = pd.DataFrame({
            "A": data_missing,
            "B": [1, 2]
        }).fillna(fill_value)

        expected = pd.DataFrame({
            "A": data_missing._from_sequence([fill_value, fill_value]),
            "B": [1, 2],
        })

        self.assert_frame_equal(result, expected)

    def test_fillna_fill_other(self, data):
        # Filling a different (NaN) column must leave the extension column
        # untouched.
        result = pd.DataFrame({
            "A": data,
            "B": [np.nan] * len(data)
        }).fillna({"B": 0.0})

        expected = pd.DataFrame({
            "A": data,
            "B": [0.0] * len(result),
        })

        self.assert_frame_equal(result, expected)
| 31.655738 | 71 | 0.615743 |
895baeefd7f79415307cfe7b140ce052399a9a31 | 7,196 | py | Python | NeurIPS_2021/Figure_1/QD_cubic/DataGen.py | streeve/PI3NN | f7f08a195096e0388bb9230bc67c6acd6f41581a | [
"Apache-2.0"
] | 11 | 2021-11-08T20:38:50.000Z | 2022-01-30T02:46:39.000Z | NeurIPS_2021/Figure_1/QD_cubic/DataGen.py | streeve/PI3NN | f7f08a195096e0388bb9230bc67c6acd6f41581a | [
"Apache-2.0"
] | 1 | 2022-01-13T19:46:32.000Z | 2022-02-09T16:23:56.000Z | NeurIPS_2021/Figure_1/QD_cubic/DataGen.py | streeve/PI3NN | f7f08a195096e0388bb9230bc67c6acd6f41581a | [
"Apache-2.0"
] | 1 | 2021-12-17T18:38:26.000Z | 2021-12-17T18:38:26.000Z | # -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
import math
import matplotlib.pyplot as plt
import importlib
"""
this file contains object to create data sets for regression
synthetic datasets:
drunk_bow_tie - as in paper, with gaussian noise
drunk_bow_tie_exp - as in paper with exp noise
x_cubed_gap - as in paper to show model uncertainty
real datasets:
~boston - standard boston housing dataset
"""
class DataGenerator:
    """Creates synthetic or real regression datasets for PI experiments.

    Synthetic types: 'drunk_bow_tie', 'drunk_bow_tie_exp', 'periodic_1',
    'x_cubed_gap', 'x_cubed_nonGaussianNoise'. Real datasets are selected
    with a leading '~' (e.g. '~boston', '~concrete') and are loaded from
    CSV files expected in the working directory.
    """

    def __init__(self, type_in, n_feat=1):
        # select type of data to produce
        # not really using no. feat anymore
        self.n_feat = n_feat
        self.type_in = type_in
        return

    def CreateData(self, n_samples, seed_in=5,
                   train_prop=0.9, bound_limit=6., n_std_devs=1.96, **kwargs):
        """Generate (X_train, y_train, X_val, y_val) for the configured type.

        Also stores the splits and the ideal upper/lower/mean boundary
        curves (plus normalisation constants scale_c/shift_c) on self.
        n_std_devs is the half-width multiplier for the ideal interval
        (1.96 -> 95% for Gaussian noise).
        """
        np.random.seed(seed_in)
        scale_c = 1.0  # default
        shift_c = 1.0

        # for ideal boundary
        X_ideal = np.linspace(start=-bound_limit, stop=bound_limit, num=500)
        y_ideal_U = np.ones_like(X_ideal)+1.  # default
        y_ideal_L = np.ones_like(X_ideal)-1.
        y_ideal_mean = np.ones_like(X_ideal)+0.5

        if self.type_in == "drunk_bow_tie":
            """
            similar to bow tie but less linear
            """
            # Gaussian noise whose std dev grows as x^2.
            X = np.random.uniform(low=-2., high=2., size=(n_samples, 1))
            y = 1.5*np.sin(np.pi*X[:, 0]) + np.random.normal(loc=0., scale=1.*np.power(X[:, 0], 2))
            y = y.reshape([-1, 1])/5.
            X_train = X
            y_train = y

            X = np.random.uniform(low=-2., high=2., size=(int(10*n_samples), 1))
            y = 1.5*np.sin(np.pi*X[:, 0]) + np.random.normal(loc=0., scale=1.*np.power(X[:, 0], 2))
            y = y.reshape([-1, 1])/5.
            X_val = X
            y_val = y

            y_ideal_U = 1.5*np.sin(np.pi*X_ideal) + n_std_devs*np.power(X_ideal, 2)
            y_ideal_U = y_ideal_U/5.
            y_ideal_L = 1.5*np.sin(np.pi*X_ideal) - n_std_devs*np.power(X_ideal, 2)
            y_ideal_L = y_ideal_L/5.
            y_ideal_mean = 1.5*np.sin(np.pi*X_ideal)
            y_ideal_mean = y_ideal_mean/5.

            # overwrite for convenience!
            X_val = X_train
            y_val = y_train

        elif self.type_in == "drunk_bow_tie_exp":
            """
            similar to bow tie but less linear, now with non-gaussian noise
            """
            X = np.random.uniform(low=-2., high=2., size=(n_samples, 1))
            y = 1.5*np.sin(np.pi*X[:, 0]) + np.random.exponential(scale=1.*np.power(X[:, 0], 2))
            y = y.reshape([-1, 1])/5.
            X_train = X
            y_train = y

            X = np.random.uniform(low=-2., high=2., size=(int(10*n_samples), 1))
            y = 1.5*np.sin(np.pi*X[:, 0]) + np.random.exponential(scale=1.*np.power(X[:, 0], 2))
            y = y.reshape([-1, 1])/5.
            X_val = X
            y_val = y

            # for exponential quantile = ln(1/quantile) /lambda
            # note that np inputs beta = 1/lambda
            y_ideal_U = 1.5*np.sin(np.pi*X_ideal) + np.log(1/(1-0.95))*np.power(X_ideal, 2)
            y_ideal_U = y_ideal_U/5.
            y_ideal_L = 1.5*np.sin(np.pi*X_ideal)
            y_ideal_L = y_ideal_L/5.
            y_ideal_mean = 1.5*np.sin(np.pi*X_ideal)
            y_ideal_mean = y_ideal_mean/5.

            X_val = X_train
            y_val = y_train

        elif self.type_in == "periodic_1":
            """
            creates a bow tie shape with changing variance
            """
            # Multi-frequency cosine signal with small constant noise;
            # the default ideal boundaries are kept for this type.
            X = np.random.uniform(low=-5., high=5., size=(n_samples, self.n_feat))
            y = 2.1*np.cos(0.2*X[:, 0]) + 0.7*np.cos(20.1*X[:, 0]) + 0.2*np.cos(10.4*X[:, 0]) + np.random.normal(loc=0., scale=0.1*np.ones_like(X[:, 0]))
            y = y.reshape([-1, 1])/1.
            X_train = X
            y_train = y
            X_val = X_train
            y_val = y_train
            # y_ideal_U = X_ideal/5. + n_std_devs * np.abs(X_ideal)/5.
            # y_ideal_L = X_ideal/5. - n_std_devs * np.abs(X_ideal)/5.

        elif self.type_in == "x_cubed_gap":
            """
            toy data problem from Probabilistic Backprop (Lobato) &
            deep ensembles (Blundell)
            but added gap here
            """
            scale_c = 50.
            half_samp = int(round(n_samples/2))
            X_1 = np.random.uniform(low=-4., high=-1., size=(half_samp, 1))
            X_2 = np.random.uniform(low=1., high=4., size=(n_samples - half_samp, 1))
            X = np.concatenate((X_1, X_2))
            y = X[:, 0]**3 + np.random.normal(loc=0., scale=3., size=X[:, 0].shape[0])
            y = y.reshape([-1, 1])/scale_c
            X_train = X
            y_train = y
            X_val = X_train
            y_val = y_train

            y_ideal_U = X_ideal**3 + n_std_devs*3.
            y_ideal_U = y_ideal_U/scale_c
            y_ideal_L = X_ideal**3 - n_std_devs*3.
            y_ideal_L = y_ideal_L/scale_c
            y_ideal_mean = X_ideal**3
            y_ideal_mean = y_ideal_mean/scale_c

        ##### ===== ======
        elif self.type_in == "x_cubed_nonGaussianNoise":
            """
            toy data problem from Probabilistic Backprop (Lobato) &
            deep ensembles (Blundell)
            but added gap here
            """
            scale_c = 50.
            X = np.random.uniform(low=-4., high=4., size=(n_samples, 1))
            # Skewed noise: positive draws scaled x10, negative draws x2.
            noise = np.random.randn(X.shape[0])
            for i in range(X.shape[0]):
                if (noise[i] > 0):
                    noise[i] = noise[i] * 10.0
                else:
                    noise[i] = noise[i] * 2.0
            y = X[:, 0]**3 + noise
            y = y.reshape([-1, 1])/scale_c
            X_train = X
            y_train = y
            X_val = X_train
            y_val = y_train

            y_ideal_U = X_ideal**3 + n_std_devs*3.
            y_ideal_U = y_ideal_U/scale_c
            y_ideal_L = X_ideal**3 - n_std_devs*3.
            y_ideal_L = y_ideal_L/scale_c
            y_ideal_mean = X_ideal**3
            y_ideal_mean = y_ideal_mean/scale_c

        # use single char '~' at start to identify real data sets
        elif self.type_in[:1] == '~':

            if self.type_in == "~boston":
                path = 'boston_housing_data.csv'
                data = np.loadtxt(path, skiprows=0)
            elif self.type_in == "~concrete":
                path = 'Concrete_Data.csv'
                data = np.loadtxt(path, delimiter=',', skiprows=1)

            # work out normalisation constants (need when unnormalising later)
            scale_c = np.std(data[:, -1])
            shift_c = np.mean(data[:, -1])

            # normalise data
            for i in range(0, data.shape[1]):
                # avoid zero variance features (exist one or two)
                sdev_norm = np.std(data[:, i])
                sdev_norm = 0.001 if sdev_norm == 0 else sdev_norm
                data[:, i] = (data[:, i] - np.mean(data[:, i]))/sdev_norm

            # split into train/test
            perm = np.random.permutation(data.shape[0])
            train_size = int(round(train_prop*data.shape[0]))
            train = data[perm[:train_size], :]
            test = data[perm[train_size:], :]

            y_train = train[:, -1].reshape(-1, 1)
            X_train = train[:, :-1]
            y_val = test[:, -1].reshape(-1, 1)
            X_val = test[:, :-1]

        # save important stuff
        self.X_train = X_train
        self.y_train = y_train
        self.X_val = X_val
        self.y_val = y_val
        self.X_ideal = X_ideal
        self.y_ideal_U = y_ideal_U
        self.y_ideal_L = y_ideal_L
        self.y_ideal_mean = y_ideal_mean
        self.scale_c = scale_c
        self.shift_c = shift_c

        return X_train, y_train, X_val, y_val

    def ViewData(self, n_rows=5, hist=False, plot=False, print_=True):
        """
        print first few rows of data
        option to view histogram of x and y
        option to view scatter plot of x vs y
        """
        if print_:
            print("\nX_train\n", self.X_train[:n_rows],
                  "\ny_train\n", self.y_train[:n_rows],
                  "\nX_val\n", self.X_val[:n_rows],
                  "\ny_val\n", self.y_val[:n_rows])
        if hist:
            fig, ax = plt.subplots(1, 2)
            ax[0].hist(self.X_train)
            ax[1].hist(self.y_train)
            ax[0].set_title("X_train")
            ax[1].set_title("y_train")
            fig.show()
        if plot:
            n_feat = self.X_train.shape[1]
            fig, ax = plt.subplots(n_feat, 1)  # create an extra
            if n_feat == 1: ax = [ax]  # make into list
            for i in range(0, n_feat):
                ax[i].scatter(self.X_train[:, i], self.y_train,
                              alpha=0.5, s=2.0)
                ax[i].set_xlabel('x_'+str(i))
                ax[i].set_ylabel('y')
            fig.show()
        return
| 28 | 139 | 0.638549 |
570045baa58513e37ae03d8f13bf0cbab49762e4 | 4,692 | py | Python | examples-master/python_examples/test_pot_dq.py | JungHoonJung/2021MD | 29bfae7a750217d50654e4973a2be6fb0d968bdf | [
"MIT"
] | 186 | 2017-04-30T18:11:12.000Z | 2022-03-31T18:35:09.000Z | examples-master/python_examples/test_pot_dq.py | JungHoonJung/2021MD | 29bfae7a750217d50654e4973a2be6fb0d968bdf | [
"MIT"
] | 17 | 2017-02-09T11:08:49.000Z | 2022-01-29T14:40:09.000Z | examples-master/python_examples/test_pot_dq.py | JungHoonJung/2021MD | 29bfae7a750217d50654e4973a2be6fb0d968bdf | [
"MIT"
] | 92 | 2017-09-13T05:20:40.000Z | 2022-03-19T16:17:02.000Z | #!/usr/bin/env python3
# test_pot_dq.py
#------------------------------------------------------------------------------------------------#
# This software was written in 2016/17 #
# by Michael P. Allen <m.p.allen@warwick.ac.uk>/<m.p.allen@bristol.ac.uk> #
# and Dominic J. Tildesley <d.tildesley7@gmail.com> ("the authors"), #
# to accompany the book "Computer Simulation of Liquids", second edition, 2017 ("the text"), #
# published by Oxford University Press ("the publishers"). #
# #
# LICENCE #
# Creative Commons CC0 Public Domain Dedication. #
# To the extent possible under law, the authors have dedicated all copyright and related #
# and neighboring rights to this software to the PUBLIC domain worldwide. #
# This software is distributed without any warranty. #
# You should have received a copy of the CC0 Public Domain Dedication along with this software. #
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #
# #
# DISCLAIMER #
# The authors and publishers make no warranties about the software, and disclaim liability #
# for all uses of the software, to the fullest extent permitted by applicable law. #
# The authors and publishers do not recommend use of this software for any purpose. #
# It is made freely available, solely to clarify points made in the text. When using or citing #
# the software, you should not imply endorsement by the authors or publishers. #
#------------------------------------------------------------------------------------------------#
"""Dipole-quadrupole potential and forces."""
import numpy as np
# Module banner: announce what this demonstration provides when imported/run.
n = 2  # Two-molecule potential
print('test_pot_dq module')
print('Returns potential and force for dipole-quadrupole')
print(n, '-molecule potential', sep='')
def force ( r, e ):
    """Returns potential pot and numpy arrays f, t of shape (n,3), same as input arguments.

    Demonstrates the calculation of forces from the dipole-quadrupole potential.
    Written for ease of comparison with the text rather than efficiency!

    r holds the two molecular positions, e the two (unit) orientation vectors.
    The returned pot sums the dipole-quadrupole (mu_i=1, Q_j=1) and
    quadrupole-dipole (Q_i=1, mu_j=1) contributions; f are forces and t torques.
    """
    from math import isclose

    assert r.shape == (n,3), 'Incorrect shape of r'
    assert e.shape == (n,3), 'Incorrect shape of e'

    # Notation to match appendix
    i = 0
    j = 1

    ei = e[i,:]
    ej = e[j,:]
    assert isclose(np.sum(ei**2),1.0), 'Non-unit vector {} {} {}'.format(*ei)
    assert isclose(np.sum(ej**2),1.0), 'Non-unit vector {} {} {}'.format(*ej)

    rij = r[i,:] - r[j,:]
    rij_mag = np.sqrt( np.sum(rij**2) ) # Magnitude of separation vector
    sij = rij / rij_mag                 # Unit vector
    ci  = np.dot( ei, sij )             # Cosine of angle between ei and sij
    cj  = np.dot( ej, sij )             # Cosine of angle between ej and sij
    cij = np.dot( ei, ej )              # Cosine of angle between ei and ej

    # The dipole-quadrupole potential with mu_i = 1, Q_j = 1
    vij_dq = 1.5 * (ci*(1.0-5.0*cj**2)+2*cj*cij)/rij_mag**4

    # The quadrupole-dipole potential with Q_i = 1, mu_j = 1
    vij_qd = -1.5 * (cj*(1.0-5.0*ci**2)+2*ci*cij)/rij_mag**4

    # Forces and torque gradients for dipole-quadrupole potential with mu_i = 1, Q_j = 1
    dvdrij = -4.0*vij_dq/rij_mag
    dvdci  = 1.5 * (1-5.0*cj**2) / rij_mag**4
    dvdcj  = 3.0 * (cij-5.0*ci*cj) / rij_mag**4
    dvdcij = 3.0 * cj / rij_mag**4
    fij_dq = - dvdrij*sij - dvdci*(ei-ci*sij)/rij_mag - dvdcj*(ej-cj*sij)/rij_mag
    gi_dq  = dvdci*sij + dvdcij*ej
    gj_dq  = dvdcj*sij + dvdcij*ei

    # Forces and torque gradients for quadrupole-dipole potential with Q_i = 1, mu_j = 1
    dvdrij = -4.0*vij_qd/rij_mag
    dvdci  = -3.0 * (cij-5.0*ci*cj) / rij_mag**4
    dvdcj  = -1.5 * (1-5.0*ci**2) / rij_mag**4
    dvdcij = -3.0 * ci / rij_mag**4
    fij_qd = - dvdrij*sij - dvdci*(ei-ci*sij)/rij_mag - dvdcj*(ej-cj*sij)/rij_mag
    gi_qd  = dvdci*sij + dvdcij*ej
    gj_qd  = dvdcj*sij + dvdcij*ei

    # Final potential, forces and torques
    pot = vij_dq + vij_qd
    f = np.empty_like(r)
    t = np.empty_like(r)
    f[i,:] = fij_dq + fij_qd    # Newton's third law: fji = -fij
    f[j,:] = -fij_dq - fij_qd
    t[i,:] = -np.cross(ei,gi_dq+gi_qd)
    t[j,:] = -np.cross(ej,gj_dq+gj_qd)

    return pot, f, t
| 48.875 | 98 | 0.5211 |
a017ffa7109803cae535880895bc7b0bb8e39b17 | 164 | py | Python | tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_ConstantTrend_Seasonal_MonthOfYear_NoAR.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_ConstantTrend_Seasonal_MonthOfYear_NoAR.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_ConstantTrend_Seasonal_MonthOfYear_NoAR.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['ConstantTrend'] , ['Seasonal_MonthOfYear'] , ['NoAR'] ); | 41 | 91 | 0.762195 |
fe67c40ce9ecf1d76fc170ecd6e31ab09246551a | 64 | py | Python | prml/linear/_classifier.py | alexandru-dinu/PRML | acd823e098df67abe0306a70225e7539f8edda40 | [
"MIT"
] | null | null | null | prml/linear/_classifier.py | alexandru-dinu/PRML | acd823e098df67abe0306a70225e7539f8edda40 | [
"MIT"
] | null | null | null | prml/linear/_classifier.py | alexandru-dinu/PRML | acd823e098df67abe0306a70225e7539f8edda40 | [
"MIT"
] | 1 | 2019-06-22T20:56:02.000Z | 2019-06-22T20:56:02.000Z | class Classifier(object):
"""Base class for classifiers."""
| 21.333333 | 37 | 0.6875 |
5eec48a9800b4ea4b6e828b3ac90ba6752022547 | 3,329 | py | Python | malaya_speech/train/model/vits/transformer/__init__.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | null | null | null | malaya_speech/train/model/vits/transformer/__init__.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | null | null | null | malaya_speech/train/model/vits/transformer/__init__.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 YoungJoong Kim
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import List, Tuple
import tensorflow as tf
from .block import Block
from .pe import PositionalEncodings
from .prenet import Prenet
from ..config import Config
class Transformer(tf.keras.Model):
    """Transformer encoder: prenet -> positional encodings -> attention blocks.
    """
    def __init__(self, config: Config):
        """Initializer.
        Args:
            config: transformer configuration.
                channels: int, size of the hidden channels.
                prenet_layers: int, the number of the prenet layers.
                prenet_kernel: int, size of the prenet kernels.
                prenet_dropout: float, dropout rate for prenet.
                block_ffn: int, size of the hidden channels for
                    feed-forward network.
                block_heads: int, the number of the attention heads.
                block_dropout: float, dropout rate for transformer blocks.
                block_num: int, the number of the attention blocks.
        """
        super(Transformer, self).__init__()
        # Input embedding prenet; see Prenet for the layer internals.
        self.prenet = Prenet(
            config.prenet_layers,
            config.channels,
            config.prenet_kernel,
            config.prenet_dropout)
        # Positional encodings added to the prenet output (see PositionalEncodings).
        self.pe = PositionalEncodings(config.channels)
        # Stack of `block_num` identically-configured attention blocks.
        self.blocks = [
            Block(
                config.channels,
                config.block_ffn,
                config.block_heads,
                config.block_dropout)
            for _ in range(config.block_num)]

    def call(self, inputs: tf.Tensor, mask: tf.Tensor) \
            -> Tuple[tf.Tensor, List[tf.Tensor]]:
        """Transform the inputs.
        Args:
            inputs: [tf.float32; [B, T, C]], input tensor.
            mask: [tf.float32; [B, T]], mask tensor.
        Returns:
            x: [tf.float32; [B, T, C]], transformed tensor.
            attn: [tf.Tensor, [tf.float32; [B, K, T, T]]; N], attentions.
        """
        # [B, T, C]
        x = self.prenet(inputs, mask)
        # [B, T, C]; [None] broadcasts the positional code over the batch axis.
        x = x + self.pe(tf.shape(x)[1])[None]
        attn = []
        for block in self.blocks:
            # [B, T, C], [B, K, T, T]
            x, align = block(x, mask)
            # N x [B, K, T, T] -- one attention map per block, collected for the caller.
            attn.append(align)
        # [B, T, C], N x [B, K, T, T]
        return x, attn
| 36.184783 | 78 | 0.619706 |
176160226aaab4a1c2632a248c8487c4f1da78b8 | 33,972 | py | Python | third_party/cython/src/Cython/Compiler/TypeSlots.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 8 | 2016-02-08T11:59:31.000Z | 2020-05-31T15:19:54.000Z | third_party/cython/src/Cython/Compiler/TypeSlots.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 1 | 2018-02-10T21:00:08.000Z | 2018-03-20T05:09:50.000Z | third_party/cython/src/Cython/Compiler/TypeSlots.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 7 | 2016-02-09T09:28:14.000Z | 2020-07-25T19:03:36.000Z | #
# Tables describing slots in the CPython type object
# and associated know-how.
#
import Naming
import PyrexTypes
import StringEncoding
invisible = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__']
class Signature(object):
    # Method slot signature descriptor.
    #
    # Describes the C-level calling convention of a type slot as a pair of
    # format strings (arguments, return value) built from the codes below.
    #
    # has_dummy_arg      boolean  slot takes an unused leading 'self' arg ('-')
    # has_generic_args   boolean  trailing args arrive as *args/**kwargs ('*')
    # fixed_arg_format   string   format codes of the fixed arguments
    # ret_format         string   format code of the return value
    # error_value        string   C value returned to signal an exception
    #
    # The formats are strings made up of the following
    # characters:
    #
    # 'O' Python object
    # 'T' Python object of the type of 'self'
    # 'v' void
    # 'p' void *
    # 'P' void **
    # 'i' int
    # 'b' bint
    # 'I' int *
    # 'l' long
    # 'f' float
    # 'd' double
    # 'h' Py_hash_t
    # 'z' Py_ssize_t
    # 'Z' Py_ssize_t *
    # 's' char *
    # 'S' char **
    # 'r' int used only to signal exception
    # 'B' Py_buffer *
    # '-' dummy 'self' argument (not used)
    # '*' rest of args passed as generic Python
    # arg tuple and kw dict (must be last
    # char in format string)

    # Maps each single-character format code to its Pyrex/C type.
    format_map = {
        'O': PyrexTypes.py_object_type,
        'v': PyrexTypes.c_void_type,
        'p': PyrexTypes.c_void_ptr_type,
        'P': PyrexTypes.c_void_ptr_ptr_type,
        'i': PyrexTypes.c_int_type,
        'b': PyrexTypes.c_bint_type,
        'I': PyrexTypes.c_int_ptr_type,
        'l': PyrexTypes.c_long_type,
        'f': PyrexTypes.c_float_type,
        'd': PyrexTypes.c_double_type,
        'h': PyrexTypes.c_py_hash_t_type,
        'z': PyrexTypes.c_py_ssize_t_type,
        'Z': PyrexTypes.c_py_ssize_t_ptr_type,
        's': PyrexTypes.c_char_ptr_type,
        'S': PyrexTypes.c_char_ptr_ptr_type,
        'r': PyrexTypes.c_returncode_type,
        'B': PyrexTypes.c_py_buffer_ptr_type,
        # 'T', '-' and '*' are handled otherwise
        # and are not looked up in here
    }

    # Reverse mapping: Pyrex/C type -> format code.
    type_to_format_map = dict([(type_, format_)
                               for format_, type_ in format_map.iteritems()])

    # C value returned to signal a Python exception, per return format code.
    # Codes absent from this map cannot signal errors via the return value.
    error_value_map = {
        'O': "NULL",
        'T': "NULL",
        'i': "-1",
        'b': "-1",
        'l': "-1",
        'r': "-1",
        'h': "-1",
        'z': "-1",
    }

    def __init__(self, arg_format, ret_format):
        # Strip the leading '-' (dummy self) and trailing '*' (generic args)
        # markers from the argument format, recording their presence.
        self.has_dummy_arg = 0
        self.has_generic_args = 0
        if arg_format[:1] == '-':
            self.has_dummy_arg = 1
            arg_format = arg_format[1:]
        if arg_format[-1:] == '*':
            self.has_generic_args = 1
            arg_format = arg_format[:-1]
        self.fixed_arg_format = arg_format
        self.ret_format = ret_format
        self.error_value = self.error_value_map.get(ret_format, None)
        # 'r' return values signal errors directly, so no extra check needed.
        self.exception_check = ret_format != 'r' and self.error_value is not None
        self.is_staticmethod = False

    def num_fixed_args(self):
        # Number of fixed (non-generic) arguments.
        return len(self.fixed_arg_format)

    def is_self_arg(self, i):
        # argument is 'self' for methods or 'class' for classmethods
        return self.fixed_arg_format[i] == 'T'

    def returns_self_type(self):
        # return type is same as 'self' argument type
        return self.ret_format == 'T'

    def fixed_arg_type(self, i):
        # Pyrex type of fixed argument i.
        return self.format_map[self.fixed_arg_format[i]]

    def return_type(self):
        # Pyrex type of the return value.
        return self.format_map[self.ret_format]

    def format_from_type(self, arg_type):
        # Format code for a given Pyrex type; all Python object types map to 'O'.
        if arg_type.is_pyobject:
            arg_type = PyrexTypes.py_object_type
        return self.type_to_format_map[arg_type]

    def exception_value(self):
        # C error return value for this signature, or None if none exists.
        return self.error_value_map.get(self.ret_format)

    def function_type(self, self_arg_override=None):
        # Construct a C function type descriptor for this signature.
        # If self_arg_override is given, it replaces the 'T' (self) argument
        # and, when the signature returns 'T', the return type as well.
        args = []
        for i in xrange(self.num_fixed_args()):
            if self_arg_override is not None and self.is_self_arg(i):
                assert isinstance(self_arg_override, PyrexTypes.CFuncTypeArg)
                args.append(self_arg_override)
            else:
                arg_type = self.fixed_arg_type(i)
                args.append(PyrexTypes.CFuncTypeArg("", arg_type, None))
        if self_arg_override is not None and self.returns_self_type():
            ret_type = self_arg_override.type
        else:
            ret_type = self.return_type()
        exc_value = self.exception_value()
        return PyrexTypes.CFuncType(
            ret_type, args, exception_value=exc_value,
            exception_check=self.exception_check)

    def method_flags(self):
        # Return the list of METH_* flag names appropriate for a Python
        # method with this signature, or None if it cannot be exposed
        # through the standard method table conventions.
        if self.ret_format == "O":
            full_args = self.fixed_arg_format
            if self.has_dummy_arg:
                full_args = "O" + full_args
            if full_args in ["O", "T"]:
                if self.has_generic_args:
                    return [method_varargs, method_keywords]
                else:
                    return [method_noargs]
            elif full_args in ["OO", "TO"] and not self.has_generic_args:
                return [method_onearg]

            if self.is_staticmethod:
                return [method_varargs, method_keywords]

        return None
class SlotDescriptor(object):
    # Abstract base class for type slot descriptors.
    #
    # slot_name string Member name of the slot in the type object
    # is_initialised_dynamically Is initialised by code in the module init function
    # is_inherited Is inherited by subtypes (see PyType_Ready())
    # py3 Indicates presence of slot in Python 3
    # py2 Indicates presence of slot in Python 2
    # ifdef Full #ifdef string that slot is wrapped in. Using this causes py3, py2 and flags to be ignored.)

    def __init__(self, slot_name, dynamic=False, inherited=False,
                 py3=True, py2=True, ifdef=None):
        self.slot_name = slot_name
        self.is_initialised_dynamically = dynamic
        self.is_inherited = inherited
        self.ifdef = ifdef
        self.py3 = py3
        self.py2 = py2

    def preprocessor_guard_code(self):
        # Return the C preprocessor "#if ..." line that must wrap this
        # slot's initializer, or None if no guard is needed.
        # An explicit ifdef takes precedence over the py2/py3 flags.
        ifdef = self.ifdef
        py2 = self.py2
        py3 = self.py3
        guard = None
        if ifdef:
            guard = ("#if %s" % ifdef)
        elif not py3 or py3 == '<RESERVED>':
            guard = ("#if PY_MAJOR_VERSION < 3")
        elif not py2:
            guard = ("#if PY_MAJOR_VERSION >= 3")
        return guard

    def generate(self, scope, code):
        # Emit this slot's initializer line(s) into the static type object
        # definition for the extension type described by `scope`.
        end_pypy_guard = False
        if self.is_initialised_dynamically:
            value = "0"
        else:
            value = self.slot_code(scope)
            if value == "0" and self.is_inherited:
                # PyPy currently has a broken PyType_Ready() that fails to
                # inherit some slots. To work around this, we explicitly
                # set inherited slots here, but only in PyPy since CPython
                # handles this better than we do.
                inherited_value = value
                current_scope = scope
                # Walk up the base-type chain until a non-NULL slot is found.
                while (inherited_value == "0"
                       and current_scope.parent_type
                       and current_scope.parent_type.base_type
                       and current_scope.parent_type.base_type.scope):
                    current_scope = current_scope.parent_type.base_type.scope
                    inherited_value = self.slot_code(current_scope)
                if inherited_value != "0":
                    code.putln("#if CYTHON_COMPILING_IN_PYPY")
                    code.putln("%s, /*%s*/" % (inherited_value, self.slot_name))
                    code.putln("#else")
                    end_pypy_guard = True

        preprocessor_guard = self.preprocessor_guard_code()
        if preprocessor_guard:
            code.putln(preprocessor_guard)
        code.putln("%s, /*%s*/" % (value, self.slot_name))
        if self.py3 == '<RESERVED>':
            # Slot exists in Py2 but is a reserved placeholder in Py3.
            code.putln("#else")
            code.putln("0, /*reserved*/")
        if preprocessor_guard:
            code.putln("#endif")
        if end_pypy_guard:
            code.putln("#endif")

    # Some C implementations have trouble statically
    # initialising a global with a pointer to an extern
    # function, so we initialise some of the type slots
    # in the module init function instead.

    def generate_dynamic_init_code(self, scope, code):
        # Emit the module-init-time assignment for dynamically
        # initialised slots (no-op for statically initialised ones).
        if self.is_initialised_dynamically:
            value = self.slot_code(scope)
            if value != "0":
                code.putln("%s.%s = %s;" % (
                    scope.parent_type.typeobj_cname,
                    self.slot_name,
                    value
                    )
                )
class FixedSlot(SlotDescriptor):
    """Type slot descriptor whose generated value is a fixed string.

    The `value` attribute holds the literal C expression that is emitted
    for this slot, independently of the scope being generated.
    """

    def __init__(self, slot_name, value, py3=True, py2=True, ifdef=None):
        SlotDescriptor.__init__(
            self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
        self.value = value

    def slot_code(self, scope):
        # The scope is irrelevant here: the slot always emits the same code.
        return self.value
class EmptySlot(FixedSlot):
    """Type slot descriptor whose value is always NULL ("0")."""

    def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
        FixedSlot.__init__(
            self, slot_name, "0", py3=py3, py2=py2, ifdef=ifdef)
class MethodSlot(SlotDescriptor):
    # Type slot descriptor for a user-definable method.
    #
    # signature Signature
    # method_name string The __xxx__ name of the method
    # alternatives [string] Alternative list of __xxx__ names for the method
    #
    # NOTE: constructing a MethodSlot registers it (and each alternative
    # name) in the module-global `method_name_to_slot` mapping.

    def __init__(self, signature, slot_name, method_name, fallback=None,
                 py3=True, py2=True, ifdef=None, inherited=True):
        SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2,
                                ifdef=ifdef, inherited=inherited)
        self.signature = signature
        self.slot_name = slot_name
        self.method_name = method_name
        self.alternatives = []
        method_name_to_slot[method_name] = self
        #
        if fallback:
            self.alternatives.append(fallback)
        # py2/py3 may be (slot_name, method_name) tuples naming a
        # version-specific alternative method (e.g. __bool__ for nb_bool).
        for alt in (self.py2, self.py3):
            if isinstance(alt, (tuple, list)):
                slot_name, method_name = alt
                self.alternatives.append(method_name)
                method_name_to_slot[method_name] = self

    def slot_code(self, scope):
        # Prefer the primary method name; fall back to the registered
        # alternatives in order, then to NULL.
        entry = scope.lookup_here(self.method_name)
        if entry and entry.func_cname:
            return entry.func_cname
        for method_name in self.alternatives:
            entry = scope.lookup_here(method_name)
            if entry and entry.func_cname:
                return entry.func_cname
        return "0"
class InternalMethodSlot(SlotDescriptor):
    """Type slot descriptor for a method which is always synthesized by Cython.

    The generated function name is derived from the slot name through the
    scope's internal name mangling.
    """

    def __init__(self, slot_name, **kargs):
        SlotDescriptor.__init__(self, slot_name, **kargs)

    def slot_code(self, scope):
        return scope.mangle_internal(self.slot_name)
class GCDependentSlot(InternalMethodSlot):
    # Descriptor for a slot whose value depends on whether
    # the type participates in GC.

    def __init__(self, slot_name, **kargs):
        InternalMethodSlot.__init__(self, slot_name, **kargs)

    def slot_code(self, scope):
        # Non-GC types get a NULL slot.
        if not scope.needs_gc():
            return "0"
        if not scope.has_cyclic_pyobject_attrs:
            # if the type does not have GC relevant object attributes, it can
            # delegate GC methods to its parent - iff the parent functions
            # are defined in the same module
            parent_type_scope = scope.parent_type.base_type.scope
            if scope.parent_scope is parent_type_scope.parent_scope:
                entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
                if entry.visibility != 'extern':
                    # Recurse: the parent may itself delegate further up.
                    return self.slot_code(parent_type_scope)
        return InternalMethodSlot.slot_code(self, scope)
class GCClearReferencesSlot(GCDependentSlot):
    """tp_clear descriptor: types that do not need tp_clear get a NULL slot."""

    def slot_code(self, scope):
        if not scope.needs_tp_clear():
            return "0"
        return GCDependentSlot.slot_code(self, scope)
class ConstructorSlot(InternalMethodSlot):
    # Descriptor for tp_new and tp_dealloc.
    #
    # method string  name of the user-level special method (__cinit__/__dealloc__)

    def __init__(self, slot_name, method, **kargs):
        InternalMethodSlot.__init__(self, slot_name, **kargs)
        self.method = method

    def slot_code(self, scope):
        # tp_new is never delegated; tp_dealloc may be, under the
        # conditions below.
        if (self.slot_name != 'tp_new'
                and scope.parent_type.base_type
                and not scope.has_pyobject_attrs
                and not scope.has_memoryview_attrs
                and not scope.lookup_here(self.method)):
            # if the type does not have object attributes, it can
            # delegate GC methods to its parent - iff the parent
            # functions are defined in the same module
            parent_type_scope = scope.parent_type.base_type.scope
            if scope.parent_scope is parent_type_scope.parent_scope:
                entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
                if entry.visibility != 'extern':
                    # Recurse: the parent may delegate further up the chain.
                    return self.slot_code(parent_type_scope)
        return InternalMethodSlot.slot_code(self, scope)
class SyntheticSlot(InternalMethodSlot):
    """Slot filled by a Cython-synthesized dispatcher method.

    The synthesized method dispatches to one or more user-defined special
    methods depending on its arguments.  If the scope defines none of the
    relevant methods, no dispatcher is synthesized and `default_value`
    is placed in the slot instead.
    """

    def __init__(self, slot_name, user_methods, default_value, **kargs):
        InternalMethodSlot.__init__(self, slot_name, **kargs)
        self.user_methods = user_methods
        self.default_value = default_value

    def slot_code(self, scope):
        if not scope.defines_any(self.user_methods):
            return self.default_value
        return InternalMethodSlot.slot_code(self, scope)
class TypeFlagsSlot(SlotDescriptor):
    # Descriptor for the type flags slot.
    #
    # Builds the C expression for tp_flags from Py_TPFLAGS_DEFAULT plus
    # per-type adjustments (the string is emitted verbatim into C code).

    def slot_code(self, scope):
        value = "Py_TPFLAGS_DEFAULT"
        if scope.directives['type_version_tag']:
            # it's not in 'Py_TPFLAGS_DEFAULT' in Py2
            value += "|Py_TPFLAGS_HAVE_VERSION_TAG"
        else:
            # it's enabled in 'Py_TPFLAGS_DEFAULT' in Py3, so mask it out
            value = "(%s&~Py_TPFLAGS_HAVE_VERSION_TAG)" % value
        value += "|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER"
        if not scope.parent_type.is_final_type:
            # Final types cannot be subclassed, so omit BASETYPE.
            value += "|Py_TPFLAGS_BASETYPE"
        if scope.needs_gc():
            value += "|Py_TPFLAGS_HAVE_GC"
        return value
class DocStringSlot(SlotDescriptor):
    """Descriptor for the docstring (tp_doc) slot."""

    def slot_code(self, scope):
        if scope.doc is None:
            return "0"
        # Encode the docstring to bytes before escaping it for C source.
        if scope.doc.is_unicode:
            doc = scope.doc.utf8encode()
        else:
            doc = scope.doc.byteencode()
        return '__Pyx_DOCSTR("%s")' % StringEncoding.escape_byte_string(doc)
class SuiteSlot(SlotDescriptor):
    # Descriptor for a substructure of the type object
    # (e.g. PyNumberMethods, PySequenceMethods).
    #
    # sub_slots [SlotDescriptor]  the slots inside the substructure
    #
    # NOTE: constructing a SuiteSlot registers it in the module-global
    # `substructures` list.

    def __init__(self, sub_slots, slot_type, slot_name):
        SlotDescriptor.__init__(self, slot_name)
        self.sub_slots = sub_slots
        self.slot_type = slot_type
        substructures.append(self)

    def is_empty(self, scope):
        # True if every contained slot would be NULL for this scope.
        for slot in self.sub_slots:
            if slot.slot_code(scope) != "0":
                return False
        return True

    def substructure_cname(self, scope):
        # C name of the generated static substructure variable.
        return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)

    def slot_code(self, scope):
        # Point the type object at the substructure, or NULL if empty.
        if not self.is_empty(scope):
            return "&%s" % self.substructure_cname(scope)
        return "0"

    def generate_substructure(self, scope, code):
        # Emit the static substructure definition (skipped when empty,
        # matching the NULL emitted by slot_code).
        if not self.is_empty(scope):
            code.putln("")
            code.putln(
                "static %s %s = {" % (
                    self.slot_type,
                    self.substructure_cname(scope)))
            for slot in self.sub_slots:
                slot.generate(scope, code)
            code.putln("};")
substructures = [] # List of all SuiteSlot instances
class MethodTableSlot(SlotDescriptor):
    """Descriptor for the method table slot (tp_methods)."""

    def slot_code(self, scope):
        if not scope.pyfunc_entries:
            # No Python-visible methods: emit a NULL table.
            return "0"
        return scope.method_table_cname
class MemberTableSlot(SlotDescriptor):
    """Descriptor for the table of Python-accessible attributes (tp_members).

    Always emits NULL here.
    """

    def slot_code(self, scope):
        return "0"
class GetSetSlot(SlotDescriptor):
    """Descriptor for the attribute get/set method table (tp_getset)."""

    def slot_code(self, scope):
        if not scope.property_entries:
            # No properties: emit a NULL getset table.
            return "0"
        return scope.getset_table_cname
class BaseClassSlot(SlotDescriptor):
    # Slot descriptor for the base class slot (tp_base).
    #
    # Always initialised dynamically (in the module init function),
    # because the base type object is not known at C compile time.

    def __init__(self, name):
        SlotDescriptor.__init__(self, name, dynamic = 1)

    def generate_dynamic_init_code(self, scope, code):
        base_type = scope.parent_type.base_type
        if base_type:
            code.putln("%s.%s = %s;" % (
                scope.parent_type.typeobj_cname,
                self.slot_name,
                base_type.typeptr_cname))
# The following dictionary maps __xxx__ method names to slot descriptors.
method_name_to_slot = {}
## The following slots are (or could be) initialised with an
## extern function pointer.
#
#slots_initialised_from_extern = (
# "tp_free",
#)
#------------------------------------------------------------------------------------------
#
# Utility functions for accessing slot table data structures
#
#------------------------------------------------------------------------------------------
def get_special_method_signature(name):
    """Return the Signature of the special method `name`, or None.

    `name` is a __xxx__ method name; non-special names yield None.
    """
    slot = method_name_to_slot.get(name)
    return slot.signature if slot else None
def get_property_accessor_signature(name):
    """Return the accessor Signature for an extension-type property, or None."""
    return property_accessor_signatures.get(name, None)
def get_base_slot_function(scope, slot):
    # Returns the function implementing this slot in the baseclass.
    # This is useful for enabling the compiler to optimize calls
    # that recursively climb the class hierarchy.
    # Returns None when the base type lives in a different module or is
    # extern, or when the base slot is NULL.
    base_type = scope.parent_type.base_type
    if scope.parent_scope is base_type.scope.parent_scope:
        parent_slot = slot.slot_code(base_type.scope)
        if parent_slot != '0':
            entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
            if entry.visibility != 'extern':
                return parent_slot
    return None
def get_slot_function(scope, slot):
    """Return the code implementing `slot` for `scope`'s type, or None.

    Only locally defined (non-extern) implementations are returned, which
    lets the compiler call the slot function directly.
    """
    code = slot.slot_code(scope)
    if code == '0':
        return None
    entry = scope.parent_scope.lookup_here(scope.parent_type.name)
    if entry.visibility == 'extern':
        return None
    return code
#------------------------------------------------------------------------------------------
#
# Signatures for generic Python functions and methods.
#
#------------------------------------------------------------------------------------------
pyfunction_signature = Signature("-*", "O")
pymethod_signature = Signature("T*", "O")
#------------------------------------------------------------------------------------------
#
# Signatures for simple Python functions.
#
#------------------------------------------------------------------------------------------
pyfunction_noargs = Signature("-", "O")
pyfunction_onearg = Signature("-O", "O")
#------------------------------------------------------------------------------------------
#
# Signatures for the various kinds of function that
# can appear in the type object and its substructures.
#
#------------------------------------------------------------------------------------------
unaryfunc = Signature("T", "O") # typedef PyObject * (*unaryfunc)(PyObject *);
binaryfunc = Signature("OO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ibinaryfunc = Signature("TO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ternaryfunc = Signature("OOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
iternaryfunc = Signature("TOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
callfunc = Signature("T*", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
inquiry = Signature("T", "i") # typedef int (*inquiry)(PyObject *);
lenfunc = Signature("T", "z") # typedef Py_ssize_t (*lenfunc)(PyObject *);
# typedef int (*coercion)(PyObject **, PyObject **);
intargfunc = Signature("Ti", "O") # typedef PyObject *(*intargfunc)(PyObject *, int);
ssizeargfunc = Signature("Tz", "O") # typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t);
intintargfunc = Signature("Tii", "O") # typedef PyObject *(*intintargfunc)(PyObject *, int, int);
ssizessizeargfunc = Signature("Tzz", "O") # typedef PyObject *(*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t);
intobjargproc = Signature("TiO", 'r') # typedef int(*intobjargproc)(PyObject *, int, PyObject *);
ssizeobjargproc = Signature("TzO", 'r') # typedef int(*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *);
intintobjargproc = Signature("TiiO", 'r') # typedef int(*intintobjargproc)(PyObject *, int, int, PyObject *);
ssizessizeobjargproc = Signature("TzzO", 'r') # typedef int(*ssizessizeobjargproc)(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *);
intintargproc = Signature("Tii", 'r')
ssizessizeargproc = Signature("Tzz", 'r')
objargfunc = Signature("TO", "O")
objobjargproc = Signature("TOO", 'r') # typedef int (*objobjargproc)(PyObject *, PyObject *, PyObject *);
readbufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*readbufferproc)(PyObject *, Py_ssize_t, void **);
writebufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*writebufferproc)(PyObject *, Py_ssize_t, void **);
segcountproc = Signature("TZ", "z") # typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *);
charbufferproc = Signature("TzS", "z") # typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **);
objargproc = Signature("TO", 'r') # typedef int (*objobjproc)(PyObject *, PyObject *);
# typedef int (*visitproc)(PyObject *, void *);
# typedef int (*traverseproc)(PyObject *, visitproc, void *);
destructor = Signature("T", "v") # typedef void (*destructor)(PyObject *);
# printfunc = Signature("TFi", 'r') # typedef int (*printfunc)(PyObject *, FILE *, int);
# typedef PyObject *(*getattrfunc)(PyObject *, char *);
getattrofunc = Signature("TO", "O") # typedef PyObject *(*getattrofunc)(PyObject *, PyObject *);
# typedef int (*setattrfunc)(PyObject *, char *, PyObject *);
setattrofunc = Signature("TOO", 'r') # typedef int (*setattrofunc)(PyObject *, PyObject *, PyObject *);
delattrofunc = Signature("TO", 'r')
cmpfunc = Signature("TO", "i") # typedef int (*cmpfunc)(PyObject *, PyObject *);
reprfunc = Signature("T", "O") # typedef PyObject *(*reprfunc)(PyObject *);
hashfunc = Signature("T", "h") # typedef Py_hash_t (*hashfunc)(PyObject *);
# typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
richcmpfunc = Signature("OOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
getiterfunc = Signature("T", "O") # typedef PyObject *(*getiterfunc) (PyObject *);
iternextfunc = Signature("T", "O") # typedef PyObject *(*iternextfunc) (PyObject *);
descrgetfunc = Signature("TOO", "O") # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
descrsetfunc = Signature("TOO", 'r') # typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *);
descrdelfunc = Signature("TO", 'r')
initproc = Signature("T*", 'r') # typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
# typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
# typedef PyObject *(*allocfunc)(struct _typeobject *, int);
getbufferproc = Signature("TBi", "r") # typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
releasebufferproc = Signature("TB", "v") # typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
#------------------------------------------------------------------------------------------
#
# Signatures for accessor methods of properties.
#
#------------------------------------------------------------------------------------------
property_accessor_signatures = {
'__get__': Signature("T", "O"),
'__set__': Signature("TO", 'r'),
'__del__': Signature("T", 'r')
}
#------------------------------------------------------------------------------------------
#
# Descriptor tables for the slots of the various type object
# substructures, in the order they appear in the structure.
#
#------------------------------------------------------------------------------------------
PyNumberMethods = (
MethodSlot(binaryfunc, "nb_add", "__add__"),
MethodSlot(binaryfunc, "nb_subtract", "__sub__"),
MethodSlot(binaryfunc, "nb_multiply", "__mul__"),
MethodSlot(binaryfunc, "nb_divide", "__div__", py3 = False),
MethodSlot(binaryfunc, "nb_remainder", "__mod__"),
MethodSlot(binaryfunc, "nb_divmod", "__divmod__"),
MethodSlot(ternaryfunc, "nb_power", "__pow__"),
MethodSlot(unaryfunc, "nb_negative", "__neg__"),
MethodSlot(unaryfunc, "nb_positive", "__pos__"),
MethodSlot(unaryfunc, "nb_absolute", "__abs__"),
MethodSlot(inquiry, "nb_nonzero", "__nonzero__", py3 = ("nb_bool", "__bool__")),
MethodSlot(unaryfunc, "nb_invert", "__invert__"),
MethodSlot(binaryfunc, "nb_lshift", "__lshift__"),
MethodSlot(binaryfunc, "nb_rshift", "__rshift__"),
MethodSlot(binaryfunc, "nb_and", "__and__"),
MethodSlot(binaryfunc, "nb_xor", "__xor__"),
MethodSlot(binaryfunc, "nb_or", "__or__"),
EmptySlot("nb_coerce", py3 = False),
MethodSlot(unaryfunc, "nb_int", "__int__", fallback="__long__"),
MethodSlot(unaryfunc, "nb_long", "__long__", fallback="__int__", py3 = "<RESERVED>"),
MethodSlot(unaryfunc, "nb_float", "__float__"),
MethodSlot(unaryfunc, "nb_oct", "__oct__", py3 = False),
MethodSlot(unaryfunc, "nb_hex", "__hex__", py3 = False),
# Added in release 2.0
MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"),
MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"),
MethodSlot(ibinaryfunc, "nb_inplace_multiply", "__imul__"),
MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__", py3 = False),
MethodSlot(ibinaryfunc, "nb_inplace_remainder", "__imod__"),
MethodSlot(ibinaryfunc, "nb_inplace_power", "__ipow__"), # actually ternaryfunc!!!
MethodSlot(ibinaryfunc, "nb_inplace_lshift", "__ilshift__"),
MethodSlot(ibinaryfunc, "nb_inplace_rshift", "__irshift__"),
MethodSlot(ibinaryfunc, "nb_inplace_and", "__iand__"),
MethodSlot(ibinaryfunc, "nb_inplace_xor", "__ixor__"),
MethodSlot(ibinaryfunc, "nb_inplace_or", "__ior__"),
# Added in release 2.2
# The following require the Py_TPFLAGS_HAVE_CLASS flag
MethodSlot(binaryfunc, "nb_floor_divide", "__floordiv__"),
MethodSlot(binaryfunc, "nb_true_divide", "__truediv__"),
MethodSlot(ibinaryfunc, "nb_inplace_floor_divide", "__ifloordiv__"),
MethodSlot(ibinaryfunc, "nb_inplace_true_divide", "__itruediv__"),
# Added in release 2.5
MethodSlot(unaryfunc, "nb_index", "__index__", ifdef = "PY_VERSION_HEX >= 0x02050000")
)
PySequenceMethods = (
MethodSlot(lenfunc, "sq_length", "__len__"),
EmptySlot("sq_concat"), # nb_add used instead
EmptySlot("sq_repeat"), # nb_multiply used instead
SyntheticSlot("sq_item", ["__getitem__"], "0"), #EmptySlot("sq_item"), # mp_subscript used instead
MethodSlot(ssizessizeargfunc, "sq_slice", "__getslice__"),
EmptySlot("sq_ass_item"), # mp_ass_subscript used instead
SyntheticSlot("sq_ass_slice", ["__setslice__", "__delslice__"], "0"),
MethodSlot(cmpfunc, "sq_contains", "__contains__"),
EmptySlot("sq_inplace_concat"), # nb_inplace_add used instead
EmptySlot("sq_inplace_repeat"), # nb_inplace_multiply used instead
)
PyMappingMethods = (
MethodSlot(lenfunc, "mp_length", "__len__"),
MethodSlot(objargfunc, "mp_subscript", "__getitem__"),
SyntheticSlot("mp_ass_subscript", ["__setitem__", "__delitem__"], "0"),
)
PyBufferProcs = (
MethodSlot(readbufferproc, "bf_getreadbuffer", "__getreadbuffer__", py3 = False),
MethodSlot(writebufferproc, "bf_getwritebuffer", "__getwritebuffer__", py3 = False),
MethodSlot(segcountproc, "bf_getsegcount", "__getsegcount__", py3 = False),
MethodSlot(charbufferproc, "bf_getcharbuffer", "__getcharbuffer__", py3 = False),
MethodSlot(getbufferproc, "bf_getbuffer", "__getbuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000"),
MethodSlot(releasebufferproc, "bf_releasebuffer", "__releasebuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000")
)
#------------------------------------------------------------------------------------------
#
# The main slot table. This table contains descriptors for all the
# top-level type slots, beginning with tp_dealloc, in the order they
# appear in the type object.
#
#------------------------------------------------------------------------------------------
slot_table = (
ConstructorSlot("tp_dealloc", '__dealloc__'),
EmptySlot("tp_print"), #MethodSlot(printfunc, "tp_print", "__print__"),
EmptySlot("tp_getattr"),
EmptySlot("tp_setattr"),
MethodSlot(cmpfunc, "tp_compare", "__cmp__", py3 = '<RESERVED>'),
MethodSlot(reprfunc, "tp_repr", "__repr__"),
SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"),
SuiteSlot(PySequenceMethods, "PySequenceMethods", "tp_as_sequence"),
SuiteSlot(PyMappingMethods, "PyMappingMethods", "tp_as_mapping"),
MethodSlot(hashfunc, "tp_hash", "__hash__", inherited=False), # Py3 checks for __richcmp__
MethodSlot(callfunc, "tp_call", "__call__"),
MethodSlot(reprfunc, "tp_str", "__str__"),
SyntheticSlot("tp_getattro", ["__getattr__","__getattribute__"], "0"), #"PyObject_GenericGetAttr"),
SyntheticSlot("tp_setattro", ["__setattr__", "__delattr__"], "0"), #"PyObject_GenericSetAttr"),
SuiteSlot(PyBufferProcs, "PyBufferProcs", "tp_as_buffer"),
TypeFlagsSlot("tp_flags"),
DocStringSlot("tp_doc"),
GCDependentSlot("tp_traverse"),
GCClearReferencesSlot("tp_clear"),
# Later -- synthesize a method to split into separate ops?
MethodSlot(richcmpfunc, "tp_richcompare", "__richcmp__", inherited=False), # Py3 checks for __hash__
EmptySlot("tp_weaklistoffset"),
MethodSlot(getiterfunc, "tp_iter", "__iter__"),
MethodSlot(iternextfunc, "tp_iternext", "__next__"),
MethodTableSlot("tp_methods"),
MemberTableSlot("tp_members"),
GetSetSlot("tp_getset"),
BaseClassSlot("tp_base"), #EmptySlot("tp_base"),
EmptySlot("tp_dict"),
SyntheticSlot("tp_descr_get", ["__get__"], "0"),
SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"),
EmptySlot("tp_dictoffset"),
MethodSlot(initproc, "tp_init", "__init__"),
EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"),
InternalMethodSlot("tp_new"),
EmptySlot("tp_free"),
EmptySlot("tp_is_gc"),
EmptySlot("tp_bases"),
EmptySlot("tp_mro"),
EmptySlot("tp_cache"),
EmptySlot("tp_subclasses"),
EmptySlot("tp_weaklist"),
EmptySlot("tp_del"),
EmptySlot("tp_version_tag", ifdef="PY_VERSION_HEX >= 0x02060000"),
EmptySlot("tp_finalize", ifdef="PY_VERSION_HEX >= 0x030400a1"),
)
#------------------------------------------------------------------------------------------
#
# Descriptors for special methods which don't appear directly
# in the type object or its substructures. These methods are
# called from slot functions synthesized by Cython.
#
#------------------------------------------------------------------------------------------
MethodSlot(initproc, "", "__cinit__")
MethodSlot(destructor, "", "__dealloc__")
MethodSlot(objobjargproc, "", "__setitem__")
MethodSlot(objargproc, "", "__delitem__")
MethodSlot(ssizessizeobjargproc, "", "__setslice__")
MethodSlot(ssizessizeargproc, "", "__delslice__")
MethodSlot(getattrofunc, "", "__getattr__")
MethodSlot(setattrofunc, "", "__setattr__")
MethodSlot(delattrofunc, "", "__delattr__")
MethodSlot(descrgetfunc, "", "__get__")
MethodSlot(descrsetfunc, "", "__set__")
MethodSlot(descrdelfunc, "", "__delete__")
# Method flags for python-exposed methods.
method_noargs = "METH_NOARGS"
method_onearg = "METH_O"
method_varargs = "METH_VARARGS"
method_keywords = "METH_KEYWORDS"
method_coexist = "METH_COEXIST"
| 40.539379 | 133 | 0.607 |
8ff7851d1eb838ce12779d37081a37ba235ffab9 | 1,499 | py | Python | src/gamesbyexample/middleletterscrambler.py | skinzor/PythonStdioGames | 75f27af19d7f1d555b0fd85fbcf215f07660b93f | [
"MIT"
] | 1 | 2019-11-30T17:04:09.000Z | 2019-11-30T17:04:09.000Z | src/gamesbyexample/middleletterscrambler.py | skinzor/PythonStdioGames | 75f27af19d7f1d555b0fd85fbcf215f07660b93f | [
"MIT"
] | null | null | null | src/gamesbyexample/middleletterscrambler.py | skinzor/PythonStdioGames | 75f27af19d7f1d555b0fd85fbcf215f07660b93f | [
"MIT"
] | null | null | null | # Middle Letter Scrambler, by Al Sweigart al@inventwithpython.com
# Scrambles the middle letters of words, but not the first and last letters.
import random
try:
import pyperclip
except ImportError:
pass # It's not a big deal if pyperclip is not installed.
def englishToMiddleLetterScramble(message):
    """Return message with each word's interior letters shuffled.

    Words of three or fewer letters are left untouched; longer words keep
    their first and last letter in place while the middle is randomized.
    """
    if message == '':
        return ''  # Nothing to scramble in an empty message.
    scrambledWords = []
    for word in message.split():
        if len(word) <= 3:
            # Too short to have a scrambleable interior.
            scrambledWords.append(word)
            continue
        interior = list(word[1:-1])
        random.shuffle(interior)
        scrambledWords.append(word[0] + ''.join(interior) + word[-1])
    return ' '.join(scrambledWords)
def main():
    """Prompt for a message, print its scrambled form, and try to copy it
    to the clipboard (silently skipped if pyperclip is unavailable)."""
    print('''Middle Letter Scrambler
By Al Sweigart al@inventwithpython.com
Your biran can pbablroy raed sambcerld wrdos as lnog as the fsirt and
last lteters are in the rihgt pcale.
Enter your message:''')
    scrambled = englishToMiddleLetterScramble(input())
    print()
    print(scrambled)
    # NOTE(review): bare except also hides errors other than a missing
    # pyperclip module; the intent here is best-effort clipboard copy.
    try:
        pyperclip.copy(scrambled)
        print('(Copied scrambled text to clipboard.)')
    except:
        pass # Do nothing if pyperclip wasn't installed.
if __name__ == '__main__':
main() | 28.826923 | 85 | 0.661775 |
0719e908e2e913b1ac1756c93c3d919e8c1226f1 | 7,567 | py | Python | e2efold_rt/e2efold/data_generator.py | Lucmon/TopologyDetect | 79607f3ce39a1ee6ded41b2500629065cf1cfe51 | [
"Apache-2.0"
] | null | null | null | e2efold_rt/e2efold/data_generator.py | Lucmon/TopologyDetect | 79607f3ce39a1ee6ded41b2500629065cf1cfe51 | [
"Apache-2.0"
] | null | null | null | e2efold_rt/e2efold/data_generator.py | Lucmon/TopologyDetect | 79607f3ce39a1ee6ded41b2500629065cf1cfe51 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
import _pickle as cPickle
import collections
from e2efold.common.utils import *
from e2efold.common.long_seq_pre_post_process import *
from multiprocessing import Pool
from torch.utils import data
from collections import Counter
from random import shuffle
import torch
class RNASSDataGenerator(object):
    """Loads a pickled RNA secondary-structure split and serves batches/samples.

    Expects ``<data_dir>/<split>.pickle`` containing a list of RNA_SS_data-like
    tuples (seq, ss_label, length, name, pairs) — see load_data().
    """
    def __init__(self, data_dir, split, upsampling=False):
        # data_dir: directory containing '<split>.pickle'; split: file stem.
        # upsampling: when True, rebalance RNA families after loading.
        self.data_dir = data_dir
        self.split = split
        self.upsampling = upsampling
        # Load vocab explicitly when needed
        self.load_data()
        # Reset batch pointer to zero
        self.batch_pointer = 0
    def load_data(self):
        """Read the pickle for this split and precompute per-sample arrays."""
        # NOTE(review): the Pool is never closed/joined, so worker processes
        # leak each time load_data() runs.
        p = Pool()
        data_dir = self.data_dir
        # Load the current split
        RNA_SS_data = collections.namedtuple('RNA_SS_data',
            'seq ss_label length name pairs')
        print(self.data_dir)
        print(self.split)
        with open(os.path.join(data_dir, '%s.pickle' % self.split), 'rb') as f:
            self.data = cPickle.load(f)
        if self.upsampling:
            self.data = self.upsampling_data()
        # Split the tuples into parallel arrays for fast indexed access.
        self.data_x = np.array([instance[0] for instance in self.data])
        self.data_y = np.array([instance[1] for instance in self.data])
        self.pairs = np.array([instance[-1] for instance in self.data])
        self.seq_length = np.array([instance[2] for instance in self.data])
        self.len = len(self.data)
        self.seq = list(p.map(encoding2seq, self.data_x))
        # All encodings are padded to the same length; use the first as reference.
        self.seq_max_len = len(self.data_x[0])
        # self.matrix_rep = np.array(list(p.map(creatmat, self.seq)))
        # self.matrix_rep = np.zeros([self.len, len(self.data_x[0]), len(self.data_x[0])])
    def upsampling_data(self):
        """Rebalance samples across RNA families by random re-sampling.

        Family identity is taken from the third path component of each
        sample's name; the index groups below are hard-coded family slots.
        """
        name = [instance.name for instance in self.data]
        d_type = np.array(list(map(lambda x: x.split('/')[2], name)))
        data = np.array(self.data)
        max_num = max(Counter(list(d_type)).values())
        data_list = list()
        for t in sorted(list(np.unique(d_type))):
            index = np.where(d_type==t)[0]
            data_list.append(data[index])
        final_d_list= list()
        # for d in data_list:
        #     index = np.random.choice(d.shape[0], max_num)
        #     final_d_list += list(d[index])
        # Families 0,1,5,7: sample up to the majority count.
        for i in [0, 1, 5, 7]:
            d = data_list[i]
            index = np.random.choice(d.shape[0], max_num)
            final_d_list += list(d[index])
        # Families 2,3,4: oversample at twice the majority count.
        for i in [2,3,4]:
            d = data_list[i]
            index = np.random.choice(d.shape[0], max_num*2)
            final_d_list += list(d[index])
        # Family 6: undersample at half the majority count.
        d = data_list[6]
        index = np.random.choice(d.shape[0], int(max_num/2))
        final_d_list += list(d[index])
        shuffle(final_d_list)
        return final_d_list
    def next_batch(self, batch_size):
        """Generator yielding one (x, y, seq_len) batch and advancing the pointer."""
        bp = self.batch_pointer
        # This will return a smaller size if not sufficient
        # The user must pad the batch in an external API
        # Or write a TF module with variable batch size
        batch_x = self.data_x[bp:bp + batch_size]
        batch_y = self.data_y[bp:bp + batch_size]
        batch_seq_len = self.seq_length[bp:bp + batch_size]
        self.batch_pointer += batch_size
        if self.batch_pointer >= len(self.data_x):
            self.batch_pointer = 0
        yield batch_x, batch_y, batch_seq_len
    def pairs2map(self, pairs):
        """Build a (seq_max_len, seq_max_len) 0/1 contact matrix from pair indices."""
        seq_len = self.seq_max_len
        contact = np.zeros([seq_len, seq_len])
        for pair in pairs:
            contact[pair[0], pair[1]] = 1
        return contact
    def next_batch_SL(self, batch_size):
        """Generator yielding (contact, seq, matrix_rep) for one batch.

        matrix_rep is a zero placeholder with the contact map's shape.
        """
        p = Pool()
        bp = self.batch_pointer
        # This will return a smaller size if not sufficient
        # The user must pad the batch in an external API
        # Or write a TF module with variable batch size
        data_y = self.data_y[bp:bp + batch_size]
        data_seq = self.data_x[bp:bp + batch_size]
        data_pairs = self.pairs[bp:bp + batch_size]
        self.batch_pointer += batch_size
        if self.batch_pointer >= len(self.data_x):
            self.batch_pointer = 0
        contact = np.array(list(map(self.pairs2map, data_pairs)))
        matrix_rep = np.zeros(contact.shape)
        yield contact, data_seq, matrix_rep
    def get_one_sample(self, index):
        """Return (contact, seq_encoding, zero matrix_rep, seq_len) for one sample."""
        # This will return a smaller size if not sufficient
        # The user must pad the batch in an external API
        # Or write a TF module with variable batch size
        data_y = self.data_y[index]
        data_seq = self.data_x[index]
        data_len = self.seq_length[index]
        data_pair = self.pairs[index]
        contact= self.pairs2map(data_pair)
        matrix_rep = np.zeros(contact.shape)
        return contact, data_seq, matrix_rep, data_len
    def random_sample(self, size=1):
        # random sample one RNA
        # return RNA sequence and the ground truth contact map
        index = np.random.randint(self.len, size=size)
        data = list(np.array(self.data)[index])
        data_seq = [instance[0] for instance in data]
        data_stru_prob = [instance[1] for instance in data]
        data_pair = [instance[-1] for instance in data]
        seq = list(map(encoding2seq, data_seq))
        contact = list(map(self.pairs2map, data_pair))
        return contact, seq, data_seq
    def get_one_sample_cdp(self, index):
        """Return the raw (sequence encoding, label) pair at *index*."""
        data_seq = self.data_x[index]
        data_label = self.data_y[index]
        return data_seq, data_label
# using torch data loader to parallel and speed up the data load process
class Dataset(data.Dataset):
    """PyTorch-compatible view over an RNASSDataGenerator-like source.

    The wrapped object must expose a ``len`` attribute and a
    ``get_one_sample(index)`` method.
    """
    def __init__(self, data):
        """Remember the wrapped data source."""
        self.data = data
    def __len__(self):
        """Report how many samples the wrapped source holds."""
        return self.data.len
    def __getitem__(self, index):
        """Delegate sample retrieval to the wrapped source."""
        sample = self.data.get_one_sample(index)
        return sample
class Dataset_1800(data.Dataset):
    """PyTorch Dataset that chunks long (up to 1800-position) sequences.

    Relies on e2efold helpers get_pe / get_chunk_combination / get_chunk_gt
    to split a long sequence and its contact map into overlapping chunks.
    """
    def __init__(self, data):
        # Wrap an RNASSDataGenerator-like source (needs .len / .get_one_sample).
        self.data = data
    def __len__(self):
        'Denotes the total number of samples'
        return self.data.len
    def __getitem__(self, index):
        """Return chunked batches plus the original full-length tensors.

        Returns (seq_embedding_batch, PE_batch, contacts_batch, comb_index,
        seq_embeddings, contacts, seq_lens).
        """
        contacts, seq_embeddings, matrix_reps, seq_lens = self.data.get_one_sample(index)
        # Positional encoding for a fixed max length of 1800 positions.
        PE = get_pe(torch.Tensor([seq_lens]).long(), 1800).numpy()
        PE = torch.Tensor(PE[0]).float()
        small_seqs, comb_index_1 = get_chunk_combination(torch.Tensor(seq_embeddings).float())
        PE_small_seqs, comb_index_2 = get_chunk_combination(PE)
        contacts_b = get_chunk_gt(torch.Tensor(contacts).float(), comb_index_1)
        # Sequence and PE chunking must agree on the chunk index layout.
        assert comb_index_1==comb_index_2
        # NOTE(review): unsqueeze_ mutates the chunk tensors in place while
        # stacking them into batch tensors.
        seq_embedding_batch = torch.cat([seq.unsqueeze_(0) for seq in small_seqs], 0).float()
        PE_batch = torch.cat([pe.unsqueeze_(0) for pe in PE_small_seqs], 0).float()
        contacts_batch = torch.cat([contact.unsqueeze_(0) for contact in contacts_b], 0).float()
        return seq_embedding_batch, PE_batch, contacts_batch, comb_index_1, seq_embeddings, contacts, seq_lens
class Dataset_cdp(data.Dataset):
    """PyTorch-compatible view serving raw (sequence, label) pairs.

    The wrapped object must expose a ``len`` attribute and a
    ``get_one_sample_cdp(index)`` method.
    """
    def __init__(self, data):
        """Remember the wrapped data source."""
        self.data = data
    def __len__(self):
        """Report how many samples the wrapped source holds."""
        return self.data.len
    def __getitem__(self, index):
        """Delegate to the source's cdp-style accessor."""
        pair = self.data.get_one_sample_cdp(index)
        return pair
| 35.525822 | 110 | 0.63658 |
8e75bd93873526b7914377bf9240b0b242293674 | 255 | py | Python | ddtrace/ext/system.py | KDWSS/dd-trace-py | 6d859bec403347f7c1e7efd039210908b562741e | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ddtrace/ext/system.py | KDWSS/dd-trace-py | 6d859bec403347f7c1e7efd039210908b562741e | [
"Apache-2.0",
"BSD-3-Clause"
] | 6 | 2021-06-29T14:58:43.000Z | 2021-12-15T14:14:36.000Z | ddtrace/ext/system.py | KDWSS/dd-trace-py | 6d859bec403347f7c1e7efd039210908b562741e | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | """
Standard system tags
"""
from ddtrace.constants import PID
from ddtrace.utils.deprecation import deprecation
deprecation(
name="ddtrace.ext.system",
message="Use `ddtrace.constants` module instead",
version="1.0.0",
)
__all__ = ["PID"]
| 17 | 53 | 0.709804 |
9cc85ef750e0603375230be2360918982f526570 | 4,108 | py | Python | UNet.py | chenhsiu48/HiDDeN | 0ed98ffb5c4852cc45024169119f7e3732a99044 | [
"MIT"
] | 1 | 2022-03-13T03:07:41.000Z | 2022-03-13T03:07:41.000Z | UNet.py | chenhsiu48/HiDDeN | 0ed98ffb5c4852cc45024169119f7e3732a99044 | [
"MIT"
] | null | null | null | UNet.py | chenhsiu48/HiDDeN | 0ed98ffb5c4852cc45024169119f7e3732a99044 | [
"MIT"
] | 1 | 2021-12-11T08:44:57.000Z | 2021-12-11T08:44:57.000Z | # encoding: utf-8
import functools
import torch
import torch.nn as nn
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
    """U-Net generator assembled recursively from skip-connection blocks.

    With num_downs downsamplings, e.g. num_downs == 7 maps a 128x128 image
    to 1x1 at the bottleneck. Output is scaled by 10/255 when the output
    activation is Tanh (residual-style output), otherwise unscaled.
    """
    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=None, use_dropout=False, output_function=nn.Sigmoid):
        super(UnetGenerator, self).__init__()
        # construct unet structure: build the innermost block first, then
        # wrap it outwards so the final block is the outermost layer.
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        for i in range(num_downs - 5):
            # Intermediate ngf*8 blocks (optionally with dropout).
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer, output_function=output_function)
        self.model = unet_block
        # Remember whether Tanh output was requested so forward() can rescale.
        self.tanh = output_function==nn.Tanh
        if self.tanh:
            self.factor = 10/255
        else:
            self.factor = 1.0
    def forward(self, input):
        # Apply the full U-Net and scale (10/255 for Tanh outputs, else 1.0).
        return self.factor*self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level: downsample, run the submodule, upsample, then
    concatenate the input as a skip connection.

    X ---------------------identity---------------------- X
    |-- downsampling -- |submodule| -- upsampling --|
    The outermost block returns only the model output (no concat).
    """
    def __init__(self, outer_nc, inner_nc, input_nc=None,submodule=None, outermost=False, innermost=False, norm_layer=None, use_dropout=False, output_function=nn.Sigmoid):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # Instance norm has no affine bias, so convs need their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if norm_layer == None:
            use_bias = True
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        uprelu = nn.ReLU(True)
        if norm_layer != None:
            downnorm = norm_layer(inner_nc)
            upnorm = norm_layer(outer_nc)
        if outermost:
            # Outermost: doubled input channels (skip concat), final activation.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            down = [downconv]
            if output_function == nn.Tanh:
                up = [uprelu, upconv, nn.Tanh()]
            else:
                up = [uprelu, upconv, nn.Sigmoid()]
            model = down + [submodule] + up
        elif innermost:
            # Innermost: no submodule, single-width upconv.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [downrelu, downconv]
            if norm_layer == None:
                up = [uprelu, upconv]
            else:
                up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            # Intermediate: doubled input channels from the skip concat.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            if norm_layer == None:
                down = [downrelu, downconv]
                up = [uprelu, upconv]
            else:
                down = [downrelu, downconv, downnorm]
                up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
        self.model = nn.Sequential(*model)
    def forward(self, x):
        # Outermost level returns the raw output; inner levels concatenate
        # the input along the channel axis as the skip connection.
        if self.outermost:
            return self.model(x)
        else:
            return torch.cat([x, self.model(x)], 1)
| 42.350515 | 173 | 0.615141 |
5b6d09645a1159c80d020aba01dc62995005c7ab | 9,342 | py | Python | tests/test_approx.py | jobovy/wendy | 99545871283d55f9fb08e597ed52453dc44051aa | [
"MIT"
] | 14 | 2017-07-21T23:48:56.000Z | 2021-08-15T07:55:29.000Z | tests/test_approx.py | jobovy/wendy | 99545871283d55f9fb08e597ed52453dc44051aa | [
"MIT"
] | 6 | 2017-05-13T00:53:25.000Z | 2020-11-11T02:49:10.000Z | tests/test_approx.py | jobovy/wendy | 99545871283d55f9fb08e597ed52453dc44051aa | [
"MIT"
] | 6 | 2017-11-11T05:40:14.000Z | 2019-04-28T17:54:56.000Z | # test_approx.py: some basic tests of the approximate N-body code
import numpy
import pytest
import wendy
numpy.random.seed(2)
def test_energy_conservation():
    """Energy is conserved over 100 steps for a 3-body problem (approx integrator)."""
    x= numpy.array([-1.1,0.1,1.3])
    v= numpy.array([3.,2.,-5.])
    m= numpy.array([1.,1.,1.])
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=100000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_energy_conservation_unequalmasses():
    """Same as above, but with unequal particle masses."""
    x= numpy.array([-1.1,0.1,1.3])
    v= numpy.array([3.,2.,-5.])
    m= numpy.array([1.,2.,3.])
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=100000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_manyparticles():
    """Energy conservation for a self-gravitating sech^2 disk (default sort)."""
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_manyparticles_mergesort():
    """Same disk setup, exercising the 'merge' sort backend."""
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000,sort='merge')
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_manyparticles_timsort():
    """Same disk setup, exercising the 'tim' sort backend."""
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000,sort='tim')
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_manyparticles_qsort():
    """Same disk setup, exercising the 'qsort' sort backend."""
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000,sort='qsort')
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_manyparticles_parallel():
    """Same disk setup, exercising the 'parallel' sort backend."""
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000,sort='parallel')
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_energy_conservation_sech2disk_notsomanyparticles_parallel():
    """Parallel sort with N=51 so the serial fallback is hit at a different
    recursion level, covering both dst->src and src->dst merge directions."""
    N= 51
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000,sort='parallel')
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_momentum_conservation_unequalmasses():
    """Total momentum is conserved for a 3-body problem with unequal masses."""
    x= numpy.array([-1.1,0.1,1.3])
    v= numpy.array([3.,2.,-5.])
    m= numpy.array([1.,2.,3.])
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000)
    p= wendy.momentum(v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.momentum(tv,m)-p) < 10.**-10., "Momentum not conserved during approximate N-body integration"
        cnt+= 1
    return None
def test_notracermasses():
    """The approximate integrator handles massless (tracer) sheets: half the
    particles get zero mass and energy is still conserved."""
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    m[N//2:]= 0.
    m*= 2.
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration with some tracer particles"
        cnt+= 1
    return None
def test_nleap_error():
    """approx=True without nleap= must raise a ValueError with a clear message."""
    x= numpy.array([-1.,1.])
    v= numpy.array([0.,0.])
    m= numpy.array([1.,1.])
    g= wendy.nbody(x,v,m,2,approx=True)
    with pytest.raises(ValueError) as excinfo:
        tx,tv,ncoll, _= next(g)
    assert str(excinfo.value) == 'When approx is True, the number of leapfrog steps nleap= per output time step needs to be set'
    return None
def test_time():
    """full_output=True also returns the elapsed wall-clock time; it should be small."""
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000,full_output=True)
    tx,tv, time_elapsed= next(g)
    assert time_elapsed < 1., 'More than 1 second elapsed for simple problem'
    return None
def test_againstexact_sech2disk_manyparticles():
    """Exact and approximate integrators agree to 1e-5 on the same disk."""
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=2000)
    ge= wendy.nbody(x,v,m,0.05)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        txe,tve= next(ge)
        assert numpy.all(numpy.fabs(tx-txe) < 10.**-5.), "Exact and approximate N-body give different positions"
        # NOTE(review): this message duplicates the one above; it presumably
        # should say "velocities" since it checks tv against tve.
        assert numpy.all(numpy.fabs(tv-tve) < 10.**-5.), "Exact and approximate N-body give different positions"
        cnt+= 1
    return None
def test_samex():
    """Two particles landing at the exact same position must not break the
    integrator; the middle two are placed so they coincide at the first
    force evaluation."""
    x= numpy.array([-1.1,-2.*0.05/2./10000,0.3*0.05/2./10000,1.3])
    v= numpy.array([3.,2.,-.3,-5.])
    m= numpy.array([1.,1.,1.,1.])
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=10000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 1:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
| 37.518072 | 152 | 0.639264 |
4daf654ac0a6a742706bc4dddb893d1c3d03c0e9 | 2,173 | py | Python | dimod/meta.py | randomir/dimod | 8f636168a620e0dc9d969fb0a813f6b4ba5a755c | [
"Apache-2.0"
] | 1 | 2022-02-01T14:40:05.000Z | 2022-02-01T14:40:05.000Z | dimod/meta.py | randomir/dimod | 8f636168a620e0dc9d969fb0a813f6b4ba5a755c | [
"Apache-2.0"
] | 24 | 2021-07-09T08:19:47.000Z | 2022-03-08T08:15:48.000Z | dimod/meta.py | randomir/dimod | 8f636168a620e0dc9d969fb0a813f6b4ba5a755c | [
"Apache-2.0"
] | 1 | 2019-08-15T17:16:46.000Z | 2019-08-15T17:16:46.000Z | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
from __future__ import absolute_import
import abc
class SamplerABCMeta(abc.ABCMeta):
    """ABCMeta variant that keeps sampler mixin methods abstract.

    If a class (or one of its bases) provides exactly three methods tagged
    with ``__issamplemixin__``, those methods are re-added to
    ``__abstractmethods__`` so subclasses must override at least one of them.
    """
    def __new__(mcls, name, bases, namespace, **kwargs):
        cls = abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs)
        def _is_mixin(obj):
            # Sampler mixin methods are tagged by @samplemixinmethod.
            return getattr(obj, "__issamplemixin__", False)
        own_mixins = {attr for attr, value in namespace.items() if _is_mixin(value)}
        # Only a complete triple of mixin methods is forced abstract.
        abstracts = set(own_mixins) if len(own_mixins) == 3 else set()
        for base in bases:
            inherited = {attr
                         for attr in getattr(base, "__abstractmethods__", set())
                         if _is_mixin(getattr(cls, attr, None))}
            if len(inherited) == 3:
                abstracts.update(inherited)
        # if we found any, merge them into the abstract-method set
        if abstracts:
            cls.__abstractmethods__ = frozenset(abstracts | cls.__abstractmethods__)
        return cls
def samplemixinmethod(method):
    """Mark *method* as a sampler mixin method.

    Sets the ``__issamplemixin__`` attribute to True on the decorated
    function and returns the function unchanged, so it still behaves as a
    normal method.  SamplerABCMeta uses this tag to decide which methods to
    keep abstract.

    Examples:
        >>> @samplemixinmethod
        ... def f():
        ...     pass
        >>> f.__issamplemixin__
        True
    """
    # NB: decorator name was chosen to be consistent with @classmethod and @staticmethod
    method.__issamplemixin__ = True
    return method
| 33.953125 | 98 | 0.603313 |
e7d08c148779a57560a0e1eaa999673a0daccaef | 13,926 | py | Python | process.py | Yousof-Mahmoud/Digital_Smile_Design_Biomeetrics | 7e33796f400225ab727364f44344b8e4ce2838ae | [
"MIT"
] | null | null | null | process.py | Yousof-Mahmoud/Digital_Smile_Design_Biomeetrics | 7e33796f400225ab727364f44344b8e4ce2838ae | [
"MIT"
] | null | null | null | process.py | Yousof-Mahmoud/Digital_Smile_Design_Biomeetrics | 7e33796f400225ab727364f44344b8e4ce2838ae | [
"MIT"
] | null | null | null | import cv2
import mediapipe as mp
import numpy as np
from PIL import Image
import os, io
from PIL import Image, ImageDraw
import cv2
import numpy as np
from numpy.core.fromnumeric import shape, size
import matplotlib as plt
global Zefer
Zefer = 0
global width21
width21 =0
def template(fname,tempfilename):
    """Paste a teeth template image over the detected mouth region.

    fname: path to the face photo; tempfilename: path to the template image.
    Side effects: writes 'dst2-temp.jpg' and 'faceTemp.png' to the working
    directory.  Returns (composited PIL image, (width, height) of the mouth
    bounding box).
    """
    img = cv2.imread(fname)
    face = rect = Image.open(fname)
    mpDraw = mp.solutions.drawing_utils
    mpFaceMesh = mp.solutions.face_mesh
    faceMesh = mpFaceMesh.FaceMesh(max_num_faces=2)
    xid =[]
    yid =[]
    pts = np.empty([1,1])
    pts.fill(0)
    ptss =[]
    i =0
    # FaceMesh landmark ids bounding the inner-lip region (13 = mid upper lip).
    ids = [80,89, 310,319]
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = faceMesh.process(imgRGB)
    if results.multi_face_landmarks:
        for faceLms in results.multi_face_landmarks:
            for id,lm in enumerate(faceLms.landmark):
                # Landmarks are normalized; convert to pixel coordinates.
                ih, iw, ic = img.shape
                x,y = int(lm.x*iw), int(lm.y*ih)
                xid.append(x)
                yid.append(y)
    # NOTE(review): if no face is detected, xid/yid stay empty and the
    # indexing below raises IndexError.
    for i in range(0,len(ids)):
        ptss.append([xid[ids[i]],yid[ids[i]]])
    ptsss = np.array(ptss)
    ## (1) Crop the bounding rect of the selected landmarks
    rect = cv2.boundingRect(ptsss)
    x,y,w,h = rect
    croped = img[y:y+h, x:x+w].copy()
    cv2.imwrite('dst2-temp.jpg', croped)
    # Resize the template to the mouth box and paste with its alpha mask.
    template =Image.open(tempfilename)
    rectangle = Image.open('./dst2-temp.jpg')
    resizedTeeth = template.convert("RGBA").resize(rectangle.size)
    face.paste(resizedTeeth, (ptss[0]), mask = resizedTeeth)
    face.save('./faceTemp.png')
    img = Image.open(r'faceTemp.png')
    return img,rectangle.size
def ApplyColoration(fname,rangesid):
    """Recolor the teeth region of a face photo with a preset shade.

    fname: path to the face photo; rangesid: index (0-3) into the preset
    RGB shades below.  Side effects: writes 'output.jpg', 'dst.jpg' and
    'edges.jpg' to the working directory.  Returns the recolored PIL image.
    """
    # Preset tooth shades (RGB).
    ranges =[(234 ,223 ,195),(255 ,255 ,255),(231,221,197),(228,211,169)]
    range_select = ranges[rangesid]
    img = cv2.imread(fname)
    mpDraw = mp.solutions.drawing_utils
    mpFaceMesh = mp.solutions.face_mesh
    faceMesh = mpFaceMesh.FaceMesh(max_num_faces=2)
    drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=2)
    xid =[]
    yid =[]
    pts = np.empty([1,1])
    pts.fill(0)
    ptss =[]
    i =0
    # FaceMesh landmark ids outlining the inner lips (mouth opening).
    ids = [78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308, 324, 318, 402, 317, 14, 87, 178, 88, 95]
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = faceMesh.process(imgRGB)
    if results.multi_face_landmarks:
        for faceLms in results.multi_face_landmarks:
            #mpDraw.draw_landmarks(img, faceLms, mpFaceMesh.FACEMESH_CONTOURS,
            #                      drawSpec,drawSpec)
            for id,lm in enumerate(faceLms.landmark):
                #print(lm)
                ih, iw, ic = img.shape
                x,y = int(lm.x*iw), int(lm.y*ih)
                xid.append(x)
                yid.append(y)
                #print(id,x,y)
    #print(xid,yid)
    for i in range(0,len(ids)):
        ptss.append([xid[ids[i]],yid[ids[i]]])
    ptsss = np.array(ptss)
    ## (1) Crop the bounding rect of the mouth polygon
    rect = cv2.boundingRect(ptsss)
    x,y,w,h = rect
    # Keep the crop origin/size for pasting the recolored patch back later.
    xcap =x
    ycap =y
    wcap =w
    hcap =h
    croped = img[y:y+h, x:x+w].copy()
    ## (2) make mask from the mouth polygon
    ptsss = ptsss - ptsss.min(axis=0)
    mask = np.zeros(croped.shape[:2], np.uint8)
    cv2.drawContours(mask, [ptsss], -1, (255, 255, 255), -1, cv2.LINE_AA)
    ## (3) do bit-op: keep only pixels inside the mouth polygon
    dst = cv2.bitwise_and(croped, croped, mask=mask)
    ## (4) add the white background
    bg = np.ones_like(croped, np.uint8)*255
    cv2.bitwise_not(bg,bg, mask=mask)
    imgE = dst
    # HSV thresholds selecting near-white (tooth) pixels.
    sensitivity = 150
    lower = np.array([8,0,255-sensitivity])
    upper = np.array([172,sensitivity,255])
    # turn image into hsv
    hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)
    # mask that makes any non white black
    mask = cv2.inRange(hsv, lower, upper)
    output = cv2.bitwise_and(dst,dst, mask= mask)
    cv2.imwrite("output.jpg", output)
    cv2.imwrite('dst.jpg',dst)
    capOpener = Image.open(r"output.jpg")
    img_blur = cv2.GaussianBlur(imgE, (5,5), 0)
    edges = cv2.Canny(image=img_blur, threshold1=10, threshold2=120)
    cv2.imwrite('edges.jpg',edges)
    #width33, height33 = capOpener221.size
    # Get the size of the image
    width21, height21 = capOpener.size
    bWANTEDArray=[]
    jArray=[]
    rsummer=0
    gsummer=0
    bsummer=0
    N_points=0
    # First pass: accumulate channel sums over lit pixels (sums are unused below).
    for x in range(0,width21):
        for y in range (0,height21):
            current_color = capOpener.getpixel( (x,y) )
            #print(current_color)
            r,g,b= current_color
            if(r>0):
                rsummer+=r
                gsummer+=g
                bsummer+=b
                N_points+=1
    # Second pass: collect coordinates of tooth pixels, blacken the rest.
    for x in range(0,width21):
        for y in range (0,height21):
            current_color = capOpener.getpixel( (x,y) )
            #print(current_color)
            r,g,b= current_color
            if(b > 0 and r > 0 and g > 0 ):
                bWANTEDArray.append(x)
                jArray.append(y)
            else:
                capOpener.putpixel((x, y), (0, 0, 0))
    # Paint every detected tooth pixel with the selected shade.
    for x, y in zip(bWANTEDArray, jArray):
        capOpener.putpixel((x, y), range_select)
    # Composite the recolored patch back onto a fresh copy of the photo.
    THEMODIFIED=Image.open(fname)
    for x in range(xcap,xcap+ wcap ):
        for y in range (ycap,ycap+hcap):
            current_color = capOpener.getpixel((x-xcap, y-ycap))
            #print(current_color)
            r, g, b = current_color
            if(r>0 and g>0 and b>0):
                THEMODIFIED.putpixel((x, y),current_color)
    return THEMODIFIED
def gumDetection(fname):
    """Classify gum exposure in a smile photo.

    Returns 0 when the average edge offset falls inside the (7.7, 8.2)
    band, 1 otherwise — presumably 0 = normal, 1 = gummy smile; TODO
    confirm the intended meaning of the return codes.
    Side effects: writes 'cropped.jpg', 'dst.jpg' and 'edges.jpg'.
    """
    img = cv2.imread(fname)
    mpDraw = mp.solutions.drawing_utils
    mpFaceMesh = mp.solutions.face_mesh
    faceMesh = mpFaceMesh.FaceMesh(max_num_faces=2)
    drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=2)
    xid =[]
    yid =[]
    pts = np.empty([1,1])
    pts.fill(0)
    ptss =[]
    i =0
    # FaceMesh landmark ids outlining the inner lips (mouth opening).
    ids = [78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308, 324, 318, 402, 317, 14, 87, 178, 88, 95]
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = faceMesh.process(imgRGB)
    if results.multi_face_landmarks:
        for faceLms in results.multi_face_landmarks:
            #mpDraw.draw_landmarks(img, faceLms, mpFaceMesh.FACEMESH_CONTOURS,
            #                      drawSpec,drawSpec)
            for id,lm in enumerate(faceLms.landmark):
                #print(lm)
                ih, iw, ic = img.shape
                x,y = int(lm.x*iw), int(lm.y*ih)
                xid.append(x)
                yid.append(y)
                #print(id,x,y)
    #print(xid,yid)
    for i in range(0,len(ids)):
        ptss.append([xid[ids[i]],yid[ids[i]]])
    ptsss = np.array(ptss)
    ## (1) Crop the bounding rect of the mouth polygon
    rect = cv2.boundingRect(ptsss)
    x,y,w,h = rect
    croped = img[y:y+h, x:x+w].copy()
    cv2.imwrite("cropped.jpg",croped)
    ## (2) make mask from the mouth polygon
    ptsss = ptsss - ptsss.min(axis=0)
    mask = np.zeros(croped.shape[:2], np.uint8)
    cv2.drawContours(mask, [ptsss], -1, (255, 255, 255), -1, cv2.LINE_AA)
    ## (3) do bit-op: keep only pixels inside the mouth polygon
    dst = cv2.bitwise_and(croped, croped, mask=mask)
    ## (4) add the white background
    bg = np.ones_like(croped, np.uint8)*255
    cv2.bitwise_not(bg,bg, mask=mask)
    imgE = dst
    cv2.imwrite('dst.jpg',dst)
    img_blur = cv2.GaussianBlur(imgE, (5,5), 0)
    # Adaptive Canny thresholds: mean +/- std of the cropped region.
    mean, std = cv2.meanStdDev(croped)
    TH1 =int(mean[0]-std[0])
    TH2 = int(mean[0]+std[0])
    edges = cv2.Canny(image=img_blur, threshold1=TH1, threshold2=TH2)
    cv2.imwrite("edges.jpg",edges)
    Xstorer =[]
    capOpener221 = Image.open(r"edges.jpg")
    width33, height33 = capOpener221.size
    R=0
    # Collect the y coordinates of strong edge pixels.
    for x in range(0,width33):
        for y in range (0,height33):
            if(capOpener221.getpixel((x,y))>=250):
                R=R+1
                Xstorer.append(y)
    # NOTE(review): max() raises ValueError when no edge pixel was found.
    MaxStorer=max(Xstorer)
    #print(Xstorer)
    Zaree=0
    Final22=0
    AverageArr=[]
    # Average the offsets from the lowest edge that fall in the 5..10 band.
    for i in range(0,len(Xstorer)):
        Zaree=MaxStorer-Xstorer[i]
        if(Zaree>=5 and Zaree<=10):
            Final22=Zaree
            AverageArr.append(Final22)
    Averagenumber=sum(AverageArr)/len(AverageArr)
    #print(Averagenumber)
    if(Averagenumber>7.7 and Averagenumber<8.2):
        return 0
    else:
        return 1
def MidlineDrawing(fname):
    """Draw the facial midline and an estimated dental midline on the photo.

    Requires gumDetection() to have produced 'cropped.jpg' (it is called
    below, which writes the file as a side effect).  Returns the annotated
    PIL image.
    """
    img = cv2.imread(fname)
    mpDraw = mp.solutions.drawing_utils
    mpFaceMesh = mp.solutions.face_mesh
    faceMesh = mpFaceMesh.FaceMesh(max_num_faces=2)
    xid =[]
    yid =[]
    pts = np.empty([1,1])
    pts.fill(0)
    ptss =[]
    i =0
    # Landmarks: 8 = between eyebrows, 200 = chin, 78 = mouth corner,
    # 13 = mid upper lip.
    ids = [8,200,78,13]
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = faceMesh.process(imgRGB)
    if results.multi_face_landmarks:
        for faceLms in results.multi_face_landmarks:
            for id,lm in enumerate(faceLms.landmark):
                ih, iw, ic = img.shape
                x,y = int(lm.x*iw), int(lm.y*ih)
                xid.append(x)
                yid.append(y)
    for i in range(0,len(ids)):
        ptss.append([xid[ids[i]],yid[ids[i]]])
    img2 = Image.open(fname)
    draw = ImageDraw.Draw(img2)
    # Facial midline: from between the eyebrows down to the chin.
    draw.line((ptss[0][0],ptss[0][1],ptss[1][0],ptss[1][1]), fill=128)
    # Side effect: gumDetection writes 'cropped.jpg' used below.
    r = gumDetection(fname)
    ########
    image = Image.open(r"cropped.jpg")
    width21,height21 = image.size
    Classify_Number=0
    ZeroNumver=0
    Neon=0
    NeonArray=[]
    # Per-row count of bright (tooth-like) pixels in the mouth crop.
    for i in range (1,height21):
        Neon=0
        for j in range(1,width21):
            current_color = image.getpixel((j,i))
            r,g,b= current_color
            b = int(b)
            g = int(g)
            r = int(r)
            if(b<=40 and r <40 and g<40):
                ZeroNumver=ZeroNumver+1
            if(b>=180 and r>=180 and g>=180 ):
                Classify_Number=Classify_Number+1
                Neon=Neon+1
        NeonArray.append(Neon)
    # Row index with the most bright pixels.
    for i in range(np.size(NeonArray)):
        if(NeonArray[i]==max(NeonArray)):
            Zefer=i
    ######
    croppedimg = cv2.imread("cropped.jpg")
    img_blur = cv2.GaussianBlur(croppedimg, (5,5), 0)
    mean, std = cv2.meanStdDev(croppedimg)
    TH1 =int(100)
    TH2 = int(200)
    edges2 = cv2.Canny(image=img_blur, threshold1=TH1, threshold2=TH2)
    Horizontal = Zefer
    Vertical = width21/2
    cv2.imwrite("edges2.jpg",edges2)
    # NOTE(review): findNearestWhite is not defined in this module - this
    # raises NameError unless it is provided elsewhere; verify.
    Hline = findNearestWhite(edges2, Horizontal, Vertical)
    center = Hline[0]
    draw = ImageDraw.Draw(img2)
    wid,heig = image.size
    # Dental midline: vertical green line offset from the mouth corner.
    draw.line((ptss[2][0]+1.5*center,0, ptss[2][0]+1.5*center,100*height21), fill=(0,255,0))
    return img2
def colorationDetection(fname):
    """Measure white coloration of the mouth crop stored in "dst.jpg".

    Returns the fraction of near-white pixels (all BGR channels >= 180)
    relative to the full pixel count minus the near-black pixels
    (all channels below ~40).

    NOTE: like the original implementation, this ignores *fname* and reads
    "dst.jpg" from the working directory, skips row 0 and column 0 of the
    scan, and still uses the full width*height in the denominator.
    """
    cap = cv2.imread(r"dst.jpg")
    # PIL reports (width, height); cv2 arrays are indexed (row, col).
    width21, height21 = Image.open(r"dst.jpg").size
    # Vectorized replacement of the original O(w*h) per-pixel Python loop.
    # Same scanned region: rows 1..height-1, cols 1..width-1.
    region = cap[1:height21, 1:width21].astype(int)
    b = region[:, :, 0]
    g = region[:, :, 1]
    r = region[:, :, 2]
    # near-black pixels are excluded from the denominator
    ZeroNumver = int(((b <= 40) & (r < 40) & (g < 40)).sum())
    # near-white pixels are the coloration being measured
    Classify_Number = int(((b >= 180) & (r >= 180) & (g >= 180)).sum())
    Definer = Classify_Number / ((width21 * height21) - ZeroNumver)
    return Definer
def gab_Detection(fname):
    """Detect a dark gap inside the mouth of the face image *fname*.

    Crops the inner-mouth polygon located by MediaPipe FaceMesh landmarks,
    marks pixels darker than (mean - std) of the crop, and returns 1 when
    more than 5% of the crop is that dark, else 0.
    """
    img = cv2.imread(fname)
    mpDraw = mp.solutions.drawing_utils
    mpFaceMesh = mp.solutions.face_mesh
    faceMesh = mpFaceMesh.FaceMesh(max_num_faces=2)
    xid =[]   # x pixel coordinate of every detected landmark
    yid =[]   # y pixel coordinate of every detected landmark
    pts = np.empty([1,1])   # NOTE(review): filled with zeros but never used afterwards
    pts.fill(0)
    ptss =[]  # [x, y] pairs for the landmarks listed in `ids`
    i =0
    #13 mid upper lip
    ids = [82,13,312,317,14,87]   # inner-lip landmark indices (upper and lower lip)
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # FaceMesh expects RGB; cv2 loads BGR
    results = faceMesh.process(imgRGB)
    if results.multi_face_landmarks:
        for faceLms in results.multi_face_landmarks:
            for id,lm in enumerate(faceLms.landmark):
                ih, iw, ic = img.shape
                # landmarks are normalized [0,1]; scale to pixel coordinates
                x,y = int(lm.x*iw), int(lm.y*ih)
                xid.append(x)
                yid.append(y)
    for i in range(0,len(ids)):
        ptss.append([xid[ids[i]],yid[ids[i]]])
    ptsss = np.array(ptss)
    ## (1) Crop the bounding rect
    rect = cv2.boundingRect(ptsss)
    x,y,w,h = rect
    croped = img[y:y+h, x:x+w].copy()
    ## (2) make mask
    ptsss = ptsss - ptsss.min(axis=0)   # shift polygon into crop-local coordinates
    mask = np.zeros(croped.shape[:2], np.uint8)
    cv2.drawContours(mask, [ptsss], -1, (255, 255, 255), -1, cv2.LINE_AA)
    ## (3) do bit-op
    dst = cv2.bitwise_and(croped, croped, mask=mask)   # zero out everything outside the polygon
    mean, std = cv2.meanStdDev(dst)
    image = dst
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Set threshold level
    threshold_level = mean[0] - std[0]   # NOTE(review): uses channel-0 (blue) stats only — confirm intended
    # Find coordinates of all pixels below threshold
    coords = np.column_stack(np.where(gray < threshold_level))   # NOTE(review): unused except for debug
    #print(coords)
    # Create mask of all pixels lower than threshold level
    mask = gray < threshold_level
    # Color the pixels in the mask
    image[mask] = (204, 119, 0)   # marker colour (BGR) for "dark" pixels
    black =0   # number of marker-coloured pixels
    for i in range(0,image.shape[0]):
        for j in range(0,image.shape[1]):
            if image[i][j][0] == 204 and image[i][j][1] == 119 and image[i][j][2] == 0 :
                black =black + 1
    pixTotal =image.shape[0]* image.shape[1]
    # gap present if more than 5% of the crop fell below the darkness threshold
    if(black/ pixTotal > 0.05):
        return 1
    else:
        return 0
def findNearestWhite(edges, horizontal, vertical):
    """Return the (row, col) of the white edge pixel whose ROW index is
    closest to *vertical*, considering only pixels whose column lies in
    [width/5, width/2] where width = 2*vertical.

    *horizontal* is accepted for interface compatibility but not used.
    Raises ValueError (via argmin on an empty array) when no candidate
    pixel exists.
    """
    white_pixels = np.argwhere(edges == 255)
    full_width = 2 * vertical
    # keep only pixels inside the column band [width/5, width/2]
    in_band = (white_pixels[:, 1] >= full_width / 5) & (white_pixels[:, 1] <= full_width / 2)
    candidates = white_pixels[in_band]
    # pick the candidate whose row index is nearest to `vertical`
    row_offsets = np.abs(candidates[:, 0] - vertical)
    return candidates[int(np.argmin(row_offsets))]
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.