hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0986067f7cb2442b4c612b34da27fa8d958d7f
| 536
|
py
|
Python
|
36.py
|
JohnOmena/PPA_algorithms
|
64c043a969ff4959b924b5a34eaafad18aacbd4b
|
[
"MIT"
] | null | null | null |
36.py
|
JohnOmena/PPA_algorithms
|
64c043a969ff4959b924b5a34eaafad18aacbd4b
|
[
"MIT"
] | null | null | null |
36.py
|
JohnOmena/PPA_algorithms
|
64c043a969ff4959b924b5a34eaafad18aacbd4b
|
[
"MIT"
] | null | null | null |
def menor_i_esimo(strA, strB, i):
    """Return the i-th smallest element (0-indexed) of the merged contents
    of two already-sorted sequences strA and strB.

    Runs in O(log len(strA) + log len(strB)) by discarding half of one
    sequence per recursion step.  Requires 0 <= i < len(strA) + len(strB).
    """
    # Base cases: once one side is exhausted, the answer is directly
    # indexable in the other.
    if len(strA) == 0:
        return strB[i]
    elif len(strB) == 0:
        return strA[i]  # fixed: was `StrA` (undefined name -> NameError)
    mid_strB = len(strB) // 2
    mid_strA = len(strA) // 2
    # fixed: the original referenced `mid_srtA`, `st1` and `st2`, none of
    # which exist; they are `mid_strA`, `strA` and `strB`.
    if mid_strA + mid_strB < i:
        # The target index lies beyond both midpoints, so the lower half of
        # the sequence with the smaller midpoint cannot contain the answer.
        if strA[mid_strA] > strB[mid_strB]:
            return menor_i_esimo(strA, strB[mid_strB + 1:], i - mid_strB - 1)
        else:
            return menor_i_esimo(strA[mid_strA + 1:], strB, i - mid_strA - 1)
    else:
        # The target index is at or before the midpoints, so the upper half
        # of the sequence with the larger midpoint cannot contain the answer.
        if strA[mid_strA] > strB[mid_strB]:
            return menor_i_esimo(strA[:mid_strA], strB, i)
        else:
            return menor_i_esimo(strA, strB[:mid_strB], i)
| 26.8
| 67
| 0.641791
|
4a09871a3c42329aa0e9b78327c54c22d955ccd5
| 4,715
|
py
|
Python
|
conf/settings.py
|
pincoin/django-summernote-demo
|
928cccf3bcf33aaed2953818718c49339ddc1f76
|
[
"MIT"
] | null | null | null |
conf/settings.py
|
pincoin/django-summernote-demo
|
928cccf3bcf33aaed2953818718c49339ddc1f76
|
[
"MIT"
] | null | null | null |
conf/settings.py
|
pincoin/django-summernote-demo
|
928cccf3bcf33aaed2953818718c49339ddc1f76
|
[
"MIT"
] | null | null | null |
import os
from django.utils.translation import ugettext_lazy as _
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '(h^8r(h9dme(g!atw*x8@(amp50%i8ixlrwp61b2j6hy1785-m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third-party apps
    'django_summernote',
    'crispy_forms',
    # project apps
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # LocaleMiddleware is placed after SessionMiddleware and before
    # CommonMiddleware so per-request language detection can use the session.
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'conf.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'conf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },
    {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },
    {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },
    {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# FIX: was 'ko_KR', which is a locale name (underscore form), not a valid
# LANGUAGE_CODE value -- Django expects lowercase, dash-separated language
# tags such as 'ko' or 'ko-kr' (the system check framework rejects the
# underscore form).
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Languages offered by the language switcher; display names are lazily
# translated at render time.
LANGUAGES = [
    ('ko', _('Korean')),
    ('en', _('English')),
]
# Directories searched by makemessages/compilemessages for .po/.mo files.
LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale/conf'),
    os.path.join(BASE_DIR, 'locale/summernote'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/assets/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static/'),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# django-crispy-forms: render forms with the Bootstrap 3 template pack.
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# django-summernote
# CodeMirror assets are pulled from a CDN via protocol-relative URLs.
# NOTE(review): codemirror core is pinned at 3.20.0 while formatting.js is
# fetched from 2.36.0 -- confirm this version mix is intentional.
SUMMERNOTE_CONFIG = {
    'lang': 'ko-KR',
    'imageTitle': {
        'specificAltField': True,
    },
    'popover': {
        'image': [
            ['imagesize', ['imageSize100', 'imageSize50', 'imageSize25']],
            ['float', ['floatLeft', 'floatRight', 'floatNone']],
            ['remove', ['removeMedia']],
            ['custom', ['imageTitle']],
        ],
    },
    'codemirror': {
        'lineNumbers': True,
        'tabSize': 2,
        'theme': 'monokai',
    },
    'css': (
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/3.20.0/codemirror.css',
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/3.20.0/theme/monokai.css',
    ),
    'js': (
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/3.20.0/codemirror.js',
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/3.20.0/mode/xml/xml.js',
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/2.36.0/formatting.js',
        os.path.join(STATIC_URL, 'js/summernote-image-title.js'),
    ),
    'css_for_inplace': (
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/3.20.0/codemirror.css',
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/3.20.0/theme/monokai.css',
    ),
    'js_for_inplace': (
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/3.20.0/codemirror.js',
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/3.20.0/mode/xml/xml.js',
        '//cdnjs.cloudflare.com/ajax/libs/codemirror/2.36.0/formatting.js',
        os.path.join(STATIC_URL, 'js/summernote-image-title.js'),
    ),
}
| 30.419355
| 91
| 0.657052
|
4a098753a488ca1468c33e5a8ac7047f2b25612a
| 527
|
py
|
Python
|
packages/merlin/components/Spell.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | 3
|
2019-08-02T21:02:47.000Z
|
2021-09-08T13:59:43.000Z
|
packages/merlin/components/Spell.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | null | null | null |
packages/merlin/components/Spell.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# framework access
import pyre
# my superclass
from .Dashboard import Dashboard
# my protocol
from .Action import Action
# class declaration
class Spell(pyre.panel(), Dashboard):
    """
    Base class for merlin spell implementations
    """
    # accessors
    @property
    def vfs(self):
        """
        Access to the fileserver
        """
        # delegate to the merlin executive, which owns the virtual filesystem
        fileserver = self.merlin.vfs
        return fileserver
# end of file
| 15.057143
| 47
| 0.624288
|
4a0987d108a2976717c1cbabfb14f27dbe7d0743
| 3,775
|
py
|
Python
|
test_5a.py
|
RaulAstudillo06/BOCF
|
68d19984385cdb27c9f6c5002c67fc9467bbe705
|
[
"BSD-3-Clause"
] | 9
|
2019-06-16T01:18:52.000Z
|
2021-11-03T15:43:55.000Z
|
test_5a.py
|
RaulAstudillo06/BOCF
|
68d19984385cdb27c9f6c5002c67fc9467bbe705
|
[
"BSD-3-Clause"
] | 3
|
2020-09-09T06:12:51.000Z
|
2021-06-01T23:46:18.000Z
|
test_5a.py
|
RaulAstudillo06/BOCF
|
68d19984385cdb27c9f6c5002c67fc9467bbe705
|
[
"BSD-3-Clause"
] | 5
|
2019-07-07T13:17:44.000Z
|
2020-09-09T06:06:17.000Z
|
import numpy as np
import scipy
import GPyOpt
import GPy
from multi_objective import MultiObjective
from multi_outputGP import multi_outputGP
from maEI import maEI
from uEI_noiseless import uEI_noiseless
from parameter_distribution import ParameterDistribution
from utility import Utility
from expectation_utility import ExpectationUtility
import cbo
import sys
import time
# --- Function to optimize
d = 5
m = 2 * (d - 1)
def h(X):
    """Evaluate the m = 2*(d-1) Rosenbrock-style attribute functions.

    X is one point (length d) or an (n_points, d) array; the result is an
    (m, n_points) array whose first d-1 rows hold x_j and whose last d-1
    rows hold x_{j+1} - x_j**2.
    """
    points = np.atleast_2d(X)
    values = np.empty((m, points.shape[0]))
    # Vectorized over the d-1 coordinate pairs instead of a Python loop.
    values[:d - 1, :] = points[:, :d - 1].T
    values[d - 1:, :] = (points[:, 1:d] - points[:, :d - 1] ** 2).T
    return values
#noise_var = [0.25]*m
# Noiseless multi-output objective built from h (m attribute functions).
objective = MultiObjective(h, as_list=False, output_dim=m)
# --- Space
# Continuous search domain [-2, 2]^d.
space = GPyOpt.Design_space(space =[{'name': 'var', 'type': 'continuous', 'domain': (-2,2), 'dimensionality': d}])
# --- Model (Multi-output GP)
n_attributes = m
# exact_feval=True per output: observations are treated as noise-free.
model = multi_outputGP(output_dim=n_attributes, exact_feval=[True]*m, fixed_hyps=False)
#model = multi_outputGP(output_dim=n_attributes, noise_var=noise_var, fixed_hyps=True)
# --- Initial design
initial_design = GPyOpt.experiment_design.initial_design('random', space, 2*(d+1))
# --- Parameter distribution
# Degenerate (single-atom) distribution: the utility parameter is fixed at 1.
parameter_support = np.atleast_1d([1.])
parameter_dist = np.ones((1,))
parameter_distribution = ParameterDistribution(continuous=False, support=parameter_support, prob_dist=parameter_dist)
#parameter_distribution = ParameterDistribution(continuous=True, sample_generator=beta_sampler)
# --- Utility function
def U_func(a, y):
    """Negated Rosenbrock-style utility assembled from the m attributes y.

    Relies on the module-level dimension d; `a` is the utility parameter.
    """
    return -sum((a - y[j]) ** 2 + 100 * y[j + d - 1] ** 2
                for j in range(d - 1))
def dU_func(a, y):
    """Gradient of U_func with respect to the m attribute values y."""
    grad = np.empty((m,))
    # First block: derivatives w.r.t. y[0..d-2].
    for k in range(d - 1):
        grad[k] = 2 * (a - y[k])
    # Second block: derivatives w.r.t. y[d-1..m-1].
    for k in range(d - 1, m):
        grad[k] = -200 * y[k]
    return grad
# Utility object wiring U_func and its gradient to the single-atom
# parameter distribution; the utility is non-linear in the attributes.
U = Utility(func=U_func,dfunc=dU_func,parameter_dist=parameter_distribution,linear=False)
# --- Expectation of utility
def psi(a, mean, var):
    """Closed-form expectation of U_func under independent Gaussian
    marginals with the given per-attribute mean and variance."""
    total = 0
    for k in range(d - 1):
        # Quadratic terms evaluated at the mean, plus the variance
        # contribution of each squared attribute.
        quad = (a - mean[k]) ** 2 + 100 * mean[k + d - 1] ** 2
        noise = var[k] + 100 * var[k + d - 1]
        total -= quad + noise
    return total
def psi_gradient(a, mean, var):
    """Gradient of psi w.r.t. (mean, var), concatenated into length 2*m."""
    grad = np.empty((2 * m,))
    # The variance derivatives are constant, so fill those blocks in one go.
    grad[2 * (d - 1):3 * (d - 1)] = -1.
    grad[3 * (d - 1):4 * (d - 1)] = -100.
    for k in range(d - 1):
        grad[k] = 2 * (a - mean[k])
        grad[k + d - 1] = -200 * mean[k + d - 1]
    return grad
expectation_U = ExpectationUtility(psi, psi_gradient)
# --- Compute real optimum value
if True:
    # Multi-start local search (L-BFGS-B from 100 random points in [-2,2]^d)
    # to estimate the true optimum for later comparison with BO results.
    bounds = [(-2, 2)]*d
    starting_points = 4.*np.random.rand(100, d) - 2.
    parameter = parameter_support[0]
    def func(x):
        # Negated utility of h(x): scipy minimizes, so utility is maximized.
        x_copy = np.atleast_2d(x)
        fx = h(x_copy)
        val = U_func(parameter, fx)
        return -val
    best_val_found = np.inf
    # NOTE(review): only `import scipy` appears at the top of the file;
    # scipy.optimize may only be available here via a side-effect import
    # from GPyOpt/GPy -- confirm.
    for x0 in starting_points:
        res = scipy.optimize.fmin_l_bfgs_b(func, x0, approx_grad=True, bounds=bounds)
        # print(res)
        if best_val_found > res[1]:
            best_val_found = res[1]
            x_opt = res[0]
    print('optimum')
    print(x_opt)
    print('best value found')
    print(-best_val_found)
    print('true optimum')
    print(0.)
# --- Optimum in cluster
#
# --- Acquisition optimizer
acq_opt = GPyOpt.optimization.AcquisitionOptimizer(optimizer='lbfgs2', inner_optimizer='lbfgs2', space=space)
# --- Aquisition function
acquisition = uEI_noiseless(model, space, optimizer=acq_opt, utility=U)
# --- Evaluator
evaluator = GPyOpt.core.evaluators.Sequential(acquisition)
# --- Run optimization
max_iter = 50
for i in range(1):
    # One independent BO run; results are written to a per-run text file.
    filename = './experiments_local/test9_EIh_noiseless_' + str(i) + '.txt'
    bo_model = cbo.CBO(model, space, objective, acquisition, evaluator, initial_design,
                       expectation_utility=expectation_U)
    bo_model.run_optimization(max_iter=max_iter, parallel=False, plot=False, results_file=filename)
| 29.724409
| 117
| 0.666755
|
4a098858f769263c29306e17646a43b79bce276e
| 3,051
|
py
|
Python
|
core/model/commands/board.py
|
the-dalee/gnome-2048
|
1a461409ee1b99d142446df73a55a9b5f4a9a3ae
|
[
"MIT"
] | 1
|
2016-05-10T03:10:53.000Z
|
2016-05-10T03:10:53.000Z
|
core/model/commands/board.py
|
the-dalee/gnome-2048
|
1a461409ee1b99d142446df73a55a9b5f4a9a3ae
|
[
"MIT"
] | 11
|
2015-01-02T00:00:52.000Z
|
2015-01-13T00:01:26.000Z
|
core/model/commands/board.py
|
the-dalee/gnome-2048
|
1a461409ee1b99d142446df73a55a9b5f4a9a3ae
|
[
"MIT"
] | null | null | null |
class BoardTile(object):
    """No-op base command; concrete board commands override execute/undo."""

    type = "Board"
    description = "Do nothing with board"

    def execute(self):
        """Apply the command to the board; the base command does nothing."""

    def undo(self):
        """Revert the command; the base command does nothing."""
class MoveTile(BoardTile):
    """Reversible command that relocates one tile between board positions."""

    source = None
    destination = None
    board = None
    description = "Move tile"

    def __init__(self, board, source, destination):
        self.board = board
        self.source = source
        self.destination = destination
        template = _("Move tile from (%(sx)i, %(sy)i) to (%(dx)i, %(dy)i)")
        self.description = template % {
            "sx": source[0], "sy": source[1],
            "dx": destination[0], "dy": destination[1],
        }

    def execute(self):
        # forward move
        self.board.move(self.source, self.destination)

    def undo(self):
        # the inverse move restores the original layout
        self.board.move(self.destination, self.source)
class MergeTile(BoardTile):
    """Reversible command that merges the tile at `source` into the tile
    at `destination`, removing the source tile from the board."""

    source = None
    destination = None
    sourceTile = None
    destinationTile = None
    board = None

    def __init__(self, board, source, destination):
        self.board = board
        self.source = source
        self.destination = destination
        # Keep references to both tile objects so undo can restore them.
        self.sourceTile = board.tiles[source]
        self.destinationTile = board.tiles[destination]
        template = _("Merge tile at (%(x)i, %(y)i) with (%(dx)i, %(dy)i)")
        self.description = template % {
            "x": source[0], "y": source[1],
            "dx": destination[0], "dy": destination[1],
        }

    def execute(self):
        # Flag the target so it cannot merge twice in one move, fold the
        # source tile into it, then take the source off the board.
        self.destinationTile.already_merged = True
        self.destinationTile.merge(self.sourceTile)
        self.board.remove(self.source)

    def undo(self):
        # Halve the merged value and put the remembered source tile back.
        # NOTE(review): `already_merged` is not reset here -- confirm the
        # caller clears it between moves.
        self.destinationTile.value = int(self.destinationTile.value / 2)
        self.board.add(self.source, self.sourceTile)
class AddTile(BoardTile):
    """Reversible command that places a new tile on the board."""

    position = None
    tile = None
    board = None

    def __init__(self, board, position, tile):
        self.board = board
        self.position = position
        self.tile = tile
        template = _("Add tile %(val)i at (%(x)i, %(y)i)")
        self.description = template % {
            "x": position[0], "y": position[1],
            "val": tile.value,
        }

    def execute(self):
        self.board.add(self.position, self.tile)

    def undo(self):
        self.board.remove(self.position)
class RemoveTile(BoardTile):
    """Reversible command that removes the tile at `position`, remembering
    the tile object so undo can put it back."""

    position = None
    tile = None
    board = None

    def __init__(self, board, position):
        self.position = position
        # BUG FIX: the original read `self.board.tiles[position]` before
        # `self.board` was assigned, so the lookup hit the class attribute
        # (None) and raised AttributeError. Read from the constructor
        # argument instead, mirroring the other tile commands.
        self.tile = board.tiles[position]
        self.board = board
        descriptionText = _("Remove tile at (%(x)i, %(y)i)")
        self.description = descriptionText % {
            "x": position[0],
            "y": position[1]
        }

    def execute(self):
        self.board.remove(self.position)

    def undo(self):
        # Re-insert the exact tile object that was removed.
        self.board.add(self.position, self.tile)
| 26.530435
| 82
| 0.549656
|
4a0988b27ded3586961dc3c8064ae658e0c7c2a3
| 4,679
|
py
|
Python
|
ui.py
|
calzoneman/python-chatui
|
421d389811e58e4f33e68fb6edb6e75eb3252092
|
[
"MIT"
] | 30
|
2015-01-13T07:00:22.000Z
|
2021-09-16T15:15:13.000Z
|
ui.py
|
toipacoelho/Crypto-Chat
|
a9079a5f175a2773f25ed140499c8f67a72363dd
|
[
"MIT"
] | null | null | null |
ui.py
|
toipacoelho/Crypto-Chat
|
a9079a5f175a2773f25ed140499c8f67a72363dd
|
[
"MIT"
] | 6
|
2015-08-19T20:52:13.000Z
|
2022-01-18T20:48:06.000Z
|
import curses
class ChatUI:
    """Curses chat interface: a user-list pane on the left, a scrolling
    message pane on the right and a one-line input box along the bottom."""
    def __init__(self, stdscr, userlist_width=16):
        """Create the three sub-windows inside *stdscr*.

        stdscr -- the curses standard screen (e.g. from curses.wrapper)
        userlist_width -- columns reserved for the user-list pane
        """
        curses.use_default_colors()
        # Map every color pair i to (fg=i, bg=terminal default).
        # NOTE(review): pair 0 is reserved by curses, so init_pair(0, ...)
        # may be rejected on some platforms -- confirm.
        for i in range(0, curses.COLORS):
            curses.init_pair(i, i, -1);
        self.stdscr = stdscr
        self.userlist = []       # names rendered in the left pane
        self.inputbuffer = ""    # text currently being typed
        self.linebuffer = []     # chatbuffer re-wrapped to the pane width
        self.chatbuffer = []     # full, unwrapped message history
        # Curses, why must you confuse me with your height, width, y, x
        userlist_hwyx = (curses.LINES - 2, userlist_width - 1, 0, 0)
        chatbuffer_hwyx = (curses.LINES - 2, curses.COLS-userlist_width-1,
                           0, userlist_width + 1)
        chatline_yx = (curses.LINES - 1, 0)
        self.win_userlist = stdscr.derwin(*userlist_hwyx)
        self.win_chatline = stdscr.derwin(*chatline_yx)
        self.win_chatbuffer = stdscr.derwin(*chatbuffer_hwyx)
        self.redraw_ui()
    def resize(self):
        """Handles a change in terminal size"""
        u_h, u_w = self.win_userlist.getmaxyx()
        h, w = self.stdscr.getmaxyx()
        # Move/resize the sub-windows to the new geometry, then re-wrap
        # every stored message for the new chat pane width.
        self.win_chatline.mvwin(h - 1, 0)
        self.win_chatline.resize(1, w)
        self.win_userlist.resize(h - 2, u_w)
        self.win_chatbuffer.resize(h - 2, w - u_w - 2)
        self.linebuffer = []
        for msg in self.chatbuffer:
            self._linebuffer_add(msg)
        self.redraw_ui()
    def redraw_ui(self):
        """Redraws the entire UI"""
        h, w = self.stdscr.getmaxyx()
        u_h, u_w = self.win_userlist.getmaxyx()
        self.stdscr.clear()
        # Separator column after the user list; separator row above input.
        self.stdscr.vline(0, u_w + 1, "|", h - 2)
        self.stdscr.hline(h - 2, 0, "-", w)
        self.stdscr.refresh()
        self.redraw_userlist()
        self.redraw_chatbuffer()
        self.redraw_chatline()
    def redraw_chatline(self):
        """Redraw the user input textbox"""
        h, w = self.win_chatline.getmaxyx()
        self.win_chatline.clear()
        # Show only the tail of the buffer when it exceeds the window width.
        start = len(self.inputbuffer) - w + 1
        if start < 0:
            start = 0
        self.win_chatline.addstr(0, 0, self.inputbuffer[start:])
        self.win_chatline.refresh()
    def redraw_userlist(self):
        """Redraw the userlist"""
        self.win_userlist.clear()
        h, w = self.win_userlist.getmaxyx()
        for i, name in enumerate(self.userlist):
            # One name per row; names beyond the pane height are dropped.
            if i >= h:
                break
            #name = name.ljust(w - 1) + "|"
            self.win_userlist.addstr(i, 0, name[:w - 1])
        self.win_userlist.refresh()
    def redraw_chatbuffer(self):
        """Redraw the chat message buffer"""
        self.win_chatbuffer.clear()
        h, w = self.win_chatbuffer.getmaxyx()
        # Render only the last h wrapped lines.
        j = len(self.linebuffer) - h
        if j < 0:
            j = 0
        for i in range(min(h, len(self.linebuffer))):
            self.win_chatbuffer.addstr(i, 0, self.linebuffer[j])
            j += 1
        self.win_chatbuffer.refresh()
    def chatbuffer_add(self, msg):
        """
        Add a message to the chat buffer, automatically slicing it to
        fit the width of the buffer
        """
        self.chatbuffer.append(msg)
        self._linebuffer_add(msg)
        self.redraw_chatbuffer()
        self.redraw_chatline()
        self.win_chatline.cursyncup()
    def _linebuffer_add(self, msg):
        # Hard-wrap msg to the chat pane width and append the pieces to
        # self.linebuffer (no word-boundary awareness).
        h, w = self.stdscr.getmaxyx()
        u_h, u_w = self.win_userlist.getmaxyx()
        # Usable chat width = screen minus user-list pane and separators.
        w = w - u_w - 2
        while len(msg) >= w:
            self.linebuffer.append(msg[:w])
            msg = msg[w:]
        if msg:
            self.linebuffer.append(msg)
    def prompt(self, msg):
        """Prompts the user for input and returns it"""
        self.inputbuffer = msg
        self.redraw_chatline()
        # NOTE(review): wait_input() is called without a prompt, so it
        # immediately resets inputbuffer to "" (discarding msg), yet the
        # result is still sliced by len(msg). This looks buggy -- confirm
        # whether wait_input(msg) was the intended call.
        res = self.wait_input()
        res = res[len(msg):]
        return res
    def wait_input(self, prompt=""):
        """
        Wait for the user to input a message and hit enter.
        Returns the message
        """
        self.inputbuffer = prompt
        self.redraw_chatline()
        self.win_chatline.cursyncup()
        last = -1
        while last != ord('\n'):
            last = self.stdscr.getch()
            if last == ord('\n'):
                tmp = self.inputbuffer
                self.inputbuffer = ""
                self.redraw_chatline()
                self.win_chatline.cursyncup()
                # Strip the prompt prefix; only the typed text is returned.
                return tmp[len(prompt):]
            elif last == curses.KEY_BACKSPACE or last == 127:
                # Never delete into the prompt itself.
                if len(self.inputbuffer) > len(prompt):
                    self.inputbuffer = self.inputbuffer[:-1]
            elif last == curses.KEY_RESIZE:
                self.resize()
            elif 32 <= last <= 126:
                # Printable ASCII only.
                self.inputbuffer += chr(last)
            self.redraw_chatline()
| 32.047945
| 74
| 0.549904
|
4a0989261f2857ff059263b1c9d375de6ee0133a
| 4,095
|
py
|
Python
|
storyscript/compiler/lowering/Faketree.py
|
edvald/storyscript
|
a912586a65c1ee31cb634092e952767da6215269
|
[
"Apache-2.0"
] | null | null | null |
storyscript/compiler/lowering/Faketree.py
|
edvald/storyscript
|
a912586a65c1ee31cb634092e952767da6215269
|
[
"Apache-2.0"
] | null | null | null |
storyscript/compiler/lowering/Faketree.py
|
edvald/storyscript
|
a912586a65c1ee31cb634092e952767da6215269
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from lark.lexer import Token
from storyscript.parser import Tree
class FakeTree:
    # unique prefix for all compiler-inserted paths
    prefix = '__p-'
    # NOTE(review): the string below sits after the `prefix` assignment, so
    # it is a no-op expression rather than the class docstring; moving it
    # above `prefix` would make it the real docstring.
    """
    Creates fake trees that are not in the original story source.
    """
    def __init__(self, block):
        # Block (Tree) into which fake assignment nodes will be inserted.
        self.block = block
        # Line number of the block, kept as a string ("12" or fake "12.3").
        self.original_line = str(block.line())
        # Fake line-number strings allocated so far (used for uniqueness).
        self.new_lines = {}
        self._check_existing_fake_lines(block)
    def _check_existing_fake_lines(self, block):
        # Register fake paths inserted by an earlier pass so newly created
        # fake line numbers don't collide with them.
        for child in block.children:
            if child.path:
                tok = child.path.find_first_token().value
                if tok.startswith(self.prefix):
                    self.new_lines[tok] = False
    def line(self):
        """
        Creates fake line numbers. The strings are decreasingly sorted,
        so that the resulting tree is compiled correctly.
        """
        line = self.original_line
        parts = line.split('.')
        # If the anchor is itself a fake line (e.g. "12.3"), reuse its
        # parent part so nested fakes stay under the same real line.
        if len(parts) > 1:
            line = '.'.join(parts[:-1])
        # We start at .1, s.t. lines from L1 are called L1.1 and not L1.0
        # to avoid any potential confusion
        new_suffix = len(self.new_lines) + 1
        fake_line = f'{line}.{new_suffix}'
        self.new_lines[fake_line] = None
        return fake_line
    def get_line(self, tree):
        """
        Gets the tree line if it's a new one, otherwise creates it.
        """
        if tree.line() in self.new_lines:
            return tree.line()
        return self.line()
    def path(self, name=None, line=None):
        """
        Creates a fake tree path.
        """
        if line is None:
            line = self.line()
        if name is None:
            # Default name embeds the fake line, guaranteeing uniqueness.
            name = f'{self.prefix}{line}'
        return Tree('path', [Token('NAME', name, line=line)])
    def mark_line(self, node, line):
        """
        Updates the line for all tokens of a given `node`.
        """
        for child in node.children:
            if isinstance(child, Token):
                child.line = line
            else:
                # Recurse into sub-trees.
                self.mark_line(child, line)
    def assignment(self, value):
        """
        Creates a fake assignment tree, equivalent to "$fake = value"
        """
        line = self.get_line(value)
        first_token = value.find_first_token()
        first_token.line = line
        path = self.path(line=line)
        return self.assignment_path(path, value, line)
    def assignment_path(self, path, value, line):
        """
        Adds a new assignment: `path` = `value`
        """
        # updates all tokens
        self.mark_line(value, line)
        equals = Token('EQUALS', '=', line=line)
        # Wrap the value in base_expression unless it already is one.
        if value.data == 'base_expression':
            expr = value
        else:
            expr = Tree('base_expression', [value])
        fragment = Tree('assignment_fragment', [equals, expr])
        return Tree('assignment', [path, fragment])
    def find_insert_pos(self, original_line):
        """
        Finds the insert position for a targeted line in the fake tree block.
        """
        for i, n in enumerate(self.block.children):
            line = n.line()
            if line == original_line:
                return i
        # use the last position as insert position by default
        # this inserts the new assignment node _before_ the last node
        return -1
    def add_assignment(self, value, original_line):
        """
        Creates an assignments and adds it to the current block
        Returns a fake path reference to this assignment
        """
        assert len(self.block.children) >= 1
        insert_pos = self.find_insert_pos(original_line)
        assignment = self.assignment(value)
        # Splice the assignment into the block's child list at insert_pos.
        self.block.children = [
            *self.block.children[:insert_pos],
            assignment,
            *self.block.children[insert_pos:],
        ]
        # we need a new node, s.t. already inserted
        # fake nodes don't get changed
        name = Token('NAME', assignment.path.child(0), line=original_line)
        fake_path = Tree('path', [name])
        return fake_path
| 31.992188
| 77
| 0.569963
|
4a098980ce6af836f139286d9b4c580b7b0b28e3
| 609
|
py
|
Python
|
catalog/migrations/0009_bookinstance_borrower.py
|
Abravobolado/django_local_library
|
7ad640d16b746fe4870b1648d07714edb5535656
|
[
"Apache-2.0"
] | null | null | null |
catalog/migrations/0009_bookinstance_borrower.py
|
Abravobolado/django_local_library
|
7ad640d16b746fe4870b1648d07714edb5535656
|
[
"Apache-2.0"
] | null | null | null |
catalog/migrations/0009_bookinstance_borrower.py
|
Abravobolado/django_local_library
|
7ad640d16b746fe4870b1648d07714edb5535656
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-10-12 08:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the nullable `borrower` foreign key (to the active user model)
    on catalog.BookInstance."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('catalog', '0008_auto_20210928_0844'),
    ]
    operations = [
        migrations.AddField(
            model_name='bookinstance',
            name='borrower',
            # SET_NULL keeps the BookInstance when the borrowing user is
            # deleted; blank/null make the loan optional.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 27.681818
| 134
| 0.683087
|
4a0989b5fa413ed57b15a19f0543adb7812e26c7
| 11,850
|
py
|
Python
|
saleor/graphql/payment/mutations.py
|
avantrio/saleor
|
29735afc16fa07012a74d6116ca59eb6f920bbff
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/payment/mutations.py
|
avantrio/saleor
|
29735afc16fa07012a74d6116ca59eb6f920bbff
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/payment/mutations.py
|
avantrio/saleor
|
29735afc16fa07012a74d6116ca59eb6f920bbff
|
[
"CC-BY-4.0"
] | null | null | null |
import graphene
import opp.core
from django.core.exceptions import ValidationError
from ... import settings
from ...checkout.calculations import calculate_checkout_total_with_gift_cards
from ...checkout.checkout_cleaner import clean_billing_address, clean_checkout_shipping
from ...checkout.utils import cancel_active_payments
from ...core.permissions import OrderPermissions
from ...core.utils import get_client_ip
from ...core.utils.url import validate_storefront_url
from ...payment import PaymentError, gateway, models
from ...payment.error_codes import PaymentErrorCode
from ...payment.utils import create_payment, is_currency_supported
from ..account.i18n import I18nMixin
from ..account.types import AddressInput
from ..checkout.types import Checkout
from ..core.mutations import BaseMutation
from ..core.scalars import PositiveDecimal
from ..core.types import common as common_types
from ..core.utils import from_global_id_strict_type
from .types import Payment, PaymentInitialized
from ...plugins.models import PluginConfiguration
class PaymentInput(graphene.InputObjectType):
    """GraphQL input type describing the payment to create for a checkout."""
    gateway = graphene.Field(
        graphene.String,
        description="A gateway to use with that payment.",
        required=True,
    )
    token = graphene.String(
        required=False,
        description=(
            "Client-side generated payment token, representing customer's "
            "billing data in a secure manner."
        ),
    )
    amount = PositiveDecimal(
        required=False,
        description=(
            "Total amount of the transaction, including "
            "all taxes and discounts. If no amount is provided, "
            "the checkout total will be used."
        ),
    )
    billing_address = AddressInput(
        required=False,
        description=(
            "[Deprecated] Billing address. If empty, the billing address associated "
            "with the checkout instance will be used. Use `checkoutCreate` or "
            "`checkoutBillingAddressUpdate` mutations to set it. This field will be "
            "removed after 2020-07-31."
        ),
    )
    return_url = graphene.String(
        required=False,
        description=(
            "URL of a storefront view where user should be redirected after "
            "requiring additional actions. Payment with additional actions will not be "
            "finished if this field is not provided."
        ),
    )
class CheckoutPaymentCreate(BaseMutation, I18nMixin):
    """Create a new payment for a checkout, with HyperPay-specific setup
    when that gateway is selected."""

    checkout = graphene.Field(Checkout, description="Related checkout object.")
    payment = graphene.Field(Payment, description="A newly created payment.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)
        input = PaymentInput(
            description="Data required to create a new payment.", required=True
        )

    class Meta:
        description = "Create a new payment for given checkout."
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"

    @classmethod
    def clean_shipping_method(cls, checkout):
        """Raise if the checkout has no shipping method selected."""
        if not checkout.shipping_method:
            raise ValidationError(
                {
                    "shipping_method": ValidationError(
                        "Shipping method not set for this checkout.",
                        code=PaymentErrorCode.SHIPPING_METHOD_NOT_SET,
                    )
                }
            )

    @classmethod
    def clean_payment_amount(cls, info, checkout_total, amount):
        """Reject amounts that differ from the checkout total (no partial pay)."""
        if amount != checkout_total.gross.amount:
            raise ValidationError(
                {
                    "amount": ValidationError(
                        "Partial payments are not allowed, amount should be "
                        "equal checkout's total.",
                        code=PaymentErrorCode.PARTIAL_PAYMENT_NOT_ALLOWED,
                    )
                }
            )

    @classmethod
    def validate_gateway(cls, gateway_id, currency):
        """Raise if the chosen gateway does not support the checkout currency."""
        if not is_currency_supported(currency, gateway_id):
            raise ValidationError(
                {
                    "gateway": ValidationError(
                        f"The gateway {gateway_id} does not support checkout currency.",
                        code=PaymentErrorCode.NOT_SUPPORTED_GATEWAY.value,
                    )
                }
            )

    @classmethod
    def validate_token(cls, manager, gateway: str, input_data: dict):
        """Raise if the gateway requires a client token and none was sent."""
        token = input_data.get("token")
        is_required = manager.token_is_required_as_payment_input(gateway)
        if not token and is_required:
            raise ValidationError(
                {
                    "token": ValidationError(
                        f"Token is required for {gateway}.",
                        code=PaymentErrorCode.REQUIRED.value,
                    ),
                }
            )

    @classmethod
    def validate_return_url(cls, input_data):
        """Validate that `return_url`, when given, points at the storefront."""
        return_url = input_data.get("return_url")
        if not return_url:
            return
        try:
            validate_storefront_url(return_url)
        except ValidationError as error:
            raise ValidationError(
                {"redirect_url": error}, code=PaymentErrorCode.INVALID
            )

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, **data):
        checkout_id = from_global_id_strict_type(
            checkout_id, only_type=Checkout, field="checkout_id"
        )
        # NOTE(review): `models` here is imported from ...payment; confirm
        # it actually exposes a Checkout model.
        checkout = models.Checkout.objects.prefetch_related(
            "lines__variant__product__collections"
        ).get(pk=checkout_id)
        data = data["input"]
        gateway = data["gateway"]
        cls.validate_gateway(gateway, checkout.currency)
        cls.validate_return_url(data)
        checkout_total = calculate_checkout_total_with_gift_cards(
            checkout, info.context.discounts
        )
        amount = data.get("amount", checkout_total.gross.amount)
        if gateway.lower() == 'hyperpay':
            # HyperPay gateway setup: create a server-side checkout through
            # the OPP API and use its id as the payment token.
            # NOTE(review): only `import opp.core` appears at the top of the
            # file; confirm `opp.config` is importable via that module.
            gateway_plugin = PluginConfiguration.objects.get(identifier='HyperPay')
            opp.config.configure(mode=0 if settings.DEBUG else 3)
            api = opp.core.API(
                **{"authentication.userId": gateway_plugin.configuration[0]['value'],
                   "authentication.password": gateway_plugin.configuration[1]['value'],
                   "authentication.entityId": gateway_plugin.configuration[2]['value']
                   })
            # TODO : here the currency should change to: checkout.currency
            gateway_checkout = api.checkouts().create(**{"amount": amount,
                                                         "currency": "USD",
                                                         "paymentType": "PA"
                                                         })
            # Removed leftover debug print of the raw gateway response and
            # the stray no-op triple-quoted strings around this section.
            data['token'] = gateway_checkout['id']
        cls.validate_token(info.context.plugins, gateway, data)
        clean_checkout_shipping(
            checkout, list(checkout), info.context.discounts, PaymentErrorCode
        )
        clean_billing_address(checkout, PaymentErrorCode)
        cls.clean_payment_amount(info, checkout_total, amount)
        extra_data = {
            "customer_user_agent": info.context.META.get("HTTP_USER_AGENT"),
        }
        # Only one active payment per checkout: cancel any previous ones.
        cancel_active_payments(checkout)
        payment = create_payment(
            gateway=gateway,
            payment_token=data.get("token", ""),
            total=amount,
            currency=checkout.currency,
            email=checkout.email,
            extra_data=extra_data,
            # FIXME this is not a customer IP address. It is a client storefront ip
            customer_ip_address=get_client_ip(info.context),
            checkout=checkout,
            return_url=data.get("return_url"),
        )
        return CheckoutPaymentCreate(payment=payment, checkout=checkout)
class PaymentCapture(BaseMutation):
    """Mutation that captures a previously authorized payment amount."""
    payment = graphene.Field(Payment, description="Updated payment.")
    class Arguments:
        payment_id = graphene.ID(required=True, description="Payment ID.")
        amount = PositiveDecimal(description="Transaction amount.")
    class Meta:
        description = "Captures the authorized payment amount."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def perform_mutation(cls, _root, info, payment_id, amount=None):
        payment = cls.get_node_or_error(
            info, payment_id, field="payment_id", only_type=Payment
        )
        try:
            gateway.capture(payment, amount)
            # Re-read gateway-updated state written by the capture.
            payment.refresh_from_db()
        except PaymentError as e:
            # NOTE(review): other mutations in this module pass the enum's
            # `.value` as `code`; here the enum member itself is passed --
            # confirm which form is intended.
            raise ValidationError(str(e), code=PaymentErrorCode.PAYMENT_ERROR)
        return PaymentCapture(payment=payment)
class PaymentRefund(PaymentCapture):
    """Mutation that refunds a captured payment amount.

    Inherits the `payment` field and `Arguments` from PaymentCapture.
    """
    class Meta:
        description = "Refunds the captured payment amount."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def perform_mutation(cls, _root, info, payment_id, amount=None):
        payment = cls.get_node_or_error(
            info, payment_id, field="payment_id", only_type=Payment
        )
        try:
            gateway.refund(payment, amount=amount)
            # Re-read gateway-updated state written by the refund.
            payment.refresh_from_db()
        except PaymentError as e:
            raise ValidationError(str(e), code=PaymentErrorCode.PAYMENT_ERROR)
        return PaymentRefund(payment=payment)
class PaymentVoid(BaseMutation):
    """Mutation that voids an authorized (not yet captured) payment."""
    payment = graphene.Field(Payment, description="Updated payment.")
    class Arguments:
        payment_id = graphene.ID(required=True, description="Payment ID.")
    class Meta:
        description = "Voids the authorized payment."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"
    @classmethod
    def perform_mutation(cls, _root, info, payment_id):
        payment = cls.get_node_or_error(
            info, payment_id, field="payment_id", only_type=Payment
        )
        try:
            gateway.void(payment)
            # Re-read gateway-updated state written by the void.
            payment.refresh_from_db()
        except PaymentError as e:
            raise ValidationError(str(e), code=PaymentErrorCode.PAYMENT_ERROR)
        return PaymentVoid(payment=payment)
class PaymentInitialize(BaseMutation):
    """Initialize the payment process through a plugin-provided gateway."""

    initialized_payment = graphene.Field(PaymentInitialized, required=False)

    class Arguments:
        gateway = graphene.String(
            description="A gateway name used to initialize the payment.", required=True,
        )
        payment_data = graphene.JSONString(
            required=False,
            description=(
                "Client-side generated data required to initialize the payment."
            ),
        )

    class Meta:
        description = "Initializes payment process when it is required by gateway."
        error_type_class = common_types.PaymentError
        error_type_field = "payment_errors"

    @classmethod
    def perform_mutation(cls, _root, info, gateway, payment_data=None):
        # ``payment_data`` is declared ``required=False`` above, so graphene
        # may omit it entirely — it needs a default here to avoid a TypeError.
        try:
            response = info.context.plugins.initialize_payment(gateway, payment_data)
        except PaymentError as e:
            raise ValidationError(
                {
                    "payment_data": ValidationError(
                        str(e), code=PaymentErrorCode.INVALID.value
                    )
                }
            )
        return PaymentInitialize(initialized_payment=response)
| 37.147335
| 88
| 0.621772
|
4a098c457da6f7bdd925fa9bf4cc0a00aa85244a
| 1,863
|
py
|
Python
|
files/downloader/my_images.py
|
AzureBlueCobalt/analyze-gelbooru
|
694b6e843c06942b3d6110e79e083b9ab71214b9
|
[
"MIT"
] | null | null | null |
files/downloader/my_images.py
|
AzureBlueCobalt/analyze-gelbooru
|
694b6e843c06942b3d6110e79e083b9ab71214b9
|
[
"MIT"
] | null | null | null |
files/downloader/my_images.py
|
AzureBlueCobalt/analyze-gelbooru
|
694b6e843c06942b3d6110e79e083b9ab71214b9
|
[
"MIT"
] | null | null | null |
'''
TODO
Call downloader.download_all_info_vectors("id: {}".format(id)) for every image,
because the filenames seem to have been truncated.
'''
def get_info_vectors():
    """Return one list of whitespace-separated filename tokens per file in res/images_mine.

    Each entry is the file's base name (extension stripped) split on single
    spaces, e.g. "site id tag1 tag2.png" -> ["site", "id", "tag1", "tag2"].
    """
    from os import listdir
    from os.path import isfile, join, splitext
    image_dir = "res/images_mine"
    return [
        splitext(entry)[0].split(" ")
        for entry in listdir(image_dir)
        if isfile(join(image_dir, entry))
    ]
# for s in [get_info_vectors()[0]]:
# Re-download tag metadata for every local image: the first two filename
# tokens are the source site and the image id; the remaining tokens are the
# (possibly truncated) tags, unused here.
for s in get_info_vectors():
    site = s[0]
    image_id = s[1]
    tags = s[2:]
    import downloader
    downloader.download_info("id:{}".format(image_id), filename="res/tags_mine/{}_{}.xml".format(site, image_id), site=site)
def plot_tags_quantities():
    """Scatter-plot the number of filename tokens per image and print the average."""
    tag_counts = [len(vec) for vec in get_info_vectors()]
    print("avg_length: {}".format(sum(tag_counts) / float(len(tag_counts))))
    import matplotlib.pyplot as plt
    plt.plot(range(len(tag_counts)), tag_counts, 'ro')
    plt.axis([0, len(tag_counts), 0, 20])
    plt.show()
# plot_tags_quantities()
def print_top_tags():
    """Print every tag occurring more than ``threshold`` times, most frequent first."""
    tag_string_vectors = get_info_vectors()
    # Flatten the per-file token lists into one sequence of tags.
    flattened_tag_strings = [tag for v in tag_string_vectors for tag in v]
    from collections import Counter
    counts = Counter(flattened_tag_strings)
    threshold = 10
    # Counter.iteritems() exists only on Python 2; .items() works everywhere
    # and this file otherwise uses the Python 3 print() function.
    top_counts = [(k, v) for k, v in counts.items() if v > threshold]
    top_counts = sorted(top_counts, key=lambda kv: -kv[1])
    top_counts = ["{}: {}".format(k, v) for k, v in top_counts]
    print("\n".join(top_counts))
# print_top_tags()
| 26.614286
| 132
| 0.633387
|
4a098c61c325cbe12fee2677ab0e1d4b04e5fc77
| 3,212
|
py
|
Python
|
app/buildhistory/migrations/0001_initial.py
|
Dhruv-Sachdev1313/macports-webapp
|
ed3d8efceac4cfb694a51241cf81023ed10aade6
|
[
"BSD-2-Clause"
] | 39
|
2018-05-09T02:14:52.000Z
|
2022-03-07T20:31:44.000Z
|
app/buildhistory/migrations/0001_initial.py
|
Dhruv-Sachdev1313/macports-webapp
|
ed3d8efceac4cfb694a51241cf81023ed10aade6
|
[
"BSD-2-Clause"
] | 337
|
2018-05-07T20:09:07.000Z
|
2022-03-31T14:16:01.000Z
|
app/buildhistory/migrations/0001_initial.py
|
Dhruv-Sachdev1313/macports-webapp
|
ed3d8efceac4cfb694a51241cf81023ed10aade6
|
[
"BSD-2-Clause"
] | 28
|
2018-05-09T18:03:07.000Z
|
2022-03-30T02:21:56.000Z
|
# Generated by Django 2.2.10 on 2020-05-01 11:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for build history: Builder and BuildHistory tables plus query indexes."""
    initial = True
    dependencies = [
    ]
    operations = [
        # One row per buildbot builder, with a short "10.XX" display name.
        migrations.CreateModel(
            name='Builder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=100, verbose_name='Name of the builder as per Buildbot')),
                ('display_name', models.CharField(db_index=True, default='', max_length=20, verbose_name='Simplified builder name: 10.XX')),
            ],
            options={
                'verbose_name': 'Builder',
                'verbose_name_plural': 'Builders',
                'db_table': 'builder',
            },
        ),
        # One row per port build, linked to the builder that ran it.
        migrations.CreateModel(
            name='BuildHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('build_id', models.IntegerField()),
                ('status', models.CharField(max_length=50)),
                ('port_name', models.CharField(max_length=100)),
                ('time_start', models.DateTimeField()),
                ('time_elapsed', models.DurationField(null=True)),
                ('watcher_id', models.IntegerField()),
                ('builder_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='buildhistory.Builder')),
            ],
            options={
                'verbose_name': 'Build',
                'verbose_name_plural': 'Builds',
                'db_table': 'builds',
            },
        ),
        # Composite and single-column indexes matching the app's query patterns.
        migrations.AddIndex(
            model_name='buildhistory',
            index=models.Index(fields=['port_name', 'builder_name', '-build_id'], name='builds_port_na_690929_idx'),
        ),
        migrations.AddIndex(
            model_name='buildhistory',
            index=models.Index(fields=['port_name', 'builder_name', '-time_start'], name='builds_port_na_a30f18_idx'),
        ),
        migrations.AddIndex(
            model_name='buildhistory',
            index=models.Index(fields=['port_name', 'status', 'builder_name'], name='builds_port_na_a1a05a_idx'),
        ),
        migrations.AddIndex(
            model_name='buildhistory',
            index=models.Index(fields=['port_name', 'builder_name'], name='builds_port_na_9b04f4_idx'),
        ),
        migrations.AddIndex(
            model_name='buildhistory',
            index=models.Index(fields=['-time_start'], name='builds_time_st_741e8b_idx'),
        ),
        migrations.AddIndex(
            model_name='buildhistory',
            index=models.Index(fields=['port_name'], name='builds_port_na_f9cad8_idx'),
        ),
        migrations.AddIndex(
            model_name='buildhistory',
            index=models.Index(fields=['status'], name='builds_status_e27daf_idx'),
        ),
        migrations.AddIndex(
            model_name='buildhistory',
            index=models.Index(fields=['builder_name'], name='builds_builder_5931b0_idx'),
        ),
    ]
| 40.658228
| 140
| 0.578456
|
4a098da1db7e793c14e93b6b0ab900be7dac7a69
| 5,505
|
py
|
Python
|
rpython/rlib/test/test_rgil.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
rpython/rlib/test/test_rgil.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
rpython/rlib/test/test_rgil.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
from __future__ import print_function
from rpython.rlib import rgil
from rpython.rlib.debug import debug_print
from rpython.translator.c.test.test_standalone import StandaloneTests
from rpython.config.translationoption import get_combined_translation_config
class BaseTestGIL(StandaloneTests):
    """GIL tests: each test translates a small ``main`` to a standalone binary
    (via StandaloneTests.compile) and checks its stdout."""
    def test_simple(self):
        """release/acquire/yield_thread run without error in a compiled program."""
        def main(argv):
            rgil.release()
            # don't have the GIL here
            rgil.acquire()
            rgil.yield_thread()
            print("OK") # there is also a release/acquire pair here
            return 0
        # Run once untranslated first as a sanity check, then compiled.
        main([])
        t, cbuilder = self.compile(main)
        data = cbuilder.cmdexec('')
        assert data == "OK\n"
    def test_after_thread_switch(self):
        """invoke_after_thread_switch() callback fires around each release/acquire pair."""
        class Foo:
            pass
        foo = Foo()
        foo.counter = 0
        def seeme():
            foo.counter += 1
        def main(argv):
            rgil.invoke_after_thread_switch(seeme)
            print("Test") # one release/acquire pair here
            print(foo.counter)
            print(foo.counter)
            return 0
        t, cbuilder = self.compile(main)
        data = cbuilder.cmdexec('')
        # Expect the counter to be bumped once per release/acquire pair:
        # once by print("Test"), once by the first print(foo.counter).
        assert data == "Test\n1\n2\n"
    def test_am_I_holding_the_GIL(self):
        """am_I_holding_the_GIL() reflects release/acquire/yield transitions."""
        def check(name, expected=True):
            # we may not have the GIL here, don't use "print"
            debug_print(name)
            if rgil.am_I_holding_the_GIL() != expected:
                debug_print('assert failed at point', name)
                debug_print('rgil.gil_get_holder() ==', rgil.gil_get_holder())
                assert False
        def main(argv):
            check('1')
            rgil.release()
            # don't have the GIL here
            check('2', False)
            rgil.acquire()
            check('3')
            rgil.yield_thread()
            check('4')
            print("OK") # there is also a release/acquire pair here
            check('5')
            return 0
        # Run once untranslated first as a sanity check, then compiled.
        main([])
        t, cbuilder = self.compile(main)
        data = cbuilder.cmdexec('')
        assert data == "OK\n"
    def test_multiple_threads(self):
        """Stress am_I_holding_the_GIL() from 4 threads doing randomized GIL ops."""
        import time, random
        from rpython.rlib import rthread
        def check(name, nextop, expected=True):
            # we may not have the GIL here, don't use "print"
            if rgil.am_I_holding_the_GIL() != expected:
                debug_print('assert failed at point', name, 'at', nextop)
                debug_print('rgil.gil_get_holder() ==', rgil.gil_get_holder())
                assert False
        # Seed from the clock but print it so a failing run can be reproduced.
        seed = int(time.time())
        print("Random seed:", seed)
        random.seed(seed)
        # This is just a complicated way of simulating random work.
        # We randomly release the GIL in various ways from 4 different threads
        # and check that at least rgil.am_I_holding_the_GIL() is sane.
        OP_YIELD = 0
        OP_RELEASE_AND_ACQUIRE = 1
        OP_BUSY = 2 # without releasing the GIL
        OP_SLEEP = 3 # time.sleep() always releases the GIL
        OPS = [OP_YIELD, OP_RELEASE_AND_ACQUIRE, OP_BUSY, OP_SLEEP]
        N_THREADS = 4
        # Pre-generate each thread's op stream; OP_BUSY/OP_SLEEP are followed
        # by a microsecond-scale duration argument in the same list.
        ops_by_thread = []
        for i in range(N_THREADS):
            ops = []
            for j in range(10000):
                op = random.choice(OPS)
                ops.append(op)
                if op >= 2:
                    ops.append(random.randint(0, 1000))
            ops_by_thread.append(ops)
        class Glob:
            def __init__(self):
                self.my_locks = []
                self.n_threads = 0
        glob = Glob()
        def do_random_work():
            thread_index = glob.n_threads
            glob.n_threads += 1
            ops = ops_by_thread[thread_index]
            nextop = 0
            while nextop < len(ops):
                op = ops[nextop]
                nextop += 1
                if op == OP_YIELD:
                    rgil.yield_thread()
                    check("after yield", nextop)
                elif op == OP_RELEASE_AND_ACQUIRE:
                    rgil.release()
                    check("after release_gil", nextop, expected=False)
                    rgil.acquire()
                    check("after acquire_gil", nextop)
                else:
                    arg = ops[nextop]
                    nextop += 1
                    if op == OP_BUSY:
                        end_time = time.time() + arg * 1e-6
                        while time.time() < end_time:
                            pass
                        check("after busy work", nextop)
                    else:
                        time.sleep(arg * 1e-6)
                        check("after time.sleep()", nextop)
            # Signal completion by releasing this thread's finish lock.
            finish_lock = glob.my_locks[thread_index]
            finish_lock.release()
        def main(argv):
            for j in range(N_THREADS):
                lock = rthread.allocate_lock()
                lock.acquire(True)
                glob.my_locks.append(lock)
            for j in range(N_THREADS):
                rthread.start_new_thread(do_random_work, ())
            # Wait for every worker to release its finish lock.
            for j in range(N_THREADS):
                glob.my_locks[j].acquire(True)
            print("OK")
            return 0
        # Threads require translation.thread=True in the translation config.
        self.config = get_combined_translation_config(
            overrides={"translation.thread": True})
        t, cbuilder = self.compile(main)
        data = cbuilder.cmdexec('')
        assert data == "OK\n"
class TestGILShadowStack(BaseTestGIL):
    """Run the GIL test suite with the minimark GC and shadowstack root finder."""
    gc = 'minimark'
    gcrootfinder = 'shadowstack'
| 32.767857
| 78
| 0.520981
|
4a098e3314d42292ac3451ae6899e242c972e14a
| 3,828
|
py
|
Python
|
fbchat/_thread.py
|
PapitomasterDeveloper/fbchat
|
b8f83610e7c1381f1783f010a6953381bb553c42
|
[
"BSD-3-Clause"
] | 1
|
2019-11-02T14:44:05.000Z
|
2019-11-02T14:44:05.000Z
|
fbchat/_thread.py
|
PapitomasterDeveloper/fbchat
|
b8f83610e7c1381f1783f010a6953381bb553c42
|
[
"BSD-3-Clause"
] | null | null | null |
fbchat/_thread.py
|
PapitomasterDeveloper/fbchat
|
b8f83610e7c1381f1783f010a6953381bb553c42
|
[
"BSD-3-Clause"
] | null | null | null |
import attr
from ._core import Enum
class ThreadType(Enum):
    """Used to specify what type of Facebook thread is being used.
    See :ref:`intro_threads` for more info.
    """
    USER = 1
    GROUP = 2
    PAGE = 3
    def _to_class(self):
        """Convert this enum value to the corresponding class."""
        # Imported lazily to avoid a circular import with the model modules.
        from . import _user, _group, _page
        return {
            ThreadType.USER: _user.User,
            ThreadType.GROUP: _group.Group,
            ThreadType.PAGE: _page.Page,
        }[self]
class ThreadLocation(Enum):
    """Used to specify where a thread is located (inbox, pending, archived, other)."""
    # NOTE(review): values presumably match Facebook's server-side folder names — confirm.
    INBOX = "INBOX"
    PENDING = "PENDING"
    ARCHIVED = "ARCHIVED"
    OTHER = "OTHER"
class ThreadColor(Enum):
    """Used to specify a thread colors."""
    MESSENGER_BLUE = "#0084ff"
    VIKING = "#44bec7"
    GOLDEN_POPPY = "#ffc300"
    RADICAL_RED = "#fa3c4c"
    SHOCKING = "#d696bb"
    PICTON_BLUE = "#6699cc"
    FREE_SPEECH_GREEN = "#13cf13"
    PUMPKIN = "#ff7e29"
    LIGHT_CORAL = "#e68585"
    MEDIUM_SLATE_BLUE = "#7646ff"
    DEEP_SKY_BLUE = "#20cef5"
    FERN = "#67b868"
    CAMEO = "#d4a88c"
    BRILLIANT_ROSE = "#ff5ca1"
    BILOBA_FLOWER = "#a695c7"
    TICKLE_ME_PINK = "#ff7ca8"
    MALACHITE = "#1adb5b"
    RUBY = "#f01d6a"
    DARK_TANGERINE = "#ff9c19"
    BRIGHT_TURQUOISE = "#0edcde"
    @classmethod
    def _from_graphql(cls, color):
        """Build a ThreadColor from a GraphQL ``AARRGGBB`` color string (or None)."""
        if color is None:
            return None
        if not color:
            # Empty string maps to the default Messenger color.
            return cls.MESSENGER_BLUE
        color = color[2:]  # Strip the alpha value
        value = "#{}".format(color.lower())
        # NOTE(review): _extend_if_invalid comes from the custom Enum base in
        # ._core — presumably it registers unknown color values; confirm.
        return cls._extend_if_invalid(value)
@attr.s
class Thread:
    """Represents a Facebook thread."""

    #: The unique identifier of the thread. Can be used as a ``thread_id``. See :ref:`intro_threads` for more info
    uid = attr.ib(converter=str)
    #: Specifies the type of thread. Can be used as a ``thread_type``. See :ref:`intro_threads` for more info
    type = None
    #: A URL to the thread's picture
    photo = attr.ib(None)
    #: The name of the thread
    name = attr.ib(None)
    #: Datetime when the thread was last active / when the last message was sent
    last_active = attr.ib(None)
    #: Number of messages in the thread
    message_count = attr.ib(None)
    #: Set :class:`Plan`
    plan = attr.ib(None)

    @staticmethod
    def _parse_customization_info(data):
        """Extract emoji/color/nickname settings from a GraphQL thread payload.

        Returns an empty dict when no customization info is present.
        """
        if data is None or data.get("customization_info") is None:
            return {}
        info = data["customization_info"]
        rtn = {
            "emoji": info.get("emoji"),
            "color": ThreadColor._from_graphql(info.get("outgoing_bubble_color")),
        }
        if (
            data.get("thread_type") == "GROUP"
            or data.get("is_group_thread")
            or data.get("thread_key", {}).get("thread_fbid")
        ):
            # Group thread: map every participant id to their nickname.
            rtn["nicknames"] = {}
            for k in info.get("participant_customizations", []):
                rtn["nicknames"][k["participant_id"]] = k.get("nickname")
        elif info.get("participant_customizations"):
            # 1:1 thread: at most two entries — the other user and ourselves.
            # (De-duplicated from the original's copy-pasted pc[0]/pc[1] branches.)
            uid = data.get("thread_key", {}).get("other_user_id") or data.get("id")
            pc = info["participant_customizations"]
            for item in pc[:2]:
                if item.get("participant_id") == uid:
                    rtn["nickname"] = item.get("nickname")
                else:
                    rtn["own_nickname"] = item.get("nickname")
        return rtn

    def _to_send_data(self):
        # TODO: Only implement this in subclasses
        return {"other_user_fbid": self.uid}
| 30.624
| 111
| 0.576803
|
4a098eac2fb571ad759df8dcb2f2573a8a0414fe
| 3,049
|
py
|
Python
|
fhir/resources/tests/test_guidanceresponse.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/tests/test_guidanceresponse.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/tests/test_guidanceresponse.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/GuidanceResponse
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import guidanceresponse
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class GuidanceResponseTests(unittest.TestCase):
    """Round-trip tests for the FHIR R4 GuidanceResponse resource."""
    def instantiate_from(self, filename):
        """Load a JSON fixture (from FHIR_UNITTEST_DATADIR) and build a GuidanceResponse."""
        datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
        with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
            js = json.load(handle)
            self.assertEqual("GuidanceResponse", js["resourceType"])
        return guidanceresponse.GuidanceResponse(js)
    def testGuidanceResponse1(self):
        """Fixture -> model -> JSON -> model: every checked field must survive the round trip."""
        inst = self.instantiate_from("guidanceresponse-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a GuidanceResponse instance")
        self.implGuidanceResponse1(inst)
        js = inst.as_json()
        self.assertEqual("GuidanceResponse", js["resourceType"])
        inst2 = guidanceresponse.GuidanceResponse(js)
        self.implGuidanceResponse1(inst2)
    def implGuidanceResponse1(self, inst):
        """Field-by-field assertions shared by both instantiations above."""
        self.assertEqual(
            force_bytes(inst.contained[0].id), force_bytes("outputParameters1")
        )
        self.assertEqual(force_bytes(inst.id), force_bytes("example"))
        self.assertEqual(
            force_bytes(inst.identifier[0].system), force_bytes("http://example.org")
        )
        self.assertEqual(
            force_bytes(inst.identifier[0].value), force_bytes("guidanceResponse1")
        )
        self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
        self.assertEqual(
            force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
        )
        self.assertEqual(
            force_bytes(inst.meta.tag[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
        )
        self.assertEqual(
            force_bytes(inst.moduleUri),
            force_bytes(
                "http://someguidelineprovider.org/radiology-appropriateness-guidelines.html"
            ),
        )
        self.assertEqual(
            inst.occurrenceDateTime.date, FHIRDate("2017-03-10T16:02:00Z").date
        )
        self.assertEqual(inst.occurrenceDateTime.as_json(), "2017-03-10T16:02:00Z")
        self.assertEqual(
            force_bytes(inst.reasonCode[0].text),
            force_bytes("Guideline Appropriate Ordering Assessment"),
        )
        self.assertEqual(
            force_bytes(inst.requestIdentifier.system),
            force_bytes("http://example.org"),
        )
        self.assertEqual(
            force_bytes(inst.requestIdentifier.value), force_bytes("guidanceRequest1")
        )
        self.assertEqual(force_bytes(inst.status), force_bytes("success"))
        self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
| 36.73494
| 92
| 0.659233
|
4a09904c29032b64547bda625f50de972d48cfd9
| 6,050
|
py
|
Python
|
postman.py
|
ssttkkl/nonebot-plugin-pixiv
|
f2276c923f45ceececd42474f43b5e60d1d9e504
|
[
"MIT"
] | 5
|
2022-02-10T11:10:49.000Z
|
2022-03-31T05:25:30.000Z
|
postman.py
|
ssttkkl/nonebot-plugin-pixiv
|
f2276c923f45ceececd42474f43b5e60d1d9e504
|
[
"MIT"
] | 1
|
2022-01-16T10:50:00.000Z
|
2022-01-16T11:41:28.000Z
|
postman.py
|
ssttkkl/nonebot-plugin-pixiv
|
f2276c923f45ceececd42474f43b5e60d1d9e504
|
[
"MIT"
] | null | null | null |
from asyncio import create_task
import dataclasses
from io import BytesIO
import json
import typing
from h11 import Data
from nonebot.adapters.onebot.v11 import Bot, Message, MessageSegment
from nonebot.adapters.onebot.v11.event import MessageEvent, GroupMessageEvent
from nonebot.utils import DataclassEncoder
from .model import Illust
from .config import Config
from .data_source import PixivDataSource
from .pkg_context import context
@context.export_singleton()
class Postman:
    """Renders pixiv illusts as OneBot v11 messages and delivers them.

    Handles tag-based censoring (per ``pixiv_block_action``), replying to the
    triggering event, and bundling multiple illusts into a group
    forward-message.
    """

    conf = context.require(Config)
    data_source = context.require(PixivDataSource)

    async def make_illust_msg(self, illust: Illust,
                              number: typing.Optional[int] = None) -> Message:
        """Build the message for one illust: image (unless blocked) plus caption.

        ``number`` is an optional ordinal rendered as ``#<number>`` before the
        title line.
        """
        msg = Message()
        if illust.has_tags(self.conf.pixiv_block_tags):
            # Illust carries a blocked tag; behavior depends on configuration.
            if self.conf.pixiv_block_action == "no_image":
                msg.append("该画像因含有不可描述的tag而被自主规制\n")
            elif self.conf.pixiv_block_action == "completely_block":
                return Message(MessageSegment.text("该画像因含有不可描述的tag而被自主规制"))
            elif self.conf.pixiv_block_action == "no_reply":
                return Message()
        else:
            with BytesIO() as bio:
                bio.write(await self.data_source.image(illust))
                msg.append(MessageSegment.image(bio))
        if number is not None:
            msg.append(f"#{number}")
        msg.append(f"「{illust.title}」\n"
                   f"作者:{illust.user.name}\n"
                   f"发布时间:{illust.create_date.strftime('%Y-%m-%d %H:%M:%S')}\n"
                   f"https://www.pixiv.net/artworks/{illust.id}")
        return msg

    async def send_message(self, msg: typing.Union[str, Message],
                           *, bot: Bot,
                           event: MessageEvent = None,
                           user_id: typing.Optional[int] = None,
                           group_id: typing.Optional[int] = None):
        """Send ``msg`` as a reply to ``event``, or directly to a group/user."""
        if event is not None:
            if isinstance(event, MessageEvent):
                # Prepend a reply segment so the answer quotes the trigger.
                if isinstance(msg, Message):
                    msg = Message(
                        [MessageSegment.reply(event.message_id), *msg])
                else:
                    msg = Message([MessageSegment.reply(
                        event.message_id), MessageSegment.text(msg)])
            await bot.send(event, msg)
        elif group_id:
            await bot.send_group_msg(group_id=group_id, message=msg)
        else:
            await bot.send_msg(user_id=user_id, message=msg)

    async def send_illust(self, illust: Illust,
                          header: typing.Union[str,
                                               MessageSegment, None] = None,
                          number: typing.Optional[int] = None,
                          *, bot: Bot,
                          event: MessageEvent = None,
                          user_id: typing.Optional[int] = None,
                          group_id: typing.Optional[int] = None):
        """Send a single illust, optionally prefixed with a header text/segment."""
        msg = Message()
        if header is not None:
            # Fix: the original tested ``header is str`` — an identity check
            # against the *type* object that is always False for instances.
            # isinstance matches the intent (and send_illusts's handling).
            if isinstance(header, str):
                msg.append(MessageSegment.text(header))
            else:
                msg.append(header)
        msg.extend(await self.make_illust_msg(illust, number))
        await self.send_message(msg, bot=bot, event=event, user_id=user_id, group_id=group_id)

    async def send_illusts(self, illusts: typing.Union[Illust, typing.Iterable[Illust]],
                           header: typing.Union[str,
                                                MessageSegment, None] = None,
                           number: typing.Optional[int] = None,
                           *, bot: Bot,
                           event: MessageEvent = None,
                           user_id: typing.Optional[int] = None,
                           group_id: typing.Optional[int] = None):
        """Send one or many illusts; several illusts in a group chat are
        bundled into a single forward-message."""
        if isinstance(illusts, Illust):
            await self.send_illust(illusts, header, number, bot=bot, event=event, user_id=user_id, group_id=group_id)
        # NOTE(review): len() assumes ``illusts`` is a sized sequence although
        # the annotation allows any Iterable — confirm callers never pass a generator.
        elif len(illusts) == 1:
            await self.send_illust(illusts[0], header, number, bot=bot, event=event, user_id=user_id, group_id=group_id)
        else:
            # Render all illusts concurrently while we resolve the target chat.
            msg_fut = [create_task(self.make_illust_msg(illust, number + i if number is not None else None))
                       for i, illust in enumerate(illusts)]
            if event is not None:
                if "group_id" in event.__fields__ and event.group_id:
                    group_id = event.group_id
                if "user_id" in event.__fields__ and event.user_id:
                    user_id = event.user_id
            if group_id:  # send as a combined forward-message
                # Use the bot's group card (fall back to nickname) as the
                # forward-node author name.
                self_info = await bot.get_group_member_info(group_id=group_id, user_id=bot.self_id)
                if self_info["card"]:
                    nickname = self_info["card"]
                else:
                    nickname = self_info["nickname"]
                # Build the forward nodes: optional header first, then one
                # node per illust message (segments serialized to dicts).
                messages = []
                if header is not None:
                    if isinstance(header, str):
                        header = MessageSegment.text(header)
                    messages.append([dataclasses.asdict(header)])
                for fut in msg_fut:
                    msg = await fut
                    messages.append([dataclasses.asdict(seg) for seg in msg])
                messages = [{
                    "type": "node",
                    "data": {
                        "name": nickname,
                        "uin": bot.self_id,
                        "content": msg
                    }
                } for msg in messages]
                await bot.send_group_forward_msg(
                    group_id=group_id,
                    messages=messages
                )
            else:
                # Private chat: no forward-message support, send sequentially.
                if header:
                    await self.send_message(header, bot=bot, user_id=user_id)
                for fut in msg_fut:
                    await self.send_message(await fut, bot=bot, user_id=user_id)
# Public API of this module.
__all__ = ("Postman", )
| 40.878378
| 120
| 0.527107
|
4a09907de4677706eb0623812673b4fc946422bc
| 14,911
|
py
|
Python
|
frille-lang/lib/python3.6/site-packages/ipykernel/tests/test_message_spec.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | 3
|
2021-04-03T18:20:56.000Z
|
2021-09-09T17:44:46.000Z
|
frille-lang/lib/python3.6/site-packages/ipykernel/tests/test_message_spec.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | 1
|
2021-04-26T11:00:23.000Z
|
2021-04-26T11:00:23.000Z
|
frille-lang/lib/python3.6/site-packages/ipykernel/tests/test_message_spec.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | 3
|
2021-01-31T16:40:52.000Z
|
2021-08-29T18:32:34.000Z
|
"""Test suite for our zeromq-based message specification."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
import sys
from distutils.version import LooseVersion as V
from queue import Empty
import nose.tools as nt
from nose.plugins.skip import SkipTest
from traitlets import (
HasTraits, TraitError, Bool, Unicode, Dict, Integer, List, Enum
)
from .utils import (TIMEOUT, start_global_kernel, flush_channels, execute,
get_reply, )
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Global kernel client shared by every test in this module (set in setup()).
KC = None
def setup():
    """Start the shared global kernel once for the whole module (nose-style setup)."""
    global KC
    KC = start_global_kernel()
#-----------------------------------------------------------------------------
# Message Spec References
#-----------------------------------------------------------------------------
class Reference(HasTraits):
    """
    Base class for message spec specification testing.
    This class is the core of the message specification test. The
    idea is that child classes implement trait attributes for each
    message keys, so that message keys can be tested against these
    traits using :meth:`check` method.
    """
    def check(self, d):
        """validate a dict against our traits"""
        for key in self.trait_names():
            assert key in d
            # FIXME: always allow None, probably not a good idea
            if d[key] is None:
                continue
            try:
                # Assigning through the trait runs traitlets validation.
                setattr(self, key, d[key])
            except TraitError as e:
                assert False, str(e)
class Version(Unicode):
    """A Unicode trait restricted to an optional [min, max] version range."""
    def __init__(self, *args, **kwargs):
        self.min = kwargs.pop('min', None)
        self.max = kwargs.pop('max', None)
        kwargs['default_value'] = self.min
        super(Version, self).__init__(*args, **kwargs)
    def validate(self, obj, value):
        if self.min and V(value) < V(self.min):
            raise TraitError("bad version: %s < %s" % (value, self.min))
        if self.max and (V(value) > V(self.max)):
            raise TraitError("bad version: %s > %s" % (value, self.max))
        # traitlets expects validate() to return the (possibly coerced) value;
        # without this return the stored trait value silently becomes None.
        return super(Version, self).validate(obj, value)
class RMessage(Reference):
    """Spec for a full message dict; also validates its (parent) header."""
    msg_id = Unicode()
    msg_type = Unicode()
    header = Dict()
    parent_header = Dict()
    content = Dict()
    def check(self, d):
        super(RMessage, self).check(d)
        RHeader().check(self.header)
        # parent_header may legitimately be empty (e.g. unsolicited messages).
        if self.parent_header:
            RHeader().check(self.parent_header)
class RHeader(Reference):
    """Spec for a message header; requires protocol version >= 5.0."""
    msg_id = Unicode()
    msg_type = Unicode()
    session = Unicode()
    username = Unicode()
    version = Version(min='5.0')
# Matches MIME types such as "text/plain" or "application/vnd.api+json".
mime_pat = re.compile(r'^[\w\-\+\.]+/[\w\-\+\.]+$')
class MimeBundle(Reference):
    """Spec for data/metadata MIME bundles (display_data, execute_result, ...)."""
    metadata = Dict()
    data = Dict()
    def _data_changed(self, name, old, new):
        # Every key must look like a MIME type and every value must be text.
        for k,v in new.items():
            assert mime_pat.match(k)
            assert isinstance(v, str)
# shell replies
class Reply(Reference):
    """Base spec for shell replies: every reply carries a status field."""
    status = Enum(('ok', 'error'), default_value='ok')
class ExecuteReply(Reply):
    """Spec for execute_reply; dispatches to the ok/error sub-spec by status."""
    execution_count = Integer()
    def check(self, d):
        Reference.check(self, d)
        if d['status'] == 'ok':
            ExecuteReplyOkay().check(d)
        elif d['status'] == 'error':
            ExecuteReplyError().check(d)
class ExecuteReplyOkay(Reply):
    """Spec for execute_reply content when status == 'ok'."""
    status = Enum(('ok',))
    user_expressions = Dict()
class ExecuteReplyError(Reply):
    """Spec for execute_reply content when status == 'error'."""
    ename = Unicode()
    evalue = Unicode()
    traceback = List(Unicode())
class InspectReply(Reply, MimeBundle):
    """Spec for inspect_reply: a MIME bundle plus a 'found' flag."""
    found = Bool()
class ArgSpec(Reference):
    """Spec for an argument-spec dict (args/varargs/varkw/defaults)."""
    args = List(Unicode())
    varargs = Unicode()
    varkw = Unicode()
    defaults = List()
class Status(Reference):
    """Spec for IOPub status messages."""
    execution_state = Enum(('busy', 'idle', 'starting'), default_value='busy')
class CompleteReply(Reply):
    """Spec for complete_reply: matches plus the replaced cursor span."""
    matches = List(Unicode())
    cursor_start = Integer()
    cursor_end = Integer()
    status = Unicode()
class LanguageInfo(Reference):
    """Spec for the language_info dict nested inside kernel_info_reply."""
    name = Unicode('python')
    version = Unicode(sys.version.split()[0])
class KernelInfoReply(Reply):
    """Spec for kernel_info_reply; also validates the nested language_info dict."""
    protocol_version = Version(min='5.0')
    implementation = Unicode('ipython')
    implementation_version = Version(min='2.1')
    language_info = Dict()
    banner = Unicode()
    def check(self, d):
        Reference.check(self, d)
        LanguageInfo().check(d['language_info'])
class ConnectReply(Reference):
    """Spec for connect_reply: the port number of every kernel channel."""
    shell_port = Integer()
    control_port = Integer()
    stdin_port = Integer()
    iopub_port = Integer()
    hb_port = Integer()
class CommInfoReply(Reply):
    """Spec for comm_info_reply: a dict of open comms."""
    comms = Dict()
class IsCompleteReply(Reference):
    """Spec for is_complete_reply; 'incomplete' additionally requires an indent."""
    status = Enum(('complete', 'incomplete', 'invalid', 'unknown'), default_value='complete')
    def check(self, d):
        Reference.check(self, d)
        if d['status'] == 'incomplete':
            IsCompleteReplyIncomplete().check(d)
class IsCompleteReplyIncomplete(Reference):
    """Extra field of is_complete_reply when status == 'incomplete'."""
    indent = Unicode()
# IOPub messages
class ExecuteInput(Reference):
    """Spec for IOPub execute_input messages."""
    code = Unicode()
    execution_count = Integer()
class Error(ExecuteReplyError):
    """Errors are the same as ExecuteReply, but without status"""
    # NOTE(review): presumably setting the class attribute to None shadows the
    # inherited 'status' trait so check() skips it — confirm with traitlets.
    status = None # no status field
class Stream(Reference):
    """Spec for IOPub stream messages (stdout/stderr text)."""
    name = Enum(('stdout', 'stderr'), default_value='stdout')
    text = Unicode()
class DisplayData(MimeBundle):
    """Spec for display_data: a plain MIME bundle."""
    pass
class ExecuteResult(MimeBundle):
    """Spec for execute_result: a MIME bundle plus the execution count."""
    execution_count = Integer()
class HistoryReply(Reply):
    """Spec for history_reply: a list of history entries."""
    history = List(List())
# Map each msg_type to the Reference instance that validates its content.
references = {
    'execute_reply' : ExecuteReply(),
    'inspect_reply' : InspectReply(),
    'status' : Status(),
    'complete_reply' : CompleteReply(),
    'kernel_info_reply': KernelInfoReply(),
    'connect_reply': ConnectReply(),
    'comm_info_reply': CommInfoReply(),
    'is_complete_reply': IsCompleteReply(),
    'execute_input' : ExecuteInput(),
    'execute_result' : ExecuteResult(),
    'history_reply' : HistoryReply(),
    'error' : Error(),
    'stream' : Stream(),
    'display_data' : DisplayData(),
    'header' : RHeader(),
}
# NOTE(review): the bare string below is a no-op expression, not a docstring;
# it only annotates the dict above for readers.
"""
Specifications of `content` part of the reply messages.
"""
def validate_message(msg, msg_type=None, parent=None):
    """Validate a message against the spec references.

    All checks run eagerly via assertions. (The old docstring claimed this
    was a generator of tests; there is no ``yield`` in the body any more.)

    If msg_type and/or parent are given, the msg_type and/or parent msg_id
    are compared with the given values.
    """
    RMessage().check(msg)
    if msg_type:
        assert msg['msg_type'] == msg_type
    if parent:
        assert msg['parent_header']['msg_id'] == parent
    content = msg['content']
    ref = references[msg['msg_type']]
    ref.check(content)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# Shell channel
def test_execute():
    """A plain execute request yields a valid execute_reply."""
    flush_channels()
    msg_id = KC.execute(code='x=1')
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'execute_reply', msg_id)
def test_execute_silent():
    """silent=True suppresses IOPub output and does not bump execution_count."""
    flush_channels()
    msg_id, reply = execute(code='x=1', silent=True)
    # flush status=idle
    status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    validate_message(status, 'status', msg_id)
    assert status['content']['execution_state'] == 'idle'
    # No further IOPub messages should follow a silent execute.
    nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
    count = reply['execution_count']
    msg_id, reply = execute(code='x=2', silent=True)
    # flush status=idle
    status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    validate_message(status, 'status', msg_id)
    assert status['content']['execution_state'] == 'idle'
    nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
    count_2 = reply['execution_count']
    assert count_2 == count
def test_execute_error():
    """A raising execute produces an error reply and an IOPub error message."""
    flush_channels()
    msg_id, reply = execute(code='1/0')
    assert reply['status'] == 'error'
    assert reply['ename'] == 'ZeroDivisionError'
    error = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    validate_message(error, 'error', msg_id)
def test_execute_inc():
    """execute request should increment execution_count"""
    flush_channels()
    msg_id, reply = execute(code='x=1')
    count = reply['execution_count']
    flush_channels()
    msg_id, reply = execute(code='x=2')
    count_2 = reply['execution_count']
    assert count_2 == count+1
def test_execute_stop_on_error():
    """execute request should not abort execution queue with stop_on_error False"""
    flush_channels()
    fail = '\n'.join([
        # sleep to ensure subsequent message is waiting in the queue to be aborted
        'import time',
        'time.sleep(0.5)',
        'raise ValueError',
    ])
    KC.execute(code=fail)
    msg_id = KC.execute(code='print("Hello")')
    # Default stop_on_error=True: the queued request gets aborted.
    KC.get_shell_msg(timeout=TIMEOUT)
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    assert reply['content']['status'] == 'aborted'
    flush_channels()
    # With stop_on_error=False, the queued request still runs.
    KC.execute(code=fail, stop_on_error=False)
    msg_id = KC.execute(code='print("Hello")')
    KC.get_shell_msg(timeout=TIMEOUT)
    reply = KC.get_shell_msg(timeout=TIMEOUT)
    assert reply['content']['status'] == 'ok'
def test_user_expressions():
    """user_expressions are evaluated and returned as MIME bundles."""
    flush_channels()
    msg_id, reply = execute(code='x=1', user_expressions=dict(foo='x+1'))
    user_expressions = reply['user_expressions']
    nt.assert_equal(user_expressions, {'foo': {
        'status': 'ok',
        'data': {'text/plain': '2'},
        'metadata': {},
    }})
def test_user_expressions_fail():
    """A failing user_expression reports status 'error' with the exception name."""
    flush_channels()
    msg_id, reply = execute(code='x=0', user_expressions=dict(foo='nosuchname'))
    user_expressions = reply['user_expressions']
    foo = user_expressions['foo']
    assert foo['status'] == 'error'
    assert foo['ename'] == 'NameError'
def test_oinfo():
    """An inspect request yields a valid inspect_reply."""
    flush_channels()
    msg_id = KC.inspect('a')
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'inspect_reply', msg_id)
def test_oinfo_found():
    """Inspecting a defined name sets found=True and includes type/docstring text."""
    flush_channels()
    msg_id, reply = execute(code='a=5')
    msg_id = KC.inspect('a')
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'inspect_reply', msg_id)
    content = reply['content']
    assert content['found']
    text = content['data']['text/plain']
    assert 'Type:' in text
    assert 'Docstring:' in text
def test_oinfo_detail():
flush_channels()
msg_id, reply = execute(code='ip=get_ipython()')
msg_id = KC.inspect('ip.object_inspect', cursor_pos=10, detail_level=1)
reply = get_reply(KC, msg_id, TIMEOUT)
validate_message(reply, 'inspect_reply', msg_id)
content = reply['content']
assert content['found']
text = content['data']['text/plain']
assert 'Signature:' in text
assert 'Source:' in text
def test_oinfo_not_found():
flush_channels()
msg_id = KC.inspect('dne')
reply = get_reply(KC, msg_id, TIMEOUT)
validate_message(reply, 'inspect_reply', msg_id)
content = reply['content']
assert not content['found']
def test_complete():
    """complete request returns every name matching the prefix"""
    flush_channels()
    msg_id, reply = execute(code="alpha = albert = 5")
    # cursor_pos=2: completing the prefix 'al'.
    msg_id = KC.complete('al', 2)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'complete_reply', msg_id)
    matches = reply['content']['matches']
    for name in ('alpha', 'albert'):
        assert name in matches
def test_kernel_info_request():
    """kernel_info request yields a schema-valid kernel_info_reply"""
    flush_channels()
    msg_id = KC.kernel_info()
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'kernel_info_reply', msg_id)
def test_connect_request():
    """connect_request should yield a schema-valid connect_reply.

    Fix: the original returned right after sending the request, so the
    remaining lines were dead code — the reply was never fetched or
    validated, and the dead code even rebound ``msg_id`` to an unrelated
    kernel_info request.  Use the connect_request's own msg_id instead.
    """
    flush_channels()
    msg = KC.session.msg('connect_request')
    KC.shell_channel.send(msg)
    msg_id = msg['header']['msg_id']
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'connect_reply', msg_id)
def test_comm_info_request():
    """comm_info request yields a schema-valid comm_info_reply"""
    flush_channels()
    # Older client versions lack comm_info entirely; skip rather than fail.
    if not hasattr(KC, 'comm_info'):
        raise SkipTest()
    msg_id = KC.comm_info()
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'comm_info_reply', msg_id)
def test_single_payload():
    """
    We want to test the set_next_input is not triggered several time per cell.
    This is (was ?) mostly due to the fact that `?` in a loop would trigger
    several set_next_input.
    I'm tempted to think that we actually want to _allow_ multiple
    set_next_input (that's users' choice). But that `?` itself (and ?'s
    transform) should avoid setting multiple set_next_input).
    """
    flush_channels()
    # Three set_next_input calls in one cell must collapse into one payload.
    msg_id, reply = execute(code="ip = get_ipython()\n"
                                 "for i in range(3):\n"
                                 "   ip.set_next_input('Hello There')\n")
    payload = reply['payload']
    next_input_pls = [pl for pl in payload if pl["source"] == "set_next_input"]
    assert len(next_input_pls) == 1
def test_is_complete():
    """is_complete request yields a schema-valid is_complete_reply"""
    flush_channels()
    msg_id = KC.is_complete("a = 1")
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'is_complete_reply', msg_id)
def test_history_range():
    """history 'range' access returns exactly the requested entries"""
    flush_channels()
    msg_id_exec = KC.execute(code='x=1', store_history = True)
    reply_exec = KC.get_shell_msg(timeout=TIMEOUT)
    # session=0 means the current session; [1, 2) selects one entry.
    msg_id = KC.history(hist_access_type = 'range', raw = True, output = True, start = 1, stop = 2, session = 0)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'history_reply', msg_id)
    content = reply['content']
    assert len(content['history']) == 1
def test_history_tail():
    """history 'tail' access returns the n most recent entries"""
    flush_channels()
    msg_id_exec = KC.execute(code='x=1', store_history = True)
    reply_exec = KC.get_shell_msg(timeout=TIMEOUT)
    msg_id = KC.history(hist_access_type = 'tail', raw = True, output = True, n = 1, session = 0)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'history_reply', msg_id)
    content = reply['content']
    assert len(content['history']) == 1
def test_history_search():
    """history 'search' access returns entries matching a glob pattern"""
    flush_channels()
    msg_id_exec = KC.execute(code='x=1', store_history = True)
    reply_exec = KC.get_shell_msg(timeout=TIMEOUT)
    # pattern='*' matches anything; n=1 caps the result count.
    msg_id = KC.history(hist_access_type = 'search', raw = True, output = True, n = 1, pattern = '*', session = 0)
    reply = get_reply(KC, msg_id, TIMEOUT)
    validate_message(reply, 'history_reply', msg_id)
    content = reply['content']
    assert len(content['history']) == 1
# IOPub channel
def test_stream():
    """print output arrives on IOPub as a 'stream' message"""
    flush_channels()
    msg_id, reply = execute("print('hi')")
    stdout = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    validate_message(stdout, 'stream', msg_id)
    content = stdout['content']
    assert content['text'] == 'hi\n'
def test_display_data():
    """display() publishes a 'display_data' message with a text/plain repr"""
    flush_channels()
    msg_id, reply = execute("from IPython.display import display; display(1)")
    display = KC.iopub_channel.get_msg(timeout=TIMEOUT)
    validate_message(display, 'display_data', parent=msg_id)
    data = display['content']['data']
    assert data['text/plain'] == '1'
| 27.259598
| 114
| 0.632553
|
4a0990cd888dc881e749dfc7bfd106750af2169a
| 613
|
py
|
Python
|
tests/functional/exporters/testing/dummy_programmatic_test_plan.py
|
francoisverbeek/testplan
|
eec61d396627f24fc4c7135e3938c83b01311ff1
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/exporters/testing/dummy_programmatic_test_plan.py
|
francoisverbeek/testplan
|
eec61d396627f24fc4c7135e3938c83b01311ff1
|
[
"Apache-2.0"
] | 64
|
2019-04-15T20:56:40.000Z
|
2021-03-23T01:00:30.000Z
|
tests/functional/exporters/testing/dummy_programmatic_test_plan.py
|
LevyForchh/testplan
|
3cca8821acd4df569ae495009317deeb8ba5f3f0
|
[
"Apache-2.0"
] | null | null | null |
import sys
from testplan import test_plan
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan import defaults
from testplan.exporters.testing import WebServerExporter
@testsuite
class Alpha(object):
    """Dummy suite with one always-passing testcase (exercises exporters)."""
    @testcase
    def test_comparison(self, env, result):
        # Trivially true assertion — the plan only needs *some* result data.
        result.equal(1, 1, 'equality description')
@test_plan(
    name='Multiply',
    exporters=WebServerExporter(ui_port=defaults.WEB_SERVER_PORT)
)
def main(plan):
    """Programmatic plan entry point: register a single MultiTest."""
    test = MultiTest(name='MultiplyTest',
                     suites=[Alpha()])
    plan.add(test)
if __name__ == '__main__':
    # Exit 0 on plan success (main() truthy), 1 otherwise.
    sys.exit(not main())
| 22.703704
| 69
| 0.717781
|
4a0990d39c0bd02a8d0b0aa3e71b4c4bbfdd0e42
| 1,685
|
py
|
Python
|
sumo/helper.py
|
wenke727/RoadNetworkCreator
|
a359d2e5c5f0921b1af514c3a88b5e3a25707407
|
[
"MIT"
] | 1
|
2021-04-24T07:56:46.000Z
|
2021-04-24T07:56:46.000Z
|
sumo/helper.py
|
wenke727/RoadNetworkCreator
|
a359d2e5c5f0921b1af514c3a88b5e3a25707407
|
[
"MIT"
] | null | null | null |
sumo/helper.py
|
wenke727/RoadNetworkCreator
|
a359d2e5c5f0921b1af514c3a88b5e3a25707407
|
[
"MIT"
] | null | null | null |
from pyproj import CRS, Transformer
from functools import lru_cache


@lru_cache(maxsize=None)
def _build_transformer(in_sys, out_sys):
    """Build (and memoize) a lon/lat-ordered Transformer between EPSG systems.

    Transformer construction is expensive; the original rebuilt it on every
    proj_trans() call, so cache one per (in_sys, out_sys) pair.
    """
    # always_xy=True: accept/return coordinates in traditional GIS order,
    # i.e. (longitude, latitude) / (easting, northing).
    return Transformer.from_crs(
        CRS(f"EPSG:{in_sys}"), CRS(f"EPSG:{out_sys}"), always_xy=True)


def proj_trans(x, y, in_sys=4326, out_sys=32649, offset=(-799385.77,-2493897.75), precision=2 ):
    """Project a coordinate between CRSs and apply a fixed network offset.

    Args:
        x (float): input x (longitude for EPSG:4326).
        y (float): input y (latitude for EPSG:4326).
        in_sys (int, optional): source EPSG code. Defaults to 4326 (WGS84).
        out_sys (int, optional): target EPSG code. Defaults to 32649
            (UTM zone 49N).
        offset (tuple, optional): (dx, dy) added after projection — the SUMO
            network offset. Defaults to (-799385.77, -2493897.75).
        precision (int, optional): decimal places kept. Defaults to 2.

    Returns:
        tuple: (x, y) projected, shifted by ``offset`` and rounded.
    """
    coord_transfer = _build_transformer(in_sys, out_sys)
    x, y = coord_transfer.transform(x, y)
    x += offset[0]
    y += offset[1]
    return round(x, precision), round(y, precision)
def df_coord_transfor(gdf):
    """Project a GeoDataFrame into the SUMO network's local coordinate frame.

    Reprojects the geometry to UTM zone 49N (the CRS the network was built
    in) and then applies the network offset, storing the shifted coordinates
    in new ``x_`` / ``y_`` columns. Mutates and returns ``gdf``.
    """
    # Coordinate transformation; constants come from the SUMO .net.xml:
    # <location netOffset="-799385.77,-2493897.75" convBoundary="0.00,0.00,18009.61,5593.04" origBoundary="113.832744,22.506539,114.086290,22.692155" projParameter="+proj=utm +zone=49 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"/>
    crs = CRS("+proj=utm +zone=49 +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
    to_crs = crs.to_epsg()
    gdf.to_crs(epsg=to_crs, inplace = True)
    # Apply netOffset so coordinates match SUMO's internal frame.
    gdf.loc[:, "x_"] = gdf.geometry.x - 799385.77
    gdf.loc[:, "y_"] = gdf.geometry.y - 2493897.75
    return gdf
| 40.119048
| 245
| 0.660534
|
4a09910d45470c4d7af0d9a642f988e3eaf07b73
| 5,710
|
py
|
Python
|
library/avi_update_se_data_vnics.py
|
kumaresanhm/Ansible-roleSDK
|
4fe1d53a3738a9ac0644ff57e5f5182eb1d5a319
|
[
"Apache-2.0"
] | null | null | null |
library/avi_update_se_data_vnics.py
|
kumaresanhm/Ansible-roleSDK
|
4fe1d53a3738a9ac0644ff57e5f5182eb1d5a319
|
[
"Apache-2.0"
] | null | null | null |
library/avi_update_se_data_vnics.py
|
kumaresanhm/Ansible-roleSDK
|
4fe1d53a3738a9ac0644ff57e5f5182eb1d5a319
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_update_se_data_vnics
author: Shrikant Chaudhari (shrikant.chaudhari@avinetworks.com)
short_description: Module to update Service Engine's data vnics/vlans configurations.
requirements: [ avisdk ]
options:
data_vnics_config:
description:
- Placeholder for description of property data_vnics of obj type ServiceEngine field.
Here you can specify configuration for data_vnics property of a service engine.
For more details you can refer to swagger specs https://{controller_ip}/swagger/
From above link you can find configurable fields under data_vnics property of a service engine.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Update data vnics and vlan interfaces
avi_update_se_data_vnics:
avi_credentials:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
api_version: "18.1.3"
data_vnics_config:
- if_name: "eth1"
is_asm: false
can_se_dp_takeover: true
is_hsm: false
is_avi_internal_network: false
enabled: true
dhcp_enabled: false
del_pending: false
linux_name: "eth3"
is_mgmt: false
connected: true
vlan_interfaces:
- dhcp_enabled: true
if_name: "eth3"
ip6_autocfg_enabled: false
is_mgmt: false
vlan_id: 0
vnic_networks:
- ip:
ip_addr:
addr: "10.161.56.155"
type: "V4"
mask: 24
mode: "STATIC"
ctlr_alloc: false
vrf_ref: "https://10.10.28.102/api/vrfcontext/vrfcontext-47f8a632-3ab4-427d-9084-433bc06da26d"
vnic_networks:
- ip:
ip_addr:
addr: "10.161.56.154"
type: "V4"
mask: 24
mode: "STATIC"
ctlr_alloc: false
vrf_id: 0
aggregator_chgd: false
mtu: 1500
vrf_ref: "https://10.10.28.102/api/vrfcontext/vrfcontext-47f8a632-3ab4-427d-9084-433bc06da26d"
ip6_autocfg_enabled: false
vlan_id: 0
is_portchannel: false
'''
RETURN = '''
obj:
description: Avi REST resource
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from avi.sdk.avi_api import ApiSession, AviCredentials
from avi.sdk.utils.ansible_utils import (
avi_obj_cmp, cleanup_absent_fields, avi_common_argument_spec,
ansible_return)
from pkg_resources import parse_version
import avi.sdk
sdk_version = getattr(avi.sdk, '__version__', None)
if ((sdk_version is None) or
(sdk_version and
(parse_version(sdk_version) < parse_version('17.1')))):
# It allows the __version__ to be '' as that value is used in development builds
raise ImportError
from avi.sdk.utils.ansible_utils import avi_ansible_api
HAS_AVI = True
except ImportError:
HAS_AVI = False
def main():
    """Ansible module entry point.

    Merges each entry of ``data_vnics_config`` (matched by ``if_name``) into
    the first Service Engine's ``data_vnics`` and PUTs the updated object via
    ``avi_ansible_api``.

    Fixes vs. the original:
    - ``dict.iteritems()`` (Python 2 only — AttributeError on Python 3)
      replaced with ``dict.items()``.
    - The identical merge block was duplicated; merged into one.
    - The HAS_AVI guard ran only *after* the controller session and GET;
      it now fails fast before any network traffic.
    """
    argument_specs = dict(
        data_vnics_config=dict(type='list', ),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail fast if the Avi SDK is missing — before any network traffic.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Create controller session
    api_creds = AviCredentials()
    api_creds.update_from_ansible_module(module)
    api = ApiSession.get_session(
        api_creds.controller, api_creds.username, password=api_creds.password,
        timeout=api_creds.timeout, tenant=api_creds.tenant,
        tenant_uuid=api_creds.tenant_uuid, token=api_creds.token,
        port=api_creds.port)
    path = 'serviceengine'
    # Get existing SE object. NOTE(review): only the first result is updated —
    # presumably a single-SE deployment is assumed; confirm before reuse.
    rsp = api.get(path, api_version=api_creds.api_version)
    existing_se = rsp.json()
    se_obj = existing_se['results'][0]
    data_vnics_config = module.params['data_vnics_config']
    for d_vnic in se_obj['data_vnics']:
        for obj in data_vnics_config:
            config_for = obj.get('if_name', None)
            if not config_for:
                return module.fail_json(msg=(
                    "if_name in a configuration is mandatory. Please provide if_name i.e. vnic's interface name."))
            if config_for == d_vnic['if_name']:
                # Merge the user-supplied settings into the existing vnic.
                for key, val in obj.items():
                    d_vnic[key] = val
    # Hand the modified SE object to the generic Avi API helper as a PUT.
    module.params.update(se_obj)
    module.params.update(
        {
            'avi_api_update_method': 'put',
            'state': 'present'
        }
    )
    module.params.pop('data_vnics_config')
    return avi_ansible_api(module, 'serviceengine',
                           set([]))
if __name__ == '__main__':
    main()
| 34.606061
| 115
| 0.608932
|
4a0991d3ef6682e8949c7392ab615d8dfb15811d
| 4,354
|
py
|
Python
|
backend/api/python_http_client/test/test_api_list_runs_response.py
|
FrancisLfg/pipelines
|
b0466cb9626407f125bf7ce2c9de37991e654a6d
|
[
"Apache-2.0"
] | null | null | null |
backend/api/python_http_client/test/test_api_list_runs_response.py
|
FrancisLfg/pipelines
|
b0466cb9626407f125bf7ce2c9de37991e654a6d
|
[
"Apache-2.0"
] | null | null | null |
backend/api/python_http_client/test/test_api_list_runs_response.py
|
FrancisLfg/pipelines
|
b0466cb9626407f125bf7ce2c9de37991e654a6d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition. # noqa: E501
The version of the OpenAPI document: 1.0.0-dev.1
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfp_server_api
from kfp_server_api.models.api_list_runs_response import ApiListRunsResponse # noqa: E501
from kfp_server_api.rest import ApiException
class TestApiListRunsResponse(unittest.TestCase):
    """ApiListRunsResponse unit test stubs (openapi-generator autogenerated)."""
    def setUp(self):
        # No shared fixtures — instances are built per test in make_instance.
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a test ApiListRunsResponse.

        include_optional is a boolean: when False only required params are
        included, when True both required and optional params are included.
        """
        # model = kfp_server_api.models.api_list_runs_response.ApiListRunsResponse()  # noqa: E501
        if include_optional :
            # Fully populated response: one run carrying a pipeline spec,
            # resource references, timestamps and metrics, plus paging fields.
            return ApiListRunsResponse(
                runs = [
                    kfp_server_api.models.api_run.apiRun(
                        id = '0',
                        name = '0',
                        storage_state = 'STORAGESTATE_AVAILABLE',
                        description = '0',
                        pipeline_spec = kfp_server_api.models.api_pipeline_spec.apiPipelineSpec(
                            pipeline_id = '0',
                            pipeline_name = '0',
                            workflow_manifest = '0',
                            pipeline_manifest = '0',
                            parameters = [
                                kfp_server_api.models.api_parameter.apiParameter(
                                    name = '0',
                                    value = '0', )
                                ], ),
                        resource_references = [
                            kfp_server_api.models.api_resource_reference.apiResourceReference(
                                key = kfp_server_api.models.api_resource_key.apiResourceKey(
                                    type = 'UNKNOWN_RESOURCE_TYPE',
                                    id = '0', ),
                                name = '0',
                                relationship = 'UNKNOWN_RELATIONSHIP', )
                            ],
                        service_account = '0',
                        created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                        scheduled_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                        finished_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                        status = '0',
                        error = '0',
                        metrics = [
                            kfp_server_api.models.api_run_metric.apiRunMetric(
                                name = '0',
                                node_id = '0',
                                number_value = 1.337,
                                format = 'UNSPECIFIED', )
                            ], )
                ],
                total_size = 56,
                next_page_token = '0'
            )
        else :
            # Minimal response: the model has no required fields.
            return ApiListRunsResponse(
        )
    def testApiListRunsResponse(self):
        """Test ApiListRunsResponse"""
        # Smoke test: both variants must construct without raising.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    unittest.main()
| 40.691589
| 138
| 0.532384
|
4a0991e43cb810f29660b5181aac27aaf518f228
| 446
|
py
|
Python
|
picross_generator/model/Image.py
|
samuel-flynn/picross-generator
|
b94d42574451e39d703047b95d75da6364db9e58
|
[
"MIT"
] | null | null | null |
picross_generator/model/Image.py
|
samuel-flynn/picross-generator
|
b94d42574451e39d703047b95d75da6364db9e58
|
[
"MIT"
] | null | null | null |
picross_generator/model/Image.py
|
samuel-flynn/picross-generator
|
b94d42574451e39d703047b95d75da6364db9e58
|
[
"MIT"
] | null | null | null |
from typing import List
from picross_generator.model.ImageMatrix import ImageMatrix
from picross_generator.model.RowColumnMetadata import RowColumnMetadata
class Image:
    """A named picross image: a pixel matrix plus per-row/column clue metadata."""
    # NOTE(review): `name` is declared but never assigned here — presumably
    # set by the caller after construction; confirm against usage sites.
    name : str
    # The pixel grid the puzzle is generated from.
    image_matrix : ImageMatrix
    # Clue metadata, one entry per column / per row.
    column_metadata : List[RowColumnMetadata]
    row_metadata : List[RowColumnMetadata]
    def __init__(self) -> None:
        """Create an empty image with a fresh matrix and no metadata."""
        self.image_matrix = ImageMatrix()
        self.column_metadata = []
        self.row_metadata = []
| 27.875
| 71
| 0.73991
|
4a0992574dd69d00806313ca57d8108332c14b19
| 5,619
|
py
|
Python
|
example/example.py
|
hozblok/DjangoChannelsGraphqlWs
|
77b406fb19053fcb9220a5bb23e1349fde4fbda3
|
[
"MIT"
] | null | null | null |
example/example.py
|
hozblok/DjangoChannelsGraphqlWs
|
77b406fb19053fcb9220a5bb23e1349fde4fbda3
|
[
"MIT"
] | null | null | null |
example/example.py
|
hozblok/DjangoChannelsGraphqlWs
|
77b406fb19053fcb9220a5bb23e1349fde4fbda3
|
[
"MIT"
] | null | null | null |
#
# coding: utf-8
# Copyright (c) 2019 DATADVANCE
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Simple example of the DjangoChannelsGraphqlWs."""
import pathlib
import channels
import django
import graphene
import channels_graphql_ws
# ---------------------------------------------------------------------- GRAPHQL BACKEND
# Store chat history right here: {chatroom_name: [message_dict, ...]}.
chats = {}
class Message(
    graphene.ObjectType, default_resolver=graphene.types.resolver.dict_resolver
):
    """Message GraphQL type (resolved directly from plain history dicts)."""
    chatroom = graphene.String()
    message = graphene.String()
    sender = graphene.String()
class Query(graphene.ObjectType):
    """Root GraphQL query."""
    history = graphene.List(Message, chatroom=graphene.String())
    def resolve_history(self, info, chatroom):
        """Return chat history for `chatroom` (empty list if none yet)."""
        del info
        # dict.get replaces the original's double lookup
        # (`chats[chatroom] if chatroom in chats else []`) — same result.
        return chats.get(chatroom, [])
class SendChatMessage(graphene.Mutation):
    """Send chat message."""
    class Output(graphene.ObjectType):
        """Mutation result."""
        ok = graphene.Boolean()
    class Arguments:
        """Mutation arguments."""
        chatroom = graphene.String()
        message = graphene.String()
        username = graphene.String()
    def mutate(self, info, chatroom, message, username):
        """Mutation "resolver" - broadcast a message to the chatroom."""
        del info
        # Store a message.
        history = chats.setdefault(chatroom, [])
        history.append({"chatroom": chatroom, "message": message, "sender": username})
        # Notify subscribers of this chatroom's subscription group.
        OnNewChatMessage.new_chat_message(
            chatroom=chatroom, message=message, sender=username
        )
        return SendChatMessage.Output(ok=True)
class Mutation(graphene.ObjectType):
    """GraphQL mutations."""
    send_chat_message = SendChatMessage.Field()
class OnNewChatMessage(channels_graphql_ws.Subscription):
    """Subscription triggers on a new chat message."""
    sender = graphene.String()
    chatroom = graphene.String()
    message = graphene.String()
    class Arguments:
        """Subscription arguments."""
        username = graphene.String()
        chatroom = graphene.String()
    def subscribe(self, info, username, chatroom=None):
        """Client subscription handler."""
        del info, username
        # Specify the subscription group client subscribes to;
        # None means "subscribe to all groups".
        return [chatroom] if chatroom is not None else None
    def publish(self, info, username, chatroom=None):
        """Called to prepare the subscription notification message."""
        del info
        # The `self` contains payload delivered from the `broadcast()`.
        message = self["message"]
        sender = self["sender"]
        # Avoid self-notifications: skip delivery to the message's author.
        if sender == username:
            return OnNewChatMessage.SKIP
        return OnNewChatMessage(chatroom=chatroom, message=message, sender=sender)
    @classmethod
    def new_chat_message(cls, chatroom, message, sender):
        """Auxiliary function to send subscription notifications.
        It is generally a good idea to encapsulate broadcast invocation
        inside auxiliary class methods inside the subscription class.
        That allows to consider a structure of the `payload` as an
        implementation details.
        """
        cls.broadcast(group=chatroom, payload={"message": message, "sender": sender})
class Subscription(graphene.ObjectType):
    """GraphQL subscriptions."""
    on_chat_message_sent = OnNewChatMessage.Field()
# Complete schema: queries, mutations and subscriptions defined above.
graphql_schema = graphene.Schema(
    query=Query, mutation=Mutation, subscription=Subscription
)
# ----------------------------------------------------------- GRAPHQL WEBSOCKET CONSUMER
class MyGraphqlWsConsumer(channels_graphql_ws.GraphqlWsConsumer):
    """Channels WebSocket consumer which provides GraphQL API."""
    schema = graphql_schema
# ------------------------------------------------------------------------- ASGI ROUTING
# ASGI entry point: route websocket connections at /graphql/ to the consumer.
application = channels.routing.ProtocolTypeRouter(
    {
        "websocket": channels.routing.URLRouter(
            [django.urls.path("graphql/", MyGraphqlWsConsumer)]
        )
    }
)
# -------------------------------------------------------------------- URL CONFIGURATION
def graphiql(request):
    """Trivial ad-hoc view to serve the `graphiql.html` file."""
    del request  # static content — the request object is irrelevant
    graphiql_filepath = pathlib.Path(__file__).absolute().parent / "graphiql.html"
    # Explicit encoding: the platform default may not be UTF-8, which would
    # garble the HTML on e.g. Windows locales.
    with open(graphiql_filepath, encoding="utf-8") as f:
        return django.http.response.HttpResponse(f.read())
urlpatterns = [django.urls.path("", graphiql)]
| 30.873626
| 88
| 0.662395
|
4a0993434b61b40b22a6f91f674c8961c670ffe3
| 893
|
py
|
Python
|
Python_projects/project10/proj10-app.py
|
helpmoeny/pythoncode
|
0e2a0a3d3243323dd9e970daaf3b6c347900c694
|
[
"Unlicense"
] | null | null | null |
Python_projects/project10/proj10-app.py
|
helpmoeny/pythoncode
|
0e2a0a3d3243323dd9e970daaf3b6c347900c694
|
[
"Unlicense"
] | null | null | null |
Python_projects/project10/proj10-app.py
|
helpmoeny/pythoncode
|
0e2a0a3d3243323dd9e970daaf3b6c347900c694
|
[
"Unlicense"
] | null | null | null |
import currency

# Interactive demo: repeatedly deduct user-entered expenses ("<amount> <code>")
# from a $1000 USD account until the user quits or overdraws it.
print("A bank account named 'account' has $1000, you may enter a deduction amount")
print("(In the format 'XXX YYY', where xxx is the amount and yyy is the currencycode)")
print("or type 'q' to quit")
account = currency.Currency(1000, 'USD')
while True:
    fo = input("expense amount: ")
    if fo in ("q", "Q"):
        print("Quitting...")
        break
    else:
        try:
            line_str = fo.split(" ")
            amount = float(line_str[0])
            currencycode = line_str[1]
            expense = currency.Currency(amount, currencycode)
            account = account - expense
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
            # work; any parse or conversion failure just re-prompts.
            print("Incorrect inputs, try again...")
    zero = currency.Currency(0, 'USD')
    if zero > account:
        print("Overdrew account, quitting...")
        break
    print("Account now contains: ")
    print(account)
    print("")
| 28.806452
| 87
| 0.576708
|
4a0993fffece1d7ec91557d694969ebed80d1163
| 8,395
|
py
|
Python
|
py_librus_api/librus.py
|
TheAmazingRak/python-librus
|
2e9e112d38d62df80708a2af9efae5c086d8da65
|
[
"MIT"
] | 4
|
2020-04-14T21:33:00.000Z
|
2021-02-24T08:23:51.000Z
|
py_librus_api/librus.py
|
TheAmazingRak/python-librus
|
2e9e112d38d62df80708a2af9efae5c086d8da65
|
[
"MIT"
] | 6
|
2020-04-28T23:56:59.000Z
|
2021-05-16T14:45:05.000Z
|
py_librus_api/librus.py
|
TheAmazingRak/python-librus
|
2e9e112d38d62df80708a2af9efae5c086d8da65
|
[
"MIT"
] | 4
|
2021-10-10T19:59:06.000Z
|
2021-12-12T21:48:04.000Z
|
import requests
import sys
class Librus:
    """Minimal client for the (unofficial) Librus Synergia REST API.

    After a successful :meth:`login`, the getters lazily fetch and cache
    grades, subjects, teachers, lessons, attendance and free-day data as
    plain dicts/lists decoded from the API's JSON.

    Fix vs. the original: ``headers`` and every cache used to be *class*
    attributes, so ``self.headers["Authorization"] = ...`` mutated state
    shared by all instances — two Librus objects silently shared login
    tokens and cached data. All mutable state now lives on the instance.
    """
    host = "https://api.librus.pl/"

    def __init__(self):
        # Per-instance copy of the default (anonymous) auth header.
        self.headers = {
            "Authorization": "Basic Mjg6ODRmZGQzYTg3YjAzZDNlYTZmZmU3NzdiNThiMzMyYjE="
        }
        self.logged_in = False
        # Lazy caches — None means "not fetched yet".
        self.lucky_number = None
        self.grades = None
        self.subjects = None
        self.categories = None
        self.students = None
        self.teachers = None
        self.comments = None
        self.lessons = None
        self.school_free_days = None
        self.teacher_free_days = None
        self.teacher_free_days_types = None
        self.attendances = None
        self.attendances_types = None

    def login(self, login, password):
        """Validate credentials and log in.

        Returns True on success, False on bad/empty credentials or a failed
        OAuth exchange; returns None when already logged in (preserved
        historical behaviour).
        """
        if not self.logged_in:
            if login is None or password is None or login == "" or password == "":
                return False
            else:
                if self.make_connection(login, password):
                    return True
                else:
                    return False

    def make_connection(self, login, password):
        """Perform the OAuth password-grant request and store the bearer token.

        Retries up to 10 times on timeouts; any other request error is
        re-raised as requests.exceptions.ConnectionError.
        """
        r = None
        loop = 0
        while r is None:
            try:
                r = requests.post(self.host + "OAuth/Token",
                                  data={"username": login,
                                        "password": password,
                                        "librus_long_term_token": "1",
                                        "grant_type": "password"},
                                  headers=self.headers)
                if r.ok:
                    self.logged_in = True
                    # All subsequent requests use the bearer token.
                    self.headers["Authorization"] = "Bearer " + r.json()["access_token"]
                    return True
                else:
                    return False
            except requests.exceptions.Timeout:
                if loop >= 10:
                    return False
                else:
                    loop += 1
                    continue
            except requests.exceptions.RequestException:
                raise requests.exceptions.ConnectionError

    def get_data(self, url):
        """GET ``<host>/2.0/<url>`` with auth headers; raises if not logged in."""
        if self.logged_in:
            try:
                return requests.get(self.host + "2.0/" + url, headers=self.headers)
            except (requests.exceptions.ConnectionError, TimeoutError, requests.exceptions.Timeout,
                    requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout):
                raise Exception("Connection error")
        else:
            raise Exception("User not logged in")

    def get_lucky_number(self):
        """Return (and cache) today's school lucky number."""
        if self.lucky_number is None:
            r = self.get_data("LuckyNumbers")
            self.lucky_number = r.json()["LuckyNumber"]["LuckyNumber"]
        return self.lucky_number

    def get_grades(self):
        """Return grades grouped by subject name, fetching on first call."""
        if not self.subjects:
            self.get_subjects()
        if not self.categories:
            self.get_categories()
        if not self.teachers:
            self.get_teachers()
        if self.grades is None:
            # Fetch only when the cache is cold (the original issued this
            # GET on every call and then threw the response away).
            r = self.get_data("Grades")
            self.grades = {i: [] for i in self.subjects.values()}
            grades_comments = self.get_comments()
            for i in r.json()["Grades"]:
                # A grade may carry at most one attached comment.
                if "Comments" in i:
                    comment = grades_comments[i["Comments"][0]["Id"]]["Text"]
                else:
                    comment = "Brak komentarza"
                self.grades[self.subjects[i["Subject"]["Id"]]].append({
                    "Grade": i["Grade"],
                    "Weight": self.categories[i["Category"]["Id"]]["Weight"],
                    "Category": self.categories[i["Category"]["Id"]]["Name"],
                    "Teacher": self.teachers[i["AddedBy"]["Id"]],
                    "Comment": comment,
                    "To_the_average": self.categories[i["Category"]["Id"]]["CountToTheAverage"]
                })
        return self.grades

    def get_subjects(self):
        """Return (and cache) {subject_id: subject_name}."""
        if self.subjects is None:
            r = self.get_data("Subjects")
            self.subjects = {i["Id"]: i["Name"] for i in r.json()["Subjects"]}
        return self.subjects

    def get_categories(self):
        """Return (and cache) grade categories keyed by id.

        "CountToTheAverage" is mapped to the Polish strings "Tak"/"Nie"
        (yes/no) for display; "Weight" is None when the API omits it.
        """
        if self.categories is None:
            self.categories = {}
            r = self.get_data("Grades/Categories")
            for i in r.json()["Categories"]:
                if "Weight" in i:
                    w = i["Weight"]
                else:
                    w = None
                if i["CountToTheAverage"]:
                    i["CountToTheAverage"] = "Tak"
                else:
                    i["CountToTheAverage"] = "Nie"
                self.categories[i["Id"]] = {
                    "Name": i["Name"],
                    "Weight": w,
                    "CountToTheAverage": i["CountToTheAverage"],
                }
        return self.categories

    def get_teachers(self, *, mode="normal"):
        """Return teachers keyed by id, caching on first call.

        mode="fullname" returns a list of "First Last" strings,
        mode="fullname-id" a list of "id: First Last" strings,
        anything else ("normal") the raw {id: {FirstName, LastName}} dict.
        """
        if self.teachers is None:
            r = self.get_data("Users")
            self.teachers = {
                i["Id"]: {
                    "FirstName": i["FirstName"],
                    "LastName": i["LastName"]
                } for i in r.json()["Users"]
            }
        if mode == "fullname":
            return ["%s %s" % (data["FirstName"], data["LastName"]) for t_id, data in self.teachers.items()]
        elif mode == "fullname-id":
            return ["%s: %s %s" % (t_id, data["FirstName"], data["LastName"]) for t_id, data in self.teachers.items()]
        return self.teachers

    def get_comments(self):
        """Return (and cache) grade comments keyed by id."""
        if self.comments is None:
            r = self.get_data("Grades/Comments")
            self.comments = {
                i["Id"]: {
                    "Text": i["Text"]
                } for i in r.json()["Comments"]
            }
        return self.comments

    def get_school_free_days(self):
        """Return (and cache) school-wide free days, stripped of Id/Units."""
        if self.school_free_days is None:
            r = self.get_data("SchoolFreeDays")
            self.school_free_days = r.json()["SchoolFreeDays"]
            for i in self.school_free_days:
                for e in ["Id", "Units"]:
                    i.pop(e)
        return self.school_free_days

    def get_teacher_free_days(self):
        """Return (and cache) teacher absences with names and types resolved."""
        if self.teachers is None:
            self.get_teachers()
        if self.teacher_free_days_types is None:
            r = self.get_data("TeacherFreeDays/Types")
            self.teacher_free_days_types = {
                i["Id"]: i["Name"] for i in r.json()["Types"]
            }
        if self.teacher_free_days is None:
            r = self.get_data("TeacherFreeDays")
            self.teacher_free_days = r.json()["TeacherFreeDays"]
            for i in self.teacher_free_days:
                i.pop("Id")
                # Replace id references with the resolved teacher/type data.
                i["Teacher"] = self.teachers[i["Teacher"]["Id"]]
                i["Type"] = self.teacher_free_days_types[i["Type"]["Id"]]
        return self.teacher_free_days

    def get_lessons(self):
        """Return (and cache) lessons keyed by id with subject/teacher resolved."""
        if self.lessons is None:
            if self.subjects is None:
                self.get_subjects()
            if self.teachers is None:
                self.get_teachers()
            r = self.get_data("Lessons")
            self.lessons = {
                i["Id"]: {
                    "Subject": self.subjects[i["Subject"]["Id"]],
                    "Teacher": self.teachers[i["Teacher"]["Id"]]
                } for i in r.json()["Lessons"]
            }
        return self.lessons

    def get_attendances(self):
        """Return (and cache) attendance records with type/teacher/lesson resolved."""
        if self.attendances is None:
            if self.attendances_types is None:
                r = self.get_data("Attendances/Types")
                self.attendances_types = {
                    i["Id"]: {
                        "Name": i["Name"],
                        "Short": i["Short"],
                        "Standard": i["Standard"],
                        "IsPresenceKind": i["IsPresenceKind"],
                        "Order": i["Order"]
                    } for i in r.json()["Types"]
                }
            if self.lessons is None:
                # get_lessons also populates self.teachers, used below.
                self.get_lessons()
            self.attendances = self.get_data("Attendances").json()["Attendances"]
            for i in self.attendances:
                i.pop("Student")
                i["Type"] = self.attendances_types[i["Type"]["Id"]]
                i["AddedBy"] = self.teachers[i["AddedBy"]["Id"]]
                i["Lesson"] = self.lessons[i["Lesson"]["Id"]]
        return self.attendances
| 32.041985
| 118
| 0.491126
|
4a09942be567aabd51a845f4faefdf2b347d0aa3
| 477
|
py
|
Python
|
OpenAttack/data/universal_sentence_encoder.py
|
zangy17/OpenAttack
|
9114a8af12680f14684d2bf1bc6a5c5e34f8932c
|
[
"MIT"
] | 10
|
2021-12-01T15:35:05.000Z
|
2022-03-16T16:10:24.000Z
|
OpenAttack/data/universal_sentence_encoder.py
|
zangy17/OpenAttack
|
9114a8af12680f14684d2bf1bc6a5c5e34f8932c
|
[
"MIT"
] | null | null | null |
OpenAttack/data/universal_sentence_encoder.py
|
zangy17/OpenAttack
|
9114a8af12680f14684d2bf1bc6a5c5e34f8932c
|
[
"MIT"
] | null | null | null |
"""
:type: str
:Size: 916.57MB
Model files for Universal Sentence Encoder in tensorflow_hub.
`[pdf] <https://arxiv.org/pdf/1803.11175>`__
`[page] <https://tfhub.dev/google/universal-sentence-encoder/4>`__
"""
from OpenAttack.utils import make_zip_downloader
import os
# Registry name under which the downloader exposes this resource.
NAME = "AttackAssist.UniversalSentenceEncoder"
# Hosted archive with the tensorflow_hub model files (~916 MB, per module doc).
URL = "https://cdn.data.thunlp.org/TAADToolbox/usencoder.zip"
# Callable that downloads URL and unpacks the zip into the target path.
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
    """Return the path of the unpacked "usencoder" directory inside *path*."""
    usencoder_dir = os.path.join(path, "usencoder")
    return usencoder_dir
| 23.85
| 66
| 0.754717
|
4a0994932dd96f5c4f7cc2abf77ed71dc6e79399
| 153,935
|
py
|
Python
|
venv/lib/python3.8/site-packages/IPython/core/interactiveshell.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/IPython/core/interactiveshell.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/IPython/core/interactiveshell.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import abc
import ast
import atexit
import builtins as builtin_mod
import functools
import inspect
import os
import re
import runpy
import sys
import tempfile
import traceback
import types
import subprocess
import warnings
from io import open as io_open
from pickleshare import PickleShareDB
from traitlets.config.configurable import SingletonConfigurable
from traitlets.utils.importstring import import_item
from IPython.core import oinspect
from IPython.core import magic
from IPython.core import page
from IPython.core import prefilter
from IPython.core import ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.events import EventManager, available_events
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import Pdb
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.usage import default_banner
from IPython.display import display
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import io
from IPython.utils import py3compat
from IPython.utils import openpy
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.paths import get_ipython_dir
from IPython.utils.path import get_home_dir, get_py_filename, ensure_dir_exists
from IPython.utils.process import system, getoutput
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import format_screen, LSString, SList, DollarFormatter
from IPython.utils.tempdir import TemporaryDirectory
from traitlets import (
Integer, Bool, CaselessStrEnum, Enum, List, Dict, Unicode, Instance, Type,
observe, default, validate, Any
)
from warnings import warn
from logging import error
import IPython.core.hooks
from typing import List as ListType, Tuple, Optional
from ast import AST
# NoOpContext is deprecated, but ipykernel imports it from here.
# See https://github.com/ipython/ipykernel/issues/157
# (2016, let's try to remove than in IPython 8.0)
from IPython.utils.contexts import NoOpContext
# Optional dependency: the third-party `docrepr` package enables rich
# (sphinxified) HTML docstring rendering.  When it is missing, `sphinxify`
# is None and callers must fall back to plain-text docstrings.
try:
    import docrepr.sphinxify as sphx
    def sphinxify(doc):
        # Render `doc` in a throwaway build directory and return a
        # mimebundle carrying both the HTML and the original plain text.
        with TemporaryDirectory() as dirname:
            return {
                'text/html': sphx.sphinxify(doc, dirname),
                'text/plain': doc
            }
except ImportError:
    sphinxify = None
class ProvisionalWarning(DeprecationWarning):
    """Warning emitted for provisional (unstable, may-change) features."""
# ast.Module gained a second (type_ignores) argument in Python 3.8; on older
# interpreters, wrap the original constructor so both call styles work.
if sys.version_info > (3,8):
    from ast import Module
else :
    # mock the new API, ignore second argument
    # see https://github.com/ipython/ipython/issues/11590
    from ast import Module as OriginalModule
    Module = lambda nodelist, type_ignores: OriginalModule(nodelist)
# Node kinds that count as assignments; ast.AnnAssign (annotated assignment)
# only exists on Python >= 3.6.  NOTE(review): presumably consumed by the
# ast_node_interactivity ('last_expr_or_assign') handling later in this file
# — confirm against the rest of the class.
if sys.version_info > (3,6):
    _assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
    _single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
else:
    _assign_nodes = (ast.AugAssign, ast.Assign )
    _single_targets_nodes = (ast.AugAssign, )
#-----------------------------------------------------------------------------
# Await Helpers
#-----------------------------------------------------------------------------
def removed_co_newlocals(function:types.FunctionType) -> types.FunctionType:
    """Clone *function* with the ``CO_NEWLOCALS`` flag stripped from its code.

    The returned clone does not create a fresh local scope when called, so
    its body executes directly in the surrounding (global) scope.  We need
    this in order to run asynchronous code in the user-level namespace.
    """
    from types import CodeType, FunctionType
    CO_NEWLOCALS = 0x0002
    original_code = function.__code__
    stripped_flags = original_code.co_flags & ~CO_NEWLOCALS
    if sys.version_info > (3, 8, 0, 'alpha', 3):
        # CPython >= 3.8 offers a clean API for deriving code objects.
        patched_code = original_code.replace(co_flags=stripped_flags)
    else:
        # Older interpreters: rebuild the code object field by field.
        patched_code = CodeType(
            original_code.co_argcount,
            original_code.co_kwonlyargcount,
            original_code.co_nlocals,
            original_code.co_stacksize,
            stripped_flags,
            original_code.co_code,
            original_code.co_consts,
            original_code.co_names,
            original_code.co_varnames,
            original_code.co_filename,
            original_code.co_name,
            original_code.co_firstlineno,
            original_code.co_lnotab,
            original_code.co_freevars,
            original_code.co_cellvars,
        )
    return FunctionType(patched_code, globals(), function.__name__,
                        function.__defaults__)
# we still need to run things using the asyncio eventloop, but there is no
# async integration
from .async_helpers import (_asyncio_runner, _asyncify, _pseudo_sync_runner)
from .async_helpers import _curio_runner, _trio_runner, _should_be_async
def _ast_asyncify(cell:str, wrapper_name:str) -> ast.Module:
    """
    Parse a cell with top-level await and modify the AST to be able to run it later.

    Parameters
    ----------
    cell: str
        The code cell to asyncronify
    wrapper_name: str
        The name of the function to be used to wrap the passed `cell`. It is
        advised to **not** use a python identifier in order to not pollute the
        global namespace in which the function will be ran.

    Returns
    -------
    A module object AST containing **one** function named `wrapper_name`.

    The given code is wrapped in a async-def function, parsed into an AST, and
    the resulting function definition AST is modified to return the last
    expression.

    The last expression or await node is moved into a return statement at the
    end of the function, and removed from its original location. If the last
    node is not Expr or Await nothing is done.

    The function `__code__` will need to be later modified (by
    ``removed_co_newlocals``) in a subsequent step to not create new `locals()`
    meaning that the local and global scope are the same, ie as if the body of
    the function was at module level.

    Lastly a call to `locals()` is made just before the last expression of the
    function, or just after the last assignment or statement to make sure the
    global dict is updated as python function work with a local fast cache which
    is updated only on `local()` calls.
    """
    from ast import Expr, Await, Return
    if sys.version_info >= (3,8):
        # On Python 3.8+ the cell is parsed unchanged; no wrapping is done here.
        return ast.parse(cell)
    # _asyncify (module-level import) textually wraps the cell in an async-def.
    tree = ast.parse(_asyncify(cell))
    function_def = tree.body[0]
    # Rename the generated wrapper to the caller-chosen name.
    function_def.name = wrapper_name
    try_block = function_def.body[0]
    # Surface the value of the final expression/await by turning it into a
    # return statement; other node kinds are left untouched.
    lastexpr = try_block.body[-1]
    if isinstance(lastexpr, (Expr, Await)):
        try_block.body[-1] = Return(lastexpr.value)
    ast.fix_missing_locations(tree)
    return tree
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
@undoc
def softspace(file, newvalue):
    """Copied from code.py, to remove the dependency"""
    try:
        previous = file.softspace
    except AttributeError:
        # Object exposes no softspace attribute; report the default.
        previous = 0
    try:
        file.softspace = newvalue
    except (AttributeError, TypeError):
        # "attribute-less object" or "read-only attributes"
        pass
    return previous
@undoc
def no_op(*a, **kw):
    """Do nothing; accepts and ignores any arguments."""
class SpaceInInput(Exception):
    """Exception used to signal a space in the input."""
def get_default_colors():
    """DEPRECATED: always returns the ``Neutral`` color scheme name."""
    # Emit the deprecation warning against the caller's frame.
    warn('get_default_color is deprecated since IPython 5.0, and returns `Neutral` on all platforms.',
         DeprecationWarning, stacklevel=2)
    scheme = 'Neutral'
    return scheme
class SeparateUnicode(Unicode):
    r"""Unicode trait used for separate_in, separate_out, etc.

    Converts the literal string ``'0'`` to ``''`` and the two-character
    escape ``'\\n'`` to a real newline before standard Unicode validation.
    """
    def validate(self, obj, value):
        if value == '0':
            value = ''
        return super().validate(obj, value.replace('\\n', '\n'))
@undoc
class DummyMod(object):
    """A dummy module used for IPython's interactive module when
    a namespace must be assigned to the module's __dict__."""
    # No module spec: marks this stand-in as not imported through the
    # regular import machinery.
    __spec__ = None
class ExecutionInfo(object):
    """The arguments of a call to :meth:`InteractiveShell.run_cell`.

    Captures, before execution, what is about to happen: the raw source of
    the cell plus the history/silence/compiler-futures options.
    """
    # Class-level defaults; __init__ overwrites all of them per instance.
    raw_cell = None
    store_history = False
    silent = False
    shell_futures = True

    def __init__(self, raw_cell, store_history, silent, shell_futures):
        self.raw_cell = raw_cell
        self.store_history = store_history
        self.silent = silent
        self.shell_futures = shell_futures

    def __repr__(self):
        cls_name = self.__class__.__qualname__
        # Truncate long cells so the repr stays readable.
        shown = self.raw_cell
        if len(shown) > 50:
            shown = shown[:50] + '..'
        return '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s>' %\
            (cls_name, id(self), shown, self.store_history, self.silent, self.shell_futures)
class ExecutionResult(object):
    """The outcome of a call to :meth:`InteractiveShell.run_cell`.

    Records what took place: the execution counter value, any error raised
    before or during execution, the originating :class:`ExecutionInfo`, and
    the produced result value.
    """
    execution_count = None
    error_before_exec = None
    error_in_exec = None
    info = None
    result = None

    def __init__(self, info):
        self.info = info

    @property
    def success(self):
        """True when no error occurred before or during execution."""
        no_pre_error = self.error_before_exec is None
        no_exec_error = self.error_in_exec is None
        return no_pre_error and no_exec_error

    def raise_error(self):
        """Reraises error if `success` is `False`, otherwise does nothing"""
        if self.error_before_exec is not None:
            raise self.error_before_exec
        if self.error_in_exec is not None:
            raise self.error_in_exec

    def __repr__(self):
        name = self.__class__.__qualname__
        return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
            (name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
ast_transformers = List([], help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
def _default_loop_runner(self):
return import_item("IPython.core.interactiveshell._asyncio_runner")
    @validate('loop_runner')
    def _import_runner(self, proposal):
        """Resolve the ``loop_runner`` trait value into a callable runner.

        Accepts a key of ``loop_runner_map`` (e.g. ``'asyncio'``), a dotted
        import string, or an already-callable object; raises ValueError for
        anything that does not resolve to a callable.
        """
        if isinstance(proposal.value, str):
            # Known alias: also set `autoawait` to the mode the runner needs.
            if proposal.value in self.loop_runner_map:
                runner, autoawait = self.loop_runner_map[proposal.value]
                self.autoawait = autoawait
                return runner
            # Otherwise treat the string as a dotted path to import.
            runner = import_item(proposal.value)
            if not callable(runner):
                raise ValueError('loop_runner must be callable')
            return runner
        if not callable(proposal.value):
            raise ValueError('loop_runner must be callable')
        return proposal.value
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(1000, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
time re-flushing a too small cache than working
"""
).tag(config=True)
color_info = Bool(True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
).tag(config=True)
debug = Bool(False).tag(config=True)
disable_failing_post_execute = Bool(False,
help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
compiler_class = Type(CachingCompiler)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
def _sphinxify_docstring_changed(self, change):
if change['new']:
warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
def _enable_html_pager_changed(self, change):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
def input_transformers_cleanup(self):
return self.input_transformer_manager.cleanup_transforms
input_transformers_post = List([],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
@property
def input_splitter(self):
"""Make this available for backward compatibility (pre-7.0 release) with existing code.
For example, ipykernel ipykernel currently uses
`shell.input_splitter.check_complete`
"""
from warnings import warn
warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
DeprecationWarning, stacklevel=2
)
return self.input_transformer_manager
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
# deprecated prompt traits:
prompt_in1 = Unicode('In [\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_in2 = Unicode(' .\\D.: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_out = Unicode('Out[\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompts_pad_left = Bool(True,
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
@observe('prompt_in1', 'prompt_in2', 'prompt_out', 'prompt_pad_left')
def _prompt_trait_changed(self, change):
name = change['name']
warn("InteractiveShell.{name} is deprecated since IPython 4.0"
" and ignored since 5.0, set TerminalInteractiveShell.prompts"
" object directly.".format(name=name))
# protect against weird cases where self.config may not exist:
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying
which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
default_value='Context',
help="Switch modes for the IPython exception handlers."
).tag(config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
last_execution_succeeded = Bool(True, help='Did last executed command succeeded')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
    def __init__(self, ipython_dir=None, profile_dir=None,
                 user_module=None, user_ns=None,
                 custom_exceptions=((), None), **kwargs):
        """Build the shell, running every init_* stage in dependency order.

        Parameters
        ----------
        ipython_dir : str, optional
            Directory for IPython's per-user files; defaults to
            get_ipython_dir() (see init_ipython_dir).
        profile_dir : ProfileDir, optional
            Profile directory object; created for the 'default' profile
            when None (see init_profile_dir).
        user_module : module, optional
            Forwarded to init_create_namespaces.
        user_ns : dict, optional
            Forwarded to init_create_namespaces.
        custom_exceptions : tuple, optional
            Forwarded to init_traceback_handlers.
        **kwargs
            Trait/config values passed up to SingletonConfigurable.
        """
        # This is where traits with a config_key argument are updated
        # from the values on config.
        super(InteractiveShell, self).__init__(**kwargs)
        if 'PromptManager' in self.config:
            warn('As of IPython 5.0 `PromptManager` config will have no effect'
                 ' and has been replaced by TerminalInteractiveShell.prompts_class')
        self.configurables = [self]
        # These are relatively independent and stateless
        self.init_ipython_dir(ipython_dir)
        self.init_profile_dir(profile_dir)
        self.init_instance_attrs()
        self.init_environment()
        # Check if we're in a virtualenv, and set up sys.path.
        self.init_virtualenv()
        # Create namespaces (user_ns, user_global_ns, etc.)
        self.init_create_namespaces(user_module, user_ns)
        # This has to be done after init_create_namespaces because it uses
        # something in self.user_ns, but before init_sys_modules, which
        # is the first thing to modify sys.
        # TODO: When we override sys.stdout and sys.stderr before this class
        # is created, we are saving the overridden ones here. Not sure if this
        # is what we want to do.
        self.save_sys_module_state()
        self.init_sys_modules()
        # While we're trying to have each part of the code directly access what
        # it needs without keeping redundant references to objects, we have too
        # much legacy code that expects ip.db to exist.
        self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
        self.init_history()
        self.init_encoding()
        self.init_prefilter()
        self.init_syntax_highlighting()
        self.init_hooks()
        self.init_events()
        self.init_pushd_popd_magic()
        self.init_user_ns()
        self.init_logger()
        self.init_builtins()
        # The following was in post_config_initialization
        self.init_inspector()
        self.raw_input_original = input
        self.init_completer()
        # TODO: init_io() needs to happen before init_traceback handlers
        # because the traceback handlers hardcode the stdout/stderr streams.
        # This logic in in debugger.Pdb and should eventually be changed.
        self.init_io()
        self.init_traceback_handlers(custom_exceptions)
        self.init_prompts()
        self.init_display_formatter()
        self.init_display_pub()
        self.init_data_pub()
        self.init_displayhook()
        self.init_magics()
        self.init_alias()
        self.init_logstart()
        self.init_pdb()
        self.init_extension_manager()
        self.init_payload()
        self.init_deprecation_warnings()
        self.hooks.late_startup_hook()
        self.events.trigger('shell_initialized', self)
        atexit.register(self.atexit_operations)
        # The trio runner is used for running Trio in the foreground thread. It
        # is different from `_trio_runner(async_fn)` in `async_helpers.py`
        # which calls `trio.run()` for every cell. This runner runs all cells
        # inside a single Trio event loop. If used, it is set from
        # `ipykernel.kernelapp`.
        self.trio_runner = None
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
ensure_dir_exists(change['new'])
def set_autoindent(self,value=None):
"""Set the autoindent flag.
If called with no arguments, it acts as a toggle."""
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
def set_trio_runner(self, tr):
self.trio_runner = tr
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir = ProfileDir.create_profile_dir_by_name(
self.ipython_dir, "default"
)
def init_instance_attrs(self):
self.more = False
# command compiler
self.compile = self.compiler_class()
# Make an empty namespace, which extension writers can rely on both
# existing and NEVER being used by ipython itself. This gives them a
# convenient location for storing additional information and state
# their extensions may require, without fear of collisions with other
# ipython names that may develop later.
self.meta = Struct()
# Temporary files used for various purposes. Deleted at exit.
self.tempfiles = []
self.tempdirs = []
# keep track of where we started running (mainly for crash post-mortem)
# This is not being used anywhere currently.
self.starting_dir = os.getcwd()
# Indentation management
self.indent_current_nsp = 0
# Dict to track post-execution functions that have been registered
self._post_execute = {}
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
def init_encoding(self):
# Get system encoding at startup time. Certain terminals (like Emacs
# under Win32 have it set to None, and we need to have a known valid
# encoding to use in the raw_input() method
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
@observe('colors')
def init_syntax_highlighting(self, changes=None):
# Python source parser/formatter for syntax highlighting
pyformat = PyColorize.Parser(style=self.colors, parent=self).format
self.pycolorize = lambda src: pyformat(src,'str')
def refresh_style(self):
# No-op here, used in subclass
pass
def init_pushd_popd_magic(self):
# for pushd/popd management
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self):
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_deprecation_warnings(self):
"""
register default filter for deprecation warning.
This will allow deprecation warning of function used interactively to show
warning to users, and still hide deprecation warning from libraries import.
"""
if sys.version_info < (3,7):
warnings.filterwarnings("default", category=DeprecationWarning, module=self.user_ns.get("__name__"))
def init_builtins(self):
# A single, static flag that we set to True. Its presence indicates
# that an IPython shell has been created, and we make no attempts at
# removing on exit or representing the existence of more than one
# IPython at a time.
builtin_mod.__dict__['__IPYTHON__'] = True
builtin_mod.__dict__['display'] = display
self.builtin_trap = BuiltinTrap(shell=self)
@observe('colors')
def init_inspector(self, changes=None):
# Object inspector
self.inspector = oinspect.Inspector(oinspect.InspectColors,
PyColorize.ANSICodeColors,
self.colors,
self.object_info_string_level)
def init_io(self):
# This will just use sys.stdout and sys.stderr. If you want to
# override sys.stdout and sys.stderr themselves, you need to do that
# *before* instantiating this class, because io holds onto
# references to the underlying streams.
# io.std* are deprecated, but don't show our own deprecation warnings
# during initialization of the deprecated API.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
io.stdout = io.IOStream(sys.stdout)
io.stderr = io.IOStream(sys.stderr)
def init_prompts(self):
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(parent=self)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(parent=self, shell=self)
self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
def init_displayhook(self):
# Initialize displayhook, set in/out prompts and printing system
self.displayhook = self.displayhook_class(
parent=self,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
# This is a context manager that installs/revmoes the displayhook at
# the appropriate time.
self.display_trap = DisplayTrap(hook=self.displayhook)
    def init_virtualenv(self):
        """Add a virtualenv to sys.path so the user can import modules from it.
        This isn't perfect: it doesn't use the Python interpreter with which the
        virtualenv was built, and it ignores the --no-site-packages option. A
        warning will appear suggesting the user installs IPython in the
        virtualenv, but for many cases, it probably works well enough.
        Adapted from code snippets online.
        http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
        """
        if 'VIRTUAL_ENV' not in os.environ:
            # Not in a virtualenv
            return
        p = os.path.normcase(sys.executable)
        p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
        # executable path should end like /bin/python or \\scripts\\python.exe
        p_exe_up2 = os.path.dirname(os.path.dirname(p))
        if p_exe_up2 and os.path.exists(p_venv) and os.path.samefile(p_exe_up2, p_venv):
            # Our exe is inside the virtualenv, don't need to do anything.
            return
        # fallback venv detection:
        # stdlib venv may symlink sys.executable, so we can't use realpath.
        # but others can symlink *to* the venv Python, so we can't just use sys.executable.
        # So we just check every item in the symlink tree (generally <= 3)
        paths = [p]
        while os.path.islink(p):
            p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
            paths.append(p)
        # In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
        # NOTE(review): the 11-char slice removes '\cygdrive\X' including the
        # drive letter itself — confirm that is the intended comparison form.
        if p_venv.startswith('\\cygdrive'):
            p_venv = p_venv[11:]
        elif len(p_venv) >= 2 and p_venv[1] == ':':
            p_venv = p_venv[2:]
        if any(p_venv in p for p in paths):
            # Running properly in the virtualenv, don't need to do anything
            return
        warn("Attempting to work in a virtualenv. If you encounter problems, please "
             "install IPython inside the virtualenv.")
        if sys.platform == "win32":
            virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'Lib', 'site-packages')
        else:
            virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
                   'python%d.%d' % sys.version_info[:2], 'site-packages')
        import site
        sys.path.insert(0, virtual_env)
        site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
    def save_sys_module_state(self):
        """Save the state of hooks in the sys module.
        This has to be called after self.user_module is created.
        """
        # Streams and excepthook are restored verbatim later by
        # restore_sys_module_state().
        self._orig_sys_module_state = {'stdin': sys.stdin,
                                       'stdout': sys.stdout,
                                       'stderr': sys.stderr,
                                       'excepthook': sys.excepthook}
        # Remember the user module's entry in sys.modules (may be None) so
        # init_sys_modules changes can be undone.
        self._orig_sys_modules_main_name = self.user_module.__name__
        self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
    def restore_sys_module_state(self):
        """Restore the state of the sys module."""
        try:
            for k, v in self._orig_sys_module_state.items():
                setattr(sys, k, v)
        except AttributeError:
            # save_sys_module_state() was never called; nothing to restore.
            pass
        # Reset what was done in self.init_sys_modules
        if self._orig_sys_modules_main_mod is not None:
            sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
    @property
    def banner(self):
        """The full startup banner: banner1, an optional profile line (only
        for non-default profiles), then banner2."""
        banner = self.banner1
        if self.profile and self.profile != 'default':
            banner += '\nIPython profile: %s\n' % self.profile
        if self.banner2:
            banner += '\n' + self.banner2
        return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
sys.stdout.write(banner)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name,getattr(hooks,hook_name), 100, _warn_deprecated=False)
if self.display_page:
self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
    def set_hook(self,name,hook, priority=50, str_key=None, re_key=None,
                 _warn_deprecated=True):
        """set_hook(name,hook) -> sets an internal IPython hook.

        IPython exposes some of its internal API as user-modifiable hooks.  By
        adding your function to one of these hooks, you can modify IPython's
        behavior to call at runtime your own routines.

        Lower ``priority`` values run earlier in the hook chain.  If
        ``str_key`` or ``re_key`` is given, the hook is registered with a
        string dispatcher keyed on that string / regex instead of the plain
        hook chain.
        """
        # At some point in the future, this should validate the hook before it
        # accepts it.  Probably at least check that the hook takes the number
        # of args it's supposed to.
        # Bind the hook to this shell instance so it can access self.
        f = types.MethodType(hook,self)
        # check if the hook is for strdispatcher first
        if str_key is not None:
            sdp = self.strdispatchers.get(name, StrDispatch())
            sdp.add_s(str_key, f, priority )
            self.strdispatchers[name] = sdp
            return
        if re_key is not None:
            sdp = self.strdispatchers.get(name, StrDispatch())
            sdp.add_re(re.compile(re_key), f, priority )
            self.strdispatchers[name] = sdp
            return
        dp = getattr(self.hooks, name, None)
        if name not in IPython.core.hooks.__all__:
            print("Warning! Hook '%s' is not one of %s" % \
                  (name, IPython.core.hooks.__all__ ))
        if _warn_deprecated and (name in IPython.core.hooks.deprecated):
            alternative = IPython.core.hooks.deprecated[name]
            warn("Hook {} is deprecated. Use {} instead.".format(name, alternative), stacklevel=2)
        # First hook for this name: start a fresh dispatcher chain.
        if not dp:
            dp = IPython.core.hooks.CommandChainDispatcher()
        try:
            dp.add(f,priority)
        except AttributeError:
            # it was not commandchain, plain old func - replace
            dp = f
        setattr(self.hooks,name, dp)
    #-------------------------------------------------------------------------
    # Things related to events
    #-------------------------------------------------------------------------
    def init_events(self):
        """Create the event manager and register built-in callbacks."""
        self.events = EventManager(self, available_events)
        # Clear the warning registry before every execution (see
        # _clear_warning_registry for why).
        self.events.register("pre_execute", self._clear_warning_registry)
    def register_post_execute(self, func):
        """DEPRECATED: Use ip.events.register('post_run_cell', func)

        Register a function for calling after code execution.
        """
        # Kept only for backward compatibility; forwards to the events API.
        warn("ip.register_post_execute is deprecated, use "
             "ip.events.register('post_run_cell', func) instead.", stacklevel=2)
        self.events.register('post_run_cell', func)
def _clear_warning_registry(self):
# clear the warning registry, so that different code blocks with
# overlapping line number ranges don't cause spurious suppression of
# warnings (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
    def clear_main_mod_cache(self):
        """Clear the cache of main modules.

        Mainly for use by utilities like %reset.  Dropping the cache
        releases the namespaces of previously %run scripts.

        Examples
        --------
        In [15]: import IPython

        In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')

        In [17]: len(_ip._main_mod_cache) > 0
        Out[17]: True

        In [18]: _ip.clear_main_mod_cache()

        In [19]: len(_ip._main_mod_cache) == 0
        Out[19]: True
        """
        self._main_mod_cache.clear()
    #-------------------------------------------------------------------------
    # Things related to debugging
    #-------------------------------------------------------------------------
    def init_pdb(self):
        """Initialise the call_pdb flag from the configured `pdb` option."""
        # Set calling of pdb on exceptions
        # self.call_pdb is a property; assigning also updates InteractiveTB.
        self.call_pdb = self.pdb
    def _get_call_pdb(self):
        # Getter for the call_pdb property below.
        return self._call_pdb
    def _set_call_pdb(self,val):
        # Setter for the call_pdb property; only boolean-like values allowed.
        if val not in (0,1,False,True):
            raise ValueError('new call_pdb value must be boolean')
        # store value in instance
        self._call_pdb = val
        # notify the actual exception handlers
        self.InteractiveTB.call_pdb = val
    call_pdb = property(_get_call_pdb,_set_call_pdb,None,
                        'Control auto-activation of pdb at exceptions')
    def debugger(self,force=False):
        """Call the pdb debugger.

        Keywords:

          - force(False): by default, this routine checks the instance call_pdb
            flag and does not actually invoke the debugger if the flag is false.
            The 'force' option forces the debugger to activate even if the flag
            is false.
        """
        if not (force or self.call_pdb):
            return
        # We can only debug the most recent traceback; without one there is
        # nothing to attach to.
        if not hasattr(sys,'last_traceback'):
            error('No traceback has been produced, nothing to debug.')
            return
        self.InteractiveTB.debugger(force=True)
    #-------------------------------------------------------------------------
    # Things related to IPython's various namespaces
    #-------------------------------------------------------------------------
    # True while the shell owns the (automatically created) user namespaces;
    # set to False when the embedder supplies its own module/namespace.
    default_user_namespaces = True
    def init_create_namespaces(self, user_module=None, user_ns=None):
        """Create the user module and namespaces in which user code runs.

        ``user_ns`` is passed to exec as locals; the user module's
        ``__dict__`` serves as globals.  In non-embedded shells the two are
        the same dict; embedders may supply either or both.
        """
        # Create the namespace where the user will operate.  user_ns is
        # normally the only one used, and it is passed to the exec calls as
        # the locals argument.  But we do carry a user_global_ns namespace
        # given as the exec 'globals' argument,  This is useful in embedding
        # situations where the ipython shell opens in a context where the
        # distinction between locals and globals is meaningful.  For
        # non-embedded contexts, it is just the same object as the user_ns dict.
        # FIXME. For some strange reason, __builtins__ is showing up at user
        # level as a dict instead of a module. This is a manual fix, but I
        # should really track down where the problem is coming from. Alex
        # Schmolck reported this problem first.
        # A useful post by Alex Martelli on this topic:
        # Re: inconsistent value from __builtins__
        # Von: Alex Martelli <aleaxit@yahoo.com>
        # Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
        # Gruppen: comp.lang.python
        # Michael Hohn <hohn@hooknose.lbl.gov> wrote:
        # > >>> print type(builtin_check.get_global_binding('__builtins__'))
        # > <type 'dict'>
        # > >>> print type(__builtins__)
        # > <type 'module'>
        # > Is this difference in return value intentional?
        # Well, it's documented that '__builtins__' can be either a dictionary
        # or a module, and it's been that way for a long time. Whether it's
        # intentional (or sensible), I don't know. In any case, the idea is
        # that if you need to access the built-in namespace directly, you
        # should start with "import __builtin__" (note, no 's') which will
        # definitely give you a module. Yeah, it's somewhat confusing:-(.
        # These routines return a properly built module and dict as needed by
        # the rest of the code, and can also be used by extension writers to
        # generate properly initialized namespaces.
        if (user_ns is not None) or (user_module is not None):
            self.default_user_namespaces = False
        self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
        # A record of hidden variables we have added to the user namespace, so
        # we can list later only variables defined in actual interactive use.
        self.user_ns_hidden = {}
        # Now that FakeModule produces a real module, we've run into a nasty
        # problem: after script execution (via %run), the module where the user
        # code ran is deleted.  Now that this object is a true module (needed
        # so doctest and other tools work correctly), the Python module
        # teardown mechanism runs over it, and sets to None every variable
        # present in that module.  Top-level references to objects from the
        # script survive, because the user_ns is updated with them.  However,
        # calling functions defined in the script that use other things from
        # the script will fail, because the function's closure had references
        # to the original objects, which are now all None.  So we must protect
        # these modules from deletion by keeping a cache.
        #
        # To avoid keeping stale modules around (we only need the one from the
        # last run), we use a dict keyed with the full path to the script, so
        # only the last version of the module is held in the cache.  Note,
        # however, that we must cache the module *namespace contents* (their
        # __dict__).  Because if we try to cache the actual modules, old ones
        # (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived)>
        #
        # The %reset command will flush this cache.  See the cache_main_mod()
        # and clear_main_mod_cache() methods for details on use.
        # This is the cache used for 'main' namespaces
        self._main_mod_cache = {}
        # A table holding all the namespaces IPython deals with, so that
        # introspection facilities can search easily.
        self.ns_table = {'user_global':self.user_module.__dict__,
                         'user_local':self.user_ns,
                         'builtin':builtin_mod.__dict__
                         }
    @property
    def user_global_ns(self):
        """The global namespace: the ``__dict__`` of the user's main module."""
        return self.user_module.__dict__
    def prepare_user_module(self, user_module=None, user_ns=None):
        """Prepare the module and namespace in which user code will be run.

        When IPython is started normally, both parameters are None: a new module
        is created automatically, and its __dict__ used as the namespace.

        If only user_module is provided, its __dict__ is used as the namespace.
        If only user_ns is provided, a dummy module is created, and user_ns
        becomes the global namespace. If both are provided (as they may be
        when embedding), user_ns is the local namespace, and user_module
        provides the global namespace.

        Parameters
        ----------
        user_module : module, optional
            The current user module in which IPython is being run. If None,
            a clean module will be created.
        user_ns : dict, optional
            A namespace in which to run interactive commands.

        Returns
        -------
        A tuple of user_module and user_ns, each properly initialised.
        """
        if user_module is None and user_ns is not None:
            # Wrap the supplied dict in a dummy module so code expecting a
            # module object (e.g. pickle/shelve) keeps working.
            user_ns.setdefault("__name__", "__main__")
            user_module = DummyMod()
            user_module.__dict__ = user_ns
        if user_module is None:
            user_module = types.ModuleType("__main__",
                doc="Automatically created module for IPython interactive environment")
        # We must ensure that __builtin__ (without the final 's') is always
        # available and pointing to the __builtin__ *module*.  For more details:
        # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
        user_module.__dict__.setdefault('__builtin__', builtin_mod)
        user_module.__dict__.setdefault('__builtins__', builtin_mod)
        if user_ns is None:
            user_ns = user_module.__dict__
        return user_module, user_ns
    def init_sys_modules(self):
        """Register the user module in ``sys.modules`` under its own name."""
        # We need to insert into sys.modules something that looks like a
        # module but which accesses the IPython namespace, for shelve and
        # pickle to work interactively. Normally they rely on getting
        # everything out of __main__, but for embedding purposes each IPython
        # instance has its own private namespace, so we can't go shoving
        # everything into __main__.

        # note, however, that we should only do this for non-embedded
        # ipythons, which really mimic the __main__.__dict__ with their own
        # namespace.  Embedded instances, on the other hand, should not do
        # this because they need to manage the user local/global namespaces
        # only, but they live within a 'normal' __main__ (meaning, they
        # shouldn't overtake the execution environment of the script they're
        # embedded in).

        # This is overridden in the InteractiveShellEmbed subclass to a no-op.
        main_name = self.user_module.__name__
        sys.modules[main_name] = self.user_module
    def init_user_ns(self):
        """Initialize all user-visible namespaces to their minimum defaults.

        Certain history lists are also initialized here, as they effectively
        act as user namespaces.

        Notes
        -----
        All data structures here are only filled in, they are NOT reset by this
        method.  If they were not empty before, data will simply be added to
        them.
        """
        # This function works in two parts: first we put a few things in
        # user_ns, and we sync that contents into user_ns_hidden so that these
        # initial variables aren't shown by %who.  After the sync, we add the
        # rest of what we *do* want the user to see with %who even on a new
        # session (probably nothing, so they really only see their own stuff)

        # The user dict must *always* have a __builtin__ reference to the
        # Python standard __builtin__ namespace,  which must be imported.
        # This is so that certain operations in prompt evaluation can be
        # reliably executed with builtins.  Note that we can NOT use
        # __builtins__ (note the 's'),  because that can either be a dict or a
        # module, and can even mutate at runtime, depending on the context
        # (Python makes no guarantees on it).  In contrast, __builtin__ is
        # always a module object, though it must be explicitly imported.

        # For more details:
        # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
        ns = {}

        # make global variables for user access to the histories
        ns['_ih'] = self.history_manager.input_hist_parsed
        ns['_oh'] = self.history_manager.output_hist
        ns['_dh'] = self.history_manager.dir_hist

        # user aliases to input and output histories.  These shouldn't show up
        # in %who, as they can have very large reprs.
        ns['In']  = self.history_manager.input_hist_parsed
        ns['Out'] = self.history_manager.output_hist

        # Store myself as the public api!!!
        ns['get_ipython'] = self.get_ipython

        ns['exit'] = self.exiter
        ns['quit'] = self.exiter

        # Sync what we've added so far to user_ns_hidden so these aren't seen
        # by %who
        self.user_ns_hidden.update(ns)

        # Anything put into ns now would show up in %who.  Think twice before
        # putting anything here, as we really want %who to show the user their
        # stuff, not our variables.

        # Finally, update the real user's namespace
        self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
    def reset(self, new_session=True, aggressive=False):
        """Clear all internal namespaces, and attempt to release references to
        user objects.

        If new_session is True, a new history session will be opened.

        If aggressive is True, also cull sys.modules back to a previously
        recorded snapshot (``self._sys_modules_keys``), when one exists.
        """
        # Clear histories
        self.history_manager.reset(new_session)
        # Reset counter used to index all histories
        if new_session:
            self.execution_count = 1

        # Reset last execution result
        self.last_execution_succeeded = True
        self.last_execution_result = None

        # Flush cached output items
        if self.displayhook.do_full_cache:
            self.displayhook.flush()

        # The main execution namespaces must be cleared very carefully,
        # skipping the deletion of the builtin-related keys, because doing so
        # would cause errors in many object's __del__ methods.
        if self.user_ns is not self.user_global_ns:
            self.user_ns.clear()
        ns = self.user_global_ns
        drop_keys = set(ns.keys())
        drop_keys.discard('__builtin__')
        drop_keys.discard('__builtins__')
        drop_keys.discard('__name__')
        for k in drop_keys:
            del ns[k]

        self.user_ns_hidden.clear()

        # Restore the user namespaces to minimal usability
        self.init_user_ns()
        if aggressive and not hasattr(self, "_sys_modules_keys"):
            print("Cannot restore sys.module, no snapshot")
        elif aggressive:
            print("culling sys module...")
            current_keys = set(sys.modules.keys())
            for k in current_keys - self._sys_modules_keys:
                # multiprocessing machinery must survive the cull.
                if k.startswith("multiprocessing"):
                    continue
                del sys.modules[k]

        # Restore the default and user aliases
        self.alias_manager.clear_aliases()
        self.alias_manager.init_aliases()

        # Now define aliases that only make sense on the terminal, because they
        # need direct access to the console in a way that we can't emulate in
        # GUI or web frontend
        if os.name == 'posix':
            for cmd in ('clear', 'more', 'less', 'man'):
                if cmd not in self.magics_manager.magics['line']:
                    self.alias_manager.soft_define_alias(cmd, cmd)

        # Flush the private list of module references kept for script
        # execution protection
        self.clear_main_mod_cache()
    def del_var(self, varname, by_name=False):
        """Delete a variable from the various namespaces, so that, as
        far as possible, we're not keeping any hidden references to it.

        Parameters
        ----------
        varname : str
            The name of the variable to delete.
        by_name : bool
            If True, delete variables with the given name in each
            namespace. If False (default), find the variable in the user
            namespace, and delete references to it.
        """
        # The builtin bindings must never be removed; doing so would break
        # the interpreter itself.
        if varname in ('__builtin__', '__builtins__'):
            raise ValueError("Refusing to delete %s" % varname)
        ns_refs = self.all_ns_refs

        if by_name:                    # Delete by name
            for ns in ns_refs:
                try:
                    del ns[varname]
                except KeyError:
                    pass
        else:                         # Delete by object
            try:
                obj = self.user_ns[varname]
            except KeyError:
                raise NameError("name '%s' is not defined" % varname)
            # Also check in output history
            ns_refs.append(self.history_manager.output_hist)
            for ns in ns_refs:
                # Remove every name bound to this exact object (identity).
                to_delete = [n for n, o in ns.items() if o is obj]
                for name in to_delete:
                    del ns[name]

            # Ensure it is removed from the last execution result
            if self.last_execution_result.result is obj:
                self.last_execution_result = None

            # displayhook keeps extra references, but not in a dictionary
            for name in ('_', '__', '___'):
                if getattr(self.displayhook, name) is obj:
                    setattr(self.displayhook, name, None)
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError:
raise TypeError('regex must be a string or compiled pattern')
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
for ns in self.all_ns_refs:
for var in ns:
if m.search(var):
del ns[var]
    def push(self, variables, interactive=True):
        """Inject a group of variables into the IPython user namespace.

        Parameters
        ----------
        variables : dict, str or list/tuple of str
            The variables to inject into the user's namespace.  If a dict, a
            simple update is done.  If a str, the string is assumed to have
            variable names separated by spaces.  A list/tuple of str can also
            be used to give the variable names.  If just the variable names are
            give (list/tuple/str) then the variable values looked up in the
            callers frame.
        interactive : bool
            If True (default), the variables will be listed with the ``who``
            magic.
        """
        vdict = None

        # We need a dict of name/value pairs to do namespace updates.
        if isinstance(variables, dict):
            vdict = variables
        elif isinstance(variables, (str, list, tuple)):
            if isinstance(variables, str):
                vlist = variables.split()
            else:
                vlist = variables
            vdict = {}
            # Look names up in the *caller's* frame, one level up the stack.
            cf = sys._getframe(1)
            for name in vlist:
                try:
                    vdict[name] = eval(name, cf.f_globals, cf.f_locals)
                except:
                    print('Could not get variable %s from %s' %
                           (name,cf.f_code.co_name))
        else:
            raise ValueError('variables must be a dict/str/list/tuple')

        # Propagate variables to user namespace
        self.user_ns.update(vdict)

        # And configure interactive visibility
        user_ns_hidden = self.user_ns_hidden
        if interactive:
            # Un-hide so %who lists them.
            for name in vdict:
                user_ns_hidden.pop(name, None)
        else:
            user_ns_hidden.update(vdict)
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
    #-------------------------------------------------------------------------
    # Things related to object introspection
    #-------------------------------------------------------------------------
    def _ofind(self, oname, namespaces=None):
        """Find an object in the available namespaces.

        self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic

        Has special code to detect magic functions.
        """
        oname = oname.strip()
        # Reject names that are neither magics nor dotted identifier paths.
        if not oname.startswith(ESC_MAGIC) and \
            not oname.startswith(ESC_MAGIC2) and \
            not all(a.isidentifier() for a in oname.split(".")):
            return {'found': False}

        if namespaces is None:
            # Namespaces to search in:
            # Put them in a list. The order is important so that we
            # find things in the same order that Python finds them.
            namespaces = [ ('Interactive', self.user_ns),
                           ('Interactive (global)', self.user_global_ns),
                           ('Python builtin', builtin_mod.__dict__),
                           ]

        ismagic = False
        isalias = False
        found = False
        ospace = None
        parent = None
        obj = None

        # Look for the given name by splitting it in parts.  If the head is
        # found, then we look for all the remaining parts as members, and only
        # declare success if we can find them all.
        oname_parts = oname.split('.')
        oname_head, oname_rest = oname_parts[0],oname_parts[1:]
        for nsname,ns in namespaces:
            try:
                obj = ns[oname_head]
            except KeyError:
                continue
            else:
                for idx, part in enumerate(oname_rest):
                    try:
                        parent = obj
                        # The last part is looked up in a special way to avoid
                        # descriptor invocation as it may raise or have side
                        # effects.
                        if idx == len(oname_rest) - 1:
                            obj = self._getattr_property(obj, part)
                        else:
                            obj = getattr(obj, part)
                    except:
                        # Blanket except b/c some badly implemented objects
                        # allow __getattr__ to raise exceptions other than
                        # AttributeError, which then crashes IPython.
                        break
                else:
                    # If we finish the for loop (no break), we got all members
                    found = True
                    ospace = nsname
                    break  # namespace loop

        # Try to see if it's magic
        if not found:
            obj = None
            if oname.startswith(ESC_MAGIC2):
                oname = oname.lstrip(ESC_MAGIC2)
                obj = self.find_cell_magic(oname)
            elif oname.startswith(ESC_MAGIC):
                oname = oname.lstrip(ESC_MAGIC)
                obj = self.find_line_magic(oname)
            else:
                # search without prefix, so run? will find %run?
                obj = self.find_line_magic(oname)
                if obj is None:
                    obj = self.find_cell_magic(oname)
            if obj is not None:
                found = True
                ospace = 'IPython internal'
                ismagic = True
                isalias = isinstance(obj, Alias)

        # Last try: special-case some literals like '', [], {}, etc:
        if not found and oname_head in ["''",'""','[]','{}','()']:
            obj = eval(oname_head)
            found = True
            ospace = 'Interactive'

        return {
                'obj':obj,
                'found':found,
                'parent':parent,
                'ismagic':ismagic,
                'isalias':isalias,
                'namespace':ospace
               }
    @staticmethod
    def _getattr_property(obj, attrname):
        """Property-aware getattr to use in object finding.

        If attrname represents a property, return it unevaluated (in case it
        has side effects or raises an error).  Otherwise behaves exactly like
        ``getattr(obj, attrname)``.
        """
        if not isinstance(obj, type):
            try:
                # `getattr(type(obj), attrname)` is not guaranteed to return
                # `obj`, but does so for property:
                #
                # property.__get__(self, None, cls) -> self
                #
                # The universal alternative is to traverse the mro manually
                # searching for attrname in class dicts.
                attr = getattr(type(obj), attrname)
            except AttributeError:
                pass
            else:
                # This relies on the fact that data descriptors (with both
                # __get__ & __set__ magic methods) take precedence over
                # instance-level attributes:
                #
                #    class A(object):
                #        @property
                #        def foobar(self): return 123
                #    a = A()
                #    a.__dict__['foobar'] = 345
                #    a.foobar  # == 123
                #
                # So, a property may be returned right away.
                if isinstance(attr, property):
                    return attr

        # Nothing helped, fall back.
        return getattr(obj, attrname)
    def _object_find(self, oname, namespaces=None):
        """Find an object and return a struct with info about it.

        Thin wrapper around _ofind that exposes the result with attribute
        access (``info.found``, ``info.obj``, ...).
        """
        return Struct(self._ofind(oname, namespaces))
    def _inspect(self, meth, oname, namespaces=None, **kw):
        """Generic interface to the inspector system.

        This function is meant to be called by pdef, pdoc & friends.
        ``meth`` names the inspector method to invoke ('pdef', 'pdoc',
        'pinfo', ...).  Returns 'not found' when the object cannot be
        located, so callers can take other action.
        """
        info = self._object_find(oname, namespaces)
        # Render docstrings with sphinx when so configured.
        docformat = sphinxify if self.sphinxify_docstring else None
        if info.found:
            pmethod = getattr(self.inspector, meth)
            # TODO: only apply format_screen to the plain/text repr of the mime
            # bundle.
            formatter = format_screen if info.ismagic else docformat
            if meth == 'pdoc':
                pmethod(info.obj, oname, formatter)
            elif meth == 'pinfo':
                pmethod(
                    info.obj,
                    oname,
                    formatter,
                    info,
                    enable_html_pager=self.enable_html_pager,
                    **kw
                )
            else:
                pmethod(info.obj, oname)
        else:
            print('Object `%s` not found.' % oname)
            return 'not found'  # so callers can take other action
    def object_inspect(self, oname, detail_level=0):
        """Get object info about oname.

        Returns an info-dict; when the name cannot be resolved, an info-dict
        with ``found=False`` is returned rather than raising.
        """
        with self.builtin_trap:
            info = self._object_find(oname)
            if info.found:
                return self.inspector.info(info.obj, oname, info=info,
                            detail_level=detail_level
                )
            else:
                return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
"""Get object info as formatted text"""
return self.object_inspect_mime(oname, detail_level)['text/plain']
    def object_inspect_mime(self, oname, detail_level=0):
        """Get object info as a mimebundle of formatted representations.

        A mimebundle is a dictionary, keyed by mime-type.
        It must always have the key `'text/plain'`.

        Raises
        ------
        KeyError
            If `oname` cannot be found in any namespace.
        """
        with self.builtin_trap:
            info = self._object_find(oname)
            if info.found:
                return self.inspector._get_info(info.obj, oname, info=info,
                            detail_level=detail_level
                )
            else:
                raise KeyError(oname)
    #-------------------------------------------------------------------------
    # Things related to history management
    #-------------------------------------------------------------------------
    def init_history(self):
        """Sets up the command history, and starts regular autosaves."""
        self.history_manager = HistoryManager(shell=self, parent=self)
        # Register it so it participates in the configuration system.
        self.configurables.append(self.history_manager)
    #-------------------------------------------------------------------------
    # Things related to exception handling and tracebacks (not debugging)
    #-------------------------------------------------------------------------
    # Debugger class used by the traceback machinery; subclasses may override.
    debugger_cls = Pdb
    def init_traceback_handlers(self, custom_exceptions):
        """Create the syntax-error and interactive traceback handlers."""
        # Syntax error handler.
        self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
        # The interactive one is initialized with an offset, meaning we always
        # want to remove the topmost item in the traceback, which is our own
        # internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
        self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
                                                     color_scheme='NoColor',
                                                     tb_offset = 1,
                                   check_cache=check_linecache_ipython,
                                   debugger_cls=self.debugger_cls, parent=self)
        # The instance will store a pointer to the system-wide exception hook,
        # so that runtime code (such as magics) can access it.  This is because
        # during the read-eval loop, it may get temporarily overwritten.
        self.sys_excepthook = sys.excepthook
        # and add any custom exception handlers the user may have specified
        self.set_custom_exc(*custom_exceptions)
        # Set the exception mode
        self.InteractiveTB.set_mode(mode=self.xmode)
    def set_custom_exc(self, exc_tuple, handler):
        """set_custom_exc(exc_tuple, handler)

        Set a custom exception handler, which will be called if any of the
        exceptions in exc_tuple occur in the mainloop (specifically, in the
        run_code() method).

        Parameters
        ----------

        exc_tuple : tuple of exception classes
            A *tuple* of exception classes, for which to call the defined
            handler.  It is very important that you use a tuple, and NOT A
            LIST here, because of the way Python's except statement works.  If
            you only want to trap a single exception, use a singleton tuple::

                exc_tuple == (MyCustomException,)

        handler : callable
            handler must have the following signature::

                def my_handler(self, etype, value, tb, tb_offset=None):
                    ...
                    return structured_traceback

            Your handler must return a structured traceback (a list of strings),
            or None.

            This will be made into an instance method (via types.MethodType)
            of IPython itself, and it will be called if any of the exceptions
            listed in the exc_tuple are caught. If the handler is None, an
            internal basic one is used, which just prints basic info.

            To protect IPython from crashes, if your handler ever raises an
            exception or returns an invalid result, it will be immediately
            disabled.

        WARNING: by putting in your own exception handler into IPython's main
        execution loop, you run a very good chance of nasty crashes.  This
        facility should only be used if you really know what you are doing."""

        if not isinstance(exc_tuple, tuple):
            raise TypeError("The custom exceptions must be given as a tuple.")

        def dummy_handler(self, etype, value, tb, tb_offset=None):
            # Fallback handler used when handler is None: print basic info.
            print('*** Simple custom exception handler ***')
            print('Exception type :', etype)
            print('Exception value:', value)
            print('Traceback :', tb)

        def validate_stb(stb):
            """validate structured traceback return type

            return type of CustomTB *should* be a list of strings, but allow
            single strings or None, which are harmless.

            This function will *always* return a list of strings,
            and will raise a TypeError if stb is inappropriate.
            """
            msg = "CustomTB must return list of strings, not %r" % stb
            if stb is None:
                return []
            elif isinstance(stb, str):
                return [stb]
            elif not isinstance(stb, list):
                raise TypeError(msg)
            # it's a list
            for line in stb:
                # check every element
                if not isinstance(line, str):
                    raise TypeError(msg)
            return stb

        if handler is None:
            wrapped = dummy_handler
        else:
            def wrapped(self,etype,value,tb,tb_offset=None):
                """wrap CustomTB handler, to protect IPython from user code

                This makes it harder (but not impossible) for custom exception
                handlers to crash IPython.
                """
                try:
                    stb = handler(self,etype,value,tb,tb_offset=tb_offset)
                    return validate_stb(stb)
                except:
                    # clear custom handler immediately
                    self.set_custom_exc((), None)
                    print("Custom TB Handler failed, unregistering", file=sys.stderr)
                    # show the exception in handler first
                    stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
                    print(self.InteractiveTB.stb2text(stb))
                    print("The original exception:")
                    stb = self.InteractiveTB.structured_traceback(
                                            (etype,value,tb), tb_offset=tb_offset
                    )
                    return stb

        self.CustomTB = types.MethodType(wrapped,self)
        self.custom_exceptions = exc_tuple
    def excepthook(self, etype, value, tb):
        """One more defense for GUI apps that call sys.excepthook.

        GUI frameworks like wxPython trap exceptions and call
        sys.excepthook themselves.  I guess this is a feature that
        enables them to keep running after exceptions that would
        otherwise kill their mainloop. This is a bother for IPython
        which expects to catch all of the program exceptions with a try:
        except: statement.

        Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
        any app directly invokes sys.excepthook, it will look to the user like
        IPython crashed.  In order to work around this, we can disable the
        CrashHandler and replace it with this excepthook instead, which prints a
        regular traceback using our InteractiveTB.  In this fashion, apps which
        call sys.excepthook will generate a regular-looking exception from
        IPython, and the CrashHandler will only be triggered by real IPython
        crashes.

        This hook should be used sparingly, only in places which are not likely
        to be true IPython errors.
        """
        self.showtraceback((etype, value, tb), tb_offset=0)
    def _get_exc_info(self, exc_tuple=None):
        """get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.

        Ensures sys.last_type,value,traceback hold the exc_info we found,
        from whichever source.

        raises ValueError if none of these contain any information
        """
        if exc_tuple is None:
            etype, value, tb = sys.exc_info()
        else:
            etype, value, tb = exc_tuple

        if etype is None:
            # Nothing live; fall back to the most recent unhandled exception.
            if hasattr(sys, 'last_type'):
                etype, value, tb = sys.last_type, sys.last_value, \
                                   sys.last_traceback

        if etype is None:
            raise ValueError("No exception to find")

        # Now store the exception info in sys.last_type etc.
        # WARNING: these variables are somewhat deprecated and not
        # necessarily safe to use in a threaded environment, but tools
        # like pdb depend on their existence, so let's set them.  If we
        # find problems in the field, we'll need to revisit their use.
        sys.last_type = etype
        sys.last_value = value
        sys.last_traceback = tb

        return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
    def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
                      exception_only=False, running_compiled_code=False):
        """Display the exception that just occurred.

        If nothing is known about the exception, this is the method which
        should be used throughout the code for presenting user tracebacks,
        rather than directly invoking the InteractiveTB object.

        A specific showsyntaxerror() also exists, but this method can take
        care of calling it if needed, so unless you are explicitly catching a
        SyntaxError exception, don't try to analyze the stack manually and
        simply call this method."""
        # The outer try guards the whole rendering path against Ctrl-C.
        try:
            try:
                etype, value, tb = self._get_exc_info(exc_tuple)
            except ValueError:
                # _get_exc_info found no exception anywhere.
                print('No traceback available to show.', file=sys.stderr)
                return
            if issubclass(etype, SyntaxError):
                # Though this won't be called by syntax errors in the input
                # line, there may be SyntaxError cases with imported code.
                self.showsyntaxerror(filename, running_compiled_code)
            elif etype is UsageError:
                self.show_usage_error(value)
            else:
                if exception_only:
                    stb = ['An exception has occurred, use %tb to see '
                           'the full traceback.\n']
                    stb.extend(self.InteractiveTB.get_exception_only(etype,
                                                                     value))
                else:
                    try:
                        # Exception classes can customise their traceback - we
                        # use this in IPython.parallel for exceptions occurring
                        # in the engines. This should return a list of strings.
                        stb = value._render_traceback_()
                    except Exception:
                        stb = self.InteractiveTB.structured_traceback(etype,
                                            value, tb, tb_offset=tb_offset)
                    # NOTE: the full-traceback branch shows and returns here,
                    # so the call below the else-block only runs for
                    # exception_only=True.
                    self._showtraceback(etype, value, stb)
                    if self.call_pdb:
                        # drop into debugger
                        self.debugger(force=True)
                    return
                # Actually show the traceback
                self._showtraceback(etype, value, stb)
        except KeyboardInterrupt:
            print('\n' + self.get_exception_only(), file=sys.stderr)
def _showtraceback(self, etype, evalue, stb: str):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
val = self.InteractiveTB.stb2text(stb)
try:
print(val)
except UnicodeEncodeError:
print(val.encode("utf-8", "backslashreplace").decode())
    def showsyntaxerror(self, filename=None, running_compiled_code=False):
        """Display the syntax error that just occurred.

        This doesn't display a stack trace because there isn't one.

        If a filename is given, it is stuffed in the exception instead
        of what was there before (because Python's parser always uses
        "<string>" when reading from a string).

        If the syntax error occurred when running compiled code (i.e.
        running_compiled_code=True), a longer stack trace will be displayed.
        """
        etype, value, last_traceback = self._get_exc_info()
        if filename and issubclass(etype, SyntaxError):
            try:
                value.filename = filename
            except:
                # Not the format we expect; leave it alone
                pass
        # If the error occurred when executing compiled code, we should provide full stacktrace.
        elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
        stb = self.SyntaxTB.structured_traceback(etype, value, elist)
        self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by _run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
#-------------------------------------------------------------------------
# Things related to readline
#-------------------------------------------------------------------------
def init_readline(self):
"""DEPRECATED
Moved to terminal subclass, here only to simplify the init logic."""
# Set a number of methods that depend on readline to be no-op
warnings.warn('`init_readline` is no-op since IPython 5.0 and is Deprecated',
DeprecationWarning, stacklevel=2)
self.set_custom_completer = no_op
    @skip_doctest
    def set_next_input(self, s, replace=False):
        """ Sets the 'default' input string for the next command line.

        The text is stored on ``self.rl_next_input``; presumably the frontend
        consumes (and clears) it when building the next prompt — TODO confirm.
        ``replace`` is accepted for API compatibility but unused here.

        Example::

            In [1]: _ip.set_next_input("Hello Word")
            In [2]: Hello Word_  # cursor is here
        """
        self.rl_next_input = s
def _indent_current_str(self):
"""return the current level of indentation as a string"""
return self.input_splitter.get_indent_spaces() * ' '
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (module_completer,
magic_run_completer, cd_completer, reset_completer)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
parent=self,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', module_completer, str_key = '%aimport')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
    @skip_doctest
    def complete(self, text, line=None, cursor_pos=None):
        """Return the completed text and a list of completions.

        Parameters
        ----------
        text : string
            A string of text to be completed on.  It can be given as empty and
            instead a line/position pair are given.  In this case, the
            completer itself will split the line like readline does.
        line : string, optional
            The complete line that text is part of.
        cursor_pos : int, optional
            The position of the cursor on the input line.

        Returns
        -------
        text : string
            The actual text that was completed.
        matches : list
            A sorted list with all possible completions.

        The optional arguments allow the completion to take more context into
        account, and are part of the low-level completion API.

        This is a wrapper around the completion mechanism, similar to what
        readline does at the command line when the TAB key is hit.  By
        exposing it as a method, it can be used by other non-readline
        environments (such as GUIs) for text completion.

        Simple usage example:

        In [1]: x = 'hello'

        In [2]: _ip.complete('x.l')
        Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
        """
        # Inject names into __builtin__ so we can complete on the added names.
        with self.builtin_trap:
            return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0) -> None:
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted.
`completer` should have the following signature::
def completion(self: Completer, text: string) -> List[str]:
raise NotImplementedError
It will be bound to the current Completer instance and pass some text
and return a list with current completions to suggest to the user.
"""
newcomp = types.MethodType(completer, self.Completer)
self.Completer.custom_matchers.insert(pos,newcomp)
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
from IPython.core import magics as m
self.magics_manager = magic.MagicsManager(shell=self,
parent=self,
user_magics=m.UserMagics(self))
self.configurables.append(self.magics_manager)
# Expose as public API from the magics manager
self.register_magics = self.magics_manager.register
self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
m.PylabMagics, m.ScriptMagics,
)
self.register_magics(m.AsyncMagics)
# Register Magic Aliases
mman = self.magics_manager
# FIXME: magic aliases should be defined by the Magics classes
# or in MagicsManager, not here
mman.register_alias('ed', 'edit')
mman.register_alias('hist', 'history')
mman.register_alias('rep', 'recall')
mman.register_alias('SVG', 'svg', 'cell')
mman.register_alias('HTML', 'html', 'cell')
mman.register_alias('file', 'writefile', 'cell')
# FIXME: Move the color initialization to the DisplayHook, which
# should be split into a prompt manager and displayhook. We probably
# even need a centralize colors management object.
self.run_line_magic('colors', self.colors)
    # Defined here so that it's included in the documentation
    @functools.wraps(magic.MagicsManager.register_function)
    def register_magic_function(self, func, magic_kind='line', magic_name=None):
        # Thin delegation; functools.wraps copies register_function's
        # docstring/signature onto this method so the docs stay accurate.
        self.magics_manager.register_function(
            func, magic_kind=magic_kind, magic_name=magic_name
        )
    def run_line_magic(self, magic_name, line, _stack_depth=1):
        """Execute the given line magic.

        Parameters
        ----------
        magic_name : str
            Name of the desired magic function, without '%' prefix.
        line : str
            The rest of the input line as a single string.
        _stack_depth : int
            If run_line_magic() is called from magic() then _stack_depth=2.
            This is added to ensure backward compatibility for use of
            'get_ipython().magic()'

        Raises
        ------
        UsageError
            If no line magic of that name exists (with a hint when a cell
            magic of the same name does).
        """
        fn = self.find_line_magic(magic_name)
        if fn is None:
            cm = self.find_cell_magic(magic_name)
            etpl = "Line magic function `%%%s` not found%s."
            extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
                                           'did you mean that instead?)' % magic_name )
            raise UsageError(etpl % (magic_name, extra))
        else:
            # Note: this is the distance in the stack to the user's frame.
            # This will need to be updated if the internal calling logic gets
            # refactored, or else we'll be expanding the wrong variables.

            # Determine stack_depth depending on where run_line_magic() has been called
            stack_depth = _stack_depth
            if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
                # magic has opted out of var_expand
                magic_arg_s = line
            else:
                magic_arg_s = self.var_expand(line, stack_depth)
            # Put magic args in a list so we can call with f(*a) syntax
            args = [magic_arg_s]
            kwargs = {}
            # Grab local namespace if we need it:
            if getattr(fn, "needs_local_scope", False):
                kwargs['local_ns'] = self.get_local_scope(stack_depth)
            with self.builtin_trap:
                result = fn(*args, **kwargs)
            return result
def get_local_scope(self, stack_depth):
"""Get local scope at given stack depth.
Parameters
----------
stack_depth : int
Depth relative to calling frame
"""
return sys._getframe(stack_depth + 1).f_locals
    def run_cell_magic(self, magic_name, line, cell):
        """Execute the given cell magic.

        Parameters
        ----------
        magic_name : str
            Name of the desired magic function, without '%' prefix.
        line : str
            The rest of the first input line as a single string.
        cell : str
            The body of the cell as a (possibly multiline) string.

        Raises
        ------
        UsageError
            If no such cell magic exists, or if the cell body is empty.
        """
        fn = self.find_cell_magic(magic_name)
        if fn is None:
            lm = self.find_line_magic(magic_name)
            etpl = "Cell magic `%%{0}` not found{1}."
            extra = '' if lm is None else (' (But line magic `%{0}` exists, '
                            'did you mean that instead?)'.format(magic_name))
            raise UsageError(etpl.format(magic_name, extra))
        elif cell == '':
            # An empty body almost always means the user wanted a line magic.
            message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
            if self.find_line_magic(magic_name) is not None:
                message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
            raise UsageError(message)
        else:
            # Note: this is the distance in the stack to the user's frame.
            # This will need to be updated if the internal calling logic gets
            # refactored, or else we'll be expanding the wrong variables.
            stack_depth = 2
            if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
                # magic has opted out of var_expand
                magic_arg_s = line
            else:
                magic_arg_s = self.var_expand(line, stack_depth)
            kwargs = {}
            if getattr(fn, "needs_local_scope", False):
                kwargs['local_ns'] = self.user_ns

            with self.builtin_trap:
                args = (magic_arg_s, cell)
                result = fn(*args, **kwargs)
            return result
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
    def magic(self, arg_s):
        """DEPRECATED. Use run_line_magic() instead.

        Call a magic function by name.

        Input: a string containing the name of the magic function to call and
        any additional arguments to be passed to the magic.

        magic('name -opt foo bar') is equivalent to typing at the ipython
        prompt:

        In[1]: %name -opt foo bar

        To call a magic without arguments, simply use magic('name').

        This provides a proper Python function to call IPython's magics in any
        valid Python code you can type at the interpreter, including loops and
        compound statements.
        """
        # TODO: should we issue a loud deprecation warning here?
        # Split "name args..." on the first space only.
        magic_name, _, magic_arg_s = arg_s.partition(' ')
        # Strip leading '%' escape characters before dispatch.
        magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
        # _stack_depth=2 keeps var_expand pointing at the caller's frame.
        return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, str):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
not supported. Should not be a command that expects input
other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
    def system_raw(self, cmd):
        """Call the given cmd in a subprocess using os.system on Windows or
        subprocess.call using the system shell on other platforms.

        The exit code is stored in ``self.user_ns['_exit_code']`` rather than
        returned, so sys.displayhook is not triggered.

        Parameters
        ----------
        cmd : str
            Command to execute.
        """
        cmd = self.var_expand(cmd, depth=1)
        # protect os.system from UNC paths on Windows, which it can't handle:
        if sys.platform == 'win32':
            from IPython.utils._process_win32 import AvoidUNCPath
            with AvoidUNCPath() as path:
                if path is not None:
                    # Run from a safe drive-letter path instead of the UNC cwd.
                    cmd = '"pushd %s &&"%s' % (path, cmd)
                try:
                    ec = os.system(cmd)
                except KeyboardInterrupt:
                    print('\n' + self.get_exception_only(), file=sys.stderr)
                    ec = -2
        else:
            # For posix the result of the subprocess.call() below is an exit
            # code, which by convention is zero for success, positive for
            # program failure.  Exit codes above 128 are reserved for signals,
            # and the formula for converting a signal to an exit code is usually
            # signal_number+128.  To more easily differentiate between exit
            # codes and signals, ipython uses negative numbers.  For instance
            # since control-c is signal 2 but exit code 130, ipython's
            # _exit_code variable will read -2.  Note that some shells like
            # csh and fish don't follow sh/bash conventions for exit codes.
            executable = os.environ.get('SHELL', None)
            try:
                # Use env shell instead of default /bin/sh
                ec = subprocess.call(cmd, shell=True, executable=executable)
            except KeyboardInterrupt:
                # intercept control-C; a long traceback is not useful here
                print('\n' + self.get_exception_only(), file=sys.stderr)
                ec = 130
            if ec > 128:
                # Map signal-style exit codes (signal+128) to -signal.
                ec = -(ec - 128)

        # We explicitly do NOT return the subprocess status code, because
        # a non-None value would trigger :func:`sys.displayhook` calls.
        # Instead, we store the exit_code in user_ns.  Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGNIT,
        # but raising SystemExit(_exit_code) will give status 254!
        self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
not supported.
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
self.alias_manager = AliasManager(shell=self, parent=self)
self.configurables.append(self.alias_manager)
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, parent=self)
self.configurables.append(self.extension_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
def init_payload(self):
self.payload_manager = PayloadManager(parent=self)
self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, parent=self)
self.configurables.append(self.prefilter_manager)
# Ultimately this will be refactored in the new interpreter code, but
# for now, we should expose the main prefilter method (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
u'status' : 'error',
u'traceback' : stb,
u'ename' : etype.__name__,
u'evalue' : py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
    def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
        """A safe version of the builtin execfile().

        This version will never throw an exception, but instead print
        helpful error messages to the screen.  This only works on pure
        Python files with the .py extension.

        Parameters
        ----------
        fname : string
            The name of the file to be executed.
        where : tuple
            One or two namespaces, passed to execfile() as (globals,locals).
            If only one is given, it is passed as both.
        exit_ignore : bool (False)
            If True, then silence SystemExit for non-zero status (it is always
            silenced for zero status, as it is so common).
        raise_exceptions : bool (False)
            If True raise exceptions everywhere. Meant for testing.
        shell_futures : bool (False)
            If True, the code will share future statements with the interactive
            shell. It will both be affected by previous __future__ imports, and
            any __future__ imports in the code will affect the shell. If False,
            __future__ imports are not shared in either direction.
        """
        fname = os.path.abspath(os.path.expanduser(fname))

        # Make sure we can open the file
        try:
            with open(fname):
                pass
        except:
            warn('Could not open file <%s> for safe execution.' % fname)
            return

        # Find things also in current directory.  This is needed to mimic the
        # behavior of running a script from the system command line, where
        # Python inserts the script's directory into sys.path
        dname = os.path.dirname(fname)

        with prepended_to_syspath(dname), self.builtin_trap:
            try:
                # Pad `where` to (globals, locals); with a single namespace,
                # execfile receives it as globals and None as locals.
                glob, loc = (where + (None, ))[:2]
                py3compat.execfile(
                    fname, glob, loc,
                    self.compile if shell_futures else None)
            except SystemExit as status:
                # If the call was made with 0 or None exit status (sys.exit(0)
                # or sys.exit() ), don't bother showing a traceback, as both of
                # these are considered normal by the OS:
                # > python -c'import sys;sys.exit(0)'; echo $?
                # 0
                # > python -c'import sys;sys.exit()'; echo $?
                # 0
                # For other exit status, we show the exception unless
                # explicitly silenced, but only in short form.
                if status.code:
                    if raise_exceptions:
                        raise
                    if not exit_ignore:
                        self.showtraceback(exception_only=True)
            except:
                if raise_exceptions:
                    raise
                # tb offset is 2 because we wrap execfile
                self.showtraceback(tb_offset=2)
    def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
        """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.

        Parameters
        ----------
        fname : str
            The name of the file to execute.  The filename must have a
            .ipy or .ipynb extension.
        shell_futures : bool (False)
            If True, the code will share future statements with the interactive
            shell. It will both be affected by previous __future__ imports, and
            any __future__ imports in the code will affect the shell. If False,
            __future__ imports are not shared in either direction.
        raise_exceptions : bool (False)
            If True raise exceptions everywhere.  Meant for testing.
        """
        fname = os.path.abspath(os.path.expanduser(fname))

        # Make sure we can open the file
        try:
            with open(fname):
                pass
        except:
            warn('Could not open file <%s> for safe execution.' % fname)
            return

        # Find things also in current directory.  This is needed to mimic the
        # behavior of running a script from the system command line, where
        # Python inserts the script's directory into sys.path
        dname = os.path.dirname(fname)

        def get_cells():
            """generator for sequence of code blocks to run"""
            if fname.endswith('.ipynb'):
                from nbformat import read
                nb = read(fname, as_version=4)
                if not nb.cells:
                    return
                for cell in nb.cells:
                    if cell.cell_type == 'code':
                        yield cell.source
            else:
                # A plain .ipy file is run as one single block.
                with open(fname) as f:
                    yield f.read()

        with prepended_to_syspath(dname):
            try:
                for cell in get_cells():
                    result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
                    if raise_exceptions:
                        result.raise_error()
                    elif not result.success:
                        # Stop at the first failing cell.
                        break
            except:
                if raise_exceptions:
                    raise
                self.showtraceback()
                warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
"""A safe version of runpy.run_module().
This version will never throw an exception, but instead print
helpful error messages to the screen.
`SystemExit` exceptions with status code 0 or None are ignored.
Parameters
----------
mod_name : string
The name of the module to be executed.
where : dict
The globals namespace.
"""
try:
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except SystemExit as status:
if status.code:
raise
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effects, such as implicit displayhooks and
and logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
"""
result = None
try:
result = self._run_cell(
raw_cell, store_history, silent, shell_futures)
finally:
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell', result)
return result
def _run_cell(self, raw_cell:str, store_history:bool, silent:bool, shell_futures:bool):
"""Internal method to run a complete IPython cell."""
# we need to avoid calling self.transform_cell multiple time on the same thing
# so we need to store some results:
preprocessing_exc_tuple = None
try:
transformed_cell = self.transform_cell(raw_cell)
except Exception:
transformed_cell = raw_cell
preprocessing_exc_tuple = sys.exc_info()
assert transformed_cell is not None
coro = self.run_cell_async(
raw_cell,
store_history=store_history,
silent=silent,
shell_futures=shell_futures,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
)
# run_cell_async is async, but may not actually need an eventloop.
# when this is the case, we want to run it using the pseudo_sync_runner
# so that code can invoke eventloops (for example via the %run , and
# `%paste` magic.
if self.trio_runner:
runner = self.trio_runner
elif self.should_run_async(
raw_cell,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
):
runner = self.loop_runner
else:
runner = _pseudo_sync_runner
try:
return runner(coro)
except BaseException as e:
info = ExecutionInfo(raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
result.error_in_exec = e
self.showtraceback(running_compiled_code=True)
return result
return
def should_run_async(
self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
) -> bool:
"""Return whether a cell should be run asynchronously via a coroutine runner
Parameters
----------
raw_cell: str
The code to be executed
Returns
-------
result: bool
Whether the code needs to be run with a coroutine runner or not
.. versionadded: 7.0
"""
if not self.autoawait:
return False
if preprocessing_exc_tuple is not None:
return False
assert preprocessing_exc_tuple is None
if transformed_cell is None:
warnings.warn(
"`should_run_async` will not call `transform_cell`"
" automatically in the future. Please pass the result to"
" `transformed_cell` argument and any exception that happen"
" during the"
"transform in `preprocessing_exc_tuple` in"
" IPython 7.17 and above.",
DeprecationWarning,
stacklevel=2,
)
try:
cell = self.transform_cell(raw_cell)
except Exception:
# any exception during transform will be raised
# prior to execution
return False
else:
cell = transformed_cell
return _should_be_async(cell)
async def run_cell_async(
self,
raw_cell: str,
store_history=False,
silent=False,
shell_futures=True,
*,
transformed_cell: Optional[str] = None,
preprocessing_exc_tuple: Optional[Any] = None
) -> ExecutionResult:
"""Run a complete IPython cell asynchronously.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effects, such as implicit displayhooks and
and logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
transformed_cell: str
cell that was passed through transformers
preprocessing_exc_tuple:
trace if the transformation failed.
Returns
-------
result : :class:`ExecutionResult`
.. versionadded: 7.0
"""
info = ExecutionInfo(
raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
self.last_execution_result = result
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
if store_history:
self.execution_count += 1
result.error_before_exec = value
self.last_execution_succeeded = False
self.last_execution_result = result
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell', info)
if transformed_cell is None:
warnings.warn(
"`run_cell_async` will not call `transform_cell`"
" automatically in the future. Please pass the result to"
" `transformed_cell` argument and any exception that happen"
" during the"
"transform in `preprocessing_exc_tuple` in"
" IPython 7.17 and above.",
DeprecationWarning,
stacklevel=2,
)
# If any of our input transformation (input_transformer_manager or
# prefilter_manager) raises an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
try:
cell = self.transform_cell(raw_cell)
except Exception:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
preprocessing_exc_tuple = None
else:
if preprocessing_exc_tuple is None:
cell = transformed_cell
else:
cell = raw_cell
# Store raw and processed history
if store_history:
self.history_manager.store_inputs(self.execution_count,
cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[1])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else self.compiler_class()
_run_async = False
with self.builtin_trap:
cell_name = self.compile.cache(
cell, self.execution_count, raw_code=raw_cell
)
with self.display_trap:
# Compile to bytecode
try:
if sys.version_info < (3,8) and self.autoawait:
if _should_be_async(cell):
# the code AST below will not be user code: we wrap it
# in an `async def`. This will likely make some AST
# transformer below miss some transform opportunity and
# introduce a small coupling to run_code (in which we
# bake some assumptions of what _ast_asyncify returns.
# they are ways around (like grafting part of the ast
# later:
# - Here, return code_ast.body[0].body[1:-1], as well
# as last expression in return statement which is
# the user code part.
# - Let it go through the AST transformers, and graft
# - it back after the AST transform
# But that seem unreasonable, at least while we
# do not need it.
code_ast = _ast_asyncify(cell, 'async-def-wrapper')
_run_async = True
else:
code_ast = compiler.ast_parse(cell, filename=cell_name)
else:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
except IndentationError as e:
self.showindentationerror()
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.exec_result = result
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
if _run_async:
interactivity = 'async'
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
self.last_execution_result = result
# Reset this so later displayed values do not modify the
# ExecutionResult
self.displayhook.exec_result = None
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def transform_cell(self, raw_cell):
"""Transform an input cell before parsing it.
Static transformations, implemented in IPython.core.inputtransformer2,
deal with things like ``%magic`` and ``!system`` commands.
These run on all input.
Dynamic transformations, for things like unescaped magics and the exit
autocall, depend on the state of the interpreter.
These only apply to single line inputs.
These string-based transformations are followed by AST transformations;
see :meth:`transform_ast`.
"""
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
    async def run_ast_nodes(self, nodelist:ListType[AST], cell_name:str, interactivity='last_expr',
                            compiler=compile, result=None):
        """Run a sequence of AST nodes. The execution mode depends on the
        interactivity parameter.

        Parameters
        ----------
        nodelist : list
          A sequence of AST nodes to run.
        cell_name : str
          Will be passed to the compiler as the filename of the cell. Typically
          the value returned by ip.compile.cache(cell).
        interactivity : str
          'all', 'last', 'last_expr' , 'last_expr_or_assign' or 'none',
          specifying which nodes should be run interactively (displaying output
          from expressions). 'last_expr' will run the last node interactively
          only if it is an expression (i.e. expressions in loops or other blocks
          are not displayed) 'last_expr_or_assign' will run the last expression
          or the last assignment. Other values for this parameter will raise a
          ValueError.

          Experimental value: 'async' Will try to run top level interactive
          async/await code in default runner, this will not respect the
          interactivity setting and will only run the last node if it is an
          expression.
        compiler : callable
          A function with the same interface as the built-in compile(), to turn
          the AST nodes into code objects. Default is the built-in compile().
        result : ExecutionResult, optional
          An object to store exceptions that occur during execution.

        Returns
        -------
        True if an exception occurred while running code, False if it finished
        running.
        """
        if not nodelist:
            # Implicitly returns None, which is falsy -> treated as "no error".
            return

        if interactivity == 'last_expr_or_assign':
            if isinstance(nodelist[-1], _assign_nodes):
                asg = nodelist[-1]
                if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
                    target = asg.targets[0]
                elif isinstance(asg, _single_targets_nodes):
                    target = asg.target
                else:
                    target = None
                if isinstance(target, ast.Name):
                    # Append a synthetic expression node echoing the assigned
                    # name so its value gets displayed.
                    nnode = ast.Expr(ast.Name(target.id, ast.Load()))
                    ast.fix_missing_locations(nnode)
                    nodelist.append(nnode)
            interactivity = 'last_expr'

        _async = False
        if interactivity == 'last_expr':
            if isinstance(nodelist[-1], ast.Expr):
                interactivity = "last"
            else:
                interactivity = "none"

        # Partition the nodes into those compiled in 'exec' mode (no display)
        # and those compiled in 'single' mode (interactive display).
        if interactivity == 'none':
            to_run_exec, to_run_interactive = nodelist, []
        elif interactivity == 'last':
            to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
        elif interactivity == 'all':
            to_run_exec, to_run_interactive = [], nodelist
        elif interactivity == 'async':
            to_run_exec, to_run_interactive = [], nodelist
            _async = True
        else:
            raise ValueError("Interactivity was %r" % interactivity)

        try:
            if _async and sys.version_info > (3,8):
                raise ValueError("This branch should never happen on Python 3.8 and above, "
                                 "please try to upgrade IPython and open a bug report with your case.")
            if _async:
                # If interactivity is async the semantics of run_code are
                # completely different Skip usual machinery.
                mod = Module(nodelist, [])
                async_wrapper_code = compiler(mod, cell_name, 'exec')
                exec(async_wrapper_code, self.user_global_ns, self.user_ns)
                async_code = removed_co_newlocals(self.user_ns.pop('async-def-wrapper')).__code__
                if (await self.run_code(async_code, result, async_=True)):
                    return True
            else:
                if sys.version_info > (3, 8):
                    # On 3.8+ the code object itself tells us whether
                    # PyCF_ALLOW_TOP_LEVEL_AWAIT produced a coroutine.
                    def compare(code):
                        is_async = (inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE)
                        return is_async
                else:
                    def compare(code):
                        return _async

                # refactor that to just change the mod constructor.
                to_run = []
                for node in to_run_exec:
                    to_run.append((node, 'exec'))

                for node in to_run_interactive:
                    to_run.append((node, 'single'))

                for node,mode in to_run:
                    if mode == 'exec':
                        mod = Module([node], [])
                    elif mode == 'single':
                        mod = ast.Interactive([node])
                    with compiler.extra_flags(getattr(ast, 'PyCF_ALLOW_TOP_LEVEL_AWAIT', 0x0) if self.autoawait else 0x0):
                        code = compiler(mod, cell_name, mode)
                        asy = compare(code)
                    if (await self.run_code(code, result, async_=asy)):
                        return True

            # Flush softspace
            if softspace(sys.stdout, 0):
                print()

        except:
            # It's possible to have exceptions raised here, typically by
            # compilation of odd code (such as a naked 'return' outside a
            # function) that did parse but isn't valid. Typically the exception
            # is a SyntaxError, but it's safest just to catch anything and show
            # the user a traceback.

            # We do only one try/except outside the loop to minimize the impact
            # on runtime, and also because if any node in the node list is
            # broken, we should stop execution completely.
            if result:
                result.error_before_exec = sys.exc_info()[1]
            self.showtraceback()
            return True

        return False
def _async_exec(self, code_obj: types.CodeType, user_ns: dict):
"""
Evaluate an asynchronous code object using a code runner
Fake asynchronous execution of code_object in a namespace via a proxy namespace.
Returns coroutine object, which can be executed via async loop runner
WARNING: The semantics of `async_exec` are quite different from `exec`,
in particular you can only pass a single namespace. It also return a
handle to the value of the last things returned by code_object.
"""
return eval(code_obj, user_ns)
    async def run_code(self, code_obj, result=None, *, async_=False):
        """Execute a code object.

        When an exception occurs, self.showtraceback() is called to display a
        traceback.

        Parameters
        ----------
        code_obj : code object
          A compiled code object, to be executed
        result : ExecutionResult, optional
          An object to store exceptions that occur during execution.
        async_ :  Bool (Experimental)
          Attempt to run top-level asynchronous code in a default loop.

        Returns
        -------
        False : successful execution.
        True : an error occurred.
        """
        # special value to say that anything above is IPython and should be
        # hidden.
        __tracebackhide__ = "__ipython_bottom__"
        # Set our own excepthook in case the user code tries to call it
        # directly, so that the IPython crash handler doesn't get triggered
        old_excepthook, sys.excepthook = sys.excepthook, self.excepthook

        # we save the original sys.excepthook in the instance, in case config
        # code (such as magics) needs access to it.
        self.sys_excepthook = old_excepthook
        outflag = True  # happens in more places, so it's easier as default
        try:
            try:
                self.hooks.pre_run_code_hook()
                if async_ and sys.version_info < (3,8):
                    # Pre-3.8 fallback: run the async wrapper, then re-display
                    # the final value through a 'single'-mode exec so the
                    # displayhook fires for it.
                    last_expr = (await self._async_exec(code_obj, self.user_ns))
                    code = compile('last_expr', 'fake', "single")
                    exec(code, {'last_expr': last_expr})
                elif async_ :
                    # Code object is already a coroutine (top-level await).
                    await eval(code_obj, self.user_global_ns, self.user_ns)
                else:
                    exec(code_obj, self.user_global_ns, self.user_ns)
            finally:
                # Reset our crash handler in place
                sys.excepthook = old_excepthook
        except SystemExit as e:
            if result is not None:
                result.error_in_exec = e
            self.showtraceback(exception_only=True)
            warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
        except self.custom_exceptions:
            etype, value, tb = sys.exc_info()
            if result is not None:
                result.error_in_exec = value
            self.CustomTB(etype, value, tb)
        except:
            # Catch-all: any other exception from user code is recorded and
            # displayed, never propagated to the shell machinery.
            if result is not None:
                result.error_in_exec = sys.exc_info()[1]
            self.showtraceback(running_compiled_code=True)
        else:
            outflag = False
        return outflag

    # For backwards compatibility
    runcode = run_code
def check_complete(self, code: str) -> Tuple[str, str]:
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
source : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent : str
When status is 'incomplete', this is some whitespace to insert on
the next line of the prompt.
"""
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
active_eventloop = None
    def enable_gui(self, gui=None):
        """Switch on integration with a GUI event loop.

        Abstract in this base class; concrete subclasses implement it.
        """
        raise NotImplementedError('Implement enable_gui in a subclass')
    def enable_matplotlib(self, gui=None):
        """Enable interactive matplotlib and inline figure support.

        This takes the following steps:

        1. select the appropriate eventloop and matplotlib backend
        2. set up matplotlib for interactive use with that backend
        3. configure formatters for inline figure display
        4. enable the selected gui eventloop

        Parameters
        ----------
        gui : optional, string
          If given, dictates the choice of matplotlib GUI backend to use
          (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
          'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
          matplotlib (as dictated by the matplotlib build-time options plus the
          user's matplotlibrc configuration file).  Note that not all backends
          make sense in all contexts, for example a terminal ipython can't
          display figures inline.
        """
        # Deferred imports: matplotlib support is optional.
        from IPython.core import pylabtools as pt
        from matplotlib_inline.backend_inline import configure_inline_support

        gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)

        if gui != 'inline':
            # If we have our first gui selection, store it
            if self.pylab_gui_select is None:
                self.pylab_gui_select = gui
            # Otherwise if they are different
            elif gui != self.pylab_gui_select:
                print('Warning: Cannot change to a different GUI toolkit: %s.'
                       ' Using %s instead.' % (gui, self.pylab_gui_select))
                gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)

        # Activate the backend and wire up inline-figure display.
        pt.activate_matplotlib(backend)
        configure_inline_support(self, backend)

        # Now we must activate the gui pylab wants to use, and fix %run to take
        # plot updates into account
        self.enable_gui(gui)
        self.magics_manager.registry['ExecutionMagics'].default_runner = \
            pt.mpl_runner(self.safe_execfile)

        return gui, backend
    def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
        """Activate pylab support at runtime.

        This turns on support for matplotlib, preloads into the interactive
        namespace all of numpy and pylab, and configures IPython to correctly
        interact with the GUI event loop.  The GUI backend to be used can be
        optionally selected with the optional ``gui`` argument.

        This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.

        Parameters
        ----------
        gui : optional, string
          If given, dictates the choice of matplotlib GUI backend to use
          (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
          'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
          matplotlib (as dictated by the matplotlib build-time options plus the
          user's matplotlibrc configuration file).  Note that not all backends
          make sense in all contexts, for example a terminal ipython can't
          display figures inline.
        import_all : optional, bool, default: True
          Whether to do `from numpy import *` and `from pylab import *`
          in addition to module imports.
        welcome_message : deprecated
          This argument is ignored, no welcome message will be displayed.
        """
        from IPython.core.pylabtools import import_pylab

        gui, backend = self.enable_matplotlib(gui)

        # We want to prevent the loading of pylab to pollute the user's
        # namespace as shown by the %who* magics, so we execute the activation
        # code in an empty namespace, and we update *both* user_ns and
        # user_ns_hidden with this information.
        ns = {}
        import_pylab(ns, import_all)
        # warn about clobbered names
        ignored = {"__builtins__"}
        both = set(ns).intersection(self.user_ns).difference(ignored)
        clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
        self.user_ns.update(ns)
        # Hide the pylab names from %who/%whos while keeping them usable.
        self.user_ns_hidden.update(ns)
        return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
    def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
        """Expand python variables in a string.

        The depth argument indicates how many frames above the caller should
        be walked to look for the local namespace where to expand variables.

        The global namespace for expansion is always the user's interactive
        namespace.
        """
        # NOTE(review): the default formatter is a shared mutable-default
        # instance; DollarFormatter is presumably stateless — confirm.
        ns = self.user_ns.copy()
        try:
            # depth+1 skips this method's own frame.
            frame = sys._getframe(depth+1)
        except ValueError:
            # This is thrown if there aren't that many frames on the stack,
            # e.g. if a script called run_line_magic() directly.
            pass
        else:
            ns.update(frame.f_locals)

        try:
            # We have to use .vformat() here, because 'self' is a valid and common
            # name, and expanding **ns for .format() would make it collide with
            # the 'self' argument of the method.
            cmd = formatter.vformat(cmd, args=[], kwargs=ns)
        except Exception:
            # if formatter couldn't format, just let it go untransformed
            pass
        return cmd
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mkstemp (created in a tempfile.mkdtemp),
but it registers the created filename internally so ipython cleans it up
at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
dirname = tempfile.mkdtemp(prefix=prefix)
self.tempdirs.append(dirname)
handle, filename = tempfile.mkstemp('.py', prefix, dir=dirname)
os.close(handle) # On Windows, there can only be one open handle on a file
self.tempfiles.append(filename)
if data:
with open(filename, 'w') as tmp_file:
tmp_file.write(data)
return filename
    @undoc
    def write(self,data):
        """DEPRECATED: Write a string to the default output"""
        # Backwards-compatibility shim only: warn, then forward to stdout.
        warn('InteractiveShell.write() is deprecated, use sys.stdout instead',
             DeprecationWarning, stacklevel=2)
        sys.stdout.write(data)

    @undoc
    def write_err(self,data):
        """DEPRECATED: Write a string to the default error output"""
        # Backwards-compatibility shim only: warn, then forward to stderr.
        warn('InteractiveShell.write_err() is deprecated, use sys.stderr instead',
             DeprecationWarning, stacklevel=2)
        sys.stderr.write(data)
def ask_yes_no(self, prompt, default=None, interrupt=None):
if self.quiet:
return True
return ask_yes_no(prompt,default,interrupt)
    def show_usage(self):
        """Show a usage message"""
        # Display IPython's interactive usage text through the pager.
        page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
return "\n".join(x for _, _, x in lines)
    def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
        """Get a code string from history, file, url, or a string or macro.

        This is mainly used by magic functions.

        Parameters
        ----------
        target : str
          A string specifying code to retrieve. This will be tried respectively
          as: ranges of input history (see %history for syntax), url,
          corresponding .py file, filename, or an expression evaluating to a
          string or Macro in the user namespace.
        raw : bool
          If true (default), retrieve raw history. Has no effect on the other
          retrieval mechanisms.
        py_only : bool (default False)
          Only try to fetch python code, do not try alternative methods to
          decode file if unicode fails.
        skip_encoding_cookie : bool (default True)
          Whether to strip a PEP 263 encoding-cookie line from fetched source.
        search_ns : bool (default False)
          Also inspect the user namespace for an object whose source can be
          retrieved.

        Returns
        -------
        A string of code.

        ValueError is raised if nothing is found, and TypeError if it evaluates
        to an object of another type. In each case, .args[0] is a printable
        message.
        """
        code = self.extract_input_lines(target, raw=raw)  # Grab history
        if code:
            return code
        try:
            if target.startswith(('http://', 'https://')):
                return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
        except UnicodeDecodeError:
            if not py_only :
                # Deferred import
                from urllib.request import urlopen
                response = urlopen(target)
                # Fall back to latin1, which can decode any byte sequence.
                return response.read().decode('latin1')
            raise ValueError(("'%s' seem to be unreadable.") % target)

        # Try the literal target and, if resolvable, its .py filename first.
        potential_target = [target]
        try :
            potential_target.insert(0,get_py_filename(target))
        except IOError:
            pass

        for tgt in potential_target :
            if os.path.isfile(tgt):                        # Read file
                try :
                    return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
                except UnicodeDecodeError :
                    if not py_only :
                        with io_open(tgt,'r', encoding='latin1') as f :
                            return f.read()
                    raise ValueError(("'%s' seem to be unreadable.") % target)
            elif os.path.isdir(os.path.expanduser(tgt)):
                raise ValueError("'%s' is a directory, not a regular file." % target)

        if search_ns:
            # Inspect namespace to load object source
            object_info = self.object_inspect(target, detail_level=1)
            if object_info['found'] and object_info['source']:
                return object_info['source']

        try:                                              # User namespace
            # NOTE: target is evaluated as a Python expression in the user's
            # namespace; this is shell machinery, not untrusted input.
            codeobj = eval(target, self.user_ns)
        except Exception:
            raise ValueError(("'%s' was not found in history, as a file, url, "
                                "nor in the user namespace.") % target)

        if isinstance(codeobj, str):
            return codeobj
        elif isinstance(codeobj, Macro):
            return codeobj.value

        raise TypeError("%s is neither a string nor a macro." % target,
                        codeobj)
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
    def atexit_operations(self):
        """This will be executed at the time of exit.

        Cleanup operations and saving of persistent data that is done
        unconditionally by IPython should be performed here.

        For things that may depend on startup flags or platform specifics (such
        as having readline or not), register a separate atexit function in the
        code that has the appropriate information, rather than trying to
        clutter
        """
        # Close the history session (this stores the end time and line count)
        # this must be *before* the tempfile cleanup, in case of temporary
        # history db
        self.history_manager.end_session()

        # Cleanup all tempfiles and folders left around
        for tfile in self.tempfiles:
            try:
                os.unlink(tfile)
            except OSError:
                # Best effort: the file may already be gone or be locked.
                pass
        for tdir in self.tempdirs:
            try:
                os.rmdir(tdir)
            except OSError:
                # Directory may be non-empty or already removed; ignore.
                pass

        # Clear all user namespaces to release all references cleanly.
        self.reset(new_session=False)

        # Run user hooks
        self.hooks.shutdown_hook()
    def cleanup(self):
        """Undo the modifications IPython made to the ``sys`` module state."""
        self.restore_sys_module_state()

    # Overridden in terminal subclass to change prompts
    def switch_doctest_mode(self, mode):
        """Toggle doctest-style prompts; a no-op in this base class."""
        pass
class InteractiveShellABC(metaclass=abc.ABCMeta):
    """An abstract base class for InteractiveShell."""

# Register the concrete implementation so isinstance/issubclass checks
# against the ABC succeed for InteractiveShell and its subclasses.
InteractiveShellABC.register(InteractiveShell)
| 40.244444
| 147
| 0.592153
|
4a0995529456bb4d32909dd01ee19bf838748c93
| 17,048
|
py
|
Python
|
virtual/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/pg8000.py
|
vyonna6519/pitches
|
4fa0d56051f5f7761984fdedd61337a0a6195a17
|
[
"MIT"
] | null | null | null |
virtual/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/pg8000.py
|
vyonna6519/pitches
|
4fa0d56051f5f7761984fdedd61337a0a6195a17
|
[
"MIT"
] | null | null | null |
virtual/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/pg8000.py
|
vyonna6519/pitches
|
4fa0d56051f5f7761984fdedd61337a0a6195a17
|
[
"MIT"
] | null | null | null |
# postgresql/pg8000.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+pg8000
:name: pg8000
:dbapi: pg8000
:connectstring: postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
:url: https://pypi.org/project/pg8000/
.. versionchanged:: 1.4 The pg8000 dialect has been updated for version
1.16.6 and higher, and is again part of SQLAlchemy's continuous integration
with full feature support.
.. _pg8000_unicode:
Unicode
-------
pg8000 will encode / decode string values between it and the server using the
PostgreSQL ``client_encoding`` parameter; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
The ``client_encoding`` can be overridden for a session by executing the SQL:
SET CLIENT_ENCODING TO 'utf8';
SQLAlchemy will execute this SQL on all new connections based on the value
passed to :func:`_sa.create_engine` using the ``client_encoding`` parameter::
engine = create_engine(
"postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8')
.. _pg8000_ssl:
SSL Connections
---------------
pg8000 accepts a Python ``SSLContext`` object which may be specified using the
:paramref:`_sa.create_engine.connect_args` dictionary::
import ssl
ssl_context = ssl.create_default_context()
engine = sa.create_engine(
"postgresql+pg8000://scott:tiger@192.168.0.199/test",
connect_args={"ssl_context": ssl_context},
)
If the server uses an automatically-generated certificate that is self-signed
or does not match the host name (as seen from the client), it may also be
necessary to disable hostname checking::
import ssl
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
engine = sa.create_engine(
"postgresql+pg8000://scott:tiger@192.168.0.199/test",
connect_args={"ssl_context": ssl_context},
)
.. _pg8000_isolation_level:
pg8000 Transaction Isolation Level
-------------------------------------
The pg8000 dialect offers the same isolation level settings as that
of the :ref:`psycopg2 <psycopg2_isolation_level>` dialect:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`psycopg2_isolation_level`
""" # noqa
import decimal
import re
from uuid import UUID as _python_UUID
from .array import ARRAY as PGARRAY
from .base import _ColonCast
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import ENUM
from .base import INTERVAL
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .base import UUID
from .json import JSON
from .json import JSONB
from .json import JSONPathType
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...sql.elements import quoted_name
class _PGNumeric(sqltypes.Numeric):
    """Numeric type adapted to pg8000's native return values.

    pg8000 already returns ``Decimal`` for NUMERIC (OID 1700) columns and
    ``float`` for float columns, so a result processor is only installed
    when the requested Python type differs from the driver-native one.
    """

    def result_processor(self, dialect, coltype):
        wants_decimal = self.asdecimal
        if coltype in _FLOAT_TYPES:
            if wants_decimal:
                # Convert driver-native floats to Decimal on request.
                return processors.to_decimal_processor_factory(
                    decimal.Decimal, self._effective_decimal_return_scale
                )
            # pg8000 returns float natively for 701
            return None
        if coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
            if wants_decimal:
                # pg8000 returns Decimal natively for 1700
                return None
            return processors.to_float
        raise exc.InvalidRequestError(
            "Unknown PG numeric type: %d" % coltype
        )
class _PGNumericNoBind(_PGNumeric):
    """Numeric variant that passes bind parameters through unconverted."""

    def bind_processor(self, dialect):
        return None


class _PGJSON(JSON):
    """JSON type relying on pg8000's native (de)serialization."""

    def result_processor(self, dialect, coltype):
        # Driver already returns parsed Python objects; no conversion needed.
        return None

    def get_dbapi_type(self, dbapi):
        return dbapi.JSON


class _PGJSONB(JSONB):
    """JSONB type relying on pg8000's native (de)serialization."""

    def result_processor(self, dialect, coltype):
        # Driver already returns parsed Python objects; no conversion needed.
        return None

    def get_dbapi_type(self, dbapi):
        return dbapi.JSONB
class _PGJSONIndexType(sqltypes.JSON.JSONIndexType):
    def get_dbapi_type(self, dbapi):
        # Only the int/str subclasses below should ever be asked for a
        # DBAPI type.
        raise NotImplementedError("should not be here")


class _PGJSONIntIndexType(sqltypes.JSON.JSONIntIndexType):
    """Integer index into a JSON document."""

    def get_dbapi_type(self, dbapi):
        return dbapi.INTEGER


class _PGJSONStrIndexType(sqltypes.JSON.JSONStrIndexType):
    """String key index into a JSON document."""

    def get_dbapi_type(self, dbapi):
        return dbapi.STRING


class _PGJSONPathType(JSONPathType):
    def get_dbapi_type(self, dbapi):
        # 1009 appears to be the text-array OID used for JSON paths —
        # TODO confirm against pg8000 / PostgreSQL catalog.
        return 1009
class _PGUUID(UUID):
    """UUID type converting between ``str`` and ``uuid.UUID``.

    pg8000 traffics in ``uuid.UUID`` objects natively, so conversion is
    needed only when ``as_uuid`` is False and the Python-side value is a
    string.
    """

    def bind_processor(self, dialect):
        if self.as_uuid:
            # Driver accepts uuid.UUID directly; nothing to do.
            return None

        def to_uuid(value):
            return _python_UUID(value) if value is not None else None

        return to_uuid

    def result_processor(self, dialect, coltype):
        if self.as_uuid:
            # Pass driver-native uuid.UUID values through untouched.
            return None

        def to_str(value):
            return str(value) if value is not None else None

        return to_str
class _PGEnum(ENUM):
    def get_dbapi_type(self, dbapi):
        # Send enum values with an unknown type so the server resolves them.
        return dbapi.UNKNOWN


class _PGInterval(INTERVAL):
    def get_dbapi_type(self, dbapi):
        return dbapi.INTERVAL

    @classmethod
    def adapt_emulated_to_native(cls, interval, **kw):
        # Build the native INTERVAL from the generic Interval's precision.
        return _PGInterval(precision=interval.second_precision)
class _PGTimeStamp(sqltypes.DateTime):
    """DateTime reporting the PostgreSQL OID matching its timezone flag."""

    def get_dbapi_type(self, dbapi):
        # 1184 = TIMESTAMPTZOID, 1114 = TIMESTAMPOID
        return 1184 if self.timezone else 1114
class _PGTime(sqltypes.Time):
def get_dbapi_type(self, dbapi):
return dbapi.TIME
class _PGInteger(sqltypes.Integer):
    """Integer type bound with the driver's native INTEGER type."""

    def get_dbapi_type(self, dbapi):
        return dbapi.INTEGER
class _PGSmallInteger(sqltypes.SmallInteger):
    """SmallInteger type; pg8000 uses the generic INTEGER binding."""

    def get_dbapi_type(self, dbapi):
        return dbapi.INTEGER
class _PGNullType(sqltypes.NullType):
    """NullType mapped onto the driver's NULLTYPE marker."""

    def get_dbapi_type(self, dbapi):
        return dbapi.NULLTYPE
class _PGBigInteger(sqltypes.BigInteger):
    """BigInteger type bound with the driver's native BIGINTEGER type."""

    def get_dbapi_type(self, dbapi):
        return dbapi.BIGINTEGER
class _PGBoolean(sqltypes.Boolean):
    """Boolean type bound with the driver's native BOOLEAN type."""

    def get_dbapi_type(self, dbapi):
        return dbapi.BOOLEAN
class _PGARRAY(PGARRAY):
    """ARRAY type that renders bound parameters with an explicit cast."""

    def bind_expression(self, bindvalue):
        # Append a ::type cast so pg8000 sends correctly typed arrays.
        return _ColonCast(bindvalue, self)
_server_side_id = util.counter()
class PGExecutionContext_pg8000(PGExecutionContext):
    """Execution context adding server-side (named) cursor support."""

    def create_server_side_cursor(self):
        # Derive a collision-resistant cursor name from this context's id
        # plus a monotonically increasing process-wide counter.
        ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
        return ServerSideCursor(self._dbapi_connection.cursor(), ident)

    def pre_exec(self):
        if not self.compiled:
            return
class ServerSideCursor:
    """Emulates a server-side cursor on top of a plain pg8000 cursor.

    The statement is wrapped in a named SQL-level cursor (DECLARE ... NO
    SCROLL CURSOR FOR ...) and rows are pulled incrementally with FETCH
    FORWARD commands, so large result sets are not buffered client-side.
    """

    server_side = True

    def __init__(self, cursor, ident):
        self.ident = ident
        self.cursor = cursor

    @property
    def connection(self):
        return self.cursor.connection

    @property
    def rowcount(self):
        return self.cursor.rowcount

    @property
    def description(self):
        return self.cursor.description

    def execute(self, operation, args=(), stream=None):
        # Declare the named, non-scrollable cursor for the statement.
        declared = "DECLARE %s NO SCROLL CURSOR FOR %s" % (
            self.ident,
            operation,
        )
        self.cursor.execute(declared, args, stream=stream)
        return self

    def executemany(self, operation, param_sets):
        self.cursor.executemany(operation, param_sets)
        return self

    def fetchone(self):
        self.cursor.execute("FETCH FORWARD 1 FROM " + self.ident)
        return self.cursor.fetchone()

    def fetchmany(self, num=None):
        if num is None:
            return self.fetchall()
        self.cursor.execute(
            "FETCH FORWARD " + str(int(num)) + " FROM " + self.ident
        )
        return self.cursor.fetchall()

    def fetchall(self):
        self.cursor.execute("FETCH FORWARD ALL FROM " + self.ident)
        return self.cursor.fetchall()

    def close(self):
        # Drop the SQL-level cursor before closing the DBAPI cursor.
        self.cursor.execute("CLOSE " + self.ident)
        self.cursor.close()

    def setinputsizes(self, *sizes):
        self.cursor.setinputsizes(*sizes)

    def setoutputsize(self, size, column=None):
        pass
class PGCompiler_pg8000(PGCompiler):
    """SQL compiler emitting pg8000-safe modulo expressions."""

    def visit_mod_binary(self, binary, operator, **kw):
        # The % operator must be doubled because pg8000 uses the
        # "format" paramstyle, where a bare % introduces a parameter.
        lhs = self.process(binary.left, **kw)
        rhs = self.process(binary.right, **kw)
        return lhs + " %% " + rhs
class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
    """Identifier preparer that leaves percent signs un-doubled.

    pg8000's parameter substitution does not require %% escaping inside
    quoted identifiers, so the default doubling is disabled.
    """

    def __init__(self, *args, **kwargs):
        PGIdentifierPreparer.__init__(self, *args, **kwargs)
        self._double_percents = False
class PGDialect_pg8000(PGDialect):
    """PostgreSQL dialect for the pure-Python pg8000 driver.

    Requires pg8000 1.16.6 or greater; older releases used an
    incompatible API.
    """

    driver = "pg8000"
    supports_statement_cache = True
    supports_unicode_statements = True
    supports_unicode_binds = True
    default_paramstyle = "format"
    supports_sane_multi_rowcount = True
    execution_ctx_cls = PGExecutionContext_pg8000
    statement_compiler = PGCompiler_pg8000
    preparer = PGIdentifierPreparer_pg8000
    supports_server_side_cursors = True
    use_setinputsizes = True

    # reversed as of pg8000 1.16.6. 1.16.5 and lower
    # are no longer compatible
    description_encoding = None
    # description_encoding = "use_encoding"

    # Map generic SQLAlchemy types onto the pg8000-specific subclasses
    # defined above.
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PGNumericNoBind,
            sqltypes.Float: _PGNumeric,
            sqltypes.JSON: _PGJSON,
            sqltypes.Boolean: _PGBoolean,
            sqltypes.NullType: _PGNullType,
            JSONB: _PGJSONB,
            sqltypes.JSON.JSONPathType: _PGJSONPathType,
            sqltypes.JSON.JSONIndexType: _PGJSONIndexType,
            sqltypes.JSON.JSONIntIndexType: _PGJSONIntIndexType,
            sqltypes.JSON.JSONStrIndexType: _PGJSONStrIndexType,
            UUID: _PGUUID,
            sqltypes.Interval: _PGInterval,
            INTERVAL: _PGInterval,
            sqltypes.DateTime: _PGTimeStamp,
            sqltypes.Time: _PGTime,
            sqltypes.Integer: _PGInteger,
            sqltypes.SmallInteger: _PGSmallInteger,
            sqltypes.BigInteger: _PGBigInteger,
            sqltypes.Enum: _PGEnum,
            sqltypes.ARRAY: _PGARRAY,
        },
    )

    def __init__(self, client_encoding=None, **kwargs):
        PGDialect.__init__(self, **kwargs)
        self.client_encoding = client_encoding

        if self._dbapi_version < (1, 16, 6):
            # Message fixed: previously read "... greater is DataRequired".
            raise NotImplementedError("pg8000 1.16.6 or greater is required")

    @util.memoized_property
    def _dbapi_version(self):
        """Driver version as an int tuple; (99, 99, 99) when unknown."""
        if self.dbapi and hasattr(self.dbapi, "__version__"):
            return tuple(
                [
                    int(x)
                    for x in re.findall(
                        r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
                    )
                ]
            )
        else:
            return (99, 99, 99)

    @classmethod
    def dbapi(cls):
        return __import__("pg8000")

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into pg8000 connect() kwargs."""
        opts = url.translate_connect_args(username="user")
        if "port" in opts:
            opts["port"] = int(opts["port"])
        opts.update(url.query)
        return ([], opts)

    def is_disconnect(self, e, connection, cursor):
        if isinstance(e, self.dbapi.InterfaceError) and "network error" in str(
            e
        ):
            # new as of pg8000 1.19.0 for broken connections
            return True

        # connection was closed normally
        return "connection is closed" in str(e)

    def set_isolation_level(self, connection, level):
        level = level.replace("_", " ")

        # adjust for ConnectionFairy possibly being present
        if hasattr(connection, "dbapi_connection"):
            connection = connection.dbapi_connection

        if level == "AUTOCOMMIT":
            connection.autocommit = True
        elif level in self._isolation_lookup:
            connection.autocommit = False
            cursor = connection.cursor()
            cursor.execute(
                "SET SESSION CHARACTERISTICS AS TRANSACTION "
                "ISOLATION LEVEL %s" % level
            )
            cursor.execute("COMMIT")
            cursor.close()
        else:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s or AUTOCOMMIT"
                % (level, self.name, ", ".join(self._isolation_lookup))
            )

    def set_readonly(self, connection, value):
        cursor = connection.cursor()
        try:
            cursor.execute(
                "SET SESSION CHARACTERISTICS AS TRANSACTION %s"
                % ("READ ONLY" if value else "READ WRITE")
            )
            cursor.execute("COMMIT")
        finally:
            cursor.close()

    def get_readonly(self, connection):
        cursor = connection.cursor()
        try:
            cursor.execute("show transaction_read_only")
            val = cursor.fetchone()[0]
        finally:
            cursor.close()

        return val == "on"

    def set_deferrable(self, connection, value):
        cursor = connection.cursor()
        try:
            cursor.execute(
                "SET SESSION CHARACTERISTICS AS TRANSACTION %s"
                % ("DEFERRABLE" if value else "NOT DEFERRABLE")
            )
            cursor.execute("COMMIT")
        finally:
            cursor.close()

    def get_deferrable(self, connection):
        cursor = connection.cursor()
        try:
            cursor.execute("show transaction_deferrable")
            val = cursor.fetchone()[0]
        finally:
            cursor.close()

        return val == "on"

    def set_client_encoding(self, connection, client_encoding):
        # adjust for ConnectionFairy possibly being present
        if hasattr(connection, "dbapi_connection"):
            connection = connection.dbapi_connection

        cursor = connection.cursor()
        cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'")
        cursor.execute("COMMIT")
        cursor.close()

    def do_set_input_sizes(self, cursor, list_of_tuples, context):
        # Positional paramstyles take a flat list of types; named styles
        # take them keyed by parameter name (skipping untyped entries).
        if self.positional:
            cursor.setinputsizes(
                *[dbtype for key, dbtype, sqltype in list_of_tuples]
            )
        else:
            cursor.setinputsizes(
                **{
                    key: dbtype
                    for key, dbtype, sqltype in list_of_tuples
                    if dbtype
                }
            )

    def do_begin_twophase(self, connection, xid):
        connection.connection.tpc_begin((0, xid, ""))

    def do_prepare_twophase(self, connection, xid):
        connection.connection.tpc_prepare()

    def do_rollback_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        connection.connection.tpc_rollback((0, xid, ""))

    def do_commit_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        connection.connection.tpc_commit((0, xid, ""))

    def do_recover_twophase(self, connection):
        return [row[1] for row in connection.connection.tpc_recover()]

    def on_connect(self):
        """Compose the per-connection setup callbacks into one callable."""
        fns = []

        def on_connect(conn):
            # Let quoted_name instances bind like plain strings.
            conn.py_types[quoted_name] = conn.py_types[util.text_type]

        fns.append(on_connect)

        if self.client_encoding is not None:

            def on_connect(conn):
                self.set_client_encoding(conn, self.client_encoding)

            fns.append(on_connect)

        if self.isolation_level is not None:

            def on_connect(conn):
                self.set_isolation_level(conn, self.isolation_level)

            fns.append(on_connect)

        if self._json_deserializer:

            def on_connect(conn):
                # json
                conn.register_in_adapter(114, self._json_deserializer)
                # jsonb
                conn.register_in_adapter(3802, self._json_deserializer)

            fns.append(on_connect)

        if len(fns) > 0:

            def on_connect(conn):
                for fn in fns:
                    fn(conn)

            return on_connect
        else:
            return None
dialect = PGDialect_pg8000
| 28.652101
| 95
| 0.626115
|
4a0995dd276672ffbbe3b1bf3c32f103dba8e105
| 3,979
|
py
|
Python
|
app/recipe/tests/test_tags_api.py
|
kukkikkpl/recipe-app-api
|
94212e4ca9ca79c886b699d7be12163c0baa09a6
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
kukkikkpl/recipe-app-api
|
94212e4ca9ca79c886b699d7be12163c0baa09a6
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
kukkikkpl/recipe-app-api
|
94212e4ca9ca79c886b699d7be12163c0baa09a6
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Test the publicly available tags API"""

    def setUp(self):
        # Unauthenticated client: no credentials attached.
        self.client = APIClient()

    def test_login_required(self):
        """Test that login is required for retrieving tags"""
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Test the authorized user tags API"""

    def setUp(self):
        # Every test in this class runs with a pre-authenticated user.
        self.user = get_user_model().objects.create_user(
            'test@gmail.com',
            'password123'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """Test retrieving tags"""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')
        res = self.client.get(TAGS_URL)
        # Expected payload: all tags ordered by name descending.
        tags = Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_tags_limited_to_user(self):
        """Test that tags returned are for the authenticated user"""
        # A second user's tag must not leak into the response.
        user2 = get_user_model().objects.create_user(
            'other@gmail.com',
            'testpass'
        )
        Tag.objects.create(user=user2, name='Fruity')
        tag = Tag.objects.create(user=self.user, name='Comfort Food')
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], tag.name)

    def test_create_tag_successful(self):
        """Test creating a new tag"""
        payload = {'name': 'Test tag'}
        self.client.post(TAGS_URL, payload)
        exists = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)

    def test_create_tag_invalid(self):
        """Test creating a new tag with invalid payload"""
        # An empty name must be rejected by serializer validation.
        payload = {'name': ''}
        res = self.client.post(TAGS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_tags_assigned_to_recipes(self):
        """Test filtering tags by those assigned to recipes"""
        tag1 = Tag.objects.create(user=self.user, name='Breakfast')
        tag2 = Tag.objects.create(user=self.user, name='Lunch')
        recipe = Recipe.objects.create(
            title='Coriander eggs on toast',
            time_minutes=10,
            price=5.00,
            user=self.user
        )
        recipe.tags.add(tag1)
        # assigned_only=1 should return only tags attached to a recipe.
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    def test_retrieve_tags_assigned_unique(self):
        """Test filtering tags by assigned returns unique items"""
        tag = Tag.objects.create(user=self.user, name='Breakfast')
        Tag.objects.create(user=self.user, name='Lunch')
        recipe1 = Recipe.objects.create(
            title='Pancakes',
            time_minutes=5,
            price=3.00,
            user=self.user
        )
        recipe1.tags.add(tag)
        recipe2 = Recipe.objects.create(
            title='Porridge',
            time_minutes=3,
            price=2.00,
            user=self.user
        )
        # The same tag on two recipes must appear only once.
        recipe2.tags.add(tag)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1)
| 31.579365
| 71
| 0.631063
|
4a099759891491e43e2c994da4b8134bfa068231
| 412
|
py
|
Python
|
aliexpress/api/rest/OfferRedefiningGetcanusedproductbysizetemplateid.py
|
bayborodin/aliexpress-sdk
|
89935adf46412d8d054fa80a19153971279c4106
|
[
"MIT"
] | 3
|
2021-03-10T16:46:43.000Z
|
2022-03-29T15:28:50.000Z
|
aliexpress/api/rest/OfferRedefiningGetcanusedproductbysizetemplateid.py
|
bayborodin/aliexpress-sdk
|
89935adf46412d8d054fa80a19153971279c4106
|
[
"MIT"
] | null | null | null |
aliexpress/api/rest/OfferRedefiningGetcanusedproductbysizetemplateid.py
|
bayborodin/aliexpress-sdk
|
89935adf46412d8d054fa80a19153971279c4106
|
[
"MIT"
] | 2
|
2021-10-30T17:09:34.000Z
|
2021-11-25T11:50:52.000Z
|
from aliexpress.api.base import RestApi
class AliexpressOfferRedefiningGetcanusedproductbysizetemplateidRequest(
    RestApi
):
    """Request wrapper for ``getcanusedproductbysizetemplateid``."""

    def __init__(self, domain="gw.api.taobao.com", port=80):
        RestApi.__init__(self, domain, port)
        # Pagination cursor and the size template to query; both must be
        # assigned by the caller before the request is executed.
        self.current_page = None
        self.size_template_id = None

    def getapiname(self):
        # Dotted method name sent to the TOP gateway.
        return "aliexpress.offer.redefining.getcanusedproductbysizetemplateid"
| 29.428571
| 78
| 0.742718
|
4a099998fc72f23e3937765a9e872dd60ad6e2af
| 1,520
|
py
|
Python
|
samples/shareItems.py
|
travisbutcher/ago-tools
|
09572cedf0dd5517cd9c88941409144c91439462
|
[
"Apache-2.0"
] | 56
|
2015-01-07T12:10:10.000Z
|
2021-11-28T21:22:41.000Z
|
samples/shareItems.py
|
travisbutcher/ago-tools
|
09572cedf0dd5517cd9c88941409144c91439462
|
[
"Apache-2.0"
] | 20
|
2015-01-07T19:01:44.000Z
|
2018-01-26T16:47:53.000Z
|
samples/shareItems.py
|
travisbutcher/ago-tools
|
09572cedf0dd5517cd9c88941409144c91439462
|
[
"Apache-2.0"
] | 47
|
2015-01-08T15:29:21.000Z
|
2022-03-28T11:33:30.000Z
|
import csv
import argparse
import sys
from agoTools.admin import Admin
from agoTools.admin import AGOLItems
def _raw_input(prompt=None, stream=None, input=None):
    """Prompt-and-read replacement for raw_input().

    Unlike the builtin, the entered string is not saved in the GNU
    readline history, which matters for credentials.

    prompt: optional text written to *stream* before reading; when None
        nothing is printed (previously ``str(None)`` printed "None").
    stream: output file object, defaults to sys.stderr.
    input: input file object, defaults to sys.stdin.

    Returns the line read with its trailing newline stripped.
    Raises EOFError when the input stream is exhausted.
    """
    if not stream:
        stream = sys.stderr
    if not input:
        input = sys.stdin
    if prompt is not None:
        prompt = str(prompt)
        if prompt:
            stream.write(prompt)
            stream.flush()
    # NOTE: The Python C API calls flockfile() (and unlock) during readline.
    line = input.readline()
    if not line:
        raise EOFError
    if line[-1] == '\n':
        line = line[:-1]
    return line
# Command-line arguments; any value omitted is prompted for interactively.
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-p', '--password')
parser.add_argument('-group', '--groupID')
parser.add_argument('-file', '--file')
parser.add_argument('-portal', '--portal')
args = parser.parse_args()
inputFile = ''
# NOTE(review): `== None` / `!= None` below should be `is None` / `is not None`.
if args.file == None:
    args.file = _raw_input("CSV path: ")
if args.user == None:
    args.user = _raw_input("Username:")
if args.groupID == None:
    args.groupID = _raw_input("Group ID:")
if args.portal == None:
    args.portal = _raw_input("Portal: ")
# Always talk to the portal over HTTPS.
args.portal = str(args.portal).replace("http://","https://")
agoAdmin = Admin(args.user,args.portal,args.password)
if args.file != None:
    inputFile=args.file
# Share every item listed in the CSV with the target group.
with open(inputFile) as input:
    dataReader = csv.DictReader(input)
    items=AGOLItems(dataReader)
    agoAdmin.shareItems(items.AGOLItems_list,args.groupID)
| 24.516129
| 76
| 0.669079
|
4a0999b1396c4bf9052536989a7c1c746dd9d5c3
| 684
|
py
|
Python
|
migrations/versions/a9078456e117_added_next_update_to_sensor_model.py
|
frangiz/walter-server
|
0c9ab88a9cc6cf446ba86b1b06bcf9f8c64cf639
|
[
"MIT"
] | null | null | null |
migrations/versions/a9078456e117_added_next_update_to_sensor_model.py
|
frangiz/walter-server
|
0c9ab88a9cc6cf446ba86b1b06bcf9f8c64cf639
|
[
"MIT"
] | 21
|
2019-09-16T08:08:17.000Z
|
2020-05-27T06:49:34.000Z
|
migrations/versions/a9078456e117_added_next_update_to_sensor_model.py
|
frangiz/walter-server
|
0c9ab88a9cc6cf446ba86b1b06bcf9f8c64cf639
|
[
"MIT"
] | 1
|
2019-10-16T11:23:38.000Z
|
2019-10-16T11:23:38.000Z
|
"""Added next_update to Sensor model.
Revision ID: a9078456e117
Revises: a100346c698b
Create Date: 2019-10-07 20:30:43.880157
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a9078456e117'  # this migration
down_revision = 'a100346c698b'  # parent migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``next_update`` timestamp column to ``sensor``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('sensor', sa.Column('next_update', sa.DateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``next_update`` column from ``sensor`` again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('sensor', 'next_update')
    # ### end Alembic commands ###
| 23.586207
| 83
| 0.69883
|
4a0999ccf931c788b281f7847449bdc6e4a738f2
| 9,208
|
py
|
Python
|
cla.py
|
owenps/Chess-Line-Analysis
|
bd1902df0733bdeec78d08aa7b977179e6826941
|
[
"MIT"
] | 1
|
2021-07-21T04:28:28.000Z
|
2021-07-21T04:28:28.000Z
|
cla.py
|
owenps/Chess-Line-Analysis
|
bd1902df0733bdeec78d08aa7b977179e6826941
|
[
"MIT"
] | null | null | null |
cla.py
|
owenps/Chess-Line-Analysis
|
bd1902df0733bdeec78d08aa7b977179e6826941
|
[
"MIT"
] | null | null | null |
import requests
import pandas as pd
import json
import time
import random
from chess import pgn
opening_db = {}  # cache: FEN string -> Lichess explorer JSON reply
data = None  # current CLA results table (pandas DataFrame), set later
request_err_msg = True  # print the rate-limit notice only once
# ------------ Settings ---------------
with open('settings.json') as f:
    settings = json.load(f)
# Collect the rating buckets and time controls enabled in settings.json;
# these are passed to every opening-explorer query.
ratings, speeds = [], []
for r in settings['Average Ratings']:
    if settings['Average Ratings'][r]:
        ratings.append(r)
for t in settings['Time Controls']:
    if settings['Time Controls'][t]:
        speeds.append(t)
# ------------ FUNCTIONS ----------------
def cla_definitions():
    """Print the glossary of table headers shown to the user."""
    print(""" DEFINITIONS:
    Name - This a user defined title to a particular game. This can help identify a unique PGN. Use option "6" to edit this name.
    Moves - The total amount of moves made in the game
    Total Games - The number of games that have reached the same board position
    Game Prob. - The probability of a particular player playing each move recorded in the game's PGN according to the Lichess opening explorer.
    Avg Prob. - The average probability of each move being played in the game
    PGN - Portable Game Notation (PGN) is a common digital notation for chess games
    (W) - Refers to the player playing with the White pieces
    (B) - Refers to the player playing with the Black pieces
    """)
def cla_edit():
    """Ask the user for a row index and a new name for that row.

    Returns (row, name) on success, or (-1, None) when the input is not
    an integer or is out of the table's range.

    Fixes over the original: the out-of-range path used to fall through
    and return a bare None (the caller unpacks two values, so that
    crashed), and the upper bound accepted ``row == len(data)``, which
    would silently append a new row via ``DataFrame.loc``.
    """
    try:
        row = int(input("Enter the row's index number that you would like to set a name to: "))
    except ValueError:
        print("Invalid row number")
        return -1, None
    # Valid indices are 0 .. len-1.
    if row < 0 or row >= data.shape[0]:
        print("Invalid row number")
        return -1, None
    name = input("Enter a name: ")
    return row, name
def cla_calculate(game, max_moves):
    """Score *game*'s mainline against the Lichess opening explorer.

    For each position (up to *max_moves* plies) the explorer is consulted
    (cached in opening_db) and the probability of the played move is
    accumulated per colour.

    Returns (game_prob, avg_move_prob, final_fen, move_count); the first
    two are [white, black] pairs.
    """
    game_prob = [1,1]
    move_prob = [[],[]]
    count = 0
    board = game.board()
    for i,move in enumerate(game.mainline_moves()):
        count += 1
        if count == max_moves:
            break
        fen = board.fen()
        # Reuse the cached explorer reply when this position was seen before.
        info = opening_db[fen] if fen in opening_db else lichess_query(fen)
        total_games, move_total = 0, 0
        for candidate_move in info['moves']:
            total_games += candidate_move['white']+candidate_move['draws']+candidate_move['black']
            if candidate_move['uci'] == move.uci():
                move_total = candidate_move['white']+candidate_move['draws']+candidate_move['black']
        # Moves never seen in the database get a small non-zero probability.
        game_inc = 0.1/(total_games+1) if move_total == 0 else move_total*1.0/total_games
        game_prob[i % 2] *= game_inc
        move_prob[i % 2].append(game_inc)
        board.push(move)
    return game_prob, [sum(move_prob[0])/len(move_prob[0]),sum(move_prob[1])/len(move_prob[1])], board.fen(), count
def lichess_query(fen):
    """Fetch opening-explorer statistics for *fen* and cache the reply.

    Respects the explorer's rate limit by sleeping and retrying once when
    the service answers with an HTML error page instead of JSON.
    """
    query = {
        'variant':'standard',
        'fen':fen,
        'recentGames':0,
        'topGames':0,
        'speeds[]':speeds,
        'ratings[]':ratings
    }
    response = requests.get("https://explorer.lichess.ovh/lichess", params=query)
    if response.text[0] == "<": # too many requests
        global request_err_msg
        if request_err_msg:
            print("The database limits the number of requests per minute. Calculations will take a little longer than usual.")
            request_err_msg = False
        time.sleep(10)
        response = requests.get("https://explorer.lichess.ovh/lichess", params=query)
    # Cache by FEN so repeated positions don't hit the network again.
    opening_db[fen] = response.json()
    return response.json()
def cla_create():
    """Load a PGN file from the user and build a fresh CLA table from it."""
    games = []
    ok,name = cla_load(games)
    # Fetch FENs
    if ok:
        # Create Table
        cla_build(games, name)
def cla_build(games, name, new=True):
    """Score every game in *games* and build or extend the global table.

    new=True replaces the module-level ``data`` DataFrame and opens the
    table submenu; new=False appends rows to the existing table.
    """
    d = {
        'Name' : [],
        'Moves' : [],
        'Total Games' : [],
        'Game Prob. (W)' : [],
        'Avg Prob. (W)' : [],
        'Game Prob. (B)' : [],
        'Avg Prob. (B)' : [],
        'PGN' : []
    }
    while True:
        max_moves = 0
        try:
            max_moves = int(input("Set a max number of moves to analyze: "))
            break
        except ValueError:
            print('Invalid max_move parameter')
    for num,game in enumerate(games):
        # Progress indicator for large PGN files.
        if num % 50 == 0 and num != 0:
            print("Calculating Game {}...".format(num))
        game_score, move_score, fen, moves = cla_calculate(game, max_moves)
        info = opening_db[fen] if fen in opening_db else lichess_query(fen)
        d['Name'].append(name)
        d['Moves'].append(moves)
        d['Game Prob. (W)'].append(game_score[0])
        d['Avg Prob. (W)'].append(move_score[0])
        d['Game Prob. (B)'].append(game_score[1])
        d['Avg Prob. (B)'].append(move_score[1])
        d['Total Games'].append(info['white']+info['draws']+info['black'])
        d['PGN'].append(game.variations[0])
    global data
    if new: # Make new dataframe
        data = pd.DataFrame(data=d)
        cla_show()
    else:
        # Append to the existing table, renumbering the index.
        data_temp = pd.DataFrame(data=d)
        data = data.append(data_temp, ignore_index=True)
def cla_show():
    """Print the current table and run the table-manipulation submenu."""
    print(data)
    # Table Manipulation
    while True:
        print("""\nPlease select one of the following options by entering the corresponding number
    1. Sort Table by Number of Moves
    2. Sort Table by Total Games
    3. Sort Table by Game Probability (W/B)
    4. Sort Table by Average Move Probability (W/B)
    5. Sort Table by Game Index
    6. Edit Name of row
    7. Add a PGN file to the table
    8. Header Definitions
    9. Return to main menu""")
        ans = input()
        if ans == "1" or ans == "2":
            col = { "1" : "Moves", "2" : "Total Games"}
            print(data.sort_values(by=[col[ans]], ascending=False))
        elif ans == "3":
            color = input("White (W) or Black (B): ").lower()
            if color in ["white","w","black","b"]:
                if "w" in color:
                    print(data.sort_values(by=['Game Prob. (W)'], ascending=False))
                else:
                    print(data.sort_values(by=['Game Prob. (B)'], ascending=False))
        elif ans == "4":
            color = input("White (W) or Black (B): ").lower()
            if color in ["white","w","black","b"]:
                if "w" in color:
                    print(data.sort_values(by=['Avg Prob. (W)'], ascending=False))
                else:
                    print(data.sort_values(by=['Avg Prob. (B)'], ascending=False))
        elif ans == "5":
            # Original index order.
            print(data)
        elif ans == "6":
            pos,name = cla_edit()
            if pos != -1:
                data.loc[pos,'Name'] = name
            print(data)
        elif ans == "7":
            games = []
            # Fetch FENs
            ok, name = cla_load(games)
            if ok:
                # Create Table
                cla_build(games,name,new=False)
            print(data)
        elif ans == "8":
            cla_definitions()
        elif ans == "9":
            break
def cla_load(games):
    """Read a PGN file from the user, appending parsed games to *games*.

    Returns (True, filename) on success, (False, None) when the file is
    missing or not valid PGN. *games* is mutated in place.
    """
    fn = input("Enter a valid file in PGN format to load: ")
    try:
        with open(fn) as f:
            game = pgn.read_game(f)
            while game:
                game.board() # Check if valid, otherwise throws attribute error
                games.append(game)
                game = pgn.read_game(f)
            return True, fn
    except FileNotFoundError:
        print('The file "{}" was not found.'.format(fn))
    except AttributeError:
        print('Error reading "{}". Please submit a valid file that uses PGN format.'.format(fn))
    return False, None
def cla_import():
    """Load a previously exported CLA table from CSV into the global table.

    The CSV's columns must match the CLA table structure exactly; any
    mismatch is rejected without touching the current table.
    """
    try:
        fn = input("Enter a the name/path of a CSV file to import: ")
        global data
        data_temp = pd.read_csv(fn)
        structure = ['Name', 'Moves', 'Total Games',
                     'Game Prob. (W)', 'Avg Prob. (W)',
                     'Game Prob. (B)', 'Avg Prob. (B)',
                     'PGN']
        for i,col in enumerate(data_temp.columns):
            if structure[i] != col:
                print("Error Reading CSV, please make sure it conforms to the structure of a CLA table")
                return
        data = data_temp
        print('File "{}" has been successfully imported!'.format(fn))
        cla_show()
    except FileNotFoundError:
        print('File "{}" is not found'.format(fn))
def cla_export():
    """Write the current table to cla_results.csv, if one exists."""
    if data is not None:
        fn = 'cla_results.csv'
        data.to_csv(fn, index=False, header=True)
        print('The CLA table has been successfully exported to {}'.format(fn))
    else:
        print("There is no current CLA Table, try either creating or importing one to get started.")
# ----------------- MAIN ------------------
# Menu dispatch table: the user's numeric choice selects an action.
options = {
    "1" : cla_create,
    "2" : cla_import,
    "3" : cla_export,
    "4" : exit,
}
print("""-------------------------------------------------
 Welcome To the Chess Line Analysis (CLA) Program
-------------------------------------------------""")
while True:
    print("""\nPlease select one of the following options by entering the corresponding number
    1. Create a new CLA table.
    2. Import a CLA table
    3. Export a CLA table
    4. Exit""")
    ans = input()
    if ans in options:
        options[ans]()
# NOTE(review): the three lines below are unreachable (the loop above only
# ends via exit()) and reference an undefined `query`; they appear to be
# leftover debugging code and should be removed.
response = requests.get("https://explorer.lichess.ovh/lichess", params=query)
print(response)
print(response.json())
| 33.97786
| 143
| 0.55821
|
4a099a9675df540b97461b2f1513340aba0d12d6
| 7,573
|
py
|
Python
|
datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v1/validator.py
|
flowcommerce/integrations-core
|
c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v1/validator.py
|
flowcommerce/integrations-core
|
c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/manifest_validator/v1/validator.py
|
flowcommerce/integrations-core
|
c562b0d423ec1a5dd4073b703d6a8d3a9ab23c72
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import uuid
import datadog_checks.dev.tooling.manifest_validator.common.validator as common
from ...constants import get_root
from ...manifest_validator.common.validator import BaseManifestValidator
from ...manifest_validator.v1.schema import get_manifest_schema
from ...utils import has_logs, is_package, parse_version_parts
class AttributesValidator(BaseManifestValidator):
    """Check that attributes are valid"""

    def validate(self, check_name, decoded, fix):
        # Report every schema violation at once, ordered by manifest path.
        errors = sorted(get_manifest_schema().iter_errors(decoded), key=lambda e: e.path)
        if errors:
            for error in errors:
                self.fail(f'  {"->".join(map(str, error.absolute_path))} Error: {error.message}')
class GUIDValidator(BaseManifestValidator):
    """Ensure each manifest carries a unique, non-empty string ``guid``.

    ``all_guids`` is a class attribute shared across instances so that
    duplicates are detected between different checks in one run.
    """

    all_guids = {}

    def validate(self, check_name, decoded, fix):
        guid = decoded.get('guid')
        if guid in self.all_guids:
            output = f'  duplicate `guid`: `{guid}` from `{self.all_guids[guid]}`'
            if fix:
                # Store the generated guid as a *string*: the schema demands
                # a string, a uuid.UUID object would not JSON-serialize, and
                # it would never match a later string-guid duplicate lookup.
                new_guid = str(uuid.uuid4())
                self.all_guids[new_guid] = check_name
                decoded['guid'] = new_guid
                self.fix(output, f'  new `guid`: {new_guid}')
            else:
                self.fail(output)
        elif not guid or not isinstance(guid, str):
            output = '  required non-null string: guid'
            if fix:
                new_guid = str(uuid.uuid4())
                self.all_guids[new_guid] = check_name
                decoded['guid'] = new_guid
                self.fix(output, f'  new `guid`: {new_guid}')
            else:
                self.fail(output)
        else:
            self.all_guids[guid] = check_name
        return self.result
class IsPublicValidator(BaseManifestValidator):
    """Require a boolean `is_public`; warn when the integration is private."""

    def validate(self, check_name, decoded, fix):
        correct_is_public = True
        path = '/is_public'
        is_public = decoded.get_path(path)
        if not isinstance(is_public, bool):
            output = '  required boolean: is_public'
            if fix:
                # Default to public when fixing a missing/invalid value.
                decoded.set_path(path, correct_is_public)
                self.fix(output, f'  new `is_public`: {correct_is_public}')
            else:
                self.fail(output)
        if not is_public:
            message = (
                f"  {check_name}: `is_public` is disabled, set to `True` "
                f"if you want the integration documentation to be published."
            )
            self.warning(message)
class ManifestVersionValidator(BaseManifestValidator):
    """Check `manifest_version` and the legacy `version` field.

    For manifest_version >= 1.0.0 the `version` field is obsolete when the
    package ships an __about__.py; for older manifests a valid `version`
    is still required.
    """

    def __init__(self, *args, **kwargs):
        super(ManifestVersionValidator, self).__init__(*args, **kwargs)
        self.root = get_root()

    def validate(self, check_name, decoded, fix):
        # manifest_version
        correct_manifest_version = '1.0.0'
        manifest_version = decoded.get('manifest_version')
        version_parts = parse_version_parts(manifest_version)
        if len(version_parts) != 3:
            if not manifest_version:
                output = '  required non-null string: manifest_version'
            else:
                output = f'  invalid `manifest_version`: {manifest_version}'
            if fix:
                version_parts = parse_version_parts(correct_manifest_version)
                decoded['manifest_version'] = correct_manifest_version
                self.fix(output, f'  new `manifest_version`: {correct_manifest_version}')
            else:
                self.fail(output)
        if len(version_parts) == 3:
            # The per-check version lives in __about__.py for packaged checks.
            about_exists = os.path.isfile(
                os.path.join(self.root, check_name, 'datadog_checks', check_name, '__about__.py')
            )
            if version_parts >= [1, 0, 0]:
                if 'version' in decoded and about_exists:
                    output = '  outdated field: version'
                    if fix:
                        del decoded['version']
                        self.fix(output, '  removed field: version')
                    else:
                        self.fail(output)
            elif about_exists:
                output = f'  outdated `manifest_version`: {manifest_version}'
                if fix:
                    decoded['manifest_version'] = correct_manifest_version
                    self.fix(output, f'  new `manifest_version`: {correct_manifest_version}')
                    if 'version' in decoded:
                        del decoded['version']
                        self.result.messages['success'].append('  removed field: version')
                else:
                    self.fail(output)
            else:
                # Legacy manifest without a package: `version` must be valid.
                version = decoded.get('version')
                version_parts = parse_version_parts(version)
                if len(version_parts) != 3:
                    if not version:
                        output = '  required non-null string: version'
                    else:
                        output = f'  invalid `version`: {version}'
                    self.fail(output)
class NameValidator(BaseManifestValidator):
    """Check that the manifest `name` matches the check's folder name."""

    def validate(self, check_name, decoded, fix):
        correct_name = check_name
        name = decoded.get_path('/name')
        # Reserve the `datadog` prefix (one grandfathered exception).
        if check_name.startswith('datadog') and check_name != 'datadog_cluster_agent':
            self.fail(f'  An integration check folder cannot start with `datadog`: {check_name}')
        if not isinstance(name, str) or name.lower() != correct_name.lower():
            output = f'  incorrect `name`: {name}'
            if fix:
                decoded.set_path('/name', correct_name)
                self.fix(output, f'  new `name`: {correct_name}')
            else:
                self.fail(output)
class SupportValidator(BaseManifestValidator):
    """Check that `support` matches the repository the check lives in."""

    def validate(self, check_name, decoded, fix):
        # contrib -> extras repo, partner -> marketplace, core otherwise.
        if self.is_extras:
            expected = 'contrib'
        elif self.is_marketplace:
            expected = 'partner'
        else:
            expected = 'core'
        actual = decoded.get('support')
        if actual != expected:
            output = f'  incorrect `support`: {actual}'
            if fix:
                decoded['support'] = expected
                self.fix(output, f'  new `support`: {expected}')
            else:
                self.fail(output)
class SupportedOSValidator(BaseManifestValidator):
    """If an integration contains python or logs configuration, the supported_os field should not be empty."""

    def validate(self, check_name, decoded, _):
        supported_os = decoded.get('supported_os')
        # Logs support or a shipped Python package implies the check runs
        # somewhere, so at least one OS must be declared.
        check_has_logs = has_logs(check_name)
        check_has_python = is_package(check_name)
        if not supported_os and (check_has_logs or check_has_python):
            output = f'Attribute `supported_os` in {check_name}/manifest.json should not be empty.'
            self.fail(output)
def get_v1_validators(is_extras, is_marketplace):
    """Return the ordered list of v1 manifest validators to run."""
    return [
        AttributesValidator(),
        GUIDValidator(),
        ManifestVersionValidator(),
        common.MaintainerValidator(is_extras, is_marketplace, check_in_extras=False, check_in_marketplace=False),
        NameValidator(),
        common.MetricsMetadataValidator(),
        common.MetricToCheckValidator(),
        SupportValidator(is_extras, is_marketplace),
        SupportedOSValidator(),
        IsPublicValidator(),
        common.ImmutableAttributesValidator(),
        common.LogsCategoryValidator(),
    ]
| 38.441624
| 113
| 0.589198
|
4a099b3392d1f54354f7a38229734ce0361df64e
| 21,120
|
py
|
Python
|
examples/example_retromoco_motion.py
|
GReguig/torchio
|
0cd4f3105408410adda4fddf4873eb8c12883ecc
|
[
"Apache-2.0"
] | null | null | null |
examples/example_retromoco_motion.py
|
GReguig/torchio
|
0cd4f3105408410adda4fddf4873eb8c12883ecc
|
[
"Apache-2.0"
] | null | null | null |
examples/example_retromoco_motion.py
|
GReguig/torchio
|
0cd4f3105408410adda4fddf4873eb8c12883ecc
|
[
"Apache-2.0"
] | null | null | null |
from pprint import pprint
from torchio import Image, transforms, INTENSITY, LABEL, Subject, SubjectsDataset
import torchio
from torchvision.transforms import Compose
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from torchio.transforms import RandomMotionFromTimeCourse, RandomAffine, CenterCropOrPad
from copy import deepcopy
from nibabel.viewers import OrthoSlicer3D as ov
from torchvision.transforms import Compose
import sys
from torchio.data.image import read_image
import torch
import seaborn as sns
sns.set(style="whitegrid")
# Print DataFrames without truncation.
# NOTE(review): max_colwidth=-1 is deprecated in recent pandas; the
# documented way to disable truncation is None — confirm pandas version.
pd.set_option('display.max_rows', None, 'display.max_columns', None, 'display.max_colwidth', -1, 'display.width', 400)
#from torchQC import do_training
#dt = do_training('/tmp')
l1_loss = torch.nn.L1Loss()
"""
Comparing result with retromocoToolbox
"""
from utils_file import gfile, get_parent_path
import pandas as pd
from doit_train import do_training
def corrupt_data( x0, sigma= 5, amplitude=20, method='gauss', mvt_axes=None ):
    """Build a synthetic 6-DOF motion time course with a single perturbation.

    :param x0: centre index (gauss), transition index (step) or period divisor (sin)
    :param sigma: gaussian width / half-width of the step ramp
    :param amplitude: peak amplitude of the perturbation (ignored for 'sin')
    :param method: one of 'gauss', 'step', 'sin'
    :param mvt_axes: indices (0-5: tx, ty, tz, rx, ry, rz) receiving the curve;
        defaults to [1] (translation Y)
    :return: fitpars array of shape (6, 200) — or (6, 182*218) for 'sin'
    """
    # Bug fix: the default was the mutable literal [1], shared across calls;
    # a None sentinel gives each call a fresh list.
    if mvt_axes is None:
        mvt_axes = [1]
    fp = np.zeros((6, 200))
    x = np.arange(0, 200)
    if method == 'gauss':
        y = np.exp(-(x - x0) ** 2 / float(2 * sigma ** 2)) * amplitude
    elif method == 'step':
        # Ramp from 0 to +/-amplitude around index x0; the sign depends on
        # which half of the time course x0 lies in.
        if x0 < 100:
            y = np.hstack((np.zeros((1, (x0 - sigma))),
                           np.linspace(0, amplitude, 2 * sigma + 1).reshape(1, -1),
                           np.ones((1, ((200 - x0) - sigma - 1))) * amplitude))
        else:
            y = np.hstack((np.zeros((1, (x0 - sigma))),
                           np.linspace(0, -amplitude, 2 * sigma + 1).reshape(1, -1),
                           np.ones((1, ((200 - x0) - sigma - 1))) * -amplitude))
    elif method == 'sin':
        # One time point per k-space line of a 182x218 plane.
        # NOTE(review): amplitude is not applied here — confirm intended.
        fp = np.zeros((6, 182 * 218))
        x = np.arange(0, 182 * 218)
        y = np.sin(x / x0 * 2 * np.pi)
    else:
        # Previously an unknown method fell through to a NameError on `y`.
        raise ValueError('unknown method {!r}'.format(method))
    for xx in mvt_axes:
        fp[xx, :] = y
    return fp
def corrupt_data_both( x0, sigma= 5, amplitude=20, method='gauss'):
    """Superpose a gaussian perturbation centred at x0 with a fixed negative
    step at t=30 of the same magnitude.

    NOTE(review): `method` is accepted but ignored — the two components are
    always gauss + step; confirm this is intentional.
    """
    gauss_part = corrupt_data(x0, sigma, amplitude=amplitude, method='gauss')
    step_part = corrupt_data(30, 2, amplitude=-amplitude, method='step')
    return gauss_part + step_part
# --- Subject selection: a real T1, a binary brain mask, or a synthetic image
# --- generated from tissue labels (paths are site-specific absolute paths).
suj_type='brain'#'synth'#'suj'
if suj_type=='suj':
    suj = [ Subject(image=Image('/data/romain/data_exemple/suj_150423/mT1w_1mm.nii', INTENSITY)), ]
    #suj = [ Subject(image=Image('/data/romain/data_exemple/s_S02_t1_mpr_sag_1iso_p2.nii.gz', INTENSITY)), ]
elif suj_type=='brain':
    suj = [ Subject(image=Image('/data/romain/data_exemple/suj_150423/mask_brain.nii', INTENSITY)), ]
elif suj_type=='synth':
    dr = '/data/romain/data_exemple/suj_274542/ROI_PVE_1mm/'
    label_list = [ "GM", "WM", "CSF", "both_R_Accu", "both_R_Amyg", "both_R_Caud", "both_R_Hipp", "both_R_Pall", "both_R_Puta", "both_R_Thal",
                   "cereb_GM", "cereb_WM", "skin", "skull", "background" ]
    suj = [Subject (label=Image(type=LABEL, path=[dr + ll + '.nii.gz' for ll in label_list]))]
    # Synthetic intensities: one mean per label above, almost noise-free.
    tlab = torchio.transforms.RandomLabelsToImage(label_key='label', image_key='image', mean=[0.6, 1, 0.2, 0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,1, 1, 0.1, 0],
                                                  default_std = 0.001 )
# --- Motion-simulation parameters and the parameter grid explored below.
dico_params = { "fitpars": None, "oversampling_pct":0,
                "correct_motion":False, 'freq_encoding_dim': [2] }
disp_str_list = ['no_shift', 'center_zero', 'demean', 'demean_half' ] # [None 'center_zero', 'demean']
mvt_types=['step', 'gauss']
mvt_type =mvt_types[1]
x0 = [100] #[20, 50, 90, 95, 100] #[ 90, 95, 99];
shifts, dimy = range(-15, 15, 1), 218
mvt_axe_str_list = ['transX', 'transY','transZ', 'rotX', 'rotY', 'rotZ']
mvt_axes = [1]
mvt_axe_str = mvt_axe_str_list[mvt_axes[0]]
out_path = '/data/romain/data_exemple/test2/'
if not os.path.exists(out_path): os.mkdir(out_path)
#plt.ioff()
# Reference (motion-free) volume used for the L1 comparison in the main loop.
data_ref, aff = read_image('/data/romain/data_exemple/suj_150423/mT1w_1mm.nii')
res, res_fitpar, extra_info = pd.DataFrame(), pd.DataFrame(), dict()
disp_str = disp_str_list[0]; s = 2; xx = 100
# Sweep displacement-shift strategies x gaussian widths x centres, apply the
# simulated motion, and log both the time courses and an L1-vs-shift curve.
for disp_str in disp_str_list:
    for s in [2, 20]: #[1, 2, 3, 5, 7, 10, 12 , 15, 20 ] : # [2,4,6] : #[1, 3 , 5 , 8, 10 , 12, 15, 20 , 25 ]:
        for xx in x0:
            dico_params['displacement_shift_strategy'] = disp_str
            fp = corrupt_data(xx, sigma=s, method=mvt_type, amplitude=10, mvt_axes=mvt_axes)
            dico_params['fitpars'] = fp
            dico_params['nT'] = fp.shape[1]
            t = RandomMotionFromTimeCourse(**dico_params)
            if 'synth' in suj_type:
                dataset = SubjectsDataset(suj, transform= torchio.Compose([tlab, t ]))
            else:
                dataset = SubjectsDataset(suj, transform= t )
            sample = dataset[0]
            fout = out_path + '/{}_{}_{}_s{}_freq{}_{}'.format(suj_type, mvt_axe_str, mvt_type, s, xx, disp_str)
            # Time course actually applied, after the shift strategy removed its offset.
            fit_pars = t.fitpars - np.tile(t.to_substract[..., np.newaxis],(1,200))
            # fig = plt.figure();plt.plot(fit_pars.T);plt.savefig(fout+'.png');plt.close(fig)
            #sample['image'].save(fout+'.nii')
            extra_info['x0'], extra_info['mvt_type'], extra_info['mvt_axe']= xx, mvt_type, mvt_axe_str
            extra_info['shift_type'], extra_info['sigma'], extra_info['amp'] = disp_str, s, 10
            extra_info['disp'] = np.sum(t.to_substract)
            dff = pd.DataFrame(fit_pars.T); dff.columns = ['x', 'trans_y', 'z', 'r1', 'r2', 'r3']; dff['nbt'] = range(0,200)
            for k,v in extra_info.items():
                dff[k] = v
            res_fitpar = res_fitpar.append(dff, sort=False)
            data = sample['image']['data']
            # Circularly shift the corrupted volume along y and record the L1
            # distance to the reference at every integer voxel shift.
            for shift in shifts:
                if shift < 0:
                    d1 = data[:, :, dimy + shift:, :]
                    d2 = torch.cat([d1, data[:, :, :dimy + shift, :]], dim=2)
                else:
                    d1 = data[:, :, 0:shift, :]
                    d2 = torch.cat([data[:, :, shift:, :], d1], dim=2)
                extra_info['L1'] , extra_info['vox_disp'] = float(l1_loss(data_ref, d2).numpy()), shift
                res = res.append(extra_info, ignore_index=True, sort=False)
# Summary plots: fitted time courses per strategy, and L1 vs voxel shift.
ppf = sns.relplot(data=res_fitpar, x="nbt", y='trans_y', hue='shift_type', col='sigma', kind='line')
ss = str(res.groupby(['sigma','shift_type']).describe()['disp']['mean'])
plt.text(-100, 1, ss, alpha=0.9, backgroundcolor='w')
pp = sns.relplot(data=res, x="vox_disp", y="L1", hue='shift_type', col='sigma', kind='line')
res.groupby(['shift_type', 'sigma']).describe()['disp']['mean']
ppf = sns.relplot(data=res_fitpar, x="nbt", y='trans_y', hue='shift_type', row='sigma', col='x0', kind='line')
pp = sns.relplot(data=res, x="vox_disp", y="L1", hue='shift_type', col='x0', row='sigma', kind='line')
np.unique(res['disp'])
def str_cat(PSer, col1, col2):
    """Return '<col1>_<value1>_<col2>_<value2>' for the two given keys of PSer."""
    pieces = (col1, PSer[col1], col2, PSer[col2])
    return '_'.join(str(piece) for piece in pieces)
# res['vox_disp'] = res['vox_disp'].apply(lambda s: float(s))
# res['ss'] = res[['sigma', 'shift_type', 'disp']].apply(lambda s: str_cat(s), axis=1)
# res['L1'] = res['L1'].apply(lambda s: float(s))
res_fitpar["P"] = res_fitpar[['sigma', 'x0']].apply(lambda s: str_cat(s,'sigma','x0'), axis=1)
# Everything below sys.exit(0) is exploratory scratch code, never reached when
# the script is executed end-to-end; it is kept for interactive use.
sys.exit(0)
fres = out_path+'/res_metrics_{}_{}.csv'.format(mvt_axe_str, disp_str)
res.to_csv(fres)
res = pd.read_csv('/data/romain/data_exemple/motion_gaussX/res_metrics_transX_center_TF.csv')
#res = pd.read_csv('/data/romain/data_exemple/motion_gaussX_sigma2/res_metrics_transX_center_TF.csv')
res = pd.read_csv('/data/romain/data_exemple/motion_stepX/res_metrics_transX_step.csv')
isel = [range(0,15), range(15,30), range(30,45)]
for ii in isel:
    plt.figure('ssim')
    plt.plot( res.loc[ii,'x0'], res.loc[ii,'ssim'])
    plt.figure('displacement')
    plt.plot(res.loc[ii, 'x0'], res.loc[ii, 'mean_DispP_iterp']) #mean_DispP_iterp rmse_Disp_iterp
plt.figure('ssim')
plt.legend(disp_str_list)
plt.grid(); plt.ylabel('ssim'); plt.xlabel('')
plt.figure('displacement')
plt.legend(disp_str_list)
plt.grid(); plt.ylabel('displacement'); plt.xlabel('')
fitpars =t.fitpars_interp
plt.plot(fitpars[1].reshape(-1)) #order C by default: with the last axis index changing fastest -> display is correct
ff=np.tile(np.expand_dims(fitpars,1),(1,182,1,1))
#ff=np.moveaxis(ff,2,3)
#plt.plot(ff[1].reshape(-1,order='F'))
fitpars_interp =ff
dd = ImagesDataset(suj, transform=CenterCropOrPad(size=(182, 218,152)) ); sorig = dd[0]
original_image = sorig['T1']['data'][0]
#for an amplitude of 40, approximately
#Removing [ 0. -2.8949889 0. 0. 0. 0. ] OR [0. 2.51842243 0. -> first 5.41
#?? [ 0. -3.23879857 0. 0. 0. 0. ] OR [0. 2.17461276 0. 0. 0. 0. ]
# Per-line k-space weighting: reconstruct from one (y,z) k-space line at a
# time and record how much image intensity each line carries.
dataset = ImagesDataset(suj)
so=dataset[0]
image = so['T1']['data'][0]
tfi = (np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(image)))).astype(np.complex128)
tfi_sum = np.abs(np.sum(tfi,axis=0)); #tfi_sum = np.sum(np.abs(tfi),axis=0)
sum_intensity, sum_intensity_abs = np.zeros((tfi.shape[2])), np.zeros((tfi.shape[2]))
# NOTE(review): the 1-D buffers above are immediately overwritten by the 2-D ones.
sum_intensity, sum_intensity_abs = np.zeros((tfi.shape[1],tfi.shape[2])), np.zeros((tfi.shape[1],tfi.shape[2]))
#for z in range(0,tfi.shape[2]):
for y in range(0, tfi.shape[1]):
    for z in range(0, tfi.shape[2]):
        ttf = np.zeros(tfi.shape,dtype=complex)
        ttf[:,y,z] = tfi[:,y,z]
        ifft = np.fft.ifftshift(np.fft.ifftn(ttf))
        sum_intensity[y,z] = np.abs(np.sum(ifft))
        sum_intensity_abs[y,z] = np.sum(np.abs(ifft))
# Precomputed weighting loaded from disk (the double loop above is very slow).
sum_intensity_abs = np.load('/data/romain/data_exemple/suj_274542/intensity_fft_mask.npz.npy')
# Intensity-weighted mean translation for gaussian time courses of various widths.
for s in [1,2, 3, 4, 5 , 8, 10 , 12, 15, 20 , 2500 ]:
    fp = corrupt_data(50, sigma=s, method='gauss')
    dico_params['fitpars'] = fp
    t = RandomMotionFromTimeCourse(**dico_params)
    t._calc_dimensions(sample['T1']['data'][0].shape)
    fitpars_interp = t._interpolate_space_timing(t.fitpars)
    fitpars_interp = np.tile(fitpars_interp,[1,182,1,1])
    trans = fitpars_interp[1,0,:]
    #plt.figure(); plt.plot(trans.reshape(-1))
    print(np.sum(trans*sum_intensity_abs)/np.sum(sum_intensity_abs))
fp = corrupt_data(109,5,amplitude=40 )
ffp = np.expand_dims(np.expand_dims(fp,1),3)
ff = np.tile(ffp, [1, 182, 1, 152])
#testing with smal fitpars if mean of rot is the same as mean of affine
ff=fitpars=np.abs(t.fitpars)
ss = np.ones(ff.shape)
to_substract = np.zeros(6)
for i in range(0, 6):
    ffi = ff[i].reshape(-1, order='F')
    ssi = ss[i].reshape(-1, order='C')
    # mean over all kspace
    to_substract[i] = np.sum(ffi * ssi) / np.sum(ssi)
fitpars = fitpars - np.tile(to_substract[...,np.newaxis],[1,200])
# Compare the per-parameter weighted mean above with the log-Euclidean mean of
# the corresponding affine matrices (the comment below notes they nearly agree).
from torchio.transforms.augmentation.intensity.random_motion_from_time_course import create_rotation_matrix_3d
affine = np.identity(4)
rot = np.radians(fitpars[3:])
rotation_matrices = np.apply_along_axis(create_rotation_matrix_3d, axis=0, arr=rot).transpose([-1, 0, 1])
tt = fitpars[0:3, :].transpose([1, 0])
affs = np.tile(affine, [fitpars.shape[1], 1, 1])
affs[:,0:3,0:3] = rotation_matrices
affs[:, 0:3, 3] = tt
from scipy.linalg import logm, expm
weights, matrices = ss[0], affs
logs = [w * logm(A) for (w, A) in zip(weights, matrices)]
logs = np.array(logs)
logs_sum = logs.sum(axis=0)
expm(logs_sum/np.sum(weights, axis=0) )
#to within ~1e-2 it is indeed the identity!
# Replay realistic motion time courses (rp_* files) through the transform and
# collect the resulting image metrics.
rp_files = gfile('/data/romain/HCPdata/suj_274542/Motion_ms','^rp')
rp_files = gfile('/data/romain/HCPdata/suj_274542/mot_separate','^rp')
rpf = rp_files[10]
res = pd.DataFrame()
for rpf in rp_files:
    dirpath,name = get_parent_path([rpf])
    fout = dirpath[0] + '/check/'+name[0][3:-4] + '.nii'
    t = RandomMotionFromTimeCourse(fitpars=rpf, nufft=True, oversampling_pct=0, keep_original=True, verbose=True)
    dataset = ImagesDataset(suj, transform=t)
    sample = dataset[0]
    dicm = sample['T1']['metrics']
    dicm['fname'] = fout
    res = res.append(dicm, ignore_index=True)
    dataset.save_sample(sample, dict(T1=fout))
fit_pars = sample['T1']['fit_pars']
plt.figure; plt.plot(fit_pars[3:].T)
plt.figure; plt.plot(fit_pars.T)
# Sanity run: random-motion generator with zero swallow/sudden frequencies.
dic_no_mot ={ "noiseBasePars": (5, 20, 0),"swallowFrequency": (0, 1, 1), "suddenFrequency": (0, 1, 1),
              "oversampling_pct":0.3, "nufft":True , "keep_original": True}
t = RandomMotionFromTimeCourse(**dic_no_mot)
dataset = ImagesDataset(suj, transform=t)
sample = dataset[0]
# Successive parameter dictionaries for the random simulation; only the last
# assignment is actually used below.
dico_params = {"maxDisp": (1, 6), "maxRot": (1, 6), "noiseBasePars": (5, 20, 0),
               "swallowFrequency": (2, 6, 0), "swallowMagnitude": (1, 6),
               "suddenFrequency": (1, 2, 1), "suddenMagnitude": (6, 6),
               "verbose": True, "keep_original": True, "compare_to_original": True}
dico_params = {"maxDisp": (1, 6), "maxRot": (1, 6), "noiseBasePars": (5, 20, 0.8),
               "swallowFrequency": (2, 6, 0.5), "swallowMagnitude": (1, 6),
               "suddenFrequency": (2, 6, 0.5), "suddenMagnitude": (1, 6),
               "verbose": True, "keep_original": True, "compare_to_original": True, "oversampling_pct":0,
               "preserve_center_pct":0.01}
dico_params = {"maxDisp": (6,6), "maxRot": (6, 6), "noiseBasePars": (5, 20, 0.8),
               "swallowFrequency": (2, 6, 0), "swallowMagnitude": (3, 6),
               "suddenFrequency": (2, 6, 0), "suddenMagnitude": (3, 6),
               "verbose": False, "keep_original": True, "proba_to_augment": 1,
               "preserve_center_pct":0.1, "keep_original": True, "compare_to_original": True,
               "oversampling_pct":0, "correct_motion":True}
np.random.seed(12)
t = RandomMotionFromTimeCourse(**dico_params)
dataset = ImagesDataset(suj, transform=t)
dirpath = ['/data/romain/data_exemple/motion_correct/'];
s1 = dataset[0]
s2 = dataset[0]
fout = dirpath[0] + 'suj_mot'
fit_pars = t.fitpars
fig = plt.figure(); plt.plot(fit_pars.T); plt.savefig(fout + '.png');plt.close(fig)
dataset.save_sample(s1, dict(image=fout + '.nii'))
# Save the motion-corrected volume alongside the corrupted one.
s1['image']['data'] = s1['image']['data_cor']
dataset.save_sample(s1, dict(image=fout + '_corr.nii'))
img1, img2 = s1['image']['data'].unsqueeze(0), s1['image_orig']['data'].unsqueeze(0)
# Monte-Carlo over random simulations: save volume, time course and metrics.
res = pd.DataFrame()
dirpath = ['/data/romain/data_exemple/motion_random_preserve01/'];
if not os.path.isdir(dirpath[0]): os.mkdir(dirpath[0])
plt.ioff()
for i in range(500):
    sample = dataset[0]
    dicm = sample['T1']['metrics']
    dics = sample['T1']['simu_param']
    fout = dirpath[0] +'mot_TF_fit_par_sim{}'.format(np.floor(dicm['ssim']*10000))
    dicm['fname'] = fout
    dicm.update(dics)
    fit_pars = t.fitpars
    np.savetxt(fout+'.csv', fit_pars, delimiter=',')
    fig = plt.figure()
    plt.plot(fit_pars.T)
    plt.savefig(fout+'.png')
    plt.close(fig)
    res = res.append(dicm, ignore_index=True)
    dataset.save_sample(sample, dict(T1=fout+'.nii'))
fout = dirpath[0] +'res_simu.csv'
res.to_csv(fout)
dd = res[[ 'L1', 'MSE', 'ssim', 'corr', 'mean_DispP', 'rmse_Disp', 'rmse_DispTF']]
import seaborn as sns
sns.pairplot(dd)
#mot_separate
# Recover the simulation parameters encoded between '_' separators in each
# rp_* filename and check that a regenerated trajectory matches the file.
y_Disp, y_swalF, y_swalM, y_sudF, y_sudM = [], [], [], [], []
plt.figure()
for rpf in rp_files:
    fit_pars = pd.read_csv(rpf, header=None).values
    st=rpf
    temp = [pos for pos, char in enumerate(st) if char == "_"]
    y_Disp=int(st[temp[-13]+1:temp[-12]])/100
    y_Noise=int(st[temp[-11]+1:temp[-10]])/100
    y_swalF=np.floor(int(st[temp[-9]+1:temp[-8]])/100)
    y_swalM=int(st[temp[-7]+1:temp[-6]])/100
    y_sudF=np.floor(int(st[temp[-5]+1:temp[-4]])/100)
    y_sudM=int(st[temp[-3]+1:temp[-2]])/100
    # Degenerate (min == max) ranges pin the simulator to the decoded values.
    dico_params = {
        "maxDisp": (y_Disp,y_Disp),"maxRot": (y_Disp,y_Disp),"noiseBasePars": (y_Noise,y_Noise),
        "swallowFrequency": (y_swalF,y_swalF+1), "swallowMagnitude": (y_swalM,y_swalM),
        "suddenFrequency": (y_sudF, y_sudF+1),"suddenMagnitude": (y_sudM, y_sudM),
        "verbose": True,
    }
    t = RandomMotionFromTimeCourse(**dico_params)
    t._calc_dimensions((100,20,50))
    fitP = t._simulate_random_trajectory()
    fitP = t.fitpars
    if True:# y_Disp>0:
        plt.figure()
        plt.plot(fit_pars.T)
        plt.plot(fitP.T,'--')
#test transforms
# Quick visual test of the RandomSpike transform: save a few corrupted volumes.
from torchio.transforms import RandomSpike
t = RandomSpike(num_spikes_range=(5,10), intensity_range=(0.1,0.2))
dataset = ImagesDataset(suj, transform=t)
for i in range(1,10):
    sample = dataset[0]
    fout='/tmp/toto{}_nb{}_I{}.nii'.format(i,sample['T1']['random_spike_num_spikes'],np.floor(sample['T1']['random_spike_intensity']*100))
    dataset.save_sample(sample, dict(T1=fout))
out_dir = '/data/ghiles/motion_simulation/tests/'
def corrupt_data(data, percentage):
    """Set a constant plateau of 15 over `percentage` of `data`, in place.

    The corrupted points sit at the end of the array for fractions above one
    half and at the beginning otherwise, which leaves the centre of the time
    course at zero (avoids a global displacement).

    NOTE(review): this redefines the earlier corrupt_data() with a different
    signature; all later calls in this script resolve to this version.
    :param data: 1-D array, mutated in place
    :param percentage: fraction of points to corrupt, in [0, 1]
    :return: the same (mutated) array
    """
    n_corrupt = int(round(percentage * len(data)))
    if percentage > 0.5:
        data[n_corrupt:] = 15
    else:
        data[:n_corrupt] = 15
    return data
# Hand-crafted step time courses applied to one degree of freedom at a time,
# for several corruption fractions and frequency-encoding dimensions.
dico_params = {
    "maxDisp": 0,
    "maxRot": 0,
    "tr": 2.3,
    "es": 4e-3,
    "nT": 200,
    "noiseBasePars": 0,
    "swallowFrequency": 0,
    "swallowMagnitude": 0,
    "suddenFrequency": 0,
    "suddenMagnitude": 0,
    "displacement_shift": 0,
    "freq_encoding_dim": [1],
    "oversampling_pct": 0.3,
    "nufft": True,
    "verbose": True,
    "keep_original": True,
}
np.random.seed(12)
suj = [[
    Image('T1', '/data/romain/HCPdata/suj_100307/T1w_1mm.nii.gz', INTENSITY),
    Image('mask', '/data/romain/HCPdata/suj_100307/brain_mT1w_1mm.nii', LABEL)
]]
corrupt_pct = [.25, .45, .55, .75]
corrupt_pct = [.45]
transformation_names = ["translation1", "translation2", "translation3", "rotation1", "rotation2", "rotation3"]
fpars_list = dict()
dim_loop = [0, 1, 2]
for dd in dim_loop:
    for pct_corr in corrupt_pct:
        fpars_list[pct_corr] = dict()
        for dim, name in enumerate(transformation_names):
            # Only row `dim` of the 6x nT fitpars receives the step corruption.
            fpars_handmade = np.zeros((6, dico_params['nT']))
            fpars_handmade[dim] = corrupt_data(fpars_handmade[dim], pct_corr)
            #fpars_handmade[3:] = np.radians(fpars_handmade[3:])
            fpars_list[pct_corr][name] = fpars_handmade
            dico_params["fitpars"] = fpars_handmade
            #dico_params["freq_encoding_dim"] = [dim % 3]
            dico_params["freq_encoding_dim"] = [dd]
            t = RandomMotionFromTimeCourse(**dico_params)
            transforms = Compose([t])
            dataset = ImagesDataset(suj, transform=transforms)
            sample = dataset[0]
            # dataset.save_sample(sample, dict(T1='/data/romain/data_exemple/motion/begin_{}_{}_freq{}_Center{}.nii'.format(
            #     name, pct_corr,dico_params["freq_encoding_dim"][0],dico_params["displacement_shift"])))
            dataset.save_sample(sample, dict(T1='/data/romain/data_exemple/motion/noorderF_{}_{}_freq{}.nii'.format(
                name, pct_corr,dico_params["freq_encoding_dim"][0])))
            print("Saved {}_{}".format(name, pct_corr))
t = RandomMotionFromTimeCourse(**dico_params)
transforms = Compose([t])
dataset = ImagesDataset(suj, transform=transforms)
sample = dataset[0]
rots = t.rotations.reshape((3, 182, 218, 182))
translats = t.translations.reshape((3, 182, 218, 182))
# TESTING AFFINE GRID from pytorch
# Compare an affine resampling done via nibabel with the same affine applied
# through torch.nn.functional.affine_grid / grid_sample.
from torchio.transforms.augmentation.intensity.random_motion_from_time_course import create_rotation_matrix_3d
#import sys
#sys.path.append('/data/romain/toolbox_python/romain/cnnQC/')
#from utils import reslice_to_ref
import nibabel.processing as nbp
import nibabel as nib
import torch.nn.functional as F
import torch
sample = dataset[0]
ii, affine = sample['T1']['data'], sample['T1']['affine']
rot = np.deg2rad([0,10,20])
scale = [1, 1.2, 1/1.2 ]
trans = [-30, 30, 0]
image_size = np.array([ii[0].size()])
# affine_grid expects translations normalised to half the image extent.
trans_torch = np.array(trans)/(image_size/2)
mr = create_rotation_matrix_3d(rot)
ms = np.diag(scale)
# Recentre so the rotation is applied about the volume centre.
center = np.ceil(image_size/2)
center = center.T - mr@center.T
center_mat=np.zeros([4,4])
center_mat[0:3,3] = center[0:3].T
maff = np.hstack((ms @ mr,np.expand_dims(trans,0).T))
maff_torch = np.hstack((ms @ mr,trans_torch.T))
maff = np.vstack((maff,[0,0,0,1]))
nib_fin = nib.Nifti1Image(ii.numpy()[0], affine)
new_aff = affine @ np.linalg.inv(maff+center_mat) #new_aff = maff @ affine # other way round new_aff = affine@maff
nib_fin.affine[:] = new_aff[:]
fout = nbp.resample_from_to(nib_fin, (nib_fin.shape, affine), cval=-1) #fout = nbp.resample_from_to(nib_fin, (nib_fin.shape, new_aff), cval=-1)
ov(fout.get_fdata())
#it gives almost the same, just the scalling is shifted with nibabel (whereas it is centred with torch
mafft = maff_torch[np.newaxis,:]
mafft = torch.from_numpy(mafft)
# grid_sample works on (N, C, D, H, W); permute axes to match, undo afterwards.
x = ii.permute(0,3,2,1).unsqueeze(0)
grid = F.affine_grid(mafft, x.shape, align_corners=False).float()
x = F.grid_sample(x, grid, align_corners=False)
xx = x[0,0].numpy().transpose(2,1,0)
ov(xx)
# make the inverse transform
xx=torch.zeros(4,4); xx[3,3]=1
xx[0:3,0:4] = mafft[0]
imaf = xx.inverse()
imaf = imaf[0:3,0:4].unsqueeze(0)
grid = F.affine_grid(imaf, x.shape, align_corners=False).float()
x = F.grid_sample(x, grid, align_corners=False)
xx = x[0,0].numpy().transpose(2,1,0)
ov(xx)
| 39.256506
| 153
| 0.640152
|
4a099ba3bb212b848ef9fea96b45077edcbe3bf4
| 24,157
|
py
|
Python
|
nncf/quantization/init_precision.py
|
gnomonsis/nncf_pytorch
|
9fc4a92b5cb1b2c240e633c4ffa69b4fae1917fb
|
[
"Apache-2.0"
] | null | null | null |
nncf/quantization/init_precision.py
|
gnomonsis/nncf_pytorch
|
9fc4a92b5cb1b2c240e633c4ffa69b4fae1917fb
|
[
"Apache-2.0"
] | 4
|
2020-07-17T11:12:35.000Z
|
2021-12-15T15:20:24.000Z
|
nncf/quantization/init_precision.py
|
gnomonsis/nncf_pytorch
|
9fc4a92b5cb1b2c240e633c4ffa69b4fae1917fb
|
[
"Apache-2.0"
] | null | null | null |
import itertools
from collections import OrderedDict
from typing import List, Dict, Union
import os
import shutil
import torch
from torch import Tensor, nn
from torch.nn.modules.loss import _Loss
from nncf.debug import is_debug
from nncf.dynamic_graph.context import no_nncf_trace
from nncf.nncf_logger import logger as nncf_logger
from nncf.nncf_network import NNCFNetwork, CompressionModuleType
from nncf.quantization.layers import QUANTIZATION_MODULES, BaseQuantizer
from .hessian_trace import HessianTraceEstimator
from .hw_precision_constraints import HWPrecisionConstraints
from .quantizer_id import QuantizerId
from ..structures import QuantizationPrecisionInitArgs
from ..utils import in_scope_list, get_all_modules_by_type
class ManualPrecisionInitializer:
    """Assigns user-specified bitwidths ("bitwidth_per_scope" config entries)
    to the matching quantizer modules of an NNCF-wrapped model."""

    def __init__(self, algo: 'QuantizationController', config: 'NNCFConfig',
                 all_quantizers: Dict[QuantizerId, BaseQuantizer],
                 hw_precision_constraints: HWPrecisionConstraints,
                 init_args: QuantizationPrecisionInitArgs = None):
        self._algo = algo
        self._model = self._algo._model  # type: NNCFNetwork
        self._bitwidth_per_scope = config.get('bitwidth_per_scope', {})  # type: List[List]
        self._hw_precision_constraints = hw_precision_constraints
        # Snapshot of every quantizer's bitwidth before any re-assignment.
        self.original_precisions = {q_id: quantizer.num_bits for q_id, quantizer in all_quantizers.items()}
        # Quantizer modules are keyed by their id() so that modules found via
        # scope traversal can be mapped back to their QuantizerId.
        self._quantizer_address_to_id_mapping = {id(quantizer): q_id for q_id, quantizer in all_quantizers.items()}
        quantization_types = [class_type.__name__ for class_type in QUANTIZATION_MODULES.registry_dict.values()]
        weight_module_dict = self._model.get_nncf_wrapped_model()
        ordered_weight_quantizers_per_scope = get_all_modules_by_type(weight_module_dict, quantization_types)
        ordered_weight_quantization_list = []
        for quantizer in ordered_weight_quantizers_per_scope.values():
            address = id(quantizer)
            if quantizer.is_weights:
                ordered_weight_quantization_list.append((self._quantizer_address_to_id_mapping[address], quantizer))
        # Weight quantizers only, in model traversal order.
        self._ordered_weight_quantizations = OrderedDict(ordered_weight_quantization_list)
        # All quantizers (activation + function + weight) indexed by scope.
        self._all_quantizers_per_scope = get_all_modules_by_type(
            self._model.get_compression_modules_by_type(CompressionModuleType.ACTIVATION_QUANTIZER), quantization_types)
        self._all_quantizers_per_scope.update(get_all_modules_by_type(
            self._model.get_compression_modules_by_type(CompressionModuleType.FUNCTION_QUANTIZER), quantization_types))
        self._all_quantizers_per_scope.update(ordered_weight_quantizers_per_scope)

    def apply_init(self):
        """Apply every [bitwidth, scope] pair from the config.

        Raises ValueError on malformed entries or on a scope pattern that
        matches no quantizer.
        """
        for pair in self._bitwidth_per_scope:
            if len(pair) != 2:
                raise ValueError('Invalid format of bitwidth per scope: [int, str] is expected')
            bitwidth = pair[0]
            scope_name = pair[1]
            is_matched = False
            # A single pattern may match (and re-assign) several quantizers.
            for scope, quantizer in self._all_quantizers_per_scope.items():
                if in_scope_list(str(scope), scope_name):
                    quantizer.num_bits = bitwidth
                    is_matched = True
            if not is_matched:
                raise ValueError(
                    'Invalid scope name `{}`, failed to assign bitwidth {} to it'.format(scope_name, bitwidth))
class PerturbationObserver:
    """Forward hook recording the quantization noise of a single layer.

    It is registered on a weight quantizer module, so for each forward pass
    the hook input is the original tensor and the output its quantized
    version; `perturbation` then holds ||input - output||^2.
    """

    def __init__(self, device):
        super().__init__()
        self.device = device
        self.perturbation = None  # squared L2 distance input vs. output
        self.numels = None        # number of elements in the observed input

    def calc_perturbation(self, module, inputs: torch.Tensor, output: torch.Tensor):
        first_input = inputs[0] if isinstance(inputs, tuple) else inputs
        # Keep the bookkeeping out of the NNCF graph tracing.
        with no_nncf_trace():
            delta = first_input - output
            self.perturbation = torch.norm(delta, p=2) ** 2
            self.numels = first_input.size().numel()
            self.input_norm = torch.norm(first_input, p=2) ** 2

    def reset(self):
        self.perturbation = None
        self.numels = None

    def get_observation(self):
        return self.perturbation

    def get_numels(self):
        return self.numels

    def get_input_norm(self):
        return self.input_norm
class Perturbations:
    """Quantization-noise values indexed by (layer id, bitwidth)."""

    def __init__(self):
        # layer id -> {bitwidth -> perturbation tensor}
        self._perturbations = {}  # type: Dict[int, Dict[int, Tensor]]

    def add(self, layer_id: int, bitwidth: int, perturbation: Tensor):
        per_layer = self._perturbations.setdefault(layer_id, {})
        per_layer[bitwidth] = perturbation

    def get(self, layer_id: int, bitwidth: int) -> Tensor:
        return self._perturbations[layer_id][bitwidth]

    def get_all(self) -> Dict[int, Dict[int, Tensor]]:
        return self._perturbations
class TracesPerLayer:
    """Average Hessian traces per weightable layer plus their ascending order."""

    def __init__(self, traces_per_layer: Tensor):
        self._traces_per_layer = traces_per_layer
        # Layer indices sorted by trace value, smallest first (stable sort).
        self._traces_order = sorted(range(len(traces_per_layer)),
                                    key=lambda idx: traces_per_layer[idx])

    def get(self, index: int) -> Tensor:
        return self._traces_per_layer[index]

    def get_order_of_traces(self) -> List[int]:
        return self._traces_order

    def get_all(self) -> Tensor:
        return self._traces_per_layer

    def __bool__(self):
        # Truthy iff at least one trace is present.
        return bool(self._traces_order)
class HAWQPrecisionInitializer(ManualPrecisionInitializer):
    def __init__(self, algo: 'QuantizationController', config: 'NNCFConfig',
                 all_quantizers: Dict[QuantizerId, BaseQuantizer],
                 hw_precision_constraints: HWPrecisionConstraints,
                 init_args: QuantizationPrecisionInitArgs):
        """
        :param init_args: supplies the criterion and data loader used for the
            Hessian-trace estimation; mandatory for HAWQ (unlike the manual
            initializer, which accepts None).
        """
        super().__init__(algo, config, all_quantizers, hw_precision_constraints, init_args)
        if not init_args:
            raise ValueError('Arguments for precision initialization are not provided. '
                             'Refer to `CompressionAlgorithmInitArgs` class')
        self._criterion = init_args.criterion
        self._data_loader = init_args.data_loader
        # Optional file with precomputed average traces; skips the estimation.
        self._traces_per_layer_path = config.get('traces_per_layer_path', None)
        self._num_data_points = config.get('num_data_points', 200)
        self._iter_number = config.get('iter_number', 200)
        self._tolerance = config.get('tolerance', 1e-5)
        # Candidate bitwidths: taken from the HW config when present,
        # otherwise from the "bits" config entry (default [4, 8]).
        self._bits = hw_precision_constraints.get_all_unique_bits() \
            if hw_precision_constraints else config.get('bits', [4, 8])
        self._device = next(self._model.parameters()).device
    def apply_init(self):
        """Run the HAWQ mixed-precision search and assign the chosen bitwidths.

        Pipeline: estimate per-layer Hessian traces -> enumerate bitwidth
        configurations constrained by the trace order -> filter them by the HW
        config -> score each by trace-weighted quantization noise -> apply the
        median-scoring configuration and propagate it to activation quantizers.
        :return: layer indices ordered by their trace*perturbation metric
        :raises RuntimeError: if traces cannot be computed or no configuration
            satisfies the HW constraints
        """
        # Freeze everything except quantized weights while estimating traces.
        disabled_gradients = self.disable_quantizer_gradients(self._all_quantizers_per_scope,
                                                              self._algo.quantized_weight_modules_registry,
                                                              self._model)
        traces_per_layer = self._calc_traces(self._criterion, self._iter_number, self._tolerance)
        if not traces_per_layer:
            raise RuntimeError('Failed to calculate hessian traces!')
        self.enable_quantizer_gradients(self._model, self._all_quantizers_per_scope, disabled_gradients)
        num_weights = len(self._ordered_weight_quantizations)
        bits_configurations = self.get_configs_constrained_by_order(self._bits, num_weights)
        ordered_weight_quantization_ids = list(self._ordered_weight_quantizations.keys())
        bits_configurations = self.filter_configs_by_precision_constraints(bits_configurations,
                                                                           self._hw_precision_constraints,
                                                                           ordered_weight_quantization_ids,
                                                                           traces_per_layer.get_order_of_traces())
        if not bits_configurations:
            raise RuntimeError('All bits configurations are incompatible with HW Config!')
        perturbations, weight_observers = self.calc_quantization_noise()
        configuration_metric = self.calc_hawq_metric_per_configuration(bits_configurations, perturbations,
                                                                       traces_per_layer, self._device)
        chosen_config_per_layer = self.choose_configuration(configuration_metric, bits_configurations,
                                                            traces_per_layer.get_order_of_traces())
        self.set_chosen_config(chosen_config_per_layer)
        ordered_metric_per_layer = self.get_metric_per_layer(chosen_config_per_layer, perturbations,
                                                             traces_per_layer)
        if is_debug():
            self.HAWQDump(bits_configurations, configuration_metric, perturbations,
                          weight_observers, traces_per_layer, self._bits).run()
        # Bitwidth changes alter the traced graph; rebuild it before use.
        self._model.rebuild_graph()
        str_bw = [str(element) for element in self.get_bitwidth_per_scope()]
        nncf_logger.info('\n'.join(['\n\"bitwidth_per_scope\": [', ',\n'.join(str_bw), ']']))
        return ordered_metric_per_layer
def get_bitwidth_per_scope(self) -> List[List[Union[int, str]]]:
sorted_quantizers = OrderedDict(sorted(self._all_quantizers_per_scope.items(), key=lambda x: str(x[0])))
full_bitwidth_per_scope = []
for scope, quantizer in sorted_quantizers.items():
quantizer_id = self._quantizer_address_to_id_mapping[id(quantizer)]
if quantizer.num_bits != self.original_precisions[quantizer_id]:
full_bitwidth_per_scope.append([quantizer.num_bits, str(scope)])
return full_bitwidth_per_scope
@staticmethod
def disable_quantizer_gradients(all_quantizations: Dict['Scope', BaseQuantizer],
quantized_weight_modules_registry: Dict['Scope', torch.nn.Module],
model: nn.Module) -> List[str]:
"""
Disables gradients of all parameters, except for layers that have quantizers for weights.
:param all_quantizations: all quantizers per quantizer id
:param quantized_weight_modules_registry: modules with quantized weights per scope
:param model: model to access all parameters
:return: list of names of the parameters that were originally disabled
"""
for module in all_quantizations.values():
module.init_stage = True
module.disable_gradients()
# remember gradients of quantized modules that were enabled
gradients_to_enable = []
for quantized_module in quantized_weight_modules_registry.values():
for param_name, param in quantized_module.named_parameters():
if param.requires_grad:
gradients_to_enable.append(param_name)
disabled_gradients = []
# disable all gradients, except already disabled
for param_name, param in model.named_parameters():
if not param.requires_grad:
disabled_gradients.append(param_name)
else:
param.requires_grad = False
# enable gradients of quantized modules that were disabled
for quantized_module in quantized_weight_modules_registry.values():
for param_name, param in quantized_module.named_parameters():
if param_name in gradients_to_enable and not 'bias' in param_name:
param.requires_grad = True
return disabled_gradients
    def _calc_traces(self, criterion: _Loss, iter_number: int, tolerance: float) -> TracesPerLayer:
        """Load precomputed average Hessian traces if a path is configured,
        otherwise estimate them from data with the HessianTraceEstimator.

        :param criterion: loss used during trace estimation
        :param iter_number: maximum number of estimation iterations
        :param tolerance: convergence tolerance passed to the estimator
        :return: per-layer average traces wrapped in TracesPerLayer
        """
        if self._traces_per_layer_path:
            return TracesPerLayer(torch.load(self._traces_per_layer_path))
        trace_estimator = HessianTraceEstimator(self._model, criterion, self._device, self._data_loader,
                                                self._num_data_points)
        avg_traces = trace_estimator.get_average_traces(max_iter=iter_number, tolerance=tolerance)
        return TracesPerLayer(avg_traces)
@staticmethod
def enable_quantizer_gradients(model: nn.Module, all_quantizers: Dict['Scope', nn.Module],
disabled_gradients: List):
"""
Enables gradients of all parameters back, except for ones that were originally disabled
:param all_quantizers: all quantizers per id
:param model: model to access all parameters
:param disabled_gradients: list of names of the parameters that were originally disabled
"""
for param_name, param in model.named_parameters():
if param_name not in disabled_gradients:
param.requires_grad = True
for module in all_quantizers.values():
module.init_stage = False
module.enable_gradients()
@staticmethod
def get_configs_constrained_by_order(bits_: List[int], num_layers: int) -> List[List[int]]:
bits = sorted(bits_)
m = len(bits)
L = num_layers
bit_configs = []
for j in range(1, m + 1):
for combo_bits in itertools.combinations(bits, j):
for combo_partitions in itertools.combinations(list(range(1, L)), j - 1):
bit_config = []
prev_p = 0
for (p, b) in zip(combo_partitions + (L,), combo_bits):
bit_config += [b] * (p - prev_p)
prev_p = p
bit_configs.append(bit_config)
return bit_configs
@staticmethod
def filter_configs_by_precision_constraints(bits_configurations: List[List[int]],
hw_precision_constraints: HWPrecisionConstraints,
ordered_weight_ids: List[QuantizerId],
traces_order: List[int]) -> List[List[int]]:
if not hw_precision_constraints:
return bits_configurations
filtered_bits_configurations = []
for bits_configuration in bits_configurations:
is_all_bitwidth_compatible = True
for i, bitwidth in enumerate(bits_configuration):
weight_id = ordered_weight_ids[traces_order[i]]
bits_constraints = hw_precision_constraints.get(weight_id)
if bitwidth not in bits_constraints:
is_all_bitwidth_compatible = False
break
if is_all_bitwidth_compatible:
filtered_bits_configurations.append(bits_configuration)
return filtered_bits_configurations
    def calc_quantization_noise(self) -> [Perturbations, List[PerturbationObserver]]:
        """Measure per-layer quantization noise for every candidate bitwidth.

        Temporarily hooks every weight quantizer with a PerturbationObserver,
        then for each candidate bitwidth sets all weight quantizers to that
        width and runs a dummy forward pass so each observer records its
        layer's ||input - output||^2.
        :return: perturbations per (layer, bitwidth) and the observers
            (their hooks are removed before returning)
        """
        hook_handles = []
        observers = []
        for module in self._ordered_weight_quantizations.values():
            observer = PerturbationObserver(self._device)
            hook_handles.append(module.register_forward_hook(observer.calc_perturbation))
            observers.append(observer)
        perturbations = Perturbations()
        for b in self._bits:
            for wi in self._ordered_weight_quantizations.values():
                wi.num_bits = b
            self._model.do_dummy_forward(force_eval=True)
            # Observers hold values from the forward pass just executed.
            for i, observer in enumerate(observers):
                perturbations.add(layer_id=i, bitwidth=b, perturbation=observer.get_observation())
        for handle in hook_handles:
            handle.remove()
        return perturbations, observers
@staticmethod
def calc_hawq_metric_per_configuration(bits_configurations: List[List[int]], perturbations: Perturbations,
traces_per_layer: TracesPerLayer, device) -> List[Tensor]:
configuration_metric = []
for bits_config in bits_configurations:
hawq_metric = torch.Tensor([0]).to(device)
for i, layer_bits in enumerate(bits_config):
order = traces_per_layer.get_order_of_traces()[i]
hawq_metric += traces_per_layer.get(order) * perturbations.get(layer_id=order,
bitwidth=layer_bits)
configuration_metric.append(hawq_metric)
return configuration_metric
    def choose_configuration(self, configuration_metric: List[Tensor], bits_configurations: List[List[int]],
                             traces_order: List[int]) -> List[int]:
        """Pick the configuration whose HAWQ metric is the median of all metrics.

        The chosen per-position bitwidths (ordered by traces) are remapped back
        to the execution order of the weightable layers.
        """
        num_weights = len(traces_order)
        ordered_config = [0] * num_weights
        # torch median always returns an element of the input (the lower middle
        # for an even count), so list.index() below locates its configuration.
        median_metric = torch.Tensor(configuration_metric).to(self._device).median()
        configuration_index = configuration_metric.index(median_metric)
        bit_configuration = bits_configurations[configuration_index]
        for i, bitwidth in enumerate(bit_configuration):
            # Position i of the configuration belongs to layer traces_order[i].
            ordered_config[traces_order[i]] = bitwidth
        nncf_logger.info('Chosen HAWQ configuration (bitwidth per weightable layer)={}'.format(ordered_config))
        nncf_logger.debug('Order of the weightable layers in the HAWQ configuration={}'.format(traces_order))
        return ordered_config
def set_chosen_config(self, weight_bits_per_layer: List[int]):
for wq, bits in zip(self._ordered_weight_quantizations.values(), weight_bits_per_layer):
wq.num_bits = bits
pairs = self._algo.get_weights_activation_quantizers_pairs()
for pair in pairs:
wqs, aq = pair
aq.num_bits = max([wq.num_bits for wq in wqs])
def get_metric_per_layer(self, chosen_config_per_layer: List[int], perturbations: Perturbations,
traces_per_layer: TracesPerLayer):
metric_per_layer = []
for i, layer_bits in enumerate(chosen_config_per_layer):
metric_per_layer.append(traces_per_layer.get(i) * perturbations.get(i, layer_bits))
ordered_metric_per_layer = [i[0] for i in
sorted(enumerate(metric_per_layer), reverse=True, key=lambda x: x[1])]
return ordered_metric_per_layer
class HAWQDump:
    """Debug dumper for the HAWQ precision-initialization run.

    Saves raw traces and renders several diagnostic plots (Pareto frontier,
    noise density, perturbation ratios) into the ``hawq_dumps`` directory,
    which is recreated from scratch on every instantiation.
    """

    def __init__(self, bits_configurations: List[List[int]], configuration_metric: List[Tensor],
                 perturbations: Perturbations, weight_observers: List[PerturbationObserver],
                 traces_per_layer: TracesPerLayer, bits: List[int]):
        self._bits_configurations = bits_configurations
        self._configuration_metric = configuration_metric
        self._num_weights = len(weight_observers)
        self._perturbations = perturbations
        self._weight_observers = weight_observers
        self._dump_dir = "hawq_dumps"
        # Start every dump from a clean directory.
        if os.path.exists(self._dump_dir):
            shutil.rmtree(self._dump_dir)
        os.makedirs(self._dump_dir, exist_ok=True)
        self._traces_order = traces_per_layer.get_order_of_traces()
        self._traces_per_layer = traces_per_layer.get_all()
        # Per-layer element counts and input norms, reordered by trace order.
        num_of_weights = []
        norm_of_weights = []
        for i in range(self._num_weights):
            order = self._traces_order[i]
            num_of_weights.append(self._weight_observers[order].get_numels())
            norm_of_weights.append(self._weight_observers[order].get_input_norm())
        self._num_weights_per_layer = torch.Tensor(num_of_weights)
        self._norm_weights_per_layer = torch.Tensor(norm_of_weights)
        # 8 bits per byte * 2**20 bytes per megabyte = 2**23 bits per megabyte.
        bits_in_megabyte = 2 ** 23
        self._model_sizes = []
        # Model size (MB) of each configuration: sum of bits * weight counts.
        for bits_config in self._bits_configurations:
            size = torch.sum(torch.Tensor(bits_config) * self._num_weights_per_layer).item() / bits_in_megabyte
            self._model_sizes.append(size)
        self._bits = bits

    def run(self):
        """Produce all dump artifacts."""
        self._dump_avg_traces()
        self._dump_density_of_quantization_noise()
        self._dump_metric()
        self._dump_perturbations_ratio()

    def _dump_avg_traces(self):
        """Save the raw average traces tensor and a log-scale plot of it."""
        import matplotlib.pyplot as plt
        dump_file = os.path.join(self._dump_dir, 'avg_traces_per_layer')
        torch.save(self._traces_per_layer, dump_file)
        fig = plt.figure()
        fig.suptitle('Average Hessian Trace')
        ax = fig.add_subplot(2, 1, 1)
        ax.set_yscale('log')
        ax.set_xlabel('weight quantizers')
        ax.set_ylabel('average hessian trace')
        ax.plot(self._traces_per_layer.cpu().numpy())
        plt.savefig(dump_file)

    def _dump_metric(self):
        """Plot metric vs. model size and highlight the median configuration."""
        import matplotlib.pyplot as plt
        list_to_plot = [cm.item() for cm in self._configuration_metric]
        fig = plt.figure()
        fig.suptitle('Pareto Frontier')
        ax = fig.add_subplot(2, 1, 1)
        ax.set_yscale('log')
        ax.set_xlabel('Model Size (MB)')
        ax.set_ylabel('Metric value (total perturbation)')
        ax.scatter(self._model_sizes, list_to_plot, s=20, facecolors='none', edgecolors='r')
        cm = torch.Tensor(self._configuration_metric)
        # The median is always one of the metric values, so .index() finds
        # the corresponding configuration.
        cm_m = cm.median().item()
        configuration_index = self._configuration_metric.index(cm_m)
        ms_m = self._model_sizes[configuration_index]
        ax.scatter(ms_m, cm_m, s=30, facecolors='none', edgecolors='b', label='median from all metrics')
        ax.legend()
        plt.savefig(os.path.join(self._dump_dir, 'Pareto_Frontier'))
        nncf_logger.info(
            'Distribution of HAWQ metrics: min_value={:.3f}, max_value={:.3f}, median_value={:.3f}, '
            'median_index={}, total_number={}'.format(cm.min().item(), cm.max().item(), cm_m,
                                                      configuration_index,
                                                      len(self._configuration_metric)))

    def _dump_density_of_quantization_noise(self):
        """Plot total (trace-unweighted) quantization noise per configuration."""
        noise_per_config = []  # type: List[Tensor]
        for bits_config in self._bits_configurations:
            qnoise = 0
            for i in range(self._num_weights):
                layer_bits = bits_config[i]
                order = self._traces_order[i]
                qnoise += self._perturbations.get(layer_id=order, bitwidth=layer_bits)
            noise_per_config.append(qnoise)
        list_to_plot = [cm.item() for cm in noise_per_config]
        import matplotlib.pyplot as plt
        fig = plt.figure()
        fig.suptitle('Density of quantization noise')
        ax = fig.add_subplot(2, 1, 1)
        ax.set_yscale('log')
        ax.set_xlabel('Blocks')
        ax.set_ylabel('Noise value')
        ax.scatter(self._model_sizes, list_to_plot, s=20, alpha=0.3)
        # NOTE(review): no artist here carries a label, so this legend() call
        # has nothing to show -- confirm whether it can be dropped.
        ax.legend()
        plt.savefig(os.path.join(self._dump_dir, 'Density_of_quantization_noise'))

    def _dump_perturbations_ratio(self):
        """Compare per-layer noise (raw and normalized) against the traces."""
        import matplotlib.pyplot as plt
        fig = plt.figure()
        fig.suptitle('Quantization noise vs Average Trace')
        ax = fig.add_subplot(2, 1, 1)
        ax.set_xlabel('Blocks')
        ax.set_yscale('log')
        # Use the largest candidate bitwidth for the comparison.
        b = max(self._bits)
        perturb = [p[b] for p in self._perturbations.get_all().values()]
        # Noise normalized by element count and input norm of each layer.
        ax.plot(
            [p / m / n for p, m, n in zip(perturb, self._num_weights_per_layer, self._norm_weights_per_layer)],
            label='normalized {}-bit noise'.format(b))
        ax.plot(perturb, label='{}-bit noise'.format(b))
        ax.plot(self._traces_per_layer.cpu().numpy(), label='trace')
        ax.plot([n * p for n, p in zip(self._traces_per_layer.cpu(), perturb)], label='trace * noise')
        ax.legend()
        plt.savefig(os.path.join(self._dump_dir, 'Quantization_noise_vs_Average_Trace'))
class PrecisionInitializerFactory:
    """Resolves a precision-initializer class from its configuration name."""

    @staticmethod
    def create(init_type: str):
        """Return the initializer class registered under ``init_type``.

        :raises NotImplementedError: for unknown initializer type names
        """
        initializers = {
            "manual": ManualPrecisionInitializer,
            "hawq": HAWQPrecisionInitializer,
        }
        if init_type not in initializers:
            raise NotImplementedError
        return initializers[init_type]
| 49.705761
| 120
| 0.640394
|
4a099c9242184ae4f4ba2e4d3829c30185210dd0
| 55
|
py
|
Python
|
Examples/hello2.py
|
tariqueameer7/course-python
|
f1252e82471f7e34d66beb30d9236850df1bd8d4
|
[
"Apache-2.0"
] | 1
|
2021-02-04T16:59:11.000Z
|
2021-02-04T16:59:11.000Z
|
Examples/hello2.py
|
tariqueameer7/course-python
|
f1252e82471f7e34d66beb30d9236850df1bd8d4
|
[
"Apache-2.0"
] | null | null | null |
Examples/hello2.py
|
tariqueameer7/course-python
|
f1252e82471f7e34d66beb30d9236850df1bd8d4
|
[
"Apache-2.0"
] | 1
|
2019-10-30T14:37:48.000Z
|
2019-10-30T14:37:48.000Z
|
# Greet, then print the numbers 0 through 9 one per line.
print("hello world")
for number in range(10):
    print(number)
| 11
| 20
| 0.618182
|
4a099cb840e7831ba90ef54d8c108f02725f5fdd
| 1,990
|
py
|
Python
|
blog/blogsettings.py
|
mkdika/pyniblog
|
0612836f7c91076a386d5d64ae22cc4fe9443964
|
[
"Apache-2.0"
] | 1
|
2018-08-14T06:30:10.000Z
|
2018-08-14T06:30:10.000Z
|
blog/blogsettings.py
|
mkdika/pyniblog
|
0612836f7c91076a386d5d64ae22cc4fe9443964
|
[
"Apache-2.0"
] | 7
|
2019-10-22T21:37:16.000Z
|
2021-09-08T00:01:00.000Z
|
blog/blogsettings.py
|
mkdika/pyniblog
|
0612836f7c91076a386d5d64ae22cc4fe9443964
|
[
"Apache-2.0"
] | null | null | null |
class BlogSetting():
    """Static site-wide configuration for the blog engine.

    All values are plain class attributes read by the templates and views:
    branding text, social-media links, pagination size, and the about page.
    """
    # blog short name, appears in the upper-left corner of each page.
    name = 'PyniBlog'
    # blog title, for main page.
    title = 'Simple Blog'
    # blog subtitle, for main page.
    sub_title = 'A Django 2 Based Blog Engine'
    # social media account links, appear in the bottom section of each page.
    twitter_url = 'http://twitter.com/maikelchandika'
    facebook_url = 'http://facebook.com/maikel.chandika'
    github_url = 'http://github.com/mkdika'
    copyright = 'Maikel Chandika'
    # number of posts per page on the post main page (pagination size)
    post_per_page = 5
    # about page setup: subtitle and HTML body (placeholder Lorem Ipsum text)
    about_sub_title = "How's this blog goes"
    about_blog = """
    <p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus orci est,
    aliquet a mi sit amet, mollis imperdiet enim. Aenean luctus, libero at eleifend rutrum,
    velit nunc finibus neque, vel posuere augue nisi ac eros. Quisque lacinia metus
    sed rutrum porttitor. Ut bibendum sit amet ligula vitae egestas. Cras pellentesque
    ligula lorem, sit amet ullamcorper metus feugiat eu. Proin aliquam tortor arcu,
    sed egestas nibh cursus sit amet. Curabitur et aliquet nisl, a vehicula ex. Duis
    sed facilisis tortor.</p>
    <p>Nam luctus dapibus dignissim. Vestibulum blandit sit amet sapien nec venenatis.
    Phasellus eget cursus lorem. Sed vehicula lacinia vehicula. Morbi non dolor et libero
    dapibus aliquam vel ac arcu. Ut vitae pellentesque nisi. Donec lobortis posuere posuere.
    Maecenas accumsan consequat auctor. Ut in leo nec tortor lacinia consectetur vitae
    id orci. Praesent euismod ornare risus sed placerat. Etiam at risus elit. Vivamus
    quis suscipit felis. Morbi dictum pellentesque egestas. Proin at felis et orci
    tempor efficitur sed at augue.</p>
    """
| 49.75
| 105
| 0.656281
|
4a099d36b2d7bb1bb2b287a2cf483e8c888835c0
| 1,629
|
py
|
Python
|
src/tests/test_direct.py
|
ndejong/env-alias
|
0047098ebfc3be08b426b78a5cb9e747c87c28cf
|
[
"BSD-2-Clause"
] | 2
|
2020-12-11T23:01:53.000Z
|
2021-07-02T05:49:25.000Z
|
src/tests/test_direct.py
|
ndejong/env-alias
|
0047098ebfc3be08b426b78a5cb9e747c87c28cf
|
[
"BSD-2-Clause"
] | null | null | null |
src/tests/test_direct.py
|
ndejong/env-alias
|
0047098ebfc3be08b426b78a5cb9e747c87c28cf
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import tempfile
import random
import string
from env_alias.EnvAliasGenerator import EnvAliasGenerator
def test_sample_direct_01(capsys):
    """A literal alias value is exported verbatim."""
    yaml = '''
    sample_direct_01:
      value: 'somevalue'
    '''
    config_path = __generate_config_file(yaml)
    EnvAliasGenerator().main(configuration_file=config_path)
    os.unlink(config_path)
    output = capsys.readouterr().out.rstrip()
    assert output == ' export "sample_direct_01"="somevalue"'
def test_sample_direct_02(capsys):
    """An 'env:' reference resolves to that environment variable's value."""
    yaml = '''
    sample_direct_02:
      value: 'env:HOME'
    '''
    config_path = __generate_config_file(yaml)
    EnvAliasGenerator().main(configuration_file=config_path)
    os.unlink(config_path)
    output = capsys.readouterr().out.rstrip()
    assert output == ' export "sample_direct_02"="{}"'.format(os.getenv('HOME'))
def test_sample_direct_03(capsys):
    """An explicit 'name' overrides the alias key in the exported variable."""
    yaml = '''
    sample_direct_03:
      name: 'sample_direct_03_override_name'
      value: 'env:HOME'
    '''
    config_path = __generate_config_file(yaml)
    EnvAliasGenerator().main(configuration_file=config_path)
    os.unlink(config_path)
    output = capsys.readouterr().out.rstrip()
    assert output == ' export "sample_direct_03_override_name"="{}"'.format(os.getenv('HOME'))
def __generate_config_file(yaml_config):
    """Write an env-alias YAML config to a unique temp file and return its path.

    The caller is responsible for removing the file afterwards.
    """
    config = 'env-alias:' + yaml_config
    # tempfile.mkstemp creates the file atomically with a guaranteed-unique
    # name; the previous random 8-letter name could (rarely) collide with an
    # existing file and silently overwrite it.
    fd, filename = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
        f.write(config)
    return filename
| 26.704918
| 116
| 0.712093
|
4a099deaf688028c190f0947e5ed0539a6ef7d12
| 8,527
|
py
|
Python
|
welcome.py
|
Hemphill39/jetson-service
|
27a22a53b337500447d9076c9f50189eaa232e65
|
[
"Apache-2.0"
] | null | null | null |
welcome.py
|
Hemphill39/jetson-service
|
27a22a53b337500447d9076c9f50189eaa232e65
|
[
"Apache-2.0"
] | 30
|
2017-10-03T21:53:59.000Z
|
2017-12-05T22:18:01.000Z
|
welcome.py
|
Hemphill39/jetson-service
|
27a22a53b337500447d9076c9f50189eaa232e65
|
[
"Apache-2.0"
] | 2
|
2017-09-25T21:26:11.000Z
|
2017-12-07T18:14:30.000Z
|
# Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
from flask import Flask, jsonify, request
from discovery import Discovery
from speech_to_text import Speech_to_text
from getConfidence import NLC
import requests
# Flask app and Watson service clients. The clients stay None unless
# credentials are found in one of the two sources below.
app = Flask(__name__)
discovery = None
Speech = None
classifier = None
# IDs of the pre-provisioned Watson Discovery artifacts and the NLC model.
discovery_collection_id = "0cead13f-1bf4-438b-8c6b-e3f412d2eb3e"
discovery_configuration_id = "59aca88c-a9c2-4299-a6a2-be7e5e3eea6b"
discovery_environment_id = "67c3f67b-a49f-4156-a795-1ff97ad09e6d"
classifier_id = "ebd15ex229-nlc-54210"
# On Cloud Foundry / Bluemix the service credentials arrive in the
# VCAP_SERVICES environment variable; each service is optional there.
if 'VCAP_SERVICES' in os.environ:
    logging.basicConfig(filename='welcome.log',level=logging.DEBUG)
    logging.info('Using VCAP on remote')
    vcap = json.loads(os.getenv('VCAP_SERVICES'))
    if 'discovery' in vcap:
        discreds = vcap['discovery'][0]['credentials']
        disuser = discreds['username']
        dispassword = discreds['password']
        disurl = discreds['url']
        discovery = Discovery(disurl, disuser, dispassword,
                              discovery_collection_id,
                              discovery_configuration_id,
                              discovery_environment_id)
    if 'natural_language_classifier' in vcap:
        nlccreds = vcap['natural_language_classifier'][0]['credentials']
        nlcuser = nlccreds['username']
        nlcpassword = nlccreds['password']
        nlcurl = nlccreds['url']
        classifier = NLC(nlcurl, nlcuser, nlcpassword, classifier_id)
    if 'speech_to_text' in vcap:
        speechcreds = vcap['speech_to_text'][0]['credentials']
        speechuser = speechcreds['username']
        speechpassword = speechcreds['password']
        speechurl = speechcreds['url']
        Speech = Speech_to_text(speechurl, speechuser, speechpassword)
# For local development the same structure is read from a JSON file;
# here all three services are required (no per-service guards).
elif os.path.isfile('vcap-local-back.json'):
    logging.basicConfig(filename="welcome.log", level=logging.DEBUG)
    with open('vcap-local-back.json') as f:
        logging.info('Using Local VCAP credentials')
        vcap = json.load(f)
        discreds = vcap['discovery'][0]['credentials']
        disuser = discreds['username']
        dispassword = discreds['password']
        disurl = discreds['url']
        discovery = Discovery(disurl, disuser, dispassword,
                              discovery_collection_id,
                              discovery_configuration_id,
                              discovery_environment_id)
        speechcreds = vcap['speech_to_text'][0]['credentials']
        speechuser = speechcreds['username']
        speechpassword = speechcreds['password']
        speechurl = speechcreds['url']
        Speech = Speech_to_text(speechurl, speechuser, speechpassword)
        nlccreds = vcap['natural_language_classifier'][0]['credentials']
        nlcuser = nlccreds['username']
        nlcpassword = nlccreds['password']
        nlcurl = nlccreds['url']
        classifier = NLC(nlcurl, nlcuser, nlcpassword, classifier_id)
@app.route('/')
def Welcome():
    """Serve the static single-page UI."""
    return app.send_static_file('index.html')
@app.route('/audio')
def audiosend():
    """Serve the static audio-recording page."""
    return app.send_static_file('audio.html')
@app.route('/api/query', methods=['POST'])
def query_watson():
    """Classify the posted query and return matching Discovery articles.

    NOTE(review): handle_input() already json.dumps() its result, so the
    'result' field holds a JSON-encoded STRING, not an object -- presumably
    the client decodes twice; confirm before changing.
    """
    query_obj = request.get_json()
    return jsonify(result=handle_input(query_obj))
@app.route('/api/feedback', methods=['POST'])
def submit_feedback():
    """Accept relevance feedback for a query/document pair and store it
    as Discovery training data.

    Expects a JSON body with 'query', 'document_id' and 'feedback' keys.
    """
    request_obj = request.get_json()
    try:
        discovery_feedback_add_edit(request_obj['query'], request_obj['document_id'], request_obj['feedback'])
        return jsonify(result={"response" : "Feedback submitted"})
    except Exception:
        # Bug fix: the error payload previously used the misspelled keyword
        # 'resylt', so error responses carried the wrong JSON key. Also
        # narrowed the bare 'except:' so SystemExit etc. are not swallowed.
        return jsonify(result={"error": "Error submitting feedback"})
def discovery_feedback(query, document_id, relevance):
    """Post a single relevance-training example to Watson Discovery.

    NOTE(review): superseded by discovery_feedback_add_edit() below and not
    referenced anywhere in this module -- candidate for removal.
    """
    url = "https://gateway.watsonplatform.net/discovery/api/v1/environments/{0}/collections/{1}/training_data?version=2017-11-07".format(discovery_environment_id,discovery_collection_id)
    data = {
        "natural_language_query": query,
        "examples": [
            {
                "document_id": document_id,
                "relevance": relevance
            }
        ]
    }
    r = requests.post(url, auth=(discovery.creds['username'], discovery.creds['password']), json=data)
    # Python 2 print statement: logs the raw Response object to stdout.
    print r
def discovery_feedback_add_edit(query, document_id, relevance):
    """Add or update a Discovery training example, upserting at each level.

    Strategy: try to create a new training query; if Discovery answers that
    the query already exists, parse its id out of the error message and try
    to add the example to it; if the example also exists, update its
    relevance with a PUT. Each nested try/except branch handles one of these
    'ALREADY_EXISTS' responses -- the happy path is the except branches.
    """
    ALREADY_EXISTS = "ALREADY_EXISTS"
    url = "https://gateway.watsonplatform.net/discovery/api/v1/environments/{0}/collections/{1}/training_data?version=2017-11-07".format(discovery_environment_id,discovery_collection_id)
    data = {
        "natural_language_query": query,
        "examples": [
            {
                "document_id": document_id,
                "relevance": relevance
            }
        ]
    }
    # NOTE(review): 'headers' is built but never passed to requests.post.
    headers = {"content-type":"application/json"}
    r = requests.post(url, auth=(discovery.creds['username'], discovery.creds['password']), json=data)
    try:
        # A successful creation has no "error" key, so this raises KeyError
        # and we land in the outermost except: the pair was accepted.
        error_string = json.loads(r.content)["error"]
        if ALREADY_EXISTS in error_string:
            # Extract the existing training-query id from the error text.
            query_id = error_string.split(' already exists in collection')[0]
            query_id = query_id.split('id ')[-1]
            data = {
                "document_id": document_id,
                "relevance": relevance
            }
            print "Query already exists:",query_id
            url = "https://gateway.watsonplatform.net/discovery/api/v1/environments/{0}/collections/{1}/training_data/{2}/examples?version=2017-11-07".format(discovery_environment_id,discovery_collection_id,query_id)
            r = requests.post(url, auth=(discovery.creds['username'], discovery.creds['password']), json=data)
            try:
                error_string = json.loads(r.content)["error"]
                print error_string
                if ALREADY_EXISTS in error_string:
                    # Extract the existing example (document) id the same way.
                    example_id = error_string.split(' already has an example')[0]
                    example_id = example_id.split('Document id ')[-1]
                    print example_id
                    print "document already exists:",example_id
                    url = "https://gateway.watsonplatform.net/discovery/api/v1/environments/{0}/collections/{1}/training_data/{2}/examples/{3}?version=2017-11-07".format(discovery_environment_id,discovery_collection_id,query_id,example_id)
                    r = requests.put(url, auth=(discovery.creds['username'], discovery.creds['password']), json=data)
                    try:
                        error_string = json.loads(r.content)["error"]
                        print error_string
                    except:
                        print "Document score updated."
            except:
                print "Document added to query."
    except:
        print "New Query/document pair accepted."
def handle_input(input_object):
    """Classify the user's query and collect matching Discovery articles.

    :param input_object: dict with 'queryText' and 'category' keys; an empty
        category means the NLC classifier decides.
    :return: JSON string with 'error', 'articles' and 'categories' keys.
    """
    return_object = {'error': '', 'articles': [], 'categories': []}
    user_input = input_object['queryText']
    user_category = input_object['category']
    logging.info('welcome.handle_input(): queryText: ' + user_input + ' category: ' + user_category)
    try:
        categories = []
        if not user_category:
            categories = nlc(user_input)
        else:
            categories.append(user_category)
        return_object['categories'] = categories
        # Only search when the category is unambiguous.
        if len(categories) == 1:
            matches = discovery.query(user_input, categories[0])
            for match in matches:
                return_object['articles'].append({'html': match['html'], 'document_id': match['id']})
    except Exception:
        # Previously a bare 'except:' that silently swallowed everything
        # (including SystemExit/KeyboardInterrupt) with no trace. Keep the
        # generic user-facing error but log the cause and let BaseException
        # propagate.
        logging.exception('welcome.handle_input(): query failed')
        return_object['error'] = 'Error searching for request.'
    return json.dumps(return_object)
@app.route('/audio/blob', methods=['GET', 'POST'])
def get_blob():
    """Accept an uploaded WAV blob and return its speech-to-text transcript.

    NOTE(review): a GET request (or a POST check failing) falls through and
    returns None; the file is always written to the same static/test.wav
    path, so concurrent uploads overwrite each other.
    """
    if request.method == 'POST':
        a = request.files['data']
        fname = os.path.join(os.getcwd()+"/static", "test.wav")
        a.save(fname)
        text = Speech.speech_to_text(fname)
        return text
def nlc(s):
    """Classify text *s* with the Natural Language Classifier client;
    the result is consumed as a list of category labels."""
    return classifier.classify(s)
# Cloud Foundry supplies the listen port via PORT; default to 5000 locally.
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=int(port))
| 37.073913
| 239
| 0.644424
|
4a099e901a1f2baa5221a42cc2d3b4e301ed328e
| 32,446
|
py
|
Python
|
src/ggrc/converters/handlers/handlers.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/converters/handlers/handlers.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/converters/handlers/handlers.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Generic handlers for imports and exports."""
import json
import re
from logging import getLogger
from datetime import date
from datetime import datetime
from dateutil.parser import parse
from sqlalchemy import and_
from sqlalchemy import or_
from ggrc import db
from ggrc.converters import get_exportables, errors
from ggrc.login import get_current_user
from ggrc.models import all_models
from ggrc.models.exceptions import ValidationError
from ggrc.models.reflection import AttributeInfo
from ggrc.rbac import permissions
# pylint: disable=invalid-name
from ggrc.services import signals
# Module-level logger for import/export handler diagnostics.
logger = getLogger(__name__)
# Column-key prefixes marking mapping columns and custom-attribute columns.
MAPPING_PREFIX = "__mapping__:"
CUSTOM_ATTR_PREFIX = "__custom__:"
class ColumnHandler(object):
  """Default column handler.

  This handler can be used on any model attribute that can accept normal text
  value.
  """

  # special marker to set the field empty
  EXPLICIT_EMPTY_VALUE = {"-", "--", "---"}

  def __init__(self, row_converter, key, **options):
    self.row_converter = row_converter
    self.key = key
    # Parsed value; populated by set_value() / parse_item().
    self.value = None
    # When True, set_obj_attr() writes even a falsy value.
    self.set_empty = False
    self.is_duplicate = False
    self.raw_value = options.get("raw_value", "").strip()
    self.validator = options.get("validator")
    self.mandatory = options.get("mandatory", False)
    self.view_only = options.get("view_only", False)
    self.default = options.get("default")
    self.description = options.get("description", "")
    self.display_name = options.get("display_name", "")
    self.dry_run = row_converter.block_converter.converter.dry_run
    self.new_objects = self.row_converter.block_converter.converter.new_objects
    self.unique = options.get("unique", False)
    self.ignore = False

  def value_explicitly_empty(self, value):
    """Check if the user typed one of the explicit 'clear this field' markers."""
    return value in self.EXPLICIT_EMPTY_VALUE

  def check_unique_consistency(self):
    """Add a duplicate-value error if another object has the same unique value."""
    if not self.unique:
      return
    if not self.value:
      return
    if not self.row_converter.obj:
      return
    if self.is_duplicate:
      # a hack to avoid two different errors for the same non-unique cell
      return
    nr_duplicates = self.row_converter.object_class.query.filter(and_(
        getattr(self.row_converter.object_class, self.key) == self.value,
        self.row_converter.object_class.id != self.row_converter.obj.id
    )).count()
    if nr_duplicates > 0:
      self.add_error(
          errors.DUPLICATE_VALUE, column_name=self.key, value=self.value
      )
      self.row_converter.set_ignore()

  def set_value(self):
    """Parse the raw cell content and store the result on the handler."""
    self.value = self.parse_item()

  def get_value(self):
    """Get the current value of this column from the instance."""
    return getattr(self.row_converter.obj, self.key, self.value)

  def add_error(self, template, **kwargs):
    """Register an error on the current row."""
    self.row_converter.add_error(template, **kwargs)

  def add_warning(self, template, **kwargs):
    """Register a warning on the current row."""
    self.row_converter.add_warning(template, **kwargs)

  def parse_item(self):
    """Default parser: return the raw cell text unchanged."""
    return self.raw_value

  def set_obj_attr(self):
    """Write the parsed value onto the object, reporting setter failures."""
    # A falsy value is only written when set_empty was requested explicitly.
    if not self.set_empty and not self.value:
      return
    try:
      if getattr(self.row_converter.obj, self.key, None) != self.value:
        setattr(self.row_converter.obj, self.key, self.value)
    except ValueError as e:
      self.add_error(
          errors.VALIDATION_ERROR,
          column_name=self.display_name,
          message=e.message
      )
    except:  # pylint: disable=bare-except
      self.add_error(errors.UNKNOWN_ERROR)
      logger.exception(
          "Import failed with setattr(%r, %r, %r)",
          self.row_converter.obj,
          self.key,
          self.value,
      )

  def get_default(self):
    """Return the column default, calling it if it is a factory."""
    if callable(self.default):
      return self.default()
    return self.default

  def insert_object(self):
    """ For inserting fields such as custom attributes and mappings """
    pass
class DeleteColumnHandler(ColumnHandler):
  """Column handler for deleting objects."""

  # this is a white list of objects that can be deleted in a cascade
  # e.g. deleting a Market can delete the associated Relationship object too
  DELETE_WHITELIST = {"Relationship", "AccessControlList", "ObjectPerson"}
  ALLOWED_VALUES = {"", "no", "false", "true", "yes", "force"}
  TRUE_VALUES = {"true", "yes", "force"}

  def __init__(self, *args, **kwargs):
    # "force" in the cell allows cascaded deletes beyond the whitelist.
    self._allow_cascade = False
    super(DeleteColumnHandler, self).__init__(*args, **kwargs)

  def get_value(self):
    # Never export anything for the delete column.
    return ""

  def parse_item(self):
    """Parse the delete flag.

    NOTE: deletion via import is currently disabled -- any non-empty value
    errors out here, so the code below only ever runs for an empty cell
    (yielding is_delete == False).
    """
    if self.raw_value:
      self.add_error(
          u"Line {line}: Delete column is temporary disabled, please use web "
          u"interface to delete current object."
      )
      return None
    if self.raw_value.lower() not in self.ALLOWED_VALUES:
      self.add_error(errors.WRONG_VALUE_ERROR, column_name=self.display_name)
      return False
    is_delete = self.raw_value.lower() in self.TRUE_VALUES
    self._allow_cascade = self.raw_value.lower() == "force"
    self.row_converter.is_delete = is_delete
    return is_delete

  def set_obj_attr(self):
    """Delete the row's object inside a nested transaction.

    Rolls back on dry runs or ignored rows; otherwise commits and removes
    the deleted records from the fulltext index.
    """
    if not self.value:
      return
    obj = self.row_converter.obj
    if self.row_converter.is_new:
      self.add_error(
          errors.DELETE_NEW_OBJECT_ERROR, object_type=obj.type, slug=obj.slug
      )
      return
    if self.row_converter.ignore:
      return
    tr = db.session.begin_nested()
    try:
      tr.session.delete(obj)
      # Count cascaded deletions that are NOT whitelisted side effects.
      deleted = len(
          [
              o for o in tr.session.deleted
              if o.type not in self.DELETE_WHITELIST
          ]
      )
      if deleted > 1 and not self._allow_cascade:
        self.add_error(
            errors.DELETE_CASCADE_ERROR, object_type=obj.type, slug=obj.slug
        )
    finally:
      # Errors added above set row_converter.ignore, triggering the rollback.
      if self.dry_run or self.row_converter.ignore:
        tr.rollback()
      else:
        indexer = self.row_converter.block_converter.converter.indexer
        if indexer is not None:
          for o in tr.session.deleted:
            indexer.delete_record(o.id, o.__class__.__name__, commit=False)
        tr.commit()
class StatusColumnHandler(ColumnHandler):
  """Handler for the object state (status) column."""

  def __init__(self, row_converter, key, **options):
    self.key = key
    self.valid_states = row_converter.object_class.VALID_STATES
    # Case-insensitive lookup from user input to the canonical state name.
    self.state_mappings = {str(state).lower(): state for state in self.valid_states}
    super(StatusColumnHandler, self).__init__(row_converter, key, **options)

  def parse_item(self):
    """Map the raw value to a valid state, warning and falling back otherwise."""
    requested_state = self.state_mappings.get(self.raw_value.lower())
    if requested_state is not None:
      return requested_state
    # Unknown state: keep the current one if set, otherwise use the default.
    fallback = self.row_converter.obj.status
    if fallback:
      warning_template = errors.WRONG_VALUE_CURRENT
    else:
      fallback = self.row_converter.object_class.default_status()
      warning_template = errors.WRONG_VALUE_DEFAULT
    self.add_warning(warning_template, column_name=self.display_name)
    return fallback
class UserColumnHandler(ColumnHandler):
  """Handler for a single user fields.

  Used for primary and secondary contacts.
  """

  def get_users_list(self):
    """Resolve every email line to a Person, warning on unknown emails."""
    users = set()
    email_lines = self.raw_value.splitlines()
    owner_emails = filter(unicode.strip, email_lines)  # noqa
    for raw_line in owner_emails:
      email = raw_line.strip().lower()
      person = self.get_person(email)
      if person:
        users.add(person)
      else:
        self.add_warning(errors.UNKNOWN_USER_WARNING, email=email)
    return list(users)

  def get_person(self, email):
    """Look up (or create via user_generator) a Person by email, cached
    per import run in new_objects; returns None when lookup fails."""
    from ggrc.utils import user_generator
    new_objects = self.row_converter.block_converter.converter.new_objects
    if email not in new_objects[all_models.Person]:
      try:
        new_objects[all_models.Person][email] = user_generator.find_user(email)
      except ValueError as ex:
        self.add_error(
            errors.VALIDATION_ERROR,
            column_name=self.display_name,
            message=ex.message
        )
        return None
    return new_objects[all_models.Person].get(email)

  def _parse_raw_data_to_emails(self):
    """Parse raw data: split emails if necessary"""
    email_list = re.split("[, ;\n]+", self.raw_value.lower().strip())
    email_list = filter(None, email_list)
    return sorted(email_list)

  def _parse_emails(self, email_list):
    """Parse user email. If it were multiply emails in this column parse them.

    We use next rules:
    - Return first valid user.
    - If this field is mandatory and there were no emails provided
      MISSING_VALUE error occurs.
    - If field is mandatory and there were no valid users NO_VALID_USERS
      error occurs.
    - If field isn't mandatory and there were no valid users UNKNOWN_USER
      warning occurs for each invalid email.
    """
    person = None
    for email in email_list:
      person = self.get_person(email)
      if person:
        break
    if self.mandatory:
      if not email_list:
        self.add_error(
            errors.MISSING_VALUE_ERROR, column_name=self.display_name
        )
      elif not person:
        self.add_error(
            errors.NO_VALID_USERS_ERROR, column_name=self.display_name
        )
    else:
      if email_list and not person:
        for email in email_list:
          self.add_warning(errors.UNKNOWN_USER_WARNING, email=email)
    return person

  def parse_item(self):
    """Parse the cell into a single Person, warning when several are given."""
    email_list = self._parse_raw_data_to_emails()
    if len(email_list) > 1:
      self.add_warning(
          errors.MULTIPLE_ASSIGNEES, column_name=self.display_name
      )
    return self._parse_emails(email_list)

  def get_value(self):
    """Export the person's email, or the parsed value when none is set."""
    person = getattr(self.row_converter.obj, self.key)
    if person:
      return person.email
    return self.value
class UsersColumnHandler(UserColumnHandler):
  """Handler for multi user fields."""

  def _missed_mandatory_person(self):
    """Warn about a missing mandatory person and fall back to the
    current user as the assignee."""
    self.add_warning(errors.OWNER_MISSING, column_name=self.display_name)
    return [get_current_user()]

  def parse_item(self):
    """Parses multi users field.

    Returns a list of Person objects, None when explicitly emptied, or the
    current-user fallback when a mandatory field resolves to nobody.
    """
    people = set()
    # An explicit empty marker ("-", "--", "---") clears the field.
    if self.value_explicitly_empty(self.raw_value):
      if not self.mandatory:
        self.set_empty = True
        return None
      return self._missed_mandatory_person()
    email_lines = self.raw_value.splitlines()
    owner_emails = filter(unicode.strip, email_lines)  # noqa
    for raw_line in owner_emails:
      email = raw_line.strip().lower()
      person = self.get_person(email)
      if person:
        people.add(person)
      else:
        self.add_warning(errors.UNKNOWN_USER_WARNING, email=email)
    if not people and self.mandatory:
      return self._missed_mandatory_person()
    return list(people)
class DateColumnHandler(ColumnHandler):
  """Handler for fields that contains date."""

  def parse_item(self):
    """Parse a date in YYYY-MM-DD or MM/DD/YYYY format.

    View-only columns only check whether the imported value differs from the
    stored one. Read-only date keys are protected by check_readonly_changes.
    """
    if self.view_only:
      self._check_errors_non_importable_objects(
          self.get_value(), self.raw_value
      )
      return
    value = self.raw_value
    # Only the two supported date layouts are accepted.
    if value and not re.match(
        r"[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}|"
        r"[0-9]{1,2}/[0-9]{1,2}/[0-9]{4}", self.raw_value
    ):
      self.add_error(errors.UNKNOWN_DATE_FORMAT, column_name=self.display_name)
      return
    # TODO: change all importable date columns' type from 'DateTime'
    # to 'Date' type. Remove if statement after it.
    try:
      value = value or self.get_value()
      if not value:
        if self.mandatory:
          self.add_error(errors.MISSING_VALUE_ERROR,
                         column_name=self.display_name)
        return None
      parsed_value = parse(value)
      # Match the attribute's existing type: strip the time part for Date.
      if isinstance(getattr(self.row_converter.obj, self.key, None), date):
        parsed_value = parsed_value.date()
      if self.key in ("last_assessment_date", "verified_date") and \
         self.check_readonly_changes(parsed_value, self.key):
        return None
      return parsed_value
    except:  # pylint: disable=bare-except
      self.add_error(errors.WRONG_VALUE_ERROR, column_name=self.display_name)

  def _check_errors_non_importable_objects(self, object_date, import_date):
    """Warn when an export-only column receives a value different from the
    stored one (tries both supported date layouts)."""
    if not import_date:
      return
    if object_date:
      try:
        import_date = datetime.strptime(import_date, "%Y-%m-%d")
      except ValueError:
        try:
          import_date = datetime.strptime(import_date, "%m/%d/%Y")
        except ValueError:
          self.add_warning(
              errors.EXPORT_ONLY_WARNING, column_name=self.display_name
          )
          return
      object_date = datetime.strptime(object_date, '%m/%d/%Y')
      if object_date == import_date:
        return
    self.add_warning(errors.EXPORT_ONLY_WARNING, column_name=self.display_name)

  def get_value(self):
    """Export the stored date formatted as MM/DD/YYYY, or an empty string."""
    value = getattr(self.row_converter.obj, self.key)
    if value:
      return value.strftime("%m/%d/%Y")
    return ""

  def check_readonly_changes(self, new_date, attr_name):
    """Check if the new object don't contain changed date."""
    old_date = getattr(self.row_converter.obj, attr_name, None)
    is_modified = old_date and new_date and old_date != new_date
    if is_modified:
      self.add_warning(errors.UNMODIFIABLE_COLUMN,
                       column_name=self.display_name,)
      return True
    return False
class NullableDateColumnHandler(DateColumnHandler):
  """Nullable date column handler."""

  # Exported placeholder for an unset date.
  DEFAULT_EMPTY_VALUE = "--"

  def parse_item(self):
    """Datetime column can be nullable.

    A regular value is delegated to DateColumnHandler; an empty marker
    either clears the field or errors when the column is mandatory.
    """
    if not self.value_explicitly_empty(self.raw_value) and \
       self.raw_value != "":
      return super(NullableDateColumnHandler, self).parse_item()
    if self.mandatory:
      self.add_error(
          errors.MISSING_COLUMN, s="", column_names=self.display_name
      )
    else:
      self.set_empty = True

  def get_value(self):
    """Export the formatted date, or the empty-value placeholder."""
    if getattr(self.row_converter.obj, self.key):
      return super(NullableDateColumnHandler, self).get_value()
    return self.DEFAULT_EMPTY_VALUE
class EmailColumnHandler(ColumnHandler):
  """Handler for the Person email column."""

  def parse_item(self):
    """Normalize and validate the email (emails are case insensitive)."""
    email = self.raw_value.lower()
    if not email:
      self.add_error(errors.MISSING_VALUE_ERROR, column_name="Email")
      return email
    if not all_models.Person.is_valid_email(email):
      self.add_error(errors.WRONG_VALUE_ERROR, column_name=self.display_name)
      return ""
    return email
class TextColumnHandler(ColumnHandler):
  """Handler for single-line text fields."""

  def parse_item(self):
    """Collapse whitespace and enforce mandatory presence."""
    cleaned = self.clean_whitespaces(self.raw_value or "")
    if self.mandatory and not cleaned:
      self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
    return cleaned

  @staticmethod
  def clean_whitespaces(value):
    """Replace every run of whitespace (incl. newlines) with one space."""
    return re.sub(r'\s+', " ", value)
class MappingColumnHandler(ColumnHandler):
  """ Handler for mapped objects """
  def __init__(self, row_converter, key, **options):
    self.key = key
    exportable = get_exportables()
    self.attr_name = options.get("attr_name", "")
    self.mapping_object = exportable.get(self.attr_name)
    # Slug -> object map of instances created earlier in this import run.
    self.new_slugs = row_converter.block_converter.converter.new_objects[
        self.mapping_object
    ]
    # "unmap:" prefixed columns remove mappings instead of creating them.
    self.unmap = self.key.startswith(AttributeInfo.UNMAPPING_PREFIX)
    super(MappingColumnHandler, self).__init__(row_converter, key, **options)
  def parse_item(self):
    """Parse a list of slugs to be mapped.

    Parse a new line separated list of slugs and check if they are valid
    objects.

    Returns:
      list of objects. During dry_run, the list can contain a slug instead of
      an actual object if that object will be generated in the current import.
    """
    # pylint: disable=protected-access
    class_ = self.mapping_object
    lines = set(self.raw_value.splitlines())
    # Slugs are compared lower-cased; blank lines are dropped.
    slugs = set([slug.lower() for slug in lines if slug.strip()])
    objects = []
    for slug in slugs:
      obj = class_.query.filter_by(slug=slug).first()
      if obj:
        is_allowed_by_type = self._is_allowed_mapping_by_type(
            source_type=self.row_converter.obj.__class__.__name__,
            destination_type=class_.__name__,
        )
        if not is_allowed_by_type:
          self._add_mapping_warning(
              source=self.row_converter.obj,
              destination=obj,
          )
          continue
        if not permissions.is_allowed_update_for(obj):
          # The importing user may not modify the target object.
          self.add_warning(
              errors.MAPPING_PERMISSION_ERROR,
              object_type=class_._inflector.human_singular.title(),
              slug=slug,
          )
          continue
        objects.append(obj)
      elif slug in self.new_slugs and not self.dry_run:
        objects.append(self.new_slugs[slug])
      elif slug in self.new_slugs and self.dry_run:
        # During dry run the object does not exist yet; keep the slug.
        objects.append(slug)
      else:
        self.add_warning(
            errors.UNKNOWN_OBJECT,
            object_type=class_._inflector.human_singular.title(),
            slug=slug
        )
    if self.mandatory and not objects and self.row_converter.is_new:
      self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
    return objects
  def _is_allowed_mapping_by_type(self, source_type, destination_type):
    # pylint: disable=no-self-use
    """Checks if a mapping is allowed between given types."""
    try:
      all_models.Relationship.validate_relation_by_type(source_type,
                                                        destination_type)
    except ValidationError:
      return False
    return True
  def _add_mapping_warning(self, source, destination):
    """Add warning if we have changes mappings """
    mapping = all_models.Relationship.find_related(source, destination)
    # Warn only when the requested map/unmap would actually change state.
    if (self.unmap and mapping) or (not self.unmap and not mapping):
      self.add_warning(
          errors.MAPPING_SCOPING_ERROR,
          object_type=destination.__class__.__name__,
          action="unmap" if self.unmap else "map"
      )
  def set_obj_attr(self):
    # Mappings are persisted later by insert_object, not set on the object.
    self.value = self.parse_item()
  def insert_object(self):
    """ Create a new mapping object """
    if self.dry_run or not self.value:
      return
    current_obj = self.row_converter.obj
    # NOTE(review): ``relationships`` is appended to but never read, and
    # ``mapping`` is only refreshed when current_obj.id is truthy, so a value
    # from a previous loop iteration can carry over -- confirm intended.
    relationships = []
    mapping = None
    for obj in self.value:
      if current_obj.id:
        mapping = all_models.Relationship.find_related(current_obj, obj)
      if not self.unmap and not mapping:
        if not (
            self.mapping_object.__name__ == "Audit" and
            not getattr(current_obj, "allow_map_to_audit", True)
        ):
          mapping = all_models.Relationship(source=current_obj,
                                            destination=obj)
          # Notify both sides of the newly created mapping.
          signals.Import.mapping_created.send(obj.__class__,
                                              instance=obj,
                                              counterparty=current_obj)
          signals.Import.mapping_created.send(current_obj.__class__,
                                              instance=current_obj,
                                              counterparty=obj)
          relationships.append(mapping)
          db.session.add(mapping)
        else:
          self.add_warning(
              errors.SINGLE_AUDIT_RESTRICTION,
              mapped_type=obj.type,
              object_type=current_obj.type
          )
      elif self.unmap and mapping:
        if not (
            self.mapping_object.__name__ == "Audit" and
            not getattr(current_obj, "allow_unmap_from_audit", True)
        ):
          db.session.delete(mapping)
        else:
          self.add_warning(
              errors.UNMAP_AUDIT_RESTRICTION,
              mapped_type=obj.type,
              object_type=current_obj.type
          )
    db.session.flush()
    # Mark as handled so repeated calls in the same run are no-ops.
    self.dry_run = True
  def get_value(self):
    # Export mapped slugs from the block-level cache; unmap columns export "".
    if self.unmap or not self.mapping_object:
      return ""
    cache = self.row_converter.block_converter.get_mapping_cache()
    slugs = cache[self.row_converter.obj.id][self.mapping_object.__name__]
    return "\n".join(slugs)
  def set_value(self):
    pass
class ConclusionColumnHandler(ColumnHandler):
  """ Handler for design and operationally columns in ControlAssesments """

  # Case-insensitive lookup table of canonical conclusion spellings.
  CONCLUSION_MAP = {i.lower(): i
                    for i in all_models.Assessment.VALID_CONCLUSIONS}

  def parse_item(self):
    """Map the raw value onto a valid conclusion, warning on bad input."""
    normalized = self.CONCLUSION_MAP.get(self.raw_value.lower(), "")
    if self.raw_value and not normalized:
      # Non-empty values that match no valid conclusion are reported.
      self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
    return normalized
class OptionColumnHandler(ColumnHandler):
  """Column handler for option fields.

  This column handler is used only for option fields that have their values
  stored in the Options table. Hardcoded options and boolean options should
  not be handled by this class.
  """

  def parse_item(self):
    """Look up the Option row matching the raw value for this column."""
    if not self.mandatory and self.value_explicitly_empty(self.raw_value):
      self.set_empty = True
      return None
    # Some option roles are stored prefixed with the object's table name.
    prefixed_key = "{}_{}".format(
        self.row_converter.object_class._inflector.table_singular, self.key
    )
    role_filter = or_(all_models.Option.role == self.key,
                      all_models.Option.role == prefixed_key)
    return all_models.Option.query.filter(
        and_(all_models.Option.title == self.raw_value, role_filter)
    ).first()

  def get_value(self):
    """Export the option title, or "--" when the option is unset."""
    option = getattr(self.row_converter.obj, self.key, None)
    if option is None:
      return "--"
    # Some models expose title as a method rather than a plain attribute.
    return option.title() if callable(option.title) else option.title
class ParentColumnHandler(ColumnHandler):
  """ handler for directly mapped columns """

  # Subclasses set this to the model class of the parent object.
  parent = None

  def parse_item(self):
    """ get parent object """
    # pylint: disable=protected-access
    if self.raw_value == "":
      self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
      return None
    slug = self.raw_value
    # Prefer objects created earlier in this same import run.
    obj = self.new_objects.get(self.parent, {}).get(slug)
    if obj is None:
      obj = self.parent.query.filter(self.parent.slug == slug).first()
      if obj is None:
        self.add_error(
            errors.UNKNOWN_OBJECT,
            object_type=self.parent._inflector.human_singular.title(),
            slug=slug
        )
        return None
    context_id = None
    if hasattr(obj, "context_id") and \
       hasattr(self.row_converter.obj, "context_id"):
      context_id = obj.context_id
      if context_id is not None:
        # The importing user must be allowed to create objects in the
        # parent's context (unless permission conditions apply).
        name = self.row_converter.obj.__class__.__name__
        if not permissions.is_allowed_create(name, None, context_id) \
           and not permissions.has_conditions('create', name):
          self.add_error(
              errors.MAPPING_PERMISSION_ERROR, object_type=obj.type, slug=slug
          )
          return None
    return obj

  def set_obj_attr(self):
    super(ParentColumnHandler, self).set_obj_attr()
    # inherit context
    obj = self.row_converter.obj
    parent = getattr(obj, self.key, None)
    if parent is not None and \
       hasattr(obj, "context") and \
       hasattr(parent, "context") and \
       parent.context is not None:
      obj.context = parent.context

  def get_value(self):
    """Export the parent object's slug, or None when no parent is set."""
    value = getattr(self.row_converter.obj, self.key, self.value)
    if not value:
      return None
    return value.slug
class ProgramColumnHandler(ParentColumnHandler):
  """Handler for program column on audit imports."""

  parent = all_models.Program

  def set_obj_attr(self):
    """Set the parent program, warning when it differs on an existing audit."""
    if self.row_converter.is_new:
      super(ProgramColumnHandler, self).set_obj_attr()
      return
    # The program of an existing audit may not be changed through import.
    if self.row_converter.obj.program_id != self.value.id:
      self.add_warning(errors.UNMODIFIABLE_COLUMN,
                       column_name=self.display_name)
class RequirementDirectiveColumnHandler(MappingColumnHandler):
  # Handler that resolves a slug against all allowed directive models.

  def get_directive_from_slug(self, directive_class, slug):
    # Prefer objects created earlier in this same import run.
    if slug in self.new_objects[directive_class]:
      return self.new_objects[directive_class][slug]
    return directive_class.query.filter_by(slug=slug).first()

  def parse_item(self):
    """ get a directive from slug """
    allowed_directives = [all_models.Policy, all_models.Regulation,
                          all_models.Standard, all_models.Contract]
    if self.raw_value == "":
      return None
    slug = self.raw_value
    for directive_class in allowed_directives:
      directive = self.get_directive_from_slug(directive_class, slug)
      if directive is not None:
        self.mapping_object = type(directive)
        return [directive]
    # NOTE(review): object_type="Program" looks copy-pasted -- the slug being
    # reported here is a directive, not a program; confirm before changing.
    self.add_error(errors.UNKNOWN_OBJECT, object_type="Program", slug=slug)
    return None

  def get_value(self):
    # Legacy field. With the new mapping system it is not possible to determine
    # which was the primary directive that has been mapped
    return ""
class AuditColumnHandler(MappingColumnHandler):
  """Handler for mandatory Audit mappings on Assessments."""

  def __init__(self, row_converter, key, **options):
    # This handler is always bound to the "map:audit" column.
    key = "{}audit".format(MAPPING_PREFIX)
    super(AuditColumnHandler, self).__init__(row_converter, key, **options)

  def set_obj_attr(self):
    """Set values to be saved.

    This saves the value for creating the relationships, and if the dry_run
    flag is not set, it will also set the correct context to the parent object.
    """
    self.value = self.parse_item()
    if not self.value:
      # If there is no mandatory value, the parse item will already mark the
      # error, so there is no need to do anything here.
      return
    audit = self.value[0]
    # During dry run parse_item may return a slug string instead of an Audit,
    # in which case the block below is skipped.
    if isinstance(audit, all_models.Audit):
      old_slug = None
      if (
          hasattr(self.row_converter.obj, "audit") and
          self.row_converter.obj.audit
      ):
        old_slug = self.row_converter.obj.audit.slug
      else:
        rel_audits = self.row_converter.obj.related_objects(_types="Audit")
        if rel_audits:
          old_slug = rel_audits.pop().slug
      if not self.row_converter.is_new and audit.slug != old_slug:
        # The audit of an existing object may not be changed through import.
        self.add_warning(
            errors.UNMODIFIABLE_COLUMN, column_name=self.display_name
        )
        self.value = []
      else:
        # New objects inherit the audit and its context.
        self.row_converter.obj.context = audit.context
        self.row_converter.obj.audit = audit
class ObjectPersonColumnHandler(UserColumnHandler):
  """
  ObjectPerson handler for all specific columns such as "owner" or any other
  role. This handler will remove all people not listed in the value and will
  add people that are missing.
  """

  def parse_item(self):
    # Resolve the raw value into a list of Person objects.
    return self.get_users_list()

  def set_obj_attr(self):
    # People are linked through ObjectPerson rows in insert_object instead.
    pass

  def get_value(self):
    """Export the emails of all people linked to the current object."""
    object_person = db.session.query(
        all_models.ObjectPerson.person_id,
    ).filter_by(
        personable_id=self.row_converter.obj.id,
        personable_type=self.row_converter.obj.__class__.__name__
    )
    users = all_models.Person.query.filter(
        all_models.Person.id.in_(object_person),
    )
    emails = [user.email for user in users]
    return "\n".join(emails)

  def remove_current_people(self):
    """Unlink every person currently attached to the object."""
    all_models.ObjectPerson.query.filter_by(
        personable_id=self.row_converter.obj.id,
        personable_type=self.row_converter.obj.__class__.__name__
    ).delete()

  def insert_object(self):
    """Replace the current people links with the parsed list of people."""
    if self.dry_run or not self.value:
      return
    self.remove_current_people()
    for person in self.value:
      object_person = all_models.ObjectPerson(
          personable=self.row_converter.obj,
          person=person,
          context=self.row_converter.obj.context
      )
      db.session.add(object_person)
    db.session.flush()
    # Mark as handled so repeated calls in the same run are no-ops.
    self.dry_run = True
class PersonMappingColumnHandler(ObjectPersonColumnHandler):
  """
  This handler will only add people listed in self.value if they are not yet
  connected to the current object.
  """

  def remove_current_people(self):
    """Drop already-connected people from the list instead of unmapping."""
    obj = self.row_converter.obj

    def _not_yet_mapped(person):
      # A person is new when no ObjectPerson row links them to obj yet.
      return not all_models.ObjectPerson.query.filter_by(
          personable_id=obj.id,
          personable_type=obj.__class__.__name__,
          person=person
      ).count()

    self.value = [person for person in self.value if _not_yet_mapped(person)]
class PersonUnmappingColumnHandler(ObjectPersonColumnHandler):
  """
  This handler will only remove people listed in self.value if they are already
  connected to the current object.
  """

  def insert_object(self):
    """Delete ObjectPerson links (and related UserRoles) for listed people."""
    if self.dry_run or not self.value:
      return
    obj = self.row_converter.obj
    context = getattr(obj, 'context', None)
    user_role = getattr(all_models, 'UserRole', None)
    for person in self.value:
      all_models.ObjectPerson.query.filter_by(
          personable_id=obj.id,
          personable_type=obj.__class__.__name__,
          person=person
      ).delete()
      if context and user_role:
        # The fix is a bit hackish, because it uses ``UserRole`` model
        # from ``ggrc_basic_permissions``. But it is the only way I found to
        # fix the issue, without massive refactoring.
        user_role.query.filter_by(person=person, context=context).delete()
    # Mark as handled so repeated calls in the same run are no-ops.
    self.dry_run = True
class DocumentsColumnHandler(ColumnHandler):
  """Handler for document columns given as "<title> <link>" lines."""

  def get_value(self):
    """Export each document as "<title> <link>", one per line."""
    return u"\n".join(
        u"{} {}".format(doc.title, doc.link)
        for doc in self.row_converter.obj.documents
    )

  def parse_item(self):
    """Parse newline separated "<title> <link>" pairs into Documents."""
    documents = []
    for raw_line in self.raw_value.splitlines():
      parts = raw_line.rsplit(" ", 1)
      if len(parts) != 2:
        # A line without a space cannot be split into title and link.
        self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
        continue
      title, link = parts
      documents.append(
          all_models.Document(title=title.strip(), link=link.strip())
      )
    return documents

  def set_obj_attr(self):
    pass

  def insert_object(self):
    """Replace the object's documents with the parsed list."""
    if self.dry_run or not self.value:
      return
    self.row_converter.obj.documents = self.value
    self.dry_run = True
class LabelsHandler(ColumnHandler):
  """ Handler for labels """

  def parse_item(self):
    """Parse a comma separated list of label names into label dicts."""
    if self.raw_value is None:
      return None
    names = {part.strip() for part in self.raw_value.split(',')
             if part.strip()}
    return [{'id': None, 'name': name} for name in names]

  def set_obj_attr(self):
    self.row_converter.obj.labels = self.value

  def get_value(self):
    """Export labels as a comma separated list of names."""
    return ','.join(label.name for label in self.row_converter.obj.labels)
class ExportOnlyColumnHandler(ColumnHandler):
  """Only on export column handler base class"""

  def __init__(self, *args, **kwargs):
    # Export-only columns can never be imported and are never mandatory.
    kwargs.update(view_only=True, mandatory=False)
    super(ExportOnlyColumnHandler, self).__init__(*args, **kwargs)

  def parse_item(self):
    pass

  def set_obj_attr(self):
    pass

  def insert_object(self):
    pass

  def set_value(self):
    pass
class DirecPersonMappingColumnHandler(ExportOnlyColumnHandler):
  """Export-only handler for a directly mapped person attribute."""

  def get_value(self):
    """Export the mapped person's email, or "" when nobody is set."""
    mapped_person = getattr(self.row_converter.obj, self.key, self.value)
    return getattr(mapped_person, "email", "")
class ExportOnlyDateColumnHandler(ExportOnlyColumnHandler):
  """Export-only handler that formats a date attribute."""

  def get_value(self):
    """Export the date as MM/DD/YYYY, or "" when unset."""
    date_value = getattr(self.row_converter.obj, self.key)
    return date_value.strftime("%m/%d/%Y") if date_value else ""
class ExportOnlyIssueTrackerColumnHandler(ExportOnlyColumnHandler):
  """Export-only handler for the issue tracker column."""

  def get_value(self):
    """Export the cached ticket tracker value for the current object."""
    tickets = self.row_converter.block_converter.get_ticket_tracker_cache()
    return tickets.get(self.row_converter.obj.id, "")
class ReviewersColumnHandler(ExportOnlyColumnHandler):
  """Only on export handler for Reviewers column"""

  def get_value(self):
    """Export reviewer emails sorted alphabetically, one per line."""
    reviewers = self.row_converter.obj.reviewers
    if not reviewers:
      return ''
    emails = sorted(reviewer.email for reviewer in reviewers)
    return '\n'.join(emails)
class JsonListColumnHandler(ColumnHandler):
  """Handler for fields with json list values."""

  def get_value(self):
    """Export a JSON-encoded list attribute as newline separated items.

    Returns "" when the attribute is empty. Invalid JSON is logged rather
    than raised so a single bad row does not abort the export.
    """
    json_values = getattr(self.row_converter.obj, self.key, "[]")
    # Assumes the stored JSON decodes to a list of strings; a non-list value
    # would make the join below fail.
    values = []
    if json_values:
      try:
        values = json.loads(json_values)
      except ValueError:
        # Lazy %-style args: the message is only built if it is emitted.
        logger.error(
            "Failed to convert %s field for %s %s",
            self.key, self.row_converter.obj.type, self.row_converter.obj.id
        )
    return "\n".join(values)
| 31.562257
| 79
| 0.672317
|
4a099edf1b9f36df43c09574f013246142a9dcb0
| 439
|
py
|
Python
|
geodescriber/config/base.py
|
Skydipper/Geodescriber
|
04b5160e5fe9a1a0bf4635276669f0de4ceb8ea7
|
[
"MIT"
] | null | null | null |
geodescriber/config/base.py
|
Skydipper/Geodescriber
|
04b5160e5fe9a1a0bf4635276669f0de4ceb8ea7
|
[
"MIT"
] | null | null | null |
geodescriber/config/base.py
|
Skydipper/Geodescriber
|
04b5160e5fe9a1a0bf4635276669f0de4ceb8ea7
|
[
"MIT"
] | 1
|
2021-02-18T13:16:46.000Z
|
2021-02-18T13:16:46.000Z
|
import os
from geodescriber.utils.files import BASE_DIR, PROJECT_DIR
# Service-wide configuration consumed at startup.
SETTINGS = {
    # Logging verbosity for the service.
    'logging': {
        'level': 'DEBUG'
    },
    # Port the HTTP service listens on.
    'service': {
        'port': 4501
    },
    # Google Earth Engine credentials and asset configuration.
    'gee': {
        'service_account': 'skydipper@skydipper-196010.iam.gserviceaccount.com',
        'privatekey_file': BASE_DIR + '/privatekey.json',
        'assets': {
            # NOTE(review): "geodesriber" looks misspelled, but the string must
            # match the actual GEE asset path -- confirm before changing.
            'geodescriber':'projects/wri-datalab/geodesriber-asset-v2'
        },
    }
}
| 23.105263
| 80
| 0.57631
|
4a099efb2258dce4e6026873f030aaf2714a5737
| 1,865
|
py
|
Python
|
generate_data.py
|
CaoDuyThanh/TIES_DataGeneration
|
31640fe992631802300efbdc53a0ef4a84cedff3
|
[
"MIT"
] | null | null | null |
generate_data.py
|
CaoDuyThanh/TIES_DataGeneration
|
31640fe992631802300efbdc53a0ef4a84cedff3
|
[
"MIT"
] | null | null | null |
generate_data.py
|
CaoDuyThanh/TIES_DataGeneration
|
31640fe992631802300efbdc53a0ef4a84cedff3
|
[
"MIT"
] | null | null | null |
import argparse
from TFGeneration.GenerateTFRecord import *
def parse_args(argv=None):
    """Parse command line options for the synthetic table data generator.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``
            (the new parameter is backward compatible with no-arg calls).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Tool to generate synthetic tables data.')
    parser.add_argument('--filesize', type=int, default=1, help='Number of images to store in one tfrecord. Default: 1.')
    parser.add_argument('--num_trecords', type=int, default=1000, help='Number of trecords files. Defult: 1000.')
    parser.add_argument('--threads', type=int, default=1, help='Number of threads to run. More threads less time. Default: 1.')
    parser.add_argument('--outpath', type=str, default='tfrecords/', help='Output directory to store generated tfrecords. Default: tfrecords/.')
    parser.add_argument('--imagespath', default='../Table_Detection_Dataset/unlv/train/images', help='Directory containing UNLV dataset images.')
    parser.add_argument('--ocrpath', default='../Table_Detection_Dataset/unlv/unlv_xml_ocr', help='Directory containing ground truths of characters in UNLV dataset.')
    parser.add_argument('--tablepath', default='../Table_Detection_Dataset/unlv/unlv _xml_gt', help='Directory containing ground truths of tables in UNLV dataset.')
    # BUG FIX: argparse raises TypeError when ``type=`` is combined with
    # ``action='store_true'``, so the boolean flags take no type argument.
    parser.add_argument('--visualizeimgs', action='store_true', help='Store the generated images (along than tfrecords).')
    parser.add_argument('--visualizebboxes', action='store_true', help='Store the images with bound boxes.')
    return parser.parse_args(argv)
def run_generator(args):
    """Build a GenerateTFRecord from parsed args and start generation."""
    # A minimum chunk size of 4 images per tfrecord is enforced here.
    file_size = max(int(args.filesize), 4)
    distributionfile = 'unlv_distribution'
    generator = GenerateTFRecord(args.outpath, args.num_trecords, file_size,
                                 args.imagespath, args.ocrpath, args.tablepath,
                                 args.visualizeimgs, args.visualizebboxes,
                                 distributionfile)
    generator.write_to_tf(args.threads)
if __name__ == '__main__':
    # CLI entry point: parse options, then generate the tfrecords.
    args = parse_args()
    run_generator(args)
| 62.166667
| 166
| 0.741555
|
4a099f09f076eda76f85a4f19e76b3bbea5a74d3
| 14,643
|
py
|
Python
|
Lib/test/test_multibytecodec_support.py
|
jimmyyu2004/jython
|
5b4dc2d54d01a6fda8c55d07b2608167e7a40769
|
[
"CNRI-Jython"
] | 332
|
2015-08-22T12:43:56.000Z
|
2022-03-17T01:05:43.000Z
|
Lib/test/test_multibytecodec_support.py
|
Pandinosaurus/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
[
"CNRI-Jython"
] | 36
|
2015-05-30T08:39:19.000Z
|
2022-03-04T20:42:33.000Z
|
Lib/test/test_multibytecodec_support.py
|
Pandinosaurus/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
[
"CNRI-Jython"
] | 74
|
2015-05-29T17:18:53.000Z
|
2022-01-15T14:06:44.000Z
|
#!/usr/bin/env python
#
# test_multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
import codecs
import os
import re
import sys
import unittest
from http.client import HTTPException
from test import support
from io import StringIO
class TestBase:
    """Common unittest mix-in for CJK codec tests.

    Subclasses set the class attributes below; the methods then exercise
    encode/decode round-trips, error-callback behaviour, incremental coders
    and stream reader/writer behaviour for the configured codec.
    """

    encoding = ''            # codec name
    codec = None             # codec tuple (with 4 elements)
    tstring = ''             # string to test StreamReader
    codectests = None        # must set. codec test tuple
    roundtriptest = 1        # set if roundtrip is possible with unicode
    has_iso10646 = 0         # set if this encoding contains whole iso10646 map
    xmlcharnametest = None   # string to test xmlcharrefreplace
    unmappedunicode = '\ufffe'  # a unicode codepoint that is not mapped
    # CPython uses an isolated surrogate, which will not work on Jython

    def setUp(self):
        """Resolve the codec under test and cache its entry points."""
        if self.codec is None:
            self.codec = codecs.lookup(self.encoding)
        self.encode = self.codec.encode
        self.decode = self.codec.decode
        self.reader = self.codec.streamreader
        self.writer = self.codec.streamwriter
        self.incrementalencoder = self.codec.incrementalencoder
        self.incrementaldecoder = self.codec.incrementaldecoder

    def test_chunkcoding(self):
        # NOTE(review): ``utf8`` comes from StringIO.readlines() yet is
        # .decode()d below -- this looks like a partial Python 2 to 3 port;
        # confirm the intended element types of self.tstring.
        for native, utf8 in zip(*[StringIO(f).readlines()
                                  for f in self.tstring]):
            u = self.decode(native)[0]
            self.assertEqual(u, utf8.decode('utf-8'))
            if self.roundtriptest:
                self.assertEqual(native, self.encode(u)[0])

    def test_errorhandle(self):
        # Each codectests entry is (input, error scheme, expected-or-falsy);
        # a falsy expected value means the call must raise UnicodeError.
        for source, scheme, expected in self.codectests:
            if isinstance(source, bytes):
                func = self.decode
            else:
                func = self.encode
            if expected:
                result = func(source, scheme)[0]
                if func is self.decode:
                    self.assertTrue(isinstance(result, str), type(result))
                    self.assertEqual(result, expected,
                                     '%r.decode(%r, %r)=%r != %r'
                                     % (source, self.encoding, scheme, result,
                                        expected))
                else:
                    self.assertTrue(isinstance(result, bytes), type(result))
                    self.assertEqual(result, expected,
                                     '%r.encode(%r, %r)=%r != %r'
                                     % (source, self.encoding, scheme, result,
                                        expected))
            else:
                self.assertRaises(UnicodeError, func, source, scheme)

    def test_xmlcharrefreplace(self):
        if self.has_iso10646:
            # Nothing is unmappable, so the replacement handler never fires.
            return
        s = "\u0b13\u0b23\u0b60 nd eggs"
        # NOTE(review): upstream expects "&#...;" character references here;
        # this literal appears to have had its entities decoded -- verify.
        self.assertEqual(
            self.encode(s, "xmlcharrefreplace")[0],
            "ଓଣୠ nd eggs"
        )

    def test_customreplace_encode(self):
        if self.has_iso10646:
            return

        from html.entities import codepoint2name

        def xmlcharnamereplace(exc):
            # Replace unmappable characters by HTML entity names or &#N;.
            if not isinstance(exc, UnicodeEncodeError):
                raise TypeError("don't know how to handle %r" % exc)
            l = []
            for c in exc.object[exc.start:exc.end]:
                if ord(c) in codepoint2name:
                    l.append("&%s;" % codepoint2name[ord(c)])
                else:
                    l.append("&#%d;" % ord(c))
            return ("".join(l), exc.end)
        codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)

        if self.xmlcharnametest:
            sin, sout = self.xmlcharnametest
        else:
            sin = "\xab\u211c\xbb = \u2329\u1234\u232a"
            # NOTE(review): same entity-decoding concern as above -- verify.
            sout = "«ℜ» = ⟨ሴ⟩"
        self.assertEqual(self.encode(sin,
                                     "test.xmlcharnamereplace")[0], sout)

    def test_callback_wrong_objects(self):
        def myreplace(exc):
            # ``ret`` is rebound by the loop below; the closure sees each one.
            return (ret, exc.end)
        codecs.register_error("test.cjktest", myreplace)

        for ret in ([1, 2, 3], [], None, object(), 'string', ''):
            self.assertRaises(TypeError, self.encode, self.unmappedunicode,
                              'test.cjktest')

    def test_callback_long_index(self):
        def myreplace(exc):
            return ('x', int(exc.end))
        codecs.register_error("test.cjktest", myreplace)
        self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
                                     'test.cjktest'), ('abcdxefgh', 9))

        def myreplace(exc):
            # An index past sys.maxsize must be rejected.
            return ('x', sys.maxsize + 1)
        codecs.register_error("test.cjktest", myreplace)
        self.assertRaises(IndexError, self.encode, self.unmappedunicode,
                          'test.cjktest')

    def test_callback_None_index(self):
        def myreplace(exc):
            return ('x', None)
        codecs.register_error("test.cjktest", myreplace)
        self.assertRaises(TypeError, self.encode, self.unmappedunicode,
                          'test.cjktest')

    def test_callback_backward_index(self):
        # The callback may move the resume position backwards a limited
        # number of times, re-encoding the prefix each time.
        def myreplace(exc):
            if myreplace.limit > 0:
                myreplace.limit -= 1
                return ('REPLACED', 0)
            else:
                return ('TERMINAL', exc.end)
        myreplace.limit = 3
        codecs.register_error("test.cjktest", myreplace)
        self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
                                     'test.cjktest'),
                ('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))

    def test_callback_forward_index(self):
        # Moving the resume position forward skips input characters.
        def myreplace(exc):
            return ('REPLACED', exc.end + 2)
        codecs.register_error("test.cjktest", myreplace)
        self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
                                     'test.cjktest'), ('abcdREPLACEDgh', 9))

    def test_callback_index_outofbound(self):
        def myreplace(exc):
            return ('TERM', 100)
        codecs.register_error("test.cjktest", myreplace)
        self.assertRaises(IndexError, self.encode, self.unmappedunicode,
                          'test.cjktest')

    def test_incrementalencoder(self):
        # Feed the utf-8 fixture through the incremental encoder in chunks
        # of various sizes and compare with the natively encoded fixture.
        UTF8Reader = codecs.getreader('utf-8')
        for sizehint in [None] + list(range(1, 33)) + \
                        [64, 128, 256, 512, 1024]:
            istream = UTF8Reader(StringIO(self.tstring[1]))
            ostream = StringIO()
            encoder = self.incrementalencoder()
            while True:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                e = encoder.encode(data)
                ostream.write(e)
            self.assertEqual(ostream.getvalue(), self.tstring[0])

    def test_incrementaldecoder(self):
        # Inverse of test_incrementalencoder: decode native chunks to utf-8.
        UTF8Writer = codecs.getwriter('utf-8')
        for sizehint in [None, -1] + list(range(1, 33)) + \
                        [64, 128, 256, 512, 1024]:
            istream = StringIO(self.tstring[0])
            ostream = UTF8Writer(StringIO())
            decoder = self.incrementaldecoder()
            while True:
                data = istream.read(sizehint)
                if not data:
                    break
                else:
                    u = decoder.decode(data)
                    ostream.write(u)
            self.assertEqual(ostream.getvalue(), self.tstring[1])

    def test_incrementalencoder_error_callback(self):
        inv = self.unmappedunicode

        e = self.incrementalencoder()
        self.assertRaises(UnicodeEncodeError, e.encode, inv, True)

        e.errors = 'ignore'
        self.assertEqual(e.encode(inv, True), '')

        e.reset()
        def tempreplace(exc):
            return ('called', exc.end)
        codecs.register_error('test.incremental_error_callback', tempreplace)
        e.errors = 'test.incremental_error_callback'
        self.assertEqual(e.encode(inv, True), 'called')

        # again
        e.errors = 'ignore'
        self.assertEqual(e.encode(inv, True), '')

    def test_streamreader(self):
        # Exercise read/readline/readlines with many size hints.
        UTF8Writer = codecs.getwriter('utf-8')
        for name in ["read", "readline", "readlines"]:
            for sizehint in [None, -1] + list(range(1, 33)) + \
                            [64, 128, 256, 512, 1024]:
                istream = self.reader(StringIO(self.tstring[0]))
                ostream = UTF8Writer(StringIO())
                func = getattr(istream, name)
                while True:
                    data = func(sizehint)
                    if not data:
                        break
                    if name == "readlines":
                        ostream.writelines(data)
                    else:
                        ostream.write(data)
                self.assertEqual(ostream.getvalue(), self.tstring[1])

    def test_streamwriter(self):
        # Mirror of test_streamreader for the writer direction.
        readfuncs = ('read', 'readline', 'readlines')
        UTF8Reader = codecs.getreader('utf-8')
        for name in readfuncs:
            for sizehint in [None] + list(range(1, 33)) + \
                            [64, 128, 256, 512, 1024]:
                istream = UTF8Reader(StringIO(self.tstring[1]))
                ostream = self.writer(StringIO())
                func = getattr(istream, name)
                while True:
                    if sizehint is not None:
                        data = func(sizehint)
                    else:
                        data = func()
                    if not data:
                        break
                    if name == "readlines":
                        ostream.writelines(data)
                    else:
                        ostream.write(data)
                self.assertEqual(ostream.getvalue(), self.tstring[0])
class TestBase_Mapping(unittest.TestCase):
    """Validates a codec against its published mapping table.

    Subclasses set ``encoding`` and ``mapfileurl``; the mapping file is
    fetched through test.support's resource cache.
    """

    pass_enctest = []   # (coded, unicode) pairs excused from the encode check
    pass_dectest = []   # (coded, unicode) pairs excused from the decode check
    supmaps = []        # supplemental mapping pairs tested unconditionally
    codectests = []     # (input, error scheme, expected-or-falsy) tuples

    def __init__(self, *args, **kw):
        unittest.TestCase.__init__(self, *args, **kw)
        try:
            self.open_mapping_file().close() # test it to report the error early
        except (IOError, HTTPException):
            self.skipTest("Could not retrieve "+self.mapfileurl)

    def open_mapping_file(self):
        return support.open_urlresource(self.mapfileurl)

    def test_mapping_file(self):
        # The mapping resource comes in two formats: .xml (ucm) or plain text.
        if self.mapfileurl.endswith('.xml'):
            self._test_mapping_file_ucm()
        else:
            self._test_mapping_file_plain()

    def _test_mapping_file_plain(self):
        # Plain lines look like "0xXXXX 0xYYYY[+0xZZZZ]  # comment".
        # NOTE(review): eval() is applied to the downloaded mapping data;
        # acceptable only because the resource source is trusted.
        _unichr = lambda c: eval("u'\\U%08x'" % int(c, 16))
        unichrs = lambda s: ''.join(_unichr(c) for c in s.split('+'))
        urt_wa = {}

        with self.open_mapping_file() as f:
            for line in f:
                if not line:
                    break
                data = line.split('#')[0].strip().split()
                if len(data) != 2:
                    continue

                # Split the coded value into its big-endian byte sequence.
                csetval = eval(data[0])
                if csetval <= 0x7F:
                    csetch = chr(csetval & 0xff)
                elif csetval >= 0x1000000:
                    csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \
                             chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
                elif csetval >= 0x10000:
                    csetch = chr(csetval >> 16) + \
                             chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
                elif csetval >= 0x100:
                    csetch = chr(csetval >> 8) + chr(csetval & 0xff)
                else:
                    continue

                unich = unichrs(data[1])
                # Skip the replacement character and duplicate targets.
                if unich == '\ufffd' or unich in urt_wa:
                    continue
                urt_wa[unich] = csetch

                self._testpoint(csetch, unich)

    def _test_mapping_file_ucm(self):
        with self.open_mapping_file() as f:
            ucmdata = f.read()
        # Each <a u="XXXX" b="YY ZZ"/> element maps a code point to bytes.
        uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata)
        for uni, coded in uc:
            unich = chr(int(uni, 16))
            codech = ''.join(chr(int(c, 16)) for c in coded.split())
            self._testpoint(codech, unich)

    def test_mapping_supplemental(self):
        for mapping in self.supmaps:
            self._testpoint(*mapping)

    def _testpoint(self, csetch, unich):
        # Check one mapping in both directions unless it is explicitly excused.
        if (csetch, unich) not in self.pass_enctest:
            try:
                self.assertEqual(unich.encode(self.encoding), csetch)
            except UnicodeError as exc:
                self.fail('Encoding failed while testing %s -> %s: %s' % (
                            repr(unich), repr(csetch), exc.reason))
        if (csetch, unich) not in self.pass_dectest:
            try:
                self.assertEqual(csetch.decode(self.encoding), unich)
            except UnicodeError as exc:
                self.fail('Decoding failed while testing %s -> %s: %s' % (
                            repr(csetch), repr(unich), exc.reason))

    def test_errorhandle(self):
        # Same table format as TestBase.test_errorhandle, but driven through
        # the str/bytes encode/decode methods directly.
        for source, scheme, expected in self.codectests:
            if isinstance(source, bytes):
                func = source.decode
            else:
                func = source.encode
            if expected:
                if isinstance(source, bytes):
                    result = func(self.encoding, scheme)
                    self.assertTrue(isinstance(result, str), type(result))
                    self.assertEqual(result, expected,
                                     '%r.decode(%r, %r)=%r != %r'
                                     % (source, self.encoding, scheme, result,
                                        expected))
                else:
                    result = func(self.encoding, scheme)
                    self.assertTrue(isinstance(result, bytes), type(result))
                    self.assertEqual(result, expected,
                                     '%r.encode(%r, %r)=%r != %r'
                                     % (source, self.encoding, scheme, result,
                                        expected))
            else:
                self.assertRaises(UnicodeError, func, self.encoding, scheme)
def load_teststring(name):
    """Load the (encoded, utf-8) fixture pair for *name* from cjkencodings/."""
    fixture_dir = os.path.join(os.path.dirname(__file__), 'cjkencodings')
    with open(os.path.join(fixture_dir, name + '.txt'), 'rb') as fp:
        encoded = fp.read()
    with open(os.path.join(fixture_dir, name + '-utf8.txt'), 'rb') as fp:
        utf8 = fp.read()
    return encoded, utf8
| 38.944149
| 99
| 0.51779
|
4a099f9b031d810ab3dd38b11cbee24e546bd4ad
| 555
|
py
|
Python
|
ch11ex8_rdupl.py
|
jerzyjerzy8/j-zelle
|
0e945091523ee074c3ab1f3eb0110e25744ccee4
|
[
"MIT"
] | null | null | null |
ch11ex8_rdupl.py
|
jerzyjerzy8/j-zelle
|
0e945091523ee074c3ab1f3eb0110e25744ccee4
|
[
"MIT"
] | null | null | null |
ch11ex8_rdupl.py
|
jerzyjerzy8/j-zelle
|
0e945091523ee074c3ab1f3eb0110e25744ccee4
|
[
"MIT"
] | null | null | null |
# ch11ex8_rdupl
"""The remove_duplicates function and its tests."""
def remove_duplicates(ls):
    """Remove duplicate values from the list ls, in place.

    Keeps the *last* occurrence of each value, matching the original
    count-then-remove-from-the-front behaviour, but runs in O(n) instead of
    the O(n**2) repeated list.remove() scans. Elements must be hashable
    (they already had to be, as the original used them as dict keys).
    """
    seen = set()
    kept = []
    # Walk backwards so the last occurrence of each value is the one kept.
    for el in reversed(ls):
        if el not in seen:
            seen.add(el)
            kept.append(el)
    kept.reverse()
    ls[:] = kept
def remove_duplicates_tests():
    """Sanity checks for remove_duplicates (prints on success)."""
    sample = [0, 1, 2, 1, 3, 0]
    remove_duplicates(sample)
    assert sample == [2, 1, 3, 0]

    empty = []
    remove_duplicates(empty)
    assert empty == []

    single = ["a"]
    remove_duplicates(single)
    assert single == ["a"]

    print("All tests passed!")
| 19.821429
| 51
| 0.540541
|
4a099fe403f13906da120d907cd5e7dbcc3d6d3d
| 6,716
|
py
|
Python
|
gcn/utils.py
|
floregol/gcn
|
23cad65c4f77a4eb007cf5f85a16d428d0928826
|
[
"MIT"
] | 1
|
2018-07-09T20:08:07.000Z
|
2018-07-09T20:08:07.000Z
|
gcn/utils.py
|
floregol/gcn
|
23cad65c4f77a4eb007cf5f85a16d428d0928826
|
[
"MIT"
] | null | null | null |
gcn/utils.py
|
floregol/gcn
|
23cad65c4f77a4eb007cf5f85a16d428d0928826
|
[
"MIT"
] | null | null | null |
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
import os
# Paths are resolved relative to the process working directory, so the
# script must be launched from the project's gcn/ directory.
current_dir = os.getcwd()
project_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
# Directory holding the ind.<dataset>.* input files used by load_data.
data_dir = os.path.join(current_dir, "data")
def parse_index_file(filename):
    """Parse index file.

    Args:
        filename: Path to a text file with one integer index per line.

    Returns:
        List of the parsed integers, in file order.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original iterated over a bare open() and leaked the handle.
    with open(filename) as index_file:
        return [int(line.strip()) for line in index_file]
def sample_mask(idx, l):
    """Create mask.

    Args:
        idx: Index or array of indices to mark as True.
        l: Total length of the mask.

    Returns:
        numpy boolean array of shape (l,) with True at the given indices.
    """
    mask = np.zeros(l)
    mask[idx] = 1
    # BUG FIX: the np.bool alias was removed in NumPy 1.24; the builtin bool
    # is the supported spelling and yields the same dtype.
    return np.array(mask, dtype=bool)
def print_partition_index(mask, partition_name, y):
    """Print the index range covered by a partition mask and by its labels."""
    part_idx = np.argwhere(mask > 0).reshape(-1)
    # Rows with any non-background label (argmax > 0) count as labelled.
    label_idx = np.argwhere(np.argmax(y, axis=1) > 0).reshape(-1)
    print("{} index : {}-{} val index : {}-{}".format(
        partition_name, min(part_idx), max(part_idx),
        min(label_idx), max(label_idx)))
def load_data(dataset_str):
    """
    Loads input data from gcn/data directory
    ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
        (a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
    ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
    ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
    ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
        object;
    ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
    All objects above must be saved using python pickle module.
    :param dataset_str: Dataset name
    :return: All data input files loaded (as well the training/test data).
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open(os.path.join(data_dir, "ind.{}.{}".format(dataset_str, names[i])), 'rb') as f:
            # latin1 lets Python 3 read pickles that were written by Python 2.
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file(os.path.join(data_dir, "ind.{}.test.index".format(dataset_str)))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range - min(test_idx_range), :] = ty
        ty = ty_extended
    # Re-order the stacked test rows so each row sits at its graph index.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    idx_test = test_idx_range.tolist()
    # NOTE(review): train split size (1208) is hard-coded — presumably tuned
    # for one specific dataset; confirm before reusing with other datasets.
    idx_train = range(1208)
    idx_val = range(1208, idx_test[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    # Training rows are everything outside the validation and test masks.
    train_mask = np.logical_not(val_mask + test_mask)
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    return adj, features, labels, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
    """Convert a sparse matrix — or, in place, a list of them — to
    (coords, values, shape) tuple representation."""
    def to_tuple(mx):
        coo = mx if sp.isspmatrix_coo(mx) else mx.tocoo()
        coords = np.vstack((coo.row, coo.col)).transpose()
        return coords, coo.data, coo.shape

    if isinstance(sparse_mx, list):
        # Mutate the caller's list in place, mirroring the original contract.
        for idx, mx in enumerate(sparse_mx):
            sparse_mx[idx] = to_tuple(mx)
        return sparse_mx
    return to_tuple(sparse_mx)
def preprocess_features(features):
    """Row-normalize feature matrix and convert to tuple representation"""
    row_sums = np.array(features.sum(1))
    inv = np.power(row_sums, -1).flatten()
    # All-zero rows produce inf; zero them out so those rows stay zero.
    inv[np.isinf(inv)] = 0.
    normalized = sp.diags(inv).dot(features)
    return sparse_to_tuple(normalized)
def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 * A * D^-1/2."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1))
    inv_sqrt = np.power(degrees, -0.5).flatten()
    # Isolated nodes have degree 0 -> inf; clamp to 0 so they stay isolated.
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_half = sp.diags(inv_sqrt)
    return adj.dot(d_half).transpose().dot(d_half).tocoo()
def preprocess_adj(adj):
    """Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
    # Add self-loops before the symmetric normalization.
    with_self_loops = adj + sp.eye(adj.shape[0])
    return sparse_to_tuple(normalize_adj(with_self_loops))
def construct_feed_dict(features, support, labels, labels_mask, sub_sampled_support, placeholders):
    """Construct the feed dictionary mapping each placeholder to its value."""
    feed_dict = {
        placeholders['labels']: labels,
        placeholders['labels_mask']: labels_mask,
        placeholders['features']: features,
        # features is a (coords, values, shape) tuple; the values array's
        # shape gives the nonzero count.
        placeholders['num_features_nonzero']: features[1].shape,
    }
    for i in range(len(support)):
        feed_dict[placeholders['support'][i]] = support[i]
    for i in range(len(sub_sampled_support)):
        feed_dict[placeholders['sub_sampled_support'][i]] = sub_sampled_support[i]
    return feed_dict
def squash_list(bins, scores_dicts):
    """For each bin value found in at least one dict, average its scores.

    Returns a list of (bin_value, mean_score) pairs, in bin order; bins
    absent from every dict are skipped.
    """
    averaged = []
    for bin_value in bins:
        scores = [d[bin_value] for d in scores_dicts if bin_value in d]
        if scores:
            averaged.append((bin_value, np.mean(scores)))
    return averaged
| 36.699454
| 115
| 0.672722
|
4a09a13418ba1bff75a788fefdf65a5d63085eeb
| 1,752
|
py
|
Python
|
test.py
|
graehu/confply
|
4471b520b3685d587a89d31dee7eec9ca7bf85de
|
[
"MIT"
] | null | null | null |
test.py
|
graehu/confply
|
4471b520b3685d587a89d31dee7eec9ca7bf85de
|
[
"MIT"
] | null | null | null |
test.py
|
graehu/confply
|
4471b520b3685d587a89d31dee7eec9ca7bf85de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# _____ .__
# ____ ____ _____/ ____\_____ | | ___.__.
# _/ ___\/ _ \ / \ __\\____ \| |< | |
# \ \__( <_> ) | \ | | |_> > |_\___ |
# \___ >____/|___| /__| | __/|____/ ____|
# \/ \/ |__| \/
# launcher generated using:
#
# python ./confply.py --launcher build.py
import sys
import os
sys.path.append(os.path.abspath("."))
from confply import launcher
from confply import run_commandline
# fill this with your commands
# Alias name -> confply command line; `launcher` resolves these by key.
aliases = {
    # 'default': '--in path/to/command.py'
    "g++": "--config.confply.tool g++ --in examples/cpp_compiler.cpp.py --cpp_clean",
    "gcc": "--config.confply.tool gcc --in examples/cpp_compiler.cpp.py --cpp_clean",
    "clang": "--config.confply.tool clang --in examples/cpp_compiler.cpp.py --cpp_clean",
    "clang++": "--config.confply.tool clang++ --in examples/cpp_compiler.cpp.py --cpp_clean",
    "emcc": "--config.confply.tool emcc --in examples/cpp_compiler.cpp.py --cpp_clean",
    "em++": "--config.confply.tool em++ --in examples/cpp_compiler.cpp.py --cpp_clean",
    "cl": "--config.confply.tool cl --in examples/cpp_compiler.cpp.py --cpp_clean",
    "echo": "--config.confply.tool echo --in examples/cpp_compiler.cpp.py --cpp_clean"
}
# "all" will run all of the aliases
aliases["all"] = " -- ".join([val for key, val in aliases.items()])
if __name__ == "__main__":
    args = sys.argv[1:]
    if "--listen" in args:
        # NOTE(review): --listen is forwarded to confply's run_commandline —
        # presumably a watch/listen mode; confirm against confply docs.
        run_commandline(["--listen", __file__])
    else:
        # Run from the script's own directory so the relative --in paths
        # in the aliases above resolve.
        dir_name = os.path.dirname(__file__)
        if not dir_name == "":
            os.chdir(dir_name)
        if args:
            launcher(sys.argv[1:], aliases)
        else:
            launcher(["default"], aliases)
| 38.933333
| 93
| 0.587329
|
4a09a1a8a56deaf26d1c9d3741a12d6adb8bcfb3
| 2,437
|
py
|
Python
|
Code/problems/pom3/helper/pom3_teams.py
|
rahlk/Experimental-Algorithms
|
d04a2d3ec5a4c54ff3ebff5cf003b93d2a983061
|
[
"MIT"
] | null | null | null |
Code/problems/pom3/helper/pom3_teams.py
|
rahlk/Experimental-Algorithms
|
d04a2d3ec5a4c54ff3ebff5cf003b93d2a983061
|
[
"MIT"
] | 9
|
2015-09-14T21:07:06.000Z
|
2015-12-08T01:38:08.000Z
|
Code/problems/pom3/helper/pom3_teams.py
|
rahlk/Experimental-Algorithms
|
d04a2d3ec5a4c54ff3ebff5cf003b93d2a983061
|
[
"MIT"
] | null | null | null |
__author__ = 'joe krall'
from pom3_team import *
import math
class pom3_teams:
    """Builds and initializes the teams for a POM3 simulation run.

    Teams are sized from `decisions.team_size`, given contiguous slices of
    `requirements.tasks` proportional to their size, then have task costs
    adjusted by the Boehm-Turner personnel scales and criticality settings.
    NOTE(review): `requirements`/`decisions`/`Team` are project types defined
    elsewhere (pom3_team); attribute semantics assumed from usage here.
    """

    def __init__(p3t, requirements, decisions):
        # NOTE: `p3t` plays the role of `self` (project naming convention).
        p3t.teams = []
        p3t.decisions = decisions
        # Build Each Team
        total_size = 0
        while (total_size < requirements.count):
            #specific sized teams
            p3t.teams.append(Team(decisions))
            total_size += decisions.team_size
        # Assign Initial Tasks to Each Team
        begin = 0
        for team in p3t.teams:
            # Each team gets a contiguous task slice proportional to its size.
            percent = (float)(team.team_size) / (float)(total_size)
            end = (int)(begin+math.ceil(percent*len(requirements.tasks))-1)
            for k in range(begin, end):
                team.tasks.append(requirements.tasks[k])
            begin = end
        # Rounding can leave tasks unassigned; give leftovers to the last team.
        if ((end) < len(requirements.tasks)):
            for i in range(len(requirements.tasks) - (end)):
                p3t.teams[len(p3t.teams)-1].tasks.append(requirements.tasks[begin+i])
        # Mark Initial Visibility of Tasks for Each Team
        for team in p3t.teams:
            team.markTasksVisible()
        # Apply Effect of Boehm-Turner Personnel Scales to Task Costs
        scales_alpha = [0.45, 0.50, 0.55, 0.60, 0.65]
        scales_beta = [0.40, 0.30, 0.20, 0.10, 0.00]
        scales_gamma = [0.15, 0.20, 0.25, 0.30, 0.35]
        for team in p3t.teams:
            # decisions.size indexes the Boehm-Turner scale tables above.
            numAlphas = scales_alpha[decisions.size]*team.team_size
            numBetas = scales_beta[decisions.size]*team.team_size
            numGammas = scales_gamma[decisions.size]*team.team_size
            #print numAlphas, numBetas, numGammas
            team.alpha = numAlphas
            team.beta = numBetas
            team.gamma = numGammas
            team.power = team.alpha + 1.22*team.beta + 1.6*team.gamma
            for task in team.tasks:
                task.val.cost += task.val.cost * ((numAlphas + 1.22*numBetas + 1.6*numGammas)/100.0)
                # and apply effect of criticality while we're at it
                task.val.cost = task.val.cost * (team.decisions.criticality_modifier ** team.decisions.criticality) # cost' = cost * X^criticality
        #Print Out of Teams & Requirements
        """
        for i,team in enumerate(p3t.teams):
            print "___________________TEAM #" + str(i) + "______________________"
            for e,task in enumerate(team.tasks):
                print "> TASK #" + str(e) + ": " + str(task)
        """
| 38.68254
| 146
| 0.583094
|
4a09a22306582a21c4c34d2a6f386bb740e79fbb
| 1,735
|
py
|
Python
|
lnbits/extensions/lnurlpayout/crud.py
|
blackcoffeexbt/lnbits-legend
|
a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64
|
[
"MIT"
] | null | null | null |
lnbits/extensions/lnurlpayout/crud.py
|
blackcoffeexbt/lnbits-legend
|
a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64
|
[
"MIT"
] | null | null | null |
lnbits/extensions/lnurlpayout/crud.py
|
blackcoffeexbt/lnbits-legend
|
a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64
|
[
"MIT"
] | null | null | null |
from typing import List, Optional, Union
from lnbits.helpers import urlsafe_short_hash
from . import db
from .models import lnurlpayout, CreateLnurlPayoutData
async def create_lnurlpayout(wallet_id: str, admin_key: str, data: CreateLnurlPayoutData) -> lnurlpayout:
    """Insert a new lnurlpayout row and return the freshly-read model.

    Raises AssertionError if the inserted row cannot be read back.
    """
    # A random URL-safe hash serves as the primary key.
    lnurlpayout_id = urlsafe_short_hash()
    await db.execute(
        """
        INSERT INTO lnurlpayout.lnurlpayouts (id, title, wallet, admin_key, lnurlpay, threshold)
        VALUES (?, ?, ?, ?, ?, ?)
        """,
        (lnurlpayout_id, data.title, wallet_id, admin_key, data.lnurlpay, data.threshold),
    )
    # Read the row back so the caller gets the fully-populated model object.
    lnurlpayout = await get_lnurlpayout(lnurlpayout_id)
    assert lnurlpayout, "Newly created lnurlpayout couldn't be retrieved"
    return lnurlpayout
async def get_lnurlpayout(lnurlpayout_id: str) -> Optional[lnurlpayout]:
    """Fetch a single lnurlpayout by id; returns None if it does not exist."""
    row = await db.fetchone("SELECT * FROM lnurlpayout.lnurlpayouts WHERE id = ?", (lnurlpayout_id,))
    return lnurlpayout(**row) if row else None
async def get_lnurlpayout_from_wallet(wallet_id: str) -> Optional[lnurlpayout]:
    """Fetch the lnurlpayout attached to a wallet; returns None if absent."""
    row = await db.fetchone("SELECT * FROM lnurlpayout.lnurlpayouts WHERE wallet = ?", (wallet_id,))
    return lnurlpayout(**row) if row else None
async def get_lnurlpayouts(wallet_ids: Union[str, List[str]]) -> List[lnurlpayout]:
    """Fetch all lnurlpayouts belonging to one wallet id or a list of ids."""
    if isinstance(wallet_ids, str):
        wallet_ids = [wallet_ids]
    # One "?" placeholder per wallet id, for the parameterized IN clause.
    q = ",".join(["?"] * len(wallet_ids))
    rows = await db.fetchall(
        f"SELECT * FROM lnurlpayout.lnurlpayouts WHERE wallet IN ({q})", (*wallet_ids,)
    )
    return [lnurlpayout(**row) if row else None for row in rows]
async def delete_lnurlpayout(lnurlpayout_id: str) -> None:
    """Delete the lnurlpayout with the given id (no-op if it does not exist)."""
    await db.execute("DELETE FROM lnurlpayout.lnurlpayouts WHERE id = ?", (lnurlpayout_id,))
| 37.717391
| 105
| 0.707781
|
4a09a2bb923728684a2938057954486f10ad4cf3
| 3,976
|
py
|
Python
|
examples/python.asyncio/gen-py.asyncio/v1/music/f_AlbumWinners_publisher.py
|
dustyholmes-wf/frugal
|
915ccfc58fcc9baabc4549c522e3acd2975a2e0b
|
[
"Apache-2.0"
] | null | null | null |
examples/python.asyncio/gen-py.asyncio/v1/music/f_AlbumWinners_publisher.py
|
dustyholmes-wf/frugal
|
915ccfc58fcc9baabc4549c522e3acd2975a2e0b
|
[
"Apache-2.0"
] | null | null | null |
examples/python.asyncio/gen-py.asyncio/v1/music/f_AlbumWinners_publisher.py
|
dustyholmes-wf/frugal
|
915ccfc58fcc9baabc4549c522e3acd2975a2e0b
|
[
"Apache-2.0"
] | null | null | null |
#
# Autogenerated by Frugal Compiler (3.4.7)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import inspect
import sys
import traceback
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.Thrift import TType
from frugal.exceptions import TApplicationExceptionType
from frugal.middleware import Method
from frugal.subscription import FSubscription
from frugal.transport import TMemoryOutputBuffer
from .ttypes import *
class AlbumWinnersPublisher(object):
    """
    Scopes are a Frugal extension to the IDL for declaring PubSub
    semantics. Subscribers to this scope will be notified if they win a contest.
    Scopes must have a prefix.
    """
    # NOTE: this file is autogenerated by the Frugal compiler (see file
    # header); hand edits will be lost on regeneration.

    # Separator between prefix, scope name, and operation in topic strings.
    _DELIMITER = '.'

    def __init__(self, provider, middleware=None):
        """
        Create a new AlbumWinnersPublisher.

        Args:
            provider: FScopeProvider
            middleware: ServiceMiddleware or list of ServiceMiddleware
        """
        middleware = middleware or []
        if middleware and not isinstance(middleware, list):
            middleware = [middleware]
        middleware += provider.get_middleware()
        self._transport, self._protocol_factory = provider.new_publisher()
        # Each publish operation is wrapped in the middleware chain.
        self._methods = {
            'publish_ContestStart': Method(self._publish_ContestStart, middleware),
            'publish_TimeLeft': Method(self._publish_TimeLeft, middleware),
            'publish_Winner': Method(self._publish_Winner, middleware),
        }

    async def open(self):
        await self._transport.open()

    async def close(self):
        await self._transport.close()

    async def publish_ContestStart(self, ctx, req):
        """
        Args:
            ctx: FContext
            req: list
        """
        await self._methods['publish_ContestStart']([ctx, req])

    async def _publish_ContestStart(self, ctx, req):
        op = 'ContestStart'
        prefix = 'v1.music.'
        topic = '{}AlbumWinners{}{}'.format(prefix, self._DELIMITER, op)
        # Serialize headers + message into a size-limited buffer, then publish.
        buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit())
        oprot = self._protocol_factory.get_protocol(buffer)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin(op, TMessageType.CALL, 0)
        # req is a list of structs; each element writes itself.
        oprot.writeListBegin(TType.STRUCT, len(req))
        for elem3 in req:
            elem3.write(oprot)
        oprot.writeListEnd()
        oprot.writeMessageEnd()
        await self._transport.publish(topic, buffer.getvalue())

    async def publish_TimeLeft(self, ctx, req):
        """
        Args:
            ctx: FContext
            req: Minutes
        """
        await self._methods['publish_TimeLeft']([ctx, req])

    async def _publish_TimeLeft(self, ctx, req):
        op = 'TimeLeft'
        prefix = 'v1.music.'
        topic = '{}AlbumWinners{}{}'.format(prefix, self._DELIMITER, op)
        buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit())
        oprot = self._protocol_factory.get_protocol(buffer)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin(op, TMessageType.CALL, 0)
        # Minutes is serialized as a double on the wire.
        oprot.writeDouble(req)
        oprot.writeMessageEnd()
        await self._transport.publish(topic, buffer.getvalue())

    async def publish_Winner(self, ctx, req):
        """
        Args:
            ctx: FContext
            req: Album
        """
        await self._methods['publish_Winner']([ctx, req])

    async def _publish_Winner(self, ctx, req):
        op = 'Winner'
        prefix = 'v1.music.'
        topic = '{}AlbumWinners{}{}'.format(prefix, self._DELIMITER, op)
        buffer = TMemoryOutputBuffer(self._transport.get_publish_size_limit())
        oprot = self._protocol_factory.get_protocol(buffer)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin(op, TMessageType.CALL, 0)
        # Album struct writes itself.
        req.write(oprot)
        oprot.writeMessageEnd()
        await self._transport.publish(topic, buffer.getvalue())
| 31.555556
| 83
| 0.652918
|
4a09a341cf9daaba18edc1c5b9d1571e89482381
| 855
|
py
|
Python
|
plotly/validators/histogram/_stream.py
|
fcollonval/plotly.py
|
5c7f100db1af8c82bb740a38ef684955a8ed6d0e
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/histogram/_stream.py
|
fcollonval/plotly.py
|
5c7f100db1af8c82bb740a38ef684955a8ed6d0e
|
[
"MIT"
] | null | null | null |
plotly/validators/histogram/_stream.py
|
fcollonval/plotly.py
|
5c7f100db1af8c82bb740a38ef684955a8ed6d0e
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Validator for the `stream` property of histogram traces; all validation
    # is delegated to CompoundValidator with the Stream data class.

    def __init__(
        self, plotly_name='stream', parent_name='histogram', **kwargs
    ):
        super(StreamValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str='Stream',
            # data_docs is the user-facing help text shown on validation error.
            data_docs="""
            maxpoints
                Sets the maximum number of points to keep on
                the plots from an incoming stream. If
                `maxpoints` is set to 50, only the newest 50
                points will be displayed on the plot.
            token
                The stream id number links a data trace on a
                plot with a stream. See
                https://plot.ly/settings for more details.
""",
            **kwargs
        )
| 32.884615
| 70
| 0.578947
|
4a09a34297d659c6841d679a0258041a434a6094
| 12,940
|
py
|
Python
|
Arvore AVL (TP3 A1)/mainA1AVL-v3.py
|
DFTF-PConsole/AED-Labs-Arvores-LEI
|
2149b4f6058fb581282c5c5d813ae99e233e453b
|
[
"MIT"
] | null | null | null |
Arvore AVL (TP3 A1)/mainA1AVL-v3.py
|
DFTF-PConsole/AED-Labs-Arvores-LEI
|
2149b4f6058fb581282c5c5d813ae99e233e453b
|
[
"MIT"
] | null | null | null |
Arvore AVL (TP3 A1)/mainA1AVL-v3.py
|
DFTF-PConsole/AED-Labs-Arvores-LEI
|
2149b4f6058fb581282c5c5d813ae99e233e453b
|
[
"MIT"
] | null | null | null |
# v3 - Melhoramentos: Retirei "in" em "x in array"; implementei pesquisa binaria; print_array; etc.
# v3 Abordagem Ate as folhas, depois de Baixo-para-Cima, Recursiva
# pai.direcao = return no filho da recursividade
# #### BIBLIOTECAS ####
import sys
# #### CONSTANTES ####
# Protocol tokens exchanged over stdin/stdout (CMD_IN_* are read,
# CMD_OUT_* are written). Values must match the grader's protocol exactly.
CMD_IN_LINHAS = "LINHAS"
CMD_OUT_NULO = "-1"
CMD_IN_ASSOC = "ASSOC"
CMD_OUT_NAOENCONTRADA = "NAO ENCONTRADA."
CMD_OUT_ENCONTRADA = "ENCONTRADA."
CMD_IN_TERMINADO = "TCHAU\n"
CMD_IN_TERMINADO2 = "TCHAU"
CMD_IN_TEXTO = "TEXTO\n"
CMD_IN_FIM = "FIM.\n"
CMD_OUT_GUARDADO = "GUARDADO."
# #### FUNCOES ####
class Elemento:
    """A word plus the list of line numbers on which it occurs."""

    def __init__(self, input_palavra, input_ocorrencias):
        self.palavra = input_palavra
        # Occurrence line numbers, in insertion order.
        self.ocorrencias = [input_ocorrencias]

    def add_ocorrencia(self, count):
        """Record line number `count`, skipping an immediate repeat."""
        if count != self.ocorrencias[-1]:
            self.ocorrencias.append(count)
class No:
    """A single AVL-tree node: payload plus left/right children and cached height."""

    def __init__(self, input_elemento=None, input_esquerda=None, input_direita=None):
        self.esquerda = input_esquerda
        self.direita = input_direita
        self.elemento = input_elemento
        # A freshly created node is a leaf; leaves have height 1 by convention.
        self.altura = 1
class ArvoreAVL:
    """AVL tree keyed by Elemento.palavra; ordering comes from compara_str."""

    def __init__(self, input_raiz=None):
        self.raiz = input_raiz

    def rotacao_esq(self, input_no_k1):  # Simple rotation with right child k2, L <- R
        # ### FUNCTION ### Simple left rotation (direction <-)
        no_k2 = input_no_k1.direita
        no_k3 = no_k2.esquerda
        no_k2.esquerda = input_no_k1
        input_no_k1.direita = no_k3
        input_no_k1.altura = 1 + max(self.get_altura(input_no_k1.esquerda), self.get_altura(input_no_k1.direita))  # Update the demoted node first so heights stay coherent
        no_k2.altura = 1 + max(self.get_altura(no_k2.esquerda), self.get_altura(no_k2.direita))  # Child height + 1 (to include the current node)
        return no_k2  # New root of the sub-tree

    def rotacao_dir(self, input_no_k1):  # Simple rotation with left child k2, L -> R
        # ### FUNCTION ### Simple right rotation (direction ->)
        no_k2 = input_no_k1.esquerda
        no_k3 = no_k2.direita
        no_k2.direita = input_no_k1
        input_no_k1.esquerda = no_k3
        input_no_k1.altura = 1 + max(self.get_altura(input_no_k1.esquerda), self.get_altura(input_no_k1.direita))
        no_k2.altura = 1 + max(self.get_altura(no_k2.esquerda), self.get_altura(no_k2.direita))
        return no_k2

    def rotacao_esq_dir(self, input_no_k1):
        # ### FUNCTION ### Double Left-Right rotation (direction <- then ->)
        input_no_k1.esquerda = self.rotacao_esq(input_no_k1.esquerda)
        return self.rotacao_dir(input_no_k1)

    def rotacao_dir_esq(self, input_no_k1):
        # ### FUNCTION ### Double Right-Left rotation (direction -> then <-)
        input_no_k1.direita = self.rotacao_dir(input_no_k1.direita)
        return self.rotacao_esq(input_no_k1)

    def procura_palavra(self, input_palavra):
        # ### FUNCTION ### Search for a word in the tree and return its Elemento,
        # or None if it is not present. Iterative descent from the root.
        no = self.raiz
        while no is not None:
            if compara_str(input_palavra, no.elemento.palavra) == 0:
                return no.elemento
            elif compara_str(input_palavra, no.elemento.palavra) == 1:
                no = no.direita
            else:
                no = no.esquerda
        return None

    def inserir_elemento(self, input_raiz, input_elemento):  # input_raiz -> root or node of the current sub-tree
        # ### FUNCTION ### Insert an Elemento recursively down to a null leaf,
        # rebalancing on the way back up. The parent re-links via the return value.
        if input_raiz is None:  # Reached insertion point: create the node
            novo_no = No(input_elemento)
            return novo_no
        elif compara_str(input_raiz.elemento.palavra, input_elemento.palavra) == 1:  # str 1 (tree node) greater
            input_raiz.esquerda = self.inserir_elemento(input_raiz.esquerda, input_elemento)
        else:  # str 2 (new node) greater
            input_raiz.direita = self.inserir_elemento(input_raiz.direita, input_elemento)
        input_raiz.altura = 1 + max(self.get_altura(input_raiz.esquerda), self.get_altura(input_raiz.direita))  # Child height + 1 (to include the current node)
        # ----------------------- Check balance; rotate to repair ----------------------
        equilibrio = self.get_equilibrio(input_raiz)
        if equilibrio > 1:  # Left side HEAVIER than the right (in the sub-tree rooted at input_raiz)
            if compara_str(input_raiz.esquerda.elemento.palavra, input_elemento.palavra) == 1:  # inserted word went left of the left child
                # Grandparent-parent-child path -> Left-Left
                return self.rotacao_dir(input_raiz)
            else:  # inserted word went right of the left child
                # Grandparent-parent-child path -> Left-Right
                return self.rotacao_esq_dir(input_raiz)
        if equilibrio < -1:  # Right side HEAVIER than the left (in the sub-tree rooted at input_raiz)
            if compara_str(input_raiz.direita.elemento.palavra, input_elemento.palavra) == 2:  # inserted word went right of the right child
                # Grandparent-parent-child path -> Right-Right
                return self.rotacao_esq(input_raiz)
            else:  # inserted word went left of the right child
                # Grandparent-parent-child path -> Right-Left
                return self.rotacao_dir_esq(input_raiz)
        return input_raiz  # No rotations needed

    def get_altura(self, input_no):
        # ### FUNCTION ### Cached height from the node attribute, 0 for a null node
        if input_no is None:
            return 0
        return input_no.altura

    def get_equilibrio(self, input_no):
        # ### FUNCTION ### Balance factor (left height - right height), 0 for a null node
        if input_no is None:
            return 0
        return self.get_altura(input_no.esquerda) - self.get_altura(input_no.direita)  # Balance of the sub-tree
def compara_str(str1, str2):
    """Three-way string compare: 1 if str1 > str2, 2 if str2 > str1, 0 if equal."""
    if str1 == str2:
        return 0
    return 1 if str1 > str2 else 2
def input_texto(arvore_avl):
    # ### FUNCTION ### Read text lines from stdin until CMD_IN_FIM ("FIM.\n"),
    # tokenizing each line and indexing every token (word or separator char)
    # into the AVL tree together with its line number.
    # NOTE(review): the flush-current-word logic is duplicated between the
    # '\n' branch and the separator branch — candidate for extraction.
    count = 0
    for linha in sys.stdin:
        if count == 0 and linha == "":
            sys.exit("Erro - Sem Texto para input")
        if linha == CMD_IN_FIM:
            break
        palavra = ""
        for ch in linha:
            if ch == '\n':
                # End of line: flush the pending word, if any.
                if len(palavra) > 0:
                    palavra = palavra.lower()
                    elemento = arvore_avl.procura_palavra(palavra)
                    if elemento is not None:
                        elemento.add_ocorrencia(count)
                    else:
                        elemento = Elemento(palavra, count)
                        arvore_avl.raiz = arvore_avl.inserir_elemento(arvore_avl.raiz, elemento)
                palavra = ""
            elif ch == ' ' or ch == '.' or ch == ',' or ch == ';' or ch == '(' or ch == ')':
                # Separator: flush the pending word, then index the separator
                # character itself as a token too.
                if len(palavra) > 0:
                    palavra = palavra.lower()
                    elemento = arvore_avl.procura_palavra(palavra)
                    if elemento is not None:
                        elemento.add_ocorrencia(count)
                    else:
                        elemento = Elemento(palavra, count)
                        arvore_avl.raiz = arvore_avl.inserir_elemento(arvore_avl.raiz, elemento)
                elemento = arvore_avl.procura_palavra(ch)
                if elemento is not None:
                    elemento.add_ocorrencia(count)
                else:
                    elemento = Elemento(ch, count)
                    arvore_avl.raiz = arvore_avl.inserir_elemento(arvore_avl.raiz, elemento)
                palavra = ""
            else:
                # Ordinary character: extend the current word.
                palavra = palavra + ch
        count += 1
    print(CMD_OUT_GUARDADO)
    return 0
def input_cmd(arvore_avl):
    # ### FUNCTION ### Read, execute and answer commands from stdin until
    # CMD_IN_TERMINADO ("TCHAU"); unrecognized input aborts the program.
    for linha in sys.stdin:
        if linha == CMD_IN_TERMINADO2:
            break
        elif linha == CMD_IN_TERMINADO:
            break
        elif linha == "":
            break
        elif (CMD_IN_LINHAS in linha) and (linha.index(CMD_IN_LINHAS) == 0):
            # LINHAS <word>: print the line numbers the word occurs on,
            # or CMD_OUT_NULO if it was never seen.
            palavra = linha[len(CMD_IN_LINHAS)+1:len(linha)-1]
            palavra = palavra.lower()
            elemento = arvore_avl.procura_palavra(palavra)
            if elemento is not None:
                print(print_array(elemento.ocorrencias))
            else:
                print(CMD_OUT_NULO)
        elif (CMD_IN_ASSOC in linha) and (linha.index(CMD_IN_ASSOC) == 0):
            # ASSOC <word> <line>: report whether the word occurs on that line
            # (binary search over the sorted occurrence list).
            palavras = linha.split(' ')
            palavras[2] = (palavras[2])[:len(palavras[2])-1]  # strip trailing newline
            palavras[1] = palavras[1].lower()
            elemento = arvore_avl.procura_palavra(palavras[1])
            if elemento is not None:
                if not (pesquisa_binaria(elemento.ocorrencias, int(palavras[2])) == -1):
                    print(CMD_OUT_ENCONTRADA)
                else:
                    print(CMD_OUT_NAOENCONTRADA)
            else:
                print(CMD_OUT_NAOENCONTRADA)
        else:
            sys.exit("Erro - Interpretacao dos comandos pos-texto")
    return 0
def pesquisa_binaria(array, valor):
    """Classic binary search over a sorted list.

    Returns the index of `valor` in `array`, or -1 when absent
    (including for the empty list).
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if array[mid] < valor:
            lo = mid + 1        # value is in the upper half
        elif array[mid] > valor:
            hi = mid - 1        # value is in the lower half
        else:
            return mid          # found it
    return -1
def print_array(array):
    """Join the values of a list into a single space-separated string.

    Returns "" for the empty list (same as the original slice-off-the-
    leading-space behavior).
    """
    # Fix: replaces quadratic repeated string concatenation with str.join,
    # which builds the result in one pass; output is byte-identical.
    return " ".join(str(num) for num in array)
def main():
    # ### FUNCTION ### Entry point: expect the literal "TEXTO" header first,
    # ingest the text into the AVL index, then serve LINHAS/ASSOC queries
    # until "TCHAU".
    arvore_avl = ArvoreAVL()
    if sys.stdin.readline() == CMD_IN_TEXTO:
        input_texto(arvore_avl)
    else:
        sys.exit("Erro - Sem Comando Incial: " + CMD_IN_TEXTO)
    input_cmd(arvore_avl)
    return 0
if __name__ == '__main__':
    # ### START ### Run only when executed as a script, not on import.
    main()
| 49.201521
| 196
| 0.510433
|
4a09a39bce9a405eaa36f82c73bc421f5d766569
| 58,620
|
py
|
Python
|
idb/idapython.py
|
chubbymaggie/python-idb
|
027068f992a56b99468db52226910031e1b818a6
|
[
"Apache-2.0"
] | null | null | null |
idb/idapython.py
|
chubbymaggie/python-idb
|
027068f992a56b99468db52226910031e1b818a6
|
[
"Apache-2.0"
] | null | null | null |
idb/idapython.py
|
chubbymaggie/python-idb
|
027068f992a56b99468db52226910031e1b818a6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
import logging
import weakref
import functools
import collections
import six
import idb.netnode
import idb.analysis
# Module-level logger, namespaced by this module's import path.
logger = logging.getLogger(__name__)
# via: https://stackoverflow.com/a/33672499/87207
def memoized_method(*lru_args, **lru_kwargs):
    """Per-instance memoizing decorator for methods.

    Arguments are forwarded to functools.lru_cache. The cache is bound to the
    instance (via setattr on first call) so instances don't share results.
    Recipe via: https://stackoverflow.com/a/33672499/87207
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped_func(self, *args, **kwargs):
            # Hold only a weak reference to self: the cached closure lives on
            # the instance, and a strong reference would keep it alive forever.
            weak_self = weakref.ref(self)

            @functools.wraps(func)
            @functools.lru_cache(*lru_args, **lru_kwargs)
            def bound_cached(*call_args, **call_kwargs):
                return func(weak_self(), *call_args, **call_kwargs)

            # First call installs the per-instance cache, shadowing the
            # descriptor for subsequent lookups on this instance.
            setattr(self, func.__name__, bound_cached)
            return bound_cached(*call_args_or(args), **kwargs) if False else bound_cached(*args, **kwargs)
        return wrapped_func
    return decorator
def is_flag_set(flags, flag):
    """Return True when every bit of `flag` is also set in `flags`."""
    return (flags & flag) == flag
class FLAGS:
    """Byte-flag bit masks mirroring the IDA SDK flag definitions.

    Groups (each annotated with its SDK doc URL below): operand numbering,
    byte state, common state info, per-operand type, code bits, data bits,
    and the raw byte-value mask.
    """
    # instruction/data operands
    # via:
    # https://www.hex-rays.com/products/ida/support/sdkdoc/group___f_f__op.html

    # outer offset base (combined with operand number). More...
    OPND_OUTER = 0x80
    # mask for operand number
    OPND_MASK = 0x07
    # all operands
    OPND_ALL = OPND_MASK

    # byte states bits
    # via:
    # https://www.hex-rays.com/products/ida/support/sdkdoc/group___f_f__statebits.html

    # Mask for typing.
    MS_CLS = 0x00000600
    # Code ?
    FF_CODE = 0x00000600
    # Data ?
    FF_DATA = 0x00000400
    # Tail ?
    FF_TAIL = 0x00000200
    # Unknown ?
    FF_UNK = 0x00000000

    # specific state information bits
    # via:
    # https://www.hex-rays.com/products/ida/support/sdkdoc/group___f_f__statespecb.html

    # Mask of common bits.
    MS_COMM = 0x000FF800
    # Has comment ?
    FF_COMM = 0x00000800
    # has references
    FF_REF = 0x00001000
    # Has next or prev lines ?
    FF_LINE = 0x00002000
    # Has name ?
    FF_NAME = 0x00004000
    # Has dummy name?
    FF_LABL = 0x00008000
    # Exec flow from prev instruction.
    FF_FLOW = 0x00010000
    # Inverted sign of operands.
    FF_SIGN = 0x00020000
    # Bitwise negation of operands.
    FF_BNOT = 0x00040000
    # is variable byte?
    FF_VAR = 0x00080000

    # instruction operand types bites
    # via:
    # https://www.hex-rays.com/products/ida/support/sdkdoc/group___f_f__opbits.html

    # Mask for 1st arg typing.
    MS_0TYPE = 0x00F00000
    # Void (unknown)?
    FF_0VOID = 0x00000000
    # Hexadecimal number?
    FF_0NUMH = 0x00100000
    # Decimal number?
    FF_0NUMD = 0x00200000
    # Char ('x')?
    FF_0CHAR = 0x00300000
    # Segment?
    FF_0SEG = 0x00400000
    # Offset?
    FF_0OFF = 0x00500000
    # Binary number?
    FF_0NUMB = 0x00600000
    # Octal number?
    FF_0NUMO = 0x00700000
    # Enumeration?
    FF_0ENUM = 0x00800000
    # Forced operand?
    FF_0FOP = 0x00900000
    # Struct offset?
    FF_0STRO = 0x00A00000
    # Stack variable?
    FF_0STK = 0x00B00000
    # Floating point number?
    FF_0FLT = 0x00C00000
    # Custom representation?
    FF_0CUST = 0x00D00000

    # Mask for the type of other operands.
    MS_1TYPE = 0x0F000000
    # Void (unknown)?
    FF_1VOID = 0x00000000
    # Hexadecimal number?
    FF_1NUMH = 0x01000000
    # Decimal number?
    FF_1NUMD = 0x02000000
    # Char ('x')?
    FF_1CHAR = 0x03000000
    # Segment?
    FF_1SEG = 0x04000000
    # Offset?
    FF_1OFF = 0x05000000
    # Binary number?
    FF_1NUMB = 0x06000000
    # Octal number?
    FF_1NUMO = 0x07000000
    # Enumeration?
    FF_1ENUM = 0x08000000
    # Forced operand?
    FF_1FOP = 0x09000000
    # Struct offset?
    FF_1STRO = 0x0A000000
    # Stack variable?
    FF_1STK = 0x0B000000
    # Floating point number?
    FF_1FLT = 0x0C000000
    # Custom representation?
    FF_1CUST = 0x0D000000

    # code byte bits
    # via: https://www.hex-rays.com/products/ida/support/sdkdoc/group___f_f__codebits.html

    # Mask for code bits.
    MS_CODE = 0xF0000000
    # function start?
    FF_FUNC = 0x10000000
    # Has Immediate value?
    FF_IMMD = 0x40000000
    # Has jump table or switch_info?
    FF_JUMP = 0x80000000

    # data bytes bits
    # via:
    # https://www.hex-rays.com/products/ida/support/sdkdoc/group___f_f__databits.html

    # Mask for DATA typing.
    DT_TYPE = 0xF0000000
    # byte
    FF_BYTE = 0x00000000
    # word
    FF_WORD = 0x10000000
    # double word
    FF_DWRD = 0x20000000
    # quadro word
    FF_QWRD = 0x30000000
    # tbyte
    FF_TBYT = 0x40000000
    # ASCII ?
    FF_ASCI = 0x50000000
    # Struct ?
    FF_STRU = 0x60000000
    # octaword/xmm word (16 bytes/128 bits)
    FF_OWRD = 0x70000000
    # float
    FF_FLOAT = 0x80000000
    # double
    FF_DOUBLE = 0x90000000
    # packed decimal real
    FF_PACKREAL = 0xA0000000
    # alignment directive
    FF_ALIGN = 0xB0000000
    # 3-byte data (only with support from the processor module)
    FF_3BYTE = 0xC0000000
    # custom data type
    FF_CUSTOM = 0xD0000000
    # ymm word (32 bytes/256 bits)
    FF_YWRD = 0xE0000000

    # bytes
    # via:
    # https://www.hex-rays.com/products/ida/support/sdkdoc/group___f_f__.html

    # Mask for byte value.
    MS_VAL = 0x000000FF
    # Byte has value?
    FF_IVL = 0x00000100
class AFLAGS:
    # additional per-address flags, stored as netnode altvals (tag 'A', index 0x8).
    # via:
    # https://www.hex-rays.com/products/ida/support/sdkdoc/group___a_f_l__.html
    # has line number info
    AFL_LINNUM = 0x00000001
    # user-defined SP value
    AFL_USERSP = 0x00000002
    # name is public (inter-file linkage)
    AFL_PUBNAM = 0x00000004
    # name is weak
    AFL_WEAKNAM = 0x00000008
    # the item is hidden completely
    AFL_HIDDEN = 0x00000010
    # the instruction/data is specified by the user
    AFL_MANUAL = 0x00000020
    # the code/data border is hidden
    AFL_NOBRD = 0x00000040
    # display struct field name at 0 offset when displaying an offset
    AFL_ZSTROFF = 0x00000080
    # the 1st operand is bitwise negated
    AFL_BNOT0 = 0x00000100
    # the 2nd operand is bitwise negated
    AFL_BNOT1 = 0x00000200
    # item from the standard library
    AFL_LIB = 0x00000400
    # has typeinfo? (NSUP_TYPEINFO)
    AFL_TI = 0x00000800
    # has typeinfo for operand 0? (NSUP_OPTYPES)
    AFL_TI0 = 0x00001000
    # has typeinfo for operand 1? (NSUP_OPTYPES+1)
    AFL_TI1 = 0x00002000
    # has local name too (FF_NAME should be set)
    AFL_LNAME = 0x00004000
    # has type comment? (such a comment may be changed by IDA)
    AFL_TILCMT = 0x00008000
    # toggle leading zeroes for the 1st operand
    AFL_LZERO0 = 0x00010000
    # toggle leading zeroes for the 2nd operand
    AFL_LZERO1 = 0x00020000
    # has user defined instruction color?
    AFL_COLORED = 0x00040000
    # terse structure variable display?
    AFL_TERSESTR = 0x00080000
    # code: toggle sign of the 1st operand
    AFL_SIGN0 = 0x00100000
    # code: toggle sign of the 2nd operand
    AFL_SIGN1 = 0x00200000
    # for imported function pointers: doesn't return
    AFL_NORET = 0x00400000
    # sp delta value is fixed by analysis
    AFL_FIXEDSPD = 0x00800000
    # the previous insn was created for alignment purposes only
    AFL_ALIGNFLOW = 0x01000000
    # the type information is definitive
    AFL_USERTI = 0x02000000
    # function returns a floating point value
    AFL_RETFP = 0x04000000
    # insn modifies SP and uses the modified value
    AFL_USEMODSP = 0x08000000
    # autoanalysis should not create code here
    AFL_NOTCODE = 0x10000000
class ida_netnode:
    # thin adapter that exposes netnode construction through the shimmed API.
    def __init__(self, db, api):
        # db: the open idb database; api: the root API shim that owns this module.
        self.idb = db
        self.api = api
    def netnode(self, *args, **kwargs):
        # construct an idb.netnode.Netnode bound to this database.
        # remaining args are forwarded verbatim (e.g. a node id or name).
        return idb.netnode.Netnode(self.idb, *args, **kwargs)
class idc:
SEGPERM_EXEC = 1 # Execute
SEGPERM_WRITE = 2 # Write
SEGPERM_READ = 4 # Read
SEGPERM_MAXVAL = 7 # (SEGPERM_EXEC + SEGPERM_WRITE + SEGPERM_READ)
SFL_COMORG = 0x01 # IDP dependent field (IBM PC: if set, ORG directive is not commented out)
SFL_OBOK = 0x02 # orgbase is present? (IDP dependent field)
SFL_HIDDEN = 0x04 # is the segment hidden?
SFL_DEBUG = 0x08 # is the segment created for the debugger?
SFL_LOADER = 0x10 # is the segment created by the loader?
SFL_HIDETYPE = 0x20 # hide segment type (do not print it in the listing)
def __init__(self, db, api):
self.idb = db
self.api = api
# these will be the capstone disassemblers, lazily loaded.
# map from bitness (numbers 16, 32, and 64) to capstone disassembler instance
self.bit_dis = None
# map from tuple (segment start, end address) to capstone disassembler instance
self.seg_dis = None
# apparently this enum changes with bitness.
# this is annoying.
# so, be sure to reference these via an `idc` *instance*.
# yes:
#
# idc(some_idb).FUNCATTR_START
#
# no:
#
# idc.FUNCATTR_START
#
# via:
# https://github.com/zachriggle/idapython/blob/37d2fd13b31fec8e6e53fbb9704fa3cd0cbd5b07/python/idc.py#L4149
if self.idb.wordsize == 4:
# function start address
self.FUNCATTR_START = 0
# function end address
self.FUNCATTR_END = 4
# function flags
self.FUNCATTR_FLAGS = 8
# function frame id
self.FUNCATTR_FRAME = 10
# size of local variables
self.FUNCATTR_FRSIZE = 14
# size of saved registers area
self.FUNCATTR_FRREGS = 18
# number of bytes purged from the stack
self.FUNCATTR_ARGSIZE = 20
# frame pointer delta
self.FUNCATTR_FPD = 24
# function color code
self.FUNCATTR_COLOR = 28
# starting address
self.SEGATTR_START = 0
# ending address
self.SEGATTR_END = 4
self.SEGATTR_ORGBASE = 16
# alignment
self.SEGATTR_ALIGN = 20
# combination
self.SEGATTR_COMB = 21
# permissions
self.SEGATTR_PERM = 22
# bitness (0: 16, 1: 32, 2: 64 bit segment)
self.SEGATTR_BITNESS = 23
# segment flags
self.SEGATTR_FLAGS = 24
# segment selector
self.SEGATTR_SEL = 28
# default ES value
self.SEGATTR_ES = 32
# default CS value
self.SEGATTR_CS = 36
# default SS value
self.SEGATTR_SS = 40
# default DS value
self.SEGATTR_DS = 44
# default FS value
self.SEGATTR_FS = 48
# default GS value
self.SEGATTR_GS = 52
# segment type
self.SEGATTR_TYPE = 96
# segment color
self.SEGATTR_COLOR = 100
self.BADADDR = 0xFFFFFFFF
elif self.idb.wordsize == 8:
self.FUNCATTR_START = 0
self.FUNCATTR_END = 8
self.FUNCATTR_FLAGS = 16
self.FUNCATTR_FRAME = 18
self.FUNCATTR_FRSIZE = 26
self.FUNCATTR_FRREGS = 34
self.FUNCATTR_ARGSIZE = 36
self.FUNCATTR_FPD = 44
self.FUNCATTR_COLOR = 52
self.FUNCATTR_OWNER = 18
self.FUNCATTR_REFQTY = 26
self.SEGATTR_START = 0
self.SEGATTR_END = 8
self.SEGATTR_ORGBASE = 32
self.SEGATTR_ALIGN = 40
self.SEGATTR_COMB = 41
self.SEGATTR_PERM = 42
self.SEGATTR_BITNESS = 43
self.SEGATTR_FLAGS = 44
self.SEGATTR_SEL = 48
self.SEGATTR_ES = 56
self.SEGATTR_CS = 64
self.SEGATTR_SS = 72
self.SEGATTR_DS = 80
self.SEGATTR_FS = 88
self.SEGATTR_GS = 96
self.SEGATTR_TYPE = 184
self.SEGATTR_COLOR = 188
self.BADADDR = 0xFFFFFFFFFFFFFFFF
else:
raise RuntimeError('unexpected wordsize')
def ScreenEA(self):
return self.api.ScreenEA
def _get_segment(self, ea):
segs = idb.analysis.Segments(self.idb).segments
for seg in segs.values():
if seg.startEA <= ea < seg.endEA:
return seg
def SegStart(self, ea):
return self._get_segment(ea).startEA
def SegEnd(self, ea):
return self._get_segment(ea).endEA
def FirstSeg(self):
segs = idb.analysis.Segments(self.idb).segments
for startEA in sorted(segs.keys()):
return startEA
def NextSeg(self, ea):
segs = idb.analysis.Segments(self.idb).segments.values()
segs = sorted(segs, key=lambda s: s.startEA)
for i, seg in enumerate(segs):
if seg.startEA <= ea < seg.endEA:
if i < len(segs) - 1:
return segs[i + 1].startEA
else:
return self.BADADDR
def SegName(self, ea):
segstrings = idb.analysis.SegStrings(self.idb).strings
return segstrings[self._get_segment(ea).name_index]
def GetSegmentAttr(self, ea, attr):
if attr == self.SEGATTR_START:
return self.SegStart(ea)
elif attr == self.SEGATTR_END:
return self.SegEnd(ea)
elif attr == self.SEGATTR_ORGBASE:
return self._get_segment(ea).orgbase
elif attr == self.SEGATTR_ALIGN:
return self._get_segment(ea).align
elif attr == self.SEGATTR_COMB:
return self._get_segment(ea).comb
elif attr == self.SEGATTR_PERM:
return self._get_segment(ea).perm
elif attr == self.SEGATTR_BITNESS:
return self._get_segment(ea).bitness
elif attr == self.SEGATTR_FLAGS:
return self._get_segment(ea).flags
elif attr == self.SEGATTR_TYPE:
return self._get_segment(ea).type
elif attr == self.SEGATTR_COLOR:
return self._get_segment(ea).color
else:
raise NotImplementedError('segment attribute %d not yet implemented' % (attr))
def MinEA(self):
segs = idb.analysis.Segments(self.idb).segments.values()
segs = list(sorted(segs, key=lambda s: s.startEA))
return segs[0].startEA
def MaxEA(self):
segs = idb.analysis.Segments(self.idb).segments.values()
segs = list(sorted(segs, key=lambda s: s.startEA))
return segs[-1].endEA
def GetFlags(self, ea):
try:
return self.idb.id1.get_flags(ea)
except KeyError:
return 0
def IdbByte(self, ea):
flags = self.GetFlags(ea)
if self.hasValue(flags):
return flags & FLAGS.MS_VAL
else:
raise KeyError(ea)
def Head(self, ea):
flags = self.GetFlags(ea)
while not self.api.ida_bytes.isHead(flags):
ea -= 1
# TODO: handle Index/KeyError here when we overrun a segment
flags = self.GetFlags(ea)
return ea
def ItemSize(self, ea):
oea = ea
flags = self.GetFlags(ea)
if not self.api.ida_bytes.isHead(flags):
raise ValueError('ItemSize must only be called on a head address.')
ea += 1
flags = self.GetFlags(ea)
while flags is not None and not self.api.ida_bytes.isHead(flags):
ea += 1
# TODO: handle Index/KeyError here when we overrun a segment
flags = self.GetFlags(ea)
return ea - oea
def NextHead(self, ea):
ea += 1
flags = self.GetFlags(ea)
while flags is not None and not self.api.ida_bytes.isHead(flags):
ea += 1
# TODO: handle Index/KeyError here when we overrun a segment
flags = self.GetFlags(ea)
return ea
    def PrevHead(self, ea):
        # snap to the head of the item containing `ea`, step back one byte
        # (into the previous item's tail), then snap to that item's head.
        ea = self.Head(ea)
        ea -= 1
        return self.Head(ea)
def GetManyBytes(self, ea, size, use_dbg=False):
'''
Raises:
IndexError: if the range extends beyond a segment.
KeyError: if a byte is not defined.
'''
if use_dbg:
raise NotImplementedError()
# can only read from one segment at a time
if self.SegStart(ea) != self.SegStart(ea + size):
# edge case: when reading exactly to the end of the segment.
if ea + size == self.SegEnd(ea):
pass
else:
raise IndexError((ea, ea + size))
ret = []
try:
for i in range(ea, ea + size):
ret.append(self.IdbByte(i))
except KeyError:
# we have already verified that that the requested range falls within a Segment.
# however, the underlying ID1 section may be smaller than the Segment.
# so, we pad the Segment with NULL bytes.
# this is consistent with the IDAPython behavior.
# see github issue #29.
ret.extend([0x0 for _ in range(size - len(ret))])
if six.PY2:
return ''.join(map(chr, ret))
else:
return bytes(ret)
def _load_dis(self, arch, mode):
import capstone
if self.bit_dis is None:
self.bit_dis = {}
if self.bit_dis.get((arch, mode)) is None:
r = capstone.Cs(arch, mode)
self.bit_dis[(arch, mode)] = r
return self.bit_dis[(arch, mode)]
def _disassemble(self, ea):
import capstone
size = self.ItemSize(ea)
inst_buf = self.GetManyBytes(ea, size)
segment = self._get_segment(ea)
bitness = 16 << segment.bitness# 16, 32, 64
procname = self.api.idaapi.get_inf_structure().procname.lower()
dis = None
if procname == "arm" and bitness == 64:
dis = self._load_dis(capstone.CS_ARCH_ARM64, capstone.CS_MODE_ARM)
elif procname == "arm" and bitness == 32:
if size == 2:
dis = self._load_dis(capstone.CS_ARCH_ARM, capstone.CS_MODE_THUMB)
else:
dis = self._load_dis(capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM)
elif procname in ['metapc', '8086', '80286r', '80286p', '80386r', '80386p','80486r', '80486p', '80586r', '80586p', '80686p', 'k62', 'p2', 'p3', 'athlon', 'p4', '8085']:
if bitness == 16:
dis = self._load_dis(capstone.CS_ARCH_X86, capstone.CS_MODE_16)
elif bitness == 32:
dis = self._load_dis(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
elif bitness == 64:
dis = self._load_dis(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
elif procname == "mipsb":
if bitness == 32:
dis = self._load_dis(capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS32 | capstone.CS_MODE_BIG_ENDIAN)
elif bitness == 64:
dis = self._load_dis(capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS64 | capstone.CS_MODE_BIG_ENDIAN)
elif procname == "mipsl":
if bitness == 32:
dis = self._load_dis(capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS32 | capstone.CS_MODE_LITTLE_ENDIAN)
elif bitness == 64:
dis = self._load_dis(capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS64 | capstone.CS_MODE_LITTLE_ENDIAN)
if dis is None:
raise NotImplementedError("unknown arch %s bit:%s inst_len:%d" % (procname, bitness, len(inst_buf)))
dis.detail = True
try:
op = next(dis.disasm(inst_buf, ea))
except StopIteration:
raise RuntimeError('failed to disassemble %s' % (hex(ea)))
else:
return op
def GetMnem(self, ea):
op = self._disassemble(ea)
return op.mnemonic
def GetDisasm(self, ea):
op = self._disassemble(ea)
return '%s\t%s' % (op.mnemonic, op.op_str)
# one instruction or data
CIC_ITEM = 1
# function
CIC_FUNC = 2
# segment
CIC_SEGM = 3
# default color
DEFCOLOR = 0xFFFFFFFF
def GetColor(self, ea, what):
'''
Args:
ea (int): effective address of thing.
what (int): one of:
- idc.CIC_ITEM
- idc.CIC_FUNC
- idc.CIC_SEGM
Returns:
int: the color in RGB. possibly idc.DEFCOLOR if not set.
'''
if what != idc.CIC_ITEM:
raise NotImplementedError()
if not self.api.ida_nalt.is_colored_item(ea):
return idc.DEFCOLOR
nn = self.api.ida_netnode.netnode(ea)
try:
return nn.altval(tag='A', index=0x14) - 1
except KeyError:
return idc.DEFCOLOR
def GetFunctionFlags(self, ea):
func = self.api.ida_funcs.get_func(ea)
return func.flags
def GetFunctionAttr(self, ea, attr):
func = self.api.ida_funcs.get_func(ea)
if attr == self.FUNCATTR_START:
return func.startEA
elif attr == self.FUNCATTR_END:
return func.endEA
elif attr == self.FUNCATTR_FLAGS:
return func.flags
elif attr == self.FUNCATTR_FRAME:
return func.frame
elif attr == self.FUNCATTR_FRSIZE:
return func.frsize
elif attr == self.FUNCATTR_FRREGS:
return func.frregs
elif attr == self.FUNCATTR_ARGSIZE:
return func.argsize
elif attr == self.FUNCATTR_FPD:
return func.fpd
elif attr == self.FUNCATTR_COLOR:
return func.color
else:
raise ValueError('unknown attr: %x' % (attr))
def GetFunctionName(self, ea):
return self.api.ida_funcs.get_func_name(ea)
def LocByName(self, name):
try:
key = ("N" + name).encode('utf-8')
cursor = self.idb.id0.find(key)
return idb.netnode.as_uint(cursor.value)
except KeyError:
return -1
def GetInputMD5(self):
return idb.analysis.Root(self.idb).md5
def Comment(self, ea):
return self.api.ida_bytes.get_cmt(ea, False)
def RptCmt(self, ea):
return self.api.ida_bytes.get_cmt(ea, True)
def GetCommentEx(self, ea, repeatable):
return self.api.ida_bytes.get_cmt(ea, repeatable)
def GetType(self, ea):
try:
f = idb.analysis.Function(self.idb, ea)
except Exception as e:
logger.warning('failed to fetch function for GetType: %s', e)
return None
try:
name = f.get_name()
sig = f.get_signature()
except KeyError:
return None
params = []
for param in sig.parameters:
params.append('%s %s' % (param.type, param.name))
return '{rtype:s} {cc:s} {name:s}({params:s})'.format(
rtype=sig.rtype,
cc=sig.calling_convention,
name=name,
params=', '.join(params),
)
@staticmethod
def hasValue(flags):
return flags & FLAGS.FF_IVL > 0
@staticmethod
def isDefArg0(flags):
return flags & FLAGS.MS_0TYPE > 0
@staticmethod
def isDefArg1(flags):
return flags & FLAGS.MS_1TYPE > 0
@staticmethod
def isOff0(flags):
return flags & FLAGS.MS_0TYPE == FLAGS.FF_0CUST
@staticmethod
def isOff1(flags):
return flags & FLAGS.MS_1TYPE == FLAGS.FF_1CUST
@staticmethod
def isChar0(flags):
return flags & FLAGS.MS_0TYPE == FLAGS.FF_0CHAR
@staticmethod
def isChar1(flags):
return flags & FLAGS.MS_1TYPE == FLAGS.FF_1CHAR
@staticmethod
def isSeg0(flags):
return flags & FLAGS.MS_0TYPE == FLAGS.FF_0SEG
@staticmethod
def isSeg1(flags):
return flags & FLAGS.MS_1TYPE == FLAGS.FF_1SEG
@staticmethod
def isEnum0(flags):
return flags & FLAGS.MS_0TYPE == FLAGS.FF_0ENUM
@staticmethod
def isEnum1(flags):
return flags & FLAGS.MS_1TYPE == FLAGS.FF_1ENUM
@staticmethod
def isStroff0(flags):
return flags & FLAGS.MS_0TYPE == FLAGS.FF_0STRO
@staticmethod
def isStroff1(flags):
return flags & FLAGS.MS_1TYPE == FLAGS.FF_1STRO
@staticmethod
def isStkvar0(flags):
return flags & FLAGS.MS_0TYPE == FLAGS.FF_0STK
@staticmethod
def isStkvar1(flags):
return flags & FLAGS.MS_1TYPE == FLAGS.FF_1STK
@staticmethod
def isFloat0(flags):
return flags & FLAGS.MS_0TYPE == FLAGS.FF_0FLT
@staticmethod
def isFloat1(flags):
return flags & FLAGS.MS_1TYPE == FLAGS.FF_1FLT
@staticmethod
def isCustFmt0(flags):
return flags & FLAGS.MS_0TYPE == FLAGS.FF_0CUST
@staticmethod
def isCustFmt1(flags):
return flags & FLAGS.MS_1TYPE == FLAGS.FF_1CUST
@staticmethod
def isNum0(flags):
t = flags & FLAGS.MS_0TYPE
return t == FLAGS.FF_0NUMB or \
t == FLAGS.FF_0NUMO or \
t == FLAGS.FF_0NUMD or \
t == FLAGS.FF_0NUMH
@staticmethod
def isNum1(flags):
t = flags & FLAGS.MS_1TYPE
return t == FLAGS.FF_1NUMB or \
t == FLAGS.FF_1NUMO or \
t == FLAGS.FF_1NUMD or \
t == FLAGS.FF_1NUMH
@staticmethod
def get_optype_flags0(flags):
return flags & FLAGS.MS_0TYPE
@staticmethod
def get_optype_flags1(flags):
return flags & FLAGS.MS_1TYPE
def LineA(self, ea, num):
nn = self.api.ida_netnode.netnode(ea)
# 1000 looks like a magic number, and it sorta is.
# S-1000, 1001, 1002, ... are where anterior lines are
try:
return nn.supstr(tag='S', index=1000 + num)
except KeyError:
return ''
def LineB(self, ea, num):
nn = self.api.ida_netnode.netnode(ea)
try:
return nn.supstr(tag='S', index=2000 + num)
except KeyError:
return ''
class ida_bytes:
def __init__(self, db, api):
self.idb = db
self.api = api
def get_cmt(self, ea, repeatable):
flags = self.api.idc.GetFlags(ea)
if not self.has_cmt(flags):
return ''
try:
nn = self.api.ida_netnode.netnode(ea)
if repeatable:
return nn.supstr(tag='S', index=1)
else:
return nn.supstr(tag='S', index=0)
except KeyError:
return ''
def get_flags(self, ea):
return self.api.idc.GetFlags(ea)
@staticmethod
def is_func(flags):
return flags & FLAGS.MS_CODE == FLAGS.FF_FUNC
@staticmethod
def isImmd(flags):
return flags & FLAGS.MS_CODE == FLAGS.FF_IMMD
@staticmethod
def is_code(flags):
return flags & FLAGS.MS_CLS == FLAGS.FF_CODE
@staticmethod
def is_data(flags):
return flags & FLAGS.MS_CLS == FLAGS.FF_DATA
@staticmethod
def is_tail(flags):
return flags & FLAGS.MS_CLS == FLAGS.FF_TAIL
@staticmethod
def is_not_tail(flags):
return not ida_bytes.is_tail(flags)
@staticmethod
def is_unknown(flags):
return flags & FLAGS.MS_CLS == FLAGS.FF_UNK
@staticmethod
def is_head(flags):
return ida_bytes.isCode(flags) or ida_bytes.isData(flags)
@staticmethod
def is_flow(flags):
return flags & FLAGS.MS_COMM & FLAGS.FF_FLOW > 0
@staticmethod
def is_var(flags):
return flags & FLAGS.MS_COMM & FLAGS.FF_VAR > 0
@staticmethod
def has_extra_cmts(flags):
return flags & FLAGS.MS_COMM & FLAGS.FF_LINE > 0
@staticmethod
def has_cmt(flags):
return flags & FLAGS.MS_COMM & FLAGS.FF_COMM > 0
@staticmethod
def has_ref(flags):
return flags & FLAGS.MS_COMM & FLAGS.FF_REF > 0
@staticmethod
def has_name(flags):
return flags & FLAGS.MS_COMM & FLAGS.FF_NAME > 0
@staticmethod
def has_dummy_name(flags):
return flags & FLAGS.MS_COMM & FLAGS.FF_LABL > 0
@staticmethod
def has_auto_name(flags):
# unknown how to compute this
raise NotImplementedError()
@staticmethod
def has_any_name(flags):
# unknown how to compute this
raise NotImplementedError()
@staticmethod
def has_user_name(flags):
# unknown how to compute this
raise NotImplementedError()
@staticmethod
def is_invsign(flags):
return flags & FLAGS.MS_COMM & FLAGS.FF_SIGN > 0
@staticmethod
def is_bnot(flags):
return flags & FLAGS.MS_COMM & FLAGS.FF_BNOT > 0
@staticmethod
def has_value(flags):
return (flags & FLAGS.FF_IVL) > 0
@staticmethod
def is_byte(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_BYTE
@staticmethod
def is_word(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_WORD
@staticmethod
def is_dword(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_DWRD
@staticmethod
def is_qword(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_QWRD
@staticmethod
def is_oword(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_OWRD
@staticmethod
def is_yword(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_YWRD
@staticmethod
def is_tbyte(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_TBYT
@staticmethod
def is_float(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_FLOAT
@staticmethod
def is_double(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_DOUBLE
@staticmethod
def isPackReal(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_PACKREAL
@staticmethod
def isASCII(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_ASCI
@staticmethod
def is_struct(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_STRU
@staticmethod
def is_align(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_ALIGN
@staticmethod
def is_3_byte(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_3BYTE
@staticmethod
def is_custom(flags):
return flags & FLAGS.DT_TYPE == FLAGS.FF_CUSTOM
def get_bytes(self, ea, count):
return self.api.idc.GetManyBytes(ea, count)
def next_that(self, ea, maxea, testf):
for i in range(ea+1, maxea):
flags = self.get_flags(i)
if testf(flags):
return i
return self.api.idc.BADADDR
def next_not_tail(self, ea):
while True:
ea += 1
flags = self.get_flags(ea)
if not self.is_tail(flags):
break
return ea
def next_inited(self, ea, maxea):
return self.next_that(ea, maxea, lambda flags: ida_bytes.has_value(flags))
class ida_nalt:
def __init__(self, db, api):
self.idb = db
self.api = api
def get_aflags(self, ea):
nn = self.api.ida_netnode.netnode(ea)
try:
return nn.altval(tag='A', index=0x8)
except KeyError:
return 0
def is_hidden_item(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_HIDDEN)
def is_hidden_border(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_NOBRD)
def uses_modsp(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_USEMODSP)
def is_zstroff(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_ZSTROFF)
def is__bnot0(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_BNOT0)
def is__bnot1(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_BNOT1)
def is_libitem(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_LIB)
def has_ti(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_TI)
def has_ti0(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_TI0)
def has_ti1(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_TI1)
def has_lname(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_LNAME)
def is_tilcmt(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_TILCMT)
def is_usersp(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_USERSP)
def is_lzero0(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_LZERO0)
def is_lzero1(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_LZERO1)
def is_colored_item(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_COLORED)
def is_terse_struc(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_TERSESTR)
def is__invsign0(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_SIGN0)
def is__invsign1(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_SIGN1)
def is_noret(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_NORET)
def is_fixed_spd(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_FIXEDSPD)
def is_align_flow(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_ALIGNFLOW)
def is_userti(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_USERTI)
def is_retfp(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_RETFP)
def is_notcode(self, ea):
return is_flag_set(self.get_aflags(ea), AFLAGS.AFL_NOTCODE)
def get_import_module_qty(self):
return max(idb.analysis.Imports(self.idb).lib_names.keys())
def get_import_module_name(self, mod_index):
return idb.analysis.Imports(self.idb).lib_names[mod_index]
def enum_import_names(self, mod_index, py_cb):
imps = idb.analysis.Imports(self.idb)
# dereference the node id stored in the A val
nnref = imps.lib_netnodes[mod_index]
nn = idb.netnode.Netnode(self.idb, nnref)
for funcaddr in nn.sups():
funcname = nn.supstr(funcaddr)
if not py_cb(funcaddr, funcname, None):
return
def get_imagebase(self):
try:
return idb.analysis.Root(self.idb).imagebase
except KeyError:
# seems that the key is not present in all databases,
# particularly those with an imagebase of 0x0.
return 0x0
# TODO: where to fetch ordinal?
class ida_funcs:
# via: https://www.hex-rays.com/products/ida/support/sdkdoc/group___f_u_n_c__.html
# Function doesn't return.
FUNC_NORET = 0x00000001
# Far function.
FUNC_FAR = 0x00000002
# Library function.
FUNC_LIB = 0x00000004
# Static function.
FUNC_STATICDEF = 0x00000008
# Function uses frame pointer (BP)
FUNC_FRAME = 0x00000010
# User has specified far-ness. More...
FUNC_USERFAR = 0x00000020
# A hidden function chunk.
FUNC_HIDDEN = 0x00000040
# Thunk (jump) function.
FUNC_THUNK = 0x00000080
# BP points to the bottom of the stack frame.
FUNC_BOTTOMBP = 0x00000100
# Function 'non-return' analysis must be performed. More...
FUNC_NORET_PENDING = 0x00200
# SP-analysis has been performed. More...
FUNC_SP_READY = 0x00000400
# 'argsize' field has been validated. More...
FUNC_PURGED_OK = 0x00004000
# This is a function tail. More...
FUNC_TAIL = 0x00008000
def __init__(self, db, api):
self.idb = db
self.api = api
def get_func(self, ea):
'''
get the func_t associated with the given address.
if the address is not the start of a function (or function tail), then searches
for a function that contains the given address.
note: the range search is pretty slow, since we parse everything on-demand.
'''
nn = self.api.ida_netnode.netnode('$ funcs')
try:
v = nn.supval(tag='S', index=ea)
except KeyError:
# search for the given effective address in the function regions.
# according to [1], `get_func` only searches the primary region, and not all chunks?
#
# [1]: http://www.openrce.org/reference_library/ida_sdk_lookup/get_func
for func in idb.analysis.Functions(self.idb).functions.values():
if not (func.startEA <= ea < func.endEA):
continue
if is_flag_set(func.flags, self.FUNC_TAIL):
return self.get_func(func.owner)
else:
return func
raise KeyError(ea)
else:
func = idb.analysis.func_t(v, wordsize=self.idb.wordsize)
if is_flag_set(func.flags, self.FUNC_TAIL):
return self.get_func(func.owner)
else:
return func
def get_func_cmt(self, ea, repeatable):
# function comments are stored on the `$ funcs` netnode
# tag is either `R` or `C`.
# index is effective address of the function.
# for example::
#
# nodeid: ff00000000000027 tag: C index: 0x401598
# 00000000: 72 65 70 20 63 6D 74 00 rep cmt.
# --
# nodeid: ff00000000000027 tag: N index: None
# 00000000: 24 20 66 75 6E 63 73 $ funcs
# --
# nodeid: ff00000000000027 tag: R index: 0x401598
# 00000000: 72 65 70 20 63 6D 74 00 rep cmt.
#
# i think its a bug that when you set a repeatable function via the IDA UI,
# it also sets a local function comment.
nn = self.api.ida_netnode.netnode('$ funcs')
try:
if repeatable:
tag = 'R'
else:
tag = 'C'
return nn.supstr(tag=tag, index=ea)
except KeyError:
return ''
def get_func_name(self, ea):
func = self.get_func(ea)
# ensure this is a function
if func.startEA != ea:
raise KeyError(ea)
# shouldn't be a chunk
if is_flag_set(func.flags, func.FUNC_TAIL):
raise KeyError(ea)
nn = self.api.ida_netnode.netnode(ea)
try:
return nn.name()
except:
if self.idb.wordsize == 4:
return 'sub_%04x' % (ea)
elif self.idb.wordsize == 8:
return 'sub_%08x' % (ea)
else:
raise RuntimeError('unexpected wordsize')
class BasicBlock(object):
    '''
    lightweight basic-block record used by the flowchart builder.

    interface extracted from: https://raw.githubusercontent.com/gabtremblay/idabearclean/master/idaapi.py
    '''
    def __init__(self, flowchart, startEA, lastInstEA, endEA):
        self.fc = flowchart
        # the block id is simply its start address.
        self.id = startEA
        self.startEA = startEA
        self.lastInstEA = lastInstEA
        self.endEA = endEA
        # block types are declared here:
        # https://www.hex-rays.com/products/ida/support/sdkdoc/gdl_8hpp.html#afa6fb2b53981d849d63273abbb1624bd
        # not sure if they are stored in the idb. seems like probably not.
        self.type = NotImplementedError()
    def preds(self):
        # resolve predecessor start addresses back to BasicBlock instances.
        for addr in self.fc.preds[self.startEA]:
            yield self.fc.bbs[addr]
    def succs(self):
        # resolve successor start addresses back to BasicBlock instances.
        for addr in self.fc.succs[self.startEA]:
            yield self.fc.bbs[addr]
    def __str__(self):
        return 'BasicBlock(startEA: 0x%x, endEA: 0x%x)' % (self.startEA, self.endEA)
def is_empty(s):
    '''True if the iterable `s` yields no items (consumes at most one item).'''
    sentinel = object()
    return next(iter(s), sentinel) is sentinel
class idaapi:
# xref flags
# via:
# https://www.hex-rays.com/products/ida/support/sdkdoc/group__xref__type.html#ga78aab6d0d6bd9cb4904bbdbb5ac4fa71
# unknown – for compatibility with old versions.
# Should not be used anymore.
fl_U = 0
# Call Far
fl_CF = 0x10
# Call Near
fl_CN = 0x11
# Jump Far.
fl_JF = 0x12
# Jump Near.
fl_JN = 0x13
# User specified (obsolete)
fl_USobsolete = 0x14
# Ordinary flow: used to specify execution flow to the next instruction.
fl_F = 0x15
# unknown – for compatibility with old versions.
# Should not be used anymore.
dr_U = 0
# Offset
# The reference uses 'offset' of data rather than its value OR
# The reference appeared because the "OFFSET" flag of instruction is set.
# The meaning of this type is IDP dependent.
dr_O = 1
# Write access.
dr_W = 2
# Read access.
dr_R = 3
# Text (for forced operands only) Name of data is used in manual operand.
dr_T = 4
# Informational (a derived java class references its base class informationally)
dr_I = 5
def __init__(self, db, api):
self.idb = db
self.api = api
def _find_bb_end(self, ea):
'''
Args:
ea (int): address at which a basic block begins. behavior undefined if its not a block start.
Returns:
int: the address of the final instruction in the basic block. it may be the same as the start.
'''
if not is_empty(idb.analysis.get_crefs_from(self.idb, ea,
types=[idaapi.fl_JN, idaapi.fl_JF, idaapi.fl_F])):
return ea
while True:
last_ea = ea
ea = self.api.idc.NextHead(ea)
flags = self.api.idc.GetFlags(ea)
if flags == 0:
return last_ea
if self.api.ida_bytes.hasRef(flags):
return last_ea
if self.api.ida_bytes.isFunc(flags):
return last_ea
if not self.api.ida_bytes.isFlow(flags):
return last_ea
if not is_empty(idb.analysis.get_crefs_from(self.idb, ea,
types=[idaapi.fl_JN, idaapi.fl_JF, idaapi.fl_F])):
return ea
def _find_bb_start(self, ea):
'''
Args:
ea (int): address at which a basic block ends. behavior undefined if its not a block end.
Returns:
int: the address of the first instruction in the basic block. it may be the same as the end.
'''
while True:
flags = self.api.idc.GetFlags(ea)
if self.api.ida_bytes.hasRef(flags):
return ea
if self.api.ida_bytes.isFunc(flags):
return ea
last_ea = ea
ea = self.api.idc.PrevHead(ea)
if not is_empty(idb.analysis.get_crefs_from(self.idb, ea,
types=[idaapi.fl_JN, idaapi.fl_JF, idaapi.fl_F])):
return last_ea
if not self.api.ida_bytes.isFlow(flags):
return last_ea
def _get_flow_preds(self, ea):
# this is basically CodeRefsTo with flow=True.
# need to fixup the return types, though.
flags = self.api.idc.GetFlags(ea)
if flags is not None and self.api.ida_bytes.isFlow(flags):
# prev instruction fell through to this insn
yield idb.analysis.Xref(self.api.idc.PrevHead(ea), ea, idaapi.fl_F)
# get all the flow xrefs to this instruction.
# a flow xref is like a fallthrough or jump, not like a call.
for xref in idb.analysis.get_crefs_to(self.idb, ea,
types=[idaapi.fl_JN, idaapi.fl_JF, idaapi.fl_F]):
yield xref
def _get_flow_succs(self, ea):
# this is basically CodeRefsFrom with flow=True.
# need to fixup the return types, though.
nextea = self.api.idc.NextHead(ea)
nextflags = self.api.idc.GetFlags(nextea)
if nextflags is not None and self.api.ida_bytes.isFlow(nextflags):
# instruction falls through to next insn
yield idb.analysis.Xref(ea, nextea, idaapi.fl_F)
# get all the flow xrefs from this instruction.
# a flow xref is like a fallthrough or jump, not like a call.
for xref in idb.analysis.get_crefs_from(self.idb, ea,
types=[idaapi.fl_JN, idaapi.fl_JF, idaapi.fl_F]):
yield xref
def FlowChart(self, func):
'''
Example::
f = idaapi.FlowChart(idaapi.get_func(here()))
for block in f:
if p:
print "%x - %x [%d]:" % (block.startEA, block.endEA, block.id)
for succ_block in block.succs():
if p:
print " %x - %x [%d]:" % (succ_block.startEA, succ_block.endEA, succ_block.id)
for pred_block in block.preds():
if p:
print " %x - %x [%d]:" % (pred_block.startEA, pred_block.endEA, pred_block.id)
via: https://github.com/EiNSTeiN-/idapython/blob/master/examples/ex_gdl_qflow_chart.py
'''
# i have no idea how this data is indexed in the idb.
# is it even indexed?
# therefore, let's parse the basic blocks ourselves!
# NOTE(review): this class is defined inside an enclosing method (the
# `return _FlowChart(...)` that follows it belongs to that method).
class _FlowChart:
    """Pure-python reconstruction of IDA's flow chart for one function.

    Basic blocks are not (apparently) indexed in the idb, so they are
    rebuilt here by a breadth-first walk over code xrefs starting at `ea`.
    After construction: `preds`/`succs` map block startEA -> set of startEA,
    and `bbs` maps startEA -> BasicBlock.
    """
    def __init__(self, db, api, ea):
        self.idb = db
        logger.debug('creating flowchart for %x', ea)
        # set of startEA
        seen = set([])
        # map from startEA to BasicBlock instance
        bbs_by_start = {}
        # map from endEA to BasicBlock instance
        bbs_by_end = {}
        # map from startEA to set of startEA
        preds = collections.defaultdict(lambda: set([]))
        # map from startEA to set of startEA
        succs = collections.defaultdict(lambda: set([]))
        lastInstEA = api.idaapi._find_bb_end(ea)
        logger.debug('found end. %x -> %x', ea, lastInstEA)
        block = BasicBlock(self, ea, lastInstEA, api.idc.NextHead(lastInstEA))
        bbs_by_start[ea] = block
        bbs_by_end[lastInstEA] = block
        # BFS worklist of blocks whose neighbors still need exploring
        q = [block]
        while q:
            logger.debug('iteration')
            logger.debug('queue: [%s]', ', '.join(map(str, q)))
            block = q[0]
            q = q[1:]
            logger.debug('exploring %s', block)
            if block.startEA in seen:
                logger.debug('already seen!')
                continue
            logger.debug('new!')
            seen.add(block.startEA)
            # walk predecessors: blocks whose last instruction flows here
            for xref in api.idaapi._get_flow_preds(block.startEA):
                if xref.src not in bbs_by_end:
                    pred_start = api.idaapi._find_bb_start(xref.src)
                    pred = BasicBlock(self, pred_start, xref.src, api.idc.NextHead(xref.src))
                    bbs_by_start[pred.startEA] = pred
                    bbs_by_end[pred.lastInstEA] = pred
                else:
                    pred = bbs_by_end[xref.src]
                logger.debug('pred: %s', pred)
                preds[block.startEA].add(pred.startEA)
                succs[pred.startEA].add(block.startEA)
                q.append(pred)
            # walk successors: blocks this block's last instruction flows to
            for xref in api.idaapi._get_flow_succs(block.lastInstEA):
                if xref.dst not in bbs_by_start:
                    succ_end = api.idaapi._find_bb_end(xref.dst)
                    succ = BasicBlock(self, xref.dst, succ_end, api.idc.NextHead(succ_end))
                    bbs_by_start[succ.startEA] = succ
                    bbs_by_end[succ.lastInstEA] = succ
                else:
                    succ = bbs_by_start[xref.dst]
                logger.debug('succ: %s', succ)
                succs[block.startEA].add(succ.startEA)
                preds[succ.startEA].add(block.startEA)
                q.append(succ)
        self.preds = preds
        self.succs = succs
        self.bbs = bbs_by_start
    def __iter__(self):
        # yield BasicBlock instances in dict order
        for bb in self.bbs.values():
            yield bb
return _FlowChart(self.idb, self.api, func.startEA)
def get_next_fixup_ea(self, ea):
    """Return the address of the first fixup at or after *ea*.

    Raises KeyError when no fixup exists at/after *ea*.
    """
    fixups = self.api.ida_netnode.netnode('$ fixups')
    # TODO: this is really bad algorithmically. we should cache.
    for candidate in fixups.sups(tag='S'):
        if candidate >= ea:
            return candidate
    raise KeyError(ea)
def contains_fixups(self, ea, size):
    """Return True when any fixup falls within [ea, ea + size)."""
    try:
        next_fixup = self.get_next_fixup_ea(ea)
    except KeyError:
        # no fixup at or beyond ea at all
        return False
    return next_fixup < ea + size
def getseg(self, ea):
    """Return the segment containing *ea*, or None if no segment matches."""
    segments = idb.analysis.Segments(self.idb).segments
    return next(
        (seg for seg in segments.values() if seg.startEA <= ea < seg.endEA),
        None,
    )
def get_segm_name(self, ea):
    """Return the name of the segment containing *ea* (delegates to idc.SegName)."""
    return self.api.idc.SegName(ea)
def get_segm_end(self, ea):
    """Return the end address of the segment containing *ea* (delegates to idc.SegEnd)."""
    return self.api.idc.SegEnd(ea)
def get_inf_structure(self):
    """Return the database info structure (idainfo) from the Root netnode."""
    return idb.analysis.Root(self.idb).idainfo
def get_imagebase(self):
    """Return the image base address (delegates to ida_nalt)."""
    return self.api.ida_nalt.get_imagebase()
class StringItem:
    """A string found in the database.

    Attributes:
        ea: address at which the string starts.
        length: length in characters of the decoded string.
        strtype: one of the _Strings string-type constants (C, C_16, ...).
        s: the decoded text itself.
    """
    def __init__(self, ea, length, strtype, s):
        self.ea = ea
        self.length = length
        self.strtype = strtype
        self.s = s

    def __str__(self):
        # bug fix: previously returned the bare name `s`, which raised
        # NameError — the constructor argument is stored as self.s.
        return self.s
class _Strings:
    """Pure-python analog of idautils.Strings: scans segment bytes for strings.

    Results are cached as a list of StringItem; iterate the instance or
    index it to trigger a lazy refresh().
    """
    # string-type constants (mirror IDA's STRTYPE_* values)
    C = 0x0
    C_16 = 0x1
    C_32 = 0x2
    PASCAL = 0x4
    PASCAL_16 = 0x5
    LEN2 = 0x8
    LEN2_16 = 0x9
    LEN4 = 0xC
    LEN4_16 = 0xD
    # printable-ASCII byte class embedded into the regexes below
    ASCII_BYTE = b" !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~\t"
    def __init__(self, db, api):
        self.db = db
        self.api = api
        # lazily-computed list of StringItem; None means "needs refresh"
        self.cache = None
        # scan options; mirror idautils.Strings.setup() defaults
        self.strtypes = [0]
        self.minlen = 5
        self.only_7bit = True
        self.ignore_instructions = False
        self.display_only_existing_strings = False
    def clear_cache(self):
        """Drop cached results so the next access re-scans."""
        self.cache = None
    @memoized_method()
    def get_seg_data(self, seg):
        """Return the bytes of segment *seg*, stopping at the first
        uninitialized zero byte (py2 returns str, py3 returns bytes)."""
        start = self.api.idc.SegStart(seg)
        end = self.api.idc.SegEnd(start)
        IdbByte = self.api.idc.IdbByte
        get_flags = self.api.ida_bytes.get_flags
        has_value = self.api.ida_bytes.has_value
        data = []
        for i in range(start, end):
            b = IdbByte(i)
            if b == 0:
                flags = get_flags(i)
                if not has_value(flags):
                    # reached uninitialized bytes; stop copying
                    break
            data.append(b)
        if six.PY2:
            return ''.join(map(chr, data))
        else:
            return bytes(data)
    def parse_C_strings(self, va, buf):
        """Yield a StringItem for each run of >= minlen printable ASCII bytes."""
        reg = b"([%s]{%d,})" % (_Strings.ASCII_BYTE, self.minlen)
        ascii_re = re.compile(reg)
        for match in ascii_re.finditer(buf):
            s = match.group().decode('ascii')
            yield StringItem(va + match.start(), len(s), _Strings.C, s)
    def parse_C_16_strings(self, va, buf):
        """Yield a StringItem for each UTF-16LE string of >= minlen chars."""
        reg = b"((?:[%s]\x00){%d,})" % (_Strings.ASCII_BYTE, self.minlen)
        uni_re = re.compile(reg)
        for match in uni_re.finditer(buf):
            try:
                s = match.group().decode('utf-16')
            except UnicodeDecodeError:
                continue
            else:
                yield StringItem(va + match.start(), len(s), _Strings.C_16, s)
    def parse_C_32_strings(self, va, buf):
        """Yield a StringItem for each UTF-32LE string of >= minlen chars."""
        reg = b"((?:[%s]\x00\x00\x00){%d,})" % (_Strings.ASCII_BYTE, self.minlen)
        uni_re = re.compile(reg)
        for match in uni_re.finditer(buf):
            try:
                s = match.group().decode('utf-32')
            except UnicodeDecodeError:
                continue
            else:
                yield StringItem(va + match.start(), len(s), _Strings.C_32, s)
    def parse_PASCAL_strings(self, va, buf):
        raise NotImplementedError('parse PASCAL strings')
    def parse_PASCAL_16_strings(self, va, buf):
        raise NotImplementedError('parse PASCAL_16 strings')
    def parse_LEN2_strings(self, va, buf):
        raise NotImplementedError('parse LEN2 strings')
    def parse_LEN2_16_strings(self, va, buf):
        raise NotImplementedError('parse LEN2_16 strings')
    def parse_LEN4_strings(self, va, buf):
        raise NotImplementedError('parse LEN4 strings')
    def parse_LEN4_16_strings(self, va, buf):
        raise NotImplementedError('parse LEN4_16 strings')
    def refresh(self):
        """Re-scan every segment with every parser.

        Unimplemented parsers are logged and skipped; returns (and caches)
        the resulting list of StringItem.
        """
        ret = []
        for seg in self.api.idautils.Segments():
            buf = self.get_seg_data(seg)
            for parser in (self.parse_C_strings,
                           self.parse_C_16_strings,
                           self.parse_C_32_strings,
                           self.parse_PASCAL_strings,
                           self.parse_PASCAL_16_strings,
                           self.parse_LEN2_strings,
                           self.parse_LEN2_16_strings,
                           self.parse_LEN4_strings,
                           self.parse_LEN4_16_strings):
                try:
                    # seg is the segment start address, i.e. the parser's va
                    ret.extend(list(parser(seg, buf)))
                except NotImplementedError as e:
                    logger.warning('warning: %s', e)
        self.cache = ret[:]
        return ret
    def setup(self,
              strtypes=[0],
              minlen=5,
              only_7bit=True,
              ignore_instructions=False,
              display_only_existing_strings=False):
        """Configure scan options (mirrors idautils.Strings.setup)."""
        # NOTE(review): `strtypes=[0]` is a mutable default argument — shared
        # across calls if the stored list is ever mutated; consider
        # `strtypes=None` with a default assigned in the body.
        self.strtypes = strtypes
        self.minlen = minlen
        self.only_7bit = only_7bit
        self.ignore_instructions = ignore_instructions
        self.display_only_existing_strings = display_only_existing_strings
    def __iter__(self):
        # lazy refresh on first iteration
        if self.cache is None:
            self.refresh()
        for s in self.cache:
            yield s
    def __getitem__(self, index):
        # lazy refresh on first indexed access
        if self.cache is None:
            self.refresh()
        return self.cache[index]
class idautils:
    """Emulation of the IDAPython `idautils` module over a parsed .idb."""
    def __init__(self, db, api):
        self.idb = db
        self.api = api
        self.strings = _Strings(db, api)
    def GetInputFileMD5(self):
        """Return the MD5 of the originally-analyzed input file."""
        return self.api.idc.GetInputMD5()
    def Segments(self):
        """Return a sorted list of segment start addresses."""
        return sorted(idb.analysis.Segments(self.idb).segments.keys())
    def Functions(self):
        """Return sorted start addresses of functions (tails/chunks excluded)."""
        ret = []
        for ea, func in idb.analysis.Functions(self.idb).functions.items():
            # we won't report chunks
            if is_flag_set(func.flags, func.FUNC_TAIL):
                continue
            ret.append(func.startEA)
        return list(sorted(ret))
    def CodeRefsTo(self, ea, flow):
        """Yield code-xref sources to *ea*; when *flow*, include the
        fallthrough predecessor as well."""
        if flow:
            flags = self.api.idc.GetFlags(ea)
            if flags is not None and self.api.ida_bytes.isFlow(flags):
                # prev instruction fell through to this insn
                yield self.api.idc.PrevHead(ea)
        # get all the code xrefs to this instruction.
        # a code xref is like a fallthrough or jump, not like a call.
        for xref in idb.analysis.get_crefs_to(self.idb, ea,
                                              types=[idaapi.fl_JN, idaapi.fl_JF, idaapi.fl_F]):
            yield xref.src
    def CodeRefsFrom(self, ea, flow):
        """Yield code-xref destinations from *ea*; when *flow*, include the
        fallthrough successor as well."""
        if flow:
            nextea = self.api.idc.NextHead(ea)
            nextflags = self.api.idc.GetFlags(nextea)
            if self.api.ida_bytes.isFlow(nextflags):
                # instruction falls through to next insn
                yield nextea
        # get all the code xrefs from this instruction.
        # a code xref is like a fallthrough or jump, not like a call.
        for xref in idb.analysis.get_crefs_from(self.idb, ea,
                                                types=[idaapi.fl_JN, idaapi.fl_JF, idaapi.fl_F]):
            yield xref.dst
    def Strings(self, default_setup=False):
        """Return the lazy strings container (default_setup is ignored here)."""
        return self.strings
class ida_entry:
    """Emulation of IDA's ida_entry module (exported entry points)."""
    def __init__(self, db, api):
        self.idb = db
        self.api = api
    def get_entry_qty(self):
        """Return the number of entries: exported functions plus main entries."""
        ents = idb.analysis.EntryPoints(self.idb)
        return len(ents.functions) + len(ents.main_entry)
    def get_entry_ordinal(self, index):
        """Map a 0-based *index* to an ordinal; indexes past the exported
        functions wrap into the "main entry" addresses."""
        ents = idb.analysis.EntryPoints(self.idb)
        try:
            return ents.ordinals[index + 1]
        except KeyError:
            # once we enumerate all the exports by ordinal,
            # then wrap into the "main entry".
            # not sure that there can be more than one, but we attempt to deal here.
            return sorted(ents.main_entry)[index - len(ents.functions) - 1]
    def get_entry(self, ordinal):
        """Return the address of the entry with the given *ordinal*."""
        # for the "main entry", ordinal is actually an address.
        ents = idb.analysis.EntryPoints(self.idb)
        return ents.functions[ordinal]
    def get_entry_name(self, ordinal):
        """Return the export name for *ordinal*, falling back to main-entry names."""
        ents = idb.analysis.EntryPoints(self.idb)
        try:
            return ents.function_names[ordinal]
        except KeyError:
            # for the "main entry", ordinal is actually an address.
            return ents.main_entry_name[ordinal]
    def get_entry_forwarder(self, ordinal):
        """Return the forwarder string for *ordinal*, or None if not forwarded."""
        ents = idb.analysis.EntryPoints(self.idb)
        return ents.forwarded_symbols.get(ordinal)
class ida_name:
    """Emulation of IDA's ida_name module backed by the parsed .idb."""
    def __init__(self, db, api):
        self.idb = db
        self.api = api

    def get_name(self, ea):
        """Return the name at *ea*, or '' when unnamed or unresolvable."""
        flags = self.api.ida_bytes.get_flags(ea)
        if not self.api.ida_bytes.has_name(flags):
            return ''
        try:
            return self.api.ida_netnode.netnode(ea).name()
        except KeyError:
            # address has the "named" flag but no netnode entry
            return ''
class IDAPython:
    """Facade bundling the emulated IDAPython modules for one .idb database."""
    def __init__(self, db, ScreenEA=None):
        self.idb = db
        # optional value used to emulate the "current address" (ScreenEA)
        self.ScreenEA = ScreenEA
        # each sub-module gets the db and a back-reference to this facade
        self.idc = idc(db, self)
        self.idaapi = idaapi(db, self)
        self.idautils = idautils(db, self)
        self.ida_funcs = ida_funcs(db, self)
        self.ida_bytes = ida_bytes(db, self)
        self.ida_netnode = ida_netnode(db, self)
        self.ida_nalt = ida_nalt(db, self)
        self.ida_entry = ida_entry(db, self)
        self.ida_name = ida_name(db, self)
| 29.847251
| 176
| 0.58289
|
4a09a41d00838f0fd853d6df78833b7643c38945
| 16,444
|
py
|
Python
|
python/craftassist/test/fake_agent.py
|
boldsort/craftassist
|
8058d115a250e30deb60d969b7b1a5fefd6e974c
|
[
"MIT"
] | 626
|
2019-07-18T18:40:44.000Z
|
2022-03-29T17:34:43.000Z
|
python/craftassist/test/fake_agent.py
|
boldsort/craftassist
|
8058d115a250e30deb60d969b7b1a5fefd6e974c
|
[
"MIT"
] | 42
|
2019-07-27T11:04:15.000Z
|
2021-02-23T03:15:14.000Z
|
python/craftassist/test/fake_agent.py
|
boldsort/craftassist
|
8058d115a250e30deb60d969b7b1a5fefd6e974c
|
[
"MIT"
] | 89
|
2019-07-19T15:07:39.000Z
|
2022-02-15T18:44:24.000Z
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
import numpy as np
from typing import List
from mc_util import XYZ, IDM, Block
from utils import Look, Pos, Item, Player
from base_agent.loco_mc_agent import LocoMCAgent
from base_agent.base_util import TICKS_PER_SEC
from mc_memory import MCAgentMemory
from mc_memory_nodes import VoxelObjectNode
from craftassist_agent import CraftAssistAgent
from base_agent.base_util import Time
from base_agent.nsp_dialogue_manager import NSPDialogueManager
from dialogue_objects import GetMemoryHandler, PutMemoryHandler, Interpreter
from low_level_perception import LowLevelMCPerception
import heuristic_perception
from rotation import look_vec
# how many internal, non-world-interacting steps agent takes before world steps:
WORLD_STEP = 10
WORLD_STEPS_PER_DAY = 480
class Opt:
    """Bare attribute bag used to fake the agent's command-line options."""
    pass
class FakeMCTime(Time):
    """Test clock: time advances only when the fake world steps."""
    def __init__(self, world):
        self.world = world
    def get_world_hour(self):
        # fraction of the in-game day elapsed, in [0, 1)
        return (self.world.count % WORLD_STEPS_PER_DAY) / WORLD_STEPS_PER_DAY
    # converts from "seconds" to internal tick
    def round_time(self, t):
        return int(TICKS_PER_SEC * t)
    def get_time(self):
        """Current time in ticks (world step count * TICKS_PER_SEC)."""
        return self.world.count * TICKS_PER_SEC
    def add_tick(self, ticks=1):
        """Advance time by stepping the world *ticks* times."""
        for i in range(ticks):
            self.world.step()
class FakeCPPAction:
    """Base for fake C++-side agent actions.

    Subclasses override NAME and action(); calling the instance records the
    invocation on the agent's recorder (when present) before executing it.
    """
    NAME = "NULL"
    def __init__(self, agent):
        self.agent = agent
    def action(self, *args):
        # default action: just mark that the agent touched the world
        self.agent.world_interaction_occurred = True
    def __call__(self, *args):
        # record for replay/debugging, then perform the action
        if hasattr(self.agent, "recorder"):
            self.agent.recorder.record_action({"name": self.NAME, "args": list(args)})
        return self.action(*args)
class Dig(FakeCPPAction):
    """Remove the block at (x, y, z) from the fake world."""
    NAME = "dig"

    def action(self, x, y, z):
        """Return True and record the change if something was dug, else False."""
        self.agent.world_interaction_occurred = True
        if not self.agent.world.dig((x, y, z)):
            return False
        # record the now-empty location as a changed block (air = (0, 0))
        self.agent._changed_blocks.append(((x, y, z), (0, 0)))
        return True
class SendChat(FakeCPPAction):
    """Append a chat message to the agent's outgoing-chat buffer."""
    NAME = "send_chat"
    def action(self, chat):
        self.agent.world_interaction_occurred = True
        logging.info("FakeAgent.send_chat: {}".format(chat))
        self.agent._outgoing_chats.append(chat)
class SetHeldItem(FakeCPPAction):
    """Set the agent's held item from an (id, meta) pair or a bare id."""
    NAME = "set_held_item"

    def action(self, arg):
        self.agent.world_interaction_occurred = True
        try:
            block_id, meta = arg
        except TypeError:
            # a bare (non-iterable) id: default the metadata to 0
            block_id, meta = arg, 0
        self.agent._held_item = (block_id, meta)
class StepPosX(FakeCPPAction):
    """Move the agent one block in the +x direction."""
    NAME = "step_pos_x"
    def action(self):
        self.agent.world_interaction_occurred = True
        self.agent.pos += (1, 0, 0)
class StepNegX(FakeCPPAction):
    """Move the agent one block in the -x direction."""
    NAME = "step_neg_x"
    def action(self):
        self.agent.world_interaction_occurred = True
        self.agent.pos += (-1, 0, 0)
class StepPosZ(FakeCPPAction):
    """Move the agent one block in the +z direction."""
    NAME = "step_pos_z"
    def action(self):
        self.agent.world_interaction_occurred = True
        self.agent.pos += (0, 0, 1)
class StepNegZ(FakeCPPAction):
    """Move the agent one block in the -z direction."""
    NAME = "step_neg_z"
    def action(self):
        self.agent.world_interaction_occurred = True
        self.agent.pos += (0, 0, -1)
class StepPosY(FakeCPPAction):
    """Move the agent one block in the +y (up) direction."""
    NAME = "step_pos_y"
    def action(self):
        self.agent.world_interaction_occurred = True
        self.agent.pos += (0, 1, 0)
class StepNegY(FakeCPPAction):
    """Move the agent one block in the -y (down) direction."""
    NAME = "step_neg_y"
    def action(self):
        self.agent.world_interaction_occurred = True
        self.agent.pos += (0, -1, 0)
class StepForward(FakeCPPAction):
    """Move one block along the horizontal component of the look direction."""
    NAME = "step_forward"
    def action(self):
        self.agent.world_interaction_occurred = True
        dx, dy, dz = self.agent._look_vec
        # dy is deliberately dropped: forward steps are horizontal only
        self.agent.pos += (dx, 0, dz)
class TurnAngle(FakeCPPAction):
    """Turn the agent by a right angle; only +/-90 degrees are supported."""
    NAME = "turn_angle"

    def action(self, angle):
        self.agent.world_interaction_occurred = True
        if angle == 90:
            self.agent.turn_left()
            return
        if angle == -90:
            self.agent.turn_right()
            return
        raise ValueError("bad angle={}".format(angle))
class TurnLeft(FakeCPPAction):
    """Rotate the agent's look vector 90 degrees counter-clockwise (xz-plane)."""
    NAME = "turn_left"

    def action(self):
        # Bug fix: the old code looked up (look_vec[0], look_vec[1]) — the
        # (x, y) components — in CCW_LOOK_VECS, which holds (x, z) pairs, and
        # then read new_l[2] from a 2-tuple (IndexError). It also item-assigned
        # into _look_vec, which is initialized as an immutable tuple.
        self.agent.world_interaction_occurred = True
        lv = self.agent._look_vec
        vecs = self.agent.CCW_LOOK_VECS
        idx = vecs.index((lv[0], lv[2]))
        new_xz = vecs[(idx + 1) % len(vecs)]
        # keep the vertical component, replace the horizontal ones
        self.agent._look_vec = (new_xz[0], lv[1], new_xz[1])
class TurnRight(FakeCPPAction):
    """Rotate the agent's look vector 90 degrees clockwise (xz-plane)."""
    NAME = "turn_right"

    def action(self):
        # Bug fix: mirror of TurnLeft — index the (x, z) components against
        # the (x, z) CCW table and assign a fresh tuple instead of mutating
        # an immutable _look_vec in place.
        self.agent.world_interaction_occurred = True
        lv = self.agent._look_vec
        vecs = self.agent.CCW_LOOK_VECS
        idx = vecs.index((lv[0], lv[2]))
        new_xz = vecs[(idx - 1) % len(vecs)]
        self.agent._look_vec = (new_xz[0], lv[1], new_xz[1])
class PlaceBlock(FakeCPPAction):
    """Place the currently-held block at (x, y, z) and record the change."""
    NAME = "place_block"
    def action(self, x, y, z):
        self.agent.world_interaction_occurred = True
        block = ((x, y, z), self.agent._held_item)
        self.agent.world.place_block(block)
        self.agent._changed_blocks.append(block)
        return True
class LookAt(FakeCPPAction):
    """Point the agent's look vector at a world coordinate (not implemented)."""
    NAME = "look_at"
    def action(self, x, y, z):
        raise NotImplementedError()
class SetLook(FakeCPPAction):
    """Set the agent's look direction from yaw and pitch angles."""
    NAME = "set_look"

    def action(self, yaw, pitch):
        # Bug fix: previously assigned to self._look_vec — an attribute of
        # this action object — instead of self.agent._look_vec, so the
        # agent's look direction never actually changed.
        self.agent.world_interaction_occurred = True
        a = look_vec(yaw, pitch)
        self.agent._look_vec = (a[0], a[1], a[2])
class Craft(FakeCPPAction):
    """Craft an item (not implemented in the fake agent)."""
    NAME = "craft"
    def action(self):
        raise NotImplementedError()
class FakeAgent(LocoMCAgent):
    """In-process stand-in for the CraftAssist agent, used by the test suite.

    Replaces the C++ client with FakeCPPAction instances that act on a fake
    world object, and allows logical forms to be injected directly instead
    of being produced by the NSP model.
    """
    # counter-clockwise (x, z) look directions in 90-degree steps
    CCW_LOOK_VECS = [(1, 0), (0, 1), (-1, 0), (0, -1)]
    default_frame = CraftAssistAgent.default_frame
    # NOTE(review): this duplicates default_frame; possibly intended to be
    # CraftAssistAgent.coordinate_transforms — confirm against the real agent.
    coordinate_transforms = CraftAssistAgent.default_frame
    def __init__(self, world, opts=None, do_heuristic_perception=False):
        self.world = world
        # number of world chat messages already consumed (see get_incoming_chats)
        self.chat_count = 0
        if not opts:
            # minimal Opt bag: disable all model/ground-truth loading
            opts = Opt()
            opts.nsp_model_dir = None
            opts.nsp_data_dir = None
            opts.nsp_embedding_path = None
            opts.model_base_path = None
            opts.QA_nsp_model_path = None
            opts.ground_truth_data_dir = ""
            opts.web_app = False
            opts.no_ground_truth = True
        super(FakeAgent, self).__init__(opts)
        self.do_heuristic_perception = do_heuristic_perception
        self.no_default_behavior = True
        self.last_task_memid = None
        pos = (0, 63, 0)
        if hasattr(self.world, "agent_data"):
            pos = self.world.agent_data["pos"]
        self.pos = np.array(pos, dtype="int")
        # when set via set_logical_form(), controller_step() consumes it
        self.logical_form = None
        self.world_interaction_occurred = False
        self._held_item: IDM = (0, 0)
        self._look_vec = (1, 0, 0)
        self._changed_blocks: List[Block] = []
        self._outgoing_chats: List[str] = []
        CraftAssistAgent.add_self_memory_node(self)
    def init_perception(self):
        """Install low-level and heuristic perception modules (no geoscorer)."""
        self.geoscorer = None
        self.perception_modules = {}
        self.perception_modules["low_level"] = LowLevelMCPerception(self, perceive_freq=1)
        self.perception_modules["heuristic"] = heuristic_perception.PerceptionWrapper(self)
    def init_physical_interfaces(self):
        """Wire up the fake C++-side actions in place of the real client."""
        self.dig = Dig(self)
        self.send_chat = SendChat(self)
        self.set_held_item = SetHeldItem(self)
        self.step_pos_x = StepPosX(self)
        self.step_neg_x = StepNegX(self)
        self.step_pos_z = StepPosZ(self)
        self.step_neg_z = StepNegZ(self)
        self.step_pos_y = StepPosY(self)
        self.step_neg_y = StepNegY(self)
        self.step_forward = StepForward(self)
        self.turn_angle = TurnAngle(self)
        self.turn_left = TurnLeft(self)
        self.turn_right = TurnRight(self)
        self.set_look = SetLook(self)
        self.place_block = PlaceBlock(self)
    def init_memory(self):
        """Create agent memory driven by the fake world clock."""
        T = FakeMCTime(self.world)
        self.memory = MCAgentMemory(load_minecraft_specs=False, agent_time=T)
    def init_controller(self):
        """Install the standard dialogue-object classes and NSP manager."""
        dialogue_object_classes = {}
        dialogue_object_classes["interpreter"] = Interpreter
        dialogue_object_classes["get_memory"] = GetMemoryHandler
        dialogue_object_classes["put_memory"] = PutMemoryHandler
        self.dialogue_manager = NSPDialogueManager(self, dialogue_object_classes, self.opts)
    def set_logical_form(self, lf, chatstr, speaker):
        """Inject a logical form to be handled on the next controller step."""
        self.logical_form = {"logical_form": lf, "chatstr": chatstr, "speaker": speaker}
    def step(self):
        # step the world every WORLD_STEP internal steps, or immediately
        # after the agent interacted with it
        if hasattr(self.world, "step"):
            if self.world_interaction_occurred or self.count % WORLD_STEP == 0:
                self.world.step()
                self.world_interaction_occurred = False
        if hasattr(self, "recorder"):
            self.recorder.record_world()
        super().step()
    #### use the CraftassistAgent.controller_step()
    def controller_step(self):
        if self.logical_form is None:
            pass
            CraftAssistAgent.controller_step(self)
        else:  # logical form given directly:
            # clear the chat buffer
            self.get_incoming_chats()
            # use the logical form as given...
            d = self.logical_form["logical_form"]
            chatstr = self.logical_form["chatstr"]
            speaker_name = self.logical_form["speaker"]
            self.memory.add_chat(self.memory.get_player_by_name(speaker_name).memid, chatstr)
            # force to get objects, speaker info
            self.perceive(force=True)
            obj = self.dialogue_manager.handle_logical_form(speaker_name, d, chatstr)
            if obj is not None:
                self.dialogue_manager.dialogue_stack.append(obj)
            # consume the injected form so it is handled only once
            self.logical_form = None
    def setup_test(self):
        """Reset the per-test task step counter."""
        self.task_steps_count = 0
    def clear_outgoing_chats(self):
        """Drop all buffered outgoing chat messages."""
        self._outgoing_chats.clear()
    def get_last_outgoing_chat(self):
        """Return the most recent outgoing chat, or None if there is none."""
        try:
            return self._outgoing_chats[-1]
        except IndexError:
            return None
    ########################
    ##  FAKE .PY METHODS  ##
    ########################
    def task_step(self):
        # delegate to the real agent's task stepping without sleeping
        CraftAssistAgent.task_step(self, sleep_time=0)
    def point_at(*args):
        # NOTE(review): missing explicit `self`; works because *args absorbs
        # it, but this silently ignores all pointing requests.
        pass
    def perceive(self, force=False):
        """Run low-level perception (and heuristic perception when enabled)."""
        self.perception_modules["low_level"].perceive(force=force)
        if self.do_heuristic_perception:
            self.perception_modules["heuristic"].perceive()
    ###################################
    ##  FAKE C++ PERCEPTION METHODS  ##
    ###################################
    def get_blocks(self, xa, xb, ya, yb, za, zb):
        """Return the world's blocks in the inclusive box [xa..xb, ya..yb, za..zb]."""
        return self.world.get_blocks(xa, xb, ya, yb, za, zb)
    def get_local_blocks(self, r):
        """Return blocks within radius *r* (cube) of the agent's position."""
        x, y, z = self.pos
        return self.get_blocks(x - r, x + r, y - r, y + r, z - r, z + r)
    def get_incoming_chats(self):
        """Return (and consume) chats added to the world log since last call."""
        c = self.chat_count
        self.chat_count = len(self.world.chat_log)
        return self.world.chat_log[c:].copy()
    def get_player(self):
        """Return this agent as a Player struct."""
        return Player(1, "fake_agent", Pos(*self.pos), self.get_look(), Item(*self._held_item))
    def get_mobs(self):
        return self.world.get_mobs()
    def get_item_stacks(self):
        return self.world.get_item_stacks()
    def get_other_players(self):
        return self.world.players.copy()
    def get_other_player_by_name(self):
        raise NotImplementedError()
    def get_vision(self):
        raise NotImplementedError()
    def get_line_of_sight(self):
        raise NotImplementedError()
    def get_look(self):
        """Convert the internal look vector to a (pitch, yaw) Look struct."""
        pitch = -np.rad2deg(np.arcsin(self._look_vec[1]))
        yaw = -np.rad2deg(np.arctan2(self._look_vec[0], self._look_vec[2]))
        return Look(pitch, yaw)
    def get_player_line_of_sight(self, player_struct):
        """Return the Pos the given player is looking at, or None.

        Requires the fake world to implement get_line_of_sight.
        """
        if hasattr(self.world, "get_line_of_sight"):
            pos = (player_struct.pos.x, player_struct.pos.y, player_struct.pos.z)
            pitch = player_struct.look.pitch
            yaw = player_struct.look.yaw
            xsect = self.world.get_line_of_sight(pos, yaw, pitch)
            if xsect is not None:
                return Pos(*xsect)
        else:
            raise NotImplementedError()
    def get_changed_blocks(self) -> List[Block]:
        # need a better solution here
        r = self._changed_blocks.copy()
        self._changed_blocks.clear()
        return r
    def safe_get_changed_blocks(self) -> List[Block]:
        return self.get_changed_blocks()
    ######################################
    ## World setup
    ######################################
    def set_blocks(self, xyzbms: List[Block], origin: XYZ = (0, 0, 0)):
        """Change the state of the world, block by block,
        store in memory"""
        for xyz, idm in xyzbms:
            abs_xyz = tuple(np.array(xyz) + origin)
            self.perception_modules["low_level"].pending_agent_placed_blocks.add(abs_xyz)
            # TODO add force option so we don't need to make it as if agent placed
            self.perception_modules["low_level"].on_block_changed(abs_xyz, idm)
            self.world.place_block((abs_xyz, idm))
    def add_object(
        self, xyzbms: List[Block], origin: XYZ = (0, 0, 0), relations={}
    ) -> VoxelObjectNode:
        """Add an object to memory as if it was placed block by block
        Args:
        - xyzbms: a list of relative (xyz, idm)
        - origin: (x, y, z) of the corner
        Returns an VoxelObjectNode
        """
        self.set_blocks(xyzbms, origin)
        abs_xyz = tuple(np.array(xyzbms[0][0]) + origin)
        memid = self.memory.get_block_object_ids_by_xyz(abs_xyz)[0]
        for pred, obj in relations.items():
            self.memory.add_triple(subj=memid, pred_text=pred, obj_text=obj)
            # sooooorrry FIXME? when we handle triples better in interpreter_helper
            if "has_" in pred:
                self.memory.tag(memid, obj)
        return self.memory.get_object_by_id(memid)
    ######################################
    ## visualization
    ######################################
    def draw_slice(self, h=None, r=5, c=None):
        """Render an ASCII map of the horizontal slice at height *h* (default:
        the agent's height), radius *r*, centered at *c* (default: agent)."""
        if not h:
            h = self.pos[1]
        if c:
            c = [c[0], h, c[1]]
        else:
            c = [self.pos[0], h, self.pos[2]]
        C = self.world.to_world_coords(c)
        A = self.world.to_world_coords(self.pos)
        shifted_agent_pos = [A[0] - C[0] + r, A[2] - C[2] + r]
        npy = self.world.get_blocks(
            c[0] - r, c[0] + r, c[1], c[1], c[2] - r, c[2] + r, transpose=False
        )
        npy = npy[:, 0, :, 0]
        try:
            # mark the agent's cell with a sentinel value
            npy[shifted_agent_pos[0], shifted_agent_pos[1]] = 1024
        except:
            pass
        # negative sentinel values per mob type
        mobnums = {"rabbit": -1, "cow": -2, "pig": -3, "chicken": -4, "sheep": -5}
        nummobs = {-1: "rabbit", -2: "cow", -3: "pig", -4: "chicken", -5: "sheep"}
        for mob in self.world.mobs:
            # todo only in the plane?
            p = np.round(np.array(self.world.to_world_coords(mob.pos)))
            p = p - C
            try:
                npy[p[0] + r, p[1] + r] = mobnums[mob.mobname]
            except:
                pass
        mapslice = ""
        height = npy.shape[0]
        width = npy.shape[1]
        # helpers translating array indexes back to world x / z coordinates
        def xs(x):
            return x + int(self.pos[0]) - r
        def zs(z):
            return z + int(self.pos[2]) - r
        mapslice = mapslice + " " * (width + 2) * 3 + "\n"
        for i in reversed(range(height)):
            mapslice = mapslice + str(xs(i)).center(3)
            for j in range(width):
                if npy[i, j] > 0:
                    if npy[i, j] == 1024:
                        mapslice = mapslice + " A "
                    else:
                        mapslice = mapslice + str(npy[i, j]).center(3)
                elif npy[i, j] == 0:
                    mapslice = mapslice + " * "
                else:
                    # NOTE(review): this looks like a bug — it assigns a string
                    # into the numpy array; presumably intended
                    # `mapslice = mapslice + ...`. Confirm before relying on
                    # mob rendering.
                    npy[i, j] = mapslice + " " + nummobs[npy[i, j]][0] + " "
            mapslice = mapslice + "\n"
        mapslice = mapslice + " "
        for j in range(width):
            mapslice = mapslice + " * "
        mapslice = mapslice + "\n"
        mapslice = mapslice + " "
        for j in range(width):
            mapslice = mapslice + str(zs(j)).center(3)
        return mapslice
| 31.684008
| 95
| 0.59803
|
4a09a4300d42478f62d25067130d23f982222301
| 1,389
|
py
|
Python
|
examples/ex_toy.py
|
tonyduan/glm-lib
|
2afcbb3b22a5c9377735511d8d0b09eed7fe722c
|
[
"MIT"
] | 2
|
2020-01-29T08:56:50.000Z
|
2020-02-03T13:06:17.000Z
|
examples/ex_toy.py
|
tonyduan/glm-lib
|
2afcbb3b22a5c9377735511d8d0b09eed7fe722c
|
[
"MIT"
] | null | null | null |
examples/ex_toy.py
|
tonyduan/glm-lib
|
2afcbb3b22a5c9377735511d8d0b09eed7fe722c
|
[
"MIT"
] | null | null | null |
import numpy as np
from argparse import ArgumentParser
from sklearn.datasets import load_breast_cancer, load_iris, load_boston, load_wine
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, roc_auc_score
from glm_lib.distns import Bernoulli, Gaussian, Categorical
from glm_lib.models import GLM
if __name__ == "__main__":

    np.random.seed(123)

    argparser = ArgumentParser()
    argparser.add_argument("--lr", type=float, default=0.1)
    argparser.add_argument("--verbose", action="store_true")
    args = argparser.parse_args()

    # -- logistic regression (Bernoulli GLM) on the breast-cancer dataset.
    # fix: passing True positionally for return_X_y was deprecated in
    # scikit-learn 0.23 and removed in 1.0; use the keyword form.
    x, y = load_breast_cancer(return_X_y=True)
    x = np.c_[x, np.ones(len(x))]  # append an intercept column
    x_tr, x_te, y_tr, y_te = train_test_split(x, y)
    glm = GLM(Bernoulli, args.lr, verbose=args.verbose)
    glm.fit(x_tr, y_tr)
    print(f"ROC: {roc_auc_score(y_te, glm.predict(x_te).mean())}")

    # -- linear regression (Gaussian GLM) on the boston housing dataset
    x, y = load_boston(return_X_y=True)
    x = np.c_[x, np.ones(len(x))]
    x_tr, x_te, y_tr, y_te = train_test_split(x, y)
    glm = GLM(Gaussian, args.lr, verbose=args.verbose)
    glm.fit(x_tr, y_tr)
    print(f"R2: {r2_score(y_te, glm.predict(x_te).mean())}")

    # -- multiclass classification (Categorical GLM) on the iris dataset
    x, y = load_iris(return_X_y=True)
    x = np.c_[x, np.ones(len(x))]
    x_tr, x_te, y_tr, y_te = train_test_split(x, y)
    glm = GLM(Categorical, args.lr, verbose=args.verbose)
    glm.fit(x_tr, y_tr)
    print(f"Acc: {np.mean(np.argmax(glm.predict(x_te).mean(), axis=1) == y_te)}")
| 29.553191
| 82
| 0.684665
|
4a09a4beb2668ee8ccb9c0cf498e4140b2593e15
| 881
|
py
|
Python
|
libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_telemetry_client.py
|
Fl4v/botbuilder-python
|
4003d713beb8fb986a01cfd11632eabc65858618
|
[
"MIT"
] | 388
|
2019-05-07T15:53:21.000Z
|
2022-03-28T20:29:46.000Z
|
libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_telemetry_client.py
|
Fl4v/botbuilder-python
|
4003d713beb8fb986a01cfd11632eabc65858618
|
[
"MIT"
] | 1,286
|
2019-05-07T23:38:19.000Z
|
2022-03-31T10:44:16.000Z
|
libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_telemetry_client.py
|
Fl4v/botbuilder-python
|
4003d713beb8fb986a01cfd11632eabc65858618
|
[
"MIT"
] | 168
|
2019-05-14T20:23:25.000Z
|
2022-03-16T06:49:14.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC, abstractmethod
from typing import Dict
from botbuilder.core import BotTelemetryClient, TurnContext
from .qnamaker_options import QnAMakerOptions
class QnAMakerTelemetryClient(ABC):
    """Abstract base for QnA Maker clients that emit Bot Framework telemetry."""

    def __init__(
        self, log_personal_information: bool, telemetry_client: BotTelemetryClient
    ):
        # bug fix: a stray trailing comma previously stored a 1-tuple
        # (log_personal_information,) instead of the bool itself, so truthiness
        # checks on this flag were always True.
        self.log_personal_information = log_personal_information
        self.telemetry_client = telemetry_client

    @abstractmethod
    def get_answers(
        self,
        context: TurnContext,
        options: QnAMakerOptions = None,
        telemetry_properties: Dict[str, str] = None,
        telemetry_metrics: Dict[str, float] = None,
    ):
        """Generate answers for the turn; implementations also log telemetry."""
        raise NotImplementedError(
            "QnAMakerTelemetryClient.get_answers(): is not implemented."
        )
| 31.464286
| 82
| 0.715096
|
4a09a5764487b5a59045966b271eeed0e3369cbe
| 5,409
|
py
|
Python
|
lib/mnemonic_utils.py
|
bluzelle/blzpy
|
9b3084673913992375a0ae18270cb8edd8165352
|
[
"MIT"
] | 1
|
2021-02-03T11:38:50.000Z
|
2021-02-03T11:38:50.000Z
|
lib/mnemonic_utils.py
|
bluzelle/blzpy
|
9b3084673913992375a0ae18270cb8edd8165352
|
[
"MIT"
] | null | null | null |
lib/mnemonic_utils.py
|
bluzelle/blzpy
|
9b3084673913992375a0ae18270cb8edd8165352
|
[
"MIT"
] | 2
|
2020-07-11T22:00:58.000Z
|
2021-11-20T06:05:49.000Z
|
#!/usr/bin/env python3
# https://raw.githubusercontent.com/vergl4s/ethereum-mnemonic-utils/master/mnemonic_utils.py
import binascii
import hashlib
import hmac
import struct
from base58 import b58encode_check
from ecdsa.curves import SECP256k1
BIP39_PBKDF2_ROUNDS = 2048
BIP39_SALT_MODIFIER = "mnemonic"
BIP32_PRIVDEV = 0x80000000
BIP32_CURVE = SECP256k1
BIP32_SEED_MODIFIER = b'Bitcoin seed'
LEDGER_ETH_DERIVATION_PATH = "m/44'/60'/0'/0"
def mnemonic_to_bip39seed(mnemonic, passphrase):
    """Derive the 64-byte BIP39 seed from a mnemonic sentence and passphrase.

    Logic adapted from https://github.com/trezor/python-mnemonic.
    """
    password = mnemonic.encode('utf8')
    salt = (BIP39_SALT_MODIFIER + passphrase).encode('utf8')
    return hashlib.pbkdf2_hmac('sha512', password, salt, BIP39_PBKDF2_ROUNDS)
def bip39seed_to_bip32masternode(seed):
    """Derive the BIP32 master node (private key, chain code) from a BIP39 seed.

    Logic adapted from
    https://github.com/satoshilabs/slips/blob/master/slip-0010/testvectors.py.

    Returns a (key, chain_code) pair of 32-byte strings.
    """
    # fix: removed the dead local `k = seed`, which was never used
    h = hmac.new(BIP32_SEED_MODIFIER, seed, hashlib.sha512).digest()
    key, chain_code = h[:32], h[32:]
    return key, chain_code
def derive_public_key(private_key):
    """ Public key from a private key.
    Logic adapted from https://github.com/satoshilabs/slips/blob/master/slip-0010/testvectors.py. """
    # point multiplication: Q = d * G on secp256k1
    Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator
    xstr = Q.x().to_bytes(32, byteorder='big')
    # SEC1 compressed encoding: prefix 0x02/0x03 by the parity of y, then x
    parity = Q.y() & 1
    return (2 + parity).to_bytes(1, byteorder='big') + xstr
def derive_bip32childkey(parent_key, parent_chain_code, i):
    """ Derives a child key from an existing key, i is current derivation parameter.
    Logic adapted from https://github.com/satoshilabs/slips/blob/master/slip-0010/testvectors.py. """
    assert len(parent_key) == 32
    assert len(parent_chain_code) == 32
    # the parent chain code is the HMAC key
    k = parent_chain_code
    if (i & BIP32_PRIVDEV) != 0:
        # hardened derivation: HMAC over 0x00 || parent_private_key
        key = b'\x00' + parent_key
    else:
        # normal derivation: HMAC over the compressed parent public key
        key = derive_public_key(parent_key)
    d = key + struct.pack('>L', i)
    while True:
        h = hmac.new(k, d, hashlib.sha512).digest()
        key, chain_code = h[:32], h[32:]
        a = int.from_bytes(key, byteorder='big')
        b = int.from_bytes(parent_key, byteorder='big')
        # child key = (IL + parent) mod n
        key = (a + b) % BIP32_CURVE.order
        if a < BIP32_CURVE.order and key != 0:
            key = key.to_bytes(32, byteorder='big')
            break
        # invalid candidate (vanishingly rare): retry per the SLIP-0010 rule
        d = b'\x01' + h[32:] + struct.pack('>L', i)
    return key, chain_code
def fingerprint(public_key):
    """ BIP32 fingerprint formula, used to get b58 serialized key. """
    # first 4 bytes of RIPEMD160(SHA256(public_key))
    sha = hashlib.sha256(public_key).digest()
    ripemd = hashlib.new('ripemd160', sha).digest()
    return ripemd[:4]
def b58xprv(parent_fingerprint, private_key, chain, depth, childnr):
    """ Private key b58 serialization format. """
    payload = b''.join([
        b'\x04\x88\xad\xe4',                    # xprv version bytes
        bytes(chr(depth), 'utf-8'),
        parent_fingerprint,
        childnr.to_bytes(4, byteorder='big'),
        chain,
        b'\x00',                                # private keys are 0x00-padded
        private_key,
    ])
    return b58encode_check(payload)
def b58xpub(parent_fingerprint, public_key, chain, depth, childnr):
    """ Public key b58 serialization format. """
    payload = b''.join([
        b'\x04\x88\xb2\x1e',                    # xpub version bytes
        bytes(chr(depth), 'utf-8'),
        parent_fingerprint,
        childnr.to_bytes(4, byteorder='big'),
        chain,
        public_key,
    ])
    return b58encode_check(payload)
def parse_derivation_path(str_derivation_path):
    """Parse a derivation path such as "m/44'/60/0'/0" into a list of child
    indices; elements ending in "'" get the hardened bit (BIP32_PRIVDEV) set.

    Raises ValueError when the path does not start with "m/".
    """
    if str_derivation_path[0:2] != 'm/':
        raise ValueError("Can't recognize derivation path. It should look like \"m/44'/60/0'/0\".")
    path = []
    # fix: the old code used .lstrip('m/'), which strips a *character set*
    # (all leading 'm' and '/' chars), not the "m/" prefix; slice it off
    # instead, which is exact.
    for element in str_derivation_path[2:].split('/'):
        if "'" in element:
            path.append(BIP32_PRIVDEV + int(element[:-1]))
        else:
            path.append(int(element))
    return path
def mnemonic_to_private_key(mnemonic, str_derivation_path=LEDGER_ETH_DERIVATION_PATH, passphrase=""):
    """ Performs all convertions to get a private key from a mnemonic sentence, including:
    BIP39 mnemonic to seed
    BIP32 seed to master key
    BIP32 child derivation of a path provided
    Parameters:
    mnemonic -- seed wordlist, usually with 24 words, that is used for ledger wallet backup
    str_derivation_path -- string that directs BIP32 key derivation, defaults to path
    used by ledger ETH wallet
    passphrase -- optional BIP39 passphrase mixed into the seed
    Returns the 32-byte derived private key.
    """
    derivation_path = parse_derivation_path(str_derivation_path)
    bip39seed = mnemonic_to_bip39seed(mnemonic, passphrase)
    master_private_key, master_chain_code = bip39seed_to_bip32masternode(bip39seed)
    private_key, chain_code = master_private_key, master_chain_code
    # walk the derivation path one child index at a time
    for i in derivation_path:
        private_key, chain_code = derive_bip32childkey(private_key, chain_code, i)
    return private_key
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        print("# Missing filename that includes your mnemonic key")
        print("# Usage: {} <mnemonic-filename>".format(sys.argv[0]))
    else:
        # read the mnemonic file, joining lines and trimming whitespace
        with open(sys.argv[1], 'r') as f:
            mnemonic = ' '.join(f.read().split('\n')).rstrip().lstrip()
        private_key = mnemonic_to_private_key(mnemonic)
        print("# Your private key is: {}".format(str(binascii.hexlify(private_key), 'utf-8')))
| 32.781818
| 105
| 0.660935
|
4a09a577fbe51441490bbbbdd0e96624cc679309
| 1,197
|
py
|
Python
|
nr.pylang.bundle/src/nr/pylang/bundle/nativedeps/_base.py
|
NiklasRosenstein/nr-python
|
dc5b31ae5773ea4522a6f35112792dde9e872bef
|
[
"MIT"
] | 3
|
2018-11-20T22:19:35.000Z
|
2020-10-31T09:23:53.000Z
|
nr.pylang.bundle/src/nr/pylang/bundle/nativedeps/_base.py
|
NiklasRosenstein/python-nr
|
dc5b31ae5773ea4522a6f35112792dde9e872bef
|
[
"MIT"
] | 3
|
2021-08-09T00:14:26.000Z
|
2021-08-09T00:28:27.000Z
|
nr.pylang.bundle/src/nr/pylang/bundle/nativedeps/_base.py
|
NiklasRosenstein/nr-python
|
dc5b31ae5773ea4522a6f35112792dde9e872bef
|
[
"MIT"
] | 3
|
2019-03-22T06:15:17.000Z
|
2020-10-31T09:23:53.000Z
|
# Copyright (c) 2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from dataclasses import dataclass
@dataclass
class Dependency:
    """A native (shared-library) dependency referenced by a binary."""
    # library name as referenced by the binary
    name: str
    # resolved path on disk; None while unresolved
    filename: str = None
| 42.75
| 78
| 0.778613
|
4a09a7eed506cced70822a07faa8ca267de876d6
| 3,198
|
py
|
Python
|
examples/scripts/misc/regression.py
|
vishalbelsare/sporco
|
afc3dae3ab81d84a23e8487812670ecb7457e869
|
[
"BSD-3-Clause"
] | 217
|
2016-06-13T16:41:26.000Z
|
2022-03-22T06:31:05.000Z
|
examples/scripts/misc/regression.py
|
vishalbelsare/sporco
|
afc3dae3ab81d84a23e8487812670ecb7457e869
|
[
"BSD-3-Clause"
] | 21
|
2016-06-13T23:28:35.000Z
|
2022-02-17T23:20:01.000Z
|
examples/scripts/misc/regression.py
|
vishalbelsare/sporco
|
afc3dae3ab81d84a23e8487812670ecb7457e869
|
[
"BSD-3-Clause"
] | 47
|
2016-12-14T13:08:33.000Z
|
2021-12-12T01:59:49.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Linear Regression
=================
This example demonstrates the use of classes :func:`.interp.lstabsdev` and :func:`.interp.lstmaxdev` for linear regression.
"""
from __future__ import print_function
from builtins import input
import numpy as np
from sporco.interp import lstabsdev, lstmaxdev
from sporco import plot
"""
Generate reference data
$$\mathbf{y}_0 = \left( \begin{array}{cc} \mathbf{x} & \mathbf{1} \end{array} \right) \left( \begin{array}{c} p_0 \\ p_1 \end{array} \right)$$
where $\mathbf{1}$ is a column vector of unit entries, $\mathbf{x}$ is the independent variable, $\mathbf{y}_0$ is the dependent variable, and $p_0$ and $p_1$ are the slope and offset respectively of the linear relationship between $\mathbf{y}_0$ and $\mathbf{x}$.
"""
# Independent variable and design matrix [x, 1]: column of ones provides the
# intercept term so that A @ p == p[0]*x + p[1].
x = np.linspace(-5, 5, 100).astype(np.float32)
A = np.vstack([x, np.ones(len(x))]).T
p = np.array([0.6, 0.2])  # ground-truth (slope, offset)
y0 = np.dot(A, p)
"""
Construct test data by corrupting the reference data with noise with Gaussian, Laplacian, and uniform distributions.
"""
# Fixed seed keeps the example reproducible across runs.
np.random.seed(123)
yg = y0 + 2.0 * np.random.randn(*y0.shape)
yl = y0 + np.random.laplace(scale=1.5, size=y0.shape)
yu = y0 + np.random.uniform(low=-4.0, high=4.0, size=y0.shape)
"""
Compute least squares solutions for all three test signals.
"""
# lstsq returns (solution, residuals, rank, singular values); [0] keeps the
# fitted (slope, offset) pair only.
ygsqr = np.linalg.lstsq(A, yg, rcond=None)[0]
ylsqr = np.linalg.lstsq(A, yl, rcond=None)[0]
yusqr = np.linalg.lstsq(A, yu, rcond=None)[0]
"""
Compute least absolute deviations solutions for all three test signals.
"""
ygabs = lstabsdev(A, yg)
ylabs = lstabsdev(A, yl)
yuabs = lstabsdev(A, yu)
"""
Compute least maximum deviation solutions for all three test signals.
"""
ygmax = lstmaxdev(A, yg)
ylmax = lstmaxdev(A, yl)
yumax = lstmaxdev(A, yu)
"""
Compare performance of each regression method on each test signal. Relative performance is as expected considering that least squares, least absolute deviations, and least maximum deviation are the maximum likelihood (ML) estimators for Gaussian, Laplacian, and uniform noise respectively.
"""
# Rows of the plot grid index the fitting method, columns index the noise type.
ynoise = (yg, yl, yu)
noise = ('Gaussian', 'Laplacian', 'Uniform')
method = ('least squares', 'least absolute deviations',
          'least maximum deviation')
solution = ((ygsqr, ylsqr, yusqr),
            (ygabs, ylabs, yuabs),
            (ygmax, ylmax, yumax))
fig, ax = plot.subplots(nrows=3, ncols=3, sharex=True, sharey=True,
                        figsize=(22, 20))
for row in range(3):
    for col in range(3):
        # Each solution is a (slope, intercept) pair for the fitted line.
        m, c = solution[row][col]
        # Three overlaid plots per panel: reference line, noisy samples,
        # fitted line (the last call supplies title and legend).
        plot.plot(y0, x, xlbl='x', ylbl='y', lw=4, ax=ax[row][col], fig=fig)
        plot.plot(ynoise[col], x, lw=0, ms=5.0, marker='o', ax=ax[row][col],
                  fig=fig)
        plot.plot(m * x + c, x, lw=2, ls='--',
                  title='%s noise, %s fit' % (noise[col], method[row]),
                  lgnd=('Reference line', 'Noisy data', 'Fitted line'),
                  ax=ax[row][col], fig=fig)
        ax[row][col].set_ylim(-4, 4)
fig.show()
# Wait for enter on keyboard
input()
| 30.75
| 289
| 0.654472
|
4a09a80ae8b9be3b671f9d925c87cdc555886351
| 2,714
|
py
|
Python
|
src/modules/critics/coma.py
|
yuchen-x/pymarl
|
d918408c7b190dd9c3c0a01c28afd992680241a7
|
[
"Apache-2.0"
] | null | null | null |
src/modules/critics/coma.py
|
yuchen-x/pymarl
|
d918408c7b190dd9c3c0a01c28afd992680241a7
|
[
"Apache-2.0"
] | null | null | null |
src/modules/critics/coma.py
|
yuchen-x/pymarl
|
d918408c7b190dd9c3c0a01c28afd992680241a7
|
[
"Apache-2.0"
] | null | null | null |
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class COMACritic(nn.Module):
    """Centralised COMA critic: a 3-layer MLP that maps per-agent joint
    observations to Q-values over that agent's actions (output_type "q").
    """

    def __init__(self, scheme, args):
        """Build the critic.

        Args:
            scheme: mapping of field name -> spec dict; "vshape" entries
                are used to size the input layer (see _get_input_shape).
            args: config namespace providing n_actions and n_agents.
        """
        super(COMACritic, self).__init__()
        self.args = args
        self.n_actions = args.n_actions
        self.n_agents = args.n_agents
        input_shape = self._get_input_shape(scheme)
        self.output_type = "q"
        # Set up network layers
        self.fc1 = nn.Linear(input_shape, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, self.n_actions)

    def forward(self, batch, t=None):
        """Return Q-values for each agent/action; `t` restricts to one
        timestep (None processes the whole episode)."""
        inputs = self._build_inputs(batch, t=t)
        x = F.leaky_relu(self.fc1(inputs))
        x = F.leaky_relu(self.fc2(x))
        q = self.fc3(x)
        return q

    def _build_inputs(self, batch, t=None):
        """Assemble the critic input tensor of shape
        (batch, timesteps, n_agents, input_shape) by concatenating state,
        observations, masked joint actions, last actions and agent ids.
        """
        bs = batch.batch_size
        # When t is given, only that single timestep is sliced out.
        max_t = batch.max_seq_length if t is None else 1
        ts = slice(None) if t is None else slice(t, t+1)
        inputs = []
        # state
        inputs.append(batch["state"][:, ts].unsqueeze(2).repeat(1, 1, self.n_agents, 1))
        # observation
        inputs.append(batch["obs"][:, ts])
        # actions (masked out by agent)
        # Joint one-hot actions are repeated per agent, then each agent's own
        # action slot is zeroed via (1 - eye) so agent i only sees others'
        # actions — the COMA counterfactual input.
        actions = batch["actions_onehot"][:, ts].view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1)
        agent_mask = (1 - th.eye(self.n_agents, device=batch.device))
        agent_mask = agent_mask.view(-1, 1).repeat(1, self.n_actions).view(self.n_agents, -1)
        inputs.append(actions * agent_mask.unsqueeze(0).unsqueeze(0))
        # last actions
        # t == 0: no previous step, use zeros; integer t: previous step's
        # actions; t is None: whole episode, shifted right by one with a
        # leading zero step.
        if t == 0:
            inputs.append(th.zeros_like(batch["actions_onehot"][:, 0:1]).view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1))
        elif isinstance(t, int):
            inputs.append(batch["actions_onehot"][:, slice(t-1, t)].view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1))
        else:
            last_actions = th.cat([th.zeros_like(batch["actions_onehot"][:, 0:1]), batch["actions_onehot"][:, :-1]], dim=1)
            last_actions = last_actions.view(bs, max_t, 1, -1).repeat(1, 1, self.n_agents, 1)
            inputs.append(last_actions)
        # One-hot agent id channel.
        inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).unsqueeze(0).expand(bs, max_t, -1, -1))
        inputs = th.cat([x.reshape(bs, max_t, self.n_agents, -1) for x in inputs], dim=-1)
        return inputs

    def _get_input_shape(self, scheme):
        """Size of the concatenated per-agent input feature vector;
        must mirror the concatenation order in _build_inputs."""
        # state
        input_shape = scheme["state"]["vshape"]
        # observation
        input_shape += scheme["obs"]["vshape"]
        # actions and last actions
        input_shape += scheme["actions_onehot"]["vshape"][0] * self.n_agents * 2
        # agent id
        input_shape += self.n_agents
        return input_shape
| 38.225352
| 127
| 0.600958
|
4a09a8e8550c3eae8086c469d02b67cc2b468dc1
| 3,613
|
py
|
Python
|
blog/mixins.py
|
ThePokerFaCcCe/myblog
|
9b24f381148b7f3262dd59e320f5e1600d1af68f
|
[
"MIT"
] | 4
|
2021-11-24T21:48:29.000Z
|
2021-12-07T00:44:44.000Z
|
blog/mixins.py
|
ThePokerFaCcCe/myblog
|
9b24f381148b7f3262dd59e320f5e1600d1af68f
|
[
"MIT"
] | null | null | null |
blog/mixins.py
|
ThePokerFaCcCe/myblog
|
9b24f381148b7f3262dd59e320f5e1600d1af68f
|
[
"MIT"
] | null | null | null |
from django_filters.rest_framework.backends import DjangoFilterBackend
from django.db.models import Q
from rest_framework.generics import get_object_or_404
from rest_framework.mixins import DestroyModelMixin, RetrieveModelMixin, UpdateModelMixin
from rest_framework.parsers import MultiPartParser, JSONParser
from rest_framework.viewsets import GenericViewSet
from blog.models import Category, Post, SpecialForChoices
from blog.serializers import CategorySerializer, PostSerializer
from core.permissions import IsAdmin, IsAuthor, IsOwnerOfItem, IsReadOnly
from core.mixins import DeletePicMixin
class RUDWithFilterMixin:
    """Detail-view mixin that resolves its single object through the
    configured filterset rather than a URL-captured primary key.

    Subclasses must assign ``filterset_class``.
    """

    filter_backends = [DjangoFilterBackend]
    filterset_class = None  # set `filterset_class` in subclasses

    def get_object(self):
        filtered_qs = self.filter_queryset(self.get_queryset())
        instance = get_object_or_404(filtered_qs)
        # check_object_permissions may raise a permission-denied error.
        self.check_object_permissions(self.request, instance)
        return instance
class SpecialMixin:
    """Narrows querysets by the requesting user's privilege level using the
    model's ``special_for`` fields."""

    def get_special_for_fields(self):
        """Return the names of fields holding `SpecialForChoices` values."""
        return ['special_for']

    def filterby_special_for(self,
                             queryset,
                             choices: list[SpecialForChoices],
                             status: str = "IS",
                             ):
        """Apply one filter per (field, choice) pair to *queryset*.

        Params
        --------
        `status` can set to "NOT" or "IS": "IS" keeps matches, "NOT"
        excludes them.
        """
        negate = status == 'NOT'
        for field_name in self.get_special_for_fields():
            for choice in choices:
                condition = Q(**{field_name: choice})
                if negate:
                    condition = ~condition
                queryset = queryset.filter(condition)
        return queryset

    def get_queryset(self):
        queryset = super().get_queryset()
        user = self.request.user
        # Anonymous users and plain members only see NORMAL content.
        if not user.is_authenticated:
            return self.filterby_special_for(queryset, choices=[SpecialForChoices.NORMAL])
        if user.is_staff:
            return queryset
        if user.is_author:
            return self.filterby_special_for(queryset, status="NOT", choices=[SpecialForChoices.STAFF])
        if user.is_vip:
            return self.filterby_special_for(
                queryset,
                status="NOT",
                choices=[SpecialForChoices.STAFF, SpecialForChoices.AUTHOR]
            )
        return self.filterby_special_for(queryset, choices=[SpecialForChoices.NORMAL])
class CategoryDefaultsMixin(SpecialMixin):
    """Shared configuration for category views: queryset, serializer,
    parsers, and permissions (read-only for everyone; writes require
    author or admin)."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    parser_classes = [MultiPartParser, JSONParser]
    permission_classes = [IsReadOnly | IsAuthor | IsAdmin]
class CategoryDetailMixin(CategoryDefaultsMixin,
                          RetrieveModelMixin, UpdateModelMixin,
                          DeletePicMixin, DestroyModelMixin,
                          GenericViewSet):
    """Detail viewset for categories: retrieve/update/destroy, with
    picture cleanup on delete via DeletePicMixin."""
    pass
class PostDefaultsMixin(SpecialMixin):
    """Shared configuration for post views. select_related/prefetch_related
    pre-load category, tags, likes, author and comment users to avoid
    per-object queries."""
    queryset = Post.objects.select_related("category").prefetch_related("tags__tag", 'likes', 'author', 'comments__user')
    serializer_class = PostSerializer
    parser_classes = [MultiPartParser, JSONParser]
    permission_classes = [IsReadOnly | IsAdmin | (IsAuthor & IsOwnerOfItem)]
    def get_special_for_fields(self):
        # Posts are gated both by their own flag and by their category's flag.
        return ['special_for', 'category__special_for']
class PostDetailMixin(PostDefaultsMixin,
                      RetrieveModelMixin, UpdateModelMixin,
                      DeletePicMixin, DestroyModelMixin,
                      GenericViewSet):
    """Detail viewset for posts: retrieve/update/destroy, with picture
    cleanup on delete via DeletePicMixin."""
    pass
| 35.772277
| 121
| 0.651536
|
4a09a8fa5d52f4c353b39ebf3918d4d568e491fd
| 5,552
|
py
|
Python
|
builder/modules/load.py
|
antonkurenkov/hub-builder
|
80c885b35952f8df567e80c4a67da2f5d8bc0b5b
|
[
"Apache-2.0"
] | null | null | null |
builder/modules/load.py
|
antonkurenkov/hub-builder
|
80c885b35952f8df567e80c4a67da2f5d8bc0b5b
|
[
"Apache-2.0"
] | null | null | null |
builder/modules/load.py
|
antonkurenkov/hub-builder
|
80c885b35952f8df567e80c4a67da2f5d8bc0b5b
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import datetime
import re
import requests
import subprocess
from pymongo import MongoClient
from ruamel.yaml import YAML
from builder.color_print import *
# Shared round-trip YAML parser instance.
yaml = YAML()
# Repository root: three levels up from this module's directory.
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Locations of the generated hub API files (JSON content, extensionless names).
package_path = os.path.join(root_dir, 'api', 'hub', 'package')
status_path = os.path.join(root_dir, 'api', 'hub', 'status')
class Mongo:
    """Minimal MongoDB accessor used to persist the hub build history.

    Connects via the ``MONGODB_CREDENTIALS`` environment variable; when it
    is unset, ``self.db`` stays ``None`` and callers fall back to the local
    status.json history.
    """

    def __init__(self):
        credentials = os.getenv('MONGODB_CREDENTIALS')
        if credentials:
            address = f"mongodb+srv://{credentials}@cluster0-irout.mongodb.net/test?retryWrites=true&w=majority"
            client = MongoClient(address)
            self.db = client['jina-test']
        else:
            self.db = None
            print(f'Incorrect credentials "{credentials}" for DB connection. Will use status.json as history source.')

    def update_history_on_db(self, **kwargs):
        """Upsert the single history document (_id=1) with *kwargs*."""
        # BUGFIX: pymongo Database objects forbid truth-value testing
        # (``bool(db)`` raises NotImplementedError), so the original
        # ``if self.db:`` raised whenever a connection existed.
        # Compare against None explicitly instead.
        if self.db is not None:
            spec = {'_id': 1}
            self.db.docker.replace_one(filter=spec, replacement=dict(**kwargs), upsert=True)
            print(print_green('Hub history updated successfully on database'))

    def get_history_from_database(self):
        """Return the stored history document, or None when no DB is configured."""
        # Same explicit None comparison as above (pymongo forbids bool()).
        if self.db is not None:
            spec = {'_id': 1}
            return self.db.docker.find_one(filter=spec)

    # def select_head(self, n):
    #     return dict(self.db.docker.find().limit(n))
    #
    # def contains(self, item): # -> int | bool
    #     return dict(self.db.docker.find(item).count())
class StateLoader(Mongo):
    """Loads the hub build history (DB first, local files second) and writes
    it back out as README badges, a hub badge SVG, DB state and JSON api files.
    """

    def __init__(self):
        Mongo.__init__(self)

    def get_history(self):
        """Return the build history, preferring the database over local files;
        falls back to an empty skeleton (and prints a warning) if neither exists."""
        local_history = self.get_local_history()
        remote_history = self.get_history_from_database()
        empty_history = {'Images': {}, 'LastBuildTime': {}, 'LastBuildStatus': {}, 'LastBuildReason': ''}
        history = remote_history or local_history or empty_history
        if history == empty_history:
            print(print_red('\nCan\'t load build history from database or local'))
        return history

    @staticmethod
    def get_local_history():
        """Read history from the status/package api files; returns None
        (implicitly) when either file is missing."""
        if os.path.isfile(package_path) and os.path.isfile(status_path):
            with open(status_path, 'r') as sp:
                history = json.load(sp)
            with open(package_path, 'r') as bp:
                history['Images'] = json.load(bp)
            return history

    def update_total_history(self, history):
        # Order matters: update_api (last) pops 'Images' out of history,
        # so the badge/DB updates that need it must run first.
        self.update_readme(history)
        self.update_hub_badge(history)
        self.update_history_on_db(**history)
        self.update_api(history)

    def update_readme(self, history):
        """Rewrite the badge section of status/README.md between the
        START/END_BUILD_BADGE markers from the current history."""
        readme_path = os.path.join(root_dir, 'status', 'README.md')
        build_badge_regex = r'<!-- START_BUILD_BADGE -->(.*)<!-- END_BUILD_BADGE -->'
        build_badge_prefix = r'<!-- START_BUILD_BADGE --><!-- END_BUILD_BADGE -->'
        with open(readme_path, 'r') as fp:
            tmp = fp.read()
        badge_str = '\n'.join([self.get_badge_md(k, v) for k, v in history['LastBuildStatus'].items()])
        h1 = f'## Last Build at: {datetime.datetime.now():%Y-%m-%d %H:%M:%S %Z}'
        h2 = '<summary>Reason</summary>'
        h3 = '**Images**'
        # NOTE(review): if LastBuildReason is a plain string (as in the
        # empty-history skeleton) this joins its characters; presumably a
        # list of reason strings is expected here — confirm with callers.
        reason = '\n\n'.join(history['LastBuildReason'])
        content = [build_badge_prefix, h1, h3, badge_str, '<details>', h2, reason, '</details>']
        tmp = re.sub(pattern=build_badge_regex, repl='\n\n'.join(content), string=tmp, flags=re.DOTALL)
        with open(readme_path, 'w') as fp:
            fp.write(tmp)
        print(print_green('Hub readme updated successfully on path ') + str(readme_path))

    @staticmethod
    def get_badge_md(img_name, status):
        """Return markdown for one image's build badge; status True/False/other
        maps to success/fail/pending badge colors."""
        # badgen URLs need '-', '_' and ' ' escaped in the label text.
        safe_url_name = img_name.replace('-', '--').replace('_', '__').replace(' ', '_')
        if status is True:
            success_tag = 'success-success'
        elif status is False:
            success_tag = 'fail-critical'
        else:
            success_tag = 'pending-yellow'
        return f'[]' \
               f'(https://hub.docker.com/repository/docker/jinaai/{img_name})'

    def update_api(self, history):
        # update_build_json must run first: it pops 'Images' so the
        # status file is written without the image payload.
        self.update_build_json(history)
        self.update_status_json(history)

    @staticmethod
    def update_build_json(history):
        """Write the 'Images' payload (removed from history) to the package
        api file, both extensionless and with a .json suffix."""
        images = history.pop('Images')
        with open(package_path, 'w') as fp:
            json.dump(images, fp)
        with open(package_path + '.json', 'w') as fp:
            json.dump(images, fp)
        print(print_green(f'Package api updated on path ') + str(package_path))

    @staticmethod
    def update_status_json(history):
        """Write the remaining history (minus the Mongo '_id') to the status
        api file, both extensionless and with a .json suffix."""
        if '_id' in history:
            history.pop('_id')
        with open(status_path, 'w') as fp:
            json.dump(history, fp)
        with open(status_path + '.json', 'w') as fp:
            json.dump(history, fp)
        print(print_green('Status api updated on path ') + str(status_path))

    @staticmethod
    def update_hub_badge(history):
        """Fetch an image-count badge SVG from badgen.net and save it; prints
        a failure message (without raising) when the request fails."""
        hubbadge_path = os.path.join(root_dir, 'status', 'hub-stat.svg')
        url = f'https://badgen.net/badge/Hub%20Images/{len(history["Images"])}/cyan'
        response = requests.get(url)
        if response.ok:
            with open(hubbadge_path, 'wb') as opfile:
                opfile.write(response.content)
            print(print_green('Hub badge updated successfully on path ') + str(hubbadge_path))
        else:
            print(print_red('Hub badge update failed ') + str(hubbadge_path))
| 38.825175
| 118
| 0.613112
|
4a09a9a474faf81e1759f73a9c64b4123df41196
| 193
|
py
|
Python
|
scripts/writeX1.py
|
vibhatha/digitalocean_7
|
2c14e6b543e76f65a84c9ca91dbec8725f6e7e22
|
[
"MIT"
] | null | null | null |
scripts/writeX1.py
|
vibhatha/digitalocean_7
|
2c14e6b543e76f65a84c9ca91dbec8725f6e7e22
|
[
"MIT"
] | null | null | null |
scripts/writeX1.py
|
vibhatha/digitalocean_7
|
2c14e6b543e76f65a84c9ca91dbec8725f6e7e22
|
[
"MIT"
] | null | null | null |
def write(text, filename="X.txt"):
    """Append *text* followed by a newline to *filename*.

    Args:
        text: The line to append (without trailing newline).
        filename: Target file; defaults to "X.txt" for backward
            compatibility with existing callers.
    """
    # Context manager guarantees the file is closed even if write fails
    # (the original opened/closed manually with no error handling).
    with open(filename, "a") as fo:
        fo.write(text)
        fo.write('\n')
| 21.444444
| 35
| 0.518135
|
4a09a9cc04647d9ee4ac9b72a5b89f2bba294ac3
| 55,516
|
py
|
Python
|
discord/state.py
|
MrKomodoDragon/discord.py
|
1c40d43fd1289238a2184e7f106b908b2a1b4381
|
[
"MIT"
] | null | null | null |
discord/state.py
|
MrKomodoDragon/discord.py
|
1c40d43fd1289238a2184e7f106b908b2a1b4381
|
[
"MIT"
] | null | null | null |
discord/state.py
|
MrKomodoDragon/discord.py
|
1c40d43fd1289238a2184e7f106b908b2a1b4381
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
from collections import deque, OrderedDict
import copy
import datetime
import itertools
import logging
from typing import Dict, Optional
import weakref
import warnings
import inspect
import os
from .guild import Guild
from .activity import BaseActivity
from .user import User, ClientUser
from .emoji import Emoji
from .mentions import AllowedMentions
from .partial_emoji import PartialEmoji
from .message import Message
from .channel import *
from .channel import _channel_factory
from .raw_models import *
from .member import Member
from .role import Role
from .enums import ChannelType, try_enum, Status
from . import utils
from .flags import ApplicationFlags, Intents, MemberCacheFlags
from .object import Object
from .invite import Invite
from .integrations import _integration_factory
from .interactions import Interaction
from .ui.view import ViewStore
from .stage_instance import StageInstance
from .threads import Thread, ThreadMember
from .sticker import GuildSticker
class ChunkRequest:
    """Tracks one in-flight guild member-chunk request.

    Accumulates members from successive chunk payloads, optionally caching
    them on the resolved guild, and resolves all waiting futures with the
    accumulated buffer when the request completes.
    """

    def __init__(self, guild_id, loop, resolver, *, cache=True):
        self.guild_id = guild_id
        self.resolver = resolver
        self.loop = loop
        self.cache = cache
        self.nonce = os.urandom(16).hex()
        self.buffer = []  # List[Member]
        self.waiters = []

    def add_members(self, members):
        self.buffer.extend(members)
        if not self.cache:
            return
        guild = self.resolver(self.guild_id)
        if guild is None:
            return
        for member in members:
            existing = guild.get_member(member.id)
            # Only cache when absent or when the cached copy lacks joined_at.
            if existing is None or existing.joined_at is None:
                guild._add_member(member)

    async def wait(self):
        fut = self.loop.create_future()
        self.waiters.append(fut)
        try:
            return await fut
        finally:
            self.waiters.remove(fut)

    def get_future(self):
        fut = self.loop.create_future()
        self.waiters.append(fut)
        return fut

    def done(self):
        for fut in self.waiters:
            if not fut.done():
                fut.set_result(self.buffer)
# Module-level logger for connection-state events.
log = logging.getLogger(__name__)
async def logging_coroutine(coroutine, *, info):
    # Await *coroutine*, swallowing any exception and logging it instead of
    # propagating; *info* identifies the task in the log message.
    try:
        await coroutine
    except Exception:
        log.exception('Exception occurred during %s', info)
class ConnectionState:
def __init__(self, *, dispatch, handlers, hooks, http, loop, **options):
self.loop = loop
self.http = http
self.max_messages = options.get('max_messages', 1000)
if self.max_messages is not None and self.max_messages <= 0:
self.max_messages = 1000
self.dispatch = dispatch
self.handlers = handlers
self.hooks = hooks
self.shard_count = None
self._ready_task = None
self.application_id = utils._get_as_snowflake(options, 'application_id')
self.heartbeat_timeout = options.get('heartbeat_timeout', 60.0)
self.guild_ready_timeout = options.get('guild_ready_timeout', 2.0)
if self.guild_ready_timeout < 0:
raise ValueError('guild_ready_timeout cannot be negative')
allowed_mentions = options.get('allowed_mentions')
if allowed_mentions is not None and not isinstance(allowed_mentions, AllowedMentions):
raise TypeError('allowed_mentions parameter must be AllowedMentions')
self.allowed_mentions = allowed_mentions
self._chunk_requests = {} # Dict[Union[int, str], ChunkRequest]
activity = options.get('activity', None)
if activity:
if not isinstance(activity, BaseActivity):
raise TypeError('activity parameter must derive from BaseActivity.')
activity = activity.to_dict()
status = options.get('status', None)
if status:
if status is Status.offline:
status = 'invisible'
else:
status = str(status)
intents = options.get('intents', None)
if intents is not None:
if not isinstance(intents, Intents):
raise TypeError(f'intents parameter must be Intent not {type(intents)!r}')
else:
intents = Intents.default()
if not intents.guilds:
log.warning('Guilds intent seems to be disabled. This may cause state related issues.')
self._chunk_guilds = options.get('chunk_guilds_at_startup', intents.members)
# Ensure these two are set properly
if not intents.members and self._chunk_guilds:
raise ValueError('Intents.members must be enabled to chunk guilds at startup.')
cache_flags = options.get('member_cache_flags', None)
if cache_flags is None:
cache_flags = MemberCacheFlags.from_intents(intents)
else:
if not isinstance(cache_flags, MemberCacheFlags):
raise TypeError(f'member_cache_flags parameter must be MemberCacheFlags not {type(cache_flags)!r}')
cache_flags._verify_intents(intents)
self.member_cache_flags = cache_flags
self._activity = activity
self._status = status
self._intents = intents
if not intents.members or cache_flags._empty:
self.store_user = self.create_user
self.deref_user = self.deref_user_no_intents
self.parsers = parsers = {}
for attr, func in inspect.getmembers(self):
if attr.startswith('parse_'):
parsers[attr[6:].upper()] = func
self.clear()
def clear(self):
self.user = None
# Originally, this code used WeakValueDictionary to maintain references to the
# global user mapping.
# However, profiling showed that this came with two cons:
# 1. The __weakref__ slot caused a non-trivial increase in memory
# 2. The performance of the mapping caused store_user to be a bottleneck.
# Since this is undesirable, a mapping is now used instead with stored
# references now using a regular dictionary with eviction being done
# using __del__. Testing this for memory leaks led to no discernable leaks,
# though more testing will have to be done.
self._users: Dict[int, User] = {}
self._emojis = {}
self._stickers = {}
self._guilds = {}
self._view_store = ViewStore(self)
self._voice_clients = {}
# LRU of max size 128
self._private_channels = OrderedDict()
# extra dict to look up private channels by user id
self._private_channels_by_user = {}
self._messages = self.max_messages and deque(maxlen=self.max_messages)
def process_chunk_requests(self, guild_id, nonce, members, complete):
removed = []
for key, request in self._chunk_requests.items():
if request.guild_id == guild_id and request.nonce == nonce:
request.add_members(members)
if complete:
request.done()
removed.append(key)
for key in removed:
del self._chunk_requests[key]
def call_handlers(self, key, *args, **kwargs):
try:
func = self.handlers[key]
except KeyError:
pass
else:
func(*args, **kwargs)
async def call_hooks(self, key, *args, **kwargs):
try:
coro = self.hooks[key]
except KeyError:
pass
else:
await coro(*args, **kwargs)
@property
def self_id(self):
u = self.user
return u.id if u else None
@property
def intents(self):
ret = Intents.none()
ret.value = self._intents.value
return ret
@property
def voice_clients(self):
return list(self._voice_clients.values())
def _get_voice_client(self, guild_id):
return self._voice_clients.get(guild_id)
def _add_voice_client(self, guild_id, voice):
self._voice_clients[guild_id] = voice
def _remove_voice_client(self, guild_id):
self._voice_clients.pop(guild_id, None)
def _update_references(self, ws):
for vc in self.voice_clients:
vc.main_ws = ws
def store_user(self, data):
user_id = int(data['id'])
try:
return self._users[user_id]
except KeyError:
user = User(state=self, data=data)
if user.discriminator != '0000':
self._users[user_id] = user
user._stored = True
return user
def deref_user(self, user_id):
self._users.pop(user_id, None)
def create_user(self, data):
return User(state=self, data=data)
def deref_user_no_intents(self, user_id):
return
def get_user(self, id):
return self._users.get(id)
def store_emoji(self, guild, data):
emoji_id = int(data['id'])
self._emojis[emoji_id] = emoji = Emoji(guild=guild, state=self, data=data)
return emoji
def store_sticker(self, guild, data):
sticker_id = int(data['id'])
self._stickers[sticker_id] = sticker = GuildSticker(state=self, data=data)
return sticker
def store_view(self, view, message_id=None):
self._view_store.add_view(view, message_id)
def prevent_view_updates_for(self, message_id):
return self._view_store.remove_message_tracking(message_id)
@property
def persistent_views(self):
return self._view_store.persistent_views
@property
def guilds(self):
return list(self._guilds.values())
def _get_guild(self, guild_id):
return self._guilds.get(guild_id)
def _add_guild(self, guild):
self._guilds[guild.id] = guild
def _remove_guild(self, guild):
self._guilds.pop(guild.id, None)
for emoji in guild.emojis:
self._emojis.pop(emoji.id, None)
for sticker in guild.stickers:
self._stickers.pop(sticker.id, None)
del guild
@property
def emojis(self):
return list(self._emojis.values())
@property
def stickers(self):
return list(self._stickers.values())
def get_emoji(self, emoji_id):
return self._emojis.get(emoji_id)
def get_sticker(self, sticker_id):
return self._stickers.get(sticker_id)
@property
def private_channels(self):
return list(self._private_channels.values())
def _get_private_channel(self, channel_id):
try:
value = self._private_channels[channel_id]
except KeyError:
return None
else:
self._private_channels.move_to_end(channel_id)
return value
def _get_private_channel_by_user(self, user_id):
return self._private_channels_by_user.get(user_id)
def _add_private_channel(self, channel):
channel_id = channel.id
self._private_channels[channel_id] = channel
if len(self._private_channels) > 128:
_, to_remove = self._private_channels.popitem(last=False)
if isinstance(to_remove, DMChannel) and to_remove.recipient:
self._private_channels_by_user.pop(to_remove.recipient.id, None)
if isinstance(channel, DMChannel) and channel.recipient:
self._private_channels_by_user[channel.recipient.id] = channel
def add_dm_channel(self, data):
channel = DMChannel(me=self.user, state=self, data=data)
self._add_private_channel(channel)
return channel
def _remove_private_channel(self, channel):
self._private_channels.pop(channel.id, None)
if isinstance(channel, DMChannel):
self._private_channels_by_user.pop(channel.recipient.id, None)
def _get_message(self, msg_id):
return utils.find(lambda m: m.id == msg_id, reversed(self._messages)) if self._messages else None
def _add_guild_from_data(self, guild):
guild = Guild(data=guild, state=self)
self._add_guild(guild)
return guild
def _guild_needs_chunking(self, guild):
# If presences are enabled then we get back the old guild.large behaviour
return self._chunk_guilds and not guild.chunked and not (self._intents.presences and not guild.large)
def _get_guild_channel(self, data):
channel_id = int(data['channel_id'])
try:
guild = self._get_guild(int(data['guild_id']))
except KeyError:
channel = PartialMessageable(state=self, id=channel_id, type=ChannelType.private)
guild = None
else:
channel = guild and guild._resolve_channel(channel_id)
return channel or PartialMessageable(state=self, id=channel_id), guild
async def chunker(self, guild_id, query='', limit=0, presences=False, *, nonce=None):
ws = self._get_websocket(guild_id) # This is ignored upstream
await ws.request_chunks(guild_id, query=query, limit=limit, presences=presences, nonce=nonce)
async def query_members(self, guild, query, limit, user_ids, cache, presences):
guild_id = guild.id
ws = self._get_websocket(guild_id)
if ws is None:
raise RuntimeError('Somehow do not have a websocket for this guild_id')
request = ChunkRequest(guild.id, self.loop, self._get_guild, cache=cache)
self._chunk_requests[request.nonce] = request
try:
# start the query operation
await ws.request_chunks(guild_id, query=query, limit=limit, user_ids=user_ids, presences=presences, nonce=request.nonce)
return await asyncio.wait_for(request.wait(), timeout=30.0)
except asyncio.TimeoutError:
log.warning('Timed out waiting for chunks with query %r and limit %d for guild_id %d', query, limit, guild_id)
raise
async def _delay_ready(self):
try:
states = []
while True:
# this snippet of code is basically waiting N seconds
# until the last GUILD_CREATE was sent
try:
guild = await asyncio.wait_for(self._ready_state.get(), timeout=self.guild_ready_timeout)
except asyncio.TimeoutError:
break
else:
if self._guild_needs_chunking(guild):
future = await self.chunk_guild(guild, wait=False)
states.append((guild, future))
else:
if guild.unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
for guild, future in states:
try:
await asyncio.wait_for(future, timeout=5.0)
except asyncio.TimeoutError:
log.warning('Shard ID %s timed out waiting for chunks for guild_id %s.', guild.shard_id, guild.id)
if guild.unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
# remove the state
try:
del self._ready_state
except AttributeError:
pass # already been deleted somehow
except asyncio.CancelledError:
pass
else:
# dispatch the event
self.call_handlers('ready')
self.dispatch('ready')
finally:
self._ready_task = None
def parse_ready(self, data):
if self._ready_task is not None:
self._ready_task.cancel()
self._ready_state = asyncio.Queue()
self.clear()
self.user = user = ClientUser(state=self, data=data['user'])
self.store_user(data['user'])
if self.application_id is None:
try:
application = data['application']
except KeyError:
pass
else:
self.application_id = utils._get_as_snowflake(application, 'id')
self.application_flags = ApplicationFlags._from_value(application['flags'])
for guild_data in data['guilds']:
self._add_guild_from_data(guild_data)
self.dispatch('connect')
self._ready_task = asyncio.create_task(self._delay_ready())
def parse_resumed(self, data):
self.dispatch('resumed')
def parse_message_create(self, data):
channel, _ = self._get_guild_channel(data)
message = Message(channel=channel, data=data, state=self)
self.dispatch('message', message)
if self._messages is not None:
self._messages.append(message)
if channel and channel.__class__ in (TextChannel, Thread):
channel.last_message_id = message.id
def parse_message_delete(self, data):
raw = RawMessageDeleteEvent(data)
found = self._get_message(raw.message_id)
raw.cached_message = found
self.dispatch('raw_message_delete', raw)
if self._messages is not None and found is not None:
self.dispatch('message_delete', found)
self._messages.remove(found)
def parse_message_delete_bulk(self, data):
raw = RawBulkMessageDeleteEvent(data)
if self._messages:
found_messages = [message for message in self._messages if message.id in raw.message_ids]
else:
found_messages = []
raw.cached_messages = found_messages
self.dispatch('raw_bulk_message_delete', raw)
if found_messages:
self.dispatch('bulk_message_delete', found_messages)
for msg in found_messages:
self._messages.remove(msg)
def parse_message_update(self, data):
raw = RawMessageUpdateEvent(data)
message = self._get_message(raw.message_id)
if message is not None:
older_message = copy.copy(message)
raw.cached_message = older_message
self.dispatch('raw_message_edit', raw)
message._update(data)
# Coerce the `after` parameter to take the new updated Member
# ref: #5999
older_message.author = message.author
self.dispatch('message_edit', older_message, message)
else:
self.dispatch('raw_message_edit', raw)
if 'components' in data and self._view_store.is_message_tracked(raw.message_id):
self._view_store.update_from_message(raw.message_id, data['components'])
def parse_message_reaction_add(self, data):
    """Handle MESSAGE_REACTION_ADD: raw event always; rich event when the
    message is cached and the reacting user can be resolved."""
    emoji = data['emoji']
    emoji_id = utils._get_as_snowflake(emoji, 'id')
    emoji = PartialEmoji.with_state(self, id=emoji_id, animated=emoji.get('animated', False), name=emoji['name'])
    raw = RawReactionActionEvent(data, emoji, 'REACTION_ADD')
    # Guild payloads bundle the reacting member's data; DM payloads do not.
    member_data = data.get('member')
    if member_data:
        guild = self._get_guild(raw.guild_id)
        raw.member = Member(data=member_data, guild=guild, state=self)
    else:
        raw.member = None
    self.dispatch('raw_reaction_add', raw)
    # rich interface here
    message = self._get_message(raw.message_id)
    if message is not None:
        emoji = self._upgrade_partial_emoji(emoji)
        reaction = message._add_reaction(data, emoji, raw.user_id)
        # Prefer the bundled member; fall back to cache lookup.
        user = raw.member or self._get_reaction_user(message.channel, raw.user_id)
        if user:
            self.dispatch('reaction_add', reaction, user)
def parse_message_reaction_remove_all(self, data):
    """Handle MESSAGE_REACTION_REMOVE_ALL, clearing cached reactions."""
    raw = RawReactionClearEvent(data)
    self.dispatch('raw_reaction_clear', raw)
    message = self._get_message(raw.message_id)
    if message is None:
        return
    previous = message.reactions.copy()
    message.reactions.clear()
    self.dispatch('reaction_clear', message, previous)
def parse_message_reaction_remove(self, data):
    """Handle MESSAGE_REACTION_REMOVE: raw event always; rich event only
    when the cached message still tracked the reaction."""
    emoji = data['emoji']
    emoji_id = utils._get_as_snowflake(emoji, 'id')
    emoji = PartialEmoji.with_state(self, id=emoji_id, name=emoji['name'])
    raw = RawReactionActionEvent(data, emoji, 'REACTION_REMOVE')
    self.dispatch('raw_reaction_remove', raw)
    message = self._get_message(raw.message_id)
    if message is not None:
        emoji = self._upgrade_partial_emoji(emoji)
        try:
            reaction = message._remove_reaction(data, emoji, raw.user_id)
        except (AttributeError, ValueError):  # eventual consistency lol
            pass
        else:
            user = self._get_reaction_user(message.channel, raw.user_id)
            if user:
                self.dispatch('reaction_remove', reaction, user)
def parse_message_reaction_remove_emoji(self, data):
    """Handle MESSAGE_REACTION_REMOVE_EMOJI: raw event always; rich event
    when the cached message still had that emoji's reaction."""
    emoji = data['emoji']
    emoji_id = utils._get_as_snowflake(emoji, 'id')
    emoji = PartialEmoji.with_state(self, id=emoji_id, name=emoji['name'])
    raw = RawReactionClearEmojiEvent(data, emoji)
    self.dispatch('raw_reaction_clear_emoji', raw)
    message = self._get_message(raw.message_id)
    if message is not None:
        try:
            reaction = message._clear_emoji(emoji)
        except (AttributeError, ValueError):  # eventual consistency lol
            pass
        else:
            if reaction:
                self.dispatch('reaction_clear_emoji', reaction)
def parse_interaction_create(self, data):
    """Handle INTERACTION_CREATE, routing component presses to live Views."""
    interaction = Interaction(data=data, state=self)
    if data['type'] == 3:  # interaction component
        custom_id = interaction.data['custom_id']  # type: ignore
        component_type = interaction.data['component_type']  # type: ignore
        self._view_store.dispatch(component_type, custom_id, interaction)
    self.dispatch('interaction', interaction)
def parse_presence_update(self, data):
    """Handle PRESENCE_UPDATE for a cached guild member, dispatching
    before/after snapshots (and user_update when the inner user changed)."""
    guild_id = utils._get_as_snowflake(data, 'guild_id')
    guild = self._get_guild(guild_id)
    if guild is None:
        log.debug('PRESENCE_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)
        return
    user = data['user']
    member_id = int(user['id'])
    member = guild.get_member(member_id)
    if member is None:
        log.debug('PRESENCE_UPDATE referencing an unknown member ID: %s. Discarding', member_id)
        return
    # Snapshot before mutating so listeners get a before/after pair.
    old_member = Member._copy(member)
    user_update = member._presence_update(data=data, user=user)
    if user_update:
        self.dispatch('user_update', user_update[0], user_update[1])
    self.dispatch('presence_update', old_member, member)
def parse_user_update(self, data):
    """Handle USER_UPDATE for our own user, refreshing the cached copy too."""
    self.user._update(data)
    # The user cache may hold a separate object for us; keep it in sync.
    cached = self._users.get(self.user.id)
    if cached:
        cached._update(data)
def parse_invite_create(self, data):
    """Handle INVITE_CREATE by dispatching the constructed Invite."""
    self.dispatch('invite_create', Invite.from_gateway(state=self, data=data))
def parse_invite_delete(self, data):
    """Handle INVITE_DELETE by dispatching the constructed Invite."""
    self.dispatch('invite_delete', Invite.from_gateway(state=self, data=data))
def parse_channel_delete(self, data):
    """Handle CHANNEL_DELETE by evicting the channel from its guild."""
    guild = self._get_guild(utils._get_as_snowflake(data, 'guild_id'))
    channel_id = int(data['id'])
    if guild is None:
        return
    channel = guild.get_channel(channel_id)
    if channel is None:
        return
    guild._remove_channel(channel)
    self.dispatch('guild_channel_delete', channel)
def parse_channel_update(self, data):
    """Handle CHANNEL_UPDATE for group DMs and guild channels, dispatching
    before/after channel snapshots."""
    channel_type = try_enum(ChannelType, data.get('type'))
    channel_id = int(data['id'])
    # Group DMs are stored separately from guild channels.
    if channel_type is ChannelType.group:
        channel = self._get_private_channel(channel_id)
        old_channel = copy.copy(channel)
        channel._update_group(data)
        self.dispatch('private_channel_update', old_channel, channel)
        return
    guild_id = utils._get_as_snowflake(data, 'guild_id')
    guild = self._get_guild(guild_id)
    if guild is not None:
        channel = guild.get_channel(channel_id)
        if channel is not None:
            old_channel = copy.copy(channel)
            channel._update(guild, data)
            self.dispatch('guild_channel_update', old_channel, channel)
        else:
            log.debug('CHANNEL_UPDATE referencing an unknown channel ID: %s. Discarding.', channel_id)
    else:
        log.debug('CHANNEL_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)
def parse_channel_create(self, data):
    """Handle CHANNEL_CREATE for guild channels."""
    factory, ch_type = _channel_factory(data['type'])
    if factory is None:
        log.debug('CHANNEL_CREATE referencing an unknown channel type %s. Discarding.', data['type'])
        return
    guild_id = utils._get_as_snowflake(data, 'guild_id')
    guild = self._get_guild(guild_id)
    if guild is None:
        log.debug('CHANNEL_CREATE referencing an unknown guild ID: %s. Discarding.', guild_id)
        return
    channel = factory(guild=guild, state=self, data=data)
    guild._add_channel(channel)
    self.dispatch('guild_channel_create', channel)
def parse_channel_pins_update(self, data):
    """Handle CHANNEL_PINS_UPDATE for both guild and private channels.

    Dispatches ``guild_channel_pins_update`` or
    ``private_channel_pins_update`` with the channel and the timestamp of
    the most recent pin (or ``None`` when no pins remain).
    """
    channel_id = int(data['channel_id'])
    try:
        guild = self._get_guild(int(data['guild_id']))
    except KeyError:
        # No guild_id: this is a DM/group channel pin update.
        guild = None
        channel = self._get_private_channel(channel_id)
    else:
        channel = guild and guild._resolve_channel(channel_id)
    if channel is None:
        log.debug('CHANNEL_PINS_UPDATE referencing an unknown channel ID: %s. Discarding.', channel_id)
        return
    # last_pin_timestamp can be absent (not just null) in some payloads;
    # the previous direct indexing raised KeyError in that case.
    timestamp = data.get('last_pin_timestamp')
    last_pin = utils.parse_time(timestamp) if timestamp else None
    if guild is None:
        self.dispatch('private_channel_pins_update', channel, last_pin)
    else:
        self.dispatch('guild_channel_pins_update', channel, last_pin)
def parse_thread_create(self, data):
    """Handle THREAD_CREATE, dispatching thread_join for newly seen threads."""
    guild_id = int(data['guild_id'])
    guild: Optional[Guild] = self._get_guild(guild_id)
    if guild is None:
        log.debug('THREAD_CREATE referencing an unknown guild ID: %s. Discarding', guild_id)
        return
    thread = Thread(guild=guild, state=guild._state, data=data)
    already_known = guild.get_thread(thread.id)
    guild._add_thread(thread)
    if not already_known:
        self.dispatch('thread_join', thread)
def parse_thread_update(self, data):
    """Handle THREAD_UPDATE: update a cached thread (thread_update) or
    cache a newly seen one (thread_join)."""
    guild_id = int(data['guild_id'])
    guild = self._get_guild(guild_id)
    if guild is None:
        log.debug('THREAD_UPDATE referencing an unknown guild ID: %s. Discarding', guild_id)
        return
    thread_id = int(data['id'])
    thread = guild.get_thread(thread_id)
    if thread is not None:
        old = copy.copy(thread)
        thread._update(data)
        self.dispatch('thread_update', old, thread)
    else:
        # Not cached yet: treat it like joining a new thread.
        thread = Thread(guild=guild, state=guild._state, data=data)
        guild._add_thread(thread)
        self.dispatch('thread_join', thread)
def parse_thread_delete(self, data):
    """Handle THREAD_DELETE by removing the cached thread, if any."""
    guild_id = int(data['guild_id'])
    guild = self._get_guild(guild_id)
    if guild is None:
        log.debug('THREAD_DELETE referencing an unknown guild ID: %s. Discarding', guild_id)
        return
    thread = guild.get_thread(int(data['id']))
    if thread is None:
        return
    guild._remove_thread(thread)
    self.dispatch('thread_delete', thread)
def parse_thread_list_sync(self, data):
    """Handle THREAD_LIST_SYNC: reconcile cached threads for a guild (or a
    subset of its channels), dispatching thread_join/thread_remove."""
    guild_id = int(data['guild_id'])
    guild: Optional[Guild] = self._get_guild(guild_id)
    if guild is None:
        log.debug('THREAD_LIST_SYNC referencing an unknown guild ID: %s. Discarding', guild_id)
        return
    try:
        channel_ids = set(data['channel_ids'])
    except KeyError:
        # If not provided, then the entire guild is being synced
        # So all previous thread data should be overwritten
        previous_threads = guild._threads.copy()
        guild._clear_threads()
    else:
        previous_threads = guild._filter_threads(channel_ids)
    threads = {
        d['id']: guild._store_thread(d)
        for d in data.get('threads', [])
    }
    for member in data.get('members', []):
        try:
            # note: member['id'] is the thread_id
            thread = threads[member['id']]
        except KeyError:
            continue
        else:
            thread._add_member(ThreadMember(thread, member))
    # Newly synced threads we had not cached count as joins; threads that
    # did not reappear in this sync are considered removed.
    for thread in threads.values():
        old = previous_threads.pop(thread.id, None)
        if old is None:
            self.dispatch('thread_join', thread)
    for thread in previous_threads.values():
        self.dispatch('thread_remove', thread)
def parse_thread_member_update(self, data):
    """Handle THREAD_MEMBER_UPDATE for our own thread membership."""
    guild_id = int(data['guild_id'])
    guild: Optional[Guild] = self._get_guild(guild_id)
    if guild is None:
        log.debug('THREAD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding', guild_id)
        return
    thread_id = int(data['id'])
    thread: Optional[Thread] = guild.get_thread(thread_id)
    if thread is None:
        log.debug('THREAD_MEMBER_UPDATE referencing an unknown thread ID: %s. Discarding', thread_id)
        return
    # This event only ever describes the current user's membership.
    thread.me = ThreadMember(thread, data)
def parse_thread_members_update(self, data):
    """Handle THREAD_MEMBERS_UPDATE, distinguishing our own membership from
    other members' for the join/remove dispatches."""
    guild_id = int(data['guild_id'])
    guild: Optional[Guild] = self._get_guild(guild_id)
    if guild is None:
        log.debug('THREAD_MEMBERS_UPDATE referencing an unknown guild ID: %s. Discarding', guild_id)
        return
    thread_id = int(data['id'])
    thread: Optional[Thread] = guild.get_thread(thread_id)
    if thread is None:
        log.debug('THREAD_MEMBERS_UPDATE referencing an unknown thread ID: %s. Discarding', thread_id)
        return
    added_members = [ThreadMember(thread, d) for d in data.get('added_members', [])]
    removed_member_ids = [int(x) for x in data.get('removed_member_ids', [])]
    self_id = self.self_id
    for member in added_members:
        if member.id != self_id:
            thread._add_member(member)
            self.dispatch('thread_member_join', member)
        else:
            # We were added: that's a thread join for us.
            thread.me = member
            self.dispatch('thread_join', thread)
    for member_id in removed_member_ids:
        if member_id != self_id:
            member = thread._pop_member(member_id)
            if member is not None:
                self.dispatch('thread_member_remove', member)
        else:
            # We were removed from the thread.
            self.dispatch('thread_remove', thread)
def parse_guild_member_add(self, data):
    """Handle GUILD_MEMBER_ADD, caching the member when configured to."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('GUILD_MEMBER_ADD referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    member = Member(guild=guild, data=data, state=self)
    if self.member_cache_flags.joined:
        guild._add_member(member)
    # _member_count may not exist yet on this guild object.
    try:
        guild._member_count += 1
    except AttributeError:
        pass
    self.dispatch('member_join', member)
def parse_guild_member_remove(self, data):
    """Handle GUILD_MEMBER_REMOVE, evicting the cached member if present."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is not None:
        # _member_count may not exist yet on this guild object.
        try:
            guild._member_count -= 1
        except AttributeError:
            pass
        user_id = int(data['user']['id'])
        member = guild.get_member(user_id)
        if member is not None:
            guild._remove_member(member)
            self.dispatch('member_remove', member)
    else:
        log.debug('GUILD_MEMBER_REMOVE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_member_update(self, data):
    """Handle GUILD_MEMBER_UPDATE for cached (and optionally uncached)
    members, dispatching member_update/user_update as appropriate."""
    guild = self._get_guild(int(data['guild_id']))
    user = data['user']
    user_id = int(user['id'])
    if guild is None:
        log.debug('GUILD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    member = guild.get_member(user_id)
    if member is not None:
        old_member = Member._copy(member)
        member._update(data)
        user_update = member._update_inner_user(user)
        if user_update:
            self.dispatch('user_update', user_update[0], user_update[1])
        self.dispatch('member_update', old_member, member)
    else:
        if self.member_cache_flags.joined:
            member = Member(data=data, guild=guild, state=self)
            # Force an update on the inner user if necessary
            user_update = member._update_inner_user(user)
            if user_update:
                self.dispatch('user_update', user_update[0], user_update[1])
            guild._add_member(member)
        # NOTE(review): this logs "unknown member" even when the member was
        # just cached above — presumably intended only for the truly-unknown
        # case; confirm against upstream before changing.
        log.debug('GUILD_MEMBER_UPDATE referencing an unknown member ID: %s. Discarding.', user_id)
def parse_guild_emojis_update(self, data):
    """Handle GUILD_EMOJIS_UPDATE, replacing the guild's emoji cache."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('GUILD_EMOJIS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    before_emojis = guild.emojis
    # Drop the old emoji objects from the global cache before rebuilding.
    for old_emoji in before_emojis:
        self._emojis.pop(old_emoji.id, None)
    guild.emojis = tuple(self.store_emoji(guild, d) for d in data['emojis'])
    self.dispatch('guild_emojis_update', guild, before_emojis, guild.emojis)
def parse_guild_stickers_update(self, data):
    """Handle GUILD_STICKERS_UPDATE, replacing the guild's sticker cache."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('GUILD_STICKERS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    before_stickers = guild.stickers
    # Drop the old sticker objects from the global cache before rebuilding.
    for sticker in before_stickers:
        self._stickers.pop(sticker.id, None)
    guild.stickers = tuple(self.store_sticker(guild, d) for d in data['stickers'])
    self.dispatch('guild_stickers_update', guild, before_stickers, guild.stickers)
def _get_create_guild(self, data):
    """Return the Guild for a GUILD_CREATE payload, refreshing a cached
    guild that just became available or creating a brand-new one."""
    if data.get('unavailable') is False:
        # GUILD_CREATE with unavailable in the response
        # usually means that the guild has become available
        # and is therefore in the cache
        guild = self._get_guild(int(data['id']))
        if guild is not None:
            guild.unavailable = False
            guild._from_data(data)
            return guild
    return self._add_guild_from_data(data)
def is_guild_evicted(self, guild) -> bool:
    """Return ``True`` when *guild* is no longer in the guild cache."""
    cached_ids = self._guilds
    return guild.id not in cached_ids
async def chunk_guild(self, guild, *, wait=True, cache=None):
    """Request member chunks for *guild*, reusing any in-flight request.

    Returns the chunked members when ``wait`` is True, otherwise a future
    that resolves to them.
    """
    cache = cache or self.member_cache_flags.joined
    request = self._chunk_requests.get(guild.id)
    if request is None:
        # No pending request: create one and ask the gateway for chunks.
        self._chunk_requests[guild.id] = request = ChunkRequest(guild.id, self.loop, self._get_guild, cache=cache)
        await self.chunker(guild.id, nonce=request.nonce)
    if wait:
        return await request.wait()
    return request.get_future()
async def _chunk_and_dispatch(self, guild, unavailable):
    """Chunk *guild* (bounded by a timeout) then dispatch its join/available event."""
    try:
        await asyncio.wait_for(self.chunk_guild(guild), timeout=60.0)
    except asyncio.TimeoutError:
        log.info('Somehow timed out waiting for chunks.')
    # unavailable is False for a guild coming back online, else a new join.
    if unavailable is False:
        self.dispatch('guild_available', guild)
    else:
        self.dispatch('guild_join', guild)
def parse_guild_create(self, data):
    """Handle GUILD_CREATE: cache the guild, defer to the ready handshake
    or background chunking when needed, otherwise dispatch immediately."""
    unavailable = data.get('unavailable')
    if unavailable is True:
        # joined a guild with unavailable == True so..
        return
    guild = self._get_create_guild(data)
    try:
        # Notify the on_ready state, if any, that this guild is complete.
        self._ready_state.put_nowait(guild)
    except AttributeError:
        pass
    else:
        # If we're waiting for the event, put the rest on hold
        return
    # check if it requires chunking
    if self._guild_needs_chunking(guild):
        asyncio.create_task(self._chunk_and_dispatch(guild, unavailable))
        return
    # Dispatch available if newly available
    if unavailable is False:
        self.dispatch('guild_available', guild)
    else:
        self.dispatch('guild_join', guild)
def parse_guild_update(self, data):
    """Handle GUILD_UPDATE, dispatching before/after guild snapshots."""
    guild = self._get_guild(int(data['id']))
    if guild is None:
        log.debug('GUILD_UPDATE referencing an unknown guild ID: %s. Discarding.', data['id'])
        return
    old_guild = copy.copy(guild)
    guild._from_data(data)
    self.dispatch('guild_update', old_guild, guild)
def parse_guild_delete(self, data):
    """Handle GUILD_DELETE: mark the guild unavailable on outages, or fully
    evict it (and its cached messages) on a real removal."""
    guild = self._get_guild(int(data['id']))
    if guild is None:
        log.debug('GUILD_DELETE referencing an unknown guild ID: %s. Discarding.', data['id'])
        return
    if data.get('unavailable', False):
        # GUILD_DELETE with unavailable being True means that the
        # guild that was available is now currently unavailable
        guild.unavailable = True
        self.dispatch('guild_unavailable', guild)
        return
    # do a cleanup of the messages cache
    if self._messages is not None:
        self._messages = deque((msg for msg in self._messages if msg.guild != guild), maxlen=self.max_messages)
    self._remove_guild(guild)
    self.dispatch('guild_remove', guild)
def parse_guild_ban_add(self, data):
    """Handle GUILD_BAN_ADD, dispatching member_ban for logging purposes."""
    # we make the assumption that GUILD_BAN_ADD is done
    # before GUILD_MEMBER_REMOVE is called
    # hence we don't remove it from cache or do anything
    # strange with it, the main purpose of this event
    # is mainly to dispatch to another event worth listening to for logging
    guild = self._get_guild(int(data['guild_id']))
    if guild is not None:
        try:
            user = User(data=data['user'], state=self)
        except KeyError:
            pass
        else:
            # Prefer the richer Member object when it is still cached.
            member = guild.get_member(user.id) or user
            self.dispatch('member_ban', guild, member)
def parse_guild_ban_remove(self, data):
    """Handle GUILD_BAN_REMOVE, dispatching member_unban."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None or 'user' not in data:
        return
    user = self.store_user(data['user'])
    self.dispatch('member_unban', guild, user)
def parse_guild_role_create(self, data):
    """Handle GUILD_ROLE_CREATE, caching the newly created role."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('GUILD_ROLE_CREATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    role = Role(guild=guild, data=data['role'], state=self)
    guild._add_role(role)
    self.dispatch('guild_role_create', role)
def parse_guild_role_delete(self, data):
    """Handle GUILD_ROLE_DELETE, evicting the role if it was cached."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('GUILD_ROLE_DELETE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    try:
        role = guild._remove_role(int(data['role_id']))
    except KeyError:
        # Role was not cached; nothing to dispatch.
        return
    self.dispatch('guild_role_delete', role)
def parse_guild_role_update(self, data):
    """Handle GUILD_ROLE_UPDATE, dispatching before/after role snapshots."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('GUILD_ROLE_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    role_data = data['role']
    role = guild.get_role(int(role_data['id']))
    if role is not None:
        old_role = copy.copy(role)
        role._update(role_data)
        self.dispatch('guild_role_update', old_role, role)
def parse_guild_members_chunk(self, data):
    """Handle GUILD_MEMBERS_CHUNK, feeding members to pending chunk requests.

    Builds Member objects for the chunk, applies any bundled presences, and
    resolves the matching chunk request once the final chunk arrives.
    """
    guild_id = int(data['guild_id'])
    guild = self._get_guild(guild_id)
    presences = data.get('presences', [])
    members = [Member(guild=guild, data=member, state=self) for member in data.get('members', [])]
    log.debug('Processed a chunk for %s members in guild ID %s.', len(members), guild_id)
    if presences:
        # Presence user IDs arrive as strings, so key the lookup by string.
        member_dict = {str(member.id): member for member in members}
        for presence in presences:
            user = presence['user']
            member = member_dict.get(user['id'])
            # A presence can reference a user absent from this chunk's member
            # list; the previous unconditional call raised AttributeError on
            # the resulting None.
            if member is not None:
                member._presence_update(presence, user)
    complete = data.get('chunk_index', 0) + 1 == data.get('chunk_count')
    self.process_chunk_requests(guild_id, data.get('nonce'), members, complete)
def parse_guild_integrations_update(self, data):
    """Handle GUILD_INTEGRATIONS_UPDATE for a cached guild."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('GUILD_INTEGRATIONS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    self.dispatch('guild_integrations_update', guild)
def parse_integration_create(self, data):
    """Handle INTEGRATION_CREATE, building the typed integration object."""
    guild_id = int(data.pop('guild_id'))
    guild = self._get_guild(guild_id)
    if guild is None:
        log.debug('INTEGRATION_CREATE referencing an unknown guild ID: %s. Discarding.', guild_id)
        return
    cls, _ = _integration_factory(data['type'])
    self.dispatch('integration_create', cls(data=data, guild=guild))
def parse_integration_update(self, data):
    """Handle INTEGRATION_UPDATE, building the typed integration object."""
    guild_id = int(data.pop('guild_id'))
    guild = self._get_guild(guild_id)
    if guild is None:
        log.debug('INTEGRATION_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)
        return
    cls, _ = _integration_factory(data['type'])
    self.dispatch('integration_update', cls(data=data, guild=guild))
def parse_integration_delete(self, data):
    """Handle INTEGRATION_DELETE; only a raw event is available."""
    guild_id = int(data['guild_id'])
    guild = self._get_guild(guild_id)
    if guild is None:
        log.debug('INTEGRATION_DELETE referencing an unknown guild ID: %s. Discarding.', guild_id)
        return
    self.dispatch('raw_integration_delete', RawIntegrationDeleteEvent(data))
def parse_webhooks_update(self, data):
    """Handle WEBHOOKS_UPDATE, dispatching the affected channel."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('WEBHOOKS_UPDATE referencing an unknown guild ID: %s. Discarding', data['guild_id'])
        return
    channel = guild.get_channel(int(data['channel_id']))
    if channel is None:
        log.debug('WEBHOOKS_UPDATE referencing an unknown channel ID: %s. Discarding.', data['channel_id'])
        return
    self.dispatch('webhooks_update', channel)
def parse_stage_instance_create(self, data):
    """Handle STAGE_INSTANCE_CREATE, caching the new stage instance."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('STAGE_INSTANCE_CREATE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    stage_instance = StageInstance(guild=guild, state=self, data=data)
    guild._stage_instances[stage_instance.id] = stage_instance
    self.dispatch('stage_instance_create', stage_instance)
def parse_stage_instance_update(self, data):
    """Handle STAGE_INSTANCE_UPDATE, dispatching before/after snapshots."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is not None:
        stage_instance = guild._stage_instances.get(int(data['id']))
        if stage_instance is not None:
            # Snapshot before mutating so listeners get a before/after pair.
            old_stage_instance = copy.copy(stage_instance)
            stage_instance._update(data)
            self.dispatch('stage_instance_update', old_stage_instance, stage_instance)
        else:
            log.debug('STAGE_INSTANCE_UPDATE referencing unknown stage instance ID: %s. Discarding.', data['id'])
    else:
        log.debug('STAGE_INSTANCE_UPDATE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_stage_instance_delete(self, data):
    """Handle STAGE_INSTANCE_DELETE, evicting the cached instance if any."""
    guild = self._get_guild(int(data['guild_id']))
    if guild is None:
        log.debug('STAGE_INSTANCE_DELETE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
        return
    try:
        stage_instance = guild._stage_instances.pop(int(data['id']))
    except KeyError:
        pass
    else:
        self.dispatch('stage_instance_delete', stage_instance)
def parse_voice_state_update(self, data):
    """Handle VOICE_STATE_UPDATE: forward our own updates to the voice
    client, maintain the member cache, and dispatch voice_state_update."""
    guild = self._get_guild(utils._get_as_snowflake(data, 'guild_id'))
    channel_id = utils._get_as_snowflake(data, 'channel_id')
    flags = self.member_cache_flags
    self_id = self.user.id
    if guild is not None:
        if int(data['user_id']) == self_id:
            # Our own state changed: let the voice protocol react in the
            # background, with errors logged rather than raised.
            voice = self._get_voice_client(guild.id)
            if voice is not None:
                coro = voice.on_voice_state_update(data)
                asyncio.create_task(logging_coroutine(coro, info='Voice Protocol voice state update handler'))
        member, before, after = guild._update_voice_state(data, channel_id)
        if member is not None:
            if flags.voice:
                if channel_id is None and flags._voice_only and member.id != self_id:
                    # Only remove from cache iff we only have the voice flag enabled
                    guild._remove_member(member)
                elif channel_id is not None:
                    guild._add_member(member)
            self.dispatch('voice_state_update', member, before, after)
        else:
            log.debug('VOICE_STATE_UPDATE referencing an unknown member ID: %s. Discarding.', data['user_id'])
def parse_voice_server_update(self, data):
    """Handle VOICE_SERVER_UPDATE, forwarding it to the active voice client.

    The client is keyed by guild_id when present, otherwise by channel_id.
    """
    try:
        key_id = int(data['guild_id'])
    except KeyError:
        key_id = int(data['channel_id'])
    vc = self._get_voice_client(key_id)
    if vc is not None:
        # Run the protocol handler in the background, logging any errors.
        coro = vc.on_voice_server_update(data)
        asyncio.create_task(logging_coroutine(coro, info='Voice Protocol voice server update handler'))
def parse_typing_start(self, data):
    """Handle TYPING_START, resolving who is typing in which channel."""
    channel, guild = self._get_guild_channel(data)
    if channel is not None:
        member = None
        user_id = utils._get_as_snowflake(data, 'user_id')
        if isinstance(channel, DMChannel):
            member = channel.recipient
        elif isinstance(channel, TextChannel) and guild is not None:
            member = guild.get_member(user_id)
            if member is None:
                # Fall back to the member data bundled with the event.
                member_data = data.get('member')
                if member_data:
                    member = Member(data=member_data, state=self, guild=guild)
        elif isinstance(channel, GroupChannel):
            member = utils.find(lambda x: x.id == user_id, channel.recipients)
        if member is not None:
            # Gateway timestamps are UTC epoch seconds.
            timestamp = datetime.datetime.fromtimestamp(data.get('timestamp'), tz=datetime.timezone.utc)
            self.dispatch('typing', channel, member, timestamp)
def _get_reaction_user(self, channel, user_id):
    """Resolve who reacted: a guild member for text channels, else a user."""
    if isinstance(channel, TextChannel):
        return channel.guild.get_member(user_id)
    return self.get_user(user_id)
def get_reaction_emoji(self, data):
    """Resolve raw reaction emoji data to a cached Emoji, a PartialEmoji,
    or (for unicode emoji) the plain name string."""
    emoji_id = utils._get_as_snowflake(data, 'id')
    if not emoji_id:
        # Unicode emoji have no ID; the name is the emoji itself.
        return data['name']
    try:
        return self._emojis[emoji_id]
    except KeyError:
        return PartialEmoji.with_state(self, animated=data.get('animated', False), id=emoji_id, name=data['name'])
def _upgrade_partial_emoji(self, emoji):
emoji_id = emoji.id
if not emoji_id:
return emoji.name
try:
return self._emojis[emoji_id]
except KeyError:
return emoji
def get_channel(self, id):
    """Look up a channel by ID across private channels and all guilds."""
    if id is None:
        return None
    private = self._get_private_channel(id)
    if private is not None:
        return private
    for guild in self.guilds:
        found = guild._resolve_channel(id)
        if found is not None:
            return found
def create_message(self, *, channel, data):
    """Build a Message bound to this connection state."""
    return Message(state=self, channel=channel, data=data)
class AutoShardedConnectionState(ConnectionState):
    """ConnectionState specialization for sharded clients.

    Adds per-shard connect/resume dispatches and a ready handshake that
    waits for every shard's GUILD_CREATE stream and chunks guilds with
    shard-aware concurrency limits.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._ready_task = None
        self.shard_ids = ()
        # Set once every shard connection has been launched.
        self.shards_launched = asyncio.Event()

    def _update_message_references(self):
        """Re-bind cached messages to the freshest guild/channel objects."""
        for msg in self._messages:
            if not msg.guild:
                continue
            new_guild = self._get_guild(msg.guild.id)
            if new_guild is not None and new_guild is not msg.guild:
                channel_id = msg.channel.id
                # Fall back to a bare Object when the channel is not cached.
                channel = new_guild._resolve_channel(channel_id) or Object(id=channel_id)
                msg._rebind_cached_references(new_guild, channel)

    async def chunker(self, guild_id, query='', limit=0, presences=False, *, shard_id=None, nonce=None):
        """Request member chunks over the websocket that owns *guild_id*."""
        ws = self._get_websocket(guild_id, shard_id=shard_id)
        await ws.request_chunks(guild_id, query=query, limit=limit, presences=presences, nonce=nonce)

    async def _delay_ready(self):
        """Drain streamed GUILD_CREATEs, chunk guilds per shard, then dispatch ready."""
        await self.shards_launched.wait()
        processed = []
        max_concurrency = len(self.shard_ids) * 2
        current_bucket = []
        while True:
            # this snippet of code is basically waiting N seconds
            # until the last GUILD_CREATE was sent
            try:
                guild = await asyncio.wait_for(self._ready_state.get(), timeout=self.guild_ready_timeout)
            except asyncio.TimeoutError:
                break
            else:
                if self._guild_needs_chunking(guild):
                    log.debug('Guild ID %d requires chunking, will be done in the background.', guild.id)
                    # Cap the number of concurrent chunk requests per bucket.
                    if len(current_bucket) >= max_concurrency:
                        try:
                            await utils.sane_wait_for(current_bucket, timeout=max_concurrency * 70.0)
                        except asyncio.TimeoutError:
                            fmt = 'Shard ID %s failed to wait for chunks from a sub-bucket with length %d'
                            log.warning(fmt, guild.shard_id, len(current_bucket))
                        finally:
                            current_bucket = []
                    # Chunk the guild in the background while we wait for GUILD_CREATE streaming
                    future = asyncio.ensure_future(self.chunk_guild(guild))
                    current_bucket.append(future)
                else:
                    # No chunking required; use an already-resolved future.
                    future = self.loop.create_future()
                    future.set_result([])
                processed.append((guild, future))
        # Group guilds by shard so each shard's events dispatch together.
        guilds = sorted(processed, key=lambda g: g[0].shard_id)
        for shard_id, info in itertools.groupby(guilds, key=lambda g: g[0].shard_id):
            children, futures = zip(*info)
            # 110 reqs/minute w/ 1 req/guild plus some buffer
            timeout = 61 * (len(children) / 110)
            try:
                await utils.sane_wait_for(futures, timeout=timeout)
            except asyncio.TimeoutError:
                log.warning('Shard ID %s failed to wait for chunks (timeout=%.2f) for %d guilds', shard_id,
                            timeout,
                            len(guilds))
            for guild in children:
                if guild.unavailable is False:
                    self.dispatch('guild_available', guild)
                else:
                    self.dispatch('guild_join', guild)
            self.dispatch('shard_ready', shard_id)
        # remove the state
        try:
            del self._ready_state
        except AttributeError:
            pass  # already been deleted somehow
        # regular users cannot shard so we won't worry about it here.
        # clear the current task
        self._ready_task = None
        # dispatch the event
        self.call_handlers('ready')
        self.dispatch('ready')

    def parse_ready(self, data):
        """Handle READY for one shard, merging its guilds into shared state."""
        if not hasattr(self, '_ready_state'):
            self._ready_state = asyncio.Queue()
        self.user = user = ClientUser(state=self, data=data['user'])
        self._users[user.id] = user
        if self.application_id is None:
            try:
                application = data['application']
            except KeyError:
                pass
            else:
                self.application_id = utils._get_as_snowflake(application, 'id')
                self.application_flags = ApplicationFlags._from_value(application['flags'])
        for guild_data in data['guilds']:
            self._add_guild_from_data(guild_data)
        # Later shards may replace guild objects; re-point cached messages.
        if self._messages:
            self._update_message_references()
        self.dispatch('connect')
        self.dispatch('shard_connect', data['__shard_id__'])
        # Only one ready-handshake task runs across all shards.
        if self._ready_task is None:
            self._ready_task = asyncio.create_task(self._delay_ready())

    def parse_resumed(self, data):
        """Handle RESUMED, also emitting the shard-specific variant."""
        self.dispatch('resumed')
        self.dispatch('shard_resumed', data['__shard_id__'])
| 38.795248
| 132
| 0.616507
|
4a09aaa9b9837294054ca9b234bc0d207d4099f7
| 3,107
|
py
|
Python
|
warc.py
|
valschneider/exfire
|
c6d0b7cdb7eb8bb03fe38729d420e1ab28c352b7
|
[
"MIT"
] | null | null | null |
warc.py
|
valschneider/exfire
|
c6d0b7cdb7eb8bb03fe38729d420e1ab28c352b7
|
[
"MIT"
] | null | null | null |
warc.py
|
valschneider/exfire
|
c6d0b7cdb7eb8bb03fe38729d420e1ab28c352b7
|
[
"MIT"
] | null | null | null |
import requests
import gzip
import json
from urllib.parse import urlparse
from bs4 import BeautifulSoup
def warc_url(url):
    """
    Search for the WARC archived version of the URL.

    :param url: The live URL to look up in the Wayback Machine
    :returns: The WARC URL if found, else None
    :raises RuntimeError: if the availability API request fails
    """
    query = "http://archive.org/wayback/available?url={}".format(url)
    response = requests.get(query)
    if not response:
        # requests.Response is falsy on 4xx/5xx; say what failed.
        raise RuntimeError("Wayback availability query failed for {}".format(url))
    # Use the built-in JSON decoding rather than json.loads(response.text).
    data = response.json()
    snapshots = data["archived_snapshots"]
    if not snapshots:
        return None
    return snapshots["closest"]["url"]
class WarcDescriptor:
    """
    Web archive content descriptor (one line of a CDX index).
    """
    # There's also some more stuff behind, dunno what it is

    def __init__(self, date, url, kind, code, key):
        self.date = date  # capture timestamp, e.g. 20150621140344
        self.url = url    # original captured URL
        self.kind = kind  # MIME type of the capture
        self.code = code  # HTTP status code (as a string)
        self.key = key    # content digest

    @classmethod
    def from_string(cls, string):
        """
        Parse one whitespace-separated CDX index line.

        Example expected string: com,xfire,crash)/video/1042c0 20150621140344 http://crash.xfire.com/video/1042c0/ text/html 200 AYEIWSNQ6QKWFXM7S4FZZJIZYSHSDMMW - - 8074 15935484941
        """
        fields = string.split()
        # CDX lines can carry a variable number of trailing fields depending
        # on the index flavour; the previous fixed 11-way unpack raised
        # ValueError on 10-field lines such as the documented example.
        # Capture the tail with a star instead.
        _, date, url, kind, code, key, *_rest = fields
        return WarcDescriptor(date, url, kind, code, key)

    @classmethod
    def iter_from_url(cls, url):
        """Yield descriptors parsed from the gzipped CDX file at *url*."""
        response = requests.get(url)
        if not response:
            raise RuntimeError()
        # TODO: check headers for file info?
        data = gzip.decompress(response.content).decode()
        # Dunno what the first line is for, skip it
        for line in data.splitlines()[1:]:
            yield cls.from_string(line)
class WarcHost:
    """Scrapes archive.org listing pages for a host to locate its WARC
    descriptor (CDX) downloads."""

    def __init__(self, url):
        # url: the archive.org listing page for this host's captures.
        self.url = url
        parse = urlparse(url)
        # Scheme + netloc prefix used to absolutize scraped relative links.
        self.host = "{}://{}".format(parse.scheme, parse.netloc)

    def iter_archive_pages(self):
        """
        Yield the URL for the page of each archive
        """
        response = requests.get(self.url)
        if not response:
            raise RuntimeError()
        dom = BeautifulSoup(response.text, features="html.parser")
        # Each item tile on the listing links to one archive's detail page.
        divs = dom.findAll("div", class_="item-ttl")
        for div in divs:
            yield self.host + div.a['href']

    def get_archive_descriptor(self, url):
        """
        Return the absolute download URL of the archive's descriptor file.

        :param url: The URL to the archive page
        :type url: str
        """
        response = requests.get(url)
        if not response:
            raise RuntimeError()
        # Look for an archive URL
        dom = BeautifulSoup(response.text, features="html.parser")
        section = dom.find("section", class_="item-download-options")
        # XXX: Might want to search for right format rather than hardcode index
        # NOTE(review): options[5] assumes a fixed ordering of download
        # formats on the page — fragile; confirm against the live layout.
        options = section.findAll("a", class_="format-summary")
        option = options[5]
        return self.host + option['href']

    def iter_descriptors(self):
        """Yield WarcDescriptors across every archive page of this host."""
        for archive_url in self.iter_archive_pages():
            for desc in WarcDescriptor.iter_from_url(self.get_archive_descriptor(archive_url)):
                yield desc
| 27.741071
| 182
| 0.61281
|
4a09ac0434fbab150074df74c2643eed1cb917e9
| 6,239
|
py
|
Python
|
ginza_util/train_word2vec.py
|
polm/ginza
|
b868823f793057ac3976fa343fd9bd14ebe1c75e
|
[
"MIT"
] | 1
|
2020-04-08T04:45:20.000Z
|
2020-04-08T04:45:20.000Z
|
ginza_util/train_word2vec.py
|
joreyolo/ginza
|
b868823f793057ac3976fa343fd9bd14ebe1c75e
|
[
"MIT"
] | null | null | null |
ginza_util/train_word2vec.py
|
joreyolo/ginza
|
b868823f793057ac3976fa343fd9bd14ebe1c75e
|
[
"MIT"
] | null | null | null |
# coding: utf8
from __future__ import unicode_literals, print_function
import plac
from pathlib import Path
import pickle
import sys
import spacy
from gensim.models import Word2Vec
from ginza.sudachipy_tokenizer import read_sudachi_a, read_sudachi_b, read_sudachi_c
from .corpus import sentence_iter
from .bccwj_ud_corpus import read_bccwj_ud
from spacy.util import get_lang_class
@plac.annotations(
    corpus_type=("Corpus type (default='sudachi_b')", "option", "t", str),
    base_model_path=("Path to base model directory", "option", "b", Path),
    lang_name=("Language name", "option", "l", str),
    model_name=("Model name", "option", "n", str),
    model_version=("Model version", "option", "v", str),
    dimension=("Dimension of the word vectors (default=100)", "option", "d", int),
    vocab_size=("Vocab size (default=100000)", "option", "s", int),
    min_count=("Min count (default=5)", "option", "c", int),
    window=("Context window size (default=7)", "option", "w", int),
    negative=("Number of negative samples (default=5)", "option", "p", int),
    n_workers=("Number of workers (default=8)", "option", "k", int),
    epochs=("Epochs (default=2)", "option", "e", int),
    output_dir=("Output directory (default='.')", "option", "o", Path),
    require_gpu=("enable require_gpu", "flag", "g"),
)
def train_word2vec_from_file(
        corpus_type='sudachi_b',
        base_model_path=None,
        lang_name='ja',
        model_name='bccwj_ud',
        model_version='1.0.0',
        dimension=100,
        vocab_size=100000,
        min_count=5,
        window=7,
        negative=5,
        n_workers=8,
        epochs=2,
        output_dir=Path('.'),
        require_gpu=False,
        input_path=None,
):
    """CLI entry point: train (or continue training) a gensim word2vec model
    on a tokenized corpus and write the resulting vectors into a spaCy model
    directory, together with the raw Word2Vec model ('.w2v') and the word
    counting state ('.pickle') needed to resume training later.
    """
    if require_gpu:
        spacy.require_gpu()
        print("GPU enabled", file=sys.stderr)
    # Select the tokenizer/reader that sentence_iter will use.
    if corpus_type == 'sudachi_a':
        corpus_reader = read_sudachi_a
    elif corpus_type == 'sudachi_b':
        corpus_reader = read_sudachi_b
    elif corpus_type == 'sudachi_c':
        corpus_reader = read_sudachi_c
    elif corpus_type == 'bccwj_ud':
        corpus_reader = read_bccwj_ud
    else:
        raise Exception('%s not supported' % corpus_type)
    if base_model_path:
        # Resume from a previous run: reload the Word2Vec model and the
        # (total_sents, word_store, word_counter) counting state.
        print('load base model: {}'.format(base_model_path), file=sys.stderr)
        model = Word2Vec.load(str(model_file_path(base_model_path, 'w2v')))
        print('w2v loaded', file=sys.stderr)
        with open(str(model_file_path(base_model_path, 'pickle')), 'rb') as f:
            total_sents, word_store, word_counter = pickle.load(f)
        print('pickle loaded', file=sys.stderr)
    else:
        # Fresh model with empty counting state.
        model = Word2Vec(
            size=dimension,
            window=window,
            min_count=min_count,
            workers=n_workers,
            sample=1e-5,
            negative=negative
        )
        total_sents = 0
        word_store = {}
        word_counter = []
        print('initialized', file=sys.stderr)
    total_sents, words = train_word2vec(
        model, total_sents, word_store, word_counter, corpus_reader, vocab_size, min_count, epochs, input_path
    )
    new_model_path = output_dir
    # NOTE(review): get_lang_class returns a Language *class*; meta/vocab are
    # accessed on it directly here -- confirm this is intended vs. nlp = cls().
    nlp = get_lang_class(lang_name)
    nlp.meta['name'] = model_name
    nlp.meta['version'] = model_version
    vocab = nlp.vocab
    # Copy every trained vector into the spaCy vocab.
    for word in words:
        vocab.set_vector(word, model.wv[word])
    corrector = nlp.create_pipe('JapaneseCorrector')
    nlp.add_pipe(corrector, last=True)
    nlp.to_disk(new_model_path)
    print('saved: ', new_model_path, file=sys.stderr)
    # Persist the raw gensim model and counting state for later resumption.
    model.save(str(model_file_path(new_model_path, 'w2v')))
    print('w2v saved', file=sys.stderr)
    with open(str(model_file_path(new_model_path, 'pickle')), 'wb') as f:
        pickle.dump((total_sents, word_store, word_counter), f)
    print('pickle saved', file=sys.stderr)
def train_word2vec(
        model,
        total_sents,
        word_store,
        word_counter,
        corpus_reader=read_sudachi_b,
        vocab_size=100000,
        min_count=5,
        epochs=1,
        input_path=None,
):
    """Count words in the corpus, build (or extend) the model's vocabulary,
    and train `model` on the corpus.

    :param model: gensim Word2Vec instance (fresh or previously trained)
    :param total_sents: number of sentences counted in previous runs
    :param word_store: dict mapping word -> word id (mutated in place)
    :param word_counter: list of frequencies indexed by word id (mutated)
    :param corpus_reader: tokenizer passed through to sentence_iter
    :param vocab_size: maximum vocabulary size kept for training
    :param min_count: minimum frequency for a word to enter the vocabulary
    :param epochs: number of training epochs
    :param input_path: corpus location passed to sentence_iter
    :return: (updated total_sents, iterable of vocabulary words)
    """
    total_words = sum(word_counter)
    next_id = len(word_store.keys())
    # BUG FIX: the original format string had only two placeholders for
    # three arguments, silently dropping next_id from the log line.
    print('word count phase start ({}, {}, {})'.format(total_sents, total_words, next_id), flush=True)
    for sentence in sentence_iter(input_path, corpus_reader):
        total_sents += 1
        for word in sentence:
            word_id = word_store.get(word, next_id)
            if word_id == next_id:
                # First occurrence: assign a fresh id and start its counter.
                word_store[word] = next_id
                next_id += 1
                word_counter.append(1)
            else:
                word_counter[word_id] += 1
            total_words += 1
    # BUG FIX: same missing-placeholder problem as above.
    print('word count phase end ({}, {}, {})'.format(total_sents, total_words, next_id), flush=True)
    # Count how many words survive the min_count threshold (only needed to
    # decide whether we must truncate the vocabulary to vocab_size).
    size = 0
    if len(word_counter) > vocab_size:
        for freq in word_counter:
            if freq >= min_count:
                size += 1
    if size <= vocab_size:
        word_freq_map = {
            word: word_counter[word_id] for word, word_id in word_store.items() if word_counter[word_id] >= min_count
        }
    else:
        # Keep only the vocab_size most frequent words.
        word_freqs = sorted(
            [
                (
                    word,
                    word_counter[word_id]
                ) for word, word_id in word_store.items() if word_counter[word_id] >= min_count
            ], key=lambda t: -t[1]
        )[:vocab_size]
        word_freq_map = {
            t[0]: t[1] for t in word_freqs
        }
    print('word2vec training phase start', flush=True)
    try:
        model.build_vocab_from_freq(word_freq_map)
    except RuntimeError:
        # The vocabulary was already built on a previous run; gensim refuses
        # to rebuild it, which is fine when resuming training.
        print('Vocabulary is fixed', file=sys.stderr)
    model.train(
        sentence_iter(input_path, corpus_reader),
        total_examples=total_sents,
        total_words=total_words,
        epochs=epochs,
    )
    print('word2vec training phase end', flush=True)
    if len(word_store) > 0:
        # Debug output for the first stored word and its vector.
        word = list(word_store.keys())[0]
        word_id = word_store[word]
        print('{},{},{}'.format(word_id, word, word_counter[word_id]))
        # CONSISTENCY FIX: access vectors via model.wv, matching the caller;
        # model[word] is deprecated and removed in gensim 4.
        print(model.wv[word], flush=True)
    return total_sents, word_freq_map.keys()
def model_file_path(path, file):
    """Return *path* with the extension *file* appended, e.g.
    ('model', 'w2v') -> Path('model.w2v')."""
    return Path('%s.%s' % (path, file))
if __name__ == '__main__':
    # Let plac parse CLI arguments from the @plac.annotations decorator.
    plac.call(train_word2vec_from_file)
| 33.543011
| 117
| 0.619651
|
4a09ac5cd2374750efb040401f3d5c77d9dec9cb
| 17,031
|
py
|
Python
|
Code/autopkglib/__init__.py
|
timsutton/autopkg
|
55810ad388aef4a1b62bfd68c256949963935414
|
[
"Apache-2.0"
] | 2
|
2016-01-02T09:35:03.000Z
|
2020-05-24T16:56:34.000Z
|
Code/autopkglib/__init__.py
|
timsutton/autopkg
|
55810ad388aef4a1b62bfd68c256949963935414
|
[
"Apache-2.0"
] | null | null | null |
Code/autopkglib/__init__.py
|
timsutton/autopkg
|
55810ad388aef4a1b62bfd68c256949963935414
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2010 Per Olofsson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import imp
import FoundationPlist
import pprint
import re
import subprocess
from Foundation import NSArray, NSDictionary
from CoreFoundation import CFPreferencesCopyAppValue, CFPreferencesSetAppValue
from CoreFoundation import CFPreferencesAppSynchronize
from distutils.version import LooseVersion
# Preferences domain used for all autopkg settings.
BUNDLE_ID = "com.github.autopkg"
# Preference keys that get_all_prefs() reports.
SUPPORTED_PREFS = [
    "MUNKI_REPO",
    "CACHE_DIR",
    "RECIPE_SEARCH_DIRS",
    "RECIPE_OVERRIDE_DIRS",
    "RECIPE_REPO_DIR",
    "RECIPE_REPOS"
]
# Matches %KEY% style references used for recipe variable substitution.
re_keyref = re.compile(r'%(?P<key>[a-zA-Z_][a-zA-Z_0-9]*)%')
class PreferenceError(Exception):
    """Raised when an autopkg preference cannot be read or written."""
def get_pref(key, domain=BUNDLE_ID):
    """Return a single pref value (or None) for a domain.

    Note: falsy stored values (0, "", empty list) are coerced to None by
    the 'or None' below.
    """
    value = CFPreferencesCopyAppValue(key, domain) or None
    # Casting NSArrays and NSDictionaries to native Python types.
    # This is a workaround for 10.6, where PyObjC doesn't seem to
    # support as many common operations such as list concatenation
    # between Python and ObjC objects.
    if isinstance(value, NSArray):
        value = list(value)
    elif isinstance(value, NSDictionary):
        value = dict(value)
    return value
def set_pref(key, value, domain=BUNDLE_ID):
"""Sets a preference for domain"""
try:
CFPreferencesSetAppValue(key, value, domain)
if not CFPreferencesAppSynchronize(domain):
raise PreferenceError(
"Could not synchronize %s preference: %s" % key)
except Exception, err:
raise PreferenceError(
"Could not set %s preference: %s" % (key, err))
def get_all_prefs(domain=BUNDLE_ID):
    """Return a dict (possibly empty) with the contents of all
    supported preferences in the domain."""
    prefs = {}
    for key in SUPPORTED_PREFS:
        # BUG FIX: honor the 'domain' argument -- the original always read
        # from BUNDLE_ID -- and read each preference once instead of twice.
        value = get_pref(key, domain=domain)
        if value:
            prefs[key] = value
    return prefs
def get_autopkg_version():
    """Return the version string from version.plist next to this module,
    or "UNKNOWN" if it cannot be read."""
    try:
        version_plist = FoundationPlist.readPlist(
            os.path.join(os.path.dirname(__file__), "version.plist"))
    except FoundationPlist.FoundationPlistException:
        return "UNKNOWN"
    # NOTE(review): a plain dict missing "Version" would raise KeyError,
    # which is not caught here -- presumably readPlist returns an
    # NSDictionary-ish object where this surfaces differently; confirm.
    try:
        return version_plist["Version"]
    except (AttributeError, TypeError):
        return "UNKNOWN"
def update_data(a_dict, key, value):
    """Update a_dict keys with value. Existing data can be referenced
    by wrapping the key in %percent% signs."""
    def getdata(m):
        # Regex callback: replace %key% with the current value in a_dict.
        return a_dict[m.group("key")]
    def do_variable_substitution(item):
        """Do variable substitution for item"""
        if isinstance(item, basestring):
            try:
                item = re_keyref.sub(getdata, item)
            except KeyError, err:
                # Referenced key does not exist in a_dict; leave the
                # string as-is and warn.
                print >> sys.stderr, (
                    "Use of undefined key in variable substitution: %s"
                    % err)
        elif isinstance(item, (list, NSArray)):
            # Substitute recursively in each list element, in place.
            for index in range(len(item)):
                item[index] = do_variable_substitution(item[index])
        elif isinstance(item, (dict, NSDictionary)):
            # ObjC-bridged objects don't like to be modified while being iterated over
            dict_copy = item.copy()
            for key, value in dict_copy.iteritems():
                item[key] = do_variable_substitution(value)
        return item
    a_dict[key] = do_variable_substitution(value)
# Processor and ProcessorError base class definitions
class ProcessorError(Exception):
    """Raised by a Processor when a processing step fails."""
class Processor(object):
    """Processor base class.
    Processors accept a property list as input, process its contents, and
    returns a new or updated property list that can be processed further.
    """
    def __init__(self, env=None, infile=None, outfile=None):
        #super(Processor, self).__init__()
        # env: the shared variable dictionary; infile/outfile default to
        # the process's stdin/stdout for standalone (plist pipe) use.
        self.env = env
        if infile is None:
            self.infile = sys.stdin
        else:
            self.infile = infile
        if outfile is None:
            self.outfile = sys.stdout
        else:
            self.outfile = outfile
    def output(self, msg, verbose_level=1):
        # Print msg only when the env's 'verbose' level is high enough.
        if self.env.get('verbose', 0) >= verbose_level:
            print "%s: %s" % (self.__class__.__name__, msg)
    def main(self):
        # Subclasses must override this with the actual processing logic.
        raise ProcessorError("Abstract method main() not implemented.")
    def get_manifest(self):
        # Returns the subclass's declared (description, inputs, outputs).
        try:
            return (self.description,
                    self.input_variables,
                    self.output_variables)
        except AttributeError as e:
            raise ProcessorError("Missing manifest: %s" % e)
    def read_input_plist(self):
        """Read environment from input plist."""
        try:
            indata = self.infile.read()
            if indata:
                self.env = FoundationPlist.readPlistFromString(indata)
            else:
                self.env = dict()
        except BaseException as e:
            raise ProcessorError(e)
    def write_output_plist(self):
        """Write environment to output as plist."""
        if self.env is None:
            return
        try:
            FoundationPlist.writePlist(self.env, self.outfile)
        except BaseException as e:
            raise ProcessorError(e)
    def parse_arguments(self):
        """Parse arguments as key='value'."""
        for arg in sys.argv[1:]:
            (key, sep, value) = arg.partition("=")
            if sep != "=":
                raise ProcessorError("Illegal argument '%s'" % arg)
            # NOTE(review): Processor defines no update_data method; this
            # probably should call the module-level update_data(self.env,
            # key, value) -- confirm before relying on CLI argument parsing.
            self.update_data(key, value)
    def inject(self, arguments):
        # Update data with arguments.
        for key, value in arguments.items():
            update_data(self.env, key, value)
    def process(self):
        """Main processing loop."""
        # Make sure all required arguments have been supplied.
        for variable, flags in self.input_variables.items():
            if flags["required"] and (variable not in self.env):
                # NOTE(review): instances have no __name__; this error path
                # itself raises AttributeError -- likely meant
                # self.__class__.__name__.
                raise ProcessorError(
                    "%s requires %s" % (self.__name__, variable))
        self.main()
        return self.env
    def cmdexec(self, command, description):
        """Execute a command and return output."""
        try:
            p = subprocess.Popen(command,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            (out, err) = p.communicate()
        except OSError as e:
            raise ProcessorError(
                "%s execution failed with error code %d: %s"
                % (command[0], e.errno, e.strerror))
        if p.returncode != 0:
            raise ProcessorError("%s failed: %s" % (description, err))
        return out
    def execute_shell(self):
        """Execute as a standalone binary on the commandline."""
        try:
            self.read_input_plist()
            self.parse_arguments()
            self.main()
            self.write_output_plist()
        except ProcessorError as e:
            print >> sys.stderr, "ProcessorError: %s" % e
            sys.exit(10)
        else:
            sys.exit(0)
# AutoPackager class defintion
class AutoPackagerError(Exception):
    """Raised when recipe verification or processing fails."""
class AutoPackager(object):
    """Instantiate and execute processors from a recipe."""
    def __init__(self, options, env):
        # options.verbose: CLI verbosity level; env: shared variable dict.
        self.verbose = options.verbose
        self.env = env
        self.results = []
        self.env["AUTOPKG_VERSION"] = get_autopkg_version()
    def output(self, msg, verbose_level=1):
        # Print msg only at or above the requested verbosity.
        if self.verbose >= verbose_level:
            print msg
    def get_recipe_identifier(self, recipe):
        """Return the identifier given an input recipe plist."""
        identifier = (recipe.get("Identifier") or
                      recipe["Input"].get("IDENTIFIER"))
        if not identifier:
            print "ID NOT FOUND"
            # build a pseudo-identifier based on the recipe pathname
            recipe_path = self.env.get("RECIPE_PATH")
            # get rid of filename extension
            recipe_path = os.path.splitext(recipe_path)[0]
            path_parts = recipe_path.split("/")
            identifier = "-".join(path_parts)
        return identifier
    def process_cli_overrides(self, recipe, cli_values):
        """Override env with input values from the CLI:
        Start with items in recipe's 'Input' dict, merge and
        overwrite any key-value pairs appended to the
        autopkg command invocation, of the form: NAME=value
        """
        # Set up empty container for final output
        inputs = {}
        inputs.update(recipe["Input"])
        # NOTE(review): 'identifier' is computed but never used here.
        identifier = self.get_recipe_identifier(recipe)
        inputs.update(cli_values)
        self.env.update(inputs)
        # do any internal string substitutions
        for key, value in self.env.items():
            update_data(self.env, key, value)
    def verify(self, recipe):
        """Verify a recipe and check for errors."""
        # Check for MinimumAutopkgVersion
        if "MinimumVersion" in recipe.keys():
            if (LooseVersion(self.env["AUTOPKG_VERSION"]) <
                LooseVersion(recipe.get("MinimumVersion"))):
                raise AutoPackagerError(
                    "Recipe requires at least version %s, "
                    "but we are version %s."
                    % (recipe.get("MinimumVersion"),
                       self.env["AUTOPKG_VERSION"]))
        # Initialize variable set with input variables.
        variables = set(recipe["Input"].keys())
        # Add environment.
        variables.update(set(self.env.keys()))
        # Check each step of the process.
        for step in recipe["Process"]:
            try:
                processor_class = get_processor(
                    step["Processor"], recipe=recipe)
            except (KeyError, AttributeError):
                raise AutoPackagerError(
                    "Unknown processor '%s'" % step["Processor"])
            # Add arguments to set of variables.
            variables.update(set(step.get("Arguments", dict()).keys()))
            # Make sure all required input variables exist.
            for key, flags in processor_class.input_variables.items():
                if flags["required"] and (key not in variables):
                    raise AutoPackagerError("%s requires missing argument %s"
                                            % (step["Processor"], key))
            # Add output variables to set.
            variables.update(set(processor_class.output_variables.keys()))
    def process(self, recipe):
        """Process a recipe."""
        identifier = self.get_recipe_identifier(recipe)
        # define a cache/work directory for use by the recipe
        cache_dir = self.env.get("CACHE_DIR") or os.path.expanduser(
            "~/Library/AutoPkg/Cache")
        self.env["RECIPE_CACHE_DIR"] = os.path.join(
            cache_dir, identifier)
        # Record a snapshot of the starting environment in the results.
        recipe_input_dict = {}
        for key in self.env.keys():
            recipe_input_dict[key] = self.env[key]
        self.results.append({"Recipe input": recipe_input_dict})
        # make sure the RECIPE_CACHE_DIR exists, creating it if needed
        if not os.path.exists(self.env["RECIPE_CACHE_DIR"]):
            try:
                os.makedirs(self.env["RECIPE_CACHE_DIR"])
            except OSError, e:
                raise AutoPackagerError(
                    "Could not create RECIPE_CACHE_DIR %s: %s"
                    % (self.env["RECIPE_CACHE_DIR"], e))
        if self.verbose > 2:
            pprint.pprint(self.env)
        # Run each processing step in order, threading env through them.
        for step in recipe["Process"]:
            if self.verbose:
                print step["Processor"]
            processor_class = get_processor(step["Processor"])
            processor = processor_class(self.env)
            processor.inject(step.get("Arguments", dict()))
            input_dict = {}
            for key in processor.input_variables.keys():
                if key in processor.env:
                    input_dict[key] = processor.env[key]
            if self.verbose > 1:
                # pretty print any defined input variables
                pprint.pprint({"Input": input_dict})
            try:
                self.env = processor.process()
            except ProcessorError as e:
                print >> sys.stderr, str(e)
                raise AutoPackagerError(
                    "Error in %s: Processor: %s: Error: %s"
                    %(identifier, step["Processor"], str(e)))
            output_dict = {}
            for key in processor.output_variables.keys():
                output_dict[key] = self.env[key]
            if self.verbose > 1:
                # pretty print output variables
                pprint.pprint({"Output": output_dict})
            self.results.append({'Processor': step["Processor"],
                                 'Input': input_dict,
                                 'Output': output_dict})
            if self.env.get("stop_processing_recipe"):
                # processing should stop now
                break
        if self.verbose > 2:
            pprint.pprint(self.env)
# Registry of Processor class names known to autopkglib.
_processor_names = []
def import_processors():
    '''Imports processors from the directory this init file is in'''
    # get the directory this __init__.py file is in
    mydir = os.path.dirname(os.path.abspath(__file__))
    mydirname = os.path.basename(mydir)
    # find all the .py files (minus this one)
    processor_files = [
        os.path.splitext(name)[0]
        for name in os.listdir(mydir)
        if name.endswith('.py') and not name == '__init__.py']
    # Warning! Fancy dynamic importing ahead!
    #
    # import the filename as a submodule
    # then add the attribute with the same name to the globals()
    #
    # This is the equivalent of:
    #
    # from Bar.Foo import Foo
    #
    for name in processor_files:
        globals()[name] = getattr(__import__(
            mydirname + '.' + name, fromlist=[name]), name)
        _processor_names.append(name)
# convenience functions for adding and accessing processors
# since these can change dynamically
def add_processor(name, processor_object):
    '''Adds a Processor to the autopkglib namespace'''
    globals()[name] = processor_object
    # Idiom fix: 'name not in' instead of 'not name in'.
    if name not in _processor_names:
        _processor_names.append(name)
def get_processor(processor_name, recipe=None):
'''Returns a Processor object given a name and optionally a recipe,
importing a processor from the recipe directory if available'''
if recipe:
# search recipe dirs for processor
recipe_dir = os.path.dirname(recipe['RECIPE_PATH'])
processor_search_dirs = [recipe_dir]
if recipe.get("PARENT_RECIPES"):
# also look in the directories containing the parent recipes
parent_recipe_dirs = list(set([
os.path.dirname(item)
for item in recipe["PARENT_RECIPES"]]))
processor_search_dirs.extend(parent_recipe_dirs)
for directory in processor_search_dirs:
processor_filename = os.path.join(
directory, processor_name + '.py')
if os.path.exists(processor_filename):
try:
# attempt to import the module
_tmp = imp.load_source(
processor_name, processor_filename)
# look for an attribute with the step Processor name
_processor = getattr(_tmp, processor_name)
# add the processor to autopkglib's namespace
add_processor(processor_name, _processor)
# we've added a Processor, so stop searching
break
except (ImportError, AttributeError), err:
# if we aren't successful, that might be OK, we're going
# see if the processor was already imported
self.output(
"WARNING: %s: %s" % (processor_filename, err))
return globals()[processor_name]
def processor_names():
    '''Returns the list of Processor names currently registered.'''
    return _processor_names
# when importing autopkglib, need to also import all the processors
# in this same directory
import_processors()
| 35.334025
| 86
| 0.589983
|
4a09ac8a4cdc31d2e7f71256c3f4ec52ae62113d
| 6,623
|
py
|
Python
|
main.py
|
kikei/gbookmark-to-raindropio
|
b108bbfa03b6e2e429b5ecb67f361dadce51ca9a
|
[
"MIT"
] | null | null | null |
main.py
|
kikei/gbookmark-to-raindropio
|
b108bbfa03b6e2e429b5ecb67f361dadce51ca9a
|
[
"MIT"
] | null | null | null |
main.py
|
kikei/gbookmark-to-raindropio
|
b108bbfa03b6e2e429b5ecb67f361dadce51ca9a
|
[
"MIT"
] | null | null | null |
import datetime
import json
import sys
import traceback
from html.parser import HTMLParser
GBOOKMARK_XML = 'GoogleBookmarks.html'
INTERMEDIATE_JSON = 'intermediate.json'
RAINDROP_XML = 'Raindrop-out.html'
RAINDROP_COLLECTION_NAME = 'main'
class LinkItem(object):
    """A single bookmark: title, URL, creation time (epoch seconds), tags."""

    def __init__(self):
        self.title = None
        self.href = None
        self.createDate = None
        self.tags = []

    def addTag(self, tag):
        """Append *tag* if not already present (duplicates are ignored)."""
        # Defensive: tolerate callers that reset self.tags to None.
        if self.tags is None:
            self.tags = [tag]
            return
        if tag not in self.tags:
            self.tags.append(tag)

    def __str__(self):
        return ('LinkItem(title={t}, href={h}, date={d}, tags={ts})'
                .format(t=self.title, h=self.href,
                        d=self.createDate, ts=self.tags))

    def toJSON(self):
        """Return a plain-dict representation suitable for json.dumps."""
        return {
            'title': self.title,
            'href': self.href,
            'createDate': self.createDate,
            'tags': self.tags,
        }
def tagIs(name1, name2):
    """Case-insensitive comparison of two tag names."""
    left = name1.lower()
    right = name2.lower()
    return left == right
def getAttribute(attrs, name):
    """Return the first value for *name* in a (key, value) attribute
    list as produced by HTMLParser, or None when absent."""
    matches = (value for key, value in attrs if key == name)
    return next(matches, None)
class GBookmarkParser(HTMLParser):
    """State-machine parser for a Google Bookmarks HTML export.

    Walks the DL/DT/H3/A structure: each H3 is a label (folder), each A
    under it becomes a LinkItem tagged with that label. Items with the
    same href are merged, accumulating tags. Parsed items end up in
    self.items keyed by href.
    """
    # Parser states: waiting for the outer DL, then alternating between
    # reading a label (H3) and reading that label's items (A tags).
    STATE_INIT = 0
    STATE_START = 1
    STATE_LABELS = 2
    STATE_READ_LABEL = 3
    STATE_READ_ITEMS = 4
    STATE_READ_ITEM = 5
    def __init__(self, labelsIgnore=None):
        super().__init__()
        self.count = 0
        self.state = GBookmarkParser.STATE_INIT
        self.currentLabel = None
        self.currentItem = None
        self.items = {}
        # BUG FIX: avoid the mutable default argument 'labelsIgnore=[]';
        # None is translated to a fresh list per instance.
        self.labelsIgnore = [] if labelsIgnore is None else labelsIgnore
    def handle_starttag(self, name, attrs):
        # Debug trace of every start tag and the current state.
        print('start {s} | {n} {a}'.format(s=self.state, n=name, a=attrs))
        if self.state == GBookmarkParser.STATE_INIT:
            if tagIs(name, 'dl'):
                self.state = GBookmarkParser.STATE_START
        elif self.state == GBookmarkParser.STATE_START:
            if tagIs(name, 'dt'):
                self.state = GBookmarkParser.STATE_LABELS
        elif self.state == GBookmarkParser.STATE_LABELS:
            if tagIs(name, 'h3'):
                self.state = GBookmarkParser.STATE_READ_LABEL
                # Placeholder; handle_data overwrites this with the real
                # label text.
                self.currentLabel = name
            elif tagIs(name, 'dl'):
                self.state = GBookmarkParser.STATE_READ_ITEMS
                if self.currentLabel is None:
                    raise Exception('no label')
        elif self.state == GBookmarkParser.STATE_READ_ITEMS:
            if tagIs(name, 'dt'):
                self.state = GBookmarkParser.STATE_READ_ITEM
                self.currentItem = LinkItem()
        elif self.state == GBookmarkParser.STATE_READ_ITEM:
            if tagIs(name, 'a'):
                add_date = getAttribute(attrs, 'add_date')
                if add_date is None:
                    raise Exception('Non date found')
                self.currentItem.href = getAttribute(attrs, 'href')
                # ADD_DATE is in microseconds (presumably -- Google export
                # format); convert to epoch seconds.
                self.currentItem.createDate = int(int(add_date) * 1e-6)
                if self.currentLabel not in self.labelsIgnore:
                    self.currentItem.addTag(self.currentLabel)
        print(name)
    def handle_endtag(self, name):
        # Debug trace of every end tag and the current state.
        print('end {s} | {n}'.format(s=self.state, n=name))
        if self.state == GBookmarkParser.STATE_READ_LABEL:
            if tagIs(name, 'h3'):
                self.state = GBookmarkParser.STATE_LABELS
        elif self.state == GBookmarkParser.STATE_LABELS:
            if tagIs(name, 'dt'):
                self.state = GBookmarkParser.STATE_START
        elif self.state == GBookmarkParser.STATE_READ_ITEMS:
            if tagIs(name, 'dl'):
                self.state = GBookmarkParser.STATE_LABELS
        elif self.state == GBookmarkParser.STATE_READ_ITEM:
            if tagIs(name, 'a'):
                # The </a> closes one bookmark entry; register it.
                print(self.currentItem)
                self.addItem(self.currentItem)
                self.state = GBookmarkParser.STATE_READ_ITEMS
    def handle_data(self, data):
        print('char {d}'.format(d=data))
        if self.state == GBookmarkParser.STATE_READ_LABEL:
            # Text inside <h3> is the label (folder) name.
            self.currentLabel = data.strip()
        elif self.state == GBookmarkParser.STATE_READ_ITEM:
            # Text inside <a> is the bookmark title.
            self.currentItem.title = data
    def addItem(self, linkItem):
        """Register linkItem under its href, merging tags on duplicates."""
        href = linkItem.href
        if href in self.items:
            for tag in linkItem.tags:
                self.items[href].addTag(tag)
        else:
            self.items[href] = linkItem
class RaindropXMLBuilder(object):
    """Groups LinkItems into named collections and renders them as a
    Netscape-bookmark-format HTML file importable by Raindrop.io."""

    def __init__(self):
        self.collections = {}

    def addLinkItem(self, collectionName, item):
        """Append *item* to the collection, creating it on first use."""
        self.collections.setdefault(collectionName, []).append(item)

    def buildXml(self):
        """Render every collection into one bookmark-HTML document."""
        header = '''<!DOCTYPE NETSCAPE-Bookmark-file-1>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
<TITLE>Raindrop.io Bookmarks</TITLE>
<H1>Raindrop.io Bookmarks</H1>
<DL><p>
'''
        entry_template = ('<DT><A HREF="{h}" ADD_DATE="{d1}"' +
                          ' LAST_MODIFIED="{d2}" TAGS="{ts}">{t}</A>')
        lines = [header]
        for collectionName, items in self.collections.items():
            # The folder's own timestamps are simply "now".
            now = int(datetime.datetime.now().timestamp())
            lines.append('<DT><H3 ADD_DATE="{d1}" LAST_MODIFIED="{d2}">{c}</H3>'
                         .format(d1=now, d2=now, c=collectionName))
            lines.append('<DL><p>')
            for item in items:
                lines.append(entry_template
                             .format(t=item.title, h=item.href,
                                     d1=item.createDate, d2=item.createDate,
                                     ts=','.join(item.tags)))
            lines.append('</DL><p>')
        lines.append('</DL><p>')
        return '\n'.join(lines)
def main():
    """Convert a Google Bookmarks export into a Raindrop.io import file.

    Reads GBOOKMARK_XML, writes the parsed items to INTERMEDIATE_JSON for
    inspection, then renders them all into one Raindrop collection in
    RAINDROP_XML.
    """
    inFileXml = GBOOKMARK_XML
    intFileJson = INTERMEDIATE_JSON
    outFileXml = RAINDROP_XML
    collectionName = RAINDROP_COLLECTION_NAME
    # Google's label for unlabeled bookmarks ("no label" in Japanese);
    # such items get no tag.
    labelsIgnore = ['ラベルなし']
    parser = GBookmarkParser(labelsIgnore=labelsIgnore)
    with open(inFileXml, 'r') as f:
        xmlText = f.read()
    parser.feed(xmlText)
    parser.close()
    items = parser.items
    # Dump an intermediate JSON snapshot of everything that was parsed.
    objs = [items[key].toJSON() for key in items.keys()]
    with open(intFileJson, 'w') as f:
        f.write(json.dumps(objs, indent=2, ensure_ascii=False))
    # All items go into a single Raindrop collection.
    builder = RaindropXMLBuilder()
    for k in items.keys():
        builder.addLinkItem(collectionName, items[k])
    with open(outFileXml, 'w') as f:
        outXml = builder.buildXml()
        f.write(outXml)
if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        # Print the full traceback to stdout instead of letting the
        # interpreter abort with the default stderr dump.
        traceback.print_exc(file=sys.stdout)
| 35.228723
| 80
| 0.58161
|
4a09acf4bfe4eff43a38be9f3ad1332bf9c176e1
| 23,214
|
py
|
Python
|
tests/test_multioutput.py
|
informaton/GPflow
|
6ee199f4ca1b02a83218578b94fef219b428ca04
|
[
"Apache-2.0"
] | 1
|
2020-01-27T19:05:28.000Z
|
2020-01-27T19:05:28.000Z
|
tests/test_multioutput.py
|
sdeepaknarayanan/GPflow
|
164d90d78c1c6fd966ae19ebaee59b9241bcba39
|
[
"Apache-2.0"
] | null | null | null |
tests/test_multioutput.py
|
sdeepaknarayanan/GPflow
|
164d90d78c1c6fd966ae19ebaee59b9241bcba39
|
[
"Apache-2.0"
] | null | null | null |
import gpflow
import numpy as np
import pytest
import scipy
import tensorflow as tf
import gpflow.multioutput.features as mf
import gpflow.multioutput.kernels as mk
from gpflow.features import InducingPoints
from gpflow.kernels import RBF
from gpflow.likelihoods import Gaussian
from gpflow.models import SVGP
from gpflow.multioutput.conditionals import fully_correlated_conditional_repeat, \
fully_correlated_conditional, independent_interdomain_conditional
from gpflow.test_util import session_tf
from gpflow.training import ScipyOptimizer
from gpflow.conditionals import _sample_mvn, sample_conditional
float_type = gpflow.settings.float_type
np.random.seed(1)
# ------------------------------------------
# Helpers
# ------------------------------------------
def predict(sess, model, Xnew, full_cov, full_output_cov):
    """Evaluate a GPflow model's predictive mean and (co)variance at Xnew."""
    mean_op, var_op = model._build_predict(
        Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
    return sess.run([mean_op, var_op])
def predict_all(sess, models, Xnew, full_cov, full_output_cov):
    """
    Returns the mean and variance of f(Xnew) for each model in `models`.
    """
    predictions = [predict(sess, model, Xnew, full_cov, full_output_cov)
                   for model in models]
    ms = [mean for mean, _ in predictions]
    vs = [var for _, var in predictions]
    return ms, vs
def assert_all_array_elements_almost_equal(arr, decimal):
    """
    Check that consecutive elements of `arr` are almost equal
    (to `decimal` places); raises AssertionError otherwise.
    """
    for left, right in zip(arr[:-1], arr[1:]):
        np.testing.assert_almost_equal(left, right, decimal=decimal)
def check_equality_predictions(sess, models, decimal=3):
    """
    Executes a couple of checks to compare the equality of predictions
    of different models. The models should be configured with the same
    training data (X, Y). The following checks are done:
    - check if log_likelihood is (almost) equal for all models
    - check if predicted mean is (almost) equal
    - check if predicted variance is (almost) equal.
      All possible variances over the inputs and outputs are calculated
      and equality is checked.
    - check if variances within model are consistent. Parts of the covariance
      matrices should overlap, and this is tested.
    """
    log_likelihoods = [m.compute_log_likelihood() for m in models]
    # Check equality of log likelihood
    assert_all_array_elements_almost_equal(log_likelihoods, decimal=5)
    # Predict: full_cov = True and full_output_cov = True
    means_tt, vars_tt = predict_all(sess, models, Data.Xs, full_cov=True, full_output_cov=True)
    # Predict: full_cov = True and full_output_cov = False
    means_tf, vars_tf = predict_all(sess, models, Data.Xs, full_cov=True, full_output_cov=False)
    # Predict: full_cov = False and full_output_cov = True
    means_ft, vars_ft = predict_all(sess, models, Data.Xs, full_cov=False, full_output_cov=True)
    # Predict: full_cov = False and full_output_cov = False
    means_ff, vars_ff = predict_all(sess, models, Data.Xs, full_cov=False, full_output_cov=False)
    # check equality of all the means
    all_means = means_tt + means_tf + means_ft + means_ff
    assert_all_array_elements_almost_equal(all_means, decimal=decimal)
    # check equality of all the variances within a category
    # (e.g. full_cov=True and full_output_cov=False)
    all_vars = [vars_tt, vars_tf, vars_ft, vars_ff]
    _ = [assert_all_array_elements_almost_equal(var, decimal=decimal) for var in all_vars]
    # Here we check that the variance in different categories are equal
    # after transforming to the right shape. Only the first model's output
    # is compared -- the others were already shown equal to it above.
    var_tt = vars_tt[0]  # N x P x N x P
    var_tf = vars_tf[0]  # P x N x N
    var_ft = vars_ft[0]  # N x P x P
    var_ff = vars_ff[0]  # N x P
    np.testing.assert_almost_equal(np.diagonal(var_tt, axis1=1, axis2=3),
                                   np.transpose(var_tf, [1, 2, 0]), decimal=decimal)
    np.testing.assert_almost_equal(np.diagonal(var_tt, axis1=0, axis2=2),
                                   np.transpose(var_ft, [1, 2, 0]), decimal=decimal)
    np.testing.assert_almost_equal(np.diagonal(np.diagonal(var_tt, axis1=0, axis2=2)),
                                   var_ff, decimal=decimal)
def expand_cov(q_sqrt, W):
    """
    :param q_sqrt: cholesky factors of L covariance matrices, L x M x M
    :param W: mixing matrix (square), L x L -- accepted for the caller's
        convenience but not used in the expansion itself
    :return: cholesky of the block-diagonal 1 x LM x LM covariance matrix
    """
    covariances = [np.dot(sqrt, sqrt.T) for sqrt in q_sqrt]  # L mats, M x M
    block = scipy.linalg.block_diag(*covariances)            # LM x LM
    chol = np.linalg.cholesky(block)                         # LM x LM
    return chol[np.newaxis, ...]
def create_q_sqrt(M, L):
    """ returns an array of L lower triangular matrices of size M x M """
    matrices = [np.tril(np.random.randn(M, M)) for _ in range(L)]
    return np.stack(matrices)  # L x M x M
# ------------------------------------------
# Data classes: storing constants
# ------------------------------------------
class Data:
    """Shared constants and synthetic training data for the tests below."""
    N, Ntest = 20, 5  # training / test set sizes
    D = 1  # input dimension
    M = 3  # inducing points
    L = 2  # latent gps
    P = 3  # output dimension
    MAXITER = int(15e2)
    # Random inputs in [-5, 5), then two latent functions of X.
    X = np.random.rand(N)[:, None] * 10 - 5
    G = np.hstack((0.5 * np.sin(3 * X) + X, 3.0 * np.cos(X) - X))
    Ptrue = np.array([[0.5, -0.3, 1.5], [-0.4, 0.43, 0.0]])  # L x P
    # Mix the latents into P outputs and add observation noise.
    Y = np.matmul(G, Ptrue)
    Y += np.random.randn(*Y.shape) * [0.2, 0.2, 0.2]
    Xs = np.linspace(-6, 6, Ntest)[:, None]
class DataMixedKernelWithEye(Data):
    """ Note in this class L == P """
    M, L = 4, 3
    # Identity mixing: each latent maps straight to one output.
    W = np.eye(L)
    G = np.hstack([0.5 * np.sin(3 * Data.X) + Data.X,
                   3.0 * np.cos(Data.X) - Data.X,
                   1.0 + Data.X])  # N x P
    mu_data = np.random.randn(M, L)  # M x L
    sqrt_data = create_q_sqrt(M, L)  # L x M x M
    # Flattened variational parameters for the fully-correlated layout.
    mu_data_full = (mu_data @ W).reshape(-1, 1)  # ML x 1
    sqrt_data_full = expand_cov(sqrt_data, W)  # 1 x LM x LM
    Y = np.matmul(G, W)
    Y += np.random.randn(*Y.shape) * np.ones((L,)) * 0.2
class DataMixedKernel(Data):
    """Data with a non-trivial L -> P mixing matrix (L < P)."""
    M = 5
    L = 2
    P = 3
    W = np.random.randn(P, L)
    G = np.hstack([0.5 * np.sin(3 * Data.X) + Data.X,
                   3.0 * np.cos(Data.X) - Data.X])  # N x L
    mu_data = np.random.randn(M, L)  # M x L
    sqrt_data = create_q_sqrt(M, L)  # L x M x M
    # Mix latents into outputs and add noise.
    Y = np.matmul(G, W.T)
    Y += np.random.randn(*Y.shape) * np.ones((P,)) * 0.1
# ------------------------------------------
# Test sample conditional
# ------------------------------------------
@pytest.mark.parametrize("cov_structure", ["full", "diag"])
@pytest.mark.parametrize("num_samples", [None, 1, 10])
def test_sample_mvn(session_tf, cov_structure, num_samples):
    """
    Draws 10,000 samples from a distribution
    with known mean and covariance. The test checks
    if the mean and covariance of the samples is
    close to the true mean and covariance.
    """
    N, D = 10000, 2
    # Unit mean, identity covariance (in either full or diagonal form).
    means = tf.ones((N, D), dtype=float_type)
    if cov_structure == "full":
        covs = tf.eye(D, batch_shape=[N], dtype=float_type)
    elif cov_structure == "diag":
        covs = tf.ones((N, D), dtype=float_type)
    samples = _sample_mvn(means, covs, cov_structure, num_samples=num_samples)
    value = session_tf.run(samples)
    # num_samples=None drops the leading sample dimension.
    if num_samples is None:
        assert value.shape == (N, D)
    else:
        assert value.shape == (num_samples, N, D)
        value = value.reshape(-1, D)
    samples_mean = np.mean(value, axis=0)
    samples_cov = np.cov(value, rowvar=False)
    np.testing.assert_array_almost_equal(samples_mean, [1., 1.], decimal=1)
    np.testing.assert_array_almost_equal(samples_cov, [[1., 0.], [0., 1.]], decimal=1)
def _create_placeholder_dict(values):
    """Create one float_type placeholder per named array, keeping shapes."""
    placeholders = {}
    for name, arr in values.items():
        placeholders[name] = tf.placeholder(float_type, shape=arr.shape)
    return placeholders
def _create_feed_dict(placeholders_dict, value_dict):
return {placeholder: value_dict[name] for name, placeholder in placeholders_dict.items()}
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize("full_cov,full_output_cov", [(False, False), (False, True), (True, False)])
def test_sample_conditional(session_tf, whiten, full_cov, full_output_cov):
    """Compare sample_conditional called with an InducingPoints feature
    against the code path that takes raw inducing locations Z: sample
    statistics and analytic mean/variance must agree."""
    q_mu = np.random.randn(Data.M , Data.P)  # M x P
    q_sqrt = np.array([np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)])  # P x M x M
    Z = Data.X[:Data.M, ...]  # M x D
    Xs = np.ones((Data.N, Data.D), dtype=float_type)
    feature = InducingPoints(Z.copy())
    kernel = RBF(Data.D)
    values = {"Z": Z, "Xnew": Xs, "q_mu": q_mu, "q_sqrt": q_sqrt}
    placeholders = _create_placeholder_dict(values)
    feed_dict = _create_feed_dict(placeholders, values)
    # Path 1: feature object
    sample_f = sample_conditional(placeholders["Xnew"], feature, kernel,
                                  placeholders["q_mu"], q_sqrt=placeholders["q_sqrt"], white=whiten,
                                  full_cov=full_cov, full_output_cov=full_output_cov, num_samples=int(1e5))
    value_f, mean_f, var_f = session_tf.run(sample_f, feed_dict=feed_dict)
    value_f = value_f.reshape((-1,) + value_f.shape[2:])
    # Path 2: raw Z tensor
    if full_output_cov:
        pytest.skip("sample_conditional with X instead of feature does not support full_output_cov")
    sample_x = sample_conditional(placeholders["Xnew"], placeholders["Z"], kernel,
                                  placeholders["q_mu"], q_sqrt=placeholders["q_sqrt"], white=whiten,
                                  full_cov=full_cov, full_output_cov=full_output_cov, num_samples=int(1e5))
    value_x, mean_x, var_x = session_tf.run(sample_x, feed_dict=feed_dict)
    value_x = value_x.reshape((-1,) + value_x.shape[2:])
    # check if mean and covariance of samples are similar
    np.testing.assert_array_almost_equal(np.mean(value_x, axis=0),
                                         np.mean(value_f, axis=0), decimal=1)
    np.testing.assert_array_almost_equal(np.cov(value_x, rowvar=False),
                                         np.cov(value_f, rowvar=False), decimal=1)
    np.testing.assert_allclose(mean_x, mean_f)
    np.testing.assert_allclose(var_x, var_f)
def test_sample_conditional_mixedkernel(session_tf):
    """
    Draw samples with a SeparateMixedMok + MixedKernelSharedMof (the most
    efficient route, path 1) and with independent kernels whose outputs are
    mixed by W afterwards (path 2); the sample moments must agree.
    """
    q_mu = np.random.randn(Data.M , Data.L)  # M x L
    q_sqrt = np.array([np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.L)])  # L x M x M
    Z = Data.X[:Data.M,...]  # M x D
    N = int(10e5)  # large sample count so the empirical moments converge
    Xs = np.ones((N, Data.D), dtype=float_type)

    values = {"Xnew": Xs, "q_mu": q_mu, "q_sqrt": q_sqrt}
    placeholders = _create_placeholder_dict(values)
    feed_dict = _create_feed_dict(placeholders, values)

    # Path 1: mixed kernel: most efficient route
    W = np.random.randn(Data.P, Data.L)
    mixed_kernel = mk.SeparateMixedMok([RBF(Data.D) for _ in range(Data.L)], W)
    mixed_feature = mf.MixedKernelSharedMof(InducingPoints(Z.copy()))

    sample = sample_conditional(placeholders["Xnew"], mixed_feature, mixed_kernel,
                                placeholders["q_mu"], q_sqrt=placeholders["q_sqrt"], white=True)
    value, mean, var = session_tf.run(sample, feed_dict=feed_dict)

    # Path 2: independent kernels, mixed later
    separate_kernel = mk.SeparateIndependentMok([RBF(Data.D) for _ in range(Data.L)])
    shared_feature = mf.SharedIndependentMof(InducingPoints(Z.copy()))
    sample2 = sample_conditional(placeholders["Xnew"], shared_feature, separate_kernel,
                                 placeholders["q_mu"], q_sqrt=placeholders["q_sqrt"], white=True)
    value2, mean2, var2 = session_tf.run(sample2, feed_dict=feed_dict)
    value2 = np.matmul(value2, W.T)  # mix the independent latent samples: g @ W^T

    # check if mean and covariance of samples are similar (Monte-Carlo, 1 decimal)
    np.testing.assert_array_almost_equal(np.mean(value, axis=0),
                                         np.mean(value2, axis=0), decimal=1)
    np.testing.assert_array_almost_equal(np.cov(value, rowvar=False),
                                         np.cov(value2, rowvar=False), decimal=1)
@pytest.mark.parametrize('R', [1, 5])
@pytest.mark.parametrize("func", [fully_correlated_conditional_repeat,
                                  fully_correlated_conditional])
def test_fully_correlated_conditional_repeat_shapes(func, R):
    """
    Shape smoke test: both fully-correlated conditionals must return mean and
    variance tensors with identical static shapes, for R repetitions of f.
    """
    L, M, N, P = Data.L, Data.M, Data.N, Data.P

    # Dummy covariance inputs with the expected fully-correlated shapes
    Kmm = tf.ones((L * M, L * M))
    Kmn = tf.ones((L * M, N, P))
    Knn = tf.ones((N, P))
    f = tf.ones((L * M, R))
    q_sqrt = None
    white = True

    m, v = func(Kmn, Kmm, Knn, f,
                full_cov=False,
                full_output_cov=False,
                q_sqrt=q_sqrt,
                white=white)

    assert v.shape.as_list() == m.shape.as_list()
# ------------------------------------------
# Test Mixed Mok Kgg
# ------------------------------------------
def test_MixedMok_Kgg(session_tf):
    """
    For a SeparateMixedMok, the full output covariance Kff must equal the
    latent covariances Kgg mixed by W, i.e. Kff = W @ Kgg @ W^T (the einsum
    below computes exactly that, output-indexed as N x P x N x P).
    """
    data = DataMixedKernel
    kern_list = [RBF(data.D) for _ in range(data.L)]
    kern = mk.SeparateMixedMok(kern_list, W=data.W)

    # NOTE(review): inputs come from Data.X while the kernel dims come from
    # DataMixedKernel -- presumably Data.D == DataMixedKernel.D; verify.
    Kgg = kern.compute_Kgg(Data.X, Data.X)  # L x N x N
    Kff = kern.compute_K(Data.X, Data.X)  # N x P x N x P

    # Kff = W @ Kgg @ W^T
    Kff_infered = np.einsum("lnm,pl,ql->npmq", Kgg, data.W, data.W)

    np.testing.assert_array_almost_equal(Kff, Kff_infered, decimal=5)
# ------------------------------------------
# Integration tests
# ------------------------------------------
def test_shared_independent_mok(session_tf):
    """
    In this test we use the same kernel and the same inducing features
    for each of the outputs. The outputs are considered to be uncorrelated.
    This is how GPflow handled multiple outputs before the multioutput framework was added.
    We compare three models here:
        1) an inefficient one, where we use a SharedIndependentMok with InducingPoints.
           This combination uses a Kff of size N x P x N x P, Kfu of size N x P x M x P,
           which is extremely inefficient as most of the elements are zero.
        2) efficient: SharedIndependentMok and SharedIndependentMof.
           This combination uses the most efficient form of matrices.
        3) the old, efficient way: using Kernel and InducingPoints.
        Models 2) and 3) follow more or less the same code path.
    """
    # Model 1: fully-coupled q(u) over all M*P inducing outputs
    q_mu_1 = np.random.randn(Data.M * Data.P, 1)  # MP x 1
    q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...]  # 1 x MP x MP
    kernel_1 = mk.SharedIndependentMok(RBF(Data.D, variance=0.5, lengthscales=1.2), Data.P)
    feature_1 = InducingPoints(Data.X[:Data.M,...].copy())
    m1 = SVGP(Data.X, Data.Y, kernel_1, Gaussian(), feature_1, q_mu=q_mu_1, q_sqrt=q_sqrt_1)
    m1.set_trainable(False)
    m1.q_sqrt.set_trainable(True)  # only the variational covariance is optimised
    gpflow.training.ScipyOptimizer().minimize(m1, maxiter=Data.MAXITER)

    # Model 2: plain kernel + inducing points (pre-multioutput code path)
    q_mu_2 = np.reshape(q_mu_1, [Data.M, Data.P])  # M x P
    q_sqrt_2 = np.array([np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)])  # P x M x M
    kernel_2 = RBF(Data.D, variance=0.5, lengthscales=1.2)
    feature_2 = InducingPoints(Data.X[:Data.M, ...].copy())
    m2 = SVGP(Data.X, Data.Y, kernel_2, Gaussian(), feature_2, q_mu=q_mu_2, q_sqrt=q_sqrt_2)
    m2.set_trainable(False)
    m2.q_sqrt.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m2, maxiter=Data.MAXITER)

    # Model 3: multioutput wrappers, shared kernel and shared features
    q_mu_3 = np.reshape(q_mu_1, [Data.M, Data.P])  # M x P
    q_sqrt_3 = np.array([np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)])  # P x M x M
    kernel_3 = mk.SharedIndependentMok(RBF(Data.D, variance=0.5, lengthscales=1.2), Data.P)
    feature_3 = mf.SharedIndependentMof(InducingPoints(Data.X[:Data.M, ...].copy()))
    m3 = SVGP(Data.X, Data.Y, kernel_3, Gaussian(), feature_3, q_mu=q_mu_3, q_sqrt=q_sqrt_3)
    m3.set_trainable(False)
    m3.q_sqrt.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m3, maxiter=Data.MAXITER)

    check_equality_predictions(session_tf, [m1, m2, m3])
def test_separate_independent_mok(session_tf):
    """
    We use different independent kernels for each of the output dimensions.
    We can achieve this in two ways:
        1) efficient: SeparateIndependentMok with Shared/SeparateIndependentMof
        2) inefficient: SeparateIndependentMok with InducingPoints
    However, both methods should return the same conditional,
    and after optimization return the same log likelihood.
    """
    # Model 1 (inefficient): fully-coupled q(u) over all M*P inducing outputs
    q_mu_1 = np.random.randn(Data.M * Data.P, 1)
    q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...]  # 1 x MP x MP
    kern_list_1 = [RBF(Data.D, variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
    kernel_1 = mk.SeparateIndependentMok(kern_list_1)
    feature_1 = InducingPoints(Data.X[:Data.M,...].copy())
    m1 = SVGP(Data.X, Data.Y, kernel_1, Gaussian(), feature_1, q_mu=q_mu_1, q_sqrt=q_sqrt_1)
    m1.set_trainable(False)
    # only the variational parameters are optimised
    m1.q_sqrt.set_trainable(True)
    m1.q_mu.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m1, maxiter=Data.MAXITER)

    # Model 2 (efficient): independent q(u) per output, shared inducing points
    q_mu_2 = np.random.randn(Data.M, Data.P)
    q_sqrt_2 = np.array([np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)])  # P x M x M
    kern_list_2 = [RBF(Data.D, variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
    kernel_2 = mk.SeparateIndependentMok(kern_list_2)
    feature_2 = mf.SharedIndependentMof(InducingPoints(Data.X[:Data.M, ...].copy()))
    m2 = SVGP(Data.X, Data.Y, kernel_2, Gaussian(), feature_2, q_mu=q_mu_2, q_sqrt=q_sqrt_2)
    m2.set_trainable(False)
    m2.q_sqrt.set_trainable(True)
    m2.q_mu.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m2, maxiter=Data.MAXITER)

    check_equality_predictions(session_tf, [m1, m2])
def test_separate_independent_mof(session_tf):
    """
    Same test as above but we use different (i.e. separate) inducing features
    for each of the output dimensions.
    """
    np.random.seed(0)  # fixed seed for reproducibility of the random initialisations

    # Model 1 (inefficient): fully-coupled q(u) over all M*P inducing outputs
    q_mu_1 = np.random.randn(Data.M * Data.P, 1)
    q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...]  # 1 x MP x MP
    kernel_1 = mk.SharedIndependentMok(RBF(Data.D, variance=0.5, lengthscales=1.2), Data.P)
    feature_1 = InducingPoints(Data.X[:Data.M,...].copy())
    m1 = SVGP(Data.X, Data.Y, kernel_1, Gaussian(), feature_1, q_mu=q_mu_1, q_sqrt=q_sqrt_1)
    m1.set_trainable(False)
    # only the variational parameters are optimised
    m1.q_sqrt.set_trainable(True)
    m1.q_mu.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m1, maxiter=Data.MAXITER)

    # Model 2 (efficient): one separate inducing feature per output
    q_mu_2 = np.random.randn(Data.M, Data.P)
    q_sqrt_2 = np.array([np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)])  # P x M x M
    kernel_2 = mk.SharedIndependentMok(RBF(Data.D, variance=0.5, lengthscales=1.2), Data.P)
    feat_list_2 = [InducingPoints(Data.X[:Data.M, ...].copy()) for _ in range(Data.P)]
    feature_2 = mf.SeparateIndependentMof(feat_list_2)
    m2 = SVGP(Data.X, Data.Y, kernel_2, Gaussian(), feature_2, q_mu=q_mu_2, q_sqrt=q_sqrt_2)
    m2.set_trainable(False)
    m2.q_sqrt.set_trainable(True)
    m2.q_mu.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m2, maxiter=Data.MAXITER)

    # Model 3 (inefficient): an identical feature is used P times,
    # and treated as a separate feature.
    q_mu_3 = np.random.randn(Data.M, Data.P)
    q_sqrt_3 = np.array([np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)])  # P x M x M
    kern_list = [RBF(Data.D, variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
    kernel_3 = mk.SeparateIndependentMok(kern_list)
    feat_list_3 = [InducingPoints(Data.X[:Data.M, ...].copy()) for _ in range(Data.P)]
    feature_3 = mf.SeparateIndependentMof(feat_list_3)
    m3 = SVGP(Data.X, Data.Y, kernel_3, Gaussian(), feature_3, q_mu=q_mu_3, q_sqrt=q_sqrt_3)
    m3.set_trainable(False)
    m3.q_sqrt.set_trainable(True)
    m3.q_mu.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m3, maxiter=Data.MAXITER)

    check_equality_predictions(session_tf, [m1, m2, m3])
def test_mixed_mok_with_Id_vs_independent_mok(session_tf):
    """
    Compare a SharedIndependentMok against a SeparateMixedMok built from the
    same RBF kernels and the W of DataMixedKernelWithEye (presumably an
    identity-like mixing matrix, in which case mixing is a no-op -- verify
    against the DataMixedKernelWithEye definition). Predictions must agree.
    """
    data = DataMixedKernelWithEye

    # Independent model
    k1 = mk.SharedIndependentMok(RBF(data.D, variance=0.5, lengthscales=1.2), data.L)
    f1 = InducingPoints(data.X[:data.M, ...].copy())
    m1 = SVGP(data.X, data.Y, k1, Gaussian(), f1,
              q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full)
    m1.set_trainable(False)
    m1.q_sqrt.set_trainable(True)  # only the variational covariance is optimised
    gpflow.training.ScipyOptimizer().minimize(m1, maxiter=data.MAXITER)

    # Mixed model with W from DataMixedKernelWithEye
    kern_list = [RBF(data.D, variance=0.5, lengthscales=1.2) for _ in range(data.L)]
    k2 = mk.SeparateMixedMok(kern_list, data.W)
    f2 = InducingPoints(data.X[:data.M, ...].copy())
    m2 = SVGP(data.X, data.Y, k2, Gaussian(), f2,
              q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full)
    m2.set_trainable(False)
    m2.q_sqrt.set_trainable(True)
    gpflow.training.ScipyOptimizer().minimize(m2, maxiter=data.MAXITER)

    check_equality_predictions(session_tf, [m1, m2])
def test_compare_mixed_kernel(session_tf):
    """
    A SeparateMixedMok must give the same predictions whether its inducing
    features are wrapped in SharedIndependentMof or in the specialised
    MixedKernelSharedMof.
    """
    data = DataMixedKernel

    kern_list = [RBF(data.D) for _ in range(data.L)]
    k1 = mk.SeparateMixedMok(kern_list, W=data.W)
    f1 = mf.SharedIndependentMof(InducingPoints(data.X[:data.M,...].copy()))
    m1 = SVGP(data.X, data.Y, k1, Gaussian(), feat=f1, q_mu=data.mu_data, q_sqrt=data.sqrt_data)

    kern_list = [RBF(data.D) for _ in range(data.L)]
    k2 = mk.SeparateMixedMok(kern_list, W=data.W)
    f2 = mf.MixedKernelSharedMof(InducingPoints(data.X[:data.M,...].copy()))
    m2 = SVGP(data.X, data.Y, k2, Gaussian(), feat=f2, q_mu=data.mu_data, q_sqrt=data.sqrt_data)

    check_equality_predictions(session_tf, [m1, m2])
def test_multioutput_with_diag_q_sqrt(session_tf):
    """
    A diagonal q_sqrt (q_diag=True, all entries 2) must be equivalent to
    passing the corresponding full q_sqrt = 2 * I with q_diag=False.
    """
    data = DataMixedKernel

    q_sqrt_diag = np.ones((data.M, data.L)) * 2
    q_sqrt = np.repeat(np.eye(data.M)[None, ...], data.L, axis=0) * 2  # L x M x M

    kern_list = [RBF(data.D) for _ in range(data.L)]
    k1 = mk.SeparateMixedMok(kern_list, W=data.W)
    f1 = mf.SharedIndependentMof(InducingPoints(data.X[:data.M,...].copy()))
    m1 = SVGP(data.X, data.Y, k1, Gaussian(), feat=f1, q_mu=data.mu_data, q_sqrt=q_sqrt_diag, q_diag=True)

    kern_list = [RBF(data.D) for _ in range(data.L)]
    k2 = mk.SeparateMixedMok(kern_list, W=data.W)
    f2 = mf.SharedIndependentMof(InducingPoints(data.X[:data.M,...].copy()))
    m2 = SVGP(data.X, data.Y, k2, Gaussian(), feat=f2, q_mu=data.mu_data, q_sqrt=q_sqrt, q_diag=False)

    check_equality_predictions(session_tf, [m1, m2])
def test_MixedKernelSeparateMof(session_tf):
    """
    A SeparateMixedMok must give the same predictions with a generic
    SeparateIndependentMof as with the specialised MixedKernelSeparateMof.
    """
    data = DataMixedKernel

    kern_list = [RBF(data.D) for _ in range(data.L)]
    feat_list = [InducingPoints(data.X[:data.M, ...].copy()) for _ in range(data.L)]
    k1 = mk.SeparateMixedMok(kern_list, W=data.W)
    f1 = mf.SeparateIndependentMof(feat_list)
    m1 = SVGP(data.X, data.Y, k1, Gaussian(), feat=f1, q_mu=data.mu_data, q_sqrt=data.sqrt_data)

    kern_list = [RBF(data.D) for _ in range(data.L)]
    feat_list = [InducingPoints(data.X[:data.M, ...].copy()) for _ in range(data.L)]
    k2 = mk.SeparateMixedMok(kern_list, W=data.W)
    f2 = mf.MixedKernelSeparateMof(feat_list)
    m2 = SVGP(data.X, data.Y, k2, Gaussian(), feat=f2, q_mu=data.mu_data, q_sqrt=data.sqrt_data)

    check_equality_predictions(session_tf, [m1, m2])
| 42.438757
| 107
| 0.65594
|
4a09ae38df0ab682c40c94f4be0fcbee8c7f7f35
| 936
|
py
|
Python
|
scoreboard.py
|
mvanderblom/yahtzee
|
6b5feb94b0268c133ea379981bcd7e927e6c0430
|
[
"MIT"
] | null | null | null |
scoreboard.py
|
mvanderblom/yahtzee
|
6b5feb94b0268c133ea379981bcd7e927e6c0430
|
[
"MIT"
] | null | null | null |
scoreboard.py
|
mvanderblom/yahtzee
|
6b5feb94b0268c133ea379981bcd7e927e6c0430
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import List
from exceptions import GameException
from rules import Rule
class ScoreBoard:
    """Tracks the score obtained for each rule of a Yahtzee game.

    ``scores[i]`` is ``None`` until rule ``i`` has been scored, after which it
    holds the integer returned by ``rules[i].get_score(dice)``.
    """

    def __init__(self):
        # Instance attributes instead of the previous class-level mutable
        # defaults, which were shared between every ScoreBoard instance.
        self.rules: List[Rule] = []
        self.scores: List[int] = []

    def register_rules(self, rules: List[Rule]):
        """Install the rule set and reset every score to 'not yet used'."""
        self.rules = rules
        self.scores = [None] * len(rules)

    def set_score(self, rule_index, dice):
        """Score ``dice`` against the rule at ``rule_index``.

        Raises GameException when the index is out of range or the rule was
        already scored.
        """
        if not 0 <= rule_index < len(self.rules):
            raise GameException(f'Unknown rule key {rule_index}')
        rule = self.rules[rule_index]
        if self.scores[rule_index] is not None:
            # fixed typo in the original message ('allready')
            raise GameException(f'Score for rule {rule.get_label()} is already used')
        self.scores[rule_index] = rule.get_score(dice)

    def is_full(self):
        """Return True when every registered rule has been scored."""
        return all(score is not None for score in self.scores)

    def get_total(self):
        """Sum of all scores set so far (unused rules count as 0)."""
        return sum(score for score in self.scores if score is not None)
| 30.193548
| 86
| 0.655983
|
4a09aeee23b94f89b6df7967d364f6f14ede37d9
| 4,125
|
py
|
Python
|
addons/wiki/tests/test_models.py
|
listinc/osf.io
|
b9a0357f3e9b6e905b732e750a16e9452c459d78
|
[
"Apache-2.0"
] | null | null | null |
addons/wiki/tests/test_models.py
|
listinc/osf.io
|
b9a0357f3e9b6e905b732e750a16e9452c459d78
|
[
"Apache-2.0"
] | 5
|
2017-02-13T19:38:59.000Z
|
2018-10-17T20:38:08.000Z
|
addons/wiki/tests/test_models.py
|
listinc/osf.io
|
b9a0357f3e9b6e905b732e750a16e9452c459d78
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import pytz
import datetime
from addons.wiki.exceptions import NameMaximumLengthError
from addons.wiki.models import WikiPage, WikiVersion
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from osf_tests.factories import NodeFactory, UserFactory, ProjectFactory
from tests.base import OsfTestCase, fake
pytestmark = pytest.mark.django_db
# from website/addons/wiki/tests/test_wiki.py
class TestWikiPageModel:
    """Unit tests for WikiPage name validation and WikiVersion.is_current."""

    @pytest.mark.enable_implicit_clean
    def test_page_name_cannot_be_greater_than_100_characters(self):
        # 101 characters exceeds the allowed maximum page-name length
        bad_name = 'a' * 101
        page = WikiPage(page_name=bad_name)
        with pytest.raises(NameMaximumLengthError):
            page.save()

    def test_is_current_with_single_version(self):
        # The only version of a page is its current version
        user = UserFactory()
        node = NodeFactory()
        page = WikiPage(page_name='foo', node=node)
        page.save()
        version = page.create_version(user=user, content='hello')
        assert version.is_current is True

    def test_is_current_with_multiple_versions(self):
        # Only the most recently created version is current
        user = UserFactory()
        node = NodeFactory()
        page = WikiPage(page_name='foo', node=node)
        page.save()
        ver1 = page.create_version(user=user, content='draft1')
        ver2 = page.create_version(user=user, content='draft2')
        assert ver1.is_current is False
        assert ver2.is_current is True

    def test_is_current_deleted_page(self):
        # No version of a deleted page is current
        user = UserFactory()
        node = NodeFactory()
        page = WikiPage(page_name='foo', node=node)
        page.save()
        ver1 = page.create_version(user=user, content='draft1')
        page.deleted = datetime.datetime(2017, 1, 1, 1, 00, tzinfo=pytz.utc)
        page.save()
        assert ver1.is_current is False
class TestWikiPage(OsfTestCase):
    """Tests for WikiPage factory defaults and URL construction."""

    def setUp(self):
        super(TestWikiPage, self).setUp()
        self.user = UserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.wiki = WikiFactory(user=self.user, node=self.project)

    def test_wiki_factory(self):
        # Factory defaults: a page named 'home' with a user and node attached
        wiki = WikiFactory()
        assert wiki.page_name == 'home'
        assert bool(wiki.user)
        assert bool(wiki.node)

    def test_wiki_version_factory(self):
        version = WikiVersionFactory()
        assert version.identifier == 1
        assert version.content == 'First draft of wiki'
        assert bool(version.user)
        assert bool(version.wiki_page)

    def test_url(self):
        # Page URL is nested under its project's URL
        assert self.wiki.url == '{project_url}wiki/home/'.format(project_url=self.project.url)

    def test_url_for_wiki_page_name_with_spaces(self):
        wiki = WikiFactory(user=self.user, node=self.project, page_name='Test Wiki')
        url = '{}wiki/{}/'.format(self.project.url, wiki.page_name)
        assert wiki.url == url

    def test_url_for_wiki_page_name_with_special_characters(self):
        wiki = WikiFactory(user=self.user, node=self.project)
        wiki.page_name = 'Wiki!@#$%^&*()+'
        wiki.save()
        url = '{}wiki/{}/'.format(self.project.url, wiki.page_name)
        assert wiki.url == url

    # Regression test for an issue on prod:
    # https://www.flowdock.com/app/cos/archiver/threads/I09794CXgkkFK22_2kpEQfeIws2
    # We can't assume that WikiVersion.identifier follows a contiguous
    # sequence. There was a WikiPage that had versions (ordered by creation):
    #   1, 2, 3, 4, 5, 6, 7, 8, 2, 3, 4, 5
    # This test reproduces that state and makes sure that
    # WikiPage.current_version_number, WikiPage.get_version, and WikiVersion.is_current
    # behave as expected
    def test_current_version_number_with_non_contiguous_version_numbers(self):
        wiki = WikiFactory()
        # Recreate the non-contiguous identifier sequence described above
        for i in range(1, 9):
            WikiVersion(wiki_page=wiki, identifier=i, content=fake.sentence()).save()
        for i in range(2, 6):
            WikiVersion(wiki_page=wiki, identifier=i, content=fake.sentence()).save()
        # current_version_number reflects the identifier of the latest version
        assert wiki.current_version_number == 5
        latest_version = wiki.versions.order_by('-created')[0]
        assert latest_version.is_current
        assert wiki.get_version(5) == latest_version
| 38.915094
| 94
| 0.68097
|
4a09b053f2ceb17d113ec27053b9b5849876eef4
| 459
|
py
|
Python
|
AlienInvasion/ai_settings.py
|
severovlink/PythonPyGame
|
71157ba666631c7c4bc98adb1fcd7c4b37041b34
|
[
"MIT"
] | 1
|
2020-05-02T16:12:57.000Z
|
2020-05-02T16:12:57.000Z
|
AlienInvasion/ai_settings.py
|
severovlink/PythonPyGame
|
71157ba666631c7c4bc98adb1fcd7c4b37041b34
|
[
"MIT"
] | null | null | null |
AlienInvasion/ai_settings.py
|
severovlink/PythonPyGame
|
71157ba666631c7c4bc98adb1fcd7c4b37041b34
|
[
"MIT"
] | null | null | null |
class Settings:
    """Stores every tunable setting for the Alien Invasion game."""

    def __init__(self):
        """Initialise the game's static settings."""
        # Screen layout
        self.screen_width, self.screen_height = 1000, 600
        self.bg_color = (230, 230, 230)

        # Ship movement
        self.ship_speed_factor = 1

        # Bullet behaviour and appearance
        self.bullet_speed_factor = 1
        self.bullet_width, self.bullet_height = 5, 10
        self.bullet_color = (10, 10, 10)
        self.bullets_allowed = 3

        # Alien fleet movement
        self.alien_speed_factor = 1
        self.fleet_drop_speed = 10
        self.fleet_direction = 1
| 28.6875
| 40
| 0.605664
|
4a09b1ead39bccb20cd15562cc7ed4acb59e829b
| 4,176
|
py
|
Python
|
Take Home Exams/egemenkilic_the2.py
|
egmnklc/Sabanci
|
31eaeedb01010879b5d62dee84130bd88e7e5bdf
|
[
"MIT"
] | null | null | null |
Take Home Exams/egemenkilic_the2.py
|
egmnklc/Sabanci
|
31eaeedb01010879b5d62dee84130bd88e7e5bdf
|
[
"MIT"
] | null | null | null |
Take Home Exams/egemenkilic_the2.py
|
egmnklc/Sabanci
|
31eaeedb01010879b5d62dee84130bd88e7e5bdf
|
[
"MIT"
] | null | null | null |
# Reads two semicolon/colon-delimited lists of "course:grade" pairs
# (previous semesters and this semester), then reports the letter grade
# rules for one selected course. Splitting on ';' after replacing ':'
# puts each course name at an even index and its grade right after it.
courses = input("Please enter the courses you have taken previously with letter grades: ")
courses_replace = courses.replace(":", ";")
courses_purified = courses_replace.split(";")
# A well-formed list has exactly one more ':' than ';' (one per pair)
if courses.count(";") + 1 == courses.count(":"):
    taken_courses = input("Please enter the courses you have taken this semester with letter grades: ")
    taken_courses_replace = taken_courses.replace(":", ";")
    taken_courses_purified = taken_courses_replace.split(";")
    if taken_courses.count(";") + 1 == taken_courses.count(":"):
        selected_course = input("Please enter the course you want to check: ")
        if selected_course in courses_purified:
            # Grade of a course sits immediately after its name in the list
            indexof_selectedCourse_in_courses = courses_purified.index(selected_course)
            gradeOf_selectedCourse_in_courses = indexof_selectedCourse_in_courses + 1
            grade_courses = courses_purified[gradeOf_selectedCourse_in_courses].upper()
            if grade_courses == "F":
                if selected_course in taken_courses_purified:
                    indexof_selectedCourse_in_takenCourses = taken_courses_purified.index(selected_course)
                    gradeOf_selectedCourse_in_takenCourses = indexof_selectedCourse_in_takenCourses + 1
                    grade_takenCourses = taken_courses_purified[gradeOf_selectedCourse_in_takenCourses].upper()
                    if grade_takenCourses == "F":
                        print("Your grade for", selected_course, "is F.")
                    elif grade_takenCourses == "U":
                        print("Letter grade will be U.")
                    elif grade_takenCourses != "F":
                        print("Your grade for", selected_course, "is F.")
                    else:
                        print("Invalid input")
                elif selected_course not in taken_courses_purified:
                    print("Your grade for", selected_course, "is U.")
                else:
                    print("Invalid input")
            elif courses_purified[gradeOf_selectedCourse_in_courses].upper() == "U":
                print("Your grade for", selected_course, "is", courses_purified[gradeOf_selectedCourse_in_courses].upper() + ".")
            elif selected_course in courses_purified and selected_course in taken_courses_purified:
                # Course retaken this semester: new F stands, otherwise S is an option
                if taken_courses_purified[taken_courses_purified.index(selected_course) + 1].upper() == "F":
                    print("Your grade for", selected_course, "is", taken_courses_purified[taken_courses_purified.index(selected_course) + 1].upper() + ".")
                elif grade_courses != "F" and taken_courses_purified[taken_courses_purified.index(selected_course) + 1].upper() != "F":
                    print("You can choose between S and", taken_courses_purified[taken_courses_purified.index(selected_course) + 1].upper(), "for", selected_course + ".")
                else:
                    print("Invalid input")
            elif selected_course in courses_purified and selected_course not in taken_courses_purified:
                if grade_courses != "F":
                    print("Your grade for", selected_course, "is", grade_courses + ".")
                else:
                    print("Invalid input")
            elif selected_course not in taken_courses_purified and selected_course not in courses_purified:
                print("You didn't take", selected_course, "this semester.")
            elif selected_course not in courses_purified and selected_course in taken_courses_purified and taken_courses_purified[taken_courses_purified.index(selected_course) + 1].upper() == "F":
                print("Your grade for", selected_course, "is U.")
            elif selected_course in taken_courses_purified and selected_course not in courses_purified:
                if taken_courses_purified[taken_courses_purified.index(selected_course) + 1].upper() != "F":
                    print("Your grade for", selected_course, "is", taken_courses_purified[taken_courses_purified.index(selected_course) + 1].upper() + ".")
                else:
                    print("Invalid input")
        else:
            print("Invalid input")
    else:
        print("Invalid input")
| 73.263158
| 193
| 0.642241
|
4a09b2e7d4cc1721b0d8e84441fbc153952198d0
| 3,683
|
py
|
Python
|
HP Code Wars Documents/2014/Solutions/prob07_GreekAcro.py
|
p473lr/i-urge-mafia-gear
|
ae19efb1af2e85ed8bcbbcc3d12ae0f024f3565e
|
[
"Apache-2.0"
] | null | null | null |
HP Code Wars Documents/2014/Solutions/prob07_GreekAcro.py
|
p473lr/i-urge-mafia-gear
|
ae19efb1af2e85ed8bcbbcc3d12ae0f024f3565e
|
[
"Apache-2.0"
] | null | null | null |
HP Code Wars Documents/2014/Solutions/prob07_GreekAcro.py
|
p473lr/i-urge-mafia-gear
|
ae19efb1af2e85ed8bcbbcc3d12ae0f024f3565e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#CodeWars 2014
#
# Greek Acrophonic Numerals
#
# The earliest alphabet-related system of numerals used with Greek letters
# was the set of acrophonic Attic numerals. These numerals operated much like
# Roman numerals (which derived from this scheme),
# with: ? = 1, ? = 5, ? = 10, ? = 100, ? = 1000, ? = 10000.
# Also, 50, 500, 5000, and 50000 were represented by composites of ? and a
# tiny version of the applicable power of ten.
#
# For this problem we'll use a numeral scheme that follows the pattern
# of this ancient Greek system. We'll represent the numerals like this:
# I = 1, P = 5, D = 10, H = 100, C = 1000, and M = 10000.
# For values above ten, groups of five are represented by a P followed
# by another letter. For example, 50 would be written PD and 475 HHHHPDDDP.
#
# Write a program to convert between Greek Acrophonic Numbers and our
# familiar decimal number system.
#
# Input
#
# The first line of input will indicate the number of conversions the
# program must perform. Each line after will contain either a Greek a
# crophonic number or a decimal number.
#
# 9
# 8
# 50
# 475
# CCPHHDDDDPII
# CCCPHHHHPII
# PMMCPDDDDDP
# 5678
# PMMPCPHDDDPI
# 8642
#
#
#
#
# Output
#
# For each input value the program must convert from decimal to Greek
# acrophonic, or vice-versa.
#
# PIII
# PD
# HHHHPDDDP
# 2647
# 3807
# 61095
# PCPHHPDDDPIII
# 65536
# PCCCCPHHDDDDII
#
import sys

# Converts between decimal numbers and "Greek acrophonic" numerals where
# I=1, P=5, D=10, H=100, C=1000, M=10000 and "P<letter>" means 5x that letter.
print ("Enter N, then N words/numbers to translate.")
count = int(sys.stdin.readline())
while (count > 0):
    count -= 1
    line=""
    line = sys.stdin.readline()
    charIn = line[0]
    if (charIn.isdigit()): # convert a number
        value = int(line)
        # Decompose into digit counts per power of ten:
        # M=10000s, C=1000s, H=100s, D=10s, I=1s
        M = int(value / 10000)
        value -= M*10000
        C = int(value/1000)
        value -= C*1000
        H = int(value/100)
        value -= H*100
        D = int(value/10)
        I = value-D*10
        #print("M",M,"C",C,"H",H,"D",D,"I",I)
        # For each power: emit "P<letter>" for a group of five, then the
        # remaining single letters.
        if (M>=5):
            print ("PM", end="")
            M -= 5
        while (M>0):
            print ("M", end="")
            M-=1
        if (C>=5):
            print ("PC", end="")
            C -= 5
        while (C>0):
            print ("C", end="")
            C-=1
        if (H>=5):
            print ("PH", end="")
            H -= 5
        while (H>0):
            print ("H", end="")
            H-=1
        if (D>=5):
            print ("PD", end="")
            D -= 5
        while (D>0):
            print ("D", end="")
            D-=1
        if (I>=5):
            print ("P", end="") # For 1's digit, just "P" for 5.
            I -= 5
        while (I>0):
            print ("I", end="")
            I-=1
        print("")
    else: # Convert Greek to number
        value = 0
        multiple = 1  # becomes 5 after a 'P' so the next letter counts 5x
        length = len(line)
        for i in range(0, length):
            charIn=line[i]
            if (charIn == 'P'):
                multiple = 5
                charIn=line[i+1] # Check next character after P
                if ( (charIn!='M') and (charIn!='C') and (charIn!='H') and (charIn!='D')):
                    # Deal with final lone P, or P followed by I
                    value+=5
                    multiple=1
            else:
                # Map the letter to its base value; unknown chars (e.g. the
                # trailing newline) fall through with newVal=0.
                newVal=0
                if (charIn == 'M'):
                    newVal=10000
                elif (charIn == 'C'):
                    newVal=1000
                elif (charIn == 'H'):
                    newVal=100
                elif (charIn == 'D'):
                    newVal=10
                elif (charIn == 'I'):
                    newVal=1
                value += multiple*newVal
                multiple = 1
        print (value)
| 26.307143
| 90
| 0.499321
|
4a09b37fad23ebe0f4d6f6db5c415b9738538734
| 2,377
|
py
|
Python
|
ESN/esn.py
|
hhnam96/Reservoir-Computing-in-PyTorch
|
e349f6d8ed51a1157ca14e41e44bc015524a8c40
|
[
"MIT"
] | null | null | null |
ESN/esn.py
|
hhnam96/Reservoir-Computing-in-PyTorch
|
e349f6d8ed51a1157ca14e41e44bc015524a8c40
|
[
"MIT"
] | null | null | null |
ESN/esn.py
|
hhnam96/Reservoir-Computing-in-PyTorch
|
e349f6d8ed51a1157ca14e41e44bc015524a8c40
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
def to_sparse(tensor, density):
    """Randomly zero out entries of `tensor`, keeping each one independently
    with probability `density` (density=1.0 keeps everything)."""
    keep_mask = (torch.rand_like(tensor) <= density).type(tensor.dtype)
    return tensor * keep_mask
def random_matrix(size):
    """Return a tensor of shape `size` with i.i.d. Uniform[0, 1) entries."""
    return torch.rand(size)
class ESN(torch.nn.Module):
    """
    Implements an Echo State Network.

    Parameters:
    - input_size: size of the input
    - reservoir_size: number of units in the reservoir
    - contractivity_coeff: spectral radius for the reservoir matrix
    - density: density of the reservoir matrix, from 0 to 1.
    - scale_in: scaling of the input-to-reservoir matrix
    - f: activation function for the state transition function
    """
    def __init__(self, input_size, reservoir_size, contractivity_coeff=0.9, density=1.0, scale_in=1.0, f=torch.tanh):
        super(ESN, self).__init__()

        self.input_size = input_size
        self.reservoir_size = reservoir_size
        self.contractivity_coeff = contractivity_coeff
        self.density = density
        self.scale_in = scale_in
        self.f = f

        # Random weights uniform in [-1, 1]
        self.W_in = random_matrix((reservoir_size, input_size)) * 2 - 1
        self.W_hat = random_matrix((reservoir_size, reservoir_size)) * 2 - 1

        # Sparsify the reservoir and scale the input matrix
        self.W_hat = to_sparse(self.W_hat, density)
        self.W_in = scale_in * self.W_in

        # Prescale W_hat so its spectral radius equals contractivity_coeff
        self.W_hat = self._rescale_contractivity(self.W_hat)

        # Register as frozen parameters: part of the module's state
        # (state_dict, .to(device)) but never trained.
        self.W_in = nn.Parameter(self.W_in, requires_grad=False)
        self.W_hat = nn.Parameter(self.W_hat, requires_grad=False)

    def forward(self, input, initial_state=None):
        """
        Compute the reservoir states for the given sequence.

        Parameters:
        - input: Input sequence of shape (seq_len, input_size)
        - initial_state: optional initial reservoir state of shape
          (reservoir_size,); when omitted the state starts at zero.

        Returns: a tensor of shape (seq_len, reservoir_size)
        """
        x = torch.zeros((input.size(0), self.reservoir_size), device=self.W_hat.device)

        # x[t] = f(W_in @ u[t] + W_hat @ x[t-1])
        if initial_state is not None:
            x[0,:] = self.f( self.W_in @ input[0,:] + self.W_hat @ initial_state )
        else:
            x[0,:] = self.f( self.W_in @ input[0,:] )

        for i in range(1, len(input)):
            x[i,:] = self.f( self.W_in @ input[i,:] + self.W_hat @ x[i-1] )

        return x

    def _rescale_contractivity(self, W):
        """Rescale W so its spectral radius equals `self.contractivity_coeff`."""
        coeff = self.contractivity_coeff
        # Fix: the original used `W.eig()[0].abs().max()`. `torch.Tensor.eig`
        # was deprecated and removed from recent PyTorch releases, and its
        # eigenvalues came back as an (n, 2) [real, imag] tensor, so
        # `.abs().max()` took the max over real/imag parts separately rather
        # than the true eigenvalue modulus. `torch.linalg.eigvals` returns
        # complex eigenvalues whose `.abs()` is the correct modulus.
        spectral_radius = torch.linalg.eigvals(W).abs().max()
        return W * coeff / spectral_radius
| 34.449275
| 117
| 0.631048
|
4a09b3ea6cd563a8a426c8fdec82bda8183d1960
| 7,546
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_lib_keychain_act.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 177
|
2016-03-15T17:03:51.000Z
|
2022-03-18T16:48:44.000Z
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_lib_keychain_act.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 18
|
2016-03-30T10:45:22.000Z
|
2020-07-14T16:28:13.000Z
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_lib_keychain_act.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 85
|
2016-03-16T20:38:57.000Z
|
2022-02-22T04:26:02.000Z
|
""" Cisco_IOS_XR_lib_keychain_act
This module contains a collection of YANG definitions
for Cisco IOS\-XR action package configuration.
Copyright (c) 2017 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class MasterKeyAdd(_Entity_):
    """
    To add a new master key

    .. attribute:: input

    **type**\: :py:class:`Input <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_keychain_act.MasterKeyAdd.Input>`

    """
    # NOTE: auto-generated YDK binding for the master-key-add RPC of the
    # Cisco-IOS-XR-lib-keychain-act YANG model. The boilerplate attribute
    # setup below (ending with _is_frozen = True) presumably interacts with
    # YDK's _perform_setattr machinery -- do not reorder; confirm against
    # the ydk-gen templates before editing.

    _prefix = 'lib-keychain-act'
    _revision = '2017-04-17'

    def __init__(self):
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(MasterKeyAdd, self).__init__()
        self._top_entity = None

        self.yang_name = "master-key-add"
        self.yang_parent_name = "Cisco-IOS-XR-lib-keychain-act"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict()

        self.input = MasterKeyAdd.Input()
        self.input.parent = self
        self._children_name_map["input"] = "input"
        self._segment_path = lambda: "Cisco-IOS-XR-lib-keychain-act:master-key-add"
        self._is_frozen = True


    class Input(_Entity_):
        """
        .. attribute:: new_key

        New master key to be added

        **type**\: str

        """
        # Single string leaf ('new-key') carrying the key to add.

        _prefix = 'lib-keychain-act'
        _revision = '2017-04-17'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(MasterKeyAdd.Input, self).__init__()

            self.yang_name = "input"
            self.yang_parent_name = "master-key-add"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('new_key', (YLeaf(YType.str, 'new-key'), ['str'])),
            ])
            self.new_key = None
            self._segment_path = lambda: "input"
            self._absolute_path = lambda: "Cisco-IOS-XR-lib-keychain-act:master-key-add/%s" % self._segment_path()
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(MasterKeyAdd.Input, ['new_key'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_keychain_act as meta
            return meta._meta_table['MasterKeyAdd.Input']['meta_info']

    def clone_ptr(self):
        self._top_entity = MasterKeyAdd()
        return self._top_entity

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_keychain_act as meta
        return meta._meta_table['MasterKeyAdd']['meta_info']
class MasterKeyDelete(_Entity_):
    """
    Remove Master key

    """
    # NOTE: auto-generated YDK binding for the master-key-delete RPC; the
    # RPC carries no input leafs, hence the empty _child_classes/_leafs.

    _prefix = 'lib-keychain-act'
    _revision = '2017-04-17'

    def __init__(self):
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(MasterKeyDelete, self).__init__()
        self._top_entity = None

        self.yang_name = "master-key-delete"
        self.yang_parent_name = "Cisco-IOS-XR-lib-keychain-act"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict()
        self._segment_path = lambda: "Cisco-IOS-XR-lib-keychain-act:master-key-delete"
        self._is_frozen = True

    def clone_ptr(self):
        self._top_entity = MasterKeyDelete()
        return self._top_entity

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_keychain_act as meta
        return meta._meta_table['MasterKeyDelete']['meta_info']
class MasterKeyUpdate(_Entity_):
    """
    To update master key (ydk auto-generated RPC).

    .. attribute:: input

    	**type**\: :py:class:`Input <ydk.models.cisco_ios_xr.Cisco_IOS_XR_lib_keychain_act.MasterKeyUpdate.Input>`
    """

    _prefix = 'lib-keychain-act'
    _revision = '2017-04-17'

    def __init__(self):
        # Py2/Py3 compatible super() call, as emitted by the ydk generator.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(MasterKeyUpdate, self).__init__()
        self._top_entity = None

        self.yang_name = "master-key-update"
        self.yang_parent_name = "Cisco-IOS-XR-lib-keychain-act"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict()

        self.input = MasterKeyUpdate.Input()
        self.input.parent = self
        self._children_name_map["input"] = "input"
        self._segment_path = lambda: "Cisco-IOS-XR-lib-keychain-act:master-key-update"
        self._is_frozen = True

    class Input(_Entity_):
        """
        Input for the master-key-update RPC.

        .. attribute:: old_key

        	key already added/key to be replaced
        	**type**\: str
        	**mandatory**\: True

        .. attribute:: new_key

        	New master key to be added
        	**type**\: str
        	**mandatory**\: True
        """

        _prefix = 'lib-keychain-act'
        _revision = '2017-04-17'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(MasterKeyUpdate.Input, self).__init__()

            self.yang_name = "input"
            self.yang_parent_name = "master-key-update"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            self._child_classes = OrderedDict([])
            # Two mandatory string leaves: old-key and new-key.
            self._leafs = OrderedDict([
                ('old_key', (YLeaf(YType.str, 'old-key'), ['str'])),
                ('new_key', (YLeaf(YType.str, 'new-key'), ['str'])),
            ])
            self.old_key = None
            self.new_key = None
            self._segment_path = lambda: "input"
            self._absolute_path = lambda: "Cisco-IOS-XR-lib-keychain-act:master-key-update/%s" % self._segment_path()
            self._is_frozen = True

        def __setattr__(self, name, value):
            # Route writes through ydk validation for the declared leaves.
            self._perform_setattr(MasterKeyUpdate.Input, ['old_key', 'new_key'], name, value)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_keychain_act as meta
            return meta._meta_table['MasterKeyUpdate.Input']['meta_info']

    def clone_ptr(self):
        # ydk protocol hook: return a fresh top-level entity of this RPC type.
        self._top_entity = MasterKeyUpdate()
        return self._top_entity

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_lib_keychain_act as meta
        return meta._meta_table['MasterKeyUpdate']['meta_info']
| 29.476563
| 126
| 0.601246
|
4a09b448d29ab23628d694d20d10e222d9e2a4e4
| 33,395
|
py
|
Python
|
src/lib/models/networks/pose_dla_dcn_no_bias.py
|
StudentWong/CenterNet
|
d20640d40db522b14813e21febff187bd4d75738
|
[
"MIT"
] | null | null | null |
src/lib/models/networks/pose_dla_dcn_no_bias.py
|
StudentWong/CenterNet
|
d20640d40db522b14813e21febff187bd4d75738
|
[
"MIT"
] | null | null | null |
src/lib/models/networks/pose_dla_dcn_no_bias.py
|
StudentWong/CenterNet
|
d20640d40db522b14813e21febff187bd4d75738
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
from os.path import join
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
try:
from .DCNv2.dcn_v2 import DCN
except:
from src.lib.models.networks.DCNv2.dcn_v2 import DCN
pass
try:
from ..membership import Membership_Activation, Membership_norm
except:
from src.lib.models.membership import Membership_Activation, Membership_norm
pass
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
    """Build the download URL for a pretrained DLA checkpoint."""
    filename = '{}-{}.pth'.format(name, hash)
    return join('http://dl.yf.io/dla/models', data, filename)
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False)
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet style) used as the DLA tree block."""

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.stride = stride

    def forward(self, x, residual=None):
        # The Tree caller may supply a projected/downsampled residual;
        # default to the identity shortcut.
        if residual is None:
            residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        out += residual
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 bottleneck residual block; inner width is planes/expansion."""

    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        expansion = Bottleneck.expansion
        bottle_planes = planes // expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        # Residual may be pre-projected by the enclosing Tree.
        if residual is None:
            residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        out += residual
        out = self.relu(out)

        return out
class BottleneckX(nn.Module):
    """ResNeXt-style grouped bottleneck block (cardinality 32)."""

    expansion = 2
    cardinality = 32

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BottleneckX, self).__init__()
        cardinality = BottleneckX.cardinality
        # Inner width scales with cardinality/32 (== planes at the default).
        bottle_planes = planes * cardinality // 32
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation, bias=False,
                               dilation=dilation, groups=cardinality)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        if residual is None:
            residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        out += residual
        out = self.relu(out)

        return out
class Root(nn.Module):
    """Aggregation node: concat children, 1x1 conv + BN, optional residual add."""

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        # NOTE(review): the conv is hard-coded to kernel size 1 while padding
        # is still derived from ``kernel_size`` — consistent only for the
        # kernel_size=1 used throughout this file.
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        # x: variable number of child feature maps, concatenated on channels.
        children = x
        x = self.conv(torch.cat(x, 1))
        x = self.bn(x)
        if self.residual:
            # Residual from the first child (same channel count as output).
            x += children[0]
        x = self.relu(x)

        return x
class Tree(nn.Module):
    """Recursive aggregation tree of residual blocks — the core of DLA."""

    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        # root_dim: total channels fed into the Root aggregation node.
        if root_dim == 0:
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        if levels == 1:
            # Leaf level: two plain blocks feeding one Root.
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            # Recursive case: two sub-trees; only the deepest level owns a Root.
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            # 1x1 projection so the residual matches the block's output width.
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
            )

    def forward(self, x, residual=None, children=None):
        # ``children`` accumulates skip inputs for the deepest level's Root.
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        # NOTE(review): any caller-supplied ``residual`` is overwritten here.
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation backbone.

    forward() returns the outputs of all six levels (strides 1..32), which
    the DLAUp/IDAUp decoder then merges.
    """

    def __init__(self, levels, channels, num_classes=1000,
                 block=BasicBlock, residual_root=False, linear_root=False):
        super(DLA, self).__init__()
        self.channels = channels
        self.num_classes = num_classes
        # Stem: 7x7 conv at stride 1 keeps full resolution.
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)

    def _make_level(self, block, inplanes, planes, blocks, stride=1):
        # NOTE(review): unused in this file; kept for parity with upstream DLA.
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(
                nn.MaxPool2d(stride, stride=stride),
                nn.Conv2d(inplanes, planes,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(inplanes, planes, stride, downsample=downsample))
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))

        return nn.Sequential(*layers)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        # Plain stack of conv-BN-ReLU; only the first conv carries the stride.
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)

    def forward(self, x):
        # Returns a list with one feature map per level (coarse to fine).
        y = []
        x = self.base_layer(x)
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        return y

    def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
        # NOTE(review): the data/name/hash arguments are currently ignored —
        # the checkpoint path is hard-coded to a local file relative to cwd.
        model_weights = torch.load('../weights/dla34-ba72cf86.pth')
        num_classes = len(model_weights[list(model_weights.keys())[-1]])
        # Recreate the classifier head so the checkpoint's fc weights load.
        self.fc = nn.Conv2d(
            self.channels[-1], num_classes,
            kernel_size=1, stride=1, padding=0, bias=True)
        self.load_state_dict(model_weights)
def dla34(pretrained=True, **kwargs):  # DLA-34
    """Construct a DLA-34 backbone, optionally loading pretrained weights."""
    net = DLA(
        [1, 1, 1, 2, 2, 1],
        [16, 32, 64, 128, 256, 512],
        block=BasicBlock,
        **kwargs)
    if pretrained:
        net.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
    return net
class Identity(nn.Module):
    """No-op module: forwards its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
def fill_fc_weights(layers):
    """Zero the bias of every Conv2d inside ``layers``; weights untouched."""
    for module in layers.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
def fill_up_weights(up):
    """Initialize a grouped ConvTranspose2d as a bilinear upsampling kernel."""
    weight = up.weight.data
    k = weight.size(2)
    f = math.ceil(k / 2)
    center = (2 * f - 1 - f % 2) / (2. * f)
    # Fill the first channel with the separable bilinear kernel ...
    for i in range(k):
        for j in range(weight.size(3)):
            weight[0, 0, i, j] = (
                (1 - math.fabs(i / f - center)) *
                (1 - math.fabs(j / f - center)))
    # ... then copy it to every remaining output channel.
    for ch in range(1, weight.size(0)):
        weight[ch, 0, :, :] = weight[0, 0, :, :]
class DeformConv(nn.Module):
    """3x3 deformable convolution (DCNv2) followed by BN + ReLU."""

    def __init__(self, chi, cho):
        super(DeformConv, self).__init__()
        self.actf = nn.Sequential(
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
        self.conv = DCN(chi, cho, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1)

    def forward(self, x):
        x = self.conv(x)
        x = self.actf(x)
        return x
class IDAUp(nn.Module):
    """Iterative deep aggregation: upsample and merge feature maps in place.

    For each input level i >= 1: project to ``o`` channels, upsample by
    ``up_f[i]`` with a bilinear-initialized depthwise transposed conv, then
    fuse with the previous (already processed) level.
    """

    def __init__(self, o, channels, up_f):
        super(IDAUp, self).__init__()
        for i in range(1, len(channels)):
            c = channels[i]
            f = int(up_f[i])
            proj = DeformConv(c, o)   # channel projection to o
            node = DeformConv(o, o)   # fusion node after the add
            # Depthwise transposed conv initialized as bilinear upsampling.
            up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
                                    padding=f // 2, output_padding=0,
                                    groups=o, bias=False)
            fill_up_weights(up)

            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)

    def forward(self, layers, startp, endp):
        # NOTE: mutates ``layers[startp+1:endp]`` in place.
        for i in range(startp + 1, endp):
            upsample = getattr(self, 'up_' + str(i - startp))
            project = getattr(self, 'proj_' + str(i - startp))
            layers[i] = upsample(project(layers[i]))
            node = getattr(self, 'node_' + str(i - startp))
            layers[i] = node(layers[i] + layers[i - 1])
class DLAUp(nn.Module):
    """Stack of IDAUp stages progressively merging backbone levels."""

    def __init__(self, startp, channels, scales, in_channels=None):
        super(DLAUp, self).__init__()
        self.startp = startp
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        # Build one IDAUp per merge step, from the deepest pair upwards.
        for i in range(len(channels) - 1):
            j = -i - 2
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(channels[j], in_channels[j:],
                          scales[j:] // scales[j]))
            scales[j + 1:] = scales[j]
            # NOTE(review): this mutates the caller's ``in_channels`` list if
            # one was passed (and aliases ``channels`` when it wasn't) —
            # callers in this file always pass a fresh slice, so it is safe here.
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]

    def forward(self, layers):
        out = [layers[-1]]  # start with the deepest (32x) level
        for i in range(len(layers) - self.startp - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            ida(layers, len(layers) -i - 2, len(layers))
            out.insert(0, layers[-1])
        return out
class Interpolate(nn.Module):
    """Resample the input by a fixed scale factor via ``F.interpolate``."""

    def __init__(self, scale, mode):
        super(Interpolate, self).__init__()
        self.scale = scale
        self.mode = mode

    def forward(self, x):
        return F.interpolate(
            x, scale_factor=self.scale, mode=self.mode, align_corners=False)
class DLASeg_no_bias(nn.Module):
    """DLA detector whose heatmap head has no conv classifier bias.

    The 'hm' head stops at ``head_conv`` raw feature channels (exposed as
    'ft'); per-class scores are produced by a Membership_norm layer over
    those features instead of a plain conv+bias classifier.

    Bug fix: forward() used to reshape the membership output with a
    hard-coded ``5`` classes; it now uses ``self.heads['hm']`` so any class
    count works (Membership_norm is already sized from ``heads['hm']``).

    Args:
        base_name: backbone factory name in this module, e.g. 'dla34'.
        heads: mapping head-name -> output channels, must contain 'hm'.
        pretrained: forwarded to the backbone factory.
        down_ratio: output stride (one of 2/4/8/16).
        final_kernel: kernel size of the final conv of each non-hm head.
        last_level: last backbone level fed to the final IDAUp.
        head_conv: hidden width of the heads (and membership input size).
        out_channel: decoder output channels; 0 means channels[first_level].
    """

    def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel,
                 last_level, head_conv, out_channel=0):
        super(DLASeg_no_bias, self).__init__()
        assert down_ratio in [2, 4, 8, 16]
        self.first_level = int(np.log2(down_ratio))
        self.last_level = last_level
        self.base = globals()[base_name](pretrained=pretrained)
        channels = self.base.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales)

        if out_channel == 0:
            out_channel = channels[self.first_level]

        self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level],
                            [2 ** i for i in range(self.last_level - self.first_level)])

        self.heads = heads
        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
                if 'hm' in head:
                    # Heatmap head: single conv to head_conv channels; the
                    # membership layer below does the classification.
                    fc = nn.Sequential(
                        nn.Conv2d(channels[self.first_level], head_conv,
                                  kernel_size=3, padding=1, bias=True))
                    fc[-1].bias.data.fill_(0.0)
                else:
                    fc = nn.Sequential(
                        nn.Conv2d(channels[self.first_level], head_conv,
                                  kernel_size=3, padding=1, bias=True),
                        nn.ReLU(inplace=True),
                        nn.Conv2d(head_conv, classes,
                                  kernel_size=final_kernel, stride=1,
                                  padding=final_kernel // 2, bias=True))
                    fill_fc_weights(fc)
            else:
                # NOTE(review): with head_conv == 0 every head outputs
                # head_conv (i.e. 0) channels — this branch looks unused.
                fc = nn.Conv2d(channels[self.first_level], head_conv,
                               kernel_size=final_kernel, stride=1,
                               padding=final_kernel // 2, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(0.0)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)

        # Maps head_conv feature channels to heads['hm'] class memberships.
        self.menber_activation = Membership_norm(head_conv, heads['hm'])

    def forward(self, x):
        x = self.base(x)
        x = self.dla_up(x)

        y = []
        for i in range(self.last_level - self.first_level):
            y.append(x[i].clone())
        self.ida_up(y, 0, len(y))

        z = {}
        for head in self.heads:
            if head == 'hm':
                # Raw features before membership classification.
                z['ft'] = self.__getattr__(head)(y[-1])
                origin_shape = z['ft'].shape
                # Flatten spatial dims for the membership layer, then restore
                # (B, num_classes, H, W).  Previously hard-coded to 5 classes.
                z[head] = self.menber_activation(
                    z['ft'].view(origin_shape[0], origin_shape[1], origin_shape[2] * origin_shape[3])
                ).view(origin_shape[0], self.heads['hm'], origin_shape[2], origin_shape[3])
                # Expose the learned class centers for downstream losses.
                z['center'] = self.menber_activation.c
            else:
                z[head] = self.__getattr__(head)(y[-1])
        return [z]
def get_pose_net_no_bias(num_layers, heads, head_conv=256, down_ratio=4):
    """Build a DLASeg_no_bias detector on a pretrained dla{num_layers} backbone."""
    base_name = 'dla{}'.format(num_layers)
    return DLASeg_no_bias(base_name, heads,
                          pretrained=True,
                          down_ratio=down_ratio,
                          final_kernel=1,
                          last_level=5,
                          head_conv=head_conv)
# import cv2
# if __name__ == '__main__':
# from src.lib.models.networks.DCNv2.dcn_v2 import DCN
# #num_layers: 34
# #heads: {'hm': 5, 'wh': 2, 'reg': 2}
# #head_conv: 256
# model = get_pose_net_no_bias(num_layers=34, heads={'hm': 5, 'wh': 2, 'reg': 2},
# head_conv=256)
# # print(model)
# ckpt = torch.load('/home/studentw/disk3/tracker/CenterNet/exp/ctdet/default/model_best_shangqi.pth')
# # print(ckpt['state_dict'].keys())
# model.load_state_dict(ckpt['state_dict'])
# model = model.cuda()
#
# mean = np.array([0.40789654, 0.44719302, 0.47026115],
# dtype=np.float32).reshape(1, 1, 3)
# std = np.array([0.28863828, 0.27408164, 0.27809835],
# dtype=np.float32).reshape(1, 1, 3)
#
# img = cv2.imread('/home/studentw/disk3/shangqi/train/200420000000.png')/255.0
#
#
# inp = (img - mean) / std
# inp = inp.transpose(2, 0, 1)
#
# # print(img-mean)
# input = torch.tensor(inp, dtype=torch.float).unsqueeze(0).cuda()
# y = model(input)
# # print(y[0].keys())
# print(np.max(y[0]['hm'][0][0:1].sigmoid().permute(1, 2, 0).detach().cpu().numpy()))
# print(np.min(y[0]['hm'][0][0:1].sigmoid().permute(1, 2, 0).detach().cpu().numpy()))
#
# print(np.max(y[0]['hm'][0][0:1].permute(1, 2, 0).detach().cpu().numpy()))
# print(np.min(y[0]['hm'][0][0:1].permute(1, 2, 0).detach().cpu().numpy()))
# cv2.imshow('1', y[0]['hm'][0][0:1].permute(1, 2, 0).detach().cpu().numpy())
# cv2.waitKey(0)
def save_features_output(ckpt_name, involve_train=False):
    """Dump per-object 'ft' feature vectors of a trained model to a CSV.

    Runs the detector over the val set (optionally train+val), matches
    detections to COCO ground-truth boxes (center distance < 2 px at output
    stride and same class); unmatched labels fall back to the GT center
    pixel.  Each row of the CSV is 256 feature values + 'class' + 'score'.

    NOTE(review): paths, dataset layout, and normalization constants are
    hard-coded to one machine; requires CUDA, cv2 and pycocotools.
    """
    import cv2
    from pycocotools import coco
    from src.lib.models.decode import ctdet_decode_ret_peak
    import seaborn as sns
    import pandas as pd
    model = get_pose_net_no_bias(num_layers=34, heads={'hm': 5, 'wh': 2, 'reg': 2},
                                 head_conv=256)
    # One column per feature channel, plus label and heatmap score.
    column = list(range(0, 256)) + ['class'] + ['score']
    df = pd.DataFrame(columns=column)
    df_line = 0
    ckpt = torch.load('/home/studentw/disk3/tracker/CenterNet/exp/ctdetnfs/default/' + ckpt_name)
    model.load_state_dict(ckpt['state_dict'])
    model = model.cuda()
    # Per-channel normalization of this dataset (grayscale-like).
    mean = np.array([0.317200417, 0.317200417, 0.317200417],
                    dtype=np.float32).reshape(1, 1, 3)
    std = np.array([0.22074733, 0.22074733, 0.22074733],
                   dtype=np.float32).reshape(1, 1, 3)
    all_img = []
    if involve_train:
        train_data_dir = '/home/studentw/disk3/tracker/CenterNet/data/shangqi/train'
        train_list = sorted(os.listdir(train_data_dir))
        train_num = len(train_list)
        for img_name in train_list:
            all_img = all_img + [os.path.join(train_data_dir, img_name)]
        label_train = coco.COCO('/home/studentw/disk3/tracker/CenterNet/data/shangqi/annotations/train.json')

    val_data_dir = '/home/studentw/disk3/tracker/CenterNet/data/shangqi/val'
    val_list = sorted(os.listdir(val_data_dir))
    val_num = len(val_list)
    for img_name in val_list:
        all_img = all_img + [os.path.join(val_data_dir, img_name)]
    label_val = coco.COCO('/home/studentw/disk3/tracker/CenterNet/data/shangqi/annotations/val.json')

    # COCO category id <-> contiguous class index used by the heatmap.
    mapping_ids = [1, 2, 3, 7, 8]
    invert_mapping = {1: 0, 2: 1, 3: 2, 7: 3, 8: 4}
    output_file = []
    feature_res = []
    all_num = len(all_img)
    for i, img_path in enumerate(all_img):
        # Image file names encode the COCO image id.
        imgnum_in_json = int(img_path.split('/')[-1].replace('.png', ''))
        if involve_train and i <= train_num-1:
            label_id = label_train.getAnnIds(imgIds=[imgnum_in_json])
            labels = [label_train.anns[label_id_single] for label_id_single in label_id]
        else:
            label_id = label_val.getAnnIds(imgIds=[imgnum_in_json])
            labels = [label_val.anns[label_id_single] for label_id_single in label_id]

        img = cv2.imread(img_path) / 255.0
        inp = (img - mean) / std
        inp = inp.transpose(2, 0, 1)
        input = torch.tensor(inp, dtype=torch.float).unsqueeze(0).cuda()
        with torch.no_grad():
            y = model(input)[-1]
        dets, xs, ys = ctdet_decode_ret_peak(y['hm'], y['wh'], reg=y['reg'], cat_spec_wh=False, K=20)
        # Boxes come out at output stride; *4 maps them to input pixels.
        det_box_tlxywh = dets[0, :, 0:4].detach().cpu().numpy() * 4
        det_box_cwh = det_box_tlxywh.copy()
        det_box_cwh[:, 2:4] = det_box_tlxywh[:, 2:4] - det_box_tlxywh[:, 0:2]
        det_box_cwh[:, 0:2] = det_box_tlxywh[:, 0:2] + 0.5*det_box_cwh[:, 2:4]

        for label in labels:
            matched = False
            # NOTE(review): np.float is removed in numpy>=1.24 — needs float.
            lab_cwh = np.array(label['bbox'], dtype=np.float)
            lab_cwh[2:4] = np.array(label['bbox'][2:4])
            lab_cwh[0:2] = np.array(label['bbox'][0:2]) + 0.5 * lab_cwh[2:4]
            for detnum, det in enumerate(det_box_cwh):
                distance_xy = (lab_cwh[0:2]-det[0:2]) ** 2
                # Match: center within 2 input pixels and same category.
                if np.sqrt(distance_xy[0]+distance_xy[1])<2 \
                        and mapping_ids[int(dets[0][detnum][5])] == label['category_id']:
                    feature_np = y['ft'][0, :, int(ys[0][detnum]),int(xs[0][detnum])].detach().cpu().numpy()
                    matched = True
                    feature_dict = dict()
                    for fnum, f in enumerate(feature_np):
                        feature_dict[fnum] = f
                    feature_dict['class'] = label['category_id']
                    feature_dict['score'] = float(y['hm'][
                        0, invert_mapping[label['category_id']], int(ys[0][detnum]), int(
                            xs[0][detnum])].detach().cpu().numpy())
                    df.loc[df_line] = feature_dict
                    df_line = df_line + 1
            if not matched and (label['category_id'] in mapping_ids):
                # Fallback: sample the feature map at the GT box center.
                feature_np = y['ft'][0, :, int(lab_cwh[1] / 4), int(lab_cwh[0] / 4)].detach().cpu().numpy()
                matched = True
                feature_dict = dict()
                for fnum, f in enumerate(feature_np):
                    feature_dict[fnum] = f
                feature_dict['class'] = label['category_id']
                feature_dict['score'] = float(y['hm'][
                    0, invert_mapping[label['category_id']], int(lab_cwh[1] / 4), int(lab_cwh[0] / 4)].detach().cpu().numpy())
                df.loc[df_line] = feature_dict
                df_line = df_line + 1
        print('{:d}/{:d}'.format(i, all_num))
    df.to_csv("./" + ckpt_name.replace('.pth', '') + '.csv')
def draw_box(path, feature=256):
    """Show one seaborn box plot per feature column of a saved feature CSV."""
    import seaborn as sns
    import pandas as pd
    import matplotlib.pyplot as plt

    frame = pd.read_csv(path, header=0, index_col=0)
    for head in range(0, feature):
        sns.boxplot(x="class", y=str(head), data=frame)
        plt.show()
def draw_reduce_dim_feature(path, feature=256, transformer=None, alpha=False):
    """Scatter-plot a 2-D PCA projection of the per-detection features.

    Args:
        path: CSV produced by ``save_features_output`` (feature columns
            named '0'..str(feature-1), plus 'class' and 'score').
        feature: number of feature columns.  Bug fix: this argument used to
            be ignored — the column list was hard-coded to 256.
        transformer: fitted 2-D projector exposing ``transform``; a fresh
            ``PCA(2)`` is fitted on the data when None.
        alpha: if True, draw each point with opacity equal to its heatmap
            score (slow: one scatter call per point).

    Returns:
        The (possibly newly fitted) transformer, so a second CSV can be
        projected into the same 2-D space for comparison.
    """
    import pandas as pd
    import matplotlib.pyplot as plt
    from sklearn.decomposition import PCA

    color_mapping = {1: 'red', 2: 'blue', 3: 'green', 7: 'orange', 8: 'black'}
    df = pd.read_csv(path, header=0, index_col=0)
    feature_columns = [str(i) for i in range(feature)]
    feat_np = np.array(df[feature_columns])
    cat_np = np.array(df['class'])
    score = np.array(df['score'])
    color = [color_mapping[cat_i] for cat_i in cat_np]
    if transformer is None:
        transformer = PCA(2)
        transformer.fit(feat_np)
    reduce_dim = transformer.transform(feat_np)
    all_len = reduce_dim.shape[0]
    if alpha:
        # matplotlib's scatter accepts a single alpha per call, so plot
        # point by point when score-weighted opacity is requested.
        for i in range(0, all_len):
            plt.scatter(reduce_dim[i:i + 1, 0], reduce_dim[i:i + 1, 1],
                        c=color[i], s=3, alpha=score[i])
            print('{:d}/{:d}'.format(i, all_len))
    else:
        plt.scatter(reduce_dim[:, 0], reduce_dim[:, 1], c=color, s=3)
    plt.xlim([-2, 2])
    plt.ylim([-2, 2])
    plt.show()
    return transformer
# print(reduce_dim.shape)
# for cat in cat_id:
# from src.lib.models.networks.DCNv2.dcn_v2 import DCN
# if __name__ == '__main__':
# class_feature0 = np.load('/home/studentw/disk3/tracker/CenterNet/data/shangqi/feature_out0.npy')
# class_feature1 = np.load('/home/studentw/disk3/tracker/CenterNet/data/shangqi/feature_out1.npy')
# class_feature2 = np.load('/home/studentw/disk3/tracker/CenterNet/data/shangqi/feature_out2.npy')
# class_feature3 = np.load('/home/studentw/disk3/tracker/CenterNet/data/shangqi/feature_out3.npy')
# class_feature4 = np.load('/home/studentw/disk3/tracker/CenterNet/data/shangqi/feature_out4.npy')
# import matplotlib.pyplot as plt
# class_feature = [class_feature0, class_feature1, class_feature2, class_feature3, class_feature4]
# color = ['red', 'blue', 'green', 'yellow', 'hotpink']
#
# for i in range(0, 5):
# figure = plt.figure(i)
# for j in range(0, 5):
# x = np.sqrt(class_feature[j][:, 5])
# y = class_feature[j][:, i]
# plt.scatter(x, y, c=color[j], s=8, label=j, alpha=0.6, edgecolors='gray', linewidths=0.5)
# plt.show()
# if __name__ == '__main__':
# from src.lib.models.membership import Membership_Activation
# from src.lib.models.networks.DCNv2.dcn_v2 import DCN
# model = get_pose_net_no_bias(num_layers=34, heads={'hm': 5, 'wh': 2, 'reg': 2},
# head_conv=256)
if __name__ == '__main__':
    import seaborn as sns
    import pandas as pd
    import matplotlib.pyplot as plt
    # Visualize the 2-D PCA projection of two saved feature CSVs; each call
    # fits its own PCA (the returned transformer is not reused here).
    draw_reduce_dim_feature('/home/studentw/disk3/tracker/CenterNet/src/2080_lamda0.01_batch8_lr1.25e-4_ap83_best.csv')
    draw_reduce_dim_feature('/home/studentw/disk3/tracker/CenterNet/src/2080_lamda0.05_batch8_lr1.25e-4_ap82.csv')
| 38.65162
| 156
| 0.552807
|
4a09b480089b9a2514ed5071816bafba48a9e25c
| 2,301
|
py
|
Python
|
live-explorer/gauss_laws.py
|
tychon/redpitaya-measurements
|
84efc3a91e23e8baac8559d576077cf791b536d2
|
[
"MIT"
] | null | null | null |
live-explorer/gauss_laws.py
|
tychon/redpitaya-measurements
|
84efc3a91e23e8baac8559d576077cf791b536d2
|
[
"MIT"
] | null | null | null |
live-explorer/gauss_laws.py
|
tychon/redpitaya-measurements
|
84efc3a91e23e8baac8559d576077cf791b536d2
|
[
"MIT"
] | 1
|
2020-06-04T08:26:09.000Z
|
2020-06-04T08:26:09.000Z
|
import numpy as np
from scipy.signal import find_peaks
from scipy.interpolate import interp1d
def slice_intersect(slices, data=None):
    """Calculate the intersection of index slices.

    Args:
        slices: iterable of ``slice`` objects (start/stop only; step unused).
        data: optional sequence of arrays, one per slice, where ``data[k]``
            covers exactly the index range ``slices[k]`` along its first axis.

    Returns:
        The intersection slice; when ``data`` is given (even an empty list,
        which previously fell through to the no-data path), also a list of
        each array restricted to the intersection.

    Raises:
        ValueError: if the slices do not overlap (was an ``assert`` before;
            a real exception also survives ``python -O``).
    """
    inters = slice(
        max(s.start for s in slices),
        min(s.stop for s in slices))
    if inters.start > inters.stop:
        raise ValueError("No intersection.")
    if data is None:
        return inters
    interd = [
        trace[inters.start - s.start:inters.stop - s.start, ...]
        for s, trace in zip(slices, data)
    ]
    return inters, interd
def envelope(ts, signal, fexp, peak_height=0.1):
    """Estimate the envelope of an oscillation by interpolating its peaks.

    ``fexp`` is the expected oscillation frequency: peaks closer than about
    0.8 periods apart are rejected.  Returns ``(env, valid)`` where ``env``
    interpolates the peak amplitudes over ``ts[valid]``; when fewer than
    four peaks are found, returns ``(None, slice over the full range)``.
    """
    dt = ts[1] - ts[0]
    min_distance = 0.8 / (fexp * dt)  # == 0.8 * samplerate / fexp
    peaks, _ = find_peaks(signal, height=peak_height, distance=min_distance)
    if peaks.size < 4:
        return None, slice(0, len(ts))
    return interp1d(ts[peaks], signal[peaks]), slice(peaks[0], peaks[-1])
def gauss_laws(
        l, latticeidxs,
        ts, signals, fs,
        C=20e-9, f0=60e3):
    """Evaluate per-site Gauss-law observables from oscillator traces.

    Args:
        l: number of lattice sites.
        latticeidxs: measured lattice positions; even entries (2i) are sites,
            odd entries (2i±1) are the links left/right of site i.
        ts, signals: common time axis and one trace per measured position.
        fs: oscillation frequency per trace (feeds ``envelope``).
        C, f0: capacitance and reference frequency used in the
            energy-to-number conversion (presumably; TODO confirm units).

    Returns:
        Gs with shape (l, 4, len(ts)): [site number, left link, right link,
        Gauss-law combination], NaN where a trace is missing or outside its
        valid envelope window.
    """
    m = len(signals)
    envs, slices = zip(*[envelope(ts, signals[i], fs[i])
                         for i in range(m)])
    # Number estimate per trace: (f0/f) * C/2 * envelope^2; zero-filled when
    # the envelope could not be built (fewer than four peaks).
    numbers = [
        f0/fs[i] * C/2 * envs[i](ts[slices[i]])**2
        if envs[i] is not None else np.full(ts[slices[i]].size, 0)
        for i in range(m)]

    Gs = np.full((l, 4, ts.size), np.nan, dtype=float)
    for i in range(l):
        try:
            # Site occupation fills channels 0 and 3.
            siteidx = latticeidxs.index(2*i)
            Gs[i, 0, slices[siteidx]] = numbers[siteidx]
            Gs[i, 3, slices[siteidx]] = numbers[siteidx]
            if i > 0:
                # Left link: subtract with alternating sign; restrict the
                # Gauss-law channel to the link's valid window.
                leftidx = latticeidxs.index(2*i-1)
                s = slices[leftidx]
                Gs[i, 1, s] = numbers[leftidx]
                Gs[i, 3, s] -= (-1)**i * numbers[leftidx]
                Gs[i, 3, :s.start] = np.nan
                Gs[i, 3, s.stop:] = np.nan
            if i < l-1:
                # Right link, same treatment.
                rightidx = latticeidxs.index(2*i+1)
                s = slices[rightidx]
                Gs[i, 2, s] = numbers[rightidx]
                Gs[i, 3, s] -= (-1)**i * numbers[rightidx]
                Gs[i, 3, :s.start] = np.nan
                Gs[i, 3, s.stop:] = np.nan
        except ValueError:
            # Site (or required link) not measured: Gauss law undefined.
            Gs[i, 3] = np.nan
    return Gs
| 31.958333
| 78
| 0.523251
|
4a09b49990dd6c9dcae41a816a6bb3565b0fb8a2
| 10,474
|
py
|
Python
|
QAStrategy/qastockbase.py
|
xiaopge/QAStrategy
|
3418324572287d3d61485309b0f1fdea344bfd59
|
[
"MIT"
] | 1
|
2019-12-24T03:57:42.000Z
|
2019-12-24T03:57:42.000Z
|
QAStrategy/qastockbase.py
|
xiaopge/QAStrategy
|
3418324572287d3d61485309b0f1fdea344bfd59
|
[
"MIT"
] | null | null | null |
QAStrategy/qastockbase.py
|
xiaopge/QAStrategy
|
3418324572287d3d61485309b0f1fdea344bfd59
|
[
"MIT"
] | null | null | null |
#
"""
stock_base
"""
import uuid
import datetime
import json
import os
import threading
import pandas as pd
import pymongo
from qaenv import (eventmq_ip, eventmq_password, eventmq_port,
eventmq_username, mongo_ip)
import QUANTAXIS as QA
from QUANTAXIS.QAARP import QA_Risk, QA_User
from QUANTAXIS.QAEngine.QAThreadEngine import QA_Thread
from QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE, RUNNING_ENVIRONMENT, ORDER_DIRECTION
from QAPUBSUB.consumer import subscriber_topic
from QAPUBSUB.producer import publisher_routing
from QAStrategy.qactabase import QAStrategyCTABase
from QIFIAccount import QIFI_Account
class QAStrategyStockBase(QAStrategyCTABase):
    """Base class for CN stock strategies on top of QAStrategyCTABase.

    Wires up realtime market-data subscription over the message queue,
    account bookkeeping (QIFI account in 'sim' mode, QUANTAXIS account in
    'backtest' mode) and order routing/notification.
    """

    def __init__(self, code=['000001'], frequence='1min', strategy_id='QA_STRATEGY', risk_check_gap=1, portfolio='default',
                 start='2019-01-01', end='2019-10-21',
                 data_host=eventmq_ip, data_port=eventmq_port, data_user=eventmq_username, data_password=eventmq_password,
                 trade_host=eventmq_ip, trade_port=eventmq_port, trade_user=eventmq_username, trade_password=eventmq_password,
                 taskid=None, mongo_ip=mongo_ip):
        # NOTE: the mutable default for `code` is kept for interface
        # compatibility; it is never mutated in this class.
        # BUGFIX: forward the actual connection parameters to the parent --
        # the original passed the module-level eventmq_* globals, silently
        # ignoring any custom host/port/credentials a caller supplied.
        # Defaults are those same globals, so default behaviour is unchanged.
        super().__init__(code=code, frequence=frequence, strategy_id=strategy_id, risk_check_gap=risk_check_gap, portfolio=portfolio,
                         start=start, end=end,
                         data_host=data_host, data_port=data_port, data_user=data_user, data_password=data_password,
                         trade_host=trade_host, trade_port=trade_port, trade_user=trade_user, trade_password=trade_password,
                         taskid=taskid, mongo_ip=mongo_ip)
        self.code = code

    def subscribe_data(self, code, frequence, data_host, data_port, data_user, data_password):
        """Subscribe to the realtime stock feed for every code in *code*.

        Arguments:
            code {list} -- stock codes to subscribe to
            frequence {str} -- bar frequency, e.g. '1min'
        """
        self.sub = subscriber_topic(exchange='realtime_stock_{}'.format(
            frequence), host=data_host, port=data_port, user=data_user, password=data_password, routing_key='')
        for item in code:
            self.sub.add_sub(exchange='realtime_stock_{}'.format(
                frequence), routing_key=item)
        self.sub.callback = self.callback

    def upcoming_data(self, new_bar):
        """Merge an incoming bar into the cached market data and dispatch it.

        Arguments:
            new_bar {pd.DataFrame} -- single bar indexed by (datetime, code)
        """
        self._market_data = pd.concat([self._old_data, new_bar])
        # QA.QA_util_log_info(self._market_data)
        if self.isupdate:
            self.update()
            self.isupdate = False
        self.update_account()
        # self.positions.on_price_change(float(new_bar['close']))
        self.on_bar(new_bar)

    def ind2str(self, ind, ind_type):
        """Serialize the last row of an indicator DataFrame to a JSON string."""
        z = ind.tail(1).reset_index().to_dict(orient='records')[0]
        return json.dumps({'topic': ind_type, 'code': self.code, 'type': self.frequence, 'data': z})

    def callback(self, a, b, c, body):
        """Message-queue callback.  In the strategy callback we need to:
        1. update the data
        2. update the bar
        3. update the strategy state
        4. push events

        Arguments:
            a/b/c -- channel/method/properties from the MQ client (unused)
            body {bytes} -- JSON-encoded bar
        """
        self.new_data = json.loads(str(body, encoding='utf-8'))
        self.running_time = self.new_data['datetime']
        # The trailing 9 characters are the 'SS.ffffff' seconds field; a value
        # of 0 marks the first tick of a new minute and triggers update().
        # NOTE(review): assumes datetimes carry microseconds -- confirm format.
        if float(self.new_data['datetime'][-9:]) == 0:
            self.isupdate = True
        self.acc.on_price_change(self.new_data['code'], self.new_data['close'])
        bar = pd.DataFrame([self.new_data]).set_index(['datetime', 'code']
                                                     ).loc[:, ['open', 'high', 'low', 'close', 'volume']]
        self.upcoming_data(bar)

    def run_sim(self):
        """Start realtime-sim mode: load today's minute history, create the
        QIFI account, connect the publisher/subscriber and block on the MQ
        consume loop."""
        self.running_mode = 'sim'

        self._old_data = QA.QA_fetch_stock_min(self.code, QA.QA_util_get_last_day(
            QA.QA_util_get_real_date(str(datetime.date.today()))), str(datetime.datetime.now()), format='pd', frequence=self.frequence).set_index(['datetime', 'code'])
        self._old_data = self._old_data.loc[:, [
            'open', 'high', 'low', 'close', 'volume']]

        self.database = pymongo.MongoClient(mongo_ip).QAREALTIME
        self.client = self.database.account
        self.subscriber_client = self.database.subscribe

        self.acc = QIFI_Account(
            username=self.strategy_id, password=self.strategy_id, trade_host=mongo_ip)
        self.acc.initial()

        self.pub = publisher_routing(exchange='QAORDER_ROUTER', host=self.trade_host,
                                     port=self.trade_port, user=self.trade_user, password=self.trade_password)

        self.subscribe_data(self.code, self.frequence, self.data_host,
                            self.data_port, self.data_user, self.data_password)
        # TODO: hard-coded WeChat subscriber id -- should be configurable.
        self.add_subscriber('oL-C4w1HjuPRqTIRcZUyYR0QcLzo')

        self.database.strategy_schedule.job_control.update(
            {'strategy_id': self.strategy_id},
            {'strategy_id': self.strategy_id, 'taskid': self.taskid,
             'filepath': os.path.abspath(__file__), 'status': 200}, upsert=True)
        # threading.Thread(target=, daemon=True).start()
        self.sub.start()

    def run(self):
        """Keep the process alive; all real work happens in MQ callbacks."""
        import time  # local import: the module top-level does not import time
        while True:
            # BUGFIX: sleep instead of spinning on `pass`, which pinned a
            # CPU core at 100% while doing nothing.
            time.sleep(1)

    def get_code_marketdata(self, code):
        """Return the cached market data restricted to a single code."""
        return self.market_data.loc[(slice(None), code), :]

    def get_current_marketdata(self):
        """Return the cached market data at the current running_time."""
        return self.market_data.loc[(self.running_time, slice(None)), :]

    def debug(self):
        """Run the strategy as a local backtest over [self.start, self.end]."""
        self.running_mode = 'backtest'
        self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS
        user = QA_User(username="admin", password='admin')
        port = user.new_portfolio(self.portfolio)
        self.acc = port.new_accountpro(
            account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type)
        #self.positions = self.acc.get_position(self.code)
        print(self.acc)
        print(self.acc.market_type)
        data = QA.QA_quotation(self.code, self.start, self.end, source=QA.DATASOURCE.MONGO,
                               frequence=self.frequence, market=self.market_type, output=QA.OUTPUT_FORMAT.DATASTRUCT)

        def x1(item):
            # print(data)
            self._on_1min_bar()
            # TODO(review): DataFrame.append returns a NEW frame; its result
            # is discarded here, so _market_data is never actually extended.
            # Left as-is to preserve existing behaviour.
            self._market_data.append(item)
            if str(item.name[0])[0:10] != str(self.running_time)[0:10]:
                # Calendar-day rollover: settle the CN stock account.
                if self.market_type == QA.MARKET_TYPE.STOCK_CN:
                    print('backtest: Settle!')
                    self.acc.settle()
            self.running_time = str(item.name[0])
            self.on_bar(item)

        data.data.apply(x1, axis=1)

    def update_account(self):
        """Refresh cached account state from the underlying account object."""
        if self.running_mode == 'sim':
            QA.QA_util_log_info('{} UPDATE ACCOUNT'.format(
                str(datetime.datetime.now())))
            self.accounts = self.acc.account_msg
            self.orders = self.acc.orders
            self.positions = self.acc.positions
            self.trades = self.acc.trades
            self.updatetime = self.acc.dtstr
        elif self.running_mode == 'backtest':
            #self.positions = self.acc.get_position(self.code)
            self.positions = self.acc.positions

    def send_order(self, direction='BUY', offset='OPEN', code=None, price=3925, volume=10, order_id='',):
        """Send (sim) or book (backtest) an order.

        Arguments:
            direction {str} -- 'BUY' or 'SELL'
            offset {str} -- 'OPEN' or 'CLOSE'
            code {str} -- instrument code to trade
            price {float|pd.Series} -- order price
            volume {int} -- order size
            order_id {str} -- optional id; a uuid4 is generated when empty
        """
        # SAFER: constant attribute lookup instead of eval() on a built string.
        towards = getattr(ORDER_DIRECTION, '{}_{}'.format(direction, offset))
        order_id = str(uuid.uuid4()) if order_id == '' else order_id

        if self.market_type == QA.MARKET_TYPE.STOCK_CN:
            """
            Convert futures-style open/close semantics into the plain
            BUY/SELL directions used for stocks.
            """
            if towards == ORDER_DIRECTION.SELL_CLOSE:
                towards = ORDER_DIRECTION.SELL
            elif towards == ORDER_DIRECTION.BUY_OPEN:
                towards = ORDER_DIRECTION.BUY

        if isinstance(price, float):
            pass
        elif isinstance(price, pd.Series):
            price = price.values[0]

        if self.running_mode == 'sim':
            QA.QA_util_log_info(
                '============ {} SEND ORDER =================='.format(order_id))
            QA.QA_util_log_info('direction{} offset {} price{} volume{}'.format(
                direction, offset, price, volume))

            if self.check_order(direction, offset):
                self.last_order_towards = {'BUY': '', 'SELL': ''}
                self.last_order_towards[direction] = offset
                now = str(datetime.datetime.now())

                order = self.acc.send_order(
                    code=code, towards=towards, price=price, amount=volume, order_id=order_id)
                order['topic'] = 'send_order'
                self.pub.pub(
                    json.dumps(order), routing_key=self.strategy_id)

                self.acc.make_deal(order)
                self.bar_order['{}_{}'.format(direction, offset)] = self.bar_id
                try:
                    # BUGFIX: `requests` was used without ever being imported,
                    # so every notification died with a NameError swallowed
                    # below.  Import it here (existing implicit dependency).
                    import requests
                    for user in self.subscriber_list:
                        QA.QA_util_log_info(self.subscriber_list)
                        """http://www.yutiansut.com/signal?user_id=oL-C4w1HjuPRqTIRcZUyYR0QcLzo&template=xiadan_report&\
                            strategy_id=test1&realaccount=133496&code=rb1910&order_direction=BUY&\
                            order_offset=OPEN&price=3600&volume=1&order_time=20190909
                        """
                        # BUGFIX: use the traded `code` -- self.code is a list
                        # for stock strategies and has no .lower().
                        requests.post('http://www.yutiansut.com/signal?user_id={}&template={}&strategy_id={}&realaccount={}&code={}&order_direction={}&order_offset={}&price={}&volume={}&order_time={}'.format(
                            user, "xiadan_report", self.strategy_id, self.acc.user_id, str(code).lower(), direction, offset, price, volume, now))
                except Exception as e:
                    QA.QA_util_log_info(e)
            else:
                QA.QA_util_log_info('failed in ORDER_CHECK')

        elif self.running_mode == 'backtest':
            self.bar_order['{}_{}'.format(direction, offset)] = self.bar_id
            self.acc.receive_simpledeal(
                code=code, trade_time=self.running_time, trade_towards=towards, trade_amount=volume, trade_price=price, order_id=order_id)
            #self.positions = self.acc.get_position(self.code)
if __name__ == '__main__':
    # Demo entry point: run the base strategy in realtime-sim mode on two codes.
    QAStrategyStockBase(code=['000001', '000002']).run_sim()
| 40.440154
| 208
| 0.607027
|
4a09b648b2b7c0b3335c23621864eecff34233d3
| 766
|
py
|
Python
|
amc/__init__.py
|
mchen910/amc-prep
|
67cd16a56e64700c63e3353f0e2e666b5d4933a5
|
[
"MIT"
] | 1
|
2021-12-31T23:36:40.000Z
|
2021-12-31T23:36:40.000Z
|
amc/__init__.py
|
mchen910/amc-prep
|
67cd16a56e64700c63e3353f0e2e666b5d4933a5
|
[
"MIT"
] | 1
|
2022-01-01T21:06:46.000Z
|
2022-01-01T21:06:46.000Z
|
amc/__init__.py
|
mchen910/amc-prep
|
67cd16a56e64700c63e3353f0e2e666b5d4933a5
|
[
"MIT"
] | null | null | null |
"""
amc-prep
========
A tool to prepare for the AMC and AIME competitions.
To create a practice test, run (as an example):
>>> PROBLEMS = 25
>>> TEX_DIR = "tex"
>>> PDF_DIR = "pdf"
>>> EXAMS = ["AMC 12A", "AMC 10B"]
>>> P_FILENAME = "problems_1"
>>> S_FILENAME = "solutions_1"
>>> A_FILENAME = "answers_1"
>>> P_TITLE = "Problems"
>>> S_TITLE = "Solutions"
>>> A_TITLE = "Answers"
>>> write_practice_test(
PROBLEMS, TEX_DIR, EXAMS, P_FILENAME, S_FILENAME,
A_FILENAME, P_TITLE, S_TITLE, A_TITLE, pdf_dir=PDF_DIR
)
"""
__version__ = "1.0.0"
from amc.write import write_practice_test, write_default_test, get_problem, get_solution
from amc.randomizer import calculate_difficulties, set_difficulties
from amc.generate import convert_exam_name
| 25.533333
| 88
| 0.693211
|
4a09b6fa3148bf85babce826d769c5b202ff2105
| 802
|
py
|
Python
|
petersen/app/base.py
|
TRManderson/petersen
|
477a2403c21033d8c901ab555efc4e1045fdd4bc
|
[
"MIT"
] | null | null | null |
petersen/app/base.py
|
TRManderson/petersen
|
477a2403c21033d8c901ab555efc4e1045fdd4bc
|
[
"MIT"
] | null | null | null |
petersen/app/base.py
|
TRManderson/petersen
|
477a2403c21033d8c901ab555efc4e1045fdd4bc
|
[
"MIT"
] | null | null | null |
from flask import Flask
from mako.lookup import TemplateLookup
import pkg_resources
import os
# Absolute path of the installed 'petersen' package; used to locate assets.
module_path = os.path.join(pkg_resources.get_distribution('petersen').location, 'petersen')
app = Flask("petersen.app")
app.config['db_url'] = 'sqlite:///dev.db'
app.secret_key = 'TRMISAVATARAANGCONFIRMED'  # TODO Move to env var and use an actual random string
# Mako template lookup rooted at the package's templates directory.
template_lookup = TemplateLookup([
    os.path.join(pkg_resources.get_distribution('petersen').location, 'petersen', 'templates')
])
@app.route('/')
@app.route('/index.html')
def index():
    """Render the landing page from the 'index.mako' template."""
    template = template_lookup.get_template('index.mako')
    return template.render(config=app.config)
@app.route('/assets/<path:path>')
def static_files(path):
    """Serve a file from the package's assets directory.

    Reads in binary mode so non-text assets (images, fonts, ...) are not
    corrupted by text decoding, and rejects any request whose resolved
    path escapes the assets root (e.g. via '..' components).
    """
    assets_root = os.path.realpath(os.path.join(module_path, 'assets'))
    full_path = os.path.realpath(
        os.path.join(assets_root, *path.split('/')))
    # SECURITY: refuse paths that resolve outside the assets directory.
    if not full_path.startswith(assets_root + os.sep):
        from flask import abort
        abort(404)
    with open(full_path, 'rb') as f:
        return f.read()
| 30.846154
| 99
| 0.726933
|
4a09b8df8bbfadf6668e7cb13f81a2c9ca202e42
| 19,545
|
py
|
Python
|
Instagram_bot/main.py
|
Mimoza-23/Instagram_bot
|
29e201165b34e1e8440c60928ea4c63f3da5ca3f
|
[
"Unlicense"
] | null | null | null |
Instagram_bot/main.py
|
Mimoza-23/Instagram_bot
|
29e201165b34e1e8440c60928ea4c63f3da5ca3f
|
[
"Unlicense"
] | null | null | null |
Instagram_bot/main.py
|
Mimoza-23/Instagram_bot
|
29e201165b34e1e8440c60928ea4c63f3da5ca3f
|
[
"Unlicense"
] | null | null | null |
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
import time
import random
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.options import Options
import os
import requests
import re
import sys
# Credentials are collected interactively at import time and used as
# module-level globals by InstagramBot.login().
username = input('Введите ваш логин: ')
password = input('Введите ваш пороль: ')
class InstagramBot():
    """Selenium-based Instagram automation bot (headless Firefox).

    Provides login, liking by hashtag or post URL, mass like / subscribe /
    unsubscribe helpers and a simple like-count statistics crawler.
    NOTE(review): all CSS selectors and XPaths are tied to one specific
    Instagram page layout and will break whenever the markup changes.
    """
    def __init__(self,username,password):
        # Stored credentials; NOTE(review): login() currently reads the
        # module-level globals instead of these attributes.
        self.username = username
        self.password = password
        options = Options()
        options.add_argument("--headless")
        self.browser = webdriver.Firefox(options=options)
    def close_browser(self):
        """Close the current window and end the WebDriver session."""
        self.browser.close()
        self.browser.quit()
    def login(self):
        """Log in to instagram.com with the configured credentials."""
        browser = self.browser
        browser.get('https://www.instagram.com/')
        time.sleep(random.randrange(4 ,6))
        username_input = browser.find_element_by_name("username")
        username_input.clear()
        # NOTE(review): uses the module-level `username`/`password` globals,
        # not self.username/self.password.
        username_input.send_keys(username)
        time.sleep(5)
        password_input = browser.find_element_by_name("password")
        password_input.clear()
        password_input.send_keys(password)
        password_input = browser.find_element_by_xpath("/html/body/div[1]/section/main/article/div[2]/div[1]/div/form/div/div[3]/button/div").click()
        time.sleep(10)
    def like_photo_by_hashtag(self, hashtag):
        """Open the hashtag feed, collect post URLs and like the first two."""
        browser = self.browser
        browser.get(f'https://www.instagram.com/explore/tags/{hashtag}/')
        time.sleep(5)
        # Scroll a few times so more posts are loaded into the DOM.
        for i in range(1, 4):
            browser.execute_script('window.scrollTo(0, document.body.scrollHeight);')
            time.sleep(random.randrange(3, 5))
        hrefs = browser.find_elements_by_tag_name('a')
        # Post permalinks all contain "/p/".
        posts_urls = [item.get_attribute('href') for item in hrefs if "/p/" in item.get_attribute('href')]
        print(posts_urls)
        for url in posts_urls[0:2]:
            try:
                browser.get(url)
                time.sleep(5)
                like_button = browser.find_element_by_xpath('/html/body/div[1]/section/main/div/div[1]/article/div[3]/section[1]/span[1]/button').click()
                # Long randomized pause to reduce the chance of rate limiting.
                time.sleep(random.randrange(50,60))
            except Exception as ex:
                print(ex)
        self.close_browser()
    def xpath_exists(self,url):
        """Return True if an element matching the given XPath is present."""
        browser = self.browser
        try:
            browser.find_element_by_xpath(url)
            exist = True
        except NoSuchElementException:
            exist = False
        return exist
    def put_exactly_like(self, userpost):
        """Like a single post given its URL."""
        browser = self.browser
        browser.get(userpost)
        time.sleep(4)
        # Presence of this <h2> means Instagram rendered a "not found" page.
        wrong_userpage = "/html/body/div[1]/section/main/div/h2"
        if self.xpath_exists(wrong_userpage):
            print("Такого поста не существует, проверьте URL")
            self.close_browser()
        else:
            print("Пост успешно найден, ставим лайк!")
            time.sleep(2)
            like_button = "/html/body/div[1]/section/main/div/div/article/div[3]/section[1]/span[1]/button"
            browser.find_element_by_xpath(like_button).click()
            time.sleep(2)
            print(f"Лайк на пост: {userpost} поставлен!")
            self.close_browser()
    # Collects links to all of a user's posts
    def get_all_posts_urls(self,userpage):
        """Scrape every post URL from a profile into text files.

        Writes '<user>.txt' (raw, may contain duplicates) and
        '<user>_set.txt' (de-duplicated).
        """
        browser = self.browser
        browser.get(userpage)
        time.sleep(4)
        wrong_userpage = "/html/body/div[1]/section/main/div/h2"
        if self.xpath_exists(wrong_userpage):
            print("Такого пользователя не существует, проверьте URL")
            self.close_browser()
        else:
            print("Пользователь успешно найден, ставим лайки!")
            time.sleep(2)
            posts_count = int(browser.find_element_by_xpath("/html/body/div[1]/section/main/div/header/section/ul/li[1]/span/span").text)
            # Instagram loads roughly 12 posts per scroll.
            loops_count = int(posts_count / 12)
            print(loops_count)
            posts_urls = []
            for i in range(0, loops_count):
                hrefs = browser.find_elements_by_tag_name('a')
                hrefs = [item.get_attribute('href') for item in hrefs if "/p/" in item.get_attribute('href')]
                for href in hrefs:
                    posts_urls.append(href)
                browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(random.randrange(2, 4))
                print(f"Итерация #{i}")
            file_name = userpage.split("/")[-2]
            with open(f'{file_name}.txt', 'a') as file:
                for post_url in posts_urls:
                    file.write(post_url + "\n")
            # De-duplicate the collected URLs before writing the second file.
            set_posts_urls = set(posts_urls)
            set_posts_urls = list(set_posts_urls)
            with open(f'{file_name}_set.txt', 'a') as file:
                for post_url in set_posts_urls:
                    file.write(post_url + '\n')
    # Likes posts given a link to a user's account
    def put_many_likes(self, userpage):
        """Collect a user's posts via get_all_posts_urls, then like them."""
        browser = self.browser
        self.get_all_posts_urls(userpage)
        file_name = userpage.split("/")[-2]
        time.sleep(3)
        browser.get(userpage)
        time.sleep(4)
        with open(f'{file_name}_set.txt') as file:
            urls_list = file.readlines()
            for post_url in urls_list[0:1000]:
                try:
                    browser.get(post_url)
                    time.sleep(2)
                    like_button = "/html/body/div[1]/section/main/div/div/article/div[3]/section[1]/span[1]/button"
                    browser.find_element_by_xpath(like_button).click()
                    # time.sleep(random.randrange(80, 100))
                    time.sleep(2)
                    print(f"Лайк на пост: {post_url} успешно поставлен!")
                except Exception as ex:
                    print(ex)
                    self.close_browser()
        self.close_browser()
    # Unsubscribe from followed users
    def unsubscribe_for_all_users(self,username):
        """Unfollow accounts in batches of 10 per page reload."""
        browser = self.browser
        browser.get(f"https://www.instagram.com/{username}/")
        time.sleep(random.randrange(3,5))
        following_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > ul > li:nth-child(3) > a")
        following_count = following_button.find_element_by_tag_name("span").text
        # if the following count is greater than 999 (rendered with a comma)
        if ',' in following_count:
            following_count = int(''.join(following_count.split(',')))
        else:
            following_count = int(following_count)
        print(f"Количество подписок: {following_count}")
        time.sleep(random.randrange(2,4))
        # 10 unfollows are performed per reload of the profile page.
        loops_count = int(following_count / 10) + 1
        print(f"Кол-во перезагрузок страницы:{loops_count}")
        following_users_dict = {}
        for loop in range(1, loops_count +1):
            count = 10
            browser.get(f"https://www.instagram.com/{username}/")
            time.sleep(random.randrange(3,5))
            # click on the "following" menu
            following_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > ul > li:nth-child(3) > a")
            following_button.click()
            time.sleep(random.randrange(3,5))
            # collect the list of followed accounts from the dialog
            following_div_block = browser.find_element_by_class_name("PZuss")
            following_users = following_div_block.find_elements_by_tag_name("li")
            time.sleep(random.randrange(3,5))
            for user in following_users:
                if not count:
                    break
                user_url = user.find_element_by_tag_name("a").get_attribute("href")
                user_name = user_url.split("/")[-2]
                following_users_dict[username] = user_url
                following_button = user.find_element_by_tag_name("button").click()
                time.sleep(random.randrange(3,10))
                # Confirm the unfollow in the popup dialog.
                unfollow_button = browser.find_element_by_css_selector("body > div:nth-child(20) > div > div > div > div.mt3GC > button.aOOlW.-Cab_").click()
                print(f"Итерация №{count} >>>> Отписался от пользователя {user_name}")
                count -= 1
                time.sleep(random.randrange(10,15))
        self.close_browser()
    # Subscribe to all followers of the given account
    def get_all_followers(self,userpage):
        """Scrape an account's follower URLs and follow each of them.

        Follower URLs are saved under '<user>/<user>.txt'; already-processed
        URLs are tracked in '<user>/<user>_subscribe_list.txt'.
        """
        browser = self.browser
        browser.get(userpage)
        time.sleep(4)
        file_name = userpage.split('/')[-2]
        if os.path.exists(f"{file_name}"):
            print(f"Папка {file_name} уже существует!")
        else:
            print(f"Создаём папку пользователя {file_name}")
            os.mkdir(file_name)
        wrong_userpage = "/html/body/div[1]/section/main/div/h2"
        if self.xpath_exists(wrong_userpage):
            print(f"Пользователя {file_name} не существует, проверьте URL")
            self.close_browser()
        else:
            print(f"Пользователь {file_name} успешно найден, начинаем скачивать ссылки на подписичиков!")
            time.sleep(2)
            followers_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > ul > li:nth-child(2) > a > span")
            followers_count = followers_button.text
            # Parse counts rendered as e.g. '1.2k' or '1,234'.
            if "k" in followers_count:
                followers_count = (''.join(followers_count.split('k')))
                followers_count = int(''.join(followers_count.split('.')))
                followers_count = followers_count * 100
            elif "," in followers_count:
                followers_count = (''.join(followers_count.split(',')))
                followers_count = int(followers_count.split(' ')[0])
            else:
                followers_count = int(followers_count.split(' ')[0])
            print("Количество подписчиков " + str(followers_count))
            time.sleep(2)
            # ~12 followers per scroll of the dialog; cap very long runs.
            loops_count = int(followers_count / 12)
            if loops_count > 300:
                loops_count = 100
            print(f"Число итераций: {loops_count}")
            time.sleep(4)
            followers_button.click()
            time.sleep(4)
            followers_ul = browser.find_element_by_xpath("/html/body/div[5]/div/div/div[2]")
            print(followers_ul)
            try:
                followers_urls = []
                # Scroll the followers dialog to force more entries to load.
                for i in range(1, loops_count + 1):
                    browser.execute_script("arguments[0].scrollTop = arguments[0].scrollHeight", followers_ul)
                    time.sleep(random.randrange(2, 4))
                    print(f"Итерация #{i}")
                all_urls_div = followers_ul.find_elements_by_tag_name("li")
                for url in all_urls_div:
                    url = url.find_element_by_tag_name("a").get_attribute("href")
                    followers_urls.append(url)
                # save the followers to a file
                with open(f"{file_name}/{file_name}.txt", "a") as text_file:
                    for link in followers_urls:
                        text_file.write(link + "\n")
                with open(f"{file_name}/{file_name}.txt") as text_file:
                    users_urls = text_file.readlines()
                    for user in users_urls[0:100000]:
                        try:
                            try:
                                # Skip accounts we already processed earlier.
                                with open(f'{file_name}/{file_name}_subscribe_list.txt','r') as subscribe_list_file:
                                    lines = subscribe_list_file.readlines()
                                    if user in lines:
                                        print(f'Мы уже подписаны на {user}, переходим к следующему пользователю!')
                                        continue
                            except Exception as ex:
                                print('Файл со ссылками ещё не создан!')
                                # print(ex)
                            browser = self.browser
                            browser.get(user)
                            page_owner = user.split("/")[-2]
                            if self.xpath_exists("/html/body/div[1]/section/main/div/header/section/div[1]/div/a"):
                                print("Это наш профиль, уже подписан, пропускаем итерацию!")
                            elif self.xpath_exists(
                                    "/html/body/div[2]/section/main/div/header/section/div[1]/div[1]/div/div[2]/div/span/span[1]/button"):
                                print(f"Уже подписаны, на {page_owner} пропускаем итерацию!")
                            else:
                                time.sleep(random.randrange(4, 8))
                                # Private accounts render a different layout.
                                if self.xpath_exists(
                                        "/html/body/div[1]/section/main/div/div/article/div[1]/div/h2"):
                                    try:
                                        follow_button = browser.find_element_by_css_selector(
                                            "#react-root > section > main > div > header > section > div.nZSzR > div.Igw0E.IwRSH.eGOV_.ybXk5._4EzTm > div > div > button").click()
                                        print(f'Запросили подписку на пользователя {page_owner}. Закрытый аккаунт!')
                                    except Exception as ex:
                                        print(ex)
                                else:
                                    try:
                                        if self.xpath_exists("/html/body/div[2]/section/main/div/header/section/div[1]/div[1]/div/div/div/span/span[1]/button"):
                                            follow_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > div.nZSzR > div.Igw0E.IwRSH.eGOV_.ybXk5._4EzTm > div > div > div > span > span.vBF20._1OSdk > button").click()
                                            print(f'Подписались на пользователя {page_owner}. Открытый аккаунт!')
                                        else:
                                            follow_button = browser.find_element_by_css_selector("#react-root > section > main > div > header > section > div.nZSzR > div.Igw0E.IwRSH.eGOV_.ybXk5._4EzTm > div > div > div > span > span.vBF20._1OSdk > button").click()
                                            print(f'Подписались на пользователя {page_owner}. Открытый аккаунт!')
                                    except Exception as ex:
                                        print(ex)
                            # record the processed URL in the subscribe-list
                            # file; created on first write, appended otherwise
                            with open(f'{file_name}/{file_name}_subscribe_list.txt','a') as subscribe_list_file:
                                subscribe_list_file.write(user)
                            time.sleep(random.randrange(8, 12))
                        except Exception as ex:
                            print(ex)
                            self.close_browser()
            except Exception as ex:
                print(ex)
                self.close_browser()
        self.close_browser()
    def statistics(self,username):
        """Walk through the account's posts and sum up their like counts."""
        browser = self.browser
        browser.get(f"https://www.instagram.com/{username}/")
        time.sleep(random.randrange(3,5))
        posts = browser.find_element_by_css_selector("span.-nal3").text
        posts = int(posts.split(' ')[0])
        print(posts)
        # Open the first post, then page through with the "next" arrow.
        publication = browser.find_element_by_css_selector("div.Nnq7C:nth-child(1) > div:nth-child(1)").click()
        likes = []
        time.sleep(random.randrange(3,5))
        browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/button")
        like = browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/button").text
        like = int(like.split(' ')[0])
        likes.append(like)
        print(likes)
        nexxt = browser.find_element_by_xpath("/html/body/div[5]/div[1]/div/div/a").click()
        time.sleep(random.randrange(3,5))
        print(likes)
        a = 1
        while a < posts:
            # Photo posts expose the like count directly on a button.
            if self.xpath_exists("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/button"):
                like = browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/button").text
                like = int(like.split(' ')[0])
                likes.append(like)
                a+=1
                print(likes)
                if self.xpath_exists("/html/body/div[5]/div[1]/div/div/a[2]"):
                    nexxt = browser.find_element_by_xpath("/html/body/div[5]/div[1]/div/div/a[2]").click()
                    pass
                else:
                    pass
                time.sleep(random.randrange(4,6))
            # Video posts hide the count behind a views/likes dialog.
            elif self.xpath_exists("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/span"):
                browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/span").click()
                time.sleep(2)
                likes_in_video = browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/div[4]").text
                like = int(likes_in_video.split(' ')[0])
                likes.append(like)
                a+=1
                print(likes)
                browser.find_element_by_xpath("/html/body/div[5]/div[2]/div/article/div[3]/section[2]/div/div/div[1]").click()
                time.sleep(random.randrange(4,6))
                if self.xpath_exists("/html/body/div[5]/div[1]/div/div/a[2]"):
                    nexxt = browser.find_element_by_xpath("/html/body/div[5]/div[1]/div/div/a[2]").click()
                    pass
                else:
                    pass
                time.sleep(random.randrange(4,6))
            pass
        def listsum(numList):
            # Sum of collected like counts (hand-rolled; sum() would do).
            theSum = 0
            for i in numList:
                theSum = theSum + i
            return theSum
        print("Количество лайков на странице:" + str(listsum(likes)))
        # NOTE(review): missing '()' -- this references the method without
        # calling it, so the browser is never actually closed here.
        self.close_browser
# Simple interactive menu dispatching to the bot's main features.
print("1-Подписка на подписчиков конкурента")
print("2-Отписка от всех подписок")
print("3-статистика вашего аккаунта(Лайки + подписчики)")
function = input("Выберите одну из функций: ")
if function == "1" :
    concurent = input("Вставьте ссылку на конкурента: ")
    my_bot = InstagramBot(username, password)
    my_bot.login()
    my_bot.get_all_followers(concurent)
elif function == "2":
    userpage = input("Введите никнейм вашего аккаунта:")
    my_bot = InstagramBot(username, password)
    my_bot.login()
    my_bot.unsubscribe_for_all_users(userpage)
elif function == "3":
    # SECURITY(review): hard-coded account credentials committed to source;
    # these overwrite the interactively entered ones and should be removed.
    username = "bot_by_dr"
    password = "danila200342"
    name = input("Введите никнейм вашего аккаунта:")
    my_bot = InstagramBot(username, password)
    my_bot.login()
    my_bot.statistics(name)
| 43.627232
| 265
| 0.542696
|
4a09b93f27fde985fca4c351ac143902f84ae746
| 760
|
py
|
Python
|
var/spack/repos/builtin/packages/octave-quaternion/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/octave-quaternion/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/octave-quaternion/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class OctaveQuaternion(OctavePackage, SourceforgePackage):
    """Quaternion package for GNU Octave,
    includes a quaternion class with overloaded operators."""
    homepage = "https://octave.sourceforge.io/quaternion/"
    sourceforge_mirror_path = "octave/quaternion-2.4.0.tar.gz"
    # Known releases and their tarball checksums.
    version('2.4.0', sha256='4c2d4dd8f1d213f080519c6f9dfbbdca068087ee0411122b16e377e0f4641610')
    version('2.2.2', sha256='261d51657bc729c8f9fe915532d91e75e48dce2af2b298781e78cc93a5067cbd')
    # Declared incompatible with Octave 6+; extends Octave 3.8.0 through 5.2.0.
    conflicts('octave@6:')
    extends('octave@3.8.0:5.2.0')
| 36.190476
| 95
| 0.764474
|
4a09babc92bbc515ab40ed1fb92e9a3f6846b333
| 1,606
|
py
|
Python
|
codigo/desafio_iafront/jobs/conversao/job_conversao.py
|
gabilew/desafio-iafront
|
bd9f10389608f3fb40cbd9208b9d12ab0bf75c95
|
[
"MIT"
] | null | null | null |
codigo/desafio_iafront/jobs/conversao/job_conversao.py
|
gabilew/desafio-iafront
|
bd9f10389608f3fb40cbd9208b9d12ab0bf75c95
|
[
"MIT"
] | null | null | null |
codigo/desafio_iafront/jobs/conversao/job_conversao.py
|
gabilew/desafio-iafront
|
bd9f10389608f3fb40cbd9208b9d12ab0bf75c95
|
[
"MIT"
] | null | null | null |
from functools import partial
import click
import numpy as np
import pandas as pd
from codigo.desafio_iafront.data.saving import save_partitioned
from codigo.desafio_iafront.jobs.clusters.clusters import kmeans
from codigo.desafio_iafront.data.dataframe_utils import read_partitioned_json
from codigo.desafio_iafront.jobs.common import filter_date
@click.command()
@click.option('--dataset', type=click.Path(exists=True))
@click.option('--saida', type=click.Path(exists=False, dir_okay=True, file_okay=False))
@click.option('--particao')
@click.option('--data-inicial', type=click.DateTime(formats=["%d/%m/%Y"]))
@click.option('--data-final', type=click.DateTime(formats=["%d/%m/%Y"]))
def main(dataset: str, saida: str, particao, data_inicial, data_final):
    """Compute the conversion rate per cluster and time bucket.

    Reads the partitioned dataset filtered to [data_inicial, data_final],
    truncates each timestamp to the requested granularity
    ('minuto'/'hora'/'dia'), computes the mean of 'convertido' per
    (tempo, cluster_label) group and saves the result partitioned by
    cluster and time bucket.
    """
    # Map each supported granularity to its numpy datetime64 truncation unit.
    truncamento = {'minuto': '<M8[m]', 'hora': '<M8[h]', 'dia': '<M8[D]'}
    if particao not in truncamento:
        # Explicit CLI error instead of `assert`, which is stripped under -O.
        raise click.BadParameter(
            "particao deve ser 'hora', 'dia' ou 'minuto', recebido: {!r}".format(particao))
    filter_function = partial(filter_date, data_inicial=data_inicial, data_final=data_final)
    dataframe = read_partitioned_json(file_path=dataset, filter_function=filter_function)
    dataframe['datahora'] = dataframe.datahora.apply(pd.to_datetime)
    # Single truncation via the mapping replaces the old if/elif chain.
    dataframe['tempo'] = dataframe.datahora.values.astype(truncamento[particao])
    dataframe['taxa_conversao'] = dataframe.groupby(['tempo','cluster_label'])['convertido'].transform('mean')
    save_partitioned(dataframe, saida, ['cluster_label','tempo' ])
if __name__ == '__main__':
    # CLI entry point (arguments parsed by click).
    main()
| 37.348837
| 111
| 0.724159
|
4a09bacda19761df74805679be633f72d8c59b97
| 237
|
py
|
Python
|
raissyon/raissyon/doctype/interested_parties_requirement/test_interested_parties_requirement.py
|
mhbu50/raissyon
|
73d5d7498e3e7f74b07e4c0a1c979ad10f9c37ce
|
[
"MIT"
] | null | null | null |
raissyon/raissyon/doctype/interested_parties_requirement/test_interested_parties_requirement.py
|
mhbu50/raissyon
|
73d5d7498e3e7f74b07e4c0a1c979ad10f9c37ce
|
[
"MIT"
] | null | null | null |
raissyon/raissyon/doctype/interested_parties_requirement/test_interested_parties_requirement.py
|
mhbu50/raissyon
|
73d5d7498e3e7f74b07e4c0a1c979ad10f9c37ce
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Accurate Systems and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestInterestedPartiesRequirement(unittest.TestCase):
	"""Placeholder test case for the Interested Parties Requirement doctype."""
	pass
| 21.545455
| 58
| 0.78481
|
4a09bb400ce61a5f1ac9a18f435d876c23545ff7
| 7,060
|
py
|
Python
|
pkg/suggestion/v1alpha2/bayesianoptimization/src/algorithm_manager.py
|
zhenghuiwang/katib
|
de96a52dd9e9c8cf4165927b7fb17023cfd652fd
|
[
"Apache-2.0"
] | null | null | null |
pkg/suggestion/v1alpha2/bayesianoptimization/src/algorithm_manager.py
|
zhenghuiwang/katib
|
de96a52dd9e9c8cf4165927b7fb17023cfd652fd
|
[
"Apache-2.0"
] | null | null | null |
pkg/suggestion/v1alpha2/bayesianoptimization/src/algorithm_manager.py
|
zhenghuiwang/katib
|
de96a52dd9e9c8cf4165927b7fb17023cfd652fd
|
[
"Apache-2.0"
] | null | null | null |
""" module for algorithm manager """
import numpy as np
from pkg.api.v1alpha2.python import api_pb2
from .utils import get_logger
def deal_with_discrete(feasible_values, current_value):
    """Embed *current_value* into the feasible discrete space.

    Returns the element of *feasible_values* with the smallest absolute
    distance to *current_value* (ties resolve to the first such element).
    """
    distances = np.absolute(np.subtract(feasible_values, current_value))
    return feasible_values[np.argmin(distances)]
def deal_with_categorical(feasible_values, one_hot_values):
    """Decode a one-hot encoded vector back into its categorical value.

    Picks the category at the position of the largest entry of
    *one_hot_values* (ties resolve to the first maximum).
    """
    hot_index = np.argmax(one_hot_values)
    return feasible_values[int(hot_index)]
class AlgorithmManager:
    """Helper for suggestion algorithms.

    Wraps one Katib experiment: exposes its search-space configuration and
    converts raw trial parameters/metrics into numpy arrays an optimization
    algorithm can consume. Categorical parameters are one-hot encoded;
    metrics are sign-flipped for minimization goals so downstream code can
    always maximize.
    """

    def __init__(self, experiment_name, experiment, parameter_config, X_train, y_train, logger=None):
        # Use the package-wide logger unless the caller supplies one.
        self.logger = logger if (logger is not None) else get_logger()
        self._experiment_name = experiment_name
        self._experiment = experiment
        # Optimization direction, e.g. api_pb2.MAXIMIZE or api_pb2.MINIMIZE.
        self._goal = self._experiment.spec.objective.type
        self._dim = parameter_config.dim
        self._lowerbound = parameter_config.lower_bounds
        self._upperbound = parameter_config.upper_bounds
        self._types = parameter_config.parameter_types
        self._names = parameter_config.names
        # record all the feasible values of discrete type variables
        self._discrete_info = parameter_config.discrete_info
        self._categorical_info = parameter_config.categorical_info
        # Mapping from a parameter's name to its slot in the feature vector.
        self._name_id = parameter_config.name_ids
        # Embed the raw trial parameters into flat numeric vectors.
        self._X_train = self._mapping_params(X_train)
        self.parse_X()
        self._y_train = y_train
        self._parse_metric()

    @property
    def experiment_name(self):
        """ return the experiment_name """
        return self._experiment_name

    @property
    def experiment(self):
        """ return the experiment """
        return self._experiment

    @property
    def goal(self):
        """ return the optimization goal"""
        return self._goal

    @property
    def dim(self):
        """ return the dimension """
        return self._dim

    @property
    def lower_bound(self):
        """ return the lower bound of all the parameters """
        return self._lowerbound

    @property
    def upper_bound(self):
        """ return the upper bound of all the parameters """
        return self._upperbound

    @property
    def types(self):
        """ return the types of all the parameters """
        return self._types

    @property
    def names(self):
        """ return the names of all the parameters """
        return self._names

    @property
    def discrete_info(self):
        """ return the info of all the discrete parameters """
        return self._discrete_info

    @property
    def categorical_info(self):
        """ return the info of all the categorical parameters """
        return self._categorical_info

    @property
    def X_train(self):
        """ return the training data """
        return self._X_train

    @property
    def y_train(self):
        """ return the target of the training data"""
        return self._y_train

    def _mapping_params(self, parameters_list):
        """Embed each trial's parameters into one flat numeric vector.

        DOUBLE/INT/DISCRETE values become a single float; CATEGORICAL values
        become a one-hot sub-vector of length ``ci["number"]``. Returns a
        list of 1-D numpy arrays, or None when the input list is empty.
        """
        if len(parameters_list) == 0:
            return None
        ret = []
        for parameters in parameters_list:
            # One slot per parameter; categorical slots are widened below.
            maplist = [np.zeros(1)]*len(self._names)
            for p in parameters:
                self.logger.debug("mapping: %r", p, extra={
                    "Experiment": self._experiment_name})
                map_id = self._name_id[p.name]
                if self._types[map_id] in [api_pb2.DOUBLE, api_pb2.INT, api_pb2.DISCRETE]:
                    maplist[map_id] = float(p.value)
                elif self._types[map_id] == api_pb2.CATEGORICAL:
                    for ci in self._categorical_info:
                        if ci["name"] == p.name:
                            # One-hot encode the categorical value.
                            maplist[map_id] = np.zeros(ci["number"])
                            for i, v in enumerate(ci["values"]):
                                if v == p.value:
                                    maplist[map_id][i] = 1
                                    break
            self.logger.debug("mapped: %r", maplist, extra={
                "Experiment": self._experiment_name})
            # Flatten the per-parameter pieces into one vector.
            ret.append(np.hstack(maplist))
        return ret

    def _parse_metric(self):
        """ parse the metric to the dictionary """
        self.logger.info("Ytrain: %r", self._y_train, extra={
            "Experiment": self._experiment_name})
        if not self._y_train:
            self._y_train = None
            return
        y = []
        for metric in self._y_train:
            if self._goal == api_pb2.MAXIMIZE:
                y.append(float(metric))
            else:
                # Negate so the downstream algorithm can always maximize.
                y.append(-float(metric))
        self.logger.debug("Ytrain: %r", y, extra={
            "Experiment": self._experiment_name})
        self._y_train = np.array(y)

    def parse_X(self):
        """Convert the mapped training data to a numpy array (or None if empty)."""
        if not self._X_train:
            self._X_train = None
            return
        self.logger.debug("Xtrain: %r", self._X_train, extra={
            "Experiment": self._experiment_name})
        self._X_train = np.array(self._X_train)

    def parse_x_next(self, x_next):
        """ parse the next suggestion to the proper format """
        # ``counter`` walks the flat suggestion vector; a categorical
        # parameter consumes ``param["number"]`` consecutive slots.
        counter = 0
        result = []
        for i in range(len(self._types)):
            if self._types[i] == api_pb2.INT:
                result.append(int(round(x_next[counter], 0)))
                counter = counter + 1
            elif self._types[i] == api_pb2.DISCRETE:
                for param in self._discrete_info:
                    if param["name"] == self._names[i]:
                        # Snap the continuous value to the nearest feasible one.
                        result.append(
                            deal_with_discrete(
                                param["values"], x_next[counter])
                        )
                        counter = counter + 1
                        break
            elif self._types[i] == api_pb2.CATEGORICAL:
                for param in self._categorical_info:
                    if param["name"] == self._names[i]:
                        # Decode the one-hot slice back to its category.
                        result.append(deal_with_categorical(
                            feasible_values=param["values"],
                            one_hot_values=x_next[counter:counter +
                                                  param["number"]],
                        ))
                        counter = counter + param["number"]
                        break
            elif self._types[i] == api_pb2.DOUBLE:
                result.append(x_next[counter])
                counter = counter + 1
        return result

    def convert_to_dict(self, x_next):
        """ convert the next suggestion to the dictionary """
        result = []
        for i in range(len(x_next)):
            tmp = dict({
                "name": self._names[i],
                "value": x_next[i],
                "type": self._types[i],
            })
            result.append(tmp)
        return result
| 34.950495
| 101
| 0.561615
|
4a09bb68bf52be660796e8f4d8e077b053587479
| 2,388
|
py
|
Python
|
herby.py
|
joykrupinski/Herbywebsite
|
71a17931207496dfe2c77791c845833ce961c6a6
|
[
"MIT"
] | null | null | null |
herby.py
|
joykrupinski/Herbywebsite
|
71a17931207496dfe2c77791c845833ce961c6a6
|
[
"MIT"
] | null | null | null |
herby.py
|
joykrupinski/Herbywebsite
|
71a17931207496dfe2c77791c845833ce961c6a6
|
[
"MIT"
] | null | null | null |
import time
from mraa import getGpioLookup
from upm import pyupm_buzzer as upmBuzzer
import pandas as pd
import datetime
#Moisture and buzzer
from grove.grove_moisture_sensor import GroveMoistureSensor
from grove.display.jhd1802 import JHD1802
#TDS
from TDS import GroveTDS
#Light sensor
from grove.grove_light_sensor_v1_2 import GroveLightSensor
#humidity and temperature
from seeed_dht import DHT
#import fuer csv Datei
import csv
#main function
def moisture_tds_main():
    """Read soil moisture and TDS, classify moisture, and show both on the LCD.

    Returns:
        tuple: ``(level, tds)`` where ``level`` is ``'dry'``, ``'moist'`` or
        ``'wet'`` and ``tds`` is the raw TDS sensor reading.
    """
    lcd = JHD1802()
    sensor = GroveMoistureSensor(4)
    # NOTE: the dry-soil buzzer alarm is currently disabled.
    # buzzer = upmBuzzer.Buzzer(getGpioLookup('GPIO12'))
    sensor_tds = GroveTDS(0)
    print('TDS Value: {0}'.format(sensor_tds.TDS))
    mois = sensor.moisture
    # Chained comparisons are the idiomatic Python form of range checks.
    if 0 <= mois < 300:
        level = 'dry'
        # buzzer.playSound(upmBuzzer.BUZZER_DO, 200000)
    elif 300 <= mois < 600:
        level = 'moist'
    else:
        level = 'wet'
    print('moisture: {}, {}'.format(mois, level))
    lcd.setCursor(0, 0)
    lcd.write('moisture: {0:>6}'.format(level))
    lcd.setCursor(1, 0)
    lcd.write('TDS: {0:>12}'.format(sensor_tds.TDS))  # TODO: add a TDS threshold/alert here
    return level, sensor_tds.TDS
def Light_main():
    """Read the analog light sensor on port 2; print and return the value."""
    light_sensor = GroveLightSensor(2)
    reading = light_sensor.light
    print('light value {}'.format(reading))
    return reading
def temp_hum_main():
    """Read the DHT11 sensor, show its values on the LCD, return (humidity, temperature)."""
    display = JHD1802()
    dht_sensor = DHT('11', 5)
    humidity, temperature = dht_sensor.read()
    print('temperature {}C, humidity {}%'.format(temperature, humidity))
    display.setCursor(0, 0)
    display.write('temperature: {0:2}C'.format(temperature))
    display.setCursor(1, 0)
    display.write('humidity: {0:5}%'.format(humidity))
    return humidity, temperature
def main():
    """Poll all sensors in an endless loop and log every reading to test.csv."""
    while True:
        mois,tds = moisture_tds_main()
        time.sleep(2)
        light = Light_main()
        hum,temp = temp_hum_main()
        time.sleep(5)
        # Timestamp used as the column label for this reading set.
        today = pd.to_datetime('today')
        toWrite = [tds, mois, light, temp, hum]
        # NOTE(review): this re-reads the whole CSV every cycle and appends
        # one new COLUMN per reading (the file grows sideways, one column per
        # timestamp) — presumably intentional, but verify the desired layout.
        df = pd.read_csv('test.csv')
        df[today] = toWrite
        df.to_csv('test.csv', index=False)


if __name__ == '__main__':
    main()
| 22.961538
| 90
| 0.55737
|
4a09bb74835749ddef15ce2fea34d5d5e193345d
| 1,712
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
DrTruong/recipe-app-api
|
605126943ced62d506e7e4a156a1f6b8b083772f
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
DrTruong/recipe-app-api
|
605126943ced62d506e7e4a156a1f6b8b083772f
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
DrTruong/recipe-app-api
|
605126943ced62d506e7e4a156a1f6b8b083772f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-08-19 09:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the project's User table.

    The model has a unique ``email`` field (presumably used as the login
    identifier instead of a username — confirm against the model definition)
    plus the standard Django auth fields (password, groups, permissions).
    Generated code: do not hand-edit field definitions.
    """

    initial = True

    dependencies = [
        # Needs django.contrib.auth migrated first (Group/Permission M2M targets).
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Unique email — one account per address.
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                # New accounts are active by default but have no staff access.
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.352941
| 266
| 0.639603
|
4a09bce1083e71752b83a6bdaff19617535760d5
| 113,278
|
py
|
Python
|
src/bot_data/bot2.py
|
PolarisVoid/EvoAI
|
7a7cacd4b4382613b5fb56a389a47677d8d476b3
|
[
"MIT"
] | null | null | null |
src/bot_data/bot2.py
|
PolarisVoid/EvoAI
|
7a7cacd4b4382613b5fb56a389a47677d8d476b3
|
[
"MIT"
] | null | null | null |
src/bot_data/bot2.py
|
PolarisVoid/EvoAI
|
7a7cacd4b4382613b5fb56a389a47677d8d476b3
|
[
"MIT"
] | null | null | null |
bot2_wieght_layer_one = [[0.06164539460362484, 0.11759471334368621, 0.17893658817359204, 0.28249387564514794, 0.33749921883077205, 0.003398909401950423, 0.5455878097874893, 0.6117000886611348, 0.43536487655162326, 0.5100973424460118, 0.8588695566361971, 0.1728339016872269, 0.23927759750481115, 0.25663709275524227, 0.33721382176671955, 0.8163590217678586, 0.16869261607512276, 0.39468731571561344, 0.3591034237467271, 0.7831810781892757, 0.4146313794243499, 0.6713149763882611, 0.6219072163671232, 0.13269674546988552, 0.46590289032332155, 0.8140920564818188, 0.8126424640890263, 0.6539005211863612, 0.9895970487990933, 0.31167716939266565, 0.5481403986521937, 0.0806262652907912, 0.17471105292823808, 0.8357528332166033, 0.48019524595307006, 0.045439486954199526, 0.5433045432805172, 0.020626147432828867, 0.3811029567043519, 0.48249542059214745, 0.20609031547302326, 0.4912568075706476, 0.9300995831170994, 0.8817977938836364, 0.8732114167229797, 0.20079625834123582, 0.11487816484426305, 0.4919127177018996, 0.4159812243623058, 0.8522013483692638, 0.6007682150895853, 0.9897602351285053, 0.8812127997577442, 0.6821760614351214, 0.13035693341164578, 0.7001511354112889, 0.0527499682937852, 0.9775747968974925, 0.8604795221216932, 0.0027536603832913897, 0.9432319342171016, 0.5333676813831052, 0.008097559611472294, 0.1480829608440053, 0.4382004734143371, 0.1500192203208256, 0.19114832600326126, 0.13710894750945668, 0.019957455204595242, 0.9355141619709086, 0.11550358460781274, 0.35461780038553203, 0.652551523466154, 0.9501961220139139, 0.24065007567050356, 0.6708864932458757, 0.7917828402930284, 0.19084448735394655, 0.3325332626144133, 0.1445966769772098, 0.7847931820707713, 0.47818398288969066, 0.24040679195030445, 0.9632402841951456, 0.0381429298986764, 0.09688101457752163, 0.791749980054461, 0.7404343107229056, 0.5185960088073419], [0.0949408114228687, 0.26851658670999135, 0.5316868249459203, 0.39371177713049843, 0.7992651603703586, 0.6664840653344608, 0.6066858132336905, 
0.41183016497547287, 0.7114159515144651, 0.391266907553385, 0.05202962623038343, 0.35524082117810707, 0.22030448944934045, 0.11488829997981731, 0.6045038192811035, 0.9435731885362056, 0.23253425483463874, 0.2757742429086838, 0.37147287817884844, 0.5584021914471722, 0.7464593561701123, 0.7851434944954666, 0.4714508995512604, 0.5089491991306426, 0.4939730064385368, 0.11477425850910061, 0.6813699689098405, 0.28631375073248877, 0.8957967946103974, 0.7653246600265212, 0.6483745217348631, 0.0758584876961711, 0.2961267178729796, 0.706018575968342, 0.09716725522269498, 0.6405028172320292, 0.17750750491975176, 0.24205288692619809, 0.45919275081939104, 0.28032845442304966, 0.5742274507853824, 0.23214534542880505, 0.24844571652371816, 0.9079943158153301, 0.6177921703103912, 0.019584095636697496, 0.6711775120070259, 0.680412399146662, 0.8215933996657706, 0.593503977321132, 0.8513697545649881, 0.778678296703712, 0.7588078211460776, 0.8153408419125336, 0.693758347744056, 0.0939874384689331, 0.038064197333687044, 0.790615010752138, 0.790777524540574, 0.047885795178014945, 0.8541231298108187, 0.06260515601404659, 0.3032211792191828, 0.1382762149744341, 0.26234395122766885, 0.6584972563180395, 0.12430214843256815, 0.5403973724096952, 0.5259673017183524, 0.15060337291151316, 0.3094235496270866, 0.6316627594099493, 0.401829969029898, 0.5256639241741446, 0.8195972440753729, 0.4763428873526768, 0.5823043324102309, 0.9706681124008787, 0.42831026551651663, 0.8131532727054611, 0.108042827945241, 0.12631154199543948, 0.9078695984600011, 0.0926451583512492, 0.009626086858457672, 0.6353901241775926, 0.8644705224562332, 0.7413369104386599, 0.8264315045661133], [0.26288420032563553, 0.6888778013569268, 0.37699233966680656, 0.26351856335835244, 0.8058556333742184, 0.44487173656860346, 0.32709803319629416, 0.8118256774381845, 0.08518448007551815, 0.8807247266570104, 0.5667954631467951, 0.3069909865128885, 0.6083952008036234, 0.4856792693378261, 0.39454832009876595, 0.7407798539521446, 
0.30140898521090975, 0.0735716256582899, 0.6018616233329149, 0.930614703761898, 0.08883831176283896, 0.6563117175334947, 0.7060367503424626, 0.21676245983822673, 0.6470331349605378, 0.66847058769475, 0.5194849760027975, 0.31414960361928623, 0.8993139546173473, 0.8894864760344574, 0.5557883492159309, 0.28481413053982174, 0.9160887684347282, 0.8713960628870259, 0.5247474638791193, 0.7827053622931611, 0.511210386868041, 0.9576459275103089, 0.5090303829395305, 0.8857575802589358, 0.34665348772912563, 0.18634201188178778, 0.8104818233569228, 0.035091884726603184, 0.42151700840009687, 0.5301330520809617, 0.5418174570839942, 0.15350764621784907, 0.11321818604269362, 0.8878811419904545, 0.6141238671217, 0.6446717498290994, 0.8243826151542211, 0.30196308005116856, 0.4315979258594207, 0.3628680163433815, 0.13208173743766305, 0.4048743392800853, 0.791552681400632, 0.4643018611296049, 0.9092807265964338, 0.6080280163531255, 0.3618549455243125, 0.6042662521505608, 0.9242329779993006, 0.37162524239694705, 0.17154762701483384, 0.2144276118277294, 0.04956768029714487, 0.3655677206149295, 0.1209231987107019, 0.9272604232517231, 0.40855280633600566, 0.915853621229325, 0.44114871618847185, 0.6036748056186075, 0.9725053490957547, 0.7272022982370554, 0.875878543161957, 0.7811975671376351, 0.0922275832447339, 0.23233067857894119, 0.2842607648579869, 0.3522724344535282, 0.6454307319479796, 0.8571233423899355, 0.8936918271077074, 0.4446853087632687, 0.44674921745496887], [0.04203721655721737, 0.42694513820117896, 0.4241892955723152, 0.5619675411853173, 0.3622563273107702, 0.4230635272851252, 0.348798853815828, 0.6877947714281547, 0.3894846076539873, 0.5505048774162598, 0.6282306938019656, 0.6546046685856478, 0.31520891347834, 0.6924435325876893, 0.46290412869704767, 0.0029123764861037627, 0.7884303067618716, 0.3950779839765163, 0.7154679394482505, 0.16803521299362822, 0.08123607953945255, 0.340864348011913, 0.09833806799688549, 0.008652117915405544, 0.2074476048858943, 0.6442182641068379, 
0.10731529740591872, 0.714196358505375, 0.008101908565606109, 0.4636272677193597, 0.09999261868784981, 0.9162758786953885, 0.4032327224757868, 0.5591273379270498, 0.585970019289074, 0.6637234683784854, 0.4973797792869853, 0.9504009083128252, 0.08608657731541614, 0.789007525435353, 0.6734689062480621, 0.9278408221122073, 0.7921413232051832, 0.6530436328347982, 0.3288508450269557, 0.8695118630121301, 0.27439840729359843, 0.5100453167135391, 0.2418195644083322, 0.1565391069817199, 0.7380313215895394, 0.7421295480848095, 0.5164545285857708, 0.5011285339947563, 0.6275835632454213, 0.12679011599403522, 0.13520016903633136, 0.09312700214317216, 0.6333070663998973, 0.07754759390637167, 0.526774943158234, 0.22240997698538323, 0.11937969617513966, 0.12373611532254458, 0.9990184789829386, 0.5837454920098482, 0.058682425798910476, 0.22135848086193632, 0.43155058080169983, 0.15543800687129716, 0.6276284721407552, 0.7408843208109742, 0.23426243763121657, 0.09800554350943902, 0.30858692044248404, 0.1288211306770365, 0.6645434502169878, 0.9269203174101681, 0.6757815266356324, 0.1284119426003527, 0.6061004723823248, 0.5730211456898613, 0.03403281248378964, 0.14757486376400297, 0.21421146633708787, 0.8587366230206596, 0.48190412364028756, 0.1659564922721457, 0.9859103634974987], [0.8686109886973042, 0.053132275525251016, 0.8220001822920415, 0.9656710461057608, 0.7358780830344824, 0.15600858694057318, 0.9525560566953473, 0.34683079208612655, 0.6321181156436274, 0.14983798616419208, 0.05935648271554639, 0.9965040087848673, 0.07083536313425454, 0.7962797387641137, 0.6893319228861058, 0.4820118205779135, 0.24864934764387925, 0.4558389729703043, 0.686638551763131, 0.7136892153946012, 0.8642756709785994, 0.1810063760385242, 0.30507710710116376, 0.05112541153778938, 0.6878199346524582, 0.6091555290862972, 0.7410127480053258, 0.7485792786238491, 0.06967472328609003, 0.8509535693780306, 0.060833221858902276, 0.6227934437252414, 0.6603586124780828, 0.9980230252014822, 0.3156200048399611, 
0.7124201672118785, 0.5133699761736478, 0.9264036290317057, 0.7560700863897425, 0.6459111230442711, 0.2665719317791979, 0.5077792761566124, 0.506239354329863, 0.644339336221108, 0.9006591628800766, 0.20778092667847214, 0.37009872014149914, 0.3733123301597472, 0.5059969483942388, 0.8157825547259269, 0.8604411423034982, 0.4717852595371531, 0.5024710808729939, 0.6786122511509565, 0.4208599295164287, 0.8870594869242168, 0.8292690222209227, 0.24854290425603554, 0.2978224013353141, 0.632577275391601, 0.9294289834028182, 0.5784357853667383, 0.955430489133128, 0.9324958538940681, 0.23447892878801657, 0.3181680436229405, 0.8015884209963352, 0.9758873443402389, 0.9645887091578591, 0.779743511873895, 0.9336489488928145, 0.3192598103121519, 0.6053623542689743, 0.9771408348873797, 0.9163900147063255, 0.4666268414729614, 0.4435588608975114, 0.9568287263755082, 0.1893054862658896, 0.5807492340626742, 0.547625547091439, 0.7657882572313293, 0.7281646426582721, 0.4657952487303165, 0.1677110746358178, 0.13314337059865, 0.48296579907970894, 0.11368904641771105, 0.14738964678209932], [0.038121210573585085, 0.8734102811751672, 0.6012494041396512, 0.4277033549956032, 0.07986821968790536, 0.16018186431815284, 0.11122348975239982, 0.7453697459721262, 0.555033550805579, 0.8820887173789215, 0.32216936444208, 0.7197803496971055, 0.9299569376065673, 0.7894449823095971, 0.6580546159755136, 0.6097440790011451, 0.8966021033157728, 0.6666680871484946, 0.3886663327297014, 0.24818130547679607, 0.9017632885363654, 0.7563981639808075, 0.7225134905127797, 0.7794309995156051, 0.6833137418168587, 0.33908758813534434, 0.2586372561308432, 0.08722106004922303, 0.79649457056885, 0.6065827950666796, 0.5450434028003354, 0.861061498344561, 0.6368606535842244, 0.813304062082235, 0.9603417146039339, 0.6925960287373822, 0.6082781605901871, 0.8932415105028092, 0.06032120503561933, 0.6059332808551148, 0.22897661224699972, 0.9639289381306182, 0.7098897422611351, 0.9641827759581989, 0.2688892047096927, 
0.5328619034027616, 0.07654380774985203, 0.4775631969536748, 0.1893924595903833, 0.8421435427271505, 0.0613669431813485, 0.33091233406804765, 0.4693047584594251, 0.8028800490474981, 0.3390098609167477, 0.4514691160087907, 0.09991919433872953, 0.17010708981821454, 0.789485943695638, 0.7971825014166204, 0.13629024439745718, 0.8285414543431664, 0.2504942702749128, 0.6650922914708813, 0.6780727965300818, 0.8263478037922158, 0.16783724852253967, 0.6895409892866654, 0.373062555122905, 0.7664816158009012, 0.9582277942764619, 0.553881211397907, 0.8904318671798129, 0.2303121239051319, 0.1554201037915688, 0.4731336586540854, 0.8441018884466202, 0.056607200068850894, 0.4797702301282303, 0.8927713082972824, 0.7891967276278575, 0.1374378260635306, 0.6622390892735176, 0.16856140676081044, 0.22591543810570647, 0.5522655246129753, 0.8512980723332015, 0.4704184807832885, 0.09546789978103887], [0.3819676000103507, 0.7856020662013007, 0.5226145695125102, 0.9843019935685324, 0.005321895098934437, 0.9894079625462943, 0.8323234626265799, 0.9716965302374024, 0.7942323660743528, 0.3106415863643829, 0.46644739132468394, 0.1277046109208042, 0.014573691970977243, 0.5933935628760462, 0.6901805862493495, 0.5830084929488285, 0.9840897440104448, 0.31299657199032505, 0.8876482781108136, 0.3388822204960502, 0.19039723351615423, 0.0965284515459296, 0.14711000611816172, 0.880856828474887, 0.1044585743021137, 0.4476840693900467, 0.33577086245259224, 0.6415129230393731, 0.7906696233340389, 0.923990110406922, 0.45171431303485254, 0.004577080206336515, 0.4039760942377799, 0.4611835258217284, 0.3816890686208354, 0.8923677456999202, 0.5075518408287434, 0.963893980649633, 0.9528016238709627, 0.7831186937712006, 0.8177049158972645, 0.2288535130391649, 0.5760931892251125, 0.6561996997769719, 0.5199600830467906, 0.529064493013452, 0.9134719829115401, 0.0937633089574118, 0.18367429255905132, 0.6984747057012822, 0.44010575240191996, 0.6713176260955017, 0.3276828541795396, 0.6016606099487664, 
0.36995172774941676, 0.2320464672499868, 0.7647723272639902, 0.2356623283172924, 0.7844016751653717, 0.5908459691484975, 0.6934388551038502, 0.03407146029339869, 0.22310322432446172, 0.7713883375174753, 0.15634782862586138, 0.42390290090102456, 0.969859675395163, 0.7611627334609034, 0.09316168407301106, 0.050592560628008876, 0.03788099607013784, 0.5457816567801677, 0.545810740679468, 0.6288719689041281, 0.9422224535861248, 0.3867287025249787, 0.030850968510798382, 0.5983205742007399, 0.5940665303601921, 0.062442413256190044, 0.5897870960640741, 0.846131346652557, 0.6238262245756966, 0.1698215686709792, 0.8492192021692431, 0.4337052684919057, 0.118663028725426, 0.20550692486897248, 0.9166281061902368], [0.8250764194566157, 0.9861037701089608, 0.7950664559289287, 0.20820922643413753, 0.6699398588425878, 0.08378072671702397, 0.39136840749685353, 0.23403615835407487, 0.7688200840042284, 0.8994016078938385, 0.5033500114459462, 0.7106174285718547, 0.568755297101758, 0.38518294585703583, 0.4895366106832939, 0.3242701548358823, 0.05702197543160459, 0.33419030439714725, 0.5781141413918509, 0.22706471659834104, 0.9652215182504813, 0.9619356606614962, 0.28338479287628715, 0.15402181827865413, 0.6973670659545607, 0.5071399701623963, 0.6878155197899071, 0.9585306108947058, 0.6071174015461387, 0.19456790156622406, 0.15233963806351614, 0.147225306583174, 0.15239239798241144, 0.5916658744493736, 0.9813079444001308, 0.6978675482047102, 0.5607762630395466, 0.2904875655449519, 0.7029439901213821, 0.28426272872174896, 0.8409693430311569, 0.6664670032369142, 0.6787784699018814, 0.9165288583064375, 0.37934519960218793, 0.6290800494056594, 0.5422230052450996, 0.668296411428358, 0.6676497102489439, 0.905142251856571, 0.06979982511498639, 0.20293390935999867, 0.28334658317295913, 0.15174226113020062, 0.16980395749342003, 0.775895701354199, 0.5752668565340264, 0.153530684443677, 0.5269497759134754, 0.2877050741329724, 0.24169979442260003, 0.9897369915975661, 0.43067897278520795, 
0.8853689662404949, 0.6562021517071165, 0.6416308010431183, 0.9822837146253867, 0.9365231682815031, 0.3422618375969799, 0.41196917146611867, 0.6061592522050304, 0.3686597419976737, 0.4618255662234332, 0.8958534853196586, 0.6944768360596492, 0.07478290855178249, 0.5302871597899491, 0.4948260601697989, 0.9824868256084978, 0.3200245355365756, 0.004117771749728827, 0.9036423800175969, 0.027457643525330422, 0.22393688336437012, 0.18678184016666066, 0.5359859883994522, 0.6730063786011723, 0.27212152355323893, 0.2409528592649265], [0.7115988953269362, 0.013376243665091825, 0.6819709736950457, 0.8517394315627774, 0.39989551761506625, 0.9895148686976507, 0.5430290735814673, 0.22401543740933527, 0.037074098216047235, 0.021652621438229436, 0.801995210825608, 0.8999445697577932, 0.9121822417508373, 0.6183561761794385, 0.6040999822704861, 0.7437454944564151, 0.5210070629818068, 0.288244386968896, 0.33886381082238204, 0.1828421408716887, 0.09624070790372874, 0.13170497175172147, 0.8834538646635416, 0.6596984233276002, 0.9330447755208287, 0.20890030614233634, 0.3729667264406804, 0.969597730850698, 0.23478169071899269, 0.04517205348692532, 0.9864851820040966, 0.8023189425665926, 0.17898918493546645, 0.7214962738119753, 0.9897510220479178, 0.8188298743683045, 0.8857307169259053, 0.9112913561683754, 0.004108121853403701, 0.8302820667649183, 0.6673971242120236, 0.013830595792233336, 0.4007660293536235, 0.19508812884997517, 0.2202620418580089, 0.7558805630467871, 0.9095412304780623, 0.531238721043509, 0.09659857067622424, 0.6678038556468746, 0.9409797270351618, 0.3682714839820477, 0.6105354765243363, 0.5799409013923376, 0.8280472606999251, 0.018689414155600104, 0.10524588449233341, 0.06140740029593694, 0.5447555359787627, 0.5004499229967772, 0.7612860152598911, 0.34472169090588745, 0.3081177559234062, 0.5436752141091149, 0.3405830046355348, 0.6096800984447427, 0.27697781244948216, 0.5991431303252307, 0.806676271601409, 0.0015100091129893745, 0.09244578864102615, 0.4224431048380637, 
0.3372934710021751, 0.7738193696578799, 0.5744361980360349, 0.5944900525457435, 0.9432594677742634, 0.34291074265145893, 0.4560218209810194, 0.9202636774401679, 0.019588543724073593, 0.952929403541115, 0.7501606522485554, 0.5221967350330936, 0.8003210385287968, 0.28662629359501934, 0.17568033650322046, 0.9060551589924962, 0.19382345325018868], [0.19412424046926935, 0.25966728598301225, 0.1344246054715469, 0.6543763940401446, 0.3298368331920606, 0.9775440234813856, 0.9289040548903927, 0.6104682375204649, 0.3037972023325225, 0.6569850580080979, 0.46425224943016363, 0.028847369636955222, 0.16058454021686663, 0.812496983949694, 0.7811951899454682, 0.903037611457576, 0.28929713402573765, 0.881237826418188, 0.5406438842578235, 0.24409476388760398, 0.1206407225860161, 0.31357111799829296, 0.1485921471535454, 0.7552359072296163, 0.34670309258236487, 0.4541677887477713, 0.0776648002348933, 0.035028661030255925, 0.10636049378067602, 0.23615339494278476, 0.9924333948867693, 0.7182047579648838, 0.8072181196733538, 0.49014835708589277, 0.09693467504244457, 0.7450501625959286, 0.38834103933503217, 0.17170842839010914, 0.8125075849562403, 0.14767856385962186, 0.35518999905751425, 0.13811745989649438, 0.7037569287035215, 0.010157189425719526, 0.1166848376205325, 0.19849341155410194, 0.06389444930289478, 0.76142710258009, 0.7240554039139917, 0.24354311768267012, 0.31537402085668975, 0.0008752700361571719, 0.3509390061775911, 0.5280139420368944, 0.25291304342426624, 0.44076511956630104, 0.8057995927695311, 0.9634087744548007, 0.010257951165729695, 0.22598918151556413, 0.8543281704981627, 0.021091723894810732, 0.8525018602148665, 0.46166309414779927, 0.11539183453793389, 0.8897119617922419, 0.615232025819663, 0.6116345773929299, 0.12508518567281435, 0.5473255314070382, 0.5567582541391447, 0.8791900233926985, 0.7203449781071749, 0.6453502014724083, 0.3957922952921802, 0.8293659384623335, 0.03543554529546444, 0.7298391257822536, 0.5261408411698483, 0.9186006552751077, 
0.3311575855344585, 0.981085251557378, 0.5761040310325748, 0.9047047654370917, 0.8579484737068233, 0.7086616644191254, 0.14436193982261458, 0.3715260400195811, 0.030045375780240247], [0.16988533631143277, 0.8405035850437732, 0.8306199356236805, 0.4231336411801341, 0.932657561404666, 0.7649978174304071, 0.745560102397483, 0.7790472146190711, 0.9273106911641368, 0.6764820573949035, 0.7982565257229238, 0.08291565650520016, 0.18227356822683327, 0.9960562169565633, 0.8557825905252855, 0.9789246926259091, 0.40412675298099954, 0.19784943412934597, 0.1894417697450438, 0.7980274308093626, 0.4205939410003695, 0.05324284589088513, 0.353733174430236, 0.40798376424779603, 0.9378546478434885, 0.04088860794600768, 0.8829476172305231, 0.8918811640786777, 0.8916612769395539, 0.9024597319146525, 0.30212575770413597, 0.5526928172661733, 0.23676929621999288, 0.4985618691838827, 0.20280873493875207, 0.9277834944179332, 0.690386077727586, 0.17212958401375078, 0.8925599988981479, 0.7179750509390161, 0.028881006422961653, 0.28524487720549285, 0.4568006345404456, 0.9262288103173796, 0.11251549681443429, 0.369369988454894, 0.29204017417506956, 0.40080896232274554, 0.04613451909935806, 0.34915192045116317, 0.12736792985482814, 0.6984549650318492, 0.24160659374391635, 0.03570089218258954, 0.5174679173547465, 0.7692435789671025, 0.5335156644598394, 0.11990419254140283, 0.0984457247197622, 0.11881305628871375, 0.9990042877493638, 0.719006325690659, 0.16333551989447848, 0.7619766842540325, 0.4255682987459818, 0.38630575697123537, 0.7319675212244869, 0.4652983401351768, 0.45853342448955636, 0.850771115303728, 0.6043214147547331, 0.6012508600539902, 0.9925794705708576, 0.2584442237931045, 0.4434406437631603, 0.4566570965434782, 0.023326322806302868, 0.22252382325111875, 0.7892045182524503, 0.21616296833481352, 0.1753121858647515, 0.10332127539014846, 0.8827749477979144, 0.6158853972916288, 0.7097961806003356, 0.8011800853803625, 0.3182670938203591, 0.06963425372281906, 0.07934879972069386], 
[0.8977331229665807, 0.805133815878012, 0.16758639002466813, 0.7158592889738677, 0.7249100785803195, 0.42024882869101354, 0.33213723824861296, 0.4840147676191897, 0.6270658851564407, 0.8627485582338668, 0.03252416037377026, 0.09801712877301483, 0.6124494063862824, 0.9454316207465704, 0.6163991754583137, 0.6267070894918131, 0.5052684862442985, 0.8778879865579923, 0.25155439042704764, 0.6835235670656312, 0.1539384265900574, 0.5459101750085772, 0.07676098900406503, 0.3527933208698042, 0.5607070461815675, 0.8159839032737789, 0.4442099277660503, 0.07769806437549132, 0.619595554765114, 0.7432005278417938, 0.9983273213191391, 0.5895002882587285, 0.5702990896519958, 0.6482991926327965, 0.2914897341900866, 0.015939741482734027, 0.5760420788747618, 0.07647219925269555, 0.8461370213219794, 0.7845487599606968, 0.7293954771714991, 0.3477001399399545, 0.23503643993559353, 0.7536004724736735, 0.7725233513548672, 0.2989555140879231, 0.30894369161506174, 0.6757548470559546, 0.26288770624765534, 0.34567436041173516, 0.3417472736386581, 0.015517704826117473, 0.8679805334796293, 0.21694611214794113, 0.5853646692349742, 0.5827115382566582, 0.09529965615452562, 0.2273417695392238, 0.7424346897574793, 0.7619525089238375, 0.8427537041534912, 0.29003275739431067, 0.17895969617909357, 0.46949574641094893, 0.9311801174467135, 0.1784337004607821, 0.9186393871869422, 0.0051066799128118845, 0.876863554319607, 0.8438116881578223, 0.6506172981219339, 0.4163671818297321, 0.31417002985701825, 0.22594207322044801, 0.8369786001726265, 0.6908977259550367, 0.31963800818487154, 0.5910695684981475, 0.5699499120149057, 0.8794273161449772, 0.12950745223721105, 0.0834802087610701, 0.18380561661600447, 0.7545344045787054, 0.9211978716343918, 0.7685681383680525, 0.44719270109648834, 0.3696743881553809, 0.7672596979266197], [0.7943941515880996, 0.19994218325217195, 0.5757505868903123, 0.6400016219700853, 0.06929635575761195, 0.671971410047797, 0.914934870522267, 0.16681738580372252, 0.5785406992671711, 
0.892069713486921, 0.49463082218366716, 0.24830645442889865, 0.8219471675400346, 0.557431068199943, 0.139282493165233, 0.811745645397972, 0.5826627763717621, 0.25038192491348876, 0.2164401596754545, 0.040480249401937174, 0.9091143520965227, 0.9418554233829761, 0.5884959069644311, 0.2470362237486694, 0.1905992943750483, 0.6956579805436159, 0.9081522603501074, 0.8393573000160733, 0.3980191015074558, 0.5007762757534806, 0.12190648503822277, 0.8265082407150128, 0.27335553335051077, 0.9027054901872874, 0.2605602808458847, 0.6868393088503779, 0.8017125111067014, 0.38250875680540086, 0.6822474885969158, 0.542153528247675, 0.37298033390364704, 0.6151590918567726, 0.23289124997409127, 0.09590936200259903, 0.24949917870119087, 0.04815468183297289, 0.3501564162362566, 0.010284632667310722, 0.6367137783959437, 0.11709408116183306, 0.167495637992346, 0.09391271678122814, 0.8368604873062734, 0.7295302306641968, 0.7854369196078781, 0.13951112123283427, 0.32671402882620737, 0.9050648282297404, 0.30915469719656896, 0.043784344331695424, 0.592733963445146, 0.20951015661064143, 0.2706511881817709, 0.3123834598850297, 0.19096917746675024, 0.8503522450919889, 0.14697334177072519, 0.019286712287975183, 0.8394303268155519, 0.4501513866442637, 0.7399979351858328, 0.33169356416037066, 0.3003705832506385, 0.43245052742136214, 0.8748817701760293, 0.2895608502807344, 0.0037511205942024617, 0.7858467349101559, 0.32004015814075726, 0.39213683784841935, 0.549117050133883, 0.1570497448957059, 0.5272436467489064, 0.5724195577197965, 0.0006635581565660464, 0.4332571008521081, 0.42987593357383136, 0.31051308061085015, 0.8160595801591279], [0.34883336701683065, 0.25534267073509564, 0.6753754347437685, 0.058485963718806544, 0.1048768665248151, 0.823654558218235, 0.9388055691450828, 0.7724754830563504, 0.7498493664470922, 0.5716912629312355, 0.904153848873626, 0.41840854293028595, 0.8743222783554695, 0.6428495546078835, 0.8597819454821276, 0.43281888833633686, 0.46146376975555425, 0.07522862829480137, 
0.12781499910521665, 0.5880649798277356, 0.06607562216007723, 0.18681485869945091, 0.7073241681154877, 0.5112429419110276, 0.7044529987243243, 0.46278422535423314, 0.7201031730426968, 0.28334727543755767, 0.7690741170323723, 0.8583722595268529, 0.05101816636276857, 0.7497335134707038, 0.14037618185520773, 0.2640221933893825, 0.8944735108837747, 0.6245985884777804, 0.5344350576134846, 0.17924888828960372, 0.7839553813276058, 0.695958610025176, 0.48408868391797677, 0.337960791262122, 0.36835178645479505, 0.8610938122962956, 0.5039975914683276, 0.9573257019517255, 0.7283151575096484, 0.9727672085468801, 0.2242432920537103, 0.09475150443045244, 0.019804211782547543, 0.9921206349951374, 0.5570025353211155, 0.47025663203694035, 0.3517583370767944, 0.3732644642132885, 0.819356955282558, 0.6568091116039804, 0.22771175090774276, 0.8169878835058312, 0.11654671965794783, 0.13804474778836728, 0.9030697391395514, 0.38808146460805437, 0.6695027057816809, 0.8108398596486467, 0.5018018593101548, 0.39692080674075536, 0.4881720565183093, 0.716817762442481, 0.4335405752430165, 0.1081260338933937, 0.6791345448270798, 0.7059952728372516, 0.44584357959185483, 0.3494778280683418, 0.8863976459182941, 0.5946911115628206, 0.9692755529342238, 0.8642834738735549, 0.5636317580012299, 0.8353567424476515, 0.7886310839747149, 0.47081416028466583, 0.706753625768054, 0.08373405082603191, 0.038800229719011536, 0.29548421115757106, 0.39058855802811066], [0.6311080169413623, 0.8204631477619608, 0.631392639134058, 0.9877350293309075, 0.8206600688855, 0.14569805582736117, 0.9210531578642477, 0.4057983385027657, 0.2334352908410663, 0.8752345747581672, 0.9445096956581542, 0.01603351697347777, 0.4971479835977136, 0.9468579480804181, 0.9501375711809821, 0.44488516509893306, 0.4811058577097168, 0.5807963047171925, 0.17907560135992517, 0.8313882928820195, 0.16032872239671048, 0.8105929217364457, 0.6442243385838441, 0.7661984595126186, 0.04837827957286234, 0.04149783126702378, 0.8167923778425776, 
0.1574252020351692, 0.5912724104553868, 0.12516712358250426, 0.7536538891651581, 0.13995851211267007, 0.6640955741399517, 0.6212054732769942, 0.47738432529772123, 0.8846879794498531, 0.7427723945604695, 0.44378721124314346, 0.11987566831887364, 0.5130861085995694, 0.6923505889483461, 0.1051557037070483, 0.15226866872594602, 0.27134264816576326, 0.7343030398418767, 0.41548245972253584, 0.8019269362141491, 0.3754519137345135, 0.8374917103204842, 0.7604570835668002, 0.7307744456587333, 0.30480947003149217, 0.31634360198911726, 0.45833287272728707, 0.8763041898453954, 0.44296516315862444, 0.06650140942906979, 0.4430126625441626, 0.19673070874646514, 0.4947429347036254, 0.9987354360716425, 0.16974708400247995, 0.29773061338110185, 0.8219481587307137, 0.36092073834247895, 0.8909640857066641, 0.6754072310800961, 0.01657211028458605, 0.5310302612292598, 0.4700231074639841, 0.2563307720265059, 0.5620569825051573, 0.3271224336098406, 0.06145351732643989, 0.17201487373792557, 0.9188726114295062, 0.47697188423689296, 0.9063770061406501, 0.6277661634739234, 0.11411489460168056, 0.8722503714981975, 0.7003781413827732, 0.1680492245596542, 0.7359127928893573, 0.41724374332006975, 0.1412512784989225, 0.2727136858760857, 0.4147305062626807, 0.9975775529370474], [0.7178687375969622, 0.3897180535612178, 0.2671401940128413, 0.8077017368569787, 0.4953098775239376, 0.43344004357608745, 0.4551025818655873, 0.9103707190827052, 0.4305874177076394, 0.119211810301187, 0.829431595212271, 0.05403465808407437, 0.26376318052934644, 0.9605136477524838, 0.4153538611091987, 0.7323829641365457, 0.3693026138666894, 0.4095161345320988, 0.22711062252350112, 0.4945806152121006, 0.9550600483587657, 0.7872099021714731, 0.4421098623992942, 0.9641873728814733, 0.7657339172969408, 0.6738659780535672, 0.07048663866628291, 0.9273034274200006, 0.23267977597110634, 0.5752927128013263, 0.642924738127772, 0.20234237829903523, 0.48585373643540186, 0.44108394956663177, 0.22569882123813267, 0.21271323588298707, 
0.5844563673531301, 0.34216105144497155, 0.25990976901793905, 0.7080198496296771, 0.988644379893442, 0.1919570311524016, 0.793874476722731, 0.3179214501359229, 0.7826647368211967, 0.05071903013639978, 0.11581316760366145, 0.7881031161505838, 0.2595612452718483, 0.7084828947202615, 0.42215685823803395, 0.5462777807333478, 0.9558349149073812, 0.8448902296484716, 0.23034975755586307, 0.6500209388376501, 0.06386768528233022, 0.9797197905271012, 0.6150144863587335, 0.3852562520553232, 0.34110855194058287, 0.2348192608538151, 0.8139482543166076, 0.7735583377536099, 0.34253505556987685, 0.4056511379731068, 0.5604731461694274, 0.1554307150762404, 0.14804075085365742, 0.6432711727269629, 0.10579762650861335, 0.9659147475185191, 0.07673761199383367, 0.17955744859350886, 0.2604167788733416, 0.44289117793969357, 0.24997005710566578, 0.3202972328335938, 0.04200193188503776, 0.17063057969984818, 0.19929120023866786, 0.5304248982242553, 0.32224606651031573, 0.07274676073000241, 0.19426234706333634, 0.39009635337467174, 0.46224034499598843, 0.30335601546405355, 0.3860590456826586], [0.3882399985537035, 0.48388176583171516, 0.00844954323960212, 0.993759826470642, 0.6852973434402175, 0.5481974784431178, 0.40874636537366915, 0.03780707080848755, 0.5543103739854286, 0.6393313003237175, 0.5874097252714436, 0.4747008714231149, 0.7682579408329642, 0.2201312447842545, 0.9387685150987508, 0.9706075222127798, 0.5505768586591818, 0.8185883602031621, 0.4228044923633003, 0.9221299540960494, 0.37016601988034514, 0.9301991652897181, 0.34826096441875765, 0.3332092270621754, 0.41824734118732987, 0.2327758153463746, 0.12629555958587535, 0.5180779577160318, 0.44685093431982503, 0.38183037615210524, 0.26470890192978747, 0.4615442831235763, 0.5362475885717681, 0.9838151078045613, 0.5267602350672755, 0.5044020875566849, 0.5958954150103261, 0.8312932276794046, 0.11608179495176252, 0.18086011896484233, 0.3817324130431107, 0.2763100345906937, 0.42481863365235517, 0.9450712162495588, 0.48301773148079385, 
0.41020620274807296, 0.0739445882436146, 0.11787565156444757, 0.47087984631869795, 0.18768596085955913, 0.5479946119255197, 0.16315837827712265, 0.8983425982707746, 0.1335692396041036, 0.14218779756860733, 0.06573258262008985, 0.7473011533449726, 0.4590430336072915, 0.0669611793961914, 0.24331238950950884, 0.4740134655138064, 0.647149133069595, 0.5093611708319558, 0.5568250156943847, 0.9106512797693705, 0.5648296200674274, 0.8457016958730549, 0.5545328106616363, 0.7068750820671215, 0.26237907754516354, 0.6282716130608745, 0.8183325468918391, 0.8811106091510014, 0.46892253348094926, 0.29390008015750135, 0.7388892721662245, 0.947700275679873, 0.4290489563570149, 0.8971644103452863, 0.7854929209027184, 0.8919001190317551, 0.07236942140137637, 0.7674165397532634, 0.08356598502802104, 0.4023248123832168, 0.7525786274635758, 0.9851921800384628, 0.431899249681593, 0.5838155411251781], [0.49658695030980604, 0.502290698735242, 0.848176224634148, 0.6918182310243578, 0.1424430922346811, 0.9834137953182711, 0.4417842271192324, 0.2265561536240911, 0.9405475810645073, 0.5534853259713578, 0.048188846412682396, 0.1765303120620736, 0.22243884016543125, 0.39972150471386314, 0.2545710169328589, 0.6425567322484697, 0.6353330304620849, 0.007519180438403272, 0.2908933161304931, 0.7543804520626717, 0.7786709128375345, 0.5423371787558489, 0.6624695275998788, 0.6952663514272349, 0.3353884271206796, 0.6388270796934244, 0.2683182733779914, 0.23367911522464535, 0.4541821658639521, 0.19591943672980605, 0.16213643515322163, 0.8878005008197262, 0.31923458886053013, 0.034456771418754806, 0.5958169311393563, 0.7963059139289018, 0.6346780211405326, 0.662284761607793, 0.16239939733159492, 0.34642155018688714, 0.25783394249393043, 0.06708382734488294, 0.3296450554073682, 0.04129515204426659, 0.7511987601006433, 0.0664927744351762, 0.5802535274870162, 0.55307840796695, 0.6373653556466344, 0.9492678951743291, 0.32008949751912796, 0.7311194884553168, 0.8030876642860563, 0.8638654783878461, 
0.31045643534068035, 0.16901618228076265, 0.3565102455727672, 0.584112978296832, 0.2428899833827326, 0.7771611301163937, 0.9418565987933794, 0.4200574223443402, 0.758528223953796, 0.6022733875393305, 0.6049273374169705, 0.4414876865825189, 0.6672576396500668, 0.6595929525482785, 0.8120041749283197, 0.8632135062652495, 0.9907069361486139, 0.5860777005353733, 0.391235424018328, 0.5632339282100933, 0.7802399642735414, 0.30918138192937217, 0.25398588118852516, 0.40660982739676166, 0.1539219719184648, 0.3342792990955409, 0.21797613263060467, 0.39628135025370814, 0.39952598644250126, 0.00537342011520725, 0.9046158899915941, 0.8633943449855006, 0.7443308523209836, 0.6129746591355978, 0.9987713981485243], [0.3517925511899246, 0.08925429425985254, 0.4847943390309455, 0.17388155415181394, 0.08484263556582927, 0.6893607733021515, 0.3489569300850677, 0.11176203625792269, 0.8923513224375444, 0.7128110567137325, 0.6728319142782412, 0.44927700136937687, 0.762997549120866, 0.3493814729992185, 0.4176274834001059, 0.2971324690840651, 0.12199474423052215, 0.6588675451721858, 0.5589737601207878, 0.08617649572300401, 0.3474159909666269, 0.6700802634847447, 0.7516767956825806, 0.9540051786388831, 0.3743899873371629, 0.9377185855861467, 0.5672165857660896, 0.19727359961557545, 0.45770231046853516, 0.4557918901040643, 0.17128812953952888, 0.10383541482828595, 0.7793298166978359, 0.4443277945313784, 0.04701779685532281, 0.36801078991195435, 0.443345627180344, 0.10201106713753871, 0.12285321198942967, 0.2519204146352483, 0.014817124067966425, 0.41036377188157747, 0.7059552410652442, 0.2231790763495426, 0.32163409261066145, 0.6598823096156039, 0.8388585233423136, 0.22159816342054217, 0.9148557360686104, 0.32970151973750117, 0.9871878812195284, 0.43946604589476435, 0.6629010114080851, 0.16704845643638544, 0.36964435521115513, 0.13575975769602922, 0.4102306899179464, 0.5459834620336277, 0.7909682432600074, 0.46854500622034956, 0.5109222899528449, 0.4484727395813738, 0.8859394392031166, 
0.68923444028022, 0.30831032526272406, 0.4406160004406312, 0.05812212041356957, 0.4465503950715919, 0.3388950268177431, 0.23409406888515938, 0.0021656891204355766, 0.5050650204677776, 0.27913585061523827, 0.34086501689021975, 0.12487192343607656, 0.53113567334413, 0.5996757798083875, 0.9869713510848829, 0.2740425937847074, 0.4954821227568401, 0.4983726857800791, 0.46167333416286926, 0.8978734535855806, 0.9648286577587, 0.0009589033306145467, 0.7450827926875007, 0.7519641204898775, 0.6813549291053944, 0.8020919342222829], [0.618646373196065, 0.4509791926006913, 0.2666037807297339, 0.3726120296647357, 0.41645602019505634, 0.6537733189644176, 0.024060684939191113, 0.15052771512323726, 0.1887744389580568, 0.3963687858968875, 0.9259334921064493, 0.6556455288422468, 0.5292864330932568, 0.3819232328023301, 0.07309430689045715, 0.24830438025002555, 0.9984699530453031, 0.6695570336020775, 0.9471915410738203, 0.9378058502622095, 0.698647693291205, 0.41011733482039203, 0.10715404173112641, 0.06848975937338797, 0.5918663188468033, 0.4651303110572613, 0.028762491383856337, 0.4166961933908738, 0.40158285856408094, 0.3457573374874908, 0.598460486613518, 0.3274113656259703, 0.09364364600480779, 0.006303865709857526, 0.50848183272612, 0.7843615054823848, 0.41991434496781455, 0.815553157844587, 0.41928459964280607, 0.8081589062474154, 0.9552035259295598, 0.23422452285523898, 0.7929426912187734, 0.6616654936247067, 0.5205455305275628, 0.054706774410965275, 0.013039581233206032, 0.6134673363892678, 0.5536394390846958, 0.9746512944677954, 0.5143921369505784, 0.7935052101472316, 0.15031345758594594, 0.8852302231817908, 0.8711722680851872, 0.8233282447377525, 0.8347120351750474, 0.7113979080734718, 0.21106672658979442, 0.8260034660814856, 0.4176217250838955, 0.38433266401066835, 0.26322001793294614, 0.8227045050942323, 0.8926552168470804, 0.9405760315337565, 0.7134667359005108, 0.9258343089649919, 0.9508574394757564, 0.8214327736493353, 0.8896431949895447, 0.3163726269223083, 
0.9863879161624455, 0.12078353834690025, 0.7194220737846676, 0.7502212345091808, 0.7760266555118711, 0.9990941760943814, 0.8300750834415891, 0.013410423762567625, 0.32180969987335395, 0.15889392144717718, 0.9658585706304642, 0.6641784094791006, 0.5100538501067531, 0.7990684425842112, 0.04307452112596433, 0.05564077798593681, 0.8215256564483726], [0.7201172793137911, 0.23251954861544333, 0.5437862803576843, 0.057684232208632835, 0.10802941767603458, 0.25244356556622183, 0.9533882917591305, 0.9544594403146845, 0.5989301669181855, 0.3448425739289145, 0.5823484650896282, 0.03332169507402771, 0.9656388205726648, 0.9333378754193974, 0.9471548350616985, 0.1451039636606919, 0.4491744215596003, 0.025908457586622413, 0.4507397055740239, 0.34218932837149785, 0.5261705390599722, 0.43242147711363654, 0.3305894465581499, 0.2530019456628395, 0.9738219476571379, 0.7280798117111813, 0.6580047222932867, 0.12575304448320812, 0.9543007630262967, 0.41383356068562815, 0.36277714219703094, 0.6952338248357949, 0.17704116747216392, 0.7907299571129992, 0.31093142555509323, 0.3065028037918335, 0.6472329428637392, 0.905911179180539, 0.9449100440710526, 0.7853749975285116, 0.26298431725196125, 0.7455622649625495, 0.3885793578703448, 0.6031123170004996, 0.8892108456984316, 0.12740929126317213, 0.5538788951795993, 0.09357299735525937, 0.8674154131628599, 0.22311384660570444, 0.534574425977844, 0.5559362063144538, 0.867694377746817, 0.9798483920082687, 0.31480896799980596, 0.9187144648646788, 0.5221640780384454, 0.6798608650671003, 0.7575599187536692, 0.4500829330267039, 0.16175049010552056, 0.8145400587202286, 0.42481330951428653, 0.5552725016427802, 0.8149081026278017, 0.0742032315697474, 0.7838411637761177, 0.5242361093579306, 0.5154637927767609, 0.6870730233199159, 0.33391417993675365, 0.5561669730439157, 0.6491658243926314, 0.9766361990653089, 0.29085146074555046, 0.540508065944002, 0.6991902160054762, 0.7727969295969148, 0.07623387743933985, 0.6843422219845229, 0.7048947588786171, 
0.8927319641036019, 0.6669713766128158, 0.448285803001788, 0.1388904539762923, 0.14727749285584324, 0.5899349269902303, 0.3505843484792016, 0.015764084384846333], [0.3737995465130436, 0.32103066550177517, 0.3258779300712662, 0.34499833013899817, 0.5945609902926461, 0.45490664531549896, 0.46463832377424263, 0.12788970689580015, 0.4262884723479685, 0.384563443170884, 0.02050747952650156, 0.9211896166422925, 0.13240601648893802, 0.6460580673028733, 0.6007424586663481, 0.428237120492456, 0.9471817259552778, 0.9808463227709657, 0.11848646900539572, 0.8165992395364313, 0.7541448247176897, 0.10260529342412283, 0.9584722866492288, 0.13308459853857013, 0.9498673628123795, 0.49171084905263995, 0.8972739649312458, 0.2586060968170868, 0.6327109871624279, 0.02598866686634249, 0.9971593069221395, 0.06908971698286281, 0.16847659731719944, 0.2991377705089211, 0.4283839520905277, 0.8014198863066887, 0.01623337397571223, 0.8129767320749176, 0.7190033683044822, 0.14351877411792935, 0.8813879682228527, 0.2719572373069513, 0.7516010439619937, 0.40970630702698685, 0.17987174676176065, 0.14313778010232314, 0.47729481170037313, 0.019438523923596907, 0.22246308656129332, 0.40763282987426275, 0.13195451984333384, 0.2838426993225701, 0.500230876466877, 0.08096788991816029, 0.3285147365926936, 0.5968468889168141, 0.19960336702651582, 0.5868217050146867, 0.8798891024137493, 0.5834963321469141, 0.8088848710426502, 0.9176145587142629, 0.2335241644086561, 0.625141750013927, 0.9526121427921403, 0.6182921803277928, 0.044570604976103434, 0.37385069179178776, 0.2261464620508783, 0.8930879601685217, 0.2752703707712164, 0.12226691637116982, 0.15988804494648046, 0.8871828352932238, 0.10972212591909691, 0.3954841857493764, 0.7769629855680463, 0.7092422425696784, 0.6270033706140533, 0.6400950749675068, 0.42612989316014394, 0.6608632930846193, 0.6648827082242661, 0.5793756280984093, 0.08041778320192183, 0.8049889983234583, 0.5214790069587059, 0.59374762929748, 0.09634419015533868], [0.7820041122803312, 
0.18886501016049906, 0.9603208817847642, 0.20142604938255948, 0.0013938179038205378, 0.9071641106951541, 0.1451427804176335, 0.7693177756646333, 0.0718122942821171, 0.06121182523383539, 0.671386631512311, 0.18346309492579094, 0.9142344301771508, 0.7960366477380088, 0.4292364525900193, 0.7344826956575944, 0.4955668511690088, 0.25134042522554134, 0.36194428687852676, 0.1611094319197447, 0.5275099494508567, 0.42730851205613907, 0.06644802568629515, 0.36681015220942803, 0.7642070226825387, 0.6681709637809561, 0.6646238565613952, 0.1998068357261369, 0.7168158451700072, 0.8884357804140092, 0.6734537503781566, 0.8147020899650693, 0.2797767052981479, 0.641416425842395, 0.8748658961962782, 0.20551630839801405, 0.6575006874586868, 0.9590454757120385, 0.30629595287789224, 0.8379439893208782, 0.8939904388859379, 0.5793970358964597, 0.4673048918586332, 0.6844004001312166, 0.1929402067764927, 0.06161998351124609, 0.25969085023001526, 0.27913124678571266, 0.958070846306704, 0.5874316404768927, 0.008935887595776237, 0.7566468951991709, 0.2940849491885088, 0.3682301913782935, 0.6354590868677459, 0.8135007823790931, 0.4195893632960015, 0.3619181724545594, 0.18866225351928267, 0.7877708161389502, 0.8051067083674438, 0.5226565355683789, 0.48796554840707307, 0.2824321738326854, 0.7911126942314957, 0.7550593288603493, 0.07232932146998661, 0.7842934805545608, 0.4225342123964011, 0.35460424358275355, 0.38301939799312423, 0.055418629946730524, 0.6742122071915803, 0.9835444219478777, 0.6474120385876304, 0.709144180762834, 0.4877524301962557, 0.9472515889210666, 0.7844634337155332, 0.8654272719447756, 0.35131172882272244, 0.8237069546581403, 0.05611696740228511, 0.19881911075912095, 0.05234586016350051, 0.06942467978290245, 0.7003000149412542, 0.0821512023208616, 0.2942574786586727], [0.7136569505117181, 0.623891362111125, 0.14281052792771853, 0.13213338335006686, 0.7245891841393, 0.45684798930722803, 0.09482267272742351, 0.8589258110349575, 0.4417651390710019, 0.5681208498598845, 
0.6339089716354159, 0.28449937875593423, 0.8998893620715094, 0.4059673046214619, 0.5999372987486468, 0.9231468653798263, 0.18786142455376253, 0.852848502397882, 0.4801041928737685, 0.5946313809354304, 0.3900998088324835, 0.9306202509893609, 0.35237612619617187, 0.1115115238873916, 0.212456648457937, 0.6452797731014895, 0.5463186133996614, 0.5121865350590638, 0.6508627038333789, 0.27268121189372097, 0.07076742432380023, 0.8282464976311218, 0.28916832614998866, 0.08861626535073552, 0.31165178647206804, 0.29984871220138054, 0.7058799419500233, 0.880003269769303, 0.7319654914534965, 0.9954713455491797, 0.5148532889355395, 0.9441125859006126, 0.7090238658126532, 0.9538382546434501, 0.414627541155298, 0.769403239977852, 0.8363693985650476, 0.23207957365631837, 0.2569561280467574, 0.6240617648722113, 0.04495851519986671, 0.15383202536082263, 0.07540416077983858, 0.44696728510083317, 0.16629574333876573, 0.15677462954630117, 0.8121846090116006, 0.8093263986502998, 0.7560217596382363, 0.16089923507236936, 0.6668357300535112, 0.423611375966119, 0.29119454291064073, 0.29962773050058644, 0.8630975625521909, 0.2482250790732412, 0.09445702228532027, 0.15707426561102356, 0.5965929382331904, 0.9484745638339924, 0.4739838460354757, 0.5407550541059278, 0.15968494344924744, 0.7450639917193645, 0.6245501811691226, 0.14677773173862174, 0.5108979955218822, 0.4399925448486025, 0.37996599744250914, 0.46687948150912406, 0.6405695830928566, 0.6748407674386164, 0.6058854697110065, 0.08290895191841885, 0.14586036420978288, 0.6787072215045377, 0.6014281164941757, 0.49242500583537063, 0.21311119612370633], [0.08173198701106299, 0.010379337235255304, 0.823857915515266, 0.9564597961818011, 0.7880364612976141, 0.6332558976735757, 0.9873088250899867, 0.3035887773281407, 0.9964286881968804, 0.39352569244964053, 0.579408608067701, 0.9496811868890824, 0.5382947894262838, 0.14678338029769888, 0.6942026342030474, 0.9243486027155629, 0.32006948763263243, 0.32880683522359566, 0.1141683361978062, 
0.868189612534822, 0.5843318704707369, 0.4130458716720481, 0.38342152999975454, 0.8476318995064842, 0.02039862715027796, 0.4042682961362484, 0.30644257143490783, 0.6253921533971225, 0.4603055045809876, 0.2613590578961975, 0.490533450090176, 0.10031505115129369, 0.07601494222071525, 0.8991068694344343, 0.7842708839909973, 0.10087242609625235, 0.8500978682804134, 0.8647195887230827, 0.4824680033549239, 0.2644036133650548, 0.5977443880512404, 0.5458751895252384, 0.04840317375471015, 0.6366950666743777, 0.8656297528081743, 0.597337982243856, 0.901290072863806, 0.358701422029317, 0.7351790072981379, 0.21580431337293748, 0.16424779763034958, 0.31934510622277446, 0.49837650495581776, 0.587376309504809, 0.5880762225500585, 0.8854958097888945, 0.8895124418484387, 0.4896878057173901, 0.40735588800957623, 0.9149984310252838, 0.5575343577155614, 0.258203176626722, 0.5071400812330061, 0.7852736392922995, 0.15078080832515706, 0.25314304696602863, 0.7435855307819372, 0.967869282302304, 0.0008681213375135721, 0.6126492797928971, 0.8441584644786072, 0.10697171777328396, 0.5251703145617266, 0.5564694804820449, 0.26505201189253236, 0.8837466367085983, 0.5009207895914243, 0.18191863034021416, 0.46931804162782587, 0.8084766269215808, 0.22675393044673597, 0.40423771694279453, 0.04113957791858314, 0.955759816344345, 0.26904575958588484, 0.8223355091305783, 0.1583324605973978, 0.9111719089043109, 0.918410225677375], [0.07787243269508615, 0.38111662365283827, 0.4576825854003286, 0.44302363760172936, 0.03509639985188806, 0.12195429720936923, 0.9404434418890606, 0.36188502388446997, 0.5476034393165524, 0.6583028364988535, 0.7536217872589681, 0.6580138004696071, 0.2510797760654213, 0.6631143496978111, 0.2540274438248574, 0.22799356796642867, 0.984309397731135, 0.7358197930770651, 0.59350744845364, 0.5411856645724133, 0.1839497961228701, 0.893255311808414, 0.7423350832881564, 0.3463904596825982, 0.43605701258966445, 0.7786228398458829, 0.0070724560401342895, 0.4110543636138233, 
0.7321051821114496, 0.682353253176557, 0.6701257777351604, 0.2587883741406978, 0.43729999774621564, 0.8037015752031588, 0.4773675214624987, 0.4766141391440729, 0.290875542095151, 0.17589774139532344, 0.8523623782537858, 0.7623728826694479, 0.7422488103821516, 0.4099842874931976, 0.8765561884125159, 0.18366052568962687, 0.018958317806207048, 0.42741551756176155, 0.46224968307293945, 0.3619272894961917, 0.08383782243807314, 0.6154707495146532, 0.8629362423968409, 0.6727947735509338, 0.03302903964790227, 0.6292841217747424, 0.46642522599795055, 0.7788668474804622, 0.9502600782443549, 0.34553491473444997, 0.18825526418889893, 0.06123476939407979, 0.027682918418823088, 0.05298483457900527, 0.08295515787372432, 0.18422104763345581, 0.437634749920336, 0.1517693311209859, 0.32430805747435043, 0.7399260962445784, 0.873307286403529, 0.42813895056644347, 0.36952266610168205, 0.8268472043742152, 0.12203331389277206, 0.3702920501303758, 0.6834549790009443, 0.01274643661937902, 0.42941266896129804, 0.986565050641893, 0.8226029901403734, 0.015889364657073912, 0.14783104965210203, 0.43183978793232825, 0.600958637297015, 0.3037316819716013, 0.5578221017090029, 0.8620161193148487, 0.1431615634979485, 0.2503110258346961, 0.6435644226810809], [0.5096369942995734, 0.9837236037079221, 0.41834520220386107, 0.08593893748711923, 0.637316862671869, 0.7575007982960899, 0.5265988991494379, 0.6099903557414894, 0.7871135924321371, 0.92880240319948, 0.7925000081026083, 0.8945154296313212, 0.6392567243772014, 0.43485825729158467, 0.14100200668756113, 0.9405843312926258, 0.4871218627954508, 0.6116113646053511, 0.13796175767921826, 0.2508091816030078, 0.0028510498782998717, 0.7456087576314647, 0.3343281975273278, 0.4962982544976837, 0.6215344666701973, 0.1519697111037004, 0.8178973817250849, 0.01533013720889631, 0.5401205886021205, 0.0290436540762522, 0.8872966212524372, 0.9866654064477957, 0.668601328478074, 0.11436270818548433, 0.9699006842506332, 0.46779857369089073, 0.9477452258627224, 
0.9131324767229507, 0.36007924018444815, 0.6925391527026871, 0.9914158147446588, 0.49475894765540385, 0.7237267706437979, 0.4745915087431989, 0.14137277550001714, 0.42788870880198315, 0.4319982500158013, 0.939011640332463, 0.29126063485570886, 0.42280353909039503, 0.36025909969238823, 0.2619652899340401, 0.6999695613273562, 0.9585194347181937, 0.505805644283156, 0.6019961960212182, 0.4213965263145967, 0.8465096774886673, 0.08647727794367444, 0.39184472895025835, 0.2105867686597962, 0.7716299034647766, 0.16365013369533077, 0.34847835160857255, 0.7821379944757425, 0.9012017326394736, 0.5824787848348084, 0.8466345226692155, 0.26719460106649207, 0.26380576732062655, 0.24532910670502173, 0.6616646753939724, 0.6936754097325822, 0.4634214386289224, 0.014229488238234445, 0.9590888604112022, 0.6381521185673908, 0.7086767421523404, 0.9845834718385998, 0.608453959672973, 0.8151108847632158, 0.9702531413206348, 0.8518533243198384, 0.5089601601806626, 0.15574252895528362, 0.3741971089763485, 0.4138701556650688, 0.373172915007026, 0.43296144778081924], [0.023075651692732957, 0.7196868456239929, 0.32359634552239624, 0.026136833602392162, 0.32730325958375006, 0.5449436119998764, 0.48566399454104103, 0.31315774723838097, 0.4442697955329986, 0.17101831218323194, 0.5528470356485515, 0.3310466814375218, 0.2865388367027547, 0.2306361967545284, 0.8782366202734819, 0.8727210754152572, 0.7826138445426047, 0.7998783630798583, 0.8385045833207532, 0.2489566812308842, 0.0030326892924655846, 0.11163747372287469, 0.22705559735233993, 0.8276617983136728, 0.8052051137125531, 0.13580727886276478, 0.9827892115109265, 0.7219633337650587, 0.4079408136241989, 0.48229913599380825, 0.35716249787798726, 0.5840741506205555, 0.17200961347931398, 0.8182966397226223, 0.28879048242414573, 0.06102763931148314, 0.6072348223899604, 0.5044050972994322, 0.3601217879686066, 0.5106846477715079, 0.41239355662317656, 0.6145765230364453, 0.12275320545702118, 0.4235009621759014, 0.5847539560737934, 0.22359616388219494, 
0.3939753914026828, 0.16693466982327554, 0.18387964743637863, 0.9758589888268341, 0.43479918405108287, 0.7314258205930515, 0.4408460876678314, 0.9781399106509023, 0.10912091617425057, 0.4900050131937753, 0.2727147566574263, 0.22097829490720255, 0.07342333080010244, 0.6091551257460205, 0.6999858471407688, 0.9009557619956491, 0.6157828949269578, 0.4395077563576585, 0.05787590873933224, 0.40451819814052037, 0.03757462853688931, 0.22296991594254667, 0.15381433898353725, 0.687999015913726, 0.7152364413532586, 0.46186769439579023, 0.8607263748920503, 0.39149437075269033, 0.8905738192583356, 0.5752921099159773, 0.6914373860534004, 0.7052666274847682, 0.36698672360198015, 0.24265336085779443, 0.28555286611142916, 0.4940357199005213, 0.9904850018233012, 0.29429771306974783, 0.9737710147141363, 0.7275023901758002, 0.9635414261603389, 0.14835293298905916, 0.06783100995429947], [0.5190708518574201, 0.6756328357655637, 0.627583469815044, 0.7073317130279386, 0.35733806718600425, 0.48279805511303286, 0.312325421724845, 0.4987383229313187, 0.8187588900101102, 0.15314010100297548, 0.4139180387089296, 0.9277194406482395, 0.3757068672006818, 0.019652010525890495, 0.18906373351762262, 0.4948828832727491, 0.5864650988208565, 0.5225215018045392, 0.37582893241822946, 0.03995006250554989, 0.1857427024837176, 0.05917433444860409, 0.4785539422402234, 0.7360359462673376, 0.9467463517855079, 0.24085962415096251, 0.9409815827906853, 0.9204413443032953, 0.30200607982034955, 0.19846291052911103, 0.5897494151356294, 0.35517834654074, 0.2881185551537764, 0.4445245943237327, 0.9407263571303448, 0.20749832467141383, 0.3670567217276701, 0.5289337740227498, 0.8995912447455152, 0.49382681878831214, 0.04858222148617841, 0.29327107115599627, 0.9015881543285386, 0.5582375663031237, 0.8364139550761875, 0.44457366249593067, 0.5520278286615479, 0.529272262555593, 0.2501615521935774, 0.7697805577767121, 0.4049184433931583, 0.4418796360525805, 0.1299816279076026, 0.041963545970262195, 0.016651485585518655, 
0.7380920578357998, 0.7328451911911964, 0.6000078959866184, 0.09815294061652646, 0.11082637392316297, 0.770130968097444, 0.9892269182562965, 0.8052858872375738, 0.4984783379787595, 0.0025409962904615613, 0.45532040211218117, 0.32137006553640957, 0.3993144329986641, 0.15857433103356888, 0.7262176997013572, 0.36804677520154416, 0.9196203770271872, 0.5712513521174387, 0.7349941043215749, 0.84170260258091, 0.40966560656816176, 0.4066439456444466, 0.9139566055219245, 0.5048976910997683, 0.3663851886960652, 0.1823566205180579, 0.8556806305962996, 0.8673933737700206, 0.36840546447709455, 0.7331981575847133, 0.9071073192440304, 0.4670486204825338, 0.9107882461270771, 0.55024492579283], [0.9916605391181048, 0.35452652996831857, 0.9255302920425631, 0.07858754174546001, 0.7481130529040567, 0.19231115372579355, 0.85087917478983, 0.4008101278888563, 0.31700892791313395, 0.204700479838695, 0.172756926500515, 0.3443964041878764, 0.18321415446847422, 0.11331356331620135, 0.9387719679232389, 0.06644437253598368, 0.29180779654351163, 0.8985586339987193, 0.5436897631457531, 0.22933998288264468, 0.6617429277939769, 0.30048061511523483, 0.272089742126571, 0.5497522566728384, 0.5388379465313378, 0.14456607955585266, 0.4189720144327512, 0.3107857412582774, 0.8606641623855523, 0.4179644698022503, 0.49955160638556395, 0.057405140349158224, 0.11176592517125772, 0.6954737935776204, 0.3567038336481342, 0.6334975393650555, 0.7051849705303675, 0.8835483564602059, 0.40378096310910183, 0.39955468177194986, 0.5699882075027073, 0.9804091186509848, 0.2802366722461441, 0.05221580109876078, 0.7975897550304166, 0.93800194466268, 0.24248032381773288, 0.5582834501495542, 0.6881880048833641, 0.8858851557398668, 0.42007872005350466, 0.7414223293933104, 0.9845839401067794, 0.9955675694302523, 0.08462039410016187, 0.8036388843092604, 0.15837116232142912, 0.06220389145063365, 0.9510297407762228, 0.6520154744831638, 0.6457990956645117, 0.049640298990299536, 0.6183845148406915, 0.1330783141553411, 
0.25922869551795846, 0.22477404790650446, 0.6878420242013011, 0.6094065439688925, 0.24417615446104102, 0.5477748520198343, 0.2678265629234591, 0.9662787230264556, 0.23169254523079474, 0.9231942714743511, 0.7043315036984801, 0.8372653390215956, 0.5154211398325057, 0.46611632930556046, 0.6838920638809898, 0.9852601596808799, 0.41761676388090196, 0.9612967996729467, 0.4167566084359494, 0.41719492752159626, 0.0777324158319016, 0.7422339037159814, 0.5688878163673126, 0.8251727102756614, 0.16855510920139638], [0.7355438352482577, 0.6925933766058245, 0.04718812931586325, 0.8761415466709402, 0.1910563533983899, 0.5223165866334196, 0.31381746356072815, 0.489419946837674, 0.4977466774802063, 0.4633900263299394, 0.21278519204723634, 0.8213790500400951, 0.2802894444413965, 0.7158239964585421, 0.6884531691543105, 0.7067786919824665, 0.8055701188413942, 0.9381976849875685, 0.0125148594734944, 0.08674556212611884, 0.9306531452812048, 0.6167907063464839, 0.007951682076609234, 0.2170162138472994, 0.5757996865767284, 0.012874800069472725, 0.802170313932196, 0.30974306577341326, 0.06639580553371927, 0.8668244441457591, 0.9498389227448061, 0.029388864586887276, 0.8367555702158369, 0.07340477583900729, 0.7000976617814307, 0.9277581376546926, 0.990236008878633, 0.9893265486849699, 0.7943390403763847, 0.9262195533928418, 0.38015675855007225, 0.5169820675729929, 0.09564994277920458, 0.6252161323599225, 0.36696730705975655, 0.6518143955459257, 0.34995991600568066, 0.5750362972964428, 0.6191950073706901, 0.994973886001233, 0.8943167540828563, 0.046282875476623575, 0.8212604972157119, 0.2443574106878903, 0.8215150160734872, 0.9132999673500928, 0.6371621212468636, 0.8709121006716914, 0.8884991047133167, 0.23327706546893012, 0.8811770431630596, 0.785623238591644, 0.4938223670648916, 0.8412876078784358, 0.20123523678885757, 0.3754960782000448, 0.7738219447863048, 0.6980256923545776, 0.4862077428920546, 0.6690392992023901, 0.15993825511207715, 0.4783530878168314, 0.31602088409224716, 
0.6813350079139003, 0.2612982583035145, 0.41780240651822576, 0.7341328263352762, 0.34333334437522156, 0.994944004888762, 0.6221027364280515, 0.8148653429937533, 0.13744105445135057, 0.05252536870616131, 0.12654804849896073, 0.24259706455581942, 0.9695409443186849, 0.9169340827701145, 0.006677560394288018, 0.7458335546024766], [0.06730986990768384, 0.4610133450278042, 0.03733097333445845, 0.7609700208703581, 0.16934347568744035, 0.173315265734599, 0.6446860708077695, 0.24818262966018279, 0.20617746859191366, 0.7765137718538611, 0.18699463199220734, 0.6008407737618675, 0.3606877635400322, 0.5752770692101531, 0.03819576570181027, 0.8563282808164094, 0.25200821820485, 0.23716789626076018, 0.004772572979767631, 0.24926081674984268, 0.30479022641134235, 0.8086442781462745, 0.8957229581251202, 0.5981990015097088, 0.266958674860422, 0.5422720137106976, 0.5762383340524392, 0.10194007416840167, 0.7596261515437651, 0.33920620090450326, 0.517513511207073, 0.5515724963492631, 0.36163218387913765, 0.4609712981914097, 0.21539273646481838, 0.5703953449514472, 0.33128268566995944, 0.731638339772117, 0.373204589734941, 0.651585066900108, 0.5012451561164063, 0.24072740844142682, 0.4218978478491353, 0.9715164411670084, 0.5490386371592862, 0.07497090388776739, 0.7657888757998514, 0.9611699207048136, 0.4310487211800018, 0.046179130539853896, 0.8198611827326828, 0.9179085329873199, 0.8562412835474034, 0.3578555712516487, 0.7015976585357372, 0.7885383638716655, 0.14850602514425115, 0.7482422809932668, 0.11635902135014209, 0.28882825916210486, 0.3553142044341462, 0.3121347546645653, 0.6132612973481106, 0.22013132517617418, 0.8829939090578839, 0.5789062979634642, 0.21700280877886047, 0.9712447720554749, 0.48289125897736185, 0.3994770307458444, 0.5483454859471691, 0.9255156070487872, 0.767672645202571, 0.6234577896546327, 0.9435583774385061, 0.7430264742658989, 0.3358807560120908, 0.3137868408462624, 0.7008899952198328, 0.8768252390010001, 0.5706898512836106, 0.7814059143822703, 
0.1040812476318056, 0.5908281560647537, 0.39321386495970323, 0.8883155683215058, 0.2847333495917055, 0.9144732509262852, 0.22976528115958628], [0.05981842471504517, 0.6206549590547321, 0.4506711635290671, 0.5792434828819188, 0.40008154161752074, 0.020550575264911553, 0.09306765391486249, 0.8707433715728362, 0.8066172646833889, 0.17087850613029898, 0.5982314044451104, 0.6309739918371184, 0.7312485599916431, 0.6595486563206431, 0.36376272218023387, 0.5695617707570054, 0.8854737017510358, 0.24342285021134946, 0.240575530241807, 0.17245410078959278, 0.7987358572888311, 0.7312795504117775, 0.5380759040986183, 0.3342185349661859, 0.07316658385526442, 0.3775359887215245, 0.42120372889699254, 0.6092380557513105, 0.3809457785885876, 0.46948269084409755, 0.6012429102040323, 0.9380315987739248, 0.44763326072987963, 0.38497568035626784, 0.7909605038432952, 0.7600362598661928, 0.9321407715424422, 0.47513479091481514, 0.6819236918150542, 0.9918262666032999, 0.8478036860622565, 0.3515289515349266, 0.7427837661686444, 0.5757631811770424, 0.14419122457534317, 0.48845731409275306, 0.19685708834167281, 0.4085770027672322, 0.7932188876625637, 0.4731790048920601, 0.5836588973963179, 0.7078282399846024, 0.774338176158506, 0.8072278529651508, 0.4037119497999868, 0.3152333970571751, 0.2561212934711413, 0.42332065166468114, 0.3086053930826439, 0.4604820606780955, 0.34155528046330497, 0.855490794321016, 0.3537440998312471, 0.7096573670200893, 0.366185429690708, 0.5169485231468995, 0.8643650850622032, 0.8047209249510456, 0.8924286871502217, 0.03961820629322055, 0.4205732851893068, 0.40390587327323246, 0.12203636802495588, 0.5047740866944037, 0.053108540011705285, 0.8752936997725949, 0.5144259625755311, 0.9458829344623462, 0.9881850379545126, 0.1407834736455803, 0.8784731683488521, 0.8399181487873897, 0.7817319188845809, 0.032734272104587236, 0.8781319823755211, 0.048412764197214964, 0.18869122716970488, 0.17604025725769978, 0.7049571662439098], [0.08113178416590705, 0.696387538183512, 
0.9109371581035248, 0.025031181274060677, 0.3515371289183752, 0.2913877087496153, 0.6652677977199924, 0.769647274160916, 0.814359185730735, 0.3602383102864142, 0.8225288389804912, 0.26365484450497023, 0.1536894864613647, 0.21468034737970254, 0.6188279267758033, 0.3673768532748949, 0.7029109600440291, 0.8133729523178441, 0.5711405358863976, 0.48584479511246215, 0.4878970291872934, 0.20076321556035936, 0.5325757552509318, 0.8361480948867408, 0.1372320631114815, 0.17746455830927854, 0.4728760868626114, 0.3157873643258069, 0.8293963727223874, 0.5741708106346539, 0.8951057148232548, 0.3325649310480727, 0.9756998338698519, 0.5282429349060458, 0.3033187005001652, 0.47612786582698174, 0.3823756138684963, 0.42508133950528615, 0.4127330578110493, 0.0017749212932937075, 0.2669101008736733, 0.1465354628348916, 0.14097312538079954, 0.5757727379923674, 0.2684873881981964, 0.18752427053941645, 0.41152833431779134, 0.19898064863269593, 0.6087391159798994, 0.12511728618709306, 0.45909014877610155, 0.20423542881579193, 0.2693790908365711, 0.883781114687842, 0.1400384121826702, 0.6684576400993602, 0.8113502599229342, 0.6078536698949429, 0.05909827615494867, 0.8054682972917956, 0.1983680652087938, 0.2872555008031432, 0.7401993313397814, 0.5440228967116215, 0.0032605056300302193, 0.5646908247862947, 0.24107243775514076, 0.1971377327914383, 0.5588515351856491, 0.9844814211316424, 0.8311102791650344, 0.3494959972362125, 0.580095798470058, 0.6369365424051247, 0.1897407774781894, 0.8072977868807175, 0.8581172380475843, 0.23328792355649086, 0.38592370992337555, 0.43380221159184196, 0.2530239799920274, 0.34822144341501915, 0.7480332324590185, 0.6997995131917084, 0.9404516961737432, 0.3566142827881752, 0.342888727667768, 0.710865052187013, 0.6870189614642587], [0.6514637931756233, 0.03396289538729569, 0.16391229860431555, 0.735163055657347, 0.11071544623318641, 0.3911514912584504, 0.2827841101669898, 0.9898793243216691, 0.26808175579234217, 0.7426294391359995, 0.18759239643838388, 
0.7256460687551296, 0.9747254468927438, 0.07876332176551082, 0.3735617298531958, 0.5510380053066483, 0.8346846224783451, 0.11856785198486053, 0.8040460876886321, 0.4382605910920647, 0.4583378049101262, 0.9185830899955371, 0.1868852461518251, 0.60685145250924, 0.42680996467573085, 0.07029986954587553, 0.3631555990988862, 0.8486682294151014, 0.5333054136525754, 0.03563919542798255, 0.12427827769417144, 0.9415044843795013, 0.9071035549430879, 0.9319233844284089, 0.6749053310252412, 0.47876408852964814, 0.11470470067901484, 0.2433631914764326, 0.8519136219257101, 0.7595069387456461, 0.9520730505085702, 0.17287564909860742, 0.36139537499393937, 0.5598694015245399, 0.21567364449178827, 0.09439278334471235, 0.572617410590504, 0.195397658680722, 0.9473612633014387, 0.6390476773919598, 0.23150496339037885, 0.21194253406860664, 0.44340359917736993, 0.30271559332798703, 0.7531097326810732, 0.9586922097320101, 0.1448686459404187, 0.3778983941181622, 0.29114643202044166, 0.572949267663157, 0.08239949522357148, 0.39300537967839433, 0.6773183981987367, 0.23206913324267953, 0.7141439293360554, 0.27256343138966743, 0.03994370735145014, 0.8467042395636266, 0.9696254920546858, 0.6414579307303837, 0.30474608782406976, 0.1357410180039258, 0.9849618540565426, 0.710591858605881, 0.6937615604907785, 0.30631221606012526, 0.9242651003435961, 0.7195551989599519, 0.7998840862361762, 0.8569008984281015, 0.9328036129664286, 0.6514150117694621, 0.8584629789573955, 0.5445730598472244, 0.9004916004788179, 0.5122035387376342, 0.24495340638529706, 0.8683168379765074, 0.22575971817198448], [0.8266278989752844, 0.0684267828461439, 0.6392737660837189, 0.15612047737352808, 0.7606041379717027, 0.3501820278915502, 0.6157495087778129, 0.22858489664951975, 0.7712223046677094, 0.5317830754338605, 0.41261566400480576, 0.5389787444775646, 0.331015166601795, 0.23613874927919232, 0.7515183497245795, 0.5554239413124435, 0.425316826265306, 0.3474275866501132, 0.21969536412508917, 0.1919274721336921, 
0.3443982329187967, 0.7488736738707903, 0.8237645583038672, 0.7482242725639778, 0.006612811862512058, 0.6085203605647781, 0.7716373331447535, 0.9911174688329635, 0.6972577907339471, 0.35496244923656706, 0.8081706156053041, 0.7610708792230356, 0.7488587283140159, 0.1967041443664177, 0.48172304334214533, 0.4382339428444275, 0.07390726050247454, 0.6634270091420054, 0.9869007045500495, 0.9817987488267363, 0.7065156655684602, 0.23126364220532947, 0.6395252713530065, 0.5587732038479154, 0.9623843947514331, 0.29959345792930325, 0.9992858367567165, 0.3141182894141228, 0.007625217623363656, 0.551282013226527, 0.034548364002545684, 0.3966970380056537, 0.5909663828076284, 0.9255049398755791, 0.5208263901870068, 0.6661077929351233, 0.36927477436026834, 0.24278475481287365, 0.9004395822060014, 0.899674667369625, 0.5637112047499901, 0.7574667380552644, 0.6147051779894909, 0.24773885175283228, 0.20170351238269957, 0.25955662919436673, 0.128553733884064, 0.3177899621543979, 0.4873329427838947, 0.18368429066621683, 0.11289070262621304, 0.2598466171605375, 0.2807129819848404, 0.18540756639668665, 0.6734646948616361, 0.21896679025158983, 0.7457002702470664, 0.862973561352364, 0.52866913181477, 0.5331291188509489, 0.8277507987076989, 0.2832367763651419, 0.19951938145565218, 0.6021256428968662, 0.44066997592974966, 0.5502590449747871, 0.03760203973538523, 0.11322782486184013, 0.7171276684405459], [0.378813469515499, 0.8742583609466889, 0.6301784662829916, 0.35267654650968094, 0.07117334308073853, 0.8579106367459383, 0.6776141390390711, 0.7188612995393914, 0.053601648831374415, 0.09632772798906653, 0.5383472244331343, 0.6428478484664998, 0.2336528429895921, 0.7261900559898327, 0.4968516919675119, 0.5407604840518533, 0.3750745869304558, 0.26233505414334, 0.7100777100163748, 0.9387588920982569, 0.5096048380348441, 0.483450873046552, 0.42881817138761036, 0.2261850528073358, 0.24423603921895842, 0.791970790489254, 0.7996751780075918, 0.4945141830479496, 0.7187706207501089, 
0.3187758563689377, 0.9092529156600871, 0.48239914943199713, 0.08594029807009584, 0.8482304463841885, 0.6813076022570372, 0.6339246339309245, 0.5549021433155299, 0.667349605844135, 0.523156393457633, 0.278315181879885, 0.7307582258115213, 0.6918175897039442, 0.39797266216110216, 0.5063465724291007, 0.9381656662678858, 0.3690172955344023, 0.7378385086362494, 0.37293251020166596, 0.21191924649239136, 0.11386750914103594, 0.06859810882879891, 0.6395999158751746, 0.0916422597266151, 0.6604528468464329, 0.8886183091432973, 0.49750674056768696, 0.2310372361201085, 0.5855449913483796, 0.8583312841304914, 0.422324715824016, 0.2875101113231282, 0.2599354337523684, 0.38347359454688923, 0.8842963371231815, 0.09725524381030293, 0.5248511350540306, 0.7987846149444715, 0.3995841792351552, 0.6157689471423579, 0.9144507591762738, 0.5449381671545607, 0.5010524641462252, 0.040492103288250925, 0.1681690896453507, 0.9425756424897969, 0.2656346794134088, 0.4466737847164016, 0.42278198006210377, 0.16752376813156555, 0.8078930507415623, 0.8432986268626345, 0.7309550870596352, 0.9815847699066709, 0.7412392653437498, 0.01077098273392263, 0.16968958816984991, 0.6283414290834911, 0.8593575747889078, 0.37161230605820283], [0.9037611280604957, 0.2502129833983312, 0.8088980025115039, 0.7205011756898944, 0.9439143956630173, 0.5689705792909496, 0.6333349632409515, 0.6605112205822883, 0.146923191726968, 0.9338715548068215, 0.32016965760382643, 0.7214406144961025, 0.680029412133224, 0.7121039483954671, 0.4269458370245951, 0.19673605332543642, 0.9155824770038562, 0.3488368920117867, 0.14306078921744314, 0.9467416738944499, 0.24121737104850316, 0.5077420360007876, 0.05316467206395992, 0.988980089272359, 0.35177930042278627, 0.6891183542944551, 0.7645025899907655, 0.23230221218213032, 0.9779110175834379, 0.44938081623872306, 0.21130839867092965, 0.3221713997251694, 0.8176184280508215, 0.8201447217466494, 0.31144020240858183, 0.5936712000960171, 0.09362316298448592, 0.6472249849490573, 
0.1301920162550957, 0.12694462187809763, 0.8324790803254445, 0.7399924546476921, 0.022958488961357326, 0.4476725308229209, 0.038092952852635587, 0.817638930784089, 0.024959580725845587, 0.6046455561830957, 0.7307066624253079, 0.965791888687905, 0.5730918323523271, 0.5137759194492076, 0.9183640454192973, 0.2710587719095342, 0.073218572196594, 0.36644293713240916, 0.23498530959396902, 0.3214705920706401, 0.7817720816749059, 0.7359217004247496, 0.3638453974515984, 0.14346901890213692, 0.409319144933025, 0.9836781655896751, 0.6887274793582663, 0.6565853417903896, 0.09115308815315282, 0.17883141884051512, 0.013543690047306711, 0.2513895648663288, 0.9889306991279236, 0.7667836142454583, 0.1642501149878628, 0.982200939047679, 0.09933331730387751, 0.9967725456744967, 0.780592571784447, 0.897005876945876, 0.9527993194024859, 0.8293935774894287, 0.09961114285610073, 0.9666124160511853, 0.8596690665241485, 0.9404355070904641, 0.9022010555858258, 0.11203836425903024, 0.8055544130979683, 0.5134040848046894, 0.32117225703856556], [0.10395532618684533, 0.16649237681666096, 0.27694842181753054, 0.5469299394916459, 0.8258684636161808, 0.14585423689012256, 0.5597421305229524, 0.49360619766732416, 0.5594749633269677, 0.1540339172555094, 0.81356717388572, 0.9456566890916647, 0.899096450863968, 0.5406319044673098, 0.32128041839609844, 0.7309867636335438, 0.07519216205371171, 0.8070631325044554, 0.274957916484315, 0.17672939020197853, 0.9170923618679351, 0.44390475363840387, 0.2842601361757463, 0.662455285626705, 0.774310641993748, 0.5489250834121373, 0.4979159485213184, 0.10143817757794049, 0.05223233210457301, 0.6084878281639291, 0.08397217296808501, 0.0930296052821249, 0.35644359267759695, 0.7503170209199068, 0.6800667274602131, 0.5153095342562182, 0.057240939643044775, 0.23521382784209355, 0.46238952066173034, 0.28826672575024903, 0.9542643244743729, 0.6326523747740527, 0.8252848687770257, 0.10226952938510114, 0.5475133945983558, 0.038668349944942504, 0.5271746802083764, 
0.7971863128926346, 0.5080730951792115, 0.7139995460205235, 0.035075527438267695, 0.5902473612447828, 0.8628031888560845, 0.9664300997082392, 0.5662468716983032, 0.6248107410773339, 0.6428220147678342, 0.46040864866986453, 0.2822650102532448, 0.4224158135964854, 0.014239337965989196, 0.7378752520054214, 0.6519943369061885, 0.03805847104776128, 0.6828483927573121, 0.7395591734039013, 0.29933966192164574, 0.47018485931813736, 0.22404267193236493, 0.4604202628199138, 0.7330767096882213, 0.4300675224064734, 0.3254509919494343, 0.9832750772094692, 0.16839038830719621, 0.47907141302054, 0.5706333216397184, 0.23721399054495862, 0.46619409111542165, 0.6674363511931996, 0.04728528609062399, 0.12050212271082017, 0.2550643345637652, 0.16087710627601737, 0.7178021663576204, 0.44429346334994635, 0.5450403581729426, 0.11527147936628812, 0.6763779473161352], [0.5411358826143802, 0.6694136444222956, 0.14061125199159874, 0.89154211793243, 0.6449943137503733, 0.8463276393346788, 0.07568579359455452, 0.7594014572324701, 0.2602282551895726, 0.4365093832064195, 0.8987698555919504, 0.9669217099629893, 0.17747417965951529, 0.7379801758596262, 0.5292709388836184, 0.1742353170490546, 0.690103510850106, 0.6663877257262327, 0.3900715020153124, 0.591646897358315, 0.33936856182772634, 0.49010227796414196, 0.5919211537771514, 0.5725004370805458, 0.6346504922634901, 0.1534574742000523, 0.25280201425220616, 0.20647266491429728, 0.9999369633117553, 0.2634778562485619, 0.3098707339950576, 0.18877996149525922, 0.9313899212438421, 0.6621573334475375, 0.625056581476026, 0.019623919819141467, 0.2524833604417476, 0.30090597274918773, 0.5145666363513494, 0.3345647383207524, 0.7179900932465052, 0.3272153040371544, 0.5591648065231294, 0.40179441709902164, 0.06216033578983249, 0.489845135397188, 0.3023023145352213, 0.08409035002072873, 0.2075373001300307, 0.4242719297577323, 0.5892404509143129, 0.6062728831841375, 0.44962255128952655, 0.5757590759465453, 0.8198509816150631, 0.9602942888565942, 
0.046388323995318004, 0.8953951286082695, 0.5284980059780261, 0.887725513774816, 0.19343240143272145, 0.4901239635599256, 0.33827242306665706, 0.568430584713855, 0.4537055429350061, 0.47872646486265014, 0.11045003704605694, 0.15519491885313408, 0.5404429569996151, 0.4700576776308467, 0.7022989256529829, 0.9024539894518936, 0.23066920664369805, 0.7159670211222973, 0.38877922368247253, 0.7937868837260693, 0.35390208919708177, 0.45962282059377124, 0.17938214215940362, 0.24463051675049408, 0.8366494800712482, 0.41759494482371273, 0.5648062150802045, 0.4602568754611568, 0.035410742236063, 0.701232415508797, 0.6753127760589952, 0.7656227057834769, 0.3626710964599478]]
bot2_bias_layer_one = [[0.8098133893557861, 0.8517414770903443, 0.09473965415712471, 0.918940989676444, 0.6113615736356325, 0.007990034873106233, 0.20577025349505929, 0.7862640746753443, 0.38240754392495124, 0.6908550112445189, 0.16453336503911364, 0.09819287867172966, 0.5418478998783176, 0.636931125594865, 0.10073757153357521, 0.4332044000355185, 0.0813757949855547, 0.0393453474943255, 0.007994133404271908, 0.03301111458572137, 0.40286490624741567, 0.0019004256908868866, 0.17910307417880678, 0.07524412755766419, 0.24859746577231545, 0.32088145532853196, 0.9064578666099385, 0.7541923841196129, 0.7853754627528752, 0.5826183733168254, 0.7277619600859386, 0.5040503960223371, 0.4394498590599356, 0.4632035299362348, 0.7752974602992608, 0.5396654258674353, 0.6169218098564385, 0.8217138268771912, 0.3552536879753071, 0.7431939338886753], [0.08539645843859878, 0.5245747641975729, 0.06628491515205637, 0.8545236351380201, 0.2606634717631866, 0.33521656013914425, 0.7242509371864242, 0.28479712411489033, 0.7608114457137303, 0.085783445045307, 0.25441353740654893, 0.05772717371837621, 0.08308396340488167, 0.5453058750317734, 0.41601987110196925, 0.4891184701862057, 0.6199389023538219, 0.8353064180578085, 0.12678433964882274, 0.8194529097410522, 0.8256262782666017, 0.4585601605931411, 0.7999085127386367, 0.6455730195676858, 0.3136017886438556, 0.20339566809541398, 0.8809850309483905, 0.6024141037892973, 0.5901809527925166, 0.31378012400181843, 0.4543341633323861, 0.43501418974137074, 0.807537751805766, 0.9149014428501796, 0.4953044270360093, 0.9043209950169286, 0.4975079781368432, 0.4487246995610791, 0.6087603430269607, 0.1919549641347329], [0.762865885020926, 0.3090706747886224, 0.45673132727421506, 0.4691571286845606, 0.9183023896980699, 0.5791134514834484, 0.4659936737662542, 0.09613093061320377, 0.47606211886490735, 0.001452131231395204, 0.3766230683072156, 0.5462295669752207, 0.3512684439293695, 0.17812421183126537, 0.36703813562637566, 0.5230508453577216, 0.818714711480409, 
0.9057582404210482, 0.3479093392995426, 0.7103914121894379, 0.7402821352100469, 0.4671840875926646, 0.7071026031475419, 0.7100227417760628, 0.27134794038049537, 0.46521757440182376, 0.9853851092435608, 0.17410671111691778, 0.6742561371433154, 0.4097015205778288, 0.42673700644444, 0.3540172608947706, 0.2537811487049939, 0.931248510487099, 0.3399840690857515, 0.7448894828255423, 0.4913478677152312, 0.17501076317383701, 0.5333508640678729, 0.2534885625242229], [0.49411993820518973, 0.12325242659376257, 0.978684852616548, 0.21502756477431217, 0.04114123725709895, 0.7420235059322737, 0.5263903863854248, 0.16980122353152227, 0.1187681933362923, 0.6456444399287653, 0.3631675637343642, 0.9118789165273248, 0.17361872244929144, 0.9629527309165361, 0.22826256716589177, 0.0169629424245602, 0.0631871335645211, 0.9071802692469664, 0.1573956690493038, 0.0901553378807669, 0.8568159031772344, 0.38600936635155636, 0.06696654704539795, 0.341258382283226, 0.48282199611761123, 0.1552949020161778, 0.9433544846766335, 0.8827776517674782, 0.49991211519116685, 0.48488254089505056, 0.5119168669056638, 0.0527411113419598, 0.42022318990500507, 0.2805151996612476, 0.07939613050594119, 0.7574260978791433, 0.9160808115396656, 0.9907463496412023, 0.6158277196745737, 0.702701243080784], [0.8508494619886124, 0.568014087369302, 0.10989548775433089, 0.18087009786126507, 0.25032151736761477, 0.18104077085593828, 0.8531177372248228, 0.08434760222412074, 0.927803803804331, 0.8393658884907246, 0.06959872210661566, 0.5389577745739028, 0.024336280547830547, 0.9320375451390188, 0.09409032310097076, 0.4445825435755162, 0.3740200843421779, 0.5081481025290564, 0.15746770905910634, 0.39992016102777517, 0.29184868103126105, 0.6677366698000917, 0.49958007049686104, 0.8458613141091254, 0.8228565101513694, 0.4547971278395334, 0.3451282687463164, 0.39293008355649783, 0.9429631405933497, 0.23200614157557709, 0.8199393514191775, 0.995555550474845, 0.06341007407669386, 0.16487199771983685, 0.012900099610931526, 
0.26306225325579224, 0.11930687216846747, 0.9121693962376946, 0.048350367373389824, 0.8664520959166685], [0.8605521814407496, 0.8308197440237254, 0.03721937701014555, 0.6174264559744114, 0.6469376308324067, 0.7630961955096324, 0.30428593785892755, 0.4437060857626385, 0.14397535162699981, 0.3329827163012262, 0.07144166974003685, 0.01113336052678926, 0.8672680627125757, 0.13223369158739529, 0.5743130421115986, 0.9277423322579507, 0.3970421277031958, 0.37340774857018677, 0.6287234788793563, 0.29719993175509096, 0.8198788898300773, 0.6481429734475234, 0.049979594248697023, 0.07436075246831597, 0.14646047736771184, 0.8852149530456511, 0.11554070259723304, 0.1991678711320668, 0.545729311711771, 0.0687549828775258, 0.515767112311942, 0.47948838052173803, 0.020876983066662302, 0.9077614971479421, 0.1578453721337727, 0.6412105324776644, 0.37856034123817306, 0.008957844788689884, 0.26495548914894584, 0.060018336272638195], [0.6588504343410715, 0.36205201428317546, 0.5068885149818937, 0.18382585763873982, 0.37049176987169286, 0.529561244043408, 0.015862077707304967, 0.9313102349675794, 0.687028153893334, 0.030843273369228874, 0.5379600945004784, 0.730391641827693, 0.17183248703347986, 0.5159022381312026, 0.4903269362545951, 0.42954147383386077, 0.8464062416116934, 0.2430587197853442, 0.543626321631992, 0.5261888090717121, 0.23205984255356382, 0.8619491214002009, 0.8776883737169503, 0.14631234499004853, 0.584673048882789, 0.06619690764422426, 0.8056718473992915, 0.20404938482335422, 0.4276123156486633, 0.9955314771503246, 0.4379246148656435, 0.24441992834699566, 0.6755987728856515, 0.8409752298786368, 0.8141986788702298, 0.6641617762765133, 0.49596218050871355, 0.021066252394865703, 0.07450065572008857, 0.5501805094233319], [0.5180469475960245, 0.48262016215768, 0.7700491412300875, 0.2816227686583571, 0.6962093204808055, 0.09423807980242738, 0.45519042616853533, 0.6302840118488728, 0.30904681346602325, 0.8702846554637275, 0.01381541691481225, 0.5037255361540814, 
0.9930946405099729, 0.5769738380464239, 0.08258033104766305, 0.9899070026496571, 0.9986915539333815, 0.04596764127719921, 0.6510861964993756, 0.7275139994825771, 0.9874440832244797, 0.8527241369336939, 0.05882621282987033, 0.5811428127140147, 0.9967556920953268, 0.4222244037063062, 0.8966023295722705, 0.7050741680594973, 0.9233181811108967, 0.3338060162317543, 0.783903039595665, 0.6548303602838034, 0.47778052919515746, 0.4409919300054047, 0.28975451140123964, 0.42389708425097616, 0.69543213796914, 0.8961599565214918, 0.8146224133059811, 0.09035063842412439], [0.8788580515192743, 0.43779506456624184, 0.24834051736329066, 0.2656797871562575, 0.8638552390603761, 0.33528254604266383, 0.10362292988613564, 0.9966131417454268, 0.8683237835291365, 0.4858912332779536, 0.7402751143663338, 0.00037219688247669414, 0.6455198231593385, 0.7943512683149747, 0.7438556871320743, 0.2996248658172236, 0.4835398449759396, 0.6260928643587179, 0.0675491353083969, 0.009668394132282154, 0.916540898344074, 0.4266367725254826, 0.8254307624064205, 0.39935359821259975, 0.2343614065467451, 0.9451307724816218, 0.875376574334315, 0.2855733980376859, 0.6833031441220883, 0.2717719161210289, 0.9563285678880818, 0.06728157323840556, 0.3938904445571211, 0.8377738390505088, 0.8541254444755476, 0.08028820819958538, 0.5385069202498077, 0.18695409664998786, 0.8056701533645299, 0.616411992721403], [0.45314408351769075, 0.23969856898399144, 0.6300140202645366, 0.7217059557114197, 0.5355034465645064, 0.5084687276832895, 0.08948757446317313, 0.09713601010754735, 0.4594354507074694, 0.3320112320489502, 0.7349427972539172, 0.667524567276875, 0.29091602081800483, 0.2446638165940328, 0.1350229444764557, 0.9855432953086426, 0.06908904668049631, 0.290541988430567, 0.4680651651879427, 0.4074168039818342, 0.856767387556419, 0.4646045098573085, 0.15523007046109005, 0.8615129440003624, 0.6846377679252584, 0.959556124863841, 0.13125539283827603, 0.814233964157434, 0.9907338390525046, 0.05432852552142664, 
0.5732297872966396, 0.578142964086068, 0.14804596608911824, 0.6281797592210248, 0.09117016692611413, 0.7568762295708822, 0.02000418648579616, 0.13279557793975538, 0.9010183455385482, 0.634649805099594], [0.6069099322077773, 0.1639457950040708, 0.6578452952191881, 0.249578401044094, 0.4924686996467802, 0.19561924095740646, 0.34013421123943843, 0.5154486431623382, 0.40061819391037823, 0.9043615734477765, 0.9035455109153495, 0.7835410273235396, 0.6654888746655837, 0.7021594429754926, 0.9468774057875384, 0.9062284796478397, 0.6470579428600212, 0.9224416662718178, 0.8080739507090865, 0.9326232213496454, 0.5605246425220937, 0.02505575147356387, 0.6169226985887685, 0.10362952254227398, 0.6170188729225116, 0.6527348806429845, 0.7235715945126997, 0.1933978719435695, 0.6144437015230525, 0.9506635146433586, 0.45877570401936085, 0.835746629040441, 0.7593567860348549, 0.15618688417496973, 0.09517564397382594, 0.39256298103299325, 0.9086001758356094, 0.1474531207155606, 0.4699278402694409, 0.4988856686304596], [0.8715249390723581, 0.8894713197358379, 0.6988416818510511, 0.39912185337127504, 0.4032911053996532, 0.8392404447907437, 0.9383416416481817, 0.7350365116439551, 0.40572370812990166, 0.26127820377050714, 0.8540114213965291, 0.7985737212607101, 0.45304336971663184, 0.9792776858540826, 0.4795111951865726, 0.436403932346742, 0.903973352335468, 0.058998690041980106, 0.25387695278130074, 0.6685682873607797, 0.3868309734107349, 0.6251111547816574, 0.23828229963567882, 0.9671273302312672, 0.5118195817564001, 0.1074459495192912, 0.607591253372802, 0.7810992314875033, 0.4513321791613364, 0.4123539952099621, 0.4285877558901299, 0.6968580165781417, 0.6671138924892561, 0.9953401885022177, 0.7595165948739764, 0.8420935121340062, 0.9771893337720792, 0.3626770711875382, 0.12834641495255994, 0.9256205451000825], [0.9521776832896389, 0.568650118942922, 0.7971027328487393, 0.8584560566348637, 0.539904928748725, 0.7343103351736533, 0.054240678299801504, 0.30648122835989144, 
0.5739696451169126, 0.8370366763706548, 0.4840169598290923, 0.17825433494343446, 0.8488158004943095, 0.3894252913289179, 0.033127964258894194, 0.7975479261871515, 0.44690809757296757, 0.7239411473642109, 0.3481948184468674, 0.5264498781612205, 0.23259812865274254, 0.25419715777900864, 0.3419556158359649, 0.3541844346455515, 0.6640863756342139, 0.3978737366027687, 0.5079996526320889, 0.21780992860769777, 0.8463962382627102, 0.7190185293467406, 0.6691186727315184, 0.06523118529322225, 0.13890777525599396, 0.35028274074639343, 0.20155736409566138, 0.325433156803529, 0.5818533898868357, 0.6912526619851749, 0.671050967142969, 0.7783299183376484], [0.9280279070094857, 0.955205836937495, 0.4581387308742111, 0.1732619865435081, 0.9742672447423133, 0.4186233358448569, 0.2668135869115733, 0.5439057612044063, 0.11476235127289891, 0.36530511549829725, 0.6516012432574763, 0.3484724898551558, 0.7858739672812046, 0.06700199744412616, 0.5393790725834391, 0.3200450829041954, 0.9887017828942456, 0.07992517175085612, 0.1342377365988564, 0.11939878457191067, 0.959670817525613, 0.5804722490707808, 0.8620530669607042, 0.614868156373877, 0.16719409925539686, 0.8453192573975898, 0.5513939976290743, 0.7115469034179419, 0.6747443022734654, 0.5032502447219555, 0.32954660734103103, 0.5550056232797322, 0.12483467995527397, 0.9620224404981714, 0.1568807651835753, 0.5517479077711502, 0.07599360785880205, 0.007914084901613494, 0.29852214673359656, 0.2205718472149365], [0.5066573293072844, 0.6350882008427062, 0.3984144071961717, 0.9531751518949365, 0.40546595198565094, 0.9202703643215342, 0.015125323015762526, 0.7420947352370252, 0.9643010828420133, 0.5469391311335765, 0.7848947722599662, 0.7043292666113592, 0.3713297754702669, 0.6563582189662892, 0.7628660703586905, 0.5232503421444927, 0.005773812527222533, 0.11451775756745608, 0.6842371091098961, 0.9512078222163812, 0.9424401563565522, 0.2898268573747188, 0.8489887137575722, 0.5494434539748653, 0.970419627380109, 0.48542951814804525, 
0.2132279740372407, 0.8488969438510133, 0.8845824726173773, 0.5292961324069498, 0.6984130458180883, 0.11457292010624887, 0.12450638680082016, 0.24751287402141542, 0.5964915129482805, 0.7921126458247454, 0.7536216194650208, 0.22059309864732313, 0.8876602485484925, 0.9733836079623215], [0.4689559892726014, 0.23456170403457577, 0.2504406718873652, 0.8559361510039482, 0.5219845379732128, 0.7659159879364025, 0.974937227420831, 0.7164009034475675, 0.5242997917161424, 0.256261997121647, 0.30821258863960155, 0.4001341883115167, 0.762750919043169, 0.0693542775540904, 0.31793323980886046, 0.1497171861388359, 0.4909491378564319, 0.8979749145799868, 0.5365559092603434, 0.06149315879487638, 0.12532703562564773, 0.44708374850375177, 0.6972740676118728, 0.45599901187810576, 0.7186819250001037, 0.5911764190030714, 0.03270129437312952, 0.4233858318352648, 0.4178514581958812, 0.608895805493141, 0.8787145863270246, 0.6890365733717072, 0.20988000833828657, 0.2434746988857891, 0.9360549547629158, 0.6486677261847971, 0.5941247721575885, 0.3437227530607314, 0.15260094486631548, 0.15985670640602523], [0.5816859917203133, 0.3320819261351662, 0.7987073703896941, 0.09922786098125669, 0.34724234615219884, 0.9577034405359519, 0.6372262689417003, 0.17955665297880485, 0.7397319325950502, 0.025866479943835174, 0.7818814508813974, 0.04219588100365623, 0.14722283002738323, 0.35531907437264554, 0.6801049099993596, 0.6269849446043878, 0.5522779800163178, 0.2735722940379185, 0.8604933039866235, 0.9383708123399633, 0.08266898642650322, 0.7793985818303281, 0.6606760046003821, 0.48275662786062357, 0.2878608432338855, 0.8015646439464147, 0.19245384462370374, 0.5427035941886155, 0.883768998209872, 0.1869030141803577, 0.9357271391201805, 0.9211049247132806, 0.4352965506358034, 0.5900560079892989, 0.2540206055282104, 0.6318447224411718, 0.5824476587657342, 0.5869940578216224, 0.9689606747380094, 0.7143057261043148], [0.24788708049029184, 0.46571147495942944, 0.21814797880030257, 0.3030353855260457, 
0.2807913835646624, 0.7548849476591624, 0.7500236151189714, 0.3284257760152486, 0.5451098744869302, 0.06455240811004048, 0.3639573225525985, 0.14630618452593325, 0.04212613827865752, 0.17188593802421082, 0.641297582514534, 0.10095729757117489, 0.49786811183881874, 0.9756258712227663, 0.8938346049994649, 0.7920435600386986, 0.9777922902301668, 0.2759601942295812, 0.6655190961508546, 0.11135609714374484, 0.21569913691043352, 0.8400753054147533, 0.9319546453422277, 0.34767006194473093, 0.5870405490098136, 0.6503242348701878, 0.5459911344264388, 0.14773803023343268, 0.9831295656968573, 0.9227361541333012, 0.9091437116053804, 0.5885633232299633, 0.40777974519714744, 0.14018171303255067, 0.6541492501690632, 0.7372493560874991], [0.6345358063532958, 0.34302692167964044, 0.5236775416660151, 0.1301475701764655, 0.5151951465989066, 0.8696500136093763, 0.13323204318901494, 0.5999004646611705, 0.4907426339641221, 0.22726154464863768, 0.6646665804518473, 0.740521368962113, 0.1301832339581407, 0.91804161833915, 0.4756447580285944, 0.722075008436855, 0.20218121256539556, 0.9168700732949776, 0.5628525683981785, 0.338026777301234, 0.8535221674416945, 0.7047788447866906, 0.2393243407444121, 0.15945554952217667, 0.8287695117568514, 0.7925907527676603, 0.18534697882335882, 0.17878675044145798, 0.7498561298731341, 0.9757201424982014, 0.4082408114928029, 0.4087645540463135, 0.5808490813845741, 0.08086662749320084, 0.7110391367561907, 0.2871729328459871, 0.8339150583370282, 0.6841208428799035, 0.35044959384809116, 0.3613338572007667], [0.7289339728832752, 0.7938634730275304, 0.9907409262077357, 0.36326456830686826, 0.18911625985287817, 0.29108456444205666, 0.44845083700548605, 0.9554698960189626, 0.6669380084391604, 0.3180871222334578, 0.32549147497338293, 0.9111087057703284, 0.6102274147312787, 0.42535143844654133, 0.15345132461333344, 0.4849457179359349, 0.30821263448829983, 0.37180947633826444, 0.29345531801363767, 0.9844021434648353, 0.2756324085713858, 0.83214994119903, 
0.3870499393638992, 0.32971643880045454, 0.040239268521841565, 0.44050708100876423, 0.6764263125622123, 0.03910649739666994, 0.6050152302158391, 0.4312043758251195, 0.873976715810414, 0.9642175865993055, 0.49722458357839927, 0.47465739264949625, 0.7427232854457001, 0.23276387653187258, 0.9456550859564535, 0.06494736715669225, 0.1439477102386032, 0.23694955926126715], [0.6511853505888769, 0.1571457598334165, 0.7209733570437836, 0.08759077118215963, 0.8761282682544966, 0.22961741662359947, 0.18078160260783804, 0.05382297291911731, 0.703279321089786, 0.6292742342943655, 0.909323137865147, 0.07445077922850996, 0.6492146917075864, 0.20657647931267342, 0.8636634921460622, 0.8078130728322542, 0.8693972612971849, 0.5856792657189279, 0.7382608670589565, 0.9636426066099292, 0.46039977080564765, 0.3473431993100694, 0.9666562689695674, 0.41696961763264595, 0.7105860905967781, 0.6737407147332477, 0.06328190784272636, 0.6685414491184337, 0.5457056038619228, 0.3082822024778128, 0.7988823708064737, 0.032735836606153534, 0.03270864292020237, 0.1282565270164837, 0.4099368048956181, 0.242260588476907, 0.5551719707920637, 0.6893960533620772, 0.0735910939918144, 0.7068300189717046], [0.7946389023129138, 0.43388791960457573, 0.966478845849244, 0.7532323896747355, 0.9491910336206126, 0.39198905919493376, 0.6676611329071588, 0.3896070997301332, 0.9130629286784185, 0.5394310111014043, 0.8652697103045756, 0.5521417830769716, 0.8325813979242003, 0.18018200127198902, 0.736245122787011, 0.032679391485539666, 0.5701301473435451, 0.9014326193243501, 0.005250420175710202, 0.4423974171688039, 0.60097741526464, 0.6418774777009056, 0.5996677126520846, 0.627379607368136, 0.08266301601783788, 0.5207447598320555, 0.574981230569835, 0.31239150276790606, 0.4131810459118017, 0.09891976233256439, 0.8653923768498957, 0.4691142478741608, 0.08882290339743859, 0.7843547508025277, 0.6247251698719112, 0.5130914210352407, 0.0858750100558211, 0.9398859299899754, 0.9793507998889628, 0.9952154166206595], 
[0.87583651404485, 0.02752850203062507, 0.8038803896202464, 0.8456084294074199, 0.600458212839183, 0.49459351381114947, 0.14411289825626317, 0.8268492886839992, 0.7708422931332581, 0.8902144735043416, 0.29833398939669853, 0.271448687271742, 0.9738878191945355, 0.6762940835559229, 0.8310125422496912, 0.9068199690474809, 0.1740837340698489, 0.5686182389995258, 0.560515921246594, 0.38505094557874153, 0.5093632618995069, 0.2663702112901837, 0.0011746385998198106, 0.9856076363450361, 0.9759122441488239, 0.35634259676953195, 0.6882586006950456, 0.7359372886279811, 0.2699951959279415, 0.6593094745130548, 0.06155274139688793, 0.8220004219217961, 0.5304440001799111, 0.1358943978521775, 0.08175220009947481, 0.6979784402710401, 0.6356154080525677, 0.31843213191658193, 0.24944587182930944, 0.19834217694791434], [0.8223411513188686, 0.01721124527137352, 0.9986847724220971, 0.5676700336573005, 0.2739398688112247, 0.9500611537418001, 0.45673792189232576, 0.26577997904371864, 0.641673412515668, 0.5863545669997379, 0.08400457177860798, 0.5697781832614356, 0.9606952764747008, 0.34766881218276535, 0.7835183604472876, 0.6622765802489494, 0.7184960335616533, 0.9207716768874361, 0.5348225164526031, 0.10521998825407564, 0.5053281923443308, 0.23198453766931737, 0.756049611202406, 0.2827801354532521, 0.5882681497088121, 0.14797102797060802, 0.6205053595257013, 0.997208837920931, 0.07635359457949231, 0.2320246230422257, 0.4111802903587317, 0.2333068930299097, 0.5605125554349479, 0.8162851893936793, 0.09007270121924416, 0.30085298636947655, 0.5104071562602416, 0.8908287524116258, 0.6389692779029484, 0.26435215064022244], [0.36548882941474325, 0.6851095404026911, 0.5477223033235684, 0.012679282134705705, 0.9422826101259096, 0.03748770342115626, 0.16653195708122637, 0.979872150605543, 0.6199914139088496, 0.674501439837266, 0.4700830745683223, 0.765303432461688, 0.8337535726957264, 0.9963090216064795, 0.626284678783091, 0.013385656636263121, 0.6467153752369224, 0.7296351982548293, 
0.06706051735866092, 0.1715964851791657, 0.14482492244136735, 0.7781970217638168, 0.3235540817633742, 0.5440492265347966, 0.48153609998020064, 0.4725811700143572, 0.08279666197344604, 0.7575454174984818, 0.12333676495863266, 0.8145924378934254, 0.19609563814458786, 0.45574179548811833, 0.9333816820636562, 0.56122481223487, 0.6738874120215674, 0.537687184867085, 0.8348622695759411, 0.3862188538315737, 0.7847937507217616, 0.49712275366550096], [0.8234156973312248, 0.109824587709478, 0.9369845557939008, 0.8431420431468502, 0.8373537329735404, 0.5805731090389791, 0.42794908772781004, 0.9615200415980961, 0.1055933125644618, 0.5434468081966417, 0.7085450592195061, 0.4742097784437962, 0.18048523883764578, 0.22167703769247815, 0.3890647235688076, 0.7655144728493144, 0.023731436168792253, 0.5934359896505481, 0.7829012363328235, 0.5615878440152606, 0.0974805403977036, 0.44201388325523505, 0.15054710077413558, 0.08391222604610449, 0.4471511682359015, 0.7867804400064397, 0.31370026944743934, 0.0636168733849577, 0.5234335984620849, 0.979736098923061, 0.25265645159260575, 0.6788496184126503, 0.7233752879440516, 0.16758095015349594, 0.3026150985234426, 0.869686042459066, 0.4552369301514789, 0.17173556179652316, 0.19802419774791802, 0.8366556395643258], [0.6098325547213952, 0.903643628240506, 0.21960707216851316, 0.41779915216515595, 0.1612631050906188, 0.417034513262561, 0.482540414044201, 0.6830103324096702, 0.13602777684566103, 0.910302359364505, 0.681968057932578, 0.8780435382307683, 0.5359319501121278, 0.7474505814456893, 0.620975607401, 0.5376538647601056, 0.6667918926491202, 0.7255753486826546, 0.006155632454144078, 0.10169275025312352, 0.18253512004193317, 0.31635520010779916, 0.03214737690522251, 0.29268278131260483, 0.58425305876513, 0.830053937889353, 0.38425628661319633, 0.5700124585218511, 0.7889376730022822, 0.7756114725425425, 0.20388939654399585, 0.8028838567438665, 0.7374456348726304, 0.642247302378858, 0.5734759541534682, 0.3338544587117074, 0.7610737205870047, 
0.24727892236141824, 0.9124634807294387, 0.3977493754335223], [0.6812884474776483, 0.9813512325555763, 0.8255539757690348, 0.737756721232419, 0.7678783996572204, 0.3560407695970079, 0.6588788020749292, 0.21958638250885143, 0.14741669941661495, 0.46943491491279254, 0.2603329518256807, 0.1341301839025102, 0.7111127434219008, 0.08368952354863157, 0.5795029169996677, 0.035215943380001, 0.8195967311487706, 0.49471896897699685, 0.24302022699085635, 0.4968042513509763, 0.9306952144610745, 0.29695028076451246, 0.100730101407013, 0.23772613939778475, 0.4043178341271537, 0.15873583522050405, 0.41157572732173453, 0.9345665597099909, 0.5499235771630597, 0.9096437566821151, 0.07304516693871865, 0.6924613375457626, 0.824166583299927, 0.07919250227786567, 0.9231978129700513, 0.5006263188282076, 0.24670527524450403, 0.2299508791316447, 0.29306220528303706, 0.21084487101723492], [0.2082658877747432, 0.6213285052993952, 0.7551358024376907, 0.3291649324710396, 0.6705654954885912, 0.14930928374229568, 0.2510821077398131, 0.2680771170040087, 0.42316944049606464, 0.8307688139262213, 0.5955569750926396, 0.672633430657083, 0.8115332147038677, 0.2660274699098675, 0.3095607159854986, 0.638210688792907, 0.11909953407800267, 0.21511771176336836, 0.2107125167147308, 0.2953749733005996, 0.31643441797506766, 0.7845983113109912, 0.8749249058619768, 0.2749583313492685, 0.6765856769420999, 0.5150749473182297, 0.6802535572137037, 0.9977660051798725, 0.7040217959385355, 0.2160158969946483, 0.30949991514692476, 0.2276882963329514, 0.325478957507062, 0.5095598789407816, 0.02675807605573355, 0.09453851779937794, 0.9656393499517198, 0.5388140681214647, 0.5578894139784234, 0.6772395290632756], [0.07142081562716251, 0.3126543040051768, 0.2005276693434076, 0.6781953157960239, 0.9160504569975377, 0.11822037980827393, 0.8472143706047415, 0.07545864636587418, 0.22415009963976973, 0.9027916757872158, 0.24633984685021693, 0.45911576968958623, 0.45843344713323686, 0.4136933810379815, 0.9436725387896446, 
0.5222183599351876, 0.4182846836599011, 0.6586512548896375, 0.5826407923726015, 0.22447562496285767, 0.9193030166257411, 0.7708415378724407, 0.13250274736052625, 0.8186252873436881, 0.2259291221337354, 0.9318243851927942, 0.6315829823758565, 0.38942745539008294, 0.3171382840494441, 0.6416357616714589, 0.9080362475714496, 0.6787133397745797, 0.29837850839327973, 0.15068821958186818, 0.4575307858522435, 0.6578960268251981, 0.515767868845725, 0.24161571937253434, 0.19177634959704215, 0.10369902665053732], [0.6324544756970223, 0.6793159735264874, 0.11482868550150915, 0.46426604671594074, 0.39511920762951935, 0.22834104744379724, 0.612430907556584, 0.23437574802513106, 0.13958548599363263, 0.505310900685364, 0.9010324847847135, 0.8634240435858092, 0.06237151467238333, 0.3054421376229304, 0.225950089562021, 0.6170906745263235, 0.08032636784965286, 0.10172109547969577, 0.006682903571774035, 0.015251761737707037, 0.17884944602377995, 0.02037651553679909, 0.02837996351883343, 0.3314074435155062, 0.5242236414200451, 0.7634212006329721, 0.0628859055331441, 0.7773919558819311, 0.8337126221331125, 0.5014597274733884, 0.9705424216029284, 0.32108541965150184, 0.050792469977848365, 0.8062972945963173, 0.5429640840662834, 0.15564073704015546, 0.19116114906643467, 0.2282085275543776, 0.7483468223720382, 0.6970882229494724], [0.04469820010715808, 0.4347951515138332, 0.218197086765204, 0.4560729462285865, 0.5642925135417707, 0.252624843948104, 0.9945780480781895, 0.8340665087941155, 0.37549849396986457, 0.3768934737181936, 0.674468428151845, 0.31946918993317985, 0.6862753438799273, 0.3957290427339315, 0.902010240143378, 0.11970420898801226, 0.04241747018068043, 0.9753654120952318, 0.8581981693932066, 0.33051639652451004, 0.9530877268450173, 0.6776717850908859, 0.4928945429286711, 0.6145433165185367, 0.16649827416188623, 0.5117864707089749, 0.8876266420269143, 0.05253304780937029, 0.0553461979576938, 0.11212581490534734, 0.9517976114926301, 0.26076247488014803, 0.9065763488363033, 
0.5458317700056052, 0.4786619822622107, 0.6762276363227591, 0.4093985936614447, 0.2928342977082544, 0.21287618933968167, 0.7398485894578113], [0.2782466584201275, 0.8432713190634493, 0.12559249652900029, 0.8558841700632275, 0.10842945756013933, 0.44748722658106654, 0.1461333495652094, 0.3588365827639678, 0.3265049424689562, 0.7437918892601473, 0.8973360813895144, 0.43123883110698935, 0.3176817234760144, 0.7236463962695089, 0.7705252641458651, 0.37952595983537407, 0.4442580290179454, 0.10856528263664378, 0.6831443682774723, 0.8929622751319906, 0.9289112746815954, 0.3911102493800739, 0.5713945049291206, 0.014724642437559221, 0.7870753675366067, 0.17197074734802265, 0.5331215907518516, 0.7433629760960199, 0.02051728456634183, 0.6515145612226493, 0.08220076560870682, 0.002399850450769292, 0.5071720297053005, 0.36639921979382406, 0.5499734347838079, 0.4311264543448624, 0.7452022672851648, 0.8508706587415187, 0.19771390244271492, 0.06453223412621767], [0.4592137946098719, 0.16202660046633088, 0.1459727657006602, 0.6140180588209179, 0.010179720248950308, 0.7128596787462611, 0.09609958345457459, 0.12613038674236932, 0.5672899205676033, 0.22533089916152393, 0.5816509389914524, 0.4257105258804075, 0.43802564454926207, 0.5382970742180392, 0.516397084741296, 0.49695294133486734, 0.16663684135770906, 0.09093066276756878, 0.14185326462886627, 0.5001259560461664, 0.11662087686068767, 0.4074995032446651, 0.6877164778766831, 0.4968444632081547, 0.022151397667231065, 0.5586918736909054, 0.1109551920662386, 0.9140847716897371, 0.8027049300705342, 0.22626345919001967, 0.7567520028140299, 0.23307705225083164, 0.9227896415883015, 0.5575522756385357, 0.529827974201969, 0.31987854560530515, 0.2837698491648073, 0.8237100252180363, 0.9423517052479605, 0.13443820766431647], [0.4150280350190034, 0.8698134388122423, 0.9570751500571373, 0.6324386756998756, 0.540111467948902, 0.7501445442717266, 0.2038590533999719, 0.5077494377731566, 0.23821070097877806, 0.7088567561159091, 0.35323508073082643, 
0.40235978330900213, 0.7032137822318082, 0.10615468450863319, 0.9680127297212252, 0.8729873580994634, 0.7392888577955659, 0.6861836826962242, 0.9131418618571678, 0.8172800393680792, 0.36215317281223647, 0.23628432010391454, 0.1464060068035523, 0.04647189428641518, 0.40028503313010044, 0.18099079571174637, 0.5529553114755774, 0.4177706583515033, 0.3286109656371585, 0.28825990328074247, 0.7424420878020256, 0.3432294282477496, 0.4600665973732678, 0.5658934554290154, 0.9434976715339716, 0.1739640812520986, 0.34358878094625855, 0.5143931061095723, 0.27900054820262277, 0.18179786642492035], [0.16855142511976495, 0.9155989468362578, 0.9135444188168159, 0.5818483248864036, 0.2487427294440444, 0.9253801944333891, 0.46302503154647523, 0.7269516722260063, 0.6833946930800607, 0.6976223376940894, 0.31676850539563817, 0.5733884823834442, 0.011816321829051346, 0.37746432636975125, 0.7106673128540459, 0.22586371612852296, 0.6286098125961425, 0.17404503083540146, 0.8402430847542848, 0.45880682244027404, 0.20141473225355178, 0.30439052710458825, 0.1731908356570213, 0.7954600548549043, 0.42194828165982445, 0.7212802219673307, 0.5424923634449412, 0.5469549191601002, 0.25542126202724147, 0.9347186552666736, 0.3041364988036934, 0.26674053410825704, 0.42589256613960746, 0.012884253220725617, 0.45276310973196154, 0.3633973526078108, 0.2987063193294509, 0.317888455909898, 0.29737346106642115, 0.4372171368726302], [0.2271090754638142, 0.5309728362792387, 0.2593057991332315, 0.18013544687155658, 0.6600641828558227, 0.3486689351479232, 0.3406944475314798, 0.763291684240361, 0.7803147339311878, 0.3802305195166227, 0.962137552109872, 0.1509547736859962, 0.30942023182259826, 0.5171483278303478, 0.7909210325877803, 0.5931385136890415, 0.32640257718684385, 0.6891755346055273, 0.8549943832365773, 0.35883760843415013, 0.08193107988807535, 0.6080605677201576, 0.7709581550465354, 0.6540982798797553, 0.5038627642813266, 0.9322070898149913, 0.9357319143385618, 0.6923004055151549, 0.33027159411109996, 
0.5489032853146347, 0.5398929634388292, 0.37652470730882204, 0.33689297037378607, 0.09135179414065175, 0.31671459721690143, 0.9753479181577791, 0.38903439786204386, 0.760119647424914, 0.06713642379332574, 0.8565669254594629], [0.18724387435131085, 0.9170066669944795, 0.780933987208106, 0.8835773348396264, 0.7650563132841777, 0.025753662038066616, 0.609426808428248, 0.16839899099808087, 0.024264587185609865, 0.3159173455860418, 0.5641918768773387, 0.4311485836167678, 0.5539594796550967, 0.5780337912893986, 0.6808518711920258, 0.7766674267158453, 0.8320487790690319, 0.841932779808393, 0.5685950108492716, 0.012172388419269375, 0.524533560517297, 0.28140540194948727, 0.09669069977952338, 0.19559998117260258, 0.13565739540524502, 0.05363275005087953, 0.9612430199261067, 0.963628483187374, 0.5210985926274203, 0.24400396026381, 0.6420115395966591, 0.8306282123813339, 0.2992129181235652, 0.24610381364471878, 0.6319786007795268, 0.13871841265891838, 0.9994195050901726, 0.9078943585426309, 0.495767369774729, 0.9628394252670023], [0.31900780238452087, 0.05725297212771496, 0.29149175660036997, 0.6118214909923858, 0.24840714327465507, 0.22960830611003102, 0.9848033875757, 0.05285737082373987, 0.8270411642358899, 0.6305819366521195, 0.8741840127430542, 0.6372309926568828, 0.7806552050346645, 0.14022377654798368, 0.9213693034059838, 0.048843066312338035, 0.25088779551906903, 0.9573214204585015, 0.828670464759275, 0.25521903490857856, 0.5944102274168471, 0.3624096556612033, 0.464857310847948, 0.4073483595152312, 0.609650278258684, 0.3987275955644709, 0.31816029816843205, 0.832326411073587, 0.4124591654044415, 0.4919283110958538, 0.7813752776078279, 0.42629658934580006, 0.8025311646881463, 0.20791740538216974, 0.31762209274177833, 0.15147723732241003, 0.8531246520213348, 0.6913758184226751, 0.295724882041824, 0.3447478857148104], [0.8616611768440776, 0.3282408156857918, 0.6132464769582574, 0.34414852094446324, 0.08679488076004227, 0.059808803392155, 0.21942401786390942, 
0.79508659771392, 0.6438220844670475, 0.644876132904172, 0.09128684409205956, 0.5662056096860411, 0.17703057170940573, 0.8830040287739771, 0.10457803861865134, 0.46332973558776225, 0.04186948769275123, 0.6489619224425696, 0.15956230405708594, 0.44716163299879486, 0.49442159695001564, 0.06797656715938838, 0.8497685940531968, 0.6036038672050943, 0.14287324838232118, 0.08239586950682654, 0.07508434658422769, 0.8581056946726642, 0.874502968774891, 0.27431890208331244, 0.82187052005187, 0.9676752567669016, 0.8557477404669347, 0.38375979031696295, 0.3380947743581926, 0.6340724415768388, 0.7991534930751144, 0.33222072905407884, 0.9474768205115931, 0.38142961711241774]]
bot2_wieght_layer_two = [[0.6336453197101171, 0.5815279574995987, 0.9783582518065754, 0.9311996271014021, 0.5585205390729819, 0.9248230923731571, 0.1365261630297262, 0.11282089440218246, 0.11911170003540716, 0.3367620961590869, 0.0447727074811507, 0.7149466902962044, 0.10663268361153966, 0.3301421193006562, 0.4538508350234438, 0.13182419389078714, 0.3589590701190779, 0.8742648145080278, 0.04314633869242679, 0.9692963258303389, 0.6117500629463366, 0.9575316114473218, 0.5690446247987205, 0.004210304032849943, 0.026447270750709406, 0.5542693006886962, 0.920503818556026, 0.09625410359502085, 0.07976707963890828, 0.8255874532821929, 0.25366326008936735, 0.43411747744952445, 0.9065898390608346, 0.7215201879173707, 0.7574948279379808, 0.44798304139587874, 0.11228245677130322, 0.7655660321419064, 0.8484238520654066, 0.7808724965576286], [0.3611092540974079, 0.33992617197953934, 0.1923825332200515, 0.5450342842110734, 0.45721885212725644, 0.15012225832228354, 0.9338113770484332, 0.5172033380586507, 0.6754851806407343, 0.8134304836276238, 0.15800989125745513, 0.8600652101682541, 0.8067627570497002, 0.8214345915402529, 0.5150472632030321, 0.5488320068582001, 0.11357793009909045, 0.798543589161052, 0.7684908865498689, 0.17749232357782507, 0.4972319418120771, 0.06000333623068166, 0.7188184501851622, 0.7070920020231879, 0.37475045415533137, 0.5518911943398483, 0.27108012482888477, 0.32665482526180256, 0.1566228760341123, 0.6291041824160525, 0.11408831427371624, 0.23944663518952014, 0.7648402477028073, 0.9669808753255765, 0.03531282907083033, 0.5294092985120701, 0.93391029437854, 0.2267005737206167, 0.5498651175764105, 0.20361755424682582], [0.3044297748834365, 0.9845051843274087, 0.6951871891641187, 0.6723778962065313, 0.3104687660378279, 0.3925936849411267, 0.990705785460751, 0.8086462261914736, 0.8095353713852532, 0.19849180546084144, 0.002695920735379609, 0.7654545040731175, 0.996497479006508, 0.0033045493116468094, 0.31290087989211246, 0.9504126487147433, 
0.04506718901227513, 0.4793313117756284, 0.1665705969003899, 0.31062085172712484, 0.9581633109922111, 0.137729562559314, 0.46710523276554794, 0.09374956844366544, 0.6553544111472761, 0.9400696381171181, 0.06351310703467883, 0.6716932764868131, 0.5006557179933356, 0.44790028935613146, 0.6704610124095887, 0.3263490630315795, 0.7590891960562753, 0.6701487335658455, 0.44182450355543423, 0.9493466226784659, 0.09740221917603575, 0.23192630340743803, 0.27451355765866126, 0.4850434187560305], [0.9675899050026648, 0.026624053210347665, 0.20488948247584626, 0.5737309271316858, 0.8263195259168915, 0.24677246592268398, 0.984556730109338, 0.9554606047297811, 0.5510542526157304, 0.7160368370741722, 0.668358375540984, 0.2616904868519714, 0.48542329725619615, 0.893905663498931, 0.6278571570819299, 0.6535147945640112, 0.2796843217382424, 0.21347445097487883, 0.4933696551154857, 0.28747532182669866, 0.4355837727800831, 0.04163303579622346, 0.4154805458555002, 0.39165997849829637, 0.9080353572924138, 0.14759122933740643, 0.4623570701322548, 0.39249748625429826, 0.08160943833181888, 0.18879016598206033, 0.1394208966313868, 0.38058215699102715, 0.3278658296206354, 0.7224690800965092, 0.11462763591554703, 0.14432189122264683, 0.7231473628428357, 0.5699111342070733, 0.26889658623416224, 0.34148826261659715], [0.6633081242331987, 0.2945832661864549, 0.22022689125747974, 0.42383401397602716, 0.48995728309176667, 0.6166490991474116, 0.5190813277558259, 0.18074155648535595, 0.14634099956724766, 0.6874362664823955, 0.8607528566061163, 0.16482714967558298, 0.04077687543183128, 0.7411618964712264, 0.868221114658625, 0.6731385034729073, 0.7771968903834203, 0.3089724507610647, 0.19586024535607371, 0.7790775458419115, 0.38330165380320524, 0.014681068132437858, 0.15071262790995132, 0.33990628784076415, 0.09980253431314468, 0.8787378054802036, 0.21142330201455317, 0.6948828375771445, 0.5609147904220864, 0.90358234609459, 0.944555669753058, 0.5362746029590083, 0.020126756504711296, 
0.5664305976639811, 0.18559484385564162, 0.09824076166642559, 0.8201583867955566, 0.9931332324620878, 0.5217749176414617, 0.9781017240777914], [0.9222093613848612, 0.44178860271197573, 0.35848418283937833, 0.6444353264761266, 0.35883736496180985, 0.37100760060642524, 0.8879547439733411, 0.37779812928909173, 0.18385869912904795, 0.5048577093349206, 0.3541041223049938, 0.579468143446369, 0.6659504869781362, 0.7863178729452257, 0.07030918453290191, 0.5179337616222077, 0.47647210879032464, 0.8986331769949558, 0.5780848679249226, 0.965084170346359, 0.6663683285749079, 0.7062062438466865, 0.8219855613251392, 0.2927110201710261, 0.04614347473050995, 0.28341207764473597, 0.10439662718354592, 0.587178985431056, 0.2759538726112184, 0.10177321622404623, 0.6752817491717018, 0.8411024002204998, 0.7963708670579901, 0.0773872952623671, 0.9717134303968813, 0.5845126708605527, 0.4339105235220999, 0.9836377482890631, 0.20022090586528007, 0.8722753982604928], [0.2582351732492406, 0.7445262736389326, 0.8575608777904854, 0.7180412846134171, 0.6377287394706945, 0.08730707469398202, 0.502149530282621, 0.15985361038426593, 0.5716076929631203, 0.08583030191964047, 0.4299503865757748, 0.7082236353220532, 0.5400764469171381, 0.43350493356916764, 0.5788975014694491, 0.8011953023592024, 0.30869129951519103, 0.752260894172023, 0.8160253046060574, 0.40091117174366186, 0.19771712116214912, 0.7079237135827079, 0.9255189040535339, 0.5303825943902812, 0.10213138524066234, 0.40895117659576086, 0.7978451097405125, 0.6776790695639946, 0.4716464077979332, 0.9022115277437954, 0.1864844945880969, 0.6321656106631081, 0.9322701012108101, 0.6691549155080079, 0.7283134734824626, 0.623499955903299, 0.1980905594979241, 0.8453449679198455, 0.5266198172903302, 0.7011530352237703], [0.4916472429452101, 0.7755978216175891, 0.02673154637879549, 0.4487704919639537, 0.16432589759352823, 0.8637432962889766, 0.1605551581872865, 0.6217873173190257, 0.34593833707369803, 0.36355301615411384, 0.9717918996047148, 
0.9916347430511897, 0.062035606885969874, 0.2092569314799675, 0.8953457527647722, 0.9200000753767215, 0.24683127393914528, 0.6448815162024774, 0.3970142239193638, 0.22440152134005353, 0.4640382940963115, 0.47663380143082656, 0.9077253978073108, 0.8941601676686831, 0.22298055832089414, 0.37874460267929677, 0.46978172381803585, 0.8276055022287891, 0.8521339488646205, 0.6210825658757823, 0.9586565852163798, 0.7708791525409106, 0.6540897641295187, 0.5142955460653582, 0.7797852198601628, 0.7919679292412594, 0.4323460200283532, 0.8447547244488893, 0.8599531030592226, 0.3563496090104019]]
bot2_bias_layer_two = [0.3481263442264637, 0.22984188326620958, 0.6486293150858051, 0.07936269641292215, 0.13484299781025555, 0.266678436668928, 0.011535493357277549, 0.4578257222386709, 0.32753909722803953, 0.8556378842415199, 0.18347930848961558, 0.3975037529817238, 0.3580120752173289, 0.09617133314378257, 0.7425666868142676, 0.5020603588315647, 0.3858382724512246, 0.6043996189072306, 0.3981621997628003, 0.31285084087688575, 0.40247458095338895, 0.6547346305723953, 0.5245280895956188, 0.9833997829021797, 0.9678530735849864, 0.9478987675202447, 0.5545781459152567, 0.825013019441155, 0.7760771254735672, 0.9403254615846827, 0.646937173069365, 0.123850731804237, 0.09005064159805887, 0.30755047595615703, 0.20438774909602841, 0.5465572082581296, 0.6989976890988027, 0.8291206243440139, 0.8873119632524405, 0.23749490834143994]
bot2_wieght_layer_three = [0.10561388989115106, 0.031165178509474534, 0.8387913898303738, 0.8480730511650196, 0.35082170005922675, 0.3756419879214141, 0.029963506433908882, 0.07147845254336738, 0.2845883796087588, 0.9660129440216177, 0.9828514118611299, 0.2296361354551355, 0.4889795335791779, 0.195075175308713, 0.10496073826152352, 0.5140819116517243, 0.40380429590126443, 0.592530704368867, 0.3664721643052896, 0.29349835872054186, 0.41632936859409797, 0.9179650145412076, 0.3911136897891463, 0.29911641518598997, 0.37326688497277927, 0.49306619394199125, 0.07199694908336796, 0.5251845902873932, 0.34212548587182623, 0.5718443938834498, 0.33715275395523536, 0.4998674256604355, 0.572074617736516, 0.6808790865934756, 0.8722059144594074, 0.1484082297616055, 0.5064040861444984, 0.9515800600399794, 0.7513845053079572, 0.45673547236238443]
bot2_bias_layer_three = [0.7002550764630685, 0.3316434478695953, 0.3325917734910845, 0.26618472440546936, 0.8979923057084386, 0.2842944093917992, 0.4138402152442202, 0.8468844802963897]
bot2_fitness = 66.50771442853161
bot2_num = 2
| 12,586.444444
| 72,305
| 0.850624
|
4a09bce66e2657721252a9dfb0b252080db584c7
| 3,837
|
py
|
Python
|
gcloud/tasktmpl3/validators.py
|
springborland/bk-sops
|
a9057672c10efb5f2414a805a30ead4092429c76
|
[
"Apache-2.0"
] | null | null | null |
gcloud/tasktmpl3/validators.py
|
springborland/bk-sops
|
a9057672c10efb5f2414a805a30ead4092429c76
|
[
"Apache-2.0"
] | null | null | null |
gcloud/tasktmpl3/validators.py
|
springborland/bk-sops
|
a9057672c10efb5f2414a805a30ead4092429c76
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import ujson as json
from gcloud.constants import TEMPLATE_EXPORTER_SOURCE_PROJECT
from gcloud.utils.validate import RequestValidator
from gcloud.utils.strings import check_and_rename_params
from gcloud.commons.template.utils import read_template_data_file
class FormValidator(RequestValidator):
    """Validates requests for rendering a template's form."""

    def validate(self, request, *args, **kwargs):
        """Require a non-empty ``template_id`` query parameter."""
        if not request.GET.get("template_id"):
            return False, "template_id can not be empty"
        return True, ""
class ExportValidator(RequestValidator):
    """Validates the request body for a template export operation."""

    def validate(self, request, *args, **kwargs):
        """Require a JSON body carrying a non-empty list under ``template_id_list``."""
        try:
            payload = json.loads(request.body)
        except Exception:
            return False, "request body is not a valid json"

        id_list = payload.get("template_id_list")
        if not isinstance(id_list, list):
            return False, "invalid template_id_list"
        if not id_list:
            return False, "template_id_list can not be empty"
        return True, ""
class ImportValidator(RequestValidator):
    """Validates an uploaded template data file before it is imported."""

    def validate(self, request, *args, **kwargs):
        """Check upload presence, the ``override`` flag and the file's template source."""
        upload = request.FILES.get("data_file", None)
        if not upload:
            return False, "data_file can not be empty"

        if request.POST.get("override") is None:
            return False, "override can not be empty"

        parsed = read_template_data_file(upload)
        if not parsed["result"]:
            return False, parsed["message"]

        template_data = parsed["data"]["template_data"]
        if "template_source" in template_data:
            if template_data["template_source"] != TEMPLATE_EXPORTER_SOURCE_PROJECT:
                return False, "can not import common template"

        # rewind so the actual import step can re-read the uploaded file
        upload.seek(0)
        return True, ""
class CheckBeforeImportValidator(RequestValidator):
    """Validates an uploaded template data file ahead of the pre-import check."""

    def validate(self, request, *args, **kwargs):
        """Check upload presence and that the file's template source is importable."""
        upload = request.FILES.get("data_file", None)
        if not upload:
            return False, "data_file can not be empty"

        parsed = read_template_data_file(upload)
        if not parsed["result"]:
            return False, parsed["message"]

        template_data = parsed["data"]["template_data"]
        if "template_source" in template_data:
            if template_data["template_source"] != TEMPLATE_EXPORTER_SOURCE_PROJECT:
                return False, "can not import common template"

        # rewind so downstream handlers can re-read the upload
        upload.seek(0)
        return True, ""
class GetTemplateCountValidator(RequestValidator):
    """Validates requests for template count statistics."""

    def validate(self, request, *args, **kwargs):
        """Ensure the ``group_by`` query parameter names a supported grouping."""
        result = check_and_rename_params({}, request.GET.get("group_by", "category"))
        if not result["success"]:
            return False, result["content"]
        return True, ""
class DrawPipelineValidator(RequestValidator):
    """Validates the request body for drawing a pipeline layout."""

    def validate(self, request, *args, **kwargs):
        """Require a JSON body that carries a truthy ``pipeline_tree``."""
        try:
            payload = json.loads(request.body)
        except Exception:
            return False, "request body is not a valid json"

        if not payload.get("pipeline_tree"):
            return False, "pipeline_tree can not be empty"
        return True, ""
| 31.975
| 115
| 0.665624
|
4a09c001ece36ee46492be68949dc635aa05d858
| 192
|
py
|
Python
|
iniciante/python/2160-nome-no-formulario.py
|
tfn10/beecrowd
|
1ebf19ca9a253eb326160f03145d20be33064969
|
[
"MIT"
] | null | null | null |
iniciante/python/2160-nome-no-formulario.py
|
tfn10/beecrowd
|
1ebf19ca9a253eb326160f03145d20be33064969
|
[
"MIT"
] | null | null | null |
iniciante/python/2160-nome-no-formulario.py
|
tfn10/beecrowd
|
1ebf19ca9a253eb326160f03145d20be33064969
|
[
"MIT"
] | null | null | null |
def nome_no_formulario():
    """Read a name from stdin and print YES if it fits in an 80-char field, else NO."""
    nome = input()
    print('YES' if len(nome) <= 80 else 'NO')


nome_no_formulario()
| 17.454545
| 37
| 0.625
|
4a09c0b8b83d08d50da4727ba0892b0db77910f5
| 3,679
|
py
|
Python
|
datasheet_generator/datasheet_generator.py
|
peterpolidoro/HRIM
|
148a6a5f88543ad1b62efecdccf7bbb2b744877b
|
[
"Apache-2.0"
] | 32
|
2018-02-05T15:32:19.000Z
|
2019-02-14T03:16:38.000Z
|
datasheet_generator/datasheet_generator.py
|
peterpolidoro/HRIM
|
148a6a5f88543ad1b62efecdccf7bbb2b744877b
|
[
"Apache-2.0"
] | 24
|
2018-02-06T15:41:56.000Z
|
2019-02-18T15:42:35.000Z
|
datasheet_generator/datasheet_generator.py
|
peterpolidoro/HRIM
|
148a6a5f88543ad1b62efecdccf7bbb2b744877b
|
[
"Apache-2.0"
] | 17
|
2019-02-21T17:27:31.000Z
|
2022-01-21T02:28:16.000Z
|
import argparse
import subprocess
import xml.etree.ElementTree as et
from defusedxml.ElementTree import parse
# Component categories accepted by the --component_type CLI option.
# NOTE(review): "converyor" and "forcetoque" look like typos for
# "conveyor"/"forcetorque", but renaming them would change the public CLI
# contract — confirm with maintainers before fixing.
component_type_array = ['arm', 'battery', "camera", "converyor", "depthsensor",
                        "encoder", "force", "forcetoque", "gasdetector",
                        "gps", "gripper", "hygrometer", "lidar", "microphone",
                        "mobile", "motor", "rangefinder", "rotaryservo",
                        "thermometer", "torque"]

if __name__ == "__main__":
    # CLI: the component XML spec file, a logo file name for the datasheet,
    # and the component type (restricted to the categories above).
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', help='file name')
    parser.add_argument('--logo', help='logo file')
    parser.add_argument('--component_type', help='component type',
                        choices=component_type_array)
    args = parser.parse_args()

    # Parse the component XML; defusedxml.parse guards against XML attacks.
    tree = parse(args.filename)
    root = tree.getroot()
    company_name = args.logo
    component_type = args.component_type

    # Load the LaTeX template into which the extracted values are spliced.
    with open('latex/template.tex', 'r') as content_file:
        content = content_file.read()

    # Accumulators for the bodies of the three LaTeX tables.
    string_concepts = ''
    string_characteristics = ''
    string_features = ''
    index = 0  # row counter used to alternate grey shading in the concepts table
    for prop in root:
        tag = prop.tag
        if str(tag) == "generic_specs":
            for x in prop:
                # shade every other row of the concepts table
                if index % 2 == 0:
                    string_concepts = string_concepts + \
                        '\\rowcolor[HTML]{C0C0C0} \n'
                name = x.attrib["name"]
                if "unit" in x.attrib:
                    unit = x.attrib["unit"]
                else:
                    unit = ""
                v = ""
                for value in x:
                    v = value.text  # keeps the text of the LAST child element
                name = name.replace("_", '\_')  # escape underscores for LaTeX
                string_concepts += str(name) + ' & ' + str(v) + " " + str(
                    unit) + ' \\\\ \hline\n'
                index = index + 1
        if str(tag) == "component_features":
            for x in prop:
                name = x.attrib["name"]
                unit = ""
                if "unit" in x.attrib:
                    unit = x.attrib["unit"]
                # defaults used when no value_min/value_max child is present
                value_min = 0
                value_max = 0
                for value in x:
                    if value.tag == "value_min":
                        value_min = value.text
                    if value.tag == "value_max" :
                        value_max = value.text
                name = name.replace("_", '\_')
                string_features += str(name) + ' & ' + str(
                    value_min) + "/" + str(value_max) + " " + str(
                    unit) + " & " + x.attrib["description"] + ' \\\\ \hline\n'
        elif str(tag) == "component_specs":
            for x in prop:
                name = x.attrib["name"]
                unit = ""
                if "unit" in x.attrib:
                    unit = x.attrib["unit"]
                v = ""
                for value in x:
                    v = value.text
                name = name.replace("_", '\_')
                string_characteristics += str(name) + ' & ' + str(
                    v) + " " + str(unit) + ' \\\\ \hline\n'

    # Substitute the collected table bodies and metadata into the template.
    content = content.replace('{%CONCEPT%}', string_concepts)
    content = content.replace('{%CHARACTERISTICS%}', string_characteristics)
    content = content.replace('{%FEATURES%}', string_features)
    content = content.replace('{%COMPANY_LOGO%}', company_name)
    content = content.replace('{%DEVICE_TYPE%}', component_type)

    f = open("latex/output.tex", "w")
    f.write(content)
    f.close()

    # Compile the generated LaTeX and open the resulting PDF in a viewer.
    subprocess.run(['/usr/bin/pdflatex',
                    'latex/output.tex'], shell=False)
    subprocess.run(['/usr/bin/evince', 'output.pdf'], shell=False)
| 36.068627
| 79
| 0.480565
|
4a09c0ca1faebdb4855862ad532ac903827b4160
| 10,334
|
py
|
Python
|
pytorch_lightning/loops/fit_loop.py
|
Tshimanga/pytorch-lightning
|
ff7305f74d4e097a45b6a4d8c0fff6d4f5aaa386
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/loops/fit_loop.py
|
Tshimanga/pytorch-lightning
|
ff7305f74d4e097a45b6a4d8c0fff6d4f5aaa386
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/loops/fit_loop.py
|
Tshimanga/pytorch-lightning
|
ff7305f74d4e097a45b6a4d8c0fff6d4f5aaa386
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from contextlib import suppress
from typing import Any, Dict, Optional
from pytorch_lightning.loops import Loop
from pytorch_lightning.loops.epoch import TrainingEpochLoop
from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection
from pytorch_lightning.trainer.progress import Progress
from pytorch_lightning.trainer.supporters import TensorRunningAccum
log = logging.getLogger(__name__)
class FitLoop(Loop):
"""
This Loop iterates over the epochs to run the training.
Args:
min_epochs: The minimum number of epochs
max_epochs: The maximum number of epochs
"""
def __init__(self, min_epochs: Optional[int] = None, max_epochs: Optional[int] = None):
super().__init__()
self.max_epochs = max_epochs
self.min_epochs = min_epochs
self.epoch_loop: Optional[TrainingEpochLoop] = None
self.epoch_progress = Progress()
# caches the loaded dataloader state until dataloader objects are available
self._dataloader_state_dict: Dict[str, Any] = {}
@property
def current_epoch(self) -> int:
"""Return the current epoch"""
return self.epoch_progress.current.completed
@current_epoch.setter
def current_epoch(self, value: int) -> None:
"""Setter for the current epoch"""
self.epoch_progress.current.completed = value
@property
def global_step(self) -> int:
"""Returns the global step"""
return self.epoch_loop.global_step
@global_step.setter
def global_step(self, value: int) -> None:
"""Sets the global step (forwards to epoch_loop)"""
self.epoch_loop.global_step = value
@property
def total_batch_idx(self) -> int:
"""Returns the current batch index (across epochs)"""
return self.epoch_loop.total_batch_idx
@property
def batch_idx(self) -> int:
"""Returns the current batch index (within this epoch)"""
return self.epoch_loop.batch_idx
@property
def split_idx(self) -> int:
"""Returns the index of the current batch split (within the current batch) for bptt"""
return self.epoch_loop.batch_loop.split_idx
@property
def min_steps(self) -> int:
# TODO(@justusschock): Why aren't we using the attribute in this class?
"""Returns the minimum numnber of steps to run"""
return self.epoch_loop.min_steps
@min_steps.setter
def min_steps(self, value: int) -> None:
"""Sets the minimum number of steps (forwards to epoch_loop)"""
# TODO(@awaelchli): This setter is required by debugging connector (fast dev run), should be avoided
self.epoch_loop.min_steps = value
@property
def max_steps(self) -> int:
"""Returns the maximum number of steps to run"""
return self.epoch_loop.max_steps
@max_steps.setter
def max_steps(self, value: int) -> None:
"""Sets the maximum number of steps (forwards to epoch_loop)"""
# TODO(@awaelchli): This setter is required by debugging connector (fast dev run), should be avoided
self.epoch_loop.max_steps = value
@property
def running_loss(self) -> TensorRunningAccum:
"""Returns the running loss"""
return self.epoch_loop.batch_loop.running_loss
@property
def _skip_backward(self) -> bool:
"""Determines whether the loop will skip backward during automatic optimization."""
return self.epoch_loop.batch_loop._skip_backward
@_skip_backward.setter
def _skip_backward(self, value: bool) -> None:
"""Determines whether the loop will skip backward during automatic optimization."""
self.epoch_loop.batch_loop._skip_backward = value
@property
def _results(self) -> ResultCollection:
if self.trainer.training:
return self.epoch_loop._results
if self.trainer.validating:
return self.epoch_loop.val_loop._results
raise RuntimeError("`FitLoop._results` property isn't defined. Accessed outside of scope")
@property
def done(self) -> bool:
"""Evaluates when to leave the loop.
Returns True if trainer.should_stop was set (e.g. by early stopping)
or if the maximum number of steps or epochs is reached.
"""
# TODO(@awaelchli): Move track steps inside training loop and move part of these condition inside training loop
stop_steps = self.max_steps is not None and self.global_step >= self.max_steps
stop_epochs = self.max_epochs is not None and self.current_epoch >= self.max_epochs
should_stop = False
if self.trainer.should_stop:
# early stopping
met_min_epochs = self.current_epoch >= self.min_epochs if self.min_epochs else True
met_min_steps = self.global_step >= self.min_steps if self.min_steps else True
if met_min_epochs and met_min_steps:
should_stop = True
else:
log.info(
"Trainer was signaled to stop but required minimum epochs"
f" ({self.min_epochs}) or minimum steps ({self.min_steps}) has"
" not been met. Training will continue..."
)
self.trainer.should_stop = should_stop
return stop_steps or should_stop or stop_epochs
@property
def skip(self) -> bool:
"""Whether we should skip the training and immediately return from the call to :meth:`run`."""
return self.done or self.trainer.num_training_batches == 0
def connect(self, epoch_loop: TrainingEpochLoop):
"""Connects a training epoch loop to this fit loop."""
self.epoch_loop = epoch_loop
def reset(self) -> None:
"""Resets the internal state of this loop"""
def on_run_start(self) -> None:
"""Calls the ``on_train_start`` hook."""
self._results.to(device=self.trainer.lightning_module.device)
self.trainer.call_hook("on_train_start")
def on_advance_start(self) -> None:
"""Prepares the dataloader for training and calls the hooks ``on_epoch_start`` and ``on_train_epoch_start``"""
model = self.trainer.lightning_module
# reset train dataloader
if self.current_epoch != 0 and self.trainer._should_reload_dl_epoch:
self.trainer.reset_train_dataloader(model)
if self._dataloader_state_dict:
self.trainer.train_dataloader.load_state_dict(self._dataloader_state_dict)
self._dataloader_state_dict = {}
# TODO: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(self.current_epoch)
# changing gradient according accumulation_scheduler
self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)
# stores accumulated grad fractions per batch
self.epoch_loop.batch_loop.accumulated_loss = TensorRunningAccum(
window_length=self.trainer.accumulate_grad_batches
)
self.epoch_progress.increment_ready()
def advance(self) -> None:
"""Runs one whole epoch."""
dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)
data_fetcher = self.trainer.data_connector.get_profiled_dataloader(dataloader)
with self.trainer.profiler.profile("run_training_epoch"):
# run train epoch
epoch_output = self.epoch_loop.run(data_fetcher)
if epoch_output is None:
return
# the global step is manually decreased here due to backwards compatibility with existing loggers
# as they expect that the same step is used when logging epoch end metrics even when the batch loop has
# finished. this means the attribute does not exactly track the number of optimizer steps applied.
# TODO(@carmocca): deprecate and rename so users don't get confused
self.global_step -= 1
# log epoch metrics
self.trainer.logger_connector.update_train_epoch_metrics()
self.global_step += 1
def on_advance_end(self) -> None:
self.epoch_progress.increment_completed()
def on_run_end(self) -> None:
"""Calls the ``on_train_end`` hook"""
# NOTE: the current_epoch is already incremented
# Lightning today does not increment the current epoch at the last epoch run in Trainer.fit
# To simulate that current behavior, we decrement here.
# TODO: must be fixed by https://github.com/PyTorchLightning/pytorch-lightning/issues/5007
self.current_epoch -= 1
# hook
self.trainer.call_hook("on_train_end")
# give accelerators a chance to finish
self.trainer.accelerator.on_train_end()
def should_accumulate(self) -> bool:
"""Whether the gradients should be accumulated"""
return self.epoch_loop._should_accumulate()
def teardown(self) -> None:
self.epoch_loop.teardown()
def on_save_checkpoint(self) -> Dict:
state_dict = super().on_save_checkpoint()
# FIXME(@tchaton) Should pass has_completed=True when iterator is exhausted ?
state_dict["dataloader_state_dict"] = self.trainer.train_dataloader.state_dict(has_completed=False)
return state_dict
    def on_load_checkpoint(self, state_dict: Dict) -> None:
        """Restores loop state from ``state_dict``.

        Only caches the dataloader portion here; it is applied later, once
        the dataloader objects actually exist.
        """
        # cache the dataloader state dict until the dataloader objects are available
        self._dataloader_state_dict = state_dict.get("dataloader_state_dict", {})
| 40.84585
| 119
| 0.683569
|
4a09c1cf08c3c16d2942cef6e2824c49da151e33
| 411
|
py
|
Python
|
examples/web_counter/tdmf_pipelines.py
|
cmdimkpa/test-driven-modular-framework
|
3006557ddc5a9e52c59fb492f5d01a130ab1c759
|
[
"MIT"
] | null | null | null |
examples/web_counter/tdmf_pipelines.py
|
cmdimkpa/test-driven-modular-framework
|
3006557ddc5a9e52c59fb492f5d01a130ab1c759
|
[
"MIT"
] | null | null | null |
examples/web_counter/tdmf_pipelines.py
|
cmdimkpa/test-driven-modular-framework
|
3006557ddc5a9e52c59fb492f5d01a130ab1c759
|
[
"MIT"
] | null | null | null |
# applicable tests
# NOTE(review): testEngine, flags and Pipeline presumably come from the tdmf
# framework imported elsewhere in this file — confirm against the imports.
# Register a package-level and a unit-level test for the download_webpage component;
# the unit test fetches a known URL and expects the literal body "test\n".
testEngine.add_test("package", "download_webpage", "string_only")
testEngine.add_test("unit", "download_webpage", ("download_test", ["https://cmdimkpa.github.io/test.txt"], ["test\n"]))
# initiate application flags
flags.set("byte_counter", 0)  # counter starts at zero
# pipelines
# Single-step pipeline that downloads the Wikipedia "History of Africa" page.
download_web_page = Pipeline([
    ( "download_webpage", [ "https://en.wikipedia.org/wiki/History_of_Africa" ] )
])
| 27.4
| 120
| 0.695864
|
4a09c20d0fd51ab1796ba15e38996e0b6b7f22ec
| 23
|
py
|
Python
|
py-flask/venv/Lib/site-packages/virtualenv/version.py
|
tienduy-nguyen/python-learning
|
3f91482638e11bb19f877a8044c11bce7e91aefc
|
[
"MIT"
] | null | null | null |
py-flask/venv/Lib/site-packages/virtualenv/version.py
|
tienduy-nguyen/python-learning
|
3f91482638e11bb19f877a8044c11bce7e91aefc
|
[
"MIT"
] | 5
|
2021-03-19T11:01:43.000Z
|
2022-02-10T12:02:58.000Z
|
py-flask/venv/Lib/site-packages/virtualenv/version.py
|
tienduy-nguyen/python-learning
|
3f91482638e11bb19f877a8044c11bce7e91aefc
|
[
"MIT"
] | null | null | null |
# Package version string (PEP 440 format).
__version__ = "20.0.20"
| 23
| 23
| 0.695652
|
4a09c2d86f4737e006fb6123681956ea55952910
| 605
|
py
|
Python
|
leetcode/325.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | 1
|
2019-08-28T23:15:25.000Z
|
2019-08-28T23:15:25.000Z
|
leetcode/325.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | null | null | null |
leetcode/325.py
|
windniw/just-for-fun
|
54e5c2be145f3848811bfd127f6a89545e921570
|
[
"Apache-2.0"
] | null | null | null |
"""
link: https://leetcode-cn.com/problems/maximum-size-subarray-sum-equals-k
problem: 求数组中最长子数组,满足其和为 k,子数组必然连续。时间O(n)
solution: 遍历时记录 sum(nums[:i+1]) 的首次出现位置,并检查 sum(nums[:i+1]) - k 是否在之前出现过,
若有,记该值对应位置 j,[j:i+1] 为一个满足条件的子数组
"""
class Solution:
    def maxSubArrayLen(self, nums: List[int], k: int) -> int:
        """Return the length of the longest contiguous subarray of ``nums``
        whose elements sum to exactly ``k`` (0 if none exists).

        Single pass over prefix sums: nums[j+1:i+1] sums to k exactly when
        prefix[i] - prefix[j] == k, so we remember the earliest index at
        which each prefix sum occurred. O(n) time, O(n) space.

        Bug fix vs. the original: the old code only recorded an answer for
        ``t == k`` when that prefix sum was *first* seen (the check lived
        under ``if t not in m``), so e.g. [2, 1, -1] with k=2 returned 1
        instead of 3. Seeding the map with {0: -1} folds that case into
        the general lookup.
        """
        first_at = {0: -1}  # prefix sum -> earliest index where it occurred
        prefix = 0
        longest = 0
        for i, v in enumerate(nums):
            prefix += v
            if prefix - k in first_at:
                longest = max(longest, i - first_at[prefix - k])
            # keep only the earliest occurrence to maximize subarray length
            first_at.setdefault(prefix, i)
        return longest
| 26.304348
| 73
| 0.494215
|
4a09c3207584daed52b331459707b9cd84028996
| 704
|
py
|
Python
|
evidence/refs/Pfam.py
|
dprada/evidence
|
d8400fe1a3c662be01f6f9f658fc5b92b894556d
|
[
"MIT"
] | null | null | null |
evidence/refs/Pfam.py
|
dprada/evidence
|
d8400fe1a3c662be01f6f9f658fc5b92b894556d
|
[
"MIT"
] | null | null | null |
evidence/refs/Pfam.py
|
dprada/evidence
|
d8400fe1a3c662be01f6f9f658fc5b92b894556d
|
[
"MIT"
] | 1
|
2021-11-06T16:03:46.000Z
|
2021-11-06T16:03:46.000Z
|
class Pfam():
    """Reference to an entry in Pfam, the protein families database.

    Holds a Pfam accession ``id`` and renders it in several forms:
    a plain dict (via ``__call__``), str/repr, and an HTML link for
    Jupyter notebooks (``_repr_html_``).
    """
    def __init__(self, id=None):
        self.database = 'Pfam'  # database tag used in the dict form
        self.id = id            # Pfam accession, e.g. "PF00042"
        self._long_name = 'Pfam: The protein families database'
        self._web = 'http://pfam.xfam.org/'
    def __call__(self):
        """Return the reference as a plain dict with 'database' and 'id' keys."""
        tmp_dict = {
            'database' : 'Pfam',
            'id' : self.id
        }
        return tmp_dict
    def __repr__(self):
        return f'<Pfam: {self.id}>'
    def __str__(self):
        return f'Pfam: {self.id}'
    def __deepcopy__(self, memo=None):
        # Bug fix: copy.deepcopy() passes the memo dict positionally, so the
        # old signature __deepcopy__(self) raised TypeError. ``memo`` is
        # unused because this object holds no nested mutable state.
        return Pfam(id=self.id)
    def _webid(self):
        # NOTE(review): returns the database landing page, not an id-specific
        # URL — presumably intentional; confirm against the Pfam site layout.
        return self._web
    def _repr_html_(self):
        """HTML link representation rendered by Jupyter notebooks."""
        return f'<a href="{self._webid()}">{self.database}: {self.id}</a>'
| 18.051282
| 74
| 0.517045
|
4a09c33f5f37074f4ddbbaa47f669ce5c287e9ab
| 317
|
py
|
Python
|
my/transformer_tutorial_tf2/__init__.py
|
notyetend/annotated-transformer
|
9c4fdbbbc5ab4d3bff931b540be5f1d811de3ea0
|
[
"MIT"
] | null | null | null |
my/transformer_tutorial_tf2/__init__.py
|
notyetend/annotated-transformer
|
9c4fdbbbc5ab4d3bff931b540be5f1d811de3ea0
|
[
"MIT"
] | null | null | null |
my/transformer_tutorial_tf2/__init__.py
|
notyetend/annotated-transformer
|
9c4fdbbbc5ab4d3bff931b540be5f1d811de3ea0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created at 2020-01-03
@author: dongwan.kim
References
- https://www.tensorflow.org/tutorials/text/transformer
- https://blog.tensorflow.org/2019/05/transformer_annotated-chatbot-tutorial-with-tensorflow-2.html
Summary
    - Translates Portuguese to English
Question
- eager tensor?
"""
| 17.611111
| 99
| 0.747634
|
4a09c6ba4762050bc9265fa3a3384f9f9ab017fd
| 1,838
|
py
|
Python
|
examples/moderngl_logo.py
|
minuJeong/moderngl-window
|
6386478f1e6b07cefda8f4d9324d972ab88b34ec
|
[
"MIT"
] | 142
|
2019-11-11T23:14:28.000Z
|
2022-03-29T08:37:03.000Z
|
examples/moderngl_logo.py
|
minuJeong/moderngl-window
|
6386478f1e6b07cefda8f4d9324d972ab88b34ec
|
[
"MIT"
] | 107
|
2019-10-31T20:31:45.000Z
|
2022-03-23T15:01:41.000Z
|
examples/moderngl_logo.py
|
minuJeong/moderngl-window
|
6386478f1e6b07cefda8f4d9324d972ab88b34ec
|
[
"MIT"
] | 36
|
2019-12-12T16:14:10.000Z
|
2022-01-18T22:58:21.000Z
|
import numpy as np
import moderngl
import moderngl_window as mglw
class ModernglLogo(mglw.WindowConfig):
    """moderngl-window example that draws the ModernGL logo: 10 instances
    of one colored triangle, each rotated by a different amount, alpha
    blended over a white background."""
    title = "ModernGL Logo"
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Shader pair: the vertex stage rotates each instance by
        # rotation * (0.5 + gl_InstanceID * 0.05) and applies `scale`;
        # the fragment stage passes the interpolated vertex color through.
        self.prog = self.ctx.program(
            vertex_shader='''
                #version 330
                in vec2 vert;
                in vec4 vert_color;
                uniform vec2 scale;
                uniform float rotation;
                out vec4 frag_color;
                void main() {
                    frag_color = vert_color;
                    float r = rotation * (0.5 + gl_InstanceID * 0.05);
                    mat2 rot = mat2(cos(r), sin(r), -sin(r), cos(r));
                    gl_Position = vec4((rot * vert) * scale, 0.0, 1.0);
                }
            ''',
            fragment_shader='''
                #version 330
                in vec4 frag_color;
                out vec4 color;
                void main() {
                    color = vec4(frag_color);
                }
            ''',
        )
        # Cache the uniform handles so render() can update them cheaply.
        self.scale = self.prog['scale']
        self.rotation = self.prog['rotation']
        # Scale y by the aspect ratio so the triangle is not stretched.
        self.scale.value = (0.5, self.aspect_ratio * 0.5)
        # Interleaved vertex data: 2 floats position + 4 floats RGBA per vertex.
        vertices = np.array([
            1.0, 0.0,
            1.0, 0.0, 0.0, 0.5,
            -0.5, 0.86,
            0.0, 1.0, 0.0, 0.5,
            -0.5, -0.86,
            0.0, 0.0, 1.0, 0.5,
        ])
        self.vbo = self.ctx.buffer(vertices.astype('f4').tobytes())
        self.vao = self.ctx.simple_vertex_array(self.prog, self.vbo, 'vert', 'vert_color')
    def render(self, time, frametime):
        """Per-frame draw: clear to white, enable blending, rotate with time,
        draw 10 instances of the triangle."""
        self.ctx.clear(1.0, 1.0, 1.0)
        self.ctx.enable(moderngl.BLEND)
        self.rotation.value = time
        self.vao.render(instances=10)
if __name__ == '__main__':
    # Launch the example window when run as a script.
    mglw.run_window_config(ModernglLogo)
| 26.257143
| 90
| 0.476605
|
4a09c6be6275abd91c10db3f60de2b434d2e5b0a
| 485
|
py
|
Python
|
backend/migrations/0004_auto_20170816_2053.py
|
romic-kid/project-kfsystem
|
3ed63c5c063493dc0dd7e0c4b62ba7481bf63311
|
[
"BSD-3-Clause"
] | 2
|
2018-03-22T08:42:41.000Z
|
2018-07-03T09:22:28.000Z
|
backend/migrations/0004_auto_20170816_2053.py
|
romic-kid/project-kfsystem
|
3ed63c5c063493dc0dd7e0c4b62ba7481bf63311
|
[
"BSD-3-Clause"
] | 2
|
2019-04-25T02:10:10.000Z
|
2022-03-02T01:11:28.000Z
|
backend/migrations/0004_auto_20170816_2053.py
|
romic-kid/project-kfsystem
|
3ed63c5c063493dc0dd7e0c4b62ba7481bf63311
|
[
"BSD-3-Clause"
] | 1
|
2019-03-14T03:13:05.000Z
|
2019-03-14T03:13:05.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-16 12:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see file header): redefines the
    # RobotInfo.keyword field as CharField(blank=True, default='empty',
    # max_length=100). Applied after migration 0003 of the backend app.
    dependencies = [
        ('backend', '0003_auto_20170816_1729'),
    ]
    operations = [
        migrations.AlterField(
            model_name='robotinfo',
            name='keyword',
            field=models.CharField(blank=True, default='empty', max_length=100),
        ),
    ]
| 23.095238
| 80
| 0.626804
|
4a09c87d5e778065c7477a67ceb662c8099a5854
| 2,997
|
py
|
Python
|
momentumopt/python/momentumopt/kinoptpy/qp.py
|
machines-in-motion/kino-dynamic-opt
|
ba9188eea6b80b102b1d0880470bedc0faa5e243
|
[
"BSD-3-Clause"
] | 26
|
2019-11-18T17:39:43.000Z
|
2021-12-18T00:38:22.000Z
|
momentumopt/python/momentumopt/kinoptpy/qp.py
|
machines-in-motion/kino_dynamic_opt
|
ba9188eea6b80b102b1d0880470bedc0faa5e243
|
[
"BSD-3-Clause"
] | 25
|
2019-11-11T19:54:51.000Z
|
2021-04-07T13:41:47.000Z
|
momentumopt/python/momentumopt/kinoptpy/qp.py
|
machines-in-motion/kino-dynamic-opt
|
ba9188eea6b80b102b1d0880470bedc0faa5e243
|
[
"BSD-3-Clause"
] | 10
|
2019-12-15T14:36:51.000Z
|
2021-09-29T10:42:19.000Z
|
'''
@file qp.py
@package momentumopt
@author Brahayam Ponton (brahayam.ponton@tuebingen.mpg.de)
@license License BSD-3-Clause
@copyright Copyright (c) 2019, New York University and Max Planck Gesellschaft.
@date 2019-10-08
'''
from numpy import *
# from cvxopt import matrix, spmatrix
# from cvxopt.solvers import options, qp
# from cvxpy import Constant, Minimize, Problem, Variable, quad_form
from quadprog import solve_qp
class QpSolver():
    """Thin wrappers around QP solvers: quadprog (active) and cvxopt
    (currently disabled — see its commented-out imports above)."""
    def quadprog_solve_qp(self, P, q, G=None, h=None, A=None, b=None, initvals=None):
        '''
        Solve a Quadratic Program defined as:
            minimize
                (1/2) * x.T * P * x + q.T * x
            subject to
                G * x <= h
                A * x == b
        using quadprog <https://pypi.python.org/pypi/quadprog/>.
        Parameters
        ----------
        P : numpy.array
            Symmetric quadratic-cost matrix.
        q : numpy.array
            Quadratic-cost vector.
        G : numpy.array
            Linear inequality constraint matrix.
        h : numpy.array
            Linear inequality constraint vector.
        A : numpy.array, optional
            Linear equality constraint matrix.
        b : numpy.array, optional
            Linear equality constraint vector.
        initvals : numpy.array, optional
            Warm-start guess vector (not used).
        Returns
        -------
        x : numpy.array
            Solution to the QP, if found, otherwise ``None``.
        Note
        ----
        The quadprog solver only considers the lower entries of `P`, therefore it
        will use a wrong cost function if a non-symmetric matrix is provided.
        '''
        if initvals is not None:
            print("quadprog: note that warm-start values ignored by wrapper")
        # quadprog's convention is min 1/2 x^T G x - a^T x s.t. C^T x >= b
        # (first `meq` constraints treated as equalities), hence the sign
        # flips on q, G, h, A and b below.
        qp_G = P
        qp_a = -q
        if A is not None and G is None:
            # equality constraints only
            meq = A.shape[0]
            return solve_qp(qp_G, qp_a, -A.T, -b, meq)[0]
        elif G is not None:
            if A is not None:
                # stack equalities first so `meq` identifies them
                qp_C = -vstack([A, G]).T
                qp_b = -hstack([b, h])
                meq = A.shape[0]
                # print("EQUALITY AND INEQUALITY CONSTRAINTS")
            else:  # no equality constraint
                qp_C = -G.T
                qp_b = -h
                meq = 0
                # print("NO EQUALITY CONSTRAINT")
            return solve_qp(qp_G, qp_a, qp_C, qp_b, meq)[0]
        else:
            # print("UNCONSTRAINED OPTIMIZATION")
            return solve_qp(qp_G, qp_a)[0]
    def cvxopt_solve_qp(self, P, q, G=None, h=None, A=None, b=None):
        """Solve the same QP with cvxopt's ``qp`` solver.

        Returns the solution as a flat numpy array, or ``None`` when the
        solver status is not optimal.

        NOTE(review): ``matrix`` and ``qp`` come from the cvxopt imports that
        are commented out at the top of this file — calling this method
        raises NameError unless they are re-enabled. Confirm intent.
        """
        # P = .5 * (P + P.T) # make sure P is symmetric
        args = [matrix(P), matrix(q)]
        if G is not None:
            args.extend([matrix(G), matrix(h)])
        if A is not None:
            args.extend([matrix(A), matrix(b)])
        sol = qp(*args)
        if 'optimal' not in sol['status']:
            return None
        return array(sol['x']).reshape((P.shape[1],))
| 32.576087
| 85
| 0.541208
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.