content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import hashlib
import os
# TODO:
# - change the separator used in the integrity hash file from ':' to
# '::' to make it entirely unlikely that it could be part of a valid
# filename
#
HASHFILE_PART_SEP = ":"
| [
11748,
12234,
8019,
198,
11748,
28686,
628,
198,
2,
16926,
46,
25,
198,
2,
220,
220,
532,
1487,
262,
2880,
1352,
973,
287,
262,
11540,
12234,
2393,
422,
705,
32105,
284,
198,
2,
220,
220,
220,
220,
705,
3712,
6,
284,
787,
340,
500... | 2.884615 | 78 |
import requests
from hashlib import md5
from urllib.parse import urlsplit, urlencode, unquote_plus, quote_plus
import urllib.request
import re
import numpy as np
import PIL
from PIL import Image
headers = {"User-Agent": "Wolfram Android App"}
APPID = "3H4296-5YPAGQUJK7" # Mobile app AppId
SERVER = "api.wolframalpha.com"
SIG_SALT = "vFdeaRwBTVqdc5CL" # Mobile app salt
s = requests.Session()
s.headers.update(headers)
def basic_test(query_part):
"""
https://products.wolframalpha.com/api/documentation/#formatting-input
"""
print(craft_signed_url(f"https://{SERVER}/v2/query.jsp?{query_part}"))
r = s.get(craft_signed_url(f"https://{SERVER}/v2/query.jsp?{query_part}"))
if r.status_code == 200:
return r.text
else:
raise Exception(f"Error({r.status_code}) happened!\n{r.text}")
if __name__ == "__main__":
obama = str(input("What would you like to know, you filthy pirate? "))
bruh=urllib.parse.quote_plus(obama)
txt = basic_test("input=" + bruh + "&podstate=Result__Step-by-step%20solution&format=image")
print(txt)
reg = re.findall("https[^';]+[^']+",txt)
q = 6547777776
for i in reg:
if "=" in (i[-3],i[-2]) and i[-1].isdigit():
q+=1
print(i)
urllib.request.urlretrieve(i.replace("amp;",""), str(q)+".jpg")
list_im = [str(i)+".jpg" for i in range(6547777777,q+1)]
imgs = [PIL.Image.open(i) for i in list_im]
print(sorted([(np.sum(i.size), i.size) for i in imgs]))
wideboy = sorted([i.size[0] for i in imgs])[-1]
#get width of widest image
#resize all images so they match that width
imgs2 = []
for img in imgs:
wpercent = (wideboy/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((wideboy,hsize), Image.ANTIALIAS)
imgs2.append(img)
#stack images
print(sorted([(np.sum(i.size), i.size) for i in imgs2]))
#delete images
#save resultx
imgs_comb = np.vstack((i for i in imgs2))
imgs_comb = Image.fromarray(imgs_comb)
imgs_comb.save('filth.jpg')
###&format=plaintext&output=json | [
11748,
7007,
198,
6738,
12234,
8019,
1330,
45243,
20,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
2956,
7278,
489,
270,
11,
2956,
11925,
8189,
11,
555,
22708,
62,
9541,
11,
9577,
62,
9541,
198,
11748,
2956,
297,
571,
13,
25927,
198,
... | 2.374699 | 830 |
from . import db
| [
6738,
764,
1330,
20613,
628
] | 3.6 | 5 |
import operator as op
import itertools
with open("input.txt") as file:
data = file.read()
shop = """Weapons: Cost Damage Armor
Dagger 8 4 0
Shortsword 10 5 0
Warhammer 25 6 0
Longsword 40 7 0
Greataxe 74 8 0
Armor: Cost Damage Armor
Leather 13 0 1
Chainmail 31 0 2
Splintmail 53 0 3
Bandedmail 75 0 4
Platemail 102 0 5
Rings: Cost Damage Armor
Damage +1 25 1 0
Damage +2 50 2 0
Damage +3 100 3 0
Defense +1 20 0 1
Defense +2 40 0 2
Defense +3 80 0 3
"""
weapons = []
armors = []
rings = []
current = None
for line in shop.splitlines():
if "Weapons:" in line:
current = weapons
elif "Armor:" in line:
current = armors
elif "Rings:" in line:
current = rings
elif line == "":
current = None
else:
name, cost, damage, armor = line.rsplit(None, 3)
current.append([name, int(cost), int(damage), int(armor)])
boss = {}
for line in data.splitlines():
prop, val = map(str.strip, line.split(":"))
boss[prop] = int(val)
player = {
'Hit Points': 100,
'Damage': 0,
'Armor': 0
}
solve() | [
11748,
10088,
355,
1034,
198,
11748,
340,
861,
10141,
198,
198,
4480,
1280,
7203,
15414,
13,
14116,
4943,
355,
2393,
25,
198,
220,
220,
220,
1366,
796,
2393,
13,
961,
3419,
198,
198,
24643,
796,
37227,
41818,
25,
220,
220,
220,
6446,
... | 1.993846 | 650 |
import calendar
import numpy as np
import pandas as pd
import re
import scipy.interpolate as interp
import urllib
import warnings
from datetime import datetime as dt,timedelta
from scipy.ndimage import gaussian_filter as gfilt
from ..plot import Plot
#Import tools
from .tools import *
from ..utils import *
try:
import cartopy.feature as cfeature
from cartopy import crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
except:
warnings.warn("Warning: Cartopy is not installed in your python environment. Plotting functions will not work.")
try:
import matplotlib as mlib
import matplotlib.lines as mlines
import matplotlib.patheffects as patheffects
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.patches as mpatches
except:
warnings.warn("Warning: Matplotlib is not installed in your python environment. Plotting functions will not work.")
| [
11748,
11845,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
302,
198,
11748,
629,
541,
88,
13,
3849,
16104,
378,
355,
987,
79,
198,
11748,
2956,
297,
571,
198,
11748,
14601,
198,
6738,
4818,
807... | 3.160656 | 305 |
python3.7
# import statements
import codecs
import glob
import math
import os
import pandas as pd
import re
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# import function
import tse
import feather
import importlib
# importlib if needed
importlib.reload(tse)
# define chrome options
CHROME_PATH = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
WINDOW_SIZE = '1920,1080'
CHROMEDRIVER_PATH = '/usr/local/bin/chromedriver'
# set options
chrome_options = Options()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--window-size=%s' % WINDOW_SIZE)
chrome_options.binary_location = CHROME_PATH
# open invisible browser
browser = webdriver.Chrome(executable_path = CHROMEDRIVER_PATH,
options = chrome_options)
# set implicit wait for page load
browser.implicitly_wait(10)
# import test dataset with 1,000 individuals
candidates = pd.read_csv('candidatesPython.csv')
# test
i = 8765
# create dictionary for any random candidate
arguments = {'electionYear' : candidates.loc[int(i), 'electionYear'],
'electionID' : candidates.loc[int(i), 'electionID'],
'electoralUnitID': candidates.loc[int(i), 'electoralUnitID'],
'candidateID' : candidates.loc[int(i), 'candidateID']}
tse.scraper(browser).case(**arguments)
tse.scraper(browser).decision(url)
problemCases = feather.read_dataframe('problemCases.feather')
problemCases.to_csv('problemCases.csv', index = False)
# extract protocol number from url
num = re.search('(?<=nprot=)(.)*(?=&)', self.url).group(0)
# replace weird characters by nothing
num = re.sub(r'\/|\.|\&|\%|\-', '', num)
ber = re.compile('[0-9]+(-)[0-9]+(?=\\.html)')
candclear = re.compile('(?<=-)[0-9]+(?=\\.html)')
protclear = re.compile(r'\/|\.|\&|\%|\-')
file = file[0]
tse.parser('./html-first-run/7615.html').parse_summary()
test = codecs.open(file, 'r', 'cp1252').read()
soup = BeautifulSoup(test, 'lxml')
regex0 = re.compile(r'\n|\t')
regex1 = re.compile(r'\\n|\\t')
regex2 = re.compile('\xa0')
regex3 = re.compile(' +')
regex4 = re.compile('^PROCESSO')
regex5 = re.compile('^MUNIC[IÍ]PIO')
regex6 = re.compile('^PROTOCOLO')
regex7 = re.compile('^(requere|impugnan|recorren|litis)', re.IGNORECASE)
regex8 = re.compile('^(requeri|impugnad|recorri|candid)', re.IGNORECASE)
regex9 = re.compile('^(ju[íi]z|relator)', re.IGNORECASE)
regex10 = re.compile('^assunt', re.IGNORECASE)
regex11 = re.compile('^localiz', re.IGNORECASE)
regex12 = re.compile('^fase', re.IGNORECASE)
regex13 = re.compile('(?<=:)(.)*')
tables = soup.find_all('table')
test = tse.parser(files[0])
test = tse.parser(files[6]).parse_summary()
test['stage'] = [None]
summary = {k: v for k, v in test.items() for v in test[k]}
pd.DataFrame(summary, index = [0]).T
import os
import pandas as pd
import csv
#
file = '../2018 TSE Databank/DespesaCandidato2004.txt'
kwargs = {'sep': ';', 'index_col': False, 'encoding': 'latin_1',
'error_bad_lines': False, 'quoting': csv.QUOTE_NONE}
dataframe = pd.read_csv(file, **kwargs)
dataframe.to_csv('DespesaCandidato20042.txt', '#', doublequote = False,
escapechar = '\\')
| [
29412,
18,
13,
22,
198,
2,
1330,
6299,
198,
11748,
40481,
82,
198,
11748,
15095,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
302,
198,
11748,
640,
198,
6738,
275,
82,
19,
1330,
23762,
50,
... | 2.486616 | 1,457 |
# Generated by Django 2.0.7 on 2018-07-17 01:13
from django.conf import settings
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
22,
319,
2864,
12,
2998,
12,
1558,
5534,
25,
1485,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
198,
11748,
42625,
142... | 3.08 | 75 |
from django.db.models.query import QuerySet
from rest_framework.filters import BaseFilterBackend
from rest_framework.request import Request
from rest_framework.views import APIView
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
22766,
1330,
43301,
7248,
198,
6738,
1334,
62,
30604,
13,
10379,
1010,
1330,
7308,
22417,
7282,
437,
198,
6738,
1334,
62,
30604,
13,
25927,
1330,
19390,
198,
6738,
1334,
62,
30604,
13,
33571,... | 3.956522 | 46 |
import numpy as np
from build_matrix_A import build_matrix_A
from build_matrix_b import build_matrix_b
from matrix_solver import solve_matrix
from generate_mesh import generate_mesh
def transient_model(time, material_property_library, mesh, g_dot_list, boundary_conditions_list):
"""This function will calculate the transient response of the fuel particle to changes in either heat
generation rate or to boundary wall temperature. The A matrix has 1 added to its diagonal entries while
the b matrix has the previous temperature set added to its entries. These changes need to be made now that
dT/dt is no longer zero:
T_time+1 = T_time + dT/dt *Delta_t
Where previously dT/dt was equal to zero, the matrices simplified dramatically. Now this is not the case,
so the b matrix must be appended with the known constant T_time and the A matrix must be appended with 1
along the diagonal to account for the presence of T_time+1.
inputs
------
- time: An array of the time steps taken by the solver. Passed through to the matrix generators.
- mesh: the array of points for the physical location of nodes and the materials assigned to them
- material_property_library: a dictionary or nested dictionaries of the materials used in the modeled region
- g_dot_list: a list with the heat generation rates before the transient and during the transient. g_dot_list
is of length 2, with the 0th entry being prior to the transient and fist entry after time zero.
- boundary_condtions_list: a list with the boundary temperature before the transient and during the transient
outputs. This list is of length 2, with the 0th entry being prior to the transient and fist entry after time zero.
-T: numpy array containing the temperatures for each node and time
"""
total_nodes = len(mesh[0][1])
for i in range(1,len(mesh)):
total_nodes += len(mesh[i][1])-1
T = np.zeros((total_nodes, len(time)))
Dt = time[1]-time[0]
A = Dt*build_matrix_A(material_property_library, mesh)
b = Dt*build_matrix_b(boundary_conditions_list[0], mesh, material_property_library, g_dot_list[0])
T_initial = solve_matrix(A,b)
A += np.eye(A.shape[0])
#The following line removes the addition of one from the final entry, where the boundary condition is imposed.
A[-1,-1] += -1
b = np.add(T_initial,b)
b[-1] += -T_initial[-1]
T[:,0] = T_initial
b_temp = Dt*build_matrix_b(boundary_conditions_list[1], mesh, material_property_library, g_dot_list[1])
for i in range(1,len(time)):
b = b_temp.copy()
b = np.add(b, T[:,i-1])
#The following line removes the addition of T to the boundary condition
b[-1] += -T[-1,i-1]
T[:,i] = solve_matrix(A, b)
return T
| [
11748,
299,
32152,
355,
45941,
201,
198,
6738,
1382,
62,
6759,
8609,
62,
32,
1330,
1382,
62,
6759,
8609,
62,
32,
201,
198,
6738,
1382,
62,
6759,
8609,
62,
65,
1330,
1382,
62,
6759,
8609,
62,
65,
201,
198,
6738,
17593,
62,
82,
1437... | 2.717331 | 1,079 |
from __future__ import division
from mmtbx.secondary_structure import build as ssb
import time
if (__name__ == "__main__"):
t0=time.time()
exercise_process_params()
print "Time: %6.4f"%(time.time()-t0)
print "OK"
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
8085,
83,
65,
87,
13,
38238,
62,
301,
5620,
1330,
1382,
355,
264,
36299,
198,
11748,
640,
198,
198,
361,
357,
834,
3672,
834,
6624,
366,
834,
12417,
834,
1,
2599,
198,
220,
256,
15,
... | 2.611765 | 85 |
from openapi_schema_generator.OpenApiSchemaGenerator import OpenApiSchemaGenerator
import argparse
import os
import json
import yaml
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default=None)
parser.add_argument('--output', type=str, default='result.json')
args = parser.parse_args()
if not (args.path and os.path.isfile(args.path)):
print("Please insert path correctly")
else:
generator = OpenApiSchemaGenerator(args.path)
write_json(args.output, generator.deploy_schema())
| [
6738,
1280,
15042,
62,
15952,
2611,
62,
8612,
1352,
13,
11505,
32,
14415,
27054,
2611,
8645,
1352,
1330,
4946,
32,
14415,
27054,
2611,
8645,
1352,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
331,
43695,
628,... | 2.759434 | 212 |
import cv2
import matplotlib.pyplot as plt
| [
11748,
269,
85,
17,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628
] | 2.75 | 16 |
from iglo.lamp import Lamp
from iglo.helpers import Helpers | [
6738,
45329,
5439,
13,
75,
696,
1330,
28607,
198,
6738,
45329,
5439,
13,
16794,
364,
1330,
10478,
364
] | 3.277778 | 18 |
from django.contrib import admin
# Register your models here.
from youtube_rest_api.models import Video
admin.site.register(Video, VideoAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
6738,
35116,
62,
2118,
62,
15042,
13,
27530,
1330,
7623,
628,
198,
198,
28482,
13,
15654,
13,
30238,
7,
10798,
11,
7623,
46787,
8,
198
] | 3.5 | 42 |
"""
Tests for particle octree
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
from yt.frontends.stream.data_structures import load_particles
from yt.geometry.oct_container import \
OctreeContainer
from yt.geometry.particle_oct_container import \
ParticleOctreeContainer, \
ParticleRegions
from yt.geometry.oct_container import _ORDER_MAX
from yt.geometry.selection_routines import RegionSelector, AlwaysSelector
from yt.testing import \
assert_equal, \
requires_file
from yt.units.unit_registry import UnitRegistry
from yt.units.yt_array import YTArray
from yt.utilities.lib.geometry_utils import get_morton_indices
import yt.units.dimensions as dimensions
import yt.data_objects.api
NPART = 32**3
DLE = np.array([0.0, 0.0, 0.0])
DRE = np.array([10.0, 10.0, 10.0])
dx = (DRE-DLE)/(2**_ORDER_MAX)
index_ptype_snap = "snapshot_033/snap_033.0.hdf5"
@requires_file(index_ptype_snap)
os33 = "snapshot_033/snap_033.0.hdf5"
@requires_file(os33)
| [
37811,
198,
51,
3558,
329,
18758,
19318,
631,
628,
198,
198,
37811,
198,
198,
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
2211,
11,
331,
83,
7712,
4816,
13,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
40499,
347,
10305,
... | 3.035377 | 424 |
from .sudo_query_helpers import update
from helpers import log
def update_with_suppressed_fail(query_string):
"""
A last resort update, if the potential error of an update needs supression.
:param query_string: string
"""
try:
update(query_string)
except Exception as e:
log("""
WARNING: an error occured during the update_with_suppressed_fail.
Message {}
Query {}
""".format(e, query_string))
log("""WARNING: I am sorry you have to read this message""")
| [
6738,
764,
24032,
62,
22766,
62,
16794,
364,
1330,
4296,
198,
6738,
49385,
1330,
2604,
628,
198,
4299,
4296,
62,
4480,
62,
18608,
2790,
62,
32165,
7,
22766,
62,
8841,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
938,
12... | 2.408333 | 240 |
from enum import Enum
__author__ = 'raymond'
| [
6738,
33829,
1330,
2039,
388,
198,
198,
834,
9800,
834,
796,
705,
2433,
6327,
6,
198
] | 2.875 | 16 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import subprocess
import time
from e2e.fixtures import cluster
from e2e.utils.load_balancer import common
from e2e.utils.aws.acm import AcmCertificate
from e2e.utils.aws.elbv2 import ElasticLoadBalancingV2
from e2e.utils.aws.iam import IAMPolicy
from e2e.utils.aws.route53 import Route53HostedZone
from e2e.utils.utils import (
kubectl_delete_kustomize,
print_banner,
load_yaml_file,
wait_for,
)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
config_file_path = common.CONFIG_FILE
print_banner("Reading Config")
cfg = load_yaml_file(file_path=config_file_path)
delete_lb_resources(cfg)
| [
2,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
18931,
198,
11748,
850,
14681,
198,
11748,
640,
198,
198... | 2.664474 | 304 |
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
import re
from django.shortcuts import redirect,reverse
| [
6738,
42625,
14208,
13,
26791,
13,
10378,
8344,
341,
1330,
6046,
1574,
35608,
259,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
302,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
11,
50188,
628
] | 3.864865 | 37 |
# -*- coding: utf-8 -*-
import abc
import six
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
450,
66,
198,
198,
11748,
2237,
628
] | 2.181818 | 22 |
from flair.embeddings import WordEmbeddings, FlairEmbeddings, StackedEmbeddings
from flair.data import Sentence
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from flair.data import Sentence
import torch as torch
from tqdm import tqdm
if __name__ == "__main__":
text1 = "I am good."
text2 = "I am not bad."
flair = flair_semantic()
flair.predict_similarity(text1, text2)
| [
6738,
37457,
13,
20521,
67,
654,
1330,
9678,
31567,
6048,
654,
11,
1610,
958,
31567,
6048,
654,
11,
520,
6021,
31567,
6048,
654,
201,
198,
6738,
37457,
13,
7890,
1330,
11352,
594,
201,
198,
6738,
1341,
35720,
13,
4164,
10466,
13,
2487... | 2.587209 | 172 |
#!/usr/bin/env python
""" Utility for git-bisecting nose failures
"""
DESCRIP = 'Check nose output for given text, set sys exit for git bisect'
EPILOG = \
"""
Imagine you've just detected a nose test failure. The failure is in a
particular test or test module - here 'test_analyze.py'. The failure *is* in
git branch ``main-master`` but it *is not* in tag ``v1.6.1``. Then you can
bisect with something like::
git co main-master
git bisect start HEAD v1.6.1 --
git bisect run /path/to/bisect_nose.py nibabel/tests/test_analyze.py:TestAnalyzeImage.test_str
You might well want to test that::
nosetests nibabel/tests/test_analyze.py:TestAnalyzeImage.test_str
works as you expect first.
Let's say instead that you prefer to recognize the failure with an output
string. Maybe this is because there are lots of errors but you are only
interested in one of them, or because you are looking for a Segmentation fault
instead of a test failure. Then::
git co main-master
git bisect start HEAD v1.6.1 --
git bisect run /path/to/bisect_nose.py --error-txt='HeaderDataError: data dtype "int64" not recognized' nibabel/tests/test_analyze.py
where ``error-txt`` is in fact a regular expression.
You will need 'argparse' installed somewhere. This is in the system libraries
for python 2.7 and python 3.2 onwards.
We run the tests in a temporary directory, so the code you are testing must be
on the python path.
"""
import os
import sys
import shutil
import tempfile
import re
from functools import partial
from subprocess import check_call, Popen, PIPE, CalledProcessError
from argparse import ArgumentParser, RawDescriptionHelpFormatter
caller = partial(check_call, shell=True)
popener = partial(Popen, stdout=PIPE, stderr=PIPE, shell=True)
# git bisect exit codes
UNTESTABLE = 125
GOOD = 0
BAD = 1
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
34030,
329,
17606,
12,
41907,
478,
278,
9686,
15536,
198,
37811,
198,
30910,
36584,
796,
705,
9787,
9686,
5072,
329,
1813,
2420,
11,
900,
25064,
8420,
329,
17606,
47457,
478,
6,
... | 3.17657 | 589 |
import sys
sys.path.insert(0, '/srv/wcdo/src_viz')
from dash_apps import *
### DATA ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
conn = sqlite3.connect(databases_path + 'stats_production.db'); cursor = conn.cursor()
### DASH APP ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
dash_app16 = Dash(__name__, server = app, url_base_pathname= webtype + '/time_gap/', external_stylesheets=external_stylesheets, external_scripts=external_scripts)
dash_app16.config['suppress_callback_exceptions']=True
title = "Time Gap"
dash_app16.title = title+title_addenda
dash_app16.layout = html.Div([
navbar,
html.H3(title, style={'textAlign':'center'}),
dcc.Markdown('''
This page shows stastistics and graphs that illustrate the Time gap in Wikipedia language editions content. It is not ready yet.
'''),
footbar,
], className="container")
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
#### CALLBACKS ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
| [
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
31051,
27891,
85,
14,
86,
66,
4598,
14,
10677,
62,
85,
528,
11537,
198,
6738,
14470,
62,
18211,
1330,
1635,
628,
198,
198,
21017,
42865,
44386,
44386,
44386,
44386,
44386,
4438... | 2.975138 | 362 |
# Generated by Django 3.1.2 on 2020-10-26 13:02
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
940,
12,
2075,
1511,
25,
2999,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from django.contrib.sites.shortcuts import get_current_site
from rest_framework import generics, permissions, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.response import Response
from . import serializers
from teams.models import Team, TeamInvitation
class CreateTeamAPIView(generics.CreateAPIView):
"""
Endpoint to create a team.
"""
permission_classes = (permissions.IsAuthenticated, )
authentication_classes = (TokenAuthentication, )
serializer_class = serializers.TeamCreateSerializer
queryset = Team.objects.all()
class InviteToTeamAPIView(generics.CreateAPIView):
"""
Endpoint to invite people to a team.
"""
permission_classes = (permissions.IsAuthenticated, )
authentication_classes = (TokenAuthentication, )
serializer_class = serializers.TeamInvitationCreateSerializer
queryset = TeamInvitation.objects.all()
| [
6738,
42625,
14208,
13,
3642,
822,
13,
49315,
13,
19509,
23779,
1330,
651,
62,
14421,
62,
15654,
198,
6738,
1334,
62,
30604,
1330,
1152,
873,
11,
21627,
11,
3722,
198,
6738,
1334,
62,
30604,
13,
41299,
3299,
1330,
29130,
47649,
3299,
... | 3.373188 | 276 |
import Sofa
import os.path
import SofaPython.Quaternion as quat
from SofaPython.Tools import listToStr as concat
import math
import numpy
import inspect
# be sure that the cache path exists
# automatic generation when s-t changed
class ImageProducerInfo:
""" Used with insertImageCache
"""
def getImageTransform(filename, scaleFactor=1):
""" Returns dim, voxelsize and rigid position of an image given an .mhd header image file
a scaleFactor can be given to normalize image length units (usually in mm)
"""
scale=[0,0,0]
tr=[0,0,0]
dim=[0,0,0]
with open(filename,'r') as f:
for line in f:
splitted = line.split()
if len(splitted)!=0:
if 'ElementSpacing'==splitted[0] or 'spacing'==splitted[0] or 'scale3d'==splitted[0] or 'voxelSize'==splitted[0]:
scale = map(float,splitted[2:5])
if 'Position'==splitted[0] or 'Offset'==splitted[0] or 'translation'==splitted[0] or 'origin'==splitted[0]:
tr = map(float,splitted[2:5])
if 'Orientation'==splitted[0] or 'Rotation'==splitted[0] or 'TransformMatrix'==splitted[0] :
R = numpy.array([map(float,splitted[2:5]),map(float,splitted[5:8]),map(float,splitted[8:11])])
if 'DimSize'==splitted[0] or 'dimensions'==splitted[0] or 'dim'==splitted[0]:
dim = map(int,splitted[2:5])
q = quat.from_matrix(R)
if scaleFactor!=1:
scale = [s*scaleFactor for s in scale]
tr = [t*scaleFactor for t in tr]
offset=[tr[0],tr[1],tr[2],q[0],q[1],q[2],q[3]]
return (dim,scale,offset)
def getImageType(filename):
""" Returns type of an image given an .mhd header image file
"""
t=""
with open(filename,'r') as f:
for line in f:
splitted = line.split()
if len(splitted)!=0:
if 'ElementType'==splitted[0] or 'voxelType'==splitted[0] or 'scale3d'==splitted[0] or 'voxelSize'==splitted[0]:
t = str(splitted[2])
if t=="MET_CHAR":
return "ImageC"
elif t=="MET_DOUBLE":
return "ImageD"
elif t=="MET_FLOAT":
return "ImageF"
elif t=="MET_INT":
return "ImageI"
elif t=="MET_LONG":
return "ImageL"
elif t=="MET_SHORT":
return "ImageS"
elif t=="MET_UCHAR":
return "ImageUC"
elif t=="MET_UINT":
return "ImageUI"
elif t=="MET_ULONG":
return "ImageUL"
elif t=="MET_USHORT":
return "ImageUS"
elif t=="MET_BOOL":
return "ImageB"
else:
return None
def transformToData(scale,offset,timeOffset=0,timeScale=1,isPerspective=0):
""" Returns a transform, formatted to sofa data given voxelsize, rigid position (offset), time and camera parameters
"""
return concat(offset[:3])+' '+concat(quat.to_euler(offset[3:])*180./math.pi)+' '+concat(scale)+' '+str(timeOffset)+' '+str(timeScale)+' '+str(int(isPerspective))
# controller you must derived from and instanciate in the same context than your ImageViewer if you want to define actions to manually add / remove point from an image plane
# return a dictionary of id -> point: {id0 : point0, idn : pointn, ...}
# a point is defined as follows: {'position': [x, y, z], 'color': [r, g, b], ...custom parameters... }
# simpler python script controllers based on SofaPython.script
# TODO maybe this should be double Inherited from both ImagePlaneController and SofaPython.script.Controller
# not to copy code. But then testing inheritance against ImagePlaneController has to be checked.
| [
11748,
1406,
13331,
198,
11748,
28686,
13,
6978,
198,
11748,
1406,
13331,
37906,
13,
4507,
9205,
295,
355,
627,
265,
198,
6738,
1406,
13331,
37906,
13,
33637,
1330,
1351,
2514,
13290,
355,
1673,
265,
198,
11748,
10688,
198,
11748,
299,
... | 2.320996 | 1,567 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# $File: uklogger.py
# $Date: Sat Dec 14 18:03:00 2013 +0800
# $Author: jiakai <jia.kai66@gmail.com>
"""utilities for handling logging"""
import traceback
from termcolor import colored
from ukutil import is_in_unittest
def log_api(msg):
"""log a message from api-website"""
print colored('API', 'green'), msg
# TODO: use log util, log to file, including time, module, etc.
def log_fetcher(msg):
"""log a message from fetcher"""
print colored('FETCHER', 'yellow'), msg
# TODO: use log util, log to file, including time, module, etc.
def log_info(msg):
"""log an info message"""
print colored('INFO', 'blue'), msg
# TODO: use log util, log to file, including time, module, etc.
def log_err(msg):
"""log an err message"""
print colored('ERR', 'red', attrs=['blink']), msg
# TODO: use log util, log to file, including time, module, etc.
def log_exc(exc):
"""log an unexpected exception"""
log_err('Caught unexpected exception: {}\n{}'.format(
exc, traceback.format_exc()))
if is_in_unittest():
log_api = log_fetcher = log_info = lambda msg: None
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
720,
8979,
25,
334,
74,
6404,
1362,
13,
9078,
198,
2,
720,
10430,
25,
7031,
4280,
1478,
1248,
25,
3070,
25,
... | 2.698157 | 434 |
#!/usr/bin/env python3
import sys
import os
from argparse import ArgumentParser, ArgumentTypeError
import numpy as np
from config import *
if __name__ == "__main__":
main(sys.argv) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
220,
198,
11748,
28686,
198,
6738,
1822,
29572,
1330,
45751,
46677,
11,
45751,
6030,
12331,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
4566,
1330,
1635,
1... | 3.032787 | 61 |
import sublime, sublime_plugin
import os, json, subprocess, threading
from ..libs import NodeJS
from ..libs.global_vars import *
from ..libs.popup_manager import popup_manager
from ..libs import util
from ..libs import Hook
from ..libs import FlowCLI
from ..libs import flow_ide_clients
default_completions = util.open_json(os.path.join(PACKAGE_PATH, 'default_autocomplete.json')).get('completions')
| [
11748,
41674,
11,
41674,
62,
33803,
198,
11748,
28686,
11,
33918,
11,
850,
14681,
11,
4704,
278,
198,
6738,
11485,
8019,
82,
1330,
19081,
20120,
198,
6738,
11485,
8019,
82,
13,
20541,
62,
85,
945,
1330,
1635,
198,
6738,
11485,
8019,
8... | 3.108527 | 129 |
# -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
import os.path
from setuptools import find_packages
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
long_description=get_doc(),
version="1.0.1",
name='apigw-manager',
python_requires='==3.*,>=3.6.1',
author="blueking",
author_email="contactus_bk@tencent.com",
license='Apach License 2.0',
packages=find_packages("src", exclude=["__pycache__", "*.pyc"]),
package_dir={"": "src"},
package_data={},
zip_safe=False,
install_requires=[
'future==0.18.2',
'pyjwt>=1.6.4',
'pyyaml==5.*,>=5.4.1',
'setuptools>=21.0.0',
'urllib3>=1.25.3',
'python-dateutil>=2.8.1,<3.0.0',
'bkapi-bk-apigateway>=1.0.0,<2.0.0',
],
extras_require={
"extra": [
'django>=1.11.1',
'cryptography>=3.1.1',
]
},
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1635,
9368,
1087,
14573,
15708,
318,
10607,
284,
1104,
262,
1280,
2723,
2055,
416,
1642,
5525,
241,
251,
165,
110,
116,
162,
247,
118,
12859,
239,
12,
164,
... | 2.469676 | 709 |
"""
SPDX-FileCopyrightText: 2021 International Photoacoustic Standardisation Consortium (IPASC)
SPDX-FileCopyrightText: 2021 Janek Gröhl
SPDX-License-Identifier: MIT
"""
from google_drive_downloader import GoogleDriveDownloader
import os
import matplotlib.pyplot as plt
import numpy as np
class TestClassBase:
"""
This base class can be used for the implementation of image reconstruction test cases.
It automatically downloads a sample IPASC-formatted HDF5 file and
"""
@staticmethod
@staticmethod
| [
37811,
198,
4303,
36227,
12,
8979,
15269,
8206,
25,
33448,
4037,
5555,
330,
21618,
8997,
5612,
42727,
357,
4061,
42643,
8,
198,
4303,
36227,
12,
8979,
15269,
8206,
25,
33448,
2365,
988,
1902,
9101,
18519,
198,
4303,
36227,
12,
34156,
12... | 3.496689 | 151 |
from __future__ import print_function
from feed_data import Batcher
from KBQA import KBQA, TextQA, TextKBQA
import tensorflow as tf
import argparse
import time
import numpy as np
import cPickle as pickle
from tqdm import tqdm
import pdb
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--train_file", required=True)
parser.add_argument("--dev_file", required=True)
parser.add_argument("-k", "--kb_file", required=True)
parser.add_argument("--text_kb_file", required=True)
parser.add_argument("-v", "--vocab_dir", required=True)
parser.add_argument("-b", "--batch_size", default=32)
parser.add_argument("--dev_batch_size", default=200)
parser.add_argument("-M", "--max_facts", required=True)
parser.add_argument("--max_text_facts", required=True)
parser.add_argument("-m", "--min_facts", required=True)
parser.add_argument("-i", "--hops", default=3)
parser.add_argument("-d", "--embedding_dim", default=50)
parser.add_argument("--entity_vocab_size", required=True)
parser.add_argument("--relation_vocab_size", required=True)
parser.add_argument("--learning_rate", required=True)
parser.add_argument("--grad_clip_norm", required=True)
parser.add_argument("--verbose", default=0)
parser.add_argument("--dev_eval_counter", default=200)
parser.add_argument("--save_counter", default=200)
parser.add_argument("--dev_max_facts", default=15000)
parser.add_argument("--dev_max_text_facts", default=15000)
parser.add_argument("--output_dir", default='.')
parser.add_argument("--load_model", default=0)
parser.add_argument("--model_path", default='')
parser.add_argument("--load_pretrained_vectors", default=0)
parser.add_argument("--pretrained_vector_path", default='')
parser.add_argument("--use_kb", default=1, type=int)
parser.add_argument("--use_text", default=0, type=int)
parser.add_argument("--print_attention_weights", default=0, type=int)
parser.add_argument("--mode", default='train')
parser.add_argument("--combine_text_kb_answer", default='concat2')
parser.add_argument("--separate_key_lstm", default=0, type=int)
args = parser.parse_args()
entity_vocab_size = int(args.entity_vocab_size)
relation_vocab_size = int(args.relation_vocab_size)
train_file = args.train_file
dev_file = args.dev_file
kb_file = args.kb_file
text_kb_file = args.text_kb_file
vocab_dir = args.vocab_dir
embedding_size = int(args.embedding_dim)
batch_size = int(args.batch_size)
dev_batch_size = int(args.dev_batch_size)
min_facts = int(args.min_facts)
max_facts = int(args.max_facts)
max_text_facts = int(args.max_text_facts)
lr = float(args.learning_rate)
grad_clip_norm = int(args.grad_clip_norm)
verbose = (int(args.verbose) == 1)
hops = int(args.hops)
dev_eval_counter = int(args.dev_eval_counter)
save_counter = int(args.save_counter)
dev_max_facts = int(args.dev_max_facts)
dev_max_text_facts = int(args.dev_max_text_facts)
output_dir = args.output_dir
load_model = (int(args.load_model) == 1)
model_path = args.model_path
use_kb = (args.use_kb == 1)
use_text = (args.use_text == 1)
if load_model:
assert len(model_path) != 0 or model_path is not None
load_pretrained_vectors = (int(args.load_pretrained_vectors) == 1)
pretrained_vector_path = args.pretrained_vector_path
print_attention_weights = (args.print_attention_weights == 1)
mode = args.mode
combine_text_kb_answer = args.combine_text_kb_answer
separate_key_lstm = (args.separate_key_lstm == 1)
t = Trainer()
t.fit()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
3745,
62,
7890,
1330,
6577,
2044,
198,
6738,
14204,
48,
32,
1330,
14204,
48,
32,
11,
8255,
48,
32,
11,
8255,
22764,
48,
32,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11... | 2.539986 | 1,463 |
#This is really badly written, but the function "create_data_set(x)"
#returns a dictionary with x keys and such that the values correspond
#to ordered pairs (A,B), where A is a 30x30 matrix (where the pixels in the path have pixels
#1 and the other pixels have all value 0) and B is the ground truth
#(pixels outside are 0 and pixels inside are 1).
#note: Matrices are represented by lists of lists, i.e. A[x][y] corresponds to the pixel (x,y).
from random import randint
import numpy as np
import operator
VECTORS ={0:(0,-1), 1:(1,-1),2:(1,0),3:(1,1),4:(0,1),5:(-1,1),6:(-1,0),7:(-1,-1)}
INV_VECTORS={(0,-1):0, (1,-1):1,(1,0):2,(1,1):3,(0,1):4,(-1,1):5,(-1,0):6,(-1,-1):7}
NUM_NEIGHBORS=8
RIGHT_TURN=2
outside = 0
inside = 0.3
even = [0,2,4,6]
mid = 1
height = 78
width = 78
minimum = 1
maximum = 3
#True iff (x,y) has valid coordinates
'''
def image():
var= True
while var:
(LATTICE, succ) = create_loop(12,12)
if succ>20:
var= False
LATTICE1=np.array(gnd(LATTICE), dtype=np.uint8)
LATTICE = fix(LATTICE)
image = np.array(LATTICE, dtype=np.uint8)
#plt.imshow(image, interpolation='none')
plt.imshow(LATTICE1, interpolation='none')
plt.show()
'''
| [
2,
1212,
318,
1107,
11234,
3194,
11,
475,
262,
2163,
366,
17953,
62,
7890,
62,
2617,
7,
87,
16725,
220,
198,
2,
7783,
82,
257,
22155,
351,
2124,
8251,
290,
884,
326,
262,
3815,
6053,
220,
198,
2,
1462,
6149,
14729,
357,
32,
11,
... | 2.16 | 600 |
"""This script creates a simple static plot of data from the DtssHost via a DtsClient."""
import sys
import os
from tempfile import NamedTemporaryFile
import logging
from shyft.time_series import DtsClient, UtcPeriod, Calendar, TsVector, utctime_now, TimeSeries
from bokeh.plotting import figure, show, output_file
from bokeh.models import DatetimeTickFormatter, Range1d, LinearAxis
import numpy as np
from visual.utils import bokeh_time_from_timestamp, get_xy
from weather.data_sources.netatmo.domain import NetatmoDomain, types
from weather.data_sources.netatmo.repository import NetatmoEncryptedEnvVarConfig
from weather.data_sources.heartbeat import create_heartbeat_request
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
handlers=[
logging.StreamHandler()
])
heartbeat = TimeSeries(create_heartbeat_request('static_plot'))
env_pass = sys.argv[1]
env_salt = sys.argv[2]
print(f'salt: {env_salt}\npass: {env_pass}')
config = NetatmoEncryptedEnvVarConfig(
username_var='NETATMO_USER',
password_var='NETATMO_PASS',
client_id_var='NETATMO_ID',
client_secret_var='NETATMO_SECRET',
password=env_pass,
salt=env_salt,
)
# Get measurements form domain:
domain = NetatmoDomain(
username=config.username,
password=config.password,
client_id=config.client_id,
client_secret=config.client_secret
)
station = 'Eftasåsen'
module = 'Stua'
plot_data = [
{'data': domain.get_measurement(station_name=station, data_type=types.temperature.name, module_name=module),
'color': '#E64C3E'}, # red
{'data': domain.get_measurement(station_name=station, data_type=types.co2.name, module_name=module),
'color': '#B0CA55'}, # green
{'data': domain.get_measurement(station_name=station, data_type=types.humidity.name, module_name=module),
'color': '#0F2933'}, # dark green
]
# ('Pressure', 'mbar', point_fx.POINT_INSTANT_VALUE, '#33120F'), # brown
# ('Noise', 'db', point_fx.POINT_INSTANT_VALUE, '#E39C30'), # yellow
# ('Rain', 'mm', point_fx.POINT_INSTANT_VALUE, '#448098'), # light blue
# ('WindStrength', 'km / h', point_fx.POINT_INSTANT_VALUE, '#8816AB'), # purple
# Get timeseries from measurements:
client = DtsClient(f'{os.environ["DTSS_SERVER"]}:{os.environ["DTSS_PORT_NUM"]}')
# client = DtsClient(f'{socket.gethostname()}:{os.environ["DTSS_PORT_NUM"]}')
tsv = TsVector([meas['data'].time_series for meas in plot_data])
cal = Calendar('Europe/Oslo')
epsilon = 0.1
now = utctime_now()
period = UtcPeriod(now - cal.DAY*3, now)
data = client.evaluate(tsv, period)
try:
fig = figure(title=f'Demo plot {cal.to_string(now)}', height=400, width=1400, x_axis_type='datetime')
fig.line([1, 2, 3, 4, 5], [5, 3, 4, 2, 1])
fig.yaxis.visible = False
fig.xaxis.formatter = DatetimeTickFormatter(
months=["%Y %b"],
days=["%F %H:%M"],
hours=["%a %H:%M"],
minutes=["%H:%M"]
)
axis_switch = ['left', 'right']
# Create axes:
for variable in plot_data:
axis_side = axis_switch[0]
axis_switch.reverse()
fig.extra_y_ranges[variable['data'].data_type.name_lower] = Range1d()
fig.add_layout(
obj=LinearAxis(
y_range_name=variable['data'].data_type.name_lower,
axis_label=f"{variable['data'].data_type.name} [{variable['data'].data_type.unit}]",
major_label_text_color=variable['color'],
major_tick_line_color=variable['color'],
minor_tick_line_color=variable['color'],
axis_line_color=variable['color'],
axis_label_text_color=variable['color'],
axis_label_text_font_style='bold',
),
place=axis_side
)
# Plot data:
x_ranges = []
for ts, variable in zip(data, plot_data):
x, y = get_xy(cal, ts)
x_ranges.extend([min(x), max(x)])
fig.line(x=x, y=y,
color=variable['color'],
legend_label=variable['data'].data_type.name,
y_range_name=variable['data'].data_type.name_lower,
line_width=3)
fig.extra_y_ranges[variable['data'].data_type.name_lower].start = np.nanmin(y) - epsilon * (np.nanmax(y) - np.nanmin(y))
fig.extra_y_ranges[variable['data'].data_type.name_lower].end = np.nanmax(y) + epsilon * (np.nanmax(y) - np.nanmin(y))
fig.x_range = Range1d(bokeh_time_from_timestamp(cal, period.start), bokeh_time_from_timestamp(cal, period.end))
output_file(NamedTemporaryFile(prefix='netatmo_demo_plot_', suffix='.html').name)
show(fig)
finally:
del client
| [
37811,
1212,
4226,
8075,
257,
2829,
9037,
7110,
286,
1366,
422,
262,
360,
83,
824,
17932,
2884,
257,
360,
912,
11792,
526,
15931,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
20218,
7753,
1330,
34441,
12966,
5551,
8979,
198,
11748,
... | 2.287871 | 2,053 |
# Copyright 2012 Jeffrey R. Spies
# License: Apache License, Version 2.0
# Website: http://jspi.es/benchmark
import time
import random
import re
import operator
import math
import sys
import os
| [
2,
15069,
2321,
19627,
371,
13,
1338,
444,
198,
2,
13789,
25,
24843,
13789,
11,
10628,
362,
13,
15,
198,
2,
15887,
25,
2638,
1378,
73,
2777,
72,
13,
274,
14,
26968,
4102,
198,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
302,
19... | 3.482143 | 56 |
import torch.nn as nn
class SCRNet(nn.Module):
"""
implementation of the baseline scene coordinate regression network
"""
| [
11748,
28034,
13,
20471,
355,
299,
77,
628,
198,
4871,
6374,
49,
7934,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
7822,
286,
262,
14805,
3715,
20435,
20683,
3127,
198,
220,
220,
220,
37227,
198
] | 3.317073 | 41 |
# Ultroid - UserBot
# Copyright (C) 2020 TeamUltroid
#
# Recode by @mrismanaziz
# @sharinguserbot
from telethon.tl.types import ChannelParticipantAdmin as admin
from telethon.tl.types import ChannelParticipantCreator as owner
from telethon.tl.types import UserStatusOffline as off
from telethon.tl.types import UserStatusOnline as onn
from telethon.tl.types import UserStatusRecently as rec
from telethon.utils import get_display_name
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import man_cmd
@man_cmd(pattern="tag(on|off|all|bots|rec|admins|owner)?(.*)")
CMD_HELP.update(
{
"tagger": f"**Plugin : **`tagger`\
\n\n • **Syntax :** `{cmd}tagall`\
\n • **Function : **Tag Top 100 Members di group chat.\
\n\n • **Syntax :** `{cmd}tagowner`\
\n • **Function : **Tag Owner group chat\
\n\n • **Syntax : **`{cmd}tagadmins`\
\n • **Function : **Tag Admins group chat.\
\n\n • **Syntax :** `{cmd}tagbots`\
\n • **Function : **Tag Bots group chat.\
\n\n • **Syntax :** `{cmd}tagrec`\
\n • **Function : **Tag Member yang Baru Aktif.\
\n\n • **Syntax :** `{cmd}tagon`\
\n • **Function : **Tag Online Members (hanya berfungsi jika privasi dimatikan)\
\n\n • **Syntax :** `{cmd}tagoff`\
\n • **Function : **Tag Offline Members (hanya berfungsi jika privasi dimatikan)\
"
}
)
| [
2,
6172,
3882,
532,
11787,
20630,
201,
198,
2,
15069,
357,
34,
8,
12131,
4816,
16301,
3882,
201,
198,
2,
201,
198,
2,
3311,
1098,
416,
2488,
76,
2442,
805,
1031,
528,
201,
198,
2,
2488,
21987,
7220,
13645,
201,
198,
201,
198,
6738... | 2.227606 | 681 |
"""
auto merge printing v2
python 2.7.14 tested
require module pypiwin32: pip install pypiwin32
require module pil: pip install pillow
"""
import os
import subprocess
from datetime import datetime
from Tkinter import Tk, Frame, Button, Label, Checkbutton, IntVar, StringVar, \
TOP, LEFT, RIGHT, BOTTOM
from ttk import Combobox
import win32print
from printlib import PrintLib
from PIL import Image
DEFAULT_PRINTER = "EPSON M100 Series MD"
SRC_FOLDER = "PNG"
MERGED_FOLDER = "PNGMERGED"
ARCHIVE_FOLDER = "PNGARCHIVE"
FULLPAPERIFONE = False
PRINTPERPAGE = 4 # 2 or 4
if PRINTPERPAGE == 2:
IMAGE_ORDER = 0 # 0 = RIGHT THEN BELOW, 1 = BELOW THEN RIGHT
IMAGE_ROTATE = 90
else:
IMAGE_ORDER = 1
IMAGE_ROTATE = 0
SOURCE_WIDTH = 1275
SOURCE_HEIGHT = 1650
WINDOW_WIDTH = 250
WINDOW_HEIGHT = 115
class App(object):
"""main app"""
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-locals
# pylint: disable=no-member
@staticmethod
def getimages(path, maxfind):
"""get images"""
imglist = []
for fpath in os.listdir(path):
ext = os.path.splitext(fpath)[1]
if ext == ".png":
imglist.append(fpath)
# print fname
if len(imglist) == maxfind:
break
return imglist
@staticmethod
def movefiles(srcpath, dstpath, filenames):
"""move files"""
for fname in filenames:
os.rename(srcpath + "/" + fname, dstpath + "/" + fname)
def mergeimages(self, srcpath, dstpath, acvpath):
"""merge images"""
imglist = self.getimages(srcpath, PRINTPERPAGE)
if not imglist: # empty list
self.lbst.configure(text="No document found")
return
# source images size
basew = SOURCE_WIDTH
baseh = SOURCE_HEIGHT
# crop this size (about 73.5% width)
imgwcrop = int(basew / 2 / 0.68)
imghcrop = int(baseh / 4 / 0.68)
# paste this size
if IMAGE_ROTATE == 90 or IMAGE_ROTATE == 270:
imgw = int(basew / 2)
imgh = int(baseh / 2)
else:
imgw = int(basew / 2)
imgh = int(baseh / 4)
# full paper if only 1 image
if FULLPAPERIFONE and len(imglist) == 1:
imgw = int(basew)
imgh = int(baseh / 2)
# new dimension of paper
baseh = int(baseh / 2) # half source A4 = A5
print "Creating image", basew, baseh
baseim = Image.new("RGB", (basew, baseh), (255, 255, 255))
left = 0
top = 0
for fname in imglist:
img = Image.open(srcpath + "/" + fname, "r")
imgcrop = img.crop((0, 0, imgwcrop, imghcrop))
print imgcrop.width, imgcrop.height
if IMAGE_ROTATE != 0:
imgcrop = imgcrop.rotate(IMAGE_ROTATE, expand=True)
print imgcrop.width, imgcrop.height
imgcrop = imgcrop.resize((imgw, imgh), Image.LANCZOS)
baseim.paste(imgcrop, (left, top, left + imgw, top + imgh))
if IMAGE_ORDER == 0: # right then below
if left == 0:
left += imgw
else:
left = 0
top += imgh
else: # below then right
if top == 0:
top += imgh
else:
top = 0
left += imgw
dstfn = dstpath + r"\merge-" + \
datetime.now().strftime("%Y%m%d%H%M%S") + ".png"
baseim = baseim.convert('P', colors=256, palette=Image.ADAPTIVE)
baseim.save(dstfn, optimize=True)
self.movefiles(srcpath, acvpath, imglist)
self.lbst.configure(text=dstfn)
print "New file:", dstfn
return dstfn
@staticmethod
def printdocument(filename, printername):
"""print document"""
prnlib = PrintLib()
prnlib.printfile(filename, printername)
def buttonprinthandler(self):
"""button print handler"""
self.lbst.configure(text="Merging images")
dst = self.mergeimages(self.srcpath, self.dstpath, self.acvpath)
self.lbst.configure(text="Printing document")
self.printdocument(dst, self.printername)
def buttonclearhandler(self):
"""button clear handler"""
imglist = self.getimages(self.srcpath, 100)
self.movefiles(self.srcpath, self.acvpath, imglist)
return
def documentcounthandler(self):
"""document count handler"""
docfound = len(self.getimages(self.srcpath, 100))
self.lbdoc.configure(text="Document found: " + str(docfound))
if self.chautoprintval.get() == 1 and docfound >= PRINTPERPAGE:
self.lbst.configure(text="Auto print now")
self.buttonprinthandler()
else:
self.lbst.configure(text="Ready: " + self.printername)
if self.chprintallval.get() == 1 and docfound > 0:
self.lbst.configure(text="Print all now")
self.buttonprinthandler()
else:
self.chprintallval.set(0)
self.lbst.configure(text="Ready: " + self.printername)
self.main.after(2000, self.documentcounthandler)
def buttonexplorehandler(self):
"""button explore"""
print 'explorer "' + os.getcwd() + '\\' + self.srcpath + '"'
subprocess.Popen('explorer "' + os.getcwd() +
'\\' + self.srcpath + '"')
return
@staticmethod
def populateprinters(defprinter):
"""populate printers"""
defidx = 0
printer_info = win32print.EnumPrinters(
win32print.PRINTER_ENUM_LOCAL | win32print.PRINTER_ENUM_CONNECTIONS
)
printer_names = []
for pidx in range(0, len(printer_info)):
printer_names.append(printer_info[pidx][2])
if printer_info[pidx][2] == defprinter:
defidx = pidx
return (defidx, printer_names)
def printerselecthandler(self, event):
"""printer select"""
print event
self.printername = self.cbprinterval.get()
return
App()
| [
37811,
201,
198,
23736,
20121,
13570,
410,
17,
201,
198,
29412,
362,
13,
22,
13,
1415,
6789,
201,
198,
46115,
8265,
279,
4464,
72,
5404,
2624,
25,
7347,
2721,
279,
4464,
72,
5404,
2624,
201,
198,
46115,
8265,
5560,
25,
7347,
2721,
2... | 1.978066 | 3,237 |
import requests
#Class designed to comunicate the app
#with a API and Get JSON information.
### Variables ###
# url API
### Constructors ###
### Setters and Getters###
### Methods ###
#GetInfo: Get JSON Information from the API.
#Input: Void
#Output: JSON Object
| [
11748,
7007,
201,
198,
201,
198,
2,
9487,
3562,
284,
401,
403,
5344,
262,
598,
220,
201,
198,
2,
4480,
257,
7824,
290,
3497,
19449,
1321,
13,
201,
198,
201,
198,
197,
21017,
15965,
2977,
44386,
201,
198,
197,
2,
19016,
7824,
201,
... | 2.641667 | 120 |
import pytest
from aioredis.util import encode_command
| [
11748,
12972,
9288,
198,
198,
6738,
257,
72,
1850,
271,
13,
22602,
1330,
37773,
62,
21812,
628,
628,
628,
628
] | 3.15 | 20 |
import asyncio
import pytest
from starlette.testclient import TestClient
from app.db import drop_database
from app.main import app
@pytest.fixture(scope="session", autouse=True)
@pytest.fixture(scope="session")
def event_loop():
"""custom event loop, fix for motor's run_in_executor"""
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture
| [
11748,
30351,
952,
198,
198,
11748,
12972,
9288,
198,
6738,
3491,
21348,
13,
9288,
16366,
1330,
6208,
11792,
198,
198,
6738,
598,
13,
9945,
1330,
4268,
62,
48806,
198,
6738,
598,
13,
12417,
1330,
598,
628,
198,
31,
9078,
9288,
13,
69,... | 2.938462 | 130 |
from yaml import safe_load
with open(__file__.replace("__init__.py", "") + "letters.yml", encoding='utf-8') as f:
lets = safe_load(f)
letters = {}
every = set()
for k, v in lets.items():
letters[tuple(v)] = k
every.update(v)
| [
6738,
331,
43695,
1330,
3338,
62,
2220,
198,
198,
4480,
1280,
7,
834,
7753,
834,
13,
33491,
7203,
834,
15003,
834,
13,
9078,
1600,
366,
4943,
1343,
366,
15653,
13,
88,
4029,
1600,
21004,
11639,
40477,
12,
23,
11537,
355,
277,
25,
19... | 2.438776 | 98 |
from standalone.hub import AutoHubServer
| [
6738,
27669,
13,
40140,
1330,
11160,
16066,
10697,
198
] | 4.555556 | 9 |
from django import forms
from search.models.session_alias import SessionAlias
from share.models import Session
| [
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
2989,
13,
27530,
13,
29891,
62,
26011,
1330,
23575,
40489,
198,
6738,
2648,
13,
27530,
1330,
23575,
628
] | 4.346154 | 26 |
"""
Starry was originally designed for maps in thermal light.
Maps in reflected light are quirky in how things are normalized,
so we need to make sure albedos, intensities, and fluxes are
all self-consistent in the code. There are some fudges we had
to make under the hood to ensure all of these tests pass.
"""
import numpy as np
import starry
import pytest
@pytest.fixture(autouse=True)
def test_one_over_r_squared(map, n_tests=10, plot=False):
"""Test that the flux decreases as 1/r^2."""
flux0 = map.flux()
zs = np.linspace(1, 10, 100)
flux = map.flux(xs=0, ys=0, zs=zs)
assert np.allclose(flux, flux0 / zs ** 2)
def test_sys_flux():
"""Test the normalization of the flux."""
# Instantiate a system. Planet has radius `r` and is at
# distance `d` from a point illumination source.
d = 10
r = 2
planet = starry.Secondary(starry.Map(reflected=True), a=d, r=r)
star = starry.Primary(starry.Map(), r=0)
sys = starry.System(star, planet)
# Get the star & planet flux when it's at full phase
t_full = 0.5 * sys._get_periods()[0]
f_star, f_planet = sys.flux(t=t_full, total=False)
# Star should have unit flux
assert np.allclose(f_star, 1.0)
# Planet should have flux equal to (2 / 3) r^2 / d^2
f_expected = (2.0 / 3.0) * r ** 2 / d ** 2
assert np.allclose(f_planet, f_expected)
| [
37811,
198,
1273,
6532,
373,
6198,
3562,
329,
8739,
287,
18411,
1657,
13,
198,
47010,
287,
12548,
1657,
389,
37276,
287,
703,
1243,
389,
39279,
11,
198,
568,
356,
761,
284,
787,
1654,
435,
3077,
418,
11,
17509,
871,
11,
290,
28462,
... | 2.595865 | 532 |
from flask import render_template, redirect, request, url_for, session, g, jsonify
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import or_
from flask_login import LoginManager, login_required, login_user, \
logout_user, current_user, UserMixin
from requests_oauthlib import OAuth2Session
from requests.exceptions import HTTPError
from app import app, db, models, login_manager
from config import Auth
from .models import User, im_data
import os, datetime, json
@app.route('/status')
""" OAuth Session creation """
@app.route('/login')
@app.route('/gCallback')
@app.route('/logout')
@login_required
""" OAuth Session creation """
@app.route('/')
@app.route('/index')
@login_required
@app.route('/getEntries', methods=['GET'])
@login_required
@app.route('/getEntriesSSP', methods=['GET'])
@login_required
@app.route('/postEntry', methods=['POST'])
@login_required
@app.route('/delEntry', methods=['GET'])
@login_required
@app.route('/getEntry', methods=['GET'])
@login_required
@app.route('/<url_name>', methods=['GET'])
| [
6738,
42903,
1330,
8543,
62,
28243,
11,
18941,
11,
2581,
11,
19016,
62,
1640,
11,
6246,
11,
308,
11,
33918,
1958,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
44161,
282,
26599,
1330,
393,
62,
198,
673... | 3.008523 | 352 |
from orchestration.database import database_update
#print database_update.machine_ip('wangwy', 'wangwy', 'cdh', 'slave1')
#print database_update.machine_ip('wangwy', 'wangwy', 'cdh', 'master')
#print database_update.machine_ip('wangwy', 'wangwy', 'cdh', 'cloudera_manager')
print database_update.machine_ip('mongo', 'mongo', 'test', 'web')
| [
6738,
17771,
12401,
13,
48806,
1330,
6831,
62,
19119,
198,
198,
2,
4798,
6831,
62,
19119,
13,
30243,
62,
541,
10786,
47562,
21768,
3256,
705,
47562,
21768,
3256,
705,
10210,
71,
3256,
705,
36341,
16,
11537,
198,
2,
4798,
6831,
62,
191... | 2.948276 | 116 |
import importlib
import math
from numpy.random import randint
from AlphaZero.search.math_helper import random_variate_dirichlet, weighted_random_choice
# Parameter for PUCT Algorithm
c_punt = 5.0
class MCTreeNode(object):
"""Tree Node in MCTS.
"""
def expand(self, policy, value):
"""Expand a leaf node according to the network evaluation.
NO visit count is updated in this function, make sure it's updated externally.
Args:
policy: a list of (action, prob) tuples returned by the network
value: the value of this node returned by the network
Returns:
None
"""
# Check if the node is leaf
if not self.is_leaf():
return
# Update W(s,a) for this parent node by formula W(s,a) = W(s,a) + v
self.update(value)
# Create valid children
for action, prob in policy:
# TODO: Is there an extra condition to check pass and suicide?
# checking will be moved to MCTSearch
if action not in self._children:
self._children[action] = MCTreeNode(self, prob)
def select(self):
""" Select the best child of this node.
Returns:
tuple: A tuple of (action, next_node) with highest Q(s,a)+U(s,a)
"""
# Argmax_a(Q(s,a)+U(s,a))
return max(self._children.items(), key=lambda act_node: act_node[1].get_selection_value())
def update(self, v):
""" Update the three values
Args:
v: value
Returns:
None
"""
# Each node's N(s,a) is updated when simulation is executed on this node,
# no need to update here. See MCTSearch.
# N(s,a) = N(s,a) + 1
# self._visit_cnt += 1
# W(s,a) = W(s,a) + v
self._total_action_val += v
def get_selection_value(self):
"""Implements PUCT Algorithm's formula for current node.
Returns:
None
"""
# U(s,a)=c_punt * P(s,a) * sqrt(Parent's N(s,a)) / (1 + N(s,a))
usa = c_punt * self._prior_prob * math.sqrt(self._parent.visit_count) / (1.0 + self._visit_cnt)
# Q(s,a) + U(s,a)
return self.get_mean_action_value() + usa
def get_mean_action_value(self):
"""Calculates Q(s,a)
Returns:
real: mean action value
"""
# TODO: Should this value be inverted with color?
# If yes, the signature should be changed to (self, color)
if self._visit_cnt == 0:
return 0
return self._total_action_val / self._visit_cnt
def visit(self):
"""Increment the visit count.
Returns:
None
"""
self._visit_cnt += 1
def is_leaf(self):
"""Checks if it is a leaf node (i.e. no nodes below this have been expanded).
Returns:
bool: if the current node is leaf.
"""
return self._children == {}
def is_root(self):
"""Checks if it is a root node.
Returns:
bool: if the current node is root.
"""
return self._parent is None
@property
@property
@property
@prior_prob.setter
class MCTSearch(object):
""" Create a Monto Carlo search tree.
"""
def __init__(self, evaluator, game_config, max_playout=1600):
"""
Arguments:
evaluator: A function that takes a state and returns (policies, value),
where value is a float in range [-1,1]
policies is a list of (action, prob)
game_config: Game configuration file
"""
self._root = MCTreeNode(None, 1.0)
self._evaluator = evaluator
self._max_playout = max_playout
self.d_alpha = game_config['d_alpha']
self.d_epsilon = game_config['d_epsilon']
self._transform_types = game_config['transform_types']
if self._transform_types == 0 or self._transform_types == 1:
self.enable_transform = False
else:
self.enable_transform = True
self._sc = importlib.import_module(game_config['state_converter_path'])
self._reverse_transformer = self._sc.ReverseTransformer(game_config)
self._reverse_transform = self._reverse_transformer.reverse_transform
def _playout(self, state, node):
"""
Recursively executes playout from the current node.
Args:
state: current board state
node: the node to start simulation
Returns:
real: the action value of the current node
"""
# The current node is visited
node.visit()
# TODO: Do we need a max tree depth/size?
if not node.is_leaf():
# Greedily select next move.
action, next_node = node.select()
#
current_player = state.current_player
state.do_move(action)
# The result of the simulation is returned after the complete playout
# Update this level of node with this value
simres_value = self._playout(state, next_node)
# Visit count is updated when. this node is first called with _playout
# Therefore there is no visit count update in update()
# Update relative value
node.update(-current_player * simres_value)
# Return the same result to the parent
return simres_value
else: # Node is a leaf
# Evaluate the state and get output from NN
if self.enable_transform:
# Generate a random transform ID
random_transform_id = randint(self._transform_types)
state_eval = state.copy()
state_eval.transform(random_transform_id)
transformed_children_candidates, value = self._evaluator(state_eval)
self._reverse_transform(transformed_children_candidates, random_transform_id)
children_candidates = transformed_children_candidates
else:
children_candidates, value = self._evaluator(state)
# Remove invalid children
children_candidates = [(action, prob) for action, prob in children_candidates if state.is_legal(action)]
# If not the end of game, expand node and terminate playout.
# Else just terminate playout.
if len(children_candidates) != 0 and not state.is_end_of_game:
# Value stored (total action value) is always relative to itself
# i.e. 1 if it wins and -1 if it loses
# value returned by NN has -1 when white wins, multiplication will inverse
node.expand(children_candidates, -state.current_player * value)
# Q(s,a)=W(s,a)
# Return the black win value to update (recursively)
else:
# No valid move, game should end. Overwrite the value with the real game result.
# Game result is absolute: 1, 0, or -1
value = state.get_winner()
node.update(-state.current_player * value)
return value
def _get_search_probs(self):
""" Calculate the search probabilities exponentially to the visit counts.
Returns:
list: a list of (action, probs)
"""
# A list of (action, selection_weight), weight is not normalized
moves = [(action, node.visit_count) for action, node in self._root.children.items()]
total = sum([count for _, count in moves])
normalized_probs = [(action, count / total) for action, count in moves]
return normalized_probs
def _calc_move(self, state, dirichlet=False):
""" Performs MCTS.
"temperature" parameter of the two random dist is not implemented,
because the value is set to either 1 or 0 in the paper, which can
be controlled by toggling the option.
Args:
state: current state
dirichlet: enable Dirichlet noise described in "Self-play" section
Returns:
None
"""
# The root of the tree is visited.
self._root.visit()
# Dirichlet noise is applied to the children of the roots, we will expand the
# root first
if self._root.is_leaf():
# Evaluate the state and get output from NN
if self.enable_transform:
# Generate a random transform ID
random_transform_id = randint(self._transform_types)
state_eval = state.copy()
state_eval.transform(random_transform_id)
transformed_children_candidates, value = self._evaluator(state_eval)
self._reverse_transform(transformed_children_candidates, random_transform_id)
children_candidates = transformed_children_candidates
else:
children_candidates, value = self._evaluator(state)
# Remove invalid children
children_candidates = [(action, prob) for action, prob in children_candidates if state.is_legal(action)]
# Only create legal children
self._root.expand(children_candidates, value)
if dirichlet:
# Get a list of random numbers from d=Dirichlet distribution
dirichlet_rand = random_variate_dirichlet(self.d_alpha, len(self._root.children))
for action, eta in zip(self._root.children.keys(), dirichlet_rand):
# Update the P(s,a) of all children of root
self._root.children[action].prior_prob = (1 - self.d_epsilon) * self._root.children[
action].prior_prob + self.d_epsilon * eta
# Do search loop while playout limit is not reached and time remains
# TODO: Implement timing module
for _ in range(self._max_playout):
self._playout(state.copy(), self._root)
def calc_move(self, state, dirichlet=False, prop_exp=True):
""" Calculates the best move
Args:
state: current state
dirichlet: enable Dirichlet noise described in "Self-play" section
prop_exp: select the final decision proportional to its exponential visit
Returns:
tuple: the calculated result (x, y)
"""
self._calc_move(state, dirichlet)
# Select the best move according to the final search tree
# select node randomly with probability: N(s,a)/ParentN(s,a)
if prop_exp:
# A list of (action, selection_weight), weight is not necessarily normalized
return weighted_random_choice(self._get_search_probs())
else:
# Directly select the node with most visits
return max(self._root.children.items(), key=lambda act_node: act_node[1].visit_count)[0]
def calc_move_with_probs(self, state, dirichlet=False):
""" Calculates the best move, and return the search probabilities.
This function should only be used for self-play.
Args:
state: current state
dirichlet: enable Dirichlet noise described in "Self-play" section
Returns:
tuple: the result (x, y) and a list of (action, probs)
"""
self._calc_move(state, dirichlet)
probs = self._get_search_probs()
result = weighted_random_choice(self._get_search_probs())
return result, probs
def update_with_move(self, last_move):
"""Step forward in the tree, keeping everything we already know about the subtree, assuming
that calc_move() has been called already. Siblings of the new root will be garbage-collected.
Returns:
None
"""
if last_move in self._root.children:
self._root = self._root.children[last_move]
self._root._parent = None
else:
self._root = MCTreeNode(None, 1.0)
| [
11748,
1330,
8019,
198,
11748,
10688,
198,
198,
6738,
299,
32152,
13,
25120,
1330,
43720,
600,
198,
198,
6738,
12995,
28667,
13,
12947,
13,
11018,
62,
2978,
525,
1330,
4738,
62,
25641,
378,
62,
15908,
488,
1616,
11,
26356,
62,
25120,
... | 2.289753 | 5,270 |
"""
Sage Interacts
Sage interacts are applications of the `@interact decorator <../../sagenb/notebook/interact.html>`_.
They are conveniently accessible in the Sage Notebook via ``interacts.[TAB].[TAB]()``.
The first ``[TAB]`` lists categories and the second ``[TAB]`` reveals the interact examples.
EXAMPLES:
Invoked in the notebook, the following command will produce the fully formatted
interactive mathlet. In the command line, it will simply return the underlying
HTML and Sage code which creates the mathlet::
sage: interacts.calculus.taylor_polynomial()
<html>...</html>
AUTHORS:
- William Stein
- Harald Schilly, Robert Marik (2011-01-16): added many examples (#9623) partially based on work by Lauri Ruotsalainen
"""
#*****************************************************************************
# Copyright (C) 2009 William Stein <wstein@gmail.com>
# Copyright (C) 2011 Harald Schilly <harald.schilly@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.all import *
# Shared symbolic variable used by the interact examples below.
x = SR.var('x')
# It is important that this file is lazily imported for this to work
from sage.repl.user_globals import get_global
# Get a bunch of functions from the user globals. In SageNB, this will
# refer to SageNB functions; in Jupyter, this will refer to Jupyter
# functions. In the command-line and for doctests, we import the
# SageNB functions as fall-back.
for name in ("interact", "checkbox", "input_box", "input_grid",
             "range_slider", "selector", "slider", "text_control"):
    try:
        # Prefer the implementation supplied by the current frontend.
        obj = get_global(name)
    except NameError:
        # No frontend registered one -- fall back to SageNB's version.
        import sagenb.notebook.interact
        obj = sagenb.notebook.interact.__dict__[name]
    globals()[name] = obj
def library_interact(f):
    """
    This is a decorator for using interacts in the Sage library.

    This is just the ``interact`` function wrapped in an additional
    function call: ``library_interact(f)()`` is equivalent to
    executing ``interact(f)``.

    EXAMPLES::

        sage: import sage.interacts.library as library
        sage: @library.library_interact
        ....: def f(n=5):
        ....:     print(n)
        sage: f()  # an interact appears if using the notebook, else code
        <html>...</html>
    """
    # The source had a bare ``@sage_wraps(f)`` immediately followed by a
    # ``return`` statement -- a syntax error, and ``library_wrapper`` was
    # never defined.  Restore the wrapped inner function.
    @sage_wraps(f)
    def library_wrapper():
        # Calling the decorated function runs the interact itself.
        interact(f)
    return library_wrapper
def html(obj):
    """
    Shorthand to pretty print HTML

    EXAMPLES::

        sage: from sage.interacts.library import html
        sage: html("<h1>Hello world</h1>")
        <h1>Hello world</h1>
    """
    # Build an HTML object with Sage's builder (imported locally so this
    # helper can shadow the module-level name), then pretty-print it.
    from sage.all import html as build_html
    pretty_print(build_html(obj))
@library_interact
def demo(n=slider(range(10)), m=slider(range(10))):
    """
    This is a demo interact that sums two numbers.

    INPUT:

    - ``n`` -- integer slider
    - ``m`` -- integer slider

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.demo()
        <html>...</html>
    """
    total = n + m
    print(total)
@library_interact
def taylor_polynomial(
    title = text_control('<h2>Taylor polynomial</h2>'),
    f=input_box(sin(x)*exp(-x),label="$f(x)=$"), order=slider(range(1,13))):
    """
    An interact which illustrates the Taylor polynomial approximation
    of various orders around `x=0`.

    INPUT:

    - ``f`` -- function expression
    - ``order`` -- integer slider

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.calculus.taylor_polynomial()
        <html>...</html>
    """
    # Expansion point is fixed at the origin.
    center = 0
    graph_f = plot(f, (x, -1, 5), thickness=2)
    marker = point((center, f(x=center)), pointsize=80, rgbcolor=(1, 0, 0))
    # Taylor polynomial of the requested order around the center.
    approx = f.taylor(x, center, order)
    graph_t = plot(approx, (-1, 5), color='green', thickness=2)
    html('$f(x)\;=\;%s$' % latex(f))
    html('$\hat{f}(x;%s)\;=\;%s+\mathcal{O}(x^{%s})$' % (center, latex(approx), order + 1))
    show(marker + graph_f + graph_t, ymin=-.5, ymax=1)
@library_interact
def definite_integral(
    title = text_control('<h2>Definite integral</h2>'),
    f = input_box(default = "3*x", label = '$f(x)=$'),
    g = input_box(default = "x^2", label = '$g(x)=$'),
    interval = range_slider(-10,10,default=(0,3), label="Interval"),
    x_range = range_slider(-10,10,default=(0,3), label = "plot range (x)"),
    selection = selector(["f", "g", "f and g", "f - g"], default="f and g", label="Select")):
    """
    This is a demo interact for plotting the definite integral of a function
    based on work by Lauri Ruotsalainen, 2010.

    INPUT:

    - ``function`` -- input box, function in x
    - ``interval`` -- interval for the definite integral
    - ``x_range`` -- range slider for plotting range
    - ``selection`` -- selector on how to visualize the integrals

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.calculus.definite_integral()
        <html>...</html>
    """
    x = SR.var('x')
    # Turn the user's text input into callable symbolic functions of x.
    f = symbolic_expression(f).function(x)
    g = symbolic_expression(g).function(x)
    f_plot = Graphics(); g_plot = Graphics(); h_plot = Graphics();
    # LaTeX summary shown under the plot; assembled per selection below.
    text = ""
    # Plot function f.
    if selection != "g":
        f_plot = plot(f(x), x, x_range, color="blue", thickness=1.5)
    # Color and calculate the area between f and the horizontal axis.
    if selection == "f" or selection == "f and g":
        f_plot += plot(f(x), x, interval, color="blue", fill=True, fillcolor="blue", fillalpha=0.15)
        text += r"$\int_{%.2f}^{%.2f}(\color{Blue}{f(x)})\,\mathrm{d}x=\int_{%.2f}^{%.2f}(%s)\,\mathrm{d}x=%.2f$" % (
            interval[0], interval[1],
            interval[0], interval[1],
            latex(f(x)),
            f(x).nintegrate(x, interval[0], interval[1])[0]
        )
    if selection == "f and g":
        text += r"<br/>"
    # Plot function g. Also color and calculate the area between g and the horizontal axis.
    if selection == "g" or selection == "f and g":
        g_plot = plot(g(x), x, x_range, color="green", thickness=1.5)
        g_plot += plot(g(x), x, interval, color="green", fill=True, fillcolor="yellow", fillalpha=0.5)
        text += r"$\int_{%.2f}^{%.2f}(\color{Green}{g(x)})\,\mathrm{d}x=\int_{%.2f}^{%.2f}(%s)\,\mathrm{d}x=%.2f$" % (
            interval[0], interval[1],
            interval[0], interval[1],
            latex(g(x)),
            g(x).nintegrate(x, interval[0], interval[1])[0]
        )
    # Plot function f-g. Also color and calculate the area between f-g and the horizontal axis.
    if selection == "f - g":
        g_plot = plot(g(x), x, x_range, color="green", thickness=1.5)
        # NOTE(review): fill=f(x) shades g's plot up to the curve f,
        # i.e. the region between the two curves -- presumably
        # intentional; confirm against the rendered output.
        g_plot += plot(g(x), x, interval, color="green", fill=f(x), fillcolor="red", fillalpha=0.15)
        h_plot = plot(f(x)-g(x), x, interval, color="red", thickness=1.5, fill=True, fillcolor="red", fillalpha=0.15)
        text = r"$\int_{%.2f}^{%.2f}(\color{Red}{f(x)-g(x)})\,\mathrm{d}x=\int_{%.2f}^{%.2f}(%s)\,\mathrm{d}x=%.2f$" % (
            interval[0], interval[1],
            interval[0], interval[1],
            latex(f(x)-g(x)),
            (f(x)-g(x)).nintegrate(x, interval[0], interval[1])[0]
        )
    show(f_plot + g_plot + h_plot, gridlines=True)
    html(text)
@library_interact
def function_derivative(
    title = text_control('<h2>Derivative grapher</h2>'),
    function = input_box(default="x^5-3*x^3+1", label="Function:"),
    x_range = range_slider(-15,15,0.1, default=(-2,2), label="Range (x)"),
    y_range = range_slider(-15,15,0.1, default=(-8,6), label="Range (y)")):
    """
    This is a demo interact for plotting derivatives of a function based on work by
    Lauri Ruotsalainen, 2010.

    INPUT:

    - ``function`` -- input box, function in x
    - ``x_range`` -- range slider for plotting range
    - ``y_range`` -- range slider for plotting range

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.calculus.function_derivative()
        <html>...</html>
    """
    x = SR.var('x')
    f = symbolic_expression(function).function(x)
    # First and second derivatives of f.
    f1 = derivative(f, x)
    f2 = derivative(f1, x)
    combined = (plot(f(x), x_range, thickness=1.5)
                + plot(f1(x), x_range, color="green")
                + plot(f2(x), x_range, color="red"))
    # A (0,0) y-range means "autoscale": omit ymin/ymax in that case.
    window = dict(xmin=x_range[0], xmax=x_range[1])
    if y_range != (0, 0):
        window.update(ymin=y_range[0], ymax=y_range[1])
    show(combined, **window)
    html("<center>$\color{Blue}{f(x) = %s}$</center>"%latex(f(x)))
    html("<center>$\color{Green}{f'(x) = %s}$</center>"%latex(f1(x)))
    html("<center>$\color{Red}{f''(x) = %s}$</center>"%latex(f2(x)))
@library_interact
def difference_quotient(
    title = text_control('<h2>Difference quotient</h2>'),
    f = input_box(default="sin(x)", label='f(x)'),
    interval= range_slider(0, 10, 0.1, default=(0.0,10.0), label="Range"),
    a = slider(0, 10, None, 5.5, label = '$a$'),
    x0 = slider(0, 10, None, 2.5, label = '$x_0$ (start point)')):
    """
    This is a demo interact for difference quotient based on work by
    Lauri Ruotsalainen, 2010.

    INPUT:

    - ``f`` -- input box, function in `x`
    - ``interval`` -- range slider for plotting
    - ``a`` -- slider for `a`
    - ``x0`` -- slider for starting point `x_0`

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.calculus.difference_quotient()
        <html>...</html>
    """
    # NOTE(review): the '<h2>Difference Quotient</h2>' header is emitted
    # twice (here and again just before the plot) -- looks like a
    # duplicate; confirm which occurrence is intended before removing.
    html('<h2>Difference Quotient</h2>')
    html('<div style="white-space: normal;">\
    <a href="http://en.wikipedia.org/wiki/Difference_quotient" target="_blank">\
    Wikipedia article about difference quotient</a></div>'
    )
    x = SR.var('x')
    f = symbolic_expression(f).function(x)
    # Extremes of f on the interval, used to scale the measurement bar.
    fmax = f.find_local_maximum(interval[0], interval[1])[0]
    fmin = f.find_local_minimum(interval[0], interval[1])[0]
    f_height = fmax - fmin
    # A horizontal "ruler" below the curve marking x0 and a.
    measure_y = fmin - 0.1*f_height
    measure_0 = line2d([(x0, measure_y), (a, measure_y)], rgbcolor="black")
    measure_1 = line2d([(x0, measure_y + 0.02*f_height), (x0, measure_y-0.02*f_height)], rgbcolor="black")
    measure_2 = line2d([(a, measure_y + 0.02*f_height), (a, measure_y-0.02*f_height)], rgbcolor="black")
    text_x0 = text("x0", (x0, measure_y - 0.05*f_height), rgbcolor="black")
    text_a = text("a", (a, measure_y - 0.05*f_height), rgbcolor="black")
    measure = measure_0 + measure_1 + measure_2 + text_x0 + text_a
    # Secant line through (a, f(a)) and (x0, f(x0)).
    tanf = symbolic_expression((f(x0)-f(a))*(x-a)/(x0-a)+f(a)).function(x)
    fplot = plot(f(x), x, interval[0], interval[1])
    tanplot = plot(tanf(x), x, interval[0], interval[1], rgbcolor="#FF0000")
    points = point([(x0, f(x0)), (a, f(a))], pointsize=20, rgbcolor="#005500")
    dashline = line2d([(x0, f(x0)), (x0, f(a)), (a, f(a))], rgbcolor="#005500", linestyle="--")
    html('<h2>Difference Quotient</h2>')
    show(fplot + tanplot + points + dashline + measure, xmin=interval[0], xmax=interval[1], ymin=fmin-0.2*f_height, ymax=fmax)
    html(r"<br>$\text{Line's equation:}$")
    html(r"$y = %s$<br>"%tanf(x))
    html(r"$\text{Slope:}$")
    html(r"$k = \frac{f(x_0)-f(a)}{x_0-a} = %s$<br>" % (N(derivative(tanf(x), x), digits=5)))
@library_interact
def quadratic_equation(A = slider(-7, 7, 1, 1), B = slider(-7, 7, 1, 1), C = slider(-7, 7, 1, -2)):
    """
    This is a demo interact for solving quadratic equations based on work by
    Lauri Ruotsalainen, 2010.

    INPUT:

    - ``A`` -- integer slider
    - ``B`` -- integer slider
    - ``C`` -- integer slider

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.calculus.quadratic_equation()
        <html>...</html>
    """
    x = SR.var('x')
    f = symbolic_expression(A*x**2 + B*x + C).function(x)
    html('<h2>The Solutions of the Quadratic Equation</h2>')
    html("$%s = 0$" % f(x))
    show(plot(f(x), x, (-10, 10), ymin=-10, ymax=10), aspect_ratio=1, figsize=4)
    # Discriminant decides the color and the displayed solution set.
    d = B**2 - 4*A*C
    if d < 0:
        # No real roots.
        color = "Red"
        sol = r"\text{solution} \in \mathbb{C}"
    elif d == 0:
        # One double root.
        color = "Blue"
        sol = -B/(2*A)
    else:
        # Two distinct real roots.
        color = "Green"
        a = (-B+sqrt(B**2-4*A*C))/(2*A)
        b = (-B-sqrt(B**2-4*A*C))/(2*A)
        sol = r"\begin{cases}%s\\%s\end{cases}" % (latex(a), latex(b))
    # Parenthesize negative B so "(-3)^2" renders unambiguously.
    if B < 0:
        dis1 = "(%s)^2-4*%s*%s" % (B, A, C)
    else:
        dis1 = "%s^2-4*%s*%s" % (B, A, C)
    dis2 = r"\color{%s}{%s}" % (color, d)
    html("$Ax^2 + Bx + C = 0$")
    # Three-step worked calculation; the 7 placeholders across the
    # concatenated parts match the 7 arguments below.
    calc = r"$x = \frac{-B\pm\sqrt{B^2-4AC}}{2A} = " + \
           r"\frac{-%s\pm\sqrt{%s}}{2*%s} = " + \
           r"\frac{-%s\pm\sqrt{%s}}{%s} = %s$"
    html(calc % (B, dis1, A, B, dis2, (2*A), sol))
@library_interact
def trigonometric_properties_triangle(
    a0 = slider(0, 360, 1, 30, label="A"),
    a1 = slider(0, 360, 1, 180, label="B"),
    a2 = slider(0, 360, 1, 300, label="C")):
    """
    This is an interact for demonstrating trigonometric properties
    in a triangle based on work by Lauri Ruotsalainen, 2010.

    INPUT:

    - ``a0`` -- angle
    - ``a1`` -- angle
    - ``a2`` -- angle

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.geometry.trigonometric_properties_triangle()
        <html>...</html>
    """
    import math

    # The three helpers below were referenced (their descriptive comments
    # remained) but their definitions were missing, so the body raised
    # NameError; restored here.

    # Returns the distance between points (x1,y1) and (x2,y2)
    def distance(p1, p2):
        (x1, y1) = p1
        (x2, y2) = p2
        return sqrt((x2 - x1)**2 + (y2 - y1)**2)

    # Returns an angle (in radians) when sides a and b
    # are adjacent and the side c is opposite to the angle
    def angle(a, b, c):
        return acos((a*a + b*b - c*c)/(2*a*b))

    # Returns the area of a triangle when an angle alpha
    # and adjacent sides a and b are known
    def area(alpha, a, b):
        return 1/2*a*b*sin(alpha)

    xy = [0]*3
    html('<h2>Trigonometric Properties of a Triangle</h2>')
    # Coordinates of the angles
    a = [math.radians(float(x)) for x in [a0, a1, a2]]
    for i in range(3):
        xy[i] = (cos(a[i]), sin(a[i]))
    # Side lengths (bc, ca, ab) corresponding to triangle vertices (a, b, c)
    al = [distance(xy[1], xy[2]), distance(xy[2], xy[0]), distance(xy[0], xy[1])]
    # The angles (a, b, c) in radians
    ak = [angle(al[0], al[1], al[2]), angle(al[1], al[2], al[0]), angle(al[2], al[0], al[1])]
    # The area of the triangle
    A = area(ak[0], al[1], al[2])
    unit_circle = circle((0, 0), 1, aspect_ratio=1)
    # Triangle
    triangle = line([xy[0], xy[1], xy[2], xy[0]], rgbcolor="black")
    triangle_points = point(xy, pointsize=30)
    # Labels of the angles drawn in a distance from points
    a_label = text("A", (xy[0][0]*1.07, xy[0][1]*1.07))
    b_label = text("B", (xy[1][0]*1.07, xy[1][1]*1.07))
    c_label = text("C", (xy[2][0]*1.07, xy[2][1]*1.07))
    labels = a_label + b_label + c_label
    show(unit_circle + triangle + triangle_points + labels, figsize=[5, 5], xmin=-1, xmax=1, ymin=-1, ymax=1)
    angl_txt = r"$\angle A = {%s}^{\circ},$ $\angle B = {%s}^{\circ},$ $\angle C = {%s}^{\circ}$" % (
        math.degrees(ak[0]),
        math.degrees(ak[1]),
        math.degrees(ak[2])
    )
    html(angl_txt)
    html(r"$AB = %s,$ $BC = %s,$ $CA = %s$"%(al[2], al[0], al[1]))
    html(r"Area of triangle $ABC = %s$"%A)
@library_interact
def unit_circle(
    function = selector([(0, sin(x)), (1, cos(x)), (2, tan(x))]),
    x = slider(0,2*pi, 0.005*pi, 0)):
    """
    This is an interact for Sin, Cos and Tan in the Unit Circle
    based on work by Lauri Ruotsalainen, 2010.

    INPUT:

    - ``function`` -- select Sin, Cos or Tan
    - ``x`` -- slider to select angle in unit circle

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.geometry.unit_circle()
        <html>...</html>
    """
    # Point on the unit circle at angle x.
    xy = (cos(x), sin(x))
    t = SR.var('t')
    html('<div style="white-space: normal;">Lines of the same color have\
    the same length</div>')
    # Unit Circle
    C = circle((0, 0), 1, figsize=[5, 5], aspect_ratio=1)
    C_line = line([(0, 0), (xy[0], xy[1])], rgbcolor="black")
    C_point = point((xy[0], xy[1]), pointsize=40, rgbcolor="green")
    # Green arc marks the swept angle; small black arc marks it near the origin.
    C_inner = parametric_plot((cos(t), sin(t)), (t, 0, x + 0.001), color="green", thickness=3)
    C_outer = parametric_plot((0.1 * cos(t), 0.1 * sin(t)), (t, 0, x + 0.001), color="black")
    C_graph = C + C_line + C_point + C_inner + C_outer
    # Graphics related to the graph of the function
    G_line = line([(0, 0), (x, 0)], rgbcolor="green", thickness=3)
    G_point = point((x, 0), pointsize=30, rgbcolor="green")
    G_graph = G_line + G_point
    # Sine
    if function == 0:
        Gf = plot(sin(t), t, 0, 2*pi, axes_labels=("x", "sin(x)"))
        Gf_point = point((x, sin(x)), pointsize=30, rgbcolor="red")
        Gf_line = line([(x, 0),(x, sin(x))], rgbcolor="red")
        Cf_point = point((0, xy[1]), pointsize=40, rgbcolor="red")
        Cf_line1 = line([(0, 0), (0, xy[1])], rgbcolor="red", thickness=3)
        Cf_line2 = line([(0, xy[1]), (xy[0], xy[1])], rgbcolor="purple", linestyle="--")
    # Cosine
    elif function == 1:
        Gf = plot(cos(t), t, 0, 2*pi, axes_labels=("x", "cos(x)"))
        Gf_point = point((x, cos(x)), pointsize=30, rgbcolor="red")
        Gf_line = line([(x, 0), (x, cos(x))], rgbcolor="red")
        Cf_point = point((xy[0], 0), pointsize=40, rgbcolor="red")
        Cf_line1 = line([(0, 0), (xy[0], 0)], rgbcolor="red", thickness=3)
        Cf_line2 = line([(xy[0], 0), (xy[0], xy[1])], rgbcolor="purple", linestyle="--")
    # Tangent
    else:
        Gf = plot(tan(t), t, 0, 2*pi, ymin=-8, ymax=8, axes_labels=("x", "tan(x)"))
        Gf_point = point((x, tan(x)), pointsize=30, rgbcolor="red")
        Gf_line = line([(x, 0), (x, tan(x))], rgbcolor="red")
        Cf_point = point((1, tan(x)), pointsize=40, rgbcolor="red")
        Cf_line1 = line([(1, 0), (1, tan(x))], rgbcolor="red", thickness=3)
        Cf_line2 = line([(xy[0], xy[1]), (1, tan(x))], rgbcolor="purple", linestyle="--")
    C_graph += Cf_point + Cf_line1 + Cf_line2
    G_graph += Gf + Gf_point + Gf_line
    # Circle view and function-graph view shown side by side.
    show(graphics_array([C_graph, G_graph]))
@library_interact
def special_points(
    title = text_control('<h2>Special points in triangle</h2>'),
    a0 = slider(0, 360, 1, 30, label="A"),
    a1 = slider(0, 360, 1, 180, label="B"),
    a2 = slider(0, 360, 1, 300, label="C"),
    show_median = checkbox(False, label="Medians"),
    show_pb = checkbox(False, label="Perpendicular Bisectors"),
    show_alt = checkbox(False, label="Altitudes"),
    show_ab = checkbox(False, label="Angle Bisectors"),
    show_incircle = checkbox(False, label="Incircle"),
    show_euler = checkbox(False, label="Euler's Line")):
    """
    This interact demo shows special points in a triangle
    based on work by Lauri Ruotsalainen, 2010.

    INPUT:

    - ``a0`` -- angle
    - ``a1`` -- angle
    - ``a2`` -- angle
    - ``show_median`` -- checkbox
    - ``show_pb`` -- checkbox to show perpendicular bisectors
    - ``show_alt`` -- checkbox to show altitudes
    - ``show_ab`` -- checkbox to show angle bisectors
    - ``show_incircle`` -- checkbox to show incircle
    - ``show_euler`` -- checkbox to show euler's line

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.geometry.special_points()
        <html>...</html>
    """
    import math

    # The three helpers below were called (their descriptive comments
    # remained) but their definitions were missing, so the body raised
    # NameError; restored here.

    # Return the intersection point of the bisector of the angle
    # <(A[a],A[c],A[b]) and the unit circle. Angles given in radians.
    def half(A, a, b, c):
        if (A[a] < A[b] and (A[c] < A[a] or A[c] > A[b])) or \
                (A[a] > A[b] and (A[c] > A[a] or A[c] < A[b])):
            p = A[a] + (A[b] - A[a]) / 2.0
        else:
            p = A[b] + (2*math.pi - A[b] + A[a]) / 2.0
        return (math.cos(p), math.sin(p))

    # Returns the distance between points (x1,y1) and (x2,y2)
    def distance(p1, p2):
        (x1, y1) = p1
        (x2, y2) = p2
        return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)

    # Returns the line (graph) going through points (x1,y1) and (x2,y2)
    def line_to_points(p1, p2, **plot_kwargs):
        (x1, y1) = p1
        (x2, y2) = p2
        return plot((y2 - y1) * (x - x1) / (x2 - x1) + y1,
                    (x, -3, 3), **plot_kwargs)

    # Coordinates of the angles
    a = [math.radians(float(x)) for x in [a0, a1, a2]]
    xy = [(math.cos(a[i]), math.sin(a[i])) for i in range(3)]
    # Labels of the angles drawn in a distance from points
    a_label = text("A", (xy[0][0]*1.07, xy[0][1]*1.07))
    b_label = text("B", (xy[1][0]*1.07, xy[1][1]*1.07))
    c_label = text("C", (xy[2][0]*1.07, xy[2][1]*1.07))
    labels = a_label + b_label + c_label
    C = circle((0, 0), 1, aspect_ratio=1)
    # Triangle
    triangle = line([xy[0], xy[1], xy[2], xy[0]], rgbcolor="black")
    triangle_points = point(xy, pointsize=30)
    # Side lengths (bc, ca, ab) corresponding to triangle vertices (a, b, c)
    ad = [distance(xy[1], xy[2]), distance(xy[2], xy[0]), distance(xy[0], xy[1])]
    # Midpoints of edges (bc, ca, ab)
    a_middle = [
        ((xy[1][0] + xy[2][0])/2.0, (xy[1][1] + xy[2][1])/2.0),
        ((xy[2][0] + xy[0][0])/2.0, (xy[2][1] + xy[0][1])/2.0),
        ((xy[0][0] + xy[1][0])/2.0, (xy[0][1] + xy[1][1])/2.0)
    ]
    # Incircle
    perimeter = float(ad[0] + ad[1] + ad[2])
    incircle_center = (
        (ad[0]*xy[0][0] + ad[1]*xy[1][0] + ad[2]*xy[2][0]) / perimeter,
        (ad[0]*xy[0][1] + ad[1]*xy[1][1] + ad[2]*xy[2][1]) / perimeter
    )
    if show_incircle:
        s = perimeter/2.0
        # Radius from Heron-style formula r = sqrt((s-a)(s-b)(s-c)/s).
        incircle_r = math.sqrt((s - ad[0]) * (s - ad[1]) * (s - ad[2]) / s)
        incircle_graph = circle(incircle_center, incircle_r) + point(incircle_center)
    else:
        incircle_graph = Graphics()
    # Angle Bisectors
    if show_ab:
        a_ab = line([xy[0], half(a, 1, 2, 0)], rgbcolor="blue", alpha=0.6)
        b_ab = line([xy[1], half(a, 2, 0, 1)], rgbcolor="blue", alpha=0.6)
        c_ab = line([xy[2], half(a, 0, 1, 2)], rgbcolor="blue", alpha=0.6)
        ab_point = point(incircle_center, rgbcolor="blue", pointsize=28)
        ab_graph = a_ab + b_ab + c_ab + ab_point
    else:
        ab_graph = Graphics()
    # Medians
    if show_median:
        a_median = line([xy[0], a_middle[0]], rgbcolor="green", alpha=0.6)
        b_median = line([xy[1], a_middle[1]], rgbcolor="green", alpha=0.6)
        c_median = line([xy[2], a_middle[2]], rgbcolor="green", alpha=0.6)
        # Medians meet at the centroid (arithmetic mean of the vertices).
        median_point = point(
            (
                (xy[0][0]+xy[1][0]+xy[2][0])/3.0,
                (xy[0][1]+xy[1][1]+xy[2][1])/3.0
            ), rgbcolor="green", pointsize=28)
        median_graph = a_median + b_median + c_median + median_point
    else:
        median_graph = Graphics()
    # Perpendicular Bisectors
    if show_pb:
        a_pb = line_to_points(a_middle[0], half(a, 1, 2, 0), rgbcolor="red", alpha=0.6)
        b_pb = line_to_points(a_middle[1], half(a, 2, 0, 1), rgbcolor="red", alpha=0.6)
        c_pb = line_to_points(a_middle[2], half(a, 0, 1, 2), rgbcolor="red", alpha=0.6)
        # For a triangle inscribed in the unit circle the circumcenter is the origin.
        pb_point = point((0, 0), rgbcolor="red", pointsize=28)
        pb_graph = a_pb + b_pb + c_pb + pb_point
    else:
        pb_graph = Graphics()
    # Altitudes
    if show_alt:
        xA, xB, xC = xy[0][0], xy[1][0], xy[2][0]
        yA, yB, yC = xy[0][1], xy[1][1], xy[2][1]
        a_alt = plot(((xC-xB)*x+(xB-xC)*xA)/(yB-yC)+yA, (x,-3,3), rgbcolor="brown", alpha=0.6)
        b_alt = plot(((xA-xC)*x+(xC-xA)*xB)/(yC-yA)+yB, (x,-3,3), rgbcolor="brown", alpha=0.6)
        c_alt = plot(((xB-xA)*x+(xA-xB)*xC)/(yA-yB)+yC, (x,-3,3), rgbcolor="brown", alpha=0.6)
        # Orthocenter (intersection of the altitudes), closed form.
        alt_lx = (xA*xB*(yA-yB)+xB*xC*(yB-yC)+xC*xA*(yC-yA)-(yA-yB)*(yB-yC)*(yC-yA))/(xC*yB-xB*yC+xA*yC-xC*yA+xB*yA-xA*yB)
        alt_ly = (yA*yB*(xA-xB)+yB*yC*(xB-xC)+yC*yA*(xC-xA)-(xA-xB)*(xB-xC)*(xC-xA))/(yC*xB-yB*xC+yA*xC-yC*xA+yB*xA-yA*xB)
        alt_intersection = point((alt_lx, alt_ly), rgbcolor="brown", pointsize=28)
        alt_graph = a_alt + b_alt + c_alt + alt_intersection
    else:
        alt_graph = Graphics()
    # Euler's Line
    if show_euler:
        # Through the circumcenter (origin) and the centroid.
        euler_graph = line_to_points(
            (0, 0),
            (
                (xy[0][0]+xy[1][0]+xy[2][0])/3.0,
                (xy[0][1]+xy[1][1]+xy[2][1])/3.0
            ),
            rgbcolor="purple",
            thickness=2,
            alpha=0.7
        )
    else:
        euler_graph = Graphics()
    show(
        C + triangle + triangle_points + labels + ab_graph + median_graph +
        pb_graph + alt_graph + incircle_graph + euler_graph,
        figsize=[5,5], xmin=-1, xmax=1, ymin=-1, ymax=1
    )
@library_interact
def coin(n = slider(2,10000, 100, default=1000, label="Number of Tosses"), interval = range_slider(0, 1, default=(0.45, 0.55), label="Plotting range (y)")):
    """
    This interact demo simulates repeated tosses of a coin,
    based on work by Lauri Ruotsalainen, 2010.

    The points give the cumulative percentage of tosses which
    are heads in a given run of the simulation, so that the
    point `(x,y)` gives the percentage of the first `x` tosses
    that were heads; this proportion should approach .5, of
    course, if we are simulating a fair coin.

    INPUT:

    - ``n`` -- number of tosses
    - ``interval`` -- plot range along
      vertical axis

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.statistics.coin()
        <html>...</html>
    """
    from random import random
    # Accumulate (toss number, running mean of the uniform draws).
    heads = 0.0
    ratios = []
    for toss in range(1, n + 1):
        heads += random()
        ratios.append((toss, heads / toss))
    # The very first point is dropped before plotting, as in the original.
    show(point(ratios[1:], gridlines=[None, [0.5]], pointsize=1),
         ymin=interval[0], ymax=interval[1])
@library_interact
def bisection_method(
    title = text_control('<h2>Bisection method</h2>'),
    f = input_box("x^2-2", label='f(x)'),
    interval = range_slider(-5,5,default=(0, 4), label="range"),
    d = slider(1, 8, 1, 3, label="$10^{-d}$ precision"),
    maxn = slider(0,50,1,10, label="max iterations")):
    """
    Interact explaining the bisection method, based on similar interact
    explaining secant method and Wiliam Stein's example from wiki.

    INPUT:

    - ``f`` -- function
    - ``interval`` -- range slider for the search interval
    - ``d`` -- slider for the precision (`10^{-d}`)
    - ``maxn`` -- max number of iterations

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.calculus.secant_method()
        <html>...</html>
    """
    # NOTE(review): the doctest above invokes secant_method() -- looks
    # like a copy-paste from that interact; confirm the intended target.
    x = SR.var('x')
    f = symbolic_expression(f).function(x)
    a, b = interval
    h = 10**(-d)
    try:
        # _bisection_method is defined elsewhere in this module.
        c, intervals = _bisection_method(f, float(a), float(b), maxn, h)
    except ValueError:
        # Bisection needs f(a) and f(b) of opposite sign.
        print("f must have opposite sign at the endpoints of the interval")
        show(plot(f, a, b, color='red'), xmin=a, xmax=b)
    else:
        html(r"$\text{Precision }h = 10^{-d}=10^{-%s}=%.5f$"%(d, float(h)))
        html(r"${c = }%s$"%latex(c))
        # NOTE(review): the next two format strings lack a closing '$';
        # kept byte-identical -- confirm whether the rendering is intended.
        html(r"${f(c) = }%s"%latex(f(c)))
        html(r"$%s \text{ iterations}"%len(intervals))
        P = plot(f, a, b, color='red')
        # Vertical spacing between successive interval markers.
        k = (P.ymax() - P.ymin())/ (1.5*len(intervals))
        L = sum(line([(c,k*i), (d,k*i)]) for i, (c,d) in enumerate(intervals) )
        L += sum(line([(c,k*i-k/4), (c,k*i+k/4)]) for i, (c,d) in enumerate(intervals) )
        L += sum(line([(d,k*i-k/4), (d,k*i+k/4)]) for i, (c,d) in enumerate(intervals) )
        show(P + L, xmin=a, xmax=b)
@library_interact
def secant_method(
    title = text_control('<h2>Secant method for numerical root finding</h2>'),
    f = input_box("x^2-2", label='f(x)'),
    interval = range_slider(-5,5,default=(0, 4), label="range"),
    d = slider(1, 16, 1, 3, label="10^-d precision"),
    maxn = slider(0,15,1,10, label="max iterations")):
    """
    Interact explaining the secant method, based on work by
    Lauri Ruotsalainen, 2010.
    Originally this is based on work by William Stein.

    INPUT:

    - ``f`` -- function
    - ``interval`` -- range slider for the search interval
    - ``d`` -- slider for the precision (10^-d)
    - ``maxn`` -- max number of iterations

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.calculus.secant_method()
        <html>...</html>
    """
    x = SR.var('x')
    f = symbolic_expression(f).function(x)
    a, b = interval
    h = 10**(-d)
    # The bracketing test requires f(a) and f(b) of opposite sign.
    if float(f(a)*f(b)) > 0:
        print("f must have opposite sign at the endpoints of the interval")
        show(plot(f, a, b, color='red'), xmin=a, xmax=b)
    else:
        # _secant_method is defined elsewhere in this module.
        c, intervals = _secant_method(f, float(a), float(b), maxn, h)
        html(r"$\text{Precision }h = 10^{-d}=10^{-%s}=%.5f$"%(d, float(h)))
        html(r"${c = }%s$"%latex(c))
        # NOTE(review): the next two format strings lack a closing '$';
        # kept byte-identical -- confirm whether the rendering is intended.
        html(r"${f(c) = }%s"%latex(f(c)))
        html(r"$%s \text{ iterations}"%len(intervals))
        P = plot(f, a, b, color='red')
        # Vertical spacing between successive interval markers.
        k = (P.ymax() - P.ymin())/ (1.5*len(intervals))
        L = sum(line([(c,k*i), (d,k*i)]) for i, (c,d) in enumerate(intervals) )
        L += sum(line([(c,k*i-k/4), (c,k*i+k/4)]) for i, (c,d) in enumerate(intervals) )
        L += sum(line([(d,k*i-k/4), (d,k*i+k/4)]) for i, (c,d) in enumerate(intervals) )
        # Green segments visualize each secant step toward the root.
        S = sum(line([(c,f(c)), (d,f(d)), (d-(d-c)*f(d)/(f(d)-f(c)), 0)], color="green") for (c,d) in intervals)
        show(P + L + S, xmin=a, xmax=b)
@library_interact
def newton_method(
    title = text_control('<h2>Newton method</h2>'),
    f = input_box("x^2 - 2"),
    c = slider(-10,10, default=6, label='Start ($x$)'),
    d = slider(1, 16, 1, 3, label="$10^{-d}$ precision"),
    maxn = slider(0, 15, 1, 10, label="max iterations"),
    interval = range_slider(-10,10, default = (0,6), label="Interval"),
    list_steps = checkbox(default=False, label="List steps")):
    """
    Interact explaining the Newton method, based on work by
    Lauri Ruotsalainen, 2010.
    Originally this is based on work by William Stein.

    INPUT:

    - ``f`` -- function
    - ``c`` -- starting position (`x`)
    - ``d`` -- slider for the precision (`10^{-d}`)
    - ``maxn`` -- max number of iterations
    - ``interval`` -- range slider for the search interval
    - ``list_steps`` -- checkbox, if true shows the steps numerically

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.calculus.newton_method()
        <html>...</html>
    """
    x = SR.var('x')
    f = symbolic_expression(f).function(x)
    a, b = interval
    h = 10**(-d)
    # _newton_method is defined elsewhere in this module.
    c, midpoints = _newton_method(f, float(c), maxn, h/2.0)
    html(r"$\text{Precision } 2h = %s$"%latex(float(h)))
    html(r"${c = }%s$"%c)
    html(r"${f(c) = }%s"%latex(f(c)))
    html(r"$%s \text{ iterations}"%len(midpoints))
    if list_steps:
        s = [["$n$","$x_n$","$f(x_n)$", "$f(x_n-h)\,f(x_n+h)$"]]
        for i, c in enumerate(midpoints):
            # Fixed: the last column must be f(c-h)*f(c+h), matching its
            # header "$f(x_n-h)\,f(x_n+h)$" (the sign-change bracketing
            # test); the original computed (c-h)*f(c+h).
            s.append([i+1, c, f(c), f(c-h)*f(c+h)])
        pretty_print(table(s, header_row=True))
    else:
        P = plot(f, x, interval, color="blue")
        # Green verticals drop from each iterate to the axis; red segments
        # follow the tangent down to the next iterate.
        L = sum(line([(c, 0), (c, f(c))], color="green") for c in midpoints[:-1])
        for i in range(len(midpoints) - 1):
            L += line([(midpoints[i], f(midpoints[i])), (midpoints[i+1], 0)], color="red")
        show(P + L, xmin=interval[0], xmax=interval[1], ymin=P.ymin(), ymax=P.ymax())
@library_interact
def trapezoid_integration(
    title = text_control('<h2>Trapezoid integration</h2>'),
    f = input_box(default = "x^2-5*x + 10", label='$f(x)=$'),
    n = slider(1,100,1,5, label='# divisions'),
    interval_input = selector(['from slider','from keyboard'], label='Integration interval', buttons=True),
    interval_s = range_slider(-10,10,default=(0,8), label="slider: "),
    interval_g = input_grid(1,2,default=[[0,8]], label="keyboard: "),
    output_form = selector(['traditional','table','none'], label='Computations form', buttons=True)
    ):
    """
    Interact explaining the trapezoid method for definite integrals, based on work by
    Lauri Ruotsalainen, 2010 (based on the application "Numerical integrals with various rules"
    by Marshall Hampton and Nick Alexander)

    INPUT:

    - ``f`` -- function of variable x to integrate
    - ``n`` -- number of divisions
    - ``interval_input`` -- swithes the input for interval between slider and keyboard
    - ``interval_s`` -- slider for interval to integrate
    - ``interval_g`` -- input grid for interval to integrate
    - ``output_form`` -- the computation is formatted in a traditional form, in a table or missing

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

        sage: interacts.calculus.trapezoid_integration()
        <html>...</html>
    """
    # Sample abscissas and ordinates collected while drawing the trapezoids.
    xs = []
    ys = []
    if interval_input == 'from slider':
        interval = interval_s
    else:
        interval = interval_g[0]
    # Width of each subdivision.
    h = float(interval[1]-interval[0])/n
    x = SR.var('x')
    f = symbolic_expression(f).function(x)
    trapezoids = Graphics()
    for i in range(n):
        xi = interval[0] + i*h
        yi = f(xi)
        # One closed trapezoid outline per subdivision.
        trapezoids += line([[xi, 0], [xi, yi], [xi + h, f(xi + h)],[xi + h, 0],[xi, 0]], rgbcolor = (1,0,0))
        xs.append(xi)
        ys.append(yi)
    # Append the right endpoint so xs/ys hold n+1 samples.
    xs.append(xi + h)
    ys.append(f(xi + h))
    html(r'Function $f(x)=%s$'%latex(f(x)))
    show(plot(f, interval[0], interval[1]) + trapezoids, xmin = interval[0], xmax = interval[1])
    # Reference value from numerical integration.
    numeric_value = integral_numerical(f, interval[0], interval[1])[0]
    # Composite trapezoid rule: h*(y0/2 + y1 + ... + y_{n-1} + yn/2).
    approx = h *(ys[0]/2 + sum([ys[i] for i in range(1,n)]) + ys[n]/2)
    html(r'Integral value to seven decimal places is: $\displaystyle\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x} = %.6f$'%(
        interval[0], interval[1], N(numeric_value, digits=7))
    )
    if output_form == 'traditional':
        sum_formula_html = r"\frac {d}{2} \cdot \left[f(x_0) + %s + f(x_{%s})\right]" % (
            ' + '.join([ "2 f(x_{%s})"%i for i in range(1,n)]),
            n
        )
        sum_placement_html = r"\frac{%.2f}{2} \cdot \left[f(%.2f) + %s + f(%.2f)\right]" % (
            h,
            N(xs[0], digits=5),
            ' + '.join([ "2 f(%.2f)" %N(i, digits=5) for i in xs[1:-1]]),
            N(xs[n], digits=5)
        )
        sum_values_html = r"\frac{%.2f}{2} \cdot \left[%.2f + %s + %.2f\right]" % (
            h,
            N(ys[0], digits=5),
            ' + '.join([ "2\cdot %.2f" % N(i, digits=5) for i in ys[1:-1]]),
            N(ys[n], digits=5)
        )
        html(r'''
        <div class="math">
        \begin{align*}
        \int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x}
        & \approx %s \\
        & = %s \\
        & = %s \\
        & = %s
        \end{align*}
        </div>
        ''' % (
            interval[0], interval[1],
            sum_formula_html, sum_placement_html, sum_values_html,
            N(approx, digits=7)
        ))
    elif output_form == 'table':
        s = [['$i$','$x_i$','$f(x_i)$','$m$','$m\cdot f(x_i)$']]
        for i in range(0,n+1):
            # Endpoint samples get weight 1, interior samples weight 2.
            if i==0 or i==n:
                j = 1
            else:
                j = 2
            s.append([i, xs[i], ys[i],j,N(j*ys[i])])
        pretty_print(table(s, header_row=True))
@library_interact
def simpson_integration(
    title = text_control('<h2>Simpson integration</h2>'),
    f = input_box(default = 'x*sin(x)+x+1', label='$f(x)=$'),
    n = slider(2,100,2,6, label='# divisions'),
    interval_input = selector(['from slider','from keyboard'], label='Integration interval', buttons=True),
    interval_s = range_slider(-10,10,default=(0,10), label="slider: "),
    interval_g = input_grid(1,2,default=[[0,10]], label="keyboard: "),
    output_form = selector(['traditional','table','none'], label='Computations form', buttons=True)):
    """
    Interact explaining the Simpson method for definite integrals, based on work by
    Lauri Ruotsalainen, 2010 (based on the application "Numerical integrals with various rules"
    by Marshall Hampton and Nick Alexander)

    INPUT:

    - ``f`` -- function of variable x to integrate
    - ``n`` -- number of divisions (mult. of 2)
    - ``interval_input`` -- switches the input for interval between slider and keyboard
    - ``interval_s`` -- slider for interval to integrate
    - ``interval_g`` -- input grid for interval to integrate
    - ``output_form`` -- the computation is formatted in a traditional form, in a table or missing

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

    sage: interacts.calculus.simpson_integration()
    <html>...</html>
    """
    x = SR.var('x')
    f = symbolic_expression(f).function(x)
    if interval_input == 'from slider':
        interval = interval_s
    else:
        interval = interval_g[0]
    xs = []; ys = []
    # width of each of the n subintervals
    dx = float(interval[1]-interval[0])/n
    # sample f at the n+1 equally spaced nodes
    for i in range(n+1):
        xs.append(interval[0] + i*dx)
        ys.append(f(x=xs[-1]))
    parabolas = Graphics()
    lines = Graphics()
    # Simpson's rule fits one parabola through each consecutive triple of nodes
    for i in range(0, n-1, 2):
        p = parabola((xs[i],ys[i]),(xs[i+1],ys[i+1]),(xs[i+2],ys[i+2]))
        parabolas += plot(p(x=x), (x, xs[i], xs[i+2]), color="red")
        lines += line([(xs[i],ys[i]), (xs[i],0), (xs[i+2],0)],color="red")
        lines += line([(xs[i+1],ys[i+1]), (xs[i+1],0)], linestyle="-.", color="red")
    lines += line([(xs[-1],ys[-1]), (xs[-1],0)], color="red")
    html(r'Function $f(x)=%s$'%latex(f(x)))
    show(plot(f(x),x,interval[0],interval[1]) + parabolas + lines, xmin = interval[0], xmax = interval[1])
    numeric_value = integral_numerical(f,interval[0],interval[1])[0]
    # composite Simpson's rule: dx/3 * [y_0 + 4*(odd-index y) + 2*(interior even-index y) + y_n]
    approx = dx/3 *(ys[0] + sum([4*ys[i] for i in range(1,n,2)]) + sum([2*ys[i] for i in range(2,n,2)]) + ys[n])
    html(r'Integral value to seven decimal places is: $\displaystyle\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x} = %.6f$'%
         (interval[0],interval[1],
        N(numeric_value,digits=7)))
    if output_form == 'traditional':
        # ``i%2*(-2)+4`` is 4 for even i, 2 for odd i: the Simpson weight of node i+1
        sum_formula_html = r"\frac{d}{3} \cdot \left[ f(x_0) + %s + f(x_{%s})\right]" % (
            ' + '.join([ r"%s \cdot f(x_{%s})" %(i%2*(-2)+4, i+1) for i in range(0,n-1)]),
            n
        )
        sum_placement_html = r"\frac{%.2f}{3} \cdot \left[ f(%.2f) + %s + f(%.2f)\right]" % (
            dx,
            N(xs[0],digits=5),
            ' + '.join([ r"%s \cdot f(%.2f)" %(i%2*(-2)+4, N(xk, digits=5)) for i, xk in enumerate(xs[1:-1])]),
            N(xs[n],digits=5)
        )
        sum_values_html = r"\frac{%.2f}{3} \cdot \left[ %s %s %s\right]" %(
            dx,
            "%.2f + "%N(ys[0],digits=5),
            ' + '.join([ r"%s \cdot %.2f" %(i%2*(-2)+4, N(yk, digits=5)) for i, yk in enumerate(ys[1:-1])]),
            " + %.2f"%N(ys[n],digits=5)
        )
        html(r'''
        <div class="math">
        \begin{align*}
        \int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x}
        & \approx %s \\
        & = %s \\
        & = %s \\
        & = %.6f
        \end{align*}
        </div>
        ''' % (
            interval[0], interval[1],
            sum_formula_html, sum_placement_html, sum_values_html,
            N(approx,digits=7)
        ))
    elif output_form == 'table':
        s = [['$i$','$x_i$','$f(x_i)$','$m$','$m\cdot f(x_i)$']]
        for i in range(0,n+1):
            # endpoints get weight 1; interior nodes alternate 4 (odd i) / 2 (even i)
            if i==0 or i==n:
                j = 1
            else:
                j = (i+1)%2*(-2)+4
            s.append([i, xs[i], ys[i],j,N(j*ys[i])])
        s.append(['','','','$\sum$','$%s$'%latex(3/dx*approx)])
        pretty_print(table(s, header_row=True))
        html(r'$\int_{%.2f}^{%.2f} {f(x) \, \mathrm{d}x}\approx\frac {%.2f}{3}\cdot %s=%s$'%
        (interval[0], interval[1],dx,latex(3/dx*approx),latex(approx)))
@library_interact
def riemann_sum(
    title = text_control('<h2>Riemann integral with random sampling</h2>'),
    f = input_box("x^2+1", label = "$f(x)=$", width=40),
    n = slider(1,30,1,5, label='# divisions'),
    hr1 = text_control('<hr>'),
    interval_input = selector(['from slider','from keyboard'], label='Integration interval', buttons=True),
    interval_s = range_slider(-5,10,default=(0,2), label="slider: "),
    interval_g = input_grid(1,2,default=[[0,2]], label="keyboard: "),
    hr2 = text_control('<hr>'),
    list_table = checkbox(default=False, label="List table"),
    auto_update = False):
    """
    Interact explaining the definition of Riemann integral

    INPUT:

    - ``f`` -- function of variable x to integrate
    - ``n`` -- number of divisions
    - ``interval_input`` -- switches the input for interval between slider and keyboard
    - ``interval_s`` -- slider for interval to integrate
    - ``interval_g`` -- input grid for interval to integrate
    - ``list_table`` -- print table with values of the function

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

    sage: interacts.calculus.riemann_sum()
    <html>...</html>

    AUTHORS:

    - Robert Marik (08-2010)
    """
    x = SR.var('x')
    from random import random
    if interval_input == 'from slider':
        a = interval_s[0]
        b = interval_s[1]
    else:
        a = interval_g[0][0]
        b = interval_g[0][1]
    func = symbolic_expression(f).function(x)
    # random partition of [a, b]: n-1 interior points, endpoints fixed
    division = [a]+[a+random()*(b-a) for i in range(n-1)]+[b]
    division = sorted(division)
    # one random sample point eta_i inside each subinterval
    xs = [division[i]+random()*(division[i+1]-division[i]) for i in range(n)]
    ys = [func(x_val) for x_val in xs]
    rects = Graphics()
    for i in range(n):
        # rectangle over subinterval i with height f(eta_i);
        # green above the axis, red below
        body=[[division[i],0],[division[i],ys[i]],[division[i+1],ys[i]],[division[i+1],0]]
        if ys[i].n()>0:
            color_rect='green'
        else:
            color_rect='red'
        rects = rects +polygon2d(body, rgbcolor = color_rect,alpha=0.1)\
            + point((xs[i],ys[i]), rgbcolor = (1,0,0))\
            + line(body,rgbcolor='black',zorder=-1)
    html('<small>Adjust your data and click Update button. Click repeatedly for another random values.</small>')
    show(plot(func(x),(x,a,b),zorder=5) + rects)
    # subinterval widths ("delka intervalu" is Czech for "interval length")
    delka_intervalu=[division[i+1]-division[i] for i in range(n)]
    if list_table:
        pretty_print(table([
            ["$i$", "$[x_{i-1},x_i]$", "$\eta_i$", "$f(\eta_i)$", "$x_{i}-x_{i-1}$"]
        ] + [
            [i+1,[division[i],division[i+1]],xs[i],ys[i],delka_intervalu[i]] for i in range(n)
        ], header_row=True))
    html('Riemann sum: $\displaystyle\sum_{i=1}^{%s} f(\eta_i)(x_i-x_{i-1})=%s$ '%
        (latex(n),latex(sum([ys[i]*delka_intervalu[i] for i in range(n)]))))
    html('Exact value of the integral $\displaystyle\int_{%s}^{%s}%s\,\mathrm{d}x=%s$'%
        (latex(a),latex(b),latex(func(x)),latex(integral_numerical(func(x),a,b)[0])))
x = SR.var('x')
@library_interact
def function_tool(f=sin(x), g=cos(x), xrange=range_slider(-3,3,default=(0,1),label='x-range'),
      yrange='auto',
      a=1,
      action=selector(['f', 'df/dx', 'int f', 'num f', 'den f', '1/f', 'finv',
                       'f+a', 'f-a', 'f*a', 'f/a', 'f^a', 'f(x+a)', 'f(x*a)',
                       'f+g', 'f-g', 'f*g', 'f/g', 'f(g)'],
                      width=15, nrows=5, label="h = "),
      do_plot = ("Draw Plots", True)):
    """
    `Function Plotting Tool <http://wiki.sagemath.org/interact/calculus#Functiontool>`_
    (by William Stein (?))

    INPUT:

    - ``f`` -- function f(x)
    - ``g`` -- function g(x)
    - ``xrange`` -- range for plotting (x)
    - ``yrange`` -- range for plotting ('auto' is default, otherwise a tuple)
    - ``a`` -- factor ``a``
    - ``action`` -- select given operation on or combination of functions
    - ``do_plot`` -- if true, a plot is drawn

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

    sage: interacts.calculus.function_tool()
    <html>...</html>
    """
    x = SR.var('x')
    try:
        f = SR(f); g = SR(g); a = SR(a)
    except TypeError as msg:
        # exception objects are not sliceable on Python 3; truncate the string form
        print(str(msg)[-200:])
        print("Unable to make sense of f,g, or a as symbolic expressions in single variable x.")
        return
    if not (isinstance(xrange, tuple) and len(xrange) == 2):
        xrange = (0,1)
    # h is the derived function, lbl its LaTeX label shown above the plot
    h = 0; lbl = ''
    if action == 'f':
        h = f
        lbl = 'f'
    elif action == 'df/dx':
        h = f.derivative(x)
        lbl = r'\frac{\mathrm{d}f}{\mathrm{d}x}'
    elif action == 'int f':
        h = f.integrate(x)
        lbl = r'\int f \,\mathrm{d}x'
    elif action == 'num f':
        h = f.numerator()
        lbl = r'\text{numer(f)}'
    elif action == 'den f':
        h = f.denominator()
        lbl = r'\text{denom(f)}'
    elif action == '1/f':
        h = 1/f
        lbl = r'\frac{1}{f}'
    elif action == 'finv':
        # inverse: solve f(x) == y for x and take the first solution branch
        h = solve(f == var('y'), x)[0].rhs()
        lbl = 'f^{-1}(y)'
    elif action == 'f+a':
        h = f+a
        lbl = 'f + a'
    elif action == 'f-a':
        h = f-a
        lbl = 'f - a'
    elif action == 'f*a':
        h = f*a
        lbl = r'f \times a'
    elif action == 'f/a':
        h = f/a
        lbl = r'\frac{f}{a}'
    elif action == 'f^a':
        # the original repeated this branch verbatim; the duplicate was dead code
        h = f**a
        lbl = 'f^a'
    elif action == 'f(x+a)':
        h = f.subs(x=x+a)
        lbl = 'f(x+a)'
    elif action == 'f(x*a)':
        h = f.subs(x=x*a)
        lbl = 'f(xa)'
    elif action == 'f+g':
        h = f+g
        lbl = 'f + g'
    elif action == 'f-g':
        h = f-g
        lbl = 'f - g'
    elif action == 'f*g':
        h = f*g
        lbl = r'f \times g'
    elif action == 'f/g':
        h = f/g
        lbl = r'\frac{f}{g}'
    elif action == 'f(g)':
        h = f(g)
        lbl = 'f(g)'
    html('<center><font color="red">$f = %s$</font></center>'%latex(f))
    html('<center><font color="green">$g = %s$</font></center>'%latex(g))
    html('<center><font color="blue"><b>$h = %s = %s$</b></font></center>'%(lbl, latex(h)))
    if do_plot:
        P = plot(f, xrange, color='red', thickness=2) + \
            plot(g, xrange, color='green', thickness=2) + \
            plot(h, xrange, color='blue', thickness=2)
        if yrange == 'auto':
            show(P, xmin=xrange[0], xmax=xrange[1])
        else:
            yrange = sage_eval(yrange)
            show(P, xmin=xrange[0], xmax=xrange[1], ymin=yrange[0], ymax=yrange[1])
@library_interact
def julia(expo = slider(-10,10,0.1,2),
    c_real = slider(-2,2,0.01,0.5, label='real part const.'),
    c_imag = slider(-2,2,0.01,0.5, label='imag part const.'),
    iterations=slider(1,100,1,20, label='# iterations'),
    zoom_x = range_slider(-2,2,0.01,(-1.5,1.5), label='Zoom X'),
    zoom_y = range_slider(-2,2,0.01,(-1.5,1.5), label='Zoom Y'),
    plot_points = slider(20,400,20, default=150, label='plot points'),
    dpi = slider(20, 200, 10, default=80, label='dpi')):
    """
    Julia Fractal, based on
    `Julia by Harald Schilly <http://wiki.sagemath.org/interact/fractal#Julia>`_.

    INPUT:

    - ``expo`` -- exponent ``e`` of the iteration map `z^e+c`
    - ``c_real`` -- real part of the constant ``c``
    - ``c_imag`` -- imaginary part of the constant ``c``
    - ``iterations`` -- number of iterations
    - ``zoom_x`` -- range slider for zoom in x direction
    - ``zoom_y`` -- range slider for zoom in y direction
    - ``plot_points`` -- number of points to plot
    - ``dpi`` -- dots-per-inch parameter for the plot

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

    sage: interacts.fractals.julia()
    <html>...</html>
    """
    z = SR.var('z')
    I = CDF.gen()
    # compile the iteration map z -> z^expo + c once, for fast evaluation over CDF
    iteration_map = symbolic_expression(z**expo + c_real + c_imag*I).function(z)
    compiled_map = fast_callable(iteration_map, vars=[z], domain=CDF)
    from sage.interacts.library_cython import julia
    html('<h2>Julia Fractal</h2>')
    html(r'Recursive Formula: $z \leftarrow z^{%.2f} + (%.2f+%.2f*\mathbb{I})$' % (expo, c_real, c_imag))
    picture = complex_plot(lambda seed: julia(compiled_map, seed, iterations),
                           zoom_x, zoom_y, plot_points=plot_points, dpi=dpi)
    picture.show(frame=True, aspect_ratio=1)
@library_interact
def mandelbrot(expo = slider(-10,10,0.1,2),
    iterations=slider(1,100,1,20, label='# iterations'),
    zoom_x = range_slider(-2,2,0.01,(-2,1), label='Zoom X'),
    zoom_y = range_slider(-2,2,0.01,(-1.5,1.5), label='Zoom Y'),
    plot_points = slider(20,400,20, default=150, label='plot points'),
    dpi = slider(20, 200, 10, default=80, label='dpi')):
    """
    Mandelbrot Fractal, based on
    `Mandelbrot by Harald Schilly <http://wiki.sagemath.org/interact/fractal#Mandelbrot>`_.

    INPUT:

    - ``expo`` -- exponent ``e`` of the iteration map `z^e+c`
    - ``iterations`` -- number of iterations
    - ``zoom_x`` -- range slider for zoom in x direction
    - ``zoom_y`` -- range slider for zoom in y direction
    - ``plot_points`` -- number of points to plot
    - ``dpi`` -- dots-per-inch parameter for the plot

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

    sage: interacts.fractals.mandelbrot()
    <html>...</html>
    """
    x, z, c = SR.var('x, z, c')
    # compile z -> z^expo + c as a two-argument map over CDF
    iteration_map = symbolic_expression(z**expo + c).function(z, c)
    compiled_map = fast_callable(iteration_map, vars=[z,c], domain=CDF)
    from sage.interacts.library_cython import mandel
    html('<h2>Mandelbrot Fractal</h2>')
    html(r'Recursive Formula: $z \leftarrow z^{%.2f} + c$ for $c \in \mathbb{C}$' % expo)
    picture = complex_plot(lambda seed: mandel(compiled_map, seed, iterations),
                           zoom_x, zoom_y, plot_points=plot_points, dpi=dpi)
    picture.show(frame=True, aspect_ratio=1)
@library_interact
def cellular_automaton(
    N=slider(1,500,1,label='Number of iterations',default=100),
    rule_number=slider(0, 255, 1, default=110, label='Rule number'),
    size = slider(1, 11, step_size=1, default=6, label='size of graphic')):
    """
    Yields a matrix showing the evolution of a
    `Wolfram's cellular automaton <http://mathworld.wolfram.com/CellularAutomaton.html>`_.

    `Based on work by Pablo Angulo <http://wiki.sagemath.org/interact/misc#CellularAutomata>`_.

    INPUT:

    - ``N`` -- iterations
    - ``rule_number`` -- rule number (0 to 255)
    - ``size`` -- size of the shown picture

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

    sage: interacts.fractals.cellular_automaton()
    <html>...</html>
    """
    from sage.all import Integer
    if not 0 <= rule_number <= 255:
        raise ValueError('Invalid rule number')
    # the rule's binary digits, least-significant first, zero-padded to the
    # 8 possible three-cell neighborhood patterns
    binary_digits = Integer(rule_number).digits(base=2)
    rule = binary_digits + [0]*(8-len(binary_digits))
    html('<h2>Cellular Automaton</h2>'+
         '<div style="white-space: normal;">"A cellular automaton is a collection of "colored" cells \
        on a grid of specified shape that evolves through a number of \
        discrete time steps according to a set of rules based on the \
        states of neighboring cells." — \
        <a target="_blank" href="http://mathworld.wolfram.com/CellularAutomaton.html">Mathworld,\
        Cellular Automaton</a></div>\
        <div>Rule %s expands to %s</div>' % (rule_number, ''.join(map(str,rule)))
    )
    from sage.interacts.library_cython import cellular
    # M holds one row per time step of the automaton's evolution
    M = cellular(rule, N)
    plot_M = matrix_plot(M, cmap='binary')
    plot_M.show(figsize=[size,size])
@library_interact
def polar_prime_spiral(
    interval = range_slider(1, 4000, 10, default=(1, 1000), label="range"),
    show_factors = True,
    highlight_primes = True,
    show_curves = True,
    n = slider(1,200, 1, default=89, label="number $n$"),
    dpi = slider(10,300, 10, default=100, label="dpi")):
    """
    Polar Prime Spiral interact, based on work by David Runde.

    For more information about the factors in the spiral,
    `visit John Williamson's website <http://www.dcs.gla.ac.uk/~jhw/spirals/index.html>`_.

    INPUT:

    - ``interval`` -- range slider to specify start and end
    - ``show_factors`` -- if true, show factors
    - ``highlight_primes`` -- if true, prime numbers are highlighted
    - ``show_curves`` -- if true, curves are plotted
    - ``n`` -- number `n`
    - ``dpi`` -- dots per inch resolution for plotting

    EXAMPLES:

    Invoked in the notebook, the following command will produce
    the fully formatted interactive mathlet. In the command line,
    it will simply return the underlying HTML and Sage code which
    creates the mathlet::

    sage: sage.interacts.algebra.polar_prime_spiral()
    <html>...</html>
    """
    html('<h2>Polar Prime Spiral</h2> \
        <div style="white-space: normal;">\
        For more information about the factors in the spiral, visit \
        <a href="http://www.dcs.gla.ac.uk/~jhw/spirals/index.html" target="_blank">\
        Number Spirals by John Williamson</a>.</div>'
    )
    start, end = interval
    from sage.ext.fast_eval import fast_float
    from math import floor, ceil
    from sage.plot.colors import hue
    # validate inputs before doing any plotting work
    if start < 1 or end <= start:
        print("invalid start or end value")
        return
    if n > end:
        print("WARNING: n is greater than end value")
        return
    if n < start:
        print("n < start value")
        return
    nn = SR.var('nn')
    # the k-th number sits at polar radius sqrt(k) and angle 2*pi*sqrt(k)
    f1 = fast_float(sqrt(nn)*cos(2*pi*sqrt(nn)), 'nn')
    f2 = fast_float(sqrt(nn)*sin(2*pi*sqrt(nn)), 'nn')
    f = lambda x: (f1(x), f2(x))
    main_pts = []    # renamed from ``list`` -- do not shadow the builtin
    accent_pts = []  # renamed from ``list2``
    if not show_factors:
        # main_pts collects primes, accent_pts composites
        for i in srange(start, end, include_endpoint = True):
            if Integer(i).is_pseudoprime(): main_pts.append(f(i-start+1)) #Primes list
            else: accent_pts.append(f(i-start+1)) #Composites list
        P = points(main_pts)
        R = points(accent_pts, alpha = .1) #Faded Composites
    else:
        # main_pts collects one disk per number (sized by its factor count);
        # accent_pts collects primes to highlight
        for i in srange(start, end, include_endpoint = True):
            main_pts.append(disk((f(i-start+1)),0.05*pow(2,len(factor(i))-1), (0,2*pi))) #resizes each of the dots depending of the number of factors of each number
            if Integer(i).is_pseudoprime() and highlight_primes: accent_pts.append(f(i-start+1))
        P = Graphics()
        for g in main_pts:
            P += g
        p_size = 5 #the orange dot size of the prime markers
        if not highlight_primes: accent_pts = [(f(n-start+1))]
        R = points(accent_pts, hue = .1, pointsize = p_size)
    if n > 0:
        html('$n = %s$' % factor(n))
        p = 1
        #The X which marks the given n
        W1 = disk((f(n-start+1)), p, (pi/6, 2*pi/6), alpha=.1)
        W2 = disk((f(n-start+1)), p, (4*pi/6, 5*pi/6), alpha=.1)
        W3 = disk((f(n-start+1)), p, (7*pi/6, 8*pi/6), alpha=.1)
        W4 = disk((f(n-start+1)), p, (10*pi/6, 11*pi/6), alpha=.1)
        Q = W1 + W2 + W3 + W4
        n = n - start +1 #offsets the n for different start values to ensure accurate plotting
        if show_curves:
            begin_curve = 0
            t = SR.var('t')
            a=1.0
            b=0.0
            # offsets of the two quadratic "number spiral" curves through n:
            # pink curve m^2 + c, green curve m^2 + m + c2
            if n > (floor(sqrt(n)))**2 and n <= (floor(sqrt(n)))**2 + floor(sqrt(n)):
                c = -((floor(sqrt(n)))**2 - n)
            else:
                c = -((ceil(sqrt(n)))**2 - n)
            # c2 was computed identically in both branches of the original; hoisted
            c2 = -((floor(sqrt(n)))**2 + floor(sqrt(n)) - n)
            html('Pink Curve: $n^2 + %s$' % c)
            html('Green Curve: $n^2 + n + %s$' % c2)
            m = SR.var('m')
            g = symbolic_expression(a*m**2+b*m+c).function(m)
            r = symbolic_expression(sqrt(g(m))).function(m)
            theta = symbolic_expression(r(m)- m*sqrt(a)).function(m)
            S1 = parametric_plot(((r(t))*cos(2*pi*(theta(t))),(r(t))*sin(2*pi*(theta(t)))),
                (begin_curve, ceil(sqrt(end-start))), color=hue(0.8), thickness = .3) #Pink Line
            b = 1
            c = c2
            g = symbolic_expression(a*m**2+b*m+c).function(m)
            r = symbolic_expression(sqrt(g(m))).function(m)
            theta = symbolic_expression(r(m)- m*sqrt(a)).function(m)
            S2 = parametric_plot(((r(t))*cos(2*pi*(theta(t))),(r(t))*sin(2*pi*(theta(t)))),
                (begin_curve, ceil(sqrt(end-start))), color=hue(0.6), thickness = .3) #Green Line
            show(R+P+S1+S2+Q, aspect_ratio = 1, axes = False, dpi = dpi)
        else: show(R+P+Q, aspect_ratio = 1, axes = False, dpi = dpi)
    else: show(R+P, aspect_ratio = 1, axes = False, dpi = dpi)
| [
37811,
198,
50,
496,
4225,
8656,
198,
198,
50,
496,
44020,
389,
5479,
286,
262,
4600,
31,
3849,
529,
11705,
1352,
1279,
40720,
40720,
82,
11286,
65,
14,
11295,
2070,
14,
3849,
529,
13,
6494,
29,
63,
44807,
198,
2990,
389,
29801,
985... | 2.179394 | 27,080 |
from handwriting_sample.base.containers import LoggableObject, HandwritingDataBase
| [
6738,
44396,
62,
39873,
13,
8692,
13,
3642,
50221,
1330,
5972,
70,
540,
10267,
11,
7157,
16502,
6601,
14881,
198
] | 4.15 | 20 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'DataIntegrationScheduleConfig',
'DataIntegrationTag',
'EventIntegrationAssociation',
'EventIntegrationEventFilter',
'EventIntegrationMetadata',
'EventIntegrationTag',
]
@pulumi.output_type
@pulumi.output_type
# NOTE: auto-generated by the Pulumi SDK Generator (see file header);
# prefer regenerating over hand-editing.
class DataIntegrationTag(dict):
    """
    A label for tagging DataIntegration resources
    """
    def __init__(__self__, *,
                 key: str,
                 value: str):
        """
        A label for tagging DataIntegration resources
        :param str key: A key to identify the tag.
        :param str value: Corresponding tag value for the key.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        A key to identify the tag.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Corresponding tag value for the key.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
26144,
35986,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 2.489002 | 591 |
#
# Copyright (C) 2017 Quest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import uuid
import logging
import docker
import blockade.host
_logger = logging.getLogger(__name__)
| [
2,
198,
2,
220,
15069,
357,
34,
8,
2177,
6785,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,... | 3.589744 | 195 |
import numpy as np
import os
import matplotlib.pyplot as plt
import sys
try:
plt.style.use('presentation')
except:
pass
def plot_config():
    """
    Make a 4x4 figure with the upper-right triangle of frames turned off
    and ticks only on the left column and bottom row of axes.

    Returns the figure and its 4x4 array of axes (``fig, axs``).
    """
    fig, axs = plt.subplots(4, 4, figsize=(9.7, 9.7))
    fig.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.96,
                        wspace=0.05, hspace=0.05)
    for i in range(4):
        for j in range(4):
            ax = axs[i, j]
            # only have y ticks on left axes (axs[:, 0]); the original
            # cleared y ticks unconditionally, contradicting its own comment
            if j != 0:
                ax.set_yticks([])
            # turn off top right frames
            if j > i:
                ax.set_frame_on(False)
            # only have x ticks on bottom axes (axs[-1, :]); the original
            # tested ``i != 4`` which is always true for i in range(4),
            # so the bottom row lost its x ticks too
            if i != 3:
                ax.set_xticks([])
    return fig, axs
if __name__ == '__main__':
ssp = SSP(sys.argv[1])
fig, ax = ssp.age_plot()
plt.savefig(sys.argv[1] + '.png')
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
25064,
198,
198,
28311,
25,
198,
220,
220,
220,
458,
83,
13,
7635,
13,
1904,
10786,
25579,
341,
11537,
198,
16... | 1.923541 | 497 |
import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
    name="dutch-pluralizer",
    version="0.0.39",
    description="Generates Dutch plural and singular nouns in a very imperfect way using Hunspell dictionaries. Why imperfect? Because the Dutch language is full of exceptions.",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/KeesCBakker/dutch-pluralizer-py",
    author="Kees C. Bakker / KeesTalksTech",
    author_email="info@keestalkstech.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8"
    ],
    packages=["dutch_pluralizer"],
    include_package_data=True,
    # cyhunspell provides the Hunspell bindings; cython is its build dependency
    install_requires=["cython", "cyhunspell"],
    entry_points={
        "console_scripts": [
            "dutch_pluralizer=dutch_pluralizer.__main__:main",
        ]
    },
)
| [
11748,
3108,
8019,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
2,
383,
8619,
7268,
428,
2393,
198,
39,
9338,
796,
3108,
8019,
13,
15235,
7,
834,
7753,
834,
737,
8000,
198,
198,
2,
383,
2420,
286,
262,
20832,
11682,
2393,
198... | 2.684807 | 441 |
@dec
if __name__ == '__main__':
x = wrap
x(hello)
decoratee()
| [
198,
31,
12501,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
2124,
796,
14441,
198,
220,
2124,
7,
31373,
8,
628,
220,
11705,
378,
68,
3419,
198
] | 2.151515 | 33 |
import gym
import torch
import numpy as np
from src.ModelBase.dyna import *
from src.NN import model
from RLalgo.td3 import TD3
if __name__=='__main__':
import random
import os
from src.NN.RL.base import MLPActor, MLPQ
random.seed(0)
torch.manual_seed(0)
np.random.seed(0)
test_episodes=100
RL_batchsize=128
env_batchsize=200
realenv = gym.make('CartPole-v0')
fakeenv = model.fake_cartpole_env(env_batchsize)
# os.chdir()
RLinp = {"env":fakeenv, # the surogate environment defined above
'Actor':MLPActor, # the type of policy network, defined in src/NN/RL
'Q': MLPQ, # the type of value function network, defined in src/NN/RL
'act_space_type':'d', # the type of action space, 'c' for continuous, 'd' for discrete
'a_kwargs':dict(activation=nn.ReLU,
hidden_sizes=[256]*2,
output_activation=nn.Tanh),# the hyperparameters of the network
'ep_type':'inf', # the type of episode, 'inf' for infinite, 'finite' for finite (only inf is supported for now)
'max_ep_len':400, # the maximum length of an episode
'replay_size':int(5e5) # the max size of the replay buffer
}
RL = TD3(**RLinp)
mb = dyna(RL,realenv,True,env_batchsize,real_buffer_size=int(5e5))
mb(80,1000,1000,1000,update_every=100 ,RL_batch_size=RL_batchsize,test_every=4,
num_test_episodes=test_episodes,RL_update_iter=50,RL_loop_per_epoch=4000,
env_train_start_size=800,noiselist=torch.zeros(16000),mixed_train=False,
data_train_max_iter=100, fake_env_loss_criteria=1e-4,env_num_batch=10)
| [
11748,
11550,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12351,
13,
17633,
14881,
13,
67,
46434,
1330,
1635,
198,
6738,
12351,
13,
6144,
1330,
2746,
198,
6738,
45715,
282,
2188,
13,
8671,
18,
1330,
13320,
18,
198,... | 2.228117 | 754 |
import numpy as np
if __name__ == '__main__':
adjacent_mat = np.array([
[0, 7, 4, 6, 1],
[100, 0, 100, 100, 100],
[100, 2, 0, 5, 100],
[100, 3, 100, 0, 100],
[100, 100, 100, 1, 0]
], dtype=int)
print(dijkstra_single_source_shortest_paths(adjacent_mat, 5, 0))
| [
11748,
299,
32152,
355,
45941,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
15909,
62,
6759,
796,
45941,
13,
18747,
26933,
198,
220,
220,
220,
220,
220,
220,
220,
685,
15,
11,
767,
11,... | 1.90303 | 165 |
class LowFuelError(ValueError):
    """
    Raised when the remaining fuel is not enough to reach the destination point.
    """
| [
4871,
7754,
42663,
12331,
7,
11395,
12331,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
12466,
116,
21727,
31583,
30143,
141,
236,
141,
229,
16843,
22177,
18849,
16843,
11,
12466,
118,
15166,
20375,
15166,
21169,
15166,
16843,
220... | 1.207547 | 106 |
from django.contrib import admin
from .models import ContactRequest, Publications, Member, GroupInformation, ResearchField
from tinymce.widgets import TinyMCE
from django.db import models
admin.site.register(GroupInformation, GroupInformationAdmin)
admin.site.register(ResearchField, ResearchFieldAdmin)
admin.site.register(ContactRequest)
admin.site.register(Publications, PublicationsAdmin)
admin.site.register(Member, MembersAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
14039,
18453,
11,
40865,
11,
10239,
11,
4912,
21918,
11,
4992,
15878,
198,
6738,
7009,
76,
344,
13,
28029,
11407,
1330,
20443,
44,
5222,
198,
6738,
42625,
1420... | 3.9375 | 112 |
import argparse
import os
import sys
import numpy as np
import torchvision
from torchvision import datasets, transforms
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import videotransforms
from charades_dataset_full import Charades as Dataset
from pytorch_i3d import InceptionI3d
from torch.autograd import Variable
from torch.optim import lr_scheduler
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
parser = argparse.ArgumentParser()
parser.add_argument('-mode', type=str, help='rgb or flow')
parser.add_argument('-load_model', type=str)
parser.add_argument('-root', type=str)
parser.add_argument('-gpu', type=str)
parser.add_argument('-save_dir', type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
if __name__ == '__main__':
# need to add argparse
run(mode=args.mode,
root=args.root,
load_model=args.load_model,
save_dir=args.save_dir)
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
10178,
198,
6738,
28034,
10178,
1330,
40522,
11,
31408,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
... | 2.771676 | 346 |
import sys
# Read a time of day as "H M" from stdin and print the time 45 minutes earlier.
dataIn = sys.stdin.readline()
timeData = dataIn.split()
hours = int(timeData[0])
minutes = int(timeData[1])
if minutes >= 45:
    minutes -= 45
else:
    # borrow an hour; hour 0 wraps around to 23 (0 -> 24, then -1)
    if hours == 0: hours = 24
    hours -= 1
    minutes += 15
print('{} {}'.format(hours,minutes))
11748,
25064,
198,
7890,
818,
796,
25064,
13,
19282,
259,
13,
961,
1370,
3419,
198,
198,
2435,
6601,
796,
1366,
818,
13,
35312,
3419,
198,
24425,
796,
493,
7,
2435,
6601,
58,
15,
12962,
198,
1084,
1769,
796,
493,
7,
2435,
6601,
58,
... | 2.495327 | 107 |
import csv
import os
import re
from wo.core.fileutils import WOFileUtils
from wo.core.logging import Log
from wo.core.shellexec import WOShellExec
from wo.core.variables import WOVar
from wo.core.acme import WOAcme
| [
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
302,
198,
198,
6738,
24486,
13,
7295,
13,
7753,
26791,
1330,
370,
46,
8979,
18274,
4487,
198,
6738,
24486,
13,
7295,
13,
6404,
2667,
1330,
5972,
198,
6738,
24486,
13,
7295,
13,
82,
297... | 2.893333 | 75 |
#!/usr/bin/env python
# coding=utf-8
def fib2(n):
    """Return the Fibonacci series up to n, formatted as a string.

    Collects every Fibonacci number strictly below ``n`` into a list and
    returns its ``str`` rendering, e.g. ``'[0, 1, 1, 2, 3]'``.  (The
    original docstring claimed a list was returned; the string form is
    what callers actually receive.)
    """
    result = []
    a, b = 0, 1
    while a < n:
        result.append(a)
        a, b = b, a+b
    # str() of a list has no surrounding whitespace, so the original
    # trailing .strip() was a no-op and has been dropped.
    return str(result)
fib = fib2(200)
#print fib
fibs = [0, 1]
num = int(raw_input('How many Fibonacci numbers do you want?'))
for i in range(num-2):
fibs.append(fibs[-2]+fibs[-1])
print fibs
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
4299,
12900,
17,
7,
77,
2599,
198,
220,
220,
220,
37227,
13615,
257,
1351,
7268,
262,
41566,
261,
44456,
2168,
510,
284,
299,
526,
15931,
198,
220,
... | 2.153846 | 195 |
# coding: utf8
import copy
import heapq
import os
import pickle
import shutil
import socket
import threading
import time
import json
import traceback
import datetime
from . import config, utils, constant
from .replay_file import ReplayFile
from .translation import Translation
from .packet_processor import PacketProcessor
from .logger import Logger
from .pycraft import authentication
from .pycraft.networking.connection import Connection
from .pycraft.networking.packets import Packet as PycraftPacket, clientbound, serverbound
from .SARC.packet import Packet as SARCPacket
| [
2,
19617,
25,
3384,
69,
23,
198,
198,
11748,
4866,
198,
11748,
24575,
80,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
4423,
346,
198,
11748,
17802,
198,
11748,
4704,
278,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
12854,
... | 3.754839 | 155 |
from html_table_parser import HTMLTableParser
import requests
import logging
_LOGGER = logging.getLogger(__name__)
def get_devicelist(home_hub_ip='192.168.1.254'):
    """Fetch the status page from a BT Home Hub 5 and return the parsed
    device table, or ``None`` on timeout / non-200 response.
    """
    url = 'http://{}/'.format(home_hub_ip)
    try:
        response = requests.get(url, timeout=5)
    except requests.exceptions.Timeout:
        _LOGGER.exception("Connection to the router timed out")
        return
    # guard clause: anything other than a 200 is logged and yields None
    if response.status_code != 200:
        _LOGGER.error("Invalid response from Home Hub: %s", response)
        return
    return parse_devicelist(response.text)
def parse_devicelist(data_str):
    """Parse the BT Home Hub 5 data format.

    Table index 9 of the status page lists the known devices; rows with
    five cells and a non-empty cell 2 are kept, mapping cell 2 -> cell 1
    (presumably MAC address -> device name - verify against the hub page).
    """
    parser = HTMLTableParser()
    parser.feed(data_str)
    return {
        row[2]: row[1]
        for row in parser.tables[9]
        if len(row) == 5 and row[2] != ''
    }
| [
6738,
27711,
62,
11487,
62,
48610,
1330,
11532,
10962,
46677,
198,
11748,
7007,
198,
11748,
18931,
198,
198,
62,
25294,
30373,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
4299,
651,
62,
7959,
291,
46331,
7,
1119... | 2.577236 | 369 |
# coding=utf-8
"""
everblog.blueprints
~~~~~~~~~~~~~~~~~~~
blueprints for the whole application
"""
import sys, datetime
from functools import wraps
from flask import g, redirect, session, url_for
from toolkit_library.inspector import PackageInspector
from everblog import app, db
from everblog.models import Page
def admin_required(f):
    """function decorator, force user to login as administrator."""
    # NOTE(review): the wrapped function's body appears to have been lost -
    # `decorated` is never defined, and a decorator line cannot precede a
    # `return` statement, so this block does not even parse.  Restore the
    # inner `def decorated(*args, **kwargs): ...` before shipping.
    @wraps(f)
    return decorated
def before_request():
    """called before every request"""
    # Populate flask.g with per-request values read from the database and
    # the app config (pages ordered by their `order` column, contact info,
    # owner, timezone offset, analytics id).
    g.pages = db.session.query(Page).order_by(Page.order)
    g.contacts = app.config['CONTACT_METHODS']
    g.blog_owner = app.config['BLOG_OWNER']
    # Offset in hours applied when rendering timestamps in local time.
    g.time_zone = datetime.timedelta(hours = app.config['TIME_ZONE'])
    g.google_analytics_tracking_id = app.config['GOOGLE_ANALYTICS_TRACKING_ID']
g.google_analytics_tracking_id = app.config['GOOGLE_ANALYTICS_TRACKING_ID']
def teardown_request(exception = None):
    """called after every request"""
    # Return the scoped SQLAlchemy session so connections are not leaked.
    db.session.remove()
# Discover every blueprint module in this package and wire each one into
# the Flask app.  The module names come from PackageInspector (i.e. the
# files on disk), not from user input, so exec() is tolerable here -
# though importlib.import_module()/getattr would avoid exec entirely.
packageInspector = PackageInspector(sys.modules[__name__])
all_blueprints = packageInspector.get_all_modules()
for blueprint in all_blueprints:
    #import blueprint
    exec('from everblog.blueprints import {0}'.format(blueprint))
    #register before_request to every blueprint
    exec('{0}.blueprint.before_request(before_request)'.format(blueprint))
    #register teardown_request to every blueprint
    exec('{0}.blueprint.teardown_request(teardown_request)'.format(blueprint))
    #register blueprint to app
    exec('app.register_blueprint({0}.blueprint)'.format(blueprint))
| [
2,
19617,
28,
40477,
12,
23,
201,
198,
37811,
201,
198,
220,
220,
220,
1683,
14036,
13,
17585,
17190,
201,
198,
220,
220,
220,
220,
27156,
4907,
93,
201,
198,
220,
220,
220,
4171,
17190,
329,
262,
2187,
3586,
201,
198,
37811,
201,
... | 2.785971 | 556 |
DATA = [
{
'name': 'Facundo',
'age': 72,
'organization': 'Platzi',
'position': 'Technical Coach',
'language': 'python',
},
{
'name': 'Luisana',
'age': 33,
'organization': 'Globant',
'position': 'UX Designer',
'language': 'javascript',
},
{
'name': 'Héctor',
'age': 19,
'organization': 'Platzi',
'position': 'Associate',
'language': 'ruby',
},
{
'name': 'Gabriel',
'age': 20,
'organization': 'Platzi',
'position': 'Associate',
'language': 'javascript',
},
{
'name': 'Isabella',
'age': 30,
'organization': 'Platzi',
'position': 'QA Manager',
'language': 'java',
},
{
'name': 'Karo',
'age': 23,
'organization': 'Everis',
'position': 'Backend Developer',
'language': 'python',
},
{
'name': 'Ariel',
'age': 32,
'organization': 'Rappi',
'position': 'Support',
'language': '',
},
{
'name': 'Juan',
'age': 17,
'organization': '',
'position': 'Student',
'language': 'go',
},
{
'name': 'Pablo',
'age': 32,
'organization': 'Master',
'position': 'Human Resources Manager',
'language': 'python',
},
{
'name': 'Lorena',
'age': 56,
'organization': 'Python Organization',
'position': 'Language Maker',
'language': 'python',
},
]
if __name__ == '__main__':
    # NOTE(review): run() is not defined or imported anywhere in the
    # visible part of this file - executing it as-is raises NameError.
    run()
26947,
796,
685,
201,
198,
220,
220,
220,
1391,
201,
198,
220,
220,
220,
220,
220,
220,
220,
705,
3672,
10354,
705,
47522,
41204,
3256,
201,
198,
220,
220,
220,
220,
220,
220,
220,
705,
496,
10354,
7724,
11,
201,
198,
220,
220,
22... | 1.824599 | 935 |
# ####### STDLIB
import errno
# noinspection PyUnresolvedReferences
import getpass
# noinspection PyUnresolvedReferences
import logging
# noinspection PyUnresolvedReferences
import sys
# ####### EXT
# noinspection PyBroadException
try:
import fire # type: ignore
except Exception:
# maybe we dont need fire if not called via commandline, so accept if it is not there
pass
# ####### OWN
# noinspection PyUnresolvedReferences
import lib_log_utils
# ####### PROJ
# imports for local pytest
try:
from .lib_bash import * # type: ignore # pragma: no cover
from .lib_install import * # type: ignore # pragma: no cover
# imports for doctest
except ImportError: # type: ignore # pragma: no cover
from lib_bash import * # type: ignore # pragma: no cover
from lib_install import * # type: ignore # pragma: no cover
if __name__ == '__main__':
    # Script entry point; `main` is presumably provided by the
    # star-imported lib_bash/lib_install modules above - verify.
    main()
| [
2,
46424,
2235,
3563,
19260,
9865,
198,
198,
11748,
11454,
3919,
198,
2,
645,
1040,
14978,
9485,
3118,
411,
5634,
19927,
198,
11748,
651,
6603,
198,
2,
645,
1040,
14978,
9485,
3118,
411,
5634,
19927,
198,
11748,
18931,
198,
2,
645,
10... | 2.725714 | 350 |
#!usr/bin/env python
import os
import pandas as pd
from tqdm import tqdm
import argparse
from sqlalchemy import create_engine
from sqlalchemy_utils import create_database, database_exists
import sys
import codes3d
if __name__ == '__main__':
    # NOTE(review): parse_args() and build_hic_tables() are not defined in
    # the visible part of this file - presumably provided elsewhere.
    args = parse_args()
    print('Building...')
    # Two supported layouts under hic_dir:
    #   <hic_dir>/<cell_line>/<hic_file>  -> cell line taken from the folder
    #   <hic_dir>/<hic_file>              -> cell line taken from hic_dir's
    #                                        own last path component
    for first_level in os.listdir(args.hic_dir):
        if os.path.isdir(os.path.join(args.hic_dir, first_level)):
            for second_level in os.listdir(os.path.join(args.hic_dir, first_level)):
                cell_line = first_level
                fp = os.path.join(args.hic_dir,first_level, second_level)
                build_hic_tables(cell_line, args.enzyme, fp, args.db_auth, args.mapq_cutoff, args.tablespace)
        elif os.path.isfile(os.path.join(args.hic_dir, first_level)):
            cell_line = ''
            # Trailing slash shifts the interesting component one back.
            if args.hic_dir.endswith('/'):
                cell_line = args.hic_dir.strip().split('/')[-2]
            else:
                cell_line = args.hic_dir.strip().split('/')[-1]
            fp = os.path.join(args.hic_dir,first_level)
            build_hic_tables(cell_line, args.enzyme, fp, args.db_auth, args.mapq_cutoff, args.tablespace)
| [
2,
0,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
1822,
29572,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738... | 2.013356 | 599 |
import json
import sqlite3
# Load the de-normalized JSON roster into a normalized three-table schema
# (User, Course, Member).  Re-running the script rebuilds everything.
conn = sqlite3.connect('Json2SQL.sqlite')
cur = conn.cursor()

# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS User;
DROP TABLE IF EXISTS Member;
DROP TABLE IF EXISTS Course;
CREATE TABLE User (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
    name TEXT UNIQUE
);
CREATE TABLE Course (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
    title TEXT UNIQUE
);
CREATE TABLE Member (
    user_id INTEGER,
    course_id INTEGER,
    role INTEGER,
    PRIMARY KEY (user_id, course_id)
)
''')

f_name = './AssignmentData/Data_roster.json'

# Rows look like:
# [
#   [ "Charley", "si110", 1 ],
#   [ "Mea", "si110", 0 ],
# Bug fix: read the roster via a context manager so the file handle is
# closed promptly (the original `open(f_name).read()` leaked the handle).
with open(f_name) as fh:
    json_data = json.load(fh)

for entry in json_data:
    user_name = entry[0]
    course_title = entry[1]
    member_role = entry[2]
    print((user_name, course_title, member_role))

    # INSERT OR IGNORE keeps names/titles unique; the surrogate key is
    # fetched back so Member can reference both sides.
    cur.execute('''INSERT OR IGNORE INTO User (name) VALUES ( ? )''', (user_name,))
    cur.execute('SELECT id FROM User WHERE name = ? ', (user_name,))
    user_id = cur.fetchone()[0]

    cur.execute('''INSERT OR IGNORE INTO Course (title) VALUES ( ? )''', (course_title,))
    cur.execute('SELECT id FROM Course WHERE title = ? ', (course_title,))
    course_id = cur.fetchone()[0]

    # OR REPLACE makes the load idempotent for each (user, course) pair.
    cur.execute('''INSERT OR REPLACE INTO Member (user_id, course_id, role) VALUES ( ?, ?, ? )''',
                (user_id, course_id, member_role))

conn.commit()
# Bug fix: release the database connection once the load is finished.
conn.close()
| [
11748,
33918,
198,
11748,
44161,
578,
18,
198,
198,
37043,
796,
44161,
578,
18,
13,
8443,
10786,
41,
1559,
17,
17861,
13,
25410,
578,
11537,
198,
22019,
796,
48260,
13,
66,
21471,
3419,
198,
198,
2,
2141,
617,
9058,
198,
22019,
13,
... | 2.396721 | 610 |
import os
import torch
import argparse
import numpy as np
from collections import defaultdict
from mmcv import Config
from mmcv.runner import load_checkpoint, init_dist, get_dist_info
from mmcv.parallel import MMDistributedDataParallel
from mmdet.apis import set_random_seed, multi_gpu_test
from mmdet3d.models import build_model
from mmdet3d.datasets import build_dataloader, build_dataset
if __name__ == '__main__':
    # NOTE(review): main() is not defined in the visible part of this
    # file - confirm it exists elsewhere in the module before running.
    main()
| [
11748,
28686,
198,
11748,
28034,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
8085,
33967,
1330,
17056,
198,
6738,
8085,
33967,
13,
16737,
1330,
3440,
62,
9122,
4122,
11,
231... | 3.160584 | 137 |
# coding:utf-8
| [
2,
19617,
25,
40477,
12,
23,
628
] | 2.285714 | 7 |
from distutils.core import setup
# Distutils package metadata for the tweeterid helper library.
setup(
    name='tweeterid',
    version='1.0.0',
    description='Get twitter handles based on user ID\'s and vice-versa via tweeterid.com in python',
    long_description='Get twitter handles based on user ID\'s and vice-versa via tweeterid.com in python',
    author='sl4v',
    author_email='iamsl4v@protonmail.com',
    url='https://github.com/sl4vkek/python-tweeterid',
    packages=['tweeterid'],
    install_requires=['requests'],
    license="WTFPL"
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
201,
198,
201,
198,
40406,
7,
201,
198,
220,
220,
220,
1438,
11639,
83,
732,
2357,
312,
3256,
201,
198,
220,
220,
220,
2196,
11639,
16,
13,
15,
13,
15,
3256,
201,
198,
220,
220,
220,
6764,
... | 2.532338 | 201 |
# -*- coding: utf-8 -*-
# IMPORTS
from typing import Dict
# LOCAL IMPORTS
from walmart.core import Resource
class Orders(Resource):
    """REST resource wrapper for the Walmart ``orders`` endpoint.

    All request/response behaviour comes from the Resource base class;
    only the endpoint path segment is specialised here.
    """
    # Path segment appended to the API base URL by Resource.
    path = 'orders'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
30023,
33002,
198,
6738,
19720,
1330,
360,
713,
198,
198,
2,
37347,
1847,
30023,
33002,
198,
6738,
6514,
13822,
13,
7295,
1330,
20857,
198,
198,
4871,
30689,
7,
26198... | 2.806452 | 62 |
import os
import os.path
import dotenv
# Resolve the .env file sitting next to this module and load it into the
# process environment before the settings below are read.
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
dotenv.load_dotenv(dotenv_path)
# Secrets pulled from the environment; each is None when unset.
API_KEY = os.environ.get("ACBOT_KEY")
WEBHOOK1 = os.environ.get("WEBHOOK1")
WEBHOOK2 = os.environ.get("WEBHOOK2")
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
16605,
24330,
198,
198,
26518,
24330,
62,
6978,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
45302,
24330,
11537,
198,
26518,
24330,
... | 2.234234 | 111 |
from django.contrib import admin
from . import models as scheduler_engine_models
# Register your models here.
admin.site.register(scheduler_engine_models.Event)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
1330,
4981,
355,
6038,
18173,
62,
18392,
62,
27530,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
28482,
13,
15654,
13,
30238,
7,
1416,
704,
18173,
62,
18392,
62,
2... | 3.468085 | 47 |
from voronoi import VoronoiDiagram, euclidean_distance
from color_models import RGB
import math
import random
from PIL import Image, ImageDraw
from gradients import create_color_gradient
"""
Methods based off of Voronoi diagram generation.
"""
if __name__ == '__main__':
    # Render a 1080x1920 multi-circle Voronoi image, display it, and save
    # it as BMP.
    # NOTE(review): multi_circles is not defined in the visible part of
    # this file - confirm it exists elsewhere before running.
    img = multi_circles(1080, 1920, num_of_points=10, num_of_colors=3, optimize=True)
    img.show()
    img.save("test.bmp", 'BMP')
6738,
410,
273,
261,
23013,
1330,
44143,
261,
23013,
18683,
6713,
11,
304,
36616,
485,
272,
62,
30246,
198,
6738,
3124,
62,
27530,
1330,
25228,
198,
11748,
10688,
198,
11748,
4738,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
25302,
198,... | 2.886525 | 141 |
""" Utility functions used in osxphotos """
import fnmatch
import glob
import importlib
import inspect
import logging
import os
import os.path
import pathlib
import platform
import re
import sqlite3
import subprocess
import sys
import unicodedata
import urllib.parse
from plistlib import load as plistload
from typing import Callable, Union
import CoreFoundation
import objc
from Foundation import NSString
from ._constants import UNICODE_FORMAT
__all__ = [
"noop",
"lineno",
"dd_to_dms_str",
"get_system_library_path",
"get_last_library_path",
"list_photo_libraries",
"normalize_fs_path",
"findfiles",
"normalize_unicode",
"increment_filename_with_count",
"increment_filename",
"expand_and_validate_filepath",
"load_function",
]
# Module-wide debug flag; flipped at runtime via _set_debug().
_DEBUG = False
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(levelname)s - %(filename)s - %(lineno)d - %(message)s",
)
# With debugging off, suppress DEBUG-level records process-wide.
if not _DEBUG:
    logging.disable(logging.DEBUG)
def _get_logger():
"""Used only for testing
Returns:
logging.Logger object -- logging.Logger object for osxphotos
"""
return logging.Logger(__name__)
def _set_debug(debug):
"""Enable or disable debug logging"""
global _DEBUG
_DEBUG = debug
if debug:
logging.disable(logging.NOTSET)
else:
logging.disable(logging.DEBUG)
def _debug():
    """returns True if debugging turned on (via _set_debug), otherwise, false"""
    # Reads the module-level flag maintained by _set_debug().
    return _DEBUG
def noop(*args, **kwargs):
    """Do nothing (no operation); accepts and ignores any arguments."""
    return None
def lineno(filename):
    """Returns string with filename and current line number in caller as 'filename: line_num'

    Will trim filename to just the name, dropping path, if any."""
    # f_back is the caller's frame, so the reported line is where lineno()
    # was invoked, not this function's own line.
    line = inspect.currentframe().f_back.f_lineno
    filename = pathlib.Path(filename).name
    # Bug fix: the trimmed filename was computed and then discarded in
    # favour of a hard-coded "(unknown)" placeholder, contradicting the
    # docstring.
    return f"{filename}: {line}"
def _check_file_exists(filename):
"""returns true if file exists and is not a directory
otherwise returns false"""
filename = os.path.abspath(filename)
return os.path.exists(filename) and not os.path.isdir(filename)
def _get_resource_loc(model_id):
"""returns folder_id and file_id needed to find location of edited photo"""
""" and live photos for version <= Photos 4.0 """
# determine folder where Photos stores edited version
# edited images are stored in:
# Photos Library.photoslibrary/resources/media/version/XX/00/fullsizeoutput_Y.jpeg
# where XX and Y are computed based on RKModelResources.modelId
# file_id (Y in above example) is hex representation of model_id without leading 0x
file_id = hex_id = hex(model_id)[2:]
# folder_id (XX) in above example if first two chars of model_id converted to hex
# and left padded with zeros if < 4 digits
folder_id = hex_id.zfill(4)[0:2]
return folder_id, file_id
def _dd_to_dms(dd):
"""convert lat or lon in decimal degrees (dd) to degrees, minutes, seconds"""
""" return tuple of int(deg), int(min), float(sec) """
dd = float(dd)
negative = dd < 0
dd = abs(dd)
min_, sec_ = divmod(dd * 3600, 60)
deg_, min_ = divmod(min_, 60)
if negative:
if deg_ > 0:
deg_ = deg_ * -1
elif min_ > 0:
min_ = min_ * -1
else:
sec_ = sec_ * -1
return int(deg_), int(min_), sec_
def dd_to_dms_str(lat, lon):
    """Format latitude/longitude (decimal degrees) as DMS strings.

    Returns a tuple like ("51 deg 30' 12.86\" N", "0 deg 7' 54.50\" W") -
    the same layout exiftool uses in its JSON output.
    """
    # TODO: add this to readme
    lat_dms = _dd_to_dms(lat)
    lon_dms = _dd_to_dms(lon)
    # Any negative component flips the hemisphere letter.
    lat_hemisphere = "S" if any(part < 0 for part in lat_dms) else "N"
    lon_hemisphere = "W" if any(part < 0 for part in lon_dms) else "E"
    lat_str = (
        f"{abs(lat_dms[0])} deg {abs(lat_dms[1])}' {abs(lat_dms[2]):.2f}\" {lat_hemisphere}"
    )
    lon_str = (
        f"{abs(lon_dms[0])} deg {abs(lon_dms[1])}' {abs(lon_dms[2]):.2f}\" {lon_hemisphere}"
    )
    return lat_str, lon_str
def get_system_library_path():
    """Return the path to the system Photos library as a string.

    Only works on MacOS 10.15; on earlier versions returns None.
    """
    _, major, _ = _get_os_version()
    if int(major) < 15:
        logging.debug(
            f"get_system_library_path not implemented for MacOS < 10.15: you have {major}"
        )
        return None
    plist_file = pathlib.Path(
        str(pathlib.Path.home())
        + "/Library/Containers/com.apple.photolibraryd/Data/Library/Preferences/com.apple.photolibraryd.plist"
    )
    if not plist_file.is_file():
        logging.debug(f"could not find plist file: {str(plist_file)}")
        return None
    # The photolibraryd preferences plist records the library location.
    with open(plist_file, "rb") as fp:
        pl = plistload(fp)
    return pl.get("SystemLibraryPath")
def get_last_library_path():
    """returns the path to the last opened Photos library
    If a library has never been opened, returns None"""
    # Photos records the last-opened library as a bookmark in its
    # container preferences plist.
    plist_file = pathlib.Path(
        str(pathlib.Path.home())
        + "/Library/Containers/com.apple.Photos/Data/Library/Preferences/com.apple.Photos.plist"
    )
    if plist_file.is_file():
        with open(plist_file, "rb") as fp:
            pl = plistload(fp)
    else:
        logging.debug(f"could not find plist file: {str(plist_file)}")
        return None
    # get the IPXDefaultLibraryURLBookmark from com.apple.Photos.plist
    # this is a serialized CFData object
    photosurlref = pl.get("IPXDefaultLibraryURLBookmark")
    if photosurlref is not None:
        # use CFURLCreateByResolvingBookmarkData to de-serialize bookmark data into a CFURLRef
        # pylint: disable=no-member
        # pylint: disable=undefined-variable
        photosurl = CoreFoundation.CFURLCreateByResolvingBookmarkData(
            CoreFoundation.kCFAllocatorDefault, photosurlref, 0, None, None, None, None
        )
        # the CFURLRef we got is a struct that python treats as an array
        # I'd like to pass this to CFURLGetFileSystemRepresentation to get the path but
        # CFURLGetFileSystemRepresentation barfs when it gets an array from python instead of expected struct
        # first element is the path string in form:
        # file:///Users/username/Pictures/Photos%20Library.photoslibrary/
        photosurlstr = photosurl[0].absoluteString() if photosurl[0] else None
        # now coerce the file URI back into an OS path
        # (strip the file:// scheme and percent-decoding via urllib)
        # surely there must be a better way
        if photosurlstr is not None:
            photospath = os.path.normpath(
                urllib.parse.unquote(urllib.parse.urlparse(photosurlstr).path)
            )
        else:
            logging.warning(
                "Could not extract photos URL String from IPXDefaultLibraryURLBookmark"
            )
            return None
        return photospath
    else:
        logging.debug("Could not get path to Photos database")
        return None
def list_photo_libraries():
    """Return a list of Photos libraries found on the system.

    On MacOS < 10.15 some libraries may be missed: mdfind ignores certain
    libraries on older versions, so ~/Pictures is also globbed and the
    most recently opened library is appended explicitly.
    """
    # TODO: make this more robust
    found = glob.glob(f"{str(pathlib.Path.home())}/Pictures/*.photoslibrary")
    last_lib = get_last_library_path()
    if last_lib:
        found.append(last_lib)
    mdfind_output = subprocess.check_output(
        ["/usr/bin/mdfind", "-onlyin", "/", "-name", ".photoslibrary"]
    ).splitlines()
    found.extend(line.decode("utf-8") for line in mdfind_output)
    # De-duplicate and return in sorted order.
    return sorted(set(found))
def normalize_fs_path(path: str) -> str:
    """Normalize filesystem paths with unicode in them"""
    # NSString.fileSystemRepresentation yields the byte form macOS uses
    # on disk; decode it back to str for the caller.
    with objc.autorelease_pool():
        return NSString.fileSystemRepresentation(path).decode("utf8")
def findfiles(pattern, path_):
    """Return the filenames in path_ matched by the shell pattern.

    Matching is case-insensitive.  Returns [] when path_ is not an
    existing directory.
    """
    if not os.path.isdir(path_):
        return []
    # See: https://gist.github.com/techtonik/5694830
    # Both sides are unicode-normalized because the filesystem reports
    # names in NFD form.
    matcher = re.compile(fnmatch.translate(normalize_fs_path(pattern)), re.IGNORECASE)
    return [
        entry
        for entry in (normalize_fs_path(name) for name in os.listdir(path_))
        if matcher.match(entry)
    ]
def _open_sql_file(dbname):
"""opens sqlite file dbname in read-only mode
returns tuple of (connection, cursor)"""
try:
dbpath = pathlib.Path(dbname).resolve()
conn = sqlite3.connect(f"{dbpath.as_uri()}?mode=ro", timeout=1, uri=True)
c = conn.cursor()
except sqlite3.Error as e:
sys.exit(f"An error occurred opening sqlite file: {e.args[0]} {dbname}")
return (conn, c)
def _db_is_locked(dbname):
    """Check whether a sqlite3 database is locked.

    Args:
        dbname: name of database to test.

    Returns:
        True if the database is locked, otherwise False.
    """
    # A sidecar "<dbname>.lock" file is treated as an unconditional lock.
    lock_name = f"{dbname}.lock"
    if os.path.exists(lock_name):
        logging.debug(f"{dbname} is locked")
        return True

    # No lock file, so probe the database with a harmless query.
    # Bug fix: catch sqlite errors plus SystemExit (raised via sys.exit in
    # _open_sql_file on open failure) instead of the previous bare
    # `except:`, which also swallowed KeyboardInterrupt and masked
    # programming errors as "locked".
    try:
        (conn, c) = _open_sql_file(dbname)
        c.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;")
        conn.close()
        logging.debug(f"{dbname} is not locked")
        return False
    except (sqlite3.Error, SystemExit):
        logging.debug(f"{dbname} is locked")
        return True
# OSXPHOTOS_XATTR_UUID = "com.osxphotos.uuid"
# def get_uuid_for_file(filepath):
# """ returns UUID associated with an exported file
# filepath: path to exported photo
# """
# attr = xattr.xattr(filepath)
# try:
# uuid_bytes = attr[OSXPHOTOS_XATTR_UUID]
# uuid_str = uuid_bytes.decode('utf-8')
# except KeyError:
# uuid_str = None
# return uuid_str
# def set_uuid_for_file(filepath, uuid):
# """ sets the UUID associated with an exported file
# filepath: path to exported photo
# uuid: uuid string for photo
# """
# if not os.path.exists(filepath):
# raise FileNotFoundError(f"Missing file: {filepath}")
# attr = xattr.xattr(filepath)
# uuid_bytes = bytes(uuid, 'utf-8')
# attr.set(OSXPHOTOS_XATTR_UUID, uuid_bytes)
def normalize_unicode(value):
    """Normalize unicode data to the project-wide UNICODE_FORMAT.

    Accepts str, a list/tuple of str, or None; lists are returned as
    tuples.  Any other type is passed through unchanged.
    """
    if value is None:
        return None
    if isinstance(value, (tuple, list)):
        return tuple(unicodedata.normalize(UNICODE_FORMAT, item) for item in value)
    if isinstance(value, str):
        return unicodedata.normalize(UNICODE_FORMAT, value)
    return value
def increment_filename_with_count(
    filepath: Union[str, pathlib.Path], count: int = 0
) -> str:
    """Return a "filename (1).ext"-style path plus the counter used.

    If a file exists in filepath's parent folder with the same stem,
    " (1)", " (2)", etc. is appended until a non-existing name is found.

    Args:
        filepath: str or pathlib.Path; full path, including file name
        count: int; starting increment value

    Returns:
        tuple of new filepath (or same if not incremented), count

    Note: This obviously is subject to race condition so using with caution.
    """
    target = pathlib.Path(filepath)
    # Compare against lower-cased, unicode-normalized stems already on
    # disk (the filesystem is case-insensitive and reports NFD names).
    existing = {
        normalize_fs_path(pathlib.Path(name).stem.lower())
        for name in findfiles(f"{target.stem}*", str(target.parent))
    }
    candidate = f"{target.stem} ({count})" if count else target.stem
    while normalize_fs_path(candidate.lower()) in existing:
        count += 1
        candidate = f"{target.stem} ({count})"
    return str(target.parent / f"{candidate}{target.suffix}"), count
def increment_filename(filepath: Union[str, pathlib.Path]) -> str:
    """Return filename (1).ext, etc if filename.ext exists

    Thin wrapper around increment_filename_with_count() that discards the
    counter.

    Args:
        filepath: str or pathlib.Path; full path, including file name

    Returns:
        new filepath (or same if not incremented)

    Note: This obviously is subject to race condition so using with caution.
    """
    incremented, _count = increment_filename_with_count(filepath)
    return incremented
def expand_and_validate_filepath(path: str) -> str:
    """Expand ~ and un-escape backslash-escaped spaces in path, then validate.

    Returns:
        expanded path if path is valid file, else None
    """
    candidate = pathlib.Path(re.sub(r"\\ ", " ", path)).expanduser()
    return str(candidate) if candidate.is_file() else None
def load_function(pyfile: str, function_name: str) -> Callable:
    """Load and return function_name from the python file pyfile.

    Raises:
        FileNotFoundError: pyfile does not exist.
        ValueError: pyfile defines no attribute named function_name.
    """
    module_file = pathlib.Path(pyfile)
    if not module_file.is_file():
        raise FileNotFoundError(f"module {pyfile} does not appear to exist")

    module_dir = module_file.parent or pathlib.Path(os.getcwd())
    module_name = module_file.stem

    # Temporarily put the module's folder first on sys.path so the import
    # resolves; the original path is always restored afterwards.
    # Bug fix: the import itself is now inside the try/finally, so sys.path
    # is restored even when import_module raises (previously a failed
    # import left the modified sys.path in place).
    syspath = sys.path
    sys.path = [str(module_dir)] + syspath
    try:
        module = importlib.import_module(module_name)
        try:
            func = getattr(module, function_name)
        except AttributeError:
            raise ValueError(f"'{function_name}' not found in module '{module_name}'")
    finally:
        # restore sys.path
        sys.path = syspath
    return func
| [
37811,
34030,
5499,
973,
287,
28686,
87,
24729,
37227,
198,
198,
11748,
24714,
15699,
198,
11748,
15095,
198,
11748,
1330,
8019,
198,
11748,
10104,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
3108,
8019,
... | 2.544985 | 5,613 |
from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup
| [
6738,
257,
72,
21857,
13,
19199,
1330,
554,
1370,
9218,
3526,
21864,
11,
554,
1370,
9218,
3526,
9704,
929,
628
] | 3.5 | 20 |
# Python 2
# Note this is not complicated in Python since
# Python automatically handles big integers
import sys
import math
# Read n from stdin and print n! (Python 2: raw_input / print statement).
# Python's arbitrary-precision integers make big factorials trivial here.
n = int(raw_input().strip())
answer = math.factorial(n)
print answer
2,
11361,
362,
201,
198,
201,
198,
2,
5740,
428,
318,
407,
8253,
287,
11361,
1201,
201,
198,
2,
11361,
6338,
17105,
1263,
37014,
201,
198,
201,
198,
11748,
25064,
201,
198,
11748,
10688,
201,
198,
201,
198,
77,
796,
493,
7,
1831,
... | 3.2 | 65 |
from panda3d.core import *
from otp.level import BasicEntities
from direct.directnotify import DirectNotifyGlobal | [
6738,
279,
5282,
18,
67,
13,
7295,
1330,
1635,
198,
6738,
30972,
79,
13,
5715,
1330,
14392,
14539,
871,
198,
6738,
1277,
13,
12942,
1662,
1958,
1330,
4128,
3673,
1958,
22289
] | 3.645161 | 31 |
import logging
import os
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
LOGGING_LEVEL = logging.getLevelName(os.getenv('LOGGING_LEVEL', 'DEBUG'))
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
APP_TMP = os.path.join(APP_ROOT, 'tmp')
RECEIPT_HOST = os.getenv("CTP_RECEIPT_HOST", "http://localhost:8191")
RECEIPT_PATH = os.getenv("CTP_RECEIPT_PATH", "questionnairereceipts")
RECEIPT_USER = os.getenv("CTP_RECEIPT_USER", "gateway")
RECEIPT_PASS = os.getenv("CTP_RECEIPT_PASS", "ctp")
RABBIT_QUEUE = os.getenv('RECEIPT_CTP_QUEUE', 'ctp_receipt')
RABBIT_QUARANTINE_QUEUE = os.getenv('RECEIPT_CTP_QUARANTINE_QUEUE', 'ctp_receipt_quarantine')
RABBIT_EXCHANGE = os.getenv('RABBITMQ_EXCHANGE', 'message')
SDX_RECEIPT_CTP_SECRET = os.getenv("SDX_RECEIPT_CTP_SECRET")
# Pre-encode the shared secret once; downstream consumers expect bytes.
if SDX_RECEIPT_CTP_SECRET is not None:
    SDX_RECEIPT_CTP_SECRET = SDX_RECEIPT_CTP_SECRET.encode("ascii")
RABBIT_URL = 'amqp://{user}:{password}@{hostname}:{port}/{vhost}'.format(
hostname=os.getenv('RABBITMQ_HOST', 'rabbit'),
port=os.getenv('RABBITMQ_PORT', 5672),
user=os.getenv('RABBITMQ_DEFAULT_USER', 'rabbit'),
password=os.getenv('RABBITMQ_DEFAULT_PASS', 'rabbit'),
vhost=os.getenv('RABBITMQ_DEFAULT_VHOST', '%2f')
)
RABBIT_URL2 = 'amqp://{user}:{password}@{hostname}:{port}/{vhost}'.format(
hostname=os.getenv('RABBITMQ_HOST2', 'rabbit'),
port=os.getenv('RABBITMQ_PORT2', 5672),
user=os.getenv('RABBITMQ_DEFAULT_USER', 'rabbit'),
password=os.getenv('RABBITMQ_DEFAULT_PASS', 'rabbit'),
vhost=os.getenv('RABBITMQ_DEFAULT_VHOST', '%2f')
)
RABBIT_URLS = [RABBIT_URL, RABBIT_URL2]
# Configure the number of retries attempted before failing call
# (5 attempts with 0.1s exponential backoff, applied to both schemes).
session = requests.Session()
retries = Retry(total=5, backoff_factor=0.1)
session.mount('http://', HTTPAdapter(max_retries=retries))
session.mount('https://', HTTPAdapter(max_retries=retries))
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
7007,
198,
6738,
7007,
13,
43789,
13,
333,
297,
571,
18,
13,
22602,
13,
1186,
563,
1330,
4990,
563,
198,
6738,
7007,
13,
324,
12126,
1330,
14626,
47307,
198,
198,
25294,
38,
2751,
62,
2538,... | 2.232558 | 860 |
# -*- coding:utf8 -*-
from scrapy import cmdline
# Launch the `jiadian` spider exactly as if run from the command line:
#   scrapy crawl jiadian
cmdline.execute('scrapy crawl jiadian'.split())
| [
2,
532,
9,
12,
19617,
25,
40477,
23,
532,
9,
12,
198,
6738,
15881,
88,
1330,
23991,
1370,
198,
28758,
1370,
13,
41049,
10786,
1416,
2416,
88,
27318,
474,
72,
18425,
4458,
35312,
28955,
628
] | 2.8 | 35 |
import os
import dotenv
if os.path.exists('.env'):
dotenv.load_dotenv('.env')
DEBUG = True if os.getenv('DEBUG') == 'True' else False
# REQUIRED APP SETTINGS
FRONTEND_URL = os.getenv('FRONTEND_URL')
CONTROLLERS = ['auth', 'resume', 'status']
PROVIDERS = ['headhunter', 'superjob']
CLEANUP_PERIOD = 60*60*24 # sec
REAUTH_PERIOD = 60*180 # sec
PUSH_PERIOD = 60*30 # sec
JWT_HEADER_TYPE = 'JWT'
JWT_SECRET_KEY = os.getenv('JWT_SECRET_KEY', os.urandom(64))
JWT_ACCESS_TOKEN_EXPIRES = os.getenv('JWT_ACCESS_TOKEN_EXPIRES', 15) # min
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'postgres://')
SQLALCHEMY_TRACK_MODIFICATIONS = False
REDIS_URL = os.getenv('REDIS_URL', 'redis://')
CACHE_TYPE = 'redis'
CACHE_KEY_PREFIX = 'cache'
CACHE_DEFAULT_TIMEOUT = 300
CACHE_REDIS_URL = os.getenv('REDIS_URL', 'redis://')
SENTRY_DSN = os.getenv('SENTRY_DSN', None)
SCOUT_KEY = os.getenv('SCOUT_KEY', None)
SCOUT_NAME = 'pushresume-dev' if DEBUG else 'pushresume'
SCOUT_MONITOR = True
# PROVIDERS SETTINGS
HEADHUNTER = {
'client_id': os.getenv('HH_CLIENT'),
'client_secret': os.getenv('HH_SECRET'),
'base_url': os.getenv('HH_BASE_URL'),
'authorize_url': os.getenv('HH_AUTH_URL'),
'access_token_url': os.getenv('HH_TOKEN_URL')
}
SUPERJOB = {
'client_id': os.getenv('SJ_CLIENT'),
'client_secret': os.getenv('SJ_SECRET'),
'base_url': os.getenv('SJ_BASE_URL'),
'authorize_url': os.getenv('SJ_AUTH_URL'),
'access_token_url': os.getenv('SJ_TOKEN_URL'),
'refresh_token_url': os.getenv('SJ_TOKEN_REFRESH_URL')
}
| [
11748,
28686,
198,
11748,
16605,
24330,
198,
198,
361,
28686,
13,
6978,
13,
1069,
1023,
7,
4458,
24330,
6,
2599,
198,
220,
220,
220,
16605,
24330,
13,
2220,
62,
26518,
24330,
7,
4458,
24330,
11537,
198,
198,
30531,
796,
6407,
611,
286... | 2.172943 | 717 |
import main
import time
import json
import numpy as np
import sys
# Parse the two required CLI coefficients and run one training test.
input_coef = float(sys.argv[1])
constant_coef = float(sys.argv[2])
# NOTE(review): DataSender is not defined or imported in the visible part
# of this file (only `main` is imported) - confirm where it comes from.
D = DataSender(input_coef, constant_coef)
D.setUp()
D.test_training()
| [
11748,
1388,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
628,
198,
15414,
62,
1073,
891,
796,
12178,
7,
17597,
13,
853,
85,
58,
16,
12962,
198,
9979,
415,
62,
1073,
891,
796,
12178,
7,
... | 2.5625 | 80 |
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from aemter.views import *
class TestUrls(SimpleTestCase):
    """
    Template
    def test_xxxx_url_resolves(self):
        url = reverse('mitglieder:')
        self.assertEqual(resolve(url).func, )
    """
    # NOTE(review): only the commented template in the docstring exists so
    # far - this class currently defines no actual tests.
| [
6738,
42625,
14208,
13,
9288,
1330,
17427,
14402,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
11,
10568,
198,
6738,
257,
368,
353,
13,
33571,
1330,
1635,
198,
198,
4871,
6208,
16692,
7278,
7,
26437,
14402,
20448,
2599,
628... | 2.59292 | 113 |
import random
import itertools
from typing import Generator
import numpy as np
import plotly.graph_objects as go
DIMENSIONS = 3
MAX_SIZE = 128
POINTS_QUANTITY = 4
ITERATIONS = 10_000
CREATE = False
class FractalChaos:
    """
    FractalChaos class implements base logic for fractal generation with chaos theory
    by points generation
    Example:
        fractal = FractalPyramid(iterations=100_000)
        fractal = fractal.create()
        fractal.render(marker={'size': 1})
    """
    # Lazily-populated state: seed point, the fractal's main (attractor)
    # points, and the generator of chaos-game points.
    point_start = None
    points_main = None
    points_generated = None
    def render(self, *args, **kwargs):
        """
        Render plotly figure
        :param args: args for plotly Scatter3d object
        :param kwargs: kwargs for plotly Scatter3d object
        :return FractalChaos
        """
        # Transpose (N, 3) points into per-axis arrays for Scatter3d.
        points_by_axis = self.get().T
        fig = go.Figure(go.Scatter3d(
            *args,
            **kwargs,
            x=points_by_axis[0],
            y=points_by_axis[1],
            z=points_by_axis[2],
            mode='markers',
        ))
        fig.show()
        return self
    def get(self) -> np.array:
        """
        Get fractal points
        :return np.array of shape (self.points_quantity + self.iterations, 3)
        """
        return np.array([*self.get_generator()])
    # NOTE(review): subscripting itertools.chain in this annotation needs a
    # Python version where chain supports __class_getitem__ - confirm the
    # project's minimum version.
    def get_generator(self) -> itertools.chain[np.array]:
        """
        Get fractal points generator
        :return Generator[np.array]
        """
        if not self._is_created():
            self.create()
        return itertools.chain(self.points_main, self.points_generated)
    def create(self):
        """
        Create all fractal points
        :return FractalChaos
        """
        # NOTE(review): _generate_point_random and _generate_points_main are
        # not defined anywhere visible - presumably they were the bodies of
        # the stripped @staticmethod stubs below.
        self.point_start = self._generate_point_random()
        self.points_main = self._generate_points_main()
        self.points_generated = self._generate_points_fractal()
        return self
    def _is_created(self) -> bool:
        """
        Check if all fractal points are generated
        :return bool
        """
        return self.points_main is not None and self.points_generated is not None
    def _generate_points_fractal(self) -> Generator:
        """
        Generate fractal points
        :return Generator[np.array]
        """
        point_last = self.point_start
        points_quantity = len(self.points_main)
        # NOTE(review): self.__iterations is never assigned (no __init__ is
        # visible), and _point_middle is undefined here - both presumably
        # lost with the stripped code.
        for _ in range(self.__iterations):
            # Choose random point from main
            point_target = self.points_main[random.randrange(0, points_quantity)]
            # Calculate middle point between last one and chosen target
            point_middle = self._point_middle(point_last, point_target)
            # Last point is calculated middle
            point_last = point_middle
            yield point_last
    # NOTE(review): two dangling @staticmethod decorators with no `def`
    # beneath them - the decorated methods were stripped, and as written
    # the module does not parse.  Restore the missing methods.
    @staticmethod
    @staticmethod
class FractalPyramid(FractalChaos):
"""
FractalChaos Child for equilateral pyramid
"""
FractalPyramid(iterations=100_000).render(marker={'size': 1})
| [
11748,
4738,
198,
11748,
340,
861,
10141,
198,
6738,
19720,
1330,
35986,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
7110,
306,
13,
34960,
62,
48205,
355,
467,
628,
198,
35,
3955,
16938,
11053,
796,
513,
198,
22921,
62,
33489,
796,
... | 2.316239 | 1,287 |
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
from dellemc_unity_sdk import runner
from dellemc_unity_sdk import supportive_functions
from dellemc_unity_sdk import constants
# Ansible module metadata: marks this dellemc-unity module as unstable/community.
ANSIBLE_METADATA = {'metadata_version': '0.1',
                    'status': ['unstable'],
                    'supported_by': 'community'}
# Parameter schema, keyed by action name; consumed via template below.
parameters_all = {
    'create': {
        'nasServer': dict(required=True, type=dict),
        'name': dict(type=str),
        'description': dict(type=str)
    }
}
# Declarative description of the Unity REST object and its supported actions,
# in the format expected by dellemc_unity_sdk.runner.
template = {
    constants.REST_OBJECT: 'cifsServer',
    constants.ACTIONS: {
        'create': {
            constants.ACTION_TYPE: constants.ActionType.UPDATE,
            constants.PARAMETER_TYPES: parameters_all.get('create')
        }
    }
}
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk of the file —
    # presumably defined above (typically delegating to runner with the
    # template); confirm against the full source.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
9093,
856,
13,
21412,
62,
26791,
13,
35487,
1330,
28038,
856,
26796,
198,
6738,
1619,
10671,
66,
62,
9531,
62,
21282,
74,
1330,
17490,
198,
6738,
1619,
10671,
66,
62,
9531,
62,
2... | 2.257062 | 354 |
import pytest
from streamlit_prophet.lib.evaluation.preparation import add_time_groupers
from tests.samples.df import df_test
@pytest.mark.parametrize(
"df",
[df_test[8], df_test[10], df_test[11]],
)
| [
11748,
12972,
9288,
198,
6738,
4269,
18250,
62,
22930,
3202,
13,
8019,
13,
18206,
2288,
13,
3866,
1845,
341,
1330,
751,
62,
2435,
62,
8094,
364,
198,
6738,
5254,
13,
82,
12629,
13,
7568,
1330,
47764,
62,
9288,
628,
198,
31,
9078,
92... | 2.470588 | 85 |
#!/usr/bin/env python
'''Shape submodule for dGraph scene description module
David Dunn
Jan 2017 - created by splitting off from dGraph
ALL UNITS ARE IN METRIC
ie 1 cm = .01
www.qenops.com
'''
__author__ = ('David Dunn')
__version__ = '1.6'
__all__ = ["Shape", "PolySurface"]
import dGraph as dg
import dGraph.materials as dgm
from dGraph.dio import obj
import dGraph.config as config
from math import sin, cos, pi
import numpy as np
from numpy.linalg import norm
from numpy import dot, cross, matlib
import OpenGL.GL as GL
from OpenGL.GL import shaders
import ctypes
class Shape(dg.WorldObject):
    ''' Any world object that has a renderable surface
        Material (connection to a material class object for illumination information)
        Intersection (given a Ray will return all the intersection points and distances)
        Normal (given a surface point will calculate a normalized normal)
        Bounding Box
        ???? Casts shadows
        ???? U and V values

        Subclasses must implement generateVBO() and renderGL().
    '''
    @property
    def material(self):
        ''' Deprecated. First entry of the internal material list. '''
        return self._materials[0]
    @material.setter
    def material(self, material):
        ''' Deprecated. Replaces the first entry of the internal material list.

        Added for symmetry with the getter: previously the property was
        read-only and callers were forced through setMaterial().
        '''
        self._materials[0] = material
    def setMaterial(self, material):
        ''' Deprecated; prefer assigning to the ``material`` property. '''
        self._materials[0] = material
    def generateVBO(self):
        ''' an abstract method for implimentation in subclasses'''
        raise NotImplementedError("Please Implement this method")
    def renderGL(self):
        ''' an abstract method for implimentation in subclasses'''
        raise NotImplementedError("Please Implement this method")
class PolySurface(Shape):
'''
A surface object defined by polygons
Verticies - local x,y,z and w of each vert
UVs - uv position list
Normals - normal vector list
Edges - maybe not needed?
Faces - dict List consisting of verts, uvs, and normals of 3 or more verts that make up a face
MaterialId - index of material
'''
    def triangulateGL(self):
        ''' Compute tangents and bitangents, then flatten the mesh into
        OpenGL-ready vertex/element arrays.

        Returns (vertexData, elementArray, lengths) where vertexData is the
        concatenation [positions, uvs, normals, tangents, bitangents,
        materialIds], elementArray is uint32 face indices, and lengths gives
        the element count of each section.
        '''
        # http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-13-normal-mapping/
        # Tangent-space vectors can only be derived when UVs exist.
        if len(self._faceUvs.A1) > 0:
            self._tangents = np.zeros(self._normals.shape, self._normals.dtype)
            self._bitangents = np.zeros(self._normals.shape, self._normals.dtype)
            for triIndex in range(self._faceVerts.shape[0]):
                # Shortcuts for vertices
                v0 = self._verts[self._faceVerts[triIndex,0]].A1;
                v1 = self._verts[self._faceVerts[triIndex,1]].A1;
                v2 = self._verts[self._faceVerts[triIndex,2]].A1;
                # Shortcuts for UVs
                uv0 = self._uvs[self._faceUvs[triIndex,0]].A1;
                uv1 = self._uvs[self._faceUvs[triIndex,1]].A1;
                uv2 = self._uvs[self._faceUvs[triIndex,2]].A1;
                # Edges of the triangle : position delta
                deltaPos1 = v1-v0;
                deltaPos2 = v2-v0;
                # UV delta
                deltaUV1 = uv1-uv0;
                deltaUV2 = uv2-uv0;
                denom = (deltaUV1[0] * deltaUV2[1] - deltaUV1[1] * deltaUV2[0])
                # Clamp the UV-area determinant away from zero so degenerate
                # UV triangles don't cause a divide-by-zero below.
                denom = max(denom, 1e-5) if denom >= 0 else min(denom, -1e-5)
                r = 1.0 / denom;
                tangent = (deltaPos1 * deltaUV2[1] - deltaPos2 * deltaUV1[1])*r;
                bitangent = (deltaPos2 * deltaUV1[0] - deltaPos1 * deltaUV2[0])*r;
                # Accumulate per-face tangents onto the shared per-normal slots;
                # averaged by the normalization pass below.
                for i in range(3):
                    self._tangents[self._faceNormals[triIndex,i]] += tangent
                    self._bitangents[self._faceNormals[triIndex,i]] += bitangent
            for i in range(len(self._tangents)):
                self._tangents[i] /= np.linalg.norm(self._tangents[i], 2)
                self._bitangents[i] /= np.linalg.norm(self._bitangents[i], 2)
                # orthogonalize (Gram-Schmidt against the vertex normal)
                self._tangents[i] = (self._tangents[i] - np.dot(self._tangents[i], self._normals[i].A1) * self._normals[i].A1);
                self._bitangents[i] = np.cross(self._normals[i].A1, self._tangents[i]);
            self._tangents = np.matrix(self._tangents)
            self._bitangents = np.matrix(self._bitangents)
        ''' Generate openGL triangle lists in VVVVVTTTTTNNNNN form
            Don't need to worry about world matrix - we will do that via model matrix '''
        # Combine all the positions, normals, and uvs into one array, then remove duplicates - that is our vertex buffer
        # Each (vert, uv, normal, materialId) index tuple is packed into a single
        # uint64 key so np.unique can deduplicate combined vertices in one pass.
        maxSize = 2**16 # numpy uint64 is 64 bits spread over 3 attributes is 21 bits 2**21/3 is max number of faces
        fuvs = np.zeros_like(self._faceVerts, dtype=np.uint64) if len(self._uvs.A1) < 3 else self._faceUvs.astype(np.uint64)
        fnorms = np.zeros_like(self._faceVerts, dtype=np.uint64) if len(self._normals.A1) < 3 else self._faceNormals.astype(np.uint64)
        fmatIds = np.zeros_like(self._faceVerts, dtype=np.uint64)
        if len(self._materialIds.A1) >= 1:
            fmatIds = np.resize(self._materialIds.A1, [3,len(self._materialIds.A1)]).transpose().astype(np.uint64) # expand to triangles
        f = np.array(self._faceVerts.astype(np.uint64)+(maxSize*fuvs).astype(np.uint64)+((maxSize**2)*fnorms).astype(np.uint64)+((maxSize**3)*fmatIds).astype(np.uint64)).ravel()
        fullVerts, faces = np.unique(f, return_inverse=True) # get the unique indices and the reconstruction(our element array)
        # Build our actual vertex array by getting the positions, normals and uvs from our unique indicies
        # NOTE(review): fullVerts/maxSize uses true division (uint64 -> float64);
        # exact only while packed keys stay below 2**53 — confirm mesh sizes.
        vertsGL = self._verts[fullVerts%maxSize].getA1()
        uvsGL = np.zeros((0),dtype=np.float32) if len(self._uvs.A1) < 3 else self._uvs[((fullVerts/maxSize)%maxSize).astype(fullVerts.dtype)].getA1()
        normsGL = np.zeros((0),dtype=np.float32) if len(self._normals.A1) < 3 else self._normals[(fullVerts/(maxSize**2)%maxSize).astype(fullVerts.dtype)].getA1()
        tangentsGL = np.zeros((0),dtype=np.float32) if len(self._tangents.A1) < 3 else self._tangents[(fullVerts/(maxSize**2)%maxSize).astype(fullVerts.dtype)].getA1()
        bitangentsGL = np.zeros((0),dtype=np.float32) if len(self._bitangents.A1) < 3 else self._bitangents[(fullVerts/(maxSize**2)%maxSize).astype(fullVerts.dtype)].getA1()
        matIdsGL = np.zeros((0),dtype=np.float32) if len(self._materialIds.A1) < 1 else (fullVerts/(maxSize**3)).astype(np.float32)
        return np.concatenate((vertsGL,uvsGL,normsGL,tangentsGL,bitangentsGL,matIdsGL)), \
               faces.astype(np.uint32), \
               [len(vertsGL),len(uvsGL),len(normsGL),len(tangentsGL),len(bitangentsGL),len(matIdsGL)]
    def generateVBO(self):
        ''' generates OpenGL VBO and VAO objects

        Idempotent: returns immediately once self._VBOdone is set.
        Uploads the flattened vertex data from triangulateGL() into a VBO,
        wires up each shader attribute that both exists in the data and in
        the compiled shader, and uploads the face indices as an element
        array, all recorded into self.vertexArray (the VAO).
        '''
        if self._VBOdone:
            return
        #global shader_pos, shader_uvs, shader_norm
        vertsGL, facesGL, lengths = self.triangulateGL() # make sure our vert list and face list are populated
        self.numTris = len(facesGL)
        if self._shader is None: # make sure our shader is compiled
            self.compileShader()
        self.vertexArray = GL.glGenVertexArrays(1) # create our vertex array
        GL.glBindVertexArray(self.vertexArray) # bind our vertex array
        self.vertexBuffer = GL.glGenBuffers(1) # Generate buffer to hold our vertex data
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vertexBuffer) # Bind our buffer
        GL.glBufferData(GL.GL_ARRAY_BUFFER, vertsGL.nbytes, vertsGL, GL.GL_STATIC_DRAW) # Send the data over to the buffer
        # Need to figure out stride and normal offset before we start
        stride = 0 # stride does not work
        #stride = facesGL[0].nbytes
        #normOffset = self._verts[0].nbytes*lengths[0]+self._verts[0].nbytes*lengths[1] # offset will be length of positions + length of uvs
        # Set up the array attributes
        shader_pos = GL.glGetAttribLocation(self._shader, 'position')
        shader_uvs = GL.glGetAttribLocation(self._shader, 'texCoord') # will return -1 if attribute isn't supported in shader
        shader_norm = GL.glGetAttribLocation(self._shader, 'normal') # will return -1 if attribute isn't supported in shader
        shader_tangent = GL.glGetAttribLocation(self._shader, 'tangent') # will return -1 if attribute isn't supported in shader
        shader_bitangent = GL.glGetAttribLocation(self._shader, 'bitangent') # will return -1 if attribute isn't supported in shader
        shader_materialId = GL.glGetAttribLocation(self._shader, 'materialId') # will return -1 if attribute isn't supported in shader
        GL.glEnableVertexAttribArray(shader_pos) # Add a vertex position attribute
        GL.glVertexAttribPointer(shader_pos, 3, GL.GL_FLOAT, False, stride, None) # Describe the position data layout in the buffer
        # Byte offsets below come from the running section lengths returned by
        # triangulateGL() (positions, uvs, normals, tangents, bitangents, matIds).
        if len(self._uvs.A1) > 2 and shader_uvs != -1:
            GL.glEnableVertexAttribArray(shader_uvs) # Add a vertex uv attribute
            GL.glVertexAttribPointer(shader_uvs, 2, GL.GL_FLOAT, False, stride, ctypes.c_void_p(int(np.sum(lengths[:1]))*facesGL[0].nbytes))
        if len(self._normals.A1) > 2 and shader_norm != -1:
            GL.glEnableVertexAttribArray(shader_norm)
            GL.glVertexAttribPointer(shader_norm, 3, GL.GL_FLOAT, False, stride, ctypes.c_void_p(int(np.sum(lengths[:2]))*facesGL[0].nbytes))
        if len(self._tangents.A1) > 2 and shader_tangent != -1:
            GL.glEnableVertexAttribArray(shader_tangent)
            GL.glVertexAttribPointer(shader_tangent, 3, GL.GL_FLOAT, False, stride, ctypes.c_void_p(int(np.sum(lengths[:3]))*facesGL[0].nbytes))
        # NOTE(review): this guard uses len(self._bitangents) (matrix row count)
        # while every sibling guard uses len(<attr>.A1) (flattened element
        # count) — likely unintended inconsistency; confirm before changing.
        if len(self._bitangents) > 2 and shader_bitangent != -1:
            GL.glEnableVertexAttribArray(shader_bitangent)
            GL.glVertexAttribPointer(shader_bitangent, 3, GL.GL_FLOAT, False, stride, ctypes.c_void_p(int(np.sum(lengths[:4]))*facesGL[0].nbytes))
        if len(self._materialIds.A1) > 2 and shader_materialId != -1:
            GL.glEnableVertexAttribArray(shader_materialId) # Add a vertex material attribute
            GL.glVertexAttribPointer(shader_materialId, 1, GL.GL_FLOAT, False, stride, ctypes.c_void_p(int(np.sum(lengths[:5]))*facesGL[0].nbytes))
        #import pdb;pdb.set_trace()
        # Create face element array
        self.triangleBuffer = GL.glGenBuffers(1) # Generate buffer to hold our face data
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.triangleBuffer) # Bind our buffer as element array
        GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, facesGL.nbytes, facesGL, GL.GL_STATIC_DRAW) # Send the data over to the buffer
        GL.glBindVertexArray( 0 ) # Unbind the VAO first (Important)
        GL.glDisableVertexAttribArray(shader_pos) # Disable our vertex attributes
        # The conditional expressions below are used purely for their side
        # effects (disable only the attributes that were enabled above).
        GL.glDisableVertexAttribArray(shader_uvs) if len(self._uvs.A1) > 2 and shader_uvs != -1 else True
        GL.glDisableVertexAttribArray(shader_norm) if len(self._normals.A1) > 2 and shader_norm != -1 else True
        GL.glDisableVertexAttribArray(shader_materialId) if len(self._materialIds.A1) > 2 and shader_materialId != -1 else True
        GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0) # Unbind the buffer
        GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0) # Unbind the buffer
        self._VBOdone = True
@property
@property | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
33383,
850,
21412,
329,
288,
37065,
3715,
6764,
8265,
198,
198,
11006,
30833,
198,
12128,
2177,
532,
2727,
416,
26021,
572,
422,
288,
37065,
198,
198,
7036,
4725,
29722,
15986,
... | 1.98588 | 6,374 |
# In members/urls.py
from django.conf.urls import url
from . import views
# URL namespace, so templates can reverse e.g. "members:all_members".
app_name = "members"
# NOTE(review): django.conf.urls.url() is deprecated (removed in Django 4.0);
# consider migrating to django.urls.re_path() or path() — confirm the
# project's Django version first.
urlpatterns=[
    url(r'^$', views.AllMembersView.as_view(), name="all_members"),
    url(r'^new_member/$', views.NewMemberView.as_view(), name="new_member"),
]
| [
2,
554,
1866,
14,
6371,
82,
13,
9078,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
366,
30814,
1,
198,
6371,
33279,
82,
41888,
198,
220,
19016,
7,
8... | 2.673684 | 95 |
import numpy as np
import pytest
from numpy.testing import (assert_allclose, assert_array_almost_equal,
assert_array_equal)
from astrodata import NDAstroData
from geminidr.gemini.lookups import DQ_definitions as DQ
from gempy.library.nddops import NDStacker, sum1d
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.mark.parametrize('func', [NDStacker.median, NDStacker.lmedian])
@pytest.mark.parametrize('func,expected_median,expected_var',
[(NDStacker.median, 2.5, 0.65),
(NDStacker.lmedian, 2, 0.79)])
@pytest.mark.xfail(reason='review this')
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
299,
32152,
13,
33407,
1330,
357,
30493,
62,
439,
19836,
11,
6818,
62,
18747,
62,
28177,
62,
40496,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.195286 | 297 |
# -*- coding: utf-8 -*-
from argparse import ArgumentError, ArgumentParser, RawTextHelpFormatter
from typing import NoReturn, Text
from constants.general import CLI
# ==============================================================================
# CLASS
# ==============================================================================
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
6738,
1822,
29572,
1330,
45751,
12331,
11,
45751,
46677,
11,
16089,
8206,
22087,
8479,
1436,
201,
198,
6738,
19720,
1330,
1400,
13615,
11,
8255,
201,
198,
... | 4.794521 | 73 |
import logging
import os
import yaml
from pkg_resources import resource_filename
# For testing
if __name__ == "__main__":
print(get_yaml()) | [
11748,
18931,
198,
11748,
28686,
198,
11748,
331,
43695,
198,
6738,
279,
10025,
62,
37540,
1330,
8271,
62,
34345,
628,
198,
198,
2,
1114,
4856,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7,... | 3.106383 | 47 |
"""
Write a program that asks for the size of a file to
download (in MB) and the speed of an Internet link
(in Mbps), then computes and reports the approximate
download time of the file over that link (in minutes).

(Translated from the original Portuguese problem statement.)
"""
# Prompts are intentionally left in Portuguese: they are runtime strings.
file_size = float(input("Digite o tamanho do arquivo\n"))  # file size in MB
speed_link = float(input("Digite a velocidade da internet\n"))  # link speed in Mbps
# NOTE(review): calculate_time() is not defined in this chunk of the file —
# presumably defined above/elsewhere; confirm before running.
print(calculate_time(file_size, speed_link))
# The string below is the original author's scratch work, kept verbatim
# ("segundos" = seconds); it has no runtime effect.
"""
50 mb = x segundos
50 mb = 1s
size = x
speed_link = 1
x = size/speed
x = 50 *1
50x = 50
x = 50/50
x = 1
x = """
37811,
198,
50110,
50041,
23781,
1430,
64,
8358,
613,
50041,
267,
256,
10546,
8873,
390,
23781,
610,
421,
23593,
198,
1845,
64,
4321,
357,
368,
10771,
8,
304,
257,
11555,
420,
312,
671,
390,
23781,
2792,
390,
220,
198,
28566,
357,
368... | 2.462264 | 212 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
... | 3.322034 | 59 |
#from: http://stackoverflow.com/questions/17112550/python-and-numba-for-vectorized-functions
#setup: import numpy as np ; np.random.seed(0); N = 100000 ; a, b, c = np.random.rand(N), np.random.rand(N), np.random.rand(N)
#run: vibr_energy(a, b, c)
#pythran export vibr_energy(float64[], float64[], float64[])
import numpy
| [
2,
6738,
25,
2638,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
27192,
11623,
1120,
14,
29412,
12,
392,
12,
77,
2178,
64,
12,
1640,
12,
31364,
1143,
12,
12543,
2733,
198,
2,
40406,
25,
1330,
299,
32152,
355,
45941,
2162,
... | 2.515625 | 128 |
from utils.imports import import_all_file_under_current_dir
# Import every module in this file's directory at package-import time —
# presumably to trigger their registration side effects; confirm the
# intent in utils.imports.
import_all_file_under_current_dir(__file__)
| [
6738,
3384,
4487,
13,
320,
3742,
1330,
1330,
62,
439,
62,
7753,
62,
4625,
62,
14421,
62,
15908,
198,
198,
11748,
62,
439,
62,
7753,
62,
4625,
62,
14421,
62,
15908,
7,
834,
7753,
834,
8,
198
] | 2.837838 | 37 |
import http
from fastapi import APIRouter, Body, Depends
from app.dependencies import get_current_user
from app.models.user import User
from app.schemas.messages import MessageCreateSchema, MessageSchema, MessageUpdateSchema
from app.services.messages import (
add_reaction_to_message,
create_message,
create_reply_message,
delete_message,
remove_reaction_from_message,
update_message,
)
router = APIRouter()  # message endpoints below register their handlers on this router
@router.post(
"",
response_description="Create new message",
response_model=MessageSchema,
status_code=http.HTTPStatus.CREATED,
)
@router.patch(
"/{message_id}",
response_model=MessageSchema,
summary="Update message",
)
@router.delete(
"/{message_id}",
summary="Remove message",
status_code=http.HTTPStatus.NO_CONTENT,
)
@router.post(
"/{message_id}/reactions/{reaction_emoji}",
summary="Add reaction to message",
status_code=http.HTTPStatus.NO_CONTENT,
)
@router.delete(
"/{message_id}/reactions/{reaction_emoji}",
summary="Remove reaction to message",
status_code=http.HTTPStatus.NO_CONTENT,
)
@router.post(
"/{message_id}/replies",
response_model=MessageSchema,
summary="Create reply to original message",
status_code=http.HTTPStatus.CREATED,
)
| [
11748,
2638,
198,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
12290,
11,
2129,
2412,
198,
198,
6738,
598,
13,
45841,
3976,
1330,
651,
62,
14421,
62,
7220,
198,
6738,
598,
13,
27530,
13,
7220,
1330,
11787,
198,
6738,
598,
13,
... | 2.680672 | 476 |
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interfaces provided by RelStorage database adapters"""
from __future__ import absolute_import
from ZODB.POSException import StorageError
from ZODB.POSException import ReadConflictError
from ZODB.POSException import ConflictError
from zope.interface import Attribute
from zope.interface import Interface
# pylint:disable=inherit-non-class,no-method-argument,no-self-argument
# pylint:disable=too-many-ancestors,too-many-lines
from relstorage.interfaces import Tuple
from relstorage.interfaces import Object
from relstorage.interfaces import Bool
from relstorage.interfaces import Factory
from relstorage.interfaces import IException
###
# Abstractions to support multiple databases.
###
class IDBDialect(Interface):
    """
    Handles converting from our internal "standard" SQL queries to
    something database specific.
    """
    # NOTE: zope.interface method declarations below take no ``self``.
    # TODO: Fill this in.
    def boolean_str(value):
        """
        Given exactly a `bool` (`True` or `False`) return the string the database
        uses to represent that literal.
        By default, this will be "TRUE" or "FALSE", but older versions of SQLite
        need 1 or 0, while Oracle needs "'Y'" or "'N'".
        """
class IDBDriver(Interface):
    """
    An abstraction over the information needed for RelStorage to work
    with an arbitrary DB-API driver.
    """
    # NOTE: this is a zope.interface specification — the attribute/field
    # assignments and self-less method declarations below document the
    # contract; concrete drivers supply the implementations.
    __name__ = Attribute("The name of this driver")
    disconnected_exceptions = Tuple(
        description=(u"A tuple of exceptions this driver can raise on any operation if it is "
                     u"disconnected from the database."),
        value_type=Factory(IException)
    )
    close_exceptions = Tuple(
        description=(u"A tuple of exceptions that we can ignore when we try to "
                     u"close the connection to the database. Often this is the same "
                     u"or an extension of `disconnected_exceptions`."
                     u"These exceptions may also be ignored on rolling back the connection, "
                     u"if we are otherwise completely done with it and prepared to drop it. "),
        value_type=Factory(IException),
    )
    lock_exceptions = Tuple(
        description=u"A tuple of exceptions",
        value_type=Factory(IException),
    ) # XXX: Document
    use_replica_exceptions = Tuple(
        description=(u"A tuple of exceptions raised by connecting "
                     u"that should cause us to try a replica."),
        value_type=Factory(IException)
    )
    Binary = Attribute("A callable.")
    dialect = Object(IDBDialect, description=u"The IDBDialect for this driver.")
    cursor_arraysize = Attribute(
        "The value to assign to each new cursor's ``arraysize`` attribute.")
    supports_64bit_unsigned_id = Attribute("Can the driver handle 2**64 as a parameter?")
    connect = Attribute("""
    A callable to create and return a new connection object.
    The signature is not specified here because the
    required parameters differ between databases and drivers. The interface
    should be agreed upon between the :class:`IConnectionManager` and
    the drivers for its database.
    This connection, and all objects created from it such as cursors,
    should be used within a single thread only.
    """)
    def cursor(connection, server_side=False):
        """
        Create and return a new cursor sharing the state of the given
        *connection*.
        The cursor should be closed when it is no longer needed. The
        cursor should be considered forward-only (no backward
        scrolling) and ephemeral (results go away when the attached
        transaction is committed or rolled back).
        For compatibility, previous cursors should not have
        outstanding results pending when this is called and while the
        returned cursor is used (not all drivers permit multiple
        active cursors).
        If *server_side* is true (not the default), request that the
        driver creates a cursor that will **not** buffer the complete
        results of a query on the client. Instead, the results should
        be streamed from the server in batches. This can reduce the
        maximum amount of memory needed to handle results, if done
        carefully.
        For compatibility, server_side cursors can only be used
        to execute a single query.
        Most drivers (``psycopg2``, ``psycopg2cffi``, ``pg8000``,
        ``mysqlclient``) default to buffering the entire results
        client side before returning from the ``execute`` method. This
        can reduce latency and increase overall throughput, but at the
        cost of memory, especially if the results will be copied into
        different data structures.
        Not all drivers support server-side cursors; they will ignore
        that request. At this writing, this includes ``pg8000``. Some
        drivers (at this writing, only ``gevent MySQLdb``) always use
        server-side cursors. The ``cx_Oracle`` driver is unevaluated.
        ``psycopg2`` and ``psycopg2cffi`` both iterate in chunks of
        ``cur.itersize`` by default. PyMySQL seems to iterate one row at a time.
        ``mysqlclient`` defaults to also iterating one row at a time, but
        we patch that to operate in chunks of ``cur.arraysize``.
        """
    def binary_column_as_state_type(db_column_data):
        """
        Turn *db_column_data* into something that's a valid pickle
        state.
        Valid pickle states should be acceptable to
        `io.BytesIO` and `pickle.UnPickler`.
        *db_column_data* came from a column of data declared to be of the
        type that we store state information in (e.g., a BLOB on MySQL
        or Oracle).
        """
    def binary_column_as_bytes(db_column_data):
        """
        Turn *db_column_data* into a `bytes` object.
        Use this when the specific type must be known,
        for example to prefix or suffix additional byte values
        like that produced by `p64`.
        """
    def enter_critical_phase_until_transaction_end(connection, cursor):
        """
        Given a connection and cursor opened by this driver, cause it
        to attempt to raise its priority and return results faster.
        This mostly has meaning for gevent drivers, which may limit
        the amount of time they spend in the hub and the number of context
        switches to other greenlets.
        This phase continues until *after* the ultimate call that
        commits or aborts is sent, but should revert to normal as quickly as
        possible after that. :class:`IRelStorageAdapter` may cooperate with
        the driver using implementation-specific methods to end the phase
        at an appropriate time if there is a hidden commit.
        This method must be idempotent (have the same effect if called more than
        once) within a given transaction.
        """
    def is_in_critical_phase(connection, cursor):
        """
        Answer whether :meth:`enter_critical_phase_until_transaction_end` is in effect.
        """
    def exit_critical_phase(connection, cursor):
        "If currently in a critical phase, de-escalate."
    def exception_is_deadlock(exc):
        """
        Given an exception object, return True if it represents a deadlock
        in the database.
        The exception need not be an exception produced by the driver.
        """
class IDBDriverSupportsCritical(IDBDriver):
    """
    Marker interface for drivers whose
    :meth:`enter_critical_phase_until_transaction_end` actually does
    something useful, rather than being a no-op.
    """
class IDBDriverFactory(Interface):
    """
    Information about, and a way to get, an `IDBDriver`
    implementation.
    """
    # NOTE: zope.interface method declarations below take no ``self``.
    driver_name = Attribute("The name of this driver produced by this factory.")
    def check_availability():
        """
        Return a boolean indicating whether a call to this factory
        will return a driver (True) or will raise an error (False).
        """
    def __call__(): # pylint:disable=signature-differs
        """
        Return a new `IDBDriver` as represented by this factory.
        If it is not possible to do this, for example because the
        module cannot be imported, raise an `DriverNotAvailableError`.
        """
class DriverNotAvailableError(Exception):
    """
    Raised when a requested driver isn't available.
    """
    #: The name of the requested driver
    driver_name = None
    #: The `IDBDriverOptions` that was asked for the driver.
    driver_options = None
    #: The underlying reason string, for example, from an import error
    #: if such is available.
    reason = None
    # NOTE(review): ``__str__`` is not defined in this chunk of the file;
    # the alias below implies an ``__init__``/``__str__`` pair was elided
    # from this view — confirm against the full source, since as shown the
    # class body would raise NameError.
    __repr__ = __str__
class UnknownDriverError(DriverNotAvailableError):
    """
    Signals a request for a driver name that no factory has registered.
    """
class NoDriversAvailableError(DriverNotAvailableError):
    """
    Signals that none of the known drivers could be made available.
    """
class IDBDriverOptions(Interface):
    """
    Implemented by a module to provide alternative drivers.
    """
    # NOTE: zope.interface method declarations below take no ``self``.
    database_type = Attribute("A string naming the type of database. Informational only.")
    def select_driver(driver_name=None):
        """
        Choose and return an `IDBDriver`.
        The *driver_name* of "auto" is equivalent to a *driver_name* of
        `None` and means to choose the highest priority available driver.
        """
    def known_driver_factories():
        """
        Return an iterable of the potential `IDBDriverFactory`
        objects that can be used by `select_driver`.
        Each driver factory may or may not be available.
        The driver factories are returned in priority order, with the highest priority
        driver being first.
        """
###
# Creating and managing DB-API 2.0 connections.
# (https://www.python.org/dev/peps/pep-0249/)
###
class IConnectionManager(Interface):
"""
Open and close database connections.
This is a low-level interface; most operations should instead
use a pre-existing :class:`IManagedDBConnection`.
"""
isolation_load = Attribute("Default load isolation level.")
isolation_store = Attribute("Default store isolation level.")
isolation_read_committed = Attribute("Read committed.")
isolation_serializable = Attribute("Serializable.")
def open(
isolation=None,
deferrable=False,
read_only=False,
replica_selector=None,
application_name=None,
**kwargs):
"""Open a database connection and return (conn, cursor)."""
def close(conn=None, cursor=None):
"""
Close a connection and cursor, ignoring certain errors.
Return a True value if the connection was closed cleanly;
return a false value if an error was ignored.
"""
def rollback_and_close(conn, cursor):
"""
Rollback the connection and close it, ignoring certain errors.
Certain database drivers, such as MySQLdb using the SSCursor, require
all cursors to be closed before rolling back (otherwise it generates a
ProgrammingError: 2014 "Commands out of sync").
This method abstracts that.
:return: A true value if the connection was closed without ignoring any exceptions;
if an exception was ignored, returns a false value.
"""
def rollback(conn, cursor):
"""
Like `rollback_and_close`, but without the close, and letting
errors pass.
If an error does happen, then the connection and cursor are closed
before this method returns.
"""
def rollback_quietly(conn, cursor):
"""
Like `rollback_and_close`, but without the close.
:return: A true value if the connection was rolled back without ignoring any exceptions;
if an exception was ignored, returns a false value (and the connection and cursor
are closed before this method returns).
"""
def begin(conn, cursor):
"""
Call this on a store connection after restarting it.
This lets the store connection know that it may need to begin a
transaction, even if it was freshly opened.
"""
def open_and_call(callback):
"""Call a function with an open connection and cursor.
If the function returns, commits the transaction and returns the
result returned by the function.
If the function raises an exception, aborts the transaction
then propagates the exception.
"""
def open_for_load():
"""
Open a connection for loading objects.
This connection is read only, and presents a consistent view
of the database as of the time the first statement is
executed. It should be opened in ``REPEATABLE READ`` or higher
isolation level. It must not be in autocommit.
:return: ``(conn, cursor)``
"""
def restart_load(conn, cursor, needs_rollback=True):
"""
Reinitialize a connection for loading objects.
This gets called when polling the database, so it needs to be
quick.
Raise one of self.disconnected_exceptions if the database has
disconnected.
"""
def open_for_store(**open_args):
"""
Open and initialize a connection for storing objects.
This connection is read/write, and its view of the database
needs to be consistent for each statement, but should read a
fresh snapshot on each statement for purposes of conflict
resolution and cooperation with other store connections. It
should be opened in ``READ COMMITTED`` isolation level,
without autocommit. (Opening in ``REPEATABLE READ`` or higher,
with a single snapshot, could reduce the use of locks, but
increases the risk of serialization errors and having
transactions rollback; we could handle that by raising
``ConflictError`` and letting the application retry, but only
if we did that before ``tpc_finish``, and not all test cases
can handle that either.)
This connection will take locks on rows in the state tables,
and hold them during the commit process.
A connection opened by this method is the only type of
connection that can hold the commit lock.
:return: ``(conn, cursor)``
"""
def restart_store(conn, cursor, needs_rollback=True):
"""
Rollback and reuse a store connection.
Raise one of self.disconnected_exceptions if the database
has disconnected.
You can set *needs_rollback* to false if you're certain
the connection does not need rolled back.
"""
def open_for_pre_pack():
"""
Open a connection to be used for the pre-pack phase.
This connection will make many different queries; each one
must be consistent unto itself, but they do not all have to be
consistent with each other. This is because the *first* query
this object makes establishes a base state, and we will
manually discard later changes seen in future queries.
It will read from the state tables and write to the pack tables;
it will not write to the state tables, nor hold the commit lock.
It may hold share locks on state rows temporarily.
This connection may be open for a long period of time, and
will be committed as appropriate between queries. It is
acceptable for this connection to be in autocommit mode, if
required, but it is preferred for it not to be. This should be
opened in ``READ COMMITTED`` isolation level.
:return: ``(conn, cursor)``
"""
def open_for_pack_lock():
"""
Open a connection to be used for the sole purpose of holding
the pack lock.
Use a private connection (lock_conn and lock_cursor) to hold
the pack lock. Have the adapter open temporary connections
to do the actual work, allowing the adapter to use special
transaction modes for packing, and to commit at will without
losing the lock.
If the database doesn't actually use a pack lock,
this may return ``(None, None)``.
"""
    def cursor_for_connection(conn):
        """
        If the cursor returned by an open method was discarded
        for state management purposes, use this to get a new cursor.
        """
    def add_on_store_opened(f):
        """
        Add a callable ``f(cursor, restart=bool)`` to be invoked whenever a
        store connection is opened.

        .. versionadded:: 2.1a1
        """
    def add_on_load_opened(f):
        """
        Add a callable ``f(cursor, restart=bool)`` to be invoked whenever a
        load connection is opened.

        .. versionadded:: 2.1a1
        """
    def describe_connection(conn, cursor):
        """
        Return an object that describes the connection.

        The object should have a useful `str` value.

        .. versionadded:: 3.4.3
        """
class IManagedDBConnection(Interface):
    """
    A managed DB connection consists of a DB-API ``connection`` object
    and a single DB-API ``cursor`` from that connection.

    This encapsulates proper use of ``IConnectionManager``, including
    handling disconnections and re-connecting at appropriate times.

    It is not allowed to use multiple cursors from a connection at the
    same time; not all drivers properly support that.

    If the DB-API connection is not open, presumed to be good, and
    previously accessed, this object has a false value.

    "Restarting" a connection means to bring it to a current view of
    the database. Typically this means a rollback so that a new
    transaction can begin with a new MVCC snapshot.
    """

    cursor = Attribute("The DB-API cursor to use. Read-only.")
    connection = Attribute("The DB-API connection to use. Read-only.")

    def __bool__():
        """
        Return true if the database connection is believed to be ready to use.
        """

    def __nonzero__():
        """
        Same as __bool__ for Python 2.
        """

    def drop():
        """
        Unconditionally drop (close) the database connection.
        """

    def rollback_quietly():
        """
        Rollback the connection and return a true value on success.

        When this completes, the connection will be in a neutral state,
        not idle in a transaction.

        If an error occurs during rollback, the connection is dropped
        and a false value is returned.
        """

    def isolated_connection():
        """
        Context manager that opens a new, distinct connection and
        returns its cursor.

        No matter what happens in the ``with`` block, the connection will be
        dropped afterwards.
        """

    def restart_and_call(f, *args, **kw):
        """
        Restart the connection (roll it back) and call a function
        after doing this.

        This may drop and re-connect the connection if necessary.

        :param callable f:
            The function to call: ``f(conn, cursor, *args, **kwargs)``.
            May be called up to twice if it raises a disconnected exception
            on the first try.

        :return: The return value of ``f``.
        """

    def enter_critical_phase_until_transaction_end():
        """
        As for :meth:`IDBDriver.enter_critical_phase_until_transaction_end`.
        """
class IManagedLoadConnection(IManagedDBConnection):
    """
    A managed connection intended for loading.
    """
class IManagedStoreConnection(IManagedDBConnection):
    """
    A managed connection intended for storing data.
    """
class IReplicaSelector(Interface):
    """Selects a database replica"""

    def current():
        """Get the current replica.

        Return a string. For PostgreSQL and MySQL, the string is
        either a host:port specification or host name. For Oracle,
        the string is a DSN.
        """

    def next():
        """Return the next replica to try.

        Return None if there are no more replicas defined.
        """
class IDatabaseIterator(Interface):
    """Iterate over the available data in the database"""

    def iter_objects(cursor, tid):
        """Iterate over object states in a transaction.

        Yields (oid, prev_tid, state) for each object state.
        """

    def iter_transactions(cursor):
        """
        Iterate over the transaction log, newest first.

        Skips packed transactions. Yields (tid, username, description,
        extension) for each transaction.
        """

    def iter_transactions_range(cursor, start=None, stop=None):
        """
        Return an indexable object over the transactions in the given range, oldest
        first.

        Includes packed transactions.

        Has an object with the properties ``tid_int``, ``username``
        (bytes) ``description`` (bytes) ``extension`` (bytes) and
        ``packed`` (boolean) for each transaction.
        """

    def iter_object_history(cursor, oid):
        """
        Iterate over an object's history.

        Yields an object with the properties ``tid_int``, ``username``
        (bytes) ``description`` (bytes) ``extension`` (bytes) and
        ``pickle_size`` (int) for each transaction.

        :raises KeyError: if the object does not exist
        """

    def iter_current_records(cursor, start_oid_int=0):
        """
        Cause the *cursor* (which should be a server-side cursor)
        to execute a query that will iterate over
        ``(oid_int, tid_int, state_bytes)`` values for all the current objects.

        Each current object is returned only once, at the transaction most recently
        committed for it.

        Returns a generator.

        For compatibility with FileStorage, this must iterate in ascending
        OID order; it must also accept an OID to begin with for compatibility with
        zodbupdate.
        """
class ILocker(Interface):
    """Acquire and release the commit and pack locks."""

    def lock_current_objects(cursor, read_current_oid_ints, shared_locks_block):
        """
        Lock the objects being modified in the current transaction
        exclusively, plus the relevant rows for the objects whose OIDs
        are contained in *read_current_oid_ints* with a read lock.

        The exclusive locks should always be taken in a blocking fashion;
        the shared read locks should be taken without blocking (raising an
        exception if blocking would occur) if possible, unless *shared_locks_block*
        is set to True.

        See :meth:`IRelStorageAdapter.lock_objects_and_detect_conflicts`
        for a description of the expected behaviour.

        This should be done as part of the voting phase of TPC, before
        taking out the final commit lock.

        Returns nothing.

        Typically this will be followed by a call to
        :meth:`detect_conflict`.
        """

    def hold_commit_lock(cursor, ensure_current=True, nowait=False):
        """
        Acquire the commit lock.

        If *ensure_current* is True (the default), other tables may be
        locked as well, to ensure the most current data is available.
        When using row level locks, *ensure_current* is always
        implicit.

        With *nowait* set to True, only try to obtain the lock without
        waiting and return a boolean indicating if the lock was
        successful. **Note:** this parameter is deprecated and will be removed
        in the future; it is not currently used.

        Should raise `UnableToAcquireCommitLockError` if the lock can not
        be acquired before a configured timeout.
        """

    def release_commit_lock(cursor):
        """Release the commit lock"""

    def hold_pack_lock(cursor):
        """Try to acquire the pack lock.

        Raise UnableToAcquirePackUndoLockError if packing or undo is already in progress.
        """

    def release_pack_lock(cursor):
        """Release the pack lock."""
class IObjectMover(Interface):
    """Move object states to/from the database and within the database."""

    def load_current(cursor, oid):
        """
        Returns the current state and integer tid for an object.

        *oid* is an integer. Returns (None, None) if object does not
        exist.
        """

    def load_currents(cursor, oids):
        """
        Returns the oid integer, state, and integer tid for all the specified
        objects.

        *oids* is an iterable of integers. If any objects do no exist,
        they are ignored.
        """

    def load_revision(cursor, oid, tid):
        """Returns the state for an object on a particular transaction.

        Returns None if no such state exists.
        """

    def exists(cursor, oid):
        """Returns a true value if the given object exists."""

    def load_before(cursor, oid, tid):
        """Returns the state and tid of an object before transaction tid.

        Returns (None, None) if no earlier state exists.
        """

    def get_object_tid_after(cursor, oid, tid):
        """Returns the tid of the next change after an object revision.

        Returns None if no later state exists.
        """

    def current_object_tids(cursor, oids, timeout=None):
        """
        Returns the current ``{oid_int: tid_int}`` for specified object ids.

        Note that this may be a BTree mapping, not a dictionary.

        :param oids: An iterable of OID integers.
        :keyword float timeout: If not None, this is an approximate upper bound
            (in seconds) on how long this function will run.
        :raises AggregateOperationTimeoutError: If the timeout was exceeded.
            This will have one extra attribute set, ``partial_result``, which will be a
            (partial) mapping of the results collected before the timeout.
        """

    def on_store_opened(cursor, restart=False):
        """Create the temporary table for storing objects.

        This method may be None, meaning no store connection
        initialization is required.
        """

    def make_batcher(cursor, row_limit):
        """Return an object to be used for batch store operations.

        *row_limit* is the maximum number of rows to queue before
        calling the database.
        """

    def store_temps(cursor, state_oid_tid_iter):
        """
        Store many objects in the temporary table.

        *batcher* is an object returned by :meth:`make_batcher`.

        *state_oid_tid_iter* is an iterable providing tuples
        ``(data, oid_int, prev_tid_int)``. It is guaranteed that the
        ``oid_int`` values will be distinct. It is further guaranteed that
        this method will not be called more than once in a given transaction;
        further updates to the temporary table will be made using
        ``replace_temps``, which is also only called once.
        """

    def restore(cursor, batcher, oid, tid, data):
        """Store an object directly, without conflict detection.

        Used for copying transactions into this database.

        batcher is an object returned by self.make_batcher().
        """

    def detect_conflict(cursor):
        """
        Find all conflicts in the data about to be committed (as stored
        by :meth:`store_temps`)

        Returns a sequence of
        ``(oid, committed_tid, attempted_committed_tid, committed_state)`` where
        each entry refers to a conflicting object. The *committed_state* **must** be
        returned.

        This method should be called during the ``tpc_vote`` phase of a transaction,
        with :meth:`ILocker.lock_current_objects` held.
        """

    def replace_temps(cursor, state_oid_tid_iter):
        """
        Replace all objects in the temporary table with new data from
        *state_oid_tid_iter*.

        This happens after conflict resolution. The param is as for
        ``store_temps``.

        Implementations should try to perform this in as few database operations
        as possible.
        """

    def move_from_temp(cursor, tid, txn_has_blobs):
        """
        Move the temporarily stored objects to permanent storage.

        *tid* is the integer tid of the transaction being committed.

        Returns nothing.

        The steps should be as follows:

            - If we are preserving history, then ``INSERT`` into
              ``object_state`` the values stored in ``temp_store``,
              remembering to coalesce the
              ``LENGTH(temp_store.state)``.

            - Otherwise, when we are not preserving history,
              ``INSERT`` missing rows from ``object_state`` into
              ``temp_store``, and ``UPDATE`` rows that were already
              there. (This is best done with an upsert). If blobs are
              involved, then ``DELETE`` from ``blob_chunk`` where the
              OID is in ``temp_store``.

            - For both types of storage, ``INSERT`` into
              ``blob_chunk`` the values from ``temp_blob_chunk``. In a
              history-free storage, this may be combined with the last
              step in an ``UPSERT``.
        """

    def update_current(cursor, tid):
        """
        Update the current object pointers.

        *tid* is the integer tid of the transaction being committed.

        Returns nothing. This does nothing when the storage is history
        free.

        When the storage preserves history, all the objects in
        ``object_state`` having the given *tid* should have their
        (oid, *tid*) stored into ``current_object``. This can be done
        with a single upsert.

        XXX: Why do we need to look at ``object_state``? Is there a
        reason we can't look at the smaller ``temp_store``? Conflict
        resolution maybe?
        """

    def download_blob(cursor, oid, tid, filename):
        """Download a blob into a file.

        Returns the size of the blob file in bytes.
        """

    def upload_blob(cursor, oid, tid, filename):
        """Upload a blob from a file.

        If tid is None, upload to the temporary table.
        """
class IOIDAllocator(Interface):
    """
    Allocate OIDs and control future allocation.

    The cursor passed here must be from a
    :meth:`store connection <IConnectionManager.open_for_store>`.
    """

    def new_oids(cursor):
        """
        Return a new :class:`list` of new, unused integer OIDs.

        The list should be contiguous and must be in sorted order from
        highest to lowest. It must never contain 0.
        """

    def set_min_oid(cursor, oid_int):
        """
        Ensure the next OID (the rightmost value from
        :meth:`new_oids`) is greater than the given *oid_int*.
        """

    def reset_oid(cursor):
        """
        Cause the sequence of OIDs to begin again from the beginning.
        """
class IPackUndo(Interface):
    """Perform pack and undo operations"""

    MAX_TID = Attribute("The maximum TID that can be stored.")

    def verify_undoable(cursor, undo_tid):
        """Raise UndoError if it is not safe to undo the specified txn.
        """

    def undo(cursor, undo_tid, self_tid):
        """Undo a transaction.

        Parameters: "undo_tid", the integer tid of the transaction to undo,
        and "self_tid", the integer tid of the current transaction.

        Returns the states copied forward by the undo operation as a
        list of (oid, old_tid).

        May raise UndoError.
        """

    def fill_object_refs(conn, cursor, get_references):
        """Update the object_refs table by analyzing new transactions.
        """

    def choose_pack_transaction(pack_point):
        """Return the transaction before or at the specified pack time.

        Returns None if there is nothing to pack.
        """

    def pre_pack(pack_tid, get_references):
        """Decide what to pack.

        pack_tid specifies the most recent transaction to pack.

        get_references is a function that accepts a stored object state
        and returns a set of OIDs that state refers to.
        """

    def pack(pack_tid, packed_func=None):
        """Pack.  Requires the information provided by pre_pack.

        packed_func, if provided, will be called for every object state
        packed, just after the object is removed. The function must
        accept two parameters, oid and tid (64 bit integers).
        """

    def deleteObject(cursor, oid_int, tid_int):
        """
        Delete the revision of *oid_int* in transaction *tid_int*.

            This method marks an object as deleted via a new object
            revision. Subsequent attempts to load current data for the
            object will fail with a POSKeyError, but loads for
            non-current data will succeed if there are previous
            non-delete records. The object will be removed from the
            storage when all not-delete records are removed.

            The serial argument must match the most recently committed
            serial for the object. This is a seat belt.

            --- Documentation for ``IExternalGC``

        In history-free databases there is no such thing as a delete record, so
        this should remove the single
        revision of *oid_int* (which *should* be checked to verify it
        is at *tid_int*), leading all access to *oid_int* in the
        future to throw ``POSKeyError``.

        In history preserving databases, this means to set the state for the object
        at the transaction to NULL, signifying that it's been deleted. A subsequent
        pack operation is required to actually remove these deleted items.
        """
class IPoller(Interface):
    """Poll for new data"""

    def get_current_tid(cursor):
        """
        Returns the highest transaction ID visible to the cursor.

        If there are no transactions, returns 0.
        """

    def poll_invalidations(conn, cursor, prev_polled_tid):
        """
        Polls for new transactions.

        *conn* and *cursor* must have been created previously by
        ``open_for_load()`` (a snapshot connection). *prev_polled_tid*
        is the tid returned at the last poll, or None if this is the
        first poll.

        If the database has disconnected, this method should raise one
        of the exceptions listed in the disconnected_exceptions
        attribute of the associated IConnectionManager.

        Returns ``(changes, new_polled_tid)``, where changes is either
        an iterable of (oid, tid) that have changed, or None to
        indicate that the changes are too complex to list; this must
        cause local storage caches to be invalidated.
        ``new_polled_tid`` is never None.

        Important: You must consume the changes iterable, and you must
        not make any other queries until you do.

        This method may raise :class:`StaleConnectionError` (a
        ``ReadConflictError``) if the database has reverted to an
        earlier transaction, which can happen in an asynchronously
        replicated database. This exception is one that is transient
        and most transaction middleware will catch it and retry the
        transaction.
        """
class ISchemaInstaller(Interface):
    """Install the schema in the database, clear it, or uninstall it"""

    def prepare():
        """
        Create the database schema if it does not already exist.

        Perform any migration steps needed, and call :meth:`verify`
        before returning.
        """

    def verify():
        """
        Ensure that the schema that's installed can be used by this
        RelStorage.

        If it cannot, for example it's history-preserving and we were configured
        to be history-free, raise an exception.
        """

    def zap_all():
        """Clear all data out of the database."""

    def drop_all():
        """Drop all tables and sequences."""
class IScriptRunner(Interface):
    """Run database-agnostic SQL scripts.

    Using an IScriptRunner is appropriate for batch operations and
    uncommon operations that can be slow, but is not appropriate
    for performance-critical code.
    """

    script_vars = Attribute(
        """A mapping providing replacements for parts of scripts.

        Used for making scripts compatible with databases using
        different parameter styles.
        """)

    def run_script_stmt(cursor, generic_stmt, generic_params=()):
        """Execute a statement from a script with the given parameters.

        generic_params should be either an empty tuple (no parameters) or
        a map.

        The input statement is generic and will be transformed
        into a database-specific statement.
        """

    def run_script(cursor, script, params=()):
        """Execute a series of statements in the database.

        params should be either an empty tuple (no parameters) or
        a map.

        The statements are transformed by run_script_stmt
        before execution.
        """

    def run_many(cursor, stmt, items):
        """Execute a statement repeatedly.  Items should be a list of tuples.

        stmt should use '%s' parameter format (not %(name)s).
        """

    # Note: the Oracle implementation also provides run_lob_stmt, which
    # is useful for reading LOBs from the database quickly.
class ITransactionControl(Interface):
    """Begin, commit, and abort transactions."""

    def get_tid(cursor):
        """Returns the most recent tid."""

    def add_transaction(cursor, tid, username, description, extension,
                        packed=False):
        """Add a transaction."""

    def delete_transaction(cursor, tid):
        """Remove a transaction."""

    def commit_phase1(store_connection, tid):
        """
        Begin a commit.  Returns the transaction name.

        The transaction name must not be None.

        This method should guarantee that :meth:`commit_phase2` will
        succeed, meaning that if commit_phase2() would raise any
        error, the error should be raised in :meth:`commit_phase1`
        instead.

        :param store_connection: An :class:`IManagedStoreConnection`
        """

    def commit_phase2(store_connection, txn, load_connection):
        """
        Final transaction commit.

        *txn* is the name returned by :meth:`commit_phase1`.

        :param store_connection: An :class:`IManagedStoreConnection`
            This is what must be committed.
        :param load_connection: An :class:`IManagedLoadConnection`
            corresponding to the store connection. If helpful to the database,
            (for example, for resource reasons) implementations may rollback
            the connection immediately before committing the store connection.
        """

    def abort(store_connection, txn=None):
        """
        Abort the commit, ignoring certain exceptions.

        If *txn* is not None, phase 1 is also aborted.

        :param store_connection: An :class:`IManagedStoreConnection`
        :return: A true value if the connection was rolled back
                 without ignoring any exceptions; if an exception was
                 ignored, returns a false value (and the connection
                 and cursor are closed before this method returns).
        """
class IDBStats(Interface):
    """
    Collecting, viewing and updating database information.
    """

    def get_object_count():
        """Returns the approximate number of objects in the database"""

    def get_db_size():
        """Returns the approximate size of the database in bytes"""

    def large_database_change():
        """
        Call this when the database has changed substantially,
        and it would be a good time to perform any updates or
        optimizations.
        """
class IRelStorageAdapter(Interface):
"""
A database adapter for RelStorage.
Historically, this has just been a holding place for other components
for the particular database. However, it is moving to holding algorithms
involved in the storage; this facilitates moving chunks of functionality
into database stored procedures as appropriate. The basic algorithms are
implemented in :class:`.adapter.AbstractAdapter`.
"""
driver = Object(IDBDriver)
connmanager = Object(IConnectionManager)
dbiter = Object(IDatabaseIterator)
keep_history = Bool(description=u"True if this adapter supports undo")
locker = Object(ILocker)
mover = Object(IObjectMover)
oidallocator = Object(IOIDAllocator)
packundo = Object(IPackUndo)
poller = Object(IPoller)
runner = Object(IScriptRunner)
schema = Object(ISchemaInstaller)
stats = Object(IDBStats)
txncontrol = Object(ITransactionControl)
    def new_instance():
        """
        Return an instance for use by another RelStorage instance.

        Adapters that are stateless can simply return self. Adapters
        that have mutable state must make a clone and return it.
        """
    def release():
        """
        Release the resources held uniquely by this instance.
        """
    def close():
        """
        Release the resources held by this instance and all child instances.
        """
    def __str__():
        """Return a short description of the adapter"""
    def lock_database_and_choose_next_tid(cursor,
                                          username,
                                          description,
                                          extension):
        """
        Lock the database with the commit lock and allocate the next
        tid.

        In a simple implementation, this will first obtain the commit
        lock with a call to :meth:`ILocker.hold_commit_lock`. Then it
        will query the current most recently committed TID with
        :meth:`ITransactionControl.get_tid`. It will choose the next
        TID based on that value and the current timestamp, and then it
        will write that value to the database with
        :meth:`ITransactionControl.add_transaction`.

        The *username*, *description* and *extension* parameters are as for
        ``add_transaction``.

        :return: The new TID integer.
        """
    def lock_database_and_move(
            store_connection, load_connection,
            transaction_has_blobs,
            ude,
            commit=True,
            committing_tid_int=None,
            after_selecting_tid=None
    ):
        """
        Lock the database, choose the next TID, and move temporary
        data into its final place.

        This is used in two modes. In the usual case, *commit* will be
        true, and this method is called to implement the final step of
        ``tpc_finish``. In that case, this method is responsible for
        committing the transaction (using the *store_connection*
        provided). When *commit* is false, this method is effectively
        part of ``tpc_vote`` and must **not** commit.

        The *blobhelper* is the :class:`IBlobHelper`. This method is
        responsible for moving blob data into place if the blob data
        is stored on the server and there are blobs in this
        transaction. (Implementations that use stored procedures will
        probably not need this argument; it is here to be able to
        provide ``txn_has_blobs`` to
        :meth:`IObjectMover.move_from_temp`.)

        *ude* is a tuple of ``(username, description, extension)``.

        If *committing_tid_int* is None, then this method must lock
        the database and choose the next TID as if by calling
        :meth:`lock_database_and_choose_next_tid` (passing in the
        expanded *ude*); if it is **not** None, the database has
        already been locked and the TID selected.

        *after_selecting_tid* is a function of one argument, the
        committing integer TID. If it is provided, it must be called
        once the TID has been selected and temporary data moved into
        place.

        Implementations are encouraged to do all of this work in as
        few calls to the database as possible with a stored procedure
        (ideally one call). The default implementation will use
        :meth:`lock_database_and_choose_next_tid`,
        :meth:`IObjectMover.move_from_temp`,
        :meth:`IObjectMover.update_current` and
        :meth:`ITransactionControl.commit_phase1` and
        :meth:`ITransactionControl.commit_phase2`.

        When committing, implementations are encouraged to exit any
        :meth:`critical phase <IDBDriver.enter_critical_phase_until_transaction_end>`
        in the most timely manner possible after ensuring that the commit
        request has been sent, especially if only one
        communication with the database is required that may block for an arbitrary
        time to get the lock.

        :param load_connection: The load connection corresponding to the store
            connection. When *commit* is true, this **may** be rolled back
            immediately before actually committing the *store_connection*, if
            that assists the database (for example, with resource management).
            If *commit* is not true, the load connection must not be changed.
            Rolling back the load connection is optional.

            Be careful to use the *store_connection* for all operations requiring
            a current view of the database or any writes. The load connection
            has a historical view and is not writable.
        :return: A tuple ``(committing_tid_int, prepared_txn_id)``;
                 the *prepared_txn_id* is irrelevant if *commit* was
                 true.
        """
def lock_objects_and_detect_conflicts(
cursor,
read_current_oids,
):
"""
Without taking the commit lock, lock the objects this
transaction wants to modify (for exclusive update) and the
objects in *read_current_oids* (for shared read).
Returns an iterable of ``(oid_int, committed_tid_int,
tid_this_txn_saw_int, committed_state)`` for current objects
that were locked, plus objects which had conflicts.
Implementations are encouraged to do all this work in as few
calls to the database as possible with a stored procedure. The
default implementation will use
:meth:`ILocker.lock_current_objects`,
:meth:`IObjectMover.current_object_tids`, and
:meth:`IObjectMover.detect_conflicts`.
This method may raise the same lock exceptions and
:meth:`ILocker.lock_current_objects`. In particular, it should
take care to distinguish between a failure to acquire an
update lock and a failure to acquire a read lock by raising
the appropriate exceptions
(:class:`UnableToLockRowsToModifyError` and
:class:`UnableToLockRowsToReadCurrentError`, respectively).
Because two separate classes of locks are to be obtained,
implementations will typically need to make two separate
locking queries. If the second of those queries fails, the
implementation is encouraged to immediately release locks
taken by the first operation before raising an exception.
Ideally this happens in the database stored procedure.
(PostgreSQL works this way automatically --- any error rolls
the transaction back and releases locks --- but this takes
work on MySQL except in the case of deadlock.)
.. rubric:: Read Current Locks
The *read_current_oids* maps OID integers to expected TID
integers. Each such object must be read share locked for the
duration of the transaction so we can ensure that the final
commit occurs without those objects having been changed. From
ReadVerifyingStorage's ``checkCurrentSerialInTransaction``:
"If no [ReadConflictError] exception is raised, then the
serial must remain current through the end of the
transaction."
Applications sometimes (often) perform readCurrent() on the
*wrong* object (for example: the ``BTree`` object or the
``zope.container`` container object, when what is really
required, what will actually be modified, is a ``BTree``
bucket---very hard to predict), so very often these objects
will not ever be modified. (Indeed, ``BTree.__setitem__`` and
``__delitem__`` perform readCurrent on the BTree itself; but
once the first bucket has been established, the BTree will not
usually be modified.) A share lock is enough to prevent any
modifications without causing unnecessary blocking if the
object would never be modified.
In the results, if the ``tid_this_txn_saw_int`` is ``None``,
that was an object we only read, and share locked. If the
``committed_tid_int`` does not match the TID we expected to
get, then the caller will raise a ``ReadConflictError`` and
abort the transaction. The implementation is encouraged to
return all such rows *first* so that these inexpensive checks
can be accomplished before the more expensive conflict
resolution process.
Optionally, if this method can detect a read current violation
based on the data in *read_current_oids* at the database
level, it may raise a :class:`ReadConflictError`. This method
is also allowed and encouraged to *only* return read current
violations; further, it need only return the first such
violation (because an exception will immediately be raised.)
Implementations are encouraged to use the database ``NOWAIT``
feature (or equivalent) to take read current locks. If such an
object is already locked exclusively, that means it is being
modified and this transaction is racing the modification
transaction. Taking the lock with ``NOWAIT`` and raising an
error lets the write transaction proceed, while this one rolls
back and retries. (Hopefully the write transaction finishes
quickly, or several retries may be needed.)
.. rubric:: Modified Objects and Conflicts
All objects that this transaction intends to modify (which are
in the ``temp_store`` table) must be exclusively write locked
when this method returns.
The remainder of the results (where ``tid_this_txn_saw_int``
is not ``None``) give objects that we have detected a conflict
(a modification that has committed earlier to an object this
transaction also wants to modify).
As an optimization for conflict resolution,
``committed_state`` may give the current committed state of
the object (corresponding to ``committed_tid_int``), but is
allowed to be ``None`` if there isn't an efficient way to
query that in bulk from the database.
.. rubric:: Deadlocks, and Shared vs Exclusive Locks
It might seem that, because no method of a transaction
(*except* for ``restore()``) writes directly to the
``object_state`` or ``current_object`` table *before*
acquiring the commit lock, a share lock is enough even for
objects we're definitely going to modify. That way leads to
deadlock, however. Consider this order of operations:
1. Tx a: LOCK OBJECT 1 FOR SHARE. (Tx a will modify this.)
2. Tx b: LOCK OBJECT 1 FOR SHARE. (Tx b is just
readCurrent.)
3. Tx a: Obtain commit lock.
4. Tx b: attempt to obtain commit lock; block.
5. Tx a: UPDATE OBJECT 1; attempt to escalate shared lock
to exclusive lock. --> DEADLOCK with the shared lock
from step 2.
Tx a needs to raise the lock of object 1, but Tx b's share
lock is preventing it. Meanwhile, Tx b wants the commit lock,
but Tx a is holding it.
If Tx a took an exclusive lock, it would either block Tx b
from getting a share lock, or be blocked by Tx b's share lock;
either way, whichever one got to the commit lock would be able
to complete.
Further, it is trivial to show that when using two lock
classes, two transactions that have overlapping sets of
objects (e.g., a wants shared on ``(1, 3, 5, 7)`` and
exclusive on ``(2, 4, 6, 8)`` and b wants shared on ``(2, 4,
6, 8)`` and exclusive on ``(3, 5, 7)``), can easily deadlock
*before* taking the commit lock, no matter how we interleave
those operations. This is true if they both take their
exclusive locks first and then attempt share locks on the
remainder, both take shared locks on everything and attempt to
upgrade to exclusive on that subset, or both take just the
shared locks and then attempt to take the exclusive locks.
This extends to more than two processes.
**That's perfectly fine.**
As long as the database either supports ``NOWAIT``
(immediately error when you fail to get a requested lock) or
rapid deadlock detection resulting in an error, we can catch
that error and turn it into the ``ReadConflictError`` it
actually is.
PostgreSQL supports ``NOWAIT`` (and deadlock detection, after
a small but configurable delay). MySQL's InnoDB supports rapid
deadlock detection, and starting with MySQL 8, it supports
``NOWAIT``.
Transactions that deadlock would have been doomed anyway;
a deadlock is just another way of saying there will be a
readCurrent conflict.
.. rubric:: Lock Order
The original strategy was to first take exclusive locks of
things we will be modifying. Once that succeeds, then we
attempt shared locks of readCurrent using ``NOWAIT``. If that
fails because we can't get a lock, we know someone is in the
process of modifying it and we have a conflict. If we get the
locks, we still have to confirm the TIDs are the things we
expect. (A possible optimization is to do those two steps at
once, in the database. ``SELECT FOR SHARE WHERE oid = X and
TID = x``. If we don't get the right number of rows,
conflict.) This prioritizes writers over readers: readers fail
at the expense of writers.
However, it means that if we're going to fail a transaction
because an object we'd like to read lock has been modified, we
have to wait until we timeout, or acquire all of our exclusive
locks. Depending on transaction ordering, this could mean
unnecessarily long delays. Suppose Tx a wants to write to
objects 1 and 2, and Tx b wants to write to 1 but only read 2.
(Recall that a ZODB connection automatically upgrades
readCurrent() into just modifications. 1 and 2 could be bank
accounts; Tx a is doing a transfer, but Tx b is checking
collateral (account 2) for a loan that was approved and
transferring that into 1.)
1. Tx a: LOCK 1, 2 EXCLUSIVE. (No share locks.)
2. Tx b: LOCK 1 EXCLUSIVE. -> Block; queue for lock 1.
3. Tx a: Resolve a conflict in 1, wait for the commit
lock, and finally commit and release the locks on 1 and
4. Tx c: LOCK 2 EXCLUSIVE.
5. Tx b: LOCK 2 SHARE -> wait exception.
Here, Tx b had to wait while Tx a finished its entire
business, only to have Tx c swoop in and get the lock first,
leading to Tx b raising an exception. Some databases guarantee
that locks are handed off in FIFO fashion, but not all do.
Even if the database granted Tb b the share lock first, it
would still discover that the TID had changed and raise an
exception. Meanwhile, Tx b has been holding an exclusive lock
on 1 this entire time, preventing anyone else from modifying
it.
If we take the share locks first, the scenario looks like
this:
1. Tx a: LOCK 1, 2 EXCLUSIVE. (No share locks.)
2. Tx b: LOCK 2 SHARE -> wait exception; begin retry.
Tx b gets much quicker notification that it won't be able to
progress and begins a retry.
Both orders have the problem that if Tx a takes longer to
commit than Tx b does to begin its retry, Tx b may take the
same action against the same state of the database several
times in a row before giving up. What's different is that in
the exclusive first version, that only happens if Tx a and Tx
b have no exclusive locks in common:
1. Tx a: Lock 1 EXCLUSIVE.
2. Tx b: Lock 2 EXCLUSIVE.
3. Tx b: Lock 1 SHARE -> wait exception, begin retry.
However, this can be mitigated by introducing small backoff
delays in the transaction retry logic.
Suppose a different transaction already modified object 2.
With the original lock order (exclusive first), the scenario
doesn't change. Tx b has to wait for Tx a to finish before it
can determine that fact (Tx a has to resolve a conflict or
fail). If Tx b got to go first, it would relatively quickly
discover this fact, but at the cost of waiting for an
exclusive lock for an arbitrary amount of time:
1. Tx b: Lock 1 EXCLUSIVE. (Potentially blocks.)
2. Tx a: Lock 1, 2 EXCLUSIVE. -> Block; queue for lock 1.
3. Tx b: Lock 2 SHARE.
4. Tx b: Determine 2 has been modified; raise
ReadConflictError.
Taking the share lock first solves this concern; Tx b is
immediately able to determine that 2 has been modified and
quickly raise an exception without holding any other locks.
:param cursor: The store cursor.
:param read_current_oids: A mapping from oid integer to tid
integer that the transaction expects.
"""
###
# Exceptions
###
class ReplicaClosedException(Exception):
    """
    Raised when the connection to the database replica has been
    closed and can no longer be used.
    """
class UnableToAcquireLockError(Exception):
    """Base class for errors raised when a database lock cannot be taken."""
class UnableToAcquireCommitLockError(StorageError, UnableToAcquireLockError):
    """
    Raised when the commit lock cannot be obtained before the
    configured timeout elapses.

    Some other transaction was holding the commit lock we needed, so
    retrying the transaction may succeed.

    For historical reasons, this is *not* a ``TransientError``, even
    though a retry could work.
    """
class UnableToLockRowsDeadlockError(ConflictError, UnableToAcquireLockError):
    """
    Base class for lock failures caused by a database-detected
    deadlock, as opposed to a plain lock timeout.
    """
# ZODB inheritance chain: TransientError -> ConflictError -> ReadConflictError;
# the conflict errors defined below are therefore retryable (transient) errors.
class UnableToLockRowsToModifyError(ConflictError, UnableToAcquireLockError):
    """
    Raised when rows this transaction intends to modify could not be
    locked before the timeout elapsed.

    Another transaction already held locks on those rows, so the two
    transactions are in conflict; retrying this transaction may
    succeed.

    Because this is a ``ConflictError``, it is a transient error.
    """
# Link the timeout flavor of this error to its deadlock flavor, so code
# that detects a database deadlock while locking rows-to-modify can raise
# the corresponding conflict error.
# (``UnableToLockRowsToModifyDeadlockError`` is presumably defined
# elsewhere in this module — not visible in this excerpt; confirm.)
UnableToLockRowsToModifyError.DEADLOCK_VARIANT = UnableToLockRowsToModifyDeadlockError
class UnableToLockRowsToReadCurrentError(ReadConflictError, UnableToAcquireLockError):
    """
    Raised when rows for objects given to
    ``Connection.readCurrent()`` could not be locked.

    Another transaction already held those rows locked with intent to
    modify them, putting it in conflict with this transaction;
    retrying the transaction may succeed.

    Because this is a ``ReadConflictError``, it is a transient error.
    """
# Link the timeout flavor of this error to its deadlock flavor, mirroring
# ``UnableToLockRowsToModifyError``.
# (``UnableToLockRowsToReadCurrentDeadlockError`` is presumably defined
# elsewhere in this module — not visible in this excerpt; confirm.)
UnableToLockRowsToReadCurrentError.DEADLOCK_VARIANT = UnableToLockRowsToReadCurrentDeadlockError
class UnableToAcquirePackUndoLockError(StorageError, UnableToAcquireLockError):
    """Raised when the pack/undo lock cannot be taken because a pack or
    undo operation is already in progress."""
class StaleConnectionError(ReadConflictError):
    """
    Raised by `IPoller.poll_invalidations` when a stale connection is
    detected.

    As a ``ReadConflictError`` subclass this is a transient error:
    retrying (with a fresh connection) may succeed.
    """
@classmethod
class AggregateOperationTimeoutError(Exception):
    """
    Raised when a bulk (aggregate) operation in RelStorage notices it
    has run longer than the timeout it was given.
    """
    #: Sentinel that may be replaced with any useful partial result the
    #: operation computed before timing out. Compare an instance's value
    #: against this class-level default to see whether a partial result
    #: was actually provided.
    partial_result = object()
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
3717,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
5094,
13789... | 2.840878 | 21,650 |