text string | size int64 | token_count int64 |
|---|---|---|
# Import the envs module so that envs register themselves
import gym_minigrid
| 78 | 22 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["prepare_characterization"]
import kplr
import transit
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as pl
import george
from george import kernels
from ..prepare import Prepare
from ..download import Download
from ..discontinuity import Discontinuity
def prepare_characterization(kicid, periods, time0s, rors, impacts,
                             es=None,
                             data_window_hw=3.0, min_data_window_hw=0.5):
    """Download Kepler data for a target and build a transit-fitting model.

    Args:
        kicid: Kepler Input Catalog identifier of the target star.
        periods: per-planet orbital periods (days).
        time0s: per-planet transit epochs (days).
        rors: per-planet planet-to-star radius ratios.
        impacts: per-planet impact parameters.
        es: optional per-planet eccentricities (default: circular orbits).
        data_window_hw: half-width (days) of the window around each transit
            that is kept in the trimmed light curves.
        min_data_window_hw: half-width (days) that a chunk must intersect
            for the chunk to be kept at all.

    Returns:
        A ProbabilisticModel wrapping the trimmed light curves, the transit
        system, and log-normal stellar radius/mass constraints.
    """
    # Download and process the light curves.
    pipe = Download()
    pipe = Prepare(pipe)
    pipe = Discontinuity(pipe)
    r = pipe.query(kicid=kicid)

    # Find the data chunks that hit a transit.
    lcs = []
    for lc in r.light_curves:
        # Build the mask of times that hit transits.
        m = np.zeros_like(lc.time, dtype=bool)
        mmin = np.zeros_like(lc.time, dtype=bool)
        for p, t0 in zip(periods, time0s):
            hp = 0.5 * p
            t0 = t0 % p
            # Phase-fold so dt is the distance (days) to the nearest transit.
            dt = np.abs((lc.time - t0 + hp) % p - hp)
            m += dt < data_window_hw
            mmin += dt < min_data_window_hw

        # Trim the dataset and set up the Gaussian Process model.
        if np.any(mmin) and np.sum(m) > 10:
            # Re-normalize the trimmed light curve.
            mu = np.median(lc.flux[m])
            lc.time = np.ascontiguousarray(lc.time[m])
            lc.flux = np.ascontiguousarray(lc.flux[m] / mu)
            lc.ferr = np.ascontiguousarray(lc.ferr[m] / mu)

            # Make sure that the light curve knows its integration time
            # (seconds -> days; assumes index 1 is the intended cadence —
            # TODO confirm).
            lc.texp = kplr.EXPOSURE_TIMES[1] / 86400.0

            # Heuristically guess the Gaussian Process parameters. Residuals
            # are multiplied by lc.factor before being handed to the GP.
            lc.factor = 1000.0
            amp = np.median((lc.factor * (lc.flux-1.0))**2)
            kernel = amp*kernels.Matern32Kernel(4.0)
            lc.gp = george.GP(kernel)

            # Run an initial computation of the GP.
            lc.gp.compute(lc.time, lc.ferr * lc.factor)

            # Save this light curve.
            lcs.append(lc)

    # Set up the initial system model.
    spars = r.star.huber
    star = transit.Central(mass=spars.M, radius=spars.R)
    s = transit.System(star)
    for i in range(len(periods)):
        planet = transit.Body(r=rors[i] * star.radius,
                              period=periods[i],
                              t0=time0s[i] % periods[i],
                              b=impacts[i],
                              e=0.0 if es is None else es[i])
        s.add_body(planet)

    # Approximate the stellar mass and radius measurements as log-normal.
    # Each pair below is (mean of log value, inverse variance of log value).
    q = np.array(spars[["R", "E_R", "e_R"]], dtype=float)
    lnsr = (np.log(q[0]),
            1.0 / np.mean([np.log(q[0] + q[1]) - np.log(q[0]),
                           np.log(q[0]) - np.log(q[0] - q[2])]) ** 2)
    q = np.array(spars[["M", "E_M", "e_M"]], dtype=float)
    lnsm = (np.log(q[0]),
            1.0 / np.mean([np.log(q[0] + q[1]) - np.log(q[0]),
                           np.log(q[0]) - np.log(q[0] - q[2])]) ** 2)

    return ProbabilisticModel(lcs, s, lnsr, lnsm)
class ProbabilisticModel(object):
    """Posterior model combining GP light-curve likelihoods with priors.

    Parameter vector layout (see pack/unpack): GP kernel parameters, then
    optionally [ln(R_star), ln(M_star)] when fit_star is True, then the
    [q1, q2] limb-darkening pair, then per planet
    [ln r, ln period, t0, b, sqrt(e)*sin(pomega), sqrt(e)*cos(pomega)].
    """

    def __init__(self, lcs, system, lnsr, lnsm):
        # lcs: prepared light curves, each carrying its own GP as lc.gp.
        self.lcs = lcs
        # system: transit system with the central star and planet bodies.
        self.system = system
        # lnsr / lnsm: (mean, inverse variance) of ln(stellar radius/mass).
        self.lnsr = lnsr
        self.lnsm = lnsm
        # When False, stellar radius/mass are held fixed in pack/unpack.
        self.fit_star = False

    def pack(self):
        """Serialize the current model state into a flat parameter vector."""
        star = self.system.central
        planets = self.system.bodies
        # All light curves share the same kernel parameters; use the first.
        vec = list(self.lcs[0].gp.kernel.vector)
        if self.fit_star:
            vec += [np.log(star.radius), np.log(star.mass)]
        vec += [
            star.q1,
            star.q2,
        ]
        vec += [v for p in planets for v in (
            np.log(p.r), np.log(p.period), p.t0, p.b,
            np.sqrt(p.e) * np.sin(p.pomega),
            np.sqrt(p.e) * np.cos(p.pomega)
        )]
        return np.array(vec)

    def unpack(self, pars):
        """Update the model state in-place from a flat parameter vector.

        Inverse of pack(); may raise ValueError on invalid values (callers
        rely on this — see lnprob).
        """
        # Update the kernel. Every light curve gets the same kernel values.
        i = len(self.lcs[0].gp.kernel)
        for lc in self.lcs:
            lc.gp.kernel[:] = pars[:i]
        # Update the star.
        star = self.system.central
        if self.fit_star:
            star.radius, star.mass = np.exp(pars[i:i+2])
            i += 2
        star.q1, star.q2 = pars[i:i+2]
        i += 2
        # Update the planets.
        for p in self.system.bodies:
            p.r, p.period = np.exp(pars[i:i+2])
            i += 2
            p.t0, p.b = pars[i:i+2]
            i += 2
            # Recover (e, pomega) from the sqrt(e)*sin / sqrt(e)*cos pair.
            sqesn, sqecs = pars[i:i+2]
            p.e = sqesn**2 + sqecs**2
            p.pomega = np.arctan2(sqesn, sqecs)
            i += 2

    def lnprior(self):
        """Return the log-prior of the current state (-inf if invalid)."""
        lnp = 0.0
        # Apply the stellar parameter constraints: limb-darkening
        # coefficients must lie strictly inside (0, 1).
        star = self.system.central
        if not (0 < star.q1 < 1 and 0 < star.q2 < 1):
            return -np.inf
        # Gaussian constraints on ln(radius) and ln(mass).
        lnsr = np.log(star.radius)
        lnp -= 0.5 * self.lnsr[1] * (self.lnsr[0] - lnsr) ** 2
        lnsm = np.log(star.mass)
        lnp -= 0.5 * self.lnsm[1] * (self.lnsm[0] - lnsm) ** 2
        # And the planet parameters.
        for p in self.system.bodies:
            if p.b < 0.0 or not (-2 * np.pi < p.pomega < 2 * np.pi):
                return -np.inf
            # Kipping (2013) beta prior on the eccentricity.
            lnp += beta(1.12, 3.09).logpdf(p.e)
        return lnp

    def lnlike(self):
        """Return the summed GP log-likelihood over all light curves."""
        ll = 0.0
        for lc in self.lcs:
            try:
                mu = self.system.light_curve(lc.time, texp=lc.texp)
            except RuntimeError:
                # The transit model can fail for unphysical configurations.
                return -np.inf
            r = (lc.flux - mu) * lc.factor
            ll += lc.gp.lnlikelihood(r, quiet=True)
            if not np.isfinite(ll):
                return -np.inf
        return ll

    def lnprob(self, p):
        """Return the log-posterior for parameter vector p (-inf if invalid)."""
        try:
            self.unpack(p)
        except ValueError:
            return -np.inf
        lp = self.lnprior()
        if not np.isfinite(lp):
            return -np.inf
        ll = self.lnlike()
        if not np.isfinite(ll):
            return -np.inf
        return lp + ll

    def plot(self, dy=1e-2):
        """Plot the phase-folded light curves, offset vertically by dy each.

        Folds on the first planet's period/epoch; returns the figure.
        """
        fig = pl.figure()
        ax = fig.add_subplot(111)
        period = self.system.bodies[0].period
        t0 = self.system.bodies[0].t0
        for i, lc in enumerate(self.lcs):
            t = (lc.time - t0 + 0.5 * period) % period - 0.5 * period
            ax.plot(t, lc.flux + i*dy, ".k", alpha=0.5)
            mu = self.system.light_curve(lc.time, texp=lc.texp)
            r = lc.factor * (lc.flux - mu)
            # GP prediction of the correlated noise, rescaled to flux units.
            pred = lc.gp.predict(r, lc.time, mean_only=True) / lc.factor
            ax.plot(t, pred + 1.0 + i*dy, "r", alpha=0.5)
            ax.plot(t, pred + mu + i*dy, "b", alpha=0.5)
        # Mark the transit center.
        ax.axvline(0.0, color="k", alpha=0.3, lw=3)
        return fig
| 6,705 | 2,526 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from re import findall
def f(cur_digit):
    """Build the demo list and transform it `cur_digit` times.

    The seed list is ["a", "b"] doubled twice with a trailing "c"; each step
    either inserts a "c" at the step index or drops the first "a".
    """
    items = ["a", "b"]
    items = items * 2          # -> a b a b
    items = items * 2          # -> a b a b a b a b
    items.append("c")
    for idx in range(int(cur_digit)):
        if items[idx] == "a" or "a" not in items:
            items.insert(idx, "c")
        else:
            items.remove("a")
    return items
# Extract the digits of the entered matriculation number; only the rightmost
# digit drives the result. Uses a raw string for the regex (a bare "\d" is an
# invalid escape sequence and warns on modern Python), and fails with a clear
# message instead of an IndexError when the input contains no digit.
digits = findall(r"\d", input("Please provide your matriculation number: "))
if not digits:
    raise ValueError("The matriculation number must contain at least one digit")
rightmost_digit = digits[-1]
print("Result:", f(rightmost_digit))
| 1,242 | 471 |
#!/usr/bin/env python3
import pvml
import numpy as np
import sys
# Pillow is an optional install; fail early with install instructions.
try:
    import PIL.Image
except ImportError:
    # BUG FIX: "libray" -> "library" in the user-facing message.
    print("To use this script you need the `Pillow' library")
    print("Install it with 'pip install Pillow' or 'pip3 install Pillow'")
    sys.exit()
IMAGENET_CLASSES = list(map(str.strip, """
tench, goldfish, great white shark, tiger shark, hammerhead,
electric ray, stingray, cock, hen, ostrich, brambling, goldfinch,
house finch, junco, indigo bunting, robin, bulbul, jay, magpie,
chickadee, water ouzel, kite, bald eagle, vulture, great grey owl,
European fire salamander, common newt, eft, spotted salamander,
axolotl, bullfrog, tree frog, tailed frog, loggerhead, leatherback
turtle, mud turtle, terrapin, box turtle, banded gecko, common iguana,
American chameleon, whiptail, agama, frilled lizard, alligator lizard,
Gila monster, green lizard, African chameleon, Komodo dragon, African
crocodile, American alligator, triceratops, thunder snake, ringneck
snake, hognose snake, green snake, king snake, garter snake, water
snake, vine snake, night snake, boa constrictor, rock python, Indian
cobra, green mamba, sea snake, horned viper, diamondback, sidewinder,
trilobite, harvestman, scorpion, black and gold garden spider, barn
spider, garden spider, black widow, tarantula, wolf spider, tick,
centipede, black grouse, ptarmigan, ruffed grouse, prairie chicken,
peacock, quail, partridge, African grey, macaw, sulphur-crested
cockatoo, lorikeet, coucal, bee eater, hornbill, hummingbird, jacamar,
toucan, drake, red-breasted merganser, goose, black swan, tusker,
echidna, platypus, wallaby, koala, wombat, jellyfish, sea anemone,
brain coral, flatworm, nematode, conch, snail, slug, sea slug, chiton,
chambered nautilus, Dungeness crab, rock crab, fiddler crab, king
crab, American lobster, spiny lobster, crayfish, hermit crab, isopod,
white stork, black stork, spoonbill, flamingo, little blue heron,
American egret, bittern, crane, limpkin, European gallinule, American
coot, bustard, ruddy turnstone, red-backed sandpiper, redshank,
dowitcher, oystercatcher, pelican, king penguin, albatross, grey
whale, killer whale, dugong, sea lion, Chihuahua, Japanese spaniel,
Maltese dog, Pekinese, Shih-Tzu, Blenheim spaniel, papillon, toy
terrier, Rhodesian ridgeback, Afghan hound, basset, beagle,
bloodhound, bluetick, black-and-tan coonhound, Walker hound, English
foxhound, redbone, borzoi, Irish wolfhound, Italian greyhound,
whippet, Ibizan hound, Norwegian elkhound, otterhound, Saluki,
Scottish deerhound, Weimaraner, Staffordshire bullterrier, American
Staffordshire terrier, Bedlington terrier, Border terrier, Kerry blue
terrier, Irish terrier, Norfolk terrier, Norwich terrier, Yorkshire
terrier, wire-haired fox terrier, Lakeland terrier, Sealyham terrier,
Airedale, cairn, Australian terrier, Dandie Dinmont, Boston bull,
miniature schnauzer, giant schnauzer, standard schnauzer, Scotch
terrier, Tibetan terrier, silky terrier, soft-coated wheaten terrier,
West Highland white terrier, Lhasa, flat-coated retriever,
curly-coated retriever, golden retriever, Labrador retriever,
Chesapeake Bay retriever, German short-haired pointer, vizsla, English
setter, Irish setter, Gordon setter, Brittany spaniel, clumber,
English springer, Welsh springer spaniel, cocker spaniel, Sussex
spaniel, Irish water spaniel, kuvasz, schipperke, groenendael,
malinois, briard, kelpie, komondor, Old English sheepdog, Shetland
sheepdog, collie, Border collie, Bouvier des Flandres, Rottweiler,
German shepherd, Doberman, miniature pinscher, Greater Swiss Mountain
dog, Bernese mountain dog, Appenzeller, EntleBucher, boxer, bull
mastiff, Tibetan mastiff, French bulldog, Great Dane, Saint Bernard,
Eskimo dog, malamute, Siberian husky, dalmatian, affenpinscher,
basenji, pug, Leonberg, Newfoundland, Great Pyrenees, Samoyed,
Pomeranian, chow, keeshond, Brabancon griffon, Pembroke, Cardigan, toy
poodle, miniature poodle, standard poodle, Mexican hairless, timber
wolf, white wolf, red wolf, coyote, dingo, dhole, African hunting dog,
hyena, red fox, kit fox, Arctic fox, grey fox, tabby, tiger cat,
Persian cat, Siamese cat, Egyptian cat, cougar, lynx, leopard, snow
leopard, jaguar, lion, tiger, cheetah, brown bear, American black
bear, ice bear, sloth bear, mongoose, meerkat, tiger beetle, ladybug,
ground beetle, long-horned beetle, leaf beetle, dung beetle,
rhinoceros beetle, weevil, fly, bee, ant, grasshopper, cricket,
walking stick, cockroach, mantis, cicada, leafhopper, lacewing,
dragonfly, damselfly, admiral, ringlet, monarch, cabbage butterfly,
sulphur butterfly, lycaenid, starfish, sea urchin, sea cucumber, wood
rabbit, hare, Angora, hamster, porcupine, fox squirrel, marmot,
beaver, guinea pig, sorrel, zebra, hog, wild boar, warthog,
hippopotamus, ox, water buffalo, bison, ram, bighorn, ibex,
hartebeest, impala, gazelle, Arabian camel, llama, weasel, mink,
polecat, black-footed ferret, otter, skunk, badger, armadillo,
three-toed sloth, orangutan, gorilla, chimpanzee, gibbon, siamang,
guenon, patas, baboon, macaque, langur, colobus, proboscis monkey,
marmoset, capuchin, howler monkey, titi, spider monkey, squirrel
monkey, Madagascar cat, indri, Indian elephant, African elephant,
lesser panda, giant panda, barracouta, eel, coho, rock beauty, anemone
fish, sturgeon, gar, lionfish, puffer, abacus, abaya, academic gown,
accordion, acoustic guitar, aircraft carrier, airliner, airship,
altar, ambulance, amphibian, analog clock, apiary, apron, ashcan,
assault rifle, backpack, bakery, balance beam, balloon, ballpoint,
Band Aid, banjo, bannister, barbell, barber chair, barbershop, barn,
barometer, barrel, barrow, baseball, basketball, bassinet, bassoon,
bathing cap, bath towel, bathtub, beach wagon, beacon, beaker,
bearskin, beer bottle, beer glass, bell cote, bib,
bicycle-built-for-two, bikini, binder, binoculars, birdhouse,
boathouse, bobsled, bolo tie, bonnet, bookcase, bookshop, bottlecap,
bow, bow tie, brass, brassiere, breakwater, breastplate, broom,
bucket, buckle, bulletproof vest, bullet train, butcher shop, cab,
caldron, candle, cannon, canoe, can opener, cardigan, car mirror,
carousel, carpenter's kit, carton, car wheel, cash machine, cassette,
cassette player, castle, catamaran, CD player, cello, cellular
telephone, chain, chainlink fence, chain mail, chain saw, chest,
chiffonier, chime, china cabinet, Christmas stocking, church, cinema,
cleaver, cliff dwelling, cloak, clog, cocktail shaker, coffee mug,
coffeepot, coil, combination lock, computer keyboard, confectionery,
container ship, convertible, corkscrew, cornet, cowboy boot, cowboy
hat, cradle, crane, crash helmet, crate, crib, Crock Pot, croquet
ball, crutch, cuirass, dam, desk, desktop computer, dial telephone,
diaper, digital clock, digital watch, dining table, dishrag,
dishwasher, disk brake, dock, dogsled, dome, doormat, drilling
platform, drum, drumstick, dumbbell, Dutch oven, electric fan,
electric guitar, electric locomotive, entertainment center, envelope,
espresso maker, face powder, feather boa, file, fireboat, fire engine,
fire screen, flagpole, flute, folding chair, football helmet,
forklift, fountain, fountain pen, four-poster, freight car, French
horn, frying pan, fur coat, garbage truck, gasmask, gas pump, goblet,
go-kart, golf ball, golfcart, gondola, gong, gown, grand piano,
greenhouse, grille, grocery store, guillotine, hair slide, hair spray,
half track, hammer, hamper, hand blower, hand-held computer,
handkerchief, hard disc, harmonica, harp, harvester, hatchet, holster,
home theater, honeycomb, hook, hoopskirt, horizontal bar, horse cart,
hourglass, iPod, iron, jack-o'-lantern, jean, jeep, jersey, jigsaw
puzzle, jinrikisha, joystick, kimono, knee pad, knot, lab coat, ladle,
lampshade, laptop, lawn mower, lens cap, letter opener, library,
lifeboat, lighter, limousine, liner, lipstick, Loafer, lotion,
loudspeaker, loupe, lumbermill, magnetic compass, mailbag, mailbox,
maillot, maillot, manhole cover, maraca, marimba, mask, matchstick,
maypole, maze, measuring cup, medicine chest, megalith, microphone,
microwave, military uniform, milk can, minibus, miniskirt, minivan,
missile, mitten, mixing bowl, mobile home, Model T, modem, monastery,
monitor, moped, mortar, mortarboard, mosque, mosquito net, motor
scooter, mountain bike, mountain tent, mouse, mousetrap, moving van,
muzzle, nail, neck brace, necklace, nipple, notebook, obelisk, oboe,
ocarina, odometer, oil filter, organ, oscilloscope, overskirt, oxcart,
oxygen mask, packet, paddle, paddlewheel, padlock, paintbrush, pajama,
palace, panpipe, paper towel, parachute, parallel bars, park bench,
parking meter, passenger car, patio, pay-phone, pedestal, pencil box,
pencil sharpener, perfume, Petri dish, photocopier, pick, pickelhaube,
picket fence, pickup, pier, piggy bank, pill bottle, pillow, ping-pong
ball, pinwheel, pirate, pitcher, plane, planetarium, plastic bag,
plate rack, plow, plunger, Polaroid camera, pole, police van, poncho,
pool table, pop bottle, pot, potter's wheel, power drill, prayer rug,
printer, prison, projectile, projector, puck, punching bag, purse,
quill, quilt, racer, racket, radiator, radio, radio telescope, rain
barrel, recreational vehicle, reel, reflex camera, refrigerator,
remote control, restaurant, revolver, rifle, rocking chair,
rotisserie, rubber eraser, rugby ball, rule, running shoe, safe,
safety pin, saltshaker, sandal, sarong, sax, scabbard, scale, school
bus, schooner, scoreboard, screen, screw, screwdriver, seat belt,
sewing machine, shield, shoe shop, shoji, shopping basket, shopping
cart, shovel, shower cap, shower curtain, ski, ski mask, sleeping bag,
slide rule, sliding door, slot, snorkel, snowmobile, snowplow, soap
dispenser, soccer ball, sock, solar dish, sombrero, soup bowl, space
bar, space heater, space shuttle, spatula, speedboat, spider web,
spindle, sports car, spotlight, stage, steam locomotive, steel arch
bridge, steel drum, stethoscope, stole, stone wall, stopwatch, stove,
strainer, streetcar, stretcher, studio couch, stupa, submarine, suit,
sundial, sunglass, sunglasses, sunscreen, suspension bridge, swab,
sweatshirt, swimming trunks, swing, switch, syringe, table lamp, tank,
tape player, teapot, teddy, television, tennis ball, thatch, theater
curtain, thimble, thresher, throne, tile roof, toaster, tobacco shop,
toilet seat, torch, totem pole, tow truck, toyshop, tractor, trailer
truck, tray, trench coat, tricycle, trimaran, tripod, triumphal arch,
trolleybus, trombone, tub, turnstile, typewriter keyboard, umbrella,
unicycle, upright, vacuum, vase, vault, velvet, vending machine,
vestment, viaduct, violin, volleyball, waffle iron, wall clock,
wallet, wardrobe, warplane, washbasin, washer, water bottle, water
jug, water tower, whiskey jug, whistle, wig, window screen, window
shade, Windsor tie, wine bottle, wing, wok, wooden spoon, wool, worm
fence, wreck, yawl, yurt, web site, comic book, crossword puzzle,
street sign, traffic light, book jacket, menu, plate, guacamole,
consomme, hot pot, trifle, ice cream, ice lolly, French loaf, bagel,
pretzel, cheeseburger, hotdog, mashed potato, head cabbage, broccoli,
cauliflower, zucchini, spaghetti squash, acorn squash, butternut
squash, cucumber, artichoke, bell pepper, cardoon, mushroom, Granny
Smith, strawberry, orange, lemon, fig, pineapple, banana, jackfruit,
custard apple, pomegranate, hay, carbonara, chocolate sauce, dough,
meat loaf, pizza, potpie, burrito, red wine, espresso, cup, eggnog,
alp, bubble, cliff, coral reef, geyser, lakeside, promontory, sandbar,
seashore, valley, volcano, ballplayer, groom, scuba diver, rapeseed,
daisy, yellow lady's slipper, corn, acorn, hip, buckeye, coral fungus,
agaric, gyromitra, stinkhorn, earthstar, hen-of-the-woods, bolete,
ear, toilet tissue, """.replace("\n", " ").split(",")))
# Check the command line
if len(sys.argv) < 2:
    print("USAGE: ./pvmlnet_classify IMAGE1 IMAGE2 IMAGE3 ...")
    sys.exit()

# Load the network
try:
    net = pvml.CNN.load("pvmlnet.npz")
except FileNotFoundError:
    print("Network definition file 'pvmlnet.npz' not found!")
    print("You can download it from the following address:")
    print("https://drive.google.com/file/d/1VmpvQkBk_dLsU54muiwsGcRRT_cSiYwa/view?usp=sharing")
    print()
    sys.exit()

# Load the images: resize each to the 224x224 network input and scale the
# pixel values to [0, 1].
filenames = sys.argv[1:]
images = []
for impath in filenames:
    im = PIL.Image.open(impath).convert("RGB")
    # NOTE(review): PIL.Image.BILINEAR is deprecated in recent Pillow in
    # favor of PIL.Image.Resampling.BILINEAR — confirm the Pillow version.
    im = np.array(im.resize((224, 224), PIL.Image.BILINEAR))
    images.append(im / 255.0)
images = np.stack(images, 0)

# Classify the images
labels, probs = net.inference(images)

# Print the results: the top-5 classes per image, by descending probability.
ii = np.argsort(-probs, 1)
for i in range(len(filenames)):
    for k in range(5):
        p = probs[i, ii[i, k]] * 100
        synset = IMAGENET_CLASSES[ii[i, k]]
        print("{}. {} ({:.1f}%)".format(k + 1, synset, p))
    print()
| 12,849 | 4,691 |
import pymongo
from bson.son import SON
from pymongo import MongoClient
# encoding=utf-8
__author__ = 'Hinsteny'
print(pymongo.get_version_string())
class SingleClient(object):
    '''
    Single Client hold the client object
    '''
    # Shared connection created at class-definition time (import side effect).
    client = MongoClient('127.0.0.1', 27017)
    client.the_database.authenticate('hinsteny', 'welcome', source='admin', mechanism='SCRAM-SHA-1')

    def __new__(cls, *args, **kw):
        # Singleton: reuse a single instance per class.
        if not hasattr(cls, '_instance'):
            orig = super(SingleClient, cls)
            # BUG FIX: object.__new__ takes no extra arguments on Python 3,
            # so *args/**kw must not be forwarded here (doing so raised
            # TypeError whenever SingleClient was constructed with arguments).
            cls._instance = orig.__new__(cls)
        return cls._instance
def getClient():
    """Create a new MongoDB client for the local server, authenticated
    against the admin source with SCRAM-SHA-1."""
    mongo = MongoClient('127.0.0.1', 27017)
    mongo.the_database.authenticate('hinsteny', 'welcome', source='admin', mechanism='SCRAM-SHA-1')
    return mongo
def test_connection():
    # Smoke test: print the user-document count and one sample document
    # from the cube_test database.
    client = getClient()
    db = client.cube_test
    query = {}
    cursor = db.user.find(query)
    # NOTE(review): Cursor.count() was deprecated in pymongo 3.x and removed
    # in 4.x — confirm against the version printed at import time.
    print(cursor.count())
    print(cursor[0])
def test_addUser():
    """Create the root 'admin' user if no users exist yet; otherwise print
    the first existing user document."""
    client = getClient()
    db = client.admin
    query = {}
    cursor = db.system.users.find(query)
    if cursor.count() == 0:
        # BUG FIX: the original `db.runCommand({createUser})(...)` referenced
        # an undefined name `createUser` and then called the result — a
        # guaranteed NameError. Use pymongo's Database.command helper to run
        # the MongoDB createUser command instead.
        db.command("createUser", "admin", pwd="welcome", roles=["root"])
    else:
        print(cursor[0])
def create_test_data(db):
    """Reset the `things` collection and seed it with sample documents."""
    db.things.drop()
    docs = [
        {"x": 1, "tags": ["dog", "cat"]},
        {"x": 2, "tags": ["cat"]},
        {"x": 2, "tags": ["mouse", "cat", "dog"]},
        {"x": 3, "tags": ["eat", "pear"]},
    ]
    outcome = db.things.insert_many(docs)
    print(outcome.inserted_ids)
def doAggregation(collection, pipeline):
    """Run the aggregation pipeline and print the materialized results."""
    results = collection.aggregate(pipeline)
    print(list(results))
# Do test
if __name__ == "__main__":
    test_connection()
    # test_addUser()
    db = getClient().aggregation_example
    create_test_data(db)
    # Unwind tags and sum x over all unwound documents (single "" group);
    # SON keeps the sort keys in a deterministic order.
    pipeline = [
        {"$unwind": "$tags"},
        {"$group": {"_id": "", "count": {"$sum": "$x"}}},
        {"$sort": SON([("count", -1), ("_id", -1)])}
    ]
    doAggregation(db.things, pipeline)
#
# Quotient and remainder in one call
# Often forgotten, often useful
#
a = 5
b = 3
n, m = divmod(a, b)  # (a // b, a % b) as a pair
print(n)  # quotient: 1
print(m)  # remainder: 2
#
# Round each value up to the next multiple of n
# Used a lot in CodinGame Clash of Code
#
n = 3
idx = list(range(10))
res = [((a + n - 1) // n) * n for a in idx]  # ceil-divide, then scale back
print(idx)  # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(res)  # [0, 3, 3, 3, 6, 6, 6, 9, 9, 9]
#
# Render a long multiplication, schoolbook style
# Used in CodinGame Clash of Code
#
# Numbers to multiply
a = 500
b = 1300
# Second number as a string so we can walk its digits
b_s = str(b)
# One partial product per digit, least-significant first, zeros dropped
partials = [a * int(d) * 10 ** (len(b_s) - 1 - i) for i, d in enumerate(b_s)]
mults = [p for p in reversed(partials) if p != 0]
# Assemble the display rows
s = [str(a), b_s, "-"]
s.extend(str(p) for p in mults)
s.append("-")
s.append(str(sum(mults)))
# Add the multiplication sign to the second row
s[1] = "x " + b_s
# Right-align every row to the widest one
n = max(len(w) for w in s)
s = [w.rjust(n) for w in s]
# Horizontal bars
s[2] = s[-2] = "-" * n
print("\n".join(s))
| 933 | 471 |
from _base import *

# Narrow the shared config down to this module's section.
config = config['classification']
# Text-processing helper from _base (provides norm_wakati) —
# presumably tokenization/normalization; confirm in _base.
tp = TP()
# Tokenizer JSON locations for the keyword and the general ("gen") models.
tokenizer_path = f'{project_root}/{config["tokenizer_path"]}'
tokenizer_gen_path = f'{project_root}/{config["tokenizer_gen_path"]}'
def load_category_master():
    """Return the mst_categories rows (category_id, category_name).

    Re-raises any DB error after printing a short diagnostic.
    """
    try:
        dba = clsDbAccessor()
        try:
            category_master = dba.execQuery("SELECT category_id, category_name FROM mst_categories;")
        finally:
            # Release the connection even when the query fails (the original
            # leaked it on error).
            dba.close()
        return category_master
    except Exception:
        print('failed: SELECT mst_categories')
        # Bare raise preserves the original traceback.
        raise
def load_keyword_master():
    """Return the mst_keywords rows (category_id, keyword).

    Re-raises any DB error after printing a short diagnostic.
    """
    try:
        dba = clsDbAccessor()
        try:
            keyword_master = dba.execQuery("SELECT category_id, keyword FROM mst_keywords;")
        finally:
            # Release the connection even when the query fails.
            dba.close()
        return keyword_master
    except Exception:
        print('failed: SELECT mst_keywords')
        raise
def load_gen_category_master():
    """Return the mst_gen_categories rows (category_id, category_name).

    Re-raises any DB error after printing a short diagnostic.
    """
    try:
        dba = clsDbAccessor()
        try:
            gen_category_master = dba.execQuery("SELECT category_id, category_name FROM mst_gen_categories;")
        finally:
            # Release the connection even when the query fails.
            dba.close()
        return gen_category_master
    except Exception:
        # BUG FIX: the message used to name mst_categories although this
        # function queries mst_gen_categories.
        print('failed: SELECT mst_gen_categories')
        raise
def load_gen_keyword_master():
    """Load the general-category keyword master from CSV (used when
    training locally)."""
    csv_path = f'{project_root}/data/mst_gen_keywords.csv'
    return pd.read_csv(csv_path)
# Module-level masters, loaded once at import time (requires a reachable DB).
category_master = load_category_master()
categories = list(category_master['category_name'].values)
gen_category_master = load_gen_category_master()
gen_categories = list(gen_category_master['category_name'].values)
import tensorflow as tf
from tensorflow.keras import Sequential, layers, losses, optimizers, callbacks
def create_model(emb_dim=10):
    """Build and compile the binary text-classification CNN.

    Args:
        emb_dim: dimensionality of the word-embedding vectors.
    """
    model = Sequential()
    model.add(layers.Embedding(input_dim=10**6, output_dim=emb_dim))
    model.add(layers.Conv1D(256, 3, activation='relu'))
    model.add(layers.GlobalMaxPooling1D())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(
        loss=losses.BinaryCrossentropy(),
        optimizer=optimizers.Adam(),
        metrics=['accuracy'],
    )
    return model
def regex_and(s):
    """Turn a whitespace-separated keyword list into an AND-matching regex.

    Each word becomes a lookahead, so the resulting pattern matches strings
    containing every word, in any order.
    """
    lookaheads = (f'(?=.*{word})' for word in s.split())
    return ''.join(lookaheads)
def create_dataset(texts, keywords):
    """Build a balanced labeled dataset from keyword matches.

    Positives are texts matching any keyword group (all words of a group
    must appear somewhere); an equal-sized negative sample is drawn from
    the remaining texts with a fixed random_state for reproducibility.
    """
    pattern = '|'.join(map(regex_and, keywords))
    is_match = texts.str.contains(pattern, regex=True)
    positives = texts[is_match]
    count = len(positives)
    negatives = texts.drop(positives.index).sample(n=count, random_state=10)
    x = pd.concat([positives, negatives]).map(tp.norm_wakati)
    y = np.array([1] * count + [0] * count)
    return x, y
from tensorflow.keras.preprocessing.text import Tokenizer, tokenizer_from_json
from tensorflow.keras.preprocessing.sequence import pad_sequences
def pad(x, maxlen):
    """Pad/truncate tokenized sequences to `maxlen` (both at the tail)."""
    padded = pad_sequences(x, maxlen=maxlen, padding='post', truncating='post')
    return padded
def train(df):
    """Train one binary classifier per general category and checkpoint each.

    Args:
        df: DataFrame with a 'text' column of raw documents.

    Side effects: writes the tokenizer JSON on the first run and the best
    weights per category under model/gen_classification_model/.
    """
    texts = df['text'].drop_duplicates()

    # Fit and persist the tokenizer once; later runs reuse the saved JSON.
    if not os.path.exists(tokenizer_gen_path):
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(texts.map(tp.norm_wakati))
        with open(tokenizer_gen_path, 'w', encoding='utf-8') as f:
            f.write(json.dumps(tokenizer.to_json(), ensure_ascii=False))
    with open(tokenizer_gen_path) as f:
        tokenizer = tokenizer_from_json(json.load(f))

    # Map each category name to its list of trigger keyword groups.
    keyword_master = load_gen_keyword_master()
    id2category = dict(zip(gen_category_master['category_id'], gen_category_master['category_name']))
    category_keywords = dict()
    # BUG FIX: the group loop variable used to be named `df`, shadowing the
    # function parameter.
    for category_id, group in keyword_master.groupby(['category_id']):
        keywords = group['keyword'].values
        category_keywords[id2category[category_id]] = list(keywords)

    gen_categories = list(category_keywords.keys())
    for category in gen_categories:
        x, y = create_dataset(texts, category_keywords[category])
        x = pad(tokenizer.texts_to_sequences(x), maxlen=40)
        model = create_model(emb_dim=10)
        weights_save_path = f'{project_root}/model/gen_classification_model/{category}.ckpt'
        model.fit(
            x, y,
            epochs=config['epochs'], batch_size=config['batch_size'],
            callbacks=[
                callbacks.EarlyStopping(patience=config['patience']),
                callbacks.ModelCheckpoint(weights_save_path, save_best_only=True, save_weights_only=True)
            ],
            validation_split=0.1
        )
        print(f'{category} end')
def infer(df, categories=categories, gen_categories=gen_categories):
    """Score every row of df against all category models, in-place.

    Adds a 'category' and a 'gen_category' column to df; each cell is a JSON
    string mapping category_id -> {"val": probability-as-string}.

    NOTE: the default arguments capture the module-level category lists at
    import time.
    """
    # Rows with tw_id >= 9e18 get a fixed extra entry under category id '8'
    # below — the meaning of this threshold is not visible here; TODO confirm.
    toppan = (df['tw_id'] >= 9*10**18).map(int).values
    texts = df['wakati_text']
    with open(tokenizer_path) as f:
        tokenizer = tokenizer_from_json(json.load(f))
    x = pad(tokenizer.texts_to_sequences(texts), maxlen=100)
    print('data prepared!')
    category2id = dict(zip(category_master['category_name'], category_master['category_id']))
    res = []
    n_categories = []
    for category in categories:
        model = create_model(emb_dim=2)
        try:
            model.load_weights(f'{project_root}/model/classification_model/{category}.ckpt')
            n_categories.append(category)
        except:
            # NOTE(review): bare except silently skips any category whose
            # checkpoint is missing or unreadable.
            continue
        predicted = np.around(model.predict(x).ravel(), 4)# probability
        res.append(predicted)
        print(f'{category} end')
        del model
    # Keep only the categories whose weights actually loaded.
    categories = n_categories
    category_score = [json.dumps(dict([(str(category2id[categories[j]]), {"val": str(res[j][i])}) for j in range(len(categories))] + [('8', {'val': str(toppan[i])})]), ensure_ascii=False) for i in range(len(res[0]))]
    df['category'] = category_score
    '''
    記事カテゴリ
    '''
    # (The bare string above is a section marker: "article category".)
    with open(tokenizer_gen_path) as f:
        tokenizer = tokenizer_from_json(json.load(f))
    # NOTE(review): train() pads the gen-model inputs to maxlen=40 but this
    # uses 100 — confirm which length the deployed checkpoints expect.
    x = pad(tokenizer.texts_to_sequences(texts), maxlen=100)
    print('data prepared!')
    category2id = dict(zip(gen_category_master['category_name'], gen_category_master['category_id']))
    res = []
    n_categories = []
    for category in gen_categories:
        model = create_model(emb_dim=10)
        try:
            model.load_weights(f'{project_root}/model/gen_classification_model/{category}.ckpt')
            n_categories.append(category)
        except:
            continue
        predicted = np.around(model.predict(x).ravel(), 4)# probability
        res.append(predicted)
        print(f'{category} end')
        del model
    gen_categories = n_categories
    category_score = [json.dumps(dict([(str(category2id[gen_categories[j]]), {"val": str(res[j][i])}) for j in range(len(gen_categories))]), ensure_ascii=False) for i in range(len(res[0]))]
    df['gen_category'] = category_score
# from main import update_local_df
def update_past_all():
    """Re-run inference over all processed tweets and persist the scores."""
    dba = clsDbAccessor()
    df = dba.execQuery("SELECT `tw_id` FROM `tbl_twitters` WHERE proc_flag=1 AND deleted_at IS NULL;")
    dba.close()
    print(df)
    # Join the DB ids with the locally cached rows; dropna discards ids that
    # have no local data.
    local_df = load_local_df().set_index('tw_id').reset_index()
    print(local_df.head())
    df = df.merge(local_df, how='left', on='tw_id').set_index('tw_id', drop=False).dropna()
    print(df)
    # infer() adds the 'category' / 'gen_category' columns in-place.
    infer(df)
    print(df.head())
    update_tbl_twitter(df, ['category', 'gen_category'])
    pass
if __name__ == '__main__':
    # Training is run manually when needed; the default entry point
    # rescores the historical data.
    # df = pd.read_csv(f'{project_root}/data/tbl_twitter.csv', names=['text'])
    # train(df)
    update_past_all()
    pass
import os
import config
import sys
import logging
import datetime
import tornado.web
import tornado.ioloop
import tornado.options
import random
import handlers
DEBUG = True
DIRNAME = os.path.dirname(os.path.abspath(__file__))
STATIC_PATH = os.path.join(DIRNAME, 'static')
TEMPLATE_PATH = os.path.join(DIRNAME, 'template')
# NOTE(review): logging is not configured anywhere in this file, so this
# debug line is a no-op unless the root logger level is set elsewhere.
logging.debug(DIRNAME)

# Tornado application settings.
settings = {
    'debug': DEBUG,
    'template_path': TEMPLATE_PATH,
    'static_path': STATIC_PATH
}
# Placeholder database handle, passed to every handler via the application.
db = {'database':'dummy'}
application = tornado.web.Application([
    (r"/", handlers.MainHandler),
    (r"/admin", handlers.AdminHandler),
    (r"/dev", handlers.DeviceHandler),
    # (r"/", BrowserHandler),
    # (r"/add", AddUserHandler),
    # (r"/remove", RemoveUserHandler),
    # (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": STATIC_PATH}),
], db=db, **settings)
if __name__ == "__main__":
    tornado.options.parse_command_line()
    application.listen(8888)
    # BUG FIX: this log line used to come after IOLoop.start(), which blocks
    # until the loop is stopped, so it never ran while the server was up.
    logging.debug('now listening on 8888')
    tornado.ioloop.IOLoop.current().start()
| 1,024 | 351 |
"""Hyperfire Class to model the actual BLASTER.
"""
import time
from pi_turret.dc_motor.motor import DCMotor, FEED_PIN, FLYWHEEL_PIN
class Hyperfire:
    """Model of the actual BLASTER hardware: flywheel and feeder DC motors."""

    def __init__(self):
        self.flywheels = DCMotor(FLYWHEEL_PIN)
        self.feeder = DCMotor(FEED_PIN)

    def flywheels_on(self):
        """Spin up the flywheel motors."""
        self.flywheels.power()

    def flywheels_off(self):
        """Stop the flywheel motors."""
        self.flywheels.off()

    def feed_on(self):
        """Start the dart-feeder motor."""
        self.feeder.power()

    def feed_off(self):
        """Stop the dart-feeder motor."""
        self.feeder.off()

    def burst_fire(self, duration=0.2):
        """Rev the flywheels, feed darts for `duration` seconds, spin down."""
        self.flywheels_on()
        time.sleep(1)          # rev-up time
        self.feed_on()
        time.sleep(duration)   # firing window
        self.feed_off()
        time.sleep(1)          # spin-down time (check this)
        self.flywheels_off()
if __name__ == "__main__":
    # Manual hardware smoke test: one half-second burst, then release GPIO.
    BLASTER = Hyperfire()
    BLASTER.burst_fire(0.5)
    DCMotor.cleanup()
| 1,227 | 443 |
import json
import logging
import os
import cv2
import numpy as np
from tqdm import tqdm
logger = logging.getLogger('CameraUtils')
class CameraCalibration(object):
    """Camera calibration from chessboard images, plus undistortion helpers.

    An instance is either calibrated from a directory of chessboard photos
    (``n_cols``/``n_rows``/``chessboard_img_dir``) or restored from a JSON
    file previously written by ``save_params_to_file`` (``params_load_path``).
    After calibration ``self.mtx`` holds the 3x3 camera matrix and
    ``self.dist`` the distortion coefficients.
    """

    @staticmethod
    def get_image_paths(chessboard_img_dir):
        """Return the full paths of all image files in ``chessboard_img_dir``.

        Raises:
            ValueError: if the path does not exist or is a regular file.
            RuntimeError: if the directory contains no image files.
        """
        allowed_extensions = ['.jpg', '.png', '.jpeg']
        full_image_paths = []
        if not os.path.exists(chessboard_img_dir) or os.path.isfile(chessboard_img_dir):
            raise ValueError("Chessboard images directory not found")
        files_in_dir = os.listdir(chessboard_img_dir)
        for _file in files_in_dir:
            # Compare extensions case-insensitively so '.JPG' etc. are accepted too.
            if os.path.splitext(_file)[-1].lower() in allowed_extensions:
                full_image_paths.append(os.path.join(chessboard_img_dir, _file))
            else:
                logger.info("Skipping {name} - Not an image file".format(name=_file))
        if not full_image_paths:
            raise RuntimeError("No chessboard images found")
        return full_image_paths

    def __init__(self, n_cols=None, n_rows=None, chessboard_img_dir=None,
                 params_load_path=None, store_output_images=False,):
        '''
        Args:
            n_cols (int) : Number of corners along horizontal axis
            n_rows (int) : Number of corners along vertical axis
            chessboard_img_dir (str) : directory where the chessboard images are stored
            params_load_path (str) : JSON file with previously saved parameters; when
                given, no calibration is run and the other arguments are ignored
            store_output_images (bool) : write annotated copies of the chessboard
                images (with detected corners drawn) into ``<images_dir>/output``
        '''
        # Fast path: restore previously saved parameters instead of calibrating.
        if params_load_path:
            if os.path.exists(params_load_path):
                self.load_params_from_file(params_load_path)
                logger.info('Camera params loaded and ready to use')
            else:
                logger.error('Cannot load params from file. Please recalibrate')
                raise ValueError('Cannot load params from file. Please recalibrate')
            return
        if not all([n_cols, n_rows, chessboard_img_dir]):
            raise ValueError('Pass in chess board params and location to images')
        self.images_dir = chessboard_img_dir
        self.image_paths = self.get_image_paths(chessboard_img_dir)
        self.pattern_size = (n_cols, n_rows)
        self.mtx = None                 # 3x3 camera matrix, set by calibration
        self.dist = None                # distortion coefficients, set by calibration
        self.output_images_path = []    # annotated output images (if stored)
        self.failed_images = []         # images where corner detection failed
        self.__is_calibrated = False
        self.__store_output_images = store_output_images
        self.__calibrate_camera()

    def load_params_from_file(self, json_file_path):
        """Load the camera matrix and distortion coefficients from a JSON file.

        Raises:
            ValueError: if the file lacks the expected 'mtx'/'dist' keys.
        """
        expected_keys = ['mtx', 'dist']
        with open(json_file_path, 'r') as fp:
            data = json.load(fp)
        if not all(k in data for k in expected_keys):
            raise ValueError('Cannot load camera params. Use a different file or recalibrate')
        self.mtx = np.array(data['mtx'])
        self.dist = np.array(data['dist'])
        self.__is_calibrated = True

    def save_params_to_file(self, file_path):
        """Write the camera matrix and distortion coefficients to ``file_path`` as JSON."""
        data = {
            'mtx': self.mtx.tolist(),
            'dist': self.dist.tolist()
        }
        with open(file_path, 'w') as fp:
            json.dump(data, fp)

    def __calibrate_camera(self):
        """Detect chessboard corners in every image and compute the camera model."""
        # Termination criteria for sub-pixel corner refinement: stop after
        # 30 iterations or when the correction drops below 0.001.
        termination_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # Matching collections of 3D (board frame) and 2D (image frame) corners.
        chessboard_corners_3d = []
        image_points_2d = []
        corner_points_3d = np.zeros((self.pattern_size[0] * self.pattern_size[1], 3), np.float32)
        # 3D coordinates of the board corners on the z=0 plane (unit: one square).
        corner_points_3d[:, :2] = np.mgrid[0: self.pattern_size[0], 0:self.pattern_size[1]].T.reshape(-1, 2)  # flake8: noqa
        # If requested, create a target folder for annotated output images.
        output_imgs_dir = os.path.join(self.images_dir, 'output')
        if self.__store_output_images and not os.path.exists(output_imgs_dir):
            os.makedirs(output_imgs_dir)
        for image in tqdm(self.image_paths, desc='Finding chessboard corners'):
            img = cv2.imread(image)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            found_corners, corners = cv2.findChessboardCorners(gray, self.pattern_size,
                                                               None,
                                                               cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK)
            if found_corners:
                chessboard_corners_3d.append(corner_points_3d)
                # Refine the detected corners to sub-pixel accuracy.
                accurate_corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                                    termination_criteria)
                image_points_2d.append(accurate_corners)
                if self.__store_output_images:
                    new_img_path = os.path.join(output_imgs_dir, os.path.basename(image))
                    cv2.drawChessboardCorners(img, self.pattern_size, accurate_corners,
                                              found_corners)
                    cv2.imwrite(new_img_path, img)
                    self.output_images_path.append(new_img_path)
            else:
                logger.debug("Failed to find chessboard in {name}".format(name=image))
                self.failed_images.append(image)
        # Fail early with a clear message when no board was detected in any image;
        # cv2.calibrateCamera would otherwise raise an opaque OpenCV error.
        if not image_points_2d:
            raise RuntimeError("Calibration failed ! Retry with better chessboard images")
        # NOTE: the first return value of calibrateCamera is the RMS re-projection
        # error (a float); it is used here as a truthiness check.
        (success, self.mtx, self.dist, _, _) = cv2.calibrateCamera(chessboard_corners_3d,
                                                                   image_points_2d, gray.shape[::-1], None, None)
        if not success:
            raise RuntimeError("Calibration failed ! Retry with better chessboard images")
        logger.info(('Successfully calculated Camera Matrix.'
                     'Skipped processing {count} images').format(count=len(self.failed_images)))
        self.__is_calibrated = True

    def get_camera_params(self, redo_calibration=False):
        """Return ``(camera_matrix, distortion_coefficients)``, calibrating first if needed."""
        if not self.__is_calibrated or redo_calibration:
            self.__calibrate_camera()
        return (self.mtx, self.dist)

    def get_processed_images(self):
        '''Returns a list of chessboard images with corners drawn and a list of images
        in which corner detection failed
        Returns data (dict):
            data['output_images'] : list of paths with corners drawn
            data['failed_images'] : list of path in which corner detection failed
        '''
        if not self.__store_output_images:
            # logger.warn is a deprecated alias of logger.warning; message typo fixed.
            logger.warning(('Output images are not stored. To write output images,'
                            ' set "store_output_images=True" during init'))
        return {
            'output_images': self.output_images_path,
            'failed_images': self.failed_images
        }

    def undistort_image(self, image):
        '''Takes a numpy array representing an image or a string pointing to an image
        path and undistorts it with the calibrated camera matrix and distortion
        coefficients.'''
        if not self.__is_calibrated:
            self.__calibrate_camera()
        img_data = cv2.imread(image) if isinstance(image, str) else image
        return cv2.undistort(img_data, self.mtx, self.dist, None, self.mtx)
| 7,281 | 2,171 |
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from bisector import *
from astropy.time import Time
from ccf2rv import *
from per_epoch_table import per_epoch_table
def sinusoidal(phase, dphase, amp, zp):
    """Sine model: ``amp * sin(phase + dphase) + zp`` (phase in radians)."""
    return amp * np.sin(phase + dphase) + zp
# do not *formally* exclude an order, but this is done later with the bandpass keyword
exclude_orders = [28,47,48]
# target star and CCF mask used by the pipeline
# NOTE(review): `object` shadows the Python builtin of the same name.
object = 'TOI-736'
mask = 'gl699_neg'
# per-order velocity combination method
method = 'all'
sanitize = True
# number of median-absolute deviations within an epoch to consider a point discrepant
# NOTE(review): this comment appears to describe the nMAD_cut used further below
# with per_epoch_table, not any argument of get_object_rv.
tbl,dico = get_object_rv(object,mask =mask,
                         method = method,force = True,
                         exclude_orders = exclude_orders,
                         snr_min = 20.0, velocity_window = 20, sanitize = sanitize,
                         dvmax_per_order = 500.0, bandpass = 'H',
                         doplot = True, do_blacklist = True,
                         detailed_output = True,
                         sed_match = False)
# Per-epoch radial velocities, centred on their mean.
rv = np.array(tbl['RV'])
rv -= np.mean(rv)
# Stack of mean CCFs, one column per epoch.
ccf = np.array(dico['MEAN_CCF'])
ccf2 = np.array(ccf)
# Number of epochs taken from the data itself instead of the hard-coded 34,
# so the analysis works for any number of observations.
n_epochs = ccf2.shape[1]
# NOTE(review): the factor 10 assumes the CCF velocity grid step is 0.1 km/s
# (rv is in km/s and np.roll shifts by whole pixels) -- confirm against ccf_RV.
for i in range(n_epochs):
    ccf2[:,i] = np.roll(ccf2[:,i],int(-rv[i]*10))
# Mean CCF after aligning all epochs to zero velocity.
moy = np.mean(ccf2,axis=1)
# Subtract the common profile, then shift each residual back to its epoch velocity.
for i in range(n_epochs):
    ccf2[:,i] -= moy
for i in range(n_epochs):
    ccf2[:,i] = np.roll(ccf2[:,i],int(rv[i]*10))
# Scan a range of velocity amplification factors and record the mean residual
# CCF obtained for each one.
damps = np.arange(10,55,0.1)
all_ccs = np.zeros([ccf2.shape[0],len(damps)])
for ite in range(len(damps)):
    print(ite)
    ccf3 = np.zeros_like(ccf2)
    for i in range(n_epochs):
        ccf3[:,i] = np.roll(ccf2[:,i],int(damps[ite]*rv[i]*10))
    all_ccs[:,ite] = np.nanmean(ccf3,axis=1)
# mean CCF profile over the pipeline's velocity grid
plt.plot(dico['ccf_RV'],moy)
plt.show()
# response at the CCF minimum as a function of the amplification factor
plt.plot(damps,all_ccs[np.argmin(moy),:])
plt.show()
# 2D map: amplification factor vs velocity, normalized by the overall scatter
plt.imshow(all_ccs/np.std(all_ccs),aspect = 'auto',extent = [np.min(damps),np.max(damps),np.min(dico['ccf_RV']),np.max(dico['ccf_RV'])])
plt.show()
# period for the sinusoidal curve (days)
period = 14.4
# create the table with bis per epoch
tbl_bin = per_epoch_table(tbl,nMAD_cut = 5)
# get time stamps friendly for plotting
t2 = Time(tbl_bin['MJDATE_MEAN'], format = 'mjd')
t3 = Time(tbl['MJDATE'], format = 'mjd')
# get phase for sine fitting
phase_bin = 2*np.pi*tbl_bin['MJDATE_MEAN']/period
phase = 2*np.pi*tbl['MJDATE']/period
# fit sinusoid
# Explicit import: curve_fit was previously only (possibly) available through
# the star imports above; importing it directly makes the dependency unambiguous.
from scipy.optimize import curve_fit
fit, pcov = curve_fit(sinusoidal, phase_bin, tbl_bin['RV'])
# some plotting fiddling
dt = np.max(tbl_bin['MJDATE_MEAN']) - np.min(tbl_bin['MJDATE_MEAN'])
time_plot = np.arange(np.min(tbl_bin['MJDATE_MEAN'])-dt/10,np.max(tbl_bin['MJDATE_MEAN'])+dt/10,dt/1000)
phase_plot = 2*np.pi*time_plot/period
# model evaluated on the binned epochs, the raw epochs and a dense grid
model_bin = sinusoidal(phase_bin,*fit)
model= sinusoidal(phase,*fit)
model_plot = sinusoidal(phase_plot,*fit)
print('Amplitude of the sinusoidal at {0} days: {1:.2f} m/s'.format(period, 1000*fit[1]))
print('Mean velocity: {1:.2f} m/s'.format(period, 1000*fit[2]))
print('Mean/Median per-epoch STDDEV {0}/{1} km/s'.format(np.mean(tbl_bin["ERROR_RV"])
                                                         ,np.median(tbl_bin["ERROR_RV"])))
fig, ax = plt.subplots(nrows = 2, ncols = 1,sharex = True, figsize = (14,8))
# Epoch-binned RV points, plotted once (the original redrew the same points on
# every pass of the error-bar loop below).
ax[0].plot_date(t2.plot_date,tbl_bin['RV'],'g.')
# vertical error bars for each binned epoch
for i in range(len(t2)):
    ax[0].plot_date([t2[i].plot_date,t2[i].plot_date],[tbl_bin['RV'][i]-tbl_bin['ERROR_RV'][i],
                                                       tbl_bin['RV'][i]+tbl_bin['ERROR_RV'][i]],'g')
# individual (unbinned) measurements
ax[0].plot_date(t3.plot_date,tbl['RV'],'r.',alpha = 0.5)
ax[1].errorbar(t3.plot_date,tbl['RV'] - model,yerr=tbl['ERROR_RV'], linestyle="None",
               fmt='o',color = 'green', alpha = 0.2, label = 'Individual measurements')
# best-fit sinusoid on the dense time grid
ax[0].plot(Time(time_plot, format = 'mjd').plot_date,model_plot,'r:')
ax[0].set(ylabel = 'Velocity [km/s]',title = object)
ax[1].errorbar(t2.plot_date, tbl_bin['RV'] - model_bin, yerr=tbl_bin['ERROR_RV'],
               linestyle="None", fmt='o',
               alpha = 0.5, capsize = 2, color = 'black',label = 'Epoch mean')
ax[1].legend()
# zero line for the residual panel
ax[1].plot(Time(time_plot, format = 'mjd').plot_date,np.zeros(len(time_plot)),'r:')
ax[1].set(xlabel = 'Date', ylabel = 'Residuals [km/s]',ylim = [-.15,0.15],
          xlim = [np.min(Time(time_plot, format = 'mjd').plot_date),
                  np.max(Time(time_plot, format = 'mjd').plot_date)]
          )
# slanted date labels for readability
for label in ax[1].get_xticklabels():
    label.set_rotation(25)
    label.set_ha('right')
plt.tight_layout()
plt.savefig(object+'.pdf')
plt.show()
# scatter of the binned residuals about the sine fit
sigma = np.std((tbl_bin['RV'] - model_bin))
mean_error = np.mean(tbl_bin['ERROR_RV'])
median_error = np.nanmedian(tbl_bin['ERROR_RV'])
# NOTE(review): this is the std of the normalized residuals, not a formal
# reduced chi^2 (which would be the mean of their squares) -- confirm intent.
reduced_chi2 = np.std((tbl_bin['RV'] - model_bin)/tbl_bin['ERROR_RV'])
print('\n--- values for the per-night weighted-mean points ---\n')
print(' mean ERROR_RV {0:.2f} m/s, median ERROR_RV {1:.2f} m/s, '
      'reduced chi2 {2:.2f} '.format(mean_error*1e3, median_error*1e3, reduced_chi2))
mean_error = np.mean(tbl['ERROR_RV'])
median_error = np.nanmedian(tbl['ERROR_RV'])
print('\n--- values for the individual points ---\n')
print(' mean ERROR_RV {0:.2f} m/s, median ERROR_RV {1:.2f} m/s'.format( mean_error*1e3,median_error*1e3))
# NOTE(review): the log file name is hard-coded to TOI1278 while `object` is
# TOI-736 -- probably left over from another target; confirm.
f = open('TOI1278_obslog.tex','w')
# create an observation log in tex format
# Nice when you want to write a paper in the end, hey, that's the point of all these observations!
for i in range(len(tbl)):
    f.write('{0:.4f} & ${1:.3f} \pm {2:.3f}$ & {3:.3f} \\\\ \n'.format(tbl['MJDATE'][i],tbl['RV'][i], tbl['ERROR_RV'][i],tbl['D2_RESIDUAL_CCF'][i]))
f.close()
##############################################################################################################################################################
##############################################################################################################################################################
"""
Training scripts for classification models presented in our paper.
Replace or modify the config file in the following part of the code to make changes to train different models.
# load the config file
config = toml.load("cfg/pretrained_classifier.toml")
"""
##############################################################################################################################################################
##############################################################################################################################################################
import os
import sys
import toml
import torch
import random
import numpy as np
from torch import optim
from torch.cuda.amp import autocast, GradScaler
import torchvision.transforms.functional as TF
import train_ae
import utils as utils
from pretrained_model import PretrainedClassifier, classification_loss
from dataset import create_dataset
##############################################################################################################################################################
##############################################################################################################################################################
def model_setup(config):
    """Build the classifier, its optimizer, and the training data loader.

    Args:
        config (dict): parsed TOML configuration.

    Returns:
        tuple: ``(model, optimizer_dict, train_loader)`` where the optimizer
        is stored under the "method" key of a dict.

    Raises:
        ValueError: if the config describes an autoencoder instead of a classifier.
    """
    # Guard against using an autoencoder config with this classification script.
    if config["model"]["type"] != "classification":
        raise ValueError("Your config is for an autoencoder model, but this script is for classification models. Please use train_ae.py instead.")

    # Training data loader.
    train_loader = create_dataset(
        which_dataset=config["dataset"]["name"],
        which_factor=config["dataset"]["factor"],
        use_triplet=False,
        should_augment=config["training"]["augment"],
        make_scene_impossible=False,
        make_instance_impossible=False,
        batch_size=config["training"]["batch_size"],
        shuffle=True,
        get_all=False
    )

    # One output unit per distinct label string.
    nbr_classes = len(train_loader.dataset.string_labels_to_integer_dict)

    # Classifier moved to the configured device.
    model = PretrainedClassifier(config["model"], nbr_classes=nbr_classes).to(config["device"])

    # Resolve the optimizer class by name from torch.optim, then build it.
    optim_cls = getattr(optim, config["training"]["optimizer"])
    optim_kwargs = {
        "lr": config["training"]["learning_rate"],
        "weight_decay": config["training"]["weight_decay"],
    }
    if config["training"]["optimizer"] == "SGD":
        # SGD additionally gets Nesterov momentum.
        optim_kwargs.update(momentum=0.9, nesterov=True)
    optimizer = {"method": optim_cls(model.parameters(), **optim_kwargs)}

    print('=' * 73)
    print(optimizer["method"])
    print('=' * 73)
    return model, optimizer, train_loader
##############################################################################################################################################################
def train_one_epoch(model, optimizer, scaler, train_loader, config, nbr_epoch):
    """Run one mixed-precision training epoch and return the (updated) model.

    Args:
        model: classifier being trained.
        optimizer (dict): optimizer stored under the "method" key.
        scaler: torch.cuda.amp.GradScaler for mixed-precision training.
        train_loader: training data loader.
        config (dict): configuration; config["device"] selects the device.
        nbr_epoch (int): zero-based epoch index (printed as 1-based).
    """
    # Enable training-mode behaviour (dropout, batch-norm statistics).
    model.train()
    running_loss = 0.0
    label_lookup = train_loader.dataset.string_labels_to_integer_dict
    for batch in train_loader:
        optimizer["method"].zero_grad()
        images = batch["image"].to(config["device"])
        # Combine the three per-seat ground truths into a single class index.
        keys = [
            f"{left.item()}_{mid.item()}_{right.item()}"
            for left, mid, right in zip(batch["gt_left"], batch["gt_middle"], batch["gt_right"])
        ]
        labels = torch.tensor([label_lookup[k] for k in keys]).to(config["device"])
        # Forward pass and loss under autocast; the backward pass stays outside
        # (backward under autocast is not recommended).
        with autocast():
            logits = model(images)
            loss = classification_loss(logits, labels)
        # Scaled backward + optimizer step: scaler.step() unscales first and
        # skips the step if the gradients contain infs/NaNs; update() then
        # adapts the scale for the next iteration.
        scaler.scale(loss).backward()
        scaler.step(optimizer["method"])
        scaler.update()
        running_loss += loss.item()
    print(f"[Training] \tEpoch: {nbr_epoch+1} Total Loss: {running_loss:.4f}")
    return model
##############################################################################################################################################################
def evaluate(model, train_loader, loader_dict, config, save_folder, nbr_epoch):
    """Measure classification accuracy on each test vehicle's data loader.

    ``train_loader`` is only used for its string-label -> class-index dict.
    Each batch is scored twice: once as-is and once horizontally flipped with
    the left/right ground truths swapped, doubling the scored sample count.
    Returns {vehicle: {"accuracy": float}, ..., "epoch": nbr_epoch}.
    """
    # make sure we are evaluating
    model.eval()
    # we do not need to keep track of gradients
    with torch.no_grad():
        performances = dict()
        # for the loader of each test vehicle
        for vehicle, loader in loader_dict.items():
            correct = 0
            total = 0
            # for each batch
            for batch_images in loader:
                # push to gpu
                input_images = batch_images["image"].to(config["device"])
                labels_left = batch_images["gt_left"]
                labels_middle = batch_images["gt_middle"]
                labels_right = batch_images["gt_right"]
                # map each (left, middle, right) ground-truth triple to a single class index
                labels = torch.tensor([train_loader.dataset.string_labels_to_integer_dict[str(x.item())+"_"+str(y.item())+"_"+str(z.item())] for x,y,z in zip(labels_left, labels_middle, labels_right)]).to(config["device"])
                # we input the distorted input image
                classif_output = model(input_images)
                _, predictions = torch.max(classif_output, 1)
                correct += (predictions == labels).sum().item()
                total += labels.size(0)
                # Score the horizontally flipped images as well: flipping swaps the
                # left and right seats, so the label triple is built with x and z
                # exchanged before the lookup.
                flipped_input_images = torch.stack([TF.hflip(x) for x in input_images])
                flipped_labels = torch.tensor([train_loader.dataset.string_labels_to_integer_dict[str(z.item())+"_"+str(y.item())+"_"+str(x.item())] for x,y,z in zip(labels_left, labels_middle, labels_right)]).to(config["device"])
                flipped_classif_output = model(flipped_input_images)
                _, flipped_predictions = torch.max(flipped_classif_output, 1)
                correct += (flipped_predictions == flipped_labels).sum().item()
                total += flipped_labels.size(0)
            # compute the epoch accuracy
            accuracy = correct / total
            performances[vehicle] = dict()
            performances[vehicle]["accuracy"] = accuracy
            print(f"[Testing] \tEpoch: {nbr_epoch+1}, Vehicle: {vehicle}, Accuracy: {100*accuracy:.2f}% ({correct}/{total})")
            # the TICam vehicle's accuracy is additionally appended to the run's log file
            if vehicle.lower() == "ticam":
                utils.append_accuracy(save_folder, accuracy)
    performances["epoch"] = nbr_epoch
    return performances
##############################################################################################################################################################
def train(config):
    """Full training entry point: set up GPU/data/model, train, evaluate
    periodically, and save the best and last checkpoints.

    Args:
        config (dict): parsed TOML configuration; ``config["device"]`` is set here.
    """
    #########################################################
    # GPU
    #########################################################
    # specify which gpu should be visible
    os.environ["CUDA_VISIBLE_DEVICES"] = config["training"]["gpu"]
    # save the gpu settings
    config["device"] = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # gradscaler to improve speed performance with mixed precision training
    scaler = GradScaler()
    #########################################################
    # Setup
    #########################################################
    # create the folders for saving
    save_folder = train_ae.folder_setup(config)
    # create the model, optimizer and data loader
    model, optimizer, train_loader = model_setup(config)
    # get also a test loader for evaluation on unseen dataset
    test_loader = train_ae.get_test_loader(config, real_only=True)
    #########################################################
    # Training
    #########################################################
    # keep track of time
    timer = utils.TrainingTimer()
    # best performance so far
    best_performance = {"ticam": {"accuracy" : 0}}
    # for each epoch
    for nbr_epoch in range(config["training"]["epochs"]):
        # train a single epoch
        model = train_one_epoch(model, optimizer, scaler, train_loader, config, nbr_epoch)
        # evaluate a single epoch
        # NOTE(review): "nbr_epoch == 1" adds an extra early evaluation after the
        # *second* epoch -- if the first epoch was meant, this should be 0.
        if (nbr_epoch+1) % config["training"]["frequency"] == 0 or nbr_epoch == 1:
            performances = evaluate(model, train_loader, test_loader, config, save_folder, nbr_epoch)
            # save the best model (ranked by TICam accuracy)
            if performances["ticam"]["accuracy"] > best_performance["ticam"]["accuracy"]:
                best_performance = performances
                torch.save(model.state_dict(), save_folder["checkpoints"] / "best_model.pth")
    #########################################################
    # Aftermath
    #########################################################
    # save the last model
    torch.save(model.state_dict(), save_folder["checkpoints"] / "last_model.pth")
    # save the transformation from string to integer labels
    np.save(save_folder["checkpoints"] / 'label_dict.npy', train_loader.dataset.string_labels_to_integer_dict)
    print("=" * 37)
    timer.print_end_time()
    print("=" * 37)
    print("Best performance:")
    for key, value in best_performance.items():
        if key != "epoch":
            print(f"{key}:{100*value['accuracy']:.2f}")
        else:
            print(f"{key}:{value}")
    print("=" * 37)
    # reset the stdout with the original one
    # this is necessary when the train function is called several times
    # by another script
    # NOTE(review): assumes folder_setup installed a tee-like object on
    # sys.stdout whose .end() returns the original stream -- confirm.
    sys.stdout = sys.stdout.end()
##############################################################################################################################################################
##############################################################################################################################################################
if __name__ == "__main__":
    # Fix every RNG we rely on so runs are reproducible, and force cuDNN into
    # its deterministic (non-benchmarking) mode.
    SEED = 42
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(SEED)
    random.seed(SEED)
    # Train the classifier described by the config file.
    train(toml.load("cfg/pretrained_classifier.toml"))
from model.utils.data_engineering import DataEngineering
from model.prediction_model.regression import Regression
# Create an instance for DataEngineering and load data from CSV
csv_path = "data/area_01.csv"
data_e = DataEngineering()
data_e.load_data(csv_path)
data_e.clean_data()
# Create new features
# "age" feature: years elapsed since the most recent record ("año" = year)
max_date = data_e.get_data()["año"].max()
age = max_date - data_e.get_data()["año"]
data_e.add_column("age", age)
# "flow" feature: integer codes for the categorical E_FLUJO column
flow_data = data_e.get_data()["E_FLUJO"].copy().astype("category").cat.codes
data_e.add_column("flow", flow_data)
# Set features and label
# NOTE(review): BBPD presumably stands for barrels per day -- confirm.
features = ["flow",
            "NU_COORD_UTM ESTE",
            "NU_COORD_UTM NORTE",
            "°API",
            "age"]
label = "BBPD"
data_e.set_features(features)
data_e.set_label(label)
# Split Train-Test data
data_e.split_data()
# Create a Model
model = Regression(data_e)
# Train and test the model
model.train()
print(f"------------------------------\nMean score: {model.score()}")
# Make a prediction on the first held-out test sample
model.predict(data_e.x_test.iloc[0], data_e.y_test.iloc[0])
| 1,072 | 390 |
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.contrib.auth.models import User
class City(models.Model):
    """A city with its geographic coordinates."""
    class Meta:
        verbose_name = "City"
        verbose_name_plural = "Cities"

    name = models.CharField("City name", max_length=50)
    # Latitude is bounded to [-90, 90]; the previous [-180, 180] range accepted
    # impossible latitudes. Longitude spans [-180, 180]. ("Longtitude" typo
    # fixed.) NOTE: these field changes require a Django migration.
    lat = models.FloatField("Latitude", validators=[MinValueValidator(-90), MaxValueValidator(90)])
    lon = models.FloatField("Longitude", validators=[MinValueValidator(-180), MaxValueValidator(180)])

    def __str__(self):
        return str(self.name)
class FavouriteCity(models.Model):
    """Links a user to a city they have marked as a favourite."""
    class Meta:
        verbose_name = "Favourite city"
        verbose_name_plural = "Favourite cities"

    # Deleting either side removes the favourite entry as well.
    city = models.ForeignKey(City, on_delete=models.CASCADE, related_name="favourites")
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="favourites")

    def __str__(self):
        return "{} ({})".format(self.city.name, self.user.username)
| 1,026 | 334 |
# coding: utf-8
"""
IdCheck.IO API
Check identity documents
OpenAPI spec version: 0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import models into model package
from .check_summary_of_the_submitted_document import CheckSummaryOfTheSubmittedDocument
from .classification_of_the_submitted_document import ClassificationOfTheSubmittedDocument
from .control import Control
from .control_group import ControlGroup
from .detailed_information_of_the_holder_of_the_submitted_document import DetailedInformationOfTheHolderOfTheSubmittedDocument
from .detailed_information_of_the_submitted_document import DetailedInformationOfTheSubmittedDocument
from .error_response import ErrorResponse
from .event_date import EventDate
from .extracted_image import ExtractedImage
from .generic_data import GenericData
from .health_response import HealthResponse
from .image import Image
from .image_indicator import ImageIndicator
from .image_list_response import ImageListResponse
from .image_request import ImageRequest
from .mrz import Mrz
from .mrz_list_response import MrzListResponse
from .mrz_request import MrzRequest
from .mrz_response import MrzResponse
from .report_response import ReportResponse
from .result_response import ResultResponse
from .task_response import TaskResponse
from .user_response import UserResponse
| 1,927 | 520 |
#!/usr/bin/env python
from setuptools import setup
import os
import pyageng
# Package metadata for distribution via setuptools.
setup(name='pyageng',
      # single-source the version from the package itself
      version = pyageng.__version__,
      description='Code for the book "Measurements and Data Analysis for Agricultural Engineers using Python"',
      author='Matti Pastell',
      author_email='matti.pastell@helsinki.fi',
      url='http://pyageng.mpastell.com',
      # reStructuredText long description shown on the package index page
      long_description =\
"""
Python for Agricultural Engineers
---------------------------------
This Python package is used in my (not quite finished) book " Measurements and Data Analysis for Agricultural Engineers Using Python".
You can read the current version book online at: `<http://pyageng.mpastell.com/book>`_
""",
      packages=['pyageng'],
      license='LICENSE.txt',
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Science/Research',
          'Topic :: Scientific/Engineering',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.4'
      ]
      )
| 1,079 | 311 |
# Day32 of my 100DaysOfCode Challenge
# send a quote on monday using email
import random
import smtplib
import datetime as dt
# function to send email
def send_email(_quote):
    """Email the given quote with a 'Monday Motivation' subject line via SMTP."""
    # NOTE(review): credentials are hard-coded placeholders; move them to
    # environment variables or a secrets store before real use.
    my_email = "abc@example.com"
    password = "qwerty@1234"
    # opening connection using 'with', as done for opening a file
    with smtplib.SMTP("smtp.example.com") as connection:
        # upgrade the connection to TLS before authenticating
        connection.starttls()
        connection.login(user=my_email, password=password)
        connection.sendmail(
            from_addr=my_email,
            to_addrs="email@example.com",
            msg=f"Subject:Monday Motivation\n\n{_quote}"
        )
# accessing the file and choose a random quote and call send email on monday
now = dt.datetime.now()
weekday = now.weekday()
# weekday() == 0 means Monday
if weekday == 0:
    with open("quotes.txt") as quote_file:
        # readlines() (not readline()) so we get the list of all quotes; the
        # original read only the first line and random.choice then picked a
        # single *character* from it rather than a whole quote.
        all_quotes = quote_file.readlines()
    quote = random.choice(all_quotes)
    # print(quote)
    send_email(quote)
| 935 | 299 |
from random import choice
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
    """Render the fortune page with a randomly chosen fortune."""
    # random.choice takes a single sequence argument; the original passed two
    # separate strings, which raised TypeError on every request.
    fortune = choice([
        'You will have good health',
        'You will not have good health',
    ])
    return render_template('simple2.html', fortune=fortune)
# Start Flask's built-in development server locally (debug mode enables the
# interactive debugger and auto-reload; not suitable for production).
app.run(host='127.0.0.1', debug=True)
| 324 | 105 |
from accountancy.helpers import bulk_delete_with_history
from accountancy.signals import audit_post_delete
from contacts.models import Contact
from django.test import TestCase
from simple_history.models import HistoricalRecords
from django.db import models
class ContactAuditTests(TestCase):
    """
    Check that the app is set up correctly i.e. right signals are set up,
    and it is registered with simple history package.
    """
    def test_simple_history_post_delete_receiver_is_removed(self):
        """
        The ready method of the AppConfig calls simple_history_custom_set_up
        on the AuditMixin class which disconnects this receiver.
        """
        # _live_receivers returns the receivers still connected for this sender
        live_receivers = models.signals.post_delete._live_receivers(Contact)
        for receiver in live_receivers:
            # a receiver bound to a HistoricalRecords instance means
            # simple_history's own post_delete hook is still attached
            if receiver.__self__.__class__.__name__ == HistoricalRecords.__name__:
                self.fail(
                    """
                    Historical Records receiver not disconnected.
                    It should be because we are using our own custom signal
                    which is fired when we delete."""
                )
    def test_audit_post_delete_signal_is_added(self):
        """
        After registering the model and disconnecting the receiver from
        the post delete signal we add our receiver to a custom signal
        """
        live_receivers = audit_post_delete._live_receivers(Contact)
        found = False
        for receiver in live_receivers:
            # match on the bound method's repr, since the bound method object
            # itself is not directly importable here
            if str(receiver) == "<bound method AuditMixin.post_delete of <class 'contacts.models.Contact'>>":
                found = True
                break
        if not found:
            self.fail("Failed to find the post_delete method of the AuditMixin class")
    def test_instance_deleted(self):
        """Deleting a single instance should leave both a creation and a deletion audit."""
        c = Contact(
            code="1",
            name="contact1",
            email="doris@hotmail.com"
        )
        c.save()
        c.delete()
        self.assertEqual(
            len(
                Contact.history.all()
            ),
            2 # created + deleted audits
        )
    def test_queryset_deleted(self):
        """Queryset deletes bypass the per-instance audit; only the creation audit remains."""
        c = Contact(
            code="1",
            name="contact1",
            email="doris@hotmail.com"
        )
        c.save()
        Contact.objects.all().delete()
        self.assertEqual(
            len(
                Contact.history.all()
            ),
            1 # created audit only
            # deleted audit is not created
            # use bulk_delete_with_history for deleted audits
        )
# -*- coding: utf-8 -*-
"""
Cisco CUCM connector class
This class creates a connection object via AXL\SOAP to the specified CUCM server using the specified AXL-enabled credentials
@author: Alfonso Sandoval Rosas
"""
import urllib3, logging
from ZeepDebugPlugin import *
from zeep import Client, xsd
from zeep.cache import SqliteCache
from zeep.transports import Transport
from requests import Session
from requests.auth import HTTPBasicAuth
from urllib3.exceptions import InsecureRequestWarning
class CUCMConnectorAXL:
    """AXL/SOAP connector to a Cisco CUCM node built on zeep.

    On construction it connects and runs a test query; ``isValid()`` returns
    the zeep service proxy on success or False on failure.
    """
    def __init__(self,CUCM_IP,AXL_Username,AXL_Password,CUCM_Version = '12.5',debug = False,logger = False):
        """
        Constructor initiates session establishment process when the instance is created
        Parameters
        ----------
        AXL_Username : string
            AXL-enabled CUCM username
        AXL_Password : string
            AXL-enabled CUCM password
        CUCM_IP : string
            Target CUCM IP address
        CUCM_Version : string. Default: 12.5
            Target CUCM version (must match a WSDL under the /schema folder)
        debug : boolean. Default: False
            Toggle debug plugin for seeing incoming/outgoing SOAP requests in console
        logger : logging instance. Default: False
            Custom logger for ERROR-type messages handling
        """
        self._AXL_Username = AXL_Username
        self._AXL_Password = AXL_Password
        self._CUCM_IP = CUCM_IP
        self._CUCM_Version = CUCM_Version
        self._debug = debug
        self._logger = logger
        # Holds the zeep service proxy; set to False if connection/test fails.
        self._CLIENT = ''
        self._connect_cucm()
        self._test_connection()
    def _connect_cucm(self):
        """Session establishment with target CUCM node. Sets self._CLIENT to a zeep service proxy (or False on failure)."""
        try:
            # The bundled WSDL must match the target CUCM version.
            WSDL = f'schema/{self._CUCM_Version}/AXLAPI.wsdl'
            if('9.' in self._CUCM_Version):
                # CUCM 9.x only negotiates legacy TLS ciphers, so widen the
                # accepted cipher list and fall back to the 9.1 schema.
                urllib3.disable_warnings()
                urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'
                WSDL = 'schema/9.1/AXLAPI.wsdl'
                try:
                    urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += 'HIGH:!DH:!aNULL'
                except AttributeError:
                    pass
            else:
                # CUCM typically uses self-signed certificates; silence the warnings.
                urllib3.disable_warnings(InsecureRequestWarning)
            BINDING_NAME = "{http://www.cisco.com/AXLAPIService/}AXLAPIBinding"
            ADDRESS = "https://{ip}:8443/axl/".format(ip=self._CUCM_IP)
            session = Session()
            # Certificate verification disabled for the same reason as above.
            session.verify = False
            session.auth = HTTPBasicAuth(self._AXL_Username, self._AXL_Password)
            transport = Transport(cache=SqliteCache(), session=session, timeout=10)
            if self._debug:
                client = Client(wsdl=WSDL, transport=transport, plugins=[ZeepDebugPlugin()])
            else:
                client = Client(wsdl=WSDL, transport=transport)
            self._CLIENT = client.create_service(BINDING_NAME, ADDRESS)
        except FileExistsError:
            # NOTE(review): a missing WSDL file raises FileNotFoundError/OSError,
            # not FileExistsError -- this handler probably never fires; confirm intent.
            self._CLIENT = False
            if self._logger:
                self._logger.error(f'Please verify the existance of the WSDL files corresponding to the CUCM version in the /schema folder' )
            else:
                logging.error( f'Please verify the existance of the WSDL files corresponding to the CUCM version in the /schema folder' )
            pass
    def _test_connection(self):
        """Test query for connection validation. Sets self._CLIENT to False on any failure."""
        if self._debug:
            logging.basicConfig(level=logging.DEBUG)
            logging.debug( f'Test connection query to: ({self._CUCM_IP}) ...')
        try:
            # A cheap, always-available AXL query used purely as a liveness probe.
            self._CLIENT.listCallManagerGroup(
                searchCriteria = {'name':'%'},
                returnedTags = {'name':''}
            )
        except Exception as err:
            self._CLIENT = False
            # Classify the failure by inspecting the error text.
            if 'Max retries exceeded' in str(err):
                if self._logger:
                    self._logger.error(f'Server ({self._CUCM_IP}) is unreachable' )
                else:
                    logging.error( f'Server ({self._CUCM_IP}) is unreachable' )
            elif 'Unknown fault occured' in str(err):
                if self._logger:
                    self._logger.error( f'Conection error to ({self._CUCM_IP}): Possible credentials mismatch' )
                else:
                    logging.error( f'Conection error to ({self._CUCM_IP}): Possible credentials mismatch' )
            else:
                if self._logger:
                    self._logger.error(f'Conection error to ({self._CUCM_IP}): {err}')
                else:
                    logging.error( f'Conection error to ({self._CUCM_IP}): {err}')
            pass
    def isValid(self):
        """Returns current self._CLIENT value. The value will be False if the test when creating the instance was not successful"""
        return self._CLIENT
    @staticmethod
    def connector(CUCM_IP,AXL_Username,AXL_Password,CUCM_Version = '11.5',debug = False):
        """Returns a standalone connector. No class methods. For testing purposes"""
        WSDL = f'schema/{CUCM_Version}/AXLAPI.wsdl'
        if('9.' in CUCM_Version):
            # Same legacy-TLS workaround as in _connect_cucm for CUCM 9.x.
            urllib3.disable_warnings()
            urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'
            WSDL = 'schema/9.1/AXLAPI.wsdl'
            try:
                urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += 'HIGH:!DH:!aNULL'
            except AttributeError:
                pass
        else:
            urllib3.disable_warnings(InsecureRequestWarning)
        BINDING_NAME = "{http://www.cisco.com/AXLAPIService/}AXLAPIBinding"
        ADDRESS = "https://{ip}:8443/axl/".format(ip=CUCM_IP)
        session = Session()
        session.verify = False
        session.auth = HTTPBasicAuth(AXL_Username, AXL_Password)
        transport = Transport(cache=SqliteCache(), session=session, timeout=10)
        if debug:
            client = Client(wsdl=WSDL, transport=transport, plugins=[ZeepDebugPlugin()])
        else:
            client = Client(wsdl=WSDL, transport=transport)
        return client.create_service(BINDING_NAME, ADDRESS)
"""
IoTree42 sensor network
Purpose: handle requests and render the web pages.
Software used: Python 3, Django, time, datetime, rest_framework
Target hardware: Debian server
Designed by Sebastian Stadler
on behalf of the University of Munich.
NO WARRANTY AND NO LIABILITY.
Use of this code is at your own risk.
"""
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .forms import UserUpdateForm, ProfileUpdateForm, UserRegisterForm, TreePostForm, InputPostForm
from django.core.paginator import Paginator
from .mqttcon import InitMqttClient
from django.http import HttpResponse, Http404
from django.conf import settings
from django.contrib.auth.models import User
from datetime import timezone
from .fluxcon import InitInfluxUser, DelInfluxData
from .fluxdatacon import FluxDataCon
from .grafanacon import InitGrafaUser
from .pahocon import PahoSend
import time
import json
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from revproxy.views import ProxyView
import datetime
# Deployment-wide settings (e.g. the Grafana proxy address) loaded once at
# import time from the host's IoTree configuration file.
with open('/etc/iotree/config.json', encoding='utf-8') as config_file:
    config = json.load(config_file)
def zip_download(request, version):
    """Serve the gateway installation ZIP identified by *version*.

    Raises Http404 for unknown versions or a missing file on disk.
    """
    import os
    if int(version) == 1:
        file_name = 'IoTree_Gateway_V_2.0.zip'
    else:
        # BUG fix: previously an unknown version fell through with
        # 'file_name' unbound and crashed with NameError below.
        raise Http404
    file_path = os.path.join(settings.MEDIA_ROOT, 'downloadfiles/'+file_name)
    if os.path.exists(file_path):
        with open(file_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="application/force-download")
            response['Content-Disposition'] = 'attachment; filename='+ file_name
            return response
    raise Http404
# Registration view: creates the Django user plus the matching InfluxDB,
# MQTT and Grafana accounts for the new user.
def register(request):
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            user_name = form.cleaned_data.get('username')
            user_email = form.cleaned_data.get('email')
            password1 = form.cleaned_data.get('password1')
            user = form.save(commit=False)
            # Provision the backing services before persisting the user,
            # in the original order: Influx, MQTT, Grafana.
            for service_client in (
                InitInfluxUser(user_name, password1),
                InitMqttClient(user_name, password1),
                InitGrafaUser(user_name, password1, user_email),
            ):
                service_client.run()
            user.first_name = "Same as the login PW from this site."
            user.last_name = "Same as the login PW from this site."
            user.save()
            messages.success(request, str(user_name)+': account has been created! You are now able to log in!')
            return redirect('login')
    else:
        form = UserRegisterForm()
    # Invalid POSTs fall through and re-render the bound form with errors.
    return render(request, 'users/register.html', {'form': form})
# func. for deleting user and process the deleting site
@login_required
def delete_user(request):
    """Delete the logged-in user's account after explicit confirmation.

    POST with 'confirm' deletes the user and logs out; POST with 'cancel'
    returns to the profile. Everything else shows the confirmation page.
    """
    if request.method == 'POST':
        confirm = request.POST.get('confirm')
        cancel = request.POST.get('cancel')
        if confirm == 'confirm':
            user = User.objects.get(username=request.user.username)
            user.delete()
            messages.success(request, str(request.user.username) + ' account has been deleted! and all related data')
            return redirect('logout')
        if cancel == 'cancel':
            return redirect('profile')
    # BUG fix: a POST without a recognised button previously returned None,
    # which Django rejects with a ValueError. Fall through to the page.
    return render(request, 'users/delete_user.html')
# func. for the profile site
@login_required
def profile(request):
    """Show and update the user's profile; a 'delete' button redirects to
    the account-deletion confirmation page."""
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
        delete = request.POST.get('delete', None)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, 'Your account has been updated!')
        if delete:
            try:
                return redirect('delete-user')
            except User.DoesNotExist:
                messages.error(request, ' account does not exist!')
            except Exception as e:
                # BUG fix: Exception has no ``.message`` attribute on
                # Python 3 — str(e) is the portable spelling. Error paths
                # now flash as errors instead of success.
                messages.error(request, str(e))
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    # (debug print of the form removed)
    context = {
        'u_form': u_form,
        'p_form': p_form
    }
    return render(request, 'users/profile.html', context)
# func. for the iotree site
@login_required
def treeview(request):
    # Browse the user's measurement tree; a POST triggers a table view,
    # a CSV download or deletion of the selected branch.
    if request.method == 'POST':
        form = TreePostForm(request.POST)
        if form.is_valid():
            time_start = form.cleaned_data.get('time_start')
            time_end = form.cleaned_data.get('time_end')
            # Interpret the naive form datetimes as UTC and convert them to
            # millisecond epoch timestamps for the Influx query helpers.
            time_start = time_start.replace(tzinfo=timezone.utc).timestamp()
            time_end = time_end.replace(tzinfo=timezone.utc).timestamp()
            tree = request.POST.get('tree', None)
            action = form.cleaned_data.get('action')
            time_start = int(time_start*1000)
            time_end = int(time_end*1000)
            # URL-safe copy of the branch path ('/' would break the URL).
            treee = tree.replace("/", "_")
            if action == 'table':
                return redirect('iotree-show', str(treee), time_start, time_end)
            if action == 'download':
                return redirect('iotree-download', str(treee), time_start, time_end)
            if action == 'delete':
                flux_client = FluxDataCon(request.user.username)
                tags = flux_client.get_raw_tags(str(tree))
                del flux_client
                if tags == "":
                    messages.info(request, "No Date Found! for delete")
                    return redirect('treeview')
                else:
                    # Drop all measurements matching the resolved tags.
                    flux_del = DelInfluxData(request.user.username, tags)
                    response = flux_del.run()
                    messages.info(request, 'Measuerments droped: '+str(tags)+'Database response: '+str(response))
                    del flux_del
                    return redirect('treeview')
    else:
        form = TreePostForm(initial={'time_end':datetime.datetime.now()})
    # GET (or invalid-POST fall-through) renders the user's tag tree.
    flux_client = FluxDataCon(request.user.username)
    context = flux_client.get_tag_tree()
    if str(context) == "[]":
        messages.info(request, 'No data jet!')
    del flux_client
    return render(request, 'users/treeview.html', {'context':context, 'form':form})
# func. for display all gateways and do actions
@login_required
def gatewaylist(request):
    """List all gateways of the current user with an online/offline badge.

    A gateway counts as online when it published a SYSTEMcontrol/ping
    message within the last five minutes.
    """
    # BUG fix: 'context' was only assigned on the GET path, so a POST
    # request crashed with NameError further down. Initialise it up front.
    context = []
    if request.method == 'POST':
        messages.info(request, "No Post -> get")
    else:
        # connect to db and fetch all known tags
        flux_client = FluxDataCon(request.user.username)
        dicttags = json.loads(flux_client.get_tag_tree())
        # top-level entries of the tag tree are the gateway ids
        gateway_ids = [entry["text"] for entry in dicttags]
        # query the last 5 minutes of ping entries for each gateway
        flux_client.start_time((int(time.time())-300)*1000)
        flux_client.end_time(int(time.time())*1000)
        ping_topics = [gid + "/SYSTEMcontrol/ping" for gid in gateway_ids]
        lastseen = flux_client.find(",".join(ping_topics))
        del flux_client
        # gateways that pinged recently are considered online
        online_ids = {entry["posts_tree"].split("/")[0] for entry in lastseen}
        for gid in gateway_ids:
            if gid in online_ids:
                context.append({"id": gid, "status": "online", "color": "green"})
            else:
                context.append({"id": gid, "status": "offline", "color": "red"})
    if str(context) == "[]":
        messages.info(request, 'No gateway connected jet!')
    return render(request, 'users/gateway_list.html', {'context':context})
# func. for sending commands / JSON files to a gateway over MQTT
def _send_json_message(request, gateway, topic, textbox):
    """Validate *textbox* as JSON and publish it to *topic* on *gateway*,
    flashing the outcome. Shared by the 'jsonfile' and 'linkgateway' tasks."""
    pahosend = PahoSend(request.user.username, gateway, topic)
    jsonstring = pahosend.checkjson(textbox)
    if jsonstring:
        io = pahosend.send(jsonstring)
        if io:
            messages.info(request, "MQTT message has been send!")
        else:
            messages.error(request, "Somthing went wrong when sending please try again.")
    else:
        messages.error(request, "Sorry this might be not proper json!")
@login_required
def input(request, gateway, task):
    """Free-text input page for a gateway.

    *task* selects the action: 'jsonfile' (sync a JSON file),
    'commandsend' (run a shell command) or 'linkgateway' (subscribe to
    another gateway's topics).
    """
    if request.method == 'POST':
        form = InputPostForm(request.POST)
        if form.is_valid():
            if request.POST.get("send"):
                textbox = form.cleaned_data.get('textbox')
                if "jsonfile" in task:
                    _send_json_message(request, gateway, "SYSTEMcontrolDONOTSAVE/syncfile", textbox)
                elif "commandsend" in task:
                    # Plain-text command; no JSON validation needed.
                    pahosend = PahoSend(request.user.username, gateway, "SYSTEMcontrolDONOTSAVE/bashCOMMAND")
                    io = pahosend.send(textbox)
                    if io:
                        messages.info(request, "MQTT message has been send!")
                    else:
                        messages.error(request, "Somthing went wrong when sending please try again.")
                elif "linkgateway" in task:
                    _send_json_message(request, gateway, "SYSTEMcontrolDONOTSAVE/linkgateway", textbox)
                else:
                    messages.error(request, "Somthing went wrong: task ist not clear. Please try again")
                return redirect('input', gateway, task)
            elif request.POST.get("update"):
                return redirect('input', gateway, task)
            elif request.POST.get("cancel"):
                return redirect('gatewaylist')
        # BUG fix: an invalid form (or unknown button) previously returned
        # None, which Django rejects. Reload the page instead.
        return redirect('input', gateway, task)
    else:
        flux_client = FluxDataCon(request.user.username)
        flux_client.last = True
        # label the textbox and pre-fill it with the last saved data
        if "jsonfile" in task:
            label = "Json File:"
            lastentry = flux_client.find(gateway+"/SYSTEMcontrolSAVEJSON/syncfile")
            if lastentry:
                form = InputPostForm(initial={"textbox": lastentry[0]["posts_body"][0][1]})
            else:
                form = InputPostForm(initial={"textbox": "{}"})
        elif "commandsend" in task:
            label = "Send a command to your Gateway (default options: reboot, update, upgrade):"
            form = InputPostForm(initial={"textbox": "update"})
        elif "linkgateway" in task:
            label = 'Listen to other Gateways (example: {"gatewayID":"topic1/topic2/#"}):'
            lastentry = flux_client.find(gateway+"/SYSTEMcontrolSAVEJSON/linkgateway")
            if lastentry:
                form = InputPostForm(initial={"textbox": lastentry[0]["posts_body"][0][1]})
            else:
                form = InputPostForm(initial={"textbox": "{}"})
        else:
            # BUG fix: an unrecognised task previously crashed with
            # UnboundLocalError on 'label'/'form' below.
            label = "Unknown task: " + str(task)
            form = InputPostForm(initial={"textbox": ""})
        context = {
            'gateway': "Gateway: "+gateway,
            'label': label
        }
        return render(request, 'users/input.html', {'context':context, 'form':form})
# func. for the setup_rpi site
@login_required
def setup_rpi(request):
    """Render the Raspberry-Pi setup page; a POST starts the ZIP download."""
    if request.method == 'POST':
        request.POST.get('download', None)
        # Only one gateway package version is currently offered.
        version = 1
        return redirect('zip-download', version)
    return render(request, 'users/setup_rpi.html', {'file': '1'})
# func. for the manual site
@login_required
def manual(request):
    # Static page: just renders the user-manual template.
    return render(request, 'users/manual.html')
# func. for redirect to a grafana iframe via modif.
@login_required
def tografana(request):
    # return render(request, 'users/dashboard.html')
    # NOTE(review): rendering Grafana inside an iframe did not work reliably,
    # so this redirects straight to the configured Grafana proxy address.
    return redirect(config['GRAFA_ADDRESS'])
# modified page for render Grafana in iframe
@login_required
def iframedash(request):
    # Same target as tografana: redirect to the configured Grafana address.
    return redirect(config['GRAFA_ADDRESS'])
# Reverse proxy to the local Grafana instance with automatic login for
# authenticated Django users.
@method_decorator(login_required, name='dispatch')
class GrafanaProxyView(ProxyView):
    """Forwards requests to Grafana, injecting the Django username so that
    Grafana's auth-proxy mode logs the user in transparently."""
    upstream = 'http://localhost:3000/'
    def get_proxy_request_headers(self, request):
        # Grafana trusts this header for the proxied identity.
        headers = super().get_proxy_request_headers(request)
        headers['X-WEBAUTH-USER'] = request.user.username
        return headers
# func. for the iotree_show site, for displaying tables
@login_required
def iotree_show(request, tags, time_start, time_end):
    """Render the stored measurements for *tags* between the two
    millisecond timestamps, paginated one result set per page."""
    # URL paths encode '/' as '_'; restore the real branch separators.
    nodes = tags.replace("_", "/")
    flux_client = FluxDataCon(request.user.username)
    flux_client.start_time(int(time_start))
    flux_client.end_time(int(time_end))
    results = flux_client.find(nodes)
    del flux_client
    if not results:
        messages.error(request, 'No Data Found! Data response: '+str(results)+'. Given Nodes: '+str(nodes) )
        return redirect('treeview')
    paginator = Paginator(results, 1)
    page_obj = paginator.get_page(request.GET.get('page'))
    return render(request, 'users/iotree_show.html', {'contexts': page_obj})
# CSV download return
@login_required
def iotree_download(request, tags, time_start, time_end):
    """Stream the selected measurements as a semicolon-separated CSV file."""
    import csv
    import datetime
    # URL paths encode '/' as '_'; restore the real branch separators.
    nodes = tags.replace("_", "/")
    flux_client = FluxDataCon(request.user.username)
    flux_client.start_time(int(time_start))
    flux_client.end_time(int(time_end))
    records = flux_client.find(nodes)
    del flux_client
    if not records:
        messages.error(request, 'No Data Found! Data response: '+str(records)+'. Given Nodes: '+str(nodes) )
        return redirect('treeview')
    # Assemble the CSV directly into the HTTP response.
    response = HttpResponse(content_type='text/csv')
    filename = 'IoTree42_' + str(datetime.datetime.now()) + '.csv'
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    writer = csv.writer(response, delimiter=';', dialect='excel')
    for record in records:
        # branch info, then the column headings, then the value rows
        writer.writerow(['tree branchs: ', record['posts_tree']])
        writer.writerow(record['posts_head'])
        writer.writerows(record['posts_body'])
        writer.writerow(['------'] * 7)
    return response
@api_view(['GET', 'POST'])
@permission_classes([IsAuthenticated])
def iotree_api(request):
    """REST endpoint for the measurement store.

    POST body: {"tree": <branch>, "time_start": <ms>, "time_end": <ms>|"now"}
    returns the matching data rows. GET returns the tag tree plus leaf list.
    """
    if request.method == 'POST':
        try:
            data = dict(request.data)
            tree = data['tree']
            time_start = data['time_start']
            time_end = data['time_end']
            if time_end == 'now':
                time_end = int(time.time()*1000)
            else:
                time_end = int(time_end)
            time_start = int(time_start)
            flux_client = FluxDataCon(request.user.username)
            flux_client.start_time(time_start)
            flux_client.end_time(time_end)
            context = flux_client.find(tree)
            del flux_client
            if str(context) == "[]":
                context = {"error":"No Data found! or Timeout!", "Info":"Hint: Max rows 200000!"}
            return Response(context)
        except Exception:
            # BUG fix: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; debug print of the request
            # payload removed as well.
            return Response({"status":404,"Info":"Something went wrong when the query", "Hint":"Max rows 200000!"})
    else:
        flux_client = FluxDataCon(request.user.username)
        iotree = flux_client.get_tag_tree()
        leafs = flux_client.get_leafs()
        context = {
            "listofleafs": leafs,
            "iotree": json.loads(iotree)
        }
        del flux_client
        if str(context) == "false":
            context = {"error":"false"}
        if str(context) == "[]":
            context = {"error":"No Data jet!"}
        return Response(context)
| 17,512 | 4,935 |
# MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D, Dropout
from keras.layers.normalization import BatchNormalization
from art.classifiers.classifier import Classifier
def mnist_layers(input_shape, nb_filters):
    """Return the convolutional feature-extraction stack used for MNIST.

    The literal string "activation" is a placeholder that the CNN builder
    later replaces with the configured activation layer.
    """
    act = "activation"
    return [
        Conv2D(nb_filters, (8, 8), strides=(2, 2), padding="same", input_shape=input_shape),
        act,
        Conv2D(nb_filters * 2, (6, 6), strides=(2, 2), padding="valid"),
        act,
        Conv2D(nb_filters * 2, (5, 5), strides=(1, 1), padding="valid"),
        act,
        Flatten(),
    ]
def cifar10_layers(input_shape, nb_filters):
    """Return the feature-extraction stack used for CIFAR-10.

    The literal string "activation" is a placeholder that the CNN builder
    later replaces with the configured activation layer.
    """
    act = "activation"
    return [
        Conv2D(nb_filters // 2, (3, 3), padding="same", input_shape=input_shape),
        act,
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.5),
        Conv2D(nb_filters, (3, 3), padding="valid"),
        act,
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.5),
        Flatten(),
        Dense(500),
        act,
        Dropout(0.5),
    ]
class CNN(Classifier):
    """
    Implementation of a convolutional neural network using Keras sequential model
    """
    def __init__(self, input_shape=None, include_end=True, act='relu', bnorm=False, input_ph=None, nb_filters=64,
                 nb_classes=10, act_params=None, model=None, defences=None, preproc=None, dataset="mnist"):
        """Instantiates a ConvolutionalNeuralNetwork model using Keras sequential model
        :param tuple input_shape: shape of the input images
        :param bool include_end: whether to include a softmax layer at the end or not
        :param str act: type of the intermediate activation functions
        :param bool bnorm: whether to apply batch normalization after each layer or not
        :param input_ph: The TensorFlow tensor for the input
            (needed if returning logits) ("ph" stands for placeholder
            but it need not actually be a placeholder)
        :param int nb_filters: number of convolutional filters per layer
        :param int nb_classes: the number of output classes
        :param dict act_params: dict of params for activation layers
            (default None, meaning no extra params)
        """
        # BUG fix: the default used to be the mutable literal ``{}``, which
        # is shared across calls; use None and create a fresh dict here.
        if act_params is None:
            act_params = {}
        if model is None:
            model = Sequential(name='cnn')
            layers = []
            if "mnist" in dataset:
                layers = mnist_layers(input_shape, nb_filters)
            elif "cifar10" in dataset:
                layers = cifar10_layers(input_shape, nb_filters)
            elif "stl10" in dataset:
                raise NotImplementedError("No CNN architecture is defined for dataset '{0}'.".format(dataset))
            # NOTE(review): any other dataset string silently produces an
            # architecture with no convolutional layers — confirm intended.
            for layer in layers:
                if layer == "activation":
                    # Placeholder entry: insert the configured activation
                    # (and optional batch norm) instead.
                    model.add(self.get_activation(act, **act_params))
                    if bnorm:
                        model.add(BatchNormalization())
                else:
                    model.add(layer)
            model.add(Dense(nb_classes))
            if include_end:
                model.add(Activation('softmax'))
        super(CNN, self).__init__(model, defences, preproc)
| 4,506 | 1,330 |
#!/bin/python
from deap import tools
from copy import deepcopy
import random
from deap import algorithms
import promoterz
import statistics
from .. import evolutionHooks
def checkPopulation(population, message):
    """Debug guard: print *message* when *population* is empty."""
    if not population:
        print(message)
def standard_loop(World, locale):
    """Run one full epoch of the genetic algorithm for *locale*.

    Pipeline: validate -> deduplicate -> evaluate -> stats -> age ->
    fitness filters -> resize (PRoFIGA) -> select -> breed -> immigrate.
    Mutates ``locale`` in place (population, extraStats, POP_SIZE).
    """
    # --assertions are most for debugging purposes; they should not trigger
    assert (len(locale.population))
    locale.extraStats = {}
    # --validate individuals;
    locale.population = promoterz.validation.validatePopulation(
        World.tools.constructPhenotype, World.TargetParameters, locale.population
    )
    # --remove equal citizens before evaluation for efficency
    nonevaluated = [ind for ind in locale.population if not ind.fitness.valid]
    Lu = len(nonevaluated)
    print("first unevaluated: %i" % len(nonevaluated))
    remains = locale.extratools.populationPD(nonevaluated, 1.0)
    Lr = len(remains)
    print("%i individues removed due to equality" % (Lu - Lr))
    locale.population = [
        ind for ind in locale.population if ind.fitness.valid
    ] + remains
    # --evaluate individuals;
    locale.extraStats['nb_evaluated'], locale.extraStats[
        'avgTrades'
    ] = World.parallel.evaluatePopulation(
        locale
    )
    locale.extraStats['avgExposure'] = sum([I.averageExposure for I in locale.population])/len(locale.population)
    # --send best individue to HallOfFame;
    # (every 15th epoch, including epoch 0)
    if not locale.EPOCH % 15:
        BestSetting = tools.selBest(locale.population, 1)[0]
        locale.HallOfFame.insert(BestSetting)
    assert (sum([x.fitness.valid for x in locale.population]) == len(locale.population))
    # --compile stats;
    statistics.compileStats(locale)
    # --population ages (elders beyond the age limit are dropped)
    qpop = len(locale.population)
    locale.population = locale.extratools.populationAges(
        locale.population, locale.EvolutionStatistics[locale.EPOCH]
    )
    wpop = len(locale.population)
    locale.extraStats['nbElderDies'] = qpop - wpop
    # INDIVIDUE FITNESS ATTRIBUTES FILTERS;
    # --remove very inapt citizens
    if World.genconf.minimumProfitFilter is not None:
        locale.extratools.filterThreshold(World.genconf.minimumProfitFilter,
                                          World.genconf._lambda)
        checkPopulation(locale.population, "Population dead after profit filter.")
    # --remove individuals below tradecount
    if World.genconf.TradeNumberFilterRange is not None:
        locale.extratools.filterTrades(World.genconf.TradeNumberFilterRange,
                                       World.genconf._lambda)
        checkPopulation(locale.population, "Population dead after trading number filter.")
    # --remove individues based on average roundtripe exposure time;
    if World.genconf.averageExposureLengthFilterRange is not None:
        locale.extratools.filterExposure(
            World.genconf.averageExposureLengthFilterRange,
            World.genconf._lambda
        )
        checkPopulation(locale.population, "Population dead after roundtrip exposure filter.")
    # If the filters killed everyone, restart with a fresh random population.
    if not locale.population:
        locale.population = World.tools.population(World.genconf.POP_SIZE)
        print("Repopulating... Aborting epoch.")
    # --show stats;
    statistics.showStatistics(locale)
    # --calculate new population size;
    if locale.EPOCH:
        PRoFIGA = promoterz.supplement.PRoFIGA.calculatePRoFIGA(
            World.genconf.PRoFIGA_beta,
            locale.EPOCH,
            World.genconf.NBEPOCH,
            locale.EvolutionStatistics[locale.EPOCH - 1],
            locale.EvolutionStatistics[locale.EPOCH],
        )
        locale.POP_SIZE += locale.POP_SIZE * PRoFIGA
        # clamp the new size to [POP_SIZE // 2, POP_SIZE * 3]
        minps, maxps = World.genconf.POP_SIZE // 2, World.genconf.POP_SIZE * 3
        try:
            locale.POP_SIZE = int(round(max(min(locale.POP_SIZE, maxps), minps)))
        except:
            locale.POP_SIZE = 30
            M = "POP_SIZE PROFIGA ERROR;"
            print(M)
    # --filter best inds;
    locale.population[:] = evolutionHooks.selBest(locale.population, locale.POP_SIZE)
    checkPopulation(locale.population, "Population dead after selection of score filter.")
    assert (None not in locale.population)
    # print(EvolutionStatistics)
    #FinalBestScores.append(Stats['max'])
    # --select best individues to procreate
    LAMBDA = max(World.genconf._lambda, locale.POP_SIZE - len(locale.population))
    TournamentSize = max(2 * LAMBDA, len(locale.population))
    offspring = evolutionHooks.Tournament(locale.population, LAMBDA, TournamentSize)
    offspring = [deepcopy(x) for x in offspring] # is deepcopy necessary?
    # --modify and integrate offspring;
    offspring = algorithms.varAnd(
        offspring, World.tools, World.genconf.cxpb, World.genconf.mutpb
    )
    locale.extratools.ageZero(offspring)
    locale.population += offspring
    # --NOW DOESN'T MATTER IF SOME INDIVIDUE LACKS FITNESS VALUES;
    assert (None not in locale.population)
    # --immigrate individual from HallOfFame;
    if random.random() < 0.2:
        locale.population = locale.extratools.ImmigrateHoF(locale.population)
    # --immigrate random number of random individues;
    if random.random() < 0.5:
        locale.population = locale.extratools.ImmigrateRandom((2, 7), locale.population)
    assert (len(locale.population))
    assert (None not in locale.population)
| 5,380 | 1,658 |
#!/usr/bin/env python3
import codecs
import os
import re
import sys
# Make the logging2 package importable so sphinx.ext.autodoc can resolve it.
sys.path.insert(0, os.path.abspath("../../logging2"))
# -- General configuration ------------------------------------------------
extensions = [
    "sphinx.ext.autodoc",
    "sphinx_autodoc_annotation",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "logging2"
copyright = "2017, Vince Forgione"
author = "Vince Forgione"
# Read the project version straight from setup.py so the docs never drift.
# BUG fix: the file handle was never closed — use a context manager; the
# regex is now a raw string to avoid invalid-escape warnings.
with codecs.open(os.path.abspath("../../setup.py"), encoding="utf8") as _setuppy_fh:
    _setuppy = _setuppy_fh.read()
_version = re.search(r"^VERSION = [\"']([^\"']+)[\"']", _setuppy, re.MULTILINE).group(1)
version = release = _version
language = None
exclude_patterns = []
pygments_style = "sphinx"
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = "alabaster"
# html_theme_options = {}
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
htmlhelp_basename = "logging2doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# (source start file, target, title, author, documentclass)
latex_documents = [
    (master_doc, "logging2.tex", "logging2 Documentation", "Vince Forgione", "manual"),
]
# -- Options for manual page output ---------------------------------------
man_pages = [(master_doc, "logging2", "logging2 Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
    (
        master_doc,
        "logging2",
        "logging2 Documentation",
        author,
        "logging2",
        "One line description of project.",
        "Miscellaneous",
    ),
]
# -*- coding: utf-8 -*-
"""The API equivalent to Ethereum JSON RPC."""
from typing import Tuple, Union
from ..core.constants import HASH_SIZE
from ..core import chain, reader
from ..helpers import hashing
from ..remote import kv_remote
from ..rlp import sedes
from ..stagedsync import stages
# pylint: disable=broad-except
class EthereumAPI:
    """Read-only facade over the remote KV store, mirroring a subset of the
    Ethereum JSON RPC methods."""
    def __init__(self, target: str = kv_remote.DEFAULT_TARGET):
        """Open a remote KV channel against *target*."""
        self.remote_kv = kv_remote.RemoteClient(target).open()
    def close(self):
        """Tear down the remote KV channel."""
        self.remote_kv.close()
    def block_number(self):
        """Return the height of the latest block in the chain, 0 on failure."""
        try:
            height, _ = stages.get_stage_progress(self.remote_kv, stages.SyncStage.FINISH)
            return height
        except Exception:
            return 0
    def get_block_by_number(self, block_number: int) -> sedes.Block:
        """Return the block at the given height."""
        return chain.Blockchain(self.remote_kv).read_block_by_number(block_number)
    def get_block_by_hash(self, block_hash: str) -> sedes.Block:
        """Return the block with the given hex hash, or None when the
        hash is malformed or the lookup fails."""
        try:
            hash_bytes = hashing.hex_as_hash(block_hash)
            return chain.Blockchain(self.remote_kv).read_block_by_hash(hash_bytes)
        except Exception:
            return None
    def get_block_transaction_count_by_number(self, block_number: int) -> int:
        """Return the transaction count of the block at *block_number*,
        or -1 when the block is not found."""
        block = chain.Blockchain(self.remote_kv).read_block_by_number(block_number)
        if block:
            return len(block.body.transactions)
        return -1
    def get_block_transaction_count_by_hash(self, block_hash: str) -> int:
        """Return the transaction count of the block with *block_hash*,
        or -1 when the block is not found."""
        block = chain.Blockchain(self.remote_kv).read_block_by_hash(block_hash)
        if block:
            return len(block.body.transactions)
        return -1
    def get_storage_at(self, address: str, index: str, block_number_or_hash: Union[int, str]) -> str:
        """Return the 32-byte, zero-left-padded hex value at storage slot
        *index* of *address*, or '0x' on any failure."""
        try:
            if isinstance(block_number_or_hash, int):
                block_number = int(block_number_or_hash)
            else:
                # Resolve a block hash to its canonical height first.
                hash_bytes = hashing.hex_as_hash(str(block_number_or_hash))
                block_number = chain.Blockchain(self.remote_kv).read_canonical_block_number(hash_bytes)
            state = reader.StateReader(self.remote_kv, block_number)
            account = state.read_account_data(address)
            slot_hash = hashing.hex_as_hash(str(index))
            value = state.read_account_storage(address, account.incarnation, slot_hash)
            return '0x' + value.hex().zfill(2*HASH_SIZE)
        except Exception:
            return '0x'
    def syncing(self) -> Union[bool, Tuple[int ,int]]:
        """Return False when already synced, otherwise the
        (highestBlock, currentBlock) pair."""
        try:
            highest, _ = stages.get_stage_progress(self.remote_kv, stages.SyncStage.HEADERS)
            current, _ = stages.get_stage_progress(self.remote_kv, stages.SyncStage.FINISH)
            if current >= highest:
                return False
            return highest, current
        except Exception:
            return False
| 3,651 | 1,077 |
"""Tox hook implementations."""
from __future__ import print_function
import os
import tox
try:
from tox.reporter import warning
except ImportError:
warning = lambda s: None
from .envlist import add_factors, AFTER, BEFORE
@tox.hookimpl
def tox_addoption(parser):
    """Register the factor-related command line flags on tox's parser."""
    parser.add_argument("--append-factor", type=str, nargs="+", help="Append a factor.")
    parser.add_argument(
        "--prepend-factor", type=str, nargs="+", help="Prepend a factor."
    )
    # The remaining options are plain boolean switches, all off by default.
    for flag, help_text in (
        (
            "--prepend-archraw-factor",
            "Prepend raw CPU arch arch to factors, such as ia32, armv8_a, aarch64.",
        ),
        (
            "--prepend-cpuarch-factor",
            "Prepend CPU arch to factors, such as x86_32, x86_64, arm_7, arm_8.",
        ),
        (
            "--prepend-ostype-factor",
            "Prepend OS type to factors, such as linux, macos, windows.",
        ),
        (
            "--prepend-username-factor",
            "Prepend username to factors.",
        ),
        (
            "--add-ci-factor",
            "Add CI factors if environment variable is set, such as appveyor, travis or fallback ci.",
        ),
    ):
        parser.add_argument(flag, action="store_true", help=help_text)
@tox.hookimpl(trylast=True)
def tox_configure(config):
    """Check for the presence of the added options."""
    # Each block below optionally mutates config.option.prepend_factor /
    # append_factor; add_factors() at the end applies the final lists.
    if config.option.prepend_archraw_factor:
        # Imported lazily so py-cpuinfo is only needed when requested.
        from cpuinfo.cpuinfo import DataSource # noqa
        archraw_factor_name = DataSource.arch_string_raw.replace("-", "_").lower()
        if not config.option.prepend_factor:
            config.option.prepend_factor = [archraw_factor_name]
        else:
            config.option.prepend_factor.insert(0, archraw_factor_name)
    if config.option.prepend_cpuarch_factor:
        from cpuinfo.cpuinfo import _parse_arch, DataSource # noqa
        try:
            arch, _ = _parse_arch(DataSource.arch_string_raw)
            arch = arch.lower()
            if not config.option.prepend_factor:
                config.option.prepend_factor = [arch]
            else:
                config.option.prepend_factor.insert(0, arch)
        except Exception:
            # Unparseable arch string: warn but keep configuring.
            archraw_factor_name = DataSource.arch_string_raw.replace("-", "_").lower()
            warning(
                'cpuarch not available for archraw "{}"'.format(archraw_factor_name)
            )
    if config.option.prepend_ostype_factor:
        from osinfo.osinfo import _get_os_type # noqa
        if not config.option.prepend_factor:
            config.option.prepend_factor = [_get_os_type().lower()]
        else:
            config.option.prepend_factor.insert(0, _get_os_type().lower())
    if config.option.add_ci_factor and "CI" in os.environ:
        extra_factor = None
        # Known CI services get a username factor; others get a CI tag.
        if "APPVEYOR" in os.environ or "TRAVIS" in os.environ:
            config.option.prepend_username_factor = True
        elif "CIRRUS_CI" in os.environ:
            extra_factor = "cirrusci"
        else:
            extra_factor = "ci"
        if extra_factor:
            if not config.option.append_factor:
                config.option.append_factor = [extra_factor]
            else:
                config.option.append_factor.insert(0, extra_factor)
    if config.option.prepend_username_factor:
        import getpass # noqa
        username = getpass.getuser()
        if username:
            username = username.lower()
            if not config.option.prepend_factor:
                config.option.prepend_factor = [username]
            else:
                config.option.prepend_factor.insert(0, username)
    if config.option.prepend_factor:
        add_factors(config, config.option.prepend_factor, position=BEFORE)
    if config.option.append_factor:
        add_factors(config, config.option.append_factor, position=AFTER)
| 3,925 | 1,205 |
from petlib.ec import EcGroup,EcPt
from petlib.ec import _FFI,_C
from petlib.bn import Bn
from hashlib import sha256
#from memory_profiler import profile
def commitment_key_gen(n, trap=None,nid=714): #713 std, 415/714
    '''Generates a key for a pedersen-like multicommitment.
    It outputs a ECgroup specified by nid and n points on the curve.
    If Trap is set the discrete log of all the points is also returned.'''
    # NOTE(review): uses xrange, so this module targets Python 2.
    G = EcGroup(nid)
    commitment_key=[]
    trapdoor=[]
    # n+1 points: n message generators plus one blinding generator.
    for i in xrange(n+1):
        #priv = G.order().random()
        #pub = priv * G.generator()
        #commitment_key+=[pub]
        #trapdoor+=[priv]
        # Each key point is x*g for a fresh random exponent x; the
        # exponents are the trapdoor (discrete logs) of the key points.
        trapdoor+=[G.order().random()]
        commitment_key+=[trapdoor[-1]*G.generator()]
    if trap!=None:
        return (G,commitment_key,tuple(trapdoor))
    return (G,commitment_key)
def mult_prod(G,key,elements):
    '''Compute the multi-exponentiation sum_i elements[i]*key[i] on curve G
    in a single OpenSSL EC_POINTs_mul call (faster than a Python loop).'''
    #G,key=ck
    # Marshal the points and scalars into C arrays for the FFI call.
    bvec=_FFI.new("EC_POINT * []",len(elements))
    for i in xrange(len(elements)): bvec[i]=key[i].pt
    evec=_FFI.new("BIGNUM * []",len(elements))
    for i in xrange(len(elements)):
        try:
            evec[i]=elements[i].bn
        except AttributeError:
            # Coerce plain integers into petlib Bn values.
            #does this even work properly?
            evec[i]=Bn(elements[i]).bn
    comm = EcPt(G)
    _C.EC_POINTs_mul(G.ecg, comm.pt, _FFI.NULL,len(elements), bvec, evec, _FFI.NULL)
    return comm
def mult_prod_str(G,key,elements):#not actually used in commit_str, but could be potentially useful. Be careful that it was potentially causing segmentation fault.
    '''Variant of mult_prod whose scalars are given as decimal strings;
    converts each via Bn.from_decimal before the single EC_POINTs_mul call.'''
    #G,key=ck
    bvec=_FFI.new("EC_POINT * []",len(elements))
    for i in xrange(len(elements)): bvec[i]=key[i].pt
    evec=_FFI.new("BIGNUM * []",len(elements))
    for i in xrange(len(elements)): evec[i]=Bn.from_decimal(str(elements[i])).bn
    comm = EcPt(G)
    _C.EC_POINTs_mul(G.ecg, comm.pt, _FFI.NULL,len(elements), bvec, evec, _FFI.NULL)
    return comm
def commit(ck,elements, rand=None):
    '''Computes vector commitment to elements using ck
    (and optionally using a given randomness).
    Outputs a point on the curve and the randomness used (if not given as input).
    Raises Exception when there are more elements than key points.'''
    # BUG fix: the original had an unreachable duplicated block after the
    # return statement (plus large commented-out scaffolding); removed.
    G,key=ck
    if len(elements)>=len(key):
        raise Exception('Too many elements!Longer key required')
    if rand is None:
        rand=G.order().random()
    # Append the randomness and pair it with the last key point, which
    # serves as the blinding generator.
    elements=list(elements)+[rand]
    comm=mult_prod(G,key[:len(elements)-1]+[key[-1]],elements)
    return comm,rand
def commit_str(ck,elements_str, rand=None):
    '''Computes vector commitment to elements (given in a string/int
    representation) using ck, optionally with a given randomness.
    Outputs a point on the curve and the randomness used as a long.

    :param ck: (G, key) pair as produced by commitment_key_gen.
    :param elements_str: values convertible via int(); must be fewer
        than len(key).
    :param rand: optional randomness; freshly drawn when omitted.
    :return: (commitment EcPt, long(rand)).

    Cleanup: the original had a second, unreachable copy of the body after
    the return statement; it has been removed. Behavior is unchanged.
    '''
    G,key=ck
    if len(elements_str)>=len(key):
        raise Exception('Too many elements!Longer key required')
    if rand is None:
        rand=G.order().random()
    # Coerce every element through int -> decimal string -> Bn, then append
    # the randomness paired with the hiding generator key[-1].
    elements=[Bn.from_decimal(str(int(x))) for x in elements_str]+[Bn.from_decimal(str(rand))]
    comm=mult_prod(G,key[:len(elements)-1]+[key[-1]],elements)
    return comm,long(rand)
def check_open_commit(ck,comm,elements,rand):
    """Verifies that (elements, rand) is an opening of comm.

    Bug fix: commit() returns a (commitment, randomness) tuple, so the
    original compared an EcPt against a tuple and always returned False.
    Unpack the commitment point before comparing.
    """
    commitment, _ = commit(ck,elements,rand)
    return comm==commitment
def check_open_commit_str(ck,comm,elements,rand):
    """Verifies that (elements, rand) is an opening of comm (string variant).

    Bug fix: commit_str() returns a (commitment, randomness) tuple, so the
    original compared an EcPt against a tuple and always returned False.
    Unpack the commitment point before comparing.
    """
    commitment, _ = commit_str(ck,elements,rand)
    return comm==commitment
def challenge(elements):
    """Packages a challenge in a bijective way.

    Every item (prefixed by the element count) is length-prefixed before
    hashing, so distinct input lists can never collide by concatenation
    tricks. Returns the raw 32-byte SHA-256 digest.
    """
    items = [len(elements)] + elements
    encoded = ["%s||%s" % (len(str(item)), str(item)) for item in items]
    digest = sha256()
    digest.update("|".join(encoded).encode("utf8"))
    return digest.digest()
def pok_open_comm_prove(public_key,A,opening,rand):
    """ZK proof of knowledge of an opening (opening, rand) of commitment A.

    Fiat-Shamir transformed sigma protocol: commit to a random blinding
    vector, derive the challenge x by hashing the transcript, and answer
    with the masked opening f and masked randomness z.

    :return: proof tuple (x, f, z).
    """
    G,commitment_key=public_key
    # Sanity check that the prover actually knows a valid opening.
    assert check_open_commit(public_key,A,opening,rand)
    p = G.order()
    blinder =[p.random() for i in xrange(len(opening))]
    B,B_rand=commit(public_key,blinder)
    state = ['Opening', G.nid(),list(commitment_key),A, B]#add a optional message
    hash_x = challenge(state)
    x = Bn.from_binary(hash_x) % p
    f = [(blinder[i] - x*opening[i]) % p for i in xrange(len(opening)) ]
    # NOTE(review): % binds tighter than -, so this computes
    # B_rand - ((x*rand) % p), not (B_rand - x*rand) % p. Both agree
    # modulo the group order, but z may be unreduced/negative -- confirm
    # this is intended.
    z = B_rand - x*rand % p
    return (x, f, z)
def pok_open_comm_verify(public_key, A, proof):
    """Verifies the ZK proof of knowledge of an opening of commitment A.

    Recomputes C = commit(f; z) + x*A and checks that hashing the
    transcript with C reproduces the challenge x.

    Bug fix: commit() returns a (commitment, randomness) tuple; the
    original added x*A directly to that tuple, which raises a TypeError.
    The commitment point is now unpacked first.
    """
    G,commitment_key=public_key
    x,f,z = proof
    recomputed, _ = commit(public_key,f,z)
    C = recomputed + x*A
    p = G.order()
    state = ['Opening', G.nid(),list(commitment_key),A, C]
    hash_x = challenge(state)
    y = Bn.from_binary(hash_x) % p
    return x == y
| 6,704 | 2,497 |
import graphene
from ...core import TaxRateType as CoreTaxRateType
from ...core.permissions import MODELS_PERMISSIONS
from ...core.weight import WeightUnits
from .utils import str_to_enum
class ReportingPeriod(graphene.Enum):
    # Time windows available for report aggregation queries.
    TODAY = 'TODAY'
    THIS_MONTH = 'THIS_MONTH'
# GraphQL enum mirroring the core tax rate choices; member names are
# derived from the raw choice values via str_to_enum, values stay raw.
TaxRateType = graphene.Enum(
    'TaxRateType',
    [(str_to_enum(rate[0]), rate[0]) for rate in CoreTaxRateType.CHOICES])

# Permission codenames have the form "app_label.codename"; only the
# codename part becomes the enum member name, the full string is the value.
PermissionEnum = graphene.Enum(
    'PermissionEnum', [
        (str_to_enum(codename.split('.')[1]), codename)
        for codename in MODELS_PERMISSIONS])

# GraphQL enum of the supported weight units.
WeightUnitsEnum = graphene.Enum(
    'WeightUnitsEnum',
    [(str_to_enum(unit[0]), unit[0]) for unit in WeightUnits.CHOICES])
| 692 | 250 |
import torch
from torch import nn
from .utils import conv3x3
from .base import BackboneBaseModule
__all__ = [
"ResNet34",
"ResNet50S",
]
def _add_stage(block, in_ch, out_ch, stride, use_se, repeat_time):
assert repeat_time > 0 and isinstance(repeat_time, int)
layers = [block(in_ch, out_ch, stride, use_se=use_se)]
for _ in range(repeat_time - 1):
layers.append(block(out_ch, out_ch, 1, use_se=use_se))
return nn.Sequential(*layers)
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs with BN/ReLU, an optional
    squeeze-and-excitation gate, and a projection shortcut whenever the
    input and output shapes differ.
    """

    def __init__(self, in_ch, out_ch, stride, expansion=1, use_se=False):
        assert out_ch % expansion == 0
        mid_ch = int(out_ch / expansion)
        super(BasicBlock, self).__init__()
        self.use_se = use_se
        # A 1x1 projection shortcut is needed when channels or stride change.
        self.do_downsample = not (in_ch == out_ch and stride == 1)
        self.conv1 = conv3x3(in_ch, mid_ch, stride=stride)
        self.bn1 = nn.BatchNorm2d(mid_ch)
        self.conv2 = conv3x3(mid_ch, out_ch, stride=1)
        self.bn2 = nn.BatchNorm2d(out_ch)
        # Tag the last BN of the block; presumably consumed by the weight
        # init in BackboneBaseModule (e.g. zero-init) -- TODO confirm.
        self.bn2.last_bn = True
        self.relu = nn.ReLU(inplace=True)
        if self.do_downsample:
            self.residual = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, 1, stride, bias=False),
                nn.BatchNorm2d(out_ch),
            )
        if self.use_se:
            # Squeeze-and-excitation: global pool -> 1/16 bottleneck -> sigmoid gate.
            self.se = nn.Sequential(
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Conv2d(out_ch, out_ch // 16, 1, bias=False),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_ch // 16, out_ch, 1, bias=False),
                nn.Sigmoid(),
            )

    def forward(self, x):
        residual = x
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        if self.use_se:
            # Channel-wise re-weighting with the SE attention vector.
            attention = self.se(x)
            x *= attention
        if self.do_downsample:
            residual = self.residual(residual)
        x += residual
        return self.relu(x)
class ResidualBlock(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, with an
    optional squeeze-and-excitation gate. The shortcut uses avg-pool
    followed by a 1x1 conv (ResNet-D style, presumably -- the avg-pool
    avoids discarding activations when striding).
    """

    def __init__(self, in_ch, out_ch, stride, expansion=4, use_se=False):
        assert out_ch % expansion == 0
        mid_ch = int(out_ch / expansion)
        super(ResidualBlock, self).__init__()
        self.use_se = use_se
        # A projection shortcut is needed when channels or stride change.
        self.do_downsample = not (in_ch == out_ch and stride == 1)
        self.conv1 = nn.Conv2d(in_ch, mid_ch, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_ch)
        self.conv2 = conv3x3(mid_ch, mid_ch, stride)
        self.bn2 = nn.BatchNorm2d(mid_ch)
        self.conv3 = nn.Conv2d(mid_ch, out_ch, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_ch)
        # Tag the last BN of the block; presumably consumed by the weight
        # init in BackboneBaseModule (e.g. zero-init) -- TODO confirm.
        self.bn3.last_bn = True
        self.relu = nn.ReLU(inplace=True)
        if self.do_downsample:
            self.residual = nn.Sequential(
                nn.AvgPool2d(stride, stride, ceil_mode=True),
                nn.Conv2d(in_ch, out_ch, 1, bias=False),
                nn.BatchNorm2d(out_ch),
            )
        if self.use_se:
            # Squeeze-and-excitation: global pool -> 1/16 bottleneck -> sigmoid gate.
            self.se = nn.Sequential(
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Conv2d(out_ch, out_ch // 16, 1, bias=False),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_ch // 16, out_ch, 1, bias=False),
                nn.Sigmoid(),
            )

    def forward(self, x):
        residual = x
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        if self.use_se:
            # Channel-wise re-weighting with the SE attention vector.
            attention = self.se(x)
            x *= attention
        if self.do_downsample:
            residual = self.residual(residual)
        x += residual
        return self.relu(x)
class ResNet34(BackboneBaseModule):
    """ResNet-34 backbone built from BasicBlock stages (3/4/6/3 blocks).

    `channels` and `strides` list the output channels and cumulative
    downsampling factor after each of stage0..stage4.
    """

    def __init__(self, use_se=False):
        super(ResNet34, self).__init__()
        self.channels = [64, 64, 128, 256, 512]
        self.strides = [2, 4, 8, 16, 32]
        # stage0: 7x7 stride-2 stem.
        self.stage0 = nn.Sequential(
            nn.Conv2d(3, self.channels[0], 7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(self.channels[0]),
            nn.ReLU(inplace=True),
        )
        # stage1 starts with the max-pool, then the first block stage is
        # appended module by module so everything lives in one Sequential.
        self.stage1 = nn.Sequential(nn.MaxPool2d(3, stride=2, padding=1))
        for layer in _add_stage(BasicBlock, self.channels[0], self.channels[1],
                                1, use_se, 3):
            self.stage1.add_module(str(len(self.stage1)), layer)
        self.stage2 = _add_stage(BasicBlock, self.channels[1],
                                 self.channels[2], 2, use_se, 4)
        self.stage3 = _add_stage(BasicBlock, self.channels[2],
                                 self.channels[3], 2, use_se, 6)
        self.stage4 = _add_stage(BasicBlock, self.channels[3],
                                 self.channels[4], 2, use_se, 3)
        self._init_params()

    def forward(self, x):
        x = self.stage0(x)  # 64, 1/2
        x = self.stage1(x)  # 64, 1/4
        x = self.stage2(x)  # 128, 1/8
        x = self.stage3(x)  # 256, 1/16
        x = self.stage4(x)  # 512, 1/32
        return x

    def _change_downsample(self, params):
        # Overrides the strides of the first block in stage3/stage4
        # (presumably to reduce downsampling, e.g. for dense prediction)
        # by patching both the main conv and the projection shortcut.
        self.stage3[0].conv1.stride = (params[0], params[0])
        self.stage3[0].residual[0].stride = params[0]
        self.stage4[0].conv1.stride = (params[1], params[1])
        self.stage4[0].residual[0].stride = params[1]
class ResNet50S(BackboneBaseModule):
    """ResNet-50 backbone (bottleneck blocks, 3/4/6/3) with a "deep stem":
    three 3x3 convs instead of a single 7x7 (the "S" variant).
    """

    def __init__(self, use_se=False):
        super(ResNet50S, self).__init__()
        self.channels = [64, 256, 512, 1024, 2048]
        self.strides = [2, 4, 8, 16, 32]
        # Deep stem: 3x3(s2) -> 3x3 -> 3x3, ending at channels[0].
        self.stage0 = nn.Sequential(
            conv3x3(3, 32, 2),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            conv3x3(32, 32),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            conv3x3(32, self.channels[0]),
            nn.BatchNorm2d(self.channels[0]),
            nn.ReLU(inplace=True),
        )
        # stage1 starts with the max-pool, then the first block stage is
        # appended module by module so everything lives in one Sequential.
        self.stage1 = nn.Sequential(nn.MaxPool2d(3, stride=2, padding=1))
        for layer in _add_stage(ResidualBlock, self.channels[0],
                                self.channels[1], 1, use_se, 3):
            self.stage1.add_module(str(len(self.stage1)), layer)
        self.stage2 = _add_stage(ResidualBlock, self.channels[1],
                                 self.channels[2], 2, use_se, 4)
        self.stage3 = _add_stage(ResidualBlock, self.channels[2],
                                 self.channels[3], 2, use_se, 6)
        self.stage4 = _add_stage(ResidualBlock, self.channels[3],
                                 self.channels[4], 2, use_se, 3)
        self._init_params()

    def forward(self, x):
        x = self.stage0(x)  # 64, 1/2
        x = self.stage1(x)  # 256, 1/4
        x = self.stage2(x)  # 512, 1/8
        x = self.stage3(x)  # 1024, 1/16
        x = self.stage4(x)  # 2048, 1/32
        return x

    def _change_downsample(self, params):
        # Overrides the strides of the first block in stage3/stage4 by
        # patching the 3x3 conv and the avg-pool in the shortcut. Mutating
        # kernel_size/stride attributes after construction relies on the
        # pooling layer reading them at forward time -- NOTE(review): verify.
        self.stage3[0].conv2.stride = (params[0], params[0])
        self.stage3[0].residual[0].kernel_size = params[0]
        self.stage3[0].residual[0].stride = params[0]
        self.stage4[0].conv2.stride = (params[1], params[1])
        self.stage4[0].residual[0].kernel_size = params[1]
        self.stage4[0].residual[0].stride = params[1]
| 7,208 | 2,844 |
#!/usr/bin/env python
import sys
# Prepend the bundled 'lib/' directory so its packages shadow any
# globally installed versions. Relative path: must be run from the
# project root.
sys.path = ['lib/'] + sys.path
| 64 | 25 |
"""
Helper functions for porting R code into python/stLearn.
"""
import os
# rpy2 handles; populated lazily by rpy2_setup() so importing this module
# does not require R / rpy2 to be installed.
ro = None
pandas2ri = None
localconverter = None
def rpy2_setup(r_path):
    """Sets up rpy2.

    Points R_HOME at *r_path*, imports rpy2 (deferred so the module can be
    imported without R present), and publishes the handles through the
    module-level globals ro / pandas2ri / localconverter.
    """
    # rpy2 reads R_HOME at import time, so set it before importing.
    os.environ["R_HOME"] = r_path
    import rpy2.robjects as robjects_
    from rpy2.robjects import pandas2ri as pandas2ri_
    from rpy2.robjects.conversion import localconverter as localconverter_
    global ro, pandas2ri, localconverter
    ro = robjects_
    pandas2ri = pandas2ri_
    localconverter = localconverter_
| 503 | 180 |
import matplotlib
# Select the non-interactive Agg backend; this must happen before pyplot
# is imported, otherwise the default backend is already locked in.
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import mutation_waterfall.preprocess as preprocess
def plot(mutation_list_file, n_genes=30, ax=None, file=None):
    """Generates a waterfall plot describing mutational landscape of samples.

    Args:
        mutation_list_file: Path to mutation list.
        n_genes: Number of genes to be plotted. (default: 30)
        ax: Matplotlib axis to draw the plot.
        file: If not None, resulting plot will be saved as an image file.

    Returns:
        ax: Axis containing the plot.
    """
    binary_matrix, genes, samples = preprocess.make_binary_matrix(mutation_list_file)
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    plt.sca(ax)
    waterfall(binary_matrix, genes, n_genes, ax)
    plt.tight_layout()
    if file:
        plt.savefig(file, dpi=150)
    else:
        plt.show()
    # Bug fix: the docstring promised the axis but the function returned
    # None; also the docstring referred to a non-existent "num_gene" param.
    return ax
def waterfall(binary_matrix, genes, num_gene, ax):
    """Sort binary matrix and plot it.

    Args:
        binary_matrix: Binary (gene x sample) matrix containing mutation
            status -- assumed rows are genes, columns samples; TODO confirm
            against make_binary_matrix.
        genes: List of genes (row labels of binary_matrix).
        num_gene: Number of genes to be plotted.
        ax: Matplotlib axis to draw the plot.

    Returns:
        ax: Axis containing the plot.
    """
    # Order genes (rows) by mutation count, most frequently mutated first.
    row_order = binary_matrix.sum(axis=1).argsort()[::-1]
    temp = binary_matrix[row_order]
    # Order samples (columns) by the lexicographic rank of their 0/1 column
    # pattern so mutations cascade left-to-right (the waterfall look).
    column_order = np.array([''.join([str(x) for j, x in enumerate(temp[:, i])]) for i in range(temp.shape[1])]).argsort()[::-1]
    temp = temp[:, column_order]

    # Y-axis tick labels: gene name plus percentage of mutated samples.
    ax.set_yticks(np.arange(num_gene))
    percentages = binary_matrix.sum(axis=1) / binary_matrix.shape[1] * 100
    yticklabels = ['$%s$ (%.1f%%)' % (genes[ix], percentages[ix]) for ix in row_order[:num_gene]]
    plt.yticks(np.arange(num_gene), yticklabels)
    # Minor ticks at half-cell offsets draw the grey grid between cells.
    ax.set_xticks(np.arange(-.5, temp.shape[1], 1), minor=True)
    ax.set_yticks(np.arange(-.5, num_gene, 1), minor=True)
    ax.grid(which='minor', color='grey', linestyle='-', alpha=0.33, linewidth=1,)
    plt.xticks([])
    ax.imshow(temp[:num_gene, :], interpolation='none', aspect='auto', cmap=plt.cm.gray_r)
    return ax
| 2,152 | 772 |
import random
from collections import defaultdict
class RandomizedCollection(object):
    """Multiset with O(1) average-time insert, remove and getRandom.

    `element` is a flat list of all stored values (with duplicates);
    `collection` maps each value to the set of its indices in `element`.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.collection = defaultdict(set)
        self.element = []

    def insert(self, val):
        """
        Inserts a value to the collection. Returns true if the collection did not already contain the specified element.
        :type val: int
        :rtype: bool
        """
        was_absent = not self.collection[val]
        # New copies always go at the end of the flat list.
        self.collection[val].add(len(self.element))
        self.element.append(val)
        return was_absent

    def remove(self, val):
        """
        Removes a value from the collection. Returns true if the collection contained the specified element.
        :type val: int
        :rtype: bool
        """
        if not self.collection[val]:
            return False
        idx = self.collection[val].pop()
        last = len(self.element) - 1
        if idx != last:
            # Fill the hole with the last element so the pop below is O(1),
            # and update that element's index bookkeeping.
            moved = self.element[last]
            self.element[idx] = moved
            self.collection[moved].add(idx)
            self.collection[moved].remove(last)
        self.element.pop()
        return True

    def getRandom(self):
        """
        Get a random element from the collection.
        :rtype: int
        """
        if not self.element:
            return -1
        # OJ is python2., randint
        return self.element[random.randint(0, len(self.element) - 1)]
# Your RandomizedCollection object will be instantiated and called as such:
# obj = RandomizedCollection()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
| 1,730 | 504 |
"""Code snippets vol-58
287-pywebview display text and image.
Requires:
pip3 install pywebview
display_txt_img.html
and model.jpg both in cwd.
Origin:
https://github.com/r0x0r/pywebview/tree/master/examples
"""
import webview
if __name__ == '__main__':
    # create_window only registers the window; webview.start() below runs
    # the GUI loop and blocks until the window is closed. confirm_close
    # makes pywebview ask before closing.
    master_window = webview.create_window('Pywebview-text and image example',
                                        url='display_txt_img.html',
                                        width=665, height=575,
                                        confirm_close=True,)
    webview.start()
| 553 | 171 |
import os
from pynsett.discourse import Discourse
from pynsett.extractor import Extractor
from pynsett.knowledge import Knowledge
_path = os.path.dirname(__file__)

# Demo: extract relation triplets from a sentence using the test rules.
text = "Jane was born on 10 August 1979."

knowledge = Knowledge()
# Bug fix: the original leaked the rules file handle
# (open(...).read() with no close); use a context manager instead.
with open(os.path.join(_path, '../rules/test.rules')) as rules_file:
    knowledge.add_rules(rules_file.read())

discourse = Discourse(text)
extractor = Extractor(discourse, knowledge)
triplets = extractor.extract()

for triplet in triplets:
    print(triplet)
| 460 | 158 |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Roberto García Carvajal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Módulo para nombrar números enteros no negativos (en castellano).
Uso: nombrar_numero(entero_no_negativo).
"""
__all__ = ['nombrar_numero']
__author__ = u'Roberto García Carvajal'
def _desempaquetar_segmento(x):
"""
Extrae de la cadena las centenas, decenas y unidades por separado.
Tener en cuenta que puede que la cadena tenga una longitud inferior 3.
:param x: str de como mucho 3 caracteres.
:return tuple: tupla de 3 enteros que representan a centenas, decenas y
unidades.
"""
index = 0
c = 0
d = 0
u = 0
l = len(x)
if l > 3:
raise ValueError(u"El segmento debe ser como mucho de longitud 3.")
if l > 2:
c = int(x[index])
index += 1
if l > 1:
d = int(x[index])
index += 1
if l > 0:
u = int(x[index])
return c, d, u
def _nombrar_segmento(x, unidad_larga=False):
    """
    Spell out one group of up to three digits (no scale suffix attached).

    :param x: numeric str of at most 3 characters.
    :param unidad_larga: bool; if True a final 1 is written "uno"
        instead of the apocopated "un".
    :return: unicode transcription of the segment.
    """
    centenas, decenas, unidades = _desempaquetar_segmento(x)
    # Hundreds. 1 is special: "cien" alone, "ciento" when followed by more.
    nombres_centenas = {
        0: u"",
        1: ((decenas + unidades) > 0 and u"ciento" or u"cien"),
        2: u"doscientos",
        3: u"trescientos",
        4: u"cuatrocientos",
        5: u"quinientos",
        6: u"seiscientos",
        7: u"setecientos",
        8: u"ochocientos",
        9: u"novecientos",
    }
    # Tens. The spelling depends on whether a unit digit follows
    # ("dieci"/"veinti" fuse; 30..90 take " y ").
    nombres_decenas = {
        0: u"",
        1: (unidades and u"dieci" or u"diez"),
        2: (unidades and u"veinti" or u"veinte"),
        3: (unidades and u"treinta y " or u"treinta"),
        4: (unidades and u"cuarenta y " or u"cuarenta"),
        5: (unidades and u"cincuenta y " or u"cincuenta"),
        6: (unidades and u"sesenta y " or u"sesenta"),
        7: (unidades and u"setenta y " or u"setenta"),
        8: (unidades and u"ochenta y " or u"ochenta"),
        9: (unidades and u"noventa y " or u"noventa"),
    }
    # Units, honouring unidad_larga and the accents required after
    # "veinti" (and "dieci" for 6).
    nombres_unidades = {
        0: u"",
        1: (unidad_larga and u"uno") or (decenas == 2 and u"ún") or u"un",
        2: (decenas == 2 and u"dós") or u"dos",
        3: (decenas == 2 and u"trés") or u"tres",
        4: u"cuatro",
        5: u"cinco",
        6: (decenas in (1, 2) and u"séis") or u"seis",
        7: u"siete",
        8: u"ocho",
        9: u"nueve",
    }
    parte_centenas = nombres_centenas[centenas]
    parte_resto = nombres_decenas[decenas] + nombres_unidades[unidades]
    # 11..15 have irregular names that replace the composed form.
    if decenas == 1 and 0 < unidades < 6:
        parte_resto = (u"once", u"doce", u"trece",
                       u"catorce", u"quince")[unidades - 1]
    # A separating space only when both halves are non-empty.
    separador = u" " if (parte_centenas and parte_resto) else u""
    return parte_centenas + separador + parte_resto
def nombrar_numero(x):
    """
    Convert a non-negative integer to its written (Spanish) form.

    :param x: int, non-negative integer to spell out.
    :return unicode: the number in alphabetic form.
    :raises ValueError: if x is not an int or is negative.
    """
    # Type check.
    if not isinstance(x, int):
        raise ValueError(u"Tipo incorrecto. Se esperaba int, encontrado %s" %
                         x.__class__.__name__)
    # Sign check.
    if x < 0:
        raise ValueError(u"Se esperaba un entero no negativo.")
    if x == 0:
        return u"cero"
    # Split the digits into groups of three, starting from the right, and
    # store the segments right-to-left. Examples:
    #   1     -> ["1"]
    #   4234  -> ["234", "4"]
    #   10001 -> ["001", "10"]
    # Bug fix: use floor division (//) so this also works on Python 3,
    # where / yields a float and breaks range(); // behaves identically
    # on Python 2.
    xx = (u"%s" % x)[::-1]
    num_segmentos = len(xx) // 3 + (1 if len(xx) % 3 else 0)
    vx = [xx[3 * i:3 * (i + 1)][::-1] for i in range(num_segmentos)]
    resultado = u""
    # Scale suffix per segment position (singular vs. plural forms).
    mapa_sufijos_singular = {
        0: u"",
        1: u"mil",
        2: u"millón",
        3: u"mil",
        4: u"billón",
        5: u"mil",
        6: u"trillón",
    }
    mapa_sufijos_plural = {
        0: u"",
        1: u"mil",
        2: u"millones",
        3: u"mil",
        4: u"billones",
        5: u"mil",
        6: u"trillones",
    }
    # Name each three-digit segment and append its scale suffix where it
    # applies.
    for index, v in enumerate(vx):
        resultado_segmento = _nombrar_segmento(v, unidad_larga=(index == 0))
        # Add the suffix if the segment has a value, or unconditionally for
        # the "millions"-type positions (even index). Examples:
        # - 1000001 -> segments '001','000','1'; the empty '000' must NOT
        #   emit "mil".
        # - 1000020001 -> the '000' millions segment still emits "millones".
        if (resultado_segmento or (index % 2) == 0) and index > 0:
            resultado_segmento += u" "
            if resultado_segmento == u"un ":
                # A lone 1 before "mil" is dropped: 1000 is "mil",
                # never "un mil".
                if (index % 2) == 1:
                    resultado_segmento = mapa_sufijos_singular[index]
                else:
                    resultado_segmento += mapa_sufijos_singular[index]
            else:
                resultado_segmento += mapa_sufijos_plural[index]
        if resultado_segmento:
            resultado = u" " + resultado_segmento + resultado
    # Strip the leading space(s) accumulated while prepending segments.
    return resultado.lstrip()
| 6,861 | 2,533 |
#!/usr/bin/env python3
"""
Black-box end to end tests for OpenZGY.
If the old ZGY-Public Python module is available, several of the
tests will be run both on the old and the new implementation to
verify that they match. This is a bit messy, because there are some
known bugs in the old code and some deliberate changes in the new.
If the Seismic Store plug-in and/or the ZFP compression plug-in
are available then this functionality is tested as well.
* Tested in checkReadingDeadArea(), checkContents, etc.
On read, bricks written as explicit all zero and bricks that were
never written should be treated identically. This is not quite the
case with the legacy reader: A read request covering both existing
and non-existing bricks works as described. A read request where
all the corresponding bricks are missing will return an error. This
is unfortunate as it makes the caller more aware of the file's
layout. The test should try reading a small rectangle fully inside
a partially written brick but outside the written area, and one
fully inside a never-written brick, and one that overlaps both
types of brick.
* Tested in checkContents() and checkRawContents().
On read of integral data, requesting data as float should give the
same result as requesting the data as storage values and doing the
conversion afterwards. Make sure this holds both for regular data,
data from all-constant bricks, and values in missing bricks. In
particular, make sure that "absent" data doesn't get returned as
storage-zero when reading as integer and converted-zero when
reading as float. To test for this, make sure the coding range is
not symmetrical. I.e. storage zero must not map to converted zero.
* Tested in checkContents() and checkRawContents().
When doing a partial write of a brick that did not exist, the
missing values should be "zero after conversion to float", or as
close to zero as possible. Make sure they are not garbage and not
"zero before conversion to float" instead. See
Accessor::WriteDataExT
* Tested in checkStatistics() and checkHistogram().
Statistics and histogram information stored in the ZGY file should
have the same values as if the entire survey was read and statistics
and histogram was computed from those values. In other words,
statistics should count all samples inside the survey boundary
regardless of whether they come from regular, all-constant, or
never written bricks. Samples from the padding area outside the
survey boundary must not be counted.
This is trivial if statistics and histogram is computed in a
separate pass. Less so if the information is collected during write.
NOTE: The old accessor will not count never-written bricks.
* Tested in checkStatistics() and checkHistogram().
The above rule holds true even when the coding range is not zero centric
(cannot precisely represent zero after conversion) or does not contain
zero so zero cannot be represented at all. In these cases, even
never-written bricks will affect "sum of all samples" et cetera.
To test this, two additional copies of the test data is needed
with "non zero centric" coding and "only positive" coding.
NOTE: The statistics will be pretty useless in this case, so we might
not really care either way.
NOTE: The old accessor will not count never-written bricks.
* Tested in checkStatistics() and checkHistogram().
Statistics and histogram should handle overwritten data correctly.
This is trivial if statistics and histogram is computed in a
separate pass. Less so if the information is collected during write.
In the test data, the inersection of "A" and "B" is overwritten.
* The histogram range should be wide enough for all samples to fit.
It is allowed to be wider. Specifically, for an 8-bit file the only
thing that makes sense is to have a 1:1 correspondence between
storage values and histogram bins. So, histogram range equals
coding range. For a 16-bit file which makes use of most of the
available storage values (which is a reasonable assumption) one
could also set histogram range equals coding range, assigning 256
storage values to each histogram bin. Not explicitly tested yet.
* If alpha information is written, and this is done before writing
the bricks, then histogram and statistics should only include actually
live traces. This test is N/A if we completely deprecate alpha support.
* If alpha information is written to change a trace to dead after
bulk has been written for that trace, the effect on the statistics
is unspecified. Technically it would make sense to immediately
correct the statistics once the alpha changes. This is not
  implemented even in the old accessor. And probably never will be.
This is N/A for testing in any case since the result is unspecified.
* Just in case we are forced to keep the old behavior that treats all-zero
bricks slightly different from never-written bricks, it is recommended
that applications that don't need this odd behavior explicitly fills
all newly created file with zeros before writing real data.
This is N/A for testing.
* Tested in testFancyReadConstant().
Applications that do want to distinguish between never-written,
all-constant, and regular bricks should only do so for performance
reasons. A separate api function will be provided to query the
brick status. This new api function obviously needs to be tested.
* Not tested. Mostly of historical interest.
The curorig and cursize members are deprecated but we might add a
unit test just to document the current behavior. They were supposed
to give the bounding box of data actually written to the file. Or
possibly the bounding box including padding to the nearest brick
boundary. Or maybe somebody just gave up and set them equal to the
survey. I suspect the origin will always be included, see
OnBrickWritten. I also suspect that DataInfo::SetExtent is always
called with setcur=true which means the range will always match the
full size.
"""
#print('Running' if __name__ == '__main__' else 'Importing', __file__)
import numpy as np
import os
import sys
import io
import math
import json
import base64
import time
from contextlib import suppress, ExitStack, contextmanager
from enum import Enum
from collections import namedtuple
try:
    from .. import zgypublic as oldzgy
    print("Also testing the old ZGY-Public API.")
except Exception as ex:
    print("Old ZGY-Public is not available:", ex)
    # Stand-in so attribute access like oldzgy.zgy / oldzgy.ZgyReader keeps
    # working when the legacy wrapper is absent; zgy None makes
    # HasOldZgy() report False and the sentinel objects never match
    # identity checks in showZgy().
    class FakeAPI:
        zgy = None
        ZgyReader = object()
        ZgyWriter = object()
    oldzgy = FakeAPI()
from .. import api as newzgy
from ..api import SampleDataType, UnitDimension, ProgressWithDots, ZgyCompressFactory, ZgyKnownCompressors, ZgyKnownDecompressors
from ..impl.lodalgo import DecimationType # TODO-Low encapsulation?
from ..test.utils import SDCredentials, TempFileAutoDelete, LocalFileAutoDelete, CloudFileAutoDelete, HasSeismicStore, HasZFPCompression, SDTestData, SDTestSink
from ..impl.enum import UpdateMode
from ..exception import *
def HasOldZgy():
    """Return True if the legacy ZGY-Public wrapper was importable."""
    return oldzgy.zgy is not None
def showZgy(*args):
    """Return a short human readable description of which reader/writer
    classes (old vs. new ZGY API) appear in the argument list; unknown
    classes are shown as module.name. None entries are skipped.
    """
    msg = ""
    for a in args:
        if a is None: pass
        elif a is newzgy.ZgyReader: msg += " and new reader"
        elif a is newzgy.ZgyWriter: msg += " and new writer"
        elif a is oldzgy.ZgyReader: msg += " and old reader"
        elif a is oldzgy.ZgyWriter: msg += " and old writer"
        else: msg += " and " + a.__module__ + "." + a.__name__
    # Drop the leading " and " (5 characters) from the first entry.
    return msg[5:] if msg else ""
# ----- Called by test code; not runnable by themselves. ----- #
@contextmanager
def TimeMe(name):
    """Context manager for ad-hoc timing of a code section.

    Currently a no-op: the perf_counter measurement and report are
    disabled, but call sites can keep their `with TimeMe(...)` wrappers.
    """
    # start = time.perf_counter()
    yield None
    # elapsed = time.perf_counter() - start
    # print("TIMED: %-20.20s %7.3f" % (name+":", elapsed), flush=True)
class TraceCallsToSD:
    """
    Records I/O calls; suitable for use as a _debug_trace callback.
    Each invocation is stored as a (what, nbytes, padded, parts) record.
    """
    _entry = namedtuple("io", "what nbytes padded parts")

    def __init__(self, *, verbose = False):
        self.calls = []
        self._verbose = verbose

    def __call__(self, what, nbytes, padded, parts):
        record = self._entry(what, nbytes, padded, parts)
        self.calls.append(record)
        if self._verbose:
            print("  {0:9s} size {1:10s} padded {2:10s} parts {3:1d}".format(
                what, self._pretty(nbytes), self._pretty(padded), parts))

    @staticmethod
    def _pretty(n):
        """Render a byte count with a KB/MB unit when it divides evenly."""
        if n < 1024 or n % 1024 != 0:
            return "{0:4d} bytes".format(n)
        if n < 1024*1024 or n % (1024*1024) != 0:
            return "{0:7d} KB".format(n//1024)
        return "{0:7d} MB".format(n//(1024*1024))

    def reset(self):
        """Forget all recorded calls."""
        self.calls = []
class MustThrow:
    """
    Context manager asserting that its body raises an expected exception.

    Raises AssertionError if nothing was raised, the raised type is not
    one of the expected types (exact type match, not subclass), or the
    message does not contain the expected substring. Matching exceptions
    are swallowed.
    """

    def __init__(self, message = None, extypes = None):
        self._message = message
        # Accept either a single exception class or a tuple of them.
        if isinstance(extypes, type) and issubclass(extypes, Exception):
            self._extypes = (extypes,)
        else:
            self._extypes = extypes
        self._exnames = tuple(e.__name__ for e in self._extypes) if self._extypes else "Exception"

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        problem = None
        if type is None:
            problem = 'Expected {0}, got no exception'.format(self._exnames)
        elif self._extypes and type not in self._extypes:
            problem = 'Expected {0} got {1} "{2}"'.format(self._exnames, type.__name__, str(value))
        elif self._message and str(value).find(self._message) < 0:
            problem = 'Expected "{0}" got "{1}"'.format(self._message, str(value))
        if problem:
            raise AssertionError(problem) from None
        return True # suppress the exception.
def pretty(n):
    """
    Format a number, assumed to be a size in bytes, as a human readable string.

    Non-ints pass through str(). A unit is only used when the value divides
    it evenly; 256 KB multiples in [512 KB, 1 MB) get the "N*256 KB" form.
    """
    # Exact type check on purpose: bool and int subclasses fall through
    # to str() just like the original.
    if type(n) != type(42):
        return str(n)
    KB = 1024
    MB = KB * 1024
    GB = MB * 1024
    if n >= GB and n % GB == 0:
        return str(n // GB) + " GB"
    if n >= MB and n % MB == 0:
        return str(n // MB) + " MB"
    if n >= 2 * 256 * KB and n % (256 * KB) == 0:
        return str(n // (256 * KB)) + "*256 KB"
    if n >= KB and n % KB == 0:
        return str(n // KB) + " KB"
    return str(n) + " bytes"
def savePNG(data, outfile):
    """Save a 2D slice of sample data as a grayscale PNG via Pillow.

    NaN samples ("dead" traces) are painted yellow (255,255,0). The data
    is squeezed, transposed and flipped -- presumably to get survey
    orientation right in the image; confirm against the callers.
    """
    from PIL import Image
    def normalize(a):
        """Scale to uint8 0..255, returning (scaled, dead-sample mask)."""
        a = a.astype(np.float32)
        dead = np.isnan(a)
        amin, amax = (np.nanmin(a), np.nanmax(a))
        # Temporarily fill NaNs so arithmetic below stays finite.
        a[dead] = amin
        if amin == amax:
            # Constant input: avoid division by zero, map everything to 0.
            a *= 0
        else:
            a = (a - amin) / (amax - amin)
        a = (a * 255).astype(np.uint8)
        return a, dead
    data = np.squeeze(data)
    data = np.transpose(data)
    data = np.flip(data, 1)
    data, dead = normalize(data)
    # Replicate the gray value into the three RGB channels.
    tmp = np.zeros((data.shape[0], data.shape[1], 3), dtype=np.uint8)
    r = tmp[...,0]
    g = tmp[...,1]
    b = tmp[...,2]
    r += data
    g += data
    b += data
    # Dead samples become yellow so they stand out against the grayscale.
    r[dead] = 255
    g[dead] = 255
    b[dead] = 0
    im = Image.fromarray(tmp, mode="RGB")
    im.save(outfile, format="PNG")
def isMutable(obj, *, verbose = False, seen = None):
    """
    Recursive check for whether an object is mutable.
    The idea was to check that all members of e.g. ZgyReader are
    immutable so the user cannot (a) shoot himself in the foot by
    directly modifying a data members, or (b) even worse, change
    some cached value by modifying a mutable member of a container.
    Unfortunately this was a lot harder than I thought.
    - A callable might or might not be const. Need to check the code.
    - A property and a data member look rather similar.
    - A readonly property may have a stub __set__ that will throw.
    - A __setattr__, if present, can make any attribute mutable.
    - Python has no frozendict (yet) unless I want to add a
      rather pointless dependency, So I copy dicts before
      returning them. This is safe, but the code here cannot know.
      I might make my own dict-like wrapper but this is getting
      way too complicated.
    Looks like I just have to rely on dump() followed by eyeballing
    the source code.

    Bug fixes: `seen` previously defaulted to a shared mutable set()
    which was updated in place, so ids recorded by one top-level call
    leaked into later calls and made them wrongly report a cycle; a
    stray debug print was also removed.
    """
    # Known types
    if isinstance(obj, (type(None), type, str, int, bool, float, tuple, bytes, Enum, np.dtype)):
        if verbose: print("Immutable type", type(obj).__name__)
        return False
    elif isinstance(obj, (list, set, dict, bytearray, np.ndarray)):
        if verbose: print("MUTABLE type", type(obj).__name__)
        return True
    elif callable(obj):
        if verbose: print("CALLABLE type", type(obj).__name__)
        return False
    # Recursive checks
    if seen is None:
        seen = set()
    if id(obj) in seen:
        if verbose: print("skipping cycle of", type(obj).__name__)
        return False
    seen |= set((id(obj),))
    # NOTE(review): dict and tuple can never reach the two branches below;
    # both are short-circuited by the isinstance checks above. Kept so
    # behavior is unchanged, but they are effectively dead code.
    if isinstance(obj, dict):
        obj = obj.items()
    if isinstance(obj, tuple):
        if verbose: print("recursively checking", type(obj).__name__)
        return any([isMutable(e, verbose=verbose, seen=seen) for e in obj])
    if verbose: print("unknown type, assuming mutable", type(obj).__name__)
    return True
def hasMutableMembers(obj, *, safe = frozenset(), verbose = False):
    """
    Try to detect whether obj (which is some kind of instance variable)
    has any plain data members or any properties that contain data that
    in turn looks like it is mutable. Note that this turned out to be
    a lot harder than I first thought. The tests are by no means complete.

    Args:
        obj: instance to inspect; None is trivially accepted.
        safe: attribute names to exempt from the check.
            (The old default was a mutable set(); it was never mutated,
            but a frozenset avoids the shared-default pitfall.)
        verbose: if True, print the first offending attribute.
    Returns:
        True if any public attribute looks like mutable state.
    """
    if obj is None:
        return False
    for x in sorted(dir(obj)):
        if x.startswith('_') or x in safe:
            continue
        # A public attribute that is neither a property nor callable
        # is a plain data member, which callers could assign to.
        is_prop = isinstance(getattr(type(obj), x, None), property)
        is_call = callable(getattr(obj, x))
        if not is_prop and not is_call:
            if verbose: print(type(obj).__name__ + "." + x,
                              "looks like a DATA member")
            return True
        # Even a property is unsafe if the value it returns is mutable.
        if isMutable(getattr(obj, x), verbose=False, seen=set()):
            if verbose: print(type(obj).__name__ + "." + x,
                              "is of a MUTABLE type")
            return True
    return False
def dump(message, obj, verbose = False):
    """
    Print a one-line summary of every public attribute of obj.

    Each attribute is tagged "prop ", "call ", or "DATA ", with an
    extra "MUTABLE " prefix when its current value looks mutable.
    With verbose=True the attribute's docstring is shown as well and
    callables are printed with their value instead of just "name()".
    """
    if message:
        print(message)
    class Dummy:
        """(no doc)"""
    for name in sorted(dir(obj)):
        if name.startswith('_'):
            continue
        value = getattr(obj, name)
        if isinstance(getattr(type(obj), name, None), property):
            tag = "prop "
        elif callable(value):
            tag = "call "
        else:
            tag = "DATA "
        if isMutable(value, seen=set()):
            tag = "MUTABLE " + tag
        if verbose:
            doc = '\n' + str(getattr(obj.__class__, name, Dummy).__doc__)
            doc = doc.replace('\n', '\n\t\t')
            print('\t' + tag + name, "=", value, doc)
        elif not callable(value):
            print('\t' + tag + name, "=", value)
        else:
            print('\t' + tag + name + "()")
def createFancyBuffer(defaultvalue, unwrittenvalue):
    """
    Create test data as described elsewhere. This version saves the
    data in an in-memory numpy array making the code quite trivial.
    There is no point in writing the data in multiple operations
    because we aren't testing numpy.
    The caller needs to specify the default value that will be
    assigned to samples that were never written. Separate defaults
    may be given for unwritten samples inside a brick vs. bricks
    never written to at all. If these two differ this is arguably
    a bug in the implementation.
    """
    cube = np.full((112, 64, 176), defaultvalue, dtype=np.float32)
    cube[16:56, 16:57, 16:58] = 31       # region "A": 40 x 41 x 42
    cube[48:120, 20:30, 24:40] = 97      # region "B", clipped at dim0=112
    cube[:, :, 128:176] = unwrittenvalue # bricks never written at all
    return cube
def createFancyFile(filename, datatype, datarange, zgyWriterFactory, *, single_write = False, kwargs = dict()):
    """
    Create the standard test file used by many of the tests below.

    The layout of this test data is described in detail in doc/testdata.png
    The figure also explains how to compute the expected statistics by hand.
    As for computing the expected sample values, this is done by
    createFancyBuffer().
    * Create a ZGY file with size (112, 64, 176) which gives it a bricksize
      of 2x1x3. Other parameters vary.
    * Write an oddly sized rectangle "A" inside the first brick.
    * Write an oddly sized rectangle "B" covering two cubes and partly
      intersecting the first write, and also runs slightly into the
      padding area.
    * Write an all-zero region "C" that completely covers one brick and
      also covers a second brick completely apart from padding area
      outside the survey.
    Additional arguments such as "snr" can be passed as kwargs={"snr": 99},
    note that I have not declared the parameter as **kwargs so the dict
    must be created by hand. To make it more explicit what the extras are.
    Accounting for existing bugs:
    Several of the tests have arguments (defaultvalue,unwrittenvalue,countdead).
    - defaultvalue should be the value closest to 0 that can be represented.
    - unwrittenvalue ought to have been the same as defaultvalue, but with the
      old reader it might be 0 for float access and 0 converted to float for
      raw.
    - countdead should be True meaning unwritten samples are included in the
      statistics and the histogram, but if the file was created by the old
      writer then it needs to be set False.
    Future: If implementing alpha support (currently not the case) we will
    also need a file with alpha tiles set to the horizontal extent of the
    actual stored data. In this data set there will still be unwritten
    data at the tail end of each trace. Production code rarely does
    this though; the assumption is that all traces have the same length
    and that traces are written fully or not at all.
    Note that currently, neither the old ZGY-Public nor the new OpenZGY
    API can write alpha tiles. Only ZGY-Internal can do that. That API
    does not have any Python wrapper.
    """
    with zgyWriterFactory(filename,
                          iocontext = SDCredentials(),
                          size = (112, 64, 176),
                          datatype = datatype,
                          datarange = datarange,
                          zunitdim = UnitDimension.time,
                          zunitname = "ms",
                          zunitfactor = 0.001,
                          hunitdim = UnitDimension.length,
                          hunitname = "ft",
                          hunitfactor = 0.3048,
                          zstart = 2500,
                          zinc = 4.125,
                          annotstart = (1234, 5678),
                          annotinc = (5, 2),
                          corners = ((1000, 1000),
                                     (3775, 1000),
                                     (1000, 2890),
                                     (3775, 2890)),
                          **kwargs
                          ) as writer:
        expect_datarange_1 = datarange
        if datatype == SampleDataType.float and zgyWriterFactory != oldzgy.ZgyWriter:
            # The value is unspecified. It could be NaN if the file was never
            # flushed, or (0,0) if it was flushed before writing anything.
            # Or it could be the (likely not calculated yet) statistical
            # range if the code in api.ZgyMeta.datarange chooses to return
            # the statistical range instead.
            expect_datarange_1 = (0, 0)
        #dump(filename, writer)
        checkmeta(writer, datatype, expect_datarange_1)
        if single_write:
            # Read/modify/write is not allowed when writing compressed data,
            # or at least not recommended since noise will accumulate.
            writer.write((0, 0, 0), createFancyBuffer(0, 0))
        else:
            # Regions "A", "B" (overlapping A, running into padding), "C".
            writer.write((16,16,16), np.full((40,41,42), 31, dtype=np.float32))
            writer.write((48,20,24), np.full((72,10,16), 97, dtype=np.float32))
            writer.write((0,0,64), np.full((112,64,64), 0, dtype=np.float32))
        # Statistics haven't been computed yet, so datarange for float cubes
        # should still be returned as empty.
        checkmeta(writer, datatype, expect_datarange_1)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        expect_datarange_2 = datarange
        if datatype == SampleDataType.float:
            if True or zgyWriterFactory != oldzgy.ZgyWriter:
                # The value has been explicitly set to the statistical range
                # if written by the new writer. If api.ZgyMeta.datarange
                # chooses to return the statistical range instead then this
                # happens also for files written by the old accessor. The
                # second conditional should be disabled in that case.
                expect_datarange_2 = (reader.statistics.min, reader.statistics.max)
        checkmeta(reader, datatype, expect_datarange_2)
def checkmeta(meta, datatype = None, datarange = None):
    """
    Verify round trip of metadata. This can be used both by a writer
    (ensure the data we set is still available as properties) and a
    reader (ensure the roundtrip to a stored file and back worked).
    datatype and datarange are only checked when not None.
    """
    assert meta.size == (112, 64, 176)
    if datatype is not None:
        assert meta.datatype == datatype
    if datarange is not None:
        assert meta.datarange == datarange
    assert meta.raw_datarange == meta.datarange
    assert meta.zunitdim == UnitDimension.time
    assert meta.zunitname == "ms"
    assert abs(meta.zunitfactor - 0.001) < 1.0e-5
    assert meta.hunitdim == UnitDimension.length
    assert meta.hunitname == "ft"
    assert abs(meta.hunitfactor - 0.3048) < 0.0001
    assert meta.zstart == 2500
    assert abs(meta.zinc - 4.125) < 0.0001
    assert meta.annotstart == (1234, 5678)
    assert meta.annotinc == (5, 2)
    expect_corners = np.array(((1000, 1000),
                               (3775, 1000),
                               (1000, 2890),
                               (3775, 2890)))
    assert np.sum(np.abs(np.array(meta.corners) - expect_corners)) < 0.0001
def explaincontents(expect, actual, delta):
    """
    Detailed checking of a small part of the standard test cube.
    A single trace that covers many special cases. Show an explanation
    of what is being tested as well as expected vs. actual results.
    See doc/testdata.png. This method is meant to be used to understand
    why a particular test has failed.

    Args:
        expect: full cube of expected sample values.
        actual: full cube of values actually read back.
        delta:  largest absolute error over the entire cube.
    """
    # (begin, end, description) for each segment of the trace at [50,22,:].
    table = [(  0,  16, "default(r/m/w)"),
             ( 16,  24, "written once  "),
             ( 24,  40, "written twice "),
             ( 40,  58, "written once  "),
             ( 58,  63, "default(r/m/w)"),
             ( 64, 128, "constant-zero "),
             (128, 176, "default(empty)")]
    print("Displaying the trace at [50,22,:]")
    for beg, end, text in table:
        ex = expect[50,22,beg:end]
        ac = actual[50,22,beg:end]
        if np.amin(ex) == np.amax(ex) and np.amin(ac) == np.amax(ac):
            # All samples in this segment agree, print just one of each.
            # Bug fix: this used to print ac[1], which was inconsistent
            # with ex[0] and would raise IndexError on a 1-sample segment.
            print("  ", text, "expect", ex[0], "actual", ac[0])
        else:
            print("  ", text, "expect", ex, "actual", ac)
    print("  largest error in entire cube:", delta)
def checkContents(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, *, maxdelta = 0.001):
    """
    Read back the entire survey from one of the files created by
    createFancyFile() and compare with the expected results.
    Also check the metadata.
    """
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy():
        return
    expect = createFancyBuffer(defaultvalue, unwrittenvalue)
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader, io.StringIO() as bitbucket:
        # Exercise the debug log statements for coverage; discard output.
        def verbose(*args, **kwargs):
            print(*args, file=bitbucket, **kwargs)
        checkmeta(reader)
        actual = np.zeros((112, 64, 176), dtype=np.float32)
        reader.read((0,0,0), actual, verbose = verbose)
        delta = np.amax(np.abs(expect - actual))
        if not delta <= maxdelta:
            explaincontents(expect, actual, delta)
        assert delta <= maxdelta
def compareArrays(expect, actual, value_epsilon = 0.02, count_epsilon = 0.01, *, verbose = False):
    """
    Fuzzy comparison of two numpy arrays.

    Each sample's error is measured relative to the total value range
    of "expect"; samples whose relative error exceeds value_epsilon
    count as bad. The arrays are considered equal when the fraction of
    bad samples is at most count_epsilon. The slack is typically needed
    due to edge effects in lowres data. Returns True when they match.
    """
    span = np.amax(expect) - np.amin(expect)
    total = len(expect.flat)
    # Per-sample error relative to the expected range; guard against a
    # constant "expect" array (span == 0). Can technically be greater
    # than 1 if "actual" has wild values.
    relerr = np.abs(expect - actual) / (span if span else 1)
    nbad = np.count_nonzero(relerr > value_epsilon)
    ok = (nbad / total) <= count_epsilon
    if verbose:
        print("{5}: {0:6d} of {1:7d} samples ({2:.2f}%) differ > {3:.2f}%. Allowed {4:.2f}%.".format(
            nbad, total, 100.0 * nbad / total,
            100.0 * value_epsilon, 100.0 * count_epsilon,
            "pass" if ok else "FAIL"))
    return ok
def showdecimation(lod0, lod1):
    """
    Input 4 hires traces (2,2,n) and a corresponding decimated
    trace (n//2) and display those to manually inspect the result.
    """
    print("  decimated from these input samples")
    for pos in range(0, lod0.shape[2], 2):
        inputs = list(lod0[:, :, pos:pos+2].flat)
        print("{0:10.5g} {1}".format(lod1[pos//2], inputs))
def checkLodContents(filename, zgyReaderFactory, defaultvalue, unwrittenvalue):
    """
    As checkContents, but checks every level of detail in the file. We
    allow some slop in the result since the "expect" array uses trivial
    decimation while the zgy writer uses something fancier.
    NOTE: Due to bugs in the old writer, no checks are done for samples
    where the fullres data has never been written. I have given up on
    figuring out the current behavior; I just know that it is wrong.
    """
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        # Compute the number of LOD levels the survey size implies and
        # cross check it against what the reader claims.
        nlods = 1
        size = np.array(reader.size, dtype=np.int64)
        while np.any(size > reader.bricksize):
            nlods += 1
            size = (size + 1) // 2
        assert nlods == reader.nlods
        for lod in range(0, nlods):
            step = 1<<lod
            # Expected data: trivial (nearest-sample) decimation of the
            # synthetic cube, truncated at the edge of written data.
            expect = createFancyBuffer(defaultvalue, unwrittenvalue)
            expect = expect[:,:,:128] # Hard coded edge of written data.
            expect = expect[::step,::step,::step]
            size = (np.array(reader.size, dtype=np.int64) + (step-1)) // step
            size[2] = 128//step
            actual = np.zeros(size, dtype=np.float32)
            reader.read((0,0,0), actual, lod = lod)
            # Higher LODs are decimated more, so allow extra slop there.
            ok = compareArrays(expect, actual,
                               value_epsilon = 0.02 if lod < 2 else 0.04,
                               count_epsilon = 0.01 if lod < 2 else 0.03)
            if not ok:
                # Diagnostics only: visualize a 2d section and a single
                # trace in the "interesting" part of the survey.
                deltas = np.abs(expect - actual).astype(np.float64)
                # A single 2d section in the "interesting" part of the survey.
                actual_2d = actual[:,22//step,:]
                expect_2d = expect[:,22//step,:]
                deltas_2d = deltas[:,22//step,:]
                # A single trace in the "interesting" part of the survey.
                expect_1d = expect_2d[50//step,:]
                actual_1d = actual_2d[50//step,:]
                deltas_1d = deltas_2d[50//step,:]
                # Now visualize these for debugging
                savePNG(actual[:,22//step,:], "actual-" + str(lod) + ".png")
                savePNG(expect[:,22//step,:], "expect-" + str(lod) + ".png")
                savePNG(deltas[:,22//step,:], "deltas-" + str(lod) + ".png")
                print("\n{0} LOD {1} check: {2}".format(
                    filename, lod, ("pass" if ok else "FAIL")))
                print("Default", defaultvalue, "unwritten", unwrittenvalue)
                print("first sample expect {0} actual {1}".format(
                    expect[0,0,0], actual[0,0,0]))
                print("last sample expect {0} actual {1}".format(
                    expect[-1,-1,-1], actual[-1,-1,-1]))
                print("interesting trace expect", expect_1d,
                      "interesting trace actual", actual_1d,
                      "delta", deltas_1d,
                      sep="\n")
            assert ok
def checkRawContents(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, *, maxdelta = 0.001):
    """
    As checkContents, but do the storage-to-float value conversion
    ourselves instead of letting the reader do it.
    There may be issues with never written bricks.
    """
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy():
        return
    expect = createFancyBuffer(defaultvalue, unwrittenvalue)
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        numpy_types = {SampleDataType.int8:  np.int8,
                       SampleDataType.int16: np.int16,
                       SampleDataType.float: np.float32}
        dtype = numpy_types[reader.datatype]
        checkmeta(reader)
        actual = np.zeros((112, 64, 176), dtype=dtype)
        reader.read((0,0,0), actual)
        if np.issubdtype(dtype, np.integer):
            # Apply the linear transform from raw storage values to the
            # file's declared data range, as the reader would have done.
            info = np.iinfo(dtype)
            scale = (reader.datarange[1] - reader.datarange[0]) / (info.max - info.min)
            offset = reader.datarange[0] - scale * info.min
            actual = actual.astype(np.float32) * scale + offset
        delta = np.amax(np.abs(expect - actual))
        if not delta <= maxdelta:
            # Show a single trace in the "interesting" part of the survey.
            print("expect", expect[50,22,:])
            print("actual", actual[50,22,:])
            print("delta", delta)
        assert delta <= maxdelta
def computeStatisticsByRead(filename, zgyReaderFactory):
    """
    Read back the entire survey from one of the files created by
    createFancyFile() and compute statistics from the bulk data.
    Concentrate on sum of samples and count of samples.
    Also check the metadata.

    Returns:
        (sum, count) over all samples in the survey.
    """
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        checkmeta(reader)
        bulk = np.zeros((112, 64, 176), dtype=np.float32)
        reader.read((0,0,0), bulk)
        theSum = np.sum(bulk.flat, dtype=np.float64)
        theCount = len(bulk.flat)
        return theSum, theCount
def readStatisticsStoredInFile(filename, zgyReaderFactory):
    """
    Open the ZGY file and retrieve only the stored statistics
    information, returned as (sum, count).
    This is only supported in the new API.
    """
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        statistics = reader.statistics
        result = (statistics.sum, statistics.cnt)
    return result
def computeStatisticsByHand(defaultvalue, unwrittenvalue):
    """
    Compute the expected statistics of the standard test cube by hand.
    See doc/testdata.png for the layout being modeled here.

    Pass unwrittenvalue=None to indicate that samples inside bricks
    that were never written are excluded from the statistics.

    Returns:
        (sum, count, histogram) where histogram maps each distinct
        sample value to its expected occurrence count.
    """
    survey  = 112 * 64 * 176  # Total samples in survey, excluding padding.
    rect_a  = 40 * 41 * 42    # Rect A beg (16,16,16) end (56,57,58) value 31.
    rect_b  = 72 * 10 * 16    # Rect B beg (48,20,24) end (120,30,40) value 97.
    overlap = 8 * 10 * 16     # Overlap A/B, begin at (48,20,24).
    outside = 8 * 10 * 16     # B outside survey: begin at (128,30,40).
    dead    = 112 * 64 * 48   # Samples inside survey in never-written bricks.
    n31 = rect_a - overlap
    n97 = rect_b - outside
    ndefault = survey - n31 - n97 - dead
    theSum = (31 * n31 +
              97 * n97 +
              defaultvalue * ndefault +
              (unwrittenvalue or 0) * dead)
    theCount = survey if unwrittenvalue is not None else survey - dead
    if unwrittenvalue is None:
        theHist = {31: n31, 97: n97, defaultvalue: ndefault}
    elif defaultvalue == unwrittenvalue:
        theHist = {31: n31, 97: n97, defaultvalue: ndefault + dead}
    else:
        theHist = {31: n31, 97: n97,
                   defaultvalue: ndefault,
                   unwrittenvalue: dead}
    return theSum, theCount, theHist
def checkStatistics(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, countdead, *, maxdelta = 0.001):
    """
    Check that statistics computed by hand, computed by reading back
    the bulk data, and (new API only) stored in the file all agree.
    countdead=False means unwritten samples are excluded from the
    statistics stored in the file; needed for the old writer.
    """
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy():
        return
    byhand = computeStatisticsByHand(defaultvalue, unwrittenvalue)
    byread = computeStatisticsByRead(filename, zgyReaderFactory)
    matches = abs(byhand[0]-byread[0]) < maxdelta and byhand[1] == byread[1]
    if not matches:
        print("stat sum: byhand: {0}, byread {1}, maxdelta {2}, count byhand: {3} byread {4}".format(byhand[0], byread[0], maxdelta, byhand[1], byread[1]))
    assert matches
    if zgyReaderFactory is not oldzgy.ZgyReader:
        byhand = computeStatisticsByHand(defaultvalue, unwrittenvalue if countdead else None)
        byload = readStatisticsStoredInFile(filename, zgyReaderFactory)
        assert abs(byhand[0]-byload[0]) < maxdelta and byhand[1] == byload[1]
def findHistogramSlot(value, histrange):
    """
    Which slot this value belongs to in a 256-bin histogram.
    The result is guaranteed to be in the range [0..255];
    values outside histrange are clipped to slot 0 or 255. This is not
    how the actual histogram computation is done, but for the
    tests it should not make any difference.
    """
    lo, hi = histrange
    scaled = 255 * (value - lo) / (hi - lo)
    return int(np.rint(np.clip(scaled, 0, 255)))
def checkHistogram(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, countdead):
    """
    Check the histogram stored in the file against one computed by hand
    from the known contents of the standard test cube.
    Only files opened with the new API expose the histogram.
    countdead=False means unwritten samples are excluded; needed for
    files created by the old writer.
    """
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
    if zgyReaderFactory is not oldzgy.ZgyReader:
        with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
            stat = (reader.statistics.min, reader.statistics.max)
            hist = (reader.histogram.min, reader.histogram.max)
            data = (reader.datarange[0], reader.datarange[1])
            if False:
                print("checkHistogram:",
                      "stat", stat, "hist", hist, "data", data,
                      "type", reader.datatype.name)
            if reader.datatype == SampleDataType.float:
                # Float data written by the old writer currently writes
                # the histogram on the fly and may end up with a too wide
                # range. The new reader doesn't do this now but it might do
                # so in the future. Note that data == stat for float zgy.
                assert hist[0] <= data[0] and hist[1] >= data[1]
            else:
                assert math.isclose(hist[0],data[0]) and math.isclose(hist[1],data[1])
            assert reader.histogram.cnt == reader.statistics.cnt
            hist = reader.histogram
            # Fold the hand-computed value->count mapping into 256 bins.
            _, _, byhand = computeStatisticsByHand(defaultvalue, unwrittenvalue if countdead else None)
            expect_hist = np.zeros(256, dtype=np.int64)
            for value, expect in byhand.items():
                slot = findHistogramSlot(value, (hist.min, hist.max))
                expect_hist[slot] += expect
            for slot in range(256):
                actual = hist.bin[slot]
                expect = expect_hist[slot]
                if actual != expect:
                    # Bug fix: the old diagnostic also printed "value",
                    # which at this point was just the stale leftover of
                    # the previous loop and usually unrelated to "slot".
                    print("histogram slot", slot,
                          "expect", expect, "actual", actual)
                assert actual == expect
def isReaderOpen(reader):
    """
    Return True if the zgy file is open for read.
    There isn't a property for that in the API because
    typically this is only needed when testing.
    """
    probe = np.zeros((1, 1, 1), dtype=np.float32)
    try:
        reader.read((0,0,0), probe)
    except (RuntimeError, newzgy.ZgyUserError) as ex:
        # A closed reader is expected to complain "not open for ...".
        assert "ot open for" in str(ex)
        return False
    return True
def checkReadingDeadArea(filename, pos, zgyReaderFactory, expected):
    """
    Read a tiny 2x2x2 region at "pos", which is expected to lie in a
    never-written part of the file, and verify every sample comes back
    as "expected".
    """
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy():
        return
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        buf = np.full((2, 2, 2), 42, dtype=np.float32)
        reader.read(pos, buf)
        assert np.all(np.abs(buf - expected) < 0.001)
def checkReadingOutsideRange(filename, zgyReaderFactory):
    """
    read() and readconst() with a position far outside the survey must
    raise an exception rather than e.g. silently clipping.
    """
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy():
        return
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        buf = np.full((2, 2, 2), 42, dtype=np.float32)
        for bad_pos in ((0, 0, 10000), (0, 0, -9999)):
            with MustThrow("outside the valid range"):
                reader.read(bad_pos, buf)
            with MustThrow("outside the valid range"):
                reader.readconst(bad_pos, (2, 2, 2))
def checkReadingOutsideLod(filename, zgyReaderFactory):
    """
    read() and readconst() with a level-of-detail outside the valid
    range must raise an exception.
    """
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy():
        return
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        buf = np.full((2, 2, 2), 42, dtype=np.float32)
        for bad_lod in (-1, 9):
            with MustThrow("outside the valid range"):
                reader.read((0, 0, 0), buf, lod=bad_lod)
            with MustThrow("outside the valid range"):
                reader.readconst((0, 0, 0), (2, 2, 2), lod=bad_lod)
def checkReadingToWrongValueType(filename, zgyReaderFactory):
    """
    This was supposed to cover a test in readToExistingBuffer()
    but now the error is caught already in the API layer,
    which is already tested in testBadArgumentsOnReadWrite.
    Keeping the test here in case this changes back later.
    """
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy():
        return
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        wrong_type = np.full((2, 2, 2), 42, dtype=np.int16)
        #with MustThrow("conversion only supported"):
        with MustThrow("array of np.float32 or np.int8"):
            reader.read((0, 0, 0), wrong_type)
def hasSAuthToken():
    """
    Best-effort check that the configured SAuth token is present and
    not yet expired. Returns True when the token looks usable, and
    also (optimistically) when it cannot be decoded at all, since
    impersonation tokens that are still good to refresh fail decoding.
    """
    try:
        jwt = json.loads(base64.urlsafe_b64decode(SDCredentials().sdtoken.split(".")[1] + "====").decode("ascii"))
        print(json.dumps(jwt, indent=2, sort_keys=True))
        timeleft = jwt["exp"] - int(time.time())
        print("SAuth token has", timeleft // 60, "minutes to expiry")
        return timeleft > 0
    except (IOError, ValueError, IndexError, KeyError, TypeError, AttributeError):
        # Missing or malformed token, including "FILE:" tokens.
        # Bug fix: only IOError was caught before, so a token that was
        # not valid base64/JSON (ValueError), had no "." (IndexError),
        # lacked the "exp" claim (KeyError), or was not a dict/string
        # (TypeError/AttributeError) escaped as an unhandled exception
        # even though the intent was to treat it as "cannot tell".
        # Unfortunately, impersonation tokens that are still
        # good to refresh will also fail here.
        return True # optimist.
# ----- Separate tests, but needs testFancy() to create the test files. ----- #
def runCloseOnException(filename, zgyReaderFactory):
    """
    Test that the "with" guard is working properly.
    On leaving the scope the reader should be closed,
    even if we left via an exception.
    """
    class _Probe(Exception):
        pass
    try:
        # If the reader raises an exception in __init__ then "reader"
        # remains unassigned. While if we raise an exception ourselves
        # it gets caught at the same level but now with "reader" known.
        # No big deal as long as we *only* catch the probe exception.
        with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
            assert isReaderOpen(reader)
            raise _Probe("testing...")
    except _Probe:
        pass
    assert not isReaderOpen(reader)
def runErrorOnClose(filename, ZgyReaderFactory):
    """
    Only relevant for openzgy. Verify correct behavior when we exit
    the context manager due to an exception. For the old zgy wrapper
    there is no easy way of forcing an error to be thrown on close,
    so while I would like to have tested that one as well, I won't.

    NOTE(review): the ZgyReaderFactory parameter is currently unused;
    the function always tests newzgy.ZgyReader directly.
    """
    # Case 1: Exception was thrown from inside the block only.
    # Make sure the reader was closed. This peeks at internal data.
    try:
        message = ""
        with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
            raise RuntimeError("xyzzy")
    except Exception as ex:
        message = str(ex)
    assert message == "xyzzy"
    assert reader._fd is None
    # Case 2: Exception was thrown from the reader's close() method only.
    # Closing the low-level file and replacing _fd with a string makes
    # the reader's own close() fail with an AttributeError.
    try:
        message = ""
        with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
            reader._fd.xx_close()
            reader._fd = "oops"
    except Exception as ex:
        message = str(ex)
    assert message.find("object has no attribute") >= 0
    # Case 3: Exception was thrown from inside the block, then when handling
    # that exception another exception was thrown inside close().
    # The close() error is what propagates; the original exception must
    # still be reachable via implicit or explicit chaining.
    try:
        message1 = ""
        message2 = ""
        with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
            reader._fd.xx_close()
            reader._fd = "oops"
            raise RuntimeError("xyzzy")
    except Exception as ex:
        message1 = str(ex)
        message2 = str(ex.__cause__ or ex.__context__)
    assert message1.find("object has no attribute") >= 0
    assert message2 == "xyzzy"
def runConversions(filename, zgyReaderFactory):
    """
    Verify that coordinate conversion between index, annot, and world works.
    """
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as demo:
        # Annotation: annotstart (1234, 5678) with annotinc (5, 2).
        annot = demo.indexToAnnot((3, 7))
        assert annot == (1249, 5692)
        assert demo.annotToIndex(annot) == (3, 7)
        # World coordinates: origin at (1000, 1000), one step along the
        # first index axis is 25 units, along the second axis 30 units.
        for index, world in (((0, 0), (1000, 1000)),
                             ((1, 0), (1025, 1000)),
                             ((0, 1), (1000, 1030)),
                             ((3, 7), (1000 + 3*25, 1000 + 7*30))):
            w = demo.indexToWorld(index)
            assert w == world
            assert demo.worldToIndex(w) == index
        # Round trip annot <-> world as well.
        w = demo.annotToWorld(annot)
        assert w == (1000 + 3*25, 1000 + 7*30)
        assert demo.worldToAnnot(w) == (1249, 5692)
def runErrorIfNotOpenForRead(filename, zgyReaderFactory):
    """
    After close(), read() and (new API only) readconst() must fail
    with an error message saying the file is not open for read.
    """
    buf = np.zeros((1, 1, 1), dtype=np.float32)
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        reader.close()
        with MustThrow("ot open for read"):
            reader.read((0, 0, 0), buf)
        if zgyReaderFactory is not oldzgy.ZgyReader:
            with MustThrow("ot open for read"):
                reader.readconst((0, 0, 0), (1, 1, 1))
def runDumpToDevNull(filename, zgyReaderFactory):
    """
    Exercise the low-level metadata dump with output captured to an
    in-memory stream. Only verifies that something was written and
    that nothing crashed; the content itself is not checked.
    """
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        with io.StringIO() as stream:
            reader._meta.dumpRaw(file=stream)
            assert len(stream.getvalue()) > 0
def runClone(filename, templatename):
    """
    Create a new file using an existing one as template and verify the
    metadata was copied, both as seen by the writer and on read-back.
    """
    expect = (SampleDataType.int8, (-28, +227))
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), templatename=templatename) as writer:
        checkmeta(writer, *expect)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        checkmeta(reader, *expect)
def runUpdate(filename):
    """
    Re-open an existing file for update, using the file itself as the
    template, and verify the metadata survives both in the writer and
    when reading the result back.
    """
    expect = (SampleDataType.int8, (-28, +227))
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), templatename=filename) as writer:
        checkmeta(writer, *expect)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        checkmeta(reader, *expect)
def runDumpMembers(filename, templatename):
    """
    Verify that neither ZgyWriter nor ZgyReader exposes plain data
    members or members of mutable type, apart from the explicitly
    allowed "meta" attribute.
    """
    allowed = set(("meta",))
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), templatename=templatename) as writer:
        assert not hasMutableMembers(writer, safe=allowed, verbose=True)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        assert not hasMutableMembers(reader, safe=allowed, verbose=True)
# ----- Separately runnable tests, might need caller to clean up files. ----- #
def testRegisteredCompressors():
    """
    The ZFP algorithm must be registered both for compression and
    decompression, and asking for an unknown algorithm must raise
    ZgyMissingFeature with a helpful message.
    """
    assert "ZFP" in ZgyKnownCompressors()
    assert "ZFP" in ZgyKnownDecompressors()
    with MustThrow('"XYZZY" not recognized. Must be one of', ZgyMissingFeature):
        ZgyCompressFactory("XYZZY", snr=30)
def testProgressWithDots():
    """
    The dots-style progress bar prints a growing row of dots (51 for
    a full run) and a final newline, never erasing printed output.
    """
    with io.StringIO() as line:
        progress = ProgressWithDots(length=51, outfile=line)
        # (amount done out of 1000, expected total output so far);
        # None means check the output before any progress call.
        for done, expected in ((None, ""),
                               (0, "."),
                               (1, "."),
                               (500, "." * 26),
                               (999, "." * 50),
                               (1000, "." * 51 + "\n")):
            if done is not None:
                progress(done, 1000)
            assert line.getvalue() == expected
def testBadArgumentsOnCreate():
    """
    ZgyWriter must validate its arguments and raise ZgyUserError with
    a descriptive message, without creating anything on disk.
    """
    fname = "should-not-exist.zgy"
    try:
        os.remove(fname)
    except FileNotFoundError:
        pass
    # (expected error message, keyword arguments to ZgyWriter).
    cases = [
        ("size must be specified", {}),
        ("size must be at least 1", {"size": (10, 0, 20)}),
        ("bricksize must be specified in 3 dimensions",
         {"size": (10, 15, 20), "bricksize": (64, 64)}),
        ("bricksize must be >= 4 and a power of 2",
         {"size": (10, 15, 20), "bricksize": (64, 64, 48)}),
        ("datarange must be specified for integral types",
         {"size": (10, 15, 20), "datatype": SampleDataType.int8}),
        ("datarange must have min < max",
         {"size": (10, 15, 20), "datatype": SampleDataType.int8,
          "datarange": (3, 2)}),
        ("datarange must have min < max",
         {"size": (10, 15, 20), "datatype": SampleDataType.int8,
          "datarange": (3, 3)}),
        ("datarange must be finite",
         {"size": (10, 15, 20), "datatype": SampleDataType.int8,
          "datarange": (np.nan, np.nan)}),
    ]
    for message, kwargs in cases:
        with MustThrow(message, newzgy.ZgyUserError):
            with newzgy.ZgyWriter(fname, **kwargs):
                pass
    # The consistency checks should be done before actually creating the
    # file. Which means that the next call should fail.
    with MustThrow(None, FileNotFoundError):
        os.remove(fname)
def testBadArgumentsOnReadWrite(filename):
    """
    write() and read() must reject missing, non-numpy, wrongly typed,
    and wrongly shaped buffers; read() must additionally reject a
    buffer that is not writeable.
    """
    origin = (0, 0, 0)
    # One bad buffer per failure mode, exercised for both write and read.
    bad_buffers = (None,                                    # no data
                   [[[1,1,1]]],                             # not numpy data
                   np.array([[[1,1,1]]], dtype=np.int8),    # wrong data type
                   np.array([1,1,1], dtype=np.float32))     # wrong dimensions
    expect = "Expected a 3d numpy array of np.float32 or np.float32"
    with newzgy.ZgyWriter(filename, size=(10,15,20)) as w:
        for buf in bad_buffers:
            with MustThrow(expect):
                w.write(origin, buf)
    expect = "Expected a writeable 3d numpy array of np.float32 or np.float32"
    with newzgy.ZgyReader(filename) as r:
        for buf in bad_buffers:
            with MustThrow(expect):
                r.read(origin, buf)
        with MustThrow(expect): # buffer not writeable
            readonly = np.array([[[1,1,1]]], dtype=np.float32)
            readonly.setflags(write=False)
            r.read(origin, readonly)
def testAutoDelete():
    """
    Exercise the LocalFileAutoDelete context manager: it must remove
    the managed file on scope exit, complain when the expected file is
    missing (unless disarmed), and not mask an exception that is
    already in flight.
    """
    # It is an error if the expected file is missing.
    with MustThrow("", FileNotFoundError):
        with LocalFileAutoDelete("xyzzy", silent=True) as fn:
            pass
    # As above, but if some other error occurred that will have precedence.
    with MustThrow("", IndexError):
        with LocalFileAutoDelete("xyzzy", silent=True) as fn:
            foo = [][1]
    # No attempt is made to remove, if we explicitly disarmed.
    with LocalFileAutoDelete("xyzzy") as fn:
        assert "/tmp-" in fn.name or "\\tmp-" in fn.name or fn.name[:4] == "tmp-"
        fn.disarm()
    # Actually try creating the file. Auto cleanup happens.
    with LocalFileAutoDelete("xyzzy") as fn:
        assert "/tmp-" in fn.name or "\\tmp-" in fn.name or fn.name[:4] == "tmp-"
        myname = fn.name
        with open(fn.name, "w"):
            pass
        assert os.path.exists(myname)
    assert not os.path.exists(myname)
    # Two auto-delete files managed by a single ExitStack; both must
    # exist inside the stack and both must be gone after it unwinds.
    myname = [None, None]
    with ExitStack() as cleanup:
        fn1 = LocalFileAutoDelete("one")
        myname[0] = fn1.name
        cleanup.enter_context(fn1)
        with open(fn1.name, "w"):
            pass
        fn2 = LocalFileAutoDelete("two")
        myname[1] = fn2.name
        cleanup.enter_context(fn2)
        with open(fn2.name, "w"):
            pass
        assert os.path.exists(myname[0])
        assert os.path.exists(myname[1])
    assert not os.path.exists(myname[0])
    assert not os.path.exists(myname[1])
    # As above, but the second file is never created: its context raises
    # on exit, and the first file must still have been cleaned up.
    myname = [None, None]
    with MustThrow("", FileNotFoundError):
        with ExitStack() as cleanup:
            fn1 = LocalFileAutoDelete("one")
            myname[0] = fn1.name
            cleanup.enter_context(fn1)
            with open(fn1.name, "w"):
                pass
            fn2 = LocalFileAutoDelete("two", silent=True)
            myname[1] = fn2.name
            cleanup.enter_context(fn2)
            # I did not get around to creating the second file.
            # This means the fn2 context will raise an exception.
            # fn1 should still have been deleted though.
    assert not os.path.exists(myname[0])
def testHistogramRangeIsCenterNotEdge(filename):
    """
    When the histogram gets generated by the ZGY writer, the range gives
    the center value of bin 0 and the center value of bin 255. NOT the
    lowest value that maps to bin 0 and the highest value that maps to
    bin 255. Which would arguably also make sense. Verify that behavior.

    Note that the file is written with the old accessor and read back
    with the new one.
    """
    with oldzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          size = (64, 64, 64),
                          datatype = SampleDataType.float,
                          datarange =(0, 255),
                          zstart = 0, zinc = 4,
                          annotstart = (1, 1), annotinc = (1, 1),
                          corners = ((1000, 1000), (1630, 1000),
                                     (1000, 1630), (1630, 1630))
                          ) as writer:
        # With the 0..255 histogram range interpreted as the center of the
        # first and last bin, we have the following:
        # slot 0 is -0.5..+0.5, slot 2 is 1.5..2.5, slot 5 is 4.5..5.5
        # If we instead had a 0..256 histogram range interpreted as the
        # extreme edges of the first and last bin, we have this:
        # slot 0 is 0..1, slot 2 is 2..3, slot 5 is 5..6, slot 255: 255..256
        # That would still be approximately correct at least for the first
        # few bins when setting the histogram range to 0..255 instead of
        # 0..256. So if the histogram algorithm chooses to use the range
        # as the extreme limits (which it is NOT supposed to do),
        # 1.8 and 2.2 would end up in different slots. And 4.3 and 4.7
        # would end up in the same slot. It should be the other way around.
        #
        writer.write((0, 0, 0), np.full((1, 10, 10), 1.8, dtype=np.float32)) # 100 samples
        writer.write((1, 0, 0), np.full((1, 1, 1), 2.2, dtype=np.float32))   # 1 sample
        writer.write((2, 0, 0), np.full((1, 10, 5), 4.3, dtype=np.float32))  # 50 samples
        writer.write((3, 0, 0), np.full((1, 1, 2), 4.7, dtype=np.float32))   # 2 samples
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        #print(reader.histogram)
        assert math.isclose(reader.histogram.min, 0.0)
        assert math.isclose(reader.histogram.max, 255.0)
        # 1.8 and 2.2 share bin 2 (101 samples); 4.3 lands in bin 4
        # and 4.7 in bin 5, proving center-of-bin semantics.
        assert reader.histogram.bin[2] == 101
        assert reader.histogram.bin[4] == 50
        assert reader.histogram.bin[5] == 2
def testEmptyFile(filename, zgyWriterFactory = newzgy.ZgyWriter, zgyReaderFactory = newzgy.ZgyReader):
    """
    Write a ZGY file containing no bulk data at all, then read it back
    and verify everything comes up as zero. The writer- and reader
    factories are parameters so the test can in principle be run with
    any combination of the ZGY, OpenZGY/C++, and OpenZGY/Python
    accessors, both on-prem and on the cloud; with the current test
    framework only some of those combinations are easy to reach.
    """
    # The metadata is arbitrary; it only needs to be accepted on create.
    meta = dict(iocontext = SDCredentials(),
                size = (100, 200, 300),
                datatype = SampleDataType.float,
                datarange = (-1, 1),
                zunitdim = UnitDimension.time,
                zunitname = "ms",
                zunitfactor = 0.001,
                hunitdim = UnitDimension.length,
                hunitname = "ft",
                hunitfactor = 0.3048,
                zstart = 2500,
                zinc = 4.125,
                annotstart = (1234, 5678),
                annotinc = (5, 2),
                corners = ((1000, 1000),
                           (1005, 1000),
                           (1000, 1002),
                           (1005, 1002)))
    with zgyWriterFactory(filename, **meta) as writer:
        pass
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        # Prime the buffer with ones so a read that silently does
        # nothing would be caught by the assert.
        buf = np.ones(reader.size, dtype=np.float32)
        reader.read((0,0,0), buf)
        assert np.count_nonzero(buf) == 0
        # readconst is only available in the new reader.
        if zgyReaderFactory == newzgy.ZgyReader:
            assert reader.readconst((0,0,0), reader.size) == 0
def testEmptyExistingFile(filename, zgyReaderFactory = newzgy.ZgyReader):
    """
    Read a file already created by the old ZGY accessor with no bricks
    written and a degenerate coding range. Such a file can be made with
    the old ZGY-Public Python wrapper:
        with zgy.ZgyWriter("OldEmpty2.zgy", size=(512, 640, 1000),
                           datarange=(101,101), datatype="int16") as w: pass
    It can be left locally, or uploaded with ZGY or with sdutil; the
    latter is currently the most interesting case to test.
    """
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        if zgyReaderFactory != oldzgy.ZgyReader:
            # New accessor: the whole file should report as one constant.
            value = reader.readconst((0,0,0), reader.size, as_float=True)
        else:
            # Old accessor: read everything and check it is all the same.
            buf = np.ones(reader.size, dtype=np.float32)
            reader.read((0,0,0), buf)
            first = buf[0,0,0]
            value = first if np.all(buf.flat == first) else None
        # In spite of the 101..101 coding range, the file will contain
        # all zeros. In the new accessor the coding range is rejected
        # as bad, no conversion is done, so empty bricks read as zero.
        # In the old accessor there is a "feature" that cause empty
        # bricks to read as zero regardless of whether caller wants conversion.
        assert value == 0
def testRmwFile(filename, zgyWriterFactory = newzgy.ZgyWriter):
    """
    Exercise read/modify/write of partly overlapping regions, checking
    both the sample data read back and the byte sizes the writer
    produced after each write (per segment when writing to the cloud).
    The layout of this test data is described in detail in doc/testdata-rmw.png.
    """
    rmwsize = (((0,0,0), (304,64,384)),    # Survey size.
               ((0,0,192), (304,64,384)),  # Half the survey set to constant "1".
               ((28,0,84), (144,64,304)),  # Touches 12 bricks.
               ((40,0,100), (160,64,288)), # Touches 12 bricks.
               ((204,0,0), (216,64,384)),  # Tall, thin, to fill up this segment.
               ((52,0,120), (176,64,272)), # Touches 12 bricks.
               ((256,0,0), (304,64,352)),  # Constant-value at survey edge.
               ((0,0,256), (64,64,320)))   # Normal brick changed to constant.
    surveysize = rmwsize[0][1]
    # Build the expected end state in memory; later writes win.
    expect = np.zeros(surveysize, dtype=np.float32)
    partnum = 0
    for part in rmwsize[1:]:
        partnum += 1
        beg, end = part
        #print("part", part, "beg", beg, "end", end)
        expect[beg[0]:end[0],beg[1]:end[1],beg[2]:end[2]] = partnum
    with zgyWriterFactory(filename,
                          iocontext = SDCredentials(segsize=11/4),
                          size = surveysize,
                          datatype = SampleDataType.int8,
                          datarange = (-28,+227),
                          zunitdim = UnitDimension.time,
                          zunitname = "ms",
                          zunitfactor = 0.001,
                          hunitdim = UnitDimension.length,
                          hunitname = "ft",
                          hunitfactor = 0.3048,
                          zstart = 2500,
                          zinc = 4.125,
                          annotstart = (1234, 5678),
                          annotinc = (5, 2),
                          corners = ((1000, 1000),
                                     (1005, 1000),
                                     (1000, 1002),
                                     (1005, 1002))
                          ) as writer:
        partnum = 0
        sizes = [(0,)]
        for part in rmwsize[1:]:
            partnum += 1
            beg, end = part
            size = (end[0]-beg[0], end[1]-beg[1], end[2]-beg[2])
            #print("part", part, "beg", beg, "end", end, "size", size)
            if partnum == 1:
                # Just doing this to exercise both the write functions.
                data = np.full(size, partnum, dtype=np.float32)
                writer.write(beg, data)
            else:
                data = np.float32(partnum)
                writer.writeconst(beg, data, size=size, is_storage=False)
            # Record how much space the file (or each cloud segment)
            # occupies after every write.
            if filename[:5] == "sd://":
                closed_sizes = tuple(writer._fd._relay._sizes)
                opened_sizes = tuple([len(writer._fd._open_segment)])
                sizes.append(closed_sizes + opened_sizes)
            else:
                sizes.append((writer._fd.xx_eof,))
    #print(sizes)
    sizes_in_bricks = []
    for e in sizes:
        # Every recorded byte count must be properly aligned. The old
        # code checked this in a redundant doubly-nested loop (the inner
        # comprehension shadowed the loop variable); once per tuple is
        # enough.
        assert all((bytecount % 64) == 0 for bytecount in e)
        sizes_in_bricks.append(tuple(np.array(e, dtype=np.int64) // (256*1024)))
    # The expected results have been computed by hand.
    # See testdata-rmw.svg for a detailed explanation with figures.
    #print(sizes_in_bricks)
    local = filename[:5] != "sd://"
    assert sizes_in_bricks[1] == (( 1,) if local else (1, 0))
    assert sizes_in_bricks[2] == ((11,) if local else (1, 10))
    assert sizes_in_bricks[3] == ((11,) if local else (1, 10))
    assert sizes_in_bricks[4] == ((17,) if local else (1, 11, 5))
    assert sizes_in_bricks[5] == ((17,) if local else (1, 11, 11, 4))
    assert sizes_in_bricks[6] == ((18,) if local else (1, 11, 11, 5))
    assert sizes_in_bricks[7] == ((18,) if local else (1, 11, 11, 6))
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        # Read the entire survey, excluding padding bytes, in a single
        # operation. Compare with the survey built in memory.
        slurp = np.zeros(reader.size, dtype=np.float32)
        reader.read((0,0,0), slurp)
        assert np.all(slurp == expect)
        # Check each brick for whether it takes up space in the file or
        # is flagged as constant value. The expected result is explained
        # in the textual- and image description of the test data.
        is_const = np.zeros((5, 6), dtype=np.float32)
        for ii in range(0, 320, 64):
            for kk in range(0, 384, 64):
                c = reader.readconst((ii, 0, kk), (64, 64, 64))
                is_const[ii//64, kk//64] = -1 if c is None else c
        expect_const = np.array([[0, -1, -1, -1, -1, 1],
                                 [0, -1, 5, 5, -1, 1],
                                 [0, -1, -1, -1, -1, 1],
                                 [-1, -1, -1, -1, -1, -1],
                                 [6, 6, 6, 6, 6, -1]], dtype=np.float32)
        assert np.all(is_const == expect_const)
def testNoRmwInCompressedFile(filename):
    """
    Verify that a write requiring read/modify/write of a brick that has
    already been compressed is rejected, while writes that do not need
    r/m/w are allowed. Finally switch the writer to UpdateMode.Pedantic
    (poking a private attribute) and confirm a full overwrite succeeds.
    """
    lossy = ZgyCompressFactory("ZFP", snr=30)
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), size=(100, 64, 64), compressor=lossy) as w:
        # Writing a constant value should not prevent overwriting later.
        w.writeconst((0,0,0), value=42, size=w.size, is_storage=False)
        # Write part of a brick for the first time.
        data = np.arange(50*64*64, dtype=np.float32).reshape((50, 64, 64))
        w.write((0,0,0), data)
        # Write needing to update the first brick.
        with MustThrow("Updating a local BrickStatus.Compressed brick with Compressed data is illegal"):
            w.write((50,0,0), data)
        # The above error might have set the global _is_bad flag, in spite of
        # this being a recoverable user error. But it probably doesn't
        # matter much either way.
        w.errorflag = False
        # Write entire survey. This is an update, but no read/modify/write.
        # The old brick will be leaked if new one compresses larger.
        data = np.arange(100*64*64, dtype=np.float32).reshape((100, 64, 64))
        with MustThrow("Updating a local BrickStatus.Compressed brick with Compressed data is illegal"):
            w.write((0,0,0), data)
        w.errorflag = False
        # This should actually have been set when we opened the file,
        # that feature isn't implemented yet. Besides, for the purpose
        # of this test I need to change it while the file is in use.
        # NOTE(review): relies on the private _accessor._update_mode.
        w._accessor._update_mode = UpdateMode.Pedantic
        w.write((0,0,0), data)
def testFatalErrorFlag(filename):
    """
    Verify the global error flag: once a write fails with an I/O level
    error the writer refuses further writes until errorflag is
    explicitly reset. The failure is provoked by swapping the
    underlying Python file object for a dummy that only implements
    close(), so any write attempt raises AttributeError.
    """
    class BogusFile:
        # Only close() is provided; everything else raises AttributeError.
        def close(self): pass
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(), size=(100, 64, 64)) as w:
        data = np.arange(64*64*64, dtype=np.float32).reshape(64, 64, 64)
        w.write((0,0,0), data)
        w.write((0,0,0), data)
        # Swap in the broken file object to make the next write fail.
        hack = w._accessor._file._file
        w._accessor._file._file = BogusFile()
        with MustThrow("BogusFile", AttributeError):
            w.write((0,0,0), data)
        w._accessor._file._file = hack
        # File is now usable again, but the global error flag is set.
        with MustThrow("previous errors"):
            w.write((0,0,0), data)
        # Explicitly reset it and we should be good.
        w.errorflag = False
        w.write((0,0,0), data)
        # Another bad write
        w._accessor._file._file = BogusFile()
        with MustThrow("BogusFile", AttributeError):
            w.write((0,0,0), data)
        # Verify that lod generation and meta flush is either
        # turned off or is ignoring errors. The final close()
        # of the python file descriptor will not throw because
        # BogusFile wraps close().
        w.close()
    hack.close()
def testLargeSparseFile(filename, zgyWriterFactory, zgyReaderFactory):
    """
    Create a huge, almost empty file and verify that the few written
    samples survive in full resolution and (thanks to the Maximum
    decimation) in every level of detail. Either factory may be None
    to skip the corresponding half of the test.

    Note on the expected values: with int8 storage and datarange
    (-28,+227), user value = storage + 100, so samples that were never
    written (user value 0) read back as storage value -100.
    """
    size = (5000, 6000, 1000)
    wbeg = (1000, 9000)
    wend = (wbeg[0] + 10 * (size[0]-1), wbeg[1] + 10 * (size[1]-1))
    if zgyWriterFactory:
        with zgyWriterFactory(filename,
                              iocontext = SDCredentials(),
                              size = size,
                              datatype = SampleDataType.int8,
                              datarange = (-28,+227),
                              zunitdim = UnitDimension.time,
                              zunitname = "ms",
                              zunitfactor = 0.001,
                              hunitdim = UnitDimension.length,
                              hunitname = "ft",
                              hunitfactor = 0.3048,
                              zstart = 2500,
                              zinc = 4.125,
                              annotstart = (1234, 5678),
                              annotinc = (5, 2),
                              corners = ((wbeg[0], wbeg[1]),
                                         (wend[0], wbeg[1]),
                                         (wbeg[0], wend[1]),
                                         (wend[0], wend[1]))) as writer:
            # Three samples near the far corner of the survey.
            writer.write((size[0]-1, size[1]-1, 0), np.array([[[42, 10, 10]]], dtype=np.int8))
            writer.finalize(progress=ProgressWithDots(), decimation=[DecimationType.Maximum])
    if zgyReaderFactory:
        with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
            assert reader.size == size
            data = np.zeros((1,1,4), dtype=np.int8)
            pos = np.array((size[0]-1, size[1]-1, 0), dtype=np.int64)
            reader.read(pos, data, lod=0)
            assert tuple(data.flat) == (42, 10, 10, -100)
            # Maximum decimation keeps the 42; fewer written samples
            # survive at each successive lod.
            reader.read(pos//2, data, lod=1)
            assert tuple(data.flat) == (42, 10, -100, -100)
            for lod in range(2,8):
                reader.read(pos//(1<<lod), data, lod=lod)
                assert tuple(data.flat) == (42, -100, -100, -100)
def testNaan(filename, snr = -1):
    """
    Write bricks containing NaN and +/-Inf to a float file, both as bulk
    data and as explicit constant-value bricks, optionally compressed
    (snr > 0). Verify statistics, histogram, per-brick constant flags,
    full resolution readback, and Average-decimated lod 1, checking how
    non-finite values are counted or skipped.
    """
    compressor = ZgyCompressFactory("ZFP", snr = snr) if snr > 0 else None
    with newzgy.ZgyWriter(filename,
                          compressor = compressor,
                          iocontext = SDCredentials(),
                          size = (256, 128, 128),
                          datatype = SampleDataType.float) as writer:
        data = np.zeros((64, 64, 64), dtype=np.float32)
        # Running tallies of what was written, used to check the
        # statistics and histogram on read back.
        count_nan = 0
        count_inf = 0
        counts = np.zeros(256, dtype=np.int32)
        # Some NaN, a few other different values, mostly zero.
        data.fill(0)
        data[0,0,:3] = np.nan
        data[0,0,3] = 2
        data[0,0,4] = 3
        writer.write((0,0,0), data)
        count_nan += 3
        counts[2] += 1
        counts[3] += 1
        # Some NaN, only one other value (42)
        data.fill(42)
        data[0,0,:5] = np.nan
        writer.write((64,0,0), data)
        count_nan += 5
        counts[42] += (64*64*64) - 5
        # NaN only
        data.fill(np.nan)
        writer.write((128,0,0), data)
        count_nan += (64*64*64)
        # NaN explicitly written as constant value
        writer.writeconst((192, 0, 0), np.nan, (64, 64, 64), is_storage=False)
        count_nan += (64*64*64)
        # Now repeat for +/- inf
        # Some Inf, a few other different values. Mostly zero.
        data.fill(0)
        data[0,0,0] = np.inf
        data[0,0,1] = -np.inf
        data[0,0,2] = np.inf
        data[0,0,3] = 3
        data[0,0,4] = 4
        writer.write((0,64,0), data)
        count_inf += 3
        counts[3] += 1
        counts[4] += 1
        # Some Inf, only one other value (255).
        data.fill(255)
        data[0,0,:13] = np.inf
        data[0,1,:10] = -np.inf
        writer.write((64,64,0), data)
        count_inf += 23
        counts[255] = (64*64*64) - 23
        # +Inf only
        data.fill(np.inf) # 64^3 Inf
        writer.write((128,64,0), data)
        count_inf += (64*64*64)
        # -Inf explicitly written as constant value
        writer.writeconst((192, 64, 0), -np.inf, (64, 64, 64), is_storage=False)
        count_inf += (64*64*64)
        # Everything else in the survey, including the never-written
        # bricks, counts as zero.
        counts[0] = 256*128*128 - np.sum(counts[1:]) - count_nan - count_inf
        writer.finalize(decimation = [DecimationType.Average])
    # Exercise logging & debug code in the compression module.
    # Discard the output. Yes, this is a shameless trick to
    # increase coverage. But in Python a test that only checks
    # that a function is callable is in fact somewhat useful.
    if compressor is not None:
        with io.StringIO() as devnull:
            compressor.dump(msg=None, outfile=devnull,
                            text=True, csv=True, reset=True)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        # --- statistics and histogram ---
        #print(reader.statistics)
        #print(reader.histogram)
        #print(list(counts))
        #print("Expect total size", 256*128*128,
        #      "nan", count_nan,
        #      "inf", count_inf,
        #      "valid", 256*128*128 - count_nan - count_inf)
        #print("Got valid",
        #      "stats", reader.statistics.cnt,
        #      "histo", reader.histogram.cnt,
        #      "sampl", np.sum(reader.histogram.bin))
        # Limits are set automatically to the value range. I carefully
        # chose 0..255 since the histogram then has one bin per sample value.
        assert reader.histogram.min == 0 and reader.histogram.max == 255
        h = reader.histogram.bin
        for i in range(256):
            if counts[i] != h[i]:
                print("Histogram bin", i, "expected", counts[i], "actual", h[i])
        assert reader.statistics.cnt == 256*128*128 - count_nan - count_inf
        assert reader.histogram.cnt == 256*128*128 - count_nan - count_inf
        assert np.all(np.array(reader.histogram.bin) == counts)
        #assert reader.statistics.inf == count_nan + count_inf # not in api
        # --- bricks stored as all-constant or not ---
        BRICK = (64, 64, 64)
        assert reader.readconst((0,0,0), BRICK) is None
        assert reader.readconst((64,0,0), BRICK) is None
        assert np.isnan(reader.readconst((128,0,0), BRICK))
        assert np.isnan(reader.readconst((192,0,0), BRICK))
        assert reader.readconst((0,64,0), BRICK) is None
        assert reader.readconst((64,64,0), BRICK) is None
        assert reader.readconst((128,64,0), BRICK) == np.inf
        assert reader.readconst((192,64,0), BRICK) == -np.inf
        # -- read back samples ---
        reader.read((0,0,0), data)
        assert np.all(np.isnan(data[0,0,:3]))
        assert data[0,0,3] == 2
        assert data[0,0,4] == 3
        assert np.count_nonzero(data) == 5
        reader.read((64,0,0), data)
        assert np.all(np.isnan(data[0,0,:5]))
        assert np.count_nonzero(data == 42) == 64*64*64 - 5
        reader.read((0,64,0), data)
        assert data[0,0,0] == np.inf
        assert data[0,0,1] == -np.inf
        assert data[0,0,2] == np.inf
        assert data[0,0,3] == 3
        assert data[0,0,4] == 4
        assert np.count_nonzero(data) == 5
        reader.read((64,64,0), data)
        assert np.all(data[0,0,:13] == np.inf)
        assert np.all(data[0,1,:10] == -np.inf)
        assert np.count_nonzero(data == 255) == 64*64*64 - 13 - 10
        # --- read back low resolution ---
        # LOD1 should be sufficient to test.
        # Note that this only tests a single decimation algorithm
        # and the functions that call it. There needs to be separate
        # unit tests to verify that all decimation algorithms have a
        # reasonable behavior for nan and inf.
        fullres = np.zeros((128, 128, 128), dtype=np.float32)
        reader.read((0,0,0), fullres, lod=0)
        reader.read((0,0,0), data, lod=1)
        # Input first trace: nan, nan, nan, 2, 3
        # An extra slop factor is needed because calculation done in float32.
        assert math.isclose(data[0,0,0], 0, rel_tol=1.0e-5) # 2 NaN (skipped), the rest zero.
        assert math.isclose(data[0,0,1], 2/7, rel_tol=1.0e-5) # 1 NaN (skipped), 1 "2", rest "0"
        assert math.isclose(data[0,0,2], 3/8, rel_tol=1.0e-5) # one "3", rest default to zero
        # Input trace: 5*nan, rest is 42. With "Average" decimation
        # each output sample found at least one finite value.
        assert np.all(data[32:64, 0:32, 0:32] == 42)
        # Input trace: +inf, -inf, +inf, 3, 4. All others 0.
        # Note: The C++ code skips +/- inf. Numpy includes them unless
        # told otherwise, and the average of +inf and -inf is NaN.
        # These rules are pretty obscure and it is probably easier to
        # TODO-Low adopt the C++ strategy both places.
        #showdecimation(fullres[0:2,64:66,0:20], data[0,32,0:10])
        assert np.isnan(data[0,32,0])
        assert data[0,32,1] == np.inf
        assert math.isclose(data[0,32,2], 4/8, rel_tol=1.0e-5) # one "4", rest default to zero
        # Input trace: 13 * +inf in one trace, 10 * -inf in another.
        # So the first 5 samples have average(-inf,+inf) => nan
        # the next 2 samples have average(255,+inf) => +inf
        # Everything else should be 255.
        # UPDATE: In the C++ version (and soon also Python)
        # +/- inf is ignored so all decimated samples are 255.
        #showdecimation(fullres[64:66,64:66,0:20], data[32,32,0:10])
        assert np.all(np.isnan(data[32,32,:5]))
        assert data[32,32,5] == np.inf
        assert data[32,32,6] == np.inf
        assert data[32,32,7] == 255
        # Now read the brick built from all-constant input.
        reader.read((64,0,0), data, lod=1)
        d1 = data[:32,:32,:32] # from data written at (128,0,0)
        d2 = data[32:,:32,:32] # from data written at (192,0,0)
        d3 = data[:32,32:,:32] # from data written at (128,64,0)
        d4 = data[32:,32:,:32] # from data written at (192,64,0)
        assert np.all(np.isnan(d1))
        assert np.all(np.isnan(d2))
        assert np.all(d3 == np.inf)
        assert np.all(d4 == -np.inf)
def testWriteNaanToIntegerStorage(filename):
    """
    Write a buffer containing a single NaN to a file with int8 storage.
    The test passes as long as the writer accepts this without raising.
    """
    meta = dict(size = (256, 128, 128),
                iocontext = SDCredentials(),
                datatype = SampleDataType.int8,
                datarange = (-128,+127))
    brick = np.zeros((64, 64, 64), dtype=np.float32)
    brick[0,0,42] = np.nan
    with newzgy.ZgyWriter(filename, **meta) as writer:
        writer.write((0,0,0), brick)
def testZeroCentric(filename):
    """
    Specific test for the zero-centric property. With the hard coded
    (in this test) zero-centric datarange, rounding makes an equal
    number of small positive and small negative numbers come back as
    zero after a write/read roundtrip.
    """
    samples = np.array(
        [-1.4, -1.2, -1.0, -0.8, -0.6,
         -0.4, -0.2, +0.0, +0.2, +0.4,
         +0.6, +0.8, +1.0, +1.2, +1.4,
         100.0, 200.0], dtype=np.float32).reshape(1, 1, -1)
    rounded = np.array(
        [-1, -1, -1, -1, -1,
         0, 0, 0, 0, 0,
         1, 1, 1, 1, 1,
         100, 200], dtype=np.float32).reshape(1, 1, -1)
    with newzgy.ZgyWriter(filename,
                          iocontext = SDCredentials(),
                          size = (64, 64, 64),
                          datatype = SampleDataType.int8,
                          datarange = (-28,+227),
                          ) as writer:
        writer.write((0,0,0), samples)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        actual = np.zeros((1, 1, rounded.size), dtype=np.float32)
        reader.read((0,0,0), actual)
        assert np.all(np.isclose(rounded, actual))
def testFinalizeProgress(filename, abort = False):
    """
    Check the progress callback that can be installed while generating
    low resolution bricks. Optionally check that the callback can be
    used to abort the generation.

    filename -- file to create; contents are arbitrary test data.
    abort    -- if True the callback requests a stop at 25% done and
                a ZgyAborted exception is expected from finalize().
    """
    class Progress:
        """Track that the callback saw both 0% and 100% completion."""
        def __init__(self, abort = False):
            self._abort = abort
            self._complete = False
            self._waszero = False
        def __call__(self, done, total):
            self._complete = bool(done == total)
            self._waszero = self._waszero or done == 0
            #print("done {0}/{1}".format(done, total))
            # Bug fix: consult the flag stored on this instance. The old
            # code accidentally captured the enclosing function's "abort"
            # argument, leaving self._abort unused.
            return not self._abort or done < total//4
    with newzgy.ZgyWriter(filename,
                          iocontext = SDCredentials(),
                          size = (112+640, 64+320, 176),
                          ) as writer:
        # Sprinkle data around so several lod bricks are non-trivial.
        writer.write((16,16,16), np.full((40,41,42), 31, dtype=np.float32))
        writer.write((48,20,24), np.full((72,10,16), 97, dtype=np.float32))
        writer.write((0,0,64), np.full((112,64,64), 0, dtype=np.float32))
        writer.write((512,0,0), np.full((128,128,64), 42, dtype=np.float32))
        progress = Progress(abort)
        if abort:
            # The progress callback will return False on 25% done.
            with MustThrow(extypes = newzgy.ZgyAborted):
                writer.finalize(progress=progress)
            assert progress._waszero
            assert not progress._complete
        else:
            writer.finalize(progress=progress)
            assert progress._waszero
            assert progress._complete
def testHugeFile(filename):
    """
    Create a very sparse file where the declared size is large enough
    to make the header area > 1 MB. This can trigger some issues.
    Number of bricks:
       Lod 0: 64*64*32 bricks = 131072
       Lod 1: 32*32*16 bricks = 16384
       Lod 2: 16*16*8 bricks  = 2048
       Lod 3: 8*8*4 bricks    = 256
       Lod 4: 4*4*2 bricks    = 32
       Lod 5: 2*2*1 bricks    = 4
       Lod 6: 1*1*1 brick     = 1
       SUM: 149797 bricks, 1.14 Mb of brick lookup tables
    Rounded up to brick size there will be 1.25 MB of headers.
    Non-constant bricks: Only one per layer. 1.75 MB total
    Total file size: 3 MB.
    """
    with newzgy.ZgyWriter(filename,
                          iocontext = SDCredentials(),
                          datatype = SampleDataType.int8,
                          datarange = (-128,+127),
                          size = (64*64, 64*64, 32*64),
                          ) as writer:
        # 65 samples deep in z: fills one brick completely and spills
        # a single sample-slice into the next.
        writer.write((640,512,0), np.full((64,64,65), 42, dtype=np.float32))
        #writer.finalize(progress=ProgressWithDots())
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        assert reader.nlods == 7
        c1 = reader.readconst((640,512,0), (64,64,64))
        c2 = reader.readconst((640,512,64), (64,64,64))
        c3 = reader.readconst((640,512,129), (64,64,64))
        assert c1 == 42 # writer detected it was constant
        assert c2 is None # partly written
        assert c3 == 0 # never written
    # Matches the hand-computed 3 MB in the docstring.
    assert os.stat(filename).st_size == 3 * 1024 * 1024
def testDecimateOddSize(filename):
    """
    At the survey edge, the decimation that normally has 8 samples input
    might only have 4, 2, or 1. Make sure the code doesn't include
    the padding in its computation.
    """
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          size = (7, 13, 64+17)
                          ) as writer:
        # Checkerboard-ish pattern where every trace is constant in z.
        data = np.full(writer.size, 200, dtype=np.float32)
        data[0::2,:,:] = 100
        data[:,0::2,:] = 50
        assert np.all(data[:,:,:] == data[:,:,0:1])
        writer.write((0,0,0), data)
        writer.finalize(decimation = [DecimationType.Average])
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        assert reader.nlods == 2
        data = np.zeros((4, 7, 32+9), dtype=np.float32)
        reader.read((0,0,0), data, lod=1)
        # Within each trace all samples should be the same, also
        # the last one, since this is true also for the input.
        assert np.all(data[:,:,:] == data[:,:,0:1])
        # Most output values will be avg(200, 100, 50, 50) = 100.
        # At the edges in i/j it should be average(50, 100) or (50,50).
        # At the corner expect average(50) i.e. 50.
        # If the implementation erroneously tried to read the
        # padding (which ought to be zero) the numbers will be lower.
        # Currently in OpenZGY/C++ the samples not based on 8 neighbors
        # might be set to 0.
        assert np.all(data[:3, :6, :] == 100)
        assert np.all(data[:3, 6, :] == 50)
        assert np.all(data[3, :6, :] == 75)
        assert np.all(data[3, 6, :] == 50)
def testDecimateWeightedAverage(filename):
    """
    As test.lodalgo.testSpecial but very simplified, just to make sure
    the default lod2 algorithm is in fact WeightedAverage. The lod1
    default is LowPass; to avoid this getting in the way I will
    make all traces constant-value. This makes LowPass behave as
    Decimate (or Average, or Median, etc.)
    """
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          size = (64, 256, 512)
                          ) as writer:
        data = np.zeros((64, 64, 512), dtype=np.float32)
        # 1/4 brick of 300, 3/4 brick of 100, 3 bricks of unwritten 0.
        data[:16,:,:] = 300
        data[16:,:,:] = 100
        tiny = np.array([[300, 300, 0, 0],
                         [300, 300, 0, 0],
                         [0, 0, 100, 100],
                         [0, 0, 100, 100]], dtype=np.float32)
        # In lod 1 this will be just 300, 0, 0, 100
        tiny = tiny.reshape((4,4,1))
        data[:4,:4,:] = tiny
        # Every trace is constant in z, as promised in the docstring.
        assert np.all(data[:,:,:] == data[:,:,0:1])
        writer.write((0,0,0), data)
        #writer.finalize(decimation = [DecimationType.Average])
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        assert reader.nlods >= 3
        # Checking the lowpass output, including the fact that it is
        # supposed to have zero DC bias.
        data = np.zeros((2, 2, 256), dtype=np.float32)
        reader.read((0,0,0), data, lod=1)
        #print(data[:,:,0])
        assert np.all(np.isclose(data[0,0,:], 300))
        assert np.all(np.isclose(data[0,1,:], 0))
        assert np.all(np.isclose(data[1,0,:], 0))
        assert np.all(np.isclose(data[1,1,:], 100))
        data = np.zeros((1, 1, 1), dtype=np.float32)
        reader.read((0,0,0), data, lod=2)
        # average(300, 0, 0, 100) is 100 but we expect something closer to
        # 300 since this value is relatively more scarce.
        #print(data)
        assert data.flat[0] > 200
def testMixingUserAndStorage(filename):
    """
    When the file has an integer type both reading and writing can be done
    either in float user sample values or in integral storage values.
    Try all 4 combinations. The buffer dtype (float32 vs. int8) is what
    selects user- vs. storage mode on both write and read.
    """
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          datatype = SampleDataType.int8, datarange = (-2,+763),
                          size = (64, 64, 512)
                          ) as writer:
        # user = 3*storage + 382
        # storage = (user - 382) / 3
        # user 3  -> storage -126.33 -> -126 -> user 4
        # user 12 -> storage -123.33 -> -123 -> user 13
        # user 40 -> storage -114
        # user 71 -> storage -103.66 -> -104 -> user 70
        w1 = np.zeros((64, 64, 64), dtype=np.float32)
        w2 = np.zeros((64, 64, 64), dtype=np.float32)
        w3 = np.zeros((64, 64, 64), dtype=np.int8)
        w4 = np.zeros((64, 64, 64), dtype=np.int8)
        w1[0,0,0] = 3.0  # user 4 <-> storage -126
        w2[0,0,0] = 12.0 # user 13 <-> storage -123
        w3[0,0,0] = -114 # user 40 <-> storage -114
        w4[0,0,0] = -104 # user 70 <-> storage -104
        writer.write((0,0,0), w1)
        writer.write((0,0,64), w2)
        writer.write((0,0,128), w3)
        writer.write((0,0,192), w4)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        # Read back alternating in user (float32) and storage (int8) mode.
        r1 = np.zeros((1, 1, 1), dtype=np.float32)
        r2 = np.zeros((1, 1, 1), dtype=np.int8)
        r3 = np.zeros((1, 1, 1), dtype=np.float32)
        r4 = np.zeros((1, 1, 1), dtype=np.int8)
        reader.read((0,0,0), r1)
        reader.read((0,0,64), r2)
        reader.read((0,0,128), r3)
        reader.read((0,0,192), r4)
        #print("expect", 4.0, -123, 40.0, -114)
        #print("actual", r1.flat[0], r2.flat[0], r3.flat[0], r4.flat[0])
        assert np.isclose(r1.flat[0], 4.0)
        assert r2.flat[0] == -123
        assert np.isclose(r3.flat[0], 40.0)
        assert r4.flat[0] == -104
def testSmallConstArea(filename):
    """
    Check what happens when writeconst() is called with a region
    smaller than one brick. Application code might well specify
    a region that doesn't align with the bricks. Actually writing
    less than a brick in total would be odd, but the corner cases
    that need to be handled are similar.
    """
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          datatype = SampleDataType.int8, datarange = (-128,+127),
                          size = (64, 64, 256)
                          ) as writer:
        # Mark the upper half of the file (2 bricks) as constant 42.
        writer.writeconst((0,0,128), 42, size=(64,64,128), is_storage=True)
        # unwritten brick, value matches defaultvalue -> mark as const
        # unwritten brick, value does not match default -> inflate
        # const brick, value matches previous brick -> no-op
        # const brick, value differs -> inflate
        writer.writeconst((1,2,3+0), 0, size=(11,12,13), is_storage=True)
        writer.writeconst((1,2,3+64), 15, size=(11,12,13), is_storage=True)
        writer.writeconst((1,2,3+128), 42, size=(11,12,13), is_storage=True)
        writer.writeconst((1,2,3+192), 67, size=(11,12,13), is_storage=True)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        BRICK = (64,64,64)
        r1 = reader.readconst((0,0,0), BRICK, as_float = False)
        r2 = reader.readconst((0,0,64), BRICK, as_float = False)
        r3 = reader.readconst((0,0,128), BRICK, as_float = False)
        r4 = reader.readconst((0,0,192), BRICK, as_float = False)
        #print("testSmallConstArea:", r1, r2, r3, r4)
        assert r1 == 0     # Was converted from "unwritten" to "const zero"
        assert r2 is None  # Brick now contains a mix of 0 and 15.
        assert r3 == 42    # No-op; the brick already contained const 42.
        assert r4 is None  # Brick now contains a mix of 42 and 67.
# Record returned by testHistoOneValue: the coding range, the statistics
# range, the histogram range, the statistics sample count, the histogram
# sample count, and the raw histogram bins.
onevalue_t = namedtuple(
    "result", ["range", "stats", "histo", "stats_count", "histo_count", "bins"])
def testHistoOneValue(filename, dtype, value, fill, *, datarange = None, verbose = False):
    """
    Helper for the histogram corner case tests. Write a single constant
    value to the first of three bricks (or nothing at all if value is
    NaN), optionally also filling the remaining two bricks, then return
    the ranges and counts seen by the reader as a onevalue_t.
    finalize(force=True) makes the statistics and histogram get written
    out even when no bulk data was.
    """
    if verbose:
        print("Test dtype", dtype, "value", value,
              ("only" if fill else "and unwritten bricks"))
    # For the nothing-written case pick an arbitrary coding range.
    center = value if np.isfinite(value) else -0.25
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          size = (64, 64, 3*64),
                          datatype = dtype,
                          datarange = datarange or (center-1, center+1)
                          ) as writer:
        if np.isfinite(value):
            writer.writeconst((0, 0, 0), value,
                              size=(64, 64, 64), is_storage=False)
            if fill:
                writer.writeconst((0, 0, 64), value,
                                  size=(64, 64, 128), is_storage=False)
        writer.finalize(force=True)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        if verbose:
            print("Data range", reader.datarange)
            print("Statistics", reader.statistics)
            print("Histogram ", (reader.histogram.min, reader.histogram.max))
        return onevalue_t((reader.datarange[0], reader.datarange[1]),
                          (reader.statistics.min, reader.statistics.max),
                          (reader.histogram.min, reader.histogram.max),
                          reader.statistics.cnt,
                          np.sum(reader.histogram.bin),
                          reader.histogram.bin)
def testHistoCornercaseFloat(filename):
    """
    Verify histogram- and statistics behavior for float files in the
    corner cases where nothing, only zeros, or a single value was
    written. The bracketed numbers refer to the cases enumerated in
    GenLodImpl::suggestHistogramRange().
    """
    # Float: datarange with zero size is valid on input,
    # in fact the data range isn't specified by the user.
    # Reading back data gives the statistical range
    # which for float may include defaultvalue.
    # The histogram will use the fuzzy algorithm.
    # The numbers in brackets correspond to the ones in
    # GenLodImpl::suggestHistogramRange().
    # [3] nothing written.
    # Note that the writer might need to pass force=True to finalize()
    # to get the histogram- and statistics information written out even
    # when no actual data has been written. I am unsure about how the
    # principle of least surprise applies here. As of Oct 2020 the force
    # is required. See the ZgyWriter constructor setting _dirty(False).
    BRICK = 64*64*64
    r = testHistoOneValue(filename, SampleDataType.float, np.nan, False)
    assert r.range == r.stats
    assert r.histo_count == r.stats_count
    assert r.stats == (0, 0)
    assert r.histo == (-128, +127)
    assert r.stats_count == 3*BRICK # Assuming finalize with force=True
    assert r.bins[128] == r.histo_count
    # [4] one all zero brick, two never written.
    # Expected result same as for nothing written.
    r = testHistoOneValue(filename, SampleDataType.float, 0, False)
    assert r.range == r.stats
    assert r.histo_count == r.stats_count
    assert r.stats == (0, 0)
    assert r.histo == (-128, +127)
    assert r.stats_count == 3*BRICK
    assert r.bins[128] == r.histo_count
    # [4] three all zero bricks.
    # Expected result same as for nothing written.
    r = testHistoOneValue(filename, SampleDataType.float, 0, True)
    assert r.range == r.stats
    assert r.histo_count == r.stats_count
    assert r.stats == (0, 0)
    assert r.histo == (-128, +127)
    assert r.stats_count == 3*BRICK
    assert r.bins[128] == r.histo_count
    # [6] single negative value, plus two never written bricks.
    # The statistics and histogram include the never-written
    # samples as if they were zero.
    # Note: I won't be testing the "some never written" scenario
    # for every remaining case; it is hopefully enough to
    # confirm once that never-written is treated as written-zero.
    r = testHistoOneValue(filename, SampleDataType.float, -42, False)
    assert r.range == r.stats
    assert r.histo_count == r.stats_count
    assert r.stats == (-42, 0)
    assert r.histo == (-42, 0)
    assert r.stats_count == 3*BRICK
    assert r.bins[0] == BRICK
    assert r.bins[255] == 2*BRICK
    # [6] single negative value in all three bricks.
    # The value range and the statistics should have the True
    # range i.e. low==high and the histogram range should be wider.
    r = testHistoOneValue(filename, SampleDataType.float, -42, True)
    assert r.range == r.stats
    assert r.histo_count == r.stats_count
    assert r.stats == (-42, -42)
    assert r.histo == (-42, 0)
    assert r.stats_count == 3*BRICK
    assert r.bins[0] == 3*BRICK
    assert r.bins[255] == 0
    # [6] single positive value in all three bricks.
    # Result similar to the above but the ranges are swapped.
    r = testHistoOneValue(filename, SampleDataType.float, +42, True)
    assert r.range == r.stats
    assert r.histo_count == r.stats_count
    assert r.stats == (42, 42)
    assert r.histo == (0, 42)
    assert r.stats_count == 3*BRICK
    assert r.bins[0] == 0
    assert r.bins[255] == 3*BRICK
def testHistoCornercaseInt(filename):
    # Integral data.
    # The histogram range should always equal the range the user
    # supplied, which is -1.25 to +0.75 for never-written data and
    # value +/- 1 for the remaining cases.  This means the written
    # value is not exactly representable as an integer (it maps to
    # -0.5) which will be visible in the statistics.  The 0.5 factor
    # may also lead to numerical instability; the samples end up in
    # either bin 127 or bin 128.
    # Also, unlike the float case, the range might be wider than the
    # statistics if not all possible storage values have been used.
    BRICK = 64*64*64

    def invariants(r):
        # Properties that must hold for any integral file.
        assert r.range[0] <= r.stats[0] and r.range[1] >= r.stats[1]
        assert r.histo == r.range
        assert r.histo_count == r.stats_count

    def single_value(r, value, quant, lo, hi, center):
        # Common checks for a cube holding a single (quantized) value.
        # quant scales the acceptable quantization error: 1 for int8,
        # 256 for int16.  lo/hi is the expected histogram range and
        # center the expected total of the two middle bins.
        invariants(r)
        assert r.stats[0] == r.stats[1]
        assert abs(r.stats[0] - value) < 0.25/quant
        assert abs(r.stats[0] - value) > 0.001/quant # value itself not representable.
        assert r.histo[0] == lo and r.histo[1] == hi # user choice exactly.
        assert r.stats_count == 3*BRICK # Assuming finalize with force=True
        assert r.bins[127] + r.bins[128] == center

    # int8 cases.  For the never-written file I don't really care
    # where the "0" samples end up, except it won't be the center.
    r = testHistoOneValue(filename, SampleDataType.int8, np.nan, False)
    single_value(r, 0, 1, -1.25, 0.75, 0)
    r = testHistoOneValue(filename, SampleDataType.int8, 0, True)
    single_value(r, 0, 1, 0-1, 0+1, 3*BRICK)
    r = testHistoOneValue(filename, SampleDataType.int8, -42, True)
    single_value(r, -42, 1, -42-1, -42+1, 3*BRICK)
    r = testHistoOneValue(filename, SampleDataType.int8, +42, True)
    single_value(r, 42, 1, 42-1, 42+1, 3*BRICK)

    # 16 bit not much different from 8 bit, but the statistics will be
    # closer to the supplied value because the quantization error is
    # smaller.
    r = testHistoOneValue(filename, SampleDataType.int16, np.nan, False)
    single_value(r, 0, 256, -1.25, 0.75, 0)
    r = testHistoOneValue(filename, SampleDataType.int16, 0, True)
    single_value(r, 0, 256, 0-1, 0+1, 3*BRICK)
    r = testHistoOneValue(filename, SampleDataType.int16, -42, True)
    single_value(r, -42, 256, -42-1, -42+1, 3*BRICK)
    r = testHistoOneValue(filename, SampleDataType.int16, +42, True)
    single_value(r, 42, 256, 42-1, 42+1, 3*BRICK)

    # Behavior when all explicitly written values get clipped.
    # Expect both the histogram and the statistics to only reflect
    # the clipped value (-5) as if that value and not -42 had been
    # written.
    r = testHistoOneValue(filename, SampleDataType.int8, -42, True,
                          datarange = (-5, +760))
    invariants(r)
    assert r.stats == (-5, -5)
    assert r.histo == (-5, +760)
    assert r.stats_count == 3*BRICK
    assert r.bins[0] == 3*BRICK

    # As above, all explicitly written values get clipped but now
    # there are a few unwritten bricks.  The defaultvalue is +1
    # because the range does not give a zero centric histogram, so
    # expect +1 to be part of the statistics range as well.
    r = testHistoOneValue(filename, SampleDataType.int8, -42, False,
                          datarange = (-5, +760))
    invariants(r)
    assert r.stats == (-5, +1)
    assert r.histo == (-5, +760)
    assert r.stats_count == 3*BRICK
    assert r.bins[0] == BRICK
    assert r.bins[2] == 2*BRICK

    # Similar to the above but no values written at all.
    # The defaultvalue is still 1 due to the missing zero-centric
    # property, and that is what the statistics should reflect.
    r = testHistoOneValue(filename, SampleDataType.int8, np.nan, False,
                          datarange = (-5, +760))
    invariants(r)
    assert r.stats == (+1, +1)
    assert r.histo == (-5, +760)
    assert r.stats_count == 3*BRICK
    assert r.bins[2] == 3*BRICK
def testFancyDefaultValue():
    """
    Part of the test suite using the same test data stored in different ways.
    Check what happens when reading samples that were never written.
    The rectangles probed are:
      a) dead area of a partly written brick,
      b) part dead area, part all-constant brick,
      c) all-constant brick,
      d) part all-constant brick, part unwritten brick,
      e) unwritten brick.
    The new reader should return the default value in every case.
    The old reader might throw a missing brick exception for the last
    case; it does in the C++ ZGY-Public API but the Python wrapper
    catches it.  For the penultimate case it might read zero from the
    unwritten area while still seeing the default (1 here) elsewhere.
    Also check reading completely outside range.  The new accessor
    should raise exceptions; the old one does whatever it feels like.
    """
    with LocalFileAutoDelete("fancy-2.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.int8, (-2,+763),
                        newzgy.ZgyWriter)
        positions = ((5, 22, 1), (5, 22, 63), (5, 22, 65),
                     (5, 22, 127), (5, 22, 129))
        old_expect = (1, 1, 1,
                      np.array([[[1, 0],[1, 0]],[[1, 0],[1, 0]]]),
                      0)
        for pos, expect in zip(positions, old_expect):
            checkReadingDeadArea(fn.name, pos, oldzgy.ZgyReader, expect)
        #checkReadingOutsideRange(fn.name, oldzgy.ZgyReader)
        #checkReadingOutsideLod(fn.name, oldzgy.ZgyReader)
        #checkReadingToWrongValueType(fn.name, oldzgy.ZgyReader)
        for pos in positions:
            checkReadingDeadArea(fn.name, pos, newzgy.ZgyReader, 1)
        checkReadingOutsideRange(fn.name, newzgy.ZgyReader)
        checkReadingOutsideLod(fn.name, newzgy.ZgyReader)
        checkReadingToWrongValueType(fn.name, newzgy.ZgyReader)
def testFancyReadConstant():
    """
    Test the new API in openzgy that reports brick status (readconst).
    """
    with LocalFileAutoDelete("fancy-2.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.int8, (-2,+763),
                        newzgy.ZgyWriter)
        with newzgy.ZgyReader(fn.name, iocontext = SDCredentials()) as reader, io.StringIO() as bitbucket:
            verbose = lambda *args, **kwargs: print(*args, file=bitbucket, **kwargs)
            # The data inside this small rectangle is constant, but the
            # enclosing brick is not.  So it won't be flagged const.
            fval = reader.readconst((17,17,17), (2,2,2), as_float = True, verbose=verbose)
            ival = reader.readconst((17,17,17), (2,2,2), as_float = False)
            assert fval is None
            assert ival is None
            # Here the enclosing brick was explicitly written with
            # constant value 0, which reads back as 1 because the
            # range is not zero centric.
            fval = reader.readconst((1,2,67), (4,5,6), as_float = True)
            ival = reader.readconst((1,2,67), (4,5,6), as_float = False)
            assert math.isclose(fval, 1.0)
            assert math.isclose(ival, -127)
            # Brick written as constant value 0, but only the region
            # inside the survey.  Whether this registers as "constant"
            # may be an implementation detail; ideally it ought to.
            fval = reader.readconst((65,2,67), (4,5,6), as_float = True)
            ival = reader.readconst((65,2,67), (4,5,6), as_float = False)
            assert math.isclose(fval, 1.0)
            assert math.isclose(ival, -127)
            # Two bricks never written, two with constant value 0.
            fval = reader.readconst((0,0,64), (128,64,128), as_float = True)
            ival = reader.readconst((0,0,64), (128,64,128), as_float = False)
            assert math.isclose(fval, 1.0)
            assert math.isclose(ival, -127)
def testFancyMisc():
    """
    Part of the test suite using the same test data stored in different ways.
    """
    with LocalFileAutoDelete("fancy-1.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
                        newzgy.ZgyWriter)
        # These don't really belong here but reuse the test file
        # instead of bothering to create one of their own.
        for check in (runCloseOnException, runErrorOnClose, runConversions,
                      runErrorIfNotOpenForRead, runDumpToDevNull):
            check(fn.name, newzgy.ZgyReader)
        if HasOldZgy():
            for check in (runCloseOnException, runConversions,
                          runErrorIfNotOpenForRead):
                check(fn.name, oldzgy.ZgyReader)
        with LocalFileAutoDelete("fancy-1-clone.zgy") as cloned:
            runClone(cloned.name, fn.name)
            runUpdate(cloned.name)
            runDumpMembers(cloned.name, fn.name)
def testFancy1():
    """
    Part of the test suite using the same test data stored in different ways.
    OpenZGY writer, both OpenZGY and ZGY-Public reader, local file, int8.
    The coding range is asymmetric but zero centric.
    """
    with LocalFileAutoDelete("fancy-1.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
                        newzgy.ZgyWriter)
        # (check function, args for old reader, args for new reader).
        # The odd checkRawContents expectation for the old reader
        # reveals a bug in ZGY-Public.
        checks = ((checkContents, (0, 0), (0, 0)),
                  (checkLodContents, (0, 0), (0, 0)),
                  (checkRawContents, (0, 100), (0, 0)),
                  (checkStatistics, (0, 0, True), (0, 0, True)),
                  (checkHistogram, (0, 0, True), (0, 0, True)))
        for check, oldargs, newargs in checks:
            check(fn.name, oldzgy.ZgyReader, *oldargs)
            check(fn.name, newzgy.ZgyReader, *newargs)
def testFancy2():
    """
    Part of the test suite using the same test data stored in different ways.
    OpenZGY writer, both OpenZGY and ZGY-Public reader, local file, int8.
    Unlike #1 the coding range is not zero centric, so 0 cannot be
    represented.  What can be stored is -2, +1, +4, ..., +763, i.e.
    only values 3*n+1.  So my sample data values 31 and 301 are
    representable, but zero is not.
    """
    with LocalFileAutoDelete("fancy-2.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.int8, (-2,+763),
                        newzgy.ZgyWriter)
        # (check function, args for old reader, args for new reader).
        # The odd checkRawContents expectation for the old reader
        # reveals a bug in ZGY-Public.
        checks = ((checkContents, (1, 0), (1, 1)),
                  (checkLodContents, (1, 0), (1, 1)),
                  (checkRawContents, (1, 382), (1, 1)),
                  (checkStatistics, (1, 0, True), (1, 1, True)),
                  (checkHistogram, (1, 0, True), (1, 0, True)))
        for check, oldargs, newargs in checks:
            check(fn.name, oldzgy.ZgyReader, *oldargs)
            check(fn.name, newzgy.ZgyReader, *newargs)
def testFancy3():
    """
    Part of the test suite using the same test data stored in different ways.
    OpenZGY writer, both OpenZGY and ZGY-Public reader, local file, int16.
    Unlike #1 and #2 zero is not included in the coding range.
    The closest representable value to zero is +20.
    The valuetype is now int16 instead of int8 for variation.
    """
    with LocalFileAutoDelete("fancy-3.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.int16, (+20,+16403.75),
                        newzgy.ZgyWriter)
        # (check function, args for old reader, args for new reader).
        checks = ((checkContents, (20, 0), (20, 20)),
                  (checkLodContents, (20, 0), (20, 20)),
                  (checkRawContents, (20, 8212), (20, 20)),
                  (checkStatistics, (20, 0, True), (20, 20, True)),
                  (checkHistogram, (20, 0, True), (20, 20, True)))
        for check, oldargs, newargs in checks:
            check(fn.name, oldzgy.ZgyReader, *oldargs)
            check(fn.name, newzgy.ZgyReader, *newargs)
def testFancy4():
    """
    Part of the test suite using the same test data stored in different ways.
    OpenZGY writer, both OpenZGY and ZGY-Public reader, local file, float32.
    Bad coding range hint.
    The coding range for float cubes is just a hint that might be used
    for the histogram range.  Or it might be completely ignored if the
    histogram is written during a separate pass where the exact range
    is already known.
    """
    with LocalFileAutoDelete("fancy-4.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.float, (-1,+1),
                        newzgy.ZgyWriter)
        for check in (checkContents, checkLodContents, checkRawContents):
            check(fn.name, oldzgy.ZgyReader, 0, 0)
            check(fn.name, newzgy.ZgyReader, 0, 0)
        for check in (checkStatistics, checkHistogram):
            check(fn.name, oldzgy.ZgyReader, 0, 0, True)
            check(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy5():
    """
    Part of the test suite using the same test data stored in different ways.
    Unlike 1..4, this uses the old ZGY-Public writer, to help verify that
    the old and new code produces the same result.  The test uses both
    OpenZGY and ZGY-Public reader, local file, int8.
    """
    with LocalFileAutoDelete("fancy-5.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
                        oldzgy.ZgyWriter)
        # (check function, args for old reader, args for new reader).
        # The odd checkRawContents expectation for the old reader
        # reveals a bug in ZGY-Public.
        checks = ((checkContents, (0, 0), (0, 0)),
                  (checkLodContents, (0, 0), (0, 0)),
                  (checkRawContents, (0, 100), (0, 0)),
                  (checkStatistics, (0, 0, False), (0, 0, False)),
                  (checkHistogram, (0, 0, False), (0, 0, False)))
        for check, oldargs, newargs in checks:
            check(fn.name, oldzgy.ZgyReader, *oldargs)
            check(fn.name, newzgy.ZgyReader, *newargs)
def testFancy6():
    """
    Part of the test suite using the same test data stored in different ways.
    OpenZGY Python writer, both OpenZGY and ZGY-Public reader, local file,
    float.  Compared to the old writer the user specified codingrange
    will now be ignored and the statistical range used instead.
    Note that if api.ZgyMeta.datarange chooses to enforce this
    then only the old reader will be able to verify what was written.
    """
    with LocalFileAutoDelete("fancy-6.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.float, (-1,+42),
                        newzgy.ZgyWriter)
        for check in (checkContents, checkLodContents, checkRawContents):
            check(fn.name, oldzgy.ZgyReader, 0, 0)
            check(fn.name, newzgy.ZgyReader, 0, 0)
        for check in (checkStatistics, checkHistogram):
            check(fn.name, oldzgy.ZgyReader, 0, 0, True)
            check(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy7():
    """
    Part of the test suite using the same test data stored in different ways.
    OpenZGY Python writer, int8 with lossless compression.
    Currently this combination is explicitly forbidden by a test in the
    api; see comments in the doc and in the ZgyWriter source code for
    why.  Also, fewer checks because the old reader cannot handle the
    new compression.
    """
    compressor = ZgyCompressFactory("ZFP", snr = 99)
    with LocalFileAutoDelete("fancy-7.zgy") as fn:
        with MustThrow("need to be stored as float", newzgy.ZgyUserError):
            createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
                            newzgy.ZgyWriter, single_write=True,
                            kwargs={"compressor": compressor})
        # Checks that would apply if this were ever allowed:
        #checkContents(fn.name, newzgy.ZgyReader, 0, 0)
        #checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
        #checkRawContents(fn.name, newzgy.ZgyReader, 0, 0)
        #checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
        #checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
        fn.disarm()
def testFancy8():
    """
    Part of the test suite using the same test data stored in different ways.
    OpenZGY Python writer, float32 with lossless compression.
    """
    compressor = ZgyCompressFactory("ZFP", snr = 99)
    with LocalFileAutoDelete("fancy-8.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.float, (-1,+42),
                        newzgy.ZgyWriter, single_write=True,
                        kwargs={"compressor": compressor})
        for check in (checkContents, checkLodContents, checkRawContents):
            check(fn.name, newzgy.ZgyReader, 0, 0)
        checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
        checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy9():
    """
    Part of the test suite using the same test data stored in different ways.
    OpenZGY Python writer, int8 with lossy compression.
    Currently this combination is explicitly forbidden by a test in the
    api; see comments in the doc and in the ZgyWriter source code for
    why.  Also, fewer checks because the old reader cannot handle the
    new compression.
    """
    compressor = ZgyCompressFactory("ZFP", snr = 30)
    with LocalFileAutoDelete("fancy-9.zgy") as fn:
        with MustThrow("need to be stored as float", newzgy.ZgyUserError):
            createFancyFile(fn.name, SampleDataType.int8, (-28,+227),
                            newzgy.ZgyWriter, single_write=True,
                            kwargs={"compressor": compressor})
        # Checks that would apply if this were ever allowed:
        #checkContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=1.5)
        #checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
        #checkRawContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.5)
        #checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True, maxdelta=8000)
        #checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
        fn.disarm()
def testFancy10():
    """
    Part of the test suite using the same test data stored in different ways.
    OpenZGY Python writer, float32 with lossy compression.
    """
    compressor = ZgyCompressFactory("ZFP", snr = 30)
    with LocalFileAutoDelete("fancy-10.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.float, (-1,+42),
                        newzgy.ZgyWriter, single_write=True,
                        kwargs={"compressor": compressor})
        checkContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
        checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
        checkRawContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
        checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True, maxdelta=5000)
        # Histogram check disabled; lossy compression perturbs it.
        #checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy11():
    """
    Part of the test suite using the same test data stored in different ways.
    New code only, small bricksize, no compression.
    """
    with LocalFileAutoDelete("fancy-11.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.float, (-28,+227),
                        newzgy.ZgyWriter,
                        kwargs={"bricksize": (32,32,32)})
        for check in (checkContents, checkLodContents, checkRawContents):
            check(fn.name, newzgy.ZgyReader, 0, 0)
        checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
        checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy12():
    """
    Part of the test suite using the same test data stored in different ways.
    New code only, large bricksize, no compression.
    """
    with LocalFileAutoDelete("fancy-12.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.float, (-28,+227),
                        newzgy.ZgyWriter,
                        kwargs={"bricksize": (128,128,128)})
        for check in (checkContents, checkLodContents, checkRawContents):
            check(fn.name, newzgy.ZgyReader, 0, 0)
        checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True)
        checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy13():
    """
    Part of the test suite using the same test data stored in different ways.
    New code only, non-rectangular bricks, no compression.
    Need single_write=True because with the very small bricksize my test
    code ends up writing more than one brick past the end of the survey.
    """
    with LocalFileAutoDelete("fancy-13.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.float, (-28,+227),
                        newzgy.ZgyWriter, single_write=True,
                        kwargs={"bricksize": (16,32,128)})
        checkContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
        checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
        checkRawContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
        checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True, maxdelta=5000)
        checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testFancy14():
    """
    Part of the test suite using the same test data stored in different ways.
    New code only, non-rectangular bricks, with compression.
    """
    compressor = ZgyCompressFactory("ZFP", snr = 30)
    with LocalFileAutoDelete("fancy-14.zgy") as fn:
        createFancyFile(fn.name, SampleDataType.float, (-28,+227),
                        newzgy.ZgyWriter, single_write=True,
                        kwargs={"bricksize": (16,32,128), "compressor": compressor})
        checkContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
        checkLodContents(fn.name, newzgy.ZgyReader, 0, 0)
        checkRawContents(fn.name, newzgy.ZgyReader, 0, 0, maxdelta=2.0)
        checkStatistics(fn.name, newzgy.ZgyReader, 0, 0, True, maxdelta=5000)
        # The histogram check currently FAILS for this configuration.
        #checkHistogram(fn.name, newzgy.ZgyReader, 0, 0, True)
def testCloudAutoDelete():
    # Normal case: the context manager hands out an sd:// name.
    with CloudFileAutoDelete("xyzzy", None) as fn:
        assert fn.name[:5] == "sd://"
        fn.disarm()
    # Seismic drive, missing credentials.
    with MustThrow("service URL has not been defined", RuntimeError):
        with CloudFileAutoDelete("xyzzy", None, silent=True) as fn:
            assert fn.name[:5] == "sd://"
    # Seismic drive, file not found.
    # As of 2021-02-12 it is no longer an error to delete a
    # non-existing file, hence no
    # MustThrow("does not exist", RuntimeError) around this one.
    with CloudFileAutoDelete("xyzzy", SDCredentials(), silent=True) as fn:
        assert fn.name[:5] == "sd://"
def testReadFromCloud(filename):
    # Read a small box from a known cube on seismic store and spot
    # check a handful of samples against their expected values.
    with newzgy.ZgyReader(filename, iocontext=SDCredentials()) as reader, io.StringIO() as bitbucket:
        verbose = lambda *args, **kwargs: print(*args, file=bitbucket, **kwargs)
        assert reader.size == (181, 241, 169)
        buf = np.zeros((100, 50, 30), dtype=np.int8)
        reader.read((42, 70, 50), buf, verbose=verbose)
        assert tuple(buf[0,0,:5]) == (57, 48, 38, 28, 17)
        assert tuple(buf[0,0,-5:]) == (-101, -91, -79, -65, -51)
def testCloudWriter(filename):
    """
    File written by the new code to seismic store.
    I haven't hooked up the old API to seismic store, so do the read
    checks only with newzgy.
    """
    with TimeMe("  createFancyFile"):
        createFancyFile(filename, SampleDataType.int8, (-28,+227), newzgy.ZgyWriter)
    # (timer label, check function, extra trailing args).
    steps = (("  checkContents", checkContents, ()),
             ("  checkLodContents", checkLodContents, ()),
             ("  checkRawContents", checkRawContents, ()),
             ("  checkStatistics", checkStatistics, (True,)),
             ("  checkHistogram", checkHistogram, (True,)))
    for label, check, extra in steps:
        with TimeMe(label):
            check(filename, newzgy.ZgyReader, 0, 0, *extra)
    # Delete twice on purpose; the second delete of a now
    # non-existing file must also succeed.
    for label in ("  delete #1", "  delete #2"):
        with TimeMe(label):
            newzgy.ZgyUtils(SDCredentials()).delete(filename)
def testLegalTag(filename):
    # Exercise passing legaltag, writeid and seismicmeta through the
    # iocontext when creating a file on seismic store, then write a
    # trivial all-zero cube.
    payload = {"foo": "bar", "foocount": 42}
    seismicmeta = {"kind": "slb:openzgy:test:1.0.0", "data": payload}
    iocontext = SDCredentials(legaltag="slb-synthetic-seismic",
                              writeid="test-my-write",
                              seismicmeta=seismicmeta)
    with newzgy.ZgyWriter(filename,
                          iocontext = iocontext,
                          size = (64, 64, 64),
                          datatype = SampleDataType.float) as writer:
        writer.write((0, 0, 0), np.zeros((64, 64, 64), dtype=np.float32))
        writer.finalize()
    #os.system("sdutil stat " + SDTestSink("legaltag.zgy") + " --detailed")
    # TODO-Test, read back the metadata and confirm it was stored
    # correctly. Not possible yet.
    # TODO-Question, there is both a {get,set}MetaData and a
    # {get,set}SeismicMeta(). I suspect the former only sets the "data"
    # portion of SeismicMeta, but the two might also be completely
    # unrelated.
    # TODO-Question, when (and only when) I specify seismicmeta I see
    # that "sdutil stat --detailed" shows me the seismicmeta, and this
    # includes the legaltag. Is the legaltag in the seismicmeta
    # different from the "old" legaltag? Can it be changed, since we
    # do have a setSeismicMeta?
def testCloudConsolidateBricks(filename, *, verbose = False):
"""
When reading from seismic store, bricks that are contiguous in memory
should be read in a single operation because larger brick size is
faster (up to a point). When not contiguous the reads should still
make just a single call to seismic store with a scatter/gather array
so the lower level code miggt do multi-threading.
This test also enables the single-block caching which will cause
all the headers to be read in a single operation. It can also speed
up regular brick access. Note that this cache is extremely simplistic,
it only remembers the previous result and it only returns a match
if the next request is exactly identical.
TODO-Low consider splitting this into multiple tests.
"""
vprint = ((lambda *args, **kwargs: print(*args, **kwargs)) if verbose
else (lambda *args, **kwargs: None))
trace = TraceCallsToSD(verbose = verbose)
iocontext = SDCredentials(aligned=1, maxsize=64, maxhole=1, threads=1,
_debug_trace = trace
)
bricksize = np.array((64, 64, 64), dtype=np.int64)
brick = np.product(bricksize) * np.dtype(np.float32).itemsize
size = np.array((181, 241, 169), dtype=np.int64)
numbricks = (size + bricksize - 1) // bricksize
vprint("Creating. Expect header written twice, then bulk data once.")
with newzgy.ZgyWriter(filename, iocontext=iocontext,
bricksize = tuple(bricksize),
size = tuple(size)) as writer:
data = np.arange(np.product(size), dtype=np.float32).reshape(size)
writer.write((0,0,0), data)
# lod 0 bricks: 3 * 4 * 3 = 36
# lod 1 bricks: 2 * 2 * 2 = 8
# lod 2 bricks: 1
# sum bricks on file: 45
# Writing the final header is the penultimate and not the last write.
# This is due to how SeismicStoreFileDelayedWrite works. See also
# comments in ZgyWriter.close().
assert len(trace.calls) == 3
assert trace.calls[0] == ("append", brick, brick, 1)
assert trace.calls[1] == ("write", brick, brick, 1)
assert trace.calls[2] == ("append", 45 * brick, 45*brick, 1)
trace.reset()
vprint("Opening. Expect all headers read in just one real access.")
with newzgy.ZgyReader(filename, iocontext = iocontext) as reader:
assert len(trace.calls) >= 1
assert trace.calls[0].what in ("read", "readv", "cachemiss")
assert all([t.what == "cachehit" for t in trace.calls[1:]])
trace.reset()
# The size in bricks, il/xl/slice, is (3, 4, 3).
# Reading a single inline should require just a single access.
# Reading a single crossline should read one brick-column (3 bricks)
# at a time, so it will need 3 reads. Each brick is 256 KB.
ildata = np.zeros((1, size[1], size[2]), dtype=np.float32)
xldata = np.zeros((size[0], 1, size[2]), dtype=np.float32)
vprint("read one il,", numbricks[1] * numbricks[2], "bricks")
reader.read((0,0,0), ildata)
assert len(trace.calls) == 1
assert trace.calls[0] == ("readv",
brick*numbricks[1]*numbricks[2],
brick*numbricks[1]*numbricks[2], 1)
trace.reset()
vprint("read one xl,", numbricks[0], "*", numbricks[2], "bricks")
reader.read((0,0,0), xldata)
# Not contiguous, but a single scatter/gather read.
assert len(trace.calls) == 1
assert trace.calls[0] == ("readv",
brick*numbricks[0]*numbricks[2],
brick*numbricks[0]*numbricks[2], 3)
trace.reset()
sample = np.zeros((1,1,1), dtype=np.float32)
vprint("read one sample. Should require just one brick.")
reader.read((100,100,100), sample)
assert len(trace.calls) == 1
assert trace.calls[0].nbytes == brick
trace.reset()
vprint("read another sample in the same brick. Should be cached.")
reader.read((101,102,103), sample)
assert len(trace.calls) == 1
assert trace.calls[0] == ("cachehit", brick, brick, 1)
trace.reset()
vprint("Opening with 64 MB buffers. Everything ought to be cached.")
# Note that the entire file is smaller than the requested blocking,
# it is important to veryfy that this doesn't cause problems when
# hitting EOF. The "simple cache" and the "scatter/gather" cases
# need to be tested separately.
iocontext = SDCredentials(aligned=64, maxsize=64, maxhole=1, threads=1,
_debug_trace = trace
)
with newzgy.ZgyReader(filename, iocontext = iocontext) as reader:
# As with the previous case there should just be a single read.
assert len(trace.calls) >= 1
assert trace.calls[0].what in ("read", "readv", "cachemiss")
assert all([t.what == "cachehit" for t in trace.calls[1:]])
trace.reset()
# This will currently not be very performant. The requested
# padding will be applied but the simplistic cache won't use it.
# Not that big a deal since the padding in real cases should
# probably be just 4 MB or so, Small enough for the wasted
# bytes not actually costing anything.
# The test is important though. The padding to align reads
# is still applied, but in a different place in the code.
vprint("read one il,", numbricks[1] * numbricks[2], "bricks")
ildata = np.zeros((1, size[1], size[2]), dtype=np.float32)
reader.read((0,0,0), ildata)
assert len(trace.calls) == 1
# See FileAdt._consolidate_requests._groupsize()
# The header segment is not aligned to out oversized "align"
# parameter. This causes some needless data access because
# the padding will cross a segment boundary. Segment 0 (headers)
# will be read again even though we don't need it.
# The asserts below reflect the current implementation.
#assert trace.calls[0] == ("readv", 12*brick, 45*brick, 2)
assert trace.calls[0] == ("readv", 12*brick, 46*brick, 2)
trace.reset()
vprint("read one xl,", numbricks[0], "*", numbricks[2], "bricks")
xldata = np.zeros((size[0], 1, size[2]), dtype=np.float32)
reader.read((0,0,0), xldata)
# Consolidate and split causes this to end up as 3 separate
# non contiguous reads. Applying "align" is done too late
# which causes each of these 3 reads to cover the exact same
# area. And those areas in turn consist of two reads since
# we are reading the header also. The naive cache doesn't
# help us here. Fortunately this is a very contrived case.
assert len(trace.calls) == 1
#assert trace.calls[0] == ("readv", 9*brick, 45*brick, 1)
assert trace.calls[0] == ("readv", 9*brick, 3*46*brick, 6)
trace.reset()
# This should trigger the naive cache, tailored specifically
# to how Petrel reads data from ZGY.
vprint("read one il, one brick at a time")
ildata = np.zeros((1, 64, 64), dtype=np.float32)
for xl in range(0, size[1], 64):
for zz in range(0, size[2], 64):
reader.read((0, xl, zz), ildata)
assert len(trace.calls) >= 1
# The cache was cleared after readv, so expect one and just one
# read request to fill it.
assert trace.calls[0].what in ("read", "readv", "cachemiss")
assert all([t.what == "cachehit" for t in trace.calls[1:]])
trace.reset()
vprint("read one xl, one brick at a time")
xldata = np.zeros((64, 1, 64), dtype=np.float32)
for il in range(0, size[0], 64):
for zz in range(0, size[2], 64):
reader.read((il, 0, zz), ildata)
assert len(trace.calls) >= 1
assert all([t.what == "cachehit" for t in trace.calls[0:]])
trace.reset()
# Re-create the file with 7 MB segment size, to stress some more code.
iocontext = SDCredentials(aligned=1, maxsize=64, maxhole=1, threads=1,
segsize=7, _debug_trace = trace
)
bricksize = np.array((64, 64, 64), dtype=np.int64)
brick = np.product(bricksize) * np.dtype(np.float32).itemsize
size = np.array((181, 241, 169), dtype=np.int64)
numbricks = (size + bricksize - 1) // bricksize
vprint("Creating. Expect header written twice and bulk data in 7 parts.")
with newzgy.ZgyWriter(filename, iocontext=iocontext,
bricksize = tuple(bricksize),
size = tuple(size)) as writer:
data = np.arange(np.product(size), dtype=np.float32).reshape(size)
writer.write((0,0,0), data)
# There may be several reads needed to generate lod 1 bricks
# from data already flushed. Ignore those.
calls = list([ e for e in trace.calls
if e.what not in ("readv", "cachehit", "cachemiss")])
assert len(calls) == 9
assert calls[0] == ("append", brick, brick, 1) # empty header
assert calls[1] == ("append", 7 * brick, 7 * brick, 1)
assert calls[2] == ("append", 7 * brick, 7 * brick, 1)
assert calls[3] == ("append", 7 * brick, 7 * brick, 1)
assert calls[4] == ("append", 7 * brick, 7 * brick, 1)
assert calls[5] == ("append", 7 * brick, 7 * brick, 1)
assert calls[6] == ("append", 7 * brick, 7 * brick, 1)
assert calls[7] == ("write", brick, brick, 1) # actual header
assert calls[8] == ("append", 3 * brick, 3 * brick, 1) # mop up.
trace.reset()
iocontext = SDCredentials(aligned=1, maxsize=64, maxhole=1, threads=1,
_debug_trace = trace
)
with newzgy.ZgyReader(filename, iocontext = iocontext) as reader:
assert len(trace.calls) >= 1
assert trace.calls[0].what in ("read", "readv", "cachemiss")
assert all([t.what == "cachehit" for t in trace.calls[1:]])
trace.reset()
vprint("read one il,", numbricks[1] * numbricks[2], "bricks")
ildata = np.zeros((1, size[1], size[2]), dtype=np.float32)
reader.read((0,0,0), ildata)
# There will be two reads since it crissed a segment boundary.
assert len(trace.calls) == 1
assert trace.calls[0] == ("readv", 12*brick, 12*brick, 2)
trace.reset()
vprint("read one xl,", numbricks[0], "*", numbricks[2], "bricks")
xldata = np.zeros((size[0], 1, size[2]), dtype=np.float32)
reader.read((0,0,0), xldata)
# Not contiguous, but a single scatter/gather read.
# More that 3 parts due to crossing segment boundaries.
assert len(trace.calls) == 1
assert trace.calls[0] == ("readv", 9*brick, 9*brick, 4)
trace.reset()
vprint("done.")
def Main():
    """Run the whole test suite.

    Order: local-file tests first, then interop tests against the old ZGY
    implementation when present, then ZFP-compression tests when available,
    and finally seismic-store (cloud) tests when available.
    """
    # Turn numpy warnings (overflow, invalid, ...) into raised exceptions so
    # silent numeric problems fail the tests instead of being ignored.
    np.seterr(all='raise')
    with TimeMe("ProgressWithDots"):
        testProgressWithDots()
    with TimeMe("BadArgumentsOnCreate"):
        testBadArgumentsOnCreate()
    with TimeMe("BadArgumentsOnReadWrite"):
        with LocalFileAutoDelete("somefile.zgy") as fn:
            testBadArgumentsOnReadWrite(fn.name)
    with TimeMe("AutoDelete"):
        testAutoDelete()
    if HasOldZgy():
        with TimeMe("HistogramRangeIsCenterNotEdge"):
            with LocalFileAutoDelete("histo.zgy") as fn:
                testHistogramRangeIsCenterNotEdge(fn.name)
    # Empty-file round trips; _NN/_ON/_NO/_OO encode writer/reader being
    # the new (N) or old (O) implementation.
    with TimeMe("EmptyFile_NN"):
        with LocalFileAutoDelete("emptyfile.zgy") as fn:
            testEmptyFile(fn.name, newzgy.ZgyWriter, newzgy.ZgyReader)
    if HasOldZgy():
        with TimeMe("EmptyFile_ON"):
            with LocalFileAutoDelete("emptyfile.zgy") as fn:
                testEmptyFile(fn.name, oldzgy.ZgyWriter, newzgy.ZgyReader)
        with TimeMe("EmptyFile_NO"):
            with LocalFileAutoDelete("emptyfile.zgy") as fn:
                testEmptyFile(fn.name, newzgy.ZgyWriter, oldzgy.ZgyReader)
        with TimeMe("EmptyFile_OO"):
            with LocalFileAutoDelete("emptyfile.zgy") as fn:
                testEmptyFile(fn.name, oldzgy.ZgyWriter, oldzgy.ZgyReader)
    with LocalFileAutoDelete("rmwfile.zgy") as fn:
        testRmwFile(fn.name, newzgy.ZgyWriter)
    with LocalFileAutoDelete("fatal-error.zgy") as fn:
        testFatalErrorFlag(fn.name)
    if False: # Disabled because it takes too long.
        with TimeMe("LargeSparseFile"):
            with LocalFileAutoDelete("largesparse.zgy") as fn:
                testLargeSparseFile(fn.name, newzgy.ZgyWriter, newzgy.ZgyReader)
    with TimeMe("Naan"):
        with LocalFileAutoDelete("naan.zgy") as fn:
            testNaan(fn.name)
    with TimeMe("WriteNaanToIntegerStorage"):
        with LocalFileAutoDelete("intnaan.zgy") as fn:
            testWriteNaanToIntegerStorage(fn.name)
    with TimeMe("ZeroCentric"):
        with LocalFileAutoDelete("zerocentric.zgy") as fn:
            testZeroCentric(fn.name)
    # NOTE(review): the next two timers share the label "FinalizeProgress";
    # the second run exercises abort=True. Consider distinct labels.
    with TimeMe("FinalizeProgress"):
        with LocalFileAutoDelete("finalize.zgy") as fn:
            testFinalizeProgress(fn.name, abort = False)
    with TimeMe("FinalizeProgress"):
        with LocalFileAutoDelete("finalize.zgy") as fn:
            testFinalizeProgress(fn.name, abort = True)
    with TimeMe("HugeFile"):
        with LocalFileAutoDelete("huge.zgy") as fn:
            testHugeFile(fn.name)
    with LocalFileAutoDelete("oddsize.zgy") as fn:
        testDecimateOddSize(fn.name)
    with TimeMe("DecimateWeightedAverage"):
        with LocalFileAutoDelete("weighted.zgy") as fn:
            testDecimateWeightedAverage(fn.name)
    with TimeMe("MixingUserAndStorage"):
        with LocalFileAutoDelete("mixuserstorage.zgy") as fn:
            testMixingUserAndStorage(fn.name)
    with TimeMe("SmallConstArea"):
        with LocalFileAutoDelete("smallconstarea.zgy") as fn:
            testSmallConstArea(fn.name)
    with LocalFileAutoDelete("testhisto_f.zgy") as fn:
        testHistoCornercaseFloat(fn.name)
    with LocalFileAutoDelete("testhisto_i.zgy") as fn:
        testHistoCornercaseInt(fn.name)
    # The "Fancy" tests create their own files internally.
    with TimeMe("FancyDefaultValue"):
        testFancyDefaultValue()
    with TimeMe("FancyReadConstant"):
        testFancyReadConstant()
    with TimeMe("FancyMisc"):
        testFancyMisc()
    with TimeMe("TestFancy1"):
        testFancy1()
    with TimeMe("TestFancy2"):
        testFancy2()
    with TimeMe("TestFancy3"):
        testFancy3()
    with TimeMe("TestFancy4"):
        testFancy4()
    if HasOldZgy():
        with TimeMe("TestFancy5"):
            testFancy5()
    with TimeMe("TestFancy6"):
        testFancy6()
    with TimeMe("TestFancy11"):
        testFancy11()
    with TimeMe("TestFancy12"):
        testFancy12()
    with TimeMe("TestFancy13"):
        testFancy13()
    # ZFP COMPRESSION
    if HasZFPCompression():
        with TimeMe("RegisteredCompressors"):
            testRegisteredCompressors()
        with TimeMe("TestFancy7"):
            testFancy7()
        with TimeMe("TestFancy8"):
            testFancy8()
        with TimeMe("TestFancy9"):
            testFancy9()
        with TimeMe("TestFancy10"):
            testFancy10()
        with TimeMe("TestFancy14"):
            testFancy14()
        with TimeMe("NoRmwInCompressedFile"):
            with LocalFileAutoDelete("no-rmw.zgy") as fn:
                testNoRmwInCompressedFile(fn.name)
        # NOTE(review): label "Naan" duplicates the uncompressed run above;
        # this one passes 70, presumably a compression quality — confirm.
        with TimeMe("Naan"):
            with LocalFileAutoDelete("naan.zgy") as fn:
                testNaan(fn.name, 70)
    # SEISMIC STORE
    if not HasSeismicStore():
        print("SKIPPING seismic store tests")
        return
    with TimeMe("testCloudAutoDelete"):
        testCloudAutoDelete()
    with TimeMe("testReadFromCloud"):
        testReadFromCloud(SDTestData("Synt2.zgy"))
    with TimeMe("testCloudWriter"):
        with CloudFileAutoDelete("openzgy-rules.zgy", SDCredentials()) as cad:
            testCloudWriter(cad.name)
            cad.disarm() # The test function cleans up itself, unless it throws.
    with TimeMe("EmptyFile"):
        with CloudFileAutoDelete("emptyfile.zgy", SDCredentials()) as fn:
            testEmptyFile(fn.name)
    # oldzgy probably doesn't have zgycloud set up in this test.
    if HasOldZgy() and False:
        with TimeMe("EmptyFile_ON"):
            with CloudFileAutoDelete("emptyfile.zgy", SDCredentials()) as fn:
                testEmptyFile(fn.name, oldzgy.ZgyWriter, newzgy.ZgyReader)
        with TimeMe("EmptyFile_NO"):
            with CloudFileAutoDelete("emptyfile.zgy", SDCredentials()) as fn:
                testEmptyFile(fn.name, newzgy.ZgyWriter, oldzgy.ZgyReader)
        with TimeMe("EmptyFile_OO"):
            with CloudFileAutoDelete("emptyfile.zgy", SDCredentials()) as fn:
                testEmptyFile(fn.name, oldzgy.ZgyWriter, oldzgy.ZgyReader)
    with TimeMe("EmptyExistingFile"):
        testEmptyExistingFile("sd://sntc/testdata/OldEmpty.zgy")
    with TimeMe("testRmwFile"):
        with CloudFileAutoDelete("rmwfile.zgy", SDCredentials()) as fn:
            testRmwFile(fn.name, newzgy.ZgyWriter)
    with TimeMe("testLegalTag"):
        with CloudFileAutoDelete("legaltag.zgy", SDCredentials()) as fn:
            testLegalTag(fn.name)
    with CloudFileAutoDelete("consolidate.zgy", SDCredentials()) as fn:
        with TimeMe("ConsolidateBricks"):
            testCloudConsolidateBricks(fn.name, verbose = False)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    Main()
# Copyright 2017-2021, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 141,183 | 47,640 |
from setuptools import setup
# Packaging metadata for the music_geometry_eval library.
# No runtime dependencies are declared (install_requires is empty).
setup(name='music_geometry_eval',
      version='2.0',
      description='A python library to automatically evaluate music tonality based on geometry',
      url='https://github.com/sebasgverde/music-geometry-eval',
      author='Sebastian Garcia Valencia',
      author_email='sebasgverde@gmail.com',
      license='MIT',
      packages=['music_geometry_eval'],
      python_requires='>=2.7',
      install_requires=[])
| 450 | 134 |
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
# NOTE(review): this index is generated from the notebooks; regenerate it
# with nbdev instead of editing by hand.
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
# Maps each exported symbol to the notebook that defines it.
index = {"get_module_text": "00_nb2module.ipynb",
"write_module_text": "00_nb2module.ipynb",
"clear_all_modules": "00_nb2module.ipynb",
"simple_export_one_nb": "00_nb2module.ipynb",
"simple_export_all_nb": "00_nb2module.ipynb",
"get_py_files": "01_module2nb.ipynb",
"get_cells_one_nb": "01_module2nb.ipynb",
"write_code_cell": "01_module2nb.ipynb",
"py_to_nb": "01_module2nb.ipynb"}
# Generated python modules belonging to this package.
modules = ["nb2module.py",
"module2nb.py"]
doc_url = "https://Isaac-Flath.github.io/nbdevminimum/"
git_url = "https://github.com/Isaac-Flath/nbdevminimum/tree/{branch}/"
# No per-symbol custom documentation links for this project.
def custom_doc_links(name): return None
| 783 | 329 |
from tkinter import *
from functools import partial
from PIL import Image, ImageTk
class Calculadora():
    """A small Tk calculator.

    The constructor builds the whole UI and immediately enters the Tk
    mainloop, so ``Calculadora()`` blocks until the window is closed.
    """

    def __init__(self):
        # Root window and background.
        self.inst = Tk()
        self.inst.geometry('720x1200')
        self.inst['background'] = 'white'
        # Fonts: bold for operator keys, regular for digits, large display.
        fonte = ('Verdana', 12, 'bold')
        fontea = ('Verdana', 12)
        fonte2 = ('Verdana', 18, 'bold')
        # The running expression / result is shown in this label.
        self.calculo = Label(self.inst, text='', font=fonte2,
                             bg='white', height=5)
        self.calculo.pack()
        # One frame per keyboard row, packed top to bottom.
        frames = [Frame(self.inst, bg='white', padx=0, pady=0) for _ in range(5)]
        for frame in frames:
            frame.pack()
        # Key layout, row by row.
        self.texto = (
            ('C', '<×', '^', '/'),
            ('7', '8', '9', 'x'),
            ('4', '5', '6', '+'),
            ('1', '2', '3', '-'),
            ('.', '0', '()', '=')
        )
        # Create the buttons; every key routes through InterpretaBotoes.
        self.botoes = []
        for linha, frame in zip(self.texto, frames):
            for simbol in linha:
                but = Button(frame, text=simbol, font=fontea,
                             height=2, width=3, relief=GROOVE, bg='white',
                             command=partial(self.InterpretaBotoes, simbol)
                             )
                but.pack(side=LEFT)
                self.botoes.append(but)
                # Operator keys are gray/cyan, '=' is green, others stay white.
                if simbol in ('C', '<×', '^', '/', 'x', '+', '-'):
                    but['bg'] = 'lightgray'
                    but['fg'] = 'darkcyan'
                    but['font'] = fonte
                elif simbol == '=':
                    but['bg'] = 'green'
                    but['fg'] = 'white'
                    but['font'] = fonte
        # Enter the Tk event loop (blocks until the window closes).
        self.inst.mainloop()

    def InterpretaBotoes(self, valor):
        """Handle one key press: edit the display or trigger evaluation."""
        if valor == 'C':
            self.calculo['text'] = ''
        elif valor == '<×':
            # Backspace: drop the last character (no-op on empty display).
            self.calculo['text'] = self.calculo['text'][:-1]
        elif valor == '=':
            self.Calcula()
        elif valor == '()':
            texto = self.calculo['text']
            # Open a parenthesis on an empty display or after an operator;
            # close one after a digit or ')'.  (BUG FIX: the old code
            # indexed texto[-1] before checking for emptiness and relied
            # on a bare 'except' to swallow the resulting IndexError.)
            if not texto or texto[-1] in '+-/^x':
                self.calculo['text'] += '('
            elif texto[-1] in '1234567890)':
                self.calculo['text'] += ')'
        else:
            self.calculo['text'] += valor
            # Wrap the display every 15 characters.
            if len(self.calculo['text']) % 15 == 0:
                self.calculo['text'] += '\n'

    def Calcula(self):
        """Translate the display into a Python expression and evaluate it."""
        texto = self.calculo['text']
        # Only try to evaluate when at least one operator is present.
        if not any(op in texto for op in ('+', '-', '/', '^', 'x')):
            return
        # Keep digits, '.', '+', '-', '/' and parentheses; map the display's
        # 'x' and '^' to Python's '*' and '**'; drop everything else —
        # notably the '\n' characters inserted for display wrapping.
        traducao = {'x': '*', '^': '**'}
        calculo = ''.join(traducao.get(ch, ch) for ch in texto
                          if ch in traducao or ch in '0123456789.+-/()')
        self.calculo['text'] = ''
        # SECURITY NOTE: eval() executes arbitrary expressions; tolerable
        # here only because the input comes from the calculator's own keys.
        try:
            resultado = str(eval(calculo))
        except Exception:
            # BUG FIX: a malformed expression (e.g. '2+' or unbalanced
            # parentheses) or division by zero used to raise inside the Tk
            # callback; now the display is simply left cleared.
            return
        # Re-wrap the result every 15 characters for the display.
        auxiliar = ''
        for i in range(len(resultado)):
            if (i + 1) % 15 == 0:
                auxiliar += '\n'
            auxiliar += resultado[i]
        self.calculo['text'] = auxiliar
Calculadora() | 3,041 | 1,503 |
from django.core.urlresolvers import reverse_lazy, reverse
from forum.models import Category, Question, Answer
from utils.mixin import AjaxableResponseMixin
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.http import JsonResponse, Http404
from website.mixin import FrontMixin
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.shortcuts import get_object_or_404
from authentication.models import MyUser
class CategoryCreateView(UserPassesTestMixin, AjaxableResponseMixin, CreateView):
    """Staff-only form for creating a new forum Category."""
    login_url = reverse_lazy('user-login')
    model = Category
    fields = ['name']
    template_name_suffix = '_create_form'
    success_url = reverse_lazy('category-list')

    def test_func(self):
        # Category management is restricted to staff accounts.
        return self.request.user.is_staff

    def get_context_data(self, *args, **kwargs):
        ctx = super(CategoryCreateView, self).get_context_data(**kwargs)
        ctx.update(active_page='category-add')
        return ctx
class CategoryListView(UserPassesTestMixin, ListView):
    """Staff-only listing of every Category."""
    login_url = reverse_lazy('user-login')
    model = Category
    context_object_name = 'category_list'

    def test_func(self):
        # Only staff may browse the admin category list.
        return self.request.user.is_staff

    def get_context_data(self, *args, **kwargs):
        ctx = super(CategoryListView, self).get_context_data(**kwargs)
        ctx.update(active_page='category-list')
        return ctx
class CategoryUpdateView(UserPassesTestMixin, AjaxableResponseMixin, UpdateView):
    """Staff-only form for renaming an existing Category."""
    login_url = reverse_lazy('user-login')
    model = Category
    context_object_name = 'category'
    template_name_suffix = '_update_form'
    success_url = reverse_lazy('category-list')
    fields = ['name']

    def test_func(self):
        # Only staff may edit categories.
        return self.request.user.is_staff

    def get_context_data(self, *args, **kwargs):
        ctx = super(CategoryUpdateView, self).get_context_data(**kwargs)
        ctx.update(active_page='category-update')
        return ctx
class CategoryDeleteView(UserPassesTestMixin, AjaxableResponseMixin, DeleteView):
    """Staff-only Category deletion; answers AJAX callers with JSON."""
    login_url = reverse_lazy('user-login')
    model = Category
    success_url = reverse_lazy('category-list')

    def test_func(self):
        # Only staff may delete categories.
        return self.request.user.is_staff

    def post(self, request, *args, **kwargs):
        # Run the parent deletion, then return a JSON status instead of
        # the redirect the parent produced.
        super(CategoryDeleteView, self).post(request, *args, **kwargs)
        return JsonResponse({'state': 'success'})
class QuestionCreateView(LoginRequiredMixin, FrontMixin, CreateView):
    """Logged-in users post a new Question; redirects to its detail page."""
    login_url = reverse_lazy('user-login')
    model = Question
    template_name_suffix = '_create_form'
    fields = ['title', 'content', 'category', 'inviting_person']

    def get_context_data(self, *args, **kwargs):
        context = super(QuestionCreateView, self).get_context_data(**kwargs)
        # Selectable categories and invitable teachers for the form.
        context['category_list'] = Category.objects.all()
        context['teacher_list'] = MyUser.objects.filter(identity='T').order_by('nickname')
        return context

    def form_valid(self, form):
        # Stamp the fields the form does not expose.
        form.instance.author = self.request.user
        form.instance.show_times = 0
        return super(QuestionCreateView, self).form_valid(form)

    def get_success_url(self):
        # CreateView saves the instance as self.object, so redirect to it
        # directly.  The previous implementation re-queried the author's
        # questions ordered by publish_time, which cost an extra query and
        # could pick the wrong row if the same user posted twice at nearly
        # the same moment.
        return reverse('question-detail', kwargs={'pk': self.object.id})
class CategoryQuestionListView(FrontMixin, ListView):
    """Homepage-style paginated list of the questions in one category."""
    template_name = 'website/frontend/homepage.html'
    model = Question
    paginate_by = 10
    context_object_name = 'question_list'

    def get_queryset(self):
        # 404 when the category id from the URL does not exist.
        wanted = get_object_or_404(Category, pk=self.kwargs['pk'])
        return Question.objects.filter(category=wanted)
class QuestionDetailView(FrontMixin, ListView):
    """Question page: paginated answers plus the question itself.

    Also bumps the question's view counter on every request.
    """
    model = Answer
    template_name = 'forum/question_detail.html'
    paginate_by = 10
    context_object_name = 'answer_list'

    def get_queryset(self):
        # 404 on a bad pk instead of the DoesNotExist 500 that
        # Question.objects.get produced (consistent with the other views).
        question = get_object_or_404(Question, pk=self.kwargs['pk'])
        # NOTE(review): this read-modify-write is racy under concurrent
        # requests; an F('show_times') + 1 update would be atomic.
        question.show_times += 1
        question.save()
        return Answer.objects.filter(question=question)

    def get_context_data(self, *args, **kwargs):
        context = super(QuestionDetailView, self).get_context_data(*args, **kwargs)
        context['question'] = get_object_or_404(Question, pk=self.kwargs['pk'])
        return context
class AnswerCreateView(LoginRequiredMixin, FrontMixin, CreateView):
    """Post an Answer to the Question identified by the pk in the URL."""
    model = Answer
    template_name = 'forum/answer_create_form.html'
    fields = ['content']
    login_url = reverse_lazy('user-login')

    def get_context_data(self, *args, **kwargs):
        context = super(AnswerCreateView, self).get_context_data(*args, **kwargs)
        # 404 on a stale/bogus question id instead of the DoesNotExist 500
        # that Question.objects.get produced.
        context['question'] = get_object_or_404(Question, pk=self.kwargs['pk'])
        return context

    def get_success_url(self):
        # Back to the question the answer belongs to.
        return reverse('question-detail', kwargs={'pk': self.kwargs['pk']})

    def form_valid(self, form):
        form.instance.author = self.request.user
        form.instance.question = get_object_or_404(Question, pk=self.kwargs['pk'])
        return super(AnswerCreateView, self).form_valid(form)
class ReplyCreateView(LoginRequiredMixin, FrontMixin, CreateView):
    """Reply to an existing Answer.

    The reply is stored as another Answer on the same question, with
    reply_author pointing at the author of the answer being replied to.
    """
    model = Answer
    template_name = 'forum/reply_create_form.html'
    fields = ['content']
    login_url = reverse_lazy('user-login')

    def get_answer(self):
        # Single place to resolve the answer being replied to; 404s on a
        # bad pk instead of raising DoesNotExist.
        return get_object_or_404(Answer, pk=self.kwargs['pk'])

    def get_context_data(self, *args, **kwargs):
        context = super(ReplyCreateView, self).get_context_data(*args, **kwargs)
        # The original fetched the same Answer twice here; once is enough.
        context['answer'] = self.get_answer()
        return context

    def get_success_url(self):
        return reverse('question-detail',
                       kwargs={'pk': self.get_answer().question.pk})

    def form_valid(self, form):
        answer = self.get_answer()
        form.instance.author = self.request.user
        form.instance.question = answer.question
        form.instance.reply_author = answer.author.myuser
        return super(ReplyCreateView, self).form_valid(form)
class QuestionListView(UserPassesTestMixin, ListView):
    """Staff-only admin listing of every question."""
    model = Question
    login_url = reverse_lazy('user-login')
    context_object_name = 'question_list'
    template_name = 'forum/question_list.html'

    def test_func(self):
        # The admin question list is for staff only.
        return self.request.user.is_staff
class QuestionDeleteView(UserPassesTestMixin, AjaxableResponseMixin, DeleteView):
    """Staff-only Question deletion; answers AJAX callers with JSON."""
    login_url = reverse_lazy('user-login')
    model = Question
    success_url = reverse_lazy('question-list')

    def test_func(self):
        # Only staff may delete questions.
        return self.request.user.is_staff

    def post(self, request, *args, **kwargs):
        # Run the parent deletion, then report success as JSON rather than
        # returning the parent's redirect.
        super(QuestionDeleteView, self).post(request, *args, **kwargs)
        return JsonResponse({'state': 'success'})
class PersonalQuestionListView(FrontMixin, ListView):
    """Paginated list of the questions asked by one user."""
    paginate_by = 10
    template_name = 'forum/question_weight2.html'
    context_object_name = 'question_list'

    def get_queryset(self):
        return Question.objects.filter(author_id=self.kwargs['pk'])

    def get_context_data(self, *args, **kwargs):
        context = super(PersonalQuestionListView, self).get_context_data(**kwargs)
        # 404 rather than the DoesNotExist 500 MyUser.objects.get raised
        # on an invalid user id.
        context['theuser'] = get_object_or_404(MyUser, pk=self.kwargs['pk'])
        return context
class PersonalAnswerListView(FrontMixin, ListView):
    """Questions a given user has answered, most recently answered first."""
    paginate_by = 10
    template_name = 'forum/answer_weight.html'
    context_object_name = 'question_asked_list'

    def get_queryset(self):
        answers = Answer.objects.filter(author_id=self.kwargs['pk'])
        # De-duplicate while keeping the answers' order, then reverse so
        # the most recently answered question comes first.  The original
        # used list(set(...)), whose ordering is arbitrary, so its
        # .reverse() did not produce a meaningful order.
        seen = set()
        questions = []
        for answer in answers:
            if answer.question_id not in seen:
                seen.add(answer.question_id)
                questions.append(answer.question)
        questions.reverse()
        return questions

    def get_context_data(self, *args, **kwargs):
        context = super(PersonalAnswerListView, self).get_context_data(**kwargs)
        # 404 rather than a 500 on an invalid user id.
        context['theuser'] = get_object_or_404(MyUser, pk=self.kwargs['pk'])
        return context
class QuestionSearchView(FrontMixin, ListView):
    """Title-substring search over questions, shown with the homepage template."""
    paginate_by = 10
    template_name = 'website/frontend/homepage.html'
    context_object_name = 'question_list'

    def get_queryset(self):
        # Empty keyword matches every question (contains '').
        keyword = self.request.GET.get('keyword', '')
        return Question.objects.filter(title__contains=keyword)
class PersonalInvitingListView(FrontMixin, ListView):
    """Questions to which the given user was invited to answer."""
    paginate_by = 10
    template_name = 'website/frontend/homepage.html'
    context_object_name = 'question_list'

    def get_queryset(self):
        # 404 on a bad user id instead of the DoesNotExist 500 that
        # MyUser.objects.get produced.
        invited = get_object_or_404(MyUser, pk=self.kwargs['pk'])
        return Question.objects.filter(inviting_person=invited)
class PersonalReplyListView(FrontMixin, ListView):
    """Replies addressed to the given user, newest first."""
    paginate_by = 10
    template_name = 'forum/reply_weight.html'
    context_object_name = 'reply_list'

    def get_queryset(self):
        # 404 on a bad user id instead of the DoesNotExist 500 that
        # MyUser.objects.get produced.
        target = get_object_or_404(MyUser, pk=self.kwargs['pk'])
        return Answer.objects.filter(reply_author=target).order_by('-publish_time')
| 8,970 | 2,729 |
import torch.utils
def create_loader(tensors, **kwargs):
    """Wrap *tensors* in a TensorDataset and return a DataLoader over it.

    Any extra keyword arguments (batch_size, shuffle, num_workers, ...)
    are forwarded verbatim to torch.utils.data.DataLoader.
    """
    dataset = torch.utils.data.TensorDataset(*tensors)
    return torch.utils.data.DataLoader(dataset, **kwargs)
| 191 | 64 |
import math
def combinations(n, k):
    """
    Unordered, w / o repetitions
    From n choose k

    Uses math.comb, which is exact: the previous factorial-ratio form with
    true division returned a float and could lose precision for large n.
    """
    return math.comb(n, k)
def combinations_with_repetitions(n, k):
    """
    Unordered, w / repetitions
    Stars and bars: C(n + k - 1, k) ways to distribute k picks over n kinds
    (think of the matchstick/delimiter argument).

    math.comb keeps the result an exact integer; the old
    combinations(k + n - 1, n - 1) route computed the same value but via
    float division.
    """
    return math.comb(n + k - 1, k)
def permutations(n, k):
    """
    Ordered, w / o repetitions
    K-permutations

    Uses math.perm for an exact integer result; the previous
    factorial-ratio form with true division returned a float and could
    lose precision for large n.
    """
    return math.perm(n, k)
def tuples(n, k):
    """
    Ordered, w / repetitions: each of the k slots independently takes one
    of n values.
    """
    return pow(n, k)
def probability(A, omega):
    # Classical probability: favourable outcomes A over total outcomes omega.
    # True division — always returns a float; omega must be non-zero.
    return A / omega
| 660 | 239 |
# author Dominik Capkovic
# contact: domcapkovic@gmail.com; https://www.linkedin.com/in/dominik-čapkovič-b0ab8575/
# GitHub: https://github.com/kilimetr
packings_str = '''
Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450
Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430
Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440
Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400
Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300
Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337
Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360
Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320
Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336
Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341
Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410
Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446
Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380
Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368
Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333
Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336
Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341
Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.345
Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333
Ralu ring | plastic | 38.0 | 13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333
Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303
NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410
NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366
NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425
NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322
Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402
Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345
Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408
Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390
Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342
Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369
Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465
Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464
Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379
Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450
Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398
Glitsch CMR ring | metal | 0.5" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495
Glitsch CMR ring | metal | 1.0" | 158467 | 232.5 | 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0
Glitsch CMR ring | metal | 1.5"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0
Glitsch CMR ring | metal | 1.5" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0
TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389
Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412
Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210
VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405
VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420
Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459
Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296
Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257
Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331
Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390
Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377
Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302
Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0
Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0
Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370
Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232
Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387
DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 1.173 | 0.514 | 1.690 | 0.354
DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326
Ralu pak | metal | YC-250 | 0 | 250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385
Mellapak | metal | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0
Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0
Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270
Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327
Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390
Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422
Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412
Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0
Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167
'''
# Parse the pipe-separated table above into a list of dicts, one per packing.
# Columns: name | material | size | N | a | eps | CS | CFl | Ch | CP0 | CL | CV
packings = []
for row in packings_str.strip().splitlines():
    name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = (
        field.strip() for field in row.split(" | "))
    packings.append({
        'name': name,
        'material': material,
        'size': size,          # kept as text: values like '50S' or '1.5"' occur
        'N': int(N),
        'a': float(a),
        'eps': float(eps),
        'CS': float(CS),
        'CFl': float(CFl),
        'Ch': float(Ch),
        'CP0': float(CP0),
        'CL': float(CL),
        'CV': float(CV),
    })
# EXPORTING PACKING NAME
# Collect each distinct packing name once, preserving first-seen order.
# (Cleaned up: the old loop indexed packings[i] three times and carried a
# dead 'else: pass' branch.)
seen_packing_name = set()
export_packing_name = []
for packing in packings:
    name = packing["name"]
    if name not in seen_packing_name:
        seen_packing_name.add(name)
        export_packing_name.append(name)
# # EXPORT PACKING SURFACEAREA
# export_packing_surfacearea = []
# for item in packings:
# if item["name"] == type_packing:
# export_packing_surfacearea.append(item["a"])
# print(export_packing_surfacearea)
| 9,297 | 5,827 |
from .roi_align_rotated import RoIAlignRotated, roi_align_rotated
__all__ = ['RoIAlignRotated', 'roi_align_rotated'] | 117 | 47 |
import time, datetime
import urllib3
print("Importing OpenShift/Kubernetes packages ...")
import kubernetes
import ocp_resources
import openshift
import ocp_resources.node
import ocp_resources.machine
import openshift.dynamic
print("Importing AWS boto3 ...")
import boto3
import botocore
# Lazily-initialized module-level clients; populated by configure().
client_k8s = None
client_ec2 = None
resource_ec2 = None
def configure():
    """Initialize the module-level Kubernetes/OpenShift and AWS clients.

    Populates the globals client_k8s, client_ec2 and resource_ec2.  The AWS
    clients are created in the region reported by the cluster's first
    Machine resource; raises RuntimeError when no machine exists.
    """
    #
    # K8s
    #
    global client_k8s
    try:
        client_k8s = openshift.dynamic.DynamicClient(client=kubernetes.config.new_client_from_config())
    except Exception as e:
        # Deliberate best-effort: continue without k8s (client_k8s stays None).
        print("WARNING: kubernetes not available:", e)
    #
    # AWS
    #
    # The region comes from the first machine's provider spec.
    machines = [m for m in ocp_resources.machine.Machine.get(dyn_client=client_k8s)]
    if not machines:
        raise RuntimeError("No machine available ...")
    cluster_region = machines[0].instance.spec.providerSpec.value.placement.region
    global client_ec2, resource_ec2
    cfg = botocore.config.Config(region_name=cluster_region)
    # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html
    client_ec2 = boto3.client('ec2', config=cfg)
    resource_ec2 = boto3.resource('ec2', config=cfg)
    print("Ready.")
def wait_openshift():
    """Block until the OpenShift API answers and reports at least one node.

    Retries every 10 seconds, swallowing the connection/API errors that are
    expected while the cluster is still coming up.  Sets the module-level
    client_k8s as a side effect.
    """
    print("Waiting for OpenShift cluster to be ready ...")
    while True:
        try:
            global client_k8s
            # BUG FIX: 'DynamicClient' was referenced unqualified, which
            # raises NameError — only 'openshift.dynamic' is imported here.
            client_k8s = openshift.dynamic.DynamicClient(
                client=kubernetes.config.new_client_from_config())
            nodes = [m for m in ocp_resources.node.Node.get(dyn_client=client_k8s)]
            if len(nodes) != 0:
                print(f"Found {len(nodes)} node, OpenShift Cluster is ready!")
                break
        except urllib3.exceptions.MaxRetryError: pass
        except kubernetes.client.exceptions.ApiException: pass
        time.sleep(10)
def get_machine_props():
    """Return (cluster_name, ec2 Instance, availability zone) for the
    single cluster machine.

    Returns (None, None) when configure() could not reach Kubernetes.
    NOTE(review): the early exit returns a 2-tuple while the normal path
    returns a 3-tuple — confirm callers unpack accordingly.
    Raises RuntimeError when the cluster has more than one machine.
    """
    if not client_k8s:
        return None, None
    machines = [m for m in ocp_resources.machine.Machine.get(dyn_client=client_k8s)]
    if len(machines) != 1:
        raise RuntimeError("Should be only one machine ...")
    machine = machines[0]
    cluster_name = machine.cluster_name
    print(f"Cluster name: {cluster_name}")
    # Resolve the AWS EC2 instance backing this machine.
    instance = resource_ec2.Instance(machine.instance.status.providerStatus.instanceId)
    instance.load()
    print(f"Instance Id: {instance.id}")
    zone = machine.instance.spec.providerSpec.value.placement.availabilityZone
    print(f"Availability zone: {zone}")
    return cluster_name, instance, zone
def get_instance_root_volume(instance):
    """Return the first EBS volume attached to *instance*.

    Raises RuntimeError when the instance has no volumes (instead of an
    opaque IndexError) and warns when more than one volume is attached.
    """
    volumes = [v for v in instance.volumes.all()]
    if not volumes:
        # Robustness: fail with a clear message rather than IndexError.
        raise RuntimeError("No volume attached to instance ...")
    if len(volumes) > 1:
        print("WARNING: more than 1 volume found ...")
    return volumes[0]
def get_cluster_snapshot(cluster_name, instance, zone):
    """Return the EBS snapshot tagged as owned by this cluster, or None.

    When several snapshots carry the ownership tag, the first one returned
    by EC2 is used (with a warning).
    """
    owned_filter = {
        'Name': f'tag:kubernetes.io/cluster/{cluster_name}',
        'Values': ['owned']
    }
    found = client_ec2.describe_snapshots(Filters=[owned_filter])["Snapshots"]
    if not found:
        return None
    if len(found) > 1:
        print("WARNING: more than 1 snapshot found ... taking the first one.")
    snapshot = resource_ec2.Snapshot(found[0]['SnapshotId'])
    snapshot.load()
    return snapshot
def await_snapshot(snapshot):
    """Poll an EC2 snapshot every 10s until its progress reaches 100%.

    Prints the progress only when it changes, so long-running snapshots
    give periodic feedback without flooding the console.
    """
    prev = ""
    if snapshot.progress == "100%":
        print(f"Snapshot {snapshot.id} is ready.")
    while not snapshot.progress == "100%":
        if prev == "":
            # First pass: announce what we are waiting for.
            print(f"Awaiting for the completion of snapshot {snapshot.id} ...")
            print(snapshot.progress)
            prev = snapshot.progress
        time.sleep(10)
        snapshot.reload()
        if prev != snapshot.progress:
            prev = snapshot.progress
            print(snapshot.progress)
def human_ts():
    """Return the current local time as a short ISO-like stamp (YYYY-MM-DDTHH:MM)."""
    return "{:%Y-%m-%dT%H:%M}".format(datetime.datetime.now())
| 3,779 | 1,199 |
from mogwai.connection import setup
from mogwai.models import Vertex, Edge
from mogwai import properties
from mogwai import relationships
from mogwai._compat import print_
import datetime
from pytz import utc
from functools import partial
import pickle
# Open the mogwai connection to a local graph server before any model
# classes are used.
setup('127.0.0.1')
class OwnsObject(Edge):
    """Edge recording that a Person owns a Trinket, with an ownership date."""
    label = 'owns_object' # this is optional, will default to the class name
    since = properties.DateTime(required=True,
                                default=partial(datetime.datetime.now, tz=utc),
                                description='Owned object since')
class Trinket(Vertex):
    """Vertex for an ownable object; stored under the 'gadget' element type."""
    element_type = 'gadget'
    name = properties.String(required=True, max_length=1024)
class Person(Vertex):
    """Vertex for a person, with a relationship shortcut to owned Trinkets."""
    element_type = 'person' # this is optional, will default to the class name
    name = properties.String(required=True, max_length=512)
    email = properties.Email(required=True)
    # Define a shortcut relationship method
    belongings = relationships.Relationship(OwnsObject, Trinket)
## Creation
# Create a trinket
trinket = Trinket.create(name='Clock')
# Create a Person
bob = Person.create(name='Bob Smith', email='bob@bob.net')
# Create the Ownership Relationship
relationship = OwnsObject.create(outV=bob, inV=trinket)
# Round-trip each element through pickle and assert equality, demonstrating
# that mogwai vertices and edges are serializable.
bob_serialized = pickle.dumps(bob)
print_("Bob Serialized: {}".format(bob_serialized))
deserialized_bob = pickle.loads(bob_serialized)
print_("Bob Deserialized: {}".format(deserialized_bob))
assert bob == deserialized_bob
relationship_serialized = pickle.dumps(relationship)
print_("Relationship Serialized: {}".format(relationship_serialized))
deserialized_relationship = pickle.loads(relationship_serialized)
print_("Relationship Deserialized: {}".format(deserialized_relationship))
assert relationship == deserialized_relationship
trinket_serialized = pickle.dumps(trinket)
print_("Trinket Serialized: {}".format(trinket_serialized))
deserialized_trinket = pickle.loads(trinket_serialized)
print_("Trinket Deserialized: {}".format(deserialized_trinket))
assert trinket == deserialized_trinket
| 2,053 | 678 |
import itertools
def test_product_combinator(container_, repeat_times):
"""
cartesian product, equivalent to a nested for-loop
product('ABCD', repeat=2)
AA AB AC AD BA BB BC BD CA CB CC CD DA DB DC DD
"""
return list(itertools.product(container_, repeat=repeat_times))
def test_permutation_combinator(container_, pairs_):
"""
r-length tuples, all possible orderings, no repeated elements
permutations('ABCD', 2)
AB AC AD BA BC BD CA CB CD DA DB DC
"""
return list(
itertools.permutations(
container_,
pairs_
)
)
def test_combination_combinator(container, pairs_):
"""
r-length tuples, in sorted order, no repeated elements
combinations('ABCD', 2)
AB AC AD BC BD CD
"""
return list(
itertools.combinations(container, pairs_)
)
def test_combinations_with_replacement_combinator(container, pairs_):
"""
r-length tuples, in sorted order, no repeated elements
combinations('ABCD', 2)
AB AC AD BC BD CD
"""
return list(
itertools.combinations_with_replacement(container, pairs_)
)
if __name__ == '__main__':
    count = 10
    # Build the labels "TYPE_1" .. "TYPE_10".
    items_list = ['TYPE_' + str(i) for i in range(1, count + 1)]
    print(items_list)
    print("PRODUCT :=>", test_product_combinator(container_=items_list, repeat_times=2))
    print("PERMUTATIONS :=>", test_permutation_combinator(container_=items_list, pairs_=2))
    print("COMBINATIONS :=>", test_combination_combinator(container=items_list, pairs_=2))
    print("COMBINATIONS WITH REPLACEMENT :=>", test_combinations_with_replacement_combinator(container=items_list, pairs_=2))
| 1,877 | 610 |
from cdnu.ccds import CdsPos, load_ccds
from cdnu.cram import load_cds_list
def test_load_cds_list():
    """A known CDS interval decodes to a complete ORF: start codon, whole
    codons, expected length, and a stop codon.

    BUG FIX: `is` compares object identity and only "works" for small ints
    through a CPython caching accident; use `==` for value comparison.
    """
    cds = load_cds_list('./test/cramExample.cram',
                        [CdsPos('first', [(33036411, 33036588)], '21')])
    assert len(cds) == 1
    single_cds = cds[0]
    assert single_cds is not None
    assert len(single_cds) % 3 == 0
    assert len(single_cds) == (33036588 - 33036411)
    assert single_cds.startswith('ATG')
    assert single_cds[-3:] in ('TAG', 'TAA', 'TGA')
def test_load_cds_list_small():
    """An interval too short to be a valid CDS yields None.

    BUG FIX: replaced identity checks (`is 1`) with value equality.
    """
    cds = load_cds_list('./test/cramExample.cram',
                        [CdsPos('first', [(33036660, 33036670)], '21')])
    assert len(cds) == 1
    assert cds[0] is None
def test_load_cds_list_some():
    """Intervals on 'chr1' naming are not found in this CRAM and yield None.

    BUG FIX: replaced identity checks (`is 3`) with value equality.
    """
    ccds = [
        CdsPos('first', [(925941, 926012)], 'chr1'),
        CdsPos('second', [(966531, 966613)], 'chr1'),
        CdsPos('third', [(7784877, 7785004)], 'chr1')
    ]
    address = ('ftp://ftp.ncbi.nlm.nih.gov/1000genomes/ftp/'
               '1000G_2504_high_coverage/data/ERR3239281/NA07051.final.cram')
    # NOTE(review): the FTP address is immediately overridden by an absolute
    # path that only exists on the author's machine — make this configurable
    # or skip the test when the file is missing.
    address = '/Users/peta/School/mbg/mbg-codon-usage/huge/NA07051.final.cram'
    cds_list = load_cds_list(address, ccds)
    assert len(cds_list) == 3
    assert cds_list[0] is None
    assert cds_list[1] is None
    assert cds_list[2] is None
def test_load_cds_list_huge():
    """Every decodable CDS from the first 100 CCDS records is a valid ORF.

    BUG FIX: replaced the identity check (`% 3 is 0`) with value equality.
    """
    ccds = load_ccds()
    address = ('ftp://ftp.ncbi.nlm.nih.gov/1000genomes/ftp/'
               '1000G_2504_high_coverage/data/ERR3239281/NA07051.final.cram')
    # NOTE(review): hard-coded local path overrides the FTP address (see
    # test_load_cds_list_some).
    address = '/Users/peta/School/mbg/mbg-codon-usage/huge/NA07051.final.cram'
    cds_list = load_cds_list(address, ccds[:100])
    for cds in cds_list:
        if cds is not None:
            print('{}.. {:4} ..{}'.format(cds[:3], len(cds), cds[-3:]))
            assert len(cds) % 3 == 0
            assert cds.startswith('ATG')
            assert cds[-3:] in ('TAG', 'TAA', 'TGA')
        else:
            print('None')
| 1,922 | 876 |
#!/usr/bin/env python
# encoding: utf-8
import torch.optim as optim
from torch.nn.utils import clip_grad_norm
class Optim(object):
    """Thin wrapper around Adam that optionally clips gradient norms.

    Attributes:
        lr: Learning rate passed to Adam.
        max_grad_norm: Maximum L2 norm for gradient clipping; -1 disables it.
    """

    def __init__(self, lr, max_grad_norm):
        self.lr = lr
        self.max_grad_norm = max_grad_norm

    def set_parameters(self, params):
        """Bind the parameters to optimize and build the Adam optimizer."""
        self.params = list(params)
        self.optimizer = optim.Adam(self.params, lr=self.lr)

    def step(self):
        """Clip gradients (if enabled) and take one optimization step."""
        if self.max_grad_norm != -1:
            # NOTE: clip_grad_norm is torch's deprecated alias of
            # clip_grad_norm_; kept to match the module-level import.
            clip_grad_norm(self.params, self.max_grad_norm)
        self.optimizer.step()

    def load_state_dict(self, state_dict):
        """Restore optimizer state from a previously saved state dict.

        BUG FIX: the original called `state_dict()` — the argument is a
        dict, not a callable, so restoring always raised TypeError.
        """
        self.optimizer.load_state_dict(state_dict)
| 622 | 218 |
import io
from setuptools import setup, find_packages
def make_long_description():
    """Read README.md (UTF-8) and return it for use as PyPI's long description."""
    with io.open("README.md", encoding="utf-8") as fp:
        return fp.read()
setup(
    name="nephthys",
    description="Advanced Python Logger",
    # The PyPI long description is the README rendered as Markdown.
    long_description=make_long_description(),
    long_description_content_type="text/markdown",
    version="1.0.2",
    author="Fabio Todaro",
    license="MIT",
    author_email="ft@ovalmoney.com",
    url="https://github.com/OvalMoney/Nephthys",
    python_requires=">=3.6",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3 :: Only",
        "Operating System :: OS Independent",
    ],
    # Ship every package except the tests and the requirements helper dir.
    packages=find_packages(exclude=["tests", "requirements"]),
    install_requires=["webob"],
    # Optional extras: faster JSON and the requests integration.
    extras_require={"JSON": ["python-rapidjson"], "requests": ["requests"]},
)
| 1,033 | 327 |
# -*- coding: utf-8 -*-
import sys
from ..decorators import linter
from ..parsers.base import ParserBase
@linter(
    name="robotframework-lint",
    install=[[sys.executable, "-m", "pip", "install", "-U", "robotframework-lint"]],
    help_cmd=["rflint", "--help"],
    run=["rflint"],
    rundefault=["rflint", "-A", "{config_dir}/.rflint"],
    dotfiles=[".rflint"],
    language="robotframework",
    autorun=True,
    run_per_file=True,
)
class RobotFrameworkLintParser(ParserBase):
    """Parse rflint output."""

    def parse(self, lint_data):
        """Convert raw rflint output into a set of (path, line, message) tuples.

        rflint prints a `+ <path>` header before the messages for that file,
        then one `<severity>: <line>,<col>: <message>` line per issue.
        Malformed lines are reported and skipped.
        """
        messages = set()
        current_file = None
        for _, output in lint_data:
            for line in output.split("\n"):
                try:
                    if not line.strip():
                        continue
                    if line.startswith("+"):
                        current_file = line[2:]
                        continue
                    if current_file is None:
                        # BUG FIX: a message line arriving before any
                        # "+ file" header used to crash with an uncaught
                        # AttributeError on None.strip(); skip it instead.
                        continue
                    _, position, message = line.split(":")
                    line_number, _ = position.split(",")
                    messages.add(
                        (current_file.strip(), int(line_number), message.strip())
                    )
                except (ValueError, IndexError):
                    print(
                        "({0}) Invalid message: {1}".format(type(self).__name__, line)
                    )
        return messages
| 1,420 | 399 |
# Copyright 2014 Google Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import subprocess
import sys
from catnip import protocol
from catnip import sandbox
class ClientError(Exception):
  """Raised when the remote catnip invocation fails to start or exits non-zero."""
  pass
class CatnipClient(object):
  """SSH-based client for running sandboxed programs on a remote catnip host.

  Python 2 code: it uses `print >>sys.stderr` statements. The client shells
  out to `ssh` via subprocess, streams requests over stdin using the catnip
  protocol, and reads responses from stdout.
  """

  def __init__(self):
    self._hostname = None
    self._port = 22
    self._username = 'catnip'
    self._identity_file = None
    self._disk_image_stream = None
    self._check_ssh_host_key = False
    self._multiplex = False
    self._debug = False

  ##############################################################################
  ## Setters

  def SetHost(self, hostname, port=22):
    """Set the remote host name and SSH port (1-65535)."""
    if not isinstance(hostname, str):
      raise TypeError('hostname must be a string')
    if not (isinstance(port, int) and 1 <= port < 65536):
      raise TypeError('invalid port')
    self._hostname = hostname
    self._port = port

  def SetUser(self, username):
    """Set the SSH user name (default: 'catnip')."""
    if not isinstance(username, str):
      raise TypeError('username must be a string')
    self._username = username

  def SetIdentityFile(self, identity_file):
    """Set the SSH private key file passed to `ssh -i`."""
    self._identity_file = identity_file

  def SetDiskImageStream(self, disk_image_stream):
    """Set a readable stream holding the disk image to upload before running."""
    if not hasattr(disk_image_stream, 'read'):
      raise TypeError('disk_image_stream must be a stream')
    self._disk_image_stream = disk_image_stream

  def SetCheckSSHHostKey(self, check_ssh_host_key):
    """Enable/disable strict SSH host-key checking (default: disabled)."""
    if not isinstance(check_ssh_host_key, bool):
      raise TypeError('check_ssh_host_key must be a boolean')
    self._check_ssh_host_key = check_ssh_host_key

  def SetMultiplex(self, multiplex):
    """Enable/disable SSH connection multiplexing (ControlMaster)."""
    if not isinstance(multiplex, bool):
      raise TypeError('multiplex must be a boolean')
    self._multiplex = multiplex

  def SetDebug(self, debug):
    """When True, ssh's stderr is shown instead of being discarded."""
    if not isinstance(debug, bool):
      raise TypeError('debug must be a boolean')
    self._debug = debug

  ##############################################################################
  ## Actions

  def Run(self, params, requests, extra_files,
          output_filename=None, callback=None):
    """Run sandboxed requests on the remote host.

    Exactly one of output_filename (raw response written to a file) or
    callback (parsed responses delivered via ResponseReader) must be given.
    Raises ValueError on invalid arguments/params and ClientError when the
    remote program fails. On KeyboardInterrupt the child is sent SIGINT.
    """
    if ((not output_filename and not callback) or
        (output_filename and callback)):
      raise ValueError('One of output_filename or callback should be passed.')
    if not params.Validate():
      raise ValueError('Insufficient SandboxParams')
    for request in requests:
      if not request.Validate():
        raise ValueError('Insufficient RunRequest')
    self._CheckSettingsBeforeRun()
    args = self._BuildSSHArgs() + self._BuildRunArgs()
    if params.debug:
      print >>sys.stderr, ' '.join(args)
    proc = None
    try:
      with open(os.devnull, 'w') as null:
        # Discard ssh noise unless debugging.
        stderr = None if self._debug else null
        if output_filename:
          with open(output_filename, 'w') as stdout:
            proc = subprocess.Popen(args,
                                    close_fds=True,
                                    stdin=subprocess.PIPE,
                                    stdout=stdout,
                                    stderr=stderr)
        else:
          proc = subprocess.Popen(args,
                                  close_fds=True,
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=stderr)
        # Stream the whole request (params, run requests, extra files and
        # optionally the disk image) over the child's stdin.
        writer = protocol.RequestWriter(proc.stdin)
        writer.WriteParams(params)
        for request in requests:
          writer.WriteRunRequest(request)
        for extra_file in extra_files:
          writer.WriteExtraFile(os.path.basename(extra_file), extra_file)
        if self._disk_image_stream:
          writer.WriteDiskImage(self._disk_image_stream)
        writer.Finish()
        proc.stdin.close()
        if callback:
          reader = protocol.ResponseReader(proc.stdout)
          reader.Read(callback)
        proc.wait()
        if proc.returncode != 0:
          raise ClientError('Failed to start a remote program')
        # Mark the child as finished so the finally clause won't signal it.
        proc = None
    except KeyboardInterrupt:
      if params.debug:
        print >>sys.stderr, 'Interrupted during execution.'
    finally:
      # Interrupt the child if it is still running (e.g. after Ctrl-C).
      if proc and proc.poll() is None:
        os.kill(proc.pid, signal.SIGINT)

  def GetStatus(self):
    """Fetch the remote sandbox status text via `catnip-status`."""
    self._CheckSettingsBeforeRun()
    args = self._BuildSSHArgs() + self._BuildGetStatusArgs()
    with open(os.devnull, 'w') as null:
      stderr = None if self._debug else null
      proc = subprocess.Popen(args,
                              close_fds=True,
                              stdin=null,
                              stdout=subprocess.PIPE,
                              stderr=stderr)
      status = proc.communicate(None)[0]
      if proc.returncode != 0:
        raise ClientError('Failed to start a remote program')
    return status

  def EndMultiplex(self):
    """Ask a persistent multiplexed SSH master connection to exit (`ssh -O exit`)."""
    self._CheckSettingsBeforeRun()
    args = self._BuildSSHArgs() + self._BuildEndMultiplexArgs()
    with open(os.devnull, 'w') as null:
      stderr = None if self._debug else null
      proc = subprocess.Popen(args,
                              close_fds=True,
                              stdin=null,
                              stdout=null,
                              stderr=stderr)
      proc.communicate(None)

  ##############################################################################
  ## Bits

  def _CheckSettingsBeforeRun(self):
    """Validate required settings before contacting the remote host."""
    if not self._hostname:
      raise ValueError('hostname is not set')

  def _BuildSSHArgs(self):
    """Assemble the ssh command-line prefix from the configured options."""
    args = ['ssh', '-T', '-p', '%d' % self._port]
    if self._identity_file:
      args.extend(['-i', self._identity_file])
    if not self._check_ssh_host_key:
      # Accept any host key: convenient for ephemeral VMs, insecure otherwise.
      args.extend(['-o', 'StrictHostKeyChecking=no',
                   '-o', 'UserKnownHostsFile=/dev/null'])
    if self._multiplex:
      args.extend(['-o', 'ControlMaster=auto',
                   '-o', 'ControlPath=/tmp/catnip-ssh.%u.%r@%h:%p',
                   '-o', 'ControlPersist=yes'])
    args.append('%s@%s' % (self._username, self._hostname))
    return args

  def _BuildRunArgs(self):
    """Remote command executed by Run()."""
    return ['sudo', 'catnip-run']

  def _BuildGetStatusArgs(self):
    """Remote command executed by GetStatus()."""
    return ['sudo', 'catnip-status']

  def _BuildEndMultiplexArgs(self):
    """Extra ssh args asking the master connection to exit."""
    return ['-o', 'ControlPath=/tmp/catnip-ssh.%u.%r@%h:%p',
            '-O', 'exit']
| 6,676 | 1,926 |
# IF () THEN ()
# IF direction = left THEN position = (-1, 0)
class RuleAntecedent:
    """IF-part of a rule: the agent's heading plus a local world observation."""
    def __init__(self, direction=None, world=None):
        self.direction = direction  # up, down, left, right
        self.world = world  # (-1, 0, 0) (-1, 0, 1)
class RuleConsequent:
    """THEN-part of a rule: either a position delta to move by, or a new heading."""
    def __init__(self, position=None, direction=None):
        self.position = position
        self.direction = direction
class Rule:
    """A single IF-THEN rule pairing an antecedent with its consequent."""
    def __init__(self, antecedent, consequent):
        self.antecedent = antecedent
        self.consequent = consequent
# Grid dimensions.
M = 30
N = 30
# Rule base. `world` appears to encode (dx, dy, blocked?) for the cell ahead:
# when the third element is 0 the agent moves into it, when 1 it turns
# clockwise (left->up->right->down->left). TODO confirm against the
# interpreter that consumes RULES.
RULES = [
    Rule(
        RuleAntecedent(direction="left", world=(-1, 0, 0)),
        RuleConsequent(position=(-1, 0), direction=None)
    ),
    Rule(
        RuleAntecedent(direction="left", world=(-1, 0, 1)),
        RuleConsequent(position=None, direction="up")
    ),
    Rule(
        RuleAntecedent(direction="up", world=(0, -1, 0)),
        RuleConsequent(position=(0, -1), direction=None)
    ),
    Rule(
        RuleAntecedent(direction="up", world=(0, -1, 1)),
        RuleConsequent(position=None, direction="right")
    ),
    Rule(
        RuleAntecedent(direction="right", world=(1, 0, 0)),
        RuleConsequent(position=(1, 0), direction=None)
    ),
    Rule(
        RuleAntecedent(direction="right", world=(1, 0, 1)),
        RuleConsequent(position=None, direction="down")
    ),
    Rule(
        RuleAntecedent(direction="down", world=(0, 1, 0)),
        RuleConsequent(position=(0, 1), direction=None)
    ),
    Rule(
        RuleAntecedent(direction="down", world=(0, 1, 1)),
        RuleConsequent(position=None, direction="left")
    )
]
| 1,618 | 561 |
def test(inFile):
    """Parse an HTML stream with PyXML's HtmlLib and pretty-print the DOM.

    NOTE: Python 2 / PyXML (xml.dom.ext) code; PyXML is long unmaintained
    and these imports are not part of the modern stdlib.
    """
    from xml.dom.ext.reader import HtmlLib
    from xml.dom import ext
    from xml.dom import Node
    from xml.dom.html import HTMLDocument
    doc = HTMLDocument.HTMLDocument()
    HtmlLib.FromHtmlStream(inFile,doc)
    print doc
    ext.PrettyPrint(doc)
if __name__ == '__main__':
    import sys
    # Read from the file named in argv[1] when given, else from stdin.
    inFile = sys.stdin
    if len(sys.argv) == 2:
        inFile = open(sys.argv[1],'r')
    test(inFile)
| 437 | 159 |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021/08/09 11:32:25
# @File : project.py
# @Author : K.B.Lam
# @Version : 1.0
from .base import _BaseModel
from app.extensions import db
from app.models.tools import get_username
class Project(_BaseModel):
    """DB model for a test project (name, test type, version, creator)."""
    __tablename__ = "Project"
    __bind_key__ = "default"
    # Fields required when creating a project via the API, on top of the
    # base model's requirements.
    REQUIRE_ITEMS = _BaseModel.REQUIRE_ITEMS + ["name", "projectTestType", "version",
                                                "uid", "description"]
    OPTIONAL_ITEMS = _BaseModel.OPTIONAL_ITEMS
    # Column comments are in Chinese: name = project name, projectTestType =
    # project test type, version = project version, uid = creator.
    name = db.Column('name', db.String(128), nullable=False, comment="项目名称")
    projectTestType = db.Column('projectTestType', db.String(64), nullable=False, comment="项目测试类型")
    version = db.Column('version', db.String(32), nullable=False, comment="项目版本")
    uid = db.Column('uid', db.String(32), nullable=False, comment="创建者")
    description = db.Column('description', db.String(256), nullable=True)

    def get_json(self):
        """Serialize the project row for API responses.

        NOTE(review): "uid_name" carries the raw uid while "uid" carries the
        resolved username — the two keys look swapped; confirm against the
        frontend before renaming. "create_at" (sic) is likewise kept as-is
        because clients may depend on the misspelled key.
        """
        return {
            "object_id": self.object_id,
            "name": self.name,
            "uid_name": self.uid,
            "uid": get_username("UID", self.uid),
            "projectTestType": self.projectTestType,
            "version": self.version,
            "create_at": self.created_at,
            "updated_at": self.updated_at,
            "description": self.description
        }

    @staticmethod
    def get_type():
        """Placeholder: returns an empty result list (not yet implemented)."""
        res = []
        return {"res": res}
| 1,455 | 498 |
from flask.views import MethodView
from flask import request, make_response, jsonify, current_app
from flask_jwt_extended import (jwt_required, get_jwt_identity, get_raw_jwt)
from datetime import datetime
from api.helpers.classification import (
get_classified_data, find_by_id, find_all_by_id, save_image, save_to_db, delete_by_id)
from api.models.Classification import Classification
class ClassifyImage(MethodView):
    """Handles an image upload and returns its (mock) classification data."""

    @jwt_required
    def post(self):
        """Save the uploaded image under a per-user unique name and persist
        a Classification row. Returns 400 when no image was sent, 500 on
        any storage error."""
        current_user = get_jwt_identity()
        try:
            image = request.files.getlist("image")[0]
        except Exception as ex:
            response = {
                "success": False,
                "msg": "Missing image"
            }
            return make_response(jsonify(response)), 400
        try:
            # BUG FIX: split('.') truncated names containing dots
            # ("my.photo.jpg" -> name "my", ext "photo"); rsplit on the
            # last dot keeps the full base name and the real extension.
            image_name, ext = image.filename.rsplit('.', 1)
            timestamp = datetime.timestamp(datetime.now())
            image_url = f"{current_user}_{image_name}_{timestamp}.{ext}"
            save_image(current_user, image, image_url)
            # Mock classification data
            _current = get_classified_data()
            to_save = Classification(
                image_name=image_name,
                image_url=image_url,
                label=_current[0],
                confidence=_current[1],
                user_id=current_user
            )
            classification_schema = save_to_db(to_save)
            response = {
                "success": True,
                "msg": "Image labelled successfully",
                "body": classification_schema
            }
            return make_response(jsonify(response)), 200
        except Exception as err:
            response = {
                "success": False,
                "msg": "Error while saving image"
            }
            return make_response(jsonify(response)), 500
class ClassificationInfo(MethodView):
    """Returns a single classification record looked up by its id."""

    @jwt_required
    def get(self, classification_id):
        """Return 400 without an id, 200 with the record, 500 on lookup errors."""
        try:
            if not classification_id:
                payload = {
                    "success": False,
                    "msg": "Classification id not found"
                }
                return make_response(jsonify(payload)), 400
            record = find_by_id(classification_id)
            payload = {
                "success": True,
                "msg": "Classification found",
                "body": record
            }
            return make_response(jsonify(payload)), 200
        except Exception as err:
            payload = {
                "success": False,
                "msg": "Error while fetching classification"
            }
            return make_response(jsonify(payload)), 500
class GetAllClassifications(MethodView):
    """Returns all classifications created by the authenticated user."""

    @jwt_required
    def get(self):
        """Look up every classification for the JWT identity; 500 on errors."""
        user_id = get_jwt_identity()
        try:
            records = find_all_by_id(user_id)
            payload = {
                "success": True,
                "msg": "Classifications found",
                "body": records
            }
            return make_response(jsonify(payload)), 200
        except Exception as err:
            payload = {
                "success": False,
                "msg": "Error while fetching classifications"
            }
            return make_response(jsonify(payload)), 500
class DeleteClassification(MethodView):
    """Deletes a classification by id, enforcing that only its creator may do so."""

    @jwt_required
    def delete(self, classification_id):
        """Return 400 without an id, 404 when unknown, 401 for foreign
        records, 200 on success and 500 on deletion errors."""
        if not classification_id:
            payload = {
                "success": False,
                "msg": "Classification id not found"
            }
            return make_response(jsonify(payload)), 400
        requester_id = get_jwt_identity()
        record = find_by_id(classification_id)
        # Check if a classification with that id exists or not
        if not record:
            payload = {
                "success": False,
                "msg": "Classification not found"
            }
            return make_response(jsonify(payload)), 404
        if record["user_id"] != requester_id:
            # The requester did not create this classification, so deleting
            # it is not allowed.
            payload = {
                "success": False,
                "msg": "Can only delete classifications you created"
            }
            return make_response(jsonify(payload)), 401
        try:
            delete_by_id(classification_id)
            payload = {
                "success": True,
                "msg": "Classification deleted successfully"
            }
            return make_response(jsonify(payload)), 200
        except Exception as err:
            payload = {
                "success": False,
                "msg": "Error deleting classification"
            }
            return make_response(jsonify(payload)), 500
# View registry consumed by the route definitions: endpoint name -> view.
classificationController = {
    "classify_image": ClassifyImage.as_view("classify_image"),
    "get_classification": ClassificationInfo.as_view("get_classification"),
    "get_all_classifications": GetAllClassifications.as_view("get_all_classifications"),
    "delete_classification": DeleteClassification.as_view("delete_classification")
}
| 5,542 | 1,390 |
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.http import JsonResponse
from django.core import serializers
from . import models
import numpy as np
from sklearn.cluster import KMeans
def kmeans(request):
    """Demo Django view: run 2-cluster KMeans on fixed sample points.

    Returns a JSON body with the per-point labels and the cluster centers.
    The `request` argument is unused (the data is hard-coded).
    """
    sample = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
    # Renamed the local (was `kmeans`) so it no longer shadows this view.
    model = KMeans(n_clusters=2, random_state=0).fit(sample)
    response = {
        'label': model.labels_.tolist(),
        'cluster_centers': model.cluster_centers_.tolist()
    }
    # response = serializers.serialize('python', response)
    return JsonResponse(response)
#!/usr/bin/env python
# Create wheel with: python setup.py bdist_wheel
# Install with: pip install -U dist/loadconfig-*.whl
from os import environ
from re import sub
from setuptools import setup
# Extract __version__ from the package source without importing it (the
# package may have dependencies unavailable at build time).
for line in open('loadconfig/__init__.py'):
    if line.startswith('__version__'):
        version = sub(".+'(.+?)'\n", r'\1', line)
# pbr reads the version from this environment variable.
# NOTE(review): if no __version__ line matches, `version` is unbound here
# and this raises NameError.
environ["PBR_VERSION"] = version
setup(setup_requires=['pbr'], pbr=True)
| 412 | 137 |
import numpy as np
import scipy.sparse as ss
import logging
import time
import warnings
from .feature_selection import get_significant_genes
from .feature_selection import calculate_minmax
# NOTE(review): globally silencing every warning also hides genuine
# deprecation notices from dependencies — consider narrowing the filter.
warnings.simplefilter("ignore")
logging.basicConfig(format='%(process)d - %(levelname)s : %(asctime)s - %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
def run_CDR_analysis(data, phenotype, capvar = 0.95, pernum = 2000, thres = 0.05):
    """Main CDR-g analysis function
    The key step in CDR-g is an SVD-decomposition on gene co-expression matrices.
    Depending on the sequencing platform, this SVD step can produce thousands of
    factor loadings. By default, CDR-g selects number of factor loadings which
    captures 95% of variance in the dataset.
    Args:
        data (anndata): anndata object of interest
        phenotype (str): condition of interest
        capvar (float, optional): specifies the number of factor loadings to examine. Defaults to 0.95.
        pernum (int, optional): number of permutations to determine importance score. Defaults to 2000.
        thres (float, optional): cut-off for permutation importance to select genes. Defaults to 0.05.
    """
    start = time.time()
    gene_num = data.X.shape[0]
    cell_num = data.X.shape[1]
    # NOTE(review): the log call passes (cell_num, gene_num) into
    # "%s genes X %s cells" — either the variable names or the argument
    # order looks swapped; confirm the AnnData orientation (obs x var)
    # before changing.
    logger.info('processing dataset of %s genes X %s cells', cell_num, gene_num)
    logger.info('target class label:: %s', phenotype)
    logger.info("SVD and threshold selection")
    # Runs the SVD/varimax pipeline and caches results on data.uns.
    res = pvalgenerator(data, phenotype, capvar)
    logger.info("completed SVD and varimax")
    logger.info("permutation testing for gene sets:: perms:: %s threshold :: %s", pernum, thres)
    npheno= data.uns["n_pheno"]
    #get_significant_genes_perms(data, npheno, permnum = pernum, thres = thres)
    get_significant_genes(data, npheno, permnum = pernum, thres = thres)
    logger.info("computed thresholds for gene selection")
    end = time.time()
    timediff = end - start
    numfact = data.uns["selected_loading"]
    logger.info('N factor loadings:: %s', numfact)
    logger.info('wall clock time in seconds:: %s', timediff)
def dask_ver(matrixlist, capvar):
    """provides svd and concatenation with dask

    Builds a correlation matrix per phenotype-specific expression matrix,
    concatenates them column-wise, zeroes NaN entries, and runs a truncated
    SVD keeping enough components to explain *capvar* of the variance.
    Returns (components, singular_values, concatenated_matrix, n_selected).
    """
    import dask.array as da
    from dask_ml.decomposition import TruncatedSVD
    if ss.issparse(matrixlist[0]):
        # Densify sparse matrices before handing them to dask.
        list_of_mats_as_dask_arrays = [da.from_array(np.array(d.todense())) for d in matrixlist]
    else:
        list_of_mats_as_dask_arrays = [da.from_array(d) for d in matrixlist]
    list_of_corr_mats = [da.corrcoef(d) for d in list_of_mats_as_dask_arrays]
    X = da.concatenate(list_of_corr_mats, axis=1)
    # Zero-variance rows produce NaN correlations; treat them as uncorrelated.
    X[da.isnan(X)] = 0.0
    _, y, Ek, Ss = get_optimal_threshold(X, capvar)
    #Ek = svd.components_
    #Ss = svd.singular_values_
    return Ek, Ss, X, y
def process_svd_to_factors(Ek, Ss, N_k):
    """function for rotation and flips

    Sorts components by singular value, converts the top *N_k* into factor
    loadings, varimax-rotates them, re-sorts by rotated eigenvalue and
    sign-flips the columns. Returns (Fs, Ls, Fk, Lk): rotated loadings and
    eigenvalues plus the unrotated ones.
    """
    Ek = Ek.T
    ind = np.argsort(Ss)[::-1]
    Ss = Ss[ind]
    Ek = Ek[:, ind]
    Lk = Ss**2 # singular values to eigenvalues
    Fk = (Lk[:N_k]**0.5)*Ek[:,:N_k] # factor loadings
    # Varimax rotation of the factor loadings
    ROT = classic_orthomax(Fk, gamma=1) # finding rotation (gamma=1 implyes at CLASSIC varimax)
    Fs = np.dot(Fk,ROT) # rotated factor loadings
    Ls = np.diag(ROT.T@np.diag(Lk[:N_k])@ROT) # rotated eigenvalues
    ind = np.argsort(Ls)[::-1]
    Ls = Ls[ind]
    Fs = Fs[:, ind]
    Fs = flip_Ek(Fs)
    return Fs, Ls, Fk, Lk
### aux functions for matrix extraction
def get_numbers_of_pheno(ad, pheno):
    """Return the per-level observation counts of *pheno* as a plain list."""
    counts = ad.obs[pheno].value_counts()
    return counts.tolist()
def get_bools_of_pheno(ad, pheno):
    """Return one boolean mask per unique level of *pheno*, in first-seen order."""
    levels = ad.obs[pheno].unique()
    return [ad.obs[pheno] == level for level in levels]
def extract_matrix_from_anndata(ad, pheno_column):
    """Split *ad* into one transposed expression matrix per phenotype level.

    Returns (matrices, number_of_levels).
    """
    masks = get_bools_of_pheno(ad, pheno_column)
    matrices = [ad[mask, :].X.T for mask in masks]
    return matrices, len(matrices)
#### functions for generating pvals and integrating whole varimax
def _full_Fs(ad, pheno, capvar):
    """Run SVD + varimax over per-phenotype correlation matrices and cache
    the intermediate results on *ad*.uns.

    Caches: selected_loading (N), Fs/Ls (rotated loadings/eigenvalues),
    Fk/Lk (unrotated), n_pheno. Returns the per-gene min-max differences
    of the rotated loadings across phenotypes.
    """
    matlist, numpheno = extract_matrix_from_anndata(ad, pheno)
    Ee, Ss, _, N = dask_ver(matlist, capvar) # specify algorithm
    Fs, Ls, Fk, Lk = process_svd_to_factors(Ee, Ss, N)
    ad.uns["selected_loading"] = N
    ad.uns["Fs"] = Fs
    ad.uns["Ls"] = Ls
    ad.uns["Fk"] = Fk
    ad.uns["Lk"] = Lk
    ad.uns["n_pheno"] = numpheno
    Fs_diff = calculate_minmax(Fs, numpheno)
    return Fs_diff
def pvalgenerator(ad, pheno, capvar):
    """Run the full SVD/varimax pipeline and cache the min-max differences.

    Stores the result on ad.uns["Fs_diff"] and returns it.
    """
    differences = _full_Fs(ad, pheno, capvar)
    ad.uns["Fs_diff"] = differences
    return differences
# leos' aux functions
def classic_orthomax(Phi, gamma = 1, q = 20, tol = 1e-6):
"""Returns the orthomax rotation"""
from numpy import eye, asarray, dot, sum, diag
from numpy.linalg import svd
p,k = Phi.shape
R = eye(k)
d=0
for i in range(q):
d_old = d
Lambda = dot(Phi, R)
u,s,vh = svd(dot(Phi.T,asarray(Lambda)**3 - (gamma/p) * dot(Lambda, diag(diag(dot(Lambda.T,Lambda))))))
R = dot(u,vh)
d = sum(s)
if d_old!=0 and d/d_old < 1 + tol: break
return R
def flip_Ek(Ek):
    """Flip eigenvector columns in place so that each one "points up".

    A column is negated when its most negative entry dominates its largest
    positive entry. Returns the (mutated) input array.
    """
    signs = np.where(abs(Ek.min(axis=0)) > Ek.max(axis=0), -1.0, 1.0)
    Ek *= signs
    return Ek
### aux functions for detecting factors.
def get_optimal_threshold(num, thres, ncomp = 2000):
    """
    selects number of factors for truncated SVD

    Runs a truncated SVD (up to *ncomp* components) on the dask array *num*
    and picks the smallest component count whose cumulative explained
    variance exceeds *thres* (the capvar fraction). Returns
    (cumulative_variance, n_selected, components, singular_values).
    """
    from dask_ml.decomposition import TruncatedSVD
    import dask.array as da
    nrows = num.shape[0] # this shows num cells and is required for svd
    numgenes = num.shape[1] # this is to make sure if less 2000
    if numgenes < ncomp:
        ncomp = numgenes - 1
    # NOTE(review): debug print left in — consider logger.debug instead.
    print(ncomp)
    numm = num.rechunk((nrows, 10))
    svd = TruncatedSVD(n_components=ncomp, n_iter=5, random_state=42)
    svd.fit(numm)
    x = np.cumsum(svd.explained_variance_ratio_)
    y = np.argmax(x>thres)
    if y == 0:
        # argmax returned 0 because no prefix crossed the threshold: keep all.
        y = ncomp
    X = svd.components_[0:y]
    v = svd.singular_values_[0:y]
    return x, y, X, v
| 6,263 | 2,427 |
import re
from feed import get_builds
def _filter_old_builds(builds):
to_remove = []
for build in builds:
for b in builds:
if b['artifact'] == build['artifact']:
partial_b_version = b['version'][:b['version'].rfind('.')]
partial_build_version = build['version'][:build['version'].rfind('.')]
if partial_b_version == partial_build_version and b['timestamp'] > build['timestamp']:
to_remove.append(build)
break
return [b for b in builds if b not in to_remove]
def command_check_builds(reg_ex, command):
    """Build a Slack-formatted status line for every current build."""
    current = _filter_old_builds(get_builds())
    current.sort(key=lambda b: b['version'])
    lines = ''
    for build in current:
        ok = 'successful' in build['status']
        lines += '%s %s *#%s* _%s_\n' % (
            ':heavy_check_mark:' if ok else ':bangbang:', build['artifact'],
            build['version'], 'successful' if ok else 'failed')
    return lines
def command_check_specific_build(reg_ex, command):
    """Status lines restricted to builds whose version contains the
    name captured from the command."""
    wanted = reg_ex.match(command).group(1)
    current = _filter_old_builds(get_builds())
    current.sort(key=lambda b: b['version'])
    lines = ''
    for build in current:
        if wanted not in build['version']:
            continue
        ok = 'successful' in build['status']
        lines += '%s %s *#%s* _%s_\n' % (
            ':heavy_check_mark:' if ok else ':bangbang:', build['artifact'],
            build['version'], 'successful' if ok else 'failed')
    return lines
# Bot command table: each entry pairs a trigger regex with its handler.
COMMANDS = [
    {
        'regex': re.compile(r'\.do check builds(\s*)'),
        'command': command_check_builds
    },
    {
        'regex': re.compile(r'\.do check build ([\w.-]+)'),
        'command': command_check_specific_build
    }
]
| 1,925 | 618 |
from src.APIs import IBMWatsonAPI, FacebookAPI
from src.NLP import ReminderNLP, WeatherNLP, NewsNLP, GmailNLP, CalendarNLP
from src.MsgBuilder import NLPMB
from src.Models import Client
import json
# processes the client message and select the related API
# processes the client message and select the related API
def process_message(client_id, msg):
    """Send a client's message through Watson and dispatch to the right handler."""
    client = Client.get_client(client_id)
    if client is None:
        client = Client.insert_client(client_id, None)
    watson_results = IBMWatsonAPI.send_message(msg, client.context)
    print(watson_results)
    __selectAPI(watson_results, client)
# process a received quick_reply
# process a received quick_reply
def process_quick_reply(client_id, quick_reply):
    """Handle a quick-reply payload exactly like a typed message."""
    client = Client.get_client(client_id)
    if client is None:
        client = Client.insert_client(client_id, None)
    watson_results = IBMWatsonAPI.send_message(quick_reply, client.context)
    __selectAPI(watson_results, client)
def send_info(results, cli):
    """Forward Watson's informational output to the client and clear context."""
    print('INFO REQUEST')
    (context, output, _) = results
    for message in output:
        if message != '':
            FacebookAPI.send_message(cli.id, message)
    Client.update_client_context(cli.id, None)
    cli.context = None
# method select the correct API and deals with invalid request
# method select the correct API and deals with invalid request
def __selectAPI(results, cli):
    """Route Watson results to the handler registered for the dialog node."""
    (newContext, _, _) = results
    handlers = {
        'Info': send_info,
        'WeatherRequest': WeatherNLP.process_message,
        'NewsRequest': NewsNLP.process_message,
        'EmailRequest': GmailNLP.process_message,
        'ReminderRequest': ReminderNLP.process_message,
        'CalendarRequest': CalendarNLP.process_message,
    }
    try:
        node = newContext['node']
        print(node)
    except KeyError:
        node = 'AnythingElse'
    handlers.get(node, __invalid_request)(results, cli)
# method deals with invalid request
# method deals with invalid request
def __invalid_request(results, cli):
    """Fallback: persist the new context and show the feature quick replies."""
    print('ANYTHING ELSE')
    (newContext, output, _) = results
    Client.update_client_context(cli.id, json.dumps(newContext, indent=2))
    for message in output:
        FacebookAPI.send_message(cli.id, message)
    FacebookAPI.send_quick_replies(cli.id, NLPMB.quick_reply_features(), "Here's all the features, enjoy!")
| 2,124 | 670 |
from typing import Any
class TaleObject:
    """A basic block of Tale's object model.
    All values in Tale exist only as instances of this class.
    For example, in the following expression:
        x = 1
    `1` is an instance of `TaleObject`.
    Attributes:
        type: An instance of `TaleObject` that represents the type of the
            object.
        name: A name of the object.
        py_instance: An instance of the object in Python memory.
    """
    def __init__(self, type: 'TaleObject', py_instance: Any, name = None):
        # `name` is itself a TaleObject (a Tale string) or None; the core
        # singletons below back-patch it after construction.
        self.type = type
        self.py_instance = py_instance
        self.name = name
# Bootstrap the core type objects. Because Type's type is Type itself and
# names are Tale strings, the circular references are resolved by creating
# the objects first and back-patching their fields.
TaleType = TaleObject(None, None)
TaleType.type = TaleType
TaleString = TaleObject(TaleType, str)
TaleString.name = TaleObject(TaleString, 'String')
TaleType.name = TaleObject(TaleString, 'Type')
# NOTE(review): TaleNone's type is left as None rather than TaleType —
# confirm whether it should have its own type object.
TaleNone = TaleObject(None, None, TaleObject(TaleString, 'None'))
TaleInt = TaleObject(TaleType, int, TaleObject(TaleString, 'Int'))
TaleTuple = TaleObject(TaleType, None, TaleObject(TaleString, 'Tuple'))
| 1,043 | 339 |
import json
def twitterfilter(filename, outputpath="filteredtweets/", inputpath="unfilteredtweets/"):
    """Keep only tweets authored by *filename*'s user and write them out.

    Reads ``<inputpath><filename>.json`` (a list of tweet dicts), drops every
    tweet whose ``user`` does not match *filename* case-insensitively, and
    writes the survivors to ``<outputpath>new<filename>.json``.

    Args:
        filename: Twitter user name; also the base name of the input file.
        outputpath: Directory prefix (with trailing slash) for the output.
        inputpath: Directory prefix (with trailing slash) for the input.
    """
    with open(inputpath + filename + ".json", "r") as f:
        data = json.load(f)
    # Build the filtered list in one pass instead of collecting indices and
    # deleting them one by one (each list deletion is O(n), so the original
    # reversed-delete loop was quadratic in the worst case).
    kept = [tweet for tweet in data if tweet["user"].lower() == filename.lower()]
    print("Total Tweets: " + str(len(data)))
    print("Deleted tweets: " + str(len(data) - len(kept)))
    print("New Total Tweets: " + str(len(kept)))
    with open(outputpath + "new" + filename + ".json", "w") as nf:
        json.dump(kept, nf)
| 666 | 217 |
import unittest
import random
from linear_search import linear_search
class TestLinearSearch(unittest.TestCase):
    """Tests for linear_search against randomly generated float data."""

    def setUp(self):
        self.items_length = 20
        self.unsorted_values = [random.random() for _ in range(self.items_length)]
        self.target = random.choice(self.unsorted_values)

    def test_linear_search_returns_an_index(self):
        """The returned position is a valid integer index into the list."""
        position = linear_search(self.unsorted_values, self.target)
        assert position in range(self.items_length)
        assert isinstance(position, int)

    def test_linear_search_raises_an_error_with_an_invalid_target(self):
        """A missing target raises ValueError with a descriptive message.

        Bug fixes: the original try/except silently passed when no exception
        was raised at all, and `e.message` does not exist on Python 3
        exceptions (it would raise AttributeError instead of comparing).
        """
        # random.random() yields floats in [0.0, 1.0), so 1 is never present.
        target = 1
        with self.assertRaises(ValueError) as ctx:
            linear_search(self.unsorted_values, target)
        self.assertEqual(
            str(ctx.exception), "{} was not found in the list".format(target)
        )

    def test_linear_search_finds_target(self):
        """The value at the returned index is the searched-for target."""
        position = linear_search(self.unsorted_values, self.target)
        self.assertEqual(self.unsorted_values[position], self.target)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 1,038 | 313 |
# -*- coding: utf-8 -*-
'''
Created on 3 mars 2017
@author: guillaume
'''
from django.core.management.base import BaseCommand, CommandError
import time
import logging
from django.test.client import Client
# Create your tests here.
from whowlong.models import Place,Trajet
from whowlong.computingtools.calculator import RouteComputer
from django.contrib.auth.models import User
logger = logging.getLogger(__name__)
from django.http import JsonResponse
class Command(BaseCommand):
    """Management command: build and measure trajets around an address."""
    help = u'create trajet arround adress'

    # For argument handling see
    # https://stackoverflow.com/questions/27611468/django-management-command-argument
    def add_arguments(self, parser):
        parser.add_argument('--adress', type=str)
        parser.add_argument('--arroundInMeter', type=int)

    def handle(self, *args, **options):
        """Compute the trajets around --adress within --arroundInMeter metres."""
        logger.info(u'Start get trajet arround')
        adress = options['adress']
        logger.info(u'adress:%s '%(adress))
        t0 = time.time()
        arround = options['arroundInMeter']
        logger.info(u'arround:%sm '%(arround))
        routeComputer = RouteComputer()
        user = User.objects.get(username='guillaume')
        # Bug fix: use the parsed --adress option instead of a hard-coded
        # address, which silently ignored the command-line argument.
        request = routeComputer.initRequest(user, adress, arround, label=u'CHEZMOIS')
        routeComputer.buildTrajetsList(request)
        # Bug fix: compute the trajet lengths *before* reporting success,
        # not after END has already been written.
        routeComputer.computeTrajetLength(request)
        logger.debug(" objectList,userProfil = agsp.getObjectsList(token) [%06dms] " %(1000*(time.time()-t0)))
        self.stdout.write(self.style.SUCCESS(u'END'))
| 1,793 | 565 |
import re
from functools import lru_cache
import requests
from ocdsmerge.exceptions import (MissingDateKeyError, NonObjectReleaseError, NonStringDateValueError,
NullDateValueError)
@lru_cache()
def get_tags():
    """
    Returns the tags of all versions of OCDS in alphabetical order.
    """
    # Scrapes the schema index page for tags of the form "1__1__5/".
    # NOTE(review): re.findall preserves document order; the "alphabetical"
    # claim holds only if the page itself lists tags alphabetically.
    return re.findall(r'"(\d+__\d+__\d+)/', requests.get('https://standard.open-contracting.org/schema/').text)
def get_release_schema_url(tag):
    """
    Returns the URL of the release schema in the given version of OCDS.
    """
    base = 'https://standard.open-contracting.org/schema'
    return f'{base}/{tag}/release-schema.json'
# If we need a method to get dates from releases, see https://github.com/open-contracting/ocds-merge/issues/25
def sorted_releases(releases):
    """
    Sorts a list of releases by date.

    Raises package-specific exceptions when releases lack a usable `date`
    field or are not dicts at all.
    """
    # Avoids an error if sorting a single compiled release.
    if isinstance(releases, list) and len(releases) == 1 and isinstance(releases[0], dict):
        return releases
    try:
        return sorted(releases, key=lambda release: release['date'])
    except KeyError:
        raise MissingDateKeyError('date', 'The `date` field of at least one release is missing.')
    except TypeError as e:
        # Translate CPython's TypeError messages into package-specific
        # exceptions. NOTE(review): this matching depends on the exact
        # wording of CPython error messages, which can vary by version.
        if ' not supported between instances of ' in e.args[0]:
            # Comparison failed inside sorted(): a date is null or non-string.
            if 'NoneType' in e.args[0]:
                raise NullDateValueError('The `date` field of at least one release is null.')
            else:
                raise NonStringDateValueError('The `date` field of at least one release is not a string.')
        elif e.args[0] in ('string indices must be integers',
                           'string index indices must be integers or slices, not str'):
            raise NonObjectReleaseError('At least one release is a string, not a dict. Use `json.loads` to parse the '
                                        'string as JSON.')
        elif e.args[0] == 'byte indices must be integers or slices, not str':
            raise NonObjectReleaseError('At least one release is a byte-string, not a dict. Use `json.loads` to parse '
                                        'the byte-string as JSON.')
        elif e.args[0] == 'list indices must be integers or slices, not str':
            raise NonObjectReleaseError('At least one release is a list, not a dict.')
        elif e.args[0] == 'tuple indices must be integers or slices, not str':
            raise NonObjectReleaseError('At least one release is a tuple, not a dict.')
        elif e.args[0] in ("'set' object is not subscriptable",
                           "'set' object is not subscriptable (key 'date')"):
            raise NonObjectReleaseError('At least one release is a set, not a dict.')
        else:
            # Unknown TypeError: let it propagate unchanged.
            raise
| 2,759 | 756 |
"""
Unit and regression test for the geometry_analysis package.
"""
# Import package, test suite, and other packages as needed
import geometry_analysis
import pytest
import sys
import numpy as np
import math
@pytest.fixture()
def water_molecule():
    """Return a simple 3-atom water Molecule fixture (H-O-H along x)."""
    name = "water"
    symbols = ["H", "O", "H"]
    coordinates = np.array([[2,0,0], [0,0,0], [-2,0,0]])
    water = geometry_analysis.Molecule(name, symbols, coordinates)
    return water
def test_create_failure():
    """A non-string name should make the Molecule constructor raise TypeError."""
    name = 25
    symbols = ['H', 'O', 'H']
    coordinates = np.zeros([3,3])
    with pytest.raises(TypeError):
        water = geometry_analysis.Molecule(name, symbols, coordinates)
def test_molecules_set_coordinates(water_molecule):
    """Test bond list is rebuilt when we reset coordinates """
    num_bonds = len(water_molecule.bonds)
    assert num_bonds == 2
    # Moving the first H far away should break its bond to O.
    new_coordinates = np.array([[5,0,0], [0,0,0], [-2,0,0]])
    water_molecule.coordinates = new_coordinates
    new_bonds = len(water_molecule.bonds)
    assert new_bonds == 1
    assert np.array_equal(new_coordinates, water_molecule.coordinates)
def test_geometry_analysis_imported():
    """Sample test, will always pass so long as import statement worked"""
    assert "geometry_analysis" in sys.modules
def test_calculate_distance():
    """Test the calculate_distance function"""
    r1 = np.array([0,0,-1])
    r2 = np.array([0,1,0])
    # Exact float equality is relied on here; this holds only if the
    # implementation produces the same np.sqrt(2) value bit-for-bit.
    expected_distance = np.sqrt(2)
    calculated_distance = geometry_analysis.calculate_distance(r1, r2)
    assert expected_distance == calculated_distance
def test_calculate_angle_180():
    """Test calculate_angle for a straight (pi radians) angle."""
    r1 = np.array([-1,0,0])
    r2 = np.array([0,0,0])
    r3 = np.array([1,0,0])
    expected_theta = math.pi
    calculated_theta = geometry_analysis.calculate_angle(r1, r2, r3)
    assert expected_theta == calculated_theta
def test_calculate_angle_90():
    """Test calculate_angle for a right (pi/2 radians) angle."""
    r1 = np.array([1,0,0])
    r2 = np.array([0,0,0])
    r3 = np.array([0,1,0])
    expected_theta = (math.pi) / 2
    calculated_theta = geometry_analysis.calculate_angle(r1, r2, r3)
    assert expected_theta == calculated_theta
@pytest.mark.parametrize('p1, p2, p3, expected_angle', [
    (np.array([-1,0,0]), np.array([0,0,0]), np.array([1,0,0]), 180),
    (np.array([1,0,0]), np.array([0,0,0]), np.array([0,1,0]), 90),
])
def test_calculate_angle(p1, p2, p3, expected_angle):
    """calculate_angle returns degrees when the degrees flag is enabled.

    Bug fix: the 4th positional argument passed was `expected_angle`
    (180 or 90), which only enabled degree output because any non-zero
    value is truthy; pass the flag explicitly instead.
    """
    calculated_theta = geometry_analysis.calculate_angle(p1, p2, p3, True)
    assert expected_angle == calculated_theta
| 2,566 | 936 |
"""
Regridding vectors with quiver
------------------------------
This example demonstrates the regridding functionality in quiver (there exists
equivalent functionality in :meth:`cartopy.mpl.geoaxes.GeoAxes.barbs`).
Regridding can be an effective way of visualising a vector field, particularly
if the data is dense or warped.
"""
__tags__ = ['Vector data']
import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs
def sample_data(shape=(20, 30)):
    """
    Return ``(x, y, u, v, crs)`` of some vector data computed
    mathematically. The returned CRS is a North Polar Stereographic
    projection, meaning that the vectors will be unevenly spaced in a
    PlateCarree projection.
    """
    crs = ccrs.NorthPolarStereo()
    scale = 1e7
    # shape is (rows, cols) == (len(y), len(x)).
    ny, nx = shape
    x = np.linspace(-scale, scale, nx)
    y = np.linspace(-scale, scale, ny)
    x2d, y2d = np.meshgrid(x, y)
    u = 10 * np.cos(2 * x2d / scale + 3 * y2d / scale)
    v = 20 * np.cos(6 * x2d / scale)
    return x, y, u, v, crs
def main():
    """Plot the sample vector field twice: raw and regridded."""
    fig = plt.figure(figsize=(8, 10))
    x, y, u, v, vector_crs = sample_data(shape=(50, 50))
    ax1 = fig.add_subplot(2, 1, 1, projection=ccrs.PlateCarree())
    ax1.coastlines('50m')
    ax1.set_extent([-45, 55, 20, 80], ccrs.PlateCarree())
    ax1.quiver(x, y, u, v, transform=vector_crs)
    ax2 = fig.add_subplot(2, 1, 2, projection=ccrs.PlateCarree())
    ax2.set_title('The same vector field regridded')
    ax2.coastlines('50m')
    ax2.set_extent([-45, 55, 20, 80], ccrs.PlateCarree())
    # regrid_shape resamples the vectors onto a regular 20x20 target grid.
    ax2.quiver(x, y, u, v, transform=vector_crs, regrid_shape=20)
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| 1,656 | 662 |
"""Tests for letsencrypt.client.log."""
import unittest
import mock
class DialogHandlerTest(unittest.TestCase):
    """Tests for DialogHandler's padding, formatting, and wrapping."""
    def setUp(self):
        self.d = mock.MagicMock() # pylint: disable=invalid-name
        from letsencrypt.client.log import DialogHandler
        # A tiny 2x6 dialog makes wrapping/truncation easy to trigger.
        self.handler = DialogHandler(height=2, width=6, d=self.d)
        self.handler.PADDING_HEIGHT = 2
        self.handler.PADDING_WIDTH = 4
    def test_adds_padding(self):
        # infobox gets height+PADDING_HEIGHT (4) and width+PADDING_WIDTH (10).
        self.handler.emit(mock.MagicMock())
        self.d.infobox.assert_called_once_with(mock.ANY, 4, 10)
    def test_args_in_msg_get_replaced(self):
        assert len('123456') <= self.handler.width
        self.handler.emit(mock.MagicMock(msg='123%s', args=(456,)))
        self.d.infobox.assert_called_once_with('123456', mock.ANY, mock.ANY)
    def test_wraps_nospace_is_greedy(self):
        # Without whitespace, the wrap is a hard cut at the width.
        assert len('1234567') > self.handler.width
        self.handler.emit(mock.MagicMock(msg='1234567'))
        self.d.infobox.assert_called_once_with('123456\n7', mock.ANY, mock.ANY)
    def test_wraps_at_whitespace(self):
        assert len('123 567') > self.handler.width
        self.handler.emit(mock.MagicMock(msg='123 567'))
        self.d.infobox.assert_called_once_with('123\n567', mock.ANY, mock.ANY)
    def test_only_last_lines_are_printed(self):
        # Only the final `height` lines survive; blanks are dropped.
        assert len('a\nb\nc'.split()) > self.handler.height
        self.handler.emit(mock.MagicMock(msg='a\n\nb\nc'))
        self.d.infobox.assert_called_once_with('b\nc', mock.ANY, mock.ANY)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 1,543 | 601 |
# Accumulates two running totals over positions 1..val: odd positions add 7
# (into x), even positions add 6 (into y), then prints one of them.
val = int(input('enter the number: '))
x=0
y=0
for i in range(1,val+1):
    if(i%2!=0):
        x= x+7
    else:
        y = y+6
# NOTE(review): printing x-7 / y-6 reports the total minus one step, i.e.
# the previous same-parity value (val=1 prints 0, not 7). Confirm this
# off-by-one is intended.
if(val%2!=0):
    print(' {} term in accordance to the program is {}'.format(val,x-7))
else:
    print('{} term in accordance to the program is {}'.format(val,y-6))
| 309 | 131 |
import datetime
from decimal import Decimal
import json
import os
import pytz
import tempfile
from unittest.mock import patch
from freezegun import freeze_time
from ditto.core.utils import datetime_now
from ditto.core.utils.downloader import DownloadException, filedownloader
from django.test import override_settings
from .test_fetch import FetchTwitterTestCase
from ditto.twitter.fetch.savers import TweetSaver, UserSaver
from ditto.twitter.models import Media, Tweet, User
class TweetSaverTestCase(FetchTwitterTestCase):
    """Testing the TweetSaver class"""
    # Note that we've changed the id and id_str of each Tweet in this
    # fixture to something much shorter, and easier to test with.
    api_fixture = "tweets.json"
    def make_tweet(self, is_private=False):
        """Save the first tweet from the fixture and return it from the DB.

        is_private marks the tweet's user as protected before saving.
        """
        self.fetch_time = datetime_now()
        # Get the JSON for a single tweet.
        tweets_data = json.loads(self.make_response_body())
        tweet_data = tweets_data[0]
        if is_private:
            tweet_data["user"]["protected"] = True
        # Send the JSON, and our new User object, to try and save the tweet:
        TweetSaver().save_tweet(tweet_data, self.fetch_time)
        # Load that saved tweet from the DB:
        return Tweet.objects.get(twitter_id=300)
    def test_saves_correct_tweet_data(self):
        tweet = self.make_tweet()
        # And check it's all there:
        self.assertEqual(
            tweet.title,
            "@flaneur ooh, very exciting, thank you! Both my ears owe you a drink.",
        )
        self.assertEqual(
            tweet.summary,
            "@flaneur ooh, very exciting, thank you! Both my ears owe you a drink.",
        )
        self.assertEqual(
            tweet.text,
            "@flaneur ooh, very exciting, thank you!\n\nBoth my ears owe you a drink.",
        )
        self.assertEqual(tweet.latitude, Decimal("40.057016"))
        self.assertEqual(tweet.longitude, Decimal("-75.143103"))
        self.assertFalse(tweet.is_private)
        self.assertEqual(tweet.fetch_time, self.fetch_time)
        self.assertEqual(tweet.permalink, "https://twitter.com/philgyford/status/300")
        tweets = json.loads(self.make_response_body())
        self.assertEqual(tweet.raw, json.dumps(tweets[0]))
        self.assertEqual(tweet.user.screen_name, "philgyford")
        self.assertEqual(tweet.twitter_id, 300)
        self.assertEqual(
            tweet.post_time,
            datetime.datetime.strptime(
                "2015-08-06 19:42:59", "%Y-%m-%d %H:%M:%S"
            ).replace(tzinfo=pytz.utc),
        )
        self.assertEqual(tweet.favorite_count, 2)
        self.assertEqual(tweet.retweet_count, 1)
        self.assertEqual(tweet.media_count, 0)
        self.assertEqual(tweet.in_reply_to_screen_name, "flaneur")
        self.assertEqual(tweet.in_reply_to_status_id, 629375876216528896)
        self.assertEqual(tweet.in_reply_to_user_id, 1859981)
        self.assertEqual(tweet.language, "en")
        self.assertEqual(tweet.place_attribute_street_address, "795 Folsom St")
        self.assertEqual(tweet.place_full_name, "Twitter HQ, San Francisco")
        self.assertEqual(tweet.place_country, "United States")
        # NOTE(review): "iΟS" below contains U+039F (Greek capital omicron),
        # presumably mirroring the fixture data -- don't "correct" it without
        # checking tweets.json.
        self.assertEqual(
            tweet.source,
            (
                u'<a href="http://tapbots.com/tweetbot" rel="nofollow">Tweetbot '
                'for iΟS</a>'
            )
        )
    def test_saves_private_tweets_correctly(self):
        """If the user is protected, their tweets should be marked private."""
        tweet = self.make_tweet(is_private=True)
        self.assertTrue(tweet.is_private)
    def test_saves_280_character_tweets_correctly(self):
        "It should save the full text but truncate title and summary to 255 characters."
        self.maxDiff = 3000
        self.api_fixture = "tweets_280_characters.json"
        tweet = self.make_tweet()
        self.assertEqual(
            tweet.text,
            (
                "@BarclaysUKHelp Thanks Jonny. I tried online chat at the time and "
                "they said the form doesn’t work on iOS Safari. It’d be nice if it "
                "said that on the form, rather than it returning to the start "
                "half-way through :) So I set up an account at @TSB instead - their "
                "form worked."
            ),
        )
        self.assertEqual(
            tweet.text_html,
            (
                '<span class="twython-tweet-prefix"><a href="'
                'https://twitter.com/BarclaysUKHelp" rel="external">@BarclaysUKHelp'
                "</a> </span>Thanks Jonny. I tried online chat at the time and they "
                "said the form doesn’t work on iOS Safari. It’d be nice if it said "
                "that on the form, rather than it returning to the start half-way "
                "through :) So I set up an account at "
                '<a href="https://twitter.com/TSB" rel="external">@TSB</a> '
                "instead - their form worked."
            ),
        )
        self.assertEqual(
            tweet.title,
            (
                "Thanks Jonny. I tried online chat at the time and they said the "
                "form doesn’t work on iOS Safari. It’d be nice if it said that on "
                "the form, rather than it returning to the start half-way through "
                ":) So I set up an account at @TSB instead - their form…"
            ),
        )
        self.assertEqual(
            tweet.summary,
            (
                "Thanks Jonny. I tried online chat at the time and they said the "
                "form doesn’t work on iOS Safari. It’d be nice if it said that on "
                "the form, rather than it returning to the start half-way through "
                ":) So I set up an account at @TSB instead - their form…"
            ),
        )
    def test_saves_user(self):
        "Saving a Tweet should also save its user."
        tweet = self.make_tweet()
        self.assertEqual(tweet.user.twitter_id, 12552)
        self.assertEqual(tweet.user.fetch_time, self.fetch_time)
    def test_saves_quoted_tweets(self):
        "Saving a Tweet that quotes another Tweet should save the quoted Tweet."
        self.api_fixture = "tweets_with_quoted_tweet.json"
        tweet = self.make_tweet()
        self.assertEqual(
            tweet.text,
            (
                "Quoting a couple of tweets: https://t.co/HSaYtiWAbg and "
                "https://t.co/hpX1aGkWsv"
            ),
        )
        self.assertEqual(tweet.quoted_status_id, 663744897778872321)
        quoted_tweet = Tweet.objects.get(twitter_id=663744897778872321)
        self.assertEqual(
            quoted_tweet.text,
            "Very quiet in the basement of #Innovate2015 come say hi and talk #iot",
        )
        self.assertEqual(quoted_tweet.user.screen_name, "iotwatch")
    def test_saves_double_quoted_tweets(self):
        """Saving Tweet 1 that quotes Tweet 2 that quotes Tweet 3 should save
        Tweet 2, and cope with Tweet 3 not being savable."""
        self.api_fixture = "tweets_with_double_quoted_tweet.json"
        tweet1 = self.make_tweet()
        self.assertEqual(
            tweet1.text,
            (
                "Anyone fancy meeting sometime today/tomorrow to see "
                "@genmon\u2019s book vending machine at Google Campus, "
                "EC2? https://t.co/1ScaCLOUxb"
            ),
        )
        # ie, tweet2's ID:
        self.assertEqual(tweet1.quoted_status_id, 714528026650869760)
        tweet2 = Tweet.objects.get(twitter_id=714528026650869760)
        self.assertEqual(
            tweet2.text,
            "Ludicrous hobby is ludicrous. But here we go https://t.co/DqYZB2gtQv",
        )
        self.assertEqual(tweet2.user.screen_name, "genmon")
        # ie, tweet3's ID:
        self.assertEqual(tweet2.quoted_status_id, 714527559946473474)
    def test_saves_retweeted_tweets(self):
        "Saving a Tweet that is a retweet should save the retweeted Tweet."
        self.api_fixture = "tweets_with_retweeted_tweet.json"
        tweet = self.make_tweet()
        self.assertEqual(
            tweet.text,
            (
                "RT @stefiorazi: Twitter help: Looking for early Barbican "
                "Estate residents to interview. mail@modernistestates RTs "
                "appreciated https://t.co/\u2026"
            ),
        )
        self.assertEqual(tweet.retweeted_status_id, 735555565724827649)
        retweeted_tweet = Tweet.objects.get(twitter_id=735555565724827649)
        self.assertEqual(
            retweeted_tweet.text,
            (
                "Twitter help: Looking for early Barbican Estate residents to "
                "interview. mail@modernistestates RTs appreciated "
                "https://t.co/IFSZIh9DHm"
            ),
        )
        self.assertEqual(retweeted_tweet.user.screen_name, "stefiorazi")
    def test_extended_2016_tweets(self):
        """Saves correctly from the new (2016) tweet format.
        https://dev.twitter.com/overview/api/upcoming-changes-to-tweets
        """
        self.api_fixture = "tweets_extended_format_2016.json"
        tweet = self.make_tweet()
        self.assertEqual(
            tweet.text,
            (
                "@philgyford Here\u2019s a test tweet that goes on as much as "
                "possible and includes an image. Hi to my fans in testland! "
                "https://t.co/tzhyk2QWSr"
            ),
        )
        self.assertEqual(
            tweet.summary,
            (
                "Here\u2019s a test tweet that goes on as much as possible and "
                "includes an image. Hi to my fans in testland!"
            ),
        )
        self.assertEqual(
            tweet.title,
            (
                "Here\u2019s a test tweet that goes on as much as possible and "
                "includes an image. Hi to my fans in testland!"
            ),
        )
class TweetSaverMediaTestCase(FetchTwitterTestCase):
    "Parent class for testing the save_media() method of the TweetSaver class."
    # Child classes should have an api_fixture property.
    def setUp(self):
        "Save a tweet using the api_fixture's data."
        fetch_time = datetime_now()
        # Media fixtures contain a single tweet object (no [0] indexing here).
        tweet_data = json.loads(self.make_response_body())
        # Send the JSON, and our new User object, to try and save the tweet:
        TweetSaver().save_tweet(tweet_data, fetch_time)
        # Load that saved tweet from the DB:
        self.tweet = Tweet.objects.get(twitter_id=9876543210)
class TweetSaverPhotosTestCase(TweetSaverMediaTestCase):
    "Testing that photos are saved correctly."
    api_fixture = "tweet_with_photos.json"
    def test_saves_photos(self):
        self.assertEqual(self.tweet.media_count, 3)
        photos = Media.objects.filter(tweets__pk=self.tweet.pk)
        self.assertEqual(len(photos), 3)
        # Spot-check the second of the three attached photos.
        photo = photos[1]
        self.assertEqual(photo.media_type, "photo")
        self.assertEqual(photo.twitter_id, 1234567890)
        self.assertEqual(
            photo.image_url, "https://pbs.twimg.com/media/CSaWsSkWsAA-yXb.jpg"
        )
        self.assertEqual(photo.large_w, 935)
        self.assertEqual(photo.large_h, 397)
        self.assertEqual(photo.medium_w, 600)
        self.assertEqual(photo.medium_h, 254)
        self.assertEqual(photo.small_w, 340)
        self.assertEqual(photo.small_h, 144)
        self.assertEqual(photo.thumb_w, 150)
        self.assertEqual(photo.thumb_h, 150)
        self.assertIn(self.tweet, photo.tweets.all())
class TweetSaverVideosTestCase(TweetSaverMediaTestCase):
    "Testing that videos are saved correctly."
    api_fixture = "tweet_with_video.json"
    def test_saves_videos(self):
        self.assertEqual(self.tweet.media_count, 1)
        videos = Media.objects.filter(tweets__pk=self.tweet.pk)
        self.assertEqual(len(videos), 1)
        video = videos[0]
        self.assertEqual(video.media_type, "video")
        self.assertEqual(video.twitter_id, 1234567890)
        self.assertEqual(
            video.image_url,
            "https://pbs.twimg.com/ext_tw_video_thumb/661601811007188992/pu/img/gcxHGl7EA08a-Gps.jpg", # noqa: E501
        )
        self.assertEqual(video.large_w, 640)
        self.assertEqual(video.large_h, 360)
        self.assertEqual(video.medium_w, 600)
        self.assertEqual(video.medium_h, 338)
        self.assertEqual(video.small_w, 340)
        self.assertEqual(video.small_h, 191)
        self.assertEqual(video.thumb_w, 150)
        self.assertEqual(video.thumb_h, 150)
        self.assertIn(self.tweet, video.tweets.all())
        # Video-specific fields beyond the common size attributes:
        self.assertEqual(video.aspect_ratio, "16:9")
        self.assertEqual(
            video.dash_url,
            "https://video.twimg.com/ext_tw_video/661601811007188992/pu/pl/K0pVjBgnc5BI_4e5.mpd", # noqa: E501
        )
        self.assertEqual(
            video.xmpeg_url,
            "https://video.twimg.com/ext_tw_video/661601811007188992/pu/pl/K0pVjBgnc5BI_4e5.m3u8", # noqa: E501
        )
class TweetSaverAnimatedGifTestCase(TweetSaverMediaTestCase):
    "Testing that animated GIFs are saved correctly."
    api_fixture = "tweet_with_animated_gif.json"
    def test_saves_gifs(self):
        self.assertEqual(self.tweet.media_count, 1)
        media = Media.objects.filter(tweets__pk=self.tweet.pk)
        self.assertEqual(len(media), 1)
        gif = media[0]
        self.assertEqual(gif.media_type, "animated_gif")
        self.assertEqual(gif.twitter_id, 726396540303073281)
        self.assertEqual(
            gif.image_url, "https://pbs.twimg.com/tweet_video_thumb/ChStzgbWYAErHLi.jpg"
        )
        self.assertEqual(gif.large_w, 320)
        self.assertEqual(gif.large_h, 232)
        self.assertEqual(gif.medium_w, 320)
        self.assertEqual(gif.medium_h, 232)
        self.assertEqual(gif.small_w, 320)
        self.assertEqual(gif.small_h, 232)
        self.assertEqual(gif.thumb_w, 150)
        self.assertEqual(gif.thumb_h, 150)
        self.assertIn(self.tweet, gif.tweets.all())
        # GIFs get an aspect ratio and an MP4 rendition rather than streams.
        self.assertEqual(gif.aspect_ratio, "40:29")
        self.assertEqual(
            gif.mp4_url, "https://pbs.twimg.com/tweet_video/ChStzgbWYAErHLi.mp4"
        )
class UserSaverTestCase(FetchTwitterTestCase):
    """Tests for UserSaver: saving user data and avatar download handling."""
    api_fixture = "verify_credentials.json"
    def make_user_data(self, custom=None):
        """Get the JSON for a single user.
        custom is a dict of attributes to override on the default data.
        eg, {'protected': True}
        """
        raw_json = self.make_response_body()
        user_data = json.loads(raw_json)
        # Bug fix: `custom={}` was a mutable default argument; default to
        # None and substitute an empty dict (overrides nothing) instead.
        for key, value in (custom or {}).items():
            user_data[key] = value
        return user_data
    @patch.object(filedownloader, "download")
    def make_user_object(self, user_data, download):
        """Creates/updates a User from API data, then fetches that User from
        the DB and returns it.
        """
        # Quietly prevents avatar files being fetched:
        download.side_effect = DownloadException("Oops")
        UserSaver().save_user(user_data, datetime_now())
        return User.objects.get(twitter_id=12552)
    @freeze_time("2015-08-14 12:00:00", tz_offset=-8)
    def test_saves_correct_user_data(self):
        user_data = self.make_user_data()
        user = self.make_user_object(user_data)
        self.assertEqual(user.fetch_time, datetime_now())
        self.assertEqual(user.raw, json.dumps(user_data))
        self.assertEqual(user.screen_name, "philgyford")
        self.assertEqual(user.url, "http://www.gyford.com/")
        self.assertFalse(user.is_private)
        self.assertFalse(user.is_verified)
        self.assertEqual(
            user.created_at,
            datetime.datetime.strptime(
                "2006-11-15 16:55:59", "%Y-%m-%d %H:%M:%S"
            ).replace(tzinfo=pytz.utc),
        )
        self.assertEqual(user.description, "Good. Good to Firm in places.")
        self.assertEqual(user.location, "London, UK")
        self.assertEqual(user.time_zone, "London")
        self.assertEqual(user.favourites_count, 1389)
        self.assertEqual(user.followers_count, 2435)
        self.assertEqual(user.friends_count, 309)
        self.assertEqual(user.listed_count, 138)
        self.assertEqual(user.statuses_count, 16428)
    def test_saves_alternate_data(self):
        """Check some different data to in the main user test."""
        user_data = self.make_user_data({"protected": True, "verified": True})
        user = self.make_user_object(user_data)
        self.assertTrue(user.is_private)
        self.assertTrue(user.is_verified)
    def test_handles_missing_expanded_url(self):
        """Test fix for when expanded_url is None, as here:
        {'indices': [0, 28],
        'url': 'http://www.benhammersley.com',
        'expanded_url': None
        )
        """
        entities = {
            "url": {
                "urls": [
                    {
                        "indices": [0, 22],
                        "url": "http://t.co/UEs0CCkdrl",
                        "expanded_url": None,
                    }
                ]
            }
        }
        user_data = self.make_user_data({"entities": entities})
        user = self.make_user_object(user_data)
        self.assertEqual(user.url, "http://t.co/UEs0CCkdrl")
    @patch.object(filedownloader, "download")
    @patch.object(UserSaver, "_fetch_and_save_avatar")
    def test_calls_fetch_and_save_avatar(self, fetch_avatar, download):
        "_fetch_and_save_avatar should be called with the User object."
        # Quietly prevents avatar files being fetched:
        download.side_effect = DownloadException("Oops")
        # Just make the mocked method return the User that's passed in:
        fetch_avatar.side_effect = lambda value: value
        user_data = self.make_user_data()
        saved_user = UserSaver().save_user(user_data, datetime_now())
        fetch_avatar.assert_called_once_with(saved_user)
    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    @patch.object(filedownloader, "download")
    def test_downloads_and_saves_avatar(self, download):
        "Should call download() and save avatar."
        # Make a temporary file, like download() would make:
        jpg = tempfile.NamedTemporaryFile()
        temp_filepath = jpg.name
        download.return_value = temp_filepath
        user_data = self.make_user_data()
        saved_user = UserSaver().save_user(user_data, datetime_now())
        download.assert_called_once_with(
            saved_user.profile_image_url_https,
            ["image/jpeg", "image/jpg", "image/png", "image/gif"],
        )
        self.assertEqual(
            saved_user.avatar,
            "twitter/avatars/25/52/12552/%s" % os.path.basename(temp_filepath),
        )
    @patch.object(filedownloader, "download")
    @patch.object(os.path, "exists")
    def test_does_not_download_and_save_avatar(self, exists, download):
        "If we already have the user's avatar, don't download it."
        # Fake that the file we look for exists:
        exists.return_value = True
        user_data = self.make_user_data()
        UserSaver().save_user(user_data, datetime_now())
        assert not download.called
| 19,312 | 6,540 |
def baum_sweet(z):
    """Return the Baum-Sweet bit for the binary string *z*.

    The bit is 1 when *z* contains no run of consecutive '0's of odd
    length, and 0 otherwise. The all-zero input "0" (binary of zero) is
    special-cased to 1, matching the convention b(0) = 1 and the original
    implementation.

    Args:
        z: Binary representation of a number, e.g. "101" (no '0b' prefix).

    Returns:
        int: 1 or 0.
    """
    if z == "0":
        return 1
    # Splitting on '1' yields exactly the runs of zeros (empty strings for
    # adjacent ones, which have even length 0 and are harmless); any run of
    # odd length forces the bit to 0.
    return 0 if any(len(run) % 2 for run in z.split("1")) else 1
# Print the Baum-Sweet sequence for 0..num inclusive.
num = int(input("type a number: "))
baum_sweet_sequence = []
for i in range(num+1):
    x = bin(i)
    z = x[2:]  # strip the '0b' prefix
    baum_sweet_sequence.append(baum_sweet(z))
print(baum_sweet_sequence)
| 659 | 255 |
from .models import MSiteSettings
class SettingsMiddleware:
    """Attach the site-wide settings object to every incoming request."""

    def __init__(self, get_response):
        # One-time configuration and initialization.
        self.get_response = get_response

    def __call__(self, request):
        # Before the view runs: expose the first MSiteSettings row, or a
        # default (unsaved) instance when the table is empty.
        request.settings = MSiteSettings.objects.first() or MSiteSettings()
        # Nothing to do after the view; just pass the response through.
        return self.get_response(request)
| 626 | 157 |
# from enum import Enum
#
# class Account_Type(Enum):
# SPOT = 0 # 现货账户
# OTC = 1 # OTC账户
# MARGIN = 2 # 逐仓杠杆账户,该账户类型以subType区分具体币种对账户
# SUPER = 3 # margin(或cross - margin):全仓杠杆账户
# POINT = 4 # 点卡账户
# MINEPOOL = 5 # 矿池账户
# ETF = 6 # ETF账户
#
#
| 288 | 202 |
from typing import Union, Callable, Iterable, Optional
from typing_extensions import Literal
from anndata import AnnData
from cellrank import logging as logg
from cellrank.ul._docs import d, inject_docs
from cellrank.tl._utils import _deprecate
from cellrank.tl.kernels import VelocityKernel, ConnectivityKernel
from cellrank.tl.kernels._base_kernel import KernelExpression
from cellrank.tl.kernels._velocity_kernel import BackwardMode, VelocityMode
from cellrank.tl.kernels._velocity_schemes import Scheme
@_deprecate(version="2.0")
@inject_docs(m=VelocityMode, b=BackwardMode, s=Scheme) # don't swap the order
@d.dedent
def transition_matrix(
    adata: AnnData,
    backward: bool = False,
    vkey: str = "velocity",
    xkey: str = "Ms",
    conn_key: str = "connectivities",
    gene_subset: Optional[Iterable] = None,
    mode: Literal[
        "deterministic", "stochastic", "sampling", "monte_carlo"
    ] = VelocityMode.DETERMINISTIC,
    backward_mode: Literal["transpose", "negate"] = BackwardMode.TRANSPOSE,
    scheme: Union[
        Literal["dot_product", "cosine", "correlation"], Callable
    ] = Scheme.CORRELATION,
    softmax_scale: Optional[float] = None,
    weight_connectivities: float = 0.2,
    density_normalize: bool = True,
    key: Optional[str] = None,
    **kwargs,
) -> KernelExpression:
    """
    Compute a transition matrix based on a combination of RNA Velocity and transcriptomic or spatial similarity.
    To learn more about the way in which the transition matrices are computed, see
    :class:`cellrank.tl.kernels.VelocityKernel` for the velocity-based transition matrix and
    :class:`cellrank.tl.kernels.ConnectivityKernel` for the similarity-based transition matrix.
    Parameters
    ----------
    %(adata)s
    %(backward)s
    vkey
        Key from ``adata.layers`` to access the velocities.
    xkey
        Key in ``adata.layers`` where expected gene expression counts are stored.
    conn_key
        Key in :attr:`anndata.AnnData.obsp` to obtain the connectivity matrix, describing cell-cell similarity.
    gene_subset
        List of genes to be used to compute transition probabilities.
        By default, genes from ``adata.var['velocity_genes']`` are used.
    %(velocity_mode)s
    %(velocity_backward_mode_high_lvl)s
    %(velocity_scheme)s
    %(softmax_scale)s
    weight_connectivities
        Weight given to similarities as opposed to velocities. Must be in `[0, 1]`.
    density_normalize
        Whether to use density correction when computing the transition probabilities based on similarities.
        Density correction is done as by :cite:`haghverdi:16`.
    %(write_to_adata.parameters)s
    kwargs
        Keyword arguments for :meth:`cellrank.tl.kernels.VelocityKernel.compute_transition_matrix`.
    Returns
    -------
    A kernel expression object containing the computed transition matrix.
    %(write_to_adata)s
    """
    # Helper: builds the velocity kernel only when a branch actually needs
    # it, so weight_connectivities == 1 skips the expensive computation.
    def compute_velocity_kernel() -> VelocityKernel:
        return VelocityKernel(
            adata,
            backward=backward,
            vkey=vkey,
            xkey=xkey,
            gene_subset=gene_subset,
            conn_key=conn_key,
        ).compute_transition_matrix(
            softmax_scale=softmax_scale,
            mode=mode,
            backward_mode=backward_mode,
            scheme=scheme,
            **kwargs,
        )
    # Blend velocity and connectivity kernels; the 0 and 1 edge cases avoid
    # computing a kernel whose weight would be zero.
    if 0 < weight_connectivities < 1:
        vk = compute_velocity_kernel()
        logg.info(f"Using a connectivity kernel with weight `{weight_connectivities}`")
        ck = ConnectivityKernel(
            adata, backward=backward, conn_key=conn_key
        ).compute_transition_matrix(density_normalize=density_normalize)
        final = (
            (1 - weight_connectivities) * vk + weight_connectivities * ck
        ).compute_transition_matrix()
    elif weight_connectivities == 0:
        final = compute_velocity_kernel()
    elif weight_connectivities == 1:
        final = ConnectivityKernel(
            adata,
            backward=backward,
            conn_key=conn_key,
        ).compute_transition_matrix(density_normalize=density_normalize)
    else:
        raise ValueError(
            f"Parameter `weight_connectivities` must be in range `[0, 1]`, found `{weight_connectivities}`."
        )
    final.write_to_adata(key=key)
    return final
| 4,335 | 1,308 |
import pytest
from django.urls import reverse
# Every test in this module requires database access.
pytestmark = pytest.mark.django_db
def test_retrieve_unauthenticated_user(api_client):
    """Anonymous requests to the auth-user endpoint are rejected with 403."""
    url = reverse("auth-user-detail")
    response = api_client.get(url)
    assert response.status_code == 403, response.data
@pytest.mark.as_user
def test_retrieve_authenticated_user(api_client, user):
    """An authenticated request returns 200 and the caller's own record."""
    url = reverse("auth-user-detail")
    response = api_client.get(url)
    assert response.status_code == 200, response.data
    assert response.data["id"] == user.id
| 516 | 177 |
import os
import sys
import requests
import csv
import subprocess
import json
import argparse
from urllib.parse import quote
# https://www.geeksforgeeks.org/print-colors-python-terminal/
def prRed(skk, exit=True):
    """Print *skk* in red; terminate the process unless *exit* is False."""
    print(f'\033[91m {skk}\033[00m')
    if exit:
        sys.exit(-1)
def prGreen(skk):
    """Print *skk* in green (success messages)."""
    print(f'\033[92m {skk}\033[00m')
def prYellow(skk):
    """Print *skk* in yellow (warnings)."""
    print(f"\033[93m {skk}\033[00m")
class User:
    """Simple record for an ngshare user; None name/email fields become ''."""

    def __init__(self, id, first_name, last_name, email):
        self.id = id
        self.first_name = first_name if first_name is not None else ''
        self.last_name = last_name if last_name is not None else ''
        self.email = email if email is not None else ''
def get_username():
    """Return the JupyterHub username, falling back to $USER."""
    try:
        return os.environ['JUPYTERHUB_USER']
    except KeyError:
        return os.environ['USER']
def ngshare_url():
    """Return the ngshare server URL, caching it in a module-level global.

    The URL is read once from the nbgrader exchange configuration; on any
    failure, a warning is printed and the process exits via the second
    prRed call (default exit=True).
    """
    global _ngshare_url
    try:
        # Fast path: the global was resolved during a previous call.
        return _ngshare_url
    except NameError:
        try:
            from nbgrader.apps import NbGrader
            nbgrader = NbGrader()
            nbgrader.load_config_file()
            exchange = nbgrader.config.ExchangeFactory.exchange()
            _ngshare_url = exchange.ngshare_url
            return _ngshare_url
        except Exception as e:
            # First message does not exit (exit=False) so both lines print;
            # the second call exits the process with the exception text.
            prRed(
                'Cannot determine ngshare URL. Please check your nbgrader_config.py!',
                False,
            )
            prRed(e)
def get_header():
    """Return auth headers built from the JupyterHub API token, or None."""
    token = os.environ.get('JUPYTERHUB_API_TOKEN')
    if token is None:
        return None
    return {'Authorization': 'token ' + token}
def check_status_code(response):
    """Print diagnostics when the ngshare response is not HTTP 200.

    For 5xx responses prRed exits the process; otherwise falls through to
    check_message so the server's own error text is shown.
    """
    if response.status_code != requests.codes.ok:
        prRed(
            'ngshare returned an invalid status code {}'.format(
                response.status_code
            ),
            False,
        )
    if response.status_code >= 500:
        # Server-side failure: prRed exits here (default exit=True).
        prRed(
            'ngshare encountered an error. Please contact the maintainers'
        )
    check_message(response)
def check_message(response):
    """Decode the JSON body; abort via prRed when 'success' is falsy."""
    body = response.json()
    if not body['success']:
        prRed(body['message'])
    return body
def encode_url(url):
    """Percent-encode *url*, keeping '/' path separators intact."""
    return quote(url, safe='/')
def post(url, data):
    """POST *data* to ngshare endpoint *url*; return the parsed JSON body."""
    header = get_header()
    encoded_url = encode_url(url)
    try:
        response = requests.post(
            ngshare_url() + encoded_url, data=data, headers=header
        )
        response.raise_for_status()
    except requests.exceptions.ConnectionError:
        # prRed exits the process here (default exit=True).
        prRed('Could not establish connection to ngshare server')
    except Exception:
        # HTTP errors raised by raise_for_status() land here.
        # NOTE(review): if requests.post itself raised something other than a
        # ConnectionError, `response` would be unbound at this point -- verify.
        check_status_code(response)
    return check_message(response)
def delete(url, data):
    """Send an HTTP DELETE to ngshare endpoint *url*; return parsed JSON."""
    header = get_header()
    encoded_url = encode_url(url)
    try:
        response = requests.delete(
            ngshare_url() + encoded_url, data=data, headers=header
        )
        response.raise_for_status()
    except requests.exceptions.ConnectionError:
        # prRed exits the process here (default exit=True).
        prRed('Could not establish connection to ngshare server')
    except Exception:
        # HTTP errors raised by raise_for_status() land here.
        # NOTE(review): if requests.delete itself raised something other than
        # a ConnectionError, `response` would be unbound here -- verify.
        check_status_code(response)
    return check_message(response)
def check_username_warning(users):
    """Warn about usernames containing upper-case letters (JupyterHub lowercases)."""
    flagged = [name for name in users if name != name.lower()]
    if not flagged:
        return
    prYellow(
        'The following usernames have upper-case letters. Normally JupyterHub forces usernames to be lowercase. If the user has trouble accessing the course, you should add their lowercase username to ngshare instead.',
    )
    for name in flagged:
        prYellow(name)
def create_course(args):
    """Create a new ngshare course with an optional list of instructors."""
    instructors = args.instructors or []
    check_username_warning(instructors)
    payload = {'user': get_username(), 'instructors': json.dumps(instructors)}
    response = post('/course/{}'.format(args.course_id), payload)
    prGreen('Successfully created {}'.format(args.course_id))
def add_student(args):
    """Add/update one student on ngshare, and nbgrader unless --no-gb."""
    # add student to ngshare
    check_username_warning([args.student_id])
    student = User(args.student_id, args.first_name, args.last_name, args.email)
    payload = {
        'user': get_username(),
        'first_name': student.first_name,
        'last_name': student.last_name,
        'email': student.email,
    }
    response = post(
        '/student/{}/{}'.format(args.course_id, student.id), payload
    )
    prGreen(
        'Successfully added/updated {} on {}'.format(student.id, args.course_id)
    )
    if not args.no_gb:
        add_jh_student(student)
def add_jh_student(student: User):
    """Register *student* in the local nbgrader gradebook via the CLI."""
    command = ['nbgrader', 'db', 'student', 'add']
    if student.first_name:
        command += ['--first-name', student.first_name]
    if student.last_name:
        command += ['--last-name', student.last_name]
    if student.email:
        command += ['--email', student.email]
    command.append(student.id)
    subprocess.run(command)
def add_students(args):
    """Bulk add/update students for a course from a CSV file.

    The CSV must contain the columns student_id, first_name, last_name and
    email. Each failure prints a message; fatal problems exit via prRed.
    """
    students = []
    if not os.path.exists(args.csv_file):
        prRed(
            'The csv file you entered does not exist. Please enter a valid path!'
        )
    with open(args.csv_file, 'r') as f:
        csv_reader = csv.reader(f, delimiter=',')
        rows = list(csv_reader)
    if len(rows) == 0:
        prRed('The csv file you entered is empty')
    # Map column names to their index so column order is irrelevant.
    header = rows[0]
    required_cols = ['student_id', 'first_name', 'last_name', 'email']
    cols_dict = dict()
    for i, col in enumerate(header):
        cols_dict[col] = i
    for col in required_cols:
        if col not in cols_dict:
            prRed('Missing column {} in {}.'.format(col, args.csv_file))
    # Build the request payload; rows with an empty student_id are skipped
    # with a warning instead of aborting the whole import.
    for i, row in enumerate(rows[1:]):
        student_dict = {}
        student_id = row[cols_dict['student_id']]
        if len(student_id.replace(' ', '')) == 0:
            prRed(
                'Student ID cannot be empty (row {})'.format(i + 1), False
            )
            continue
        first_name = row[cols_dict['first_name']]
        last_name = row[cols_dict['last_name']]
        email = row[cols_dict['email']]
        student_dict['username'] = student_id
        student_dict['first_name'] = first_name
        student_dict['last_name'] = last_name
        student_dict['email'] = email
        students.append(student_dict)
    check_username_warning([student['username'] for student in students])
    url = '/students/{}'.format(args.course_id)
    data = {'user': get_username(), 'students': json.dumps(students)}
    response = post(url, data)
    # Report the per-student outcome returned by the server, mirroring
    # successes into the local nbgrader gradebook unless --no-gb was given.
    if response['success']:
        for i, s in enumerate(response['status']):
            user = s['username']
            if s['success']:
                prGreen(
                    '{} was successfully added to {}'.format(
                        user, args.course_id
                    )
                )
                student = User(
                    user,
                    students[i]['first_name'],
                    students[i]['last_name'],
                    students[i]['email'],
                )
                if not args.no_gb:
                    add_jh_student(student)
            else:
                prRed(
                    'There was an error adding {} to {}: {}'.format(
                        user, args.course_id, s['message']
                    ),
                    False,
                )
def remove_jh_student(student_id, force):
    """Remove a student from the local nbgrader gradebook.

    Uses subprocess.run with an argument list (consistent with
    add_jh_student) instead of building a shell string for os.system,
    which was vulnerable to shell injection via *student_id*.
    """
    command = ['nbgrader', 'db', 'student', 'remove', student_id]
    if force:
        # Delete the student even if that discards recorded grades.
        command.append('--force')
    subprocess.run(command)
def remove_students(args):
    """Remove each listed student from ngshare (and nbgrader unless --no-gb)."""
    for student_id in args.students:
        if not args.no_gb:
            remove_jh_student(student_id, args.force)
        response = delete(
            '/student/{}/{}'.format(args.course_id, student_id),
            {'user': get_username()},
        )
        prGreen(
            'Successfully deleted {} from {}'.format(student_id, args.course_id)
        )
def add_instructor(args):
    """Add or update an instructor for a course on ngshare."""
    check_username_warning([args.instructor_id])
    url = '/instructor/{}/{}'.format(args.course_id, args.instructor_id)
    data = {
        'user': get_username(),
        'first_name': args.first_name,
        'last_name': args.last_name,
        'email': args.email,
    }
    # A leftover debug `print(data)` that leaked user details to stdout
    # was removed here.
    response = post(url, data)
    prGreen(
        'Successfully added {} as an instructor to {}'.format(
            args.instructor_id, args.course_id
        )
    )
def remove_instructor(args):
    """Remove an instructor from a course on ngshare."""
    endpoint = '/instructor/{}/{}'.format(args.course_id, args.instructor_id)
    response = delete(endpoint, {'user': get_username()})
    prGreen(
        'Successfully deleted instructor {} from {}'.format(
            args.instructor_id, args.course_id
        )
    )
def parse_args(argv):
    """Build the CLI parser and parse *argv*.

    One subcommand per management action; each subparser stores its handler
    function in ``args.func`` so main() can dispatch on it.
    """
    parser = argparse.ArgumentParser(description='ngshare Course Management')
    subparsers = parser.add_subparsers()
    # create_course COURSE_ID [INSTRUCTOR ...]
    create_course_parser = subparsers.add_parser(
        'create_course', help='Create a course'
    )
    create_course_parser.add_argument(
        'course_id', metavar='COURSE_ID', help='ID of the course'
    )
    create_course_parser.add_argument(
        'instructors',
        metavar='INSTRUCTOR',
        nargs='*',
        default=None,
        help='List of instructors assigned to the course',
    )
    create_course_parser.set_defaults(func=create_course)
    # add_instructor COURSE_ID INSTRUCTOR_ID [-f ...] [-l ...] [-e ...]
    add_instructor_parser = subparsers.add_parser(
        'add_instructor', help='Add/update one instructor for a course'
    )
    add_instructor_parser.add_argument(
        'course_id', metavar='COURSE_ID', help='ID of the course'
    )
    add_instructor_parser.add_argument(
        'instructor_id',
        metavar='INSTRUCTOR_ID',
        help='Username of the added/modified instructor',
    )
    add_instructor_parser.add_argument(
        '-f',
        '--first_name',
        default=None,
        help='First name of the instructor',
    )
    add_instructor_parser.add_argument(
        '-l',
        '--last_name',
        default=None,
        help='Last name of the instructor',
    )
    add_instructor_parser.add_argument(
        '-e',
        '--email',
        default=None,
        help='Email of the instructor',
    )
    add_instructor_parser.set_defaults(func=add_instructor)
    # remove_instructor COURSE_ID INSTRUCTOR_ID
    remove_instructor_parser = subparsers.add_parser(
        'remove_instructor', help='Remove one instructor from a course'
    )
    remove_instructor_parser.add_argument(
        'course_id', metavar='COURSE_ID', help='ID of the course'
    )
    remove_instructor_parser.add_argument(
        'instructor_id',
        metavar='INSTRUCTOR_ID',
        help='Username of the instructor to remove',
    )
    remove_instructor_parser.set_defaults(func=remove_instructor)
    # add_student COURSE_ID STUDENT_ID [-f ...] [-l ...] [-e ...] [--no-gb]
    add_student_parser = subparsers.add_parser(
        'add_student', help='Add/update one student for a course'
    )
    add_student_parser.add_argument(
        'course_id', metavar='COURSE_ID', help='ID of the course'
    )
    add_student_parser.add_argument(
        'student_id',
        metavar='STUDENT_ID',
        help='Username of the added/modified student',
    )
    add_student_parser.add_argument(
        '-f',
        '--first_name',
        default=None,
        help='First name of the student',
    )
    add_student_parser.add_argument(
        '-l',
        '--last_name',
        default=None,
        help='Last name of the student',
    )
    add_student_parser.add_argument(
        '-e',
        '--email',
        default=None,
        help='Email of the student',
    )
    add_student_parser.add_argument(
        '--no-gb',
        action='store_true',
        help='Do not add student to local nbgrader gradebook',
    )
    add_student_parser.set_defaults(func=add_student)
    # add_students COURSE_ID CSV_FILE [--no-gb]
    add_students_parser = subparsers.add_parser(
        'add_students',
        help='Add/update multiple students in a course using a CSV file',
    )
    add_students_parser.add_argument(
        'course_id', metavar='COURSE_ID', help='ID of the course'
    )
    add_students_parser.add_argument(
        'csv_file',
        metavar='CSV_FILE',
        help='A CSV file with four fields: student_id,first_name,last_name,email',
    )
    add_students_parser.add_argument(
        '--no-gb',
        action='store_true',
        help='Do not add students to local nbgrader gradebook',
    )
    add_students_parser.set_defaults(func=add_students)
    # remove_students COURSE_ID STUDENT [STUDENT ...] [--no-gb] [--force]
    remove_students_parser = subparsers.add_parser(
        'remove_students', help='Remove one or more students from a course'
    )
    remove_students_parser.add_argument(
        'course_id', metavar='COURSE_ID', help='ID of the course'
    )
    remove_students_parser.add_argument(
        'students',
        metavar='STUDENT',
        nargs='+',
        help='List of student IDs to remove',
    )
    remove_students_parser.add_argument(
        '--no-gb',
        action='store_true',
        help='Do not remove student from local nbgrader gradebook',
    )
    remove_students_parser.add_argument(
        '--force',
        action='store_true',
        help='Force student removal from local nbgrader gradebook, even if this deletes their grades',
    )
    remove_students_parser.set_defaults(func=remove_students)
    # With no subcommand, just print the usage text.
    parser.set_defaults(func=lambda x: parser.print_help())
    args = parser.parse_args(argv)
    return args
def main(argv=None):
    """CLI entry point: parse *argv* (default: sys.argv[1:]) and dispatch."""
    # `argv or sys.argv[1:]` would wrongly fall back to the real command
    # line when an explicit empty list is passed; test for None instead.
    if argv is None:
        argv = sys.argv[1:]
    args = parse_args(argv)
    args.func(args)


if __name__ == '__main__':
    sys.exit(main())
| 13,753 | 4,269 |
{"filter":false,"title":"settings.py","tooltip":"/Noda_SF_Project/Noda_SF_Project/settings.py","undoManager":{"mark":3,"position":3,"stack":[[{"start":{"row":38,"column":33},"end":{"row":39,"column":0},"action":"insert","lines":["",""],"id":2},{"start":{"row":39,"column":0},"end":{"row":39,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":39,"column":4},"end":{"row":39,"column":6},"action":"insert","lines":["''"],"id":3}],[{"start":{"row":39,"column":5},"end":{"row":39,"column":6},"action":"insert","lines":["n"],"id":4},{"start":{"row":39,"column":6},"end":{"row":39,"column":7},"action":"insert","lines":["o"]},{"start":{"row":39,"column":7},"end":{"row":39,"column":8},"action":"insert","lines":["d"]},{"start":{"row":39,"column":8},"end":{"row":39,"column":9},"action":"insert","lines":["a"]},{"start":{"row":39,"column":9},"end":{"row":39,"column":10},"action":"insert","lines":["s"]},{"start":{"row":39,"column":10},"end":{"row":39,"column":11},"action":"insert","lines":["f"]}],[{"start":{"row":39,"column":12},"end":{"row":39,"column":13},"action":"insert","lines":[","],"id":5}]]},"ace":{"folds":[],"scrolltop":1233,"scrollleft":0,"selection":{"start":{"row":120,"column":13},"end":{"row":120,"column":13},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"hash":"b94b6875263f69d9119932a08b7ab4cf189c05b3","timestamp":1562792805975} | 1,423 | 571 |
#!/usr/bin/env python2
import numpy as np
import os
import scipy
VIS_DIR = "vis"
class Visualizer:
    """Collects rows of text/images and writes them out as one HTML table.

    Python 2 module (note the ``print >>file`` statement syntax). Typical
    use: begin() -> repeated show() -> end().
    """

    def __init__(self):
        # Nothing is recorded until begin() is called.
        self.active = False

    def begin(self, dest, max_entries):
        # Start a new session writing under VIS_DIR/dest, recording at most
        # max_entries rows before deactivating.
        self.lines = []
        self.active = True
        self.max_entries = max_entries
        self.next_entry = 0
        self.dest_dir = os.path.join(VIS_DIR, dest)
        if not os.path.exists(self.dest_dir):
            os.mkdir(self.dest_dir)

    def reset(self):
        # Re-enable recording; previously stored lines are kept.
        self.next_entry = 0
        self.active = True

    def end(self):
        # Stop recording and dump all stored rows to index.html.
        self.active = False
        with open(os.path.join(self.dest_dir, "index.html"), "w") as vis_file:
            #print >>vis_file, "<html><head><link rel='stylesheet' href='style.css'></head><body><table>"
            print >>vis_file, "<html><head>"
            print >>vis_file, "<link rel='stylesheet' href='../style.css' />"
            print >>vis_file, "</head><body><table>"
            for line in self.lines:
                print >>vis_file, " <tr>"
                for field in line:
                    # Trailing commas suppress the newline so each cell is
                    # emitted as "<td> field </td>" across three prints.
                    print >>vis_file, " <td>",
                    print >>vis_file, field,
                    print >>vis_file, "</td>"
                print >>vis_file, " </tr>"
            print >>vis_file, "</table></body></html>"

    def show(self, data):
        # Record one row; ndarray fields are saved to disk as JPEG images
        # and referenced with <img> tags, everything else is stringified.
        if not self.active:
            return
        table_data = []
        for i_field, field in enumerate(data):
            if isinstance(field, np.ndarray):
                filename = "%d_%d.jpg" % (self.next_entry, i_field)
                filepath = os.path.join(self.dest_dir, filename)
                scipy.misc.imsave(filepath, field)
                table_data.append("<img src='%s' />" % filename)
            else:
                table_data.append(str(field))
        self.lines.append(table_data)
        self.next_entry += 1
        if self.next_entry >= self.max_entries:
            # Row budget exhausted: silently stop recording.
            self.active = False


# Module-level singleton shared by importers.
visualizer = Visualizer()
| 1,968 | 601 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'error_screen.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import sys, os
import platform
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import (QCoreApplication, QPropertyAnimation, QDate, QDateTime, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt, QEvent, QThread, Signal)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter, QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
class Ui_ErrorWindow(object):
    """pyuic5-generated layout for the frameless error dialog.

    Generated code ported to PySide2 imports; regenerating from
    error_screen.ui will discard manual changes.
    """

    def setupUi(self, MainWindow):
        """Build the fixed-size (680x400) widget tree on *MainWindow*."""
        if MainWindow.objectName():
            MainWindow.setObjectName("MainWindow")
        MainWindow.setFixedSize(680, 400)
        MainWindow.setMinimumSize(QSize(680, 400))
        MainWindow.setMaximumSize(QSize(680, 400))
        MainWindow.setStyleSheet("QFrame { \n"
"    background-color: rgb(56, 58, 89);    \n"
"    color: rgb(220, 220, 220);\n"
"    border-radius: 10px;\n"
"}")
        self.centralwidget = QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Outer rounded frame holding all content.
        self.frame = QFrame(self.centralwidget)
        self.frame.setFrameShape(QFrame.StyledPanel)
        self.frame.setFrameShadow(QFrame.Raised)
        self.frame.setObjectName("frame")
        self.verticalLayout_2 = QVBoxLayout(self.frame)
        self.verticalLayout_2.setContentsMargins(10, 10, 10, 10)
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Thin top bar containing only the close button (right-aligned via
        # the 610px left margin).
        self.frame_2 = QFrame(self.frame)
        self.frame_2.setMaximumSize(QSize(16777215, 20))
        self.frame_2.setFrameShape(QFrame.StyledPanel)
        self.frame_2.setFrameShadow(QFrame.Raised)
        self.frame_2.setObjectName("frame_2")
        self.horizontalLayout = QHBoxLayout(self.frame_2)
        self.horizontalLayout.setContentsMargins(610, 0, 0, 0)
        self.horizontalLayout.setSpacing(0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.closeButton = QPushButton(self.frame_2)
        self.closeButton.setMaximumSize(QSize(20, 20))
        font = QFont()
        font.setFamily("Small Fonts")
        font.setBold(False)
        font.setWeight(50)
        self.closeButton.setFont(font)
        self.closeButton.setStyleSheet(".QPushButton {\n"
"    background-color:transparent;\n"
"    color: rgb(255, 255, 255);\n"
"    padding:16px 31px;\n"
"    text-decoration:none;\n"
"    text-align:center;\n"
"    vertical-align: middle;\n"
"}\n"
"\n"
".QPushButton:hover {\n"
"    color: rgb(255, 255, 255);\n"
"    background-color: rgb(255, 0, 0);\n"
"    border-radius:8px;\n"
"\n"
"}\n"
"")
        self.closeButton.setObjectName("closeButton")
        self.horizontalLayout.addWidget(self.closeButton)
        self.verticalLayout_2.addWidget(self.frame_2)
        # Large pink application title.
        self.label_title = QLabel(self.frame)
        font = QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(40)
        self.label_title.setFont(font)
        self.label_title.setStyleSheet("color: rgb(254, 121, 199);")
        self.label_title.setAlignment(Qt.AlignCenter)
        self.label_title.setObjectName("label_title")
        self.verticalLayout_2.addWidget(self.label_title)
        # "--ERROR!--" headline.
        self.label_error = QLabel(self.frame)
        self.label_error.setMaximumSize(QSize(16777215, 50))
        font = QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(24)
        self.label_error.setFont(font)
        self.label_error.setStyleSheet("color : rgb(98, 114, 164);")
        self.label_error.setAlignment(Qt.AlignCenter)
        self.label_error.setObjectName("label_error")
        self.verticalLayout_2.addWidget(self.label_error)
        # Placeholder label; its "{}" text is meant to be filled with the
        # actual error details at runtime.
        self.label_errorInfo = QLabel(self.frame)
        font = QFont()
        font.setFamily("Rockwell")
        font.setPointSize(15)
        self.label_errorInfo.setFont(font)
        self.label_errorInfo.setStyleSheet("color : rgb(98, 114, 164);")
        self.label_errorInfo.setAlignment(Qt.AlignCenter)
        self.label_errorInfo.setObjectName("label_errorInfo")
        self.verticalLayout_2.addWidget(self.label_errorInfo)
        # Contact line and copyright footer.
        self.label_contact = QLabel(self.frame)
        self.label_contact.setMaximumSize(QSize(16777215, 60))
        font = QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(12)
        self.label_contact.setFont(font)
        self.label_contact.setStyleSheet("color: rgb(254, 121, 199);")
        self.label_contact.setAlignment(Qt.AlignCenter)
        self.label_contact.setObjectName("label_contact")
        self.verticalLayout_2.addWidget(self.label_contact)
        self.label_copyright = QLabel(self.frame)
        self.label_copyright.setMaximumSize(QSize(16777215, 15))
        self.label_copyright.setStyleSheet(" color: rgb(98, 114, 164);")
        self.label_copyright.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.label_copyright.setObjectName("label_copyright")
        self.verticalLayout_2.addWidget(self.label_copyright)
        self.verticalLayout.addWidget(self.frame)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all translatable texts to the widgets."""
        MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", "MainWindow"))
        self.closeButton.setText(QCoreApplication.translate("MainWindow", "X"))
        self.label_title.setText(QCoreApplication.translate("MainWindow", "<strong>Frux\'s</strong> BOT"))
        self.label_error.setText(QCoreApplication.translate("MainWindow", "--<strong>ERROR!</strong>--"))
        self.label_errorInfo.setText(QCoreApplication.translate("MainWindow", "{}"))
        self.label_contact.setText(QCoreApplication.translate("MainWindow", "<strong>CONTACT </strong> @Frux#0063"))
        self.label_copyright.setText(QCoreApplication.translate("MainWindow", "©COPYRIGHT, FRUXC"))
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    MainWindow = QMainWindow()
    # Fixed NameError: this module defines Ui_ErrorWindow, not Ui_MainWindow.
    ui = Ui_ErrorWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| 6,803 | 2,372 |
"""Implementation of AvoidChanges."""
import numpy as np
from ..Specification import Specification, SpecEvaluation
# from .VoidSpecification import VoidSpecification
from ..biotools import (
sequences_differences_array,
group_nearby_indices,
)
from ..Location import Location
class AvoidChanges(Specification):
    """Specify that some locations of the sequence should not be changed.

    Shorthand for annotations: "change".

    Parameters
    ----------
    location
      Location object indicating the position of the segment that must be
      left unchanged. Alternatively,
      indices can be provided. If neither is provided, the assumed location
      is the whole sequence.

    indices
      List of indices that must be left unchanged.

    target_sequence
      At the moment, this is rather an internal variable. Do not use unless
      you're not afraid of side effects.
    """

    localization_interval_length = 6  # used when optimizing the minimize_diffs
    best_possible_score = 0
    enforced_by_nucleotide_restrictions = True
    shorthand_name = "keep"
    priority = -1000

    def __init__(
        self,
        max_edits=0,
        max_edits_percent=None,
        location=None,
        indices=None,
        target_sequence=None,
        boost=1.0,
    ):
        """Initialize."""
        # With only indices given, derive the covering location.
        if location is None and (indices is not None):
            location = (min(indices), max(indices) + 1)
        self.location = Location.from_data(location)
        # The constraint is strand-agnostic; normalize to the + strand.
        if (self.location is not None) and self.location.strand == -1:
            self.location.strand = 1
        self.indices = np.array(indices) if (indices is not None) else None
        self.target_sequence = target_sequence
        self.max_edits = max_edits
        self.max_edits_percent = max_edits_percent
        self.boost = boost

    def extract_subsequence(self, sequence):
        """Extract a subsequence from the location or indices.

        Used to initialize the function when the sequence is provided.
        """
        if (self.location is None) and (self.indices is None):
            return sequence
        elif self.indices is not None:
            return "".join(np.array(list(sequence))[self.indices])
        else:  # self.location is not None:
            return self.location.extract_sequence(sequence)

    def initialized_on_problem(self, problem, role=None):
        """Find out what sequence it is that we are supposed to conserve."""
        result = self._copy_with_full_span_if_no_location(problem)
        L = len(result.location if result.indices is None else result.indices)
        # Translate a percentage budget into an absolute edit count.
        if result.max_edits_percent is not None:
            result.max_edits = np.floor(result.max_edits_percent * L / 100.0)
        result.enforced_by_nucleotide_restrictions = result.max_edits == 0
        # Initialize the "target_sequence" in two cases:
        # - Always at the very beginning
        # - When the new sequence is bigger than the previous one
        #   (used in CircularDnaOptimizationProblem)
        if result.target_sequence is None or (
            len(result.target_sequence) < len(self.location)
        ):
            result = result.copy_with_changes()
            result.target_sequence = self.extract_subsequence(problem.sequence)
        return result

    def evaluate(self, problem):
        """Return a score equal to -number_of modifications.

        Locations are "binned" modifications regions. Each bin has a length
        in nucleotides equal to ``localization_interval_length``.
        """
        target = self.target_sequence
        sequence = self.extract_subsequence(problem.sequence)
        # Positions (within the subsequence) where it deviates from target.
        differing_indices = np.nonzero(
            sequences_differences_array(sequence, target)
        )[0]
        # Convert subsequence positions back to global coordinates.
        if self.indices is not None:
            differing_indices = self.indices[differing_indices]
        elif self.location is not None:
            if self.location.strand == -1:
                differing_indices = self.location.end - differing_indices
            else:
                differing_indices = differing_indices + self.location.start
        # Group nearby differences into breach intervals for reporting.
        intervals = [
            (r[0], r[-1] + 1)
            for r in group_nearby_indices(
                differing_indices,
                max_group_spread=self.localization_interval_length,
            )
        ]
        locations = [Location(start, end, 1) for start, end in intervals]
        # Non-negative while the edit budget is respected.
        score = self.max_edits - len(differing_indices)
        return SpecEvaluation(self, problem, score=score, locations=locations)

    def localized(self, location, problem=None, with_righthand=False):
        """Localize the spec to the overlap of its location and the new.
        """
        # With a non-zero edit budget the constraint is global and cannot
        # be meaningfully restricted to a window.
        # NOTE(review): `with_righthand` is accepted but unused here.
        if self.max_edits != 0:
            return self
        start, end = location.start, location.end
        if self.indices is not None:
            # Keep only the conserved indices falling inside the window,
            # along with the matching slice of the target sequence.
            pos = ((start <= self.indices) & (self.indices < end)).nonzero()[0]
            new_indices = self.indices[pos]
            new_target = "".join(np.array(list(self.target_sequence))[pos])
            return self.copy_with_changes(
                indices=new_indices, target_sequence=new_target
            )
        else:
            new_location = self.location.overlap_region(location)
            if new_location is None:
                return None
            else:
                new_constraint = self.copy_with_changes(location=new_location)
                # Shift to coordinates relative to the stored target.
                relative_location = new_location + (-self.location.start)
                new_constraint.target_sequence = relative_location.extract_sequence(
                    self.target_sequence
                )
                return new_constraint

    def restrict_nucleotides(self, sequence, location=None):
        """When localizing, forbid any nucleotide but the one already there."""
        # Only a strict (zero-edit) constraint can be enforced this way.
        if self.max_edits or self.max_edits_percent:
            return []
        if location is not None:
            start = max(location.start, self.location.start)
            end = min(location.end, self.location.end)
        else:
            start, end = self.location.start, self.location.end
        if self.indices is not None:
            return [
                ((i, i + 1), set([sequence[i : i + 1]]))
                for i in self.indices
                if start <= i < end
            ]
        else:
            return [((start, end), set([sequence[start:end]]))]

    def short_label(self):
        return "keep"

    def breach_label(self):
        return "edits"
| 6,509 | 1,803 |
from django.contrib.auth import get_user_model
from ekohms.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.http import HttpResponse
from django.shortcuts import render,redirect
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from .forms import UserRegisterForm
from django.contrib import messages
UserModel = get_user_model()
def register(request):
    """Sign-up view: create a user with unverified email and send an
    activation link to the submitted address."""
    # if request.method == 'GET':
    #     return render(request, 'users/register.html')
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            # Persist with email_verified=False until the link is clicked.
            user = form.save(commit=False)
            user.email_verified = False
            user.save()
            # Build the activation email from the uid/token pair that the
            # activate() view below will verify.
            current_site = get_current_site(request)
            mail_subject = 'Activate your account.'
            message = render_to_string('users/acc_active_email.html', {
                'user': user,
                'domain': current_site.domain,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': default_token_generator.make_token(user),
            })
            to_email = form.cleaned_data.get('email')
            email = EmailMessage(
                mail_subject, message, to=[to_email]
            )
            email.send(fail_silently=False)
            messages.success(request, 'Account successfully created')
            messages.success(request, 'Please confirm your email address to complete the registration')
            return redirect('login')
        # return HttpResponse('Please confirm your email address to complete the registration')
    else:
        form = UserRegisterForm()
    # Invalid POSTs fall through with the bound form so errors are shown.
    return render(request, 'users/register.html', {'form': form})
def activate(request, uidb64, token):
    """Verify an email-activation link and mark the account as verified."""
    try:
        pk = urlsafe_base64_decode(uidb64).decode()
        user = UserModel._default_manager.get(pk=pk)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    # Reject unknown users and stale/forged tokens up front.
    if user is None or not default_token_generator.check_token(user, token):
        return HttpResponse('Activation link is invalid!')
    user.email_verified = True
    user.save()
    messages.success(request, 'Thank you for your email confirmation. Now you can login your account.')
    return redirect('login')
# form = UserRegisterForm()
# return render(request, 'users/register.html',{'form':form} )
# Create your views here.
| 2,795 | 748 |
############# exo ###############
# Do not modify the variables below
protocole = "https://"
nom_du_site = "docstring"
extension = "fr"
page = "glossaire"

# Modify the code from here on
URL = f"{protocole}{nom_du_site}.{extension}/{page}"
print(URL)
from django.db.models.base import Model
from rest_framework import serializers
from rest_framework.utils import field_mapping
from . models import User, UserManager
from rest_framework.exceptions import AuthenticationFailed
from django.contrib import auth
from rest_framework.exceptions import AuthenticationFailed
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.encoding import smart_str, force_str, smart_bytes, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
class RegisterSerializer(serializers.ModelSerializer):
    """Serializer for user sign-up; enforces an alphanumeric username."""

    password = serializers.CharField(max_length=68, min_length=6, write_only=True)

    class Meta:
        model = User
        fields = ['email', 'username', 'password']

    def validate(self, attrs):
        """Reject usernames with non-alphanumeric characters.

        (Removed an unused `email` local that was read but never used.)
        """
        username = attrs.get('username', '')
        if not username.isalnum():
            raise serializers.ValidationError('The username should only contain alphanumeric characters')
        return attrs

    def create(self, validated_data):
        """Delegate creation to the custom manager so the password is hashed."""
        return User.objects.create_user(**validated_data)
class EmailVerificationSerializer(serializers.ModelSerializer):
    """Carries the activation token sent to a new user's email address."""

    token=serializers.CharField(max_length=555)

    class Meta:
        model=User
        fields=['token']
class LoginSerializer(serializers.ModelSerializer):
    """Validates credentials; returns email/username/tokens on success."""

    email = serializers.EmailField(max_length=255, min_length=3)
    password = serializers.CharField(max_length=68, min_length=6, write_only=True)
    username = serializers.CharField(max_length=68, min_length=3, read_only=True)
    tokens = serializers.CharField(max_length=68, min_length=6, read_only=True)

    class Meta:
        model = User
        fields = ['email', 'password', 'username', 'tokens']

    def validate(self, attrs):
        """Authenticate and gate on active/verified status.

        Raises AuthenticationFailed for any failure mode. (Removed an
        unreachable `return super().validate(attrs)` that followed the
        dict return.)
        """
        email = attrs.get('email', '')
        password = attrs.get('password', '')
        user = auth.authenticate(email=email, password=password)
        if not user:
            raise AuthenticationFailed('Invalid credentials,try again')
        if not user.is_active:
            raise AuthenticationFailed('Account disabled,contact admin')
        if not user.is_verified:
            raise AuthenticationFailed('Email is not verified')
        return {
            'email': user.email,
            'username': user.username,
            'tokens': user.tokens,
        }
class ResetPasswordEmailRequest(serializers.Serializer):
    """Accepts the email address that requests a password-reset link."""

    email = serializers.EmailField(min_length=2)

    class Meta:
        fields = ['email']
class SetNewPasswordSerializer(serializers.Serializer):
    """Validates a password-reset token and persists the new password."""

    password = serializers.CharField(min_length=6, max_length=64, write_only=True)
    token = serializers.CharField(min_length=1, write_only=True)
    uidb64 = serializers.CharField(min_length=1, write_only=True)

    class Meta:
        fields = ['password', 'token', 'uidb64']

    def validate(self, attrs):
        """Decode uidb64, verify the token, then set and save the password."""
        try:
            password = attrs.get('password')
            token = attrs.get('token')
            uidb64 = attrs.get('uidb64')
            # Renamed local from `id` (shadowed the builtin) to `user_id`.
            user_id = force_str(urlsafe_base64_decode(uidb64))
            user = User.objects.get(id=user_id)
            if not PasswordResetTokenGenerator().check_token(user, token):
                raise AuthenticationFailed('The token is invalid', 401)
            user.set_password(password)
            user.save()
        except Exception as e:
            # Chain the cause so unexpected failures (bad base64, missing
            # user, DB errors) stay debuggable instead of being masked.
            raise AuthenticationFailed('The token is invalid', 401) from e
        return super().validate(attrs)
class CustomTokenObtainPairSerializer(TokenObtainPairSerializer):
    """JWT pair serializer that embeds the username as a custom claim."""

    @classmethod
    def get_token(cls, user):
        token = super().get_token(user)
        # Add custom claims
        token['username'] = user.username
        return token
from setuptools import setup

# Use the project README (UTF-8) as the PyPI long description.
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

# Packaging metadata for py-publicbr: tools to extract and consolidate
# Brazilian public datasets.
setup(
    name = 'py-publicbr',
    packages = ['publicbr', 'publicbr.cnpj'],
    version = '0.1',
    license='MIT',
    description = 'Extract and consolidate Brazilian public datasets',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author = 'Pedro Toledo',
    author_email = 'pedroltoledo@gmail.com',
    url = 'https://github.com/pltoledo/py-publicbr',
    download_url = 'https://github.com/pltoledo/py-publicbr/archive/v_01.tar.gz',
    keywords = ['public data', 'brazil', 'data', 'public', 'etl'],
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'tqdm',
        'beautifulsoup4',
        'requests',
        'Unidecode',
        'pyspark',
        'geocoder'
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Topic :: Office/Business',
        'Topic :: Sociology',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.8',
    ],
    python_requires='>=3.8'
)
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
VERSION = __version__
from ._version import ( # noqa: F401
library_version_number,
library_version_string,
)
| 313 | 89 |
# -*- coding: utf-8 -*-
__author__ = 'Dmitriy.Dakhnovskiy'
from .base_table import BaseTable
| 94 | 42 |
from . import serializable
from .user import User
class Score(serializable.Type):
    """A user's score entry: numeric score (or None) plus an attempt count."""

    def __init__(self, id, user, score=None, attempts=0):
        self.id = int(id)
        # `user` arrives serialized; rebuild the model object.
        self.user = User.deserialize(user)
        # Fixed: `score != None` -> `score is not None` (PEP 8 identity
        # comparison); note 0 and 0.0 are valid scores and must be kept.
        self.score = float(score) if score is not None else None
        self.attempts = int(attempts)

    def add_attempt(self, inc=1):
        """Increase the attempt counter by *inc* (default 1)."""
        self.attempts += inc

    def set(self, score):
        """Overwrite the score with *score* (coerced to float)."""
        self.score = float(score)
| 393 | 152 |
'''
Copyright 2019, Amazon Web Services Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Python 3
Helper module to return the current time in Pacific time zone.
'''
from datetime import datetime
from pytz import timezone
def now_pst():
    """Return the current time converted to the US/Pacific timezone."""
    return datetime.now(timezone('UTC')).astimezone(timezone('US/Pacific'))
| 870 | 257 |
# -*- coding: utf-8 -
#
# This file is part of dj-pages released under the MIT license.
# See the NOTICE for more information.
from django.conf import settings
from django.contrib.auth.models import Group
from django.http import Http404, HttpResponse, HttpResponseServerError
from django.shortcuts import render_to_response
from django.template import RequestContext, loader, Context
def page_handler(request):
    """Main page handler: normalize the path, look up the Page, dispatch.

    Trailing slashes are stripped (except for the root), then the matching
    Page is rendered according to its type.
    """
    path = request.path_info
    if path == "/" or not path:
        path = "/"
    elif path.endswith('/'):
        path = path[:-1]
    # NOTE(review): `Page` is not imported in this file's visible header —
    # confirm it is provided elsewhere in the module.
    page = Page.from_path(path)
    if page is None:
        raise Http404
    if page.type == "page":
        return render_page(request, page)
    elif page.type == "content":
        return render_content(request, page)
    else:
        # Fixed: the message string was split across a raw newline (a
        # syntax error) and misspelled "Unkown".
        return HttpResponseServerError(
            "Unknown page type. Contact the administrator of this site.")
def render_page(request, page):
    """Render a 'page'-type Page: its body is itself the template."""
    ctx = RequestContext(request)
    rendered = render_template(page.body, context_instance=ctx)
    return HttpResponse(rendered)
def render_content(request, page):
    """Render a 'content'-type Page through its schema's 'show' template."""
    try:
        # NOTE(review): `Schema` and `ResourceNotFound` are not imported in
        # this file's visible header — confirm they come from elsewhere.
        schema = Schema.get(page.schema)
    except ResourceNotFound:
        raise Http404("template not found")
    template = schema.templates['show']
    # NOTE(review): `doc` is not defined in this function or anywhere
    # visible in this module — this line will raise NameError at runtime.
    # Presumably a document should be looked up from `page` first; confirm.
    content = render_template(template, {
        "doc": doc
    }, context_instance=RequestContext(request))
    return HttpResponse(content)
| 1,446 | 404 |
"""Providing a class for dealing with soft assignments of spikes at the end."""
import copy as copy
import numpy as np
from tqdm import tqdm
from yass.template import WaveForms
from yass.merge.merge import template_dist_linear_align, template_spike_dist_linear_align
def get_soft_assignments(templates, templates_upsampled, spike_train,
                         spike_train_upsampled, filename_residual, n_similar_units=2):
    """Given templates and spikes determines collision templates.

    params:
    -------
    templates: np.ndarray
        Has shape (# units, # time samples, # channels).
    templates_upsampled: np.ndarray
        Upsampled templates, same axis layout as `templates`.
    spike_train: np.ndarray
        Columns are (spike time, unit id).
    spike_train_upsampled: np.ndarray
        Spike train whose unit column indexes `templates_upsampled`.
    filename_residual: str
        Path of the residual recording, read back via `read_spikes`.
    n_similar_units: int
        Number of similar units that the spikes should be compare against.

    returns:
    --------
    (soft_assignments, sim_unit_map): per-spike assignment probabilities
    over each spike's `n_similar_units` closest units, and the
    unit -> similar-units index map.
    """
    def softmax(x):
        """Row-wise softmax; shape must be (N, d)."""
        # Subtracting the row max is mathematically a no-op for softmax but
        # prevents overflow in np.exp for large distance magnitudes.
        e = np.exp(x - x.max(axis=1, keepdims=True))
        return e / e.sum(axis=1)[:, None]

    # Bring the last axis to the front for the distance helpers below.
    templates = np.transpose(templates, [2, 0, 1])
    templates_upsampled = np.transpose(templates_upsampled, [2, 0, 1])
    # NOTE(review): computed but never used afterwards — kept so behavior
    # (including any side effects of the call) is unchanged; confirm it can
    # be dropped.
    affinity_matrix = template_dist_linear_align(templates)

    n_spikes = spike_train.shape[0]
    temp = WaveForms(templates.transpose([0, 2, 1]))
    pdist = temp.pair_dist()

    soft_assignments = np.zeros([n_spikes, n_similar_units])
    # By default assign each spike to its own cluster.
    soft_assignments[:, 0] = 1
    # Fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int produces the same dtype here.
    sim_unit_map = np.zeros([temp.n_unit, n_similar_units]).astype(int)

    for unit in tqdm(range(temp.n_unit), "Computing soft assignments"):
        spt_idx = np.where(spike_train[:, 1] == unit)[0]
        spt = spike_train[spt_idx, 0]
        # Get all upsampled ids
        units = spike_train_upsampled[spt_idx, 1]
        n_unit_spikes = len(spt)
        # NOTE(review): read_spikes is not imported in this file's visible
        # header — confirm it is provided elsewhere in the module.
        spikes, skipped_idx = read_spikes(
            filename=filename_residual,
            spikes=spt,
            n_channels=temp.n_channel,
            spike_size=temp.n_time,
            units=units,
            templates=templates_upsampled,
            residual_flag=True)
        sim_units = pdist[unit].argsort()[:n_similar_units]
        sim_unit_map[unit] = sim_units
        # Get distances of spikes to both similar units.
        dist_features = template_spike_dist_linear_align(
            templates=templates[sim_units],
            spikes=spikes)
        # Note that we are actually doing soft-min by using negative distance.
        assignments = softmax(- dist_features.T)
        # Spikes that could not be read are skipped; leave their default row.
        success_idx = np.setdiff1d(
            np.arange(n_unit_spikes), np.array(skipped_idx))
        soft_assignments[spt_idx[success_idx], :] = assignments
    return soft_assignments, sim_unit_map
| 2,545 | 848 |
#!/usr/bin/env python3
# Tool that dumps all the information we need from the WikiExtraction parser [1] to create our own dataset into a file, such that it is later easily accessible
# [1]: https://github.com/attardi/wikiextractor
# Important: not efficient code, but it only has to run once on simple wiki, which is small
# This tool relies on the output of the bash script 'extractDataWithWikiExtractor'
# The dumped dictionary will have the following form
# It will map from an article id to an article dictionary
# Each article dictionary will have the keys: meta, plain, links, lists
import argparse
import os
import pickle
from lxml import etree
# The dumped dictionary: maps article id -> article dict with the keys
# 'meta', 'plain', 'links', 'lists' (filled in by the passes below).
articles = {}
# recover=True tolerates the slightly malformed XML that WikiExtractor emits.
xmlParser = etree.XMLParser(recover=True)
def removeXMLMarkups(article):
    """Strip the wrapping '<doc ...>' first line and '</doc>' last line.

    Returns only the article body between those two marker lines.
    """
    stripped = article.strip()
    header, sep, body = stripped.partition('\n')
    assert(sep and header.startswith('<doc'))
    body, sep, footer = body.rpartition('\n')
    assert(sep and footer.startswith('</doc>'))
    return body
def addArticleKeys(key, files):
    """Store each article's markup-stripped body under *key* in `articles`.

    Every article in *files* must already have an entry in the global
    `articles` dict (created by the plain-text pass in __main__).
    """
    for fn in files:
        # Fixed: the original opened one handle per file and never closed
        # it; 'with' guarantees the handle is released.
        with open(fn) as f:
            currArticle = ''
            for line in f:
                currArticle += line
                if line.strip() == '</doc>':
                    xmlArticle = etree.fromstring(currArticle, parser=xmlParser)
                    assert(xmlArticle.attrib.keys() == ['id','url','title'])
                    currid = int(xmlArticle.attrib['id'])
                    assert(currid in articles)
                    articles[currid][key] = removeXMLMarkups(currArticle)
                    currArticle = ''
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, help="Directory, where the script 'extractDataWithWikiExtractor.sh' was run." + \
        "\nThus, folder that contains the directories: simplewiki-plain, simplewiki-lists, simplewiki-links", default='.')
    parser.add_argument('--output', type=str, help="Name and directory of outputfile.", default='wikiextraction_dump.pickle')
    args = parser.parse_args()
    inputDir = args.input
    output = args.output
    plainDir = os.path.join(inputDir, 'simplewiki-plain')
    linksDir = os.path.join(inputDir, 'simplewiki-links')
    listsDir = os.path.join(inputDir, 'simplewiki-lists')
    if not os.path.isdir(plainDir) or \
       not os.path.isdir(linksDir) or \
       not os.path.isdir(listsDir):
        raise(Exception('At least one of the following folders is not present in directory \'' + inputDir + \
            '\': simplewiki-plain, simplewiki-lists, simplewiki-links'))
    # Collect the WikiExtractor output shards ("wiki_*") under a folder.
    getAllFilesInFolder = lambda folder : [os.path.join(root, name) \
        for root, dirs, files in os.walk(folder) \
        for name in files \
        if name.startswith(("wiki_"))]
    plainFiles = getAllFilesInFolder(plainDir)
    linksFiles = getAllFilesInFolder(linksDir)
    listsFiles = getAllFilesInFolder(listsDir)
    # First pass: the plain-text shards create each article entry ('meta'
    # and 'plain'); the later passes only add keys to existing entries.
    for fn in plainFiles:
        # Fixed: handles were opened and never closed (one leak per shard).
        with open(fn) as f:
            currArticle = ''
            for line in f:
                currArticle += line
                if line.strip() == '</doc>':
                    xmlArticle = etree.fromstring(currArticle, parser=xmlParser)
                    assert(xmlArticle.attrib.keys() == ['id','url','title'])
                    currid = int(xmlArticle.attrib['id'])
                    assert(currid not in articles)
                    articles[currid] = {}
                    articles[currid]['meta'] = dict(xmlArticle.attrib)
                    articles[currid]['plain'] = removeXMLMarkups(currArticle)
                    currArticle = ''
    addArticleKeys('links', linksFiles)
    addArticleKeys('lists', listsFiles)
    # Fixed: the output handle was also leaked; close it before reporting.
    with open(output, "wb") as outfile:
        pickle.dump(articles, outfile)
    print('articles successfully dumped into ' + output)
| 3,473 | 1,250 |
# mkdat.py
#
# To make the list of monsters data.
#
import string
class enuming:
    """One enum entry: ordinal number, name key, and display character."""

    def __init__(self, n, namekey, ch='?'):
        self.n = int(n)
        self.namekey = str(namekey)
        self.ch = str(ch)

    def DEBUGP(self):
        """Print the entry's fields for debugging."""
        print(self.n, self.namekey, self.ch)


class enuml:
    """Parse 'NAME:ch' lines from *filename* into enuming entries.

    Lines starting with '#' and blank lines are skipped. The character
    after ':' is optional and defaults to '?'.
    """

    def __init__(self, filename, debugmode=False):
        # Fixed: use 'with' so the handle is closed even on parse errors.
        with open(filename, 'r') as f:
            lines = f.readlines()
        self.dat = []    # raw split fields per accepted line
        self.elist = []  # enuming entries, numbered from 1
        n = 0
        for raw in lines:
            # Guard against an empty string before indexing (the original
            # compared i[0] == '' which could never be true).
            if not raw or raw[0] in ('#', '\n'):
                continue
            n += 1
            # Fixed: the original chopped the last character with i[:-1],
            # which ate a real character when the final line had no
            # trailing newline; rstrip only removes the newline.
            fields = raw.rstrip('\n').split(':')
            if debugmode:
                for s in fields:
                    print(s, end=',')
                print(' : len ->' + str(len(fields)))
            ch = fields[1] if len(fields) >= 2 else '?'
            self.elist.append(enuming(n, fields[0], ch))
            self.dat.append(fields)

    def get_elist(self):
        """Return the list of enuming entries."""
        return self.elist

    def get_dat(self):
        """Return the raw split fields for each accepted line."""
        return self.dat
def test():
    # Smoke test: parse the default data file and dump every entry.
    a = enuml('data/dat.txt')
    for i in a.elist:
        i.DEBUGP()
def test2():
    # NOTE(review): `a` is built but never used and nothing is printed —
    # this probably intended to exercise the parsed data; confirm intent.
    a = enuml('data/dat.txt')
# Both tests run at import time; they require data/dat.txt to exist.
test()
test2()
| 1,127 | 455 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import os
import shutil
import peewee
from builtins import *
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
from retry import retry
from nzbhydra import database, config
from nzbhydra.database import Indexer, IndexerApiAccess, IndexerSearch, IndexerStatus, Search, IndexerNzbDownload, TvIdCache, MovieIdCache, SearchResult
def set_and_drop(dbfile="tests2.db", tables=None):
    """Reset the test configuration to a pristine state.

    Replaces testsettings.cfg with a fresh copy of testsettings.cfg.orig
    and loads it. The `dbfile` and `tables` parameters are kept for
    call-site compatibility but are currently unused (the database reset
    logic is disabled).
    """
    if os.path.exists("testsettings.cfg"):
        os.remove("testsettings.cfg")
    shutil.copy("testsettings.cfg.orig", "testsettings.cfg")
    config.load("testsettings.cfg")
@retry(OSError, delay=1, tries=5)
def deleteDbFile(dbfile):
    """Delete the test database file, retrying while a handle is still open.

    Fixed: `WindowsError` does not exist on non-Windows Python 3, so the
    decorator raised NameError at import time there; `OSError` is its
    alias on Windows and works on every platform.
    """
    if os.path.exists(dbfile):
        os.remove(dbfile)
| 1,489 | 482 |
# 4. Use functools.reduce to compute the product of the elements of a sequence.
from functools import reduce

r = reduce(lambda acc, val: acc * val, (1, 3, 5))
print(r)
| 105 | 58 |
# Generated by Django 2.1.5 on 2019-02-07 01:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine SoilMoisture.has_moisture as a
    plain BooleanField (no default/null options)."""

    dependencies = [
        ('garden_api', '0003_auto_20190206_1440'),
    ]

    operations = [
        migrations.AlterField(
            model_name='soilmoisture',
            name='has_moisture',
            field=models.BooleanField(),
        ),
    ]
| 391 | 146 |
import urllib.request,json
from .models import Quote
# Base URL of the quotes API; populated from app config by configure_request().
quote_api = None
def configure_request(app):
    """Copy the QUOTES_API setting from the app config into module state."""
    global quote_api
    quote_api = app.config["QUOTES_API"]
def get_quotes():
    '''
    Function that gets a random quote from the API.

    Returns a Quote object, or None when the API response was empty.
    '''
    # Dropped a no-op .format() call on a constant URL.
    get_article_details_url = 'http://quotes.stormconsultancy.co.uk/random.json'
    with urllib.request.urlopen(get_article_details_url) as url:
        quote_data = url.read()
        quote_data_response = json.loads(quote_data)

        quote_object = None
        if quote_data_response:
            author = quote_data_response.get('author')
            id = quote_data_response.get('id')
            quote = quote_data_response.get('quote')
            permalink = quote_data_response.get('permalink')
            quote_object = Quote(author, id, quote, permalink)
    # Fixed: the original built the Quote object and then returned the raw
    # response bytes instead; return the model object it constructed.
    return quote_object
| 888 | 279 |
from . import *
from . import Base as Scheme
# NOTE(review): assigning __name__ to __module__ at module level looks like a
# no-op alias; it is read by the Config titles below — confirm intentional.
__module__ = __name__
# Local alias: every schema in this module derives from the shared base model.
Schema = Scheme.Model
class Base(Schema):
    """Shared base for every Gitlab schema defined in this module."""

    class Config(Schema.Configuration):
        # "Gitlab-<module basename>", e.g. "Gitlab-projects".
        title = "Gitlab-" + __module__.split(".")[-1]
class Query(Base):
    """
    API Search-Query Schema: the accepted filter/sort parameters for the
    GitLab projects listing endpoint, one field per query parameter.
    """
    archived: Optional[Boolean] = Field(alias="Archived", title="archived", description="Limit by archived status")
    id_after: Integer = Field(alias="ID-Greater-Than", title="id_after", description="Limit results to projects with IDs greater than the specified ID")
    id_before: Integer = Field(alias="ID-Less-Than", title="id_before", description="Limit results to projects with IDs less than the specified ID")
    last_activity_after: Date = Field(alias="Last-Activity-After", title="last_activity_after", description="Limit results to projects with last_activity after specified time. Format: ISO 8601 YYYY-MM-DDTHH:MM:SSZ")
    last_activity_before: Date = Field(alias="Last-Activity-Before", title="last_activity_before", description="Limit results to projects with last_activity before specified time. Format: ISO 8601 YYYY-MM-DDTHH:MM:SSZ")
    membership: Optional[Boolean] = Field(alias="Membership", title="membership", description="Limit by projects that the current user is a member of")
    min_access_level: Integer = Field(alias="Minimum-Access-Level", title="min_access_level", description="Limit by current user minimal access level")
    order_by: String = Field(alias="Order-By", title="order_by", description="Return projects ordered by id, name, path, created_at, updated_at, or last_activity_at fields. repository_size, storage_size, packages_size or wiki_size fields are only allowed for admins. Default is created_at")
    owned: Optional[Boolean] = Field(alias="Owned", title="owned", description="Limit by projects explicitly owned by the current user")
    repository_checksum_failed: Optional[Union[String, Boolean]] = Field("Premium-Required", alias="Checksum-Failure", title="repository_checksum_failed", description="Limit projects where the repository checksum calculation has failed (Introduced in GitLab Premium 11.2)")
    repository_storage: String = Field(alias="Storage", title="repository_storage", description="Limit results to projects stored on repository_storage. (admins only)")
    search_namespaces: Optional[Boolean] = Field(alias="Search-Namespace", title="search_namespaces", description="Include ancestor namespaces when matching search criteria. Default is false")
    search: String = Field(alias="Search", title="search", description="Return list of projects matching the search criteria")
    simple: Optional[Boolean] = Field(alias="Simple", title="simple", description="Return only limited fields for each project. This is a no-op without authentication as then only simple fields are returned")
    sort: String = Field(alias="Sortable-Function", title="sort", description="Return projects sorted in asc or desc order. Default is desc")
    starred: Optional[Boolean] = Field(alias="Starred", title="starred", description="Limit by projects starred by the current user")
    statistics: Optional[Boolean] = Field(alias="Statistics", title="statistics", description="Include project statistics")
    visibility: String = Field(alias="Visibility", title="visibility", description="Limit by visibility public, internal, or private")
    wiki_checksum_failed: Optional[Union[String, Boolean]] = Field("Premium-Required", alias="Wiki-Checksum-Failure", title="wiki_checksum_failed", description="Limit projects where the wiki checksum calculation has failed (Introduced in GitLab Premium 11.2)")
    with_custom_attributes: Optional[Boolean] = Field(alias="Custom-Attributes", title="with_custom_attributes", description="Include custom attributes in response. (admins only)")
    with_issues_enabled: Optional[Boolean] = Field(alias="Issues-Enabled", title="with_issues_enabled", description="Limit by enabled issues feature")
    with_merge_requests_enabled: Optional[Boolean] = Field(alias="MR-Enabled", title="with_merge_requests_enabled", description="Limit by enabled merge requests feature")
    with_programming_language: String = Field(alias="Programming-Language", title="with_programming_language", description="Limit by projects which use the given programming language")
class Namespace(Base):
    """Owning namespace (user or group) of a project, as returned by the API."""
    id: Integer = Field(alias = "id", title = "id", description = "id")
    name: String = Field(alias = "name", title = "name", description = "name")
    path: String = Field(alias = "path", title = "path", description = "path")
    kind: String = Field(alias = "kind", title = "kind", description = "kind")
    full_path: String = Field(alias = "full_path", title = "full_path", description = "full_path")
    parent_id: Optional[Integer] = Field(None, alias = "parent_id", title = "parent_id", description = "parent_id")
    avatar_url: Optional[String] = Field(None, alias = "avatar_url", title = "avatar_url", description = "avatar_url")
    web_url: String = Field(alias = "web_url", title = "web_url", description = "web_url")
    class Config(Base.Config): title = Base.Config.title + "-" + "Namespace"
class Statistics(Base):
    """Project statistics block: commit count plus size figures, all
    defaulting to 0."""

    commit_count: Integer = Field(0, alias="Total-Commits", title="commit_count", description="")
    storage_size: Integer = Field(0, alias="Storage-Size", title="storage_size", description="")
    # Fixed copy-paste: alias was "Programming-Language" (duplicated from an
    # unrelated field) and the title was "Repository-Size" instead of the
    # snake_case field name used by every other field in this module.
    repository_size: Integer = Field(0, alias="Repository-Size", title="repository_size", description="")
    # Fixed copy-paste: alias was also "Programming-Language".
    wiki_size: Integer = Field(0, alias="Wiki-Size", title="wiki_size", description="")
    lfs_objects_size: Integer = Field(0, alias="LFS-Objects-Size", title="lfs_objects_size", description="")
    job_artifacts_size: Integer = Field(0, alias="Artifacts-Size", title="job_artifacts_size", description="")
    packages_size: Integer = Field(0, alias="Packages-Size", title="packages_size", description="")
    snippets_size: Integer = Field(0, alias="Snippets-Size", title="snippets_size", description="")

    class Config(Base.Config): title = Base.Config.title + "-" + "Statistics"
class Links(Base):
    """The `_links` block of a project payload: related-resource URLs."""

    self: String = Field(alias="self", title="self", description="self")
    issues: String = Field(alias="issues", title="issues", description="issues")
    merge_requests: String = Field(alias="merge_requests", title="merge_requests", description="merge_requests")
    repo_branches: String = Field(alias="repo_branches", title="repo_branches", description="repo_branches")
    labels: String = Field(alias="labels", title="labels", description="labels")
    events: String = Field(alias="events", title="events", description="events")
    members: String = Field(alias="members", title="members", description="members")

    class Config(Base.Config): title = Base.Config.title + "-" + "Links"
class Access(Base):
    """One access entry: numeric notification and access levels (optional)."""
    notification_level: Optional[Integer] = None
    access_level: Optional[Integer] = None
    class Config(Base.Config): title = Base.Config.title + "-" + "Access"
class Permissions(Base):
    """Project/group access pair attached to a project payload."""
    project_access: Optional[Access] = None
    group_access: Optional[Access] = None
    class Config(Base.Config): title = Base.Config.title + "-" + "Permissions"
class Owner(Base):
    """Minimal owner record embedded in a Project payload."""

    id: String = Field(alias="id", title="id", description="id")
    name: String = Field(alias="name", title="name", description="name")
    created_at: Optional[String] = Field(alias="created_at", title="created_at", description="created_at")

    class Config(Base.Config): title = Base.Config.title + "-" + "Owner"
class Project(Base):
    """
    A single GitLab project record. Every field's alias, title, and
    description equal the API attribute name, declared one per line.
    """
    id: Integer = Field(alias="id", title="id", description="id")
    created_at: Date = Field(alias="created_at", title="created_at", description="created_at")
    forks_count: Integer = Field(alias="forks_count", title="forks_count", description="forks_count")
    star_count: Integer = Field(alias="star_count", title="star_count", description="star_count")
    description: Optional[String] = Field(alias="description", title="description", description="description")
    default_branch: String = Field(alias="default_branch", title="default_branch", description="default_branch")
    visibility: Optional[String] = Field(alias="visibility", title="visibility", description="visibility")
    ssh_url_to_repo: String = Field(alias="ssh_url_to_repo", title="ssh_url_to_repo", description="ssh_url_to_repo")
    http_url_to_repo: String = Field(alias="http_url_to_repo", title="http_url_to_repo", description="http_url_to_repo")
    web_url: String = Field(alias="web_url", title="web_url", description="web_url")
    readme_url: Optional[String] = Field(alias="readme_url", title="readme_url", description="readme_url")
    tag_list: Optional[List] = Field(alias="tag_list", title="tag_list", description="tag_list")
    owner: Optional[Owner] = Field(alias="owner", title="owner", description="owner")
    name: String = Field(alias="name", title="name", description="name")
    name_with_namespace: String = Field(alias="name_with_namespace", title="name_with_namespace", description="name_with_namespace")
    path: String = Field(alias="path", title="path", description="path")
    path_with_namespace: String = Field(alias="path_with_namespace", title="path_with_namespace", description="path_with_namespace")
    issues_enabled: Optional[Boolean] = Field(alias="issues_enabled", title="issues_enabled", description="issues_enabled")
    open_issues_count: Optional[Integer] = Field(alias="open_issues_count", title="open_issues_count", description="open_issues_count")
    merge_requests_enabled: Optional[Boolean] = Field(alias="merge_requests_enabled", title="merge_requests_enabled", description="merge_requests_enabled")
    jobs_enabled: Optional[Boolean] = Field(alias="jobs_enabled", title="jobs_enabled", description="jobs_enabled")
    wiki_enabled: Optional[Boolean] = Field(alias="wiki_enabled", title="wiki_enabled", description="wiki_enabled")
    snippets_enabled: Optional[Boolean] = Field(alias="snippets_enabled", title="snippets_enabled", description="snippets_enabled")
    can_create_merge_request_in: Optional[Boolean] = Field(alias="can_create_merge_request_in", title="can_create_merge_request_in", description="can_create_merge_request_in")
    resolve_outdated_diff_discussions: Optional[Boolean] = Field(alias="resolve_outdated_diff_discussions", title="resolve_outdated_diff_discussions", description="resolve_outdated_diff_discussions")
    container_registry_enabled: Optional[Boolean] = Field(alias="container_registry_enabled", title="container_registry_enabled", description="container_registry_enabled")
    last_activity_at: Optional[Date] = Field(alias="last_activity_at", title="last_activity_at", description="last_activity_at")
    creator_id: Optional[Integer] = Field(alias="creator_id", title="creator_id", description="creator_id")
    namespace: Namespace = Field(alias="namespace", title="namespace", description="namespace")
    import_status: Optional[String] = Field(alias="import_status", title="import_status", description="import_status")
    import_error: Optional[String] = Field(alias="import_error", title="import_error", description="import_error")
    permissions: Optional[Permissions] = Field(alias="permissions", title="permissions", description="permissions")
    archived: Optional[Boolean] = Field(alias="archived", title="archived", description="archived")
    avatar_url: Optional[String] = Field(alias="avatar_url", title="avatar_url", description="avatar_url")
    shared_runners_enabled: Optional[Integer] = Field(alias="shared_runners_enabled", title="shared_runners_enabled", description="shared_runners_enabled")
    runners_token: Optional[String] = Field(alias="runners_token", title="runners_token", description="runners_token")
    ci_default_git_depth: Optional[Integer] = Field(alias="ci_default_git_depth", title="ci_default_git_depth", description="ci_default_git_depth")
    ci_forward_deployment_enabled: Optional[Boolean] = Field(alias="ci_forward_deployment_enabled", title="ci_forward_deployment_enabled", description="ci_forward_deployment_enabled")
    public_jobs: Optional[Boolean] = Field(alias="public_jobs", title="public_jobs", description="public_jobs")
    shared_with_groups: Optional[List] = Field(alias="shared_with_groups", title="shared_with_groups", description="shared_with_groups")
    only_allow_merge_if_pipeline_succeeds: Optional[Boolean] = Field(alias="only_allow_merge_if_pipeline_succeeds", title="only_allow_merge_if_pipeline_succeeds", description="only_allow_merge_if_pipeline_succeeds")
    allow_merge_on_skipped_pipeline: Optional[Boolean] = Field(alias="allow_merge_on_skipped_pipeline", title="allow_merge_on_skipped_pipeline", description="allow_merge_on_skipped_pipeline")
    restrict_user_defined_variables: Optional[Boolean] = Field(alias="restrict_user_defined_variables", title="restrict_user_defined_variables", description="restrict_user_defined_variables")
    only_allow_merge_if_all_discussions_are_resolved: Optional[Boolean] = Field(alias="only_allow_merge_if_all_discussions_are_resolved", title="only_allow_merge_if_all_discussions_are_resolved", description="only_allow_merge_if_all_discussions_are_resolved")
    remove_source_branch_after_merge: Optional[Boolean] = Field(alias="remove_source_branch_after_merge", title="remove_source_branch_after_merge", description="remove_source_branch_after_merge")
    request_access_enabled: Optional[Boolean] = Field(alias="request_access_enabled", title="request_access_enabled", description="request_access_enabled")
    merge_method: Optional[String] = Field(alias="merge_method", title="merge_method", description="merge_method")
    auto_devops_enabled: Optional[Boolean] = Field(alias="auto_devops_enabled", title="auto_devops_enabled", description="auto_devops_enabled")
    auto_devops_deploy_strategy: Optional[String] = Field(alias="auto_devops_deploy_strategy", title="auto_devops_deploy_strategy", description="auto_devops_deploy_strategy")
    repository_storage: Optional[String] = Field(alias="repository_storage", title="repository_storage", description="repository_storage")
    approvals_before_merge: Optional[Integer] = Field(alias="approvals_before_merge", title="approvals_before_merge", description="approvals_before_merge")
    mirror: Optional[Boolean] = Field(alias="mirror", title="mirror", description="mirror")
    mirror_user_id: Optional[Integer] = Field(alias="mirror_user_id", title="mirror_user_id", description="mirror_user_id")
    mirror_trigger_builds: Optional[Boolean] = Field(alias="mirror_trigger_builds", title="mirror_trigger_builds", description="mirror_trigger_builds")
    only_mirror_protected_branches: Optional[Boolean] = Field(alias="only_mirror_protected_branches", title="only_mirror_protected_branches", description="only_mirror_protected_branches")
    mirror_overwrites_diverged_branches: Optional[Boolean] = Field(alias="mirror_overwrites_diverged_branches", title="mirror_overwrites_diverged_branches", description="mirror_overwrites_diverged_branches")
    external_authorization_classification_label: Optional[String] = Field(alias="external_authorization_classification_label", title="external_authorization_classification_label", description="external_authorization_classification_label")
    packages_enabled: Optional[Boolean] = Field(alias="packages_enabled", title="packages_enabled", description="packages_enabled")
    service_desk_enabled: Optional[Boolean] = Field(alias="service_desk_enabled", title="service_desk_enabled", description="service_desk_enabled")
    service_desk_address: Optional[String] = Field(alias="service_desk_address", title="service_desk_address", description="service_desk_address")
    autoclose_referenced_issues: Optional[Boolean] = Field(alias="autoclose_referenced_issues", title="autoclose_referenced_issues", description="autoclose_referenced_issues")
    suggestion_commit_message: Optional[String] = Field(alias="suggestion_commit_message", title="suggestion_commit_message", description="suggestion_commit_message")
    statistics: Optional[Statistics] = Field(alias="statistics", title="statistics", description="statistics")
    container_registry_image_prefix: Optional[String] = Field(alias="container_registry_image_prefix", title="container_registry_image_prefix", description="container_registry_image_prefix")
    _links: Optional[Links] = Field(alias="_links", title="_links", description="_links")
    class Config(Base.Config): title = Base.Config.title + "-" + "Project"
class Projects(Base):
    """Response wrapper: a flat list of Project records."""
    Response: List[Project]
    class Config(Base.Config): title = Base.Config.title + "-" + "Projects"
class Pages(Base):
    """Paged response wrapper: maps a page number (presumably the API's
    page index — confirm) to that page's list of Project records."""
    Response: Dictionary[Integer, List[Project]]
    class Config(Base.Config): title = Base.Config.title + "-" + "Pages"
| 20,732 | 6,101 |
from typing import List

# Raw lines of a puzzle input file (trailing newlines preserved).
FileData = List[str]


def read_file(filename: str) -> FileData:
    """Read *filename* and return its lines, newlines included."""
    with open(filename, "r") as handle:
        return handle.readlines()
def first_exercise(data: FileData) -> int:
    """Solve part one of the puzzle. TODO: not implemented — returns 0."""
    return 0
def second_exercise(data: FileData) -> int:
    """Solve part two of the puzzle. TODO: not implemented — returns 0."""
    return 0
def main():
    """Load the puzzle input from the file named "data" and print both answers."""
    data = read_file("data")
    print("The result of the first exercise is: ", first_exercise(data))
    print("The result of the second exercise is: ", second_exercise(data))
if __name__ == "__main__":
    main()
| 509 | 174 |