id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6593786 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MPK M2-editor
# No-UI module
# Copyright (C) 2017 <NAME> dam.pic AT free.fr
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TODO
## get programmes
## send programmes
## single
## all
# get ram
## send ram
## interface
## tabs for progs
## autofill
## load/save config
# single program
# multi programs
## .mk2
# mk1
# human readable
# live update
# factory reset
# package
# PyPI?
import sys
import os
import rtmidi
import time
from collections import OrderedDict
from pprint import pprint
class Akai_MPK_Mini():
    """SysEx configuration driver for the Akai MPK Mini mk2 (no-UI core).

    Holds the byte-for-byte programme image (``midi_config``) and the MIDI
    request/response plumbing used to read and write programmes and RAM.

    NOTE(review): several methods call ``get_active_tab_index``,
    ``fill_tab`` and ``get_tab_programme``, and ``show_autofill`` uses
    ``self.autofill_window`` -- none of these are defined in this no-UI
    module, so presumably a UI subclass provides them; confirm before
    using this class stand-alone.
    """

    # SysEx request asking the controller for a programme dump;
    # byte index 7 selects the programme slot [1, 4] (0 addresses RAM).
    GET_CONFIG = [240, 71, 0, 38, 102, 0, 1, 1, 247]

    def __init__(self):
        """Build the default programme image and open the MIDI ports."""
        # Ordered byte layout of one programme, exactly as transmitted over
        # SysEx; keys are descriptive names, values are the default bytes.
        self.midi_config = OrderedDict((
            ("sysex_0", 240), # [240]
            ("sysex_1", 71), # [71]
            ("sysex_2", 0), # [0]
            ("sysex_3", 38), # [38]
            ("sysex_4", 103), # [103] send 100; RAM 102
            ("sysex_5", 0), # [0] get RAM 1
            ("sysex_6", 109), # [109]
            ("programme", 1), # [1,4]
            ("pad_channel", 9), # [0,15]
            ("key_channel", 0), # [0,15]
            ("key_octave", 4), # [0,8] (-4,4)
            ("arp_on", 0), # [0=OFF; 1=ON] no effect on programmes...?
            ("arp_mode", 0), # [0=UP; 1=DOWN; 2=EXCLUSIVE; 3=INCLUSIVE; 4=ORDER; 5=RANDOM]
            ("arp_time", 5), # div [0=1/4; 1=1/4T; 2=1/8; 3=1/8T; 4=1/16; 5=1/16T; 6=1/32; 7=1/32T]
            ("arp_clock", 0), # [0=INTERNAL; 1=EXTERNAL]
            ("arp_latch", 0), # [0=OFF; 1=ON]
            ("arp_swing", 0), # [0=50%; 1=55%; 2=57%; 3=59%; 4=61%; 5=64%]
            ("arp_taps", 3), # [2,4]
            # The arpeggiator tempo is encoded using 2 bytes
            # the first one may be 0 or 1
            # if the first one is 0
            # then the second one is in [30,127]
            # if the first one is 1
            # then the second one is in [0,112] (128,240)
            ("arp_tempo_0", 0), # 0 [0,1]
            ("arp_tempo_1", 79), # 1 [30,127; 0,112]
            ("arp_octave", 1), # [0,3]
            ("x_axis_type", 0), # [0=PITCHBEND; 1=CC1; 2=CC2]
            ("x_axis_L", 80), # [0,127]
            ("x_axis_R", 81), # [0,127]
            ("y_axis_type", 1),
            ("y_axis_D", 83),
            ("y_axis_U", 82),
            # Bank A
            ("b1_p1_NT", 36), # [0,127] kick
            ("b1_p1_PC", 56), # [0,127]
            ("b1_p1_CC", 31), # [0,127]
            ("b1_p1_TP", 1), # [0=TOGGLE; 1=MOMENTARY]
            ("b1_p2_NT", 57), # crash 2
            ("b1_p2_PC", 124),
            ("b1_p2_CC", 32),
            ("b1_p2_TP", 1),
            ("b1_p3_NT", 42), # hi hat closed
            ("b1_p3_PC", 123),
            ("b1_p3_CC", 33),
            ("b1_p3_TP", 1),
            ("b1_p4_NT", 51), # ride
            ("b1_p4_PC", 118),
            ("b1_p4_CC", 34),
            ("b1_p4_TP", 1),
            ("b1_p5_NT", 38), # snare
            ("b1_p5_PC", 110),
            ("b1_p5_CC", 35),
            ("b1_p5_TP", 1),
            ("b1_p6_NT", 48), # tom
            ("b1_p6_PC", 90),
            ("b1_p6_CC", 36),
            ("b1_p6_TP", 1),
            ("b1_p7_NT", 46), # hi hat open
            ("b1_p7_PC", 87),
            ("b1_p7_CC", 37),
            ("b1_p7_TP", 1),
            ("b1_p8_NT", 53), # ride bell
            ("b1_p8_PC", 0),
            ("b1_p8_CC", 38),
            ("b1_p8_TP", 1),
            # Bank B
            ("b2_p1_NT", 8),
            ("b2_p1_PC", 23),
            ("b2_p1_CC", 39),
            ("b2_p1_TP", 0),
            ("b2_p2_NT", 9),
            ("b2_p2_PC", 24),
            ("b2_p2_CC", 40),
            ("b2_p2_TP", 0),
            ("b2_p3_NT", 10),
            ("b2_p3_PC", 25),
            ("b2_p3_CC", 41),
            ("b2_p3_TP", 0),
            ("b2_p4_NT", 11),
            ("b2_p4_PC", 26),
            ("b2_p4_CC", 42),
            ("b2_p4_TP", 0),
            ("b2_p5_NT", 12),
            ("b2_p5_PC", 27),
            ("b2_p5_CC", 43),
            ("b2_p5_TP", 0),
            ("b2_p6_NT", 13),
            ("b2_p6_PC", 28),
            ("b2_p6_CC", 44),
            ("b2_p6_TP", 0),
            ("b2_p7_NT", 14),
            ("b2_p7_PC", 29),
            ("b2_p7_CC", 45),
            ("b2_p7_TP", 0),
            ("b2_p8_NT", 15),
            ("b2_p8_PC", 30),
            ("b2_p8_CC", 46),
            ("b2_p8_TP", 0),
            # Knobs
            ("k1_CC", 1), # [0,127]
            ("k1_LO", 0), # [0,127]
            ("k1_HI", 127), # [0,127]
            ("k2_CC", 2),
            ("k2_LO", 0),
            ("k2_HI", 127),
            ("k3_CC", 3),
            ("k3_LO", 0),
            ("k3_HI", 127),
            ("k4_CC", 91),
            ("k4_LO", 0),
            ("k4_HI", 127),
            ("k5_CC", 5),
            ("k5_LO", 0),
            ("k5_HI", 127),
            ("k6_CC", 6),
            ("k6_LO", 0),
            ("k6_HI", 127),
            ("k7_CC", 7),
            ("k7_LO", 0),
            ("k7_HI", 127),
            ("k8_CC", 93),
            ("k8_LO", 0),
            ("k8_HI", 127),
            ("key_transpose", 3), # [0,24]
            ("sysex_end", 247) # [247]
        ))
        self.do_live_update = False
        self.controller_found = False
        self.midi_setup()

    def show_popup_controller_not_found(self):
        """Report that no MPK Mini was detected (console fallback)."""
        print("Controller not found")

    def rx(self, msg, data):
        """MIDI-in callback: dump the received message bytes as hex."""
        # Renamed the unpacked local from ``time`` so it no longer
        # shadows the imported time module.
        d, timestamp = msg
        h = [hex(i) for i in d]
        print(h, data)

    def midi_setup(self):
        """Open the first MIDI in/out ports whose name looks like an MPK."""
        self.mo = rtmidi.MidiOut()
        self.mi = rtmidi.MidiIn()
        is_out_open, is_in_open = False, False
        for i, p in enumerate(self.mo.get_ports()):
            if any(mpk in p for mpk in ("MPKmini", "MPK Mini")):
                self.mo.open_port(i)
                is_out_open = True
        for i, p in enumerate(self.mi.get_ports()):
            if any(mpk in p for mpk in ("MPKmini", "MPK Mini")):
                self.mi.open_port(i)
                # SysEx messages are filtered out by default; enable them.
                self.mi.ignore_types(sysex=False)
                #self.mi.set_callback(self.rx)
                is_in_open = True
        if not is_out_open and not is_in_open:
            self.show_popup_controller_not_found()
        else:
            self.controller_found = True

    def send_midi_message(self, out_message, expected_msg=117):
        """Send ``out_message`` and poll MIDI-in for the reply.

        :param out_message: raw SysEx byte list to transmit.
        :param expected_msg: expected reply length in bytes, or None to
            poll only until the input queue is empty.
        :return: reply byte list (MIDI timestamp stripped), or None.
        """
        in_message = [[]]
        # print('out:', out_message)
        self.mo.send_message(out_message)
        time.sleep(0.1)
        # O Karnaugh, help me!
        # Poll until either (expected_msg is None) the input queue drains,
        # or (expected_msg set) a reply of the expected length arrives.
        while ((expected_msg is None
                and in_message is not None)
               or (expected_msg is not None
                   and (
                       in_message is None
                       or len(in_message[0]) == 0
                       or type(in_message) is tuple
                       and len(in_message[0]) != expected_msg
                   )
                   )
               ):
            in_message = self.mi.get_message()
            # print('in:', in_message)
        if in_message is not None:
            in_message = in_message[0]  # strip midi time
        return in_message

    def get_all_programmes(self):
        """Download every programme (slots 1-4) from the controller."""
        for p_i in range(1, 5):
            self.get_programme(p_i)

    def get_active_programme(self):
        """Download the programme shown in the active UI tab."""
        p_i = self.get_active_tab_index()
        self.get_programme(p_i)

    def get_programme(self, p_i):
        """Request programme ``p_i`` (1-4) and fill its tab with the reply."""
        out_message = self.GET_CONFIG[:]
        out_message[7] = p_i
        in_message = self.send_midi_message(out_message, 117)
        self.fill_tab(in_message, p_i)

    def copy_to(self, p_to):
        """Copy the active tab's programme into tab ``p_to``."""
        p_from = self.get_active_tab_index()
        conf = self.get_tab_programme(p_from)
        self.fill_tab(conf, p_to)

    def send_all_programmes(self):
        """Upload every programme (slots 1-4) to the controller.

        BUG FIX: this previously looped over ``range(4)`` (slots 0-3)
        while programme slots are numbered 1-4 everywhere else (see
        GET_CONFIG and get_all_programmes); slot 0 addresses RAM.
        """
        for p_i in range(1, 5):
            self.send_programme(p_i)

    def send_active_programme(self):
        """Upload the programme of the active UI tab."""
        p_i = self.get_active_tab_index()
        self.send_programme(p_i)

    def send_programme(self, p_i):
        """Upload tab ``p_i``'s programme to the controller."""
        message = self.get_tab_programme(p_i)
        message[4] = 100  # per the sysex_4 note above: 100 = send
        self.send_midi_message(message, None)

    def get_RAM(self):
        """Read the controller's RAM (live settings) into the active tab."""
        p_i = self.get_active_tab_index()
        out_message = self.GET_CONFIG[:]
        out_message[5] = 0
        out_message[7] = 0  # slot 0 addresses RAM
        print(out_message)
        in_message = self.send_midi_message(out_message, 117)
        print(in_message)
        self.fill_tab(in_message, p_i)

    def send_RAM(self):
        """Write the default configuration image straight to RAM."""
        #p_i = self.get_active_tab_index()
        #out_message = self.get_tab_programme(p_i)
        out_message = list(self.midi_config.values())
        print(out_message)
        out_message[4] = 100  # per the sysex_4 note above: 100 = send
        out_message[7] = 0    # slot 0 addresses RAM
        self.send_midi_message(out_message, None)

    # I/O
    def load_mk2(self, filepath):
        """Load a .mk2 programme byte dump into the active tab."""
        print('Loading', filepath)
        with open(filepath, 'rb') as f:
            conf = [int(i) for i in f.read()]
        # print(len(conf))
        self.fill_tab(conf, self.get_active_tab_index())

    def save_mk2(self, filepath):
        """Save the active tab's programme as a .mk2 byte dump."""
        print('Saving', filepath)
        conf = self.get_tab_programme(self.get_active_tab_index())
        with open(filepath, 'wb') as f:
            for b in conf:
                f.write(b.to_bytes(1, 'little'))

    # Autofill
    def show_autofill(self):
        """Show the autofill window (provided by the UI subclass)."""
        self.autofill_window.show()
if __name__ == "__main__":
    # Smoke test: push the default configuration image to controller RAM,
    # dump it, then idle forever (MIDI I/O happens on rtmidi's own thread).
    mpk = Akai_MPK_Mini()
    mpk.send_RAM()
    print(mpk.midi_config)
    while True:
        time.sleep(1)
| StarcoderdataPython |
1606955 | import os
import random
from glob import glob
import cv2
import numpy as np
from augraphy.augmentations.lib import sobel
from augraphy.base.augmentation import Augmentation
from augraphy.utilities import *
class BleedThrough(Augmentation):
    """Emulates bleed through effect from the combination of ink bleed and
    gaussian blur operations.
    :param intensity_range: Pair of floats determining the range from which
           noise intensity is sampled.
    :type intensity_range: tuple, optional
    :param color_range: Pair of ints determining the range from which color
           noise is sampled.
    :type color_range: tuple, optional
    :param ksize: Tuple of height/width pairs from which to sample the kernel
           size. Higher value increases the spreadness of bleeding effect.
    :type ksize: tuple, optional
    :param sigmaX: Standard deviation of the kernel along the x-axis.
    :type sigmaX: float, optional
    :param alpha: Intensity of bleeding effect, recommended value range from
           0.1 to 0.5.
    :type alpha: float, optional
    :param offsets: Tuple of x and y offset pair to shift the bleed through
           effect from original input.
    :type offsets: tuple, optional
    :param dpi: DPI of foreground image for bleedthrough effect.
           Select either 100, 200 or 300.
    :type dpi: int, optional
    :param p: The probability this Augmentation will be applied.
    :type p: float, optional
    """

    def __init__(
        self,
        intensity_range=(0.1, 0.2),
        color_range=(0, 224),
        ksize=(17, 17),
        sigmaX=0,
        alpha=0.3,
        offsets=(10, 20),
        dpi=100,
        p=1,
    ):
        super().__init__(p=p)
        self.intensity_range = intensity_range
        self.color_range = color_range
        self.ksize = ksize
        self.sigmaX = sigmaX
        self.alpha = alpha
        self.offsets = offsets
        self.dpi = dpi

    # Constructs a string representation of this Augmentation.
    def __repr__(self):
        return f"BleedThrough(intensity_range={self.intensity_range}, color_range={self.color_range}, ksize={self.ksize}, sigmaX={self.sigmaX},alpha={self.alpha},offsets={self.offsets},dpi={self.dpi},p={self.p})"

    # Blend images to produce bleedthrough effect
    def blend(self, img, img_bleed, alpha):
        """Alpha-blend the bleed layer onto ``img``.

        BUG FIX: the ``alpha`` parameter was previously ignored in favour
        of ``self.alpha``; it is now honoured (the only internal caller
        passes ``self.alpha``, so existing behaviour is unchanged).
        """
        # convert to single channel to avoid unnecessary noise in colour image
        if len(img_bleed.shape) > 2:
            img_bleed_input = cv2.cvtColor(img_bleed.astype("uint8"), cv2.COLOR_BGR2GRAY)
        else:
            img_bleed_input = img_bleed.astype("uint8")
        ob = OverlayBuilder("normal", img_bleed_input, img, 1, (1, 1), "center", 0, alpha)
        return ob.build_overlay()

    # Offset image so that bleedthrough effect is visible and not stacked with input image
    def generate_offset(self, img_bleed, offsets):
        """Shift the bleed layer down/right by (x_offset, y_offset) pixels.

        NOTE: shifts in place; the vacated top/left band keeps its
        original pixels -- presumably acceptable for this effect.
        """
        x_offset = offsets[0]
        y_offset = offsets[1]
        if (x_offset == 0) and (y_offset == 0):
            return img_bleed
        elif x_offset == 0:
            img_bleed[y_offset:, :] = img_bleed[:-y_offset, :]
        elif y_offset == 0:
            img_bleed[:, x_offset:] = img_bleed[:, :-x_offset]
        else:
            img_bleed[y_offset:, x_offset:] = img_bleed[:-y_offset, :-x_offset]
        return img_bleed

    # Preprocess and create bleeding ink effect
    def generate_bleeding_ink(self, img, intensity_range, color_range, ksize, sigmaX):
        """Add random colour noise on the image's sobel edges, then blur it."""
        intensity = random.uniform(intensity_range[0], intensity_range[1])
        # Replace a pixel with random colour noise only where the sobel
        # edge map is saturated (255) and a biased coin flip passes.
        add_noise_fn = (
            lambda x, y: random.randint(color_range[0], color_range[1])
            if (y == 255 and random.random() < intensity)
            else x
        )
        # NOTE: np.vectorize runs per-pixel in Python; slow on large pages.
        add_noise = np.vectorize(add_noise_fn)
        sobelized = sobel(img)
        img_noise = np.double(add_noise(img, sobelized))
        img_bleed = cv2.GaussianBlur(img_noise, ksize=ksize, sigmaX=sigmaX)
        return img_bleed

    # create foreground image for bleedthrough effect
    def create_bleedthrough_foreground(self, image):
        """Fetch (or synthesise) the reverse-side image for the effect.

        Tries to download a random grayscale page scan from figshare for
        the configured DPI; on any failure falls back to a flipped and
        mirrored copy of the input image.
        """
        try:
            # Id for figshare published grayscale image
            if self.dpi == 300:
                article_ID = "19227981"
            elif self.dpi == 200:
                article_ID = "19227879"
            else:
                article_ID = "19210698"
            # BUG FIX: the glob folder previously used a different
            # capitalisation ("figshare_bleedthrough/") from the download
            # directory ("figshare_BleedThrough/"), so downloaded files
            # were never found on case-sensitive filesystems.
            foreground_folder = os.path.join(os.getcwd(), "figshare_BleedThrough")
            # create figshare downloader
            fsdl = FigshareDownloader(directory="figshare_BleedThrough/")
            # download files
            fsdl.download_random_file_from_article(article_ID)
            # file path list
            foreground_images_path = glob(os.path.join(foreground_folder, "*.png"), recursive=True)
            # get random image path
            random_path = foreground_images_path[random.randint(0, len(foreground_images_path) - 1)]
            # get random image
            image_bleedthrough_foreground = cv2.imread(random_path)
            # resize foreground to match the input page size
            image_bleedthrough_foreground = cv2.resize(
                image_bleedthrough_foreground,
                (image.shape[1], image.shape[0]),
                interpolation=cv2.INTER_AREA,
            )
        # failed to download, flip and mirror image to get bleedthrough foreground
        except Exception:
            image_flip = cv2.flip(image, 0)
            image_bleedthrough_foreground = cv2.flip(image_flip, 1)
        return image_bleedthrough_foreground

    # Applies the Augmentation to input data.
    def __call__(self, image, layer=None, force=False):
        if force or self.should_run():
            image = image.copy()
            image_bleedthrough_foreground = self.create_bleedthrough_foreground(image)
            image_bleed = self.generate_bleeding_ink(
                image_bleedthrough_foreground,
                self.intensity_range,
                self.color_range,
                self.ksize,
                self.sigmaX,
            )
            image_bleed_offset = self.generate_offset(image_bleed, self.offsets)
            image_bleedthrough = self.blend(image, image_bleed_offset, self.alpha)
            return image_bleedthrough
| StarcoderdataPython |
4901064 | <filename>data/prepare_data.py
#!/usr/bin/env python3
import sys
import argparse
import os
import numpy
import subprocess
from essentia.standard import MonoLoader
import soundfile
# Output directories (created next to this script): segments that include the
# vocal stem go to data-vocal/, vocal-free mixes go to data-hpss/.
vocal_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data-vocal")
novocal_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data-hpss")
def parse_args():
    """Build the command-line interface for this script and evaluate it
    against ``sys.argv``, returning the parsed namespace."""
    cli = argparse.ArgumentParser(
        prog="prepare_data",
        description="Prepare evaluation datasets for HPSS from instrument stems",
    )
    cli.add_argument(
        "--sample-rate",
        type=int,
        default=44100,
        help="sample rate (default: 44100 Hz)",
    )
    cli.add_argument(
        "stem_dirs", nargs="+", help="directories containing instrument stems"
    )
    cli.add_argument(
        "--vocals", action="store_true", help="include vocals in the mix"
    )
    cli.add_argument("--track-limit", type=int, default=-1, help="limit to n tracks")
    cli.add_argument(
        "--segment-limit",
        type=int,
        default=sys.maxsize,
        help="limit to n segments per track",
    )
    cli.add_argument(
        "--segment-offset",
        type=int,
        default=0,
        help="offset of segment to start from (useful to skip intros)",
    )
    cli.add_argument(
        "--segment-size", type=float, default=30.0, help="segment size in seconds"
    )
    return cli.parse_args()
def main():
    """Cut instrument-stem songs into fixed-size evaluation segments.

    For every directory of wav stems found under the given stem_dirs this
    builds a harmonic-only mix, keeps the percussive (drum) stem, optionally
    the vocal stem, and the full mix, then writes them out in segment_size
    chunks as ``<seq><seg>_{harmonic,mix,percussive,vocal}.wav``.

    :return: process exit code (always 0).
    """
    args = parse_args()

    # Output directory depends on whether vocals are part of the mix.
    data_dir = vocal_dir if args.vocals else novocal_dir
    if not os.path.isdir(data_dir):
        os.mkdir(data_dir)

    seq = 0
    for sd in args.stem_dirs:
        for song in os.scandir(sd):
            for dir_name, _, file_list in os.walk(song):
                instruments = [
                    os.path.join(dir_name, f) for f in file_list if f.endswith(".wav")
                ]
                if not instruments:
                    continue
                print("Found directory containing wav files: %d" % seq)
                print(os.path.basename(dir_name).replace(" ", "_"))
                loaded_wavs = [None] * len(instruments)
                drum_track_index = -1
                vocal_track_index = -1
                mix_track_index = -1
                for i, instrument in enumerate(instruments):
                    # Classify each stem by its filename convention.
                    if "drum" in instrument.lower():
                        drum_track_index = i
                    elif "vocal" in instrument.lower():
                        vocal_track_index = i
                    elif "mix" in instrument.lower():
                        mix_track_index = i
                    # automatically resamples for us
                    loaded_wavs[i] = MonoLoader(
                        filename=instrument, sampleRate=args.sample_rate
                    )()
                track_len = len(loaded_wavs[0])
                # BUG FIX: the original asserted a bare generator expression,
                # which is always truthy, so the check never ran; verify for
                # real (after loading) that every stem has the same length.
                assert all(
                    len(wav) == track_len for wav in loaded_wavs
                ), "all stems of a song must have the same length"
                # Harmonic mix = every stem except drums, vocals and full mix.
                harmonic_mix = sum(
                    [
                        l
                        for i, l in enumerate(loaded_wavs)
                        if i
                        not in [
                            drum_track_index,
                            vocal_track_index,
                            mix_track_index,
                        ]
                    ]
                )
                if args.vocals:
                    full_mix = (
                        harmonic_mix
                        + loaded_wavs[drum_track_index]
                        + loaded_wavs[vocal_track_index]
                    )
                else:
                    full_mix = harmonic_mix + loaded_wavs[drum_track_index]
                seg_samples = int(numpy.floor(args.segment_size * args.sample_rate))
                total_segs = int(numpy.floor(track_len / seg_samples))
                seg_limit = min(total_segs - 1, args.segment_limit)
                for seg in range(seg_limit):
                    if seg < args.segment_offset:
                        continue
                    # File prefix: 3-digit track sequence + 4-digit segment.
                    seqstr = "%03d%04d" % (seq, seg)
                    left = seg * seg_samples
                    right = (seg + 1) * seg_samples
                    harm_path = os.path.join(
                        data_dir, "{0}_harmonic.wav".format(seqstr)
                    )
                    mix_path = os.path.join(data_dir, "{0}_mix.wav".format(seqstr))
                    perc_path = os.path.join(
                        data_dir, "{0}_percussive.wav".format(seqstr)
                    )
                    vocal_path = os.path.join(
                        data_dir, "{0}_vocal.wav".format(seqstr)
                    )
                    soundfile.write(
                        harm_path, harmonic_mix[left:right], args.sample_rate
                    )
                    soundfile.write(
                        mix_path, full_mix[left:right], args.sample_rate
                    )
                    # write the drum track
                    soundfile.write(
                        perc_path,
                        loaded_wavs[drum_track_index][left:right],
                        args.sample_rate,
                    )
                    if args.vocals:
                        # write the vocal track
                        soundfile.write(
                            vocal_path,
                            loaded_wavs[vocal_track_index][left:right],
                            args.sample_rate,
                        )
                seq += 1
                if args.track_limit > -1:
                    if seq == args.track_limit:
                        return 0
    return 0
# Script entry point: propagate main()'s return code to the shell.
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
9681036 | <gh_stars>1-10
from tempfile import NamedTemporaryFile
import contextlib
import os
import tarfile
import zipfile
from PIL import Image
class CbtFile(object):
    """Synthetic .cbt (tar-of-images) comic-book fixture.

    Describes 15 generated PNG pages and can package them into a real
    tar archive for tests. (A stray dead ``pass`` statement after
    ``expected_pages`` was removed.)
    """

    ext = '.cbt'

    @property
    def pages(self):
        """Page descriptors: filename, size, orientation and the single
        marker pixel set to a non-background colour."""
        widths = (256, 128, 64)
        heights = (128, 128, 128)
        orientation = ('horizontal', 'horizontal', 'vertical')
        return [{
            'filename': 'base/path/page-{:x}.png'.format(n),
            'width': widths[n % 3],
            'height': heights[n % 3],
            'orientation': orientation[n % 3],
            'black_pixel': (n, n),
        } for n in range(15)]

    @property
    def expected_pages(self):
        """Pages as a consumer should report them: filenames rebased
        under 'original/', marker-pixel key dropped."""
        return [
            dict(
                width=x['width'],
                height=x['height'],
                orientation=x['orientation'],
                filename=os.path.join('original', x['filename']),
            )
            for x in self.pages]

    @contextlib.contextmanager
    def packaged(self):
        """Yield a temp file containing the pages packed as a tar archive."""
        with NamedTemporaryFile() as f:
            with tarfile.open(fileobj=f, mode='w') as ar:
                for o in self.pages:
                    with NamedTemporaryFile() as mf:
                        im = Image.new(
                            'RGB', (o['width'], o['height']))
                        # NOTE(review): despite the key name, the marker
                        # pixel is near-white (0xfe), not black.
                        im.putpixel(o['black_pixel'], (0xfe, 0xfe, 0xfe))
                        im.save(mf, 'png')
                        mf.flush()
                        ar.add(mf.name, o['filename'])
            f.flush()
            f.seek(0)
            yield f
class CbzFile(CbtFile):
    """Synthetic .cbz (zip-of-images) comic-book fixture."""

    ext = '.cbz'

    @contextlib.contextmanager
    def packaged(self):
        """Yield a temp file containing the pages packed as a zip archive,
        plus one non-image member ('derp.db')."""
        with NamedTemporaryFile() as archive_file:
            with zipfile.ZipFile(archive_file, mode='w') as archive:
                for page in self.pages:
                    with NamedTemporaryFile() as image_file:
                        canvas = Image.new('RGB', (page['width'], page['height']))
                        canvas.putpixel(page['black_pixel'], (0xfe, 0xfe, 0xfe))
                        canvas.save(image_file, 'png')
                        image_file.flush()
                        archive.write(image_file.name, page['filename'])
                archive.writestr('derp.db', 'abcedfg')
            archive_file.flush()
            archive_file.seek(0)
            yield archive_file
| StarcoderdataPython |
1956747 | import json
import os
from typing import List
from urllib.parse import urlencode
import requests
from app.assess.models.application import Application
from app.assess.models.fund import Fund
from app.assess.models.round import Round
from app.config import APPLICATION_STORE_API_HOST
from app.config import FLASK_ROOT
from app.config import FUND_STORE_API_HOST
from app.config import ROUND_STORE_API_HOST
# Fund Store Endpoints
FUNDS_ENDPOINT = "/funds/"
FUND_ENDPOINT = "/funds/{fund_id}"
# Round Store Endpoints
# NOTE(review): ROUNDS_ENDPOINT is identical to FUND_ENDPOINT's path --
# presumably the round store serves all rounds at /funds/{fund_id}; confirm.
ROUNDS_ENDPOINT = "/funds/{fund_id}"
ROUND_ENDPOINT = "/funds/{fund_id}/rounds/{round_id}"
# Application Store Endpoints
APPLICATION_ENDPOINT = "/applications/{application_id}"
APPLICATION_STATUS_ENDPOINT = "/applications/{application_id}/status"
# Query-string placeholder is filled with urlencode()d search parameters.
APPLICATION_SEARCH_ENDPOINT = "/applications?{params}"
def get_data(endpoint: str):
    """Fetch JSON from *endpoint*.

    HTTPS endpoints are fetched over the network (None on non-200);
    anything else is served from the local test fixture file.
    """
    if not endpoint.startswith("https://"):
        return get_local_data(endpoint)
    response = requests.get(endpoint)
    if response.status_code != 200:
        return None
    return response.json()
def get_local_data(endpoint: str):
    """Look up *endpoint* in the bundled endpoint_data.json fixture.

    :return: the stored payload, or None when the endpoint is unknown.
    """
    api_data_json = os.path.join(
        FLASK_ROOT, "tests", "api_data", "endpoint_data.json"
    )
    # Context manager guarantees the handle is closed even if json.load
    # raises (the original open()/close() pair leaked it on error).
    with open(api_data_json) as fp:
        api_data = json.load(fp)
    return api_data.get(endpoint)
def call_search_applications(params: dict):
    """Query the application store's search endpoint with *params*."""
    search_url = APPLICATION_STORE_API_HOST + APPLICATION_SEARCH_ENDPOINT.format(
        params=urlencode(params)
    )
    return get_data(search_url)
def get_funds() -> List[Fund] | None:
    """Return every fund known to the fund store, or None when empty."""
    response = get_data(FUND_STORE_API_HOST + FUNDS_ENDPOINT)
    if not response:
        return None
    return [Fund.from_json(fund) for fund in response]
def get_fund(fund_id: str) -> Fund | None:
    """Fetch a single fund by id, attaching its rounds when present."""
    response = get_data(FUND_STORE_API_HOST + FUND_ENDPOINT.format(fund_id=fund_id))
    if not response or "fund_id" not in response:
        return None
    fund = Fund.from_json(response)
    for fund_round in response.get("rounds") or []:
        fund.add_round(Round.from_json(fund_round))
    return fund
def get_rounds(fund_id: str) -> Fund | List:
    """List the rounds configured for *fund_id* (empty list when none)."""
    response = get_data(
        ROUND_STORE_API_HOST + ROUNDS_ENDPOINT.format(fund_id=fund_id)
    )
    if not response:
        return []
    return [Round.from_json(round_data) for round_data in response]
def get_round_with_applications(fund_id: str, round_id: str) -> Round | None:
    """Fetch one round and attach every application submitted inside its
    opens/deadline window; None when the round is unknown."""
    round_response = get_data(
        ROUND_STORE_API_HOST
        + ROUND_ENDPOINT.format(fund_id=fund_id, round_id=round_id)
    )
    if not round_response or "round_id" not in round_response:
        return None
    fund_round = Round.from_json(round_response)
    search_params = {
        "fund_id": fund_id,
        "datetime_start": fund_round.opens,
        "datetime_end": fund_round.deadline,
    }
    for application in call_search_applications(search_params) or []:
        fund_round.add_application(Application.from_json(application))
    return fund_round
def get_applications(params: dict) -> List[Application] | None:
    """Search the application store; None when nothing matches."""
    response = call_search_applications(params)
    if not response:
        return None
    return [Application.from_json(application_data) for application_data in response]
def get_todo_summary() -> dict | None:
    """Summarise application counts by status.

    :return: dict with "completed", "assessing" and "not_started" counts,
        or None when the application store returns nothing.
    """
    # BUG FIX: call_search_applications expects a params mapping for
    # urlencode(), not the string "" (both happen to encode to an empty
    # query string, but only the mapping matches the declared contract).
    applications = call_search_applications({})
    if not applications:
        return None
    # Single pass over the results instead of three list-building scans.
    todo_summary = {"completed": 0, "assessing": 0, "not_started": 0}
    for application in applications:
        key = application["status"].lower()
        if key in todo_summary:
            todo_summary[key] += 1
    return todo_summary
def get_application(identifier: str) -> Application | None:
    """Fetch one application by id; None when it does not exist."""
    endpoint = APPLICATION_STORE_API_HOST + APPLICATION_ENDPOINT.format(
        application_id=identifier
    )
    response = get_data(endpoint)
    if not response or "id" not in response:
        return None
    return Application.from_json(response)
def get_application_status(application_id: str) -> Application | None:
    """Fetch an application's status record; None when it does not exist.

    (A leftover debug ``print`` of the endpoint URL was removed.)
    """
    application_status_endpoint = (
        APPLICATION_STORE_API_HOST
        + APPLICATION_STATUS_ENDPOINT.format(application_id=application_id)
    )
    application_status_response = get_data(application_status_endpoint)
    if application_status_response and "id" in application_status_response:
        return Application.from_json(application_status_response)
    return None
def get_questions(application_id):
    """Return an application's question statuses from the application store.

    :param application_id: id of the application to look up.
    :return: dict mapping question title -> status, or None when the
        store has no data for this application.
    """
    status_endpoint = (
        APPLICATION_STORE_API_HOST
        + APPLICATION_STATUS_ENDPOINT.format(application_id=application_id)
    )
    questions = get_data(status_endpoint)
    if questions:
        # Plain dict() copy replaces the original identity comprehension
        # ({title: status for title, status in ...}); same result.
        return dict(questions)
| StarcoderdataPython |
85628 | <reponame>CiscoTestAutomation/genietelemetrylibs
'''
GenieTelemetry CpuUtilizationCheck Plugin
'''
# Python
import copy
import logging
# argparse
from argparse import ArgumentParser
# ATS
from ats.log.utils import banner
from ats.utils import parser as argparse
from ats.datastructures import classproperty
# GenieTelemetry
from genie.telemetry.plugin import BasePlugin
from genie.telemetry.status import OK, WARNING, ERRORED, PARTIAL, CRITICAL
# Genie
from genie.utils.timeout import Timeout
# module logger
logger = logging.getLogger(__name__)
class Plugin(BasePlugin):
    """GenieTelemetry plugin that polls a device's 5-minute CPU utilisation
    and reports CRITICAL when it stays above the configured threshold for
    the whole poll window."""

    __plugin_name__ = 'CPU utilization Check Plugin'
    __version__ = '1.0.0'
    __supported_os__ = ['iosxe']

    @classproperty
    def parser(cls):
        """Argument parser exposing the plugin's poll/threshold knobs."""
        parser = argparse.ArgsPropagationParser(add_help = False)
        parser.title = 'CPU utilization Check'
        # timeout
        # -------
        parser.add_argument('--cpucheck_timeout',
                            action="store",
                            default=120,
                            help = "Specify poll timeout value\ndefault "
                                   "to 120 seconds")
        # # interval
        # # -------
        parser.add_argument('--cpucheck_interval',
                            action="store",
                            default=20,
                            help = "Specify poll interval value\ndefault "
                                   "to 20 seconds")
        # # five_min_percentage
        # # -------------------
        parser.add_argument('--cpucheck_fivemin_pcnt',
                            action="store",
                            default=60,
                            help = "Specify limited 5 minutes percentage of "
                                   "cpu usage\ndefault to 60")
        return parser

    def parse_args(self, argv):
        '''parse_args
        parse arguments if available, store results to self.args. This follows
        the easypy argument propagation scheme, where any unknown arguments to
        this plugin is then stored back into sys.argv and untouched.
        Does nothing if a plugin doesn't come with a built-in parser.
        '''
        # do nothing when there's no parser
        if not self.parser:
            return
        argv = copy.copy(argv)
        # avoid parsing unknowns
        self.args, _ = self.parser.parse_known_args(argv)

    def execution(self, device, **kwargs):
        """Poll the device until CPU usage drops below the threshold or the
        timeout expires; returns an OK/WARNING/ERRORED/CRITICAL status."""
        # Init
        status = OK
        # create timeout object
        timeout = Timeout(max_time=int(self.args.cpucheck_timeout),
                          interval=int(self.args.cpucheck_interval))
        # loop status
        loop_stat_ok = True
        # PARSER_MODULE is expected to be attached externally (per-OS parser
        # class) -- without it there is nothing to measure.
        if not hasattr(self, 'PARSER_MODULE'):
            return WARNING('Does not have CPU related parsers to check')
        while timeout.iterate():
            # Execute command to get five minutes usage percentage
            try:
                cpu_dict = self.PARSER_MODULE(device).parse(
                    sort_time='5min', key_word='CPU')
            except Exception as e:
                return ERRORED('No output from show processes cpu\n{}'.format(e))
            # Check 5 minutes percentage smaller than cpucheck_fivemin_pcnt
            if int(cpu_dict['five_min_cpu']) >= int(self.args.cpucheck_fivemin_pcnt):
                # Over threshold: remember the failure and retry after the
                # poll interval; only the last message is reported.
                message = "****** Device {d} *****\n".format(d=device.name)
                message += "Excessive CPU utilization detected for 5 min interval\n"
                message += "Allowed: {e}%\n".format(e=self.args.cpucheck_fivemin_pcnt)
                message += "Measured: FiveMin: {r}%".format(r=cpu_dict['five_min_cpu'])
                loop_stat_ok = False
                timeout.sleep()
            else:
                message = "***** CPU usage is Expected ***** \n"
                message += "Allowed threashold: {e} \n"\
                    .format(e=self.args.cpucheck_fivemin_pcnt)
                message += "Measured from device: {r}"\
                    .format(r=cpu_dict['five_min_cpu'])
                loop_stat_ok = True
                status += OK(message)
                logger.info(banner(message))
                break
        # Still failing after the whole poll window -> report CRITICAL.
        if not loop_stat_ok:
            status += CRITICAL(message)
            logger.error(banner(message))
        # Final status
        return status
| StarcoderdataPython |
4888299 | # MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Example implementation of a thrift service for a calculator.
'''
__all__ = ['Server']
import threading
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TProtocol
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from calculator.handler import Handler
from calculator.gen.calculator import CalculatorService
class Server:
'''
Example implementation of a thrift server for the `CalculatorService`.
Offers sensible default values but has fully configurable options
for both the thrift transport as well as the protocol layer.
Can be used similar to a thread by offering the methods:
* :meth:`Server.start`
* :meth:`Server.stop` and
* :meth:`Server.join`
.. code-block:: python
server = Server()
try:
server.start()
server.join()
except (KeyboardInterrupt, SystemExit):
server.stop()
'''
def __init__(self,
handler: CalculatorService.Iface = None,
transport: TTransport.TTransportBase = None,
transport_factory: TTransport.TTransportFactoryBase = None,
protocol_factory: TProtocol.TProtocolFactory = None):
'''
Constructor (optionally) sets up transport as well as protocol
factories and then wraps to parent.
:param handler: CalculatorService implementation to be used.
If omitted this will be created dynamically using
:meth:`Server._default_handler`.
:type handler: CalculatorService.Iface
:param transport: Thrift transport layer to be used.
If omitted transport will be created dynamically using
:meth:`Server._default_transport`.
:type: TTransport.TTransportBase
:param transport_factory: Thrift transport factory to be used.
If omitted this will be created dynamically using
:meth:`Server._default_transport_factory`.
:type: TTransport.TTransportFactoryBase
:param protocol_factory: Thrift protocol factory to be used.
If omitted this will be created dynamically using
:meth:`Server._default_protocol_factory`.
:type: TProtocol.TProtocolFactory
'''
# Use given handler or use default implementation
if not handler:
handler = self._default_handler()
# Use given transport or use default implementation
if not transport:
transport = self._default_transport()
self._transport = transport
# Use given transport factory or use default implementation
if not transport_factory:
transport_factory = self._default_transport_factory()
# Use given protocol factory or use default implementation
if not protocol_factory:
protocol_factory = self._default_protocol_factory()
# Actually create server
self._server = self._create_server(
CalculatorService.Processor(handler),
transport,
transport_factory,
protocol_factory
)
# Prepare server thread
self._server_thread = None
def start(self):
'''
Starts the server in a background thread.
'''
self._server_thread = threading.Thread(target=self._server.serve)
self._server_thread.start()
def stop(self):
'''
Signals the server to stop (but does not wait for it to
actually stop).
'''
# Close underlying server transport channel
self._transport.close()
# Wait for server to shut down
if self._server_thread:
self._server_thread.join()
self._server_thread = None
def join(self):
'''
Waits for the server to shut down.
'''
if self._server_thread:
self._server_thread.join()
# pylint: disable=no-self-use
def _create_server(self,
processor: CalculatorService.Processor,
transport: TTransport.TTransportBase,
transport_factory: TTransport.TTransportFactoryBase,
protocol_factory: TProtocol.TProtocolFactory
) -> TServer:
'''
Creates thrift server based on given input parameters.
:param processor: Thrift interface processor to be used.
:type processor: CalculatorService.Processor
:param transport: Thrift transport layer to be used.
:type transport: TTransport.TTransportBase
:param transport_factory: Thrift transport factory to be used.
:type transport_factory: TTransport.TTransportFactoryBase
:param protocol_factory: Thrift protocol factory to be used.
:type protocol_factory: TProtocol.TProtocolFactory
:return: Thrift server bases on input parameters.
:rtype: TServer
'''
server = TServer.TSimpleServer(
processor,
transport,
transport_factory,
protocol_factory
)
return server
# pylint: disable=no-self-use
def _default_handler(self) -> CalculatorService.Iface:
'''
Creates default interface implementation.
:return: Default interface implementation to be used.
'''
handler = Handler()
return handler
# pylint: disable=no-self-use
def _default_transport(self) -> TTransport.TTransportBase:
    '''
    Supplies the default transport: a server socket bound to
    localhost, port 9876.
    :return: Default transport layer to be used.
    '''
    return TSocket.TServerSocket('127.0.0.1', 9876)
# pylint: disable=no-self-use
def _default_transport_factory(self) -> TTransport.TTransportFactoryBase:
    '''
    Supplies the default (buffered) thrift transport factory.
    :return: Default transport factory to be used.
    '''
    return TTransport.TBufferedTransportFactory()
# pylint: disable=no-self-use
def _default_protocol_factory(self) -> TProtocol.TProtocolFactory:
    '''
    Supplies the default (binary) thrift protocol factory.
    :return: Default protocol factory to be used.
    '''
    return TBinaryProtocol.TBinaryProtocolFactory()
def main():
    '''
    Main function running the example thrift server until interrupted.
    '''
    # Create server with all-default handler/transport/protocol.
    server = Server()
    # Actually run the server
    try:
        # Bug fix: the original printed the banner with end=None (which
        # is the default newline, not "") and an unbalanced ')', so the
        # message came out on two lines and read "...to stop)...".
        print('Running the CalculatorService '
              '(Press CTRL-C or CTRL-Break to stop)...')
        server.start()
        server.join()
    except (KeyboardInterrupt, SystemExit):
        # stop() closes the transport and waits for the thread to exit.
        server.stop()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5073130 | <filename>vk_stats/stats.py
#!/usr/bin/env python3
# coding=utf-8
# SysRq ScadsStats. Finding active users on VK walls.
# Copyright (C) 2015-2016 <NAME>
#
# This file is part of SysRq VK Stats.
#
# SysRq VK Stats is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SysRq VK Stats is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import csv
import queue
import atexit
import shutil
import pickle
import tempfile
import time
import threading
import vk
import vk.exceptions
import requests
import requests.exceptions
from webbrowser import open as open_url
from vk.utils import stringify_values
from kivy.app import App
from kivy.logger import Logger
from kivy.core.window import Window
from kivy.config import Config
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.actionbar import ActionButton
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.lang import Builder
from kivy.utils import platform
from kivy.clock import Clock
from kivy.properties import ObjectProperty as Object
# Import package-local helpers; fall back to plain imports when this file
# is executed as a script instead of as part of the package.
try:
    from .service import HOME, SCRIPTDIR, _
    from .KivyCalendar import DatePicker
except (ImportError, SystemError):
    # Bug fix: modern Python raises ImportError for a failed relative
    # import outside a package; only very old 3.x releases raised
    # SystemError, which is all the original caught.
    from service import HOME, SCRIPTDIR, _
    from KivyCalendar import DatePicker
__author__ = "CyberTailor <<EMAIL>>"
__version__ = '1.0.1 "Carboneum"'
# Integer build number compared against the update server's response.
v_number = 4
# VK API version sent with every request.
api_ver = "5.50"
infinity = float("inf")
# Prefix for every log record emitted by this module.
NAME = "ScadsStats: "
CURDIR = os.getcwd()
# Default directory for generated reports; results() may replace it.
SAVEDIR = CURDIR + "/results"
# Per-run scratch directory for caches/downloads, removed again at exit.
TEMP = tempfile.mktemp(prefix="sysrq-")
os.mkdir(TEMP)
atexit.register(shutil.rmtree, TEMP)
# True when running on Windows (drives the updater and folder opening).
mustdie = platform == "win"
# translating strings in _()
Logger.info(NAME + _("Создана временная директория %s"), TEMP)
Builder.load_file(SCRIPTDIR + "/interface.kv")
class Stop(Exception):
    """Raised to abort a background statistics job with a user-facing message."""
class Partial:
    """
    Callable wrapper that freezes *func* with the given arguments and,
    unlike functools.partial, discards whatever is passed at call time
    (handy for Kivy event handlers that inject event arguments).
    """

    def __init__(self, func, *args, **kwargs):
        self.func = func
        self.args = args
        self.kwargs = dict(kwargs)

    def __call__(self, *_ignored):
        """Invoke the frozen call, ignoring any positional event args."""
        return self.func(*self.args, **self.kwargs)
class ExcThread(threading.Thread):
    """
    Thread with information about exceptions
    """
    def __init__(self, bucket, after=None, **kwargs):
        # bucket: queue.Queue receiving sys.exc_info() tuples when the
        # target raises; a watcher thread drains it (see Main.watch).
        # after: optional zero-argument callable run once the target has
        # finished, whether it succeeded or raised.
        threading.Thread.__init__(self, **kwargs)
        self.bucket = bucket
        self.after = after
    def run(self):
        """
        Makes exceptions available for main thread
        """
        # NOTE(review): mirrors the body of threading.Thread.run and
        # touches the private attributes _target/_args/_kwargs, so it is
        # tied to CPython's Thread implementation details.
        try:
            if self._target:
                self._target(*self._args, **self._kwargs)
        except Exception:
            # Ship the full exc_info to the watcher instead of letting
            # the default excepthook print the traceback.
            self.bucket.put(sys.exc_info())
        finally:
            # Same cleanup Thread.run performs: break reference cycles.
            del self._target, self._args, self._kwargs
            if self.after is not None:
                self.after()
class FailSafeSession(vk.Session):
    """
    Session with reduced chance of raising error
    """
    def send_api_request(self, request, captcha_response=None):
        """
        Modified method with immunity to timeout and bad internet
        :param request: VK API method
        :param captcha_response: captcha dictionary
        """
        # NOTE(review): re-implements vk.Session.send_api_request and
        # reads private request/_api attributes, so this is pinned to the
        # installed version of the ``vk`` package.
        url = self.API_URL + request._method_name
        method_args = request._api._method_default_args.copy()
        method_args.update(stringify_values(request._method_args))
        access_token = self.access_token
        if access_token:
            method_args['access_token'] = access_token
        if captcha_response:
            method_args['captcha_sid'] = captcha_response['sid']
            method_args['captcha_key'] = captcha_response['key']
        timeout = request._api._timeout
        try:
            response = self.requests_session.post(url, method_args, timeout=timeout)
        except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError):
            # On network trouble, wait and retry recursively. There is no
            # retry cap, so a dead connection keeps this looping forever.
            Logger.warning(NAME + _("Операция прервана по тайм-ауту"))
            time.sleep(5)
            response = self.send_api_request(request, captcha_response=captcha_response)
        return response
class FailSafeAuthSession(vk.AuthSession, FailSafeSession):
    """vk.AuthSession variant that inherits the retrying send_api_request."""
class SleepAPI(vk.API):
    """
    API class with immunity to 'Too many requests per second' error
    """
    def __getattr__(self, method_name):
        # Throttle: pause ~1/3 s before every API access so the request
        # rate stays under VK's requests-per-second limit.
        time.sleep(0.33)
        return vk.API.__getattr__(self, method_name)
class AllOk(BoxLayout):
    # Widgets wired up in interface.kv.
    ok = Object
    message = Object
    # NOTE(review): structurally identical to Info below; kept separate so
    # interface.kv can style success and info popups differently.
    def show(self, text, title):
        # Display *text* in a modal popup titled *title*; OK dismisses it.
        self.message.text = text
        popup = Popup(title=title,
                      title_size='16pt',
                      content=self,
                      size_hint=(0.85, 0.6))
        self.ok.bind(on_press=popup.dismiss)
        popup.open()
class Info(BoxLayout):
    # Widgets wired up in interface.kv.
    ok = Object
    message = Object
    def show(self, text, title):
        # Display *text* in a modal popup titled *title*; OK dismisses it.
        self.message.text = text
        popup = Popup(title=title,
                      content=self,
                      title_size='16pt',
                      size_hint=(0.85, 0.6))
        self.ok.bind(on_press=popup.dismiss)
        popup.open()
class Alert(BoxLayout):
    # Widgets wired up in interface.kv.
    ok = Object
    message = Object
    def show(self, text, kill=False):
        # Warning popup. With kill=True the dialog cannot be dismissed by
        # tapping outside and pressing OK terminates the whole process
        # (used for fatal errors such as missing network connectivity).
        self.message.text = text
        popup = Popup(title=_("Предупреждение"),
                      title_size='16pt',
                      content=self,
                      size_hint=(0.85, 0.6),
                      auto_dismiss=False)
        if kill:
            # os._exit skips cleanup handlers; exit code 1 signals failure.
            self.ok.bind(on_press=Partial(os._exit, 1))
        else:
            self.ok.bind(on_press=popup.dismiss)
        popup.open()
class Bar:
    """Thin adapter around a progress-bar widget and its caption label."""

    def __init__(self, bar, bar_text):
        self.bar = bar
        self.bar_text = bar_text
        # Percent scale by default; set_max() can change it.
        self.bar.max = 100

    def set_max(self, max_value):
        """Change the upper bound of the progress bar."""
        self.bar.max = max_value

    def set_value(self, value):
        """Move the progress indicator to *value*."""
        self.bar.value = value

    def set_text(self, text):
        """Replace the caption shown next to the bar."""
        self.bar_text.text = text

    def finish(self):
        """Reset the indicator back to zero."""
        self.set_value(0)
def info(message, title=_("Некоторая информация")):
    """Pop up an informational dialog showing *message* under *title*."""
    dialog = Info()
    dialog.show(message, title)
def all_ok(message, title=_("Всё OK")):
    """Pop up a success dialog showing *message* under *title*."""
    dialog = AllOk()
    dialog.show(message, title)
def warning(message, kill=False):
    """Pop up a warning dialog; with *kill* the OK button exits the app."""
    dialog = Alert()
    dialog.show(message, kill)
def results(folder):
    """
    Setting folder for results
    :param folder: path to directory where the program will save results
    """
    global SAVEDIR
    SAVEDIR = folder
    Logger.info(NAME + _("Результаты будут сохранены в %s"), folder)
def write_token(access_token):
    """
    Writing token to file in home directory.
    :param access_token: access_token for VK.com
    """
    # Bug fix: the original opened the file and never closed it, so the
    # token could stay unflushed until interpreter exit; ``with``
    # guarantees flush-and-close even on error.
    with open(HOME + "/token.txt", mode="w") as token_file:
        token_file.write(access_token)
def upgrade(version, upd_log):
    """
    Upgrading program
    :param upd_log: Label object showing updater progress to the user
    :param version: version name for VK Stats
    """
    # Windows-only updater: download the release installer, launch it and
    # hard-exit so the installer can replace the program files.
    upd_log.text = _("Скачиваю новую версию...")
    installer = TEMP + "/VK_Stats.exe"
    with open(installer, 'wb') as file:
        # The whole installer is buffered in memory before writing;
        # acceptable for an installer-sized download.
        file.write(requests.get(
            "https://github.com/SysRq-Tech/ScadsStats/releases/download/{}/WIN.exe".format(version)).content)
    upd_log.text = _("Запускаю установщик...")
    os.startfile(installer)
    upd_log.text = _("Завершаю работу приложения...")
    time.sleep(1.5)
    # Hard exit: skip Kivy teardown so the installer is not blocked.
    os._exit(0)
def upd_check():
    """
    Checking for updates
    :return: version string of the newer release, or None when current.
    """
    latest = requests.get("http://tools-sysrq.rhcloud.com/update?program=vk_stats").json()
    if latest["number"] <= v_number:
        return None
    return latest["version"]
def get_api(api_session=None, access_token=None):
    """
    Providing instance of *SleepAPI* class.
    :param api_session: vk.Session
    :param access_token: token for VKontakte
    :raise vk.exceptions.VkAuthError: when the credentials do not work
    """
    # The constructed API object is published as a module-level global so
    # every other function in this module can reach it.
    global api
    if api_session is not None:
        api = SleepAPI(api_session, v=api_ver)
    if access_token is not None:
        session = FailSafeSession(access_token)
        api = SleepAPI(session, v=api_ver)
        # Remember the working token for the next program start.
        write_token(access_token)
    try:
        # Cheap request that only succeeds with valid credentials.
        api.users.get()
    except vk.exceptions.VkAPIError:
        # Stored token is stale: drop it and force a fresh login.
        if "token.txt" in os.listdir(HOME):
            os.remove(HOME + "/token.txt")
        raise vk.exceptions.VkAuthError()
    return api
def login(email, password):
    """
    Authorisation in https://vk.com
    :param password: password for the VK account
    :param email: e-mail address or phone number
    :return: access_token for VK
    """
    # Application id registered for ScadsStats on vk.com.
    app_id = 4589594
    session = FailSafeAuthSession(user_login=email, user_password=password,
                                  app_id=app_id, scope="offline,stats,groups,wall")
    # Validates the session and publishes the global ``api`` object.
    get_api(api_session=session)
    # Persist the freshly obtained token for future runs.
    write_token(session.get_access_token())
def resolve(url):
    """
    Resolving VKontakte URLs
    :param url: address of group or profile
    :return: {"id": <ID of wall>, "name": <screen name>, "title": <title or name/surname>}
    :raise Stop: when VK cannot resolve the screen name
    """
    # Only the last path component matters, so full URLs and bare screen
    # names are both accepted.
    wall_data = api.utils.resolveScreenName(screen_name=url.split("/")[-1])
    if not wall_data:
        raise Stop(_("Неверный URL"))
    wall_type = wall_data["type"]
    obj_id = wall_data["object_id"]
    if wall_type == "group":
        group_data = api.groups.getById(group_ids=obj_id)[0]
        screen_name = group_data["screen_name"]
        title = group_data["name"]
        # Group walls are addressed with a negative owner id in the VK API.
        wall_id = "-" + str(obj_id)
    else:
        profile = api.users.get(user_ids=obj_id, fields="screen_name")[0]
        screen_name = profile["screen_name"]
        title = "{first_name} {last_name}".format(**profile)
        wall_id = obj_id
    return {"id": wall_id, "name": screen_name, "title": title}
def percents(el, seq):
    """
    Computing progress for sequence.
    :param el: element or first number
    :param seq: sequence or last number
    :return: percent (rounded to two decimal places)
    """
    if isinstance(seq, int):
        return round(el * 100 / seq, 2)
    # Sequence form: progress is the 1-based position of *el* in *seq*.
    return round((seq.index(el) + 1) * 100 / len(seq), 2)
def make_packs(l, num):
    """
    Split list *l* into *num* consecutive chunks.

    The first ``num - 1`` chunks hold ``len(l) // num`` items each and
    the final chunk receives everything that remains.

    :param l: list to split (left unmodified; a copy is consumed)
    :param num: number of chunks to produce
    :return: list of *num* lists
    """
    pack_len = len(l) // num
    work_list = l.copy()
    result = []
    # Bug fix: the original abused a function attribute
    # (``for make_packs._ in range(...)``) as a throwaway loop variable,
    # mutating the function object on every call; plain ``_`` is the
    # idiomatic spelling.
    for _ in range(num - 1):
        result.append(work_list[:pack_len])
        del work_list[:pack_len]
    result.append(work_list)
    return result
def list_of_str(seq):
    """
    Converting sequence of integers to list of strings.
    :param seq: any sequence
    :return: list of string
    """
    return list(map(str, seq))
class Stats:
    """
    Gathering statistics for a VK wall (base class: "Writers" mode).

    Subclasses override gather_stats()/__call__ to produce the other
    report flavours; fetched posts/likers/commentators are cached per
    wall in the temporary directory so switching modes stays cheap.
    """
    def __init__(self, name, bar, posts_lim=0, from_lim="0.0.0", to_lim="0.0.0"):
        """
        Run set_bar() and loggers() functions before calling.
        :param name: ID or screen name
        :param bar: Bar wrapper around the UI progress widgets
        :param posts_lim: limit for posts (0 = the wall's full post count)
        :param from_lim: date of the earliest post, "D.M.YYYY" ("0.0.0" = no limit)
        :param to_lim: date of the latest post, "D.M.YYYY" ("0.0.0" = no limit)
        :raise Stop: on an unresolvable wall or a malformed date
        """
        self.bar = bar
        self.plist = []
        self.likers_list = []
        self.comm_list = []
        self.id_list = []
        self.screen_name = name
        self.dir_opened = None
        self.cache = "{}/{}.dat".format(TEMP, self.screen_name)
        self.savedir = os.path.join(SAVEDIR, self.screen_name)
        # ID of a wall
        self.wall = resolve(self.screen_name)["id"]
        # limit for posts
        if not posts_lim:
            self.posts_lim = api.wall.get(owner_id=self.wall, count=1)["count"]
        else:
            self.posts_lim = posts_lim
        Logger.info(NAME + _("Ограничено до %s постов"), self.posts_lim)
        # date limit
        try:
            date_list = [int(num) for num in from_lim.split(".") + to_lim.split(".")]
            assert len(date_list) == 6
            assert date_list[2] > 2000 or not date_list[2]
            # Bug fix: accept 0 ("no upper date limit") for the to-date
            # year as well; previously the default to_lim="0.0.0" always
            # failed this assertion and every construction without an
            # explicit to-date raised Stop.
            assert date_list[-1] > 2000 or not date_list[-1]
        except (AssertionError, ValueError):
            raise Stop(_("Неправильный формат даты!"))
        if not sum(date_list[:3]):  # if result is 0
            self.from_lim = 0
        else:
            self.from_lim = time.mktime((date_list[2], date_list[1], date_list[0], 0, 0, 0, 0, 0, -1))
            Logger.info(NAME + _("Будут получены посты с даты %s"), from_lim)
        if not sum(date_list[3:]):
            self.to_lim = infinity
        else:
            self.to_lim = time.mktime((date_list[5], date_list[4], date_list[3], 23, 59, 59, 0, 0, -1))
            Logger.info(NAME + _("Будут получены посты до даты %s"), to_lim)
        if os.path.isfile(self.cache):
            with open(self.cache, "rb") as cache:
                loaded = pickle.load(cache)
            # Reuse the cache only when its limits are at least as tight
            # as the requested ones (the cached lists were filtered with
            # them) — presumably intentional, since a looser cache would
            # need re-filtering; confirm against original design notes.
            if loaded[4] >= self.from_lim \
                    and loaded[5] <= self.to_lim \
                    and loaded[6] <= self.posts_lim:
                self.plist, self.likers_list, self.comm_list, self.dir_opened = loaded[:4]
                Logger.info(NAME + _("Кэш стены загруженен"))

    def _restore(self):
        """Drop collected data and reset the progress bar for the next run."""
        self.plist = []
        self.likers_list = []
        self.comm_list = []
        self.id_list = []
        self.dir_opened = None
        self.bar.set_text("")
        self.bar.finish()

    def _check_limit(self, data):
        """Return True when the post *data* is older than the from-limit."""
        date = data["date"]
        if self.from_lim and date < self.from_lim:
            return True
        return False

    def _get_posts(self):
        """Download posts from the wall, 1000 at a time, newest first."""
        posts = []
        thousands_range = self.posts_lim // 1000 + (1 if self.posts_lim % 1000 else 0)
        offset = 0
        self.bar.set_text(_("Получение постов"))
        for post in range(thousands_range):
            if offset > 0:
                # Stop early once we passed the date limit or have enough.
                if self._check_limit(posts[-1]) or len(posts) > self.posts_lim:
                    return posts
            self.bar.set_value(percents(offset, self.posts_lim))
            posts.extend(api.execute.wallGetThousand(owner_id=self.wall, offset=offset))
            offset += 1000
        self.bar.finish()
        return posts

    def _get_likers(self, offset=0, did=0, task=0):
        """Collect liker ids for every post id in self.id_list (25 per call)."""
        id_list_copy = self.id_list.copy()
        twenty_five_range = len(self.id_list) // 25 + (1 if len(self.id_list) % 25 else 0)
        for i in range(twenty_five_range):
            self.bar.set_value(percents(did, task))
            count = id_list_copy[:25]
            if not id_list_copy:
                break
            data = api.execute.likesList(wall=self.wall, posts=",".join(list_of_str(count)), offset=offset)
            for index, post in enumerate(data):
                self.likers_list.extend(post["items"])
                # Fully fetched posts are removed; the rest are retried
                # recursively with a larger offset below.
                if post["count"] - offset <= 1000:
                    self.id_list.remove(count[index])
            did += 1
            del id_list_copy[:25]
        if self.id_list:
            self._get_likers(offset + 1000, did, task)
        self.bar.finish()

    def _get_comm(self, offset=0, did=0, task=0):
        """Collect commentator ids for every post id in self.id_list (25 per call)."""
        id_list_copy = self.id_list.copy()
        twenty_five_range = len(self.id_list) // 25 + (1 if len(self.id_list) % 25 else 0)
        for i in range(twenty_five_range):
            self.bar.set_value(percents(did, task))
            count = id_list_copy[:25]
            if not id_list_copy:
                break
            data = api.execute.commList(wall=self.wall, posts=",".join(list_of_str(count)), offset=offset)
            for index, comm in enumerate(data):
                self.comm_list.extend([commentator["from_id"] for commentator in comm["items"]])
                # Comments page in steps of 100 (vs 1000 for likes).
                if comm["count"] - offset <= 100:
                    self.id_list.remove(count[index])
            did += 1
            del id_list_copy[:25]
        if self.id_list:
            self._get_comm(offset + 100, did, task)
        self.bar.finish()

    def _process_post_pack(self, posts):
        """Filter *posts* by the date window and append compact records to plist."""
        for data in posts:
            if self._check_limit(data):
                continue
            if data["date"] > self.to_lim:
                continue
            post_id = data["id"]
            from_id = data["from_id"]
            likes = data["likes"]["count"]
            comments = data["comments"]["count"]
            self.plist.append({"data": [from_id, likes, comments], "id": post_id})

    def posts_list(self):
        """
        Making list of posts with senders' IDs and count of likes.
        :return: list of posts
        """
        if self.plist:
            # Bug fix: removed a leftover debug print("OK") here.
            return
        posts = self._get_posts()
        task = len(posts)
        packs = make_packs(posts, 2)
        self.bar.set_text(_("Обработка постов"))
        # Process the two halves concurrently; list.append is thread-safe.
        workers = [threading.Thread(target=self._process_post_pack, args=(pack,)) for pack in packs]
        for w in workers:
            w.start()
        while True:
            alive = [w.is_alive() for w in workers]
            if alive == [False, False]:
                break
            self.bar.set_value(percents(len(self.plist), task))
            time.sleep(0.005)
        Logger.info(NAME + _("Обработано %s постов"), len(self.plist))
        self.plist = self.plist[:self.posts_lim]
        self.bar.finish()

    def users(self, users_list):
        """
        List of information about users
        :param users_list: list of users' IDs (consumed: emptied in place)
        """
        result = []
        task = len(users_list)
        self.bar.set_text(_("Получение пользователей"))
        while users_list:
            # Negative ids are communities, not users; skip them.
            users = ",".join([str(user) for user in users_list[:1000] if user > 0])
            self.bar.set_value(percents(len(result), task))
            data = api.users.get(user_ids=users, fields="screen_name")
            result.extend(data)
            del users_list[:1000]
        self.bar.finish()
        return result

    def likers(self):
        """
        Users who liked posts.
        :return: lists of likers
        """
        if self.likers_list:
            return
        self.id_list = [data["id"] for data in self.plist]
        self.bar.set_text(_("Получение лайкеров"))
        self._get_likers(task=len(self.id_list))

    def commentators(self):
        """
        Users who commented posts.
        :return: lists of posts and commentators
        """
        if self.comm_list:
            return
        self.id_list = [data["id"] for data in self.plist]
        self.bar.set_text(_("Получение комментаторов"))
        self._get_comm(task=len(self.id_list))

    def gather_stats(self):
        """
        Gathering statistics [POSTERS].
        :return: tuple with user's information and count of posts
        """
        self.posts_list()
        self.bar.set_text(_("Обработка пользователей"))
        from_ids = [uid["data"][0] for uid in self.plist]
        from_ids_unique = list({uid for uid in from_ids})
        from_list = []
        data = self.users(from_ids_unique)
        self.bar.set_text(_("Обработка пользователей"))
        for user in data:
            if "deactivated" in user:  # if user is deleted or banned
                user["screen_name"] = user["deactivated"].upper()
            posts_from_user = from_ids.count(user["id"])
            self.bar.set_value(percents(user, data))
            from_list.append((posts_from_user, user))
        self.bar.finish()
        return from_list

    def __call__(self, mode="Writers"):
        """
        Exporting statistics.
        :param mode: prefix for file
        """
        self.bar.set_text(_("Инициализация"))
        api.stats.trackVisitor()
        data = self.gather_stats()
        self.savedir = os.path.join(SAVEDIR, self.screen_name, mode.lower())
        if not os.path.isdir(self.savedir):
            os.makedirs(self.savedir, exist_ok=True)
        self.bar.set_text(_("Сохранение результатов"))
        res_txt = os.path.join(self.savedir, mode.lower() + ".txt")
        res_csv = os.path.join(self.savedir, mode.lower() + ".csv")
        res_html = os.path.join(self.savedir, mode.lower() + ".html")
        for file in res_txt, res_csv, res_html:
            if os.path.isfile(file):
                os.remove(file)
        # NOTE(review): the three report handles stay open if an exception
        # fires mid-export; kept as-is to preserve the periodic-flush logic.
        txt_file = open(res_txt, mode="a")
        print(_("РЕЖИМ СТАТИСТИКИ:"), mode.upper(), file=txt_file)
        csv_file = open(res_csv, mode="a", newline="")
        writer = csv.writer(csv_file)
        writer.writerow(["URL", _("Имя"), _("Счёт")])
        html_file = open(res_html, mode="a")
        # Bug fix: read the HTML fragments via ``with`` so the template
        # handles are closed deterministically (the originals leaked).
        with open(SCRIPTDIR + "/html/stats_header.html") as template:
            html_header = template.read()
        with open(SCRIPTDIR + "/html/stats_item.html") as template:
            html_item = template.read()
        with open(SCRIPTDIR + "/html/stats_item_inactive.html") as template:
            html_item_inactive = template.read()
        with open(SCRIPTDIR + "/html/stats_end.html") as template:
            html_end = template.read()
        print(html_header.format(title=mode, user=_("Пользователь"), count=_("Счёт")),
              file=html_file)
        Logger.info(NAME + _("Сохранение результатов в %s"), self.savedir)
        task = len(data)
        # place = [current rank, score of the previous rank] — ties share
        # a rank, a strictly lower score starts the next one.
        place = [0, infinity]
        for did in range(1, len(data) + 1):
            if not data:
                break
            max_object = max(data, key=lambda sequence: sequence[0])
            max_count = max_object[0]
            if max_count < place[1]:
                place[1] = max_count
                place[0] += 1
            max_index = data.index(max_object)
            user_data = data.pop(max_index)[1]
            if max_count > 0:
                prefix = "" if user_data["screen_name"] in ["DELETED", "BANNED"] else "https://vk.com/"
                user_string = "{2}. {1}{screen_name} ({first_name} {last_name}): {0}".format(max_count, prefix,
                                                                                             place[0], **user_data)
                print(user_string, file=txt_file)
                writer.writerow([prefix + user_data["screen_name"],
                                 "{first_name} {last_name}".format(**user_data),
                                 max_count])
                if prefix:
                    print(html_item.format(place[0], max_count, **user_data), file=html_file)
                else:
                    print(html_item_inactive.format(place[0], max_count, **user_data), file=html_file)
            if not did % 50:
                # Flush periodically so a crash keeps most of the report.
                for file in txt_file, csv_file, html_file:
                    file.flush()
            self.bar.set_value(percents(did, task))
            time.sleep(0.005)
        print(html_end.format(_("Получить программу")), file=html_file)
        for file in txt_file, csv_file, html_file:
            file.close()
        if self.dir_opened != os.path.join(SAVEDIR, self.screen_name):
            # Open the results folder once per wall in the file manager.
            if mustdie:
                os.startfile(self.savedir)
            elif platform == "linux":
                os.system("xdg-open '{}'".format(self.savedir))
            self.dir_opened = os.path.join(SAVEDIR, self.screen_name)
        with open(self.cache, "wb") as cache:
            pickle.dump([self.plist, self.likers_list, self.comm_list, self.dir_opened,
                         self.from_lim, self.to_lim, self.posts_lim], file=cache)
        self._restore()
        all_ok(_("Сделано!"))
class FavoritesStats(Stats):
    """
    Gather, make and export statistics for liked posts
    """
    def gather_stats(self):
        """
        Gathering statistics for liked posts.
        :return: dictionary with user's information and general count of likes
        """
        self.posts_list()
        self.bar.set_text(_("Обработка пользователей"))
        data = [val["data"] for val in self.plist]
        # Accumulate the total like count per poster id.
        users = {val[0]: 0 for val in data}
        result = []
        # ``comm`` (comment count) is unpacked but deliberately unused here.
        for user, likes, comm in data:
            users[user] += likes
        items_list = list(users.items())
        users_list = [key[0] for key in items_list]
        likes_list = [key[1] for key in items_list]
        users_data = self.users(users_list)
        self.bar.set_text(_("Обработка пользователей"))
        for user, likes in zip(users_data, likes_list):
            if "deactivated" in user:
                # Deleted/banned accounts get their status as screen name.
                user["screen_name"] = user["deactivated"].upper()
            if likes > 0:
                result.append((likes, user))
            self.bar.set_value(percents(likes, likes_list))
        self.bar.finish()
        return result
    def __call__(self, **kwargs):
        """
        Exporting statistics for likes
        :param kwargs: for compatibility
        """
        Stats.__call__(self, mode="Favorites")
class LikersStats(Stats):
    """
    Gather, make and export statistics for likers
    """
    def gather_stats(self):
        """
        Gathering statistics for likers.
        :return: dictionary with user's information and general count of likes
        """
        self.posts_list()
        self.likers()
        self.bar.set_text(_("Обработка пользователей"))
        likers_unique = list({uid for uid in self.likers_list})
        result = []
        did = 0
        # Measure before self.users(), which empties the list in place.
        task = len(likers_unique)
        users_data = self.users(likers_unique)
        self.bar.set_text(_("Обработка пользователей"))
        for liker in users_data:
            # Number of likes this user gave across all fetched posts.
            count = self.likers_list.count(liker["id"])
            if "deactivated" in liker:
                liker["screen_name"] = liker["deactivated"].upper()
            self.bar.set_value(percents(did, task))
            result.append((count, liker))
            did += 1
        self.bar.finish()
        return result
    def __call__(self, **kwargs):
        """
        Exporting statistics for likers
        """
        Stats.__call__(self, mode="Likers")
class DiscussedStats(Stats):
    """
    Gather, make and export statistics for the most discussed posters.
    """
    def gather_stats(self):
        """
        Gathering statistics for commented posts.
        :return: dictionary with user's information and general count of comments to his/her posts
        """
        self.posts_list()
        self.bar.set_text(_("Обработка пользователей"))
        data = [val["data"] for val in self.plist]
        # Accumulate the total comment count per poster id.
        users = {val[0]: 0 for val in data}
        result = []
        # ``likes`` is unpacked but unused; only comment counts are summed.
        for user, likes, comments in data:
            users[user] += comments
        items_list = list(users.items())
        users_list = [key[0] for key in items_list]
        comments_list = [key[-1] for key in items_list]
        users_data = self.users(users_list)
        self.bar.set_text(_("Обработка пользователей"))
        # ``likes`` below actually holds a comment total (name kept as-is).
        for user, likes in zip(users_data, comments_list):
            if "deactivated" in user:
                user["screen_name"] = user["deactivated"].upper()
            self.bar.set_value(percents(likes, comments_list))
            result.append((likes, user))
        self.bar.finish()
        return result
    def __call__(self, **kwargs):
        """
        Exporting statistics for the most discussed posters.
        """
        Stats.__call__(self, mode="Discussed")
class CommentatorsStats(Stats):
    """
    Gather, make and export statistics for commentators
    """
    def gather_stats(self):
        """
        Gathering statistics for commentators.
        :return: dictionary with user's information and general count of comments written
        """
        self.posts_list()
        self.commentators()
        self.bar.set_text(_("Обработка пользователей"))
        comm_unique = list({uid for uid in self.comm_list})
        result = []
        did = 0
        # Measure before self.users(), which empties the list in place.
        task = len(comm_unique)
        users_data = self.users(comm_unique)
        self.bar.set_text(_("Обработка пользователей"))
        for commentator in users_data:
            # Number of comments this user wrote across all fetched posts.
            count = self.comm_list.count(commentator["id"])
            if "deactivated" in commentator:
                commentator["screen_name"] = commentator["deactivated"].upper()
            self.bar.set_value(percents(did, task))
            result.append((count, commentator))
            did += 1
        self.bar.finish()
        return result
    def __call__(self, **kwargs):
        """
        Exporting statistics for commentators
        """
        Stats.__call__(self, mode="Commentators")
# =====--- GUI ---===== #
# The following widgets carry no Python-side behaviour; their look and
# bindings are defined entirely in interface.kv.
class CenteredTextInput(TextInput):
    pass
class IconButton(Button):
    pass
class Tooltip(Label):
    pass
class Date(DatePicker):
    pass
class TooltipButton(ActionButton):
    """
    ActionButton that shows its own caption as a tooltip after the mouse
    has hovered over it for 0.75 s.
    """
    def __init__(self, **kwargs):
        # Bug fix: the original called super(ActionButton, self).__init__,
        # which skips ActionButton's own initializer in the MRO.
        super(TooltipButton, self).__init__(**kwargs)
        # Bug fix: the tooltip used to be a single class-level Tooltip
        # shared by every TooltipButton instance, so buttons could fight
        # over one widget; give each button its own.
        self.tooltip = Tooltip(text="Hello world")
        Window.bind(mouse_pos=self.on_mouse_pos)
    def on_mouse_pos(self, *args):
        # Track the cursor: reposition the tooltip, cancel any pending
        # show, hide a visible one, and re-arm the delayed show when the
        # cursor is over this button.
        if not self.get_root_window():
            return
        pos = args[1]
        self.tooltip.pos = pos
        Clock.unschedule(self.show_tooltip)  # cancel scheduled event since I moved the cursor
        self.close_tooltip()  # close if it's opened
        if self.collide_point(*self.to_widget(*pos)):
            Clock.schedule_once(self.show_tooltip, 0.75)
    def close_tooltip(self, *args):
        # Removing a widget that is not attached is a no-op in Kivy.
        Window.remove_widget(self.tooltip)
    def show_tooltip(self, *args):
        # Mirror the button caption into the tooltip and attach it on top.
        self.tooltip.text = self.text
        Window.add_widget(self.tooltip)
class Update(BoxLayout):
    # "Update available" dialog; widgets and captions bound in interface.kv.
    no = Object
    text_no = _("Нет")
    yes = Object
    text_yes = _("Да")
    version = Object
    upd_text = Object
class Saveto(BoxLayout):
    # Folder chooser dialog for the results directory.
    select = Object
    select_text = _("Выбрать")
    chooser = Object
    def save(self, popup):
        # Apply the chosen folder (or keep the current default when the
        # selection is empty) and close the dialog.
        selection = self.chooser.selection
        results(selection[0] if selection else SAVEDIR)
        popup.dismiss()
class Token(BoxLayout):
    # Token-based login dialog; Login.token_auth reads self.token.text.
    login = Object
    login_text = _("Войти")
    token = Object
    token_hint = _("полный URL, полученный по инструкции выше")
    link = Object
    token_manual = _("1) Откройте [color=3366bb][ref=http://vk.cc/3T1J9A]страницу авторизации[/ref][/color]\n" +
                     "2) Войдите и дайте разрешения приложению\n" +
                     "3) Скопируйте текст из адресной строки\n" +
                     "4) Вставьте его ниже!")
class Login(BoxLayout):
    log_in = Object
    log_in_text = _("Войти")
    by_token = Object
    by_token_text = _("Без пароля")
    login = Object
    login_text = _("Логин:")
    password = Object
    # NOTE(review): "<PASSWORD>" looks like an artefact of automated
    # redaction; the label was presumably "Пароль:" — confirm against VCS
    # before changing the runtime string.
    password_text = _("<PASSWORD>:")
    def token_auth(self, popup):
        # Try to build the API from the pasted token (self.content is set
        # by use_token's Token dialog); keep the dialog open on failure.
        try:
            get_api(access_token=self.content.token.text)
        except vk.exceptions.VkAuthError:
            warning(_("Неверный токен!"))
        else:
            popup.dismiss()
    def use_token(self, parent_popup, force=False):
        # Swap the password dialog for the token-based one.
        parent_popup.dismiss()
        self.content = Token()
        popup = Popup(title=_("Вход по токену"),
                      title_size='16pt',
                      content=self.content,
                      size_hint=(0.8, 0.65))
        if force:
            # Login is mandatory: forbid dismissing by tapping outside.
            popup.auto_dismiss = False
        self.content.link.bind(on_ref_press=Partial(open_url, "http://vk.cc/3T1J9A"))
        self.content.login.bind(on_press=Partial(self.token_auth, popup))
        popup.open()
    def auth(self, popup):
        # Password-based login; keep the dialog open when VK rejects it.
        try:
            login(self.login.text, self.password.text)
        except vk.exceptions.VkAuthError:
            warning(_("Неверный логин или пароль!"))
        else:
            popup.dismiss()
class Account(BoxLayout):
    # Account popup; Main.account binds relogin to re-open the login dialog.
    relogin = Object
    relogin_text = _("Перезайти")
class About(TabbedPanel):
    # "About" dialog with lazily loaded tabs (license text, help page).
    link = Object
    rst = Object
    about_text = _("О программе")
    # NOTE(review): hardcodes "1.0" while __version__ is '1.0.1' — the
    # two likely drifted apart; confirm which should win.
    description = _("[b][size=28]ScadsStats 1.0[/size][/b]\n"
                    "Вычисление активных пользователей на стенах ВКонтакте.[color=3366bb]\n"
                    "[ref=https://vk.com/sysrqtech]Сообщество ВК[/ref][/color]")
    credits_text = _("Благодарности")
    authors = _("Авторы")
    translators = _("Переводчики")
    designers = _("Дизайнеры")
    license_item = _("Лицензия")
    loading = _("Загрузка...")
    help_item = _("Помочь нам")
    help_text = _("Вы перенаправлены на страницу репозитория.\n"
                  "Сделайте программу лучше!")
    def __init__(self, **kwargs):
        super(About, self).__init__(**kwargs)
        self.bind(current_tab=self.on_current_tab)
    def on_current_tab(self, *args):
        # args[1] is the newly selected tab; load its content on demand.
        if args[1].text == _("Лицензия"):
            self.rst.text = open(SCRIPTDIR + "/docs/license.rst").read()
        elif args[1].text == _("Помочь нам"):
            open_url("https://github.com/SysRq-Tech/ScadsStats")
class Main(App, BoxLayout):
bar = Object
bar_text = Object
group_input = Object
from_input = Object
to_input = Object
posts_input = Object
posts_label = _("Посты:")
posts_hint = _("все по умолчанию")
mode = Object
mode_label = _("Режим:")
date_label = _("Дата:")
go = Object
go_text = _("Поехали!")
icon = SCRIPTDIR + "/images/icon.png"
title = "ScadsStats"
saveto_item = _("Сохранять в...")
quit_item = _("Выход")
account_item = _("Аккаунт")
update_item = _("Обновиться")
about_item = _("О ScadsStats")
writers = _("Пишущие")
favorites = _("Лайкаемые")
likers = _("Лайкеры")
discussed = _("Обсуждаемые")
commentators = _("Комментаторы")
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.started = False
def _restore(self):
self.started = False
@staticmethod
def get_user_photo():
"""
:return: 200px user photo
"""
return api.users.get(fields="photo_200")[0]["photo_200"]
@staticmethod
def get_user_name():
"""
:return: user's name and surname for Account menu
"""
data = api.users.get()[0]
return _("Вы авторизованы как [b]{first_name} {last_name}[/b]").format(**data)
@staticmethod
def update_check():
"""
Checking for updates
"""
status = upd_check()
if status is None:
all_ok(_("Вы используете последнюю версию!"), title=_("Нечего делать ;)"))
else:
if not mustdie:
warning(_("Используйте пакетный менеджер для обновления"))
else:
content = Update()
content.version.text = _("Найдено обновление до {}!").format(status) + "\n" + _("Обновиться") + "?"
popup = Popup(title=_("Найдено обновление!"),
title_size='16pt',
content=content,
size_hint=(0.8, 0.7))
content.no.bind(on_press=popup.dismiss)
content.yes.bind(on_press=Partial(upgrade, status, content.upd_text))
popup.open()
@staticmethod
def is_dir(directory, filename):
return os.path.isdir(os.path.join(directory, filename))
def datepicker(self):
self.date = None
content = Date(self.date, self.date_input)
popup = Popup(title=_("Ограничение по дате"),
title_size='16pt',
content=content,
size_hint=(0.7, 0.9))
content.ok.bind(
on_press=Partial(content.set_date, content.from_date.active_date, content.to_date.active_date, popup))
popup.open()
@staticmethod
def saveto():
content = Saveto()
popup = Popup(title=_("Выберите папку"),
title_size='16pt',
content=content,
size_hint=(0.9, 0.9))
content.select.bind(on_press=Partial(content.save, popup))
popup.open()
@staticmethod
def login(force=False, parent=None):
if parent is not None:
parent.dismiss()
content = Login()
popup = Popup(title=_("Вход по паролю"),
title_size='16pt',
content=content,
size_hint=(0.8, 0.55))
if force:
popup.auto_dismiss = False
content.by_token.bind(on_press=Partial(content.use_token, popup, force=force))
content.log_in.bind(on_press=Partial(content.auth, popup))
popup.open()
def account(self):
content = Account()
popup = Popup(title=_("Аккаунт"),
title_size='16pt',
content=content,
size_hint=(0.8, 0.6))
content.relogin.bind(on_press=Partial(self.login, parent=popup))
popup.open()
@staticmethod
def about():
content = About()
popup = Popup(title=_("О ScadsStats"),
title_size='16pt',
content=content,
size_hint=(0.95, 0.95))
content.link.bind(on_ref_press=Partial(open_url, "https://vk.com/sysrqtech"))
popup.open()
def watch(self, bucket):
while True:
if not self.started:
return
try:
exc = bucket.get(block=False)
except queue.Empty:
pass
else:
exc_type, exc_obj, exc_trace = exc
# deal with the exception
warning(str(exc_obj))
self._restore()
return
time.sleep(1)
def start(self):
"""
Gathering statistics
"""
if self.started:
return
self.started = True
group = self.group_input.text
from_date = self.from_input.text
to_date = self.to_input.text
posts = self.posts_input.text
mode = self.mode.text
if not group:
warning(_("Укажите стену"))
self._restore()
return
if not posts:
posts = 0
else:
posts = int(posts)
try:
if mode == _("Пишущие"):
method = Stats(group, Bar(self.bar, self.bar_text),
posts_lim=posts, from_lim=from_date, to_lim=to_date)
elif mode == _("Лайкаемые"):
method = FavoritesStats(group, Bar(self.bar, self.bar_text),
posts_lim=posts, from_lim=from_date, to_lim=to_date)
elif mode == _("Лайкеры"):
method = LikersStats(group, Bar(self.bar, self.bar_text),
posts_lim=posts, from_lim=from_date, to_lim=to_date)
elif mode == _("Обсуждаемые"):
method = DiscussedStats(group, Bar(self.bar, self.bar_text),
posts_lim=posts, from_lim=from_date, to_lim=to_date)
else:
method = CommentatorsStats(group, Bar(self.bar, self.bar_text),
posts_lim=posts, from_lim=from_date, to_lim=to_date)
except Stop as err:
warning(err.args[0])
self._restore()
return
bucket = queue.Queue()
thread = ExcThread(bucket, target=method, after=self._restore).start()
threading.Thread(target=self.watch, args=(bucket,)).start()
def check(self, *args):
"""
Checking for access to VKontakte
"""
try:
if "token.txt" in os.listdir(HOME):
token = open(HOME + "/token.txt").read()
try:
get_api(access_token=token)
except vk.exceptions.VkAuthError:
self.login(force=True)
else:
self.login(force=True)
except requests.exceptions.ConnectionError:
warning(_("Проверьте Ваше интернет-соединение"), kill=True)
    def build(self):
        """
        Scheduling check for access to vk.com
        """
        # Defer the connectivity check until after the UI has been built.
        Clock.schedule_once(self.check, 1)
        return self
def main():
    """Configure the Kivy window and run the application."""
    Window.size = (750, 400)
    for section, key, value in (
            ("graphics", "resizable", 1),
            ("kivy", "exit_on_escape", 0),
            ("input", "mouse", "mouse,disable_multitouch")):
        Config.set(section, key, value)
    try:
        Main().run()
    except TypeError as err:
        # Kivy can raise this spurious TypeError on shutdown; re-raise anything else.
        if err.args[0] != "'NoneType' object is not subscriptable":
            raise


if __name__ == "__main__":
    main()
| StarcoderdataPython |
329027 | # In[]
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
def expand_Size(srcPath, dstPath):
    """Dilate the mask image at srcPath with a 21x21 kernel and save to dstPath."""
    mask = np.asarray(cv2.imread(srcPath), dtype="uint8")
    structuring_element = np.ones((21, 21))
    dilated = cv2.dilate(mask, structuring_element, iterations=1)
    cv2.imwrite(dstPath, dilated)
# Source/destination roots for the segmentation masks to be expanded.
src = "D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180410_191400_Seg_Base/Masks"
dst = "D:/[Data]/[Cardiomegaly]/1_ChestPA_Labeled_Baeksongyi/[PNG]_2_Generated_Data(2k)/Generated_Data_20180410_191400_Seg_Base_Expand_20pixel/Masks"
folders = ["train", "test","validation"]
# NOTE(review): double assignment -- MasksTypes is an unused alias of masks.
masks = MasksTypes = ["Landmark", "Thorax(x)"]
# Mirror the src directory tree under dst, dilating every mask file.
if not os.path.isdir(dst):
    os.mkdir(dst)
for mask in masks :
    src_ = src + "/" + mask
    dst_ = dst + "/" + mask
    if not os.path.isdir(dst_):
        os.mkdir(dst_)
    for folder in folders :
        src__ = src_ + "/" + folder
        dst__ = dst_ + "/" + folder
        if not os.path.isdir(dst__):
            os.mkdir(dst__)
        for file in os.listdir(src__):
            expand_Size(src__ + "/" + file, dst__ + "/" + file)
            print(src__ + "/" + file)
6527169 | <reponame>spascou/kappa
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

import botocore.exceptions

import kappa.awsclient
import kappa.event_source.base
LOG = logging.getLogger(__name__)
class KinesisEventSource(kappa.event_source.base.EventSource):
    """Manages the Kinesis -> Lambda event source mapping for a function."""

    def __init__(self, context, config):
        super(KinesisEventSource, self).__init__(context, config)
        # NOTE: requires ``import kappa.awsclient`` at module level.
        self._lambda = kappa.awsclient.create_client(
            'lambda', context.session)

    def _get_uuid(self, function):
        """Return the UUID of the existing mapping for *function*, or None."""
        uuid = None
        response = self._lambda.call(
            'list_event_source_mappings',
            FunctionName=function.name,
            EventSourceArn=self.arn)
        LOG.debug(response)
        if len(response['EventSourceMappings']) > 0:
            uuid = response['EventSourceMappings'][0]['UUID']
        return uuid

    def add(self, function):
        """Create the event source mapping for *function*."""
        try:
            response = self._lambda.call(
                'create_event_source_mapping',
                FunctionName=function.name,
                EventSourceArn=self.arn,
                BatchSize=self.batch_size,
                MaximumBatchingWindowInSeconds=self.batch_window,
                StartingPosition=self.starting_position,
                Enabled=self.enabled
            )
            LOG.debug(response)
        except Exception:
            LOG.exception('Unable to add event source')

    def enable(self, function):
        """Mark the mapping enabled in config and on AWS."""
        self._config['enabled'] = True
        try:
            response = self._lambda.call(
                'update_event_source_mapping',
                UUID=self._get_uuid(function),
                Enabled=self.enabled
            )
            LOG.debug(response)
        except Exception:
            LOG.exception('Unable to enable event source')

    def disable(self, function):
        """Mark the mapping disabled in config and on AWS."""
        self._config['enabled'] = False
        try:
            # BUG FIX: update_event_source_mapping identifies the mapping by
            # its UUID; the previous code passed only FunctionName, which
            # always failed. Mirror enable() above.
            response = self._lambda.call(
                'update_event_source_mapping',
                UUID=self._get_uuid(function),
                Enabled=self.enabled
            )
            LOG.debug(response)
        except Exception:
            LOG.exception('Unable to disable event source')

    def update(self, function):
        """Push the configured batch settings to the existing mapping."""
        response = None
        uuid = self._get_uuid(function)
        if uuid:
            try:
                # BUG FIX: the UUID was looked up above but never passed to
                # the API call, so the update could not target the mapping.
                response = self._lambda.call(
                    'update_event_source_mapping',
                    UUID=uuid,
                    BatchSize=self.batch_size,
                    MaximumBatchingWindowInSeconds=self.batch_window,
                    Enabled=self.enabled,
                    FunctionName=function.arn)
                LOG.debug(response)
            except Exception:
                LOG.exception('Unable to update event source')

    def remove(self, function):
        """Delete the mapping; return the API response or None."""
        response = None
        uuid = self._get_uuid(function)
        if uuid:
            response = self._lambda.call(
                'delete_event_source_mapping',
                UUID=uuid)
            LOG.debug(response)
        return response

    def status(self, function):
        """Return the mapping description, or None when it does not exist."""
        response = None
        LOG.debug('getting status for event source %s', self.arn)
        uuid = self._get_uuid(function)
        if uuid:
            try:
                response = self._lambda.call(
                    'get_event_source_mapping',
                    UUID=self._get_uuid(function))
                LOG.debug(response)
            except botocore.exceptions.ClientError:
                LOG.debug('event source %s does not exist', self.arn)
                response = None
        else:
            LOG.debug('No UUID for event source %s', self.arn)
        return response
| StarcoderdataPython |
6656940 | <reponame>trantinan2512/Francis<filename>web/apps/configs/migrations/0001_initial.py
# Generated by Django 2.1.3 on 2018-12-22 01:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the TrophyRoomConfig table.
    # Do not edit by hand; regenerate with `manage.py makemigrations` instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='TrophyRoomConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('room_channel_id', models.BigIntegerField(help_text='Put ID of the channel to display trophy list here.')),
                ('trophy_list_message_id', models.BigIntegerField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Trophy Room Config',
                'verbose_name_plural': 'Trophy Room Config',
            },
        ),
    ]
| StarcoderdataPython |
3571165 | from numpy import *
from scipy.interpolate import *
from matplotlib import pyplot as plt
class projectP():
    """Collect user-entered means and fit/plot polynomial trends over them."""

    #def __init__(self):
    ### Any default variables go here, should they be needed

    def regress(self):
        """Read integer means from stdin until a blank entry, then report them.

        BUG FIX: the original looped on ``int(input(...)) != None`` (int()
        never returns None and raises ValueError on empty input) and assigned
        into empty lists by index (IndexError on the first entry). A blank
        line now terminates input.
        """
        data = []
        while True:
            raw = input("Enter means: ")
            if not raw:
                break
            data.append(int(raw))
        # x-axis positions corresponding to each entered mean (the original
        # used the data values themselves as indices, which crashed).
        count = list(range(len(data)))
        print(data)
        # Some predictive linear regression kung foo goes here

    def regScore(self, data, count):
        """Fit 1st/2nd/3rd-degree polynomials of data over count and plot them."""
        p1 = polyfit(count, data, 1) # Linear polyfit? Slope and intercept?
        p2 = polyfit(count, data, 2)
        p3 = polyfit(count, data, 3)
        plt.plot(count, data, 'o')
        plt.plot(count, polyval(p1, count), 'r-')
        plt.plot(count, polyval(p2, count), 'b--')
        #plt.plot(count,polyval(p3,count), 'm:')
        plt.show()
| StarcoderdataPython |
5179976 | import fileinput
from typing import Tuple
import numpy as np
from task3 import Position, direction_actions, EXIT, WALL, WALKABLE, AGENT, MARK
class Agent:
    """An agent walking on a grid board; tracks its position and applies moves."""

    class WallException(Exception):
        # Raised when a move is attempted into a WALL cell.
        pass

    def __init__(self, board: np.ndarray, marking=False):
        # marking: when True, leave MARK instead of WALKABLE on vacated cells.
        self.marking = marking
        self.board = board.copy()
        # Locate the AGENT cell on the board (assumes exactly one -- TODO confirm).
        agent_position = np.where(board == AGENT)
        self.current_position: Position = Position(int(agent_position[0]), int(agent_position[1]))

    def look(self, direction):
        """Return the board value of the cell adjacent in *direction*."""
        row_action, column_action = direction_actions[direction]
        return self.board[self.current_position.row + row_action][self.current_position.column + column_action]

    def move(self, direction, change_own_board=True):
        """Step in *direction*; raises WallException on a wall.

        Entering EXIT only updates the position; the board is left untouched.
        """
        destination: int = self.look(direction)
        if destination == WALKABLE or (not change_own_board and destination == AGENT):
            self.__make_move(direction, change_own_board)
        elif destination == WALL:
            raise Agent.WallException()
        elif destination == EXIT:
            self.__update_current_position(direction)

    def __make_move(self, direction, change_own_board=True):
        # Vacate the current cell (optionally), occupy the next, update position.
        row_action, column_action = direction_actions[direction]
        if change_own_board:
            self.board[self.current_position.row][self.current_position.column] = MARK if self.marking else WALKABLE
        self.board[self.current_position.row + row_action][self.current_position.column + column_action] = AGENT
        self.current_position: Position = \
            Position(self.current_position.row + row_action, self.current_position.column + column_action)

    def __update_current_position(self, direction):
        # Shift current_position one cell in *direction* without touching the board.
        row_action, column_action = direction_actions[direction]
        self.current_position = \
            Position(self.current_position.row + row_action, self.current_position.column + column_action)
class AgentWalk:
    """Holds a maze board and the time budget parsed from a file or stdin."""

    def __init__(self, board: np.ndarray, max_time: int):
        self.max_time = max_time
        self.board = board

    @classmethod
    def from_file(cls, filename: str):
        """Build an AgentWalk from the named input file."""
        parsed_board, time_limit = AgentWalk.read_input(filename)
        return cls(parsed_board, time_limit)

    @classmethod
    def from_stdin(cls):
        """Build an AgentWalk from standard input."""
        parsed_board, time_limit = AgentWalk.read_input()
        return cls(parsed_board, time_limit)

    @staticmethod
    def read_input(filename=None) -> Tuple[np.ndarray, int]:
        """Parse 'max_time rows columns' then a digit grid; return (board, max_time)."""
        source = open(filename, 'r') if filename is not None else fileinput.input()
        with source as file:
            header = file.readline()
            max_time, rows, columns = (int(token) for token in header.split())
            board = np.empty((rows, columns))
            for row_index, line in enumerate(file):
                for col_index, digit in enumerate(line.rstrip()):
                    board[row_index][col_index] = int(digit)
            return board, max_time

    def print_board_matrix(self):
        """Render the board, one row per line."""
        return '\n'.join(f'{row}' for row in self.board)
| StarcoderdataPython |
385995 | from multiprocessing import Pool
import h5py
import numpy as np
class DataShapeError(Exception):
"""Basic exception for if loaded data cannot be reshaped into the required shape"""
pass
class Data_from_HDF5():
    """Loads train/test arrays from HDF5 file lists and validates their shape."""

    def __init__(self, training_data_file_list, testing_data_file_list, data_shape):
        self.required_data_shape = data_shape
        self.train = load_data_from_list(training_data_file_list)
        self.test = load_data_from_list(testing_data_file_list)
        self.correct_shape()
        self.any_nan()

    def any_nan(self):
        """Assert that neither loaded array contains NaNs."""
        for array in (self.train, self.test):
            if array is not None:
                assert not np.any(np.isnan(array))

    def correct_shape(self):
        """Reshape both arrays to the required per-sample shape."""
        labelled = [('training', self.train), ('testing', self.test)]
        self.train, self.test = (
            reshape_data_array(name, array, self.required_data_shape)
            for name, array in labelled)
def reshape_data_array(name, data, required_image_shape):
    """Reshape *data* to (n_images, *required_image_shape); None passes through.

    Raises DataShapeError when the per-image payload does not fit the
    requested shape.
    """
    if data is None:
        return None
    image_number = data.shape[0]
    try:
        return data.reshape((image_number, *required_image_shape))
    except ValueError:
        message = 'loaded {} data, of shape {}, cannot be reshaped into the required shape {}'.format(
            name, data.shape[1:], required_image_shape)
        raise DataShapeError(message)
def load_data_from_list(data_file_list):
    """Load and concatenate arrays from every file in the list (in parallel).

    Returns None for an empty or falsy list.
    """
    if not data_file_list:
        return None
    with Pool() as pool:
        arrays = list(pool.map(load_data_from_file, data_file_list))
    return np.concatenate(arrays)
def load_data_from_file(data_file):
    """Read the 'data' dataset from an HDF5 file as a float32 array."""
    with h5py.File(data_file, 'r') as handle:
        return np.array(handle['data']).astype('float32')
| StarcoderdataPython |
9623253 | <reponame>joewen85/mycmdb
# Generated by Django 2.2.17 on 2021-03-18 06:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes deploy_record.result a nullable TextField.
    dependencies = [
        ('device', '0034_auto_20201009_1655'),
    ]
    operations = [
        migrations.AlterField(
            model_name='deploy_record',
            name='result',
            field=models.TextField(null=True, verbose_name='执行任务结果'),
        ),
    ]
| StarcoderdataPython |
1661778 | import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.parent.absolute()))
from hybrid.sites import SiteInfo, flatirons_site
from hybrid.hybrid_simulation import HybridSimulation
from hybrid.dispatch.plot_tools import plot_battery_output, plot_battery_dispatch_error, plot_generation_profile
from hybrid.keys import set_developer_nrel_gov_key
import json
# Developer API key for NREL resource downloads (intentionally blank here).
set_developer_nrel_gov_key('')
# ADD CUSTOM WIND MODULE
# download FLORIS at www.github.com/NREL/FLORIS
# pip install -e floris
with open("../../../floris/examples/example_input.json", 'r') as f:
    floris_config = json.load(f)
# properties from floris
nTurbs = len(floris_config['farm']['properties']['layout_x'])
solar_size_mw = 50 #20
wind_size_mw = 50 #80
# NOTE(review): battery_capacity_mwh is defined but the technologies dict
# below hard-codes 20 MWh / 5 MW instead -- confirm which is intended.
battery_capacity_mwh = 200 #30
interconnection_size_mw = 50 #100
technologies = {'pv': {
                    'system_capacity_kw': solar_size_mw * 1000,
                },
                'wind': {
                    'num_turbines': 25,
                    'turbine_rating_kw': 2000,
                    'model_name': 'floris',
                    'timestep': [0,8759],
                    'floris_config': floris_config # if not specified, use default SAM models
                },
                'battery': {
                    'system_capacity_kwh': 20 * 1000,
                    'system_capacity_kw': 5 * 1000
                },
                'grid': interconnection_size_mw} # TODO: why is this specified twice?
# Get resource
lat = flatirons_site['lat']
lon = flatirons_site['lon']
prices_file = '../../resource_files/grid/pricing-data-2015-IronMtn-002_factors.csv'
site = SiteInfo(flatirons_site, grid_resource_file=prices_file)
# Create model
hybrid_plant = HybridSimulation(technologies, site, interconnect_kw=interconnection_size_mw * 1000)
hybrid_plant.pv.system_capacity_kw = solar_size_mw * 1000
hybrid_plant.pv.dc_degradation = [0] * 25
hybrid_plant.wind.system_capacity_by_num_turbines(wind_size_mw * 1000)
hybrid_plant.ppa_price = 0.06  # [$/kWh]
# Run a 25-year simulation before plotting any dispatch results.
hybrid_plant.simulate(25)
file = 'figures/'
tag = 'simple2_'
#plot_battery_dispatch_error(hybrid_plant, plot_filename=file+tag+'battery_dispatch_error.png')
'''
for d in range(0, 360, 5):
    plot_battery_output(hybrid_plant, start_day=d, plot_filename=file+tag+'day'+str(d)+'_battery_gen.png')
    plot_generation_profile(hybrid_plant, start_day=d, plot_filename=file+tag+'day'+str(d)+'_system_gen.png')
'''
plot_battery_dispatch_error(hybrid_plant)
plot_battery_output(hybrid_plant)
plot_generation_profile(hybrid_plant)
#plot_battery_dispatch_error(hybrid_plant, plot_filename=tag+'battery_dispatch_error.png')
# Save the outputs
annual_energies = hybrid_plant.annual_energies
npvs = hybrid_plant.net_present_values
print(annual_energies)
print(npvs)
6515470 | <filename>chatterbot/output/terminal.py<gh_stars>1-10
from .output_adapter import OutputAdapter
class TerminalAdapter(OutputAdapter):
    """
    A simple adapter that allows ChatterBot to
    communicate through the terminal.
    """

    def process_response(self, statement, session_id=None):
        """
        Print the response to the user's input.
        """
        response_text = statement.text
        print(response_text)
        return response_text
| StarcoderdataPython |
96991 | """
.. module:: lidar_tour
:platform: Windows
:synopsis: Example starting in west_coast_usa with a vehicle that has a
Lidar attached and drives around the environment using the
builtin AI. Lidar data is displayed using the OpenGL-based
Lidar visualiser.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import sys
from time import sleep
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from beamngpy import BeamNGpy, Scenario, Vehicle, setup_logging
from beamngpy.sensors import Lidar
from beamngpy.visualiser import LidarVisualiser
SIZE = 1024
def lidar_resize(width, height):
    """GLUT reshape callback: reset the viewport, guarding against a zero height."""
    viewport_height = 1 if height == 0 else height
    glViewport(0, 0, width, viewport_height)
def open_window(width, height):
    """Create a double-buffered RGBA GLUT window for the Lidar view."""
    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE)
    glutInitWindowSize(width, height)
    window_handle = glutCreateWindow(b'Lidar Tour')
    lidar_resize(width, height)
    return window_handle
def main():
    """Launch BeamNG, drive the AI around west_coast_usa, and visualise Lidar."""
    setup_logging()
    # Hard-coded local BeamNG.research install path -- adjust per machine.
    beamng = BeamNGpy('localhost', 64256, home='C:/Users/merie/Documents/BeamNG.research.v1.7.0.1')
    #beamng.change_setting('research', True)
    scenario = Scenario('west_coast_usa', 'lidar_tour',
                        description='Tour through the west coast gathering '
                                    'Lidar data')
    vehicle = Vehicle('ego_vehicle', model='etk800', licence='LIDAR')
    lidar = Lidar()
    vehicle.attach_sensor('lidar', lidar)
    #beamng.open_lidar('lidar', vehicle, 'shmem', 8000)
    scenario.add_vehicle(vehicle, pos=(-717.121, 101, 118.675), rot=None, rot_quat=(0, 0, 0.3826834, 0.9238795))
    scenario.make(beamng)
    bng = beamng.open(launch=True)
    #bng.open_lidar('lidar', vehicle, 'shmem', 8000)
    #lidar.connect(bng, vehicle)
    try:
        bng.load_scenario(scenario) # this is where the error happens
        window = open_window(SIZE, SIZE)
        lidar_vis = LidarVisualiser(Lidar.max_points)
        lidar_vis.open(SIZE, SIZE)
        # Deterministic, paused simulation stepped manually from the GL idle loop.
        bng.set_steps_per_second(60)
        bng.set_deterministic()
        bng.hide_hud()
        bng.start_scenario()
        bng.pause()
        vehicle.ai_set_mode('span')
        def update():
            # Poll sensors, advance the sim 3 steps, and redraw the point cloud.
            sensors = bng.poll_sensors(vehicle)
            points = sensors['lidar']['points']
            bng.step(3, wait=False)
            lidar_vis.update_points(points, vehicle.state)
            glutPostRedisplay()
        glutReshapeFunc(lidar_resize)
        glutIdleFunc(update)
        glutMainLoop()
    except Exception as e:
        # NOTE(review): broad catch only prints; cleanup happens in finally.
        print(e)
    finally:
        bng.close()
if __name__ == '__main__':
main()
| StarcoderdataPython |
1653042 | <gh_stars>1-10
import attr
# For Attrib
# Metadata keys attached to attr.ib() fields to drive proto/db conversion.
IS_PROTO_FIELD = '__SAN11PK_PLATFORM__IS_PROTO_FIELD'
PROTO_PATH = '__SAN11PK_PLATFORM__PROTO_PATH'
PROTO_CONVERTER = '__SAN11PK_PLATFORM__PROTO_CONVERTER'
# BUG FIX: this previously reused the IS_PROTO_FIELD value (copy-paste),
# which made the proto-field and db-field metadata markers collide.
IS_DB_FIELD = '__SAN11PK_PLATFORM__IS_DB_FIELD'
DB_PATH = '__SAN11PK_PLATFORM__DB_PATH'
DB_CONVERTER = '__SAN11PK_PLATFORM__DB_CONVERTER'
REPEATED = '__SAN11PK_PLATFORM__REPEATED'
def is_repeated(attribute: attr.Attribute) -> bool:
    """Return the REPEATED metadata flag stored on the given attr attribute."""
    metadata = attribute.metadata
    return metadata[REPEATED]
1796575 | <filename>database/models.py
# Database Models
from datetime import datetime
from flask_bcrypt import generate_password_hash, check_password_hash
from database import db
class Comment(db.EmbeddedDocument):
    """Embedded comment on a card: text, author, and creation time."""
    content = db.StringField(required=True)
    sender = db.ReferenceField('User')
    created_date = db.DateTimeField(required=True, default=datetime.now)
    class Meta:
        collection_name = "comment"
class Card(db.Document):
    """A task card: content, schedule, workflow status, assignees, and comments."""
    title = db.StringField(required=True)
    content = db.StringField()
    start_date = db.DateTimeField()
    end_date = db.DateTimeField()
    # Workflow states a card moves through.
    status = db.StringField(required=True, default='received', choices={'received', 'started', 'checked', 'completed'})
    assigned_to = db.ListField(db.ReferenceField('User'))
    created_by = db.ReferenceField('User')
    project = db.ReferenceField('Project')
    created_date = db.DateTimeField(required=True, default=datetime.now)
    completion_date = db.DateTimeField()
    comments = db.ListField(db.EmbeddedDocumentField('Comment'))
    class Meta:
        collection_name = "card"
class Project(db.Document):
    """A project grouping cards, owned by the creating user."""
    title = db.StringField(required=True, unique=True)
    status = db.StringField(required=True, default='active', choices={'active', 'archived'})
    created_by = db.ReferenceField('User')
    created_date = db.DateTimeField(required=True, default=datetime.now)
    cards = db.ListField(db.ReferenceField('Card'), reverse_delete_rule=db.PULL)
    class Meta:
        collection_name = "project"
        strict = False
    def find_all(self):
        """Return every Project document.

        BUG FIX: the previous body read ``self._repo``, which is never defined
        anywhere in this module (guaranteed AttributeError); use the standard
        mongoengine queryset instead.
        """
        return list(Project.objects)
class User(db.Document):
    """Application user; password is stored as a bcrypt hash, never plaintext."""
    email = db.EmailField(required=True, unique=True)
    password = db.StringField(required=True, min_length=6)
    projects = db.ListField(db.ReferenceField('Project'), reverse_delete_rule=db.PULL)
    cards = db.ListField(db.ReferenceField('Card'), reverse_delete_rule=db.PULL)
    assignments = db.ListField(db.ReferenceField('Card'), reverse_delete_rule=db.PULL)
    class Meta:
        collection_name = "user"
    def hash_password(self):
        # Replace the plaintext password with its bcrypt hash (call before save).
        self.password = generate_password_hash(self.password).decode('utf8')
    def check_password(self, password):
        # Compare a candidate plaintext password against the stored hash.
        return check_password_hash(self.password, password)
# Cascade deletes: removing a User also removes the projects and cards they
# created; removing a Project removes its cards.
User.register_delete_rule(Project, 'created_by', db.CASCADE)
User.register_delete_rule(Card, 'created_by', db.CASCADE)
Project.register_delete_rule(Card, 'project', db.CASCADE)
| StarcoderdataPython |
6657559 | <gh_stars>0
# Test for Exercise 17: Temperature Converter
import pytest
from exercises.exercise18 import temperature_converter
# Each case is (target unit, input temperature, expected (unit name, value)).
@pytest.mark.parametrize("convert, temperature, result",
                         [("C", 32, ("Celsius", 0)), ("C", 77, ("Celsius", 25)),
                          ("C", 95, ("Celsius", 35)), ("F", 30, ("Fahrenheit", 86)),
                          ("F", 35, ("Fahrenheit", 95)), ("F", 40, ("Fahrenheit", 104))])
def test_temperature_converter(convert, temperature, result):
    """temperature_converter returns a (unit name, converted value) tuple."""
    assert temperature_converter(convert, temperature) == result
| StarcoderdataPython |
3386594 | <filename>src/models/UserModel.py
# -*- coding: utf-8 -*-
# src/models/UserModel.py
"""
...Web Flask com autorização JWT (Jason web token authorization)
------------------------------------------------------------------------
Modelo de usuário
------------------------------------------------------------------------
URLs: https://codeburst.io/jwt-authorization-in-flask-c63c1acf4eeb
https://medium.com/@dushan14/create-a-web-application-with-python-flask-postgresql-and-deploy-on-heroku-243d548335cc
https://github.com/oleg-agapov/flask-jwt-auth
https://www.codementor.io/olawalealadeusi896/restful-api-with-python-flask-framework-and-postgres-db-part-1-kbrwbygx5
Modelos que determinam as estruturas lógicas de um banco de dados. Simplificando, determina como as tabelas
ficariam no banco de dados. Os modelos definem como os registros podem ser manipulados ou recuperados no
banco de dados.
"""
from marshmallow import fields, Schema
import datetime
from . import db, bcrypt
from .EntityModel import EntitySchema
class UserModel(db.Model):
    """
    User Model: SQLAlchemy mapping for the 'users' table.
    Passwords are stored as bcrypt hashes, never as plaintext.
    """

    # table name
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    email = db.Column(db.String(128), unique=True, nullable=False)
    password = db.Column(db.String(128), nullable=False)
    role = db.Column(db.String(128), nullable=False)
    deleted = db.Column(db.Boolean, default=False, nullable=True)
    created_at = db.Column(db.DateTime)
    modified_at = db.Column(db.DateTime)
    entities = db.relationship('EntityModel', backref='entities', lazy=True)

    def __init__(self, data):
        """
        Class constructor: populate fields from a dict of user data.
        """
        self.name = data.get('name')
        self.email = data.get('email')
        # BUG FIX: the hash helper call had been mangled; always store the
        # bcrypt hash of the supplied password.
        self.password = self.__generate_hash(data.get('password'))
        self.role = data.get('role')
        self.deleted = data.get('deleted')
        self.created_at = datetime.datetime.utcnow()
        self.modified_at = datetime.datetime.utcnow()

    def save(self):
        """Persist this user to the database."""
        db.session.add(self)
        db.session.commit()

    def update(self, data):
        """Update fields from *data* and commit.

        BUG FIX: the previous version hashed an undefined name (``value``,
        NameError) and then overwrote the hash with the plaintext password via
        the unconditional setattr. Passwords are now hashed exactly once.
        """
        for key, item in data.items():
            if key == 'password':
                self.password = self.__generate_hash(item)
            else:
                setattr(self, key, item)
        self.modified_at = datetime.datetime.utcnow()
        db.session.commit()

    def delete(self):
        """Remove this user record from the database."""
        db.session.delete(self)
        db.session.commit()

    @staticmethod
    def get_all_users():
        """Return every user record (including soft-deleted ones)."""
        return UserModel.query.all()

    @staticmethod
    def get_one_user(id):
        """Return the non-deleted user with the given primary key, or None."""
        return UserModel.query.filter_by(id=id, deleted=False).first()

    @staticmethod
    def get_user_by_email(value):
        """Return the non-deleted user with the given email, or None."""
        return UserModel.query.filter_by(email=value, deleted=False).first()

    def __generate_hash(self, password):
        # Hash the password with bcrypt before storing it.
        return bcrypt.generate_password_hash(password, rounds=10).decode("utf-8")

    def check_hash(self, password):
        # Validate a candidate password against the stored hash (login).
        return bcrypt.check_password_hash(self.password, password)

    def __repr__(self):
        # BUG FIX: was named __repr (missing trailing underscores), so Python
        # never used it as the repr hook.
        return '<id {}>'.format(self.id)
class UserSchema(Schema):
    """Marshmallow (de)serialization schema for UserModel."""
    id = fields.Int(dump_only=True)
    name = fields.Str(required=True)
    email = fields.Email(required=True)
    # load_only: the password is accepted on input but never serialized out.
    password = fields.Str(required=True, load_only=True)
    role = fields.Str(required=True)
    deleted= fields.Boolean(required=False)
    created_at = fields.DateTime(dump_only=True)
    modified_at = fields.DateTime(dump_only=True)
    entities = fields.Nested(EntitySchema, many=True)
| StarcoderdataPython |
6605342 | # List GKE nodes
# Official GCP SDK (Python) Documentation: https://googleapis.github.io/google-api-python-client/docs/dyn/
import json
import sys
import argparse
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from google.cloud import resource_manager
client = resource_manager.Client()
credentials = GoogleCredentials.get_application_default()
service = discovery.build('container', 'v1', credentials=credentials)
# Filter of Projects that will be scanned
parser_args = argparse.ArgumentParser(description='Define the projetc_id filter.'
                'if empity will looking for all the active project_id that the credential have access')
parser_args.add_argument('--project')
project_Filter = parser_args.parse_args()
if project_Filter.project is None:
    env_filter = {'lifecycleState': 'ACTIVE' }
else:
    env_filter = {'projectId': project_Filter.project ,'lifecycleState': 'ACTIVE' }
# Print csv Header
print ('project_id; project_name;cluster_name;node_name;node_version;status;autopilot;image_type;',
       'preemptible;machineType;diskSizeGb;diskType;autoscaling;minNodeCount;maxNodeCount;autoUpgrade;autoRepair;',
       'maxPodsPerNode;podIpv4CidrSize;locations')
# zone '-' requests clusters from all zones in one list call.
zone='-'
for project in client.list_projects(env_filter):
    req = service.projects().zones().clusters().list(projectId=project.project_id, zone=zone)
    resp = req.execute()
    try:
        for cluster in resp['clusters']:
            for node in cluster['nodePools']:
                # One semicolon-separated row per node pool; the .get() chains
                # tolerate keys that are absent for some cluster types.
                print(
                    project.project_id, ';',
                    project.name, ';',
                    cluster.get('name'),';',
                    node.get('name'),';',
                    node.get('version'),';',
                    node.get('status'),';',
                    node.get('autopilot'),';',
                    node.get('config').get('imageType'),';',
                    node.get('config').get('preemptible'),';',
                    node.get('config').get('machineType'),';',
                    node.get('config').get('diskSizeGb'),';',
                    node.get('config').get('diskType'),';',
                    node.get('autoscaling',{}).get('enabled',{}),';',
                    node.get('autoscaling',{}).get('minNodeCount',{}),';',
                    node.get('autoscaling',{}).get('maxNodeCount',{}),';',
                    node.get('management',{}).get('autoUpgrade',{}),';',
                    node.get('management',{}).get('autoRepair',{}),';',
                    node.get('maxPodsConstraint',{}).get('maxPodsPerNode',{}),';',
                    node.get('podIpv4CidrSize'),';',
                    node.get('locations')
                )
    # Projects without clusters have no 'clusters' key -- skip them.
    except KeyError: pass
3523509 | <reponame>hectorbenitez/flask-workshop<filename>app/routes.py
from flask import render_template
from app import app
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with a stub user and the event list."""
    current_user = {'username': 'Hector'}
    upcoming_events = [
        {'name': 'Workshop Python', 'date': 'May 10'},
        {'name': 'Captura la bandera', 'date': 'May 10, 11'},
    ]
    return render_template('index.html', user=current_user, title='UPSLP',
                           events=upcoming_events)
5191428 | import itertools, functools
n = int(input())
sectors = [n] + [int(input()) for _ in range(0, n - 1)]
def distances(items):
    """For each position in the middle copy of a tripled layout (skipping the
    first), return the distance to the nearest occurrence of value+1."""
    length = len(items)
    tripled = items + items + items
    result = []
    for index in range(length + 1, length * 2):
        target = tripled[index] + 1
        hops = [abs(pos - index)
                for pos, value in enumerate(tripled) if value == target]
        result.append(min(hops))
    return result
print(sum(map(pow, distances(sectors), itertools.repeat(2)))) | StarcoderdataPython |
385159 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=protected-access
from __future__ import (division, absolute_import, print_function,
unicode_literals)
class Car:
    """Demo class exercising class attributes, name mangling, and properties."""

    wheels = 0  # class attribute shared by every instance

    def __init__(self, color, model, year):
        self.color = color
        self.model = model
        self.year = year
        self.__cupholders = 6  # name-mangled to _Car__cupholders
        self._voltage = 12     # backing field for the voltage property

    @property
    def voltage(self):
        """Battery voltage; writing or deleting it prints a warning first."""
        return self._voltage

    @voltage.setter
    def voltage(self, volts):
        print("Warning: this can cause problems!")
        self._voltage = volts

    @voltage.deleter
    def voltage(self):
        print("Warning: the radio will stop working!")
        del self._voltage
def main():
    """Demonstrate class attributes, name mangling, and the voltage property."""
    my_car = Car("yellow", "beetle", 1967)
    print(f"My car is {my_car.color}")
    # Instance attribute shadows the class attribute for this object only.
    my_car.wheels = 5
    print(f"Wheels: {my_car.wheels}")
    my_other_car = Car("red", "corvette", "1999")
    print(f"My other car is {my_other_car.color}")
    # Change the class variable value
    Car.wheels = 4
    print(f"My car has {my_car.wheels} wheels")
    print(f"My other car has {my_other_car.wheels} wheels")
    # Paint the car
    my_car.color = "red"
    print(f"It was built in {my_car.year}")
    my_car.year = 1966
    print(f"It was built in {my_car.year}")
    # Access the "private" attribute via its name-mangled form.
    print(f"It has {my_car._Car__cupholders} cupholders")
    # print(f"It has {my_car.__cupholders} cupholders.")
    # Delete year
    del my_car.year
    # print(f"It was built in {my_car.year}")
    # Electric car
    print(f"My car uses {my_car.voltage} volts")
    my_car.voltage = 6
    print(f"My car now uses {my_car.voltage} volts")
    del my_car.voltage
| StarcoderdataPython |
6414740 | <reponame>MGPoirot/GPEX
from os.path import basename
import os
import gpxpy
import gpxpy.gpx
from scipy.stats import skewtest
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
from scipy.signal import savgol_filter
def read_gpx(path: str):
    """Parse a GPX file and return its track points with smoothed speeds (km/h).

    NOTE(review): only the points of the last segment of the last track are
    kept, because ``points`` is rebound on every loop iteration -- confirm
    single-segment files are the only expected input.
    """
    # Read the data
    gpx_file = open(path, 'r')
    gpx = gpxpy.parse(gpx_file)
    for track in gpx.tracks:
        for segment in track.segments:
            points = [p for p in segment.points]
    speeds = []
    for i in range(1, len(points)):
        # speed_between returns m/s; 3.6 converts to km/h.
        speeds.append(3.6 * points[i].speed_between(points[i - 1]))
    # Duplicate the first speed so the list lines up one-to-one with points.
    speeds = [speeds[0]] + speeds
    # Light linear smoothing; assumes at least 11 points -- TODO confirm.
    speeds = savgol_filter(speeds, 11, 1)
    for i in range(len(points)):
        points[i].speed = speeds[i]
    return points
def get_map_extent(points):
    """Return [lon_min, lon_max, lat_min, lat_max] padded by a 10% margin."""
    lons = [p.longitude for p in points]
    lats = [p.latitude for p in points]
    lon_lo, lon_hi = min(lons), max(lons)
    lat_lo, lat_hi = min(lats), max(lats)
    margin = 0.1
    pad_lon = margin * (lon_hi - lon_lo)
    pad_lat = margin * (lat_hi - lat_lo)
    return [lon_lo - pad_lon, lon_hi + pad_lon,
            lat_lo - pad_lat, lat_hi + pad_lat]
def threshold(points):
    """Split *points* into (moving, stationary) by an automatic speed threshold.

    Scans candidate thresholds between 6 km/h and the median speed and keeps
    the one whose above-threshold speeds are most normally distributed
    (maximum skewtest p-value).

    BUG FIX: the original read the module-level global ``speeds`` instead of
    deriving the speeds from its *points* argument, so it silently depended
    on script state.
    """
    speeds = [p.speed for p in points]
    # Fit the data to increase normality
    result = []
    thresholds = np.linspace(6, np.median(speeds), 100)
    for t in thresholds:
        filtered_speeds = [s for s in speeds if s > t]
        _, p = skewtest(filtered_speeds)
        result.append(p)
    optim = thresholds[result.index(max(result))]
    points_in = [p for p in points if p.speed > optim]
    points_out = [p for p in points if p.speed <= optim]
    return points_in, points_out
def histplot(points_in, points_out, cmap, clim):
    """Histogram of moving speeds (colormapped) over stationary speeds (red).

    BUG FIX: the share shown in the title previously divided by the
    module-level global ``points``; the total is now derived from the two
    arguments (threshold() partitions the points, so the sum is identical).
    """
    cmin, cmax = clim
    speeds_in = [p.speed for p in points_in]
    speeds_out = [p.speed for p in points_out]
    total_points = len(points_in) + len(points_out)
    bins = np.linspace(0, cmax * 1.5, 100)
    h = plt.hist(speeds_in, bins=bins)
    # Recolor each bar according to its speed on the shared colormap scale.
    for i in range(len(bins) - 1):
        rel_col = (bins[i] - cmin) / (cmax - cmin) * 255
        rgba = cmap(int(rel_col))
        h[2].patches[i].set_color(rgba)
    plt.hist(speeds_out, bins=bins, color='r')
    plt.title(str(np.round(np.average(speeds_in), 1)) + r' km/h avg '
              + '({:.0%})'.format(len(points_in) / total_points))
    plt.xlim([0, max(bins)])
    plt.yticks([])
def scatterplot(points_in, points_out, cmap, clim):
    """Speed-vs-time scatter: moving points colormapped, stationary points red,
    smoothed moving speed overlaid in black.

    BUG FIX: previously depended on the module-level globals ``smooth_speeds``
    and ``points``; both are now derived from the arguments using the same
    computations the calling script performed globally.
    """
    cmin, cmax = clim
    speeds_in = [p.speed for p in points_in]
    speeds_out = [p.speed for p in points_out]
    # Same smoothing the script applied; assumes >= 401 moving points -- TODO confirm.
    smooth_speeds = savgol_filter(speeds_in, 401, 3)
    all_points = points_in + points_out
    plt.scatter([p.time.timestamp() for p in points_in], speeds_in, c=speeds_in,
                s=1, cmap=cmap, vmin=cmin, vmax=cmax)
    plt.scatter([p.time.timestamp() for p in points_out], speeds_out, s=1, c='r')
    plt.scatter([p.time.timestamp() for p in points_in], smooth_speeds, color='k', s=1)
    plt.xlim([min(p.time.timestamp() for p in all_points),
              max(p.time.timestamp() for p in all_points)])
    plt.ylim([0, cmax * 1.5])
    plt.xticks([])
    plt.ylabel('speed (km/h)')
def mapplot(points_in, points_out, cmap, clim):
    """Plot the track on a Stamen terrain map; moving points colormapped,
    stationary points red.

    BUG FIX: the map extent previously used the module-level global
    ``points``; it is now computed from the two arguments (same min/max,
    since threshold() partitions the original point list).
    """
    cmin, cmax = clim
    speeds_in = [p.speed for p in points_in]
    stamen_terrain = cimgt.Stamen('terrain-background')
    ax = plt.subplot(1, 2, 2, projection=stamen_terrain.crs)
    extent = get_map_extent(points_in + points_out)
    ax.set_extent(extent)
    area = (extent[1] - extent[0]) * (extent[3] - extent[2])
    # Tile zoom level: smaller visible area -> more detail.
    detail = int(11 + np.floor(np.abs(np.log10(area / 5))))
    ax.add_image(stamen_terrain, detail)
    plt.scatter([p.longitude for p in points_in], [p.latitude for p in points_in],
                c=speeds_in, s=15, transform=ccrs.PlateCarree(), cmap=cmap,
                vmin=cmin, vmax=cmax)
    if len(points_out):
        plt.scatter([p.longitude for p in points_out], [p.latitude for p in points_out],
                    c='r', s=3, transform=ccrs.PlateCarree())
# Earlier fname assignments are dead -- only the last one takes effect.
fname = r'C:\Users\mgpoirot\Downloads\Lunch_Ride.gpx'
fname = r"C:\Users\mgpoirot\Downloads\Evening_Ride.gpx"
fname = r"C:\Users\mgpoirot\Downloads\Lunch_Ride(1).gpx"
fname = r"C:\Users\mgpoirot\Downloads\Morning_Ride.gpx"
fname = r"C:\Users\mgpoirot\Downloads\Night_Run.gpx"
fname = r"C:\Users\mgpoirot\Downloads\Morning_Ride(1).gpx"
#fname = r"C:\Users\mgpoirot\Downloads\Morning_Run.gpx"
#fname = r"C:\Users\mgpoirot\Downloads\Lunch_Run.gpx"
#fname = r"C:\Users\mgpoirot\Downloads\Afternoon_Ride(1).gpx"
fname = r"D:\repositories\GPEX\Afternoon_Ride.gpx"
points = read_gpx(fname)
speeds = [p.speed for p in points]
# Extract information into usable formats
points_in, points_out = threshold(points)
cmap = matplotlib.cm.get_cmap('viridis')
# Smooth only the "moving" speeds (window of 401 samples, cubic fit).
smooth_speeds = savgol_filter([p.speed for p in points_in], 401, 3)
clim = min(smooth_speeds), max(smooth_speeds)
# Plot
plt.figure()
plt.subplot(2, 2, 1)
histplot(points_in, points_out, cmap, clim)
plt.subplot(2, 2, 3)
scatterplot(points_in, points_out, cmap, clim)
mapplot(points_in, points_out, cmap, clim)
plt.suptitle(basename(fname))
# Build a unique output name: append '', then 1, 2, ... until unused.
figure_name = fname.split(os.extsep)[0] + '{}.png'
extension = ''
if os.path.isfile(figure_name.format(extension)):
    extension = 1
    while os.path.isfile(figure_name.format(str(extension))):
        extension += 1
plt.savefig(figure_name.format(str(extension)))
plt.show()
do_save = input('Do you want to save the figure? ')
# NOTE(review): substring test -- an empty answer is "in" 'Yesyes' and keeps
# the file, while any non-matching answer deletes it. Confirm intended.
if do_save not in 'Yesyes':
    os.remove(figure_name.format(str(extension)))
3544454 | import json
import uuid
import logging
import pkg_resources
from itertools import count
from pathlib import Path
from urllib.parse import urlencode, urljoin
from banal import ensure_dict, ensure_list
from requests import RequestException, Session
from requests_toolbelt import MultipartEncoder # type: ignore
from typing import Dict, Iterable, Iterator, List, Optional
from alephclient import settings
from alephclient.errors import AlephException
from alephclient.util import backoff, prop_push
log = logging.getLogger(__name__)
MIME = "application/octet-stream"
VERSION = pkg_resources.get_distribution("alephclient").version
class APIResultSet(object):
    """Iterator over a paginated Aleph API listing.

    Fetches the first page eagerly on construction and transparently
    follows the server-provided ``next`` link while iterating.
    """
    def __init__(self, api: "AlephAPI", url: str):
        self.api = api
        self.url = url
        self.current = 0  # absolute position across all pages seen so far
        self.result = self.api._request("GET", self.url)
    def __iter__(self):
        return self
    def __next__(self):
        # Walked past the current page's limit: fetch the next page, if any.
        if self.index >= self.result.get("limit"):
            next_url = self.result.get("next")
            if next_url is None:
                raise StopIteration
            self.result = self.api._request("GET", next_url)
        try:
            item = self.result.get("results", [])[self.index]
        except IndexError:
            # A short final page: fewer results than the advertised limit.
            raise StopIteration
        self.current += 1
        return self._patch(item)
    next = __next__  # Python 2 iterator-protocol alias
    def _patch(self, item):
        # Hook for subclasses to enrich each item before it is yielded.
        return item
    @property
    def index(self):
        # Position within the current page: absolute position minus the
        # page's offset as reported by the server.
        return self.current - self.result.get("offset")
    def __len__(self):
        return self.result.get("total")
    def __repr__(self):
        return "<APIResultSet(%r, %r)>" % (self.url, len(self))
class EntityResultSet(APIResultSet):
    """Paginated result set whose items are entities, enriched on access."""

    def __init__(self, api: "AlephAPI", url: str, publisher: bool):
        super().__init__(api, url)
        self.publisher = publisher

    def _patch(self, item):
        # Add alephUrl (and, optionally, publisher context) to each entity.
        return self.api._patch_entity(item, self.publisher)
class EntitySetItemsResultSet(APIResultSet):
    """Paginated entity-set items; each item's nested "entity" is enriched."""

    def __init__(self, api: "AlephAPI", url: str, publisher: bool):
        super().__init__(api, url)
        self.publisher = publisher

    def _patch(self, item):
        # Items wrap an entity; normalize it to a dict and enrich it in place.
        raw_entity = ensure_dict(item.get("entity"))
        item["entity"] = self.api._patch_entity(raw_entity, self.publisher)
        return item
class AlephAPI(object):
    """Thin HTTP client for the Aleph 2.x REST API.

    Wraps a ``requests.Session`` carrying authentication and tracing
    headers, and exposes helpers for collections, entities, entity sets,
    streaming and bulk ingest.
    """
    def __init__(
        self,
        host: Optional[str] = settings.HOST,
        api_key: Optional[str] = settings.API_KEY,
        session_id: Optional[str] = None,
        retries: int = settings.MAX_TRIES,
    ):
        if not host:
            raise AlephException("No host environment variable found")
        self.base_url = urljoin(host, "/api/2/")
        self.retries = retries
        # A per-client session ID lets the server correlate all requests
        # made by this instance.
        session_id = session_id or str(uuid.uuid4())
        self.session: Session = Session()
        self.session.headers["X-Aleph-Session"] = session_id
        self.session.headers["User-Agent"] = "alephclient/%s" % VERSION
        if api_key is not None:
            self.session.headers["Authorization"] = "ApiKey %s" % api_key
    def _make_url(
        self,
        path: str,
        query: Optional[str] = None,
        filters: Optional[List] = None,
        **params,
    ):
        """Construct the target url from given args"""
        url = self.base_url + path
        if query:
            params["q"] = query
        if filters:
            for key, val in filters:
                if val is not None:
                    params["filter:" + key] = val
        if len(params):
            # Drop None-valued params so they don't serialize as "None".
            params_filter = {k: v for k, v in params.items() if v is not None}
            url = url + "?" + urlencode(params_filter)
        return url
    def _patch_entity(
        self, entity: Dict, publisher: bool, collection: Optional[Dict] = None
    ):
        """Add extra properties from context to the given entity."""
        properties: Dict = entity.get("properties", {})
        collection_: Dict = collection or entity.get("collection") or {}
        links: Dict = entity.get("links", {})
        api_url = links.get("self")
        if api_url is None:
            # No self link: synthesize the canonical API URL from the ID.
            api_url = "entities/%s" % entity.get("id")
            api_url = self._make_url(api_url)
        prop_push(properties, "alephUrl", api_url)
        if publisher:
            # Context: setting the original publisher or collection
            # label can help make the data more traceable when merging
            # data from multiple sources.
            publisher_label = collection_.get("label")
            publisher_label = collection_.get("publisher", publisher_label)
            prop_push(properties, "publisher", publisher_label)
            publisher_url = collection_.get("links", {}).get("ui")
            publisher_url = collection_.get("publisher_url", publisher_url)
            prop_push(properties, "publisherUrl", publisher_url)
        entity["properties"] = properties
        return entity
    def _request(self, method: str, url: str, **kwargs) -> Dict:
        """A single point to make the http requests.
        Having a single point to make all requests let's us set headers, manage
        successful and failed responses and possibly manage session etc
        conveniently in a single place.
        """
        try:
            response = self.session.request(method=method, url=url, **kwargs)
            response.raise_for_status()
        except RequestException as exc:
            raise AlephException(exc)
        # Some endpoints (e.g. DELETE) return an empty body.
        if len(response.text):
            return response.json()
        return {}
    def search(
        self,
        query: str,
        schema: Optional[str] = None,
        schemata: Optional[str] = None,
        filters: Optional[List] = None,
        publisher: bool = False,
    ) -> "EntityResultSet":
        """Conduct a search and return the search results."""
        filters_list: List = ensure_list(filters)
        if schema is not None:
            filters_list.append(("schema", schema))
        if schemata is not None:
            filters_list.append(("schemata", schemata))
        if schema is None and schemata is None:
            # Default to "Thing" so only real entities are returned.
            filters_list.append(("schemata", "Thing"))
        url = self._make_url("entities", query=query, filters=filters_list)
        return EntityResultSet(self, url, publisher)
    def get_collection(self, collection_id: str) -> Dict:
        """Get a single collection by ID (not foreign ID!)."""
        url = self._make_url(f"collections/{collection_id}")
        return self._request("GET", url)
    def reingest_collection(self, collection_id: str, index: bool = False):
        """Re-ingest all documents in a collection."""
        url = self._make_url(f"collections/{collection_id}/reingest", index=index)
        return self._request("POST", url)
    def reindex_collection(
        self, collection_id: str, flush: bool = False, sync: bool = False
    ):
        """Re-index all entities in a collection."""
        url = self._make_url(
            f"collections/{collection_id}/reindex", sync=sync, flush=flush
        )
        return self._request("POST", url)
    def delete_collection(self, collection_id: str, sync: bool = False):
        """Delete a collection by ID"""
        url = self._make_url(f"collections/{collection_id}", sync=sync)
        return self._request("DELETE", url)
    def flush_collection(self, collection_id: str, sync: bool = False):
        """Empty all contents from a collection by ID"""
        # keep_metadata distinguishes "flush" from a full delete server-side.
        url = self._make_url(
            f"collections/{collection_id}", sync=sync, keep_metadata=True
        )
        return self._request("DELETE", url)
    def get_entity(self, entity_id: str, publisher: bool = False) -> Dict:
        """Get a single entity by ID."""
        url = self._make_url(f"entities/{entity_id}")
        entity = self._request("GET", url)
        return self._patch_entity(entity, publisher)
    def get_collection_by_foreign_id(self, foreign_id: str) -> Optional[Dict]:
        """Get a dict representing a collection based on its foreign ID."""
        if foreign_id is None:
            return None
        filters = [("foreign_id", foreign_id)]
        # Foreign IDs are unique: return the first (only) match, if any.
        for coll in self.filter_collections(filters=filters):
            return coll
        return None
    def load_collection_by_foreign_id(
        self, foreign_id: str, config: Optional[Dict] = None
    ) -> Dict:
        """Get a collection by its foreign ID, or create one. Setting clear
        will clear any found collection."""
        collection = self.get_collection_by_foreign_id(foreign_id)
        if collection is not None:
            return collection
        config_: Dict = ensure_dict(config)
        return self.create_collection(
            {
                "foreign_id": foreign_id,
                "label": config_.get("label", foreign_id),
                "casefile": config_.get("casefile", False),
                "category": config_.get("category", "other"),
                "languages": config_.get("languages", []),
                "summary": config_.get("summary", ""),
            }
        )
    def filter_collections(
        self, query: Optional[str] = None, filters: Optional[List] = None, **kwargs
    ) -> "APIResultSet":
        """Filter collections for the given query and/or filters.
        params
        ------
        query: query string
        filters: list of key, value pairs to filter collections
        kwargs: extra arguments for api call such as page, limit etc
        """
        if not query and not filters:
            raise ValueError("One of query or filters is required")
        url = self._make_url("collections", query=query, filters=filters, **kwargs)
        return APIResultSet(self, url)
    def create_collection(self, data: Dict) -> Dict:
        """Create a collection from the given data.
        params
        ------
        data: dict with foreign_id, label, category etc. See `CollectionSchema`
        for more details.
        """
        url = self._make_url("collections")
        return self._request("POST", url, json=data)
    def update_collection(
        self, collection_id: str, data: Dict, sync: bool = False
    ) -> Dict:
        """Update an existing collection using the given data.
        params
        ------
        collection_id: id of the collection to update
        data: dict with foreign_id, label, category etc. See `CollectionSchema`
        for more details.
        """
        url = self._make_url(f"collections/{collection_id}", sync=sync)
        return self._request("PUT", url, json=data)
    def stream_entities(
        self,
        collection: Optional[Dict] = None,
        include: Optional[List] = None,
        schema: Optional[str] = None,
        publisher: bool = False,
    ) -> Iterator[Dict]:
        """Iterate over all entities in the given collection.
        params
        ------
        collection_id: id of the collection to stream
        include: an array of fields from the index to include.
        """
        url = self._make_url("entities/_stream")
        if collection is not None:
            collection_id = collection.get("id")
            url = f"collections/{collection_id}/_stream"
            url = self._make_url(url)
        params = {"include": include, "schema": schema}
        try:
            # Stream the response body: one JSON entity per line.
            res = self.session.get(url, params=params, stream=True)
            res.raise_for_status()
            for entity in res.iter_lines(chunk_size=None):
                entity = json.loads(entity)
                yield self._patch_entity(
                    entity, publisher=publisher, collection=collection
                )
        except RequestException as exc:
            raise AlephException(exc)
    def _bulk_chunk(
        self,
        collection_id: str,
        chunk: List,
        entityset_id: Optional[str] = None,
        force: bool = False,
        unsafe: bool = False,
    ):
        # POST one chunk of entities, retrying transient failures with
        # backoff; with ``force`` the final failure is logged, not raised.
        for attempt in count(1):
            url = self._make_url(f"collections/{collection_id}/_bulk")
            params = {"unsafe": unsafe, "entityset_id": entityset_id}
            try:
                response = self.session.post(url, json=chunk, params=params)
                response.raise_for_status()
                return
            except RequestException as exc:
                ae = AlephException(exc)
                if not ae.transient or attempt > self.retries:
                    if not force:
                        raise ae
                    log.error(ae)
                    return
                backoff(ae, attempt)
    def write_entities(
        self, collection_id: str, entities: Iterable, chunk_size: int = 1000, **kw
    ):
        """Create entities in bulk via the API, in the given
        collection.
        params
        ------
        collection_id: id of the collection to use
        entities: an iterable of entities to upload
        """
        chunk = []
        for entity in entities:
            if hasattr(entity, "to_dict"):
                entity = entity.to_dict()
            chunk.append(entity)
            if len(chunk) >= chunk_size:
                self._bulk_chunk(collection_id, chunk, **kw)
                chunk = []
        # Flush the final, possibly partial, chunk.
        if len(chunk):
            self._bulk_chunk(collection_id, chunk, **kw)
    def match(
        self,
        entity: Dict,
        collection_ids: Optional[str] = None,
        url: Optional[str] = None,
        publisher: bool = False,
    ) -> Iterator[List]:
        """Find similar entities given a sample entity."""
        params = {"collection_ids": ensure_list(collection_ids)}
        if url is None:
            url = self._make_url("match")
        try:
            response = self.session.post(url, json=entity, params=params)
            response.raise_for_status()
            for result in response.json().get("results", []):
                yield self._patch_entity(result, publisher=publisher)
        except RequestException as exc:
            raise AlephException(exc)
    def entitysets(
        self,
        collection_id: Optional[str] = None,
        set_types: Optional[List] = None,
        prefix: Optional[str] = None,
    ) -> "APIResultSet":
        """Stream EntitySets"""
        filters_collection = [("collection_id", collection_id)]
        filters_type = [("type", t) for t in ensure_list(set_types)]
        url = self._make_url(
            "entitysets", prefix=prefix, filters=[*filters_collection, *filters_type]
        )
        return APIResultSet(self, url)
    def entitysetitems(
        self, entityset_id: str, publisher: bool = False
    ) -> "APIResultSet":
        """Iterate the items of a single entity set."""
        url = self._make_url(f"entitysets/{entityset_id}/items")
        return EntitySetItemsResultSet(self, url, publisher=publisher)
    def ingest_upload(
        self,
        collection_id: str,
        file_path: Optional[Path] = None,
        metadata: Optional[Dict] = None,
        sync: bool = False,
        index: bool = True,
    ) -> Dict:
        """
        Create an empty folder in a collection or upload a document to it
        params
        ------
        collection_id: id of the collection to upload to
        file_path: path of the file to upload. None while creating folders
        metadata: dict containing metadata for the file or folders. In case of
        files, metadata contains foreign_id of the parent. Metadata for a
        directory contains foreign_id for itself as well as its parent and the
        name of the directory.
        """
        url_path = "collections/{0}/ingest".format(collection_id)
        url = self._make_url(url_path, sync=sync, index=index)
        # No file (or a directory): create a folder from the metadata alone.
        if not file_path or file_path.is_dir():
            data = {"meta": json.dumps(metadata)}
            return self._request("POST", url, data=data)
        for attempt in count(1):
            try:
                with file_path.open("rb") as fh:
                    # use multipart encoder to allow uploading very large files
                    m = MultipartEncoder(
                        fields={
                            "meta": json.dumps(metadata),
                            "file": (file_path.name, fh, MIME),
                        }
                    )
                    headers = {"Content-Type": m.content_type}
                    return self._request("POST", url, data=m, headers=headers)
            except AlephException as ae:
                if not ae.transient or attempt > self.retries:
                    raise ae
                backoff(ae, attempt)
        return {}
| StarcoderdataPython |
8020582 | <gh_stars>0
import requests
import random
import time
import re
import os
import csv
import urllib3
import datetime
from bs4 import BeautifulSoup
class DoubanBookCrawler:
    """Scraper for Douban book category pages."""

    def __init__(self):
        # Pool of desktop-browser User-Agent strings to rotate between requests.
        self.userAgent = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]

    def getBookCategories(self):
        """Fetch the Economics tag page and return the hrefs it links to.

        Returns a list of URL strings extracted from anchor tags.
        """
        url = "https://book.douban.com/tag/%E7%BB%8F%E6%B5%8E%E5%AD%A6"
        # BUG FIX: requests.get(url, dict) passes the dict as the *params*
        # (query string) argument; headers must go via the ``headers`` kwarg.
        html_journal = requests.get(url, headers=self.getRandomHeader()).text
        url_issues = re.findall(r'<a href="(.*?)">.*?</a>', html_journal)
        # BUG FIX: the extracted links were previously computed and discarded.
        return url_issues

    def getRandomHeader(self):
        """Return a request header dict with a randomly chosen User-Agent."""
        # BUG FIX: a random agent was picked and then ignored in favour of a
        # hard-coded string; actually use the rotation pool.
        return {
            # 'Connection': 'close',
            'User-Agent': random.choice(self.userAgent)
        }
if __name__ == "__main__":
    # Entry point: run a single category crawl (requires network access).
    crawler = DoubanBookCrawler()
    crawler.getBookCategories()
    # ANTI-WEBCRAWLER return 418
| StarcoderdataPython |
4859770 | <filename>engine/graphics/src/waf_graphics.py
# Copyright 2020 The Defold Foundation
# Licensed under the Defold License version 1.0 (the "License"); you may not use
# this file except in compliance with the License.
#
# You may obtain a copy of the License, together with FAQs at
# https://www.defold.com/license
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import sys, os
import Task, TaskGen
from TaskGen import extension
def configure(conf):
    # Locate the external build tools used by the task types below:
    # texc.py (texture compiler), glslvc/glslfc (vertex/fragment shader
    # compilers). All three are mandatory for this waf configuration.
    conf.find_file('texc.py', var='TEXC', mandatory = True)
    conf.find_file('glslvc', var='GLSLVC', mandatory = True)
    conf.find_file('glslfc', var='GLSLFC', mandatory = True)
# Task that compiles a source image into the engine's .texturec format.
# It must run after protobuf generation and before C/C++ compilation.
Task.simple_task_type('texture', 'python ${TEXC} ${SRC} -o ${TGT}',
                      color='PINK',
                      after='proto_gen_py',
                      before='cc cxx',
                      shell=True)
@extension('.png .jpg')
def png_file(self, node):
    # Map every .png/.jpg source node to a 'texture' compile task.
    texture = self.create_task('texture')
    texture.set_inputs(node)
    out = node.change_ext('.texturec')
    texture.set_outputs(out)
# Task that compiles a vertex program with glslvc.
Task.simple_task_type('vertexprogram', '${GLSLVC} ${SRC} ${TGT}',
                      color='PINK',
                      shell=True)
@extension('.vp')
def vp_file(self, node):
    # Map every .vp source node to a 'vertexprogram' compile task (.vpc output).
    obj_ext = '.vpc'
    program = self.create_task('vertexprogram')
    program.set_inputs(node)
    out = node.change_ext(obj_ext)
    program.set_outputs(out)
# Task that compiles a fragment program with glslfc.
Task.simple_task_type('fragmentprogram', '${GLSLFC} ${SRC} ${TGT}',
                      color='PINK',
                      shell=True)
@extension('.fp')
def fp_file(self, node):
    # Map every .fp source node to a 'fragmentprogram' compile task (.fpc output).
    obj_ext = '.fpc'
    program = self.create_task('fragmentprogram')
    program.set_inputs(node)
    out = node.change_ext(obj_ext)
    program.set_outputs(out)
| StarcoderdataPython |
1822912 | <reponame>rectcircle/celery-learn
# -*- coding: utf-8 -*-
from get_started.producer1 import product1
from celery.result import AsyncResult
from time import sleep
if __name__ == '__main__':
    # With a result backend configured, calling the task returns an
    # AsyncResult handle rather than a plain value.
    print "====测试使用backend===="
    task_result1 = product1()
    task_result2 = product1()
    print type(task_result1)
    print task_result1.task_id, task_result2.task_id
    print task_result1.backend, task_result2.backend
    # Poll each task until it finishes, then print its result.
    while not task_result1.ready():
        sleep(.5)
    print task_result1.result
    while not task_result2.ready():
        sleep(.5)
    print task_result2.result
| StarcoderdataPython |
6672956 | <reponame>linohan/Recommender-Systems-Samples<filename>RecSys Traditional/MF/FFM/util.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
import os
# In[1]:
class FieldHandler(object):
    """Build the field/feature vocabulary used by FFM-style models.

    Each categorical column becomes a field whose distinct values map to
    consecutive feature indices; each continuation (numeric) column is a
    single feature, optionally standardized by a fitted StandardScaler.
    """
    def __init__(self, train_file_path, test_file_path=None, category_columns=None, continuation_columns=None):
        self.train_file_path = None
        self.test_file_path = None
        self.feature_nums = 0   # total number of feature indices assigned
        self.field_dict = {}    # column -> index, or column -> {value: index}
        # BUG FIX: mutable default arguments ([]) are shared between calls;
        # normalize None to fresh lists instead.
        self.category_columns = list(category_columns or [])
        self.continuation_columns = list(continuation_columns or [])
        if not isinstance(train_file_path, str):
            raise ValueError('train file path must be str')
        if os.path.exists(train_file_path):
            self.train_file_path = train_file_path
        else:
            raise OSError('train file path isn\'t exist')
        if test_file_path:
            if os.path.exists(test_file_path):
                self.test_file_path = test_file_path
            else:
                raise OSError('test file path isn\'t exist')
        self.read_data()
        # BUG FIX: ``df[cols].fillna(..., inplace=True)`` mutates a copy of
        # the column slice and silently leaves the NaNs in ``self.df``;
        # assign the filled result back instead.
        if self.category_columns:
            self.df[self.category_columns] = self.df[self.category_columns].fillna('-1')
        self.build_field_dict()
        self.build_standard_scaler()
        self.field_nums = len(self.category_columns + self.continuation_columns)

    def build_field_dict(self):
        """Assign a feature index (or a value->index map) to every column."""
        for col in self.df.columns:
            if col in self.category_columns:
                # One feature index per distinct category value.
                cv = self.df[col].unique()
                self.field_dict[col] = dict(zip(cv, range(self.feature_nums, self.feature_nums + len(cv))))
                self.feature_nums += len(cv)
            else:
                # Numeric columns occupy exactly one feature index.
                self.field_dict[col] = self.feature_nums
                self.feature_nums += 1

    def read_data(self):
        """Load train (and optional test) CSVs restricted to the model columns."""
        cols = self.category_columns + self.continuation_columns
        if self.train_file_path and self.test_file_path:
            train_df = pd.read_csv(self.train_file_path)[cols]
            test_df = pd.read_csv(self.test_file_path)[cols]
            # Stack train + test so the vocabulary covers values from both.
            self.df = pd.concat([train_df, test_df])  # default axis=0
        else:
            self.df = pd.read_csv(self.train_file_path)[cols]

    def build_standard_scaler(self):
        """Fit a scaler over the numeric columns (None when there are none)."""
        if self.continuation_columns:
            self.standard_scaler = StandardScaler()
            self.standard_scaler.fit(self.df[self.continuation_columns].values)
        else:
            self.standard_scaler = None
def transformation_data(file_path: str, field_hander: "FieldHandler", label=None):
    """Transform a CSV into FFM index/value arrays using a FieldHandler.

    Returns ``(features, labels)`` where ``features`` holds ``df_i`` (int32
    feature-index matrix) and ``df_v`` (float32 value matrix); ``labels``
    is None unless a label column name is supplied.
    """
    df_v = pd.read_csv(file_path)
    labels = None
    if label:
        if label in df_v.columns:
            labels = df_v[[label]].values.astype('float32')
        else:
            # BUG FIX: the original f-string had an unbalanced quote.
            raise KeyError(f'label "{label}" isn\'t exist')
    df_v = df_v[field_hander.category_columns + field_hander.continuation_columns]
    # BUG FIX: fillna(..., inplace=True) on a column slice mutates a copy;
    # assign the filled results back so the NaNs are actually replaced.
    if field_hander.category_columns:
        df_v[field_hander.category_columns] = df_v[field_hander.category_columns].fillna('-1')
    if field_hander.continuation_columns:
        df_v[field_hander.continuation_columns] = df_v[field_hander.continuation_columns].fillna(-999)
    if field_hander.standard_scaler:
        df_v[field_hander.continuation_columns] = field_hander.standard_scaler.transform(df_v[field_hander.continuation_columns])
    df_i = df_v.copy()
    for col in df_v.columns:
        if col in field_hander.category_columns:
            # Categorical: index = vocabulary lookup, value = 1 (one-hot).
            df_i[col] = df_i[col].map(field_hander.field_dict[col])
            df_v[col] = 1
        else:
            # Numeric: fixed feature index, value = the (scaled) number.
            df_i[col] = field_hander.field_dict[col]
    df_v = df_v.values.astype("float32")
    df_i = df_i.values.astype("int32")
    features = {
        'df_i': df_i,
        'df_v': df_v}
    if label:
        return features, labels
    return features, None
# In[2]:
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
def dataGenerate(path="./Dataset/train.csv"):
    """Load the Titanic CSV and one-hot encode its categorical columns.

    Returns ``(x, y, files_dict)`` where ``files_dict`` maps each output
    feature-column index to the index of the source field it came from
    (the trailing 'Fare' column gets its own field).
    """
    df = pd.read_csv(path)
    df = df[['Pclass',"Sex","SibSp","Parch","Fare","Embarked","Survived"]]
    class_columns = ['Pclass',"Sex","SibSp","Parch","Embarked"]
    continuous_columns = ['Fare']
    train_x = df.drop('Survived', axis=1)
    train_y = df['Survived'].values
    train_x = train_x.fillna("-1")
    le = LabelEncoder()
    oht = OneHotEncoder()
    files_dict = {}
    s = 0  # running index over the generated output feature columns
    for index, column in enumerate(class_columns):
        try:
            train_x[column] = le.fit_transform(train_x[column])
        except:
            # NOTE(review): bare except silently skips label-encoding for a
            # column (presumably already-numeric ones) -- confirm intentional.
            pass
        ont_x = oht.fit_transform(train_x[column].values.reshape(-1,1)).toarray()
        for i in range(ont_x.shape[1]):
            # Every one-hot column produced from this source column belongs
            # to the same field ``index``.
            files_dict[s] = index
            s +=1
        if index == 0:
            x_t = ont_x
        else:
            x_t = np.hstack((x_t, ont_x))
    # Append the continuous column and register it as its own field.
    x_t = np.hstack((x_t, train_x[continuous_columns].values.reshape(-1,1)))
    files_dict[s] = index + 1
    return x_t.astype("float32"), train_y.reshape(-1,1).astype("float32"), files_dict
| StarcoderdataPython |
6660401 | <gh_stars>0
# Coin identity.
NAME = 'PurpleCoin'        # full coin name (singular)
NAME_S = 'PurpleCoins'     # full coin name (plural)
SHORT_NAME = 'PURPLE'      # ticker symbol
# Mining economics (kept as strings -- the consumers apparently expect str).
START_REWARD = '8192'      # initial block reward
REWARD_FIX = '210000'      # presumably the reward-adjustment interval in blocks -- TODO confirm
# SQLite storage locations.
BLOCKCHAIN_FILE = 'blockchain.sqlite'
PEERS_LIST_FILE = 'settings.sqlite'
# Networking / versioning.
SERVER_PORT = 45777
VERSION = '0.0.1'
| StarcoderdataPython |
6416869 | import os
import uuid
import shutil
import string
from configparser import ConfigParser
from importlib import import_module
from os.path import join, dirname, abspath, exists, splitext
import scrapx
from scrapx.commands import ScrapxCommand
from scrapy.utils.template import render_templatefile, string_camelcase
from scrapy.exceptions import UsageError
def sanitize_module_name(module_name):
    """Sanitize the given module name, by replacing dashes and points
    with underscores and prefixing it with a letter if it doesn't start
    with one
    """
    module_name = module_name.replace('-', '_').replace('.', '_')
    # BUG FIX: guard the empty string as well -- module_name[0] would
    # otherwise raise IndexError.
    if not module_name or module_name[0] not in string.ascii_letters:
        module_name = "a" + module_name
    return module_name
class Command(ScrapxCommand):
    """``genspider`` command: create a spider module and its run file from
    the bundled templates, registering the run file in ``scrapx.cfg``."""
    requires_project = False
    default_settings = {'LOG_ENABLED': False}
    def syntax(self):
        return "[options] <name> <domain>"
    def short_desc(self):
        return "Generate new spider using pre-defined templates"
    def add_options(self, parser):
        ScrapxCommand.add_options(self, parser)
        parser.add_argument("-l", "--list", dest="list", action="store_true",
                            help="List available templates")
        parser.add_argument("-d", "--dump", dest="dump", metavar="TEMPLATE",
                            help="Dump template to standard output")
        parser.add_argument("-t", "--template", dest="template", default="basic",
                            help="Uses a custom template.")
        parser.add_argument("--force", dest="force", action="store_true",
                            help="If the spider already exists, overwrite it with the template")
        parser.add_argument('spider_name', action='store', help='project_name string')
        parser.add_argument('domain', action='store', help='project_name string')
    @staticmethod
    def _spider_exists(spider_dir_path, module):
        # True when a <module>.py file already exists in the spiders dir.
        _path = os.path.join(spider_dir_path, f'{module}.py')
        return os.path.exists(_path)
    def _find_template(self, template):
        """Return the path to <template>.py.tmpl, or None (with a hint printed)."""
        template_file = join(self.templates_dir, f'{template}.py.tmpl')
        if exists(template_file):
            return template_file
        print(f"Unable to find template: {template}\n")
        print('Use "scrapy genspider --list" to see all available templates.')
    def _list_templates(self):
        # Print the names of all bundled *.tmpl templates, without extension.
        print("Available templates:")
        for filename in sorted(os.listdir(self.templates_dir)):
            if filename.endswith('.tmpl'):
                print(f"  {splitext(filename)[0]}")
    @property
    def templates_dir(self):
        # Spider templates ship inside the scrapx package itself.
        return join(
            join(scrapx.__path__[0], 'templates'),
            'spiders'
        )
    def run(self, parser):
        args = parser.parse_args()
        # Informational modes first: --list and --dump exit early.
        if args.list:
            self._list_templates()
            return
        if args.dump:
            template_file = self._find_template(args.dump)
            if template_file:
                with open(template_file, "r") as f:
                    print(f.read())
            return
        module = sanitize_module_name(args.spider_name)
        # Locate scrapx.cfg in the current directory or its parent; the
        # directory holding it is the project root.
        _path_1 = os.path.join(os.path.abspath('.'), 'scrapx.cfg')
        _path_2 = os.path.join(os.path.dirname(os.path.abspath('.')), 'scrapx.cfg')
        if os.path.exists(_path_1):
            _project_config_source = _path_1
            runfile_dir_path = os.path.dirname(_path_1)
            spider_dir_path = os.path.join(runfile_dir_path, 'spiders')
        elif os.path.exists(_path_2):
            _project_config_source = _path_2
            runfile_dir_path = os.path.dirname(_path_2)
            spider_dir_path = os.path.join(runfile_dir_path, 'spiders')
        else:
            print("Cannot generate a spider outside of a project")
            return
        project_cfg = ConfigParser()
        project_cfg.read(_project_config_source)
        botname = project_cfg.get('deploy', 'botname')
        if botname == module:
            print("Cannot create a spider with the same name as your project")
            return
        name = args.spider_name
        # Refuse to clobber an existing spider unless --force was given.
        if not args.force and self._spider_exists(spider_dir_path, module):
            print('Spider module is already existed')
            return
        template_name = args.template
        domain = args.domain
        template_file = self._find_template(template_name)
        data_type = project_cfg.get('datatype', 'default')
        if template_file:
            self._genspider(module, name, domain, template_name, template_file, spider_dir_path, botname)
            self._genrunfile(name, runfile_dir_path, botname, _project_config_source, data_type)
    @staticmethod
    def _genspider(module, name, domain, template_name, template_file, spider_dir_path, botname):
        """Generate the spider module, based on the given template"""
        capitalized_module = ''.join(s.capitalize() for s in module.split('_'))
        tvars = {
            # 'project_name': botname,
            'ProjectName': string_camelcase(botname),
            # 'module': module,
            'name': name,
            'domain': domain,
            'classname': f'{capitalized_module}Spider'
        }
        spider_file = f"{join(spider_dir_path, module)}.py"
        shutil.copyfile(template_file, spider_file)
        render_templatefile(spider_file, **tvars)
        print(f"Created spider {name!r} using template {template_name!r} ")
    @property
    def _uuid(self):
        # A fresh UUID4 on every access; stamped into generated run files.
        return str(uuid.uuid4())
    def _genrunfile(self, name, runfile_dir_path, botname, project_config_path, data_type):
        """Generate the runfile for a spider"""
        template_file = self._find_template('run')
        run_file_name = 'run_{}'.format(name)
        run_file = f"{join(runfile_dir_path, run_file_name)}.py"
        collection_name = '_'.join([botname, name])
        tvars = {
            'ProjectName': string_camelcase(botname),
            'spider_name': name,
            'collection_name': collection_name,
            'project_name': botname,
            'uid': self._uuid,
            'data_type': data_type
        }
        shutil.copyfile(template_file, run_file)
        render_templatefile(run_file, **tvars)
        self._register_run_file(name, run_file_name, project_config_path)
        print(f"Created runfile {name!r} using template run.py.tmpl ")
    def _register_run_file(self, name, run_file_name, project_config_file_path):
        # Append a "<spider>=<run file>" mapping to the project's scrapx.cfg.
        _spider_config_str = f'\n{name}={run_file_name}'
        with open(project_config_file_path, 'a') as f:
            f.write(_spider_config_str)
| StarcoderdataPython |
5138659 | # =================================================================
#
# Authors: <NAME> <<EMAIL>>
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import click
import json
from jsonschema import validate as jsonschema_validate
import logging
import os
from pygeoapi.util import to_json, yaml_load
LOGGER = logging.getLogger(__name__)
THISDIR = os.path.dirname(os.path.realpath(__file__))
def validate_config(instance_dict):
    """
    Validate pygeoapi configuration against pygeoapi schema

    :param instance_dict: dict of configuration

    :returns: `bool` of validation
    """

    schema_path = os.path.join(
        THISDIR, 'schemas', 'config', 'pygeoapi-config-0.x.yml')

    with open(schema_path) as schema_fh:
        schema_dict = yaml_load(schema_fh)

    # Round-trip the instance through JSON so only plain JSON types are
    # handed to the jsonschema validator (which raises on failure).
    instance_json = json.loads(to_json(instance_dict))
    jsonschema_validate(instance_json, schema_dict)

    return True
@click.group()
def config():
    """Configuration management"""
    # Click group entry point; subcommands are attached below.
    pass
@click.command()
@click.pass_context
@click.option('--config', '-c', 'config_file', help='configuration file')
def validate(ctx, config_file):
    """Validate configuration"""
    if config_file is None:
        raise click.ClickException('--config/-c required')
    with open(config_file) as ff:
        click.echo('Validating {}'.format(config_file))
        # validate_config raises on schema violations, so reaching the
        # final echo means the configuration passed.
        instance = yaml_load(ff)
        validate_config(instance)
        click.echo('Valid configuration')
# Register the subcommand on the ``config`` group.
config.add_command(validate)
| StarcoderdataPython |
5026709 | <reponame>rricard/dxr
"""Tests for searches involving wildcards (*, ?)"""
from dxr.testing import SingleFileTestCase
class WildcardTests(SingleFileTestCase):
    """Wildcard search behaviour: ``*`` matches any run of characters,
    ``?`` matches exactly one character, and ``_`` is treated literally."""
    source = r"""
        int get_foo() {
            return 0;
        }
        int get_bar() {
            return 0;
        }
        int getX() {
            return 0;
        }
        int main() {
            return get_foo() + get_bar() + getX();
        }
        """
    def test_function_asterisk(self):
        """Test searching for functions using an asterisk."""
        self.found_lines_eq(
            'function:get*',
            [('int <b>get_foo</b>() {', 2),
             ('int <b>get_bar</b>() {', 6),
             ('int <b>getX</b>() {', 10)])
    def test_function_question(self):
        """Test searching for functions using a question mark."""
        self.found_line_eq('function:get_fo?', 'int <b>get_foo</b>() {')
    def test_function_underscore(self):
        """Test that underscore is treated literally when searching for
        functions."""
        # 'get_' matches no function exactly, and '_' must not act as a wildcard.
        self.found_nothing('function:get_')
    def test_function_ref_asterisk(self):
        """Test searching for function references using an asterisk."""
        self.found_line_eq(
            'function-ref:get*',
            'return <b>get_foo</b>() + <b>get_bar</b>() + <b>getX</b>();')
    def test_function_ref_question(self):
        """Test searching for function references using a question mark."""
        # Only get_foo matches 'get_fo?'; the other calls stay unhighlighted.
        self.found_line_eq(
            'function-ref:get_fo?',
            'return <b>get_foo</b>() + get_bar() + getX();')
    def test_function_ref_underscore(self):
        """Test that underscore is treated literally when searching for
        function references."""
        self.found_nothing('function-ref:get_')
| StarcoderdataPython |
1759662 | <reponame>amacd31/bom_data_parser
import pandas as pd
from bom_data_parser import mapper
def read_acorn_sat_csv(fname):
    """Read an ACORN-SAT whitespace-delimited data file into a DataFrame.

    The first line is a header whose first 8 characters identify the
    measurand and which carries a ``missing_value=<token>`` declaration;
    whitespace-separated data rows follow.

    :param fname: path to the ACORN-SAT file
    :returns: tuple of (DataFrame indexed by date, None)
    """
    with open(fname) as fh:
        header = fh.readline()

    measurand = mapper.convert_key(header[0:8])

    # the sentinel for missing data is declared in the header as
    # "missing_value=<token>"
    key = 'missing_value='
    after_key = header[header.index(key) + len(key):]
    sentinel = after_key.split(' ')[0]

    frame = pd.read_csv(fname, parse_dates=[0], index_col=0, sep=r"\s+",
                        header=None, skiprows=1, na_values=sentinel,
                        names=['Date', measurand])
    return frame, None
| StarcoderdataPython |
1708304 | from funcy_chain import getter
def test_names(Chain):
    """Format and sort user names via two equivalent chain pipelines.

    BUG FIX: the expected values had been replaced by ``<NAME>``
    placeholders (dataset anonymization); they are reconstructed here from
    the fixture data: "{lastname}, {firstname}" gives "Liddle, Alice" and
    "Kennedy, Bob", which sort to ["Kennedy, Bob", "Liddle, Alice"].
    """
    data = {
        "user1": {
            "firstname": "Alice",
            "lastname": "Liddle",
        },
        "user2": {
            "firstname": "Bob",
            "lastname": "Kennedy",
        },
    }
    names = (
        Chain(data)
        .values()
        .map(lambda user_data: "{lastname}, {firstname}".format(**user_data))
        .sort()
    ).value
    assert names == ["Kennedy, Bob", "Liddle, Alice"]
    # same result built from (lastname, firstname) tuples joined with ", "
    names2 = (
        Chain(data).items().map(getter([1, "lastname"], [1, "firstname"])).sort().map(", ".join)
    ).value
    assert names2 == ["Kennedy, Bob", "Liddle, Alice"]
def test_integers(Chain):
    # [1,2,3,7,6,5,4] -> drop 3 -> keep >2 -> drop >6 -> sort desc == [6,5,4]
    assert (
        Chain([1, 2, 3, 7, 6, 5, 4])
        .without(3)
        .filter(lambda x: x > 2)
        .remove(lambda x: x > 6)
        .sort(reverse=True)
    ).value == [6, 5, 4]
def test_youngest(Chain):
    users = [
        {"user": "barney", "age": 36},
        {"user": "fred", "age": 40},
        {"user": "pebbles", "age": 1},
    ]
    # sort ascending by age, then project the user names
    names = (Chain(users).sort(getter("age")).map(getter("user"))).value
    assert names == ["pebbles", "barney", "fred"]
| StarcoderdataPython |
3404819 | <reponame>joncatanio/cannoli
class Test:
    # Class attribute shared by all instances until an instance assignment
    # shadows it with an instance attribute of the same name.
    var = 2
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def method(self):
        # Print the current state, then rebind self.x on the instance.
        print("method: self.x = " + str(self.x))
        print("method: self.y = " + str(self.y))
        print("Updating self.x")
        self.x = "`method` updated my value"
    def swapxy(self):
        # Swap the x and y attributes through a temporary.
        temp = self.x
        self.x = self.y
        self.y = temp
# Demonstration script: obj1 and obj2 are two names for the SAME instance,
# so every mutation through one name is visible through the other.
int_val = 5
obj1 = Test("x value", "y value")
obj2 = obj1
print("Updating obj1.var ...")
# Assigning through the instance creates an instance attribute that shadows
# the class attribute `var`; obj2 sees the same value because it aliases obj1.
obj1.var = 4
print("obj1.var: " + str(obj1.var))
print("obj2.var: " + str(obj2.var))
print("Updating obj2.x ...")
print("PRE obj1.x: " + str(obj1.x))
print("PRE obj2.x: " + str(obj2.x))
obj2.x = "changed string"
print("POST obj1.x: " + str(obj1.x))
print("POST obj2.x: " + str(obj2.x))
print("Assign obj2.var to variable 'a'")
# Reading an attribute copies the reference; rebinding `a` afterwards does
# not affect obj2.var.
a = obj2.var
print("a: " + str(a))
print("Modify 'a' to show that obj2.var won't be effected")
a = 15
print("a: " + str(a))
print("obj2.var: " + str(obj2.var))
print("Calling obj1.method() ...")
obj1.method()
print("State of obj1 & obj2 after call")
print("obj1.x: " + str(obj1.x) + " obj1.y: " + str(obj1.y) + " obj1.var: " + str(obj1.var))
print("obj2.x: " + str(obj2.x) + " obj2.y: " + str(obj2.y) + " obj2.var: " + str(obj2.var))
print("Calling obj1.swapxy() ...")
obj1.swapxy()
print("obj1.x: " + str(obj1.x) + " obj1.y: " + str(obj1.y) + " obj1.var: " + str(obj1.var))
print("obj2.x: " + str(obj2.x) + " obj2.y: " + str(obj2.y) + " obj2.var: " + str(obj2.var))
6423134 | import datetime
from matplotlib import dates
import numpy as np
import json
import re
import math
import time
import collections
import itertools
# Batch post-processing script: for each participant (VP 26-31) and each
# smartphone position, align 30-second sleep-stage epochs with smartphone
# audio and acceleration recordings and write per-epoch summary features
# to a CSV file.
#
# NOTE(review): this appears to be Python 2 code -- it relies on
# dict.keys() returning an indexable list (keys()[0]) and on
# OrderedDict.iterkeys(); it will not run unmodified on Python 3.
# NOTE(review): the files opened below are never explicitly closed.
positions = ["_right_arm", "_mattrass"]
for vp_number in range(26, 32):
    for smartphone_position in positions:
        current_folder = "result_data_vp"+str(vp_number)+smartphone_position+"/"
        sleep_stage_file = open("sleep_staging_files/VP"+str(vp_number)+"_NAP.TXT", "r")
        acceleration_value_file = open(current_folder+"result_acceleration", "r")
        audio_value_file = open(current_folder+"result_audio", "r")
        output_file = open(str(current_folder)+"VP"+str(vp_number)+smartphone_position+"_30sec_chunks.csv", "w")
        content = ""
        # first line: volume-peak timestamp in ms; second line: sample index
        # of the sync marker (presumably at 500 Hz -- TODO confirm)
        with open("marker_files/timestamp_and_marker_"+str(vp_number)+".txt", "r") as volume_peak_timestamp_file:
            content = volume_peak_timestamp_file.readlines()
        volume_peak_timestamp = float(content[0]) / 1000.0
        marker_index = int(content[1])
        marker_starting_point = (marker_index / 500)
        print("Marker_starting_point "+ str(marker_starting_point))
        current_timestamp = volume_peak_timestamp
        current_audio_timestamp = current_timestamp
        starting_timestamp = current_timestamp
        # prepare data of sleepstaging file
        # second tab-separated field, stripped of non-digits, per epoch line
        data = [re.sub('\D','',x.split("\t")[1]) for x in sleep_stage_file]
        # epoch boundaries: one timestamp every 30 s after the sync point
        t_stamp = starting_timestamp
        timestamps = []
        for x in range(0,len(data)):
            t_stamp+=30.0
            timestamps.append( t_stamp )
        acceleration_json_data = json.load( acceleration_value_file )
        audio_json_data = json.load( audio_value_file )
        print( "JSON-len"+ str(len(acceleration_json_data)) )
        previous_vector = [0,0,0]
        previous_audio_vector = 0
        date_and_distance = []
        # Euclidean distance between two consecutive 3-axis acceleration samples
        def calculate_distance(vec1, vec2):
            return math.sqrt( math.pow( (vec1[0]-vec2[0]), 2) + math.pow( (vec1[1]-vec2[1]), 2) + math.pow( (vec1[2]-vec2[2]), 2) )
        ordered_audio_dict = collections.OrderedDict()
        ordered_dict = collections.OrderedDict()
        ordered_dict_start = collections.OrderedDict()
        # audio entries are [timestamp, value] pairs
        for x in audio_json_data:
            ordered_audio_dict[ x[0] ] = x[1]
        cnt = 0
        # acceleration entries are {timestamp: [x, y, z]} dicts; keep only
        # timestamps where the sample actually changed (distance > 0)
        for x in acceleration_json_data:
            cnt += 1
            key = x.keys()[0]
            vec1 = x[key]
            distance = calculate_distance( vec1, previous_vector )
            if(distance > 0.0):
                ordered_dict[str(key)] = distance
            previous_vector = vec1
        # drop everything recorded before the sync timestamp
        keys = ordered_dict.keys()
        first_timestamp = min(keys, key=lambda x:abs( float(x) - current_timestamp ))
        for x in list(itertools.dropwhile(lambda k: k!= first_timestamp, ordered_dict.iterkeys() )) :
            ordered_dict_start[x] = ordered_dict[x]
        # Write mean/max/count of the audio values falling into epoch `idx`
        # (a 30 s window; the very first window is shortened by the marker
        # offset). Communicates via the enclosing-scope dicts and globals.
        def plot_audio_data( current_x, data_length, idx ):
            global current_audio_timestamp
            keys = ordered_audio_dict.keys()
            # get closest timestamp from audio peak timestamp
            current_audio_timestamp = min(keys, key=lambda x:abs( float(x) - float(timestamps[idx]) ))
            initial_timestamp = current_audio_timestamp
            valueList = []
            cnt = 0
            for x in list(itertools.dropwhile(lambda k: k != initial_timestamp, ordered_audio_dict.iterkeys() )) :
                if( initial_timestamp != starting_timestamp ):
                    if( float(x) < float(initial_timestamp) + 30 and float(x) > float(initial_timestamp) ):
                        cnt += 1
                        value = ordered_audio_dict[x]
                        current_audio_timestamp = float(x)
                        valueList.append(value)
                else:
                    if( float(x) < float(initial_timestamp) + (30 - int(marker_starting_point)) and float(x) > float(initial_timestamp) ):
                        current_audio_timestamp = float(x)
            # NOTE(review): np.mean/np.amax on an empty valueList would
            # warn/raise -- assumes every epoch contains audio samples.
            output_file.write( str(np.mean(valueList))+"," )
            output_file.write( str(np.amax(valueList))+"," )
            output_file.write( str(cnt)+"," )
            current_audio_timestamp = initial_timestamp + 30
            return
        # Same windowing for the acceleration change-distances: write
        # count/mean/max per epoch (max terminates the CSV row).
        def plot_accelerometer_data( current_x, data_length, idx ):
            global current_timestamp
            keys = ordered_dict_start.keys()
            # get closest timestamp from audio peak timestamp
            current_timestamp = min(keys, key=lambda x:abs( float(x) - float(timestamps[idx]) ))
            initial_timestamp = current_timestamp
            cnt = 0
            valueList = []
            for x in list(itertools.dropwhile(lambda k: k != initial_timestamp, ordered_dict_start.iterkeys() )) :
                if( initial_timestamp != starting_timestamp ):
                    if( float(x) < float(initial_timestamp) + 30 and float(x) > float(initial_timestamp) ):
                        current_timestamp = float(x)
                        cnt += 1
                        valueList.append(ordered_dict_start[x])
                else:
                    if( float(x) < float(initial_timestamp) + (30 - int(marker_starting_point)) and float(x) > float(initial_timestamp) ):
                        current_timestamp = float(x)
            output_file.write( str(cnt)+"," )
            output_file.write( str(np.mean(valueList))+"," )
            output_file.write( str(np.amax(valueList))+"\n" )
            return
        previous_value=0
        current_x=0.0
        growth_x=1.0
        # NOTE(review): rem_phase_adjustment, rem_value, min_x, compensate and
        # overall_height appear unused below (possibly left over from a
        # plotting version of this script).
        rem_phase_adjustment = 0
        rem_value = 5
        sleep_value = 6
        max_x = len(data)
        max_y = 6
        min_x = 0
        min_y = -5
        compensate = math.fabs(min_y)
        overall_height = max_y + compensate
        i = 0
        time_t = time.time()
        output_file.write("sleep_stage, change_classifier, audio_means, audio_max, audio_count, acceleration_count, acceleration_means, acceleration_max\n")
        # one CSV row per 30 s sleep-stage epoch
        for value in data:
            print( str(i) +"from "+ str(len(data)) )
            # stage code 6 is remapped to 0 (treated like wake -- TODO confirm)
            if( int(value) == sleep_value ):
                value = 0
            output_file.write(str(value)+",")
            print("value: "+str(value)+" prev:"+str(previous_value))
            # classify the transition relative to the previous epoch
            if int(value) > int(previous_value):
                print("fall_asleep")
                output_file.write("fall asleep,")
            if int(value) < int(previous_value):
                print("wakeup")
                output_file.write("wake up,")
            if int(value) == int(previous_value):
                print("constant")
                output_file.write("constant,")
            previous_value = value
            plot_audio_data( current_x, max_x, i )
            plot_accelerometer_data( current_x, max_x, i )
            i+=1
            current_x += growth_x
        print( "Execution Time: " + str( time.time() - time_t) )
9646856 | from django.urls import path
from .views import EventCreateView, EventDetailView, CategoryView
# URL namespace for reversing, e.g. reverse('events:detail', args=[pk]).
app_name = 'events'
urlpatterns = [
    # /<pk> -> detail page for a single event
    path('<int:pk>', EventDetailView.as_view(), name='detail'),
    # /create/<pk> -> event creation view (pk presumably scopes a parent
    # object -- TODO confirm against EventCreateView)
    path('create/<int:pk>', EventCreateView.as_view(), name='create'),
    # /category/<slug> -> events filtered by category
    path('category/<slug:category>', CategoryView.as_view(), name='category'),
]
| StarcoderdataPython |
313619 | from .base_distribution import distribution
import numpy as np
from mpmath import mp
from scipy.optimize import minimize
class powerlaw(distribution):
    '''
    Discrete power-law distribution, P(x) ~ x^(-alpha).

    The normalizing constant over x >= xmin is the Hurwitz zeta function
    zeta(alpha, xmin).
    '''

    def __init__(self):
        super(powerlaw, self).__init__()
        self.name = 'power law'
        self.n_para = 1

    def _loglikelihood(self, alpha_, xmin, logsum_N):
        # Negative log-likelihood (for minimization) of the tail x >= xmin:
        #   log L = -alpha * sum(log x_i) - N * log(zeta(alpha, xmin))
        (alpha,) = alpha_
        logsum, N = logsum_N
        negll = alpha * logsum + N * np.log(float(mp.zeta(alpha, xmin)))
        return negll

    def _fitting(self, xmin=1):
        # Restrict the frequency table to the tail x >= xmin; column 0 holds
        # the values, the last column holds their counts.
        tail = self.freq[self.freq[:, 0] >= xmin]
        sumlog = np.sum(tail[:, -1] * np.log(tail[:, 0]))
        N = np.sum(tail[:, -1])
        # cache the tail sample size for this xmin
        self.N_xmin.setdefault(xmin, N)
        res = minimize(self._loglikelihood, x0=(2.5),
                       method='SLSQP', tol=1e-15,
                       args=(xmin, (sumlog, N)),
                       bounds=((1. + 1e-6, 5.0),))
        aic = 2 * res.fun + 2 * self.n_para
        fits = {'alpha': res.x[0]}
        return (res.x[0], -res.fun, aic), fits

    def _get_ccdf(self, xmin):
        # Complementary CDF: subtract successive pmf masses from 1.
        alpha = self.fitting_res[xmin][1]['alpha']
        norm = 1. / float(mp.zeta(alpha, xmin))
        remaining = 1.
        ccdf = []
        for x in range(xmin, self.xmax):
            remaining -= x ** (-alpha) * norm
            ccdf.append([x, remaining])
        return np.asarray(ccdf)
| StarcoderdataPython |
6534593 | <gh_stars>0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import qpid.messaging
from glance.notifier import strategy
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
LOG = logging.getLogger(__name__)
# Configuration options controlling the Qpid (AMQP) broker connection,
# reconnection behavior, and the exchange/topic notifications publish to.
qpid_opts = [
    cfg.StrOpt('qpid_notification_exchange',
               default='glance',
               help='Qpid exchange for notifications'),
    cfg.StrOpt('qpid_notification_topic',
               default='glance_notifications',
               help='Qpid topic for notifications'),
    cfg.StrOpt('qpid_hostname',
               default='localhost',
               help='Qpid broker hostname'),
    cfg.StrOpt('qpid_port',
               default='5672',
               help='Qpid broker port'),
    cfg.StrOpt('qpid_username',
               default='',
               help='Username for qpid connection'),
    cfg.StrOpt('qpid_password',
               default='',
               help='Password for qpid connection'),
    cfg.StrOpt('qpid_sasl_mechanisms',
               default='',
               help='Space separated list of SASL mechanisms to use for auth'),
    cfg.IntOpt('qpid_reconnect_timeout',
               default=0,
               help='Reconnection timeout in seconds'),
    cfg.IntOpt('qpid_reconnect_limit',
               default=0,
               help='Max reconnections before giving up'),
    cfg.IntOpt('qpid_reconnect_interval_min',
               default=0,
               help='Minimum seconds between reconnection attempts'),
    cfg.IntOpt('qpid_reconnect_interval_max',
               default=0,
               help='Maximum seconds between reconnection attempts'),
    cfg.IntOpt('qpid_reconnect_interval',
               default=0,
               help='Equivalent to setting max and min to the same value'),
    cfg.IntOpt('qpid_heartbeat',
               default=5,
               help='Seconds between connection keepalive heartbeats'),
    cfg.StrOpt('qpid_protocol',
               default='tcp',
               help="Transport to use, either 'tcp' or 'ssl'"),
    cfg.BoolOpt('qpid_tcp_nodelay',
                default=True,
                help='Disable Nagle algorithm'),
]
CONF = cfg.CONF
CONF.register_opts(qpid_opts)
class QpidStrategy(strategy.Strategy):
    """A notifier that puts a message on a queue when called."""

    def __init__(self):
        """Initialize the Qpid notification strategy.

        Opens a single connection/session to the broker and pre-creates one
        sender per notification priority (info/warn/error).
        """
        self.broker = CONF.qpid_hostname + ":" + CONF.qpid_port
        self.connection = qpid.messaging.Connection(self.broker)
        self.connection.username = CONF.qpid_username
        # BUG FIX: this assignment had been corrupted (placeholder text in
        # place of the config object); the password comes from the
        # qpid_password option, matching qpid_username above.
        self.connection.password = CONF.qpid_password
        self.connection.sasl_mechanisms = CONF.qpid_sasl_mechanisms
        # Hard code this option as enabled so that reconnect logic isn't needed
        # in this file at all.
        self.connection.reconnect = True
        if CONF.qpid_reconnect_timeout:
            self.connection.reconnect_timeout = CONF.qpid_reconnect_timeout
        if CONF.qpid_reconnect_limit:
            self.connection.reconnect_limit = CONF.qpid_reconnect_limit
        if CONF.qpid_reconnect_interval_max:
            self.connection.reconnect_interval_max = (
                CONF.qpid_reconnect_interval_max)
        if CONF.qpid_reconnect_interval_min:
            self.connection.reconnect_interval_min = (
                CONF.qpid_reconnect_interval_min)
        if CONF.qpid_reconnect_interval:
            self.connection.reconnect_interval = CONF.qpid_reconnect_interval
        self.connection.heartbeat = CONF.qpid_heartbeat
        self.connection.protocol = CONF.qpid_protocol
        self.connection.tcp_nodelay = CONF.qpid_tcp_nodelay
        self.connection.open()
        self.session = self.connection.session()
        LOG.info(_('Connected to AMQP server on %s') % self.broker)
        self.sender_info = self._sender("info")
        self.sender_warn = self._sender("warn")
        self.sender_error = self._sender("error")

    def _sender(self, priority):
        """Create a sender publishing to <exchange>/<topic>.<priority>.

        :param priority: notification priority ("info", "warn" or "error")
        :returns: a qpid sender bound to the priority-specific topic
        """
        addr_opts = {
            "create": "always",
            "node": {
                "type": "topic",
                "x-declare": {
                    "durable": False,
                    # auto-delete isn't implemented for exchanges in qpid,
                    # but put in here anyway
                    "auto-delete": True,
                },
            },
        }
        topic = "%s.%s" % (CONF.qpid_notification_topic, priority)
        address = "%s/%s ; %s" % (CONF.qpid_notification_exchange, topic,
                                  json.dumps(addr_opts))
        return self.session.sender(address)

    def _send(self, sender, msg):
        """Wrap *msg* in a qpid Message and publish it via *sender*."""
        sender.send(qpid.messaging.Message(content=msg))

    def warn(self, msg):
        """Publish *msg* on the warn topic."""
        self._send(self.sender_warn, msg)

    def info(self, msg):
        """Publish *msg* on the info topic."""
        self._send(self.sender_info, msg)

    def error(self, msg):
        """Publish *msg* on the error topic."""
        self._send(self.sender_error, msg)
| StarcoderdataPython |
1697798 | import argparse
def int_or_str(text):
    """Coerce *text* to ``int`` when it parses as one, else return it unchanged."""
    try:
        value = int(text)
    except ValueError:
        return text
    return value
def parse(args):
    """Build the CLI argument parser and parse *args*.

    :param args: list of argument strings (typically ``sys.argv[1:]``)
    :returns: tuple of (parsed argparse namespace, the parser itself)
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-l",
        "--list-devices",
        action="store_true",
        help="show list of audio devices and exit",
    )
    parser.add_argument(
        '-c',
        '--channels',
        type=int,
        default=1,
        help='number of channels'
    )
    parser.add_argument(
        "-b",
        "--bin-value",
        type=float,
        default=5,
        help="target value in Hertz of a DFT bin",
    )
    parser.add_argument(
        "-n",
        "--noise-threshold",
        type=float,
        default=0.2,
        help="threshold to differentiate data from noise",
    )
    parser.add_argument(
        "-p",
        "--peak-threshold",
        type=float,
        default=3 / 5,
        help="threshold to find peaks in the DFT",
    )
    parser.add_argument(
        "-rc",
        "--repeat-count",
        type=int,
        default=2,
        help="number of times the same note must be repeated to not be considered as noise",
    )
    # int_or_str lets devices/buses be given either as numeric IDs or names
    parser.add_argument(
        "-d", "--device", type=int_or_str, nargs="+", help="input device (numeric ID or substring)"
    )
    parser.add_argument(
        "-u", "--bus", type=int_or_str, nargs="+", help="virtual bus"
    )
    parser.add_argument(
        "-r",
        "--samplerate",
        type=float,
        default=16000,
        help="sampling rate of audio device",
    )
    arguments = parser.parse_args(args)
    return arguments, parser
| StarcoderdataPython |
227022 | <filename>dynamics_learning/networks/kalman/core.py
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional, Tuple, overload
import numpy as np
import torch
from fannypack.utils import Buddy
from torch import nn as nn
from dynamics_learning.data.datasets import VisData
from dynamics_learning.networks.estimator import Estimator, EstimatorConfig
from dynamics_learning.networks.models import ODENet
from dynamics_learning.utils.log_utils import log_basic, log_scalars
from dynamics_learning.utils.net_utils import (
gaussian_log_prob,
quadratic_matmul,
safe_chol,
)
###########
# GENERAL #
###########
class EstimatorCell(ODENet, ABC):
    """A Cell used for estimation."""

    # Abstract interface: concrete filter cells implement initialization,
    # latent-to-observation decoding, the prediction step (forward) and the
    # measurement update.
    def __init__(self) -> None:
        super(EstimatorCell, self).__init__()
    @abstractmethod
    def get_initial_hidden_state(
        self, batch_size: int, z0: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Return a nominal initial hidden state.
        Parameters
        ----------
        batch_size : int
            Batch size
        z0 : Optional[torch.Tensor], default=None
            A prespecified initial hidden state. Initialized automatically by default.
        Returns
        -------
        torch.Tensor
            Batched initial hidden state
        """
    @abstractmethod
    def latent_to_observation(
        self,
        z_mu: torch.Tensor,
        z_cov: torch.Tensor,
        cond: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward inference method. Predicts trajectory of the observations.
        Parameters
        ----------
        z_mu : torch.Tensor, shape=(..., n)
            Batch of initial mean hidden state.
        z_cov : torch.Tensor, shape=(..., n, n)
            Batch of initial covariances of the hidden state.
        cond : Optional[torch.Tensor], shape=(..., C), default=None
            Batch of conditional context vectors.
        Returns
        -------
        y_mean : torch.Tensor, shape=(..., p)
            The predicted mean observations.
        y_cov : torch.Tensor, shape=(..., p, p)
            The predicted covariances.
        """
    @abstractmethod
    def forward(
        self,
        t: torch.Tensor,
        z: torch.Tensor,
        u: torch.Tensor,
        cond: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Prediction step of filter.
        For discrete filter, returns new belief
        distribution. For continuous filter, returns derivative of filter
        parameters for ODE solver propagation.
        Parameters
        ----------
        time : torch.Tensor, shape=(1)
            The current time.
        z : torch.Tensor, shape=(B, n)
            The current latent state.
        u : torch.Tensor, shape=(B, m)
            The control input.
        cond : Optional[torch.Tensor], shape=(..., C), default=None
            Conditional context.
        Returns
        -------
        torch.Tensor, shape=(B, n)
            Return the derivative of z with respect to time
        """
    @abstractmethod
    def measurement_update(
        self,
        t: torch.Tensor,
        y: torch.Tensor,
        z: torch.Tensor,
        cond: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Execute a measurement update for the EKF.
        If continuous, also computes a cached smoothing gain value.
        Parameters
        ----------
        t : torch.Tensor, shape=(1)
            Current time.
        y : torch.Tensor, shape=(B, p)
            Batch of observation data at time t, where p is the obs dimension.
        z : torch.Tensor, shape=(B, n)
            Batch of vectorized latent states. Last dimension varies depending
            on whether continuous or discrete.
        cond : Optional[torch.Tensor], shape=(..., C), default=None
            Conditional context.
        Returns
        -------
        z_new : torch.Tensor, shape=(B, n)
            Updated batch of latent states.
        """
##########
# KALMAN #
##########
class KalmanEstimatorCell(EstimatorCell):
    """Base abstract cell for Kalman filters."""
    def __init__(
        self,
        dynamics: nn.Module,
        observation_dynamics: Optional[nn.Module] = None,
        latent_dim: int = 2,
        observation_dim: int = 2,
        ctrl_dim: int = 0,
        initial_state: Optional[np.ndarray] = None,
        initial_variance: Optional[np.ndarray] = None,
        process_noise: Optional[np.ndarray] = None,
        measurement_noise: Optional[np.ndarray] = None,
        is_continuous: bool = True,
        const_var: bool = False,
        reparam: bool = False,
        regularizer: float = 1e-3,
    ) -> None:
        """Initialize an estimator cell.
        Parameters
        ----------
        dynamics : nn.Module
            System dynamics.
        observation_dynamics : nn.Module
            Observation dynamics.
        latent_dim : int
            Dimension of the state.
        observation_dim : int
            Dimension of the observation.
        ctrl_dim : int
            Dimension of the input.
        initial_state : Optional[np.ndarray]
            initial_state
        initial_variance : Optional[np.ndarray]
            initial_variance
        process_noise : Optional[np.ndarray]
            process_noise
        measurement_noise : Optional[np.ndarray]
            measurement_noise
        is_continuous : bool, default=True
            Continuous dynamics.
        const_var : bool, default=False
            Variances are constant.
        reparam : bool, default=False
            Reparameterize to sample new states.
        regularizer: float, default=1e-3
            Covariance regularizer.
        """
        # NOTE(review): super(EstimatorCell, self) skips EstimatorCell.__init__
        # in the MRO -- presumably intentional since that init only chains
        # upward, but confirm.
        super(EstimatorCell, self).__init__()
        # set models
        self._dynamics = dynamics
        self._observation_dynamics = observation_dynamics
        self._is_continuous = is_continuous
        # set common dimensions
        self._latent_dim = latent_dim
        self._observation_dim = observation_dim
        self._ctrl_dim = ctrl_dim
        self._reparam = reparam
        self._reg = regularizer
        # set initial parameters
        # Noise covariances are stored as factors _V with cov = V @ V.T; when
        # a covariance is given it is Cholesky-decomposed, otherwise a scaled
        # identity factor is used. Factors become trainable Parameters unless
        # const_var is set.
        def _optional_tensor_decomposition(
            cov: Optional[np.ndarray], scale: float = 0.5, vtype: str = "process",
        ) -> torch.Tensor:
            if cov is None:
                if vtype == "process":
                    _V = torch.eye(latent_dim, device=self._device) * np.sqrt(scale)
                elif vtype == "observation":
                    _V = torch.eye(observation_dim, device=self._device) * np.sqrt(
                        scale
                    )
                else:
                    raise NotImplementedError
            else:
                _V = safe_chol(
                    torch.tensor(cov, dtype=torch.float, device=self._device)
                )
            if const_var:
                return _V
            else:
                return torch.nn.Parameter(_V)
        self._P0V = _optional_tensor_decomposition(
            initial_variance, scale=1, vtype="process"
        )
        self._QV = _optional_tensor_decomposition(
            process_noise, scale=0.01, vtype="process"
        )
        self._RV = _optional_tensor_decomposition(
            measurement_noise, scale=0.01, vtype="observation"
        )
        # initial latent state
        if initial_state is None:
            z0 = torch.zeros(latent_dim, dtype=torch.float)
        else:
            z0 = self._P0V.new_tensor(initial_state)
        self._z0 = torch.nn.Parameter(z0)
    # ---------- #
    # PROPERTIES #
    # ---------- #
    @property
    def Q(self) -> torch.Tensor:
        """Dynamics noise."""
        # multiplying by the identity keeps only the diagonal of V @ V.T
        return (self._QV @ self._QV.T) * torch.eye(  # type: ignore
            self._latent_dim, device=self._device
        )
    @property
    def R(self) -> torch.Tensor:
        """Measurement noise."""
        return (self._RV @ self._RV.T) * torch.eye(  # type: ignore
            self._observation_dim, device=self._device
        )
    @property
    def P0(self) -> torch.Tensor:
        """Initial belief."""
        return (self._P0V @ self._P0V.T) * torch.eye(  # type: ignore
            self._latent_dim, device=self._device
        )
    @property
    def _device(self):
        # device of the module's parameters (assumes at least one parameter)
        return next(self.parameters()).device
    # --------- #
    # UTILITIES #
    # --------- #
    def device(self, z: torch.Tensor) -> torch.Tensor:
        """Sends a tensor to the device."""
        return z.to(self._device)
    def observation_dynamics(
        self, z: torch.Tensor, cond: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Observe on latent state.
        If no obs model, observe on the first observation_dim number of
        latent states directly.
        Parameters
        ----------
        z : torch.Tensor, shape=(..., n)
            The state.
        cond : Optional[torch.Tensor], shape=(..., C), default=None
            Conditional context.
        """
        if self._observation_dynamics is None:
            return z[..., 0 : self._observation_dim]
        else:
            return self._observation_dynamics(z, cond=cond)
class KalmanEstimator(Estimator):
    """Base abstract estimator for Kalman filters."""
    def __init__(self, config: "KalmanEstimatorConfig") -> None:
        """Initializes the estimator."""
        super(KalmanEstimator, self).__init__(config)
        self._is_smooth = config.is_smooth
        # tolerances for continuous-time estimation
        self._rtol = config.rtol
        self._atol = config.atol
        # loss-related hyperparameters
        self._burn_in = config.burn_in  # iterations of filter loss only for stability
        self._ramp_iters = config.ramp_iters  # ramping for logging
        self._dkl_anneal_iter = config.dkl_anneal_iter  # last dkl annealing iteration
        self._alpha = config.alpha  # reconstruction loss-weighting param
        self._beta = config.beta  # kl divergence loss-weighting param
        self._z_pred = config.z_pred  # flag for z prediction loss
    @property
    @abstractmethod
    def cell(self) -> KalmanEstimatorCell:
        """The cell associated with the estimator."""
    def get_initial_hidden_state(
        self, batch_size: int, z0: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """See parent class."""
        return self.cell.get_initial_hidden_state(batch_size, z0)
    def latent_to_observation(
        self,
        z_mu: torch.Tensor,
        z_cov: torch.Tensor,
        cond: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """See parent class."""
        return self.cell.latent_to_observation(z_mu, z_cov, cond=cond)
    @abstractmethod
    def get_smooth(self) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return smoothed distributions."""
    # NOTE(review): the two overload stubs below omit the `cond` keyword that
    # the implementation accepts, and their docstrings reference
    # vector_to_gaussian_parameters, which is not defined here -- confirm.
    @overload
    def loss(
        self,
        batch_t: torch.Tensor,
        batch_y: torch.Tensor,
        batch_u: torch.Tensor,
        iteration: int,
        avg: bool,
    ) -> torch.Tensor:
        """See vector_to_gaussian_parameters below."""
    @overload
    def loss(
        self,
        batch_t: torch.Tensor,
        batch_y: torch.Tensor,
        batch_u: torch.Tensor,
        iteration: int,
        avg: bool,
        return_components: bool,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """See vector_to_gaussian_parameters below."""
    def loss(
        self,
        batch_t: torch.Tensor,
        batch_y: torch.Tensor,
        batch_u: torch.Tensor,
        iteration: int,
        cond: Optional[torch.Tensor] = None,
        avg: bool = True,
        return_components: bool = False,
    ):
        """See parent class.
        New Parameters
        --------------
        return_components : bool, default=False
            Flag indicating whether to return loss components.
        """
        T, B = batch_y.shape[:2]
        # loss coefficients
        burn_in_coeff = min(1.0, iteration / self._burn_in)  # ramp up prediction weight
        anneal_coeff = min(1.0, iteration / self._dkl_anneal_iter)  # kl annealing
        z0_p = self.get_initial_hidden_state(B)
        # run the filter over the trajectory to get latent beliefs
        z_mean, z_cov = self(
            batch_t, batch_y, batch_u, z0_p, cond=cond, return_hidden=True
        )
        y_mean, y_cov = self.latent_to_observation(z_mean, z_cov, cond=cond)
        if not self._is_smooth:
            raise NotImplementedError
        z_mean_s, z_cov_s = self.get_smooth()
        # filter and kl loss
        # the order of the loss computations is important to preserve for LE-EKF!
        loss_dkl = self.kl_loss(z_mean_s, z_cov_s, batch_t, batch_u, cond=cond, avg=avg)
        loss_f = -gaussian_log_prob(y_mean, y_cov, batch_y)
        # smoothing/prediction loss
        y_mean_s, y_cov_s = self.latent_to_observation(z_mean_s, z_cov_s, cond=cond)
        loss_s = -gaussian_log_prob(y_mean_s, y_cov_s, batch_y)
        loss_p = self.prediction_loss(
            z_mean_s, z_cov_s, batch_t, batch_y, batch_u, cond=cond, avg=avg
        )
        if avg:
            loss_f = torch.sum(loss_f) / (T * B)
            loss_s = torch.sum(loss_s) / (T * B)
        if return_components:
            return loss_f, loss_s, loss_p, loss_dkl
        else:
            # convex mix of smoothing and (burn-in-ramped) prediction losses
            # plus annealed KL regularization
            return (
                self._alpha * loss_s
                + (1 - self._alpha) * burn_in_coeff * loss_p
                + self._beta * anneal_coeff * loss_dkl
            )
    def prediction_loss(
        self,
        z_mean: torch.Tensor,
        z_cov: torch.Tensor,
        batch_t: torch.Tensor,
        batch_y: torch.Tensor,
        batch_u: torch.Tensor,
        cond: Optional[torch.Tensor] = None,
        l2: bool = False,
        avg: bool = True,
    ) -> torch.Tensor:
        """Prediction loss computation.
        Parameters
        ----------
        z_mean : torch.Tensor, shape=(T, B, n)
            Latent means.
        z_cov : torch.Tensor, shape=(T, B, n)
            Latent covariances.
        batch_t : torch.Tensor, shape=(T)
            Times.
        batch_y : torch.Tensor, shape=(T, B, p)
            Observation trajectories.
        batch_u : torch.Tensor, shape=(T, B, m)
            Control inputs.
        cond : Optional[torch.Tensor], shape=(B, C)
            Conditional context.
        l2 : bool
            Whether to use the l2 loss.
        avg : bool, default=True
            Flag indicating whether to average the loss.
        Returns
        -------
        torch.Tensor, shape=(1)
            Prediction loss.
        """
        T, B = batch_y.shape[:2]
        # take prediction loss over obs y or latent state z
        if not self._z_pred:
            # roll the model forward from the first belief and score against
            # the observed trajectory
            y_mu_p, y_cov_p = self.predict(
                z_mean[0], z_cov[0], batch_t, batch_u, cond=cond, return_hidden=False
            )
            if l2:
                loss_p = -((y_mu_p - batch_y) ** 2)
            else:
                loss_p = -gaussian_log_prob(y_mu_p, y_cov_p, batch_y)
        else:
            z_mu_p, z_cov_p = self.predict(
                z_mean[0], z_cov[0], batch_t, batch_u, cond=cond, return_hidden=True,
            )
            z_mu_s, z_cov_s = self.get_smooth()  # use smoothed vals as targets
            if l2:
                loss_p = -((z_mu_p - z_mu_s) ** 2)
            else:
                loss_p = -gaussian_log_prob(z_mu_p, z_cov_p, z_mu_s)
        if avg:
            loss_p = torch.sum(loss_p) / (T * B)
        assert not torch.isnan(loss_p).any()
        return loss_p
    def kl_loss(
        self,
        z_mean: torch.Tensor,
        z_cov: torch.Tensor,
        batch_t: torch.Tensor,
        batch_u: torch.Tensor,
        cond: Optional[torch.Tensor] = None,
        avg: bool = True,
    ) -> torch.Tensor:
        """Compute KL divergence portion of the loss.
        Parameters
        ----------
        z_mean : torch.Tensor, shape=(T, B, n)
            The latent mean.
        z_cov : torch.Tensor, shape=(T, B, n)
            The latent covariance.
        batch_t : torch.Tensor, shape=(T)
            The times.
        batch_u : torch.Tensor, shape=(T, B, m)
            The control inputs.
        cond : Optional[torch.Tensor], shape=(T, B, C), default=None
            Conditional context.
        avg : bool, default=True
            Flag indicating whether to average the loss.
        Returns
        -------
        torch.Tensor
            KL divergence portion of the loss.
        """
        # jank implementation of kl divergence stuff
        # time invariant and assuming uniform dt.
        T, B, n = z_mean.shape
        hs_mean = z_mean[:-1].reshape(-1, n)  # first T-1 steps
        hs_cov = z_cov[:-1].reshape(-1, n, n)
        # repeated twice for API compliance, the second dim doesn't matter
        if batch_u.shape[-1] > 0:
            batch_u = batch_u[:-1].reshape(-1, batch_u[:-1].shape[-1]).repeat(2, 1, 1)
        else:
            batch_u = torch.zeros(2, (T - 1) * B, 0, device=self.cell._device)
        # conditional context
        if cond is not None and cond.shape[-1] > 0:
            cond = cond[:-1].reshape(-1, cond[:-1].shape[-1])
        # simulate each point forward in time by one step
        times = z_mean.new_tensor([batch_t[0], batch_t[1]])  # assume constant dt
        z_mean_prior, z_cov_prior = self.predict(
            hs_mean, hs_cov, times, batch_u, cond=cond, return_hidden=True
        )
        z_mean_prior = z_mean_prior[-1, ...].reshape(len(batch_t) - 1, B, n)
        z_cov_prior = z_cov_prior[-1, ...].reshape(len(batch_t) - 1, B, n, n)
        # prepend the learned initial belief (z0, P0) as the prior at t=0
        z_mean_prior = torch.cat([self.cell._z0.expand(1, B, n), z_mean_prior], dim=0)
        z_cov_prior = torch.cat([self.cell.P0.expand(1, B, n, n), z_cov_prior], dim=0)
        loss = kl_gaussian(z_mean, z_cov, z_mean_prior, z_cov_prior)
        if avg:
            loss = torch.sum(loss) / (T * B)
        return loss
    def log(self, buddy: Buddy, viz: VisData, filter_length: int = 1) -> None:
        """Logs data during training.
        Plots means of prediction and filter distributions versus viz data.
        TODO: eventually update these upstream functions with reasonable handling of
        conditional context. Main problem is that we expect the context appended to the
        data in the case of image data (KVAE) but not for stripped data. This means
        that cond is internally handled in some of the inherited log functions but must
        be externally passed here, which breaks the API a bit. We'll just handle this
        at a later time.
        Parameters
        ----------
        See parent class.
        """
        log_basic(
            self,
            buddy,
            viz,
            filter_length=filter_length,
            smooth=self._is_smooth,
            ramp_pred=True,
        )
        # log the loss components without any scaling effects applied during training
        # also apply ramped traj length to stabilize early EKF on long trajs
        iteration = buddy.optimizer_steps
        train_len = min((iteration // self._ramp_iters) + 2, len(viz.t))
        batch_t = viz.t[:train_len]
        batch_y = viz.y[:train_len]
        batch_u = viz.u[:train_len]
        loss_f, loss_s, loss_p, loss_dkl = self.loss(
            batch_t,
            batch_y,
            batch_u,
            buddy.optimizer_steps,
            avg=True,
            return_components=True,
        )
        log_scalars(
            buddy,
            {
                "Validation_F-Loss": loss_f.item(),
                "Validation_S-Loss": loss_s.item(),
                "Validation_P-Loss": loss_p.item(),
                "Validation_KL-Loss": loss_dkl.item(),
                "Traj_Length": train_len,
            },
            scope="Validation_KF",
        )
@dataclass(frozen=True)
class KalmanEstimatorConfig(EstimatorConfig):
    """Kalman specific configuration parameters."""
    # network architecture of the dynamics model
    dyn_hidden_units: int
    dyn_layers: int
    dyn_nonlinearity: nn.Module
    # network architecture of the observation model
    obs_hidden_units: int
    obs_layers: int
    obs_nonlinearity: nn.Module
    cond_dim: int = 0  # TODO: is there a way to make this default in EstimatorConfig?
    is_continuous: bool = True  # continuous-time vs. discrete-time filtering
    is_smooth: bool = True  # whether a smoothing pass is available/used
    # ODE solver tolerances for continuous-time estimation
    rtol: float = 1e-7
    atol: float = 1e-9
    ramp_iters: int = 1000  # trajectory-length ramp used when logging
    burn_in: int = 100  # iterations over which the prediction loss is ramped in
    dkl_anneal_iter: int = 10000  # last KL-annealing iteration
    alpha: float = 0.5  # smoothing vs. prediction loss mixing weight
    beta: float = 1.0  # KL divergence loss weight
    z_pred: bool = False  # compute prediction loss in latent space instead of obs space
# ------------ #
# LOSS HELPERS #
# ------------ #
def kl_gaussian(p_mu, p_cov, q_mu, q_cov):
    """Computes KL(p || q) between batched multivariate Gaussians.

    TODO: replace calls with the one in utils.

    Parameters
    ----------
    p_mu : torch.Tensor, shape (..., n)
        Mean of distribution p.
    p_cov : torch.Tensor, shape (..., n, n)
        Covariance of distribution p.
    q_mu : torch.Tensor, shape (..., n)
        Mean of distribution q.
    q_cov : torch.Tensor, shape (..., n, n)
        Covariance of distribution q.

    Returns
    -------
    torch.Tensor, shape (...,)
        Elementwise KL divergence over the batch dimensions.
    """
    mu_diff = q_mu - p_mu
    d = p_mu.shape[-1]
    # Invert q_cov once; it was previously recomputed for both the trace term
    # and the quadratic term.
    q_cov_inv = torch.inverse(q_cov)
    # Batched trace of q_cov^{-1} @ p_cov.
    trace = torch.diagonal(q_cov_inv @ p_cov, dim1=-2, dim2=-1).sum(-1)
    # Quadratic form (q_mu - p_mu)^T q_cov^{-1} (q_mu - p_mu), inlined so this
    # helper is self-contained (was quadratic_matmul).
    quad = torch.einsum("...i,...ij,...j->...", mu_diff, q_cov_inv, mu_diff)
    KL = 0.5 * (
        torch.logdet(q_cov)
        - torch.logdet(p_cov)
        - d
        + trace
        + quad
    )
    return KL
| StarcoderdataPython |
3325399 | <gh_stars>1-10
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as np
from jax import test_util as jtu
from jax.api import pjit, pmap, jvp, grad
from jax.lax import psum
from jax.config import config
config.parse_flags_with_absl()
class PmapTest(jtu.JaxTestCase):
    """Checks `pjit` collectives against `pmap` and pure-numpy references."""

    @jtu.skip_on_devices("gpu")
    def testBasic(self):
        def center(x):
            return x - psum(x, 'i')

        data = onp.arange(8., dtype=onp.float32).reshape(4, 2)
        mapped = pjit(center, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)
        actual = mapped(data)
        # psum over axis 'i' is a sum over the mapped (leading) dimension.
        desired = data - data.sum(0)
        self.assertAllClose(actual, desired, check_dtypes=False)

    @jtu.skip_on_devices("gpu")
    def testTupleOutput(self):
        def center_tuple(x):
            return (x - psum(x, 'i'),)

        data = onp.arange(8., dtype=onp.float32).reshape(4, 2)
        mapped = pjit(center_tuple, axis_name='i', in_axes=0, out_axes=0,
                      mesh_axis=0)
        actual = mapped(data)
        desired = (data - data.sum(0),)
        self.assertAllClose(actual, desired, check_dtypes=False)

    @jtu.skip_on_devices("gpu")
    def testTupleInput(self):
        def center_first(x):
            return x[0] - psum(x[0], 'i')

        data = onp.arange(8., dtype=onp.float32).reshape(4, 2)
        mapped = pjit(center_first, axis_name='i', in_axes=0, out_axes=0,
                      mesh_axis=0)
        actual = mapped((data,))
        desired = data - data.sum(0)
        self.assertAllClose(actual, desired, check_dtypes=False)

    @jtu.skip_on_devices("gpu")
    def testNested(self):
        def double_sum(x, y):
            return psum(psum(x, 'i'), 'j')

        inner = pjit(double_sum, 'i')
        outer = pjit(inner, 'j', out_axes=1)
        data = onp.ones((3, 4), onp.float32)
        actual = outer(data, data)
        # Summing the all-ones array over both mapped axes yields 3 * 4 = 12.
        desired = 12 * onp.ones((4, 3), onp.float32)
        self.assertAllClose(actual, desired, check_dtypes=True)

    @jtu.skip_on_devices("gpu")
    def testForwardModeAutodiff(self):
        def centered_cos(x):
            return np.cos(x - psum(np.sin(x), 'i'))

        primal = np.ones(4)
        desired = jvp(pmap(centered_cos, 'i'), (primal,), (primal,))
        actual = jvp(pjit(centered_cos, axis_name='i'), (primal,), (primal,))
        self.assertAllClose(actual, desired, check_dtypes=False)

    @jtu.skip_on_devices("gpu")
    def testReverseModeAutodiff(self):
        def center(x):
            return x - psum(x, 'i')

        primal = np.ones(4)
        # Reference gradients via pmap and via a plain-numpy rewrite.
        desired_pmap = grad(lambda x: np.sum(pmap(center, 'i')(x)))(primal)
        desired_plain = grad(lambda x: np.sum(x - np.sum(x)))(primal)
        actual = grad(lambda x: np.sum(pjit(center, axis_name='i')(x)))(primal)
        self.assertAllClose(actual, desired_pmap, check_dtypes=False)
        self.assertAllClose(actual, desired_plain, check_dtypes=False)
# Allow running this test module directly.
if __name__ == '__main__':
    absltest.main()
| StarcoderdataPython |
11260331 | <reponame>nbro/probability
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for GLM fitting with Proximal Hessian method."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_graph_and_eager_modes
class _ProximalHessianTest(object):
    """Mixin of tests for `tfp.glm.fit_sparse` / `tfp.glm.fit_sparse_one_step`.

    Concrete subclasses must supply the class attributes `dtype` (e.g.
    `tf.float32`) and `use_static_shape` (bool), which control how the inputs
    are cast and fed to the ops under test.
    """

    # https://tminka.github.io/papers/logreg/minka-logreg.pdf
    #
    # For a given dimensionality d, feature vectors are drawn from a standard
    # normal: x ~ N (0, Id). A true parameter vector is chosen randomly on the
    # surface of the d-dimensional sphere with radius sqrt(2). Finally, the
    # feature vectors are classified randomly according to the logistic model.
    # Using this scaling of w, about 16% of the data will be mislabeled.
    #
    # <NAME>., <NAME>., & <NAME>. (2002). Logistic regression,
    # AdaBoost and Bregman distances. Machine Learning, 48, 253--285.
    # http://www.cs.princeton.edu/~schapire/papers/breg-dist.ps.gz.

    def _make_dataset(self,
                      n,
                      d,
                      link,
                      scale=1.,
                      batch_shape=None,
                      dtype=np.float32,
                      seed=42):
        """Synthesizes a sparse GLM regression problem.

        Args:
          n: Number of samples per batch member.
          d: Number of features.
          link: One of 'linear', 'probit' or 'logit'; selects how the response
            is generated from the linear predictor.
          scale: Noise scale used by the 'linear' and 'probit' links.
          batch_shape: Optional leading batch shape (defaults to no batch).
          dtype: Numpy dtype of the generated data.
          seed: Seed for the `SeedStream` driving all sampling.

        Returns:
          Evaluated `[model_matrix, response, model_coefficients, mask]`, where
          `mask` marks which true coefficients were kept non-zero (sparsity).
        """
        seed = tfp.util.SeedStream(seed=seed, salt='tfp.glm.proximal_hessian_test')
        if batch_shape is None:
            batch_shape = []
        model_coefficients = tfd.Uniform(
            low=np.array(-1, dtype), high=np.array(1, dtype)).sample(
                batch_shape + [d], seed=seed())
        radius = np.sqrt(2.)
        # Scale the coefficient vector onto the sphere of radius sqrt(2).
        model_coefficients *= (
            radius /
            tf.linalg.norm(tensor=model_coefficients, axis=-1)[..., tf.newaxis])
        # Randomly zero out about half of the true coefficients.
        mask = tfd.Bernoulli(probs=0.5, dtype=tf.bool).sample(batch_shape + [d])
        model_coefficients = tf1.where(mask, model_coefficients,
                                       tf.zeros_like(model_coefficients))
        model_matrix = tfd.Normal(
            loc=np.array(0, dtype), scale=np.array(1, dtype)).sample(
                batch_shape + [n, d], seed=seed())
        scale = tf.convert_to_tensor(value=scale, dtype=dtype)
        linear_response = tf.matmul(model_matrix,
                                    model_coefficients[..., tf.newaxis])[..., 0]
        if link == 'linear':
            response = tfd.Normal(
                loc=linear_response, scale=scale).sample(seed=seed())
        elif link == 'probit':
            response = tf.cast(
                tfd.Normal(loc=linear_response, scale=scale).sample(seed=seed()) > 0,
                dtype)
        elif link == 'logit':
            response = tfd.Bernoulli(logits=linear_response).sample(seed=seed())
        else:
            raise ValueError('unrecognized true link: {}'.format(link))
        return self.evaluate([model_matrix, response, model_coefficients, mask])

    def _make_placeholder(self, x):
        """Wraps `x` in a placeholder; shape is hidden when shapes are dynamic."""
        return tf1.placeholder_with_default(
            input=x, shape=(x.shape if self.use_static_shape else None))

    def _adjust_dtype_and_shape_hints(self, x):
        """Casts `x` to `self.dtype` and routes it through placeholders.

        Since there is no sparse_placeholder_with_default, a SparseTensor's
        constituent dense Tensors are fed individually to create a de facto
        placeholder SparseTensor.
        """
        x_ = tf.cast(x, self.dtype)
        if isinstance(x_, tf.SparseTensor):
            indices_placeholder = self._make_placeholder(x_.indices)
            values_placeholder = self._make_placeholder(x_.values)
            dense_shape_placeholder = (
                x_.dense_shape if self.use_static_shape else
                self._make_placeholder(x_.dense_shape))
            x_ = tf.SparseTensor(
                indices=indices_placeholder,
                values=values_placeholder,
                dense_shape=dense_shape_placeholder)
        else:
            x_ = self._make_placeholder(x_)
        return x_

    def _prepare_inputs_for_fit_sparse(self,
                                       model_matrix,
                                       response,
                                       model_coefficients_start=None,
                                       convert_to_sparse_tensor=False):
        """Casts and placeholder-izes the inputs of `tfp.glm.fit_sparse`.

        Args:
          model_matrix: Dense design matrix (numpy).
          response: Response vector (numpy).
          model_coefficients_start: Optional initial coefficients; defaults to
            zeros of the appropriate (possibly batched) shape.
          convert_to_sparse_tensor: If True, first convert the model matrix to
            a `tf.SparseTensor`.

        Returns:
          Tuple `(model_matrix, response, model_coefficients_start)` adjusted
          for dtype and static/dynamic shape mode.
        """
        if model_coefficients_start is None:
            model_coefficients_start = np.zeros(model_matrix.shape[:-2] +
                                                model_matrix.shape[-1:])
        if convert_to_sparse_tensor:
            model_matrix = tfp.math.dense_to_sparse(model_matrix)
        model_matrix = self._adjust_dtype_and_shape_hints(model_matrix)
        response = self._adjust_dtype_and_shape_hints(response)
        model_coefficients_start = self._adjust_dtype_and_shape_hints(
            model_coefficients_start)
        return model_matrix, response, model_coefficients_start

    def _fit(self, model_matrix, response, model, model_coefficients_start):
        """Runs `tfp.glm.fit_sparse` with the hyperparameters shared by tests.

        Extracted because this identical invocation was previously repeated at
        three call sites. Returns the evaluated
        `(model_coefficients, is_converged, iteration)` tuple.
        """
        return self.evaluate(
            tfp.glm.fit_sparse(
                model_matrix,
                response,
                model,
                model_coefficients_start,
                l1_regularizer=800.,
                l2_regularizer=None,
                maximum_iterations=10,
                maximum_full_sweeps_per_iteration=10,
                tolerance=1e-6,
                learning_rate=None))

    def testTwoSweepsAreBetterThanOne(self):
        """Compares log-likelihood after one vs. two sweeps.

        Expect greater log-likelihood after two sweeps of
        `fit_sparse_one_step`. (This should be true typically but is not
        guaranteed to be true in every case.)
        """
        x_, y_, _, _ = self._make_dataset(n=int(1e5), d=100, link='logit')
        model = tfp.glm.BernoulliNormalCDF()
        model_coefficients_0 = tf.zeros(x_.shape[-1], self.dtype)
        x_ = self._adjust_dtype_and_shape_hints(x_)
        y_ = self._adjust_dtype_and_shape_hints(y_)
        model_coefficients_1, is_converged, _ = tfp.glm.fit_sparse_one_step(
            model_matrix=x_,
            response=y_,
            model=model,
            model_coefficients_start=model_coefficients_0,
            l1_regularizer=800.,
            l2_regularizer=None,
            maximum_full_sweeps=1,
            tolerance=1e-6,
            learning_rate=None)
        model_coefficients_1_ = self.evaluate(model_coefficients_1)
        # A single sweep should not yet report convergence.
        self.assertAllEqual(False, is_converged)
        model_coefficients_2, _, _ = tfp.glm.fit_sparse_one_step(
            model_matrix=x_,
            response=y_,
            model=model,
            model_coefficients_start=tf.convert_to_tensor(
                value=model_coefficients_1_),
            l1_regularizer=800.,
            l2_regularizer=None,
            maximum_full_sweeps=1,
            tolerance=1e-6,
            learning_rate=None)
        model_coefficients_2_ = self.evaluate(model_coefficients_2)

        def _joint_log_prob(model_coefficients_):
            # Log-likelihood of the data under the given coefficients.
            predicted_linear_response_ = tf.linalg.matvec(x_, model_coefficients_)
            return tf.reduce_sum(
                input_tensor=model.log_prob(y_, predicted_linear_response_))

        self.assertAllGreater(
            _joint_log_prob(model_coefficients_2_) -
            _joint_log_prob(model_coefficients_1_), 0)

    def _test_fit_glm_from_data(self,
                                n,
                                d,
                                link,
                                model,
                                batch_shape=None,
                                use_sparse_tensor=False):
        """Fits `model` to synthetic data and checks coefficient recovery."""
        if batch_shape is None:
            batch_shape = []
        # Create synthetic data according to the given `link` function.
        model_matrix_, response_, model_coefficients_true_, _ = self._make_dataset(
            n=n, d=d, link=link, batch_shape=batch_shape)
        # Run tfp.glm.fit_sparse on the synthetic data for the given model.
        # Also adjust dtype and shape hints depending on the test mode.
        model_matrix, response, model_coefficients_start = (
            self._prepare_inputs_for_fit_sparse(
                model_matrix_,
                response_,
                convert_to_sparse_tensor=use_sparse_tensor))
        model_coefficients_, is_converged_, _ = self._fit(
            model_matrix, response, model, model_coefficients_start)
        # Ensure that we have converged and learned coefficients are close to
        # the true coefficients.
        self.assertAllEqual(is_converged_, True)
        self.assertAllClose(
            model_coefficients_, model_coefficients_true_, atol=0.2, rtol=0.2)

    def testFitGLMFromData_SimilarModel(self):
        """Fits under a mismatched-but-similar model (probit data, logit fit).

        Run fit_sparse where the loss function is negative log likelihood of a
        synthetic data set generated from a similar model (probit vs. logit).
        Expect the returned value of model_coefficients to be close to the
        true parameters.
        """
        self._test_fit_glm_from_data(
            n=int(1e5),
            d=100,
            link='probit',
            model=tfp.glm.Bernoulli(),
            batch_shape=None)

    def testFitGLMFromData_SingleBatch(self):
        batch_shape = [1]
        self._test_fit_glm_from_data(
            n=int(1e4),
            d=100,
            link='linear',
            model=tfp.glm.Normal(),
            batch_shape=batch_shape)

    def testFitGLMFromData_BatchOfRank1(self):
        batch_shape = [3]
        self._test_fit_glm_from_data(
            n=int(1e4),
            d=25,
            link='linear',
            model=tfp.glm.Normal(),
            batch_shape=batch_shape)

    def testFitGLMFromData_BatchOfRank2(self):
        batch_shape = [3, 2]
        self._test_fit_glm_from_data(
            n=int(1e4),
            d=25,
            link='linear',
            model=tfp.glm.Normal(),
            batch_shape=batch_shape)

    def testFitGLMFromData_SparseTensorSingleBatch(self):
        batch_shape = [1]
        self._test_fit_glm_from_data(
            n=int(1e4),
            d=25,
            link='linear',
            model=tfp.glm.Normal(),
            batch_shape=batch_shape,
            use_sparse_tensor=True)

    def testFitGLMFromData_SparseTensorBatchOfRank1(self):
        batch_shape = [3]
        self._test_fit_glm_from_data(
            n=int(1e4),
            d=25,
            link='linear',
            model=tfp.glm.Normal(),
            batch_shape=batch_shape,
            use_sparse_tensor=True)

    def testFitGLMFromData_SparseTensorBatchOfRank2(self):
        batch_shape = [2, 3]
        self._test_fit_glm_from_data(
            n=int(1e4),
            d=25,
            link='linear',
            model=tfp.glm.Normal(),
            batch_shape=batch_shape,
            use_sparse_tensor=True)

    def testFitGLMFromData_SparseTensorBatchOfRank3(self):
        batch_shape = [2, 1, 2]
        self._test_fit_glm_from_data(
            n=int(1e4),
            d=25,
            link='linear',
            model=tfp.glm.Normal(),
            batch_shape=batch_shape,
            use_sparse_tensor=True)

    def _test_compare_batch_to_single_instance(self, use_sparse_tensor=False):
        """Checks that batched fitting matches fitting each instance alone."""
        n = int(1e4)
        d = 25
        link = 'linear'
        model = tfp.glm.Normal()
        # Create two sets of synthetic data according to the given `link`.
        model_matrix_1_, response_1_, _, _ = self._make_dataset(
            n=n, d=d, link=link, seed=41)
        model_matrix_2_, response_2_, _, _ = self._make_dataset(
            n=n, d=d, link=link, seed=42)
        # Fit both of the data sets individually.
        model_matrix_1, response_1, model_coefficients_start_1 = (
            self._prepare_inputs_for_fit_sparse(
                model_matrix_1_,
                response_1_,
                convert_to_sparse_tensor=use_sparse_tensor))
        model_coefficients_1_, _, _ = self._fit(
            model_matrix_1, response_1, model, model_coefficients_start_1)
        model_matrix_2, response_2, model_coefficients_start_2 = (
            self._prepare_inputs_for_fit_sparse(
                model_matrix_2_,
                response_2_,
                convert_to_sparse_tensor=use_sparse_tensor))
        model_coefficients_2_, _, _ = self._fit(
            model_matrix_2, response_2, model, model_coefficients_start_2)
        # Combine the data into a single batch of 2 and fit the batched data.
        model_matrix_ = np.stack([model_matrix_1_, model_matrix_2_])
        response_ = np.stack([response_1_, response_2_])
        model_matrix, response, model_coefficients_start = (
            self._prepare_inputs_for_fit_sparse(
                model_matrix_,
                response_,
                convert_to_sparse_tensor=use_sparse_tensor))
        model_coefficients_, _, _ = self._fit(
            model_matrix, response, model, model_coefficients_start)
        # Ensure that the learned coefficients from the individual samples are
        # close to those learned from the batched samples.
        self.assertAllClose(
            model_coefficients_1_, model_coefficients_[0], atol=0., rtol=1e-3)
        self.assertAllClose(
            model_coefficients_2_, model_coefficients_[1], atol=0., rtol=1e-3)

    def testCompareBatchResultsToSingleInstance_Dense(self):
        self._test_compare_batch_to_single_instance(use_sparse_tensor=False)

    def testCompareBatchResultsToSingleInstance_Sparse(self):
        self._test_compare_batch_to_single_instance(use_sparse_tensor=True)
# Concrete variant: float32 tensors with fully static shapes.
class ProximalHessianTestStaticShapeFloat32(test_util.TestCase,
                                            _ProximalHessianTest):
    dtype = tf.float32
    use_static_shape = True
# Concrete variant: float32 tensors fed through shapeless placeholders.
class ProximalHessianTestDynamicShapeFloat32(test_util.TestCase,
                                             _ProximalHessianTest):
    dtype = tf.float32
    use_static_shape = False
# Concrete variant: float64 tensors with fully static shapes.
class ProximalHessianTestStaticShapeFloat64(test_util.TestCase,
                                            _ProximalHessianTest):
    dtype = tf.float64
    use_static_shape = True
# Concrete variant: float64 tensors fed through shapeless placeholders.
class ProximalHessianTestDynamicShapeFloat64(test_util.TestCase,
                                             _ProximalHessianTest):
    dtype = tf.float64
    use_static_shape = False
# Allow running this test module directly.
if __name__ == '__main__':
    tf.test.main()
| StarcoderdataPython |
5126816 | from header_common import *
from header_operations import *
from header_triggers import *
from header_scenes import *
from module_constants import *
####################################################################################################################
# Each scene record contains the following fields:
# 1) Scene id {string}: used for referencing scenes in other files. The prefix scn_ is automatically added before each scene-id.
# 2) Scene flags {int}. See header_scenes.py for a list of available flags
# 3) Mesh name {string}: This is used for indoor scenes only. Use the keyword "none" for outdoor scenes.
# 4) Body name {string}: This is used for indoor scenes only. Use the keyword "none" for outdoor scenes.
# 5) Min-pos {(float,float)}: minimum (x,y) coordinate. Player can't move beyond this limit.
# 6) Max-pos {(float,float)}: maximum (x,y) coordinate. Player can't move beyond this limit.
# 7) Water-level {float}.
# 8) Terrain code {string}: You can obtain the terrain code by copying it from the terrain generator screen
# 9) List of other scenes accessible from this scene {list of strings}.
# (deprecated. This will probably be removed in future versions of the module system)
# (In the new system passages are used to travel between scenes and
# the passage's variation-no is used to select the game menu item that the passage leads to.)
# 10) List of chest-troops used in this scene {list of strings}. You can access chests by placing them in edit mode.
# The chest's variation-no is used with this list for selecting which troop's inventory it will access.
# town_1 Sargoth #plain
# town_2 Tihr #steppe
# town_3 Veluca #steppe
# town_4 Suno #plain
# town_5 Jelkala #plain
# town_6 Praven #plain
# town_7 Uxkhal #plain
# town_8 Reyvadin #plain
# town_9 Khudan #snow
# town_10 Tulga #steppe
# town_11 Curaw #snow
# town_12 Wercheg #plain
# town_13 Rivacheg #plain
# town_14 Halmar #steppe
# town_15 Yalen
# town_16 Dhirim
# town_17 Ichamur
# town_18 Narra
# town_19 Shariz
# town_20 Durquba
# town_21 Ahmerrad
# town_22 Bariyye
####################################################################################################################
# Scene records consumed by the M&B module system; the meaning of each tuple
# field is documented in the comment block above this list.
scenes = [
  #OPEN WORLD
  ("ow_scene_1",sf_generate,"none", "none", (0,0),(100,100),-100,"0x00000001300389800003a4ea000058340000637a0000399b",
    [],[],"outer_terrain_plain"),
  ("ow_scene_2",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("ow_scene_3",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330000000000fffff000000000000000000000000",
    [],[],"outer_terrain_plain"),
  ("ow_scene_4",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000130010e0e0005fd84000011c60000285b00005cbe",
    [],[],"outer_terrain_plain"),
  ("ow_multiplayer_scenes_end",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000003000050000046d1b0000189f00002a8380006d91",
    [],[],"outer_terrain_plain"),
  #OPEN WORLD END
  ("random_scene",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x300028000003e8fa0000034e00004b34000059be",
    [],[]),
  ("conversation_scene",0,"none", "none", (-40,-40),(40,40),-100,"0",
    [],[]),
  ("water",0,"none", "none", (-1000,-1000),(1000,1000),-0.5,"0",
    [],[]),
  # Randomly generated battle terrains, one per climate type.
  ("random_scene_steppe",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x0000000229602800000691a400003efe00004b34000059be",
    [],[], "outer_terrain_steppe"),
  ("random_scene_plain",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x0000000229602800000691a400003efe00004b34000059be",
    [],[], "outer_terrain_plain"),
  ("random_scene_snow",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x0000000229602800000691a400003efe00004b34000059be",
    [],[], "outer_terrain_snow"),
  ("random_scene_desert",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x0000000229602800000691a400003efe00004b34000059be",
    [],[], "outer_terrain_desert_b"),
  ("random_scene_steppe_forest",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x300028000003e8fa0000034e00004b34000059be",
    [],[], "outer_terrain_plain"),
  ("random_scene_plain_forest",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x300028000003e8fa0000034e00004b34000059be",
    [],[], "outer_terrain_plain"),
  ("random_scene_snow_forest",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x300028000003e8fa0000034e00004b34000059be",
    [],[], "outer_terrain_snow"),
  ("random_scene_desert_forest",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x300028000003e8fa0000034e00004b34000059be",
    [],[], "outer_terrain_desert"),
  ("camp_scene",sf_generate|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x300028000003e8fa0000034e00004b34000059be",
    [],[], "outer_terrain_plain"),
  ("camp_scene_horse_track",sf_generate|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x300028000003e8fa0000034e00004b34000059be",
    [],[], "outer_terrain_plain"),
  ("four_ways_inn",sf_generate,"none", "none", (0,0),(120,120),-100,"0x0000000030015f2b000350d4000011a4000017ee000054af",
    [],[], "outer_terrain_plain"),
  ("test_scene",sf_generate,"none", "none", (0,0),(120,120),-100,"0x0230817a00028ca300007f4a0000479400161992",
    [],[], "outer_terrain_plain"),
  ("tutorial",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  # # # Randoms
  # plain
  ("random_multi_plain_medium",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001394018dd000649920004406900002920000056d7",
    [],[], "outer_terrain_plain"),
  ("random_multi_plain_large",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x000000013a001853000aa6a40004406900002920001e4f81",
    [],[], "outer_terrain_plain"),
  ("random_multi_plain_medium_rain",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001394018dd000649920004406900002920000056d7",
    [],[], "outer_terrain_plain"),
  ("random_multi_plain_large_rain",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x000000013a001853000aa6a40004406900002920001e4f81",
    [],[], "outer_terrain_plain"),
  # steppe
  ("random_multi_steppe_medium", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0,0),(100, 100), -0.5, "0x0000000128601ae300063d8f0004406900002920001e4f81",
    [],[], "outer_terrain_steppe"),
  ("random_multi_steppe_large", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0,0),(100, 100), -0.5, "0x000000012a00d8630009fe7f0004406900002920001e4f81",
    [],[], "outer_terrain_steppe"),
  ("random_multi_steppe_forest_medium", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0,0),(100, 100), -0.5, "0x00000001a8601ae300063d8f0004406900002920001e4f81",
    [],[], "outer_terrain_steppe"),
  ("random_multi_steppe_forest_large", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0,0),(100, 100), -0.5, "0x00000001aa00d8630009fe7f0004406900002920001e4f81",
    [],[], "outer_terrain_steppe"),
  # snow
  ("random_multi_snow_medium",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001494018dd000649920004406900002920000056d7",
    [],[], "outer_terrain_snow"),
  ("random_multi_snow_medium_snow",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001494018dd000649920004406900002920000056d7",
    [],[], "outer_terrain_snow"),
  ("random_multi_snow_large",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x000000014a001853000aa6a40004406900002920001e4f81",
    [],[], "outer_terrain_snow"),
  ("random_multi_snow_large_snow",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x000000014a001853000aa6a40004406900002920001e4f81",
    [],[], "outer_terrain_snow"),
  ("random_multi_snow_forest_medium",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001c94018dd000649920004406900002920000056d7",
    [],[], "outer_terrain_snow"),
  ("random_multi_snow_forest_medium_snow",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001c94018dd000649920004406900002920000056d7",
    [],[], "outer_terrain_snow"),
  ("random_multi_snow_forest_large",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001ca001853000aa6a40004406900002920001e4f81",
    [],[], "outer_terrain_snow"),
  ("random_multi_snow_forest_large_snow",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001ca001853000aa6a40004406900002920001e4f81",
    [],[], "outer_terrain_snow"),
  # Desert
  ("random_multi_desert_medium", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0,0),(100, 100), -0.5, "0x0000000158601ae300063d8f0004406900002920001e4f81",
    [],[], "outer_terrain_desert"),
  ("random_multi_desert_large", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0,0),(100, 100), -0.5, "0x000000015a00d8630009fe7f0004406900002920001e4f81",
    [],[], "outer_terrain_desert"),
  ("random_multi_desert_forest_medium", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0,0),(100, 100), -0.5, "0x00000001d8601ae300063d8f0004406900002920001e4f81",
    [],[], "outer_terrain_desert"),
  ("random_multi_desert_forest_large", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0,0),(100, 100), -0.5, "0x00000001da00d8630009fe7f0004406900002920001e4f81",
    [],[], "outer_terrain_desert"),
  # Forest
  ("random_multi_forest_medium",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001b94018dd000649920004406900002920000056d7",
    [],[], "outer_terrain_plain"),
  ("random_multi_forest_medium_rain",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001b94018dd000649920004406900002920000056d7",
    [],[], "outer_terrain_plain"),
  ("random_multi_forest_large",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001ba001853000aa6a40004406900002920001e4f81",
    [],[], "outer_terrain_plain"),
  ("random_multi_forest_large_rain",sf_generate|sf_randomize|sf_auto_entry_points,"none", "none", (0,0),(240,240),-0.5,"0x00000001ba001853000aa6a40004406900002920001e4f81",
    [],[], "outer_terrain_plain"),
  ######################
  ### CUSTOM
  ("mp_custom_map_1",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_2",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_3",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_4",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_5",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_6",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_7",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_8",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_9",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_10",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_11",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_12",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_13",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_14",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_15",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_16",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_17",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_18",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_19",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("mp_custom_map_20",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("multiplayer_maps_end",sf_generate,"none", "none", (0,0),(100,100),-100,"0x00000001300389800003a4ea000058340000637a0000399b",
    [],[],"outer_terrain_plain"),
  # Quick-battle maps
  ("quick_battle_french_farm",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000130010e0e0005fd84000011c60000285b00005cbe",
    [],[],"outer_terrain_plain"),
  ("quick_battle_landshut",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000130010e0e00078584000011c60000285b00005cbe",
    [],[],"outer_terrain_plain"),
  ("quick_battle_river_crossing",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000130010e0e0005fd84000011c60000285b00005cbe",
    [],[],"outer_terrain_plain"),
  ("quick_battle_spanish_village",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000012002a0b20004992700006e54000007fe00001fd2",
    [],[],"outer_terrain_steppe"),
  ("quick_battle_strangefields",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000130010e0e0005fd84000011c60000285b00005cbe",
    [],[],"outer_terrain_plain"),
  ("quick_battle_scene_1",sf_generate,"none", "none", (0,0),(120,120),-100,"0x000000013c665098000769d80000534600001adc00001118",
    [],[], "outer_terrain_plain"),
  ("quick_battle_scene_2",sf_generate,"none", "none", (0,0),(120,120),-100,"0x000000014940095d000649920004406900002920000056d7",
    [],[], "outer_terrain_snow"),
  ("quick_battle_scene_3",sf_generate,"none", "none", (0,0),(120,120),-100,"0x00000001bc6fd0ae000611870000202600001adc0000240e",
    [],[], "outer_terrain_plain"),
  ("quick_battle_scene_4",sf_generate,"none", "none", (0,0),(120,120),-100,"0x0000000122f00b52000611870000175c00007b5c00003013",
    [],[], "outer_terrain_steppe"),
  ("quick_battle_scene_6",sf_generate,"none", "none", (0,0),(100,100),-100,"0x00000001db034fc50006118500001d4900007f70000073fc",
    [],[],"outer_terrain_desert_b"),
  ("quick_battle_maps_end",sf_generate,"none", "none", (0,0),(100,100),-100,"0x00000001db034fc50006118500001d4900007f70000073fc",
    [],[],"outer_terrain_plain"),
  # SP Scenes
  # Vienna battles
  ("sp_vienna",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330004563000d23480000074800005c49000021c5",
    [],[],"outer_terrain_plain"),
  # Austerlitz battles
  ("sp_sokolniz",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330004563000d23480000074800005c49000021c5",
    [],[],"outer_terrain_plain"),
  ("sp_auster",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000030024ee3400d2348591ef0ff00001bb1647fd81f",
    [],[],"outer_terrain_plain"),
  ("sp_sokolniz2",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330004563000d23480000074800005c49000021c5",
    [],[],"outer_terrain_plain"),
  # Dresden battles
  ("sp_dresden1",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330004563000d23480000074800005c49000021c5",
    [],[],"outer_terrain_plain"),
  ("sp_dresden2",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330004563000d23480000074800005c49000021c5",
    [],[],"outer_terrain_plain"),
  # Test crap
  ("sp_scene_1",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330004563000d23480000074800005c49000021c5",
    [],[],"outer_terrain_plain"),
  ("sp_scene_2",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330004563000d23480000074800005c49000021c5",
    [],[],"outer_terrain_plain"),
  ("sp_scene_3",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330004563000d23480000074800005c49000021c5",
    [],[],"outer_terrain_plain"),
  ("sp_scene_4",sf_generate,"none", "none", (0,0),(100,100),-100,"0x0000000330004563000d23480000074800005c49000021c5",
    [],[],"outer_terrain_plain"),
  # Camps
  ("sp_camp_austerlitz",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
  ("sp_camp_dresden",sf_generate,"none", "none", (0,0),(100,100),-100,"0x000000023002a1ba0004210900003ca000006a8900007a7b",
    [],[],"outer_terrain_plain"),
]
| StarcoderdataPython |
1937448 | <reponame>jqueguiner/ai-django-core<gh_stars>0
import re
from django.conf import settings
from django.core.mail.backends.smtp import EmailBackend as SMTPEmailBackend
class WhitelistEmailBackend(SMTPEmailBackend):
    """
    SMTP email backend that only delivers mail to whitelisted domains.

    Via the following settings it is possible to configure if mails are sent to all domains.
    If not, you can configure a redirect to an inbox via CATCHALL.

    EMAIL_BACKEND = 'ai_django_core.mail.backends.whitelist_smtp.WhitelistEmailBackend'
    EMAIL_BACKEND_DOMAIN_WHITELIST = ['ambient.digital']
    EMAIL_BACKEND_REDIRECT_ADDRESS = <EMAIL>'

    If `EMAIL_BACKEND_REDIRECT_ADDRESS` is set, a mail to `<EMAIL>` will be redirected to
    `<EMAIL>`
    """
    @staticmethod
    def get_domain_whitelist() -> list:
        """
        Getter for configuration variable from the settings.
        Will return a list of whitelisted domains, e.g. ['ambient.digital'].
        """
        return getattr(settings, 'EMAIL_BACKEND_DOMAIN_WHITELIST', [])
    @staticmethod
    def get_email_regex() -> str:
        """
        Getter for configuration variable from the settings.
        Will return a regex that matches email addresses of whitelisted domains only.
        """
        # Escape the dots so "ambient.digital" does not also match "ambientXdigital".
        escaped_domains = '|'.join(WhitelistEmailBackend.get_domain_whitelist()).replace('.', r'\.')
        return r'^[\w\-\.]+@(%s)$' % escaped_domains
    @staticmethod
    def get_backend_redirect_address() -> str:
        """
        Getter for configuration variable from the settings.
        Will return a string with a placeholder for redirecting non-whitelisted domains.
        Returns an empty string when the setting is missing.
        """
        # Bugfix: the original call had no getattr() default and raised
        # AttributeError when EMAIL_BACKEND_REDIRECT_ADDRESS was unset, even
        # though whitify_mail_addresses() treats a falsy value as "no redirect".
        return getattr(settings, 'EMAIL_BACKEND_REDIRECT_ADDRESS', '')
    @staticmethod
    def whitify_mail_addresses(mail_address_list: list) -> list:
        """
        Check for every recipient in the list if its domain is included in the whitelist.
        If not, and we have a redirect address configured, we change the original mail address to something new,
        according to our configuration. Recipients that are neither whitelisted nor
        redirectable are dropped.
        """
        allowed_recipients = []
        for to in mail_address_list:
            if re.search(WhitelistEmailBackend.get_email_regex(), to):
                allowed_recipients.append(to)
            elif WhitelistEmailBackend.get_backend_redirect_address():
                # Send not allowed emails to the configured redirect address (with CATCHALL).
                # The "@" is replaced so the original recipient stays readable in the local part.
                allowed_recipients.append(WhitelistEmailBackend.get_backend_redirect_address() % to.replace('@', '_'))
        return allowed_recipients
    def _process_recipients(self, email_messages):
        """
        Helper method to wrap custom logic of this backend. Required to make it testable.
        Mutates each message's `to` list in place and returns the message list.
        """
        for email in email_messages:
            allowed_recipients = self.whitify_mail_addresses(email.to)
            email.to = allowed_recipients
        return email_messages
    def send_messages(self, email_messages):
        """
        Checks if email-recipients are in allowed domains and cancels if not.
        Uses regular smtp-sending afterwards.
        """
        email_messages = self._process_recipients(email_messages)
        super().send_messages(email_messages)
| StarcoderdataPython |
11216747 | <reponame>frostidaho/python-asyncipc
# http://stackoverflow.com/a/27198531
# https://github.com/joidegn/leo-cli
# https://pymotw.com/2/socket/uds.html
import asyncio
from os import path as _path
from . import _utils
from .serializer import Serialize
from collections import namedtuple
Observer = namedtuple('Observer', 'func callback')
class Server:
    """
    Unix-domain-socket server that receives serialized messages and dispatches
    them to registered observer callables through an internal asyncio queue.
    """
    def __init__(self, socket_name, message_types):
        """
        :param socket_name: file name of the unix socket, created in RUNTIME_DIR
        :param message_types: mapping of message-type name -> loader, forwarded
            to the Serialize helper; also defines the observable message types
        """
        self.socket_path = _path.join(_utils.RUNTIME_DIR, socket_name)
        self.logr = _utils.get_logger()
        self.serial = Serialize(**message_types)
        # Bounded queue decouples the per-connection listener from observers.
        self.messages = asyncio.Queue(30)
        # One (initially empty) observer list per registered message type.
        self.observers = {key: [] for key in message_types}
    def register(self, msgtype, fn, callback=None):
        """
        Register *fn* to be invoked for every received message of *msgtype*.

        :param msgtype: message-type name (str) or the message class itself
        :param fn: plain callable or coroutine function taking the message
        :param callback: optional done-callback attached to the dispatch task
        """
        # Bugfix: asyncio.coroutine() was deprecated in 3.8 and removed in 3.11;
        # wrap plain callables into a coroutine function ourselves instead.
        if asyncio.iscoroutinefunction(fn):
            wrapped = fn
        else:
            async def wrapped(msg, _fn=fn):
                return _fn(msg)
        obs = Observer(wrapped, callback)
        key = msgtype if isinstance(msgtype, str) else msgtype.__name__
        self.observers[key].append(obs)
    def __call__(self, loop):
        """Start the unix-socket server and the queue consumer on *loop*."""
        coro = asyncio.start_unix_server(self.listener, path=self.socket_path)
        loop.run_until_complete(coro)
        loop.create_task(self.queue_reader())
    async def queue_reader(self):
        """Forever consume queued messages and fan them out to their observers."""
        messages = self.messages
        logr = self.logr
        d_observers = self.observers
        # get_running_loop() is the supported way to reach the loop from a
        # coroutine (get_event_loop() here is deprecated since 3.10).
        create_task = asyncio.get_running_loop().create_task
        while True:
            msg = await messages.get()
            msg_type = msg.__class__.__name__
            observers = d_observers[msg_type]
            if observers:
                for obs in observers:
                    fn, callback = obs
                    task = create_task(fn(msg))
                    if callback is not None:
                        task.add_done_callback(callback)
            else:
                # Lazy %-style args avoid formatting when DEBUG is disabled.
                logr.debug('%s has no observers', msg_type)
    async def get_header(self, reader):
        """Read and deserialize the fixed-length message header."""
        data = await reader.read(self.serial.header_length)
        return self.serial.load(data)
    async def listener(self, reader, writer):
        """Per-connection handler: read one message, enqueue it, close."""
        logr = self.logr
        logr.debug('In listener!')
        header = await self.get_header(reader)
        value = await reader.read(header.data_length)
        msg = header.data_loader(value)
        logr.debug('Received %r', msg)
        await self.messages.put(msg)
        writer.close()
        return
| StarcoderdataPython |
9724744 | import unittest
from conans.client import tools
from conans.client.build.visual_environment import VisualStudioBuildEnvironment
from conans.test.utils.conanfile import MockConanfile, MockSettings
from conans.test.utils.tools import TestClient
class VisualStudioBuildEnvironmentTest(unittest.TestCase):
    """Tests for VisualStudioBuildEnvironment: how cpp_info flags, runtime,
    build_type and toolset translate into the CL / LIB / _LINK_ env vars."""
    def test_visual(self):
        """CL/LIB/_LINK_ must reflect deps_cpp_info, /MP when parallel, and
        manually appended paths plus pre-existing environment values."""
        settings = MockSettings({"build_type": "Debug",
                                 "compiler": "Visual Studio",
                                 "compiler.runtime": "MDd"})
        conanfile = MockConanfile(settings)
        # Populate dependency cpp_info with include/lib paths, flags and libs.
        conanfile.deps_cpp_info.include_paths.append("/one/include/path")
        conanfile.deps_cpp_info.include_paths.append("/two/include/path")
        conanfile.deps_cpp_info.lib_paths.append("/one/lib/path")
        conanfile.deps_cpp_info.lib_paths.append("/two/lib/path")
        conanfile.deps_cpp_info.cflags.append("-mycflag")
        conanfile.deps_cpp_info.cflags.append("-mycflag2")
        conanfile.deps_cpp_info.cxxflags.append("-mycxxflag")
        conanfile.deps_cpp_info.cxxflags.append("-mycxxflag2")
        conanfile.deps_cpp_info.exelinkflags.append("-myexelinkflag")
        conanfile.deps_cpp_info.sharedlinkflags.append("-mysharedlinkflag")
        # 'gdi32' has no extension: the env builder is expected to add '.lib'.
        conanfile.deps_cpp_info.libs.extend(['gdi32', 'user32.lib'])
        tool = VisualStudioBuildEnvironment(conanfile)
        # Debug + MDd runtime: -MDd -Zi -Ob0 -Od expected among the CL flags.
        self.assertEqual(tool.vars_dict, {
            "CL": ["-I/one/include/path", "-I/two/include/path",
                   '-MDd',
                   '-mycflag',
                   '-mycflag2',
                   '-Zi',
                   '-Ob0',
                   '-Od',
                   '-mycxxflag',
                   '-mycxxflag2'],
            "LIB": ["/one/lib/path", "/two/lib/path"],
            "_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
        })
        # Enabling parallel builds appends /MP<cpu_count> to CL.
        tool.parallel = True
        self.assertEqual(tool.vars_dict, {
            "CL": ["-I/one/include/path", "-I/two/include/path",
                   '-MDd',
                   '-mycflag',
                   '-mycflag2',
                   '-Zi',
                   '-Ob0',
                   '-Od',
                   '-mycxxflag',
                   '-mycxxflag2',
                   '/MP%s' % tools.cpu_count(output=conanfile.output)],
            "LIB": ["/one/lib/path", "/two/lib/path"],
            "_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
        })
        tool.parallel = False
        # Now alter the paths before the vars_dict call
        tool.include_paths.append("/three/include/path")
        tool.lib_paths.append("/three/lib/path")
        self.assertEqual(tool.vars_dict, {
            "CL": ["-I/one/include/path",
                   "-I/two/include/path",
                   "-I/three/include/path",
                   '-MDd',
                   '-mycflag',
                   '-mycflag2',
                   '-Zi',
                   '-Ob0',
                   '-Od',
                   '-mycxxflag',
                   '-mycxxflag2'],
            "LIB": ["/one/lib/path", "/two/lib/path", "/three/lib/path"],
            "_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
        })
        # Now try appending to environment
        # Pre-existing CL/LIB environment values must be appended after ours.
        with tools.environment_append({"CL": "-I/four/include/path -I/five/include/path",
                                       "LIB": "/four/lib/path;/five/lib/path"}):
            self.assertEqual(tool.vars_dict, {
                "CL": ["-I/one/include/path", "-I/two/include/path",
                       "-I/three/include/path",
                       '-MDd',
                       '-mycflag',
                       '-mycflag2',
                       '-Zi',
                       '-Ob0',
                       '-Od',
                       '-mycxxflag',
                       '-mycxxflag2',
                       "-I/four/include/path -I/five/include/path"],
                "LIB": ["/one/lib/path", "/two/lib/path", "/three/lib/path", "/four/lib/path;/five/lib/path"],
                "_LINK_": ['-myexelinkflag', '-mysharedlinkflag', 'gdi32.lib', 'user32.lib']
            })
            # The flattened .vars form quotes our include paths but passes the
            # environment-provided tail through verbatim.
            self.assertEqual(tool.vars, {
                "CL": '-I"/one/include/path" -I"/two/include/path" -I"/three/include/path" -MDd '
                      '-mycflag -mycflag2 -Zi -Ob0 -Od '
                      '-mycxxflag -mycxxflag2 '
                      '-I/four/include/path -I/five/include/path',
                "LIB": "/one/lib/path;/two/lib/path;/three/lib/path;/four/lib/path;/five/lib/path",
                "_LINK_": "-myexelinkflag -mysharedlinkflag gdi32.lib user32.lib"
            })
    # NOTE(review): this name does not start with "test_", so plain unittest
    # discovery skips it; conan's legacy nose-based runner matched "*_test".
    # Confirm the active runner before renaming.
    def build_type_toolset_test(self):
        """Each build_type must map to the expected MSVC (or clang-c2 toolset)
        compile flags, reported by the conanfile's build() via env_build.flags."""
        profile = """
[settings]
os=Windows
compiler=Visual Studio
compiler.version=15
build_type=Release
"""
        profile_toolset = """
[settings]
os=Windows
compiler=Visual Studio
compiler.version=15
compiler.toolset=v141
build_type=Release
"""
        profile_toolset_clang = """
[settings]
os=Windows
compiler=Visual Studio
compiler.version=15
build_type=Release
compiler.toolset=v141_clang_c2
"""
        conanfile = """
from conans import ConanFile, VisualStudioBuildEnvironment
class TestConan(ConanFile):
    name = "testlib"
    version = "1.0"
    settings = "compiler", "build_type", "os"
    def build(self):
        env_build = VisualStudioBuildEnvironment(self)
        self.output.info(env_build.flags)
"""
        client = TestClient()
        client.save({"profile": profile,
                     "profile_toolset": profile_toolset,
                     "profile_toolset_clang": profile_toolset_clang,
                     "conanfile.py": conanfile})
        # Expected flag lists as printed by self.output.info(env_build.flags).
        result = {"Debug": "['-Zi', '-Ob0', '-Od']",
                  "Release": "['-DNDEBUG', '-O2', '-Ob2']",
                  "RelWithDebInfo": "['-Zi', '-O2', '-Ob1']",
                  "MinSizeRel": "['-O1', '-Ob1']"}
        result_toolset_clang = {"Debug": "['-gline-tables-only', '-fno-inline', '-O0']",
                                "Release": "['-DNDEBUG', '-O2']",
                                "RelWithDebInfo": "['-gline-tables-only', '-O2', '-fno-inline']",
                                "MinSizeRel": "[]"}
        for build_type in ["Debug", "Release", "RelWithDebInfo", "MinSizeRel"]:
            # Default profile and explicit v141 toolset share MSVC flags.
            client.run("create . danimtb/testing -pr=profile -s build_type=%s" % build_type)
            self.assertIn(result[build_type], client.out)
            client.run("create . danimtb/testing -pr=profile_toolset -s build_type=%s" % build_type)
            self.assertIn(result[build_type], client.out)
            # The clang-c2 toolset maps to clang-style flags instead.
            client.run("create . danimtb/testing -pr=profile_toolset_clang -s build_type=%s" %
                       build_type)
            self.assertIn(result_toolset_clang[build_type], client.out)
| StarcoderdataPython |
3418368 | """The ``agar`` package contains a number of general purpose utility modules containing classes and functions to help
develop with `Google App Engine python`_ and `webapp2`_."""
from _version import __version__
| StarcoderdataPython |
6701253 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2018 Etsy Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from boundary_layer.logger import logger
from boundary_layer.registry import ConfigFileRegistry, RegistryNode, NodeTypes
from boundary_layer.schemas.internal.operators import OperatorSpecSchema
from boundary_layer.schemas.dag import ImportSchema
from boundary_layer import validator, util
from boundary_layer.containers import PropertySources, ResolvedProperties
class OperatorNode(RegistryNode):
    """Registry node representing one operator (task) definition.

    Resolves the operator's constructor arguments ("properties") from, in
    order of precedence: fixed values, the DAG config, available resources,
    default task args and global defaults, then validates them against the
    operator's schema and applies any configured property preprocessors.
    """
    type = NodeTypes.OPERATOR
    @property
    def operator_class(self):
        # Name of the operator class to instantiate (from the spec config).
        return self.config['operator_class']
    @property
    def operator_class_module(self):
        # Module from which operator_class is imported.
        return self.config['operator_class_module']
    def __init__(self, config, item):
        super(OperatorNode, self).__init__(config=config, item=item)
        # Set by resolve_properties(); None until that has been called.
        self._resolved_properties = None
        # Imports required by any property preprocessors; set in resolve_properties().
        self._preprocessor_imports = None
        # DAG-level default task args; set via set_default_task_args().
        self._default_task_args = None
    def set_default_task_args(self, args):
        """Store the DAG's default task args (None becomes an empty dict)."""
        self._default_task_args = args or {}
    @property
    def resolved_properties(self):
        # Guard: resolve_properties() must have populated the cache first.
        if not self._resolved_properties:
            raise Exception(
                'Cannot retrieve resolved properties for operator {}: '
                'resolve_properties() has not been called yet!'.format(
                    self))
        return self._resolved_properties
    @property
    def operator_args(self):
        """ Return a dictionary of arguments to apply to the operator constructor.
            This method accounts for the existence of default_task_args, by
            removing any operator args that will be set from the default task args.
            This way, any costly constructions in the default task args are not
            re-computed for each operator that uses them.
        """
        resolved = self.resolved_properties
        result = resolved.values.copy()
        for property_name in resolved.sources.default_task_args:
            # Only drop the arg if it still equals the default (i.e. it will be
            # supplied by default_task_args anyway).
            if self._default_task_args[property_name] == resolved.values[property_name]:
                result.pop(property_name)
        return result
    def imports(self):
        """Return the import spec (modules/objects) this operator needs,
        merging the config's `imports`, the operator class itself, and any
        preprocessor imports. Requires resolve_properties() to have run."""
        if not self._resolved_properties:
            raise Exception(
                'Cannot retrieve imports for operator {}: '
                'resolve_properties() has not been called yet!'.format(
                    self))
        loaded = ImportSchema().load(self.config.get('imports', {}))
        assert not loaded.errors, \
            ('Internal error: processing `imports` config {} for '
             'operator `{}`').format(
                 self.config.get('imports', {}),
                 self.name)
        result = loaded.data
        if self.operator_class:
            # The operator class itself must be imported from its module.
            result.setdefault('objects', [])
            result['objects'].append({
                'module': self.operator_class_module,
                'objects': [self.operator_class],
            })
        # Merge in everything the property preprocessors need.
        for item in self._preprocessor_imports.values():
            if 'objects' in item:
                result.setdefault('objects', [])
                result['objects'] += item['objects']
            if 'modules' in item:
                result.setdefault('modules', [])
                result['modules'] += item['modules']
        return result
    def resolve_properties(
            self,
            execution_context,
            default_task_args=None,
            base_operator_loader=None,
            preprocessor_loader=None):
        """ Get the properties / arguments for the operator, and split them
            according to their source.  Specifically, properties are provided
            to the operator by either the DAG config file, the resources
            available in the operator's context, any task defaults specified
            in the primary DAG, and the schema defaults, in that order of
            precedence.
            Once the properties are all resolved, this method then validates
            all of the resolved arguments against the task's schema.
            :param execution_context: the context in which this node is executed,
                specifically containing the available resources and the node
                that referred to this node, if any
            :type execution_context: boundary_layer.containers.ExecutionContext
            :param default_task_args: the default task args defined in the
                DAG
            :type default_task_args: dict
            :param base_operator_loader: A method that retrieves typed operators,
                equivalent to a Registry.get method
            :type base_operator_loader: callable
            :param preprocessor_loader: A method that retrieves typed preprocessors,
                equivalent to a Registry.get method
            :type preprocessor_loader: callable
            :returns: a mapping of property source to property key/value pairs
            :rtype: dict<dict<string, any>>
        """
        schema = self.get_schema(base_operator_loader)
        schema_properties = frozenset(schema.get('properties', {}).keys())
        self.set_default_task_args(default_task_args)
        (sources, property_values) = self._get_property_sources_and_values(
            schema_properties,
            execution_context)
        # Validation may also insert schema-default values into the result.
        validated = validator.validate_and_fill_defaults(
            item=property_values,
            schema=schema)
        for key in validated:
            # NOTE(review): this marks keys that already had a value as
            # "schema"-sourced and skips keys filled in purely by schema
            # defaults — verify the condition is not inverted.
            if key not in property_values:
                continue
            sources.schema.add(key)
        logger.debug('%s: validated partitioned properties: %s', self.name, sources)
        preprocessors = self._load_preprocessors(
            base_operator_loader,
            preprocessor_loader)
        # Remember preprocessor imports for later use by imports().
        self._preprocessor_imports = {
            pp_name: pp.imports()
            for (pp_name, pp) in six.iteritems(preprocessors)
        }
        preprocessed_values = self._apply_preprocessors(
            args=validated,
            preprocessors=preprocessors)
        if self._resolved_properties:
            # Repeated resolution is tolerated only if it yields identical
            # values (e.g. a sub-dag referenced twice with the same context).
            if preprocessed_values != self._resolved_properties.values:
                raise Exception(
                    'resolve_properties() was already called for operator {}, '
                    'and different values were computed this time!  Found: {}, '
                    'expected: {}.  This was probably caused by repeated '
                    'references to a sub-dag or generator using different resource '
                    'contexts.  This is not presently supported!'.format(
                        self,
                        preprocessed_values,
                        self._resolved_properties.values))
            else:
                logger.warning(
                    'resolve_properties() was already called for operator %s, '
                    'but no differences in the computed properties were found.',
                    self)
        self._resolved_properties = ResolvedProperties(
            sources=sources,
            values=preprocessed_values)
        return self._resolved_properties
    def _get_property_sources_and_values(
            self,
            schema_properties,
            execution_context):
        """ For the provided set of properties, determine the values, and the
            sources of those values, for the current node.
            Value sources include:
                - default task args (taken from ``self._default_task_args``)
                - available resources
                - properties defined in the DAG
                - fixed values (set below)
                - global defaults (set below)
            Note that this method does not validate these values against the
            schema, and therefore does not include any values that could be
            derived from the schema's default settings.
            :param schema_properties: the list of property names that are
                applicable to this node
            :type schema_properties: list<str>
            :param execution_context: the execution context
            :type execution_context: boundary_layer.containers.ExecutionContext
            :returns: sources and values as a 2-tuple
            :rtype: (PropertySources, dict)
        """
        sources = PropertySources(
            dag=set(),
            default_task_args=set(),
            resources=set(),
            schema=set(),
            global_defaults=set(),
            fixed_args=set())
        property_values = {}
        resource_args = self._get_resource_args(execution_context)
        global_defaults = self._get_global_defaults(execution_context)
        fixed_args = self._get_fixed_args()
        # make sure that the user has not tried to specify values for any
        # fixed args; this prevents the user from trying to attach operators
        # to a different DAG, for instance (which does not make sense because
        # there is only one DAG)
        invalid_properties = [
            property_name for property_name in fixed_args
            if property_name in self.properties]
        if invalid_properties:
            raise Exception(
                'Illegal properties `{}` provided for operator `{}`: these '
                'properties are assigned fixed values by boundary-layer that '
                'cannot be overridden'.format(
                    '` ,`'.join(invalid_properties),
                    self))
        # Precedence per property: fixed args > DAG config > resources >
        # default task args > global defaults (first hit wins via `continue`).
        for property_name in schema_properties:
            if property_name in fixed_args:
                # Check fixed args first, because we do not allow these to be
                # set by the user
                value = fixed_args[property_name]
                logger.debug(
                    '%s: Inserting value `%s` for argument `%s` from fixed_args',
                    self.name, value, property_name)
                property_values[property_name] = value
                sources.fixed_args.add(property_name)
                continue
            if property_name in self.properties:
                logger.debug(
                    '%s: Property `%s` found in user-props', self.name, property_name)
                property_values[property_name] = self.properties[property_name]
                sources.dag.add(property_name)
                continue
            resource_hits = resource_args.get(property_name, {})
            if len(resource_hits) > 1:
                # Ambiguity between resources must be resolved by the user.
                raise ValueError('Error in operator {}: Multiple available resources '
                                 'provide the argument {}: {}.  Please specify a value or limit '
                                 'limit the resource scope'.format(
                                     self.name,
                                     property_name,
                                     resource_hits))
            if len(resource_hits) == 1:
                (resource_name, value) = resource_hits.popitem()
                logger.debug('%s: Inserting value `%s` for argument `%s` from resource `%s`',
                             self.name, value, property_name, resource_name)
                property_values[property_name] = value
                sources.resources.add(property_name)
                continue
            if property_name in self._default_task_args:
                value = self._default_task_args[property_name]
                logger.debug('%s: Inserting value `%s` for argument `%s` from default_task_args',
                             self.name, value, property_name)
                property_values[property_name] = value
                sources.default_task_args.add(property_name)
                continue
            if property_name in global_defaults:
                value = global_defaults[property_name]
                logger.debug('%s: Inserting value `%s` for argument `%s` from global defaults',
                             self.name, value, property_name)
                property_values[property_name] = value
                sources.global_defaults.add(property_name)
                continue
            logger.debug(
                '%s: No resources or defaults available for property `%s`',
                self.name,
                property_name)
        return (sources, property_values)
    def _apply_preprocessors(self, args, preprocessors):
        """ Apply any necessary preprocessing to the alread-validated args.
            This must be the last step in case any preprocessors are defined on
            fields that are inserted by the schema defaults.
        """
        result = args.copy()
        for (property_name, preprocessor) in six.iteritems(preprocessors):
            if property_name not in args:
                continue
            processed_value = preprocessor.process_arg(
                args[property_name],
                node=self,
                raw_args=args)
            logger.debug(
                'Property `%s` raw value: `%s`, processed value: `%s`',
                property_name,
                args[property_name],
                processed_value)
            result[property_name] = processed_value
        return result
    def _get_resource_args(self, execution_context):
        """Map property name -> {resource name -> value} for every argument
        provided by the resources available in this context."""
        resources_available = self._get_resources_available(execution_context)
        result = {}
        for (resource_name, resource) in six.iteritems(resources_available):
            for (property_name, value) in six.iteritems(resource.get_provided_args()):
                result.setdefault(property_name, {})
                result[property_name][resource_name] = value
        return result
    def _get_resources_available(self, execution_context):
        """Restrict the context's resources to those this node requires."""
        keys_available = self.requires_resources & frozenset(execution_context.resources)
        return {key: execution_context.resources[key] for key in keys_available}
    def _load_preprocessors(self, base_loader, preprocessor_loader):
        """Collect property_preprocessors configs across the operator's class
        hierarchy and return a mapping of property name -> preprocessor."""
        def aggregator(previous_result, node):
            return previous_result + node.config.get('property_preprocessors', [])
        preprocessor_configs = self._aggregate_over_hierarchy(
            base_loader=base_loader,
            initial_value=self.config.get('property_preprocessors', []),
            aggregator=aggregator)
        if not preprocessor_configs:
            return {}
        assert preprocessor_loader is not None, \
            'load_preprocessors called for node {} with preprocessor config {}, ' \
            'but preprocessor_loader is {}!'.format(
                self,
                preprocessor_configs,
                preprocessor_loader)
        result = {}
        for preprocessor_conf in preprocessor_configs:
            preprocessor = preprocessor_loader(preprocessor_conf)
            # One preprocessor instance may apply to multiple properties.
            for property_name in preprocessor_conf['apply_to_properties']:
                result[property_name] = preprocessor
        return result
    def _get_fixed_args(self):
        # `dag` is always the template placeholder for the enclosing DAG;
        # users may never override it (enforced above).
        return {
            'dag': '<<dag>>',
        }
    def _get_global_defaults(self, execution_context):
        # Fallback values used only when nothing else provides the property.
        return {
            'task_id': self._build_task_id(execution_context),
        }
    def _build_task_id(self, execution_context):
        """Build the default task_id; generator-referred nodes get a template
        suffix (`<<item_name>>` or `<<str(index)>>`) per auto_task_id_mode."""
        base_name = util.sanitize_operator_name(self.name)
        if not execution_context.referrer or execution_context.referrer.type != NodeTypes.GENERATOR:
            return base_name
        suffix_mode = execution_context.referrer.item.get('auto_task_id_mode')
        if not suffix_mode or suffix_mode == 'item_name':
            return base_name + '-<<item_name>>'
        elif suffix_mode == 'index':
            return base_name + '-<<str(index)>>'
        raise Exception(
            'Unknown suffix_mode `{}` for generator `{}` found while processing '
            'node `{}`'.format(
                suffix_mode,
                execution_context.referrer.name,
                self.name))
class OperatorRegistry(ConfigFileRegistry):
    """Config-file-backed registry whose entries are OperatorNode instances,
    validated against OperatorSpecSchema."""
    node_cls = OperatorNode
    spec_schema_cls = OperatorSpecSchema
| StarcoderdataPython |
6466322 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: test_cli
.. moduleauthor:: <NAME> <<EMAIL>>
This is the test module for the project's command-line interface (CLI)
module.
"""
import json
import luigi
# fmt: off
import bio3dbeacon
import bio3dbeacon.cli as cli
from bio3dbeacon import __version__
# fmt: on
from click.testing import Result
from .conftest import FIXTURE_PATH
# To learn more about testing Click applications, visit the link below.
# http://click.pocoo.org/5/testing/
def test_version_displays_library_version(cli_runner):
    """
    Arrange/Act: invoke the `version` subcommand through the CLI runner.
    Assert: the library's __version__ string appears in the output.
    """
    invocation: Result = cli_runner.invoke(cli.cli, ["version"])
    output_text = invocation.output.strip()
    assert __version__ in output_text, "Version number should match library version."
def test_verbose_output(cli_runner):
    """
    Arrange/Act: invoke the `version` subcommand with the '-v' flag.
    Assert: the output indicates that verbose logging is enabled.
    """
    invocation: Result = cli_runner.invoke(cli.cli, ["-v", "version"])
    output_text = invocation.output.strip()
    assert "Verbose" in output_text, "Verbose logging should be indicated in output."
def test_model_displays_expected_message(cli_runner):
    """
    Arrange/Act: invoke the `model` subcommand.
    Assert: the output mentions the CLI name.
    """
    invocation: Result = cli_runner.invoke(cli.cli, ["model"])
    output_text = invocation.output.strip()
    assert 'cli' in output_text, "'model' messages should contain the CLI name."
def test_model_add(cli_runner, monkeypatch):
    """
    Arrange/Act: Run the `model add` subsubcommand
    Assert: The PDB file is added to the database
    """
    # Fixture inputs: the original PDB and the pre-computed QMEAN result.
    baker_pfam_path = FIXTURE_PATH / 'baker_pfam'
    orig_pdb_file = baker_pfam_path / 'original' / 'pdb' / 'PF05017.pdb'
    expected_qmean_json_file = baker_pfam_path / \
        'generated' / 'qmean' / 'PF05017_qmean.json'
    original_build = luigi.build
    # Force luigi to run with the local scheduler so no scheduler daemon is needed.
    def mock_build(*args, **kwargs):
        kwargs['local_scheduler'] = True
        return original_build(*args, **kwargs)
    monkeypatch.setattr(luigi, 'build', mock_build)
    # Replace the remote QMEAN call with the canned JSON fixture.
    def mock_qmean(*args):
        with open(expected_qmean_json_file, 'rt') as fp:
            data = json.load(fp)
        return data
    monkeypatch.setattr(bio3dbeacon.tasks.QmeanRunner,
                        'run_remote', mock_qmean)
    result: Result = cli_runner.invoke(
        cli.cli, ["model", "add", "--pdbfile", str(orig_pdb_file)])
    # Exit code 0 means the pipeline (add + luigi tasks) completed cleanly.
    assert result.exit_code == 0
3421790 | <reponame>JONGWE1/BankManagement
from flask import render_template, redirect, request, url_for, flash, make_response
from .import imgpatient
from .form import ImgpCheckinForm, ImgpRecipeForm
from ..model import Medicine, Price, UserInfo, ImgDoctorTimetable, ImgpCheckin, ImgpCheckinAfford, ImgpRecipe, ImgpRecipeAfford, ImgpCost
from ..decorator import is_login, isauth
from .. import db
import datetime
@imgpatient.route('/imgpatient/checkin', methods=['GET', 'POST'])
@is_login
@isauth
def checkin(name, auth):
    """Patient check-in view.

    GET renders the check-in form; a valid POST creates an ImgpCheckin row,
    records the consultation fee (ImgpCheckinAfford) based on the doctor's
    rank, stores the new check-in id in the `img` cookie and redirects to
    the index page.
    """
    patientcheckin = ImgpCheckin()
    form = ImgpCheckinForm()
    price = ImgpCheckinAfford()
    if request.method == 'GET':
        return render_template('imgpatient/checkin.html', form=form, name=name, auth=auth)
    if form.validate_on_submit():
        response = make_response(redirect(url_for('imgpatient.imgpindex')))
        # Allocate the next check-in id by incrementing the latest one.
        prepatient = ImgpCheckin.query.order_by(ImgpCheckin.imgpcheckinid.desc()).first()
        patientcheckin.imgpcheckinid = prepatient.imgpcheckinid + 1
        nowpcheckid = patientcheckin.imgpcheckinid
        # The follow-up views (recipe/cost) read the check-in id from this cookie.
        response.set_cookie('img', str(nowpcheckid))
        patientcheckin.patientid = form.patientid.data
        patientcheckin.doctorid = form.doctorname.data
        docid = UserInfo.query.filter_by(id=form.doctorname.data).first()
        patientcheckin.doctortype = docid.rank
        db.session.add(patientcheckin)
        db.session.commit()
        # Record the consultation fee derived from the doctor's rank.
        price.imgpcheckinid = patientcheckin.imgpcheckinid
        price.imgpid = form.patientid.data
        priceinfo = Price.query.filter_by(optionid=docid.rank).first()
        price.price = priceinfo.price
        db.session.add(price)
        db.session.commit()
        return response
    # Bugfix: an invalid POST previously fell through and returned None,
    # which makes Flask raise a 500; re-render the form instead.
    return render_template('imgpatient/checkin.html', form=form, name=name, auth=auth)
@imgpatient.route('/imgpatient/imgpindex', methods= ['GET', 'POST'])
@is_login
@isauth
def imgpindex(name, auth):
    # Landing page after check-in; re-reads the current check-in id cookie.
    # NOTE(review): raises TypeError if the 'img' cookie is missing, and a
    # POST request returns None (HTTP 500) — confirm whether POST is needed.
    nowpcheckid = int(request.cookies.get('img'))
    if request.method == 'GET':
        # nowpcheckid = request.cookies.get('img')
        # print('2', type(int(nowpcheckid)), int(nowpcheckid))
        return render_template('imgpatient/imgpindex.html', name= name, auth=auth)
@imgpatient.route('/imgpatient/recipe', methods=['GET', 'POST'])
@is_login
@isauth
def recipe(name, auth):
    """Prescription (medicine selection) view.

    GET renders the medicine form; a valid POST stores the selected medicine
    ids (comma-separated) on an ImgpRecipe row keyed by the current check-in
    id from the `img` cookie, then redirects to the quantity page.
    """
    patientrecipe = ImgpRecipe()
    form = ImgpRecipeForm()
    nowpcheckid = request.cookies.get('img')
    if request.method == 'GET':
        return render_template('/imgpatient/medicine.html', form=form, name=name, auth=auth)
    if form.validate_on_submit():
        patientrecipe.imgpcheckinid = int(nowpcheckid)
        patientrecipe.imgpid = form.imgpid.data
        # Store the selected medicine ids as a comma-separated string.
        patientrecipe.medicinenames = ','.join(form.medicines.data)
        db.session.add(patientrecipe)
        db.session.commit()
        return redirect(url_for('imgpatient.recipenum', name=name))
    # Bugfix: an invalid POST previously fell through and returned None,
    # which makes Flask raise a 500; re-render the form instead.
    return render_template('/imgpatient/medicine.html', form=form, name=name, auth=auth)
@imgpatient.route('/imgpatient/recipenum', methods= ['GET', 'POST'])
@is_login
@isauth
def recipenum(name, auth):
    # Medicine-quantity view: GET shows the selected medicines, POST stores
    # the entered quantities and computes the total prescription price.
    nowpcheckid = request.cookies.get('img')
    price = ImgpRecipeAfford()
    if request.method == 'GET':
        patientcheckinid = int(nowpcheckid)
        # Resolve the stored medicine ids back to display names.
        selectedinfo = ImgpRecipe.query.filter_by(imgpcheckinid= patientcheckinid).first()
        medicinenames = selectedinfo.medicinenames
        medslist = medicinenames.split(',')
        medsnlist = []
        for item in medslist:
            med = Medicine.query.filter_by(id= item).first()
            medname = med.medicinename
            medsnlist.append(medname)
        return render_template('imgpatient/recipenum.html', medsnlist= medsnlist, name= name, auth=auth)
    else:
        patientcheckinid = int(nowpcheckid)
        imgprecipe = ImgpRecipe.query.filter(ImgpRecipe.imgpcheckinid == patientcheckinid).first()
        mednumbers = []
        # NOTE(review): quantities are collected in request.values iteration
        # order and later zipped with the stored medicine ids — confirm the
        # form field order always matches the stored id order.
        d = request.values.to_dict()
        for number in d.keys():
            mednumbers.append(d.get(number))
        imgprecipe.medicinenumbers = ','.join(mednumbers)
        db.session.commit()
        price.imgpcheckinid = patientcheckinid
        imgpreinfo = ImgpRecipe.query.filter_by(imgpcheckinid= patientcheckinid).first()
        price.imgpid = imgpreinfo.imgpid
        recipeinfo = ImgpRecipe.query.filter_by(imgpcheckinid= patientcheckinid).first()
        recipemdname = recipeinfo.medicinenames
        recipemdnamel = recipemdname.split(',')
        recipenum = recipeinfo.medicinenumbers
        recipenuml = recipenum.split(',')
        count = 0
        # Total = sum of (unit price looked up by medicine id) * quantity.
        zipinfo = zip(recipemdnamel, recipenuml)
        for item in zipinfo:
            medinfo = Price.query.filter_by(optionid= int(item[0])).first()
            count = count + medinfo.price * int(item[1])
        # for item in recipemdnamel:
        #     medinfo = Price.query.filter_by(optionid= int(item)).first()
        #     count = count + medinfo.price *
        price.price = count
        db.session.add(price)
        db.session.commit()
        flash('处方已经上传完成')
        return redirect(url_for('imgpatient.imgpindex', name= name))
@imgpatient.route('/imgpatient/cost', methods= ['GET', 'POST'])
@is_login
@isauth
def cost(name, auth):
    # Total-cost view: sums the check-in fee and the prescription fee for the
    # current check-in id and persists the total as an ImgpCost row.
    # NOTE(review): the local name `cost` shadows this view function inside
    # its body, every GET inserts a new ImgpCost row, and a POST returns
    # None (HTTP 500) — confirm all three are intended.
    cost = ImgpCost()
    nowpcheckid = int(request.cookies.get('img'))
    if request.method == 'GET':
        imgpcheckininfo = ImgpCheckinAfford.query.filter_by(imgpcheckinid= nowpcheckid).first()
        imgprecipeinfo = ImgpRecipeAfford.query.filter_by(imgpcheckinid= nowpcheckid).first()
        ociprice = imgpcheckininfo.price
        orprice = imgprecipeinfo.price
        cost.imgpcheckinid = nowpcheckid
        cost.price = ociprice + orprice
        price = cost.price
        db.session.add(cost)
        db.session.commit()
        return render_template('imgpatient/cost.html', price= price, name= name, auth= auth)
3458799 | <gh_stars>0
from setuptools import setup
setup(
name="GSVDevice",
version="0.0.1",
author="<NAME>",
author_email="",
description="A Python module for communicating with GSV6/8 devices",
url="",
packages=['GSVDevice'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux :: Windows :: OSX",
],
python_requires='>=3',
install_requires=['pyserial']
)
| StarcoderdataPython |
4984167 | <filename>src/train_detector.py
import dlib

# Training data (imglab annotations XML) and output model path, factored into
# constants so the duplicated path literals cannot drift apart.
ANNOTATIONS_XML = '/home/shammyz/repos/dlib-pupil/annotations.xml'
DETECTOR_SVM = '/home/shammyz/repos/dlib-pupil/detector.svm'

# HOG + SVM detector training options.
options = dlib.simple_object_detector_training_options()
options.C = 1.0  # SVM regularization: larger values fit the training data harder
options.num_threads = 8
options.be_verbose = True

dlib.train_simple_object_detector(ANNOTATIONS_XML, DETECTOR_SVM, options)

# Evaluates on the training set itself, so this is an upper bound on accuracy.
print("[INFO] training accuracy: {}".format(
    dlib.test_simple_object_detector(ANNOTATIONS_XML, DETECTOR_SVM)))
| StarcoderdataPython |
3524372 | <reponame>teambge/bge-python-sdk
from validator import Required, In, validate, Length
from bgesdk.management.constants import TAB_CHOICES, LANGUAGE_CHOICES
language = [x[0] for x in LANGUAGE_CHOICES]
tab = [x[0] for x in TAB_CHOICES]
def validator_doc(doc_data):
    """Validate a model-document payload.

    :param doc_data: dict with keys ``doc_tab``, ``model_id`` and
        ``doc_content`` (a list of per-language content dicts).
    :returns: dict with key ``valid`` (bool) and, when invalid, ``errors``
        mapping field names to their validation errors.
    """
    results = {}
    result_errors = {}
    # Top-level schema for the document container.
    validation = {
        'doc_tab': [Required, In(tab)],
        'model_id': [
            Required, Length(0, maximum=50), lambda x: isinstance(x, str)],
        'doc_content': [Required, lambda x: isinstance(x, list)]
    }
    res = validate(validation, doc_data)
    result_errors.update(res.errors)
    if isinstance(doc_data['doc_content'], list):
        # Per-language content schema; the rules are loop-invariant, so build
        # the dict once instead of recreating it for every list element.
        content_validation = {
            'language': [Required, In(language)],
            'doc_name': [
                Required, Length(0, maximum=50),
                lambda x: isinstance(x, str)],
            'content_title': [
                Required, Length(0, maximum=200),
                lambda x: isinstance(x, str)],
            'developer': [
                Length(0, maximum=50), lambda x: isinstance(x, str)],
            'brief_intro': [lambda x: isinstance(x, dict)],
            'method': [lambda x: isinstance(x, dict)],
            'model_evaluation': [lambda x: isinstance(x, dict)],
            'data_set_size': [lambda x: isinstance(x, dict)],
            'ethnicity': [lambda x: isinstance(x, dict)],
            'limitation': [lambda x: isinstance(x, dict)],
            'return_params': [lambda x: isinstance(x, dict)],
            'state_explain': [lambda x: isinstance(x, dict)],
            'example_result': [lambda x: isinstance(x, dict)],
            'ref': [lambda x: isinstance(x, list)]
        }
        for content in doc_data['doc_content']:
            content_res = validate(content_validation, content)
            # NOTE(review): errors from later list items overwrite earlier
            # ones for the same field name — confirm this is intended.
            result_errors.update(content_res.errors)
    valid = True
    if result_errors:
        valid = False
        results['errors'] = result_errors
    results['valid'] = valid
    return results
4804978 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 20:21:23 2018
@author: <NAME>
"""
from CoTeTo.Loader import Loader
from os.path import isfile
from CoTeTo import jsonld_budo_to_ontology as bo
class BoundaryFile(Loader):
    """CoTeTo loader that generates boundary-condition files for timeseries
    data from "setup" JSON-LD input files."""
    # Loader metadata displayed by the CoTeTo framework.
    name = 'BoundaryFile'
    description = 'load requirements of timeseries data '
    version = '1.0'
    author = '<NAME>, <NAME>'
    helptxt = """Loading InputDataProperty File with filled "setup" jsonld"""

    def load(self, uriList, outputBase):
        """Generate a boundary-condition file for every readable URI.

        :param uriList: iterable of file paths/URIs to process.
        :param outputBase: base path for the generated output.

        Unreadable entries are logged as errors and skipped rather than
        aborting the whole run.
        """
        for u in uriList:
            if isfile(u):
                self.logger.info('BoundaryFile - loading %s', u)
                bo.generate_boundary_condition_file(u, outputBase)
            else:
                self.logger.error('BoundaryFile - file not readable %s', u)
| StarcoderdataPython |
1738622 | <filename>dbops_venv/lib/python3.5/site-packages/alembic/context.py
from .environment import EnvironmentContext
from . import util
# create proxy functions for
# each method on the EnvironmentContext class.
util.create_module_class_proxy(EnvironmentContext, globals(), locals())
| StarcoderdataPython |
9651223 | <gh_stars>0
# Built on top of the original implementation at https://github.com/papermsucode/mdmmt
#
# Modifications by Copyright 2022 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logic for the Transformer architecture used for MMT.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import math
import torch
from torch import nn
from towhee.models.layers.activations import swish, gelu
logger = logging.getLogger(__name__)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
    """Construct embeddings from precomputed per-token ``features`` plus
    position and token-type embeddings.

    Unlike standard Bert there is no word-embedding lookup table: the
    per-token feature vectors are supplied directly through ``features``
    in forward().
    """

    def __init__(self, config):
        super().__init__()
        self.position_embeddings = nn.Embedding(config.max_position_embeddings,
                                                config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size,
                                                  config.hidden_size)
        self.layer_norm = BertLayerNorm(config.hidden_size,
                                        eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self,
                input_ids,
                token_type_ids=None,
                position_ids=None,
                features=None):
        """Sum feature, token-type and (optional) position embeddings, then
        apply LayerNorm and dropout.

        ``input_ids`` is used only for its shape when ``token_type_ids`` is
        omitted.  Position embeddings are skipped entirely when
        ``position_ids`` is None.
        """
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        if position_ids is not None:
            position_embeddings = self.position_embeddings(position_ids)
            embeddings = position_embeddings + token_type_embeddings + features
        else:
            embeddings = token_type_embeddings + features
        embeddings = self.layer_norm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Splits the hidden dimension into ``num_attention_heads`` heads, attends
    over the sequence, and returns the re-merged context vectors.
    """

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}")
        # Attention-probability outputs are disabled by default (hard-coded).
        self.output_attentions = False
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size
                                       / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head_size) -> (batch, heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
                                       self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        """Attend over ``hidden_states``.

        ``attention_mask`` is an additive mask (0 for keep, large negative
        for masked positions — see BertMMT.forward).  ``head_mask`` is a
        multiplicative per-head mask or None.
        """
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention
        # scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel
        # forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge heads back: (batch, heads, seq, head_size) -> (batch, seq, hidden).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer,
                   attention_probs) if self.output_attentions else (context_layer,)
        return outputs
class BertSelfOutput(nn.Module):
    """Post-attention projection: dense + dropout, then a residual
    connection followed by layer normalization."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = BertLayerNorm(config.hidden_size,
                                        eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Project attention output and add the residual ``input_tensor``."""
        projected = self.dropout(self.dense(hidden_states))
        return self.layer_norm(projected + input_tensor)
class BertAttention(nn.Module):
    """Complete self-attention sublayer: attention followed by the
    residual output block."""

    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask, head_mask=None):
        self_outputs = self.self(input_tensor, attention_mask, head_mask)
        attended = self.output(self_outputs[0], input_tensor)
        # Re-attach attention probabilities when the inner module emits them.
        return (attended,) + self_outputs[1:]
class BertIntermediate(nn.Module):
    """First half of the feed-forward block: expand to the intermediate
    size and apply the configured activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act is a string key ("gelu"/"relu"/"swish") into ACT2FN.
        self.intermediate_act_fn = ACT2FN[config.hidden_act]
        # self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class BertOutput(nn.Module):
    """Second half of the feed-forward block: project back to hidden size,
    apply dropout, then residual connection and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.layer_norm = BertLayerNorm(config.hidden_size,
                                        eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """``hidden_states``: FFN activations; ``input_tensor``: residual input."""
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.layer_norm(hidden_states + input_tensor)
        return hidden_states
class BertLayer(nn.Module):
    """One full transformer block: self-attention + feed-forward sublayers."""

    def __init__(self, config):
        super().__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        # Forward any attention maps produced by the attention sublayer.
        outputs = (layer_output,
                   ) + attention_outputs[1:]  # add attentions if we output them
        return outputs
class BertEncoder(nn.Module):
    """Stack of ``config.num_hidden_layers`` BertLayer transformer blocks.

    Set ``output_hidden_states`` / ``output_attentions`` to True to have
    forward() append all per-layer hidden states / attention maps to the
    returned tuple.
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = False
        self.output_hidden_states = False
        self.layer = nn.ModuleList(
            [BertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, head_mask=None):
        """Run all layers; returns (last_hidden_state, [all_hidden_states],
        [all_attentions]) depending on the output flags."""
        # FIX: the declared default head_mask=None previously crashed with
        # TypeError on head_mask[i]; expand None to one no-op entry per layer.
        if head_mask is None:
            head_mask = [None] * len(self.layer)
        all_hidden_states = ()
        all_attentions = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        # last-layer hidden state, (all hidden states), (all attentions)
        return outputs
class BertPooler(nn.Module):
    """Produce a single sentence embedding from the hidden state of the
    first ([CLS]) token via a Linear + Tanh transform."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # Pool by taking only the first token's hidden state.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertMMT(nn.Module):
    r"""Bert model used as the multi-modal transformer (MMT) backbone.

    forward() returns a tuple
    ``(sequence_output, pooled_output, [hidden_states], [attentions])``:

    **sequence_output**: ``(batch_size, sequence_length, hidden_size)``
        hidden states at the output of the last encoder layer.
    **pooled_output**: ``(batch_size, hidden_size)`` hidden state of the
        first token, further processed by a Linear + Tanh pooler.  Note this
        is usually *not* a good summary of the whole input's semantics —
        averaging/pooling the full sequence often works better.
    **hidden_states** (optional, when the encoder's
        ``output_hidden_states`` flag is True): per-layer hidden states
        including the embedding output.
    **attentions** (optional, when the encoder's ``output_attentions`` flag
        is True): per-layer post-softmax attention weights.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        # Weights initialization
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Initialize the weights: normal-distributed linear/embedding
        weights, unit LayerNorm scale, zero biases."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def forward(self,
                input_ids,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                features=None):
        """Run embeddings + encoder + pooler.

        ``features`` carries the precomputed per-token feature vectors that
        replace word embeddings (see BertEmbeddings).  ``attention_mask``
        uses the 1.0 = attend / 0.0 = masked convention.
        """
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to
        # [batch_size, num_heads, from_seq_length, to_seq_length]
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # No head pruning is used: one (no-op) mask entry per layer.
        head_mask = [None] * self.config.num_hidden_layers
        embedding_output = self.embeddings(input_ids,
                                           position_ids=position_ids,
                                           token_type_ids=token_type_ids,
                                           features=features)
        encoder_outputs = self.encoder(embedding_output,
                                       extended_attention_mask,
                                       head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        # sequence_output, pooled_output, (hidden_states), (attentions)
        return outputs
| StarcoderdataPython |
6699030 | # -*- coding: utf-8 -*-
"""Definition of meta model 'deprecatedelements'."""
from functools import partial
import pyecore.ecore as Ecore
from pyecore.ecore import *
from modality.pyuml2.types import Boolean
from ..portsandflows import FlowDirection
from . import deprecatedelements_mixins as _user_module
name = "deprecatedelements"
nsURI = "http://www.eclipse.org/papyrus/sysml/1.4/SysML/DeprecatedElements"
nsPrefix = "DeprecatedElements"
eClass = EPackage(name=name, nsURI=nsURI, nsPrefix=nsPrefix)
eClassifiers = {}
getEClassifier = partial(Ecore.getEClassifier, searchspace=eClassifiers)
class FlowPort(_user_module.FlowPortMixin, EObject, metaclass=MetaEClass):
    """
    A FlowPort is an interaction point through which input and/or output of
    items such as data, material, or energy may flow.  Atomic flow ports
    relay items classified by a single Block, ValueType, DataType, or Signal
    classifier; a nonatomic flow port relays items of the several types
    specified by a FlowSpecification.  Flow ports define "what can flow"
    between the block and its environment, whereas item flows specify "what
    does flow" in a specific usage context.
    """
    # Direction of flow through the port; defaults to bidirectional (inout).
    direction = EAttribute(
        eType=FlowDirection,
        derived=False,
        changeable=True,
        default_value=FlowDirection.inout,
    )
    # Derived, read-only flag: whether the port relays a single classifier
    # (computed by the FlowPortMixin user module).
    _isAtomic = EAttribute(
        eType=Boolean, derived=True, changeable=False, name="isAtomic", transient=True
    )
    # The UML Port element this stereotype is applied to.
    base_Port = EReference(ordered=False, unique=True, containment=False, derived=False)

    def __init__(self, *, base_Port=None, direction=None, isAtomic=None, **kwargs):
        # Reject unknown keyword arguments explicitly (pyecore convention).
        if kwargs:
            raise AttributeError("unexpected arguments: {}".format(kwargs))
        super().__init__()
        # Only assign explicitly provided values so EAttribute defaults apply.
        if direction is not None:
            self.direction = direction
        if isAtomic is not None:
            self.isAtomic = isAtomic
        if base_Port is not None:
            self.base_Port = base_Port
class FlowSpecification(
    _user_module.FlowSpecificationMixin, EObject, metaclass=MetaEClass
):
    """
    A FlowSpecification specifies inputs and outputs as a set of flow
    properties. A flow specification is used by flow ports to specify
    what items can flow via the port.
    """
    # The UML Interface element this stereotype is applied to.
    base_Interface = EReference(
        ordered=False, unique=True, containment=False, derived=False
    )

    def __init__(self, *, base_Interface=None, **kwargs):
        # Reject unknown keyword arguments explicitly (pyecore convention).
        if kwargs:
            raise AttributeError("unexpected arguments: {}".format(kwargs))
        super().__init__()
        if base_Interface is not None:
            self.base_Interface = base_Interface
| StarcoderdataPython |
311565 | <filename>arekit/common/experiment/api/ctx_training.py
from arekit.common.experiment.api.ctx_base import DataIO
class TrainingData(DataIO):
    """ Data, that is necessary for models training stage.

    Concrete experiments must override ``Evaluator`` and ``Callback``.
    """

    def __init__(self, stemmer, labels_count):
        super(TrainingData, self).__init__(stemmer)
        # Number of output labels the model is trained to predict.
        self.__labels_count = labels_count

    @property
    def LabelsCount(self):
        """Number of labels used during training."""
        return self.__labels_count

    @property
    def Evaluator(self):
        """Results evaluator; subclasses must provide one."""
        raise NotImplementedError()

    @property
    def Callback(self):
        """Training callback; subclasses must provide one."""
        raise NotImplementedError()
| StarcoderdataPython |
11318333 | <filename>demos/aspect_polarity_classification/bert-baselines/train_apc_bert_baseline.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# file: train_apc_bert_baseline.py
# time: 2021/7/27
# author: yangheng <<EMAIL>>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
from pyabsa import APCTrainer, APCConfigManager, BERTBaselineAPCModelList, ABSADatasetList

# Shared configuration for the ASGCN-BERT baseline, reused for every dataset.
apc_config_english = APCConfigManager.get_apc_config_bert_baseline()
apc_config_english.model = BERTBaselineAPCModelList.ASGCN_BERT
apc_config_english.num_epoch = 10
apc_config_english.evaluate_begin = 2
apc_config_english.max_seq_len = 100
apc_config_english.dropout = 0.5
apc_config_english.log_step = 5
apc_config_english.l2reg = 0.0005
apc_config_english.seed = 1
apc_config_english.use_syntax_based_SRD = True
apc_config_english.similarity_threshold = 1
apc_config_english.cross_validate_fold = -1  # disable cross_validate

# Train and evaluate the same configuration on each SemEval dataset in turn
# (replaces four copy-pasted, identical trainer invocations).
for dataset in (ABSADatasetList.Laptop14,
                ABSADatasetList.Restaurant14,
                ABSADatasetList.Restaurant15,
                ABSADatasetList.Restaurant16):
    sent_classifier = APCTrainer(
        config=apc_config_english,
        dataset=dataset,          # train set and test set will be automatically detected
        checkpoint_save_mode=1,   # 1 = save model checkpoints; None to avoid save model
        auto_device=True          # automatic choose CUDA or CPU
    ).train()
| StarcoderdataPython |
5176036 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
#Name: CopyFiles.py
#
#Purpose: Copy folders/files from source folder to desintation folder
#
#==============================================================================
import arcpy
import sys
import traceback
import time
import os
# Add "Root folder"\SupportFiles to sys path inorder to import
# modules in subfolder
supportFilePath = os.path.join(os.path.dirname(
os.path.dirname(os.path.dirname(sys.argv[0]))), "SupportFiles")
sys.path.append(supportFilePath)
from datetime import datetime
from Utilities import changeOwnership
from Utilities import getFreeSpace
from Utilities import getDirSize
from UtilitiesArcPy import checkResults
from UtilitiesArcPy import copyData
from walkingDirTrees import listFiles
scriptName = sys.argv[0]
if len(sys.argv) < 3:
print "\n" + scriptName + " <SourceFolder> <DestinationFolder>"
print "\nWhere:"
print "\n\t<SourceFolder> (required): path of source folder to copy."
print "\n\t<DestinationFolder> (required): path of folder where source folder will be copied."
print
sys.exit(1)
# script parameter:
src_folder = sys.argv[1]
dest_folder = sys.argv[2]
owner_account = None
# if len(sys.argv) > 3:
# owner_account = sys.argv[3]
# ---------------------------------------------------------------------
# Check parameters
# ---------------------------------------------------------------------
goodParameters = True
# Check path parameter to make sure they exist
if not os.path.exists(src_folder):
print "\nThe path specified for parameter <SourceFolder>" + \
" (" + src_folder + ") does not exist."
goodParameters = False
if not os.path.exists(dest_folder):
print "\nThe path specified for parameter <dest_folder>" + \
" (" + dest_folder + ") does not exist."
goodParameters = False
# Exit script if parameters are not valid.
if not goodParameters:
print "\nInvalid script parameters. Exiting " + scriptName + "."
sys.exit(1)
printMsg = True
totalCopySuccess = True
def copyDataFolders(srcRootPath, destRootPath, ownerAccount=None):
copySuccess = True
# Check if there is available space on destination drive
# to copy folders.
freeSpace = getFreeSpace(destRootPath, "GB")
# Get total size of source folders
srcSize = getDirSize(srcRootPath, "GB")
print '{:<34}{:>10.4f}{:>3}'.format("Available space to copy folders:", freeSpace, " GB")
print '{:<34}{:>10.4f}{:>3}'.format("Size of folders to copy:", srcSize, " GB")
print
if srcSize >= freeSpace:
totalCopySuccess = False
print
print "ERROR: Not enough available space to copy folders/files."
print
else:
# Get list of top-level directories and files
returnFolders = 1 #Yes
recursive = 0 #No
dirList = listFiles(srcRootPath, "*", recursive, returnFolders)
x = 0
for srcPath in dirList:
# List may have files so test for directory
if os.path.isdir(srcPath):
pathType = "Folder"
elif os.path.isfile(srcPath):
pathType = "File"
else:
pathType = ""
# "Create" destination path
destPath = os.path.join(destRootPath, os.path.basename(srcPath))
print
print "- Copying " + pathType.lower() + "..."
print '{:<16}{:<100}'.format("\tSource:", srcPath)
print '{:<16}{:<100}'.format("\tDestination:", destPath)
# Copy data and check results
results = copyData(srcPath, destPath)
success = checkResults(results, printMsg)
if not success:
copySuccess = success
# Change ownership function doesn't seem to be working
# so for time being lets comment out this function call.
# if not success:
# copySuccess = success
# else:
# if ownerAccount:
# changeOwnership(destPath, ownerAccount)
return copySuccess
try:
startTime = datetime.now()
print "\n=============================================================="
print "Copying files..."
print "==============================================================\n"
print '{:<25}{}'.format("Source folder:", src_folder)
print '{:<25}{}\n'.format("Destination folder:", dest_folder)
success = copyDataFolders(src_folder, dest_folder, owner_account)
if not success:
totalCopySuccess = success
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
# Print Python error messages for use in Python / Python Window
print
print "***** ERROR ENCOUNTERED *****"
print pymsg + "\n"
finally:
if totalCopySuccess:
print "\n\nFile copy was successful.\n"
else:
print "\n\nERROR occurred during file copy.\n"
print '{:<14}{:%Y-%m-%d %H:%M:%S}'.format("Start time:", startTime)
endTime = datetime.now()
print '{:<14}{:%Y-%m-%d %H:%M:%S}'.format("End time:", endTime)
print '\nDone.'
if totalCopySuccess:
sys.exit(0)
else:
sys.exit(1)
| StarcoderdataPython |
1972101 | import unittest
import os
import pyopenms
class TestChromatogramExtractor(unittest.TestCase):
    """Binding-level tests for pyopenms.ChromatogramExtractor."""

    def setUp(self):
        # Test fixture files live next to this test module.
        dirname = os.path.dirname(os.path.abspath(__file__))
        self.filename = os.path.join(dirname, "test.TraML")
        self.filename_mzml = os.path.join(dirname, "test2.mzML")

    def test_extractor(self):
        """Extract chromatograms from a TraML + mzML pair and check the
        output is non-empty and consistent with the inputs."""
        targeted = pyopenms.TargetedExperiment();
        tramlfile = pyopenms.TraMLFile();
        tramlfile.load(self.filename, targeted);
        exp = pyopenms.MSExperiment()
        pyopenms.MzMLFile().load(self.filename_mzml, exp)
        trafo = pyopenms.TransformationDescription()
        tmp_out = pyopenms.MSExperiment();
        extractor = pyopenms.ChromatogramExtractor()
        extractor.extractChromatograms(exp, tmp_out, targeted, 10, False, trafo, -1, "tophat")
        # Basically test that the output is non-zero (e.g. the data is
        # correctly relayed to python)
        # The functionality is not tested here!
        self.assertEqual(len(tmp_out.getChromatograms()), len(targeted.getTransitions()))
        self.assertNotEqual(len(tmp_out.getChromatograms()), 0)
        self.assertEqual(tmp_out.getChromatograms()[0].size(), exp.size())
        self.assertNotEqual(tmp_out.getChromatograms()[0].size(), 0)
        self.assertNotEqual(tmp_out.getChromatograms()[0][0].getRT(), 0)
        self.assertNotEqual(tmp_out.getChromatograms()[0][0].getIntensity(), 0)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
6634797 | # -*- coding: utf-8 -*-
from __future__ import generator_stop
import json
from codecs import open
from pprint import pprint
import tatsu
from tatsu.ast import AST
from tatsu.walkers import NodeWalker
from codegen import PostfixCodeGenerator # pylint: disable= E0401 # noqa
def simple_parse():
    """Compile the cut-grammar and print the resulting AST (pprint + JSON)."""
    grammar = open('grammars/calc_cut.ebnf').read()
    parser = tatsu.compile(grammar)
    ast = parser.parse('3 + 5 * ( 10 - 20 )', trace=False, colorize=True)
    print()
    print('# SIMPLE PARSE')
    print('# AST')
    pprint(ast, width=20, indent=4)
    print()
    print('# JSON')
    print(json.dumps(ast, indent=4))
    print()
def annotated_parse():
    """Parse with the annotated grammar and print the named-field AST."""
    grammar = open('grammars/calc_annotated.ebnf').read()
    parser = tatsu.compile(grammar)
    ast = parser.parse('3 + 5 * ( 10 - 20 )')
    print()
    print('# ANNOTATED AST')
    pprint(ast, width=20, indent=4)
    print()
class CalcBasicSemantics(object):
    """Semantic actions that evaluate the annotated calc grammar.

    ``term`` and ``expression`` receive either an already-reduced plain
    value or an AST node carrying ``op``/``left``/``right`` fields.
    """

    def number(self, ast):
        """Convert a matched number token to int."""
        return int(ast)

    def term(self, ast):
        """Evaluate a multiplication/division node; pass plain values through."""
        if not isinstance(ast, AST):
            return ast
        op = ast.op
        if op == '*':
            return ast.left * ast.right
        if op == '/':
            return ast.left / ast.right
        raise Exception('Unknown operator', op)

    def expression(self, ast):
        """Evaluate an addition/subtraction node; pass plain values through."""
        if not isinstance(ast, AST):
            return ast
        op = ast.op
        if op == '+':
            return ast.left + ast.right
        if op == '-':
            return ast.left - ast.right
        raise Exception('Unknown operator', op)
def parse_with_basic_semantics():
    """Parse with CalcBasicSemantics so the result is the evaluated number."""
    grammar = open('grammars/calc_annotated.ebnf').read()
    parser = tatsu.compile(grammar)
    result = parser.parse(
        '3 + 5 * ( 10 - 20 )',
        semantics=CalcBasicSemantics()
    )
    print()
    print('# BASIC SEMANTICS RESULT')
    assert result == -47
    print(result)
    print()
class CalcSemantics(object):
    """Semantics for the factored grammar: one action per named rule, so
    no operator dispatch is needed."""

    def number(self, ast):
        return int(ast)

    def addition(self, ast):
        return ast.left + ast.right

    def subtraction(self, ast):
        return ast.left - ast.right

    def multiplication(self, ast):
        return ast.left * ast.right

    def division(self, ast):
        return ast.left / ast.right
def parse_factored():
    """Parse with the factored grammar + CalcSemantics and print the result."""
    grammar = open('grammars/calc_factored.ebnf').read()
    parser = tatsu.compile(grammar)
    ast = parser.parse(
        '3 + 5 * ( 10 - 20 )',
        semantics=CalcSemantics()
    )
    print()
    print('# FACTORED SEMANTICS RESULT')
    pprint(ast, width=20, indent=4)
    print()
def parse_to_model():
    """Parse into a typed object model (asmodel=True) and print it as JSON."""
    grammar = open('grammars/calc_model.ebnf').read()
    parser = tatsu.compile(grammar, asmodel=True)
    model = parser.parse('3 + 5 * ( 10 - 20 )')
    print()
    print('# MODEL TYPE IS:', type(model).__name__)
    print(json.dumps(model.asjson(), indent=4))
    print()
class CalcWalker(NodeWalker):
    """Walk the calc object model, evaluating the arithmetic bottom-up."""

    def walk_object(self, node):
        # Leaf values (plain numbers) evaluate to themselves.
        return node

    def walk__add(self, node):
        return self.walk(node.left) + self.walk(node.right)

    def walk__subtract(self, node):
        return self.walk(node.left) - self.walk(node.right)

    def walk__multiply(self, node):
        return self.walk(node.left) * self.walk(node.right)

    def walk__divide(self, node):
        return self.walk(node.left) / self.walk(node.right)
def parse_and_walk_model():
    """Parse to a model, then evaluate it with CalcWalker."""
    grammar = open('grammars/calc_model.ebnf').read()
    parser = tatsu.compile(grammar, asmodel=True)
    model = parser.parse('3 + 5 * ( 10 - 20 )')
    print()
    print('# WALKER RESULT')
    result = CalcWalker().walk(model)
    assert result == -47
    print(result)
    print()
def parse_and_translate():
    """Parse to a model and render it as postfix notation via code generation."""
    grammar = open('grammars/calc_model.ebnf').read()
    parser = tatsu.compile(grammar, asmodel=True)
    model = parser.parse('3 + 5 * ( 10 - 20 )')
    postfix = PostfixCodeGenerator().render(model)
    print()
    print('# TRANSLATED TO POSTFIX')
    print(postfix)
def main():
    """Run every demo in order, from plain parsing to code generation."""
    simple_parse()
    annotated_parse()
    parse_with_basic_semantics()
    parse_factored()
    parse_to_model()
    parse_and_walk_model()
    parse_and_translate()
if __name__ == '__main__':
main()
| StarcoderdataPython |
8122055 | import unittest
from ..source.example import prove_works
class Test_example(unittest.TestCase):
    """Smoke test for the example source module."""

    def test_one(self):
        # mirror() should return its argument unchanged.
        example = prove_works()
        self.assertEqual(example.mirror(1), 1)
| StarcoderdataPython |
364667 | from ansiscape.enums import InterpretationKey, NamedColor, SelectGraphicRendition
from ansiscape.handlers import get_color_interpreter, get_interpreter_for_sgr
from ansiscape.interpreters import register_interpreters
from ansiscape.sequence import Sequence
from ansiscape.types import Color, Interpretation, SequencePart, SequenceType
from ansiscape.version import get_version
register_interpreters()
def alternative_font_0(*parts: SequencePart) -> SequenceType:
return make_sequence(SelectGraphicRendition.FONT_ALT_0, *parts)
def alternative_font_1(*parts: SequencePart) -> SequenceType:
return make_sequence(SelectGraphicRendition.FONT_ALT_1, *parts)
def alternative_font_2(*parts: SequencePart) -> SequenceType:
return make_sequence(SelectGraphicRendition.FONT_ALT_2, *parts)
def alternative_font_3(*parts: SequencePart) -> SequenceType:
return make_sequence(SelectGraphicRendition.FONT_ALT_3, *parts)
def alternative_font_4(*parts: SequencePart) -> SequenceType:
return make_sequence(SelectGraphicRendition.FONT_ALT_4, *parts)
def alternative_font_5(*parts: SequencePart) -> SequenceType:
return make_sequence(SelectGraphicRendition.FONT_ALT_5, *parts)
def alternative_font_6(*parts: SequencePart) -> SequenceType:
return make_sequence(SelectGraphicRendition.FONT_ALT_6, *parts)
def alternative_font_7(*parts: SequencePart) -> SequenceType:
return make_sequence(SelectGraphicRendition.FONT_ALT_7, *parts)
def alternative_font_8(*parts: SequencePart) -> SequenceType:
return make_sequence(SelectGraphicRendition.FONT_ALT_8, *parts)
def background(color: Color, *parts: SequencePart) -> SequenceType:
i = get_color_interpreter(InterpretationKey.BACKGROUND)
return i.make_sequence(color, *parts)
# Helpers for the 16 base terminal colors.
# NOTE(review): the non-bright helpers emit fixed SGR codes via
# make_sequence(), while the bright_* helpers route through
# foreground()/background() with a NamedColor — presumably equivalent
# encodings; confirm against the color interpreters.
def black(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a black foreground."""
    return make_sequence(SelectGraphicRendition.FOREGROUND_BLACK, *parts)
def black_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a black background."""
    return make_sequence(SelectGraphicRendition.BACKGROUND_BLACK, *parts)
def blackletter(*parts: SequencePart) -> SequenceType:
    """Render *parts* in the blackletter/Fraktur calligraphy attribute."""
    return make_sequence(SelectGraphicRendition.CALLIGRAPHY_BLACKLETTER, *parts)
def blue(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a blue foreground."""
    return make_sequence(SelectGraphicRendition.FOREGROUND_BLUE, *parts)
def blue_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a blue background."""
    return make_sequence(SelectGraphicRendition.BACKGROUND_BLUE, *parts)
def bright_black(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-black foreground."""
    return foreground(NamedColor.BRIGHT_BLACK, *parts)
def bright_black_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-black background."""
    return background(NamedColor.BRIGHT_BLACK, *parts)
def bright_blue(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-blue foreground."""
    return foreground(NamedColor.BRIGHT_BLUE, *parts)
def bright_blue_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-blue background."""
    return background(NamedColor.BRIGHT_BLUE, *parts)
def bright_cyan(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-cyan foreground."""
    return foreground(NamedColor.BRIGHT_CYAN, *parts)
def bright_cyan_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-cyan background."""
    return background(NamedColor.BRIGHT_CYAN, *parts)
def bright_green(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-green foreground."""
    return foreground(NamedColor.BRIGHT_GREEN, *parts)
def bright_green_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-green background."""
    return background(NamedColor.BRIGHT_GREEN, *parts)
def bright_magenta(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-magenta foreground."""
    return foreground(NamedColor.BRIGHT_MAGENTA, *parts)
def bright_magenta_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-magenta background."""
    return background(NamedColor.BRIGHT_MAGENTA, *parts)
def bright_red(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-red foreground."""
    return foreground(NamedColor.BRIGHT_RED, *parts)
def bright_red_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-red background."""
    return background(NamedColor.BRIGHT_RED, *parts)
def bright_white(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-white foreground."""
    return foreground(NamedColor.BRIGHT_WHITE, *parts)
def bright_white_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-white background."""
    return background(NamedColor.BRIGHT_WHITE, *parts)
def bright_yellow(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-yellow foreground."""
    return foreground(NamedColor.BRIGHT_YELLOW, *parts)
def bright_yellow_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a bright-yellow background."""
    return background(NamedColor.BRIGHT_YELLOW, *parts)
def circle(*parts: SequencePart) -> SequenceType:
    """Frame *parts* with the (rarely supported) encircled attribute."""
    return make_sequence(SelectGraphicRendition.FRAME_CIRCLE, *parts)
def conceal(*parts: SequencePart) -> SequenceType:
    """Mark *parts* as concealed/hidden."""
    return make_sequence(SelectGraphicRendition.CONCEAL_ON, *parts)
def cyan(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a cyan foreground."""
    return foreground(NamedColor.CYAN, *parts)
def cyan_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a cyan background."""
    return make_sequence(SelectGraphicRendition.BACKGROUND_CYAN, *parts)
def double_line_under_or_right(*parts: SequencePart) -> SequenceType:
    """Apply the ideogram double-line under/right attribute to *parts*."""
    return make_sequence(
        SelectGraphicRendition.IDEOGRAM_DOUBLE_LINE_UNDER_OR_RIGHT,
        *parts,
    )
def double_line_over_or_left(*parts: SequencePart) -> SequenceType:
    """Apply the ideogram double-line over/left attribute to *parts*."""
    return make_sequence(
        SelectGraphicRendition.IDEOGRAM_DOUBLE_LINE_OVER_OR_LEFT,
        *parts,
    )
def double_underline(*parts: SequencePart) -> SequenceType:
    """Underline *parts* with a double line."""
    return make_sequence(SelectGraphicRendition.UNDERLINE_DOUBLE, *parts)
def fast_blink(*parts: SequencePart) -> SequenceType:
    """Apply the fast-blink attribute to *parts*."""
    return make_sequence(SelectGraphicRendition.BLINK_FAST, *parts)
def foreground(color: Color, *parts: SequencePart) -> SequenceType:
    """Wrap *parts* in a foreground-color sequence for *color*.

    The interpreter registered under the FOREGROUND key decides how the
    color (named / indexed / RGB) is encoded.
    """
    i = get_color_interpreter(InterpretationKey.FOREGROUND)
    return i.make_sequence(color, *parts)
def frame(*parts: SequencePart) -> SequenceType:
    """Frame *parts* with the boxed/framed attribute."""
    return make_sequence(SelectGraphicRendition.FRAME_BOX, *parts)
def green(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a green foreground."""
    return make_sequence(SelectGraphicRendition.FOREGROUND_GREEN, *parts)
def green_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a green background."""
    return make_sequence(SelectGraphicRendition.BACKGROUND_GREEN, *parts)
def heavy(*parts: SequencePart) -> SequenceType:
    """Render *parts* with heavy (bold) weight."""
    return make_sequence(SelectGraphicRendition.WEIGHT_HEAVY, *parts)
def invert(*parts: SequencePart) -> SequenceType:
    """Swap foreground and background colors for *parts*."""
    return make_sequence(SelectGraphicRendition.INVERT_ON, *parts)
def italic(*parts: SequencePart) -> SequenceType:
    """Render *parts* in italic calligraphy."""
    return make_sequence(SelectGraphicRendition.CALLIGRAPHY_ITALIC, *parts)
def light(*parts: SequencePart) -> SequenceType:
    """Render *parts* with light (faint) weight."""
    return make_sequence(SelectGraphicRendition.WEIGHT_LIGHT, *parts)
def magenta(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a magenta foreground."""
    return make_sequence(SelectGraphicRendition.FOREGROUND_MAGENTA, *parts)
def magenta_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a magenta background."""
    return make_sequence(SelectGraphicRendition.BACKGROUND_MAGENTA, *parts)
def make_sequence(sgr: SelectGraphicRendition, *parts: SequencePart) -> SequenceType:
    """Build a sequence applying *sgr* to *parts*.

    Dispatches to whatever interpreter is registered for the given SGR
    attribute; all the wrapper helpers in this module funnel through here.
    """
    i = get_interpreter_for_sgr(sgr)
    return i.make_sequence(sgr, *parts)
def overline(*parts: SequencePart) -> SequenceType:
    """Apply the overline attribute to *parts*."""
    return make_sequence(SelectGraphicRendition.OVERLINE_ON, *parts)
def proportional_spacing(*parts: SequencePart) -> SequenceType:
    """Apply the proportional-spacing attribute to *parts*."""
    return make_sequence(SelectGraphicRendition.PROPORTIONAL_SPACING_ON, *parts)
def red(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a red foreground."""
    return make_sequence(SelectGraphicRendition.FOREGROUND_RED, *parts)
def red_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a red background."""
    return make_sequence(SelectGraphicRendition.BACKGROUND_RED, *parts)
def sequence(*parts: SequencePart) -> SequenceType:
    """Bundle *parts* into a plain Sequence with no SGR attribute."""
    return Sequence(*parts)
def single_line_under_or_right(*parts: SequencePart) -> SequenceType:
    """Apply the ideogram single-line under/right attribute to *parts*."""
    return make_sequence(
        SelectGraphicRendition.IDEOGRAM_SINGLE_LINE_UNDER_OR_RIGHT,
        *parts,
    )
def single_line_over_or_left(*parts: SequencePart) -> SequenceType:
    """Apply the ideogram single-line over/left attribute to *parts*."""
    return make_sequence(
        SelectGraphicRendition.IDEOGRAM_SINGLE_LINE_OVER_OR_LEFT,
        *parts,
    )
def single_underline(*parts: SequencePart) -> SequenceType:
    """Underline *parts* with a single line."""
    return make_sequence(SelectGraphicRendition.UNDERLINE_SINGLE, *parts)
def slow_blink(*parts: SequencePart) -> SequenceType:
    """Apply the slow-blink attribute to *parts*."""
    return make_sequence(SelectGraphicRendition.BLINK_SLOW, *parts)
def strike(*parts: SequencePart) -> SequenceType:
    """Strike through *parts*."""
    return make_sequence(SelectGraphicRendition.STRIKE_ON, *parts)
def stress(*parts: SequencePart) -> SequenceType:
    """Apply the ideogram stress-marking attribute to *parts*."""
    return make_sequence(SelectGraphicRendition.IDEOGRAM_STRESS, *parts)
def white(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a white foreground."""
    return make_sequence(SelectGraphicRendition.FOREGROUND_WHITE, *parts)
def white_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a white background."""
    return make_sequence(SelectGraphicRendition.BACKGROUND_WHITE, *parts)
def yellow(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a yellow foreground."""
    return make_sequence(SelectGraphicRendition.FOREGROUND_YELLOW, *parts)
def yellow_background(*parts: SequencePart) -> SequenceType:
    """Color *parts* with a yellow background."""
    return make_sequence(SelectGraphicRendition.BACKGROUND_YELLOW, *parts)
# Names re-exported by ``from <package> import *``.
# NOTE(review): the many helper functions above are not listed here —
# presumably intentional (exposed via another mechanism); confirm against
# the package's import machinery.
__all__ = [
    "get_version",
    "Interpretation",
    "Sequence",
]
| StarcoderdataPython |
9788528 | <filename>table_enforcer/main_classes.py
# -*- coding: utf-8 -*-
"""Main module."""
import typing as t
import pandas as pd
from box import Box
from table_enforcer.errors import ValidationError, RecodingError
from .utils import validate as v
# Public API of table_enforcer.main_classes.
__all__ = [
    "Enforcer",
    "BaseColumn",
    "Column",
    "CompoundColumn",
]
# Signatures of the user-supplied functions attached to a Column:
# validators map a Series to (boolean) test results, recoders map a
# Series to a transformed Series.
VALIDATOR_FUNCTION = t.Callable[[pd.Series], pd.DataFrame]
RECODER_FUNCTION = t.Callable[[pd.Series], pd.Series]
def find_failed_rows(results):
    """Return only the rows of *results* in which at least one check is False."""
    has_failure = ~results.all(axis=1)
    return results.loc[has_failure]
def set_from_kwargs(kwargs, key, default):
    """Return ``kwargs[key]`` if present, otherwise *default*.

    This is exactly ``dict.get``; the previous ``key in kwargs.keys()``
    form did a redundant ``keys()`` call plus a second lookup.
    """
    return kwargs.get(key, default)
class Enforcer(object):
    """Applies a set of column definitions to validate and recode tables."""

    def __init__(self, columns):
        """Store the column-definition objects (``Column``-like) to enforce."""
        self.columns = columns

    def _make_validations(self, table: pd.DataFrame) -> list:
        """Return a list of per-column validation-result dataframes.

        Fixed docs: despite the old docstring this returns a plain ``list``
        (one dataframe per column), not a dict-like ``Box``.
        """
        return [column.validate(table) for column in self.columns]

    def validate(self, table: pd.DataFrame) -> bool:
        """Return True if all validation tests pass for every column."""
        validations = self._make_validations(table=table)
        # df.all().all() collapses a result frame to a single pass/fail.
        return all(df.all().all() for df in validations)

    def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
        """Return a fully recoded dataframe.

        Args:
            table (pd.DataFrame): A dataframe on which to apply recoding logic.
            validate (bool): If ``True``, recoded table must pass validation tests.
        """
        df = pd.DataFrame(index=table.index)
        for column in self.columns:
            df = column.update_dataframe(df, table=table, validate=validate)
        return df
class BaseColumn(object):
    """Base Class for Columns.

    Declares the method API every concrete column class must provide.
    """

    def update_dataframe(self, df, table, validate=False):
        """Recode ``table`` via ``self.recode`` and return ``df`` with the result appended."""
        base = df.copy()
        recoded = self.recode(table=table, validate=validate)
        return pd.concat([base, recoded], axis=1)

    def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
        """Return a dataframe of validation results for this column's series.

        Args:
            table (pd.DataFrame): A dataframe on which to apply validation logic.
            failed_only (bool): If ``True``: return only the indexes that failed to validate.
        """
        raise NotImplementedError("This method must be defined for each subclass.")

    def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
        """Pass the relevant columns through the recoder pipeline and return the result.

        Args:
            table (pd.DataFrame): A dataframe on which to apply recoding logic.
            validate (bool): If ``True``, recoded table must pass validation tests.
        """
        raise NotImplementedError("This method must be defined for each subclass.")
class Column(BaseColumn):
    """Class representing a single table column."""

    def __init__(
            self,
            name: str,
            dtype: type,
            unique: bool,
            validators: t.List[VALIDATOR_FUNCTION],
            recoders: t.List[RECODER_FUNCTION],) -> None:
        """Construct a new `Column` object.

        Args:
            name (str): The exact name of the column in a ``pd.DataFrame``.
            dtype (type): The type that each member of the recoded column must belong to.
            unique (bool): Whether values are allowed to recur in this column.
            validators (list): A list of validator functions (``None`` means none).
            recoders (list): A list of recoder functions (``None`` means none).
        """
        self.name = name
        self.dtype = dtype
        self.unique = unique
        # Stored keyed by function __name__ so results can be labeled.
        self.validators = self._dict_of_funcs(validators or [])
        self.recoders = self._dict_of_funcs(recoders or [])

    def _dict_of_funcs(self, funcs: list) -> dict:
        """Return a dict mapping each function's ``__name__`` to the function.

        Fixed docs/annotation: this returns a ``dict``, not a ``pd.Series``.
        """
        return {func.__name__: func for func in funcs}

    def _validate_series_dtype(self, series: pd.Series) -> pd.Series:
        """Return a boolean Series: True where the element is an instance of ``self.dtype``."""
        return series.apply(lambda i: isinstance(i, self.dtype))

    def _check_series_name(self, series, override_name=None):
        """Raise ``ValueError`` unless ``series.name`` matches this column (or *override_name*)."""
        name = self.name if override_name is None else override_name
        if series.name != name:
            raise ValueError(f"The name of provided series '{series.name}' does not match this column's name '{name}'.")

    def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
        """Return a dataframe of validation results for the appropriate series vs the vector of validators.

        Args:
            table (pd.DataFrame): A dataframe on which to apply validation logic.
            failed_only (bool): If ``True``: return only the indexes that failed to validate.
        """
        series = table[self.name]
        self._check_series_name(series)
        validators = self.validators
        # Seed one column per validator with a copy of the data, then
        # replace each with that validator's boolean results.
        results = pd.DataFrame({validator: series for validator in validators}, index=series.index)
        for name, func in validators.items():
            results[name] = func(results[name])
        results['dtype'] = self._validate_series_dtype(series)
        if self.unique:
            results['unique'] = v.funcs.unique(series)
        if failed_only:
            results = find_failed_rows(results)
        return results

    def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
        """Pass the provided series through each recoder function sequentially and return the final result.

        Args:
            table (pd.DataFrame): A dataframe on which to apply recoding logic.
            validate (bool): If ``True``, recoded table must pass validation tests.

        Raises:
            RecodingError: If any recoder raises while transforming the data.
            ValidationError: If ``validate`` is True and any row fails.
        """
        series = table[self.name]
        self._check_series_name(series)
        data = series.copy()
        for recoder in self.recoders.values():
            try:
                data = recoder(data)
            except Exception as err:
                # Was ``except (BaseException)``, which also swallowed
                # KeyboardInterrupt/SystemExit; chain the cause explicitly.
                raise RecodingError(self.name, recoder, err) from err
        if validate:
            failed_rows = find_failed_rows(self.validate(data.to_frame()))
            if failed_rows.shape[0] > 0:
                raise ValidationError(f"Rows that failed to validate for column '{self.name}':\n{failed_rows}")
        return data.to_frame()
class CompoundColumn(BaseColumn):
    """Class representing multiple columns and the logic governing their transformation from source table to recoded table."""

    def __init__(
            self,
            input_columns: t.List[Column],
            output_columns: t.List[Column],
            column_transform,) -> None:
        """Construct a new ``CompoundColumn`` object.

        Args:
            input_columns (list, Column): A list of ``Column`` objects representing column(s) from the SOURCE table.
            output_columns (list, Column): A list of ``Column`` objects representing column(s) from the FINAL table.
            column_transform (Callable): Function accepting the table object, performing transformations to it and returning a DataFrame containing the NEW columns only.
        """
        self.input_columns = input_columns
        self.output_columns = output_columns
        self.column_transform = column_transform

    def _do_validation_set(self, table: pd.DataFrame, columns, validation_type, failed_only=False) -> pd.DataFrame:
        """Return a dataframe of validation results for the appropriate series vs the vector of validators."""
        validations = []
        for column in columns:
            validation = column.validate(table=table, failed_only=failed_only)
            # Tag each per-column result so the concatenated frame stays traceable.
            validation["column_name"] = column.name
            validation["validation_type"] = validation_type
            validations.append(validation)
        validation_table = pd.concat(validations)
        validation_table.index.name = 'row'
        # Index by (validation_type, column_name, row) so input- and
        # output-side results can later be concatenated without colliding.
        return validation_table.reset_index().set_index(["validation_type", "column_name", "row"])

    def _validate_input(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
        """Return a dataframe of validation results for the appropriate series vs the vector of validators."""
        return self._do_validation_set(
            table=table,
            columns=self.input_columns,
            validation_type="input",
            failed_only=failed_only,)

    def _recode_set(self, table: pd.DataFrame, columns, validate=False) -> pd.DataFrame:
        """Recode each column in *columns* and concatenate the results side by side."""
        recoded_columns = []
        for column in columns:
            recoded = column.recode(table=table, validate=validate)
            recoded_columns.append(recoded)
        return pd.concat(recoded_columns, axis=1)

    def _recode_input(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
        """Recode the SOURCE-side columns of *table*."""
        return self._recode_set(table=table, columns=self.input_columns, validate=validate)

    def _validate_output(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
        """Validate the FINAL-side columns produced by ``column_transform``."""
        transformed_columns = self.column_transform(table)
        return self._do_validation_set(
            table=transformed_columns,
            columns=self.output_columns,
            validation_type="output",
            failed_only=failed_only,)

    def _recode_output(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
        """Apply ``column_transform`` then recode the FINAL-side columns."""
        transformed_columns = self.column_transform(table)
        return self._recode_set(table=transformed_columns, columns=self.output_columns, validate=validate)

    def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
        """Return a dataframe of validation results for the appropriate series vs the vector of validators.

        Args:
            table (pd.DataFrame): A dataframe on which to apply validation logic.
            failed_only (bool): If ``True``: return only the indexes that failed to validate.
        """
        # Input and output frames carry different validator columns; cells
        # that exist on only one side are treated as passing (fillna(True)).
        return pd.concat([
            self._validate_input(table, failed_only=failed_only),
            self._validate_output(table, failed_only=failed_only),
        ]).fillna(True)

    def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
        """Pass the appropriate columns through each recoder function sequentially and return the final result.

        Args:
            table (pd.DataFrame): A dataframe on which to apply recoding logic.
            validate (bool): If ``True``, recoded table must pass validation tests.
        """
        return self._recode_output(self._recode_input(table, validate=validate), validate=validate)
| StarcoderdataPython |
3368741 | # API to submit Flight Declarations into Spotlight
from os import environ as env
import redis, json
import requests
import logging
from datetime import datetime, timedelta
class PassportCredentialsGetter():
    """Fetches OAuth client-credentials tokens from Passport and caches them in Redis."""

    # Tokens are treated as valid for ~60 minutes; refresh a little early.
    TOKEN_LIFETIME_MINUTES = 58

    def __init__(self):
        pass

    def get_cached_credentials(self):
        """Return token details from the Redis cache, refreshing them when stale.

        Credentials are stored as JSON under
        ``flight_declaration_access_token_details`` alongside their creation
        timestamp, with a TTL matching the token lifetime.
        """
        r = redis.Redis(host=env.get('REDIS_HOST', "redis"), port=env.get('REDIS_PORT', 6379))
        now = datetime.now()
        token_details = r.get('flight_declaration_access_token_details')
        if token_details:
            token_details = json.loads(token_details)
            created_at = token_details['created_at']
            set_date = datetime.strptime(created_at, "%Y-%m-%dT%H:%M:%S.%f")
            # BUGFIX: this previously tested ``now < set_date - 58min``, which
            # is never true for a token created in the past, so a stale token
            # was only ever evicted by the Redis TTL (and the refresh path
            # re-cached without any TTL).  Refresh once the token has aged out.
            if now > set_date + timedelta(minutes=self.TOKEN_LIFETIME_MINUTES):
                credentials = self.get_write_credentials()
                if not credentials.get('error'):
                    r.set('flight_declaration_access_token_details', json.dumps({'credentials': credentials, 'created_at': now.isoformat()}))
                    r.expire("flight_declaration_access_token_details", timedelta(minutes=self.TOKEN_LIFETIME_MINUTES))
            else:
                credentials = token_details['credentials']
        else:
            credentials = self.get_write_credentials()
            error = credentials.get('error')
            if not error:  # there is no error in the token
                r.set('flight_declaration_access_token_details', json.dumps({'credentials': credentials, 'created_at': now.isoformat()}))
                r.expire("flight_declaration_access_token_details", timedelta(minutes=self.TOKEN_LIFETIME_MINUTES))
        return credentials

    def get_write_credentials(self):
        """POST a client-credentials grant to Passport; return the token JSON, or {} on non-200."""
        payload = {"grant_type": "client_credentials", "client_id": env.get('SPOTLIGHT_WRITE_CLIENT_ID'), "client_secret": env.get('SPOTLIGHT_WRITE_CLIENT_SECRET'), "audience": env.get('SPOTLIGHT_AUDIENCE'), "scope": env.get('SPOTLIGHT_FLIGHT_DECLARATION_SCOPE')}
        url = env.get('PASSPORT_URL') + env.get('PASSPORT_TOKEN_URL')
        token_response = requests.post(url, data=payload)
        return token_response.json() if token_response.status_code == 200 else {}
class FlightDeclarationsUploader():
    """Pushes a flight-declaration payload to the Flight Spotlight API."""

    def __init__(self, credentials):
        # Expects a token dict containing at least 'access_token'.
        self.credentials = credentials

    def upload_to_server(self, flight_declaration_json):
        """POST the declaration to Spotlight's ``/set_flight_declaration`` endpoint.

        NOTE(review): the success string is returned even when the POST
        raised (the exception is only logged) and the HTTP status of
        ``response`` is never checked — callers cannot detect failure;
        confirm this is intended.
        """
        headers = {"Authorization": "Bearer "+ self.credentials['access_token']}
        payload = {"flight_declaration" : flight_declaration_json}
        securl = env.get('FLIGHT_SPOTLIGHT_URL') + '/set_flight_declaration'
        try:
            response = requests.post(securl, data= payload, headers=headers)
        except Exception as e:
            logging.error(e)
        else:
            logging.debug("Uploaded Flight Declarations")
        return "Uploaded Flight Declarations"
| StarcoderdataPython |
3291849 | from __future__ import absolute_import, division, print_function, unicode_literals
from six import python_2_unicode_compatible
from canvasapi.canvas_object import CanvasObject
from canvasapi.util import combine_kwargs
@python_2_unicode_compatible
class Login(CanvasObject):
    """A user login (pseudonym) on a Canvas account.

    Thin wrapper over the Canvas Logins REST API; each call returns a new
    ``Login`` built from the response JSON.
    """

    def __str__(self):
        # Attributes come from the API payload stored by CanvasObject.
        return "{} ({})".format(self.id, self.unique_id)

    def delete(self):
        """
        Delete an existing login.

        :calls: `DELETE /api/v1/users/:user_id/logins/:id \
        <https://canvas.instructure.com/doc/api/logins.html#method.pseudonyms.destroy>`_

        :rtype: :class:`canvasapi.login.Login`
        """
        response = self._requester.request(
            'DELETE',
            'users/{}/logins/{}'.format(self.user_id, self.id)
        )
        return Login(self._requester, response.json())

    def edit(self, **kwargs):
        """
        Update an existing login for a user in the given account.

        :calls: `PUT /api/v1/accounts/:account_id/logins/:id \
        <https://canvas.instructure.com/doc/api/logins.html#method.pseudonyms.update>`_

        :rtype: :class:`canvasapi.login.Login`
        """
        response = self._requester.request(
            'PUT',
            'accounts/{}/logins/{}'.format(self.account_id, self.id),
            _kwargs=combine_kwargs(**kwargs)
        )
        return Login(self._requester, response.json())
| StarcoderdataPython |
257041 | <reponame>jonadaly/neomodel<filename>test/test_relationship_models.py
from datetime import datetime
from pytest import raises
import pytz
from neomodel import (StructuredNode, StructuredRel, Relationship, RelationshipTo,
StringProperty, DateTimeProperty, DeflateError)
# Counters bumped by HatesRel's save hooks so tests can assert the hooks ran.
HOOKS_CALLED = {
    'pre_save': 0,
    'post_save': 0
}
class FriendRel(StructuredRel):
    """Relationship model recording when the friendship was created (UTC now by default)."""
    since = DateTimeProperty(default=lambda: datetime.now(pytz.utc))
class HatesRel(FriendRel):
    """FriendRel subtype with a free-text reason; save hooks bump HOOKS_CALLED."""
    reason = StringProperty()

    def pre_save(self):
        """Count pre-save hook invocations (asserted in test_save_hook_on_rel_model)."""
        HOOKS_CALLED['pre_save'] += 1

    def post_save(self):
        """Count post-save hook invocations (asserted in test_save_hook_on_rel_model)."""
        HOOKS_CALLED['post_save'] += 1
class Badger(StructuredNode):
    """Node with an undirected FRIEND relation to badgers and a directed HATES relation to stoats."""
    name = StringProperty(unique_index=True)
    friend = Relationship('Badger', 'FRIEND', model=FriendRel)
    hates = RelationshipTo('Stoat', 'HATES', model=HatesRel)
class Stoat(StructuredNode):
    """Node with a directed HATES relation back to badgers."""
    name = StringProperty(unique_index=True)
    hates = RelationshipTo('Badger', 'HATES', model=HatesRel)
def test_either_connect_with_rel_model():
    """Undirected ('either') relationships accept a rel model and expose start/end nodes."""
    paul = Badger(name="Paul").save()
    tom = Badger(name="Tom").save()
    # creating rels
    new_rel = tom.friend.disconnect(paul)
    new_rel = tom.friend.connect(paul)
    assert isinstance(new_rel, FriendRel)
    assert isinstance(new_rel.since, datetime)
    # updating properties
    new_rel.since = datetime.now(pytz.utc)
    assert isinstance(new_rel.save(), FriendRel)
    # start and end nodes are the opposite of what you'd expect when using either..
    # I've tried everything possible to correct this to no avail
    paul = new_rel.start_node()
    tom = new_rel.end_node()
    assert paul.name == 'Paul'
    assert tom.name == 'Tom'
def test_direction_connect_with_rel_model():
    """Directed relationships carry model properties and deflate-check them on save/connect."""
    paul = Badger(name="<NAME>").save()
    ian = Stoat(name="Ian the stoat").save()
    rel = ian.hates.connect(paul, {'reason': "thinks paul should bath more often"})
    assert isinstance(rel.since, datetime)
    assert isinstance(rel, FriendRel)
    assert rel.reason.startswith("thinks")
    rel.reason = 'he smells'
    rel.save()
    ian = rel.start_node()
    assert isinstance(ian, Stoat)
    paul = rel.end_node()
    assert isinstance(paul, Badger)
    assert ian.name.startswith("Ian")
    assert paul.name.startswith("Paul")
    rel = ian.hates.relationship(paul)
    assert isinstance(rel, HatesRel)
    assert isinstance(rel.since, datetime)
    rel.save()
    # test deflate checking
    rel.since = "2:30pm"
    with raises(DeflateError):
        rel.save()
    # check deflate check via connect
    with raises(DeflateError):
        paul.hates.connect(ian, {'reason': "thinks paul should bath more often", 'since': '2:30pm'})
def test_traversal_where_clause():
    """``match`` filters traversals on relationship properties (since__gt)."""
    phill = Badger(name="<NAME>").save()
    tim = Badger(name="<NAME>").save()
    bob = Badger(name="<NAME>").save()
    rel = tim.friend.connect(bob)
    now = datetime.now(pytz.utc)
    assert rel.since < now
    rel2 = tim.friend.connect(phill)
    assert rel2.since > now
    friends = tim.friend.match(since__gt=now)
    assert len(friends) == 1
def test_multiple_rels_exist_issue_223():
    """Regression for neomodel issue #223: parallel rels between the same nodes."""
    # check a badger can dislike a stoat for multiple reasons
    phill = Badger(name="Phill").save()
    ian = Stoat(name="Stoat").save()
    rel_a = phill.hates.connect(ian, {'reason': 'a'})
    rel_b = phill.hates.connect(ian, {'reason': 'b'})
    assert rel_a.id != rel_b.id
    ian_a = phill.hates.match(reason='a')[0]
    ian_b = phill.hates.match(reason='b')[0]
    assert ian_a.id == ian_b.id
def test_retrieve_all_rels():
    """``all_relationships`` returns every parallel rel between two nodes."""
    tom = Badger(name="tom").save()
    ian = Stoat(name="ian").save()
    rel_a = tom.hates.connect(ian, {'reason': 'a'})
    rel_b = tom.hates.connect(ian, {'reason': 'b'})
    rels = tom.hates.all_relationships(ian)
    assert len(rels) == 2
    # Order is not guaranteed, so only membership is asserted.
    assert rels[0].id in [rel_a.id, rel_b.id]
    assert rels[1].id in [rel_a.id, rel_b.id]
def test_save_hook_on_rel_model():
    """pre_save/post_save fire on connect() and again on the explicit save()."""
    HOOKS_CALLED['pre_save'] = 0
    HOOKS_CALLED['post_save'] = 0
    paul = Badger(name="PaulB").save()
    ian = Stoat(name="IanS").save()
    rel = ian.hates.connect(paul, {'reason': "yadda yadda"})
    rel.save()
    # One invocation from connect, one from save.
    assert HOOKS_CALLED['pre_save'] == 2
    assert HOOKS_CALLED['post_save'] == 2
| StarcoderdataPython |
8041407 | <reponame>cam-parra/indy-plenum
from plenum.test import waits
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.restart.helper import get_group, restart_nodes
# Pool size — presumably consumed by the txnPoolNodeSet fixture; confirm
# against the plenum conftest machinery.
nodeCount = 7
# Wall-clock budget (seconds) the plenum test runner allows this module.
TestRunningTimeLimitSec = 150
def test_restart_groups_4_of_7_wp_tm(looper, txnPoolNodeSet, tconf, tdir,
                                     sdk_pool_handle, sdk_wallet_client, allPluginsPath):
    """Restart 4 of 7 nodes (including the primary) one by one, then check the pool still works."""
    # Allow for primary disconnection tolerance plus a full re-election
    # before expecting the restarted nodes to serve requests.
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    restart_group = get_group(txnPoolNodeSet, 4, include_primary=True)
    restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=True)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
| StarcoderdataPython |
1652208 | <reponame>ClinGen/gene-and-variant-curation-tools<filename>gci-vci-serverless/tests/unit/test_ddb_update_expression_builder.py
import pytest
from src.db.ddb_client import UpdateExpressionBuilder
def test_builds_set_expression():
    """SET actions accumulate into a single comma-separated SET clause."""
    builder = UpdateExpressionBuilder()
    for attr in ('foo', 'bar'):
        builder.append_set(attr)
    assert builder.build_expression() == 'SET #foo = :foo, #bar = :bar'
def test_builds_remove_expression():
    """REMOVE actions accumulate into a single comma-separated REMOVE clause."""
    builder = UpdateExpressionBuilder()
    for attr in ('foo', 'bar'):
        builder.append_remove(attr)
    assert builder.build_expression() == 'REMOVE #foo, #bar'
def test_builds_multiple_action_expression():
    """SET and REMOVE actions combine into one space-separated expression."""
    builder = UpdateExpressionBuilder()
    for attr in ('foo', 'bar'):
        builder.append_set(attr)
    builder.append_remove('foobar')
    assert builder.build_expression() == 'SET #foo = :foo, #bar = :bar REMOVE #foobar'
| StarcoderdataPython |
143442 | # -*- coding: utf-8 -*-
# @Author : LG
"""
执行用时:104 ms, 在所有 Python3 提交中击败了64.10% 的用户
内存消耗:14.4 MB, 在所有 Python3 提交中击败了79.35% 的用户
解题思路:
具体实现见代码注释
"""
class Solution:
    def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:
        """Merge two binary trees by summing overlapping node values, building a NEW tree."""
        def find(root1, root2):  # visit the corresponding nodes of the two trees
            if root1 and root2:  # both nodes exist
                node = TreeNode(root1.val + root2.val)  # merged node's value is the sum of both
                node.left = find(root1.left, root2.left)  # merge the left subtrees; result becomes the left child
                node.right = find(root1.right, root2.right)  # merge the right subtrees
            elif root1 or root2:  # only one node exists
                node = TreeNode(root1.val if root1 else root2.val)  # copy the existing node's value
                node.left = find(root1.left if root1 else None, root2.left if root2 else None)  # continue down the existing node's left subtree
                node.right = find(root1.right if root1 else None, root2.right if root2 else None)  # continue down the existing node's right subtree
            else:
                return None  # neither node exists
            return node
        t = find(t1, t2)
        return t
"""
执行用时:100 ms, 在所有 Python3 提交中击败了91.61% 的用户
内存消耗:14.5 MB, 在所有 Python3 提交中击败了46.33% 的用户
解题思路:
同上,但不新建树
"""
class Solution:
    def mergeTrees(self, t1: TreeNode, t2: TreeNode) -> TreeNode:
        """Merge two binary trees IN PLACE by summing overlapping node values into ``t1``."""
        def find(root1, root2):  # visit the corresponding nodes of the two trees
            if root1 and root2:  # both nodes exist
                root1.val = root1.val + root2.val  # merged value is the sum of both nodes
                root1.left = find(root1.left, root2.left)  # merge the left subtrees; result becomes the left child
                root1.right = find(root1.right, root2.right)  # merge the right subtrees
            elif root1 or root2:  # only one node exists: reuse it unchanged
                return root1 if root1 else root2
            else:
                return None  # neither node exists
            return root1
        t = find(t1, t2)
        # BUGFIX: a stray dataset artifact (" | StarcoderdataPython") was fused
        # onto this return statement, making it a broken expression.
        return t
# Ler o nome de uma pessoa e diga se ela tem Silva no nome
# (Read a person's name and say whether "Silva" occurs in it.)
# BUGFIX: a stray dataset id ("6644305 | ") was fused onto the comment line
# above, making the file unparseable.
name = input('Digite seu nome: ').strip()
# Case-insensitive membership test; prints True/False.
print('Seu nome tem Silva? {} ' .format('silva' in name.lower()))
| StarcoderdataPython |
122016 | <reponame>junkert/helios
#!/usr/bin/python
import os
import sys
import time
import json
import pprint
import random
import signal
import shutil
import LPD8806
import colorsys
import threading
import traceback
# Globals
num_leds = 160  # number of LEDs on the LPD8806 strand
# Shared mutable state: current RGB channel values plus a brightness 'v' in [0, 1].
colors = {'r': 0, 'g': 0, 'b': 0, 'v': 1.0}
# Latest UI settings parsed from the settings file (populated by ReadFile).
output_dict = {}
random.seed(time.time())
class Brightness(threading.Thread):
    """Thread that endlessly ramps the global brightness (colors['v']) up and down."""

    def __init__(self):
        threading.Thread.__init__(self)
        # Delay between brightness steps; overridden by the UI slider.
        self.sleep_time = 0.1

    def run(self):
        # Triangle-wave the 'v' channel: 0.00 -> 0.99, then 1.00 -> 0.00, forever.
        while True:
            for i in range(0, 100):
                colors['v'] = i * 0.01
                self.refresh_dict()
                time.sleep(self.sleep_time)
            for i in range(100, -1, -1):
                colors['v'] = i * 0.01
                self.refresh_dict()
                time.sleep(self.sleep_time)

    def refresh_dict(self):
        """Re-read the step delay from the shared UI settings, if present."""
        if 'brightness_slider' in output_dict:
            # Slider value (0-255 from the UI) scaled to a delay in seconds.
            self.sleep_time = float(output_dict['brightness_slider']) * 0.01
class ReadFile(threading.Thread):
    """Thread that polls the settings file and publishes it into ``output_dict``.

    Python 2 code. A lock FILE on a ramdisk coordinates access with the
    process that writes the settings.
    """

    def __init__(self,
                 settings_file='/mnt/ram/led_settings.dict',
                 lock_file='/mnt/ram/.led_settings.dict.lock',
                 backup_file='/home/levi/funkytown_leds/led_settings.dict.backup'):
        self.settings_file = settings_file
        self.lock_file = lock_file
        self.previous_pattern = 'fader'
        threading.Thread.__init__(self)
        # Seed the settings file from the backup if the ramdisk copy is missing.
        if not os.path.exists(settings_file):
            shutil.copyfile(backup_file, settings_file)
        # Clear any lock file left over from a previous crashed run.
        if os.path.exists(self.lock_file):
            os.unlink(self.lock_file)

    def convert(self, input_dict):
        """Recursively convert unicode keys/values of the parsed JSON to UTF-8 bytes (py2)."""
        if isinstance(input_dict, dict):
            return ({self.convert(key): self.convert(value)
                     for key, value in input_dict.iteritems()})
        elif isinstance(input_dict, list):
            return [self.convert(element) for element in input_dict]
        elif isinstance(input_dict, unicode):
            return input_dict.encode('utf-8')
        else:
            return input_dict

    def run(self):
        # Poll the settings file four times a second, forever.
        while True:
            self.refresh_input()
            time.sleep(0.25)

    def get_lock_file(self):
        """Busy-wait until the lock file is free, then create it to claim the lock."""
        while os.path.exists(self.lock_file):
            time.sleep(0.01)
        fp = open(self.lock_file, 'w')
        fp.write("")
        fp.close()

    def release_lock_file(self):
        """Remove the lock file, releasing the on-disk lock."""
        if os.path.exists(self.lock_file):
            os.unlink(self.lock_file)

    def refresh_input(self):
        """Parse the settings file and replace the global ``output_dict``.

        NOTE(review): ``lock`` is a fresh threading.Lock per call, so it
        provides no actual cross-thread exclusion — the lock FILE does the
        real locking; confirm the in-process Lock is intentional.
        """
        global output_dict
        lock = threading.Lock()
        # Check to make sure the file is not open already (file size < 100 bytes)
        while os.path.getsize(self.settings_file) < 100:
            time.sleep(0.1)
        self.get_lock_file()
        lock.acquire()
        fl = open(self.settings_file, 'r')
        try:
            line = fl.readlines()[0]
            input_dict = json.loads(line)
            output_dict = self.convert(input_dict)
            #pprint.pprint(output_dict)
            if 'pattern' in output_dict:
                if self.previous_pattern != output_dict['pattern']:
                    self.previous_pattern = output_dict['pattern']
        except Exception, e:
            print "============ EXCEPTION ============"
            print "%s" % e
            print traceback.format_exc()
            print "==================================="
        finally:
            fl.close()
            lock.release()
            self.release_lock_file()
class ColorChanger(threading.Thread):
    """Thread that keeps the global ``colors`` dict in sync with the UI's color choice.

    Python 2 code. Supported choices: 'rainbow' (HSV sweep), 'white',
    'black', and 'solid' (copy r/g/b straight from the UI settings).
    """

    def __init__(self):
        self.sleep_time = 0.2
        self.choice = 'white'
        threading.Thread.__init__(self)

    @staticmethod
    def set_color():
        # Copy the UI's r/g/b values into the shared colors dict.
        colors['r'] = output_dict['r']
        colors['g'] = output_dict['g']
        colors['b'] = output_dict['b']

    def run(self):
        while True:
            self.refresh_dict()
            if 'color_button' in output_dict:
                if self.choice == 'rainbow':
                    self.rainbow()
                elif self.choice == 'white':
                    # Force the UI values to a fixed white before copying.
                    output_dict['r'] = 200
                    output_dict['g'] = 200
                    output_dict['b'] = 200
                    self.set_color()
                elif self.choice == 'black':
                    # NOTE(review): 'black' copies the CURRENT UI r/g/b rather
                    # than forcing zeros — confirm this is intended.
                    self.set_color()
                elif self.choice == 'solid':
                    self.set_color()
            time.sleep(self.sleep_time)

    def refresh_dict(self):
        """Re-read color choice and hue speed from the shared UI settings.

        NOTE(review): assumes 'color_button' and 'hue_slider' are always
        present in output_dict (KeyError otherwise) — confirm the writer
        guarantees them.
        """
        global output_dict
        self.choice = output_dict['color_button']
        speed = float(output_dict['hue_slider']) * 0.00001
        if speed > 1 and self.choice == 'rainbow':
            self.sleep_time = speed
            print speed
        else:
            self.sleep_time = 0.1

    def rainbow(self):
        """Sweep the hue 0->359 then 359->0, honoring the saturation/brightness sliders.

        Bails out as soon as the UI choice is no longer 'rainbow'.
        """
        for i in range(0, 360):
            self.refresh_dict()
            if 'saturation_slider' in output_dict:
                sat = float(output_dict['saturation_slider']) / 255.000
            else:
                sat = 1.000
            if 'brightness_slider' in output_dict:
                bright = float(output_dict['brightness_slider']) / 255.000
            else:
                bright = 1.000
            # HSV (hue fraction, sat, value) -> RGB scaled back to 0-255.
            (r, g, b) = colorsys.hsv_to_rgb(float(i) / 360, sat, bright)
            r = int(r * 255)
            g = int(g * 255)
            b = int(b * 255)
            colors['r'] = r
            colors['g'] = g
            colors['b'] = b
            self.refresh_dict()
            if 'color_button' in output_dict:
                if output_dict['color_button'] != 'rainbow':
                    return
            time.sleep(self.sleep_time)
        for i in range(359, -1, -1):
            self.refresh_dict()
            if 'saturation_slider' in output_dict:
                sat = float(output_dict['saturation_slider']) / 255.000
            else:
                sat = 1.0
            if 'brightness_slider' in output_dict:
                bright = float(output_dict['brightness_slider']) / 255.000
            else:
                bright = 1.000
            (r, g, b) = colorsys.hsv_to_rgb(float(i) / 360, sat, bright)
            r = int(r * 255)
            g = int(g * 255)
            b = int(b * 255)
            colors['r'] = r
            colors['g'] = g
            colors['b'] = b
            self.refresh_dict()
            if 'color_button' in output_dict:
                if output_dict['color_button'] != 'rainbow':
                    return
            time.sleep(self.sleep_time)
class Pattern(threading.Thread):
    """Render the currently selected LED pattern on the LPD8806 strip.

    Reads the module-level globals `output_dict` (user/HTTP control state),
    `colors` (shared {'r', 'g', 'b'} channel values) and `num_leds`.
    NOTE(review): assumes `output_dict` always contains 'pattern_button'
    and 'pattern_slider' keys -- confirm against the producer thread.
    """
    def __init__(self, color):
        # `color` is the colour-producing thread; the channel values
        # themselves are read from the global `colors` dict, not from here.
        self.color = color
        self.strip = LPD8806.strand(leds=num_leds)
        self.sleep_time = 0.1
        self.choice = 'SingleTail'
        self.valid_choices = ['Fader',
                              'FadeTail',
                              'SingleTail']
        threading.Thread.__init__(self)
    def run(self):
        """Main loop: re-read the requested pattern each cycle and render it."""
        while True:
            self.refresh_dict()
            if self.choice == 'Fader':
                self.fader()
            elif self.choice == 'FadeTail':
                self.fade_tail()
            elif self.choice == 'SingleTail':
                self.mover()
            else:
                # Unknown selection: blank, run one mover cycle, blank again.
                self.black_out()
                self.mover()
                self.black_out()
    def refresh_dict(self):
        # Pull the latest pattern selection and speed from the shared dict.
        button = str(output_dict['pattern_button'])
        self.sleep_time = float(int(output_dict['pattern_slider'])) * 0.00001
        if button in self.valid_choices:
            self.choice = button
    def black_out(self):
        """Turn every LED off."""
        self.strip.fill(0, 0, 0, start=0, end=num_leds)
        self.strip.update()
        time.sleep(0.1)
    def white_out(self):
        """Set every LED to a dim white."""
        self.strip.fill(150, 150, 150, start=0, end=num_leds)
        self.strip.update()
        time.sleep(0.1)
    def fade_tail(self):
        """Move a comet: full-colour head with a tail that fades behind it."""
        def build(led, pos, diff):
            # Advance one position, wrapping at the end of the strip.
            if led['led'] + 1 > num_leds - 1:
                led['led'] = 0
            else:
                led['led'] += 1
            # Dim each channel proportionally to distance from the head.
            for i in ['r', 'g', 'b']:
                led[i] = colors[i] - (diff * pos)
                if led[i] < diff:
                    led[i] = 0
            return led
        group = []
        tail = int(0.50 * num_leds)  # tail length = half the strip
        diff = int((255 / tail) + 2)  # per-position brightness step
        # build structure
        for i in range(0, tail):
            group.append({'led': num_leds - i,
                          'r': 0,
                          'g': 0,
                          'b': 0})
        for head in range(0, num_leds):
            self.refresh_dict()
            for j in range(0, tail):
                led = build(group[j], j, diff)
                #print led
                self.strip.set(led['led'], led['r'], led['g'], led['b'])
                group[j] = led
            self.strip.update()
            time.sleep(self.sleep_time)
    def mover(self):
        """Bounce a single lit LED from one end of the strip to the other."""
        # Move from beginning to end
        for i in range(0, num_leds):
            if i == 0:
                off = num_leds - 1
            else:
                off = i - 1
            self.strip.set(off, 0, 0, 0)
            self.strip.set(i, colors['r'], colors['g'], colors['b'])
            self.strip.update()
            self.refresh_dict()
            time.sleep(self.sleep_time)
        # Move from end to beginning
        for i in range(num_leds - 1, -1, -1):
            if i == num_leds - 1:
                off = 0
            else:
                off = i + 1
            self.strip.set(off, 0, 0, 0)
            self.strip.set(i, colors['r'], colors['g'], colors['b'])
            self.strip.update()
            self.refresh_dict()
            time.sleep(self.sleep_time)
    def fader(self):
        """Fill the whole strip with the current shared colour."""
        self.strip.fill(colors['r'], colors['g'], colors['b'], start=0, end=num_leds)
        self.strip.update()
        self.refresh_dict()
        time.sleep(self.sleep_time)
    def random(self):
        """ This function is not used.
        Maybe remove this or save away somewhere safe.
        """
        def build_led():
            # Create one randomly placed LED with random channel maxima.
            steps = 50
            led = {'led': random.randint(0, num_leds - 1),
                   'r': 0,
                   'g': 0,
                   'b': 0,
                   'r_max': random.randint(1, 254),
                   'g_max': random.randint(1, 254),
                   'b_max': random.randint(1, 254),
                   'dir': 1,
                   'count': 0}
            led['max_val'] = max(led['r_max'], led['g_max'], led['b_max']) + 3
            if led['max_val'] > 254:
                led['max_val'] = 255
            led['r_dec'] = led['r_max'] / steps
            led['g_dec'] = led['g_max'] / steps
            led['b_dec'] = led['b_max'] / steps
            #if led['r'] < 200 and led['g'] < 200 and led['b'] < 200:
            #    v = random.sample(['r', 'g', 'b'], 1)
            #    led[v[0]] = random.randint(200, 255)
            print led
            return led
        leds = []
        self.strip.fill(0, 0, 0, start=0, end=num_leds)
        self.strip.update()
        # Half of the strip twinkles at any one time (Python 2 int division).
        for i in range(0, num_leds / 2):
            led = build_led()
            leds.append(led)
        while True:
            for i in range(0, len(leds)):
                led = leds[i]
                if led['dir'] > 0:
                    if (led['count'] * led['r_dec']) % led['r_max'] == 0:
                        led['r'] += 1
                    if led['count'] % led['g_dec'] == 0:
                        led['g'] += 1
                    if led['count'] % led['b_dec'] == 0:
                        led['b'] += 1
                else:
                    if led['count'] % led['r_dec'] == 0:
                        led['r'] -= 1
                    if led['count'] % led['g_dec'] == 0:
                        led['g'] -= 1
                    if led['count'] % led['b_dec'] == 0:
                        led['b'] -= 1
                led['count'] += 1
                if (led['r'] >= led['max_val'] or
                    led['g'] >= led['max_val'] or
                    led['b'] >= led['max_val']):
                    led['dir'] = -1
                    led['count'] = 0
                if (led['r'] <= 0 and
                    led['g'] <= 0 and
                    led['b'] <= 0):
                    # Fully faded out: replace with a fresh random LED.
                    leds[i] = build_led()
                    led = leds[i]
                self.strip.set(led['led'], led['r'], led['g'], led['b'])
                leds[i] = led
            self.strip.update()
            time.sleep(0.005)
class UserInput(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
while True:
print "Press 0 to close application."
#print "1. Black Out"
#print "2. White Out"
#print "3. fader"
#print "4. Tail Mover"
#print "5. Mover (single led moving)"
#print "6. Reset Color"
#print "7. Use HTTP input"
#print ""
#print "0. Exit"
#print ""
try:
choice = int(raw_input("Choice: "))
print ""
if choice == 0:
print "Exiting"
os._exit(0)
time.sleep(2)
except:
print "Invalid input."
def ctrlc():
print "Exiting . . ."
os._exit(0)
def main():
    """Start the reader, colour, brightness, pattern and user-input threads.

    NOTE(review): ReadFile, ColorChanger and Brightness are defined earlier
    in this file (outside this view); they are started before the LED
    pattern thread so shared state is populated first.
    """
    print "Application will take time to start all threads."
    # Override ctrl-c to kill threads
    signal.signal(signal.SIGINT, ctrlc)
    readfile = ReadFile()
    color = ColorChanger()
    brightness = Brightness()
    leds = Pattern(color)
    print "Starting File Reader"
    readfile.start()
    time.sleep(1)
    print "Starting color"
    color.start()
    brightness.start()
    time.sleep(1)
    print "Starting leds"
    leds.start()
    print "Starting User Controls"
    user = UserInput()
    user.start()
    # sys.exit(0) only ends the main thread; the non-daemon worker threads
    # keep the process alive until UserInput/ctrlc calls os._exit.
    sys.exit(0)
# Entry point guard: only spin up the threads when run as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5170295 | <reponame>yvsdrop/UnityPy
from .EndianBinaryReader import EndianBinaryReader
from .EndianBinaryWriter import EndianBinaryWriter
| StarcoderdataPython |
1813009 | from .sql_base import SqlReader, NameCompare
from .engine import Engine
import copy
import warnings
import re
class PandasReader(SqlReader):
    """SqlReader that executes SQL queries against a pandas DataFrame.

    Column names are sanitised (spaces/parentheses replaced with safe
    tokens) so the underlying pandasql/SQLite engine can reference them,
    and a synthetic primary key is injected when the metadata declares none.
    """
    ENGINE = Engine.PANDAS
    def __init__(self, df, metadata):
        # df: pandas DataFrame holding the data (mutated by sanitisation).
        # metadata: collection metadata describing exactly one table.
        super().__init__()
        # using string here, because we don't want to import .metadata due to circular reference
        if "metadata.collection.CollectionMetadata" in str(type(df)):
            warnings.warn("[df] API has changed to pass (df, metadata). Please update code to pass df first and metadata second. This will be a breaking change in future versions.", Warning)
            tmp = df
            df = metadata
            metadata = tmp
        self.df = df
        self.metadata, self.original_column_names = self._sanitize_metadata(metadata)
    def _sanitize_column_name(self, column_name):
        """Return a SQLite-safe column name; raise on unsupported names.

        NOTE(review): the regex matches any name containing at least one
        allowed character anywhere, so the validation is looser than the
        error message suggests -- confirm intended strictness.
        """
        x = re.search(r".*[a-zA-Z0-9()_]", column_name)
        if x is None:
            raise Exception("Unsupported column name {}. Column names must be alphanumeric or _, (, ).".format(
                column_name))
        # Distinct tokens for "(" and ")" keep the mapping reversible.
        column_name = column_name.replace(" ", "_").replace("(", "_0_").replace(")", "_1_")
        return column_name
    def _sanitize_metadata(self, metadata):
        """Deep-copy metadata with sanitised column names; mirror columns on df.

        Returns (sanitised_metadata, original_column_names). Also appends a
        "primary_key" column when no key column is declared.
        """
        metadata = copy.deepcopy(metadata)
        table_names = list(metadata.m_tables.keys())
        if len(table_names) > 1:
            raise Exception("Only one table is supported for PandasReader. {} found.".format(len(table_names)))
        table_name = table_names[0]
        original_column_names = list(metadata.m_tables[table_name].m_columns)
        has_key = False
        for column_name in original_column_names:
            sanitized_column_name = self._sanitize_column_name(column_name)
            metadata.m_tables[table_name].m_columns[sanitized_column_name] = metadata.m_tables[table_name].m_columns[column_name]
            metadata.m_tables[table_name].m_columns[sanitized_column_name].name = sanitized_column_name
            has_key = has_key or metadata.m_tables[table_name].m_columns[sanitized_column_name].is_key
            # Duplicate the data under the sanitised name so queries resolve.
            self.df[sanitized_column_name] = self.df[column_name]
            if column_name != sanitized_column_name:
                del metadata.m_tables[table_name].m_columns[column_name]
        if not has_key: # TODO handle this in metadata to avoid circ dep
            key = "primary_key"
            self.df[key] = range(len(self.df))
            from opendp.smartnoise.metadata.collection import Int
            metadata.m_tables[table_name].m_columns[key] = Int(key,
                                                               minval=0,
                                                               maxval=len(self.df),
                                                               is_key=True)
        return metadata, original_column_names
    def _sanitize_query(self, query):
        # Rewrite every original column reference (bare, single- or
        # double-quoted) to its sanitised form before execution.
        for column in self.original_column_names:
            sanitized_column = self._sanitize_column_name(column)
            for column_form in ["'{}'".format(column), '"{}"'.format(column), column]:
                query = query.replace(column_form, sanitized_column)
        return query
    def db_name(self):
        """
        Get the database associated with this connection
        """
        sql = "SELECT current_database();"
        dbname = self.execute(sql)[1][0]
        return dbname
    def execute(self, query):
        """
        Executes a raw SQL string against the database and returns
        tuples for rows. This will NOT fix the query to target the
        specific SQL dialect. Call execute_typed to fix dialect.
        """
        query = self._sanitize_query(query)
        from pandasql import sqldf
        if not isinstance(query, str):
            raise ValueError("Please pass strings to execute.  To execute ASTs, use execute_typed.")
        table_names = list(self.metadata.m_tables.keys())
        if len(table_names) > 1:
            raise Exception("PandasReader only supports one table, {} found.".format(len(table_names)))
        # pandasql resolves table names against local variables, so the
        # DataFrame is bound to a fixed, collision-unlikely local name.
        df_name = "df_for_diffpriv1234"
        table_name = table_names[0]
        def clean_query(query):
            # Map remaining unsafe column spellings and the logical table
            # name onto the local DataFrame variable.
            for column in self.metadata.m_tables[table_name].m_columns:
                if " " in column or "(" in column or ")" in column:
                    new_column_name = column.replace(" ", "_").replace("(", "_").replace(")", "_")
                    query = query.replace(column, new_column_name)
                    query = query.replace("'{}'".format(new_column_name), new_column_name)
            return query.replace(table_name, df_name)
        for column in self.metadata.m_tables[table_name].m_columns:
            new_column_name = column.replace(" ", "_").replace("(", "_").replace(")", "_")
            if self.metadata.m_tables[table_name].m_columns[column].is_key:
                if column not in self.df:
                    self.df[column] = range(len(self.df))
            else:
                if new_column_name not in self.df:
                    self.df[new_column_name] = self.df[column]
        df_for_diffpriv1234 = self.df
        q_result = sqldf(clean_query(query), locals())
        # First row is the header tuple; remaining rows drop the pandas index.
        return [tuple([col for col in q_result.columns])] + [val[1:] for val in q_result.itertuples()]
| StarcoderdataPython |
4947772 | <filename>BasicPythonPrograms/PythonOops.py<gh_stars>0
# Demonstration A Class
class Dog:
    """Demonstration class: two class-level attributes and one method."""

    # Class attributes shared by every Dog instance.
    attr1 = "mammal"
    attr2 = "Dog"

    def fun(self):
        """Print both class attributes (original wording kept verbatim)."""
        print("I am a ", self.attr1)
        print("I ma a ", self.attr2)  # original typo preserved intentionally
#Driver code
# Object instantiation
# Demonstrates attribute access and a method call on the instance.
Rodger =Dog()
print(Rodger.attr1)
Rodger.fun()
| StarcoderdataPython |
384966 | from pyjexl import JEXL
class QuestionJexl(JEXL):
    """JEXL evaluation environment for question expressions.

    Currently a plain passthrough to pyjexl's JEXL; custom transforms are
    planned (see the TODO below) but not yet registered.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # TODO: add transforms
        # self.add_transform("task", lambda spec: spec)
| StarcoderdataPython |
6514648 | <reponame>ads-ad-itcenter/qunomon.forked
# Copyright © 2019 National Institute of Advanced Industrial Science and Technology (AIST). All rights reserved.
from typing import List
from marshmallow import fields
from . import Result, ResultSchema, BaseSchema
class Format:
    """Plain value object describing a single format entry."""

    def __init__(self, id_: int, type_: str, format_: str) -> None:
        # Trailing underscores on the parameters avoid shadowing builtins.
        self.id = id_
        self.format = format_
        self.type = type_
class GetFormatRes:
    """Response payload pairing an API Result with its list of formats."""

    def __init__(self, result: Result, formats: List[Format]) -> None:
        self.formats = formats
        self.result = result
class FormatSchema(BaseSchema):
    """Marshmallow schema for Format; JSON field names are PascalCase."""
    __model__ = Format
    id = fields.Int(data_key='Id', required=True)
    type = fields.Str(data_key='Type', required=True)
    format = fields.Str(data_key='Format', required=True)
class GetFormatResSchema(BaseSchema):
    """Marshmallow schema for GetFormatRes: a Result plus many Formats."""
    __model__ = GetFormatRes
    result = fields.Nested(ResultSchema, data_key='Result')
    formats = fields.Nested(FormatSchema, data_key='Formats', many=True)
| StarcoderdataPython |
9658214 | <reponame>EnjoyLifeFund/macHighSierra-py36-pkgs
# test_smoke.py -- Functional tests for the Swift backend.
# Copyright (C) 2013 eNovance SAS <<EMAIL>>
#
# Author: <NAME> <<EMAIL>>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Start functional tests
A Swift installation must be available before
starting those tests. The account and authentication method used
during this functional tests must be changed in the configuration file
passed as environment variable.
The container used to create a fake repository is defined
in cls.fakerepo and will be deleted after the tests.
DULWICH_SWIFT_CFG=/tmp/conf.cfg PYTHONPATH=. python -m unittest \
dulwich.tests_swift.test_smoke
"""
import os
import unittest
import tempfile
import shutil
import gevent
from gevent import monkey
monkey.patch_all()
from dulwich import ( # noqa:E402
server,
repo,
index,
client,
objects,
)
from dulwich.contrib import swift # noqa:E402
class DulwichServer():
    """Run a TCPGitServer (Swift backend) inside a gevent greenlet."""

    def __init__(self, backend, port):
        self.backend = backend
        self.port = port

    def run(self):
        # Serve from a greenlet so the test process keeps control.
        self.server = server.TCPGitServer(
            self.backend, 'localhost', port=self.port)
        self.job = gevent.spawn(self.server.serve_forever)

    def stop(self):
        self.server.shutdown()
        gevent.joinall((self.job,))
class SwiftSystemBackend(server.Backend):
    """Backend that resolves every repository path against Swift storage."""

    def open_repository(self, path):
        # Configuration is (re)loaded on every open, matching the original.
        conf = swift.load_conf()
        return swift.SwiftRepo(path, conf=conf)
class SwiftRepoSmokeTest(unittest.TestCase):
    """Functional smoke tests: push/fetch against a Swift-backed git server.

    Requires a live Swift installation (see the module docstring); the
    fake repository container is created per test and deleted afterwards.
    NOTE(review): several author strings below look like redaction
    artifacts ('<EMAIL>', 'f<EMAIL>') -- restore real values before use.
    """
    @classmethod
    def setUpClass(cls):
        # One TCP git server (Swift backend) shared by every test.
        cls.backend = SwiftSystemBackend()
        cls.port = 9148
        cls.server_address = 'localhost'
        cls.fakerepo = 'fakerepo'
        cls.th_server = DulwichServer(cls.backend, cls.port)
        cls.th_server.run()
        cls.conf = swift.load_conf()
    @classmethod
    def tearDownClass(cls):
        cls.th_server.stop()
    def setUp(self):
        # Fresh Swift connector and scratch dir; best-effort cleanup of
        # leftovers from a previous failed run.
        self.scon = swift.SwiftConnector(self.fakerepo, self.conf)
        if self.scon.test_root_exists():
            try:
                self.scon.del_root()
            except swift.SwiftException:
                pass
        self.temp_d = tempfile.mkdtemp()
        if os.path.isdir(self.temp_d):
            shutil.rmtree(self.temp_d)
    def tearDown(self):
        if self.scon.test_root_exists():
            try:
                self.scon.del_root()
            except swift.SwiftException:
                pass
        if os.path.isdir(self.temp_d):
            shutil.rmtree(self.temp_d)
    def test_init_bare(self):
        # A bare init must create the container with refs and pack storage.
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        self.assertTrue(self.scon.test_root_exists())
        obj = self.scon.get_container_objects()
        filtered = [o for o in obj if o['name'] == 'info/refs'
                    or o['name'] == 'objects/pack']
        self.assertEqual(len(filtered), 2)
    def test_clone_bare(self):
        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        remote_refs = tcp_client.fetch(self.fakerepo, local_repo)
        # The remote repo is empty (no refs retreived)
        self.assertEqual(remote_refs, None)
    def test_push_commit(self):
        # Push a single commit and verify the remote master ref matches.
        def determine_wants(*args):
            return {"refs/heads/master": local_repo.refs["HEAD"]}

        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        # Nothing in the staging area
        local_repo.do_commit('Test commit', 'fbo@localhost')
        sha = local_repo.refs.read_loose_ref('refs/heads/master')
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_contents)
        swift_repo = swift.SwiftRepo("fakerepo", self.conf)
        remote_sha = swift_repo.refs.read_loose_ref('refs/heads/master')
        self.assertEqual(sha, remote_sha)
    def test_push_branch(self):
        # Same as test_push_commit but on a non-default branch.
        def determine_wants(*args):
            return {"refs/heads/mybranch":
                    local_repo.refs["refs/heads/mybranch"]}

        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        # Nothing in the staging area
        local_repo.do_commit('Test commit', 'fbo@localhost',
                             ref='refs/heads/mybranch')
        sha = local_repo.refs.read_loose_ref('refs/heads/mybranch')
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack("/fakerepo",
                             determine_wants,
                             local_repo.object_store.generate_pack_contents)
        swift_repo = swift.SwiftRepo(self.fakerepo, self.conf)
        remote_sha = swift_repo.refs.read_loose_ref('refs/heads/mybranch')
        self.assertEqual(sha, remote_sha)
    def test_push_multiple_branch(self):
        # Push three branches in one pack and compare all resulting shas.
        def determine_wants(*args):
            return {"refs/heads/mybranch":
                    local_repo.refs["refs/heads/mybranch"],
                    "refs/heads/master":
                    local_repo.refs["refs/heads/master"],
                    "refs/heads/pullr-108":
                    local_repo.refs["refs/heads/pullr-108"]}

        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        # Nothing in the staging area
        local_shas = {}
        remote_shas = {}
        for branch in ('master', 'mybranch', 'pullr-108'):
            local_shas[branch] = local_repo.do_commit(
                'Test commit %s' % branch, 'fbo@localhost',
                ref='refs/heads/%s' % branch)
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_contents)
        swift_repo = swift.SwiftRepo("fakerepo", self.conf)
        for branch in ('master', 'mybranch', 'pullr-108'):
            remote_shas[branch] = swift_repo.refs.read_loose_ref(
                'refs/heads/%s' % branch)
        self.assertDictEqual(local_shas, remote_shas)
    def test_push_data_branch(self):
        # Push actual file contents and inspect the resulting tree/blobs.
        def determine_wants(*args):
            return {"refs/heads/master": local_repo.refs["HEAD"]}

        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        os.mkdir(os.path.join(self.temp_d, "dir"))
        files = ('testfile', 'testfile2', 'dir/testfile3')
        i = 0
        for f in files:
            open(os.path.join(self.temp_d, f), 'w').write("DATA %s" % i)
            i += 1
        local_repo.stage(files)
        local_repo.do_commit('Test commit', '<EMAIL>',
                             ref='refs/heads/master')
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_contents)
        swift_repo = swift.SwiftRepo("fakerepo", self.conf)
        commit_sha = swift_repo.refs.read_loose_ref('refs/heads/master')
        otype, data = swift_repo.object_store.get_raw(commit_sha)
        commit = objects.ShaFile.from_raw_string(otype, data)
        otype, data = swift_repo.object_store.get_raw(commit._tree)
        tree = objects.ShaFile.from_raw_string(otype, data)
        objs = tree.items()
        objs_ = []
        for tree_entry in objs:
            objs_.append(swift_repo.object_store.get_raw(tree_entry.sha))
        # Blob
        self.assertEqual(objs_[1][1], 'DATA 0')
        self.assertEqual(objs_[2][1], 'DATA 1')
        # Tree
        self.assertEqual(objs_[0][0], 2)
    def test_clone_then_push_data(self):
        # Round trip: reuse the pushed repo, clone it, verify the working
        # tree, then push additional files back.
        self.test_push_data_branch()
        shutil.rmtree(self.temp_d)
        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        remote_refs = tcp_client.fetch(self.fakerepo, local_repo)
        files = (os.path.join(self.temp_d, 'testfile'),
                 os.path.join(self.temp_d, 'testfile2'))
        local_repo["HEAD"] = remote_refs["refs/heads/master"]
        indexfile = local_repo.index_path()
        tree = local_repo["HEAD"].tree
        index.build_index_from_tree(local_repo.path, indexfile,
                                    local_repo.object_store, tree)
        for f in files:
            self.assertEqual(os.path.isfile(f), True)

        def determine_wants(*args):
            return {"refs/heads/master": local_repo.refs["HEAD"]}

        os.mkdir(os.path.join(self.temp_d, "test"))
        files = ('testfile11', 'testfile22', 'test/testfile33')
        i = 0
        for f in files:
            open(os.path.join(self.temp_d, f), 'w').write("DATA %s" % i)
            i += 1
        local_repo.stage(files)
        local_repo.do_commit('Test commit', 'fbo@localhost',
                             ref='refs/heads/master')
        tcp_client.send_pack("/fakerepo",
                             determine_wants,
                             local_repo.object_store.generate_pack_contents)
    def test_push_remove_branch(self):
        # Deleting a branch is done by pushing ZERO_SHA for its ref.
        def determine_wants(*args):
            return {"refs/heads/pullr-108": objects.ZERO_SHA,
                    "refs/heads/master":
                    local_repo.refs['refs/heads/master'],
                    "refs/heads/mybranch":
                    local_repo.refs['refs/heads/mybranch'],
                    }

        self.test_push_multiple_branch()
        local_repo = repo.Repo(self.temp_d)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_contents)
        swift_repo = swift.SwiftRepo("fakerepo", self.conf)
        self.assertNotIn('refs/heads/pullr-108', swift_repo.refs.allkeys())
    def test_push_annotated_tag(self):
        # Push a commit plus an annotated tag and verify the tag object.
        def determine_wants(*args):
            return {"refs/heads/master": local_repo.refs["HEAD"],
                    "refs/tags/v1.0": local_repo.refs["refs/tags/v1.0"]}

        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        # Nothing in the staging area
        sha = local_repo.do_commit('Test commit', 'f<EMAIL>')
        otype, data = local_repo.object_store.get_raw(sha)
        commit = objects.ShaFile.from_raw_string(otype, data)
        tag = objects.Tag()
        tag.tagger = "<EMAIL>"
        tag.message = "Annotated tag"
        tag.tag_timezone = objects.parse_timezone('-0200')[0]
        tag.tag_time = commit.author_time
        tag.object = (objects.Commit, commit.id)
        tag.name = "v0.1"
        local_repo.object_store.add_object(tag)
        local_repo.refs['refs/tags/v1.0'] = tag.id
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_contents)
        swift_repo = swift.SwiftRepo(self.fakerepo, self.conf)
        tag_sha = swift_repo.refs.read_loose_ref('refs/tags/v1.0')
        otype, data = swift_repo.object_store.get_raw(tag_sha)
        rtag = objects.ShaFile.from_raw_string(otype, data)
        self.assertEqual(rtag.object[1], commit.id)
        self.assertEqual(rtag.id, tag.id)
| StarcoderdataPython |
1894663 | from uuid import uuid4
import arrow
import pytest
from databases import Database
from httpx import AsyncClient
from cyberbox import orm
from cyberbox.routes.auth import crypt_context
@pytest.fixture()
async def create_users(db: Database):
    """Seed the users table: one regular, one disabled and one admin user.

    NOTE(review): the "<PASSWORD>" tokens below are redaction artifacts
    (the bare `hashed_password=<PASSWORD>` line is not valid Python);
    restore the real values before running this fixture.
    """
    hashed_password = crypt_context.hash("<PASSWORD>")
    values = [
        dict(
            uid=uuid4(),
            username=username,
            disabled=disabled,
            hashed_password=<PASSWORD>,
            created=arrow.utcnow().datetime,
            is_admin=is_admin,
        )
        for username, disabled, is_admin in [
            ("test_user", False, False),
            ("disabled_user", True, False),
            ("active_user", False, False),
            ("admin_user", False, True),
        ]
    ]
    await db.execute_many(orm.User.insert(), values)
async def login_as(username: str, client: AsyncClient) -> tuple:
    """Authenticate `username` and return (username, token, auth headers)."""
    payload = dict(username=username, password="<PASSWORD>")
    response = await client.post("/auth/login", data=payload)
    assert response.status_code == 200
    body = response.json()
    assert isinstance(body, dict)
    assert set(body.keys()) == {"token_type", "access_token"}
    assert body["token_type"] == "bearer"
    token = body["access_token"]
    assert isinstance(token, str)
    return username, token, {"Authorization": f"Bearer {token}"}
@pytest.fixture()
async def logged_user(create_users, client: AsyncClient):
    """ Login with known user produces access token. """
    return await login_as("test_user", client)
@pytest.fixture()
async def active_user(create_users, client: AsyncClient):
    """Log in as the enabled 'active_user' and return (name, token, headers)."""
    return await login_as("active_user", client)
@pytest.fixture()
async def disabled_user(create_users, client: AsyncClient):
    """Log in as the 'disabled_user' account and return (name, token, headers)."""
    return await login_as("disabled_user", client)
@pytest.fixture()
async def admin_user(create_users, client: AsyncClient):
    """Log in as the 'admin_user' account and return (name, token, headers)."""
    return await login_as("admin_user", client)
| StarcoderdataPython |
8002934 | <gh_stars>0
from constructs import Construct
from random import randint, randrange
from aws_cdk import (
Duration,
Stack,
aws_iam as iam,
aws_sqs as sqs,
aws_sns as sns,
aws_sns_subscriptions as subs,
aws_apigateway as apigateway,
aws_s3 as s3,
aws_stepfunctions as _aws_stepfunctions,
aws_stepfunctions_tasks as _aws_stepfunctions_tasks,
aws_lambda as _lambda,
aws_dynamodb as dynamodb,
aws_cognito as cognito
)
class SampleStack(Stack):
    """CDK stack: upload API + S3 bucket + two Step Functions pipelines
    (Rekognition and Transcribe->Comprehend) feeding a DynamoDB table,
    plus a Cognito user pool for authentication.
    """
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # API Gateway with a PUT /bucket endpoint for uploads.
        api = apigateway.RestApi(self, "api-gateway-upload-to-s3")
        endpoint = api.root.add_resource("bucket")
        endpoint.add_method("PUT")

        # NOTE(review): the random suffix makes the construct id change on
        # every synth, which would replace the bucket -- confirm intended.
        bucket = s3.Bucket(self, "videobucket-"+str(randint(100, 999)))

        # --- Rekognition pipeline lambdas ---
        submit_lambda = _lambda.Function(self, 'submitLambda',
                                         handler='lambda_function.lambda_handler',
                                         runtime=_lambda.Runtime.PYTHON_3_8,
                                         code=_lambda.Code.from_asset('lambdas/trigger_aws_rekognition'))

        status_lambda = _lambda.Function(self, 'statusLambda',
                                         handler='lambda_function.lambda_handler',
                                         runtime=_lambda.Runtime.PYTHON_3_8,
                                         code=_lambda.Code.from_asset('lambdas/check_rekognition_status'))

        # Step Functions tasks: submit job, poll status every 45s.
        submit_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self, "Submit Rekognition Job",
            lambda_function=submit_lambda,
            output_path="$.Payload",
        )

        wait_job = _aws_stepfunctions.Wait(
            self, "Wait 45 Seconds",
            time=_aws_stepfunctions.WaitTime.duration(
                Duration.seconds(45))
        )

        status_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self, "Get Status",
            lambda_function=status_lambda,
            output_path="$.Payload",
        )

        fail_job = _aws_stepfunctions.Fail(
            self, "Rekognition Fail",
            cause='Rekognition Job Failed',
            error='Rekognition returned FAILED'
        )

        submit_to_dynamo_lambda = _lambda.Function(self, 'rekogSuccessLambda',
                                                   handler='lambda_function.lambda_handler',
                                                   runtime=_lambda.Runtime.PYTHON_3_8,
                                                   code=_lambda.Code.from_asset('lambdas/push_rekognition_results_to_dynamodb'))

        succeed_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self, "Push rekognition to Dynamo Job",
            lambda_function=submit_to_dynamo_lambda,
            output_path="$.Payload",
        )

        # Create Chain
        # submit -> wait -> poll -> (FAILED | SUCCEEDED | keep waiting)
        definition = submit_job.next(wait_job) \
            .next(status_job) \
            .next(_aws_stepfunctions.Choice(self, 'Rekognition Job Complete?')
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'FAILED'), fail_job)
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'SUCCEEDED'), succeed_job)
                  .otherwise(wait_job))

        # Create state machine
        sm = _aws_stepfunctions.StateMachine(
            self, "StateMachine1",
            definition=definition,
            timeout=Duration.minutes(15),
        )

        ########
        # --- Transcribe pipeline (same submit/poll/choice shape) ---
        submit_lambda_tran = _lambda.Function(self, 'submitLambdaTran',
                                              handler='lambda_function.lambda_handler',
                                              runtime=_lambda.Runtime.PYTHON_3_8,
                                              code=_lambda.Code.from_asset('lambdas/trigger_aws_transcribe'))

        status_lambda_tran = _lambda.Function(self, 'statusLambdaTran',
                                              handler='lambda_function.lambda_handler',
                                              runtime=_lambda.Runtime.PYTHON_3_8,
                                              code=_lambda.Code.from_asset('lambdas/check_transcribe_status'))

        # Step functions Definition
        submit_job_tran = _aws_stepfunctions_tasks.LambdaInvoke(
            self, "Submit Transcription Job",
            lambda_function=submit_lambda_tran,
            output_path="$.Payload",
        )

        wait_job_tran = _aws_stepfunctions.Wait(
            self, "Wait 30 Seconds",
            time=_aws_stepfunctions.WaitTime.duration(
                Duration.seconds(30))
        )

        status_job_tran = _aws_stepfunctions_tasks.LambdaInvoke(
            self, "Get Transcription Status",
            lambda_function=status_lambda_tran,
            output_path="$.Payload",
        )

        fail_job_tran = _aws_stepfunctions.Fail(
            self, "Transcribe Fail",
            cause='Transcription Job Failed',
            error='Transcription returned FAILED'
        )

        # On success, results flow into Comprehend (which writes to Dynamo).
        submit_to_dynamo_lambda_tran = _lambda.Function(self, 'TranscriptionSuccessLambda',
                                                        handler='lambda_function.lambda_handler',
                                                        runtime=_lambda.Runtime.PYTHON_3_8,
                                                        code=_lambda.Code.from_asset(
                                                            'lambdas/trigger_aws_comprehend'))

        succeed_job_tran = _aws_stepfunctions_tasks.LambdaInvoke(
            self, "Push Transcription results to Dynamo Job",
            lambda_function=submit_to_dynamo_lambda_tran,
            output_path="$.Payload",
        )

        # Create Chain
        definition_tran = submit_job_tran.next(wait_job_tran) \
            .next(status_job_tran) \
            .next(_aws_stepfunctions.Choice(self, 'Transcribe Job Complete?')
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'FAILED'), fail_job_tran)
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'SUCCEEDED'), succeed_job_tran)
                  .otherwise(wait_job_tran))

        # Create state machine
        sm_tran = _aws_stepfunctions.StateMachine(
            self, "StateMachineTranscribe",
            definition=definition_tran,
            timeout=Duration.minutes(15),
        )

        # Results table keyed by a string id.
        table = dynamodb.Table(self, "media_store",
                               partition_key=dynamodb.Attribute(name="id", type=dynamodb.AttributeType.STRING)
                               )

        # Cognito user pool + OAuth app client (authorization-code flow).
        pool = cognito.UserPool(self, "userPool")
        pool.add_client("user-pool-app-client",
                        o_auth=cognito.OAuthSettings(
                            flows=cognito.OAuthFlows(
                                authorization_code_grant=True
                            ),
                            scopes=[cognito.OAuthScope.OPENID],
                            callback_urls=["https://domain.com/welcome"],
                            logout_urls=["https://domain.com/signin"]
                        )
                        )
| StarcoderdataPython |
9690580 | <gh_stars>1-10
"""Core code to be used for scheduling a task DAG with HEFT"""
import argparse
from collections import deque, namedtuple
from enum import Enum
import logging
from math import inf
import sys
from types import SimpleNamespace
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from gym_ds3.schedulers.heuristic.heft.gantt import showGanttChart
# Module-level logger; handlers/levels are configured by the consumer.
logger = logging.getLogger('heft')
# One scheduling decision: task id, start time, end time, processor index.
ScheduleEvent = namedtuple('ScheduleEvent', 'task start end proc')
"""
Default computation matrix - taken from Topcuoglu 2002 HEFT paper
computation matrix: v x q matrix with v tasks and q PEs
"""
W0 = np.array([
    [14, 16, 9],
    [13, 19, 18],
    [11, 13, 19],
    [13, 8, 17],
    [12, 13, 10],
    [13, 16, 9],
    [7, 15, 11],
    [5, 11, 14],
    [18, 12, 20],
    [21, 7, 16]
])
"""
Default communication matrix - not listed in Topcuoglu 2002 HEFT paper
communication matrix: q x q matrix with q PEs
Note that a communication cost of 0 is used for a given processor to itself
"""
C0 = np.array([
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0]
])
class RankMetric(Enum):
    """Strategy for aggregating per-processor costs during rank-u computation.

    MEAN averages computation costs across processors (see _compute_ranku);
    the semantics of WORST/BEST/EDP are handled by code outside this view.
    """
    MEAN = "MEAN"
    WORST = "WORST"
    BEST = "BEST"
    EDP = "EDP"
def schedule_dag(dag, computation_matrix=W0, communication_matrix=C0, proc_schedules=None, time_offset=0, relabel_nodes=True, rank_metric=RankMetric.MEAN, **kwargs):
    """
    Given an application DAG and a set of matrices specifying PE bandwidth and (task, pe) execution times, computes the HEFT schedule
    of that DAG onto that set of PEs

    :param dag: networkx DiGraph with a single root and single terminal node; edge 'weight' attributes hold communication volumes
    :param computation_matrix: v x q matrix of (task, PE) execution times
    :param communication_matrix: q x q PE-to-PE cost matrix (0 on the diagonal)
    :param proc_schedules: optional dict of existing per-PE schedules to extend; NOTE: mutated in place
    :param time_offset: earliest start time for newly scheduled tasks
    :param relabel_nodes: shift node ids past any pre-existing jobs when True
    :param rank_metric: RankMetric aggregation strategy for rank-u
    :return: tuple (proc_schedules, task_schedules, dict_output, ranks)
    """
    if proc_schedules is None:  # fixed: identity comparison (was `== None`)
        proc_schedules = {}

    # Bundle the scheduler state so helpers (_compute_ranku/_compute_eft)
    # can share it without a class.
    _self = {
        'computation_matrix': computation_matrix,
        'communication_matrix': communication_matrix,
        'task_schedules': {},
        'proc_schedules': proc_schedules,
        'numExistingJobs': 0,
        'time_offset': time_offset,
        'root_node': None
    }
    _self = SimpleNamespace(**_self)
    ranks = {}

    for proc in proc_schedules:
        _self.numExistingJobs = _self.numExistingJobs + len(proc_schedules[proc])

    if relabel_nodes:
        dag = nx.relabel_nodes(dag, dict(map(lambda node: (node, node+_self.numExistingJobs), list(dag.nodes()))))
    else:
        #Negates any offsets that would have been needed had the jobs been relabeled
        _self.numExistingJobs = 0

    for i in range(_self.numExistingJobs + len(_self.computation_matrix)):
        _self.task_schedules[i] = None
    for i in range(len(_self.communication_matrix)):
        if i not in _self.proc_schedules:
            _self.proc_schedules[i] = []

    for proc in proc_schedules:
        for schedule_event in proc_schedules[proc]:
            _self.task_schedules[schedule_event.task] = schedule_event

    # Nodes with no successors cause the any expression to be empty
    root_node = [node for node in dag.nodes() if not any(True for _ in dag.predecessors(node))]
    assert len(root_node) == 1, f"Expected a single root node, found {len(root_node)}"
    root_node = root_node[0]
    _self.root_node = root_node

    logger.debug(""); logger.debug("====================== Performing Rank-U Computation ======================\n"); logger.debug("")
    _compute_ranku(_self, dag, metric=rank_metric, **kwargs)

    logger.debug(""); logger.debug("====================== Computing EFT for each (task, processor) pair and scheduling in order of decreasing Rank-U ======================"); logger.debug("")
    # Schedule tasks in decreasing rank-u order (classic HEFT ordering).
    sorted_nodes = sorted(dag.nodes(), key=lambda node: dag.nodes()[node]['ranku'], reverse=True)
    for n in dag.nodes():
        ranks[n] = dag.nodes()[n]['ranku']
    if sorted_nodes[0] != root_node:
        logger.debug("Root node was not the first node in the sorted list. Must be a zero-cost and zero-weight placeholder node. Rearranging it so it is scheduled first\n")
        idx = sorted_nodes.index(root_node)
        sorted_nodes[idx], sorted_nodes[0] = sorted_nodes[0], sorted_nodes[idx]

    for node in sorted_nodes:
        if _self.task_schedules[node] is not None:
            continue
        # Greedily pick the processor giving the earliest finish time.
        minTaskSchedule = ScheduleEvent(node, inf, inf, -1)
        for proc in range(len(communication_matrix)):
            taskschedule = _compute_eft(_self, dag, node, proc)
            if (taskschedule.end < minTaskSchedule.end):
                minTaskSchedule = taskschedule
        _self.task_schedules[node] = minTaskSchedule
        _self.proc_schedules[minTaskSchedule.proc].append(minTaskSchedule)
        _self.proc_schedules[minTaskSchedule.proc] = sorted(_self.proc_schedules[minTaskSchedule.proc], key=lambda schedule_event: schedule_event.end)
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('\n')
            for proc, jobs in _self.proc_schedules.items():
                logger.debug(f"Processor {proc} has the following jobs:")
                logger.debug(f"\t{jobs}")
            logger.debug('\n')
        # Sanity check: no overlapping jobs on any processor.
        for proc in range(len(_self.proc_schedules)):
            for job in range(len(_self.proc_schedules[proc])-1):
                first_job = _self.proc_schedules[proc][job]
                second_job = _self.proc_schedules[proc][job+1]
                assert first_job.end <= second_job.start, \
                f"Jobs on a particular processor must finish before the next can begin, but job {first_job.task} on processor {first_job.proc} ends at {first_job.end} and its successor {second_job.task} starts at {second_job.start}"

    # dict_output: task -> (processor, slot index, [immediate predecessor on
    # that processor, if it had nonzero duration]).
    dict_output = {}
    for proc_num, proc_tasks in _self.proc_schedules.items():
        for idx, task in enumerate(proc_tasks):
            if idx > 0 and (proc_tasks[idx-1].end - proc_tasks[idx-1].start > 0):
                dict_output[task.task] = (proc_num, idx, [proc_tasks[idx-1].task])
            else:
                dict_output[task.task] = (proc_num, idx, [])

    return _self.proc_schedules, _self.task_schedules, dict_output, ranks
def _compute_ranku(_self, dag, metric=RankMetric.MEAN, **kwargs):
    """
    Uses a basic BFS approach to traverse upwards through the graph assigning ranku along the way

    Walks the DAG from its single terminal node toward the roots, storing the
    upward rank of every node in the node attribute "ranku".  As a side effect
    every edge also receives an "avgweight" attribute (its weight normalized by
    the average inter-processor communication cost).

    `metric` selects how a node's own cost and its successors' contributions are
    aggregated (MEAN, WORST, BEST or EDP); the EDP metric additionally requires
    an "energy_dict" entry in `kwargs`.
    """
    terminal_node = [node for node in dag.nodes() if not any(True for _ in dag.successors(node))]
    assert len(terminal_node) == 1, f"Expected a single terminal node, found {len(terminal_node)}"
    terminal_node = terminal_node[0]
    #TODO: Should this be configurable?
    #avgCommunicationCost = np.mean(_self.communication_matrix[np.where(_self.communication_matrix > 0)])
    # Average communication cost over the off-diagonal entries only; the
    # diagonal (same-processor "transfers") is masked out.
    diagonal_mask = np.ones(_self.communication_matrix.shape, dtype=bool)
    np.fill_diagonal(diagonal_mask, 0)
    avgCommunicationCost = np.mean(_self.communication_matrix[diagonal_mask])
    for edge in dag.edges():
        logger.debug(f"Assigning {edge}'s average weight based on average communication cost. {float(dag.get_edge_data(*edge)['weight'])} => {float(dag.get_edge_data(*edge)['weight']) / avgCommunicationCost}")
        nx.set_edge_attributes(dag, { edge: float(dag.get_edge_data(*edge)['weight']) / avgCommunicationCost }, 'avgweight')
    # Utilize a masked array so that np.mean, etc, calculations ignore the entries that are inf
    comp_matrix_masked = np.ma.masked_where(_self.computation_matrix == inf, _self.computation_matrix)
    # Base case: the terminal node's ranku is just its own mean computation cost.
    nx.set_node_attributes(dag, { terminal_node: np.mean(comp_matrix_masked[terminal_node-_self.numExistingJobs]) }, "ranku")
    visit_queue = deque(dag.predecessors(terminal_node))
    while visit_queue:
        node = visit_queue.pop()
        # A node is only rankable once all of its successors are ranked; if it
        # is not ready yet, push it to the back of the queue and try another.
        while _node_can_be_processed(_self, dag, node) is not True:
            try:
                node2 = visit_queue.pop()
            except IndexError:
                raise RuntimeError(f"Node {node} cannot be processed, and there are no other nodes in the queue to process instead!")
            visit_queue.appendleft(node)
            node = node2
        logger.debug(f"Assigning ranku for node: {node}")
        if metric == RankMetric.MEAN:
            # Classic HEFT rank-u: mean computation cost plus the largest
            # (normalized edge weight + successor ranku) over all successors.
            max_successor_ranku = -1
            for succnode in dag.successors(node):
                logger.debug(f"\tLooking at successor node: {succnode}")
                logger.debug(f"\tThe edge weight from node {node} to node {succnode} is {dag[node][succnode]['avgweight']}, and the ranku for node {node} is {dag.nodes()[succnode]['ranku']}")
                val = float(dag[node][succnode]['avgweight']) + dag.nodes()[succnode]['ranku']
                if val > max_successor_ranku:
                    max_successor_ranku = val
            assert max_successor_ranku >= 0, f"Expected maximum successor ranku to be greater or equal to 0 but was {max_successor_ranku}"
            nx.set_node_attributes(dag, { node: np.mean(comp_matrix_masked[node-_self.numExistingJobs]) + max_successor_ranku }, "ranku")
        elif metric == RankMetric.WORST:
            # Pessimistic variant: pair every node with its slowest processor
            # and use the matching point-to-point communication cost.
            max_successor_ranku = -1
            max_node_idx = np.where(comp_matrix_masked[node-_self.numExistingJobs] == max(comp_matrix_masked[node-_self.numExistingJobs]))[0][0]
            logger.debug(f"\tNode {node} has maximum computation cost of {comp_matrix_masked[node-_self.numExistingJobs][max_node_idx]} on processor {max_node_idx}")
            for succnode in dag.successors(node):
                logger.debug(f"\tLooking at successor node: {succnode}")
                max_succ_idx = np.where(comp_matrix_masked[succnode-_self.numExistingJobs] == max(comp_matrix_masked[succnode-_self.numExistingJobs]))[0][0]
                logger.debug(f"\tNode {succnode} has maximum computation cost of {comp_matrix_masked[succnode-_self.numExistingJobs][max_succ_idx]} on processor {max_succ_idx}")
                val = _self.communication_matrix[max_node_idx, max_succ_idx] + dag.nodes()[succnode]['ranku']
                if val > max_successor_ranku:
                    max_successor_ranku = val
            assert max_successor_ranku >= 0, f"Expected maximum successor ranku to be greater or equal to 0 but was {max_successor_ranku}"
            nx.set_node_attributes(dag, { node: comp_matrix_masked[node-_self.numExistingJobs, max_node_idx] + max_successor_ranku}, "ranku")
        elif metric == RankMetric.BEST:
            # Optimistic variant: pair every node with its fastest processor.
            min_successor_ranku = inf
            min_node_idx = np.where(comp_matrix_masked[node-_self.numExistingJobs] == min(comp_matrix_masked[node-_self.numExistingJobs]))[0][0]
            logger.debug(f"\tNode {node} has minimum computation cost on processor {min_node_idx}")
            for succnode in dag.successors(node):
                logger.debug(f"\tLooking at successor node: {succnode}")
                # NOTE(review): unlike the WORST branch, this np.where result is
                # not reduced with [0][0], so min_succ_idx is a tuple of arrays
                # here -- confirm whether that is intentional.
                min_succ_idx = np.where(comp_matrix_masked[succnode-_self.numExistingJobs] == min(comp_matrix_masked[succnode-_self.numExistingJobs]))
                logger.debug(f"\tThis successor node has minimum computation cost on processor {min_succ_idx}")
                val = _self.communication_matrix[min_node_idx, min_succ_idx] + dag.nodes()[succnode]['ranku']
                if val < min_successor_ranku:
                    min_successor_ranku = val
            assert min_successor_ranku >= 0, f"Expected minimum successor ranku to be greater or equal to 0 but was {min_successor_ranku}"
            nx.set_node_attributes(dag, { node: comp_matrix_masked[node-_self.numExistingJobs, min_node_idx] + min_successor_ranku}, "ranku")
        elif metric == RankMetric.EDP:
            # Energy-delay-product variant: scale the mean computation cost by
            # the node's mean energy consumption before adding the successor term.
            assert "energy_dict" in kwargs, "In order to perform EDP-based Rank Method, an energy_dict is required"
            energy_dict = kwargs.get("energy_dict", np.array([[]]))
            energy_dict_masked = np.ma.masked_where(energy_dict[node] == inf, energy_dict[node])
            max_successor_ranku = -1
            for succnode in dag.successors(node):
                logger.debug(f"\tLooking at successor node: {succnode}")
                logger.debug(f"\tThe edge weight from node {node} to node {succnode} is {dag[node][succnode]['avgweight']}, and the ranku for node {node} is {dag.nodes()[succnode]['ranku']}")
                val = float(dag[node][succnode]['avgweight']) + dag.nodes()[succnode]['ranku']
                if val > max_successor_ranku:
                    max_successor_ranku = val
            assert max_successor_ranku >= 0, f"Expected maximum successor ranku to be greater or equal to 0 but was {max_successor_ranku}"
            avg_edp = np.mean(comp_matrix_masked[node-_self.numExistingJobs]) * np.mean(energy_dict_masked)
            nx.set_node_attributes(dag, { node: avg_edp + max_successor_ranku }, "ranku")
        else:
            raise RuntimeError(f"Unrecognied Rank-U metric {metric}, unable to compute upward rank")
        # Enqueue predecessors that are not already waiting in the queue.
        visit_queue.extendleft([prednode for prednode in dag.predecessors(node) if prednode not in visit_queue])
    logger.debug("")
    for node in dag.nodes():
        logger.debug(f"Node: {node}, Rank U: {dag.nodes()[node]['ranku']}")
def _node_can_be_processed(_self, dag, node):
    """
    Report whether `node` is ready to have its Rank U computed.

    A node is processable only once every one of its successors already
    carries a 'ranku' attribute.  Without this guard, DAGs of the form

        A
        |\
        | B
        |/
        C

    can fail: C enqueues both A and B, A is popped first, and its Rank U
    would be computed before B's exists.
    """
    node_attrs = dag.nodes()
    for succnode in dag.successors(node):
        if 'ranku' in node_attrs[succnode]:
            continue
        logger.debug(f"Attempted to compute the Rank U for node {node} but found that it has an unprocessed successor {dag.nodes()[succnode]}. Will try with the next node in the queue")
        return False
    return True
def _compute_eft(_self, dag, node, proc):
    """
    Computes the EFT of a particular node if it were scheduled on a particular processor
    It does this by first looking at all predecessor tasks of a particular node and determining the earliest time a task would be ready for execution (ready_time)
    It then looks at the list of tasks scheduled on this particular processor and determines the earliest time (after ready_time) a given node can be inserted into this processor's queue

    Returns a ScheduleEvent(task, start, end, proc) describing this candidate
    placement; the caller keeps whichever processor yields the smallest end.
    """
    ready_time = _self.time_offset
    logger.debug(f"Computing EFT for node {node} on processor {proc}")
    # Phase 1: the node is ready once every predecessor has finished AND its
    # output data has been transferred to `proc`.
    for prednode in list(dag.predecessors(node)):
        predjob = _self.task_schedules[prednode]
        assert predjob != None, f"Predecessor nodes must be scheduled before their children, but node {node} has an unscheduled predecessor of {prednode}"
        logger.debug(f"\tLooking at predecessor node {prednode} with job {predjob} to determine ready time")
        # A zero bandwidth entry marks "same processor": no transfer cost.
        if _self.communication_matrix[predjob.proc, proc] == 0:
            ready_time_t = predjob.end
        else:
            ready_time_t = predjob.end + dag[predjob.task][node]['weight'] / _self.communication_matrix[predjob.proc, proc]
        logger.debug(f"\tNode {prednode} can have its data routed to processor {proc} by time {ready_time_t}")
        if ready_time_t > ready_time:
            ready_time = ready_time_t
    logger.debug(f"\tReady time determined to be {ready_time}")
    computation_time = _self.computation_matrix[node-_self.numExistingJobs, proc]
    job_list = _self.proc_schedules[proc]
    # Phase 2: insertion-based policy -- find the earliest gap in this
    # processor's (end-time-sorted) schedule that can hold the computation.
    for idx in range(len(job_list)):
        prev_job = job_list[idx]
        if idx == 0:
            # Possible gap before the very first scheduled job.
            if (prev_job.start - computation_time) - ready_time > 0:
                logger.debug(f"Found an insertion slot before the first job {prev_job} on processor {proc}")
                job_start = ready_time
                min_schedule = ScheduleEvent(node, job_start, job_start+computation_time, proc)
                break
        if idx == len(job_list)-1:
            # No interior gap fit: append after the last scheduled job.
            job_start = max(ready_time, prev_job.end)
            min_schedule = ScheduleEvent(node, job_start, job_start + computation_time, proc)
            break
        next_job = job_list[idx+1]
        #Start of next job - computation time == latest we can start in this window_list
        #Max(ready_time, previous job's end) == earliest we can start in this window_list
        #If there's space in there, schedule in it
        logger.debug(f"\tLooking to fit a job of length {computation_time} into a slot of size {next_job.start - max(ready_time, prev_job.end)}")
        if (next_job.start - computation_time) - max(ready_time, prev_job.end) >= 0:
            job_start = max(ready_time, prev_job.end)
            logger.debug(f"\tInsertion is feasible. Inserting job with start time {job_start} and end time {job_start + computation_time} into the time slot [{prev_job.end}, {next_job.start}]")
            min_schedule = ScheduleEvent(node, job_start, job_start + computation_time, proc)
            break
    else:
        #For-else loop: the else executes if the for loop exits without break-ing, which in this case means the number of jobs on this processor are 0
        min_schedule = ScheduleEvent(node, ready_time, ready_time + computation_time, proc)
    logger.debug(f"\tFor node {node} on processor {proc}, the EFT is {min_schedule}")
    return min_schedule
def readCsvToNumpyMatrix(csv_file):
    """
    Parse `csv_file` into a float matrix, dropping the header row and column.

    The file is expected to hold comma-separated numeric values with a single
    header row and a single header column; both headers are stripped before
    the remaining cells are converted to floats.
    """
    with open(csv_file) as fd:
        logger.debug(f"Reading the contents of {csv_file} into a matrix")
        rows = [line.split(',') for line in fd.read().split('\n')]
        # A trailing newline leaves one empty record at the end; drop it.
        if rows[len(rows)-1] == ['']:
            rows = rows[0:len(rows)-1]
        matrix = np.array(rows)
        matrix = np.delete(matrix, 0, 0)  # strip the header row (axis 0)
        matrix = np.delete(matrix, 0, 1)  # strip the header column (axis 1)
        matrix = matrix.astype(float)
        logger.debug(f"After deleting the first row and column of input data, we are left with this matrix:\n{matrix}")
        return matrix
def readCsvToDict(csv_file):
    """
    Given an input file consisting of a comma separated list of numeric values with a single header row and header column,
    this function reads that data into a dictionary with keys that are node numbers (row indices) and values that are the CSV rows
    """
    # BUG FIX: the original wrapped this in `with open(csv_file) as fd:` but
    # never used `fd`; readCsvToNumpyMatrix opens the file itself, so the
    # redundant second open has been removed.
    matrix = readCsvToNumpyMatrix(csv_file)
    # Row index == node number.
    return dict(enumerate(matrix))
def readDagMatrix(dag_file, show_dag=False):
    """
    Given an input file consisting of a connectivity matrix, reads and parses it into a networkx Directional Graph (DiGraph)
    """
    matrix = readCsvToNumpyMatrix(dag_file)
    dag = nx.DiGraph(matrix)
    dag.remove_edges_from(
        # Remove all edges with weight of 0 since we have no placeholder for "this edge doesn't exist" in the input file
        # BUG FIX: the weights are floats (the matrix is astype(float)), so the
        # original comparison against the *string* '0.0' could never match and
        # zero-weight edges were never removed.
        [edge for edge in dag.edges() if dag.get_edge_data(*edge)['weight'] == 0.0]
    )
    if show_dag:
        nx.draw(dag, pos=nx.nx_pydot.graphviz_layout(dag, prog='dot'), with_labels=True)
        plt.show()
    return dag
def generate_argparser():
    """Build the command-line parser for the HEFT scheduling tool."""
    parser = argparse.ArgumentParser(description="A tool for finding HEFT schedules for given DAG task graphs")
    # Input files (all default to the canonical Topcuoglu 2002 example).
    parser.add_argument("-d", "--dag_file",
                        type=str, default="test/canonicalgraph_task_connectivity.csv",
                        help="File containing input DAG to be scheduled. Uses default 10 node dag from Topcuoglu 2002 if none given.")
    parser.add_argument("-p", "--pe_connectivity_file",
                        type=str, default="test/canonicalgraph_resource_BW.csv",
                        help="File containing connectivity/bandwidth information about PEs. Uses a default 3x3 matrix from Topcuoglu 2002 if none given.")
    parser.add_argument("-t", "--task_execution_file",
                        type=str, default="test/canonicalgraph_task_exe_time.csv",
                        help="File containing execution times of each task on each particular PE. Uses a default 10x3 matrix from Topcuoglu 2002 if none given.")
    # Behaviour switches.
    parser.add_argument("-l", "--loglevel",
                        type=str, default="INFO", dest="loglevel",
                        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
                        help="The log level to be used in this module. Default: INFO")
    parser.add_argument("--metric",
                        type=RankMetric, default=RankMetric.MEAN, dest="rank_metric",
                        choices=list(RankMetric),
                        help="Specify which metric to use when performing upward rank calculation")
    parser.add_argument("--showDAG",
                        dest="showDAG", action="store_true",
                        help="Switch used to enable display of the incoming task DAG")
    parser.add_argument("--showGantt",
                        dest="showGantt", action="store_true",
                        help="Switch used to enable display of the final scheduled Gantt chart")
    return parser
if __name__ == "__main__":
    argparser = generate_argparser()
    args = argparser.parse_args()
    # Route log output to the console at the requested verbosity.
    logger.setLevel(logging.getLevelName(args.loglevel))
    consolehandler = logging.StreamHandler()
    consolehandler.setLevel(logging.getLevelName(args.loglevel))
    consolehandler.setFormatter(logging.Formatter("%(levelname)8s : %(name)16s : %(message)s"))
    logger.addHandler(consolehandler)
    communication_matrix = readCsvToNumpyMatrix(args.pe_connectivity_file)
    computation_matrix = readCsvToNumpyMatrix(args.task_execution_file)
    dag = readDagMatrix(args.dag_file, args.showDAG)
    # BUG FIX: the scheduler returns four values (proc_schedules,
    # task_schedules, dict_output, ranks); unpacking only three raised a
    # ValueError at runtime.
    processor_schedules, _, _, _ = schedule_dag(dag, communication_matrix=communication_matrix, computation_matrix=computation_matrix, rank_metric=args.rank_metric)
    for proc, jobs in processor_schedules.items():
        logger.info(f"Processor {proc} has the following jobs:")
        logger.info(f"\t{jobs}")
    if args.showGantt:
        showGanttChart(processor_schedules)
| StarcoderdataPython |
1831950 | <reponame>tonybaloney/retox
from setuptools import setup
import retox
# Long description for PyPI comes straight from the README.
with open('README.rst') as readme:
    long_description = readme.read()
# Single-source the version from the package module itself.
_version = retox.__version__
# Runtime dependencies, pinned to known-good versions.
requirements = [
    'tox==2.9.1',
    'eventlet==0.21.0',
    'asciimatics==1.9.0',
    'pathlib2==2.3.0',
]
def main():
    """Package entry point: register the retox distribution with setuptools."""
    setup(
        name='retox',
        description='A parallel service for tox',
        long_description=long_description,
        version=_version,
        url='https://github.com/tonybaloney/retox',
        license='MIT',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
        author='<NAME>',
        classifiers=['Development Status :: 4 - Beta',
                     'Intended Audience :: Developers',
                     'License :: OSI Approved :: MIT License',
                     'Operating System :: POSIX',
                     'Operating System :: Microsoft :: Windows',
                     'Operating System :: MacOS :: MacOS X',
                     'Topic :: Software Development :: Testing',
                     'Topic :: Software Development :: Libraries',
                     'Topic :: Utilities',
                     'Programming Language :: Python',
                     ],
        packages=['retox', ],
        # BUG FIX: `requirements` is already a list of requirement strings;
        # the original passed [requirements], handing setuptools a nested
        # list, which is not a valid install_requires value.
        install_requires=requirements,
        entry_points={'console_scripts': 'retox=retox.__main__:main',
                      'tox': ['exclude = retox.exclude',
                              'proclimit = retox.proclimit',
                              'watch = retox.watch']},
    )
if __name__ == '__main__':
    # Support direct execution: ``python setup.py ...``.
    main()
| StarcoderdataPython |
1617371 | from typing import Any, Optional, List
import sys
import signal
import time
import asyncio
import os.path
import shutil
import tempfile
import subprocess
import portpicker
import aiohttp
import logging
# Module-level logger; handlers/levels are configured by the host application.
logger = logging.getLogger(__name__)
from .paths import Paths
from .controller import Controller
class kill_switch(object):
    """Process-wide registry of objects whose ``_clean`` must run on shutdown."""

    # Shared, class-level list: every registration is visible everywhere.
    _to_kill: List[Any] = []

    @classmethod
    def add(cls, value):
        """Register *value* for cleanup when :meth:`kill_all` fires."""
        logger.debug("kill_switch: Add switch")
        cls._to_kill.append(value)

    @classmethod
    def kill_all(cls):
        """Invoke ``_clean`` on every registered object, in insertion order."""
        logger.info("kill_switch: Process cleanup")
        for victim in cls._to_kill:
            victim._clean()
class SC2Process:
    """Manages the lifecycle of a single StarCraft II client process.

    Used as an async context manager: launches the game executable, opens a
    websocket to its SC2 API endpoint and yields a ``Controller`` bound to
    that connection.  On exit (or SIGINT) the process is terminated and its
    temporary directory removed.
    """
    def __init__(self, host: str = "127.0.0.1", port: Optional[int] = None, fullscreen: bool = False) -> None:
        assert isinstance(host, str)
        assert isinstance(port, int) or port is None
        self._fullscreen = fullscreen
        self._host = host
        # Pick a free port automatically unless the caller supplied one.
        if port is None:
            self._port = portpicker.pick_unused_port()
        else:
            self._port = port
        # Scratch directory handed to the game client; removed in _clean().
        self._tmp_dir = tempfile.mkdtemp(prefix="SC2_")
        self._process = None
        self._session = None
        self._ws = None
    async def __aenter__(self):
        """Launch the process, connect, and return a Controller for it."""
        kill_switch.add(self)
        # Make Ctrl-C tear down every registered process immediately.
        def signal_handler(signal, frame):
            kill_switch.kill_all()
        signal.signal(signal.SIGINT, signal_handler)
        try:
            self._process = self._launch()
            self._ws = await self._connect()
        except:
            # Bare except is deliberate: any failure (including
            # KeyboardInterrupt) must clean up before being re-raised.
            await self._close_connection()
            self._clean()
            raise
        return Controller(self._ws, self)
    async def __aexit__(self, *args):
        kill_switch.kill_all()
        # Restore the default SIGINT behaviour installed in __aenter__.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
    @property
    def ws_url(self):
        # Websocket endpoint exposed by the SC2 client's API server.
        return f"ws://{self._host}:{self._port}/sc2api"
    def _launch(self):
        """Start the SC2 executable and return its Popen handle."""
        args = [
            str(Paths.EXECUTABLE),
            "-listen", self._host,
            "-port", str(self._port),
            "-displayMode", "1" if self._fullscreen else "0",
            "-dataDir", str(Paths.BASE),
            "-tempDir", self._tmp_dir
        ]
        if logger.getEffectiveLevel() <= logging.DEBUG:
            args.append("-verbose")
        return subprocess.Popen(args,
            cwd=(str(Paths.CWD) if Paths.CWD else None),
            #, env=run_config.env
        )
    async def _connect(self):
        """Poll the websocket endpoint once per second for up to 60 seconds.

        Raises ``TimeoutError`` if the client never becomes reachable.
        """
        for i in range(60):
            if self._process == None:
                # The ._clean() was called, clearing the process
                logger.debug("Process cleanup complete, exit")
                sys.exit()
            await asyncio.sleep(1)
            try:
                self._session = aiohttp.ClientSession()
                ws = await self._session.ws_connect(self.ws_url, timeout=120)
                logger.debug("Websocket connection ready")
                return ws
            except aiohttp.client_exceptions.ClientConnectorError:
                # Connection refused: the game client is still starting up.
                await self._session.close()
                if i > 15:
                    logger.debug("Connection refused (startup not complete (yet))")
        logger.debug("Websocket connection to SC2 process timed out")
        raise TimeoutError("Websocket")
    async def _close_connection(self):
        logger.info("Closing connection...")
        if self._ws is not None:
            await self._ws.close()
        if self._session is not None:
            await self._session.close()
    def _clean(self):
        """Terminate the game process (escalating to kill) and remove temp files."""
        logger.info("Cleaning up...")
        if self._process is not None:
            if self._process.poll() is None:
                # Ask politely up to three times before resorting to kill().
                for _ in range(3):
                    self._process.terminate()
                    time.sleep(0.5)
                    if self._process.poll() is not None:
                        break
                else:
                    self._process.kill()
                    self._process.wait()
                    logger.error("KILLED")
        if os.path.exists(self._tmp_dir):
            shutil.rmtree(self._tmp_dir)
        self._process = None
        self._ws = None
        logger.info("Cleanup complete")
| StarcoderdataPython |
6628710 | # Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/python/Advanced/remove_geometry.py
import open3d as o3d
import numpy as np
import time
import copy
def visualize_non_blocking(vis, pcds):
    """Refresh *vis* with the current state of every cloud in *pcds*.

    Pushes a geometry update for each point cloud, then processes pending
    window events and redraws once.
    """
    for cloud in pcds:
        vis.update_geometry(cloud)
    vis.poll_events()
    vis.update_renderer()
# Load the demo point cloud and flip it so it faces the default camera.
pcd_orig = o3d.io.read_point_cloud("../../test_data/fragment.pcd")
flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]
pcd_orig.transform(flip_transform)
# Make n_pcd translated copies laid out along the x axis, 3 units apart.
n_pcd = 5
pcds = []
for i in range(n_pcd):
    pcds.append(copy.deepcopy(pcd_orig))
    trans = np.identity(4)
    trans[:3, 3] = [3 * i, 0, 0]
    pcds[i].transform(trans)
vis = o3d.visualization.Visualizer()
vis.create_window()
start_time = time.time()
# added[i] tracks whether copy i is currently shown in the visualizer.
added = [False] * n_pcd
curr_sec = int(time.time() - start_time)
prev_sec = curr_sec - 1
# Demo loop (runs until the process is killed): once per second, add or
# remove one of the clouds on a 2*n_pcd-second cycle.
while True:
    curr_sec = int(time.time() - start_time)
    if curr_sec - prev_sec == 1:
        prev_sec = curr_sec
        for i in range(n_pcd):
            # Seconds 0..n_pcd-1 of the cycle add clouds one by one ...
            if curr_sec % (n_pcd * 2) == i and not added[i]:
                vis.add_geometry(pcds[i])
                added[i] = True
                print("Adding %d" % i)
            # ... seconds n_pcd..2*n_pcd-1 remove them again.
            if curr_sec % (n_pcd * 2) == (i + n_pcd) and added[i]:
                vis.remove_geometry(pcds[i])
                added[i] = False
                print("Removing %d" % i)
    visualize_non_blocking(vis, pcds)
97735 | import os
from tweepy.auth import OAuthHandler
from tweepy.api import API
from dotenv import load_dotenv
# Populate os.environ from a local .env file before TweepyAuth reads it.
load_dotenv(verbose=True)
class TweepyAuth:
    """
    class used to handle app authentication
    """
    # Credentials come from the environment (populated by load_dotenv at import).
    consumer_key = os.getenv("API_KEY")
    consumer_secret = os.getenv("API_SECRET_KEY")
    access_token = os.getenv("ACCESS_TOKEN")
    access_token_secret = os.getenv("ACCESS_TOKEN_SECRET")

    @property
    def authenticate(self):
        """
        Build and return an authenticated tweepy API object.

        The credential check is performed before returning.  A failed check
        is reported on stdout, but the API object is still returned so
        existing callers (which always received one) keep working.
        """
        # Creating the authentication object
        auth = OAuthHandler(self.consumer_key, self.consumer_secret)
        # Setting your access token and secret
        auth.set_access_token(self.access_token, self.access_token_secret)
        # Creating the API object while passing in auth information
        api = API(auth, wait_on_rate_limit=True,
                  wait_on_rate_limit_notify=True)
        # BUG FIX: the original returned here, which left the verification
        # below as unreachable dead code.
        try:
            api.verify_credentials()
            print("Authentication OK")
        except Exception:  # narrowed from a bare except
            print("Error during authentication")
        return api
| StarcoderdataPython |
32213 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=no-member,relative-import
"""Unit tests for blink_idl_parser.py."""
import unittest
from blink_idl_parser import BlinkIDLParser
class BlinkIDLParserTest(unittest.TestCase):
    """Regression tests for BlinkIDLParser error reporting."""
    def test_missing_semicolon_between_definitions(self):
        # No semicolon after enum definition.
        text = '''enum TestEnum { "value" } dictionary TestDictionary {};'''
        parser = BlinkIDLParser()
        parser.ParseText(filename='', data=text)
        # Parsing must surface at least one error for the missing semicolon.
        self.assertGreater(parser.GetErrors(), 0)
| StarcoderdataPython |
3204531 | <reponame>kedixa/pyworkflow
import signal
import sys
import threading
import pywf as wf
def process(task):
    """Build an HTML echo page for one HTTP request.

    Renders the request line and every request header into the response body,
    fills in the status line and server headers, and marks the connection to
    be closed after the tenth request (task sequence numbers start at 0).
    """
    req = task.get_req()
    resp = task.get_resp()

    resp.append_body(b"<html>\n")  # as bytes
    request_line = (
        f"<p>{req.get_method()} "
        f"{req.get_request_uri()} "
        f"{req.get_http_version()}</p>\n"
    )
    resp.append_body(request_line)
    for name, value in req.get_headers():
        resp.append_body(f"<p>{name}: {value}</p>\n")
    resp.append_body("</html>\n")  # as str

    resp.set_http_version("HTTP/1.1")
    resp.set_status_code("200")
    resp.set_reason_phrase("OK")
    resp.add_header_pair("Content-Type", "text/html")
    resp.add_header_pair("Server", "Sogou Python3 WFHttpServer")

    # close after 10 reqs, seq start from 0
    if task.get_task_seq() == 9:
        resp.add_header_pair("Connection", "close")
def main():
    """Start an HTTP echo server on the port given as argv[1].

    Blocks until SIGINT/SIGTERM is received, then shuts the server down.
    """
    if len(sys.argv) != 2:
        print("Usage {} <port>".format(sys.argv[0]))
        sys.exit(1)

    port = int(sys.argv[1])
    server = wf.HttpServer(process)
    # Event used to park the main thread until a termination signal arrives.
    stop_event = threading.Event()
    def stop(*args):
        if not stop_event.is_set():
            stop_event.set()
    for sig in (signal.SIGTERM, signal.SIGINT):
        signal.signal(sig, stop)

    # You can use server.start(socket.AddressFamily.AF_INET, "localhost", port) too
    if server.start(port) == 0:
        stop_event.wait()
        server.stop()
        """ server.stop() equal to:
            server.shutdown()
            server.wait_finish()
        """
    else:
        print("Cannot start server")
        sys.exit(1)
if __name__ == "__main__":
    # Run the server when executed as a script.
    main()
| StarcoderdataPython |
8156821 | <gh_stars>0
import numpy as np
from sidnet import MUSHR
import tflearn
import cv2
import time
# Input image geometry fed to the network.
WIDTH = 320
HEIGHT = 240
# Training hyper-parameters, used only to reconstruct the checkpoint name.
LR = 0.01
EPOCH = 1
MODEL_NAME ='MUSHR_-{}-{}-{}-{}.model'.format(LR,'TRAJ',EPOCH,'steering')
# Load the trained trajectory-prediction model weights.
model = MUSHR(HEIGHT,WIDTH,LR)
model.load(MODEL_NAME)
output_path = "C:/Python37/DonkeySimWin/Outputs"
counter = 0
for i in range(1):
    # Each .npy shard holds (input image, target trajectory image) pairs.
    train_data = np.load('MUSHR_320x240_shuffled_{}.npy'.format(str(i)),allow_pickle=True)
    # NOTE(review): the comprehension variable `i` shadows the shard index above.
    X = np.array([i[0].reshape(HEIGHT,WIDTH,1) for i in train_data])
    Y = np.array([i[1].reshape(HEIGHT,WIDTH,1) for i in train_data])
    # Sample every 1000th frame, predict a trajectory image and save it.
    for j in range(0,len(X),1000):
        img = X[j]
        expected = Y[j]
        now = time.time()
        # Scale the prediction to 8-bit pixel range (assumes network output
        # in [0, 1] -- TODO confirm against the model's output activation).
        traj = (model.predict([img])[0]*255)
        dt = time.time()-now
        # Report per-frame inference latency in milliseconds.
        print(dt*1000)
        # added_image = cv2.addWeighted(img,0.0,traj,1.0,0)
        # added_image = cv2.addWeighted(added_image,0.8,expected,0.5,0)
        cv2.imwrite(output_path+"/{}.jpg".format(str(counter)),traj)
        counter += 1
# NOTE(review): `inputs`/`output` were never assigned earlier; these `del`
# statements only work because of the `= None` assignments directly above.
inputs = None
del inputs
output = None
del output
| StarcoderdataPython |
# Read N and then N numbers; the list is expected to contain the values
# 1..N with one value repeated and one value missing.
inp = int(input("Enter N: "))
m = []
for i in range(0, inp):
    n = int(input("Enter numbers: "))
    m.append(n)
m.sort()

# Find the smallest value in 1..N that never appears.  Membership tests
# against a set are O(1), replacing the original O(N^2) nested scan.
seen = set(m)
mis = 0
for i in range(1, inp + 1):
    if i not in seen:
        mis = i
        break

# Find the repeated value with a single pass over the sorted list: in
# sorted order duplicates are adjacent, so the original O(N^2) pairwise
# comparison is unnecessary.  As before, the last duplicate wins.
rep = 0
for j in range(1, len(m)):
    if m[j] == m[j - 1]:
        rep = m[j]

print(rep, " ", mis)
| StarcoderdataPython |
8133309 | import unittest
from pyEmitter.emitter import EventEmitter
class TestEventEmitter(unittest.TestCase):
    """Unit tests for pyEmitter's EventEmitter.

    Cleanup: the original used C-style semicolon-terminated statements
    throughout and carried extraction junk on its final line; both removed.
    """

    def setUp(self):
        # A fresh emitter per test keeps handler registrations isolated.
        self.emitter = EventEmitter()

    def test_AddingHandlerToEvent(self):
        # on() registers a handler and returns None.
        self.assertIsNone(self.emitter.on("Test On", lambda *x: print(x)))

    def test_EmittingEvent(self):
        # emit() returns None for an event with a registered handler.
        self.assertIsNone(self.emitter.emit("Test On", "Successful"))

    def test_EmittingEventWhichDoesNotExist(self):
        # Emitting an event with no handlers must not raise.
        self.assertIsNone(self.emitter.emit("Test Emit DNE", "Successful"))

    def test_TestingCompleteEcosystem(self):
        # The handler receives the event name first, then the payload.
        self.emitter.on("Test Emit", lambda *x: self.assertEqual(x[1], "Successful"))
        self.emitter.emit("Test Emit", "Successful")

    @unittest.expectedFailure
    def test_VarargsCompulsory(self):
        # A single-argument handler cannot accept two emitted values.
        self.emitter.on("Test Var Args", lambda x: self.assertEqual(x, "UnSuccessful"))
        self.emitter.emit("Test Var Args", "UnSuccessful", "Hello")
1736756 | <filename>open_spiel/contrib/python/export_graph.py
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of building and exporting a Tensorflow graph.
Adapted from the <NAME>'s blog post:
https://tebesu.github.io/posts/Training-a-TensorFlow-graph-in-C++-API
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
import pyspiel
FLAGS = flags.FLAGS
# Command-line flags: which game to build the graph for, and where to save it.
flags.DEFINE_string("game", "breakthrough", "Name of the game")
flags.DEFINE_string("dir", "/tmp", "Directory to save graph")
flags.DEFINE_string("filename", "graph.pb", "Filename for the graph")
def main(_):
    """Build a small policy-network TF1 graph for FLAGS.game and export it.

    The exported GraphDef contains named ops ("input", "legals_mask",
    "policy_softmax", "sampled_actions", "train", "init_all_vars_op") so it
    can be driven from the C++ API.
    """
    game = pyspiel.load_game(FLAGS.game)

    # Information state length
    info_state_shape = game.observation_tensor_shape()
    flat_info_state_length = np.prod(info_state_shape)

    # Output
    num_actions = game.num_distinct_actions()

    with tf.Session() as sess:
        net_input = tf.placeholder(
            tf.float32, [None, flat_info_state_length], name="input")

        # pylint: disable=unused-variable
        output = tf.placeholder(tf.float32, [None, num_actions], name="output")
        legals_mask = tf.placeholder(
            tf.float32, [None, num_actions], name="legals_mask")

        # Two hidden layers of 128 ReLU units, then raw action logits.
        policy_net = tf.layers.dense(net_input, 128, activation=tf.nn.relu)
        policy_net = tf.layers.dense(policy_net, 128, activation=tf.nn.relu)
        policy_net = tf.layers.dense(policy_net, num_actions)

        # Note: subtracting the max here is to help with numerical stability.
        # However, there can still be numerical problems. If you are doing a softmax
        # here, it can return NaN when the max for the policy net is high on one of
        # the illegal actions, because policy_net - max will be small for legal
        # actions, giving all exp(small) == 0 in the denominator, returning NaN at
        # the end. One fix is to set the logits to -inf and define a custom cross
        # entropy op that ignores over the illegal actions.
        policy_net = policy_net - tf.reduce_max(policy_net, axis=-1, keepdims=True)

        # Masked softmax: zero probability on illegal actions, renormalized
        # over the legal ones.
        masked_exp_logit = tf.multiply(tf.exp(policy_net), legals_mask)
        renormalizing_factor = tf.reduce_sum(
            masked_exp_logit, axis=-1, keepdims=True)
        # pylint: disable=unused-variable
        policy_softmax = tf.where(
            tf.equal(legals_mask, 0.),
            tf.zeros_like(masked_exp_logit),
            tf.divide(masked_exp_logit, renormalizing_factor),
            name="policy_softmax")

        policy_targets = tf.placeholder(shape=[None, num_actions], dtype=tf.float32)

        policy_cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=policy_net, labels=policy_targets),
            axis=0)

        # We make one sample.
        sampled_actions = tf.random.categorical(
            tf.log(policy_softmax), 1, name="sampled_actions")

        # pylint: disable=unused-variable
        optimizer = tf.train.AdamOptimizer(0.0001).minimize(
            policy_cost, name="train")

        # pylint: disable=unused-variable
        init = tf.variables_initializer(tf.global_variables(),
                                        name="init_all_vars_op")

        print("Writing file: {}/{}".format(FLAGS.dir, FLAGS.filename))
        tf.train.write_graph(
            sess.graph_def, FLAGS.dir, FLAGS.filename, as_text=False)
if __name__ == "__main__":
    # absl's app.run parses FLAGS before dispatching to main.
    app.run(main)
| StarcoderdataPython |
12809913 | <gh_stars>1-10
import tensorflow.keras.backend as K
from groupy.gconv.tensorflow_gconv.splitgconv2d import gconv2d_util
from tensorflow.keras.layers import InputSpec, Conv2D, Conv2DTranspose
from tensorflow.keras.utils import get_custom_objects
from keras_gcnn.transform_filter import transform_filter_2d_nhwc
class GConv2D(Conv2D):
    """Group-equivariant 2D convolution layer.

    The stored kernel is expanded with all transformations of the chosen
    symmetry group ('C4' rotations or 'D4' roto-reflections) before every
    convolution, so the output channel axis carries an extra group dimension
    folded into it (x4 for C4, x8 for D4).
    """
    def __init__(self, filters, kernel_size, h_input, h_output, strides=(1, 1), padding='valid', data_format=None,
                 dilation_rate=(1, 1), activation=None, use_bias=False, kernel_initializer='glorot_uniform',
                 bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, transpose=False, **kwargs):
        """
        :param filters:
        :param kernel_size:
        :param h_input:
        :param h_output:
        :param h_input: one of ('Z2', 'C4', 'D4'). Use 'Z2' for the first layer. Use 'C4' or 'D4' for later layers.
        :param h_output: one of ('C4', 'D4'). What kind of transformations to use (rotations or roto-reflections).
              The choice of h_output of one layer should equal h_input of the next layer.
        :param strides:
        :param padding:
        :param data_format:
        :param dilation_rate:
        :param activation:
        :param use_bias:
        :param kernel_initializer:
        :param bias_initializer:
        :param kernel_regularizer:
        :param bias_regularizer:
        :param activity_regularizer:
        :param kernel_constraint:
        :param bias_constraint:
        :param transpose: if True, perform a transposed convolution in call().
        :param kwargs:
        """
        if use_bias:
            raise NotImplementedError('Does not support bias yet') # TODO: support bias
        if not isinstance(kernel_size, int) and not kernel_size[0] == kernel_size[1]:
            raise ValueError('Requires square kernel')
        self.h_input = h_input
        self.h_output = h_output
        self.transpose = transpose
        super(GConv2D, self).__init__(filters, kernel_size, strides=strides, padding=padding, data_format=data_format,
                                      dilation_rate=dilation_rate, activation=activation,
                                      use_bias=use_bias, kernel_initializer=kernel_initializer,
                                      bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer,
                                      bias_regularizer=bias_regularizer,
                                      activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint,
                                      bias_constraint=bias_constraint, **kwargs)
    def compute_output_shape(self, input_shape):
        if self.transpose:
            # Borrow Conv2DTranspose's spatial-shape math via an unbound call.
            shape = Conv2DTranspose.compute_output_shape(self, input_shape)
        else:
            shape = super(GConv2D, self).compute_output_shape(input_shape)
        # The output group multiplies the channel count: |C4| = 4, |D4| = 8.
        nto = shape[3]
        if self.h_output == 'C4':
            nto *= 4
        elif self.h_output == 'D4':
            nto *= 8
        return (shape[0], shape[1], shape[2], nto)
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            raise NotImplementedError('Channels first is not implemented for GConvs yet.')
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        orig_input_dim = input_dim
        # The incoming channel axis already contains the input group's copies;
        # divide them out to recover the true number of feature channels.
        if self.h_input == 'C4':
            input_dim //= 4
        elif self.h_input == 'D4':
            input_dim //= 8
        # gconv2d_util returns the index/shape bookkeeping needed to expand
        # the weight tensor with every group transformation at call time.
        self.gconv_indices, self.gconv_shape_info, w_shape = gconv2d_util(h_input=self.h_input, h_output=self.h_output,
                                                                          in_channels=input_dim,
                                                                          out_channels=self.filters,
                                                                          ksize=self.kernel_size[0])
        self.kernel = self.add_weight(shape=w_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            raise NotImplementedError()
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: orig_input_dim})
        self.built = True
    def call(self, inputs):
        outputs = gconv2d(
            inputs,
            self.kernel,
            self.gconv_indices,
            self.gconv_shape_info,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
            transpose=self.transpose,
            output_shape=self.compute_output_shape(inputs.shape))
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
    def get_config(self):
        # h_input/h_output must round-trip for layer (de)serialization.
        config = super(GConv2D, self).get_config()
        config['h_input'] = self.h_input
        config['h_output'] = self.h_output
        return config
def gconv2d(x, kernel, gconv_indices, gconv_shape_info, strides=(1, 1), padding='valid',
            data_format=None, dilation_rate=(1, 1), transpose=False, output_shape=None):
    """2D group equivariant convolution.

    # Arguments
        x: Tensor or variable.
        kernel: kernel tensor.
        gconv_indices: flat filter-transformation indices from `gconv2d_util`.
        gconv_shape_info: shape-info tuple from `gconv2d_util`.
        strides: strides tuple.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
            Whether to use Theano or TensorFlow data format
            for inputs/kernels/ouputs.
        dilation_rate: tuple of 2 integers.
        transpose: if True, perform a transposed (fractionally strided) convolution.
        output_shape: full output shape; required when `transpose` is True.
    # Returns
        A tensor, result of 2D convolution.
    # Raises
        ValueError: if `data_format` is neither `channels_last` or `channels_first`.
    """
    # Transform the filters once. (The original recomputed this identical
    # transformation a second time inside the transpose branch.)
    transformed_filter = transform_filter_2d_nhwc(w=kernel, flat_indices=gconv_indices, shape_info=gconv_shape_info)
    if transpose:
        # Replace the (possibly symbolic) batch dim with the runtime batch size.
        output_shape = (K.shape(x)[0], output_shape[1], output_shape[2], output_shape[3])
        # conv2d_transpose expects in/out channel axes swapped relative to conv2d.
        transformed_filter = K.permute_dimensions(transformed_filter, [0, 1, 3, 2])
        return K.conv2d_transpose(x=x, kernel=transformed_filter, output_shape=output_shape, strides=strides,
                                  padding=padding, data_format=data_format)
    return K.conv2d(x=x, kernel=transformed_filter, strides=strides, padding=padding, data_format=data_format,
                    dilation_rate=dilation_rate)
# Register GConv2D with Keras so saved models that use it can be deserialized by name.
get_custom_objects().update({'GConv2D': GConv2D})
| StarcoderdataPython |
305168 | #!/usr/bin/env python
# Copyright 2017 <NAME>
#
# Licensed under the modified BSD (3-clause BSD) License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import shutil
import os
from emme import Config, analyze_program, ALL
from tests.input_tests import examples, ex_fast
tmp_dir = ".tmp_examples/"
outputs = "outputs.txt"
def run(config):
    """Run one emme analysis on `config` with the shared debug settings applied."""
    config.debug = True
    config.verbosity = 3
    config.defines = "enc_RF=0,enc_RBF1=0,enc_RBF2=0"
    analyze_program(config)
def run_fresh(example, skip_solving, expand, alloy):
    """Solve `example` from scratch into a temp dir, regenerate its expected
    outputs, and verify every generated line appears in the stored outputs.

    Args:
        example: example name (without the ``.bex`` extension).
        skip_solving: forwarded to the first solving pass.
        expand: value for ``config.expand_bounded_sets``.
        alloy: if True, use the Alloy backend.
    """
    config = Config()
    config.inputfile = example+".bex"
    config.prefix = tmp_dir+example+"/"
    config.sat = True
    if os.path.exists(config.prefix):
        shutil.rmtree(config.prefix)
    config.skip_solving = skip_solving
    config.expand_bounded_sets = expand
    if alloy:
        config.use_alloy = True
    # solving one instance
    run(config)
    # generating the expected outputs
    config.skip_solving = True
    config.sat = False
    run(config)
    # checking if the new model is correct
    with open(config.prefix+outputs, "r") as new:
        with open(example+"/"+outputs, "r") as old:
            linesold = [x.strip() for x in old.readlines()]
            # BUG FIX: the original called new.readlines() twice; the first
            # call exhausted the file, so the second returned [] and the
            # per-line check below never actually ran. Read the lines once.
            linesnew = new.readlines()
            assert(len(linesnew) != 0)
            for linenew in linesnew:
                # Strip to match linesold, which is built from stripped lines.
                assert(linenew.strip() in linesold)
    shutil.rmtree(tmp_dir)
def run_existing(example, skip_solving, print_all):
    """Re-check `example` in place against its previously generated outputs."""
    config = Config()
    config.inputfile = example + ".bex"
    config.prefix = example + "/"
    config.sat = False
    config.nexecs = 20
    config.expand_bounded_sets = True
    config.skip_solving = skip_solving
    if print_all:
        config.printing_relations = None
    run(config)
def test_generation():
    """Yield nose-style generation test cases over the example suites."""
    suites = ((examples, True, True),
              (examples, True, False),
              (ex_fast, False, False))
    for suite, skip_solving, print_all in suites:
        for example in suite:
            yield run_existing, example, skip_solving, print_all
def test_verification():
    """Yield nose-style verification test cases over the fast examples."""
    for expand, alloy in ((True, False), (False, False), (True, True)):
        for example in ex_fast:
            yield run_fresh, example, False, expand, alloy
if __name__ == "__main__":
    # Smoke-run the fast suite directly, without a test runner.
    for example in ex_fast:
        run_existing(example, True, True)
    # Same (expand, alloy) combinations, in the same order, as the original.
    for expand, alloy in ((True, False), (False, False), (True, True)):
        for example in ex_fast:
            run_fresh(example, False, expand, alloy)
| StarcoderdataPython |
3379643 | <gh_stars>1-10
import ast
import os

# For Discord
import discord
import speech_recognition as sr
from discord.ext import commands
# from discord_slash import cog_ext, SlashContext
from tabulate import tabulate
# Function to write into database
# Function to write into database
def write_db(db: dict) -> None:
    """Persist `db` to the local ``database`` file as a Python-literal string
    (the format `get_db` reads back)."""
    # 'w' (not 'w+'): the file is only written here, never read back.
    with open("database", 'w') as file:
        file.write(str(db))
# Function to get database
def get_db() -> dict:
with open('database') as file:
return eval(file.read())
# Loading the config file
# NOTE(review): parsed with eval(), so despite the .json extension the file
# must contain a Python dict literal (True/False/None, not true/false/null),
# and eval() will execute anything inside it — confirm the file is trusted,
# or switch to json.load / ast.literal_eval.
with open('config.json') as file:
    config = eval(file.read())
class VoiceListener(commands.Cog):
    """Cog providing voice-channel recording commands (record / pause / stop)."""

    def __init__(self, bot: commands.Bot):
        self.bot = bot
        self.db = get_db()
        # Map guild id -> active voice connection, seeded from existing clients.
        self.connections = {
            voice.guild.id: voice for voice in self.bot.voice_clients}
        self.playlists = {}
        # NOTE(review): named like a speech_recognition.Recognizer but holds a
        # list and is never used below — confirm intended use.
        self.recognizer = []

    async def get_vc(self, message):
        """Return a voice connection for the author's channel, connecting or
        moving the bot as needed; returns None (after a notice) when the
        author is not in a voice channel."""
        vc = message.author.voice
        if not vc:
            await message.channel.send("You're not in a vc right now")
            return
        connection = self.connections.get(message.guild.id)
        if connection:
            # Already connected in this guild: reuse, or move to the author's channel.
            if connection.channel.id == message.author.voice.channel.id:
                return connection
            await connection.move_to(vc.channel)
            return connection
        else:
            vc = await vc.channel.connect()
            self.connections.update({message.guild.id: vc})
            return vc

    def args_to_filters(self, args):
        """Parse ``--time <seconds>`` and ``--users <id> [<id> ...]`` from the
        raw argument list. Returns a filters dict, or an error string."""
        filters = {}
        if '--time' in args:
            index = args.index('--time')
            try:
                seconds = args[index+1]
            except IndexError:
                return "You must provide an amount of seconds for the time."
            try:
                seconds = int(seconds)
            except ValueError:
                return "You must provide an integer value."
            filters.update({'time': seconds})
        if '--users' in args:
            users = []
            index = args.index('--users')+1
            # Collect consecutive integer tokens following --users.
            while True:
                try:
                    users.append(int(args[index]))
                except IndexError:
                    break
                except ValueError:
                    break
                index += 1
            if not users:
                return "You must provide at least one user, or multiple users separated by spaces."
            filters.update({'users': users})
        return filters

    def get_encoding(self, args):
        """Return the requested output encoding (default ``'wav'``), or None
        when ``--output`` is present with a missing/invalid value."""
        if '--output' in args:
            index = args.index('--output')
            try:
                encoding = args[index+1].lower()
                if encoding not in discord.Sink.valid_encodings:
                    return
                return encoding
            except IndexError:
                return
        else:
            return 'wav'

    async def finished_callback(self, sink, channel, *args):
        """Post each user's recorded audio file to `channel` once a recording ends."""
        # Note: sink.audio_data = {user_id: AudioData}
        recorded_users = [
            f" <@{str(user_id)}> ({os.path.split(audio.file)[1]}) " for user_id, audio in sink.audio_data.items()]
        for user_id, audio in sink.audio_data.items():
            # send file to channel
            await channel.send(f"<@{user_id}>", file=discord.File(audio.file))
        await channel.send(f"Finished! Recorded audio for {', '.join(recorded_users)}.")

    # BUG FIX: inside a Cog, event handlers must be registered with
    # @commands.Cog.listener(); without it this method was never invoked.
    @commands.Cog.listener()
    async def on_voice_state_update(self, member, before, after):
        """Track only the bot's own voice-state changes."""
        # BUG FIX: a Cog has no `self.user`; the bot's own user is `self.bot.user`.
        if member.id != self.bot.user.id:
            return
        # Filter out updates other than when we leave a channel we're connected to
        if member.guild.id not in self.connections and (not before.channel and after.channel):
            return

    @commands.command(name='pause_rec', aliases=['pause_recording'], pass_context=True)
    async def toggle_pause(self, ctx: commands.Context):
        """Toggle pause/resume on the current recording."""
        vc = await self.get_vc(ctx)
        if vc is None:  # author not in a voice channel; get_vc already replied
            return
        vc.toggle_pause()
        await ctx.send(f"The recording has been {'paused' if vc.paused else 'unpaused'}")

    @commands.command(name='stop_rec', aliases=['stop_recording'], pass_context=True)
    async def stop_recording(self, ctx: commands.Context):
        """Stop the current recording; this triggers `finished_callback`."""
        vc = await self.get_vc(ctx)
        if vc is None:
            return
        vc.stop_recording()

    @commands.command(name="record", pass_context=True)
    async def _record(self, ctx: commands.Context, *args):
        """Start recording the author's voice channel.

        Supported flags: ``--time <seconds>``, ``--users <id> [<id> ...]``,
        ``--output <encoding>``.
        """
        vc = await self.get_vc(ctx)
        if vc is None:
            return
        args = list(args)
        print(args)
        filters = self.args_to_filters(args)
        print(filters)
        # args_to_filters returns a str only on parse errors.
        if isinstance(filters, str):
            return await ctx.send(filters)
        encoding = self.get_encoding(args)
        if encoding is None:
            return await ctx.send("You must provide a valid output encoding.")
        vc.start_recording(discord.Sink(
            encoding=encoding, filters=filters), self.finished_callback, ctx)
        await ctx.send("The recording has started!")

    @commands.command(name="recognize", pass_context=True)
    async def _recognize(self, ctx: commands.Context, user_id: int = None):
        """Record the given user (default: the command author) for 10 seconds."""
        if user_id is None:
            user_id = ctx.author.id
        # BUG FIX: the original passed the single literal string
        # "--time 10 --users user_id" — one token the record parser cannot
        # split, with "user_id" never substituted. Pass separate tokens and
        # the actual id instead.
        await ctx.invoke(self.bot.get_command('record'),
                         '--time', '10', '--users', str(user_id))
def setup(bot):
    """Extension entry point: called by `bot.load_extension` to register the cog."""
    bot.add_cog(VoiceListener(bot))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.