id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3544039 | <gh_stars>1-10
#!/usr/bin/env python3
# Written by <NAME> and released under GPL v3 license.
# See git repository (https://github.com/aidaanva/endorS.py) for full license text.
"""Script to calculate the endogenous DNA in a sample from samtools flag stats.
It can accept up to two files: pre-quality and post-quality filtering. We recommend
to use both files but you can also use the pre-quality filtering.
"""
import re
import sys
import json
import argparse
import textwrap
# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(
    prog='endorS.py',
    usage='python %(prog)s [-h] [--version] <samplesfile>.stats [<samplesfile>.stats]',
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent('''\
    author:
        <NAME> (aida.andrades[at]gmail.com)
    description:
        %(prog)s calculates endogenous DNA from samtools flagstat files and print to screen
        Use --output flag to write results to a file
    '''))
parser.add_argument('samtoolsfiles', metavar='<samplefile>.stats', type=str, nargs='+',
                    help='output of samtools flagstat in a txt file (at least one required). If two files are supplied, the mapped reads of the second file is divided by the total reads in the first, since it assumes that the <samplefile.stats> are related to the same sample. Useful after BAM filtering')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.4')
parser.add_argument('--output', '-o', nargs='?', help='specify a file format for an output file. Options: <json> for a MultiQC json output. Default: none')
parser.add_argument('--name', '-n', nargs='?', help='specify name for the output file. Default: extracted from the first samtools flagstat file provided')
args = parser.parse_args()

# --- Pre-quality-filtering flagstat (first positional file, required) ------
try:
    with open(args.samtoolsfiles[0], 'r') as pre:
        contentsPre = pre.read()
    # Total number of reads: the "N + M in total" line of samtools flagstat.
    totalReads = float(re.findall(r'^([0-9]+) \+ [0-9]+ in total', contentsPre)[0])
    # Mapped reads before quality filtering: the "N + M mapped " line.
    mappedPre = float(re.findall(r'([0-9]+) \+ [0-9]+ mapped ', contentsPre)[0])
    # Endogenous DNA (%) = mapped / total * 100, guarded against division by zero.
    if totalReads == 0.0:
        endogenousPre = 0.000000
        print("WARNING: no reads in the fastq input, Endogenous DNA raw (%) set to 0.000000")
    elif mappedPre == 0.0:
        endogenousPre = 0.000000
        print("WARNING: no mapped reads, Endogenous DNA raw (%) set to 0.000000")
    else:
        endogenousPre = float("{0:.6f}".format(round(mappedPre / totalReads * 100, 6)))
except Exception:
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the friendly message without
    # hiding interpreter exits.
    print("Incorrect input, please provide at least a samtools flag stats as input\nRun:\npython endorS.py --help \nfor more information on how to run this script")
    sys.exit()

# --- Post-quality-filtering flagstat (second positional file, optional) ----
try:
    with open(args.samtoolsfiles[1], 'r') as post:
        contentsPost = post.read()
    # Mapped reads after quality filtering.
    mappedPost = float(re.findall(r'([0-9]+) \+ [0-9]+ mapped', contentsPost)[0])
    if totalReads == 0.0:
        endogenousPost = 0.000000
        print("WARNING: no reads in the fastq input, Endogenous DNA modified (%) set to 0.000000")
    elif mappedPost == 0.0:
        endogenousPost = 0.000000
        print("WARNING: no mapped reads, Endogenous DNA modified (%) set to 0.000000")
    else:
        endogenousPost = float("{0:.6f}".format(round(mappedPost / totalReads * 100, 6)))
except Exception:
    # IndexError when only one file was given; any failure here is treated
    # as "no usable second file". (Was a bare `except:` — see note above.)
    print("Only one samtools flagstat file provided")
    # Sentinel meaning "no post-filtering stats available".
    mappedPost = "NA"

# Output name: --name flag wins, otherwise the basename of the first
# flagstat file with its extension stripped.
if args.name is not None:
    name = args.name
else:
    name = str(args.samtoolsfiles[0].rsplit(".", 1)[0].rsplit("/")[-1])

# Build the MultiQC "generalstats" JSON payload; include the post-filtering
# column only when a second flagstat file was parsed.
if mappedPost == "NA":
    jsonOutput = {
        "id": "endorSpy",
        "plot_type": "generalstats",
        "pconfig": {
            "endogenous_dna": {"max": 100, "min": 0, "title": "Endogenous DNA (%)", "format": '{:,.2f}'}
        },
        "data": {
            name: {"endogenous_dna": endogenousPre}
        }
    }
else:
    jsonOutput = {
        "id": "endorSpy",
        "plot_type": "generalstats",
        "pconfig": {
            "endogenous_dna": {"max": 100, "min": 0, "title": "Endogenous DNA (%)", "format": '{:,.2f}'},
            "endogenous_dna_post": {"max": 100, "min": 0, "title": "Endogenous DNA Post (%)", "format": '{:,.2f}'}
        },
        "data": {
            name: {"endogenous_dna": endogenousPre, "endogenous_dna_post": endogenousPost}
        },
    }

# Write a *_mqc.json file for MultiQC when --output is given, otherwise
# print the percentages to the screen.
if args.output is not None:
    fileName = name + "_endogenous_dna_mqc.json"
    with open(fileName, "w+") as outfile:
        json.dump(jsonOutput, outfile)
    print(fileName, "has been generated")
else:
    if mappedPost == "NA":
        print("Endogenous DNA (%):", endogenousPre)
    else:
        print("Endogenous DNA raw (%):", endogenousPre)
        print("Endogenous DNA modified (%):", endogenousPost)
| StarcoderdataPython |
1790691 | <gh_stars>0
from keras.datasets import mnist
from numpy import expand_dims, ones
from numpy.random import randint
# Génère n vrais échantillons
def generate_real_samples(n):
    """Return ``n`` randomly chosen MNIST training images labelled as real.

    The label 1 marks the images as "real" for GAN discriminator training.
    NOTE(review): the full MNIST dataset is loaded on every call; consider
    caching it at module level if this is called inside a training loop.
    """
    # Load the training split from MNIST (labels are discarded).
    (trainX, _), (_, _) = mnist.load_data()
    # Turn the 2D image arrays into 3D by adding a trailing channel axis.
    dataset = expand_dims(trainX, axis=-1)
    # Convert uint8 pixels to float32.
    dataset = dataset.astype("float32")
    # Rescale [0, 255] to [-1, 1]. (The original comment claimed [0, 1], but
    # the arithmetic below clearly centers on 127.5, i.e. a [-1, 1] range.)
    dataset = (dataset - 127.5) / 127.5  # Normalize the images to [-1, 1]
    # Pick n random indices into the dataset (with possible repeats).
    ix = randint(0, dataset.shape[0], n)
    # Gather the selected images.
    x = dataset[ix]
    # Class label y = 1 flags these as real samples.
    y = ones((n, 1))
    return x, y
| StarcoderdataPython |
12817293 | CONFLICT_MARKER_START = '<<<<<<<'
CONFLICT_MARKER_MARK = '======='
CONFLICT_MARKER_END = '>>>>>>>'
import vim
from . import modes
from .settings import setting, init_cur_window_wrap
from .util import buffers, windows
from .util.log import log
def process_result():
    """Strip git conflict markers from the result buffer.

    Rewrites the result buffer so that the ``<<<<<<<`` and ``>>>>>>>``
    marker lines, and every content line inside a conflict region, are
    dropped; only the ``=======`` separator line survives from each
    conflict. Text outside conflicts is kept unchanged.
    """
    windows.close_all()
    buffers.result.open()
    lines = []
    in_conflict = False
    for line in buffers.result.lines:
        if in_conflict:
            # Inside a conflict only the separator line is preserved.
            if CONFLICT_MARKER_MARK in line:
                lines.append(line)
            if CONFLICT_MARKER_END in line:
                in_conflict = False
            continue
        if CONFLICT_MARKER_START in line:
            in_conflict = True
            continue
        lines.append(line)
    buffers.result.set_lines(lines)
def setlocal_fixed_buffer(b, filetype):
    """Make buffer *b* a read-only view with the given filetype."""
    b.options['swapfile'] = False
    b.options['modifiable'] = False
    b.options['filetype'] = filetype
    init_cur_window_wrap()
def setlocal_buffers():
    """Configure all Splice buffers (result, original, one, two, HUD).

    The three read-only input buffers inherit the result buffer's filetype
    so their syntax highlighting matches; the HUD is a small scratch window.
    """
    b = buffers.result.open()
    filetype = b.options['filetype']
    setlocal_fixed_buffer(buffers.original.open(), filetype)
    setlocal_fixed_buffer(buffers.one.open(), filetype)
    setlocal_fixed_buffer(buffers.two.open(), filetype)
    buffers.result.open()
    init_cur_window_wrap()
    # HUD buffer: unlisted scratch buffer that is never written to disk.
    b = buffers.hud.open()
    w = vim.current.window
    b.options['swapfile'] = False
    b.options['modifiable'] = False
    b.options['buflisted'] = False
    b.options['buftype'] = 'nofile'
    b.options['undofile'] = False
    w.options['list'] = False
    # following needs to be done with a vim command otherwise syntax not on
    # b.options['filetype'] = 'splice'
    vim.command('setlocal filetype=splice')
    w.options['wrap'] = False
    vim.command('resize ' + setting('hud_size', '3'))
def create_hud():
    """Open the window/buffer used as the Splice heads-up display."""
    vim.command('new __Splice_HUD__')
def init():
    """Entry point: clean the result buffer, build the HUD and activate the
    configured initial mode (falling back to 'grid' on invalid settings)."""
    process_result()
    create_hud()
    setlocal_buffers()
    vim.options['hidden'] = True
    initial_mode = setting('initial_mode', 'grid').lower()
    # Fixed typo in the log message: "inital" -> "initial".
    log("INIT: initial mode " + initial_mode)
    if initial_mode not in ['grid', 'loupe', 'compare', 'path']:
        initial_mode = 'grid'
    modes.current_mode = getattr(modes, initial_mode)
    modes.current_mode.activate()
| StarcoderdataPython |
9634143 | import math
def print_n(s, n):
    """Print *s* on its own line *n* times (prints nothing when n <= 0)."""
    for _ in range(n):
        print(s)
def mysqrt(a):
    """Return the square root of *a* via Newton's (Babylonian) method.

    Bug fix: the original re-initialised the guess ``x = a / 2.4`` on every
    loop iteration and placed the ``x = y`` update *after* ``return``, so
    the iteration never refined its guess and looped forever for almost all
    inputs. The guess is now seeded once and updated each pass.

    Assumes a > 0 (a == 0 would divide by zero on the first step).
    NOTE(review): the absolute epsilon of 1e-13 can be below one float ulp
    for very large *a*; fine for the moderate values used in this script.
    """
    epsilon = 0.0000000000001
    x = a / 2.4  # initial guess; any positive seed converges
    while True:
        y = (x + a / x) / 2
        if abs(y - x) < epsilon:
            return y
        x = y
def test_square_root(a):
    """Print mysqrt(a) next to math.sqrt(a) together with their difference."""
    print(a, 'a')
    print(mysqrt(a), 'mysqrt(a)')
    print(math.sqrt(a), 'math.sqrt(a)')
    print(abs(mysqrt(a) - (math.sqrt(a))))
    print('diff')


test_square_root(9)
| StarcoderdataPython |
242780 | # Generated by Django 3.2.4 on 2021-08-05 12:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the nullable integer ``quotient``
    ("Quotient Familial") column to the ``family`` model."""

    dependencies = [
        ('registration', '0020_alter_child_family'),
    ]

    operations = [
        migrations.AddField(
            model_name='family',
            name='quotient',
            field=models.IntegerField(blank=True, null=True, verbose_name='Quotient Familial'),
        ),
    ]
| StarcoderdataPython |
8094114 | from hashlib import md5
import random
import re
import string
def hash(s: str):
    """Return the 32-character hex MD5 digest of *s* (stringified first).

    NOTE: shadows the builtin ``hash`` by design of this module's API.
    """
    digest = md5(str(s).encode('utf')).hexdigest()
    return digest[0:32]
def hash_with_prefix(prefix: str, s: str = None):
    """Build a UUID-like id ``pppp-xxxx-xxxx-xxxx-xxxx-xxxx``.

    The first 4-char group comes from ``md5(prefix)``; the remaining five
    groups are consecutive 4-char slices of ``md5(s)``. When *s* is falsy a
    random 20-letter seed is generated, making the id random.

    Fix/clarification: *s* is always hashed before slicing, so the id is
    well formed (six 4-char groups) for caller-supplied strings of any
    length, instead of slicing short raw input into empty groups.
    """
    if not s:
        letters = string.ascii_lowercase
        s = ''.join(random.choice(letters) for i in range(20))
    s = md5(bytes('{}'.format(s), 'utf')).hexdigest()
    return '{}-{}-{}-{}-{}-{}'.format(
        md5(
            bytes(
                '{}'.format(prefix), 'utf'
            )
        ).hexdigest()[0:4],
        s[0:4],
        s[4:8],
        s[8:12],
        s[12:16],
        s[16:20],
    )
def get_token(headers, params=None):
    """Extract a bearer token from request params or headers.

    Precedence: ``params['access_token']`` first, then the second
    whitespace-separated field of the ``Authorization`` header, then of
    the ``access-token`` header. Returns None when nothing is present.
    """
    if params and params.get('access_token'):
        return params.get('access_token')
    header_value = headers.get('Authorization') or headers.get('access-token')
    if header_value:
        return header_value.split(" ")[1]
    return None
| StarcoderdataPython |
5010620 | import logging
import re
import typing as t
from typing import TYPE_CHECKING
from gkeepapi.node import ColorValue, TopLevelNode
if TYPE_CHECKING:
from gkeep.api import KeepApi
logger = logging.getLogger(__name__)
# Regexes for the mini query language used to filter notes.
FLAG_RE = re.compile(r"[+\-=]\w+", re.I)  # e.g. "+p", "-t", "=a"
COLOR_RE = re.compile(r"\b(?:c|colors?):([\w,]+)\b", re.I)
LABEL_RE = re.compile(r'\b(?:l|labels?):(?:"([^"]+)"|([\w,]+)\b)', re.I)

# Short flag letter -> node attribute it toggles.
FLAG_MAP = {
    "p": "pinned",
    "a": "archived",
    "t": "trashed",
}

# A predicate applied to a node's boolean attribute value.
FlagTest = t.Callable[[bool], bool]
def flag(ftype: str) -> FlagTest:
    """Translate a flag prefix into a boolean predicate.

    '+' matches any value, '-' matches when the attribute is unset and
    '=' matches when it is set; any other prefix raises ValueError.
    """
    predicates = {
        "+": lambda _: True,
        "-": lambda value: not value,
        "=": lambda value: value,
    }
    try:
        return predicates[ftype]
    except KeyError:
        raise ValueError(f"Unknown flag {ftype}")
class Query:
    """Parsed representation of a note-search query string.

    A query mixes free text with flags (``+p``/``-a``/``=t``), color
    filters (``color:blue``) and label filters (``label:foo`` or
    ``label:"some name"``). Parsing happens eagerly in ``__init__``;
    problems are collected in ``parse_errors`` rather than raised.
    """

    def __init__(self, query: str = ""):
        self._parse_errors: t.List[str] = []
        self.query = query
        # None means "no filter of this kind" (distinct from an empty set).
        self.labels: t.Optional[t.List[str]] = None
        self.colors: t.Optional[t.Set[ColorValue]] = None
        # Free-text part of the query after flags/colors/labels are removed.
        self.match_str: str = ""
        # Default flag behaviour: any pinned state, not trashed, not archived.
        self.pinned: FlagTest = flag("+")
        self.trashed: FlagTest = flag("-")
        self.archived: FlagTest = flag("-")
        # Compiled predicate; built lazily by compile()/match().
        self._test: t.Optional[t.Callable[[TopLevelNode], bool]] = None
        self._parse()

    def __eq__(self, other: t.Any) -> bool:
        # Queries compare equal to other queries or to plain strings with
        # the same raw query text.
        if other is None:
            return False
        elif isinstance(other, str):
            return self.query == other
        else:
            return self.query == other.query

    def __hash__(self) -> int:
        return hash(self.query)

    def __str__(self) -> str:
        return self.query

    def __repr__(self) -> str:
        return f"Query({self.query})"

    def _parse(self) -> None:
        """Extract flags, colors and labels; the remainder is free text."""
        query = self.query
        for flag_match in FLAG_RE.finditer(query):
            flag_str = flag_match[0]
            flag_type = flag_str[0]
            # A single token may set several flags, e.g. "+pa".
            for key in flag_str[1:]:
                try:
                    attr = FLAG_MAP[key]
                except KeyError:
                    self._parse_errors.append(f"Unknown flag '{key}'")
                else:
                    setattr(self, attr, flag(flag_type))
        for color_match in COLOR_RE.finditer(query):
            for col in color_match[1].split(","):
                self._add_color(col)
        for label_match in LABEL_RE.finditer(query):
            # Group 1 is a quoted label; group 2 a comma-separated list.
            if label_match[1]:
                self._add_label(label_match[1])
            else:
                for label in label_match[2].split(","):
                    self._add_label(label)
        # Strip everything recognised above; what remains is the free-text
        # search string.
        query = re.sub(FLAG_RE, "", query)
        query = re.sub(COLOR_RE, "", query)
        query = re.sub(LABEL_RE, "", query)
        self.match_str = query.strip()

    @property
    def parse_errors(self) -> t.List[str]:
        """Problems found while parsing (label resolution adds more in compile)."""
        return self._parse_errors

    def _add_label(self, label: str) -> None:
        # Labels are matched case-insensitively, hence the lower().
        if self.labels is None:
            self.labels = []
        self.labels.append(label.lower())

    def _add_color(self, color: t.Union["ColorValue", str]) -> None:
        if isinstance(color, str):
            color = ColorValue(color.upper())
        if self.colors is None:
            self.colors = set()
        self.colors.add(color)

    def compile(self, keep: "KeepApi") -> t.Callable[["TopLevelNode"], bool]:
        """Build (and cache) a predicate deciding whether a node matches.

        Label names are resolved to ids through *keep*; unknown labels are
        recorded in ``parse_errors``.
        """
        labels = None
        if self.labels:
            labels = set()
            for name in self.labels:
                label = keep.find_unique_label(name)
                if label is not None:
                    labels.add(label.id)
                else:
                    self._parse_errors.append(f"Unknown label '{name}'")
        search_re = None
        if self.match_str:
            # Collapse whitespace
            pattern = re.sub(r"\s+", r" ", self.match_str)
            # Escape regex patterns
            pattern = re.escape(pattern)
            # Convert space (which was turned into '\\ ' by re.escape) into \s+,
            # which will search for any amount of whitespace
            # NOTE(review): on Python >= 3.7 re.escape no longer escapes
            # spaces, so this substitution may be a no-op and whitespace is
            # then matched literally (single spaces) — confirm target Python.
            pattern = re.sub(r"\\ ", r"\\s+", pattern)
            search_re = re.compile(pattern, re.I)

        def test(node: "TopLevelNode") -> bool:
            # Node must carry at least one of the requested labels ...
            if labels is not None:
                for label in node.labels.all():
                    if label.id in labels:
                        break
                else:
                    return False
            # ... have one of the requested colors ...
            if self.colors is not None and node.color not in self.colors:
                return False
            # ... satisfy all three state flags ...
            if not self.pinned(node.pinned):
                return False
            if not self.trashed(node.trashed):
                return False
            if not self.archived(node.archived):
                return False
            # ... and contain the free-text pattern in title or body.
            if search_re is not None:
                return bool(search_re.search(node.title) or search_re.search(node.text))
            return True

        self._test = test
        return test

    def match(self, keep: "KeepApi", node: "TopLevelNode") -> bool:
        """Check *node* against the query, compiling on first use."""
        if self._test is None:
            test = self.compile(keep)
            return test(node)
        return self._test(node)
| StarcoderdataPython |
5179102 | import re
from lxml.html import fromstring, tostring
from lxml.etree import SubElement
class Mention(object):
    """Replaces `@mention`'s with a URL pointing to the user's profile.

    The URL can be configured by setting the ``user_url`` key in the `context`.
    By default, it is set to `/users/{username}`. ``{username}`` will always
    be replaced by everything after the `@` symbol in the `@mention`.

    Ignored usernames can be configured by setting the ``ignore_username``
    key in the `context`. By default, it ignores `mention` and `mentioning`.
    This should always be an iterable.

    The class applied to `@mention` links defaults to ``mention``. This can
    be changed by setting the ``class`` key in the `context`.
    """

    # Tags inside which @mentions are never linked.
    PARENT_IGNORES = ('pre', 'code', 'a')
    MENTION_RE = r'\B@(?P<username>\w+)'

    def __init__(self, context={}):
        """Creates a new `@mention` filter.

        :param dict context: The filters context

        NOTE(review): the mutable default ``context={}`` is safe only
        because it is never mutated; ``self.context`` is a fresh dict.
        """
        self.context = {
            'user_url': '/users/{username}',
            'ignore_username': ['mention', 'mentioning'],
            'class': 'mention'
        }
        self.context.update(context)

    def __call__(self, content):
        """Runs the filter and replaces all `@mention`'s with links"""
        fragment = fromstring(content)
        for index, el in enumerate(fragment.findall(".//*")):
            text = el.text
            # Skip elements whose text clearly contains no '@'.
            # NOTE(review): when el.text is None this check does NOT skip,
            # and _mentioned_logins would pass None to re.findall —
            # presumably such elements never reach this path; confirm.
            if text and '@' not in text: continue
            if self._should_ignore(el): continue
            el.getparent().replace(el, self._add_mention_links(el))
        return tostring(fragment)

    def _should_ignore(self, element):
        """Checks to see if an `@mention` should be ignored.

        This will check all parent elements to see if it is contained in
        any of the `PARENT_IGNORES`.

        :param lxml.html.HtmlElement element: The HTML element containing
            the `@mention`.
        :returns: True if this `@mention` should be ignored, otherwise False
        """
        # Recurses up the tree; the root's parent is None, ending recursion.
        if element is None: return False
        if element.tag in self.PARENT_IGNORES: return True
        return self._should_ignore(element.getparent())

    def _mentioned_logins(self, element):
        """Generator that yields each distinct, non-ignored `@mention`."""
        text = element.text
        mentions = set(re.findall(self.MENTION_RE, text))
        for mention in mentions:
            if mention not in self.context['ignore_username']:
                yield "@{0}".format(mention)

    def _add_mention_links(self, element):
        """Replaces `@mentions` with links

        :returns: A new element with `@mentions` replaced with anchors.
        """
        # Serialize, substitute anchor markup textually, then re-parse.
        text = tostring(element)
        for mention in self._mentioned_logins(element):
            url = self.context['user_url'].format(username=mention[1:])
            anchor = SubElement(element, "a")
            anchor.attrib['href'] = url
            anchor.attrib['class'] = self.context['class']
            anchor.text = mention
            text = re.sub(r'{0}\b'.format(mention), tostring(anchor), text)
        return fromstring(text)
| StarcoderdataPython |
322245 | <filename>steganogan/cli.py
# -*- coding: utf-8 -*-
import argparse
from steganogan.models import SteganoGAN
def _get_steganogan(args):
    """Instantiate a SteganoGAN model from the parsed CLI arguments.

    Loads either a pretrained pickle (``--path``) or a named architecture.
    """
    kwargs = {
        'cuda': not args.cpu,
        'verbose': args.verbose,
    }
    if args.path:
        kwargs['path'] = args.path
    else:
        kwargs['architecture'] = args.architecture
    return SteganoGAN.load(**kwargs)
def _encode(args):
    """Hide ``args.message`` inside ``args.cover`` and write ``args.output``."""
    model = _get_steganogan(args)
    model.encode(args.cover, args.output, args.message)
def _decode(args):
    """Recover and print the message hidden in ``args.image``.

    Errors are reported on stdout rather than raised (CLI boundary).
    """
    try:
        model = _get_steganogan(args)
        decoded = model.decode(args.image)
        if args.verbose:
            print('Message successfully decoded:')
        print(decoded)
    except Exception as e:
        print('ERROR: {}'.format(e))
def _get_parser():
    """Build the argparse CLI: shared options plus encode/decode subcommands."""
    # Parent Parser - Shared options
    parent = argparse.ArgumentParser(add_help=False)
    parent.add_argument('-v', '--verbose', action='store_true', help='Be verbose')
    # Architecture and pretrained-path are mutually exclusive model sources.
    group = parent.add_mutually_exclusive_group()
    group.add_argument('-a', '--architecture', default='dense',
                       choices={'basic', 'dense', 'residual'},
                       help='Model architecture. Use the same one for both encoding and decoding')
    group.add_argument('-p', '--path', help='Load a pretrained model from a given path.')
    parent.add_argument('--cpu', action='store_true',
                        help='Force CPU usage even if CUDA is available')

    parser = argparse.ArgumentParser(description='SteganoGAN Command Line Interface')
    subparsers = parser.add_subparsers(title='action', help='Action to perform')
    # No subcommand -> action stays None and main() prints help.
    parser.set_defaults(action=None)

    # Encode Parser
    encode = subparsers.add_parser('encode', parents=[parent],
                                   help='Hide a message into a steganographic image')
    encode.set_defaults(action=_encode)
    encode.add_argument('-o', '--output', default='output.png',
                        help='Path and name to save the output image')
    encode.add_argument('cover', help='Path to the image to use as cover')
    encode.add_argument('message', help='Message to encode')

    # Decode Parser
    decode = subparsers.add_parser('decode', parents=[parent],
                                   help='Read a message from a steganographic image')
    decode.set_defaults(action=_decode)
    decode.add_argument('image', help='Path to the image with the hidden message')

    return parser
def main():
    """CLI entry point: parse arguments and dispatch to the chosen action."""
    parser = _get_parser()
    args = parser.parse_args()
    if args.action is None:
        # No subcommand was given: show usage and exit.
        parser.print_help()
        parser.exit()
    args.action(args)
| StarcoderdataPython |
1836651 | <reponame>simo-tuomisto/portfolio
import numpy as np
import matplotlib.pyplot as mpl
import sys
from glob import glob
import os.path
import re
if __name__=="__main__":
    # Usage: python script.py <cityfile> <routefolder>
    # NOTE(review): this snippet uses `xrange`, i.e. Python 2.
    cityfile = sys.argv[1]
    routefolder = sys.argv[2]
    # Route files are named like "mrate_<rate>-seed_<seed>.route".
    namereg = re.compile('mrate\_(?P<mrate>\d+)-seed\_(?P<seed>\d+)\.route')
    routefiles = glob(os.path.join(routefolder,'*.route'))
    # City coordinates: x/y columns after a 9-line header.
    cities = np.loadtxt(cityfile, skiprows=9, delimiter=' ').T
    x = cities[0]
    y = cities[1]
    routedict = dict()  # migration rate -> list of routes (0-based index arrays)
    timedict = dict()   # migration rate -> list of elapsed times
    for route in sorted(routefiles):
        match = namereg.match(os.path.basename(route))
        if match != None:
            groups = match.groupdict()
            mrate = int(groups['mrate'])
            if not mrate in routedict:
                routedict[mrate] = []
                timedict[mrate] = []
            with open(route, 'r') as f:
                lines = f.readlines()
            # Last line holds the route; third-to-last the elapsed time.
            # NOTE(review): replace(' ',' ',1) looks like a no-op — likely a
            # double space collapsed by whitespace mangling; confirm.
            routeline = lines[-1].strip().replace(' ',' ',1).split()
            try:
                time = float(lines[-3])
                timedict[mrate].append(time)
            except:
                # Files without a time line are silently skipped.
                pass
            # Routes are stored 1-based in the files; convert to 0-based.
            route = np.asarray(routeline,dtype=int)-1
            routedict[mrate].append(route)
    mrate_arr = []
    length_arr = []
    nplots = 10  # plot at most this many per-rate route figures
    for mrate, routes in sorted(routedict.items()):
        if nplots > 0:
            figure = mpl.figure(facecolor='white',figsize=(9,6))
        length = 0.0
        routelen = len(routes[0])
        mpl.plot(x[:routelen],y[:routelen],'k*',label='Cities')
        # At most 100 routes per rate contribute to the average.
        for route,n in zip(routes,xrange(100)):
            routex = x[route]
            routey = y[route]
            # Close the tour by returning to the starting city.
            routex = np.append(routex, routex[0])
            routey = np.append(routey, routey[0])
            # Accumulate squared edge lengths of the tour.
            # NOTE(review): this sums *squared* coordinate differences, not
            # Euclidean distances — presumably fine for relative comparison.
            for i in range(0, len(routex)-1):
                j = np.mod(i+1,len(routex)-1)
                length += np.power(routex[i]-routex[j],2.0)
                length += np.power(routey[i]-routey[j],2.0)
            if nplots > 0:
                mpl.plot(routex,routey)
        length = length/float(len(routes))
        mrate_arr.append(mrate)
        length_arr.append(length)
        if nplots > 0:
            mpl.legend()
            mpl.title('Migration rate: %d, avg(L): %f' % (mrate,length))
            mpl.savefig(os.path.join(routefolder, 'mrate_%d.pdf' % mrate))
            nplots -= 1
    # Summary plot: migration rate vs. average best-route length.
    mrate_arr = np.asfarray(mrate_arr)
    length_arr = np.asfarray(length_arr)
    figure = mpl.figure(facecolor='white',figsize=(9,6))
    mpl.plot(mrate_arr, length_arr, 'r^')
    mpl.plot(mrate_arr, length_arr, 'k-')
    mpl.xlabel('Migration rate')
    mpl.ylabel('Length of best route')
    mpl.savefig(os.path.join(routefolder, 'mrate_vs_length.pdf'))
    # Summary plot: migration rate vs. average run time (when available).
    time_arr = []
    for mrate, times in sorted(timedict.items()):
        if len(times)>0:
            time = np.average(times)
            time_arr.append(time)
    if len(time_arr) > 0:
        time_arr = np.asfarray(time_arr)
        figure = mpl.figure(facecolor='white',figsize=(9,6))
        mpl.plot(mrate_arr, time_arr, 'r^')
        mpl.plot(mrate_arr, time_arr, 'k-')
        mpl.xlabel('Migration rate')
        mpl.ylabel('Time taken')
        mpl.savefig(os.path.join(routefolder, 'mrate_vs_time.pdf'))
    mpl.show()
8154269 | # -*- coding: utf-8 -*-
"""
Editor: <NAME>
School: BUPT
Date: 2018-03-01
算法思想: 查找字符串
对于一个给定的 source 字符串和一个 target 字符串,你应该在 source 字符串中找出 target 字符串出现的第一个位置(从0开始)。如果不存在,则返回 -1。
"""
class Solution:
    """
    @param: source: source string to be scanned.
    @param: target: target string containing the sequence of characters to match
    @return: a index to the first occurrence of target in source, or -1 if target is not part of source.
    """
    def strStr(self, source, target):
        # Naive substring search: slide a window of len(target) over source
        # and compare slices directly.
        if source is None or target is None or len(source) < len(target):
            return -1
        width = len(target)
        # The empty pattern matches at position 0 by convention.
        if width == 0:
            return 0
        for start in range(len(source) - width + 1):
            if source[start:start + width] == target:
                return start
        return -1
if __name__ == '__main__':
    # Parenthesised print works under both Python 2 and 3; the original
    # Python 2 print statement is a SyntaxError on Python 3.
    print(Solution().strStr("abcdabcdefg", "bcd"))
16449 | #!/usr/bin/env python3
"""Radio scheduling program.
Usage:
album_times.py [--host=HOST] PORT
Options:
--host=HOST Hostname of MPD [default: localhost]
-h --help Show this text
Prints out the last scheduling time of every album.
"""
from datetime import datetime
from docopt import docopt
from mpd import MPDClient
def album_sticker_get(client, album, sticker):
    """Gets a sticker associated with an album.

    MPD only implements stickers for songs, so the album's sticker lives on
    its first track. Returns None when the album has no tracks.
    """
    tracks = client.find("album", album)
    if not tracks:
        return None
    first = tracks[0]
    return client.sticker_get("song", first["file"], "album_" + sticker)
def list_albums(client):
    """Print albums grouped by last-scheduled timestamp, oldest first.

    Bug fix: the original appended the timestamp to the ordering list once
    per *album*, so a bucket shared by k albums was printed k times. Each
    distinct timestamp is now printed exactly once.
    """
    # Get all albums, dropping the empty name and the transitions album.
    albums = client.list("album")
    all_albums = list(
        filter(lambda a: a not in ["", "Lainchan Radio Transitions"], albums)
    )
    # Group albums by when they were last scheduled.
    albums_by_last_scheduled = {}
    for album in all_albums:
        # Get the last scheduled time, defaulting to 0.
        # TypeError is also caught: album_sticker_get returns None for an
        # album with no tracks, and int(None) raises TypeError.
        try:
            last_scheduled = int(album_sticker_get(client, album, "last_scheduled"))
        except (ValueError, TypeError):
            last_scheduled = 0
        albums_by_last_scheduled.setdefault(last_scheduled, []).append(album)
    # Print buckets oldest-first, one line per distinct timestamp.
    for last_scheduled in sorted(albums_by_last_scheduled):
        dt = datetime.utcfromtimestamp(last_scheduled)
        albums = albums_by_last_scheduled[last_scheduled]
        print("{}: {}".format(dt.strftime("%Y-%m-%d %H:%M:%S"), albums))
if __name__ == "__main__":
    args = docopt(__doc__)
    # PORT arrives as a string from docopt; it must be an integer.
    try:
        args["PORT"] = int(args["PORT"])
    except ValueError:
        print("PORT must be an integer")
        exit(1)
    # Connect to MPD before doing any work; exit code 2 distinguishes a
    # connection failure from a usage error.
    try:
        client = MPDClient()
        client.connect(args["--host"], args["PORT"])
    except Exception as e:
        print(f"could not connect to MPD: {e.args[0]}")
        exit(2)
    list_albums(client)
| StarcoderdataPython |
309362 | from fetch_lords import fetch
from scrape_lords import scrape
| StarcoderdataPython |
1642491 | <filename>tests/spec/test_spec.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Platform.sh
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from pathlib import Path
from oaspec.spec import OASpec
from oaspec.spec.spec import OASpecParser, OASpecParserError
import json
import yaml as pyyaml
from oaspec.utils import yaml
def get_test_data(file_path):
    """Resolve *file_path* relative to the repository's tests/data directory."""
    data_dir = Path.cwd() / "tests" / "data"
    return data_dir / file_path
def load_yaml(file_path):
    """Parse a YAML file (UTF-8) with the project's yaml loader."""
    path = Path(file_path)
    with path.open('r', encoding='utf-8') as handle:
        return yaml.load(handle)
def load_json(file_path):
    """Parse a JSON file (UTF-8) and return the resulting object."""
    path = Path(file_path)
    with path.open('r', encoding='utf-8') as handle:
        return json.load(handle)
class TestCreateOASpec(object):
    """Construction tests: OASpec should accept files, raw text and dicts."""

    def test_create_empty_object(self):
        # No spec argument: nothing loaded yet.
        oas = OASpec()
        assert oas._spec_file == None
        assert oas._raw_spec == None

    def test_create_empty_object_spec_is_none(self):
        # Explicit spec=None behaves exactly like no argument.
        oas = OASpec(spec=None)
        assert oas._spec_file == None
        assert oas._raw_spec == None

    def test_create_object_with_yaml_file(self):
        # A YAML file path: the path is recorded and the content parsed.
        spec_path_string = str(get_test_data("petstore-3.0.0.yaml"))
        oas = OASpec(spec=spec_path_string)
        assert str(oas._spec_file) == spec_path_string
        assert oas._raw_spec.keys() == load_yaml(spec_path_string).keys()
        assert oas._raw_spec == load_yaml(spec_path_string)

    def test_create_object_with_json_file(self):
        # A JSON file path works the same way.
        spec_path_string = str(get_test_data("petstore-3.0.0.json"))
        oas = OASpec(spec=spec_path_string)
        assert str(oas._spec_file) == spec_path_string
        assert oas._raw_spec.keys() == load_json(spec_path_string).keys()
        assert oas._raw_spec == load_json(spec_path_string)

    def test_create_object_with_yaml_raw(self):
        # Raw YAML text: parsed, but no file path is recorded.
        spec_path = get_test_data("petstore-3.0.0.yaml")
        with spec_path.open('r', encoding='utf-8') as f:
            raw_spec = f.read()
        oas = OASpec(spec=raw_spec)
        assert oas._spec_file == None
        assert oas._raw_spec.keys() == load_yaml(str(spec_path)).keys()
        assert oas._raw_spec == load_yaml(str(spec_path))

    def test_create_object_with_json_raw(self):
        # Raw JSON text behaves like raw YAML (JSON is a YAML subset).
        spec_path = get_test_data("petstore-3.0.0.json")
        with spec_path.open('r', encoding='utf-8') as f:
            raw_spec = f.read()
        oas = OASpec(spec=raw_spec)
        assert oas._spec_file == None
        assert oas._raw_spec.keys() == load_json(str(spec_path)).keys()
        assert oas._raw_spec == load_json(str(spec_path))

    def test_create_object_with_json_dict(self):
        # An already-parsed dict is accepted directly.
        spec_path = get_test_data("petstore-3.0.0.json")
        with spec_path.open('r', encoding='utf-8') as f:
            dict_spec = json.load(f)
        oas = OASpec(spec=dict_spec)
        assert oas._spec_file == None
        assert oas._raw_spec.keys() == load_json(str(spec_path)).keys()
        assert oas._raw_spec == load_json(str(spec_path))

    def test_create_object_with_yaml_dict(self):
        # A dict produced by a third-party YAML parser is also accepted.
        spec_path = get_test_data("petstore-3.0.0.yaml")
        with spec_path.open('r', encoding='utf-8') as f:
            dict_spec = pyyaml.load(f)
        oas = OASpec(spec=dict_spec)
        assert oas._spec_file == None
        assert oas._raw_spec.keys() == load_yaml(str(spec_path)).keys()
        assert oas._raw_spec == load_yaml(str(spec_path))

    def test_create_object_with_other_raw_spec(self):
        # One OASpec's raw spec can seed another OASpec.
        spec_path_string = str(get_test_data("petstore-3.0.0.yaml"))
        oas = OASpec(spec=spec_path_string)
        second_oas = OASpec(spec=oas._raw_spec)
        assert second_oas._spec_file == None
        assert second_oas._raw_spec.keys() == oas._raw_spec.keys()
        assert second_oas._raw_spec == oas._raw_spec
        assert second_oas._raw_spec.keys() == load_yaml(spec_path_string).keys()
        assert second_oas._raw_spec == load_yaml(spec_path_string)

    def test_create_object_with_loaded_yaml(self):
        # Two-step construction: empty object, then load_file() with YAML.
        spec_path_string = str(get_test_data("petstore-3.0.0.yaml"))
        oas = OASpec()
        oas.load_file(spec_path_string)
        assert str(oas._spec_file) == spec_path_string
        assert oas._raw_spec.keys() == load_yaml(spec_path_string).keys()
        assert oas._raw_spec == load_yaml(spec_path_string)

    def test_create_object_with_loaded_json(self):
        # Two-step construction with a JSON file.
        spec_path_string = str(get_test_data("petstore-3.0.0.json"))
        oas = OASpec()
        oas.load_file(spec_path_string)
        assert str(oas._spec_file) == spec_path_string
        assert oas._raw_spec.keys() == load_json(spec_path_string).keys()
        assert oas._raw_spec == load_json(spec_path_string)

    def test_create_object_with_parsed_yaml(self):
        # Two-step construction with raw text via load_raw().
        spec_path = get_test_data("petstore-3.0.0.yaml")
        spec_path_string = str(spec_path)
        with spec_path.open('r', encoding='utf-8') as f:
            raw_spec = f.read()
        oas = OASpec()
        oas.load_raw(raw_spec)
        assert oas._spec_file == None
        assert oas._raw_spec.keys() == load_yaml(spec_path_string).keys()
        assert oas._raw_spec == load_yaml(spec_path_string)

    def test_create_object_with_invalid_type(self):
        # Anything other than str/dict/None is rejected.
        with pytest.raises(TypeError):
            oas = OASpec(spec=list())
class TestOASpecParser(object):
    """Parsing tests: version extraction and parser error handling."""

    def test_parser_creation(self):
        # The parser keeps references to the spec object and its raw dict.
        spec_path_string = str(get_test_data("petstore-3.0.0.yaml"))
        oas = OASpec(spec=spec_path_string)
        parser = OASpecParser(oas, oas._raw_spec)
        assert parser.spec_object is oas
        assert parser.raw_spec is oas._raw_spec

    def test_version_parser(self):
        # parse_spec() exposes the "openapi" version on the spec object.
        spec_path_string = str(get_test_data("petstore-3.0.0.yaml"))
        oas = OASpec(spec=spec_path_string)
        oas.parse_spec()
        assert oas.openapi == "3.0.0"

    def test_invalid_version_variations(self):
        # Any version other than a supported 3.0.x must be rejected.
        spec_path_string = str(get_test_data("petstore-3.0.0.yaml"))
        oas = OASpec(spec=spec_path_string)
        oas.parse_spec()
        assert oas.openapi == "3.0.0"
        variations = [
            "1.0.0",
            "2.0.0",
            "3.0",
            "3",
            "3.100.0",
            "3.0.100"
        ]
        for ver in variations:
            oas._raw_spec["openapi"] = ver
            with pytest.raises(OASpecParserError) as excinfo:
                oas.parse_spec()
            assert "Invalid version number" in str(excinfo.value)

    def test_version_missing(self):
        # A spec without an "openapi" key cannot be parsed.
        spec_path_string = str(get_test_data("petstore-3.0.0.yaml"))
        oas = OASpec(spec=spec_path_string)
        del oas._raw_spec["openapi"]
        with pytest.raises(OASpecParserError) as excinfo:
            oas.parse_spec()
        assert "No value specified" in str(excinfo.value)

    def test_version_swagger_key(self):
        # Swagger 2.0 specs (a "swagger" key) are explicitly unsupported.
        spec_path_string = str(get_test_data("petstore-3.0.0.yaml"))
        oas = OASpec(spec=spec_path_string)
        del oas._raw_spec["openapi"]
        oas._raw_spec["swagger"] = "2.0"
        with pytest.raises(OASpecParserError) as excinfo:
            oas.parse_spec()
        assert "oaspec only supports OpenAPI" in str(excinfo.value)
| StarcoderdataPython |
5197513 | <gh_stars>1-10
import os, glob, sys
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import re
def load_data(path):
    """Load training and testing datasets based on their path

    Parameters
    ----------
    path : relative path to location of data, should be always the same (string)

    Returns
    -------
    Training and testing Dataframes
    """
    train_df = pd.read_csv(os.path.join(path, 'train.csv'))
    test_df = pd.read_csv(os.path.join(path, 'test.csv'))
    return train_df, test_df
5039848 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from ._util import midpoint_root
from ._fasttree import fasttree
from ._raxml import raxml, raxml_rapid_bootstrap
from ._iqtree import iqtree, iqtree_ultrafast_bootstrap
from ._filter import filter_table
from ._version import get_versions
from ._align_to_tree_mafft_fasttree import align_to_tree_mafft_fasttree
# The version string comes from the generated _version module; the helper
# is deleted right after use so it is not exported from this package.
__version__ = get_versions()['version']
del get_versions

# Explicit public API: the names re-exported by ``from <pkg> import *``.
__all__ = ["midpoint_root", "fasttree", "align_to_tree_mafft_fasttree",
           "raxml", "raxml_rapid_bootstrap", "iqtree", "filter_table",
           "iqtree_ultrafast_bootstrap"]
| StarcoderdataPython |
12840740 | # Copyright (c) 2020-2021, <NAME>
# License: MIT License
from pathlib import Path
import ezdxf
from ezdxf.render.forms import sphere
# Output directory for the generated drawing.
DIR = Path("~/Desktop/Outbox").expanduser()

doc = ezdxf.new()
# Separate layers so each kind of geometry can be styled/toggled on its own.
# NOTE(review): the "csg" layer is created but unused in the code visible
# here -- presumably kept for related CSG examples; confirm before removing.
doc.layers.new("form", dxfattribs={"color": 5})
doc.layers.new("csg", dxfattribs={"color": 1})
doc.layers.new("normals", dxfattribs={"color": 6})
doc.set_modelspace_vport(6, center=(5, 0))
msp = doc.modelspace()

# Build a quad-faced sphere mesh and render it as a polyface plus its
# face normals (normals on their own layer for easy hiding).
sphere1 = sphere(count=32, stacks=16, radius=1, quads=True)
sphere1.render_polyface(msp, dxfattribs={"layer": "form"})
sphere1.render_normals(msp, dxfattribs={"layer": "normals"})
doc.saveas(DIR / "sphere.dxf")
| StarcoderdataPython |
4937392 | <filename>lisa/tools/qemu_img.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from lisa.executable import Tool
class QemuImg(Tool):
    """Wrapper around the ``qemu-img`` disk-image utility."""

    @property
    def command(self) -> str:
        # Name of the executable this Tool drives.
        return "qemu-img"

    def create_diff_qcow2(self, output_img_path: str, backing_img_path: str) -> None:
        """Create a qcow2 differencing image backed by *backing_img_path*."""
        args = 'create -F qcow2 -f qcow2 -b "{}" "{}"'.format(
            backing_img_path, output_img_path
        )
        self.run(
            args,
            force_run=True,
            expected_exit_code=0,
            expected_exit_code_failure_message="Failed to create differential disk.",
        )
8075505 |
from wm_nctools import save_multiplier
import numpy as np
from ProcessMultipliers import processMultipliers as pM
# Wind-direction sectors: each entry maps a compass-bearing range (degrees)
# to a direction label and the fill value used when rasterising that sector.
# Entries 0 and 8 both map to 'n' (same fill) because north spans the
# 360/0 degree wrap-around; entry 9 is the overall-maximum layer.
indices = {
    0: {'dir': 'n', 'min': 0., 'max': 22.5, 'fill': 0},
    1: {'dir': 'ne', 'min': 22.5, 'max': 67.5, 'fill': 1},
    2: {'dir': 'e', 'min': 67.5, 'max': 112.5, 'fill': 2},
    3: {'dir': 'se', 'min': 112.5, 'max': 157.5, 'fill': 3},
    4: {'dir': 's', 'min': 157.5, 'max': 202.5, 'fill': 4},
    5: {'dir': 'sw', 'min': 202.5, 'max': 247.5, 'fill': 5},
    6: {'dir': 'w', 'min': 247.5, 'max': 292.5, 'fill': 6},
    7: {'dir': 'nw', 'min': 292.5, 'max': 337.5, 'fill': 7},
    8: {'dir': 'n', 'min': 337.5, 'max': 360., 'fill': 0},
    9: {'dir': 'max'}
}
def example():
    """Write a minimal 1x1 multiplier grid to show how save_multiplier is called."""
    grid = np.asarray([[1.0]])
    lat = np.asarray([[-24.0]])
    lon = np.asarray([[144.0]])
    # 'Ms' is the multiplier variable name stored in the NetCDF file.
    save_multiplier('Ms', grid, lat, lon, 'example.nc')
# TODO: Get rid of this. It's moving to processMultipliers.
def course_yasi_img():
    """Write one coarse 2x3 .img raster per direction, each filled with
    the direction's index value (a synthetic test fixture)."""
    tl_y = np.asarray([-16])  # top-left y (latitude, degrees)
    tl_x = np.asarray([140])  # top-left x (longitude, degrees)
    dx = 2   # cell width in degrees
    dy = -2  # cell height; negative because rows run north-to-south
    multiplier_values = np.zeros((2, 3))
    for index in indices:
        # Constant-valued grid: every cell holds the sector index.
        multiplier_values.fill(index)
        img_name = 'm4_' + indices[index]['dir'] + '.img'
        pM.createRaster(multiplier_values, tl_x, tl_y,
                        dx, dy,
                        filename=img_name)
def course_yasi_nc():
    """
    Write one coarse NetCDF multiplier file per direction.

    Kept for reference only: this was the wrong file format (the .img
    variant above superseded it).
    :return: None
    """
    multiplier_name = 'Ms'  # variable name stored in the NetCDF output
    lat = np.asarray([ -23, -20, -17, -14, -11, -8, -5])
    lon = np.asarray([137, 140, 143, 146, 149, 152, 155, 158])
    multiplier_values = np.zeros(([lat.shape[0], lon.shape[0]]))
    for index in indices:
        # Constant-valued grid: every cell holds the sector index.
        multiplier_values.fill(index)
        nc_name = 'syn_' + indices[index]['dir'] + '.nc'
        save_multiplier(multiplier_name, multiplier_values, lat, lon, nc_name)
# -------------------------------------------------------------
if __name__ == "__main__":
    # Only the .img fixture generator runs by default; the alternatives
    # are kept commented out for manual use.
    #example()
    #course_yasi_nc()
    course_yasi_img()
3518731 |
class Model(object):
    """Abstract base class for models."""

    @classmethod
    def from_config(cls, **config):
        """Build a model instance from keyword configuration.

        Subclasses must override this alternate constructor.
        """
        raise NotImplementedError
| StarcoderdataPython |
1929898 |
# This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This file follows the
# PEP8 Python style guide and uses a max-width of 120 characters per line.
#
# Author(s):
# <NAME> <www.cedricnugteren.nl>
NL = "\n"


def header():
    """Return the title block that opens the generated API reference."""
    title_lines = ["CLBlast: API reference", "================"]
    # Two blank lines follow the underline (hence three trailing newlines).
    return NL.join(title_lines) + NL * 3
def generate(routine):
    """Render the markdown API-reference section for one CLBlast routine."""
    parts = []
    # Title, separator and long description
    parts.append("x" + routine.name.upper() + ": " + routine.description + NL)
    parts.append("-------------" + NL + NL)
    parts.append(routine.details + NL + NL)
    # C++ and C signatures, each in a fenced code block
    parts.append("C++ API:" + NL)
    parts.append("```" + NL)
    parts.append(routine.routine_header_cpp(12, "") + NL)
    parts.append("```" + NL + NL)
    parts.append("C API:" + NL)
    parts.append("```" + NL)
    for flavour in routine.flavours:
        parts.append(routine.routine_header_c(flavour, 20, "") + NL)
    parts.append("```" + NL + NL)
    # Per-argument docs, plus the queue/event arguments common to all routines
    parts.append("Arguments to " + routine.name.upper() + ":" + NL + NL)
    for argument in routine.arguments_doc():
        parts.append("* " + argument + NL)
    parts.append("* `cl_command_queue* queue`: ")
    parts.append("Pointer to an OpenCL command queue associated with a context and device to execute the routine on." + NL)
    parts.append("* `cl_event* event`: ")
    parts.append("Pointer to an OpenCL event to be able to wait for completion of the routine's OpenCL kernel(s). ")
    parts.append("This is an optional argument." + NL + NL)
    # Optional requirements section (only when the routine declares any)
    if len(routine.requirements_doc()) > 0:
        parts.append("Requirements for " + routine.name.upper() + ":" + NL + NL)
        for requirement in routine.requirements_doc():
            parts.append("* " + requirement + NL)
        parts.append(NL)
    # Trailing blank lines separate successive routine sections
    parts.append(NL + NL)
    return "".join(parts)
| StarcoderdataPython |
395517 | #
# Lincense: Academic Free License (AFL) v3.0
#
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
plt.rc('text', usetex=False)
import tables as tb
import os
import tqdm
# Successive experiment runs; only the last assignment takes effect.
outputdir = 'output/'+'dsc_on_hc1_run.py.2016-03-17+11:39/'
outputdir = 'output/'+'dsc_on_hc1_run.py.2016-03-17+21:16/'
outputdir = 'output/'+'dsc_on_hc1_run.py.2016-03-23+01:56/'
outputdir = 'output/'+'dsc_on_hc1_run.py.2016-03-23+13:05/'
outputdir = 'output/'+'dsc_on_hc1_run.py.2016-03-29+12:46/'
ofile = outputdir+'result.h5'

# Load the training results from HDF5: basis matrix W, priors pi, noise
# sigma, and a number of optional per-run nodes.
fh =tb.open_file(ofile,'r')
sigma_all = fh.root.sigma.read().squeeze()
pi_all = fh.root.pi.read().squeeze()
W_all = fh.root.W.read()
H = W_all.shape[-1]  # number of basis functions (W_all is indexed [epoch, :, h])
D = W_all.shape[-2]  # dimensionality of each basis vector
# Names of all top-level nodes present in this particular result file.
h5nodes = [f.name for f in fh.root._f_list_nodes()]
# Optional nodes: present only for some runs, left at their defaults otherwise.
rseries=None
if 'rseries' in h5nodes:
    rseries = fh.root.rseries.read()
series=None
if 'series' in h5nodes:
    series = fh.root.series.read()
channel=None
if 'channel' in h5nodes:
    channel = fh.root.channel.read()
overlap=0.
# overlap=0.5
if 'overlap' in h5nodes:
    overlap = fh.root.overlap.read()
inf_poster=None
if 'infered_posterior' in h5nodes:
    inf_poster = fh.root.infered_posterior.read().squeeze()
inf_states=None
if 'infered_states' in h5nodes:
    inf_states = fh.root.infered_states.read().squeeze()
ry=None
if 'ry' in h5nodes:
    ry = fh.root.ry.read().squeeze()
rs=None
if 'rs' in h5nodes:
    rs = fh.root.rs.read().squeeze()
N = fh.root.N.read()[0]
# psz: side length if a basis were reshaped to a square patch; only used by
# commented-out code below.
psz = np.int(np.sqrt(D))
fh.close()
epochs = W_all.shape[0]
def findpeaks(a, inds, max=True):
    """Return the entries of *inds* at local extrema of *a*.

    A position i-1 is an extremum when a[i-1] is strictly greater (or, with
    ``max=False``, strictly smaller) than both neighbours a[i-2] and a[i],
    and the corresponding *inds* values are consecutive (gaps of exactly 1).
    Returns an empty array when no extremum is found.

    NOTE(review): the scan stops at len(a)-2, so an extremum at the
    second-to-last sample is never reported -- confirm whether that bound
    is intentional before changing it.
    """
    gaps = np.diff(np.asarray(inds).squeeze())
    diffs = np.diff(np.asarray(a).squeeze())
    hits = []
    for i in range(2, diffs.shape[0]):
        contiguous = np.all(gaps[i - 2:i] == 1)
        if max:
            is_extremum = a[i - 2] < a[i - 1] and a[i] < a[i - 1]
        else:
            is_extremum = a[i - 2] > a[i - 1] and a[i] > a[i - 1]
        if is_extremum and contiguous:
            hits.append(i - 1)
    if not hits:
        return np.array([])
    return inds[np.array(hits)]
# Colour-scale mode for the per-basis plots: 'global' would share one scale
# across the epoch, 'local' rescales per basis.
cscale='local'
if not os.path.exists(outputdir+'montage_images'):
    os.mkdir(outputdir+'montage_images')
if not os.path.exists(outputdir+'_images'):
    os.mkdir(outputdir+'_images')
if not os.path.exists(outputdir+'reconstructions'):
    os.mkdir(outputdir+'reconstructions')
# Plot every 5th epoch, newest first; epochs whose montage already exists
# are skipped entirely.
for e in range(0,epochs,5)[::-1]:
    # minwg = -np.max(np.abs(W_all[e]))
    # maxwg = -minwg
    minwg = np.min(W_all[e])
    maxwg = np.max(W_all[e])
    meanw = np.mean(W_all)
    if not os.path.exists('{}montage_images/W_e_{:03}.jpg'.format(outputdir, e)):
        for h in range(H):
            if os.path.exists(outputdir+'_images/'+'W_e_{:03}_h_{:03}.jpg'.format(e,h)):
                continue
            this_W = W_all[e,:,h]
            # this_W=this_W.reshape((psz,psz))
            # minwl = -np.max(np.abs(this_W))
            # maxwl = -minwl
            minwl = np.min(this_W)
            maxwl = np.max(this_W)
            meanwl = np.mean(this_W)
            # NOTE(review): maxw/minw are selected here but the axis call
            # below always uses the global limits -- confirm intent.
            if cscale == 'global':
                maxw, minw = maxwg, minwg
            elif cscale == 'local':
                maxw, minw = maxwl, minwl
            plt.plot(np.linspace(0,D/10,num=D) ,this_W)#scale in kHz
            plt.axis([0,D/10,minwg,maxwg])
            plt.savefig(outputdir+'_images/'+'W_e_{:03}_h_{:03}.jpg'.format(e,h))
            plt.clf()
            if h%30 == 0 :
                print "Finished epoch {:03} basis {:03}".format(e,h)
                print "\tPlot settings scale: '{}', min: {}, max: {}, mean:{}".format(cscale,minwg,maxwg,meanw)
        plt.clf()
        # Stitch the per-basis images into one montage, then delete the parts.
        os.system("montage -trim {}_images/W_e_{:03}_h*.jpg {}montage_images/W_e_{:03}.jpg".format(outputdir,e,outputdir,e))
        os.system("rm {}_images/W_e_{:03}_h*.jpg ".format(outputdir, e))
# os.system("convert -delay 10 {}montage_images/* {}W_training.gif".format(outputdir,outputdir))
# Reconstruction plots: only possible when the original and reconstructed
# time series were stored in the result file.
if series is not None and rseries is not None:
    # Channel 5 presumably carries the intracellular (IC) trace -- TODO confirm.
    IC = series.squeeze()[5,:].squeeze()
    series = series.squeeze()[channel,:].squeeze()
    rseries = rseries.squeeze()
    T = rseries.shape[0]
    l = 1000  # samples per plotted window
    s=0
    # Hop between consecutive data windows, derived from the overlap fraction.
    step=int(D*(1-overlap))
    reconstates = []
    recondata = []
    lims = []
    # Start/end sample of every data window n.
    for n in tqdm.tqdm(range(N),'reconstructing'):
        e = s+D
        lims.append([s,e])
        s+=step
    reconseries2 = np.zeros((lims[-1][1],))
    assert overlap<=0.5
    s=0
    lims2 = [0]
    rsts = [rs[0]]
    # import ipdb; ipdb.set_trace()
    # Stitch overlapping windows: at each overlap the window with the higher
    # posterior wins, and lims2 records the resulting cut points.
    for n in tqdm.tqdm(range(1,N),'reconstructing'):
        padding = lims2[-1]-lims[n-1][0]
        if inf_poster[n,0]>inf_poster[n-1,0]:
            reconseries2[lims2[-1]:lims[n-1][1]]=ry[n-1][padding:]
            # reconseries2[lims[n-1][0]:lims[n-1][1]]=ry[n-1]
            reconseries2[lims[n][0]:lims[n][1]]=ry[n]
            lims2.append(lims[n][0])
            # rsts.append(rs[n])
        else:
            reconseries2[lims[n][0]:lims[n][1]]=ry[n]
            reconseries2[lims2[-1]:lims[n-1][1]]=ry[n-1][padding:]
            # rsts.append(rs[n])
            lims2.append(lims[n-1][1])
        # Collapse duplicate cut points so lims2/rsts stay aligned.
        if lims2[-1]==lims2[-2]:
            lims2.pop()
            rsts.pop()
        # else:
        rsts.append(rs[n])
    lims2=np.array(lims2)
    rsts=np.array(rsts)
    # reconstates = np.array(reconstates)
    # lims3 = np.array(lims3)
    # Shared axis limits across all windows.
    mins = np.min(series)
    maxs = np.max(series)
    minrs = np.min(rseries)
    maxrs = np.max(rseries)
    minic = np.min(IC)
    maxic = np.max(IC)
    minb = np.minimum(mins,minrs)
    maxb = np.maximum(maxs,maxrs)
    c=0
    # plt.clf()
    # import ipdb; ipdb.set_trace() # breakpoint 3ef80612 //
    c=0
    # One 3-panel figure per window of l samples: (1) original vs
    # reconstruction, (2) inferred state labels, (3) IC trace with peaks.
    for s in range(0,T-l,l):
        # for s in tqdm.tqdm(range(0,T-l,l),'plotting'):
        # for s in tqdm.tqdm(range(30000,T-l,l),'plotting'):
        fig = plt.figure(1,(10,15))
        ax1 = fig.add_subplot(3,1,1)
        orig = series[s:s+l]
        recon = reconseries2[s:s+l]
        # trsts = reconstates[s/step:s/step+l/step]
        thisIC = IC[s:s+l]
        xdata = np.linspace(s,s+l, l)
        # Cut points (and their states) that fall inside this window.
        these_lims = lims2[lims2>=s]
        these_lims = these_lims[these_lims<s+l]
        trsts = rsts[lims2>=s]
        trsts = trsts[:these_lims.shape[0]]
        # xdata = np.linspace(s,s+l, l)
        # print xdata.shape,orig.shape,recon.shape
        ax1.plot(xdata,orig,label='original')
        ax1.plot(xdata,recon,label='reconstruction')
        ax1.axis([s,s+l,minb,maxb],fontsize=16)
        ax1.tick_params(axis='both',labelsize=16)
        # ax.yticks(fontsize=16)
        handles, labels = ax1.get_legend_handles_labels()
        lgd1 = ax1.legend(handles,labels, loc='upper right', bbox_to_anchor=(1.,1.),fontsize=9)
        ax1.grid('on')
        # plt.savefig(outputdir+'reconstructions/'+'series_{}_{}.jpg'.format(s,s+l))
        # if not os.path.exists('{}montage_images/orig_{:03}.jpg'.format(outputdir, e)):
        # Middle panel: print each hidden unit's state value as tiny text.
        ax2 = fig.add_subplot(3,1,2)
        ax2.axis([s,s+l,0,1],fontsize=16)
        for w in range(trsts.shape[0]):
            # for w in tqdm.tqdm(range(trsts.shape[0]),'point',nested=True):
            for h in range(H):
                width = these_lims[w]+2
                height = float(h)/H
                # if these_lims[w,0]==these_lims[w,1]:
                #     continue
                # print width,height
                # if trsts[w,h]!=0:
                ax2.text(width,height,'{}'.format(trsts[w,h]),fontsize=5)
        ax2.axis('off')
        # Bottom panel: IC trace with detected peaks (threshold 200) marked
        # by translucent red rectangles.
        ax3 = fig.add_subplot(3,1,3)
        ax3.plot(xdata,thisIC,label='IC')
        thisICg200 = np.argwhere(thisIC>200)
        # print thisICg200,thisIC[thisICg200]
        peaks = findpeaks(thisIC[thisICg200],thisICg200)
        if peaks.shape[0]>0:
            ploc = peaks
            for p in ploc:
                # print (p[0]-25.)/l,0.01,50./l,0.99
                pat = patches.Rectangle((s+p[0]-25,minic),50.,4*(maxic-minic),fill=True,alpha=0.3,color='red')
                ax3.add_patch(pat)
                # ax.axvspan(p[0]-25,p[0]+25,color='red',alpha=0.5)
        ax3.axis([s,s+l,minic,maxic],fontsize=16)
        handles, labels = ax3.get_legend_handles_labels()
        lgd2 = ax3.legend(handles, labels, loc='upper right', bbox_to_anchor=(1.,1.),fontsize=9)
        ax3.tick_params(axis='both',labelsize=16)
        ax3.grid('on')
        # Draw the window cut points through all three panels.
        for i in range(these_lims.shape[0]):
            ax1.axvline(x=these_lims[i],ymin=0,ymax=1,c="red",linewidth=.5,zorder=0, clip_on=False)
            ax2.axvline(x=these_lims[i],ymin=-0.2,ymax=1.2,c="green",linewidth=.5,zorder=0, clip_on=False)
            ax3.axvline(x=these_lims[i],ymin=0,ymax=1,c="blue",linewidth=.5,zorder=0, clip_on=False)
        fig.savefig(outputdir+'reconstructions/'+'series_{}_{}_n.jpg'.format(s,s+l), bbox_extra_artists=(lgd1,lgd2), bbox_inches = 'tight')
        plt.close(fig)
        c+=1
# Always plot the learned noise level over training epochs.
plt.clf()
plt.plot(sigma_all,label='$\sigma$')
plt.savefig(outputdir+'sigma.jpg')
plt.clf()
| StarcoderdataPython |
11379307 | <filename>pynoob/simpleprogram.py
__author__ = 'silvio'
def main(args):
    """Entry point; *args* is accepted for CLI compatibility but unused."""
    greeting = 'hello'
    print(greeting)
| StarcoderdataPython |
8037965 | <gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
#import tensorflow.contrib.eager as tfe
import math
import train
from tqdm import tqdm
import pandas as pd
#
def test(model, dataset, num_dataset, conf, f_val=False, epoch=0, f_val_snn=False):
    """Evaluate *model* on *dataset* and return ``(avg_loss, accuracy%, 0.0)``.

    In SNN mode (``conf.nn_mode=='SNN'`` or *f_val_snn*) accuracy is
    tracked at several simulation time points; otherwise a single ANN
    forward pass is evaluated per batch.  When *f_val* is True only the
    final accuracy is computed and progress/printing is suppressed.
    """
    avg_loss = tf.keras.metrics.Mean('loss')
    if conf.nn_mode=='SNN' or f_val_snn:
        accuracy_times = []
        accuracy_result = []
        # Time steps at which accuracy snapshots are taken (always
        # including the final step).
        accuracy_time_point = list(range(conf.time_step_save_interval,conf.time_step,conf.time_step_save_interval))
        accuracy_time_point.append(conf.time_step)
        argmax_axis_predictions=1
        num_accuracy_time_point=len(accuracy_time_point)
        if f_val==False:
            print('accuracy_time_point')
            print(accuracy_time_point)
            print('num_accuracy_time_point: {:d}'.format(model.num_accuracy_time_point))
        # One accuracy metric per snapshot time point.
        for i in range(num_accuracy_time_point):
            accuracy_times.append(tf.keras.metrics.Accuracy('accuracy'))
        num_batch=int(math.ceil(float(num_dataset)/float(conf.batch_size)))
        print_loss = True
        if conf.f_train_time_const and print_loss:
            # Per-batch time-constant training losses (list_tc/list_td are
            # allocated but unused in the code visible here).
            list_loss_prec = list(range(num_batch))
            list_loss_min = list(range(num_batch))
            list_loss_max = list(range(num_batch))
            list_tc = list(range(num_batch))
            list_td = list(range(num_batch))
        if f_val==False:
            pbar = tqdm(range(1,num_batch+1),ncols=80)
            pbar.set_description("batch")
        if f_val_snn:
            model.f_done_preproc = False
        idx_batch=0
        for (images, labels_one_hot) in dataset:
            labels = tf.argmax(labels_one_hot,axis=1,output_type=tf.int32)
            f_resize_output = False
            # Pad the last (short) batch with zeros so the model always
            # sees a full batch; the padding is stripped again afterwards.
            if conf.batch_size != labels.shape:
                concat_dim = conf.batch_size-labels.numpy().shape[0]
                f_resize_output = True
                labels = tf.concat([labels,tf.zeros(shape=[concat_dim],dtype=tf.int32)],0)
                images = tf.concat([images,tf.zeros(shape=(concat_dim,)+tuple(images.shape[1:]),dtype=images.dtype)],0)
            if idx_batch!=-1:
                # predictions_times - [saved time step, batch, output dim]
                predictions_times = model(images, f_training=False, f_val_snn=f_val_snn, epoch=epoch)
                if f_resize_output:
                    labels = labels[0:conf.batch_size-concat_dim]
                    predictions_times = predictions_times[:,0:conf.batch_size-concat_dim,:]
                if predictions_times.shape[1] != labels.numpy().shape[0]:
                    predictions_times = predictions_times[:,0:labels.numpy().shape[0],:]
                    # NOTE(review): tf.reshape result is discarded here, so
                    # this line has no effect -- confirm intent.
                    tf.reshape(predictions_times,(-1,)+labels.numpy().shape)
                if f_val:
                    # Validation: only the final time point matters.
                    predictions = predictions_times[-1]
                    accuracy = accuracy_times[-1]
                    accuracy(tf.argmax(predictions,axis=argmax_axis_predictions,output_type=tf.int32), labels)
                else:
                    # Full evaluation: update accuracy at every time point.
                    for i in range(num_accuracy_time_point):
                        predictions=predictions_times[i]
                        accuracy = accuracy_times[i]
                        accuracy(tf.argmax(predictions,axis=argmax_axis_predictions,output_type=tf.int32), labels)
                # Loss is always computed on the final-time-point output.
                predictions = predictions_times[-1]
                avg_loss(train.loss_cross_entoropy(predictions,labels_one_hot))
                if conf.f_train_time_const and print_loss:
                    [loss_prec, loss_min, loss_max] = model.get_time_const_train_loss()
                    list_loss_prec[idx_batch]=loss_prec.numpy()
                    list_loss_min[idx_batch]=loss_min.numpy()
                    list_loss_max[idx_batch]=loss_max.numpy()
                if f_val==False:
                    pbar.update()
                if conf.f_train_time_const:
                    print("idx_batch: {:d}".format(idx_batch))
                    num_data=(idx_batch+1)*conf.batch_size+conf.time_const_num_trained_data+(epoch)*conf.num_test_dataset
                    print("num_data: {:d}".format(num_data))
                    # Periodically dump the trained time constants/delays of
                    # every neuron layer (except fc3) to a text file.
                    if num_data%conf.time_const_save_interval==0:
                        fname = conf.time_const_init_file_name + '/' + conf.model_name
                        fname+="/tc-{:d}_tw-{:d}_itr-{:d}".format(conf.tc,conf.time_window,num_data)
                        if conf.f_train_time_const_outlier:
                            fname+="_outlier"
                        print("save time constant: file_name: {:s}".format(fname))
                        f = open(fname,'w')
                        # time const
                        for name_neuron, neuron in model.list_neuron.items():
                            if not ('fc3' in name_neuron):
                                f.write("tc,"+name_neuron+","+str(neuron.time_const_fire.numpy())+"\n")
                        f.write("\n")
                        # time delay
                        for name_neuron, neuron in model.list_neuron.items():
                            if not ('fc3' in name_neuron):
                                f.write("td,"+name_neuron+","+str(neuron.time_delay_fire.numpy())+"\n")
                        f.close()
            idx_batch += 1
        #
        if f_val_snn:
            # NOTE(review): `assert False` makes defused_bn() unreachable;
            # this looks like a deliberately disabled code path -- confirm.
            assert False
            model.defused_bn()
        if f_val == False:
            for i in range(num_accuracy_time_point):
                accuracy_result.append(accuracy_times[i].result().numpy())
            print('')
            ret_accu = 100*accuracy_result[-1]
        else:
            ret_accu = 100*accuracy_times[-1].result().numpy()
        if f_val == False:
            pd.set_option('display.float_format','{:.4g}'.format)
            # Per-time-step summary: accuracy and average spike counts.
            df=pd.DataFrame({'time step': model.accuracy_time_point, 'accuracy': accuracy_result, 'spike count': model.total_spike_count_int[:,-1]/num_dataset, 'spike_count_c1':model.total_spike_count_int[:,0]/num_dataset, 'spike_count_c2':model.total_spike_count_int[:,1]/num_dataset})
            df.set_index('time step', inplace=True)
            print(df)
            if conf.f_save_result:
                # ts: time step
                # tssi: time step save interval
                #f_name_result = conf.path_result_root+'/'+conf.date+'_'+conf.neural_coding
                #f_name_result = conf.path_result_root+'/'+conf.input_spike_mode+conf.neural_coding+'_ts-'+str(conf.time_step)+'_tssi-'+str(conf.time_step_save_interval)
                f_name_result = '{}/{}_{}_ts-{}_tssi-{}_vth-{}'.format(conf.path_result_root,conf.input_spike_mode,conf.neural_coding,str(conf.time_step),str(conf.time_step_save_interval),conf.n_init_vth)
                if conf.neural_coding=="TEMPORAL":
                    f_name_result += outfile_name_temporal(conf)
                f_name_result += '.xlsx'
                df.to_excel(f_name_result)
                print("output file: "+f_name_result)
            if conf.f_train_time_const and print_loss:
                df=pd.DataFrame({'loss_prec': list_loss_prec, 'loss_min': list_loss_min, 'loss_max': list_loss_max})
                fname="./time-const-train-loss_b-"+str(conf.batch_size)+"_d-"+str(conf.num_test_dataset)+"_tc-"+str(conf.tc)+"_tw-"+str(conf.time_window)+".xlsx"
                df.to_excel(fname)
            print('f write date: '+conf.date)
            if conf.verbose_visual:
                model.figure_hold()
    else:
        # Plain ANN evaluation: single forward pass per batch.
        accuracy=tf.metrics.Accuracy('accuracy')
        if f_val==False:
            num_batch=int(math.ceil(float(conf.num_test_dataset)/float(conf.batch_size)))
            pbar = tqdm(range(1,num_batch+1),ncols=80)
            pbar.set_description("batch")
        idx_batch = 0
        for (images, labels_one_hot) in dataset:
            if idx_batch!=-1:
                predictions = model(images, f_training=False, epoch=epoch)
                accuracy(tf.argmax(predictions,axis=1,output_type=tf.int32), tf.argmax(labels_one_hot,axis=1,output_type=tf.int32))
                avg_loss(train.loss_cross_entoropy(predictions,labels_one_hot))
                if f_val==False:
                    pbar.update()
            idx_batch += 1
        ret_accu = 100*accuracy.result()
    if conf.f_write_stat:
        model.save_activation()
    return avg_loss.result(), ret_accu, 0.0
############################################
# output file name
############################################
def outfile_name_temporal(conf):
    """Build the temporal-coding suffix appended to result file names.

    Encodes time constant, time window, fire start/duration, and (when
    applicable) the number of trained data points and outlier/training
    markers from *conf*.
    """
    parts = ['_tc-{}_tw-{}_tfs-{}_tfd-{}'.format(
        conf.tc, conf.time_window,
        int(conf.time_fire_start), int(conf.time_fire_duration))]
    if conf.f_load_time_const:
        trained = conf.time_const_num_trained_data
        if conf.f_train_time_const:
            # While still training, count the current test pass as well.
            trained += conf.num_test_dataset
        parts.append('_trained_data-{}'.format(trained))
    if conf.f_train_time_const:
        if conf.f_train_time_const_outlier:
            parts.append('_outlier')
        parts.append('_train-tc')
    return ''.join(parts)
| StarcoderdataPython |
1699201 | <reponame>ChocoYokan/PlayMix<filename>accounts/admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from accounts.models import Follow, User
class UserCustomAdmin(admin.ModelAdmin):
    """Admin configuration for the custom User model."""
    # Columns shown in the admin change-list for users.
    list_display = ('username', 'email')
# Expose both models in the Django admin; User gets the custom list view.
admin.site.register(User, UserCustomAdmin)
admin.site.register(Follow)
| StarcoderdataPython |
4988110 | from django.urls import path
from ..views import RegistroCorrispettiviView
# Routes for the corrispettivi register, scoped by company id (`azienda`)
# and optionally narrowed to a single accounting period.
urlpatterns = [
    path("<int:azienda>/", RegistroCorrispettiviView.as_view()),
    path("<int:azienda>/<periodo>/", RegistroCorrispettiviView.as_view()),
]
| StarcoderdataPython |
4935170 | from modbus_driver import Modbus_Driver
# Open the Modbus connection described by the YAML config file.
obj = Modbus_Driver("modbus_config.yaml",)
# Sample register dump for reference:
# {'temperature_setpoint1': 44, 'temperature_setpoint2': 1, 'temperature_high_alarm': 6, 'temperature_low_alarm': 5, 'heartbeat': 17, 'zone1_temp': 46, 'zone2_temp': 0}
# Bottom 7-segment display shows the setpoint; top shows the current
# temperature (the top display's segments are broken on this unit).
obj.initialize_modbus()
# Read the full register block starting at address 0 and show it.
output = obj.get_data(0)
print("-----current values of the registers-----")
print(output)
zone1_temp = output['zone1_temp']
# Debug/one-off helpers kept for reference:
# print(zone1_temp & 0x1100)
# print(zone1_temp & 0x0011)
# obj.write_register('temperature_setpoint1', 42)
# Historical register dumps recorded during bring-up:
# {'temperature_setpoint1': 42, 'temperature_setpoint2': 1, 'temperature_high_alarm': 6, 'temperature_low_alarm': 5, 'heartbeat': 1, 'zone1_temp': 47, 'zone2_temp': 0}
# {'temperature_setpoint1': 42, 'temperature_setpoint2': 1, 'temperature_high_alarm': 6, 'temperature_low_alarm': 5, 'heartbeat': 0, 'zone1_temp': 46, 'zone2_temp': 0}
# {'temperature_setpoint1': 42, 'temperature_setpoint2': 1, 'temperature_high_alarm': 6, 'temperature_low_alarm': 5, 'heartbeat': 1, 'zone1_temp': 45, 'zone2_temp': 0}
# {'temperature_setpoint1': 42, 'temperature_setpoint2': 1, 'temperature_high_alarm': 6, 'temperature_low_alarm': 5, 'heartbeat': 0, 'zone1_temp': 44, 'zone2_temp': 0}
# {'temperature_setpoint1': 44, 'temperature_setpoint2': 1, 'temperature_high_alarm': 6, 'temperature_low_alarm': 5, 'heartbeat': 1, 'zone1_temp': 44, 'zone2_temp': 0, 'zone1_status': -6976}
# Close the serial/TCP connection cleanly.
obj.kill_modbus()
# Zone status bit layout (copied from the device documentation dump):
# 1 LP Switch Output 4 Output 3 Output 2 Output 1 Evapora-tor OUT probe Evapora-tor IN probe 1 Zone ON Comp. Overload Low Oil Pressure Freeze-stat High Refrig. Pressure Low Refrig. Pressure Low Water Flow
# 1 1 1 0 0 1 0 0 1 1 0 0 0 0 0 0
# {'temperature_setpoint1': 44, 'temperature_setpoint2': 1, 'temperature_high_alarm': 6, 'temperature_low_alarm': 5, 'heartbeat': 16, 'zone1_temp': 45, 'zone4_temp': 0, 'zone1_status': -8000, 'zone4_status': 0}
# 1 LP Switch Output 4 Output 3 Output 2 Output 1 Evapora-tor OUT probe Evapora-tor IN probe 1 Zone ON Comp. Overload Low Oil Pressure Freeze-stat High Refrig. Pressure Low Refrig. Pressure Low Water Flow
# 1 1 1 0 0 0 0 0 1 1 0 00000
# Observed: when output1 became 0, the alarm sound stopped.
| StarcoderdataPython |
9749546 | <reponame>p12tic/buildbot_travis<gh_stars>10-100
# Copyright 2014-2013 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future.moves.urllib.parse import urlparse
import os
from .base import VCSBase, PollerMixin
from buildbot_travis.changes import svnpoller
import subprocess
from buildbot.steps.source.svn import SVN
from twisted.python import log
# XXX untested code!
class SVNChangeSplitter(object):
    """Routes changed file paths from one SVN repository to projects.

    Projects register a root sub-path (and optionally a forced branch)
    via :meth:`add`; calling the instance with a changed path yields a
    populated ``SVNFile`` for the matching project, or ``None``.
    """

    def __init__(self, repository):
        # Base repository URL every registered root must start with.
        self.repository = repository
        # List of (relative-root, branch-or-None, project-name) tuples.
        self.roots = []

    def add(self, repository, branch, project):
        """Register *project* rooted at *repository* (a sub-URL of the base)."""
        assert repository.startswith(self.repository)
        relative = repository[len(self.repository):]
        self.roots.append((relative, branch, project))

    def split_file(self, path):
        """Split *path* into (branch, relative-path); None if unrecognised."""
        segments = path.split("/")
        head = segments[0]
        if head == 'trunk':
            return 'trunk', '/'.join(segments[1:])
        if head == 'branches':
            # Branch name is the first two segments, e.g. "branches/featureX".
            return '/'.join(segments[:2]), '/'.join(segments[2:])
        return None

    def __call__(self, path):
        log.msg("Looking for match for '%s'" % path)
        for prefix, branch, project in self.roots:
            if not path.startswith(prefix):
                continue
            log.msg("Found match - project '%s'" % project)
            svn_file = svnpoller.SVNFile()
            svn_file.project = project
            svn_file.repository = self.repository + prefix
            # NOTE: `path` is deliberately truncated in place, matching the
            # original behaviour even when a forced-branch mismatch makes
            # the loop continue to the next root.
            path = path[len(prefix):]
            if not branch:
                log.msg("Determining branch")
                split_result = self.split_file(path)
                if split_result is None:
                    return None
                svn_file.branch, svn_file.path = split_result  # noqa
            else:
                log.msg("Trying to force branch")
                if not path.startswith(branch):
                    log.msg("'%s' doesnt start with '%s'" % (path, branch))
                    continue
                svn_file.branch = branch
                svn_file.path = path[len(branch):]
            return svn_file
        log.msg("No match found")
        log.msg(self.roots)
class SVNPoller(VCSBase, PollerMixin):
    """VCS backend for Subversion repositories watched by polling.

    One ``svnpoller.SVNPoller`` change source is shared per repository
    root; each project registers itself on the shared
    ``SVNChangeSplitter`` so a single poller can route commits to many
    projects.
    """

    description = "Source code hosted on svn, with detection of changes using poll method"

    # Shared across *all* instances: repository root URL -> SVNChangeSplitter.
    repositories = {}  # class variable!
    username = None
    password = None
    subrepos = None

    def addRepository(self, factory, project=None, repository=None, branch=None, **kwargs):
        """Append an SVN checkout step for *repository* to *factory*."""
        kwargs = dict(kwargs)
        branch = branch or "trunk"
        kwargs.update(dict(
            baseURL=repository,
            defaultBranch=branch,
            username=self.username,
            # Fix: this line previously contained a corrupted placeholder
            # token; pass the configured credential, mirroring `username`.
            password=self.password,
            codebase=project,
            haltOnFailure=True,
            flunkOnFailure=True,
        ))
        factory.addStep(SVN(**kwargs))

    def getRepositoryRoot(self):
        """Return the repository root URL reported by ``svn info``, with a
        trailing slash appended.

        Raises KeyError if the "Repository Root" field is absent from the
        command output.
        """
        options = {}
        cmd = ["svn", "info", self.repository, "--non-interactive"]
        if self.username:
            cmd.extend(["--username", self.username])
        if self.password:
            cmd.extend(["--password", self.password])
        # Force English output so the "Repository Root" key is predictable.
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env={'LC_MESSAGES': 'C'})
        s, _ = p.communicate()
        for line in s.split("\n"):
            if ":" in line:
                # Fix: split on the first ": " only, so values that
                # themselves contain ": " no longer raise ValueError.
                k, v = line.split(": ", 1)
                k = k.strip().lower().replace(" ", "-")
                v = v.strip()
                options[k] = v
        return options["repository-root"] + "/"

    def setupChangeSource(self, changeSources):
        """Ensure a poller exists for this repo's root and register this
        project's sub-path on its splitter."""
        for repo in self.repositories:
            if self.repository.startswith(repo):
                splitter = self.repositories[repo]
                break
        else:
            # First project seen for this repository root: create the
            # shared splitter and a single poller for the whole root.
            repo = self.getRepositoryRoot()
            scheme, netloc, path, _, _, _ = urlparse(repo)
            name = "%s-%s-%s" % (scheme, netloc.replace(".", "-"), path.rstrip("/").lstrip("/").replace("/", "-"))
            pollerdir = self.makePollerDir(name)
            splitter = self.repositories[repo] = SVNChangeSplitter(repo)
            changeSources.append(svnpoller.SVNPoller(
                repourl=repo,
                cachepath=os.path.join(pollerdir, "pollerstate"),
                project=None,
                split_file=splitter,
                svnuser=self.username,
                svnpasswd=self.password,
            ))
        splitter.add(self.repository, self.branch, self.name)
| StarcoderdataPython |
4987866 | <filename>NMCE/data/datasets.py
import os
import numpy as np
import torchvision
from torch.utils.data import ConcatDataset
from .aug import load_transforms, ContrastiveLearningViewGenerator
def load_dataset(data_name, transform_name=None, use_baseline=False, train=True, contrastive=False, n_views=2, path="../../data/"):
    """Loads a dataset for training and testing. If augmentloader is used, transform should be None.
    Parameters:
        data_name (str): name of the dataset (case-insensitive)
        transform_name (str): name of transform to be applied (see aug.py)
        use_baseline (bool): use baseline transform instead of augmentation transform
        train (bool): load training set or not (only honored by the CIFAR/MNIST
            and stl10/stl10sup variants; other names always return one set)
        contrastive (bool): whether to wrap the transform into a multi-view
            generator for contrastive learning
        n_views (int): number of views for contrastive learning
        path (str): path to dataset base directory
    Returns:
        dataset (torch.utils.data.Dataset) with a ``num_classes`` attribute
        attached.
    Raises:
        NameError: if *data_name* is not recognised.
    """
    aug_transform, baseline_transform = load_transforms(transform_name)
    transform = baseline_transform if use_baseline else aug_transform
    if contrastive:
        # Each sample then yields n_views independently augmented copies.
        transform = ContrastiveLearningViewGenerator(transform,n_views=n_views)
    _name = data_name.lower()
    if _name == "cifar10":
        trainset = torchvision.datasets.CIFAR10(root=os.path.join(path, "CIFAR10"), train=train,
                                                download=True, transform=transform)
        trainset.num_classes = 10
    elif _name == "cifar100":
        trainset = torchvision.datasets.CIFAR100(root=os.path.join(path, "CIFAR100"), train=train,
                                                 download=True, transform=transform)
        trainset.num_classes = 100
    elif _name == "cifar100coarse":
        # CIFAR-100 relabelled with its 20 coarse superclasses.
        trainset = torchvision.datasets.CIFAR100(root=os.path.join(path, "CIFAR100"), train=train,
                                                 download=True, transform=transform)
        trainset.targets = sparse2coarse(trainset.targets)
        trainset.num_classes = 20
    elif _name == "mnist":
        trainset = torchvision.datasets.MNIST(root=os.path.join(path, "MNIST"), train=train,
                                              download=True, transform=transform)
        trainset.num_classes = 10
    elif _name == "imagenet-dogs":
        trainset = torchvision.datasets.ImageFolder(root=os.path.join(path, "imagenet/Imagenet-dogs"),transform=transform)
        trainset.num_classes = 15
    elif _name == "imagenet-10":
        trainset = torchvision.datasets.ImageFolder(root=os.path.join(path, "imagenet/Imagenet-10"),transform=transform)
        trainset.num_classes = 10
    elif _name == "fashionmnist":
        # Train and test splits are always merged for FashionMNIST.
        trainset = torchvision.datasets.FashionMNIST(root=os.path.join(path, "FashionMNIST"), train=True,
                                                     download=True, transform=transform)
        testset = torchvision.datasets.FashionMNIST(root=os.path.join(path, "FashionMNIST"), train=False,
                                                    download=True, transform=transform)
        trainset = ConcatDataset([trainset,testset])
        trainset.num_classes = 10
    elif _name == "stl10":
        # Combine stl10 train and test data: 13k labelled images in total.
        trainset = torchvision.datasets.STL10(root=os.path.join(path, "STL-10"), split='train',
                                              transform=transform, download=True)
        testset = torchvision.datasets.STL10(root=os.path.join(path, "STL-10"), split='test',
                                             transform=transform, download=True)
        trainset.num_classes = 10
        testset.num_classes = 10
        if not train:
            return testset
        else:
            # Merge test into train, keeping labels consistent in .targets.
            trainset.data = np.concatenate([trainset.data, testset.data])
            trainset.labels = trainset.labels.tolist() + testset.labels.tolist()
            trainset.targets = trainset.labels
            return trainset
    elif _name == "stl10sup":
        # Keep stl10 train and test sets separate (supervised protocol).
        trainset = torchvision.datasets.STL10(root=os.path.join(path, "STL-10"), split='train',
                                              transform=transform, download=True)
        testset = torchvision.datasets.STL10(root=os.path.join(path, "STL-10"), split='test',
                                             transform=transform, download=True)
        trainset.num_classes = 10
        testset.num_classes = 10
        if not train:
            return testset
        else:
            trainset.targets = trainset.labels
            return trainset
    elif _name == "stl10unsup":
        # 100k unlabelled stl10 images.
        trainset = torchvision.datasets.STL10(root=os.path.join(path, "STL-10"), split='unlabeled',
                                              transform=transform, download=True)
        trainset.num_classes = 10
    else:
        raise NameError("{} not found in trainset loader".format(_name))
    return trainset
def sparse2coarse(targets):
    """Map CIFAR-100 fine labels (0-99) to their 20 coarse superclass labels."""
    # Lookup table: index = fine label, value = coarse label.
    fine_to_coarse = np.array([
        4, 1, 14, 8, 0, 6, 7, 7, 18, 3, 3, 14, 9, 18, 7, 11, 3,
        9, 7, 11, 6, 11, 5, 10, 7, 6, 13, 15, 3, 15, 0, 11, 1, 10,
        12, 14, 16, 9, 11, 5, 5, 19, 8, 8, 15, 13, 14, 17, 18, 10, 16,
        4, 17, 4, 2, 0, 17, 4, 18, 17, 10, 3, 2, 12, 12, 16, 12, 1,
        9, 19, 2, 10, 0, 1, 16, 12, 9, 13, 15, 13, 16, 19, 2, 4, 6,
        19, 5, 5, 8, 19, 18, 1, 2, 15, 6, 0, 17, 8, 14, 13,
    ])
    return fine_to_coarse[targets]
5100889 | import pytest
from mikan import Counter, Number
@pytest.mark.parametrize(
    "number,expkanji,expdigits,expkana",
    [
        (1, '一回', '1回', 'いっかい'),
        (6, '六回', '6回', 'ろっかい'),
        (8, '八回', '8回', 'はっかい'),
        (8, '八回', '8回', 'はちかい'),
        (10, '十回', '10回', 'じゅっかい'),
    ]
)
def test_hon_counter(number, expkanji, expdigits, expkana):
    """A number combined with the 回 counter must expose the expected kanji
    and digit writings, and the expected kana reading (including euphonic
    changes such as いっかい/ろっかい)."""
    counted = Number(number) + Counter('回', 'かい')
    assert expkanji in counted.writings
    assert expdigits in counted.writings
    assert expkana in counted.readings
| StarcoderdataPython |
12854309 | <reponame>CyrusBiotechnology/django-headmaster
from setuptools import setup
# Packaging metadata for pip/PyPI; the installable package is django_headmaster.
setup(name='django-headmaster',
      version='0.0.1',
      description='Add extra headers to your site via your settings file',
      url='http://github.com/CyrusBiotechnology/django-headmaster',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['django_headmaster'],
      zip_safe=True)
| StarcoderdataPython |
8121490 | <reponame>IsaacXNG/CryptoArbitrage
import requests
# Flat exchange fee applied on every hop when compounding cycle profitability.
transaction_fee = 0.005
# Starting currencies for the arbitrage-cycle search.
major_currencies = ["DOGE"]
# Currencies excluded from the graph entirely.
ignore_currencies = []
try:
    # Fetch the full market list; bail out silently if the API is unreachable
    # or reports failure (script is designed to run unattended).
    markets = requests.get("https://www.cryptopia.co.nz/api/GetMarkets").json()
    if not markets["Success"]:
        exit()
except Exception:  # narrowed from a bare except: no longer swallows SystemExit/KeyboardInterrupt
    exit()
class graph:
    """Directed cyclic graph of currencies built from the market list.

    Edges carry the conversion rate obtained by trading one unit of the
    source currency into the destination currency.
    """
    def __init__(self):
        # currency symbol -> node object
        self.currencies = {}
        for market in markets['Data']:
            if market["Volume"] <= 0:
                continue
            base, quote = market["Label"].split("/")
            if base in ignore_currencies or quote in ignore_currencies:
                continue
            for symbol in (base, quote):
                if symbol not in self.currencies:
                    self.currencies[symbol] = node(symbol)
            # Selling base at the bid; buying base at the ask (inverted rate).
            self.currencies[base].addChildren(self.currencies[quote], market["BidPrice"])
            self.currencies[quote].addChildren(self.currencies[base], 1/market["AskPrice"])
    def __repr__(self):
        return str(self.currencies)
class node:
    """Vertex in the currency graph.

    ``children`` maps a neighbouring node to the conversion rate: how many
    units of the neighbour one unit of this currency buys.
    """
    def __init__(self, name):
        self.name = name
        self.children = {}
    def __repr__(self):
        return self.name
    def addChildren(self, newNode, conversion_rate):
        self.children[newNode] = conversion_rate
    def printChildren(self):
        # print() applies str() itself, so no explicit conversion is needed.
        print(self.children)
# Global result map: profitable cycle (tuple of nodes, first == last) -> compounded rate.
accumulator = {}
def find_good_cycles(graph):
    # Launch a depth-bounded DFS from each configured starting currency.
    for currency in major_currencies:
        dfs([graph.currencies[currency]], 1, max_depth = 4)
def dfs(visited, conversion_product, max_depth):
    # Depth-limited DFS. `visited` is the current path of nodes;
    # `conversion_product` is the compounded conversion rate along it.
    if(max_depth <= 0):
        return
    for nextNode, conversion_rate in visited[-1].children.items():
        # Apply the exchange fee on every hop.
        p = (1 - transaction_fee)*conversion_rate*conversion_product
        if nextNode in visited:
            # Record only closed cycles (back to the start node) of more than
            # 3 hops whose compounded product exceeds 1, i.e. profitable ones.
            if len(visited) > 3 and visited[0] == nextNode and p > 1:
                accumulator[tuple(visited + [nextNode])] = p
        else:
            newVisited = list(visited)
            newVisited.append(nextNode)
            dfs(newVisited, p, max_depth - 1)
# Build the market graph, search for profitable cycles, and print them
# ordered by decreasing compounded profit.
g = graph()
find_good_cycles(g)
sorted_list = sorted(accumulator.items(), key=lambda kv: kv[1], reverse=True)
for item in sorted_list:
    print(item)
| StarcoderdataPython |
# Python 2
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Observation: If you factor 9 from 9, 90, 99, 900, 909, 990, 999, ...
# you get the binary numbers 1, 10, 11, 100, 101, 110, 111, ...
# So: for each n, enumerate binary numbers j = 1, 2, 3, ... and map each
# binary digit 1 -> 9; the first candidate divisible by n is the answer
# (the smallest number made only of the digits 9 and 0 divisible by n).
t = int(raw_input())
for i in range(t):
    n = int(raw_input())
    j = 1
    # bin(j)[2:] strips the '0b' prefix; the str() wrapper is redundant but harmless.
    while(int(str(bin(j))[2:].replace('1','9'))%n!=0):
        j += 1
    d = str(bin(j))[2:].replace('1','9')
    print d
12843586 | # When your package is installed in editable mode, you can call
# instances of that package from any directory. For example, this
# script may be run by calling
#
# python scripts/say-hello-world.py
#
# and it will call methods inside our python_ml_template project.
from math_so.utils import say_hello_world
# Entry point: runs only when the script is executed directly, not on import.
if __name__ == '__main__':
    say_hello_world()
| StarcoderdataPython |
12816156 | <reponame>jachiike-madubuko/macro-scrapy<gh_stars>0
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from sqlalchemy.orm import sessionmaker
from scrapy.exceptions import DropItem
from .models import Quote, Author, Tag, db_connect, create_table
import logging
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Initialise the Firebase Admin SDK once at import time from a local
# service-account key file.
cred = credentials.Certificate('./macro-config.json')
firebase_admin.initialize_app(cred)
class NewsPipeline(object):
    """Scrapy item pipeline intended to persist scraped items to Firestore.

    The Firestore client is currently disabled; the pipeline only logs and
    passes every item through unchanged. (The original docstrings claimed the
    item was saved, and the method body carried dead commented-out
    session/transaction code — both cleaned up here.)
    """
    def __init__(self):
        """Initialize the pipeline (Firestore client creation is disabled)."""
        # self.db = firestore.Client()  # enable to actually persist items
        logging.info("****SaveQuotePipeline: database connected****")

    def process_item(self, item, spider):
        """Called by Scrapy for every item; currently a logged pass-through.

        When the client above is enabled, the item would be stored in the
        collection named by item['item_type'].
        """
        logging.info("****SaveItem: firestore****")
        # self.db.collection(item['item_type']).add(item)
        return item
# class DuplicatesPipeline(object):
# def __init__(self):
# """
# Initializes database connection and sessionmaker.
# Creates tables.
# """
# engine = db_connect()
# create_table(engine)
# self.Session = sessionmaker(bind=engine)
# logging.info("****DuplicatesPipeline: database connected****")
# def process_item(self, item, spider):
# session = self.Session()
# exist_quote = session.query(Quote).filter_by(quote_content = item["quote_content"]).first()
# if exist_quote is not None: # the current quote exists
# raise DropItem("Duplicate item found: %s" % item["quote_content"])
# session.close()
# else:
# return item
# session.close()
# class SaveQuotesPipeline(object):
# def __init__(self):
# """
# Initializes database connection and sessionmaker
# Creates tables
# """
# engine = db_connect()
# create_table(engine)
# self.Session = sessionmaker(bind=engine)
# logging.info("****SaveQuotePipeline: database connected****")
# def process_item(self, item, spider):
# """Save quotes in the database
# This method is called for every item pipeline component
# """
# session = self.Session()
# quote = Quote()
# author = Author()
# tag = Tag()
# author.name = item["author_name"]
# author.birthday = item["author_birthday"]
# author.bornlocation = item["author_bornlocation"]
# author.bio = item["author_bio"]
# quote.quote_content = item["quote_content"]
# # check whether the author exists
# exist_author = session.query(Author).filter_by(name = author.name).first()
# if exist_author is not None: # the current author exists
# quote.author = exist_author
# else:
# quote.author = author
# # check whether the current quote has tags or not
# if "tags" in item:
# for tag_name in item["tags"]:
# tag = Tag(name=tag_name)
# # check whether the current tag already exists in the database
# exist_tag = session.query(Tag).filter_by(name = tag.name).first()
# if exist_tag is not None: # the current tag exists
# tag = exist_tag
# quote.tags.append(tag)
# try:
# session.add(quote)
# session.commit()
# except:
# session.rollback()
# raise
# finally:
# session.close()
# return item
| StarcoderdataPython |
3267488 | from cupy.cuda import device
import matplotlib.pyplot as plt
from mrrt.mri.operators.bench.bench_mri import (
bench_mri_2d_nocoils_nofieldmap,
bench_mri_2d_16coils_nofieldmap,
bench_mri_2d_16coils_fieldmap,
bench_mri_2d_16coils_fieldmap_multispectral,
bench_mri_3d_nocoils_nofieldmap,
bench_mri_3d_nocoils_fieldmap,
bench_mri_3d_16coils_nofieldmap,
bench_mri_3d_16coils_fieldmap,
)
def run_all_bench(
    plot_timings=True,
    include_GPU=True,
    include_CPU=True,
    print_timings=True,
    phasings=["real", "complex"],
    save_dir="/tmp",
):
    """Run every 2D and 3D MRI operator benchmark with shared settings."""
    shared = dict(
        plot_timings=plot_timings,
        include_GPU=include_GPU,
        include_CPU=include_CPU,
        print_timings=print_timings,
        phasings=phasings,
        save_dir=save_dir,
    )
    # 2D benchmarks (fewer averages as coil/fieldmap complexity grows).
    bench_mri_2d_nocoils_nofieldmap(navg_time=10, **shared)
    bench_mri_2d_16coils_nofieldmap(navg_time=4, **shared)
    bench_mri_2d_16coils_fieldmap(navg_time=4, **shared)
    bench_mri_2d_16coils_fieldmap_multispectral(navg_time=2, nspectra=2, **shared)
    # 3D benchmarks.
    bench_mri_3d_nocoils_nofieldmap(navg_time=4, **shared)
    bench_mri_3d_nocoils_fieldmap(navg_time=4, **shared)
    bench_mri_3d_16coils_nofieldmap(navg_time=2, **shared)
    bench_mri_3d_16coils_fieldmap(navg_time=2, **shared)
# Module-level run: benchmark only the complex-phasing 2D 16-coil fieldmap case.
common_kwargs = dict(
    plot_timings=True,
    include_GPU=True,
    include_CPU=True,
    print_timings=True,
    phasings=["complex"],
    save_dir="/tmp",
)
# Warm up the cuSPARSE handle so its creation cost is excluded from timings.
device.get_cusparse_handle()
# cupy.fft.cache.enable()
# bench_mri_3d_16coils_fieldmap(navg_time=2, **common_kwargs)
# bench_mri_2d_16coils_nofieldmap(navg_time=64, **common_kwargs)
bench_mri_2d_16coils_fieldmap(navg_time=32, **common_kwargs)
# bench_mri_2d_16coils_fieldmap_multispectral(
#     navg_time=64, nspectra=2, **common_kwargs
# )
plt.show()
| StarcoderdataPython |
11310580 | #!/usr/bin/env python
# Copyright (c) 2014-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""See environment_library for details.
"""
try:
from fabric.api import env
from fabric.api import execute
from fabric.api import hosts
from fabric.api import run
import pytest
except ImportError:
pass
def add_diff_env_to_controller(differentiated_environment):
    """Add a differentiated environment remotely and bounce services.
    This function is used in:
    * test/functional/test_environment_add.py
    Examine that example for further explanation.
    Given an appropriate host_string and password, this function:
    (0) halts services on a Neutron controller;
    (1) reconfigures the relevant files to add an "environment"
    service_provider;
    (2) restarts the services.
    (CRITICAL NOTE: The relevant credentials are hardcoded
    via the 'source keystonerc_testlab' line.
    NOT apropriate for use in a production environment.)
    """
    # Build the fabric "user@host:22" connection string from test symbols.
    env.host_string = ''.join(
        [pytest.symbols.tenant_name,
         '@',
         pytest.symbols.controller_ip,
         ':22'])
    @hosts(env.host_string)
    def setup_env_oncontroller(diff_env):
        env.password = pytest.symbols.tenant_password
        # Sanity check: confirm we can run remote commands at all.
        execute(lambda: run('sudo ls -la'))
        # Stop existing agent
        execute(lambda: run('sudo systemctl stop f5-openstack-agent'))
        # Stop neutron server / f5_plugin
        execute(lambda: run('sudo systemctl stop neutron-server'))
        # Edit agent configuration to use new environment
        # (in-place sed rewrite of environment_prefix in the agent ini).
        sedtempl = '''sed -i "s/^\(environment_prefix = \)\(.*\)$/\\1%s/"''' +\
            ''' /etc/neutron/services/f5/f5-openstack-agent.ini'''
        sedstring = 'sudo ' + sedtempl % diff_env
        execute(lambda: run(sedstring))
        # Add diff env to neutron_lbaas.conf and installed Python package
        add_string = 'sudo add_f5agent_environment %s' % diff_env
        execute(lambda: run(add_string))
        # Start neutron-server / f5_plugin
        execute(lambda: run('sudo systemctl start neutron-server'))
        # Start existing agent
        execute(lambda: run('source keystonerc_testlab && '
                            'sudo systemctl start f5-openstack-agent'))
    setup_env_oncontroller(differentiated_environment)
| StarcoderdataPython |
4805728 | <reponame>tferreira/slackron<gh_stars>1-10
from pathlib import Path
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
class Config:
    """Loads and exposes settings from the ``~/.slackron.yml`` config file."""

    def __init__(self):
        # Use a context manager so the config file handle is closed
        # deterministically (the original leaked the open handle).
        config_path = '{}/.slackron.yml'.format(str(Path.home()))
        with open(config_path, 'r') as config_file:
            self._config = load(config_file, Loader=Loader)

    @property
    def webhook_url(self):
        """Slack incoming-webhook URL (required)."""
        if 'webhook_url' not in self._config:
            raise Exception('url missing in ~/.slackron.yml config file')
        return self._config['webhook_url']

    @property
    def channel(self):
        """Target Slack channel (required)."""
        if 'channel' not in self._config:
            raise Exception('channel missing in ~/.slackron.yml config file')
        return self._config['channel']

    @property
    def username(self):
        """Bot display name; defaults to "Slackron" when unset."""
        return self._config['username'] if 'username' in self._config else "Slackron"

    @property
    def emoji(self):
        """Bot emoji icon; None (Slack's default) when unset."""
        return self._config['emoji'] if 'emoji' in self._config else None
| StarcoderdataPython |
11210981 | from helper.tile import *
def translate(mat):
    """Build an adjacency map: tile Position -> {neighbour Position: move weight}."""
    adjacency = {}
    for row_idx in range(len(mat)):
        for col_idx in range(len(mat[row_idx])):
            adjacency[mat[row_idx][col_idx].Position] = neighbours(row_idx, col_idx, mat)
    return adjacency
def neighbours(row, col, mat):
    """Return {Position: weight} for the four orthogonal neighbours of (row, col).

    Walls cost 3 (they must be broken through); every other passable tile
    costs 1. Off-board or impassable tiles are omitted.

    Rewritten to loop over direction offsets instead of four copy-pasted
    branches; also drops the needless ``mat[:]`` list copy the original made.
    Assumes a rectangular grid (the original used len(mat[0]) as the column
    bound for every row too).
    """
    result = dict()
    # (row offset, col offset) for up, left, down, right — original check order.
    for d_row, d_col in ((-1, 0), (0, -1), (1, 0), (0, 1)):
        r, c = row + d_row, col + d_col
        if not (0 <= r < len(mat) and 0 <= c < len(mat[0])):
            continue
        tile = mat[r][c]
        if not isValidTile(tile.TileContent):
            continue
        result[tile.Position] = 3 if tile.TileContent == TileContent.Wall else 1
    return result
def isValidTile(tileContent):
    """A tile is traversable (or breakable) if it is empty, a house, a shop, a resource or a wall."""
    return tileContent in (
        TileContent.Empty,
        TileContent.House,
        TileContent.Shop,
        TileContent.Wall,
        TileContent.Resource,
    )
8196671 | from data import load_data_from_json_file
from matsmart import get_available_items
"""
Super-rudimentary testing, invoke as module;
python -m test
Returns "Passed" if the get_available_items_json result corresponds to the saved test_results.json
"""
# test_item_list = ["bovete", "chia", "matvete", "quinoa", "havregryn"]
def ordered(obj):
    """Recursively canonicalise ``obj`` for order-insensitive comparison.

    Dicts become sorted lists of (key, ordered(value)) pairs, lists are
    sorted element-wise; anything else is returned unchanged.
    """
    if isinstance(obj, dict):
        return sorted((key, ordered(value)) for key, value in obj.items())
    if isinstance(obj, list):
        return sorted(ordered(element) for element in obj)
    return obj
def test_available_result():
    """Compare live results against the saved fixture, ignoring ordering."""
    actual = get_available_items(as_dict=True)
    expected = load_data_from_json_file(filename="test_data/results.json")
    assert ordered(actual) == ordered(expected), "Should be same result"
if __name__ == "__main__":
test_available_result()
print("Passed")
| StarcoderdataPython |
1809368 | '''NXOS Implementation for Igmp modify triggers'''
# python
from copy import deepcopy
# pyats
from ats import aetest
# Genie Libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.modify.modify import TriggerModify
# pyats
from ats.utils.objects import Not, NotExists
# Which key to exclude for Igmp Ops comparison
# Volatile Ops keys (object maker, timers) that would differ between snapshots.
igmp_exclude = ['maker', 'expire', 'up_time']
class TriggerModifyIgmpVersion(TriggerModify):
    """Modify dynamically learned enabled Igmp interface(s) version then restore the
    configuration by reapplying the whole running configuration."""
    __description__ = """Modify dynamically learned enabled Igmp interface(s) version
    then restore the configuration by reapplying the whole running configuration.
    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                in second. Default: 15
                method (`str`): Method to recover the device configuration,
                              Support methods:
                                'checkpoint': Rollback the configuration by
                                              checkpoint (nxos),
                                              archive file (iosxe),
                                              load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.
                interface: `str`
                vrf: `str`
                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)
    steps:
        1. Learn Igmp Ops object and store the Igmp interface version
           if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Modify the learned Igmp interface version from step 1
           with Igmp Conf object
        4. Verify the Igmp interface version from step 3 is reflected in device configuration
        5. Recover the device configurations to the one in step 2
        6. Learn Igmp Ops again and verify it is the same as the Ops in step 1
    """
    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    # requirements: Ops keys that must exist (enabled interface at version 2);
    # config_info: Conf change applied (version -> 3);
    # verify_ops: Ops state expected after the change.
    mapping = Mapping(requirements={
        'ops.igmp.igmp.Igmp':{
            'requirements':[\
                ['info', 'vrfs', '(?P<vrf>.*)', 'interfaces',
                 '(?P<interface>.*)', 'enable', True],
                ['info', 'vrfs', '(?P<vrf>.*)', 'interfaces',
                 '(?P<interface>.*)', 'version', '(?P<version>2)']],
            'all_keys': True,
            'kwargs':{'attributes': [
                'info[vrfs][(.*)]']},
            'exclude': igmp_exclude}},
        config_info={'conf.igmp.Igmp':{
            'requirements':[
                ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
                 'interface_attr', '(?P<interface>.*)', 'version',
                 3]],
            'verify_conf':False,
            'kwargs':{}}},
        verify_ops={
            'ops.igmp.igmp.Igmp':{
                'requirements':[\
                    ['info', 'vrfs', '(?P<vrf>.*)', 'interfaces',
                     '(?P<interface>.*)', 'version', 3]],
                'kwargs':{'attributes': [
                    'info[vrfs][(.*)]']},
                'exclude': igmp_exclude}},
        num_values={'vrf': 1, 'interface': 1})
| StarcoderdataPython |
1773214 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''分类器,负责模型训练流程和预测流程.
fit_with_file:从文件中加载训练集和验证集
fit:意图分类模型的训练过程
predict_with_file:意图分类模型 batch test
predict:意图分类模型预测接口
save:将意图分类模型导出到模型文件中
restore:从文件中加载模型
'''
import os
import sys
import time
import traceback
import logging
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from scipy.sparse import hstack
from util.preprocess.tfidf_processor import TFIDFProcessor
# Module-wide logger configuration (DEBUG-level named logger for this module).
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(threadName)-10s %(message)s')
LOGGER = logging.getLogger('DataTransformer')
LOGGER.setLevel(logging.DEBUG)
class DataTransformer(object):
    """Turns raw text into TF-IDF feature matrices and class labels into
    integer / one-hot encodings.

    Improvements over the original: dead commented-out bigram preprocessors
    and unused timing code removed, one-hot encoding vectorized, and all
    comments translated to English.
    """
    def __init__(self, corpus_file, term_file=None, normalizer='basic_with_num'):
        '''
        Initialize the model: learn IDF statistics and the label encoding.
        Args:
            corpus_file: Fit it to TFIDFProcessor for learning idf vector.
                Usually it is the training data.
            term_file: for normalization
            normalizer: name of the string normalizer used by TFIDFProcessor
        Returns:
            None
        '''
        assert term_file is None or os.path.exists(term_file), "term_file not found in %s" % term_file
        assert os.path.exists(corpus_file), "corpus_file not found in %s" % corpus_file
        # init TFIDFProcessor: character-level and word-level unigram features
        self.char_unigram_preprocessor = TFIDFProcessor(normalizer=normalizer, term_file=term_file,
                                                        token_level='char', ngram_size=1)
        self.word_unigram_preprocessor = TFIDFProcessor(normalizer=normalizer, term_file=term_file,
                                                        token_level='word', ngram_size=1)
        # learn the idf vector from the corpus
        corpus_data, labels = self.read_data(corpus_file)
        LOGGER.debug('corpus data size {}'.format(len(corpus_data)))
        self.char_unigram_preprocessor.fit(corpus_data)
        self.word_unigram_preprocessor.fit(corpus_data)
        # Encode the class labels as integers.
        self.postprocessor = preprocessing.LabelEncoder()
        self.postprocessor.fit(labels)
        self.label_list = list(set(labels))

    def read_data(self, filepath):
        """Read a CSV with 'text' and 'class' columns; return shuffled (texts, labels)."""
        df = pd.read_csv(filepath, dtype=object)
        df = shuffle(df)
        return df['text'], df['class']

    def fit_with_file(self, train_file, n_class=None):
        '''
        Load the training set from a file and convert it into vectors.
        Args:
            train_file: path of the training data CSV
            n_class: total number of classes, used to size the one-hot labels
        Returns:
            feature matrix, one-hot label matrix
        '''
        assert os.path.exists(train_file), "train_file invalid: {}".format(train_file)
        x_train, y_train = self.read_data(train_file)
        x_train, y_train = self.fit(x_train, y_train)
        # Generate one-hot label vectors
        y_train = self.generate_labels(y_train, n_class)
        return x_train, y_train

    def generate_labels(self, y_train, n_class):
        """One-hot encode integer class ids into an (n_samples, n_class) float32 matrix."""
        label_vec = np.zeros((len(y_train), n_class), dtype=np.float32)
        # Vectorized one-hot assignment instead of a Python loop.
        if len(y_train):
            label_vec[np.arange(len(y_train)), np.asarray(y_train, dtype=int)] = 1.0
        return label_vec

    def fit(self, x_train, y_train):
        '''
        Vectorize the texts and integer-encode the labels.
        Args:
            x_train, y_train: training texts and their class labels
        Returns:
            feature matrix, encoded label array
        '''
        # String preprocessing, tokenization and feature extraction.
        LOGGER.debug('run extract_features')
        x_train = self.__extract_features(x_train)
        y_train = self.postprocessor.transform(y_train)
        return x_train, y_train

    def __extract_features(self, input_data):
        '''
        Transform sentences into a feature matrix.
        Args:
            input_data: a list of sentences to transform
        Returns:
            a dense float32 matrix of shape (n, m): n sentences, m features
            (char-unigram and word-unigram TF-IDF columns stacked side by side)
        '''
        x_feature_maxtrix = hstack([
            self.char_unigram_preprocessor.transform(input_data),
            self.word_unigram_preprocessor.transform(input_data),
        ], format='csr').toarray().astype(np.float32)
        LOGGER.debug('data size is %d' % (len(input_data)))
        LOGGER.debug('data matrix shape is %s' % str(x_feature_maxtrix.shape))
        return x_feature_maxtrix
if __name__ == '__main__':
    # Smoke test: learn IDF/label encoding from a sample CSV, then vectorize it.
    # NOTE(review): fit_with_file is called without n_class, so generate_labels
    # receives n_class=None — verify this path actually works with real data.
    training_data = './data/mojie/sample.csv'
    data_transformaer = DataTransformer(corpus_file=training_data)
    x_train, y_train = data_transformaer.fit_with_file(training_data)
    #print x_train, y_train
| StarcoderdataPython |
3282092 | <filename>kaggle_scripts.py<gh_stars>0
import numpy as np
# Functions to save solution files in the correct format for Kaggle Competition
def save_classification_file(file_to_save, names, labels):
    '''
    Saves the classification results in the format:

    Id,Prediction
    24551-2934-8931,ajuntament
    30017-26696-17117,desconegut
    etc

    :param file_to_save: open, writable file object (closed before returning)
    :param names: list of image ids
    :param labels: list of predictions matching the image ids, in order
    '''
    # Write header
    file_to_save.write("Id,Prediction\n")
    # Pair ids with predictions directly instead of indexing by position.
    for name, label in zip(names, labels):
        file_to_save.write(name + ',' + label + '\n')
    file_to_save.close()
def save_ranking_file(file_to_save,image_id,ranking):
    '''
    :param file_to_save: open, writable file object for the submission
    :param image_id: name of the query image (extension is stripped)
    :param ranking: ranking for the image image_id
    :return: the updated state of the file to be saved
    '''
    # Write query name
    file_to_save.write(image_id.split('.')[0] + ',')
    # Convert elements to string and ranking to list
    ranking = np.array(ranking).astype('str').tolist()
    # Write space separated ranking
    # NOTE(review): item[0] takes the first element of each row, so this
    # assumes `ranking` is 2-D (rows of single-element lists) — confirm with
    # callers; a flat 1-D ranking would emit only the first character of each id.
    for item in ranking:
        file_to_save.write(item[0] + " ")
    file_to_save.write('\n')
    return file_to_save
def convert_ranking_annotation(annotation_val, annotation_train, file_to_save):
    # Convert ranking annotation and store it for Kaggle (only needed for teachers)
    # For each validation image with a known class, write the ids of all
    # training images of that same class as its retrieved-documents list.
    file_to_save.write('Query,RetrievedDocuments\n')
    for image_id in annotation_val['ImageID']:
        # Class of the current validation image (first match in the dataframe).
        i_class = list(annotation_val.loc[annotation_val['ImageID'] == image_id]['ClassID'])[0]
        # 'desconegut' ("unknown" in Catalan) images have no ground truth: skip.
        if not i_class == 'desconegut':
            file_to_save.write(image_id + ',')
            to_write = annotation_train.loc[annotation_train['ClassID'].isin([i_class])]['ImageID'].tolist()
            for i in to_write:
                file_to_save.write(i + ' ')
            file_to_save.write('\n')
    file_to_save.close()
    print "Done. Annotation file saved"
| StarcoderdataPython |
5159767 | <reponame>malfonsoNeoris/maskrcnn_tf2<filename>src/coco_minitrain.py<gh_stars>0
import os
import random
import tensorflow as tf
from common.utils import tf_limit_gpu_memory
from model import mask_rcnn_functional
from preprocess import augmentation as aug
from samples.coco import coco
from training import train_model
if __name__ == '__main__':
    # Init random seed
    random.seed(42)
    # Limit GPU memory for tensorflow container
    tf_limit_gpu_memory(tf, 4500)
    # Load Mask-RCNN config
    from common.config import CONFIG
    CONFIG.update(coco.COCO_CONFIG)
    # Set only 5 COCO classes
    CONFIG.update({'class_dict': {'background': 0,
                                  'person': 1,
                                  'bicycle': 2,
                                  'car': 3,
                                  'motorcycle': 4,
                                  },
                   'num_classes': 5,
                   'meta_shape': 1 + 3 + 3 + 4 + 1 + 5,  # 4 COCO classes + 1 background class
                   'image_shape': (1024, 1024, 3),
                   'image_resize_mode': 'square',
                   'img_size': 1024,
                   'image_min_dim': 800,
                   'image_min_scale': 0,
                   'image_max_dim': 1024,
                   'backbone': 'mobilenet',
                   'epochs': 50,
                   'batch_size': 1,
                   'images_per_gpu': 1,
                   'train_bn': False,
                   }
                  )
    # Init training and validation datasets
    # NOTE(review): base_dir is empty, so 'train'/'val' resolve relative to the
    # working directory — set it before running.
    base_dir = r''
    train_dir = os.path.join(base_dir, 'train')
    val_dir = os.path.join(base_dir, 'val')
    # Initialize training and validation datasets
    train_dataset = coco.CocoDataset(dataset_dir=train_dir,
                                     subset='train',
                                     class_ids=[1, 2, 3, 4],
                                     year=2017,
                                     auto_download=False,
                                     # SegmentationDataset necessary parent attributes
                                     augmentation=aug.get_training_augmentation(
                                         image_size=CONFIG['img_size'],
                                         normalize=CONFIG['normalization']
                                     ),
                                     **CONFIG
                                     )
    val_dataset = coco.CocoDataset(dataset_dir=val_dir,
                                   subset='val',
                                   class_ids=[1, 2, 3, 4],
                                   year=2017,
                                   auto_download=False,
                                   # SegmentationDataset necessary parent attributes
                                   augmentation=aug.get_validation_augmentation(
                                       image_size=CONFIG['img_size'],
                                       normalize=CONFIG['normalization']
                                   ),
                                   **CONFIG
                                   )
    # Use only 1000 random images for train and 100 random images for validation
    train_imgs = 1000
    val_imgs = 100
    random.shuffle(train_dataset.images_names)
    random.shuffle(val_dataset.images_names)
    train_dataset.images_names = train_dataset.images_names[:train_imgs]
    val_dataset.images_names = val_dataset.images_names[:val_imgs]
    # Init Mask-RCNN model
    model = mask_rcnn_functional(config=CONFIG)
    # Train (weights_path=None: training starts from scratch, no checkpoint)
    train_model(model,
                train_dataset=train_dataset,
                val_dataset=val_dataset,
                config=CONFIG,
                weights_path=None)
| StarcoderdataPython |
6460363 | # Generated by Django 2.0.1 on 2018-06-20 20:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: relaxes Project.slug so blank values
    # are allowed on forms (blank=True), keeping max_length=255.
    dependencies = [
        ('unlabel_backend', '0002_auto_20180620_2017'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='slug',
            field=models.SlugField(blank=True, max_length=255, verbose_name='Slug'),
        ),
    ]
| StarcoderdataPython |
44846 | import cv2
'''
gets a video file and dumps each frame as a jpg picture in an output dir
'''
# Opens the Video file
cap = cv2.VideoCapture('./Subt_2.mp4')
i = 0
while(cap.isOpened()):
    ret, frame = cap.read()
    # Progress print roughly every 8th frame (25 fps * 0.3 s, rounded).
    # NOTE(review): only this print is gated — every frame is written below.
    # If sub-sampling the saved frames was intended, imwrite must be gated too.
    if i%(round(25*0.3)) == 0:
        print(i)
    if ret == False:
        break
    cv2.imwrite('./output/cave2-'+str(i)+'.jpg',frame)
    i+=1
cap.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
3564948 | <gh_stars>100-1000
from vit.formatter import Duration
class Recur(Duration):
    """Formatter for the taskwarrior 'recur' column (a Duration variant)."""
    def colorize(self, recur):
        # Delegate to the shared colorizer's recurring-task color.
        return self.colorizer.recurring(recur)
| StarcoderdataPython |
1728247 | <gh_stars>0
import ConfigParser
import ast
import grp
import json
import os
import numpy as np
import xlrd
def parseExcel(filename, clmnnames=-1, datastart=0, sheetname='Sheet1', *argv):
"""
Parse excel file into ....
filename = excel file name
clmnnames = row number where column names are stored (-1 if generic clmn1,clmn2...)
datastart = row number where data starts
sheetname = name of the sheet
"""
try:
book = xlrd.open_workbook(filename)
sheet = book.sheet_by_name(sheetname)
except:
print "Problem opening " + filename
return False
types = sheet.row_types(datastart)
if clmnnames == -1:
nmcols = ['clmn' + str(k) for k in range(len(types))]
else:
nmcols = corrUniqCols([str(q) for q in sheet.row_values(clmnnames)])
data = []
for i in range(datastart, sheet.nrows):
data.append(corrUnicodeErr(sheet.row_values(i)))
return nmcols, types, data
def corrUniqCols(arr):
    """Normalise column names (spaces -> underscores, lowercase) and make
    duplicates or empty names unique with a 'clmn_<position>_' prefix."""
    unique_names = []
    for position, raw_name in enumerate(arr, start=1):
        name = raw_name.replace(" ", "_").lower()
        if name != '' and name not in unique_names:
            unique_names.append(name)
        else:
            unique_names.append('clmn_' + str(position) + "_" + name)
    return unique_names
def corrUnicodeErr(row):
    """Return a copy of ``row`` where any cell that cannot be rendered via
    str() is replaced with '-' (guards against Python 2 unicode encode errors).
    """
    cleaned = []
    for cell in row:
        try:
            str(cell)
            cleaned.append(cell)
        except Exception:  # narrowed from a bare except
            cleaned.append('-')
    return cleaned
def pythonToPHP(result, tmp_file):
to_php = json.dumps(result)
file_down = open(tmp_file, 'w')
file_down.write(to_php)
file_down.close()
gid = grp.getgrnam("brewers").gr_gid
os.chown(tmp_file, -1, gid)
os.chmod(tmp_file, 0777)
def PHPTopython(infile):
    """Load and return the JSON payload previously written for/by the PHP layer.

    The original read at most 1,000,000 characters through a leaked file
    handle; this reads the whole file and closes it deterministically.
    """
    with open(infile) as json_file:
        return json.loads(json_file.read())
def parse_ini_arguments(ini_file, section):
    # Read one [section] of an INI file and return {option: typed value},
    # coercing every raw string through tryeval (Python 2 ConfigParser module).
    config = ConfigParser.ConfigParser()
    config.read(ini_file)
    results = {arg: tryeval(config.get(section, arg)) for arg in config.options(section)}
    return results
def tryeval(val):
    """
    Convert to the proper type, e.g. '87' -> 87
    :param val: string
    :return: mixed (the literal-evaluated value, or the original string)
    """
    try:
        return ast.literal_eval(val)
    except (SyntaxError, ValueError):
        return val
## correct if forward slash is missing at the end of 'path'
def corrpath(path):
    # Joining with '' appends the OS separator only when it is absent.
    return os.path.join(path, '')
## parse True to 'y' and False to 'n' (any truthy value maps to 'y')
def booltostring(inpbool):
    if inpbool:
        return 'y'
    return 'n'
## parse single vector to value i.e. [5] -> 5
def singlevec(value):
    # Unwrap only length-1 numpy arrays; everything else passes through.
    is_single_element_array = isinstance(value, np.ndarray) and len(value) == 1
    return value[0] if is_single_element_array else value
return value
| StarcoderdataPython |
12842900 | ''' script to get predictions for movielens data '''
from measures import predictions
from processing import preprocessing
import time
import pickle
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--movielens_data', choices=['small', '100k'], required=True)
script_arguments = vars(parser.parse_args())
movielens_data = script_arguments['movielens_data']
if movielens_data == 'small':
ratings = pickle.load(open("data/MOVIELENS/ml-latest-small/small_ratings_movielens.pkl","rb"))
films = pickle.load(open("data/MOVIELENS/ml-latest-small/small_films_movielens.pkl","rb"))
elif movielens_data == '100k':
ratings = pickle.load(open("data/MOVIELENS/ml-100k/100k_benchmark_ratings.pkl","rb"))
films = pickle.load(open("data/MOVIELENS/ml-100k/100k_benchmark_films_movielens.pkl","rb"))
# remove from ratings the missing films (that were missing info and hence were discarded)
ids_to_del_rf = set(ratings.keys()).difference(set(films.keys()))
ids_to_del_fr = set(films.keys()).difference(set(ratings.keys()))
ids_to_del = ids_to_del_rf.union(ids_to_del_fr)
corrected_ratings = dict()
for x in ratings.keys():
if x not in ids_to_del:
curr_rats = []
for curr_rat in ratings[x]:
temp_dict = dict()
temp_dict['user_rating'] = curr_rat['user_rating']
temp_dict['user_rating_date'] = curr_rat['user_rating_date']
temp_dict['user_id'] = 'x'+curr_rat['user_id']
curr_rats.append(temp_dict)
corrected_ratings[x] = curr_rats
ratings = corrected_ratings
corrected_films = dict()
for x in films.keys():
if x not in ids_to_del:
corrected_films[x] = films[x]
films = corrected_films
assert len(ratings) == len(films)
films, ratings_dict, compressed_test_ratings_dict, sims, movies_all_genres_matrix, movies_all_directors_matrix, movies_all_actors_matrix = preprocessing(ratings, films, movielens_data)
start = time.time()
MUR = 0.1
MUG = 0.6
MUA = 0.1
MUD = 0.1
nr_predictions, accuracy, rmse, mae, precision, recall, f1 = predictions(MUR, MUG, MUA, MUD, films, compressed_test_ratings_dict, ratings_dict, sims, movies_all_genres_matrix, movies_all_directors_matrix, movies_all_actors_matrix, movielens_data)
# print results
print("Number of user-items pairs: %d" % nr_predictions)
print("Accuracy: %.2f " % accuracy)
print("RMSE: %.2f" % rmse)
print("MAE: %.2f" % mae)
print("Precision: %.2f" % precision)
print("Recall: %.2f" % recall)
print("F1: %.2f" % f1)
end = time.time()
print("\nComputing strengths took %d seconds" % (end-start))
| StarcoderdataPython |
11300710 | <reponame>alifoliveira/rep-estudos<filename>test/list_ex.py
# Function for each item: apply abs() to every element via a comprehension.
fun = [abs(x) for x in [-2, -1, 0, 1, 2]]
# List or Tuple: build (y, y squared) pairs for y in 1..10.
tup = [(y, y**2) for y in range(1, 11)]
# Loop on Loop: all ordered pairs [a, b] with a != b, built two ways.
lista1 = []
for a in range(4):
    for b in range(4):
        if a != b:
            lista1.append([a, b])
# Equivalent nested-comprehension form of the loop above (same result).
lista2 = [[a, b] for a in range(4) for b in range(4) if a != b]
| StarcoderdataPython |
3582317 | <reponame>sukio-1024/codeitPy
# 빈 리스트 만들기
numbers = []
# numbers에 자연수 1부터 10까지 추가
i = 0
while (i < 10):
numbers.append(i + 1)
i += 1
print(numbers)
# numbers에서 홀수 제거
j = 0
while (j < len(numbers)):
if ((numbers[j] % 2) != 0):
del numbers[j]
j += 1
print(numbers)
# numbers의 인덱스 0 자리에 20이라는 값 삽입
numbers.insert(0, 20)
print(numbers)
# numbers를 정렬해서 출력
numbers = sorted(numbers)
print(numbers) | StarcoderdataPython |
390762 | #! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.light_sources import LightSources
from pycatia.in_interfaces.viewer import Viewer
from pycatia.in_interfaces.viewpoint_3d import Viewpoint3D
class Viewer3D(Viewer):
    """A CATIA 3D viewer.

    The 3D viewer aggregates a 3D viewpoint to display a 3D scene and
    additionally controls the lighting, the depth effects (clipping and
    fog), the navigation style and the rendering mode.

    See also: Viewpoint3D, CatLightingMode, CatNavigationStyle,
    CatRenderingMode.
    """

    def __init__(self, com_object):
        super().__init__(com_object)
        # Dedicated handle to the wrapped COM object.
        self.viewer_3d = com_object

    @property
    def clipping_mode(self) -> int:
        """The clipping mode (a CatClippingMode enumeration value)."""
        return self.viewer_3d.ClippingMode

    @clipping_mode.setter
    def clipping_mode(self, value: int):
        self.viewer_3d.ClippingMode = value

    @property
    def far_limit(self) -> float:
        """Distance of the far clipping plane, measured from the viewpoint
        origin and expressed in model units. Relevant only with the
        catClippingModeFar / catClippingModeNearAndFar clipping modes."""
        return self.viewer_3d.FarLimit

    @far_limit.setter
    def far_limit(self, value: float):
        self.viewer_3d.FarLimit = value

    @property
    def foggy(self) -> bool:
        """Whether fog is enabled; useful when clipping is enabled."""
        return self.viewer_3d.Foggy

    @foggy.setter
    def foggy(self, value: bool):
        self.viewer_3d.Foggy = value

    @property
    def ground(self) -> bool:
        """Whether the ground is displayed."""
        return self.viewer_3d.Ground

    @ground.setter
    def ground(self, value: bool):
        self.viewer_3d.Ground = value

    @property
    def light_sources(self) -> LightSources:
        """The viewer's light source collection (read-only)."""
        return LightSources(self.viewer_3d.LightSources)

    @property
    def lighting_intensity(self) -> float:
        """The lighting intensity, in the range 0 to 1."""
        return self.viewer_3d.LightingIntensity

    @lighting_intensity.setter
    def lighting_intensity(self, value: float):
        self.viewer_3d.LightingIntensity = value

    @property
    def lighting_mode(self) -> int:
        """The lighting mode (a CatLightingMode enumeration value)."""
        return self.viewer_3d.LightingMode

    @lighting_mode.setter
    def lighting_mode(self, value: int):
        self.viewer_3d.LightingMode = value

    @property
    def navigation_style(self) -> int:
        """The navigation style (a CatNavigationStyle enumeration value)."""
        return self.viewer_3d.NavigationStyle

    @navigation_style.setter
    def navigation_style(self, value: int):
        self.viewer_3d.NavigationStyle = value

    @property
    def near_limit(self) -> float:
        """Distance of the near clipping plane, measured from the viewpoint
        origin and expressed in model units. Relevant only with the
        catClippingModeNear / catClippingModeNearAndFar clipping modes."""
        return self.viewer_3d.NearLimit

    @near_limit.setter
    def near_limit(self, value: float):
        self.viewer_3d.NearLimit = value

    @property
    def rendering_mode(self) -> int:
        """The rendering mode (a CatRenderingMode enumeration value)."""
        return self.viewer_3d.RenderingMode

    @rendering_mode.setter
    def rendering_mode(self, value: int):
        self.viewer_3d.RenderingMode = value

    @property
    def viewpoint_3d(self) -> Viewpoint3D:
        """The 3D viewpoint of the viewer."""
        return Viewpoint3D(self.viewer_3d.Viewpoint3D)

    @viewpoint_3d.setter
    def viewpoint_3d(self, value: Viewpoint3D):
        self.viewer_3d.Viewpoint3D = value

    def rotate(self, i_axis: tuple, i_angle: float) -> None:
        """Rotate the viewer's contents.

        Applies a rotation of ``i_angle`` degrees around the axis
        ``i_axis`` (a sequence of 3 values); the invariant point is the
        target (Origin + FocusDistance * SightDirection).

        NOTE(review): the COM method may require a VB safe-array argument;
        if the plain sequence is rejected, marshal it through a system
        service call as done elsewhere in pycatia -- TODO confirm.
        """
        return self.viewer_3d.Rotate(i_axis, i_angle)

    def translate(self, i_vector: tuple) -> None:
        """Translate the viewer's contents by ``i_vector`` (a sequence of
        3 values).

        NOTE(review): same safe-array caveat as :meth:`rotate`.
        """
        return self.viewer_3d.Translate(i_vector)

    def __repr__(self):
        return f'Viewer3D(name="{self.name}")'
| StarcoderdataPython |
4939910 | '''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Evaluation of frozen/quantized graph
Author: <NAME>
'''
import os
import sys
import argparse
import shutil
import numpy as np
import cv2
from progressbar import ProgressBar
# Silence TensorFlow messages.
# NOTE: these environment variables must be set BEFORE tensorflow is
# imported below, which is why they precede the imports.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# workaround for TF1.15 bug "Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR"
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'

import tensorflow as tf
import tensorflow.contrib.decent_q
from tensorflow.python.platform import gfile

from preprocess import preprocess

# Separator line used by the console output below.
DIVIDER = '-----------------------------------------'
def graph_eval(input_graph_def, input_node, output_node, dataset, batchsize):
    """Evaluate a frozen/quantized graph on the dogs-vs-cats test set.

    :param input_graph_def: deserialized GraphDef of the frozen graph.
    :param input_node: name of the input placeholder (without ':0').
    :param output_node: name of the prediction tensor (without ':0').
    :param dataset: root dataset folder; images are read from <dataset>/test.
    :param batchsize: number of images per inference batch.

    Prints the number of correct/wrong predictions and the accuracy.
    """
    images = []
    ground_truth = []
    # Test filenames look like "<class>.<id>.<ext>"; the leading token
    # ("cat" or "dog") is the ground-truth label.
    for root, _dirs, files in os.walk(os.path.join(dataset, 'test')):
        for filename in files:
            class_id, _ = filename.split('.', 1)
            images.append(preprocess(os.path.join(root, filename)))
            ground_truth.append(class_id)
    print('Found',len(images),'images and',len(ground_truth),'ground_truth')

    tf.import_graph_def(input_graph_def, name='')

    # Input placeholder and output tensor of the imported graph.
    input_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(input_node + ':0')
    predict = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node + ':0')

    with tf.compat.v1.Session() as sess:
        predictions = []
        progress = ProgressBar()
        sess.run(tf.compat.v1.initializers.global_variables())
        # NOTE: trailing images that do not fill a whole batch are skipped.
        # (Fixed: the inner result loop used to shadow the batch index `i`.)
        for batch_idx in progress(range(len(images) // batchsize)):
            img_batch = images[batch_idx * batchsize:(batch_idx + 1) * batchsize]
            feed_dict = {input_tensor: img_batch}
            pred = sess.run([predict], feed_dict)
            # Output index 1 is assumed to be the "dog" class -- TODO confirm
            # against the training label order.
            for scores in pred[0]:
                if np.argmax(scores) == 1:
                    predictions.append('dog')
                else:
                    predictions.append('cat')

    # Compare predictions to ground truth (both are in walk order; any
    # skipped trailing images are simply not compared).
    correct_predictions = 0
    wrong_predictions = 0
    for predicted, actual in zip(predictions, ground_truth):
        if predicted == actual:
            correct_predictions += 1
        else:
            wrong_predictions += 1

    acc = (correct_predictions / len(predictions)) * 100
    print('Correct Predictions: ', correct_predictions)
    print('Wrong Predictions : ', wrong_predictions)
    print('Prediction Accuracy: ', acc, '%')
    return
def run_main():
    """Parse command-line options, echo the environment, and evaluate the
    chosen frozen graph with graph_eval()."""
    ap = argparse.ArgumentParser()
    ap.add_argument('-d', '--dataset', type=str, default='./dataset', help='root folder of dataset')
    ap.add_argument('-g', '--graph', type=str, default='./freeze/frozen_graph.pb', help='graph file (.pb) to be evaluated.')
    ap.add_argument('-i', '--input_node', type=str, default='input_1', help='input node.')
    ap.add_argument('-o', '--output_node', type=str, default='flatten_1/Reshape', help='output node.')
    ap.add_argument('-b', '--batchsize', type=int, default=1, help='Evaluation batchsize. Default is 1.')
    args = ap.parse_args()

    # Echo the framework versions and the selected options.
    print('\n' + DIVIDER)
    print('Keras version : ', tf.keras.__version__)
    print('TensorFlow version : ', tf.__version__)
    print(sys.version)
    print(DIVIDER)
    print(' Command line options:')
    for label, value in (('--dataset : ', args.dataset),
                         ('--graph : ', args.graph),
                         ('--input_node : ', args.input_node),
                         ('--output_node : ', args.output_node),
                         ('--batchsize : ', args.batchsize)):
        print(label, value)
    print(DIVIDER)

    # Deserialize the frozen graph and run the evaluation.
    input_graph_def = tf.Graph().as_graph_def()
    graph_bytes = tf.io.gfile.GFile(args.graph, "rb").read()
    input_graph_def.ParseFromString(graph_bytes)
    graph_eval(input_graph_def, args.input_node, args.output_node, args.dataset, args.batchsize)


if __name__ == '__main__':
    run_main()
| StarcoderdataPython |
8127604 | <reponame>iangregson/advent-of-code<gh_stars>0
#!/usr/bin/env python3
import os
# Read the puzzle input line-by-line, relative to this script's location.
dir_path = os.path.dirname(os.path.realpath(__file__))
file = open(dir_path + "/input.txt", "r")
input_txt = file.readlines()
# Strip trailing newlines/whitespace from every instruction line.
lines = [line.strip() for line in input_txt]
# print(lines)
class LightGrid:
    """A size x size grid of on/off lights driven by Advent of Code 2015
    day 6 instructions ("turn on", "turn off", "toggle")."""

    def __init__(self, size):
        # All lights start off (0); 1 means lit.
        self.g = [[0] * size for _ in range(size)]

    def switch_on(self, start, end):
        self.switch(start, end, "on")

    def switch_off(self, start, end):
        self.switch(start, end, "off")

    def toggle(self, start, end):
        self.switch(start, end, "toggle")

    def switch(self, start, end, operation):
        """Apply `operation` to the inclusive rectangle from `start` to
        `end`, both given as "x,y" strings."""
        x1, y1 = (int(v) for v in start.split(','))
        x2, y2 = (int(v) for v in end.split(','))
        for yy in range(y1, y2 + 1):
            row = self.g[yy]
            for xx in range(x1, x2 + 1):
                if operation == "toggle":
                    # Cells only ever hold 0 or 1, so this flips the light.
                    row[xx] = 1 - row[xx]
                elif operation == "on":
                    row[xx] = 1
                elif operation == "off":
                    row[xx] = 0

    def print(self):
        for row in self.g:
            print(row)
        print("\n")

    def parse_instruction(self, line):
        """Split an instruction line into [start, end, operation]."""
        words = line.split(" ")
        if words[0] == "toggle":
            return [words[1], words[3], words[0]]
        # "turn on x,y through x,y" / "turn off x,y through x,y"
        return [words[2], words[4], words[1]]

    def count_lit(self):
        """Return the number of lights that are currently on."""
        return sum(sum(row) for row in self.g)
# Part 1: run every instruction on a fresh 1000x1000 on/off grid and
# report how many lights end up lit.
lg = LightGrid(1000)
for line in lines:
    start, end, operation = lg.parse_instruction(line)
    lg.switch(start, end, operation)
print("Part 1 answer:", lg.count_lit())
class LightGrid_part2:
    """Part-2 variant: each cell holds a non-negative brightness level.
    "on" adds 1, "toggle" adds 2, "off" subtracts 1 (floored at 0)."""

    def __init__(self, size):
        self.g = [[0] * size for _ in range(size)]

    def switch_on(self, start, end):
        self.switch(start, end, "on")

    def switch_off(self, start, end):
        self.switch(start, end, "off")

    def toggle(self, start, end):
        self.switch(start, end, "toggle")

    def switch(self, start, end, operation):
        """Apply `operation` to the inclusive rectangle from `start` to
        `end`, both given as "x,y" strings."""
        x1, y1 = (int(v) for v in start.split(','))
        x2, y2 = (int(v) for v in end.split(','))
        for yy in range(y1, y2 + 1):
            row = self.g[yy]
            for xx in range(x1, x2 + 1):
                if operation == "toggle":
                    row[xx] += 2
                elif operation == "on":
                    row[xx] += 1
                elif operation == "off":
                    # Brightness never goes below zero.
                    row[xx] = max(row[xx] - 1, 0)

    def print(self):
        for row in self.g:
            print(row)
        print("\n")

    def parse_instruction(self, line):
        """Split an instruction line into [start, end, operation]."""
        words = line.split(" ")
        if words[0] == "toggle":
            return [words[1], words[3], words[0]]
        return [words[2], words[4], words[1]]

    def count_brightness(self):
        """Return the total brightness of the whole grid."""
        return sum(sum(row) for row in self.g)
# Part 2: re-run every instruction on a fresh brightness grid and report
# the total brightness.
lg = LightGrid_part2(1000)
for line in lines:
    start, end, operation = lg.parse_instruction(line)
    lg.switch(start, end, operation)
print("Part 2 answer:", lg.count_brightness())
| StarcoderdataPython |
3575810 | """Test suite for experimental functions."""
import random
import json
from os import environ
from os.path import join, dirname
from requests.exceptions import HTTPError
from unittest import TestCase, skip
from pangea_api import (
Knex,
Sample,
Organization,
SampleGroup,
User,
RemoteObjectError,
)
from pangea_api.work_orders import WorkOrderProto, WorkOrder, JobOrder
ENDPOINT = environ.get('PANGEA_API_TESTING_ENDPOINT', 'http://127.0.0.1:8000')
def random_str(len=12):
    """Return a random alphanumeric string of length `len`.

    Note: the parameter name shadows the builtin ``len``; it is kept for
    backward compatibility with existing keyword callers.
    """
    # Full alphanumeric alphabet. The original literal was missing the
    # letters 's' and 'S' ("...pqrt..."), so those characters could never
    # appear in generated strings.
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    return ''.join(random.choices(alphabet, k=len))
class TestPangeaApiWorkOrders(TestCase):
    """Test suite for packet building."""
    # Placeholder: no work-order tests have been implemented yet.
    pass
| StarcoderdataPython |
197313 | # Copyright 2011 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Thanks to grt for the fixes
import odf.opendocument
from odf.table import *
from odf.text import P
class ODSReader:
    """Load an OpenDocument spreadsheet (.ods) via odfpy and expose each
    sheet as a list of rows, where each row is a list of cell strings.

    Cells whose text starts with '#' are treated as comments and skipped;
    rows that end up empty are dropped.
    """

    # loads the file
    def __init__(self, file):
        self.doc = odf.opendocument.load(file)
        # Maps sheet name -> list of rows (each row a list of strings).
        self.SHEETS = {}
        for sheet in self.doc.spreadsheet.getElementsByType(Table):
            self.readSheet(sheet)

    # reads a sheet in the sheet dictionary, storing each sheet as an array (rows) of arrays (columns)
    def readSheet(self, sheet):
        name = sheet.getAttribute("name")
        rows = sheet.getElementsByType(TableRow)
        arrRows = []

        # for each row
        for row in rows:
            row_comment = ""
            arrCells = []
            cells = row.getElementsByType(TableCell)

            # for each cell
            for cell in cells:
                # ODF collapses runs of identical cells into one element
                # with a repeat count ("number-columns-repeated").
                repeat = cell.getAttribute("numbercolumnsrepeated")
                if(not repeat):
                    repeat = 1

                ps = cell.getElementsByType(P)
                textContent = ""

                # Concatenate the text nodes (nodeType 3) of every
                # paragraph in the cell.
                for p in ps:
                    for n in p.childNodes:
                        if (n.nodeType == 3):
                            textContent = textContent + str(n.data)

                # NOTE: textContent is always a str here, so this condition
                # is always true; kept as-is for byte-compatibility.
                if(textContent or textContent == ""):
                    if(textContent == "" or textContent[0] != "#"): # ignore comments cells
                        for rr in range(int(repeat)): # repeated?
                            arrCells.append(textContent)
                    else:
                        # Collect comment-cell text for possible diagnostics.
                        row_comment = row_comment + textContent + " ";

            # if row contained something
            if(len(arrCells)):
                arrRows.append(arrCells)
            #else:
            #    print "Empty or commented row (", row_comment, ")"

        self.SHEETS[name] = arrRows

    # returns a sheet as an array (rows) of arrays (columns)
    def getSheet(self, name):
        return self.SHEETS[name]
1650533 | <filename>niimpy/exploration/eda/test_lineplot.py
"""
Created on Tue Nov 2 13:57:00 2021
@author: arsii
"""
import pytest
import plotly
from niimpy.exploration import setup_dataframe
from niimpy.exploration.eda import lineplot
def test_timeplot_single_ts():
    """Plot one column for a single user; expect a plotly Figure back."""
    df = setup_dataframe.create_dataframe()
    fig = lineplot.timeplot(df,
                            users=['user_1'],
                            columns=['col_1'],
                            title='Test title',
                            xlabel='Xlabel',
                            ylabel='Ylabel',
                            resample='H',
                            interpolate=True,
                            window=1,
                            reset_index=False,
                            by=False
                            )

    assert (type(fig) == plotly.graph_objs._figure.Figure)
def test_timeplot_two_ts():
    """Plot one column for two users with daily resampling and a reset
    index; expect a plotly Figure back."""
    df = setup_dataframe.create_dataframe()
    fig = lineplot.timeplot(df,
                            users=['user_1','user_2'],
                            columns=['col_1'],
                            title='Test title',
                            xlabel='Xlabel',
                            ylabel='Ylabel',
                            resample='D',
                            interpolate=True,
                            window=1,
                            reset_index=True,
                            by=False
                            )

    assert (type(fig) == plotly.graph_objs._figure.Figure)
def test_timeplot_two_users():
    """Plot one column for two users with hourly resampling; expect a
    plotly Figure back."""
    df = setup_dataframe.create_dataframe()
    fig = lineplot.timeplot(df,
                            users=['user_1','user_2'],
                            columns=['col_1'],
                            title='Test title',
                            xlabel='Xlabel',
                            ylabel='Ylabel',
                            resample='H',
                            interpolate=True,
                            window=1,
                            reset_index=False,
                            by=False
                            )

    assert (type(fig) == plotly.graph_objs._figure.Figure)
def test_timeplot_two_users_and_columns():
    """Plot two columns for two users grouped by hour; expect a plotly
    Figure back."""
    df = setup_dataframe.create_dataframe()
    fig = lineplot.timeplot(df,
                            users=['user_1','user_2'],
                            columns=['col_1','col_2'],
                            title='Test title',
                            xlabel='Xlabel',
                            ylabel='Ylabel',
                            resample='H',
                            interpolate=True,
                            window=1,
                            reset_index=False,
                            by='hour'
                            )

    assert (type(fig) == plotly.graph_objs._figure.Figure)
def test_group_averages():
    """Plot group-level averages by weekday; expect a plotly Figure back."""
    df = setup_dataframe.create_dataframe()
    #df = df.rename_axis('timestamp')
    fig = lineplot.timeplot(df,
                            users='Group',
                            columns=['col_1'],
                            title='Test title',
                            xlabel='Xlabel',
                            ylabel='Ylabel',
                            resample='D',
                            interpolate=True,
                            window=1,
                            reset_index=False,
                            by='weekday'
                            )

    assert (type(fig) == plotly.graph_objs._figure.Figure)
3255795 | from setuptools import setup, find_packages
# Packaging metadata / installation script for the Trello CLI tool.
setup(
    name='trello-cli',
    version="1.1.1",
    description='Trello CLI',
    author='<NAME>',
    url='https://github.com/whitef0x0/trello-cli',
    author_email='<EMAIL>',
    license='BSD License',
    # Runtime dependencies pulled in on install.
    install_requires=["py-trello", "docopt", "python-dotenv"],
    packages=['trello_cli'],
    # Expose the `trello` console command, mapped to trello_cli.start:main.
    entry_points={
        'console_scripts': 'trello = trello_cli.start:main'
    },
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "License :: OSI Approved :: BSD License"
    ]
)
| StarcoderdataPython |
6498091 | import json
import scipy
import numpy as np
from PIL import Image
from scipy.optimize import minimize, rosen, rosen_der
def xy_convert_paranoma(xs, rs, FOV = 240):
    """Convert normalized panorama x-coordinates and radii to Cartesian
    (x, y) points.

    Each entry of ``xs`` is scaled by (FOV/180)*pi to an angle in radians;
    the returned arrays are r*cos(angle) and r*sin(angle), element-wise.
    """
    angles = xs * (FOV / 180) * np.pi
    return rs * np.cos(angles), rs * np.sin(angles)
# estimate r0 r1 r2 r3, force r0 == 1
def x_residual_func(rs, xs):
    """Residual function for estimating panorama corner radii.

    Maps each (x, r) pair to a 2D point (-r*cos(u), r*sin(u)) with
    u = x*(240/180)*pi, then penalizes (a) the first radius deviating
    from 1 (fixes the overall scale) and (b) non-perpendicular corners,
    via the normalized dot product of the edge vectors at each point
    (wrap-around indexing closes the polygon).

    BUG FIX: the cross term of the dot product previously read
    ``vec_cur_next_y * vec_cur_next_y`` (next_y twice) instead of
    ``vec_cur_last_y * vec_cur_next_y``.
    NOTE(review): the denominator uses squared norms, so the term is not
    exactly cos^2 of the corner angle -- kept as designed; confirm intent.
    """
    us = xs * ((240. / 180.) * np.pi)
    coorsx = np.multiply(-np.cos(us), rs)
    coorsy = np.multiply(np.sin(us), rs)
    len_r = rs.shape[0]
    residual = np.zeros(len_r + 1)
    # Anchor the first radius at 1 to fix the overall scale.
    residual[0] = (rs[0] - 1.)**2
    for i in range(1, 1 + len_r):
        cur = i - 1
        nxt = (cur + 1) % len_r
        # Edge vectors from the current corner to its neighbours
        # (index -1 wraps to the last corner).
        vx_last = coorsx[cur - 1] - coorsx[cur]
        vy_last = coorsy[cur - 1] - coorsy[cur]
        vx_next = coorsx[nxt] - coorsx[cur]
        vy_next = coorsy[nxt] - coorsy[cur]
        norm_last = vx_last**2 + vy_last**2
        norm_next = vx_next**2 + vy_next**2
        dot = vx_last * vx_next + vy_last * vy_next
        residual[i] = (dot / (norm_last * norm_next))**2
    return np.sum(residual)
if __name__ == '__main__':
    import argparse

    # CLI currently takes no arguments; the layout-file option is disabled.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    #parser.add_argument('--layout', required=True,
    #                    help='Txt file containing layout corners (cor_id)')
    args = parser.parse_args()

    #with open(args.layout) as f:
    #    inferenced_result = json.load(f)

    # Optimize the four radii starting from all-ones, for a fixed set of
    # normalized x-coordinates.
    r = [1, 1, 1, 1]
    res = minimize(x_residual_func, r, args=(np.array([0.1, 0.4, 0.7, 0.9])), tol=1e-6)
    print(res.x)
    # NOTE(review): the xs used here differ from the ones optimized above --
    # confirm whether that is intentional.
    coorsx, coorsy = xy_convert_paranoma(np.array([0.1, 0.2, 0.3, 0.4]), res.x)
    print(coorsx)
    print(coorsy)
| StarcoderdataPython |
import skimage
from skimage.color import rgb2gray
from skimage import data, io
import matplotlib.pyplot as plt
import matplotlib
# Use a larger default font for every matplotlib figure below.
matplotlib.rcParams['font.size'] = 18
import numpy as np
import os
def kernel_creator(kernel_s, kernel_v=1, f_type=1):
    """Build a kernel_s x kernel_s convolution kernel.

    f_type selects the shape:
      1 - low-pass (all entries = kernel_v)
      2 - low-pass with zeroed corners and kernel_v at the center
      3 - high-pass (-1 everywhere, kernel_v at the center)
      4 - high-pass (-2 everywhere, 1 at the corners, kernel_v center)
      5 - high-pass (-1 everywhere, 0 at the corners, kernel_v center)
      6 - horizontal edge detector (zeros; -1 right of center, 1 center)
      7 - vertical edge detector (zeros; -1 above center, 1 center)
    Any other f_type returns the scalar 0.
    """
    mid = round((kernel_s - 1) / 2)
    last = kernel_s - 1
    corners = ((0, 0), (last, 0), (0, last), (last, last))
    k = np.ones((kernel_s, kernel_s))
    if f_type == 1:
        k = k * kernel_v
    elif f_type == 2:
        for r, c in corners:
            k[r, c] = 0
        k[mid, mid] = kernel_v
    elif f_type == 3:
        k = -k
        k[mid, mid] = kernel_v
    elif f_type == 4:
        k = k * -2
        for r, c in corners:
            k[r, c] = 1
        k[mid, mid] = kernel_v
    elif f_type == 5:
        k = -k
        for r, c in corners:
            k[r, c] = 0
        k[mid, mid] = kernel_v
    elif f_type == 6:
        k = k * 0
        k[mid, mid:] = -1
        k[mid, mid] = 1
    elif f_type == 7:
        k = k * 0
        k[:mid, mid] = -1
        k[mid, mid] = 1
    else:
        k = 0
    return k
def mediana(matrix):
    """Return the median of all entries of a square matrix.

    The matrix is flattened and sorted; for an even element count the two
    middle values are averaged.
    """
    n = np.shape(matrix)[0]
    total = n * n
    ordered = np.sort(matrix.reshape(total))
    # round() matches the original midpoint selection exactly.
    half = round(total / 2)
    if total % 2 == 0:
        return (ordered[half] + ordered[half - 1]) / 2
    return ordered[half]
def filter_application(image, kernel_size=3, kernel_value=1, filter_type=0):
    """Apply a spatial filter to a 2D grayscale image.

    filter_type 0 applies a median filter (via mediana()); any other type
    builds a kernel with kernel_creator() and convolves the padded image,
    normalizing by kernel_value (type 3) or by the kernel's absolute sum.

    NOTE(review): `pad_with` (the np.pad padding callback) is not defined
    anywhere in this file, so every non-zero filter_type raises NameError;
    the script below only ever calls filter_type=0. TODO: define pad_with
    or switch to a named np.pad mode.
    """
    if( round(kernel_size,0) <2):
        return "error: the kernel size should be higher than 3"
    print ("filter type: ", filter_type)
    if filter_type ==0 :
        row, col = np.shape(image)
    else:
        kernel = kernel_creator (kernel_size,kernel_value,filter_type)
        print ( "...the kernel that you are using...")
        print ( kernel )
        padimage = np.pad(image,kernel_size, pad_with)
        row, col = np.shape(padimage)
    filtered_image = np.empty([row-kernel_size-1, col-kernel_size-1])
    # Slide a kernel_size x kernel_size window over the image.
    for i in range(row-kernel_size-1):
        for j in range(col-kernel_size-1):
            if filter_type ==0:
                # Median filter: replace the pixel with the window median.
                subm_ = image[ i:kernel_size+i , j:kernel_size+j]
                median = mediana(subm_)
                filtered_image[i,j] = median
            elif filter_type == 3:
                # High-pass: normalize by the center weight only.
                subm_ = padimage[ i:kernel_size+i , j:kernel_size+j]
                mult_ = np.multiply(subm_,kernel)
                filter_ = np.sum(mult_) / kernel_value
                filtered_image[i,j] = filter_
            else:
                # General case: normalize by the kernel's absolute sum.
                subm_ = padimage[ i:kernel_size+i , j:kernel_size+j]
                mult_ = np.multiply(subm_,kernel)
                filter_ = np.sum(mult_) / np.sum(np.absolute(kernel))
                filtered_image[i,j] = filter_
    return filtered_image
# Load the test image and convert it to grayscale.
filename = os.path.join('images/arrow.png')
imageRGB = io.imread(filename)
#imageRGB = data.astronaut()
#plt.figure()
#plt.imshow(image)
#plt.show()
image = rgb2gray(imageRGB)
row, col = np.shape(image)

# Rotation angle and a 2x3 affine matrix that rotates about the image
# center (dx, dy re-center the rotation).
alpha = 15
alpha_rad = np.pi * alpha / 180
cx = col/2
cy = row/2
dx = cx - cx*np.cos(alpha_rad) - cy*np.sin(alpha_rad)
dy = cy + cx*np.sin(alpha_rad) - cy*np.cos(alpha_rad)
rot_m = np.matrix([[np.cos(alpha_rad), np.sin(alpha_rad), dx],\
    [-np.sin(alpha_rad), np.cos(alpha_rad), dy]])

# Rotate the four image corners (homogeneous coordinates) to find the
# bounding box of the rotated image.
p0 = np.round(rot_m * np.array([0,0,1]).reshape(3,1),0).astype(int) # x0,y0
p1 = np.round(rot_m * np.array([col,0,1]).reshape(3,1),0).astype(int) # x1,y0
p2 = np.round(rot_m * np.array([0,row,1]).reshape(3,1),0).astype(int) # x0,y1
p3 = np.round(rot_m * np.array([col,row,1]).reshape(3,1),0).astype(int) # x0,y0
p = [p0,p1,p2,p3]
i=0
print ("rotation ange...")
print ( str(alpha) + "degrees")
print ( "checking Rotated vertex...")
for items in p:
    print ("point : ", i)
    print ("x: {} , y: {}".format(items[0],items[1]))
    i+=1
print ( "image center...")
print ("x: {} , y: {}".format(cx,cy))
print ( "image size...")
print ("x: {} , y: {}".format(col,row))
a = np.array(p).reshape(4,2)
pmin = np.min(a,0)
pmax = np.max(a,0)
print ( "min point...")
print ( pmin )
print ( "max point...")
print ( pmax )
new_col = pmax[0]-pmin[0]
new_row = pmax[1]-pmin[1]
print ("the new image rotaged will have shape of")
print ("x: {}, y: {}".format(new_col, new_row))

# Forward-map every source pixel into the (offset) rotated canvas.
# NOTE: `p` is reused here as the transformed coordinate; out-of-bounds
# targets are silently skipped by the bare except.
rot = np.ones((new_row,new_col))
#rot = np.ones((row+1,col+1))
for x in range ( col ):
    for y in range (row):
        p = np.round(rot_m * np.array([x,y,1]).reshape(3,1),0).astype(int)
        x_ = p[0] + np.abs(pmin[0])
        y_ = p[1] + np.abs(pmin[1])
        try:
            rot[y_,x_] = image[y,x]
        except:
            pass
            #print ("x = {}, y = {}, x_ = {}, y_ = {}".format(x,y,x_,y_))

# Median-filter the rotated image to fill forward-mapping holes, then
# crop it back to roughly the original size.
rot = filter_application(rot,kernel_size=3,filter_type=0)
x1 = int((new_col-col)/2)
x2 = int(new_col - x1)
y1 = int((new_row-row)/2)
y2 = int(new_row - y1)
rot = rot[x1:x2,y1:y2]
print( "size: ", np.shape(rot))
print ( "x1: {} , x2: {}, y1: {} , y2: {}".format(x1,x2,y1,y2))

# Show original and rotated images side by side.
plt.figure()
plt.subplot(1,2,1)
plt.title("Original")
plt.imshow(image, cmap='gray')
plt.subplot(1,2,2)
plt.title("Rotated")
plt.imshow(rot, cmap='gray')
plt.show()
# --- Shape-analysis state ---------------------------------------------
image_size=[0,0]
columns=0
rows=0
mean=0
area_=0
centroid_x=0
centroid_y=0
max_x=0
max_y=0
max_x_r=0
max_y_r=0
index = 0
radius = 0
contour_points = []
contour_points_r = []
# define custom high-pass filter
mask_1=np.array([ [0., 0., -1.],
    [0., 3., -1.],
    [0., 0., -1.]])
# define custom high-pass filter
mask_2=np.array([ [0., 0., 0.],
    [0., 3., 0.],
    [-1., -1., -1.]])
corner=[0,0]
local_average_1=0
local_average_2=0
input_image = image
input_image_r = rot
image_size = np.shape(rot)
segmented_image = np.zeros(np.shape(input_image))
segmented_image_r = np.zeros(np.shape(input_image))

#area and centroid function
# Count pixels exactly equal to 1 and accumulate their coordinates.
for rows in range(1,image_size[0]-1):
    for columns in range(1,image_size[1]-1):
        if(input_image[rows,columns]==1):
            area_ = area_ + 1
            centroid_x += columns
            centroid_y += rows
centroid_x=centroid_x/area_
centroid_y=centroid_y/area_

#print area and centroid location
print("area: ",area_)
print("centroid_x: ",centroid_x)
print("centroid_y: ",centroid_y)

#segmentation function of first image
# Manual 3x3 convolution with both masks; the edge response is the
# absolute difference of the two local averages.
for rows in range(1,image_size[0]-1):
    for columns in range(1,image_size[1]-1):
        corner[0] = rows-1
        corner[1] = columns-1
        for i in range(0,3):
            for j in range(0,3):
                local_average_1 = local_average_1 + mask_1[i,j]*input_image[corner[0]+i,corner[1]+j]
                local_average_2 = local_average_2 + mask_2[i,j]*input_image[corner[0]+i,corner[1]+j]
        segmented_image[rows,columns]=np.abs(local_average_1 - local_average_2)
        local_average_1=0
        local_average_2=0

#segmentation function of second image
# Same edge extraction applied to the rotated image.
for rows in range(1,image_size[0]-1):
    for columns in range(1,image_size[1]-1):
        corner[0] = rows-1
        corner[1] = columns-1
        for i in range(0,3):
            for j in range(0,3):
                local_average_1 = local_average_1 + mask_1[i,j]*input_image_r[corner[0]+i,corner[1]+j]
                local_average_2 = local_average_2 + mask_2[i,j]*input_image_r[corner[0]+i,corner[1]+j]
        segmented_image_r[rows,columns]=np.abs(local_average_1 - local_average_2)
        local_average_1=0
        local_average_2=0
#binarize first image
# Threshold the edge responses at 0.5 to get binary contours.
for rows in range(1,image_size[0]-1):
    for columns in range(1,image_size[1]-1):
        if segmented_image[rows,columns]>0.5:
            segmented_image[rows,columns]=1
        else:
            segmented_image[rows,columns]=0

#binarize second image
for rows in range(1,image_size[0]-1):
    for columns in range(1,image_size[1]-1):
        if segmented_image_r[rows,columns]>0.5:
            segmented_image_r[rows,columns]=1
        else:
            segmented_image_r[rows,columns]=0

#find signature of first image and its more distant corner
# The "signature" is the list of centroid-to-contour distances; the
# contour point with the largest radius is remembered in (max_x, max_y).
for rows in range(1,image_size[0]-1):
    for columns in range(1,image_size[1]-1):
        if(segmented_image[rows,columns]==1):
            radius = np.sqrt(np.abs(centroid_x-columns)**2 + np.abs(centroid_y-rows)**2)
            if(len(contour_points)>0):
                if(radius>np.max(contour_points)):
                    max_x=columns
                    max_y=rows
            contour_points.append(radius)

#find signature of second image and its more distant corner
for rows in range(1,image_size[0]-1):
    for columns in range(1,image_size[1]-1):
        if(segmented_image_r[rows,columns]==1):
            radius = np.sqrt(np.abs(centroid_x-columns)**2 + np.abs(centroid_y-rows)**2)
            if(len(contour_points_r)>0):
                if(radius>np.max(contour_points_r)):
                    max_x_r=columns
                    max_y_r=rows
            contour_points_r.append(radius)

print(np.argmax(contour_points))
print(np.max(contour_points))
print("max x:", max_x, "max y:", max_y)
print("second image")
print(np.argmax(contour_points_r))
print(np.max(contour_points_r))
print("max x:", max_x_r, "max y:", max_y_r)

# Estimate the rotation angle as the difference between the angles of
# the farthest contour points of the two images, seen from the centroid.
# NOTE(review): np.arctan (not arctan2) loses the quadrant and dx may be
# zero -- confirm this is acceptable for the expected inputs.
dx1 = max_x-centroid_x
dy1 = max_y-centroid_y
tetha1=np.degrees(np.arctan(dy1/dx1))
dx2 = max_x_r-centroid_x
dy2 = max_y_r-centroid_y
tetha2=np.degrees(np.arctan(dy2/dx2))
tetha = tetha2-tetha1
print ( "... Detecting rotation angle of: .")
print(tetha)

# Show both images, their contours, and the two radius signatures.
plt.figure(1)
plt.subplot(2,3,1)
plt.imshow(input_image, cmap='gray')
plt.subplot(2,3,2)
plt.imshow(segmented_image, cmap='gray')
plt.subplot(2,3,3)
y_pos = np.arange(len(contour_points))
plt.bar(y_pos, contour_points, align='center', alpha=0.5)
plt.xlabel('histogram 1')
plt.subplot(2,3,4)
plt.imshow(input_image_r, cmap='gray')
plt.subplot(2,3,5)
plt.imshow(segmented_image_r, cmap='gray')
plt.subplot(2,3,6)
y_pos = np.arange(len(contour_points_r))
plt.bar(y_pos, contour_points_r, align='center', alpha=0.5)
plt.xlabel('histogram 1')
plt.show()
5073152 | <reponame>aadrm/breakoutwagtail
# Generated by Django 3.1.4 on 2021-03-19 11:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``platform`` foreign key to the ``review`` model."""

    # Must be applied after the previous migration of the ``streams`` app.
    dependencies = [
        ('streams', '0006_auto_20210319_1118'),
    ]

    operations = [
        migrations.AddField(
            model_name='review',
            name='platform',
            # SET_NULL keeps existing reviews if their platform is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='streams.reviewplatform'),
        ),
    ]
| StarcoderdataPython |
8068649 | """
This module defines a single AssociationItem in the AssociationsPanel.
"""
from threading import Thread
from PySide2.QtWidgets import QComboBox
from xdgprefs.gui.mime_item import MimeTypeItem
class AssociationItem(MimeTypeItem):
    """List item pairing a MIME type with a combo box of candidate apps.

    Selecting an application in the combo box asynchronously updates the
    default-application association for that MIME type.
    """

    def __init__(self, mime_type, apps, main_window, listview):
        MimeTypeItem.__init__(self, mime_type, listview)
        self.apps = apps
        self.main_window = main_window
        # Drop-down listing the applications able to open this MIME type.
        self.selector = QComboBox()
        self.selector.addItems(self.apps)
        self.selector.currentTextChanged.connect(self._on_selected)
        self.hbox.addWidget(self.selector, 2)

    def _on_selected(self, _):
        """Persist the newly selected application in a background thread."""
        mime = self.mime_type.identifier
        app = self.selector.currentText()
        self.main_window.status.showMessage(f'Setting {mime} to {app}...')

        def run():
            # Runs off the GUI thread; only the status bar is updated on
            # completion.
            success = self.main_window.assocdb.set_app_for_mimetype(mime, app)
            if success:
                msg = f'{app} was successfully set to open {mime}.'
            else:
                msg = f'Could not set {app} to open {mime}, please check ' \
                      f'the logs!'
            self.main_window.status.showMessage(msg)

        t = Thread(target=run)
        t.start()

    def __hash__(self):
        # Items are identified solely by their MIME type.
        return hash(self.mime_type)
| StarcoderdataPython |
282909 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Unreal utility functions and classes
"""
from __future__ import print_function, division, absolute_import
import unreal
def get_unreal_version_name():
    """
    Returns the version name of Unreal engine

    :return: str
    """
    engine_version = unreal.SystemLibrary.get_engine_version()
    return engine_version
def get_unreal_version():
    """
    Returns current version of Unreal engine

    Parses the engine version string (e.g. ``4.27.2-18319896+++UE4+Release-4.27``)
    into ``[major, minor, patch, changelist]``. If the string carries no numeric
    changelist after the dash, only the dotted components are returned instead
    of raising ``ValueError`` as before.

    :return: list(int)
    """
    version_name = get_unreal_version_name()
    # Everything after '+++' is branch metadata; drop it.
    version_split = version_name.split('+++')[0]
    versions = version_split.split('-')
    version_int = [int(part) for part in versions[0].split('.')]
    # Append the changelist number only when one is actually present;
    # previously int() was called on the dotted version itself when the
    # string had no '-' separator, which crashed.
    if len(versions) > 1 and versions[-1].isdigit():
        version_int.append(int(versions[-1]))
    return version_int
| StarcoderdataPython |
11201159 | #!/usr/bin/python
import csv
import glob
import json
import os
import posixpath
import time
from datetime import date, datetime

import paramiko
from paramiko_expect import SSHClientInteraction
BACKUP_SERVER_FQDN = ''
BACKUP_SERVER_IP = ''
BACKUP_USER = ''
BACKUP_PASS = ''
BACKUP_PATH = '/'
BACKUP_PORT = 22
def dump(host, commands, date, save_dir):
    """Run backup commands on one device over SSH and save the output.

    :param host: sequence of (hostname, ipaddress, username, password)
    :param commands: CLI commands to execute (e.g. ``['/c/d']``)
    :param date: date string used in the output file name
    :param save_dir: local directory receiving ``<hostname>_<date>.txt``
    :return: the hostname on success, ``None`` on failure (the original
        always returned ``None``, so callers checking the result still work)
    """
    # Prompt patterns: '>' user mode, '#' privileged mode, '$' shell prompt
    # ('$' must be escaped, otherwise it is an end-of-input anchor).
    PROMPT = [r'.*>\s*', r'.*#\s*', r'.*\$\s*']
    hostname, ipaddress, username, password = host[0], host[1], host[2], host[3]
    print('Taking {} backup configuration file'.format(hostname))
    try:
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname=ipaddress, username=username, password=password,
                       timeout=10, look_for_keys=False)
        try:
            with SSHClientInteraction(client, timeout=10, display=True) as interact:
                interact.send('')
                interact.expect(PROMPT)
                filename = hostname + '_' + date + '.txt'
                path = os.path.join(save_dir, filename)
                for command in commands:
                    # Send the actual command -- previously an empty string
                    # was sent here, so the backup command never ran.
                    interact.send(command)
                    interact.expect(r'.*#\s')
                    # Answer any y/n confirmation the dump command raises.
                    interact.send('n')
                    interact.expect(r'.*#\s')
                    output = interact.current_output_clean
                    with open(path, 'a') as config_file:
                        config_file.write(str(output) + '\n')
                interact.send('exit')
                interact.expect(PROMPT)
        finally:
            # Always release the SSH connection (was previously leaked).
            client.close()
        return hostname
    except Exception as e:
        print('Exception throws: {}'.format(e.args))
        return None
def backup(files, transport):
    """Upload each local file to the SFTP backup server.

    :param files: iterable of local file paths to upload
    :param transport: an already-connected :class:`paramiko.Transport`
    """
    try:
        # One SFTP session for all files (previously a new session was
        # opened per file and never reused).
        sftp = paramiko.SFTPClient.from_transport(transport)
        try:
            for file in files:
                print('Transferring {} to the SFTP server'.format(file))
                # The remote path must include the file name: putting onto a
                # bare directory path ('/') raises IOError on SFTP servers.
                remote_path = posixpath.join(BACKUP_PATH, os.path.basename(file))
                sftp.put(file, remote_path)
        finally:
            sftp.close()
    except Exception as e:
        print('Exception throws with backup to the SFTP server: {}'.format(e.args))
def main():
    """Read the device inventory, dump each config, and upload the results.

    Devices are read from ``sample.csv`` (hostname, ip, user, password per
    row); outputs are written into a directory named after today's date and
    then pushed to the SFTP backup server.
    """
    today = datetime.today()
    date = "{0:%Y%m%d}".format(today)
    # Local directory for this run's dumps (was referenced as an undefined
    # ``save_dir`` before).
    save_dir = str(date)
    commands = ['/c/d']  # Configuration dump
    hosts = []
    try:
        os.mkdir(save_dir)
        with open('sample.csv', 'r') as host_file:
            reader = csv.reader(host_file)
            # csv.reader yields lists, so the old ``host != ''`` test was
            # always true; filter genuinely empty rows instead (this also
            # replaces the unconditional ``hosts.pop()`` workaround, which
            # silently dropped the last device).
            hosts = [host for host in reader if host]
    except Exception as e:
        print('Exception throws: {}'.format(e.args))
    for host in hosts:
        ret = dump(host, commands, date, save_dir)
        if ret is not None:
            print('{} configuration backup has been taken successfully.'.format(ret))
    try:
        transport = paramiko.Transport((BACKUP_SERVER_IP, BACKUP_PORT))
        transport.connect(username=BACKUP_USER, password=BACKUP_PASS)
        # Fixed unbalanced parentheses in the original call.
        backup(glob.glob(save_dir + '/*'), transport)
        transport.close()
    except Exception as e:
        print('Exception throws with backup to the SFTP server:{}'.format(e.args))
| StarcoderdataPython |
3961 | <filename>opensteer/teams/admin.py
from django.contrib import admin
from opensteer.teams.models import Team, Member
# Expose Team and Member in the Django admin with the default ModelAdmin.
admin.site.register(Team)
admin.site.register(Member)
| StarcoderdataPython |
5189466 | from arango.exceptions import CollectionCreateError
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models.base import ModelBase
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """Schema editor mapping Django model operations onto ArangoDB collections."""

    def create_model(self, model: ModelBase):
        """Create the (document) collection backing ``model``.

        Creation is treated as idempotent: if the collection already exists,
        the error is reported and execution continues.
        """
        # TODO: Distinguish whether this should be an edge collection.
        name = model._meta.db_table
        try:
            self.connection.database.create_collection(name, edge=False)
        except CollectionCreateError:
            print("Collection {} already exists.".format(name))

    def delete_model(self, model):
        # Dropping collections is not supported yet.
        raise NotImplementedError
| StarcoderdataPython |
12840467 | try:
import feedparser, html2text, asyncio, json, datetime, telepot
from loguru import logger
from telepot.aio.loop import MessageLoop
from telepot.aio.delegate import per_chat_id, create_open, pave_event_space
except ImportError:
print("Failed to import required modules.")
class RSS(telepot.aio.helper.ChatHandler):
    """Telegram chat handler that polls RSS feeds and posts new entries.

    On ``/start`` it enters an endless loop: every 30 minutes each feed in
    ``feeds.json`` is checked, and any entry whose title differs from the
    stored one is rendered to Markdown and sent to the chat.
    """

    def __init__(self, *args, **kwargs):
        super(RSS, self).__init__(*args, **kwargs)

    async def date_title(self, file_name, object_name, date_title: str):
        """Set the date/title of latest post from a source.

        file_name: File name to open.
        object_name: Name of the object: feed name or twitter screen name.
        date_title: Date/title of the object being posted."""
        try:
            with open(file_name, "r+") as data_file:
                # Load json structure into memory.
                items = json.load(data_file)
                for name, data in items.items():
                    if ((name) == (object_name)):
                        # Replace value of date/title with date_title
                        data["date_title"] = date_title
                # Go to the top of feeds.json file.
                data_file.seek(0)
                # Dump the new json structure to the file.
                json.dump(items, data_file, indent=2)
                data_file.truncate()
                data_file.close()
        except IOError:
            logger.debug("date_title(): Failed to open requested file.")

    async def feed_to_md(self, state, name, feed_data):
        """A Function for converting rss feeds into markdown text.

        state: Either `set` or `None`: To execute date_title()
        name: Name of RSS feed object: eg: hacker_news
        feed_data: Data of the feed: URL and post_date from feeds.json"""
        # Parse rss feed.
        d = feedparser.parse(feed_data["url"])
        # Target the first post.
        # NOTE(review): raises IndexError/KeyError if the feed has no
        # entries or failed to download -- confirm feeds are reliable.
        first_post = d["entries"][0]
        title = first_post["title"]
        summary = first_post["summary"]
        post_date = first_post["published"]
        link = first_post["link"]
        # Convert the HTML summary to plain text, stripping images/links.
        h = html2text.HTML2Text()
        h.ignore_images = True
        h.ignore_links = True
        summary = h.handle(summary)
        if ((state) == ("set")):
            logger.debug(f"Running date_title for feeds.json at {datetime.datetime.now()}")
            # date_title() see utils.py
            await self.date_title("feeds.json", name, title)
        results = []
        result = {"title": title, "summary": summary,
                  "url": link, "post_date": post_date}
        results.append(result)
        # A list containing the dict object result.
        return results

    async def file_reader(self, path, mode):
        """Loads json data from path specified.

        path: Path to target_file.
        mode: Mode for file to be opened in."""
        try:
            with open(path, mode) as target_file:
                data = json.load(target_file)
                target_file.close()
            return data
        except IOError:
            logger.debug(f"Failed to open {path}")

    async def on_chat_message(self, msg):
        """On ``/start``, poll all feeds forever and post new entries."""
        if msg["text"] == "/start":
            # NOTE(review): loguru's logger.start() is a deprecated alias of
            # logger.add() -- confirm the installed loguru version supports it.
            logger.start("file_{time}.log", rotation="300 MB")
            while True:
                logger.debug("Checking Feeds!")
                feeds = await self.file_reader("feeds.json", "r")
                for name, feed_data in feeds.items():
                    results = await self.feed_to_md(None, name, feed_data)
                    # Checking if title is the same as date in feeds.json file.
                    # If the same, pass; do nothing.
                    if ((feed_data["date_title"]) == (results[0]["title"])):
                        pass
                    elif ((feed_data["date_title"]) != (results[0]["title"])):
                        # New entry: re-fetch with state="set" to persist the
                        # new title, then post it to the chat.
                        results = await self.feed_to_md("set", name, feed_data)
                        logger.debug(f"Running feed_to_md at {datetime.datetime.now()}")
                        rss_msg = f"""[{results[0]["title"]}]({results[0]["url"]})\n{results[0]["summary"]}"""
                        await self.bot.sendMessage(msg["chat"]["id"], rss_msg, parse_mode="Markdown")
                # Sleep for 30 mins before re-checking.
                logger.debug("Sleeping for 30 mins.")
                await asyncio.sleep(1800)
if __name__ == "__main__":
TOKEN = "Insert Key Here."
bot = telepot.aio.DelegatorBot(TOKEN, [
pave_event_space()(
per_chat_id(), create_open, RSS, timeout=10),
])
loop = asyncio.get_event_loop()
loop.create_task(MessageLoop(bot).run_forever())
print('Listening ...')
loop.run_forever()
| StarcoderdataPython |
1846240 | import arviz as az
import warnings
from importlib import reload
from typing import List, Any
from copy import copy
import altair as alt
import numpy as np
import pandas as pd
import xarray as xr
from bayes_window import models, BayesWindow
from bayes_window import utils
from bayes_window import visualization
from bayes_window.fitting import fit_numpyro
from .visualization import plot_posterior
class BayesRegression:
    """Bayesian (hierarchical) regression wrapper around a ``BayesWindow``.

    Fits a numpyro model to the window's data, stores the trace, HDI/MAP
    posterior summaries and the data-with-posterior table, and builds the
    default Altair charts.
    """

    # Class-level annotations describing the attributes populated by fit()
    # and plot().
    b_name: str
    chart_data_line: alt.Chart
    chart_posterior_kde: alt.Chart
    chart_zero: alt.Chart
    chart_posterior_intercept: alt.Chart
    chart: alt.Chart
    chart_data_boxplot: alt.Chart
    chart_posterior_whiskers: alt.Chart
    chart_posterior_center: alt.Chart
    chart_base_posterior: alt.Chart
    charts_for_facet: List[Any]
    chart_posterior_hdi_no_data: alt.LayerChart
    add_data: bool
    data_and_posterior: pd.DataFrame
    posterior: dict
    trace: xr.Dataset

    def __init__(self, window=None, add_data=True, **kwargs):
        # Copy the provided window (or build a fresh one from kwargs) so this
        # object never mutates the caller's BayesWindow.
        window = copy(window) if window is not None else BayesWindow(**kwargs)
        window.add_data = add_data
        self.window = window

    def fit(self, model=models.model_hierarchical, do_make_change='subtract', fold_change_index_cols=None,
            do_mean_over_trials=True, fit_method=fit_numpyro, add_condition_slope=True, **kwargs):
        """Fit ``model`` to the window's data and build default charts.

        Returns ``self`` so calls can be chained.
        """
        self.model_args = kwargs
        if do_make_change not in ['subtract', 'divide', False]:
            raise ValueError(f'do_make_change should be subtract or divide, not {do_make_change}')
        if not add_condition_slope:
            warnings.warn(
                f'add_condition_slope is not requested. Slopes will be the same across {self.window.condition}')
        # if self.b_name is not None:
        #     raise SyntaxError("A model is already present in this BayesWindow object. "
        #                       "Please create a new one by calling BayesWindow(...) again")
        self.window.do_make_change = do_make_change
        self.model = model
        if fold_change_index_cols is None:
            fold_change_index_cols = self.window.levels
        fold_change_index_cols = list(fold_change_index_cols)
        if self.window.detail and (self.window.detail in self.window.data.columns) and (
                self.window.detail not in fold_change_index_cols):
            fold_change_index_cols += [self.window.detail]
        if add_condition_slope:
            # A per-condition slope only makes sense when there is more than
            # one condition value in the data.
            add_condition_slope = self.window.condition[0] and (
                np.unique(self.window.data['combined_condition']).size > 1)
            fold_change_index_cols.append('combined_condition')
        self.b_name = 'slope_per_condition' if add_condition_slope else 'slope'
        if add_condition_slope and (not self.window.condition[0] in fold_change_index_cols):
            # Side-effect-only comprehension: append missing condition columns.
            [fold_change_index_cols.extend([condition]) for condition in self.window.condition
             if not (condition in fold_change_index_cols)]

        # Fit
        self.trace = fit_method(y=self.window.data[self.window.y].values,
                                treatment=self.window.data[self.window.treatment].values,
                                # condition=self.window.data[self.window.condition[0]].values if self.window.condition[0] else None,
                                condition=self.window.data['combined_condition'].values if self.window.condition[
                                    0] else None,
                                group=self.window.data[self.window.group].values if self.window.group else None,
                                model=model,
                                add_condition_slope=add_condition_slope,
                                **kwargs)
        df_data = self.window.data.copy()
        if do_mean_over_trials:
            df_data = df_data.groupby(fold_change_index_cols).mean().reset_index()

        # Make (fold) change
        if do_make_change:
            try:
                df_data, _ = utils.make_fold_change(df_data, y=self.window.y, index_cols=fold_change_index_cols,
                                                    treatment_name=self.window.treatment,
                                                    fold_change_method=do_make_change)
            except Exception as e:
                print(e)

        reload(utils)
        self.trace.posterior = utils.rename_posterior(self.trace.posterior, self.b_name,
                                                      posterior_index_name='combined_condition',
                                                      group_name=self.window.group, group2_name=self.window.group2)

        # HDI and MAP:
        self.posterior = {var: utils.get_hdi_map(self.trace.posterior[var],
                                                 prefix=f'{var} '
                                                 if (var != self.b_name) and (
                                                     var != 'slope_per_condition') else '')
                          for var in self.trace.posterior.data_vars}

        # Fill posterior into data
        self.data_and_posterior = utils.insert_posterior_into_data(posteriors=self.posterior,
                                                                   data=df_data.copy(),
                                                                   group=self.window.group,
                                                                   group2=self.window.group2)

        try:
            self.posterior = utils.recode_posterior(self.posterior, self.window.levels, self.window.data,
                                                    self.window.original_data,
                                                    self.window.condition)
        except Exception as e:
            print(e)

        self.trace.posterior = utils.recode_trace(self.trace.posterior, self.window.levels, self.window.data,
                                                  self.window.original_data,
                                                  self.window.condition)

        self.default_regression_charts()
        return self

    def plot(self, x: str = ':O', color: str = ':N', detail: str = ':N', independent_axes=None,
             add_data=None,
             **kwargs):
        """Build the layered posterior/data chart and return it."""
        # Set some options
        if (x == '') or (x[-2] != ':'):
            x = f'{x}:O'
        if color[-2] != ':':
            color = f'{color}:N'
        if add_data is None:
            add_data = self.window.add_data
        # Pick which table to plot the posterior from.
        if add_data or self.posterior is None:  # LME
            posterior = self.data_and_posterior
        elif 'slope_per_condition' in self.posterior.keys():
            posterior = self.posterior['slope_per_condition']
        elif 'mu_intercept_per_group' in self.posterior.keys():
            posterior = self.posterior['mu_intercept_per_group']  # TODO fix data_and_posterior
        else:
            posterior = self.data_and_posterior

        # NOTE(review): ``len(... .unique() == 1)`` takes the length of a
        # boolean array, not a count -- possibly meant
        # ``len(posterior[x[:-2]].unique()) == 1``; confirm intent.
        if len(x) > 2 and len(posterior[x[:-2]].unique() == 1):
            add_x_axis = True
            # x = f'{self.window.condition[0]}:O'
        else:
            add_x_axis = False
        if not ((x != ':O') and (x != ':N') and x[:-2] in posterior.columns and len(posterior[x[:-2]].unique()) < 10):
            # long_x_axis = False
            # else:
            # long_x_axis = True
            x = f'{x[:-1]}Q'  # Change to quantitative encoding
            print(f'changing x to {x}')

        # If we are only plotting posterior and not data, independenet axis does not make sense:
        self.window.independent_axes = independent_axes or f'{self.window.y} diff' in posterior
        self.charts = []

        # 1. Plot posterior
        if posterior is not None:
            base_chart = alt.Chart(posterior)
            # Add zero for zero line
            base_chart.data['zero'] = 0
            self.chart_base_posterior = base_chart
            # No-data plot
            (self.chart_posterior_whiskers, self.chart_posterior_whiskers75,
             self.chart_posterior_center, self.chart_zero) = plot_posterior(title=f'{self.window.y}',
                                                                            x=x,
                                                                            base_chart=base_chart,
                                                                            do_make_change=self.window.do_make_change, **kwargs)

            # if no self.data_and_posterior, use self.posterior to build slope per condition:
            if (self.b_name != 'lme') and (type(self.posterior) == dict):
                main_effect = (self.posterior[self.b_name] if self.posterior[self.b_name] is not None
                               else self.posterior['slope_per_condition'])
                self.chart_posterior_hdi_no_data = alt.layer(
                    *plot_posterior(df=main_effect, title=f'{self.window.y}', x=x,
                                    do_make_change=self.window.do_make_change))

            self.chart_posterior_hdi = alt.layer(self.chart_posterior_whiskers, self.chart_posterior_whiskers75,
                                                 self.chart_posterior_center)
            self.charts.append(self.chart_posterior_whiskers)
            self.charts.append(self.chart_posterior_center)
            self.charts.append(self.chart_zero)
            self.charts_for_facet = self.charts.copy()  # KDE cannot be faceted so don't add it
            if (self.b_name != 'lme') and not add_x_axis:
                # Y Axis limits to match self.chart
                minmax = [float(posterior['lower interval'].min()), 0,
                          float(posterior['higher interval'].max())]
                y_domain = [min(minmax), max(minmax)]
                self.chart_posterior_kde = visualization.plot_posterior_density(base_chart, self.window.y, y_domain,
                                                                                self.trace,
                                                                                posterior,
                                                                                self.b_name,
                                                                                do_make_change=self.window.do_make_change)
                self.charts.append(self.chart_posterior_kde)
                # self.charts_for_facet.append(self.chart_posterior_kde)  # kde cannot be faceted
        else:
            base_chart = alt.Chart(self.window.data)

        # 2. Plot data
        y = f'{self.window.y} diff'
        if y in posterior:
            if (detail != ':N') and (detail != ':O'):
                assert detail in self.window.data

            # Plot data:
            y_domain = list(np.quantile(base_chart.data[y], [.05, .95]))
            if x != ':O':
                self.chart_data_line, chart_data_points = visualization.line_with_highlight(base_chart, x, y,
                                                                                            color, detail,
                                                                                            highlight=False)
                self.charts.append(self.chart_data_line)
                self.charts.append(chart_data_points)
                self.charts_for_facet.append(chart_data_points)
                self.charts_for_facet.append(self.chart_data_line)

            self.chart_data_boxplot = base_chart.mark_boxplot(
                clip=True, opacity=.3, size=9, color='black',
                median=alt.MarkConfig(color='red', strokeWidth=20)
            ).encode(
                x=x,
                y=alt.Y(f'{y}:Q',
                        axis=alt.Axis(orient='right', title=''),
                        scale=alt.Scale(zero=False, domain=y_domain)
                        )
            )
            self.charts.append(self.chart_data_boxplot)
            self.charts_for_facet.append(self.chart_data_boxplot)
        else:  # No data overlay
            warnings.warn("Did you have Uneven number of entries in conditions? I can't add data overlay")

        # Layer and facet:
        self.chart = visualization.auto_layer_and_facet(
            self.charts, self.charts_for_facet, self.window.independent_axes, **kwargs)
        # self.chart_posterior_hdi_no_data = visualization.auto_layer_and_facet(
        #     self.chart_posterior_hdi_no_data, charts_for_facet=None, independent_axes=self.window.independent_axes, **kwargs)

        # 4. Make overlay for data_detail_plot
        # self.plot_slopes_shading()
        return self.chart

    def plot_slopes_shading(self):  # TODO this method is WIP
        # NOTE(review): work in progress -- the expressions below are
        # computed but their results are discarded.
        # 0. Use
        pd.concat([utils.get_hdi_map(self.trace.posterior[var], prefix=f'{var} ')
                   for var in self.trace.posterior.data_vars], axis=1)
        # 1. intercepts for stim=1
        self.data_and_posterior['mu_intercept_per_group center interval']
        # 2. slopes+ intercepts
        self.data_and_posterior['intercept'] * self.data_and_posterior['slope']
        # 3. Overlay with
        self.chart_data_detail
        # 4. color by dimension of slope (condition (and group if self.window.group))

    def plot_intercepts(self, x=':O', y='mu_intercept_per_group center interval', **kwargs):
        """
        Plot intercepts of a regression model, mostly for a better understanding of slopes

        Parameters
        ----------
        x
        y
        kwargs

        Returns
        -------

        """
        assert self.posterior is not None
        if self.window.do_make_change:
            # combine posterior with original data instead, not diff TODO
            # Fill posterior into data
            data_and_posterior = utils.insert_posterior_into_data(posteriors=self.posterior,
                                                                  data=self.window.original_data.copy(),
                                                                  group=self.window.group,
                                                                  group2=self.window.group2)
        else:
            data_and_posterior = self.data_and_posterior

        # Redo boxplot (no need to show):
        self.window.data_box_detail(data=data_and_posterior, autofacet=False)

        # Make stand-alone posterior intercept chart:
        self.chart_posterior_intercept = visualization.posterior_intercept_chart(data_and_posterior,
                                                                                 x=x, y=y,
                                                                                 group=self.window.group)
        # Redo chart_intercept with x=treatment for overlay with self.chart_data_box_detail:
        chart_intercept = visualization.posterior_intercept_chart(data_and_posterior,
                                                                  x=':O', y=y,
                                                                  group=self.window.group)
        chart = alt.layer(chart_intercept, self.window.chart_data_box_detail).resolve_scale(y='independent')

        # Check
        if len(chart.data) == 0:
            raise IndexError('was layer chart from different sources?')
        if ('column' in kwargs) or ('row' in kwargs):
            return visualization.facet(chart, **kwargs)
        else:  # Auto facet
            return visualization.facet(chart, **visualization.auto_facet(self.window.group, self.window.condition))

    def default_regression_charts(self, **kwargs):
        """Pick a sensible default plot layout based on available conditions."""
        reload(visualization)
        # Default plots:
        # try:
        # facet_kwargs=visualization.auto_facet(self.window.group,self,condition)
        if self.window.condition[0] and len(self.window.condition) > 2:
            try:
                return self.plot(x=self.window.condition[0], column=self.window.condition[1],
                                 row=self.window.condition[2],
                                 **kwargs)
            except KeyError:
                return self.plot(x=self.window.condition[0], row=self.window.condition[1], **kwargs)
        elif self.window.condition[0] and len(self.window.condition) > 1:
            try:
                return self.plot(x=self.window.condition[0], column=self.window.group, row=self.window.condition[1],
                                 **kwargs)
            except KeyError:
                return self.plot(x=self.window.condition[0], row=self.window.condition[1], **kwargs)
        elif self.window.condition[0] and self.b_name != 'lme':
            try:
                return self.plot(x=self.window.condition[0], column=self.window.group, **kwargs)
            except KeyError:
                return self.plot(x=self.window.condition[0], **kwargs)
        else:  # self.window.group:
            return self.plot(x=self.window.condition[0] if self.window.condition[0] else ':O', **kwargs)
        # self.regression_charts(column=self.window.group)
        # except Exception as e:  # In case I haven't thought of something
        #     print(f'Please use window.regression_charts(): {e}')
        #     # import traceback
        #     # traceback.(e)

    def facet(self, **kwargs):
        # Delegate faceting to the underlying BayesWindow implementation.
        return BayesWindow.facet(self, **kwargs)

    def explore_model_kinds(self, parallel=True, add_group_slope=True, **kwargs):
        """Compare ablated variants of the fitted model (no condition,
        no treatment, no group, ...) via ``compare_models``."""
        from bayes_window.model_comparison import compare_models
        if self.b_name is None:
            raise ValueError('Fit a model first')
        elif 'slope' in self.b_name:
            models = {
                'full': self.model,
                'no_condition': self.model,
                'no_condition_or_treatment': self.model,
                'no-treatment': self.model,
                'no_group': self.model,
            }
            extra_model_args = [
                {'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group},
                {'treatment': self.window.treatment, 'condition': None},
                {'treatment': None, 'condition': None},
                {'treatment': None, 'condition': self.window.condition},
                {'treatment': self.window.treatment, 'condition': self.window.condition, 'group': None},
            ]
            if add_group_slope and self.window.group is not None:
                models['with_group_slope'] = self.model
                # add_group_slope is False by default in model_hierarchical
                extra_model_args.extend([{'treatment': self.window.treatment, 'condition': self.window.condition,
                                          'group': self.window.group,
                                          'add_group_slope': True}])
            return compare_models(
                df=self.window.data,
                models=models,
                extra_model_args=extra_model_args,
                y=self.window.y,
                parallel=parallel,
                dist_y=self.model_args['dist_y'] if 'dist_y' in self.model_args.keys() else None,
                **kwargs
            )

    def explore_models(self, parallel=True, add_group_slope=False, **kwargs):
        """Compare model variants across likelihood families (normal,
        student, lognormal, gamma, exponential) via ``compare_models``."""
        from bayes_window.model_comparison import compare_models
        if self.b_name is None:
            raise ValueError('Fit a model first')
        elif 'slope' in self.b_name:
            models = {
                'full_normal': self.model,
                'no_condition': self.model,
                'no_condition_or_treatment': self.model,
                'no-treatment': self.model
            }
            extra_model_args = [
                {'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group},
                {'treatment': self.window.treatment, 'condition': None},
                {'treatment': None, 'condition': None},
                {'treatment': None, 'condition': self.window.condition}]
            if self.window.group:
                # With a group present, also try alternative y-distributions.
                models.update({
                    'no_group': self.model,
                    'full_student': self.model,
                    'full_lognormal': self.model,
                    'full_gamma': self.model,
                    'full_exponential': self.model,})
                extra_model_args += [
                    {'treatment': self.window.treatment, 'condition': self.window.condition, 'group': None},
                    {'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group,
                     'dist_y': 'student'},
                    {'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group,
                     'dist_y': 'lognormal'},
                    {'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group,
                     'dist_y': 'gamma'},
                    {'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group,
                     'dist_y': 'exponential'},
                ]
            if add_group_slope:
                if self.window.group is None:
                    raise KeyError(
                        'You asked to include group slope. Initalize BayesWindow object with group input')
                models['with_group_slope'] = self.model
                # add_group_slope is False by default in model_hierarchical
                extra_model_args.extend(
                    [{'treatment': self.window.treatment, 'condition': self.window.condition,
                      'group': self.window.group,
                      'add_group_slope': True}])
            return compare_models(
                df=self.window.data,
                models=models,
                extra_model_args=extra_model_args,
                y=self.window.y,
                parallel=parallel,
                **kwargs
            )

    def fit_twostep(self, dist_y_step_one='gamma', **kwargs):
        """Two-step fit: per-condition means first, then a regression on them."""
        from bayes_window import BayesConditions
        if self.window.detail not in self.window.condition:
            self.window.condition += [self.window.detail]
        window_step_one = BayesConditions(dist_y=dist_y_step_one)
        window_step_two = BayesRegression(df=window_step_one.posterior['mu_per_condition'],
                                          y='center interval', treatment=self.window.treatment,
                                          condition=list(
                                              set(self.window.condition) -
                                              {self.window.treatment, self.window.group,
                                               self.window.detail}),
                                          group=self.window.group, detail=self.window.detail)
        window_step_two.window_step_one = window_step_one
        window_step_two.fit(model=models.model_hierarchical,
                            **kwargs
                            # fold_change_index_cols=('stim', 'mouse', 'neuron_x_mouse')
                            )
        return window_step_two

    def fit_twostep_by_group(self, dist_y_step_one='gamma', groupby=None, dist_y='student', parallel=True, **kwargs):
        """Two-step fit where step one is run separately for each ``groupby``
        subset (optionally in parallel via joblib)."""
        from joblib import Parallel, delayed
        from bayes_window import BayesConditions
        assert self.window.detail is not None

        def fit_subset(df_m_n, i):
            # Step one: per-condition posterior for a single groupby subset.
            window_step_one = BayesConditions(df=df_m_n, y=self.window.y, treatment=self.window.treatment,
                                              condition=[self.window.detail], group=self.window.group)
            window_step_one.fit(dist_y=dist_y_step_one, n_draws=1000, num_chains=1)
            posterior = window_step_one.posterior['mu_per_condition'].copy()
            posterior[groupby] = i
            return posterior

        groupby = groupby or self.window.condition[0]
        if parallel:
            step1_res = Parallel(n_jobs=12, verbose=1)(delayed(fit_subset)(df_m_n, i)
                                                       for i, df_m_n in self.window.data.groupby(groupby))
        else:
            from tqdm import tqdm
            step1_res = [fit_subset(df_m_n, i) for i, df_m_n in tqdm(self.window.data.groupby(groupby))]

        window_step_two = BayesRegression(df=pd.concat(step1_res).rename({'center interval': self.window.y}, axis=1),
                                          y=self.window.y, treatment=self.window.treatment,
                                          condition=list(
                                              set(self.window.condition) - {self.window.treatment, self.window.group,
                                                                            self.window.detail}),
                                          group=self.window.group, detail=self.window.detail)
        window_step_two.fit(model=models.model_hierarchical,
                            dist_y=dist_y,
                            robust_slopes=False,
                            add_group_intercept=False,
                            add_group_slope=False,
                            **kwargs
                            )
        return window_step_two

    def plot_model_quality(self, var_names=None, **kwargs):
        """Show arviz trace and pair plots for MCMC diagnostics."""
        assert hasattr(self, 'trace'), 'Run bayesian fitting first!'
        try:
            az.plot_trace(self.trace, var_names=var_names, show=True, **kwargs)
        except IndexError:
            pass
        az.plot_pair(
            self.trace,
            var_names=var_names,
            kind="hexbin",
            # coords=coords,
            colorbar=False,
            divergences=True,
            # backend="bokeh",
        )

    def plot_BEST(self, rope=(-.01, .01), **kwargs):
        """Plot the slope posterior against a ROPE and a zero reference."""
        if 'slope' in self.trace.posterior.data_vars:
            az.plot_posterior(
                self.trace.posterior,
                'slope',
                rope=rope,
                ref_val=0
            )
        elif 'slope_per_condition' in self.trace.posterior.data_vars:
            az.plot_posterior(
                self.trace.posterior,
                'slope_per_condition',
                rope=rope,
                ref_val=0
            )
        else:
            raise KeyError(f'No "slope" or "slope_per_condition" in posterior: {self.trace.posterior.data_vars}')
| StarcoderdataPython |
81717 | import discord
import random
import yaml
from .ignore import canDm
with open("./conf.yaml", encoding="utf-8") as conf:
config = yaml.load(conf, Loader=yaml.FullLoader)
async def haunt(ctx, user):
target = user or ctx.author
if (canDm(target.id)):
channel = await target.create_dm()
f = random.choice(["doot_intensifies.png", "skelly_gun.gif", "oh_no.png", "skelly_dance.gif", "skelly_daddy.png", "doot.png", "education.png"])
m = random.choice(["HAHA get haunted punk", "Someone wanted me to haunt you **real** bad", "Uh oh! Haunting incoming!", "Hi, I brought you this :)", "I'm very lost right now",
"Wow being a dead bot is a really odd experience, I can't feel my toes, I'M NOT SURE I EVEN HAVE TOES!"])
await channel.send(m, file=discord.File("images/" + f))
await ctx.message.add_reaction(config["emotions"]["_hauntmoji"])
else:
await ctx.message.add_reaction(config["emotions"]["_prohibited"]) | StarcoderdataPython |
4886007 | <reponame>gchure/phd
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import phd.viz
colors, palette = phd.viz.phd_style()
# Load the data.
data = pd.read_csv('../../data/ch9_mscl_si/MLG910_electrophysiology.csv')
data.columns = ['time', 'pa', 'mmHg']
# Instantiate the figure.
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(3, 1)
ax1 = fig.add_subplot(gs[:2, 0])
ax2 = fig.add_subplot(gs[2, 0])
# Format the axes
phd.viz.despine([ax1, ax2])
ax1.set_xlim([19, 23.8])
ax2.set_xlim([19, 23.8])
ax1.set_ylim([0, 525])
ax2.set_ylim([-350, 0])
# Format the axes
ax1.xaxis.set_ticklabels([])
# Add the appropriate labels
ax1.set_ylabel('current [pA]')
ax2.set_ylabel('pressure\n [mmHg]')
ax2.set_xlabel('time [s]')
# Add marker labels
ax1.text(0.08, 0.93, 'MscS', fontsize=8, transform=ax1.transAxes)
ax2.text(0.46, 0.93, 'MscL-sfGFP', fontsize=8, transform=ax1.transAxes)
# Plot the traces and color red
_ = ax1.plot(data['time'], data['pa'], '-', color=colors['purple'], lw=0.5)
_ = ax2.plot(data['time'], data['mmHg'], '-', color=colors['purple'], lw=0.75)
# Label the MscS points
_ = ax1.vlines(19.6, -1.5, 525, lw=31, color=colors['light_blue'], zorder=-1, alpha=0.5)
_ = ax2.vlines(19.6, 4, -350, lw=31, color=colors['light_blue'], zorder=-1, alpha=0.5)
# Label the MscL points
_ = ax1.vlines(21.7, -1.5, 525, lw=100, color=colors['light_orange'], zorder=-1, alpha=0.5)
_ = ax2.vlines(21.7, 4, -350, lw=100, color=colors['light_orange'], zorder=-1, alpha=0.5)
plt.savefig('../figs/ch9_figS1.pdf', bbox_inches='tight')
plt.savefig('../figs/ch9_figS1.png', bbox_inches='tight')
# %%
| StarcoderdataPython |
5122477 | import logging
from behave import when, given, then
from veripy import custom_types # noqa
from veripy.pages import Page
logger = logging.getLogger('navigation')
@given('that the browser is at "{name}"')
def given_browser_is_at(context, name):
    """ Load the page registered under the identifier ``name``.
    ::
        Given that the browser is at "google.com"
    This step must run before any interaction with a page: it looks the
    page up by its predefined identifier, stores it on the scenario
    context (so later steps can use the page's convenience selectors),
    and then directs the browser to the page's URL.
    """
    logger.info(f'Navigating to page named "{name}"')
    page = Page(name, context.browser)
    context.page = page
    page.browser.visit(page.url)
@given('the browser is now at "{name}"')
@when('the browser is now at "{name}"')
def given_when_page_switch(context, name):
    """ Switch the page context after an implicit navigation (a link
    click, a form submission, ...).
    ::
        When the browser is now at "Requisitions"
        # or
        Given the browser is now at "Requisitions"
    Only the context is changed -- no URL check is performed -- which
    makes this the right choice for pages whose URL is determined at
    runtime (dynamic URLs, etc). When the URL *is* predictable, prefer
    the asserting variant instead:
    ::
        Then the browser should be at "Requisitions"
    That variant switches the context *and* verifies the current URL
    matches the one registered for the page.
    """
    logger.info(f'Switching page context to "{name}"')
    context.page = Page(name, context.browser)
@then('the browser should be at "{name}"')
def then_page_switch(context, name):
    """ Assert the browser has landed on the page ``name`` and switch
    the page context to it.
    ::
        Then the browser should be at "Requisitions"
    The step fails when the browser's current URL differs from the URL
    registered for the page, so it both validates the navigation and
    enables the page's convenience selectors for subsequent steps.
    For pages reached implicitly whose URL is dynamic (and therefore
    cannot be asserted), use the non-asserting variants:
    ::
        When the browser is now at "Requisitions"
        # or
        Given the browser is now at "Requisitions"
    Those switch the context without checking the URL.
    """
    logger.info(f'Asserting the page is "{name}" and switching contexts.')
    target = Page(name, context.browser)
    assert target.url == context.browser.url
    context.page = target
@given('the browser window is {width:d} by {height:d} pixels')
def given_resize_window(context, width, height):
    """ Resize the browser viewport to ``width`` x ``height`` pixels.
    ::
        Given the browser window is 500 by 1000 pixels
    Useful for exercising the web application at different device
    screen sizes (desktop, tablet, phone, ...).
    """
    logger.info(f'Resizing the browser window to {width}x{height}')
    # Splinter has no resize API, so reach through to the underlying
    # driver -- see https://stackoverflow.com/a/21062539/148781
    context.page.browser.driver.set_window_size(width, height)
@given('the user waits {seconds:d} seconds for the "{element_name}" to be visible')
@when('the user waits {seconds:d} seconds for the "{element_name}" to be visible')
def when_wait_for_element(context, seconds, element_name):
    """ Block for up to ``seconds`` until ``element_name`` is visible
    on the current page.
    ::
        When the user waits 10 seconds for the "Search Field" to be visible
    """
    logger.info(f'Waiting {seconds} for "{element_name}" to be visible.')
    context.page.wait_for(element_name, wait_time=seconds)
@when('the user presses the "{keyboard_key:pressable_key_type}" key')
def when_press_key(context, keyboard_key):
    """ Send a key press to whichever element currently has focus.
    ::
        the user presses the "Return" key
    """
    logger.info(f'Pressing the "{keyboard_key}" key.')
    focused_element = context.browser.driver.switch_to.active_element
    focused_element.send_keys(keyboard_key)
@given('the user waits {seconds:d} seconds')
@when('the user waits {seconds:d} seconds')
def when_wait(context, seconds):
    """ Pause the scenario for up to ``seconds`` seconds.
    ::
        When the user waits 10 seconds
    Implemented by waiting on the page's ``body`` element, which is
    always present, so this effectively sleeps for the given duration.
    """
    logger.info(f'Waiting {seconds}')
    context.page.wait_for('body', wait_time=seconds)
@given('the user clicks on the {position:d}{ordinal:ordinal_indicator} {sub_element:w} \
in the "{element_name}"')
@when('the user clicks on the {position:d}{ordinal:ordinal_indicator} {sub_element:w} \
in the "{element_name}"')
def when_click_nth_element(context, position, ordinal, sub_element, element_name):
    """ Click the nth child of the given kind inside a named parent element.
    ::
        When the user clicks 2nd Entry the "Table"
    ``position`` is 1-based, so "1st" clicks the first matching child.
    """
    logger.info(f'Clicking on {position}{ordinal} "{sub_element}" \
of the element: "{element_name}".')
    children = context.page.find_children(sub_element, parent=element_name)
    children[position - 1].click()
| StarcoderdataPython |
8000029 | """empty message
Revision ID: 8c17c134ecd4
Revises: b8b77bef3e60
Create Date: 2020-03-13 14:24:18.177162
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8c17c134ecd4'          # this migration's id
down_revision = 'b8b77bef3e60'     # parent revision in the migration chain
branch_labels = None               # no named branches
depends_on = None                  # no cross-dependencies on other revisions
def upgrade():
    """Create the ``upgrade`` table and the ``upgrade_purchase`` join table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('upgrade',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('cost', sa.Numeric(), nullable=True),
    sa.Column('available_at', sa.Numeric(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Join table linking a user to each upgrade they purchased; must be
    # created after 'upgrade' because of the foreign key below.
    op.create_table('upgrade_purchase',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('upgrade_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['upgrade_id'], ['upgrade.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created by :func:`upgrade` (child table first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('upgrade_purchase')
    op.drop_table('upgrade')
    # ### end Alembic commands ###
| StarcoderdataPython |
11254681 | <reponame>Fak3/websubsub<filename>websubsub/management/commands/websub_purge_all.py
from uuid import uuid4
import logging
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand
from django.urls import resolve, reverse, NoReverseMatch
from websubsub.models import Subscription
from websubsub.tasks import subscribe
log = logging.getLogger('websubsub')
class Command(BaseCommand):
    """Management command that wipes every stored websub Subscription."""

    help = 'Delete all subscriptions from database.'

    def add_arguments(self, parser):
        parser.add_argument(
            '-y', '--yes', action='store_true', help='answer yes to all')

    def _confirm(self, count):
        """Prompt until the user answers y/Y (True) or n/N/empty (False)."""
        text = f'Are you sure you want to delete {count} subscriptions? (y/N): '
        while True:
            answer = input(text)
            if not answer or answer in ('n', 'N'):
                return False
            if answer in ('y', 'Y'):
                return True

    def handle(self, *args, **kwargs):
        count = Subscription.objects.count()
        if not count:
            print('No subscriptions in the database.')
            return
        if not kwargs['yes'] and not self._confirm(count):
            return
        Subscription.objects.all().delete()
        print(f'{count} subscriptions was successfully removed from database')
| StarcoderdataPython |
8028371 | <filename>examples/uploads/upload_items_and_custom_format_annotations.py
def main():
    """
    This is an example how to upload files and annotations to Dataloop platform.
    Image folder contains the images to upload.
    Annotations folder contains json file of the annotations. Same name as the image.
    We read the images one by one and create the Dataloop annotations using the annotation builder.
    Finally, we upload both the image and the matching annotations
    :return:
    """
    import json
    import os
    import dtlpy as dl

    # Get project and dataset
    project = dl.projects.get(project_name='Yachts')
    dataset = project.datasets.get(dataset_name='Open Seas')
    images_folder = '/home/local/images'
    annotations_folder = '/home/local/annotations'

    for image_name in os.listdir(images_folder):
        # derive the matching annotations json path from the image file name
        _, ext = os.path.splitext(image_name)
        json_path = os.path.join(annotations_folder, image_name.replace(ext, '.json'))
        image_path = os.path.join(images_folder, image_name)

        # Upload or get annotations from platform (if already exists)
        item = dataset.items.upload(local_path=image_path,
                                    remote_path='/in_storm',
                                    overwrite=False)
        assert isinstance(item, dl.Item)

        # read annotations from file
        with open(json_path, 'r') as fp:
            raw_annotations = json.load(fp)

        # create a Builder instance and add all annotations to it
        builder = item.annotations.builder()
        for raw in raw_annotations:
            # each record holds a label plus the 4 edges of the bounding box;
            # this is where you need to update according to your annotation format
            label_value = raw['label']
            box_left = raw['left']
            box_top = raw['top']
            box_right = raw['right']
            box_bottom = raw['bottom']
            builder.add(annotation_definition=dl.Box(top=box_top,
                                                     left=box_left,
                                                     bottom=box_bottom,
                                                     right=box_right,
                                                     label=str(label_value)))
        # upload annotations
        item.annotations.upload(builder)
| StarcoderdataPython |
3450549 | import os
def _styles_contains(font, value):
for style in font["styles"]:
if value.lower() in style.lower():
return True
return False
class FontList(list):
    """A list of fontconfig font dicts with chainable filter helpers.

    Each element is a dict with keys ``name``, ``path``, ``style``,
    ``styles`` (list of style strings), ``weight`` and ``spacing``.
    Every filter returns a new FontList, so calls can be chained, e.g.
    ``FontList.all().mono().bold()``.
    """

    def __init__(self, fonts=()):
        # Default is an immutable empty tuple: avoids the shared
        # mutable-default-argument pitfall of the previous `fonts=list()`.
        list.__init__(self, fonts)

    @staticmethod
    def all():
        """Query fontconfig via ``fc-list`` and return every installed font.

        BUGFIX: declared as a @staticmethod so it also works when called
        on an instance; previously it was a bare function in the class body.
        NOTE(review): this eval()s text produced by fc-list. That is fine
        for local font metadata, but never point this at untrusted input.
        """
        return FontList(sorted(eval("[" + os.popen(r'''fc-list -f "{\"name\":\"%{family[0]|cescape}\",
                                                        \"path\":\"%{file|cescape}\",
                                                        \"style\":\"\"\"%{style[0]|cescape}\"\"\".strip(),
                                                        \"styles\":\"\"\"%{style|cescape}\"\"\".strip().split(\",\"),
                                                        \"weight\":%{weight|cescape},
                                                        \"spacing\":%{spacing:-0}},\n"''').read() + "]"),
                               key=lambda font: font["name"]))

    def bold(self):
        """Fonts whose styles mention "Bold"."""
        return FontList([font for font in self
                         if _styles_contains(font, "Bold")])

    def italic(self):
        """Fonts whose styles mention "Italic"."""
        return FontList([font for font in self
                         if _styles_contains(font, "Italic")])

    def slanted(self):
        """Fonts whose styles mention "Italic", "Oblique" or "Slanted"."""
        return FontList([font for font in self
                         if (_styles_contains(font, "Italic") or
                             _styles_contains(font, "Oblique") or
                             _styles_contains(font, "Slanted"))])

    # This may not catch all monospaced fonts; e.g. Nimbus Mono L Bold
    # Oblique has no spacing specified (a mistake? the rest of the
    # family has spacing 100)
    def mono(self):
        """Fonts fontconfig marks as monospaced (spacing == 100)."""
        return FontList([font for font in self
                         if font["spacing"] == 100])

    def regular(self):
        """Fonts that are neither bold nor slanted."""
        return FontList([font for font in self
                         if font not in self.bold() + self.slanted()])

    def proportional(self):
        """Fonts not marked as monospaced."""
        return FontList([font for font in self
                         if font not in self.mono()])

    def by_style(self, style):
        """Fonts whose styles contain *style* (case-insensitive)."""
        return FontList([font for font in self
                         if _styles_contains(font, style)])

    def lacking_style(self, style):
        """Fonts whose styles do NOT contain *style*."""
        return FontList([font for font in self
                         if font not in self.by_style(style)])

    def by_partial_name(self, partial):
        """Fonts whose family name contains *partial* (case-insensitive)."""
        return FontList([font for font in self
                         if partial.lower() in font["name"].lower()])

    def by_weight(self, weight):
        """Fonts with exactly the given fontconfig weight."""
        return FontList([font for font in self
                         if font["weight"] == weight])

    def by_spacing(self, spacing):
        """Fonts with exactly the given fontconfig spacing."""
        return FontList([font for font in self
                         if font["spacing"] == spacing])
if __name__ == "__main__":
    # Manual smoke test; requires fontconfig's `fc-list` on PATH.
    # Kept from development: collect the distinct style words in use.
    # styles = []
    # for font in FontList.all():
    # styles += font["styles"][0].strip("1234567890 \t").split(" ")
    # print(set(styles))
    all_fonts = FontList.all()
    print(all_fonts.by_partial_name("nimbus mono"))
    # Fonts whose name says "mono" but that fontconfig does not mark as
    # monospaced (spacing != 100), e.g. Nimbus Mono L Bold Oblique.
    fonts = [font for font in all_fonts.by_partial_name("mono")
             if font not in all_fonts.mono()]
    for font in fonts:
        print(font["name"] + " " + " ".join(font["styles"]))
| StarcoderdataPython |
11364121 | <gh_stars>1-10
import io
import os
import random
import textwrap
from io import BytesIO
import discord
from bot.cogs.utils.embed import Embeds
import PIL.Image
from discord.ext import commands
from PIL import ImageDraw, ImageFont
# Custom-emoji markup strings (Discord <:name:id> format).
# NOTE(review): none of these appear to be referenced elsewhere in this
# file -- possibly used by other cogs, or dead; confirm before removing.
eupvote = '<:Upvote:822667264406192198>'
edownvote = '<:Downvote:822667263571525664>'
ecomment = '<:Comment:822668322293940224> '
class Image(commands.Cog):
    '''
    The commands here are used to make memes and show them to your friends!
    '''
    def __init__(self, client):
        self.client = client

    @property
    def _session(self):
        # Reuse the aiohttp session discord.py already maintains.
        return self.client.http._HTTPClient__session

    async def get_data(self, data_type: str = "json", url: str = None):
        """Fetch *url* and decode it according to *data_type*.

        Returns parsed JSON for "json", response text for "text", the raw
        aiohttp response for "image", or 400 for an unknown type.
        """
        response = await self._session.get(url)
        datatype = data_type.lower()
        if datatype == "json":
            return await response.json()
        elif 'text' in datatype:  # BUGFIX: compare the lower-cased value
            return await response.text()
        elif 'image' in datatype:
            return response
        else:
            return 400

    @commands.command(description="Fuck this meme, all my homies hate this meme.")
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def fuck(self, ctx, *, val):
        # Accept "top, bottom"; with a single value it is used for both lines.
        try:
            val1, val2 = val.split(',')
        except ValueError:
            val1, val2 = val, None
        img = PIL.Image.open("bot/src/MemeTemplates/AllMyHomiesHateTemplate.jpg")
        font = ImageFont.truetype("bot/src/arial.ttf", 50)
        draw = ImageDraw.Draw(img)
        fill_color = (255, 255, 255)
        stroke_color = (0, 0, 0)
        draw.text((311, 26), val1, font=font, fill=fill_color, stroke_width=2, stroke_fill=stroke_color)
        draw.text((153, 535), val2 if val2 is not None else val1, font=font,
                  fill=fill_color, stroke_width=2, stroke_fill=stroke_color)
        img.save("text.png")
        await ctx.send(file=discord.File("text.png"))
        os.remove("text.png")

    @commands.command(description="\"Are you going to sleep?\" \"Yes, now shut up.\" \"You should go use ConchBot\"")
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def brain(self, ctx, *, content):
        msg = await ctx.send("Creating your meme...")
        img = PIL.Image.open("bot/src/MemeTemplates/Brain.png")
        font = ImageFont.truetype("bot/src/arial.ttf", 10)
        draw = ImageDraw.Draw(img)
        text = textwrap.fill(content, width=25)
        draw.text((17, 176), text, font=font, fill="Black")
        img.save("Meme.png")
        await msg.delete()
        await ctx.send(file=discord.File("Meme.png"))
        os.remove("Meme.png")

    @commands.command(description="These drawings are made by people with mental illnesses.")
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def mentalillness(self, ctx, url=None):
        # Fall back to an attached image when no URL was given; previously a
        # missing attachment raised an uncaught IndexError.
        if url is None:
            if not ctx.message.attachments:
                await ctx.send("You must provide a valid image URL.")
                return
            url = ctx.message.attachments[0].url
        msg = await ctx.send("Creating your meme...")
        try:
            # BUGFIX: get_data is a coroutine and the response body read is
            # a coroutine too -- both were previously missing `await`.
            response = await self.get_data("image", url)
            img1 = PIL.Image.open(BytesIO(await response.read()))
        except Exception:
            await msg.delete()
            await ctx.send("You must provide a valid image URL.")
            return
        img2 = PIL.Image.open("bot/src/MemeTemplates/MentalIlness.png")
        # Scale the user image to a fixed width, preserving aspect ratio.
        basewidth = 175
        wpercent = (basewidth/float(img1.size[0]))
        hsize = int((float(img1.size[1])*float(wpercent)))
        img1 = img1.resize((basewidth, hsize), PIL.Image.ANTIALIAS)
        img2.paste(img1, (227, 286))
        img2.save("Meme.png")
        await msg.delete()
        await ctx.send(file=discord.File("Meme.png"))
        os.remove("Meme.png")

    @commands.command(description="This is where I'd put my meme, *IF I HAD ONE*")
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def idputmy(self, ctx, *, text):
        msg = await ctx.send("Creating your meme...")
        img = PIL.Image.open("bot/src/MemeTemplates/IdPutMy.png")
        font = ImageFont.truetype("bot/src/arial.ttf", 22)
        draw = ImageDraw.Draw(img)
        text = textwrap.fill(text, width=20)
        draw.text((45, 41), text, font=font, fill="White", stroke_width=2, stroke_fill="Black")
        img.save("Meme.png")
        await msg.delete()
        await ctx.send(file=discord.File("Meme.png"))
        os.remove("Meme.png")

    @commands.command(description="Is this a meme?")
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def isthis(self, ctx, *, text):
        try:
            text_one, text_two, text_three = text.split(',')
        except ValueError:
            await ctx.send("You must separate three values by commas.")
            return
        msg = await ctx.send("Creating your meme...")
        img = PIL.Image.open("bot/src/MemeTemplates/IsThis.jpg")
        font = ImageFont.truetype("bot/src/arial.ttf", 100)
        draw = ImageDraw.Draw(img)
        text_one = textwrap.fill(text_one, width=11)
        text_two = textwrap.fill(text_two, width=8)
        draw.text((181, 841), text_one, font=font, fill="White", stroke_width=5, stroke_fill="Black")
        draw.text((1097, 165), text_two, font=font, fill="White", stroke_width=5, stroke_fill="Black")
        draw.text((345, 1317), text_three, font=font, fill="White", stroke_width=5, stroke_fill="Black")
        img.save("Meme.png")
        await msg.delete()
        await ctx.send(file=discord.File("Meme.png"))
        os.remove("Meme.png")

    @commands.command(description="I receive: Nothing. You receive: This meme!")
    @commands.cooldown(1, 10, commands.BucketType.user)
    async def tradeoffer(self, ctx, *, text):
        try:
            text_one, text_two = text.split(',')
        except ValueError:
            # BUGFIX: this command takes two values, not three.
            await ctx.send("You must separate two values by commas.")
            return
        msg = await ctx.send("Creating your meme...")
        img = PIL.Image.open("bot/src/MemeTemplates/TradeOffer.jpg")
        font = ImageFont.truetype("bot/src/arial.ttf", 50)
        draw = ImageDraw.Draw(img)
        text_one = textwrap.fill(text_one, width=15)
        text_two = textwrap.fill(text_two, width=13)
        draw.text((32, 179), text_one, font=font, fill="White", stroke_width=5, stroke_fill="Black")
        draw.text((320, 184), text_two, font=font, fill="White", stroke_width=5, stroke_fill="Black")
        img.save("Meme.png")
        await msg.delete()
        await ctx.send(file=discord.File("Meme.png"))
        os.remove("Meme.png")

    @commands.command(description="\"My daughter tells me you like Discord bots\" \"Yes sir ConchBot is the worst\" "
                                  "\"You have six seconds to get the fuck out of my house\"")
    async def getout(self, ctx, *, text):
        msg = await ctx.send("Creating your meme...")
        img = PIL.Image.open("bot/src/MemeTemplates/stayout.jpg")
        font = ImageFont.truetype("bot/src/arial.ttf", 40)
        draw = ImageDraw.Draw(img)
        text = textwrap.fill(text, width=20)
        draw.text((26, 45), text, font=font, fill="black", stroke_width=2, stroke_fill="white")
        img.save("Meme.png")
        await msg.delete()
        await ctx.send(file=discord.File("Meme.png"))
        os.remove("Meme.png")

    @commands.command(description="Make a picture of yourself or a friend in a WANTED poster!\n[member] value is "
                                  "optional.")
    async def wanted(self, ctx, member: discord.Member = None):
        if member is None:
            member = ctx.author
        # BUGFIX: this cog class shadows PIL's Image module, so the bare
        # `Image.open(...)` calls resolved to the cog and raised
        # AttributeError; use PIL.Image explicitly.
        poster = PIL.Image.open("bot/src/MemeTemplates/wanted.jpg")
        asset = member.avatar.replace(size=128)
        data = BytesIO(await asset.read())
        pfp = PIL.Image.open(data)
        pfp = pfp.resize((308, 306))
        poster.paste(pfp, (69, 143))
        poster.save("profile.jpg")
        await ctx.send(file=discord.File("profile.jpg"))
        os.remove("profile.jpg")

    @commands.command(description="Show an image and a fact about the given animal!\n[animal] value is optional.")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def animal(self, ctx, animal=None):
        animal_options = ["dog", "cat", "panda", "fox", "bird", "koala", "red_panda", "racoon", "kangaroo", "elephant",
                          "giraffe", "whale"]
        if animal is None:
            animal = random.choice(animal_options)
        if (animal := animal.lower()) in animal_options:
            animal_fact_url = f"https://some-random-api.ml/facts/{animal}"
            animal_image_url = f"https://some-random-api.ml/img/{animal}"
            async with ctx.typing():
                # get_data('json', ...) already returns the decoded payload;
                # previously the code called .json() on the dict again, which
                # always failed and silently dropped the image.
                try:
                    image_api = await self.get_data('json', animal_image_url)
                    image_link = image_api["link"]
                except Exception:
                    image_link = None
                try:
                    # BUGFIX: the coroutine was not awaited before subscripting.
                    fact_api = await self.get_data('json', animal_fact_url)
                    fact = fact_api["fact"]
                except Exception:
                    # BUGFIX: the error embed was previously built and discarded.
                    await ctx.send(embed=Embeds().OnApiError(
                        command_name=ctx.command.qualified_name, status=400))
                    return
            embed = discord.Embed(title=f"{animal.title()} fact")
            embed.add_field(name="Fact", value=fact)
            if image_link is not None:
                embed.set_image(url=image_link)
            await ctx.send(embed=embed)
        else:
            await ctx.send(f"Sorry but {animal} isn't in my api")

    @commands.command(description="Get a random meme!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def meme(self, ctx):
        # Try hot posts from r/memes first; fall back to some-random-api.
        try:
            res = await self.get_data('json', 'https://www.reddit.com/r/memes/hot.json')
            embed = discord.Embed(title="Meme")
            embed.set_image(url=res['data']['children'][random.randint(0, 25)]['data']['url'])
            await ctx.send(embed=embed)
        except Exception:
            api = await self.get_data('json', 'https://some-random-api.ml/meme')
            try:
                embed = discord.Embed(title="Meme", description=api["caption"])
                embed.set_image(url=api["image"])
            except Exception:
                embed = Embeds().OnApiError(command_name=ctx.command.qualified_name, status=400)
            await ctx.send(embed=embed)

    @commands.command(description="This command makes anyone *glassed*.\n[member] value is optional.")
    async def glass(self, ctx, member: discord.Member = None):
        if not member:
            member = ctx.author
        glassImage = await self.get_data('image', f'https://some-random-api.ml/canvas/glass?avatar='
                                                  f'{member.avatar.replace(size=1024, format="png")}')
        imageData = io.BytesIO(await glassImage.read())
        await ctx.reply(file=discord.File(imageData, 'glass.gif'))

    @commands.command(description="This command makes anyone *inverted*.\n[member] value is optional.")
    async def invert(self, ctx, member: discord.Member = None):
        if not member:
            member = ctx.author
        invertImage = await self.get_data('image', f'https://some-random-api.ml/canvas/invert?avatar='
                                                   f'{member.avatar.replace(size=1024, format="png")}')
        imageData = io.BytesIO(await invertImage.read())
        await ctx.reply(file=discord.File(imageData, 'invert.gif'))

    @commands.command(description="This command makes anyone *bright*.\n[member] value is optional.")
    async def bright(self, ctx, member: discord.Member = None):
        if not member:
            member = ctx.author
        brightImage = await self.get_data('image', f'https://some-random-api.ml/canvas/bright?avatar='
                                                   f'{member.avatar.replace(size=1024, format="png")}')
        imageData = io.BytesIO(await brightImage.read())
        await ctx.reply(file=discord.File(imageData, 'bright.gif'))

    @commands.command(description="This command converts rgb to hex")
    async def hex(self, ctx, hex):
        # NOTE: parameter name kept for command-signature compatibility even
        # though it shadows builtins.hex.
        if not hex:
            await ctx.send("Please input a hex value")
            return  # BUGFIX: previously execution fell through with an empty value
        hexImage = await self.get_data('image', f'https://some-random-api.ml/canvas/colorviewer?hex={hex}')
        imageData = io.BytesIO(await hexImage.read())
        await ctx.reply(file=discord.File(imageData, 'hex.gif'))

    @commands.command(description="Make a YouTube comment!")
    async def comment(self, ctx, member: discord.Member, comment: str):
        from urllib.parse import quote  # local import; only this command needs it
        member_avatar = member.avatar.replace(format="png", size=256)
        # BUGFIX: URL-encode the user-controlled values so spaces/symbols in
        # the comment or username do not break the query string.
        api_link = f"https://some-random-api.ml/canvas/youtube-comment?" \
                   f"avatar={member_avatar}&comment={quote(comment)}&username={quote(member.name)}"
        youtubeComment = await self.get_data('image', api_link)
        imageData = io.BytesIO(await youtubeComment.read())
        await ctx.reply(file=discord.File(imageData, 'youtube.gif'))

    @commands.command(description="Blur an avatar.\n[member] value is optional.")
    async def blur(self, ctx, member: discord.Member = None):
        if not member:
            member = ctx.author
        blurImage = await self.get_data('image', 'https://some-random-api.ml/canvas/blur?avatar={}'
                                        .format(member.avatar.replace(format="png", size=1024)))
        imageData = io.BytesIO(await blurImage.read())
        await ctx.reply(file=discord.File(imageData, 'blur.gif'))

    @commands.command(description="This command makes you gay, basically.\n[member] value is optional.",
                      aliases=['gay'])
    async def rainbow(self, ctx, member: discord.Member = None):
        if not member:
            member = ctx.author
        gayImage = await self.get_data('image', 'https://some-random-api.ml/canvas/gay?avatar={}'
                                       .format(member.avatar.replace(format="png", size=1024)))
        imageData = io.BytesIO(await gayImage.read())
        await ctx.reply(file=discord.File(imageData, 'gay.gif'))

    @commands.command(description="Pixelate an avatar.\n[member] value is optional.")
    async def pixel(self, ctx, member: discord.Member = None):
        if not member:
            member = ctx.author
        pixelImage = await self.get_data('image', 'https://some-random-api.ml/canvas/pixelate?avatar={}'
                                         .format(member.avatar.replace(format="png", size=1024)))
        imageData = io.BytesIO(await pixelImage.read())
        await ctx.reply(file=discord.File(imageData, 'pixel.gif'))

    @commands.command(description="Returns an image of an anime pat!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def pat(self, ctx):
        pat_image = "https://some-random-api.ml/animu/pat"
        async with ctx.typing():
            api = await self.get_data('json', pat_image)
            image = api['link']
        await ctx.send(image)

    @commands.command(description="This command makes anyone *triggered*.\n[member] value is optional.")
    async def triggered(self, ctx, member: discord.Member = None):
        if not member:
            member = ctx.author
        wastedImage = await self.get_data('image', 'https://some-random-api.ml/canvas/triggered?avatar={}'
                                          .format(member.avatar.replace(format="png", size=1024)))
        imageData = io.BytesIO(await wastedImage.read())
        await ctx.reply(file=discord.File(imageData, 'triggered.gif'))

    @commands.command(aliases=['passed'], description="What you see when you vote for ConchBot on Top.gg\n[member] "
                                                      "value is optional.")
    async def missionpassed(self, ctx, member: discord.Member = None):
        if not member:
            member = ctx.author
        passedImage = await self.get_data('image', 'https://some-random-api.ml/canvas/mission-passed?avatar={}'
                                          .format(member.avatar.replace(format="png", size=1024)))
        imageData = io.BytesIO(await passedImage.read())
        await ctx.reply(file=discord.File(imageData, 'passed.gif'))

    @commands.command(description="You're wasted.\n[member] value is optional.")
    async def wasted(self, ctx, member: discord.Member = None):
        if not member:
            member = ctx.author
        wastedImage = await self.get_data('image', 'https://some-random-api.ml/canvas/wasted?avatar={}'
                                          .format(member.avatar.replace(format="png", size=1024)))
        imageData = io.BytesIO(await wastedImage.read())
        await ctx.reply(file=discord.File(imageData, 'wasted.gif'))

    @commands.command(description="Get an anime wink!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def wink(self, ctx):
        wink_image = 'https://some-random-api.ml/animu/wink'
        async with ctx.typing():
            api = await self.get_data('json', wink_image)
            image = api['link']
        await ctx.send(image)

    @commands.command(description="Get an anime hug.")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def hug(self, ctx):
        hug_image = 'https://some-random-api.ml/animu/hug'
        async with ctx.typing():
            api = await self.get_data('json', hug_image)
            image = api['link']
        await ctx.send(image)

    @commands.command(description="Get a random picture of Pikachu!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def pikachu(self, ctx):
        pikachu_image = 'https://some-random-api.ml/img/pikachu'
        async with ctx.typing():
            api = await self.get_data('json', pikachu_image)
            image = api['link']
        await ctx.send(image)
def setup(client):
    # Entry point used by discord.py's load_extension to register this cog.
    client.add_cog(Image(client))
| StarcoderdataPython |
1621654 | <filename>folderDefs_example.py
# Directory containing the training data and normalization files.
trainingDataDir = './'
# Sample SPCAM model outputs (NetCDF).
nc_file = trainingDataDir+'SPCAM_outputs_sample.nc'
# Normalization statistics matching the sample outputs (NetCDF).
nc_norm_file= trainingDataDir+'normalization.nc'
# Root directory where training runs/logs are written.
LogDirMain = './TRAINS'
| StarcoderdataPython |
9760593 | <gh_stars>0
"""
Remove noise ICs from preprocessed data using fsl_regfilt
(https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/MELODIC#fsl_regfilt_command-line_program).
@author: <NAME>
"""
import os, subprocess
import pandas as pd
# input
projectdir = '/nfs/z1/zhenlab/MotorMap'
# One subject id per line, no header; take the first (only) column.
subject_list = pd.read_csv('/nfs/z1/userhome/MaSai/workingdir/code/motor/subject_list.csv',header=None)[0].to_list()
session= 'ses-1'
run_list = ['run-1', 'run-2', 'run-3', 'run-4', 'run-5', 'run-6']
# /nfs/z1/zhenlab/MotorMap/data/bold/derivatives/melodic/sub-01/ses-1/sub-01_ses-1_task-motor_run-1.ica/series_original/
for subject in subject_list:
    for run in run_list:
        # read noise IC numbers from results_suggest.txt (last line holds a
        # bracketed, comma-separated list of component indices)
        ica_dir = os.path.join(projectdir, 'data', 'bold', 'derivatives', 'melodic', subject, session, subject+'_'+session+'_task-motor_'+run+'.ica')
        # NOTE(review): error_run is re-initialized on every run iteration,
        # so failures recorded earlier are discarded and the list is never
        # reported -- it probably belongs above both loops; confirm intent.
        error_run = []
        with open(os.path.join(ica_dir, 'results_suggest.txt')) as results:
            noise_id = results.readlines()[-1].replace('[','').replace(']','').replace(' ','')
        # run fsl_regfilt to regress the noise ICs out of the preprocessed BOLD
        input_file = os.path.join(projectdir, 'data', 'bold', 'derivatives', 'fmriprep', subject, session, 'func', subject + '_' + session + '_' + 'task-motor' + '_' + run + '_space-T1w_desc-preproc_bold.nii.gz')
        output_file = os.path.join(projectdir, 'data', 'bold', 'derivatives', 'fmriprep', subject, session, 'func', subject + '_' + session + '_' + 'task-motor' + '_' + run + '_space-T1w_desc-preproc_bold_denoised.nii.gz')
        mix_file = os.path.join(ica_dir, 'series_original', 'melodic_mix')
        fsl_regfilt_command = ' '.join(['fsl_regfilt',
                                        '-i', input_file,
                                        '-o', output_file,
                                        '-d', mix_file,
                                        '-f', '"{}"'.format(noise_id)
                                        ])
        print('RUN CMD: ' + fsl_regfilt_command)
        try:
            subprocess.check_call(fsl_regfilt_command, shell=True)
        except:
            # record the failed run instead of aborting the whole batch
            error_run.append(run)
print()
3383898 | <filename>docs/python_docs/themes/mx-theme/mxtheme/__init__.py
from os import path
from .card import CardDirective
# Theme version, exposed for packaging/metadata.
__version__ = '0.3.9'
__version_full__ = __version__
# Absolute directory of this package; it doubles as the theme path.
package_dir = path.dirname(path.abspath(__file__))
def get_path():
    """Return the absolute path to the bundled theme directory."""
    return package_dir
def setup(app):
    """Sphinx extension entry point: register this package as an HTML theme."""
    app.add_html_theme('mxtheme', package_dir)
| StarcoderdataPython |
8136016 | <reponame>jonathansick/astropy-librarian<gh_stars>0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for reducing HTML pages into search records.
"""
from __future__ import annotations
import logging
from copy import deepcopy
from dataclasses import dataclass
from typing import TYPE_CHECKING, Callable, Generator, List, Optional
if TYPE_CHECKING:
import lxml.html
__all__ = ["Section", "iter_sphinx_sections", "iter_nbcollection_sections"]
logger = logging.getLogger(__name__)
_HEADING_TAGS = ("h1", "h2", "h3", "h4", "h5", "h6")
@dataclass
class Section:
"""A section of content."""
content: str
"""The plain-text content of the section.
"""
headings: List[str]
"""The section headers, ordered by hierarchy.
The header of the present section is the last element.
"""
url: str
"""The URL of this section (typically an anchor link).
"""
@property
def header_level(self) -> int:
"""The heading level of this section.
For example, ``1`` corresponds to an "H1" section.
"""
return len(self.headings)
def new_section(self, *, tag: str, header: str, url: str) -> Section:
new_level = int(tag[1]) # turn e.g. h1 into 1
if new_level > self.header_level:
new_headers = self.headings + [header]
else:
new_headers = self.headings[: new_level - 1] + [header]
return Section(content="", headings=new_headers, url=url)
def iter_sphinx_sections(
*,
root_section: "lxml.html.HtmlElement",
base_url: str,
headers: List[str],
header_callback: Optional[Callable[[str], str]] = None,
content_callback: Optional[Callable[[str], str]] = None,
) -> Generator[Section, None, None]:
"""Iterate through the hierarchical sections in a root HTML element,
yielding the content between that section header and the next section
header.
This class is designed specifically for Sphinx-generated HTML, where
``div.section`` or ``section`` elements to contain each hierarchical
section of content.
Parameters
----------
root_section : lxml.html.HtmlElement
The root HTML element. It should begin with the highest level of
heading hierarchy, which is usually the "h1" header.
base_url : str
The URL of the HTML page itself.
headers : list of str
The ordered list of heading titles at hierarchical levels above the
present section. This parameter should be an empty list for the
*first* (h1) section.
header_callback : callable
This callback function processes the section title. The callable takes
a string and returns a string.
content_callback : callable
This callback function processes the section content. The callable
takes a string and returns a string.
Yields
------
section : Section
Yields `Section` objects for each section segment. Sections are yielded
depth-first. The top-level section is yielded last.
"""
id_ = root_section.attrib["id"]
url = f"{base_url}#{id_}"
text_elements: List[str] = []
for element in root_section:
if element.tag in _HEADING_TAGS:
current_header = element.text_content()
if header_callback:
current_header = header_callback(current_header)
current_headers = headers + [current_header]
elif (element.tag == "section") or (
element.tag == "div" and "section" in element.classes
):
yield from iter_sphinx_sections(
root_section=element,
base_url=base_url,
headers=current_headers,
header_callback=header_callback,
content_callback=content_callback,
)
else:
# To modify this element to extract content from it
# To extract content from this element we may need to modify it
# We don't want to affect the whole document tree though, so
# we make this temporary copy.
content_element = deepcopy(element)
# Delete "cell_output" divs, which are the code outputs from
# Jupyter-based pages (Jupyter Notebook). The outputs can be large
# and are often less relevant.
try:
output_divs = content_element.find_class("cell_output")
for output_div in output_divs:
output_div.drop_tree()
except ValueError:
# Raised because "HtmlComment" element does not support
# find_class().
pass
# Get plain-text content of the section
try:
if content_callback:
text_elements.append(
content_callback(content_element.text_content())
)
else:
text_elements.append(content_element.text_content())
except ValueError:
logger.debug("Could not get content from %s", content_element)
continue
yield Section(
content="\n\n".join(text_elements), headings=current_headers, url=url
)
def iter_nbcollection_sections(
    *,
    root_element: "lxml.html.HtmlElement",
    base_url: str,
    header_callback: Optional[Callable[[str], str]] = None,
    content_callback: Optional[Callable[[str], str]] = None,
) -> Generator[Section, None, None]:
    """Iterate through the hierarchical sections of a nbcollection-generated
    tutorial page

    Parameters
    ----------
    root_element : lxml.html.HtmlElement
        The root HTML element. For nbcollection-based pages, this should
        be the element with the ``.jp-Notebook`` class.
    base_url : str
        The URL of the HTML page itself.
    header_callback : callable
        This callback function processes the section title. The callable takes
        a string and returns a string.
    content_callback : callable
        This callback function processes the section content. The callable
        takes a string and returns a string.

    Yields
    ------
    section : Section
        Yields `Section` objects for each section segment. Sections are yielded
        depth-first. The top-level section is yielded last.
    """
    current_section = Section(content="", headings=[], url="")
    for content_element in iter_nbcollection_content_elements(
        root_element=root_element,
        base_url=base_url,
        header_callback=header_callback,
        content_callback=content_callback,
    ):
        logger.debug(
            "Processing %s %s ",
            content_element.tag,
            content_element.attrib.get("class"),
        )
        if content_element.tag in _HEADING_TAGS:
            # A new heading can trigger a new section.
            # First yield the current content if it already has content
            if current_section.headings and current_section.content:
                yield current_section
            # Now reset the content stack
            header_id = ""
            if "id" in content_element.attrib.keys():
                header_id = content_element.attrib["id"]
            if header_callback:
                header_content = header_callback(
                    content_element.text_content()
                )
            else:
                header_content = content_element.text_content()
            logger.debug("Got header %s\n", header_content)
            current_section = current_section.new_section(
                tag=content_element.tag,
                header=header_content,
                url=f"{base_url}#{header_id}",
            )
        else:
            # Fix: extract the text once. The original called the
            # content_callback twice (once discarded), and in the
            # no-callback branch called a nonexistent ``get_content()``
            # method (lxml HtmlElement provides ``text_content()``).
            new_content = content_element.text_content()
            if content_callback:
                new_content = content_callback(new_content)
            current_section.content += f" {new_content}"
            logger.debug("Got content\n%s\n", new_content)
    # Yield whatever content accumulated under the final heading.
    if current_section.headings:
        yield current_section
def iter_nbcollection_content_elements(
    *,
    root_element: lxml.html.HtmlElement,
    base_url: str,
    header_callback: Optional[Callable[[str], str]] = None,
    content_callback: Optional[Callable[[str], str]] = None,
) -> Generator[lxml.html.HtmlElement, None, None]:
    """Yield the content-bearing elements of an nbcollection-generated
    HTML document, for consumption by `iter_nbcollection_sections`.

    Yields
    ------
    content_element
        An lxml.html.HtmlElement with useful content.

    Notes
    -----
    Two kinds of elements are yielded:

    - The *children* of each ``jp-RenderedHTMLCommon`` div (prose cells):
      headers (``h1``, ``h2``, ...) and body content such as ``p`` tags.
    - Each ``jp-CodeMirrorEditor`` div itself (source-code cells, which
      exclude the cell outputs that are not indexed).
    """
    matches = root_element.cssselect(".jp-CodeMirrorEditor, .jp-RenderedHTMLCommon")
    for matched_element in matches:
        is_prose_wrapper = (
            matched_element.tag == "div"
            and "jp-RenderedHTMLCommon" in matched_element.classes
        )
        if is_prose_wrapper:
            # Prose content elements are the wrapper's children.
            yield from matched_element
        else:
            # Code cells are yielded whole; no header decomposition needed.
            yield matched_element
| StarcoderdataPython |
8080880 | import sys
from datetime import datetime
from typing import Union
from PyQt5 import QtWidgets
from media import Episode
from ui import MessageBox, add_grid_to_layout
from ui import media_objects
class EpisodeDialog(QtWidgets.QDialog):
    """Dialog for adding or editing an Episode of a Limited Series,
    a TV Show, or a Podcast.

    The ``show_season`` parameter controls the widget layout:

    * ``True`` (default): season and episode spinners are shown.
    * ``False``: the season row is hidden (season defaults to 1) but an
      episode spinner is still shown.
    * ``None``: podcast mode — year, month and day widgets are shown, and
      the date is packed into the Episode's fields as
      ``episode = month_index * 1000 + day`` with the year in the season
      field.

    If ``media_objects.get_episode()`` returns an existing Episode, the
    dialog opens pre-filled in "edit" mode; otherwise it opens in "add"
    mode. The resulting Episode is stored via ``media_objects.set_episode()``.
    """

    def __init__(self, parent: QtWidgets.QWidget = None,
                 *, show_season: Union[bool, None] = True):
        super().__init__(parent)
        self.show_season = show_season

        # Two button boxes: "add" (Ok/Cancel) for new episodes and
        # "edit" (Save/Cancel) for existing ones; both share handlers.
        self.button_box_add = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.button_box_edit = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Save)
        self.button_box_add.accepted.connect(self.add)
        self.button_box_add.rejected.connect(self.cancel)
        self.button_box_edit.accepted.connect(self.add)
        self.button_box_edit.rejected.connect(self.cancel)

        # Widgets are created in setup_episode_ui().
        self.season_spinner = None
        self.episode_spinner = None
        self.name_line_edit = None
        self.runtime_spinner = None
        self.watched_checkbox = None
        self.month_combobox = None
        self.day_spinner = None

        self.setup_ui()
        self.result = self.exec_()  # blocks until the dialog is closed

    # # # # # # # # # # # # # # # # # # # # # # # # #

    def setup_ui(self):
        """Sets up the UI and layout of the Episode dialog"""
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.setup_episode_ui(self))
        if media_objects.get_episode() is not None:
            layout.addWidget(self.button_box_edit)
            self.window().setWindowTitle(f"Edit {media_objects.get_episode().get_name()}")
        else:
            layout.addWidget(self.button_box_add)
            self.window().setWindowTitle("Add Episode")
        self.setLayout(layout)
        self.show()

    def setup_episode_ui(self, parent: QtWidgets.QWidget) -> QtWidgets.QWidget:
        """Creates and returns the widget holding the episode fields:
        season/year spinner, episode spinner (or month/day widgets for
        podcasts), name entry, runtime spinner, and watched checkbox.

        :param parent: The parent widget of the Episode widgets
        """
        widget = QtWidgets.QWidget(parent)
        layout = QtWidgets.QGridLayout()

        season_label = QtWidgets.QLabel("Season" if self.show_season is not None else "Year", widget)
        name_label = QtWidgets.QLabel("Name", widget)
        runtime_label = QtWidgets.QLabel("Runtime (in minutes)", widget)
        if self.show_season is not None:
            episode_label = QtWidgets.QLabel("Episode", widget)
            month_label = day_label = None
        else:
            month_label = QtWidgets.QLabel("Month", widget)
            day_label = QtWidgets.QLabel("Day", widget)
            episode_label = None

        self.season_spinner = QtWidgets.QSpinBox(widget)
        self.name_line_edit = QtWidgets.QLineEdit(widget)
        self.name_line_edit.setPlaceholderText("Episode Name")
        self.name_line_edit.selectAll()
        self.runtime_spinner = QtWidgets.QSpinBox(widget)
        self.runtime_spinner.setRange(1, 1000)
        self.watched_checkbox = QtWidgets.QCheckBox("Watched?", widget)
        if self.show_season is not None:
            self.season_spinner.setRange(1, 10000)
            self.episode_spinner = QtWidgets.QSpinBox(widget)
            self.episode_spinner.setRange(1, 1000)
        else:
            self.season_spinner.setRange(1900, datetime.now().year)
            self.season_spinner.setValue(datetime.now().year)
            self.season_spinner.valueChanged.connect(self.update_podcast_widgets)
            self.month_combobox = QtWidgets.QComboBox(widget)
            self.month_combobox.addItems(["January", "February", "March", "April", "May", "June",
                                          "July", "August", "September", "October", "November", "December"])
            # Fix: QComboBox indices are 0-based (January == 0) while
            # datetime months are 1-based; the original selected the *next*
            # month and was out of range (no selection) in December.
            self.month_combobox.setCurrentIndex(datetime.now().month - 1)
            self.month_combobox.currentIndexChanged.connect(self.update_podcast_widgets)
            self.day_spinner = QtWidgets.QSpinBox(widget)
            self.day_spinner.setRange(1, 31)
            self.day_spinner.setValue(datetime.now().day)

        # Pre-fill the widgets when editing an existing episode.
        episode = media_objects.get_episode()
        if episode is not None:
            self.season_spinner.setValue(episode.get_season())
            self.name_line_edit.setText(episode.get_name())
            self.runtime_spinner.setValue(episode.get_runtime())
            self.watched_checkbox.setChecked(episode.is_watched())
            if self.show_season is not None:
                self.episode_spinner.setValue(episode.get_episode())
            else:
                # Podcast episodes pack the date as month_index * 1000 + day.
                self.month_combobox.setCurrentIndex(episode.get_episode() // 1000)
                self.day_spinner.setValue(episode.get_episode() % 1000)

        if self.show_season is not None:
            grid = [[season_label, self.season_spinner],
                    [episode_label, self.episode_spinner],
                    [name_label, self.name_line_edit],
                    [runtime_label, self.runtime_spinner],
                    [self.watched_checkbox]]
        else:
            grid = [[season_label, self.season_spinner],
                    [month_label, self.month_combobox],
                    [day_label, self.day_spinner],
                    [name_label, self.name_line_edit],
                    [runtime_label, self.runtime_spinner],
                    [self.watched_checkbox]]

        if self.show_season is False:
            # TV show without season input: hide the season row entirely.
            _ = grid.pop(0)
            season_label.setVisible(False)
            self.season_spinner.setVisible(False)

        add_grid_to_layout(grid, layout)
        widget.setLayout(layout)
        return widget

    # # # # # # # # # # # # # # # # # # # # # # # # #

    def update_podcast_widgets(self):
        """Clamps the day spinner to the number of days in the currently
        selected month/year whenever a Podcast episode is being added or
        edited.
        """
        year = self.season_spinner.value()
        # Fix: full Gregorian leap-year rule; the original's ``year % 4 == 0``
        # wrongly treated century years such as 1900 and 2100 as leap years.
        is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
        day_max = [
            31, 29 if is_leap else 28,
            31, 30, 31, 30, 31, 31, 30, 31, 30, 31
        ]
        max_day_value = day_max[self.month_combobox.currentIndex()]
        self.day_spinner.setRange(1, max_day_value)
        if self.day_spinner.value() > max_day_value:
            self.day_spinner.setValue(max_day_value)

    def clear_widgets(self):
        """Resets all the widgets in the EpisodeDialog to their defaults"""
        self.season_spinner.setValue(1)
        self.name_line_edit.setText("")
        self.runtime_spinner.setValue(1)
        self.watched_checkbox.setChecked(False)
        if self.show_season is not None:
            self.episode_spinner.setValue(1)
        else:
            # Fix: 0-based month index (see setup_episode_ui).
            self.month_combobox.setCurrentIndex(datetime.now().month - 1)
            self.day_spinner.setValue(datetime.now().day)

    def cancel(self):
        """Cancels creating or editing an Episode"""
        self.clear_widgets()
        self.reject()

    def add(self):
        """Creates the Episode object from the widget values and stores it
        via ``media_objects.set_episode()``.

        If a value is invalid in the Episode object (e.g. no name given),
        a Message Box will appear saying that something is wrong.
        """
        try:
            if self.show_season is not None:
                episode_value = self.episode_spinner.value()
            else:
                # Pack the podcast date: month_index * 1000 + day.
                episode_value = self.month_combobox.currentIndex() * 1000 + self.day_spinner.value()
            media_objects.set_episode(Episode(
                self.season_spinner.value() if self.show_season is not False else 1,
                episode_value,
                self.name_line_edit.text() or None,
                self.runtime_spinner.value(),
                watched=self.watched_checkbox.isChecked()
            ))
            self.clear_widgets()
            self.accept()
        except ValueError:
            MessageBox("Missing Values",
                       "You must specify the Name of the episode",
                       self.parent())
if __name__ == "__main__":
    # Manual smoke test: show the dialog standalone and print the Episode
    # (or None if it was cancelled) that the user created.
    app = QtWidgets.QApplication(sys.argv)
    EpisodeDialog()
    print(media_objects.get_episode())
| StarcoderdataPython |
4935631 | <reponame>adriEzeMartinez/terminalDungeon
import json
import numpy as np
class Map:
    """Convenience loader for JSON map files.

    The tile grid from ``<file_name>.json`` is stored transposed so it can
    be indexed as ``map_obj[x, y]``. Each sprite is a dict with keys
    "pos", "image", "relative" for position, sprite image number, and
    relative position to player (the latter is set after the first call
    to cast_sprites in the renderer); "pos" is converted to a numpy array
    on load.
    """

    def __init__(self, file_name):
        # Load and unpack the JSON map description in one step.
        with open(file_name + ".json", 'r') as file:
            map_dict = json.load(file)
        self._map = np.array(map_dict["map"]).T
        self.sprites = map_dict["sprites"]
        # Convert sprite position lists into numpy arrays.
        for sprite in self.sprites:
            sprite["pos"] = np.array(sprite["pos"])

    def __getitem__(self, key):
        # Delegate indexing straight to the (transposed) tile grid.
        return self._map[key]
| StarcoderdataPython |
3596311 | <gh_stars>0
# Crie um programa que leia vários números inteiros pelo teclado. No final da execução, mostre a média entre todos os
# valores e qual foi o maior e o menor valores lidos. O programa deve perguntar ao usuário se ele quer ou não continuar
# a digitar valores.
# Fix: the original only accumulated a number when the *previous* answer was
# 'S', so the first number was never counted (and any answer other than
# exactly 'S' dropped values); it also never tracked or printed the required
# maior (largest) and menor (smallest) values.
c = 0  # how many numbers were read
s = 0  # running sum
maior = menor = 0
continuar = 'S'
while continuar != 'N':
    n = int(input('Digite um número: '))
    s += n
    c += 1
    # The first number initialises both extremes.
    if c == 1 or n > maior:
        maior = n
    if c == 1 or n < menor:
        menor = n
    continuar = str(input('Deseja continuar? ')).upper().strip()
m = s / c
print('Média {}'.format(m))
print('Maior {}'.format(maior))
print('Menor {}'.format(menor))
| StarcoderdataPython |
3582092 | <filename>johann_web_basic/main.py
# Copyright (c) 2020-present, The Johann Web Basic Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE file. See the AUTHORS file for names of contributors.
import secrets
import logzero
import requests
from flask import abort, jsonify, render_template, request
from flask_bootstrap import Bootstrap
from werkzeug.middleware.proxy_fix import ProxyFix
from johann_web_basic import app
from johann_web_basic.scenarios import scenarios_bp
logger = logzero.setup_logger(__name__)
# Random per-process secret for Flask-WTF CSRF tokens; regenerated on every
# restart, so outstanding sessions/CSRF tokens do not survive a redeploy.
app.secret_key = secrets.token_hex(16)  # WTF CSRF
# Serve Bootstrap assets locally instead of from a CDN.
app.config["BOOTSTRAP_SERVE_LOCAL"] = True
logger.debug(app.config)
bootstrap = Bootstrap(app)
# NOTE(review): the next two debug calls log the extension object and the
# ProxyFix class themselves — they look like leftover development statements;
# confirm before removing.
logger.debug(bootstrap)
app.register_blueprint(scenarios_bp)
# Trust X-Forwarded-* headers set by a fronting reverse proxy.
app.wsgi_app = ProxyFix(app.wsgi_app)
logger.debug(ProxyFix)
@app.route("/")
@app.route("/index")
@app.route("/about")
def index():
    """Render the static about page for "/", "/index" and "/about"."""
    return render_template("about.html")
@app.route("/johann/scores/<scenario_name>/status_alt")
def get_score_status_alt(scenario_name):
    """Proxy the Johann conductor's status_alt endpoint for a score.

    Returns the conductor's JSON response; aborts with the upstream status
    code on an error response, or 502 if the conductor is unreachable.
    """
    url = f"http://johann_conductor:5000/scores/{scenario_name}/status_alt"
    try:
        r = requests.get(url)
        resp_json = r.json() if r.ok else None
    except Exception as e:
        msg = f"Exception getting status_alt for score '{scenario_name}': {str(e)}"
        logger.warning(msg, exc_info=True)
        abort(502)
    if not r.ok:
        # Fix: abort() raises a werkzeug HTTPException, which is an Exception
        # subclass. The original called abort(r.status_code) inside the
        # ``try``, so the broad ``except Exception`` swallowed it and turned
        # every upstream error into a 502. Also: logging's warn() is
        # deprecated in favour of warning().
        msg = f"Failed to get status_alt for score '{scenario_name}': {r.reason}"
        logger.warning(msg)
        abort(r.status_code)
    return jsonify(resp_json)
@app.route("/johann/read_score/<scenario_name>")
def read_score(scenario_name):
    """Proxy the Johann conductor's read_score endpoint, forwarding the
    request's query string unchanged.

    Returns the conductor's JSON response; aborts with the upstream status
    code on an error response, or 502 if the conductor is unreachable.
    """
    query_string = request.query_string.decode()
    url = f"http://johann_conductor:5000/read_score/{scenario_name}?{query_string}"
    logger.debug(f"read_score() request URL: {url}")
    try:
        r = requests.get(url)
        resp_json = r.json() if r.ok else None
    except Exception as e:
        msg = f"Exception reading score '{scenario_name}': {str(e)}"
        logger.warning(msg, exc_info=True)
        abort(502)
    if not r.ok:
        # Fix: abort() raises an HTTPException (an Exception subclass); in the
        # original it was raised inside the ``try`` and swallowed by the broad
        # ``except Exception``, turning every upstream error into a 502.
        # Also: deprecated logger.warn() replaced with logger.warning().
        msg = f"Failed to read score '{scenario_name}': {r.reason}"
        logger.warning(msg)
        abort(r.status_code)
    return jsonify(resp_json)
@app.errorhandler(404)
def handle_404(_):
    """Render the custom 404 page; the error argument is ignored."""
    return render_template("404.html"), 404
@app.errorhandler(500)
def handle_500(_):
    """Render the custom 500 page; the error argument is ignored."""
    return render_template("500.html"), 500
@app.errorhandler(502)
def handle_502(_):
    """Render the custom 502 page (conductor unreachable); error ignored."""
    return render_template("502.html"), 502
if __name__ == "__main__":
    # Only for debugging while developing, i.e., `make dev`.
    # Binding 0.0.0.0 with debug=True is intentional for local dev only
    # (hence the bandit `nosec` marker); production runs behind ProxyFix.
    app.run(host="0.0.0.0", debug=True, port=80)  # nosec
| StarcoderdataPython |
12828660 | <reponame>h11r/eventsourcing
from abc import ABC, abstractmethod
from collections import defaultdict
from threading import Event, Lock, Thread
from typing import Dict, Iterable, Iterator, List, Set, Tuple, Type, TypeVar
from eventsourcing.application import Application, NotificationLog, Section
from eventsourcing.domain import Aggregate, AggregateEvent
from eventsourcing.persistence import (
Mapper,
Notification,
ProcessRecorder,
Tracking,
)
from eventsourcing.utils import get_topic, resolve_topic
class ProcessEvent:
    """
    Pairs a :class:`~eventsourcing.persistence.Tracking` object — the
    position of a processed domain event notification in a particular
    application's notification log — with the new domain events that
    result from processing that notification.
    """

    def __init__(self, tracking: Tracking):
        """
        Initialises the process event with the given tracking object
        and an initially empty list of resulting domain events.
        """
        self.tracking = tracking
        self.events: List[AggregateEvent] = []

    def save(self, *aggregates: Aggregate) -> None:
        """
        Collects pending domain events from each of the given aggregates.
        """
        for aggregate in aggregates:
            self.events.extend(aggregate.collect_events())
class Follower(Application):
    """
    Extends the :class:`~eventsourcing.application.Application` class
    by using a process recorder as its application recorder, by keeping
    track of the applications it is following, and pulling and processing
    new domain event notifications through its :func:`policy` method.
    """
    def __init__(self) -> None:
        super().__init__()
        # Maps a followed application's name to a (reader, mapper) pair:
        # the reader pulls notifications from that application's log, and
        # the mapper reconstructs domain events from the notifications.
        self.readers: Dict[
            str,
            Tuple[
                NotificationLogReader,
                Mapper[AggregateEvent],
            ],
        ] = {}
        self.recorder: ProcessRecorder
    def construct_recorder(self) -> ProcessRecorder:
        """
        Constructs and returns a :class:`~eventsourcing.persistence.ProcessRecorder`
        for the application to use as its application recorder.
        """
        return self.factory.process_recorder()
    def follow(self, name: str, log: NotificationLog) -> None:
        """
        Constructs a notification log reader and a mapper for
        the named application, and adds them to its collection
        of readers.
        """
        assert isinstance(self.recorder, ProcessRecorder)
        reader = NotificationLogReader(log)
        # Mapper for reconstructing the named application's domain events.
        mapper = self.construct_mapper(name)
        self.readers[name] = (reader, mapper)
    def pull_and_process(self, name: str) -> None:
        """
        Pulls and processes unseen domain event notifications
        from the notification log reader of the names application.
        Converts received event notifications to domain
        event objects, and then calls the :func:`policy`
        with a new :class:`ProcessEvent` object which
        contains a :class:`~eventsourcing.persistence.Tracking`
        object that keeps track of the name of the application
        and the position in its notification log from which the
        domain event notification was pulled. The policy will
        save aggregates to the process event object, using its
        :func:`~ProcessEvent.save` method, which collects pending
        domain events using the aggregates'
        :func:`~eventsourcing.domain.Aggregate.collect_events`
        method, and the process event object will then be recorded
        by calling the :func:`record` method.
        """
        reader, mapper = self.readers[name]
        # Resume just after the last notification recorded as processed
        # for this upstream application.
        start = self.recorder.max_tracking_id(name) + 1
        for notification in reader.read(start=start):
            domain_event = mapper.to_domain_event(notification)
            # The tracking object ties any events the policy produces to
            # this notification's position in the upstream log.
            process_event = ProcessEvent(
                Tracking(
                    application_name=name,
                    notification_id=notification.id,
                )
            )
            self.policy(
                domain_event,
                process_event,
            )
            self.record(process_event)
    @abstractmethod
    def policy(
        self,
        domain_event: AggregateEvent,
        process_event: ProcessEvent,
    ) -> None:
        """
        Abstract domain event processing policy method. Must be
        implemented by event processing applications. When
        processing the given domain event, event processing
        applications must use the :func:`~ProcessEvent.save`
        method of the given process event object (instead of
        the application's :func:`~eventsourcing.application.Application.save`
        method) to collect pending events from changed aggregates,
        so that the new domain events will be recorded atomically
        with tracking information about the position of the given
        domain event's notification.
        """
    def record(self, process_event: ProcessEvent) -> None:
        """
        Records given process event in the application's process recorder.
        """
        # Unpacks the process event's 'tracking' and 'events' attributes
        # as keyword arguments of the event store's put() method.
        self.events.put(
            **process_event.__dict__,
        )
        self.notify(process_event.events)
class Promptable(ABC):
    """
    Interface for objects that can be "prompted": notified by name when
    an upstream application has new domain event notifications available.
    """

    @abstractmethod
    def receive_prompt(self, leader_name: str) -> None:
        """
        Called with the name of a leader application that has
        new domain event notifications to be pulled and processed.
        """
class Leader(Application):
    """
    Extends the :class:`~eventsourcing.application.Application`
    class by also being responsible for keeping track of
    followers, and prompting followers when there are new
    domain event notifications to be pulled and processed.
    """

    def __init__(self) -> None:
        super().__init__()
        # Followers registered via lead(); prompted after each save.
        self.followers: List[Promptable] = []

    def lead(self, follower: Promptable) -> None:
        """
        Adds given follower to a list of followers.
        """
        self.followers.append(follower)

    def notify(self, new_events: List[AggregateEvent]) -> None:
        """
        Extends the application :func:`~eventsourcing.application.Application.notify`
        method by calling :func:`prompt_followers` whenever new events have just
        been saved.
        """
        super().notify(new_events)
        # Idiom fix: truthiness check instead of len() — an empty list is
        # falsy, so followers are only prompted when events were saved.
        if new_events:
            self.prompt_followers()

    def prompt_followers(self) -> None:
        """
        Prompts followers by calling their :func:`~Promptable.receive_prompt`
        methods with the name of the application.
        """
        name = self.__class__.__name__
        for follower in self.followers:
            follower.receive_prompt(name)
class ProcessApplication(Leader, Follower, ABC):
    """
    Base class for event-processing applications that both follow
    upstream applications and lead downstream ones.
    """
class System:
    """
    Defines a system of applications.
    """
    def __init__(
        self,
        pipes: Iterable[Iterable[Type[Application]]],
    ):
        # Each "pipe" is an ordered sequence of application classes; every
        # adjacent pair contributes one (leader, follower) edge.
        nodes: Dict[str, Type[Application]] = {}
        edges: Set[Tuple[str, str]] = set()
        # Build nodes and edges.
        for pipe in pipes:
            follower_cls = None
            for cls in pipe:
                nodes[cls.__name__] = cls
                if follower_cls is None:
                    follower_cls = cls
                else:
                    leader_cls = follower_cls
                    follower_cls = cls
                    edges.add(
                        (
                            leader_cls.__name__,
                            follower_cls.__name__,
                        )
                    )
        self.edges = list(edges)
        # Nodes are stored as topics (importable string paths) rather than
        # class objects; get_app_cls() resolves them back on demand.
        self.nodes: Dict[str, str] = {}
        for name in nodes:
            topic = get_topic(nodes[name])
            self.nodes[name] = topic
        # Identify leaders and followers.
        self.follows: Dict[str, List[str]] = defaultdict(list)
        self.leads: Dict[str, List[str]] = defaultdict(list)
        for edge in edges:
            self.leads[edge[0]].append(edge[1])
            self.follows[edge[1]].append(edge[0])
        # Check followers are followers.
        for name in self.follows:
            if not issubclass(nodes[name], Follower):
                raise TypeError("Not a follower class: %s" % nodes[name])
        # Check each process is a process application class.
        for name in self.processors:
            if not issubclass(nodes[name], ProcessApplication):
                raise TypeError("Not a process application class: %s" % nodes[name])
    @property
    def leaders(self) -> Iterable[str]:
        # Names of applications that lead at least one follower.
        return self.leads.keys()
    @property
    def leaders_only(self) -> Iterable[str]:
        # Names of applications that lead but do not follow anything.
        for name in self.leads.keys():
            if name not in self.follows:
                yield name
    @property
    def followers(self) -> Iterable[str]:
        # Names of applications that follow at least one leader.
        return self.follows.keys()
    @property
    def processors(self) -> Iterable[str]:
        # Names of applications that both lead and follow.
        return set(self.leaders).intersection(self.followers)
    def get_app_cls(self, name: str) -> Type[Application]:
        # Resolve the stored topic string back to the application class.
        cls = resolve_topic(self.nodes[name])
        assert issubclass(cls, Application)
        return cls
    def leader_cls(self, name: str) -> Type[Leader]:
        cls = self.get_app_cls(name)
        if issubclass(cls, Leader):
            return cls
        else:
            # Derive a Leader subclass on the fly for plain application
            # classes placed at the head of a pipe.
            cls = type(
                cls.__name__,
                (Leader, cls),
                {},
            )
            assert issubclass(cls, Leader)
            return cls
    def follower_cls(self, name: str) -> Type[Follower]:
        cls = self.get_app_cls(name)
        assert issubclass(cls, Follower)
        return cls
# Type variable for the application class requested from Runner.get().
A = TypeVar("A")
class Runner(ABC):
    """
    Abstract base class for system runners.
    """
    def __init__(self, system: System):
        # The system definition this runner will construct and connect.
        self.system = system
        # Guards against starting the same runner twice.
        self.is_started = False
    @abstractmethod
    def start(self) -> None:
        """
        Starts the runner.
        """
        if self.is_started:
            raise RunnerAlreadyStarted()
        self.is_started = True
    @abstractmethod
    def stop(self) -> None:
        """
        Stops the runner.
        """
    @abstractmethod
    def get(self, cls: Type[A]) -> A:
        """
        Returns an application instance for given application class.
        """
class RunnerAlreadyStarted(Exception):
    """
    Raised by :func:`Runner.start` when the runner has already
    been started.
    """
class SingleThreadedRunner(Runner, Promptable):
    """
    Runs a :class:`System` in a single thread.
    A single threaded runner is a runner, and so implements the
    :func:`start`, :func:`stop`, and :func:`get` methods.
    A single threaded runner is also a :class:`Promptable` object, and
    implements the :func:`receive_prompt` method by collecting prompted
    names.
    """
    def __init__(self, system: System):
        """
        Initialises runner with the given :class:`System`.
        """
        super().__init__(system)
        # Application instances keyed by application name.
        self.apps: Dict[str, Application] = {}
        # FIFO queue of leader names with new notifications.
        self.prompts_received: List[str] = []
        # True while the drain loop in receive_prompt() is running,
        # to prevent reentrant draining.
        self.is_prompting = False
    def start(self) -> None:
        """
        Starts the runner.
        The applications are constructed, and setup to lead and follow
        each other, according to the system definition.
        The followers are setup to follow the applications they follow
        (have a notification log reader with the notification log of the
        leader), and their leaders are setup to lead the runner itself
        (send prompts).
        """
        super().start()
        # Construct followers.
        for name in self.system.followers:
            self.apps[name] = self.system.follower_cls(name)()
        # Construct leaders.
        for name in self.system.leaders_only:
            self.apps[name] = self.system.leader_cls(name)()
        # Lead and follow.
        for edge in self.system.edges:
            leader = self.apps[edge[0]]
            follower = self.apps[edge[1]]
            assert isinstance(leader, Leader)
            assert isinstance(follower, Follower)
            # Leaders prompt the runner itself, which dispatches to followers.
            leader.lead(self)
            follower.follow(leader.__class__.__name__, leader.log)
    def receive_prompt(self, leader_name: str) -> None:
        """
        Receives prompt by appending name of
        leader to list of prompted names.
        Unless this method has previously been called but not
        yet returned, it will then proceed to forward the prompts
        received to its application by calling the application's
        :func:`~Follower.pull_and_process` method for each prompted name.
        """
        if leader_name not in self.prompts_received:
            self.prompts_received.append(leader_name)
        # Reentrancy guard: pull_and_process() can cause followers to save
        # new events, which prompts this runner again while the loop below
        # is already draining the queue; those prompts are only enqueued.
        if not self.is_prompting:
            self.is_prompting = True
            while self.prompts_received:
                prompt = self.prompts_received.pop(0)
                for name in self.system.leads[prompt]:
                    follower = self.apps[name]
                    assert isinstance(follower, Follower)
                    follower.pull_and_process(prompt)
            self.is_prompting = False
    def stop(self) -> None:
        # Drop all application instances; no threads to join here.
        self.apps.clear()
    def get(self, cls: Type[A]) -> A:
        # Look up the constructed instance by the class's name.
        app = self.apps[cls.__name__]
        assert isinstance(app, cls)
        return app
class MultiThreadedRunner(Runner):
    """
    Runs a :class:`System` with a :class:`MultiThreadedRunnerThread` for each
    follower in the system definition.
    It is a runner, and so implements the :func:`start`, :func:`stop`,
    and :func:`get` methods.
    """
    def __init__(self, system: System):
        """
        Initialises runner with the given :class:`System`.
        """
        super().__init__(system)
        # Application instances keyed by application name.
        self.apps: Dict[str, Application] = {}
        # One worker thread per follower application, keyed by name.
        self.threads: Dict[str, MultiThreadedRunnerThread] = {}
        # Shared shutdown signal observed by all worker threads.
        self.is_stopping = Event()
    def start(self) -> None:
        """
        Starts the runner.
        A multi-threaded runner thread is started for each
        'follower' application in the system, and constructs
        an instance of each non-follower leader application in
        the system. The followers are then setup to follow the
        applications they follow (have a notification log reader
        with the notification log of the leader), and their leaders
        are setup to lead the follower's thead (send prompts).
        """
        super().start()
        # Construct followers.
        for name in self.system.followers:
            app_class = self.system.follower_cls(name)
            thread = MultiThreadedRunnerThread(
                app_class=app_class,
                is_stopping=self.is_stopping,
            )
            self.threads[name] = thread
            thread.start()
            # Wait (up to 5s) for the thread to construct its application;
            # abort startup if construction failed or timed out.
            if (not thread.is_running.wait(timeout=5)) or thread.has_stopped.is_set():
                self.stop()
                raise Exception(f"Thread for '{app_class.__name__}' failed to start")
            self.apps[name] = thread.app
        # Construct non-follower leaders.
        for name in self.system.leaders_only:
            app = self.system.leader_cls(name)()
            self.apps[name] = app
        # Lead and follow.
        for edge in self.system.edges:
            leader = self.apps[edge[0]]
            follower = self.apps[edge[1]]
            assert isinstance(leader, Leader)
            assert isinstance(follower, Follower)
            follower.follow(leader.__class__.__name__, leader.log)
            # Leaders prompt the follower's thread, not the follower directly.
            thread = self.threads[edge[1]]
            leader.lead(thread)
    def stop(self) -> None:
        self.is_stopping.set()
        # Wake every thread's wait on is_prompted so it can observe
        # is_stopping and exit, then join them.
        for thread in self.threads.values():
            thread.is_prompted.set()
            thread.join()
    @property
    def has_stopped(self) -> bool:
        # True only when every worker thread has finished.
        return all([t.has_stopped.is_set() for t in self.threads.values()])
    def get(self, cls: Type[A]) -> A:
        # Look up the constructed instance by the class's name.
        app = self.apps[cls.__name__]
        assert isinstance(app, cls)
        return app
class MultiThreadedRunnerThread(Promptable, Thread):
    """
    Runs one process application for a
    :class:`~eventsourcing.system.MultiThreadedRunner`.

    A multi-threaded runner thread is a :class:`~eventsourcing.system.Promptable`
    object, and implements the :func:`receive_prompt` method by collecting
    prompted names and setting its threading event 'is_prompted'.

    A multi-threaded runner thread is a Python :class:`threading.Thread` object,
    and implements the thread's :func:`run` method by waiting until the
    'is_prompted' event has been set and then calling its process application's
    :func:`~eventsourcing.system.Follower.pull_and_process`
    method once for each prompted name. It is expected that
    the process application will have been set up by the runner
    with a notification log reader from which event notifications
    will be pulled.
    """

    def __init__(
        self,
        app_class: Type[Follower],
        is_stopping: Event,
    ):
        super().__init__()
        self.app_class = app_class
        # Shared shutdown signal, owned by the runner.
        self.is_stopping = is_stopping
        # Lifecycle flags observed by the runner.
        self.has_stopped = Event()
        self.has_errored = Event()
        self.is_prompted = Event()
        # Names of leaders with new notifications; guarded by a lock
        # because prompts arrive from other threads.
        self.prompted_names: List[str] = []
        self.prompted_names_lock = Lock()
        # Idiom fix: setDaemon() is deprecated; assign the attribute instead.
        self.daemon = True
        self.is_running = Event()

    def run(self) -> None:
        """
        Begins by constructing an application instance from
        given application class and then loops forever until
        stopped. The loop blocks on waiting for the 'is_prompted'
        event to be set, then forwards the prompts already received
        to its application by calling the application's
        :func:`~Follower.pull_and_process` method for each prompted name.
        """
        try:
            self.app: Follower = self.app_class()
        except Exception:
            self.has_errored.set()
            self.has_stopped.set()
            raise
        finally:
            self.is_running.set()  # pragma: no cover
            # -----------------------^ weird branch coverage thing with Python 3.9
        try:
            while True:
                self.is_prompted.wait()
                if self.is_stopping.is_set():
                    self.has_stopped.set()
                    break
                # Snapshot and clear the prompt queue under the lock, then
                # process outside the lock so new prompts can still arrive.
                with self.prompted_names_lock:
                    prompted_names = self.prompted_names
                    self.prompted_names = []
                    self.is_prompted.clear()
                for name in prompted_names:
                    self.app.pull_and_process(name)
        except Exception:
            self.has_errored.set()
            self.has_stopped.set()
            # Fix: the original called ``self.is_stopping.is_set()``, a
            # read-only query with no effect; signal the shared stopping
            # event so the runner's other threads also shut down after an
            # error in this one.
            self.is_stopping.set()
            raise

    def receive_prompt(self, leader_name: str) -> None:
        """
        Receives prompt by appending name of
        leader to list of prompted names.
        """
        with self.prompted_names_lock:
            if leader_name not in self.prompted_names:
                self.prompted_names.append(leader_name)
            self.is_prompted.set()
class NotificationLogReader:
    """
    Reads domain event notifications from a notification log.
    """

    DEFAULT_SECTION_SIZE = 10

    def __init__(
        self,
        notification_log: NotificationLog,
        section_size: int = DEFAULT_SECTION_SIZE,
    ):
        """
        Initialises a reader with the given notification log, and
        optionally a section size: the number of event notifications
        requested per section retrieved from the log.
        """
        self.notification_log = notification_log
        self.section_size = section_size

    def read(self, *, start: int) -> Iterator[Notification]:
        """
        Generator yielding event notifications from the reader's
        notification log, beginning at the given start position
        (a notification ID).

        Walks the linked list of sections presented by the notification
        log: each section's notifications are yielded in turn, then the
        next section is fetched via its ``next_id`` link, until the final
        section (whose ``next_id`` is None) has been exhausted.
        """
        section_id = "{},{}".format(start, start + self.section_size - 1)
        while True:
            section: Section = self.notification_log[section_id]
            yield from section.items
            if section.next_id is None:
                return
            section_id = section.next_id
| StarcoderdataPython |
114341 | import tensorflow as tf
# Taken from https://www.tensorflow.org/guide/eager
# Eager execution works nicely with NumPy. NumPy operations accept tf.Tensor arguments.
# TensorFlow math operations convert Python objects and NumPy arrays to tf.Tensor objects.
# The tf.Tensor.numpy method returns the object's value as a NumPy ndarray.
# NOTE(review): enable_eager_execution() is a TF1.x API; in TF2 eager mode is
# the default and this call does not exist — confirm the targeted TF version.
tf.enable_eager_execution()
a = tf.constant([
    [1, 2],
    [3, 4]]
)
print(a)
# Broadcasting support: the scalar 1 is broadcast across every element of `a`.
b = tf.add(a, 1)
print(b)
# Operator overloading is supported (element-wise multiplication here).
print(a * b)
# Use NumPy values: NumPy ops accept tf.Tensor arguments (see header comment).
import numpy as np
c = np.multiply(a, b)
print(c)
| StarcoderdataPython |
9683461 | # This program is free software; you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright <NAME> <<EMAIL>>
"""
Steps and objects related to lintian
"""
from __future__ import absolute_import
from __future__ import print_function
from buildbot import config
from buildbot.process import buildstep
from buildbot.process.results import FAILURE
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.steps.package import util as pkgutil
from buildbot.steps.shell import ShellCommand
class MaxQObserver(buildstep.LogLineObserver):
    """Log observer that counts MaxQ test failures on a step's stdout.

    Every stdout line starting with ``TEST FAILURE:`` increments
    ``self.failures``.
    """

    def __init__(self):
        buildstep.LogLineObserver.__init__(self)
        # Number of 'TEST FAILURE:' lines seen so far.
        self.failures = 0

    def outLineReceived(self, line):
        # Called by buildbot once per stdout line.
        if line.startswith('TEST FAILURE:'):
            self.failures += 1
class DebLintian(ShellCommand):
    """Buildbot step that runs ``lintian`` against a .deb or .changes file.

    Warnings and errors reported by lintian are collected into separate
    complete logs; any error (or non-zero exit) fails the step, while
    warnings alone mark it as WARNINGS.
    """

    name = "lintian"
    description = ["Lintian running"]
    descriptionDone = ["Lintian"]

    # Location of the .deb/.changes file to check (required).
    fileloc = None
    # Lintian tags to pass via --suppress-tags.
    suppressTags = []

    # Counters filled in by createSummary().
    warnCount = 0
    errCount = 0

    # Lintian problems should only warn, not flunk the whole build.
    flunkOnFailure = False
    warnOnFailure = True

    def __init__(self, fileloc=None, suppressTags=None, **kwargs):
        """
        Create the DebLintian object.

        @type fileloc: str
        @param fileloc: Location of the .deb or .changes to test.
        @type suppressTags: list
        @param suppressTags: List of tags to suppress.
        @type kwargs: dict
        @param kwargs: all other keyword arguments.
        """
        ShellCommand.__init__(self, **kwargs)
        if fileloc:
            self.fileloc = fileloc
        if suppressTags:
            self.suppressTags = suppressTags

        if not self.fileloc:
            config.error("You must specify a fileloc")

        self.command = ["lintian", "-v", self.fileloc]

        if self.suppressTags:
            for tag in self.suppressTags:
                self.command += ['--suppress-tags', tag]

        # Observer that collects lintian W:/E: lines from the stdio log.
        self.obs = pkgutil.WEObserver()
        self.addLogObserver('stdio', self.obs)

    def createSummary(self, log):
        """
        Create nice summary logs.

        @param log: log to create summary off of.
        """
        warnings = self.obs.warnings
        errors = self.obs.errors

        if warnings:
            self.addCompleteLog('%d Warnings' % len(warnings), "\n".join(warnings))
            self.warnCount = len(warnings)
        if errors:
            self.addCompleteLog('%d Errors' % len(errors), "\n".join(errors))
            self.errCount = len(errors)

    def evaluateCommand(self, cmd):
        # Non-zero exit or any lintian error fails the step; warnings
        # alone downgrade the result to WARNINGS.
        if (cmd.rc != 0 or self.errCount):
            return FAILURE
        if self.warnCount:
            return WARNINGS
        return SUCCESS
| StarcoderdataPython |
4960162 | <filename>scenic/projects/vivit/data/video_tfrecord_dataset.py
"""Data-loader to read from SSTables using the MediaSequence format."""
import functools
import os
from typing import Dict, Iterator, List, Optional, Text, Tuple, Union
from absl import logging
from dmvr import builders
from dmvr import modalities
from dmvr import video_dataset
from flax import jax_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.dataset_lib import datasets
from scenic.dataset_lib import video_ops
from scenic.projects.vivit.data import file_utils
import tensorflow as tf
# Aliases for custom types:
# A single batch as produced by the pipeline: feature name -> array.
Batch = Dict[str, jnp.ndarray]
# A JAX PRNG key, or a dict of named keys.
Rng = Union[jnp.ndarray, Dict[str, jnp.ndarray]]
def get_sharded_files(
    data_path: str,
    fraction_data: float = 1.0,
    num_groups: Optional[int] = None,
    group_index: Optional[int] = None) -> List[str]:
  """Resolves `data_path` into a (possibly subsampled, grouped) shard list.

  `data_path` may be a sharded file spec (e.g. "/path/to/tfrecord@10") or a
  single file. Only the first `fraction_data` of the shards is kept. If both
  `num_groups` and `group_index` are given, the surviving shards are split
  into `num_groups` near-equal groups and only group `group_index` is
  returned — useful to give each host a disjoint subset of the data.

  Args:
    data_path: Path to the data, either sharded or a single file.
    fraction_data: Fraction of the data to be consumed, in (0, 1].
    num_groups: If set, number of groups to split the shards into;
      `group_index` must be set too.
    group_index: If set, index of the group of data to return; `num_groups`
      must be set too.

  Returns:
    A list of shard filenames.

  Raises:
    ValueError: If `fraction_data` is outside (0, 1], if only one of
      `num_groups`/`group_index` is given, if `group_index >= num_groups`,
      or if there are fewer shards than requested groups.
  """
  if not 0 < fraction_data <= 1:
    raise ValueError(
        f'The fraction of data must be in (0, 1] but is {fraction_data}.')

  if file_utils.is_sharded_file_spec(data_path):
    shard_names = list(file_utils.generate_sharded_filenames(data_path))
  else:
    shard_names = [data_path]

  # Keep only the requested fraction (rounded up) of the shards.
  num_used_shards = int(np.ceil(fraction_data * len(shard_names)))
  shard_names = shard_names[:num_used_shards]

  if num_groups is None and group_index is None:
    return shard_names
  if num_groups is None or group_index is None:
    raise ValueError('Both `num_groups` and `group_index` should be specified.')
  if group_index >= num_groups:
    raise ValueError(
        f'Cannot request index {group_index} of {num_groups} groups')
  if num_groups > num_used_shards:
    raise ValueError(
        f'After applying `fraction_data={fraction_data}` we have '
        f'{num_used_shards} data shards, which cannot be split into '
        f'{num_groups} groups.')

  # Split shard indices into near-equal contiguous groups and keep ours.
  group_ranges = np.array_split(np.arange(num_used_shards), num_groups)
  first = group_ranges[group_index][0]
  last = group_ranges[group_index][-1] + 1
  return shard_names[first:last]
class TFRecordDatasetFactory(video_dataset.BaseVideoDatasetFactory):
  """Reader for TFRecords using the MediaSequence format.

  Attributes:
    num_classes: int. The number of classes in the dataset.
    base_dir: str. The base directory from which the TFRecords are read.
    subset: str. The subset of the dataset. In Scenic, the subsets are
      "train", "validation" and "test".
  """

  def __init__(
      self,
      base_dir: str,
      tables: Dict[str, Union[str, List[str]]],
      examples_per_subset: Dict[str, int],
      num_classes: int,
      subset: str = 'train',
      fraction_data: float = 1.0,
      num_groups: Optional[int] = None,
      group_index: Optional[int] = None):
    """Initializes the instance of TFRecordDatasetFactory.

    Initializes a data-loader using DeepMind Video Reader (DMVR) pre-processing
    (https://github.com/deepmind/dmvr).
    TFRecords are assumed to consist of tf.SequenceExample protocol buffers in
    the MediaSequence
    (https://github.com/google/mediapipe/tree/master/mediapipe/util/sequence)
    format.

    Args:
      base_dir: The base directory of the TFRecords.
      tables: A dictionary mapping the subset name (train, val or test) to the
        relative path of the TFRecord containing them. Follows DMVR convention.
        The values of the dictionary can either be a string or a list. If it is
        a string, it specifies all the shards in the TFRecord.
        Example - "/path/to/tfrecord@10".
        If passing a list, each entry is a shard of the TFRecord.
        Example - "[/path/to/tfrecord_shard_1_of_10, ...,
        /path/to/tfrecord_shard_10_of_10]."
        The latter scenario is useful for debugging.
      examples_per_subset: A dictionary mapping the subset name (train, val or
        test) to the number of examples in the dataset for that subset.
      num_classes: The number of classes in the dataset.
      subset: The subset of the dataset to load. Must be a key of "tables"
      fraction_data: The fraction of the data to load. If less than 1.0, this
        fraction of the total TFRecord shards are read.
      num_groups: If specified will reshard the data according to `num_groups`.
        A `group_index` should be specified if using `num_groups`.
      group_index: Index of the shard to return after resharding. `num_groups`
        should be specified if using `group_index`. This is useful in
        distributed setting where one wants to ensure that different data is
        read by different workers.

    Raises:
      ValueError: If subset is not a key of tables or examples_per_subset
    """
    if (subset not in tables) or (subset not in examples_per_subset):
      raise ValueError(f'Invalid subset {subset!r}. '
                       f'The available subsets are: {set(tables)!r}')
    self.num_classes = num_classes
    self.base_dir = base_dir
    self.subset = subset
    self.num_examples = examples_per_subset[subset]

    data_relative_path = tables[subset]
    if isinstance(data_relative_path, list):
      # Explicit list of shard files (useful for debugging).
      shards = [os.path.join(self.base_dir, x) for x in data_relative_path]
    else:
      # Sharded path spec: resolve to concrete shard files, optionally
      # subsampled and partitioned across hosts.
      data_path = os.path.join(self.base_dir, data_relative_path)
      shards = get_sharded_files(data_path=data_path,
                                 fraction_data=fraction_data,
                                 num_groups=num_groups,
                                 group_index=group_index)
    super().__init__(shards=shards)

  def _build(self,
             is_training: bool = True,
             # Video related parameters.
             num_frames: int = 32,
             stride: int = 1,
             num_test_clips: int = 1,
             min_resize: int = 256,
             crop_size: int = 224,
             zero_centering_image: bool = False,
             train_frame_sampling_mode: str = 'random',
             # Label related parameters.
             one_hot_label: bool = True,
             get_label_str: bool = False,
             label_offset: int = 0):
    """Adds DMVR pre-processors to the dataset.

    Args:
      is_training: whether or not in training mode.
      num_frames: number of frames per subclip.
      stride: temporal stride to sample frames.
      num_test_clips: number of test clip (1 by default). If more than one,
        this will sample multiple linearly spaced clips within each video at
        test time. If 1, then a single clip in the middle of the video is
        sampled.
      min_resize: frames are resized so that min width/height is min_resize.
      crop_size: final size of the frame after cropping the resized frames.
      zero_centering_image: whether to have image values in the interval [-1, 1]
        or [0, 1].
      train_frame_sampling_mode: Method of sampling frames during training.
        Options are one of {random, random_sample_with_centre, centre}.
        NOTE(review): this parameter is not referenced in the body below —
        confirm whether it should be forwarded to the sampler.
      one_hot_label: whether or not to return one hot version of labels.
      get_label_str: whether or not to return label as text.
      label_offset: If non-zero, this value is subtracted from the parsed label.
        Useful when dataset is 1-indexed.
    """
    # Frame sampling / decoding / resizing / cropping for the RGB stream.
    modalities.add_image(
        parser_builder=self.parser_builder,
        sampler_builder=self.sampler_builder,
        decoder_builder=self.decoder_builder,
        preprocessor_builder=self.preprocessor_builder,
        postprocessor_builder=self.postprocessor_builder,
        is_training=is_training,
        num_frames=num_frames, stride=stride,
        num_test_clips=num_test_clips,
        min_resize=min_resize, crop_size=crop_size,
        zero_centering_image=zero_centering_image)
    # Label parsing (optionally one-hot and/or as a text name).
    modalities.add_label(
        parser_builder=self.parser_builder,
        decoder_builder=self.decoder_builder,
        preprocessor_builder=self.preprocessor_builder,
        one_hot_label=one_hot_label,
        num_classes=self.num_classes,
        add_label_name=get_label_str)
    if label_offset:
      # Shift (e.g. 1-indexed) labels before they are one-hot encoded.
      self.preprocessor_builder.add_fn(
          fn=lambda x: x - label_offset,
          feature_name=builders.LABEL_INDEX_FEATURE_NAME,
          fn_name=f'label_offset_{label_offset}',
          add_before_fn_name=(
              f'{builders.LABEL_INDEX_FEATURE_NAME}_one_hot'))
def load_split(
    ds_factory,
    batch_size: int,
    subset: Text = 'train',
    num_frames: int = 32,
    stride: int = 2,
    num_test_clips: int = 1,
    min_resize: int = 256,
    crop_size: int = 224,
    one_hot_label: bool = True,
    zero_centering: bool = True,
    get_label_str: bool = False,
    augmentation_params: Optional[ml_collections.ConfigDict] = None,
    keep_key: bool = False,
    do_three_spatial_crops: bool = False,
    label_offset: int = 0) -> Tuple[tf.data.Dataset, int]:
  """Loads dataset using DMVR for pre-processing.

  DMVR dataset loader already does basic augmentation (random crop and flip in
  train mode). It also already shuffles and batches the data.

  Args:
    ds_factory: A DMVR factory to instantiate with the subset.
    batch_size: The batch_size to use.
    subset: train, validation or test.
    num_frames: Number of frames per subclip.
    stride: Temporal stride to sample frames.
    num_test_clips: Number of test clips (1 by default). If more than 1, this
      will sample multiple linearly spaced clips within each video at test time.
      If 1, then a single clip in the middle of the video is sampled. The clips
      are aggreagated in the batch dimension.
    min_resize: Frames are resized so that min(height, width) is min_resize.
    crop_size: Final size of the frame after cropping the resized frames. Both
      height and width are the same.
    one_hot_label: If True, return one-hot version of the labels (ie [N, C])
      array. Otherwise, return [N]-array of labels.
    zero_centering: If True, frames are normalized to values in the interval
      [-1, 1]. If False, values are in the interval [0, 1].
    get_label_str: whether or not to return label as text.
      Note that strings cannot be used in pmapped functions in Jax!
    augmentation_params: Augmentation configurations in train mode.
    keep_key: bool; If true, also return the key for each example.
    do_three_spatial_crops: If true, take three spatial crops of each clip
      during testing.
    label_offset: If non-zero, this value is subtracted from the parsed label.
      Useful when dataset is 1-indexed.

  Returns:
    A pair `(ds, num_examples)` with
      ds: A `tf.data.Dataset` object
      num_examples: Number of examples in the dataset.
  """
  # Configure the DMVR pre-processing graph for this subset.
  dataset = ds_factory(subset=subset).configure(
      is_training=(subset == 'train'),
      num_frames=num_frames,
      stride=stride,
      num_test_clips=num_test_clips,
      min_resize=min_resize,
      crop_size=crop_size,
      zero_centering_image=zero_centering,
      one_hot_label=one_hot_label,
      get_label_str=get_label_str,
      label_offset=label_offset)

  if subset == 'train' and augmentation_params:
    # Extra train-time augmentations on top of DMVR's random crop/flip.
    dataset = video_ops.additional_augmentations(dataset, augmentation_params,
                                                 crop_size, num_frames,
                                                 zero_centering)

  if subset != 'train' and do_three_spatial_crops:
    rgb_feature_name = builders.IMAGE_FEATURE_NAME
    # Swap the single central crop for three spatial crops per clip.
    dataset.preprocessor_builder.replace_fn(
        f'{rgb_feature_name}_central_crop',
        functools.partial(video_ops.three_spatial_crops, crop_size=crop_size))
    if num_test_clips == 1:
      # This means that reshaping is not part of the post-processing graph
      output_shape = (-1, num_frames, crop_size, crop_size, 3)
      dataset.postprocessor_builder.add_fn(
          fn=lambda x: tf.reshape(x, output_shape),
          feature_name=rgb_feature_name,
          fn_name=f'{rgb_feature_name}_reshape')

  logging.info('Preprocessing graph: %s',
               dataset.preprocessor_builder.get_summary())
  logging.info('Postprocessing graph: %s',
               dataset.postprocessor_builder.get_summary())
  num_examples = dataset.num_examples

  # Shuffling/batching is handled by DMVR; keys are only kept at eval/test.
  ds = dataset.make_dataset(batch_size=batch_size,
                            shuffle=(subset == 'train'),
                            drop_remainder=(subset == 'train'),
                            keep_key=(subset != 'train' and keep_key))

  options = tf.data.Options()
  options.threading.private_threadpool_size = 48
  ds = ds.with_options(options)
  return ds, num_examples
def map_keys(batch: Batch) -> Batch:
  """Exposes the DMVR 'image' feature under the 'inputs' key Scenic expects."""
  batch.update(inputs=batch['image'])
  return batch
def tile_label_key(batch: Batch) -> Batch:
  """Repeats labels (and keys) so they line up with the clip-expanded inputs.

  With num_test_clips > 1 the batch dimension of batch['inputs'] is
  test_batch_size * num_test_clips, while 'label' and 'key' still have
  test_batch_size entries; each of those is repeated to match. The input
  batch is mutated in place and also returned.

  Args:
    batch: Batch from iterator.

  Returns:
    The same batch with 'label' (and 'key', if present) tiled to match
    'inputs'.
  """
  repeat_factor = batch['inputs'].shape[0] // batch['label'].shape[0]
  batch['label'] = np.repeat(batch['label'], repeat_factor, axis=0)
  if 'key' in batch:
    batch['key'] = np.repeat(batch['key'], repeat_factor, axis=0)
  return batch
@datasets.add_dataset('video_tfrecord_dataset')
def get_dataset(
    *,
    batch_size: int,
    eval_batch_size: int,
    num_shards: int,
    dtype_str: Text = 'float32',
    shuffle_seed: Optional[int] = 0,
    rng: Optional[Rng] = None,
    dataset_configs: ml_collections.ConfigDict,
    dataset_service_address: Optional[str] = None) -> dataset_utils.Dataset:
  """Returns a generator for the dataset.

  Builds train/validation/test iterators over MediaSequence TFRecords and
  packages them with metadata (example counts, input shape/dtype) into a
  `dataset_utils.Dataset`.
  """
  del rng  # Parameter was required by caller API, but is unused.

  def validate_config(field):
    # Fail fast on required-but-missing config fields.
    if dataset_configs.get(field) is None:
      raise ValueError(f'{field} must be specified for TFRecord dataset.')
  validate_config('base_dir')
  validate_config('tables')
  validate_config('examples_per_subset')
  validate_config('num_classes')

  # Pipeline options, with defaults matching load_split's.
  num_frames = dataset_configs.get('num_frames', 32)
  num_test_clips = dataset_configs.get('num_test_clips', 1)
  stride = dataset_configs.get('stride', 2)
  min_resize = dataset_configs.get('min_resize', 256)
  crop_size = dataset_configs.get('crop_size', 224)
  one_hot_label = dataset_configs.get('one_hot_label', True)
  zero_centre_data = dataset_configs.get('zero_centering', True)
  augmentation_params = dataset_configs.get('augmentation_params', None)
  num_train_val_clips = dataset_configs.get('num_train_val_clips', 1)
  keep_test_key = dataset_configs.get('keep_test_key', False)
  # For the test set, the actual batch size is
  # test_batch_size * num_test_clips.
  test_batch_size = dataset_configs.get('test_batch_size', eval_batch_size)
  do_three_spatial_crops = dataset_configs.get('do_three_spatial_crops', False)
  num_spatial_crops = 3 if do_three_spatial_crops else 1
  test_split = dataset_configs.get('test_split', 'test')
  label_offset = dataset_configs.get('label_offset', 0)

  # Each host reads a disjoint group of shards.
  ds_factory = functools.partial(
      TFRecordDatasetFactory,
      base_dir=dataset_configs.base_dir,
      tables=dataset_configs.tables,
      examples_per_subset=dataset_configs.examples_per_subset,
      num_classes=dataset_configs.num_classes,
      num_groups=jax.process_count(),
      group_index=jax.process_index())

  def create_dataset_iterator(
      subset: Text,
      batch_size_local: int,
      num_clips: int,
      keep_key_local: bool = False) -> Tuple[Iterator[Batch], int]:
    # Builds a sharded numpy iterator (and example count) for one subset.
    is_training = subset == 'train'
    is_test = subset == 'test'
    logging.info('Loading split %s', subset)

    dataset, num_examples = load_split(
        ds_factory,
        batch_size=batch_size_local,
        subset=subset,
        num_frames=num_frames,
        stride=stride,
        num_test_clips=num_clips,
        min_resize=min_resize,
        crop_size=crop_size,
        one_hot_label=one_hot_label,
        zero_centering=zero_centre_data,
        augmentation_params=augmentation_params,
        keep_key=keep_key_local,
        do_three_spatial_crops=do_three_spatial_crops and is_test,
        label_offset=label_offset)

    if dataset_service_address and is_training:
      if shuffle_seed is not None:
        raise ValueError('Using dataset service with a random seed causes each '
                         'worker to produce exactly the same data. Add '
                         'config.shuffle_seed = None to your config if you '
                         'want to run with dataset service.')
      logging.info('Using the tf.data service at %s', dataset_service_address)
      dataset = dataset_utils.distribute(dataset, dataset_service_address)

    # At test time each example expands into num_clips * num_spatial_crops
    # entries, so padding must target the expanded batch size.
    pad_batch_size = batch_size_local
    if is_test:
      pad_batch_size = batch_size_local * num_clips * num_spatial_crops
    maybe_pad_batches = functools.partial(
        dataset_utils.maybe_pad_batch,
        train=is_training,
        batch_size=pad_batch_size)
    shard_batches = functools.partial(dataset_utils.shard, n_devices=num_shards)

    # Convert to numpy and rename 'image' -> 'inputs'.
    current_ds_iterator = (
        map_keys(dataset_utils.tf_to_numpy(data)) for data in iter(dataset)
    )
    if is_test and num_clips * num_spatial_crops > 1:
      # Repeat labels/keys so they match the clip-expanded inputs.
      current_ds_iterator = map(tile_label_key, current_ds_iterator)
    current_ds_iterator = map(maybe_pad_batches, current_ds_iterator)
    current_ds_iterator = map(shard_batches, current_ds_iterator)
    if is_training and dataset_configs.get('prefetch_to_device'):
      # Async bind batch to device which speeds up training.
      current_ds_iterator = jax_utils.prefetch_to_device(
          current_ds_iterator, dataset_configs.get('prefetch_to_device'))
    return current_ds_iterator, num_examples

  train_iter, n_train_examples = create_dataset_iterator(
      'train', batch_size, num_train_val_clips)
  eval_iter, n_eval_examples = create_dataset_iterator(
      'validation', eval_batch_size, num_train_val_clips)
  test_iter, n_test_examples = create_dataset_iterator(
      test_split, test_batch_size, num_test_clips, keep_test_key)

  meta_data = {
      'num_classes': dataset_configs.num_classes,
      'input_shape': (-1, num_frames, crop_size, crop_size, 3),
      'num_train_examples': (n_train_examples * num_train_val_clips),
      'num_eval_examples': (n_eval_examples * num_train_val_clips),
      'num_test_examples':
          (n_test_examples * num_test_clips * num_spatial_crops),
      'input_dtype': getattr(jnp, dtype_str),
      'target_is_onehot': True,
  }
  logging.info('Dataset metadata:\n%s', meta_data)
  return dataset_utils.Dataset(train_iter, eval_iter, test_iter, meta_data)
| StarcoderdataPython |
1988929 | <reponame>blackbotinc/AWS-Attack<filename>ttp/iam_backdoor_users_password.py
#!/usr/bin/env python3
#'description': ''This module attempts to add a password to users in the account. If all users are going to be backdoored, if it has not already been run, this module will run "enum_users_roles_policies_groups" to fetch all of the users in the account. Passwords can not be added to user accounts that 1) have a password already or 2) have ever had a password, regardless if it has been used before or not. If the module detects that a user already has a password, they will be ignored.',
import datetime
import argparse
from random import choice
import string
from botocore.exceptions import ClientError
import importlib
# Unused placeholder kept for the AWS-Attack module convention.
target = ''

# Metadata consumed by the AWS-Attack framework to describe this technique.
technique_info = {
    'blackbot_id': 'T1531.b.001',
    'external_id': '',
    'controller': 'iam_backdoor_users_password',
    'services': ['IAM'],
    'prerequisite_modules': ['iam_enum_users'],
    'arguments_to_autocomplete': ['--usernames', '--update'],
    'version': '1',
    'aws_namespaces': [],
    'last_updated_by': 'Blackbot, Inc. Sun Sep 20 04:13:33 UTC 2020',
    'ttp_exec': '',
    'ttp_mitigation': '',
    'ttp_detection': '',
    'intent': 'Adds a password to users without one.',
    'name': 'Account Access Removal:Modify Target Accounts Authentication',
}

# Command-line interface for this module; parsed inside main().
parser = argparse.ArgumentParser(add_help=False, description=technique_info['name'])
parser.add_argument('--usernames', required=False, default=None, help='A comma-separated list of usernames of users in the AWS account to backdoor. If not supplied, it defaults to every user in the account')
parser.add_argument('--update', required=False, default=False, action='store_true', help='Try to update login profiles instead of creating a new one. This can be used to change other users passwords who already have a login profile.')
def main(args, awsattack_main):
    """Backdoors IAM users by adding console passwords via the src sub-module.

    Builds the target user list either from --usernames or, when absent, from
    the users enumerated by the prerequisite module, then delegates the actual
    password creation to ttp.src.iam_backdoor_users_password_src.
    """
    session = awsattack_main.get_active_session()
    args = parser.parse_args(args)
    fetch_data = awsattack_main.fetch_data

    if args.usernames is not None:
        # str.split(',') also covers the single-username case.
        users = args.usernames.split(',')
    else:
        if fetch_data(['IAM', 'Users'], technique_info['prerequisite_modules'][0], '--users') is False:
            print('FAILURE')
            print('    SUB-MODULE EXECUTION FAILED')
            return None
        # Skip users that already use a password unless --update was given.
        users = [user['UserName'] for user in session.IAM['Users']
                 if 'PasswordLastUsed' not in user or args.update]

    src_code = __import__('ttp.src.iam_backdoor_users_password_src',
                          globals(), locals(), ['technique_info'], 0)
    importlib.reload(src_code)
    awsattack_main.chain = True
    return src_code.main(args, awsattack_main, data=users)
def summary(data, awsattack_main):
    """Formats the post-run summary line shown to the operator."""
    if 'backdoored_password_count' not in data:
        return ''
    return '    {} user(s) backdoored.\n'.format(data['backdoored_password_count'])
| StarcoderdataPython |
11390940 | <filename>python/num_swap.py
def num_swap(a, b):
    """Return the two arguments swapped: ``num_swap(a, b) == (b, a)``.

    The previous arithmetic trick (``a = a + b; b = a - b; a = a - b``)
    only worked for numbers and could lose precision for floats; tuple
    swapping is exact and works for values of any type.
    """
    return b, a
| StarcoderdataPython |
3510364 | import logging
import time
from datetime import datetime
from datetime import timedelta
from .orm import get_scoped_session, Voting, Karma, cast, Float
from .parse import Parse
from .words import Color
# FIXME: check everywhere for succeeded POST
class KarmaManager:
    """Coordinates karma votes: storage (SQLAlchemy), chat transport and
    message formatting.

    Public entry points post their result back to the given channel and
    return True on completion.
    """

    __slots__ = [
        '_initial_value', '_max_shot', '_self_karma', '_vote_timeout',
        '_upvote_emoji', '_downvote_emoji',
        '_transport', '_format', '_backup', '_session', '_logger'
    ]

    def __init__(self, karma_config, db_config, transport, fmt, backup_provider):
        """Wires the manager to its config, DB session, transport and formatter."""
        self._initial_value = karma_config['initial_value']
        self._max_shot = karma_config['max_shot']
        self._self_karma = karma_config['self_karma']
        self._vote_timeout = karma_config['vote_timeout']
        self._upvote_emoji = karma_config['upvote_emoji']
        self._downvote_emoji = karma_config['downvote_emoji']
        self._transport = transport
        self._format = fmt
        self._backup = backup_provider
        self._session = get_scoped_session(db_config)
        self._logger = logging.getLogger('KarmaManager')

    def get(self, user_id, channel):
        """Posts a user's karma to the channel, creating the record if absent."""
        karma = self._session.query(Karma).filter_by(user_id=user_id).first()
        if karma:
            value = karma.karma
        else:
            # First lookup for this user: persist the initial value.
            value = self._initial_value
            self._session.add(Karma(user_id=user_id, karma=value))
            self._session.commit()
        username = self._transport.lookup_username(user_id)
        self._transport.post(channel, self._format.report_karma(username, value))
        return True

    def set(self, user_id, karma, channel):
        """Overwrites (or creates) a user's karma and reports the new value."""
        karma_change = self._session.query(Karma).filter_by(user_id=user_id).first()
        if karma_change:
            karma_change.karma = karma
        else:
            self._session.add(Karma(user_id=user_id, karma=karma))
        self._session.commit()
        username = self._transport.lookup_username(user_id)
        self._transport.post(channel, self._format.report_karma(username, karma))
        return True

    def digest(self, channel):
        """Posts a leaderboard of all users with non-zero karma."""
        result = ['*username* => *karma*']
        for r in self._session.query(Karma).filter(Karma.karma != 0).order_by(Karma.karma.desc()).all():
            item = '_{}_ => *{}*'.format(self._transport.lookup_username(r.user_id), r.karma)
            result.append(item)
        # TODO: add translations
        if len(result) == 1:
            # Only the header line: no non-zero karma to show.
            result = 'Seems like nothing to show. All the karma is zero'
        else:
            result.append('The rest are full ZERO')
            result = '\n'.join(result)
        self._transport.post(channel, self._format.message(Color.INFO, result))
        return True

    def pending(self, channel):
        """Posts a table of votings that are still open (not yet expired)."""
        result = ['*initiator* | *receiver* | *channel* | *karma* | *expired*']
        for r in self._session.query(Voting).all():
            dt = timedelta(seconds=self._vote_timeout)
            # Time at which this voting will be closed.
            time_left = datetime.fromtimestamp(float(r.initial_msg_ts)) + dt
            item = '{} | {} | {} | {} | {}'.format(
                self._transport.lookup_username(r.initiator_id),
                self._transport.lookup_username(r.user_id),
                self._transport.lookup_channel_name(r.channel),
                r.karma,
                time_left.isoformat())
            result.append(item)
        if len(result) == 1:
            result = 'Seems like nothing to show'
        else:
            result = '\n'.join(result)
        self._transport.post(channel, self._format.message(Color.INFO, result))
        return True

    def create(self, initiator_id, channel, text, ts):
        """Opens a new voting for a karma change request message.

        Returns False if a voting for this message already exists, None on a
        parse/sanity error (an error is posted to the channel), True when the
        voting was created.
        """
        # Check for an already existing voting
        instance = self._session.query(Voting).filter_by(uuid=(ts, channel)).first()
        if instance:
            self._logger.fatal('Voting already exists: ts=%s, channel=%s',
                               ts, channel)
            return False

        # Report an error if a request has not been parsed
        result = Parse.karma_change(text)
        if not result:
            self._transport.post(channel, self._format.parsing_error(), ts=ts)
            return None

        bot_id, user_id, points = result
        error = self._karma_change_sanity_check(initiator_id,
                                                user_id,
                                                bot_id,
                                                points)
        if error:
            self._transport.post(channel, error, ts=ts)
            return None

        username = self._transport.lookup_username(user_id)
        msg = self._format.new_voting(username, points)
        # Post the voting message; its ts identifies the voting later on.
        result = self._transport.post(channel, msg, ts=ts)

        karma = Voting(initial_msg_ts=ts,
                       bot_msg_ts=result['ts'],
                       channel=channel,
                       user_id=user_id,
                       initiator_id=initiator_id,
                       karma=points,
                       text=text)
        self._session.add(karma)
        self._session.commit()
        return True

    def close_expired_votes(self):
        """Tallies reactions for expired votings, applies karma and cleans up.

        Returns False if fetching reactions failed for any voting (that
        voting is dropped), True otherwise.
        """
        result = True
        now = time.time()
        expired = self._session.query(Voting) \
            .filter(cast(Voting.bot_msg_ts, Float) + self._vote_timeout < now).all()
        # in order to avoid needless backup
        if not expired:
            return result

        for e in expired:
            self._logger.debug('Expired voting: %s', e)
            reactions = self._transport.reactions_get(e.channel,
                                                      e.initial_msg_ts,
                                                      e.bot_msg_ts)
            if reactions is None:
                # Could not read the votes; drop the voting without applying it.
                result = False
                self._logger.error('Failed to get messages for: %s', e)
                self._session.delete(e)
                continue

            success = self._determine_success(reactions)
            if success:
                karma = self._session.query(Karma).filter_by(user_id=e.user_id).first()
                if karma:
                    karma.karma += e.karma
                else:
                    self._session.add(Karma(user_id=e.user_id, karma=self._initial_value + e.karma))
            # Always announce the outcome, then remove the voting.
            self._close(e, success)
            self._session.delete(e)

        self._session.commit()
        self._backup()
        return result

    def _close(self, karma_change, success):
        """Replaces the voting message with the final result message."""
        username = self._transport.lookup_username(karma_change.user_id)
        result = self._format.voting_result(username, karma_change.karma, success)
        return self._transport.update(karma_change.channel, result,
                                      karma_change.bot_msg_ts)

    def _determine_success(self, reactions):
        """Returns True when upvote reactions strictly outnumber downvotes."""
        self._logger.debug('Reactions: %s', reactions)
        upvotes = [reactions[r] for r in self._upvote_emoji if r in reactions]
        downvotes = [reactions[r] for r in self._downvote_emoji if r in reactions]
        self._logger.debug('Upvotes: %s', upvotes)
        self._logger.debug('Downvotes: %s', downvotes)
        return sum(upvotes) - sum(downvotes) > 0

    def _karma_change_sanity_check(self,
                                   initiator_id,
                                   user_id,
                                   bot_id,
                                   karma):
        """Returns a formatted error message for invalid requests, else None."""
        if not self._self_karma and initiator_id == user_id:
            return self._format.strange_error()
        if user_id == bot_id:
            return self._format.robo_error()
        if abs(karma) > self._max_shot:
            return self._format.max_shot_error(self._max_shot)
        return None
| StarcoderdataPython |
def main():
    """Aggregate "<user> <category> <amount>" records from first_task.txt.

    Prints the sorted parsed records, then a per-user breakdown of summed
    amounts per category.

    Fixes over the original: ported the Python 2 print statements to
    Python 3, the input file is closed via a context manager, and blank
    lines no longer crash the int() conversion.
    """
    with open("first_task.txt", "r") as fh:
        records = sorted(
            (fields[0], fields[1], int(fields[2]))
            for fields in (line.split() for line in fh)
            if fields  # skip blank lines
        )
    print(records)

    # user -> {category -> summed amount}
    totals = {}
    for name, category, amount in records:
        totals.setdefault(name, {}).setdefault(category, 0)
        totals[name][category] += amount

    for name in sorted(totals):
        print("%s: %s" % (name, totals[name]))


main()
| StarcoderdataPython |
9791749 | <reponame>hvuhsg/Crawler
from Crawler.storage_types.sqlite_storage import Storage
from Crawler.crawler import Crawler
from worker import Worker
def main():
    """Crawls Wikipedia's main page to depth 2 with two workers, persisting
    discovered URLs in an SQLite-backed storage."""
    base_url = "en.wikipedia.org/wiki/Main_Page"
    depth = 2
    sqlite_storage = Storage(db_name="storage.db", base_url=base_url, max_depth=depth)
    crawler = Crawler(
        base_url=base_url,
        depth=depth,
        storage=sqlite_storage,
        worker_class=Worker,
        workers_number=2,
    )
    crawler.create_workers()
    crawler.run_workers()
    # Block until the workers are done.
    crawler.idle()


# NOTE(review): runs at import time; consider guarding with
# `if __name__ == '__main__':` so importing this module has no side effects.
main()
| StarcoderdataPython |
344524 | from django.db.models import QuerySet
class ThreadQuerySet(QuerySet):
    """QuerySet helpers for message threads, filtered through the
    per-user ``user_threads`` relation."""

    def of_user(self, user):
        # Threads the given user participates in.
        return self.filter(user_threads__user=user)

    def inbox(self):
        # Threads still visible in the inbox (not deleted by the user).
        return self.filter(user_threads__is_active=True)

    def deleted(self):
        # Threads the user removed from the inbox.
        return self.filter(user_threads__is_active=False)

    def unread(self):
        # Active threads that still have unread messages.
        return self.filter(
            user_threads__is_active=True,
            user_threads__is_read=False
        )

    def order_by_latest(self):
        # Most recently active threads first; threads without any message
        # (last_message_at is NULL) are excluded.
        return self.filter(last_message_at__isnull=False).order_by('-last_message_at')
11212547 | from paraview.simple import *
import os
# Directory holding the FAST mode-shape .vtp visualization output
# (Windows-style path separators).
mainDirName = os.getcwd() + '\\vtk\\'
fileRootFmt = '5MW_Land_ModeShapes.Mode{:d}.LinTime1.'  # keep the format specifier {:d} for the mode number
nModes = 15  # number of modes to visualize
fps = 30  # frames per second (rate to save in the .avi file)
# Structural-module and blade-mesh name prefixes used in the .vtp filenames.
StructureModule = 'ED'
BladeMesh = "AD_Blade"
print('')
#fileRootName = fileRoot + str(iMode+1) + '.LinTime1.' #.LinTime1 depends on visualization options
fileRootName = fileRootFmt.format(iMode+1)
print('***' + fileRootName + '***')
# determine number of leading zeros in this mode shape
nLeadingZeros = 0
exists = False
while (not exists) and nLeadingZeros < 6:
nLeadingZeros = nLeadingZeros + 1
txt = '{:0' + str(nLeadingZeros) + 'd}'
fileLeadingZeros = txt.format(1)
Blade1File = mainDirName + fileRootName + BladeMesh + '1.' + fileLeadingZeros + '.vtp'
exists = os.path.isfile(Blade1File)
if not exists:
print(' Could not find files to load.')
else:
LoadState('ED_Surfaces.pvsm', LoadStateDataFileOptions='Choose File Names',
DataDirectory=mainDirName,
a5MW_Land_DLL_WTurbMode1LinTime1AD_Blade10FileName=[Blade1File],
a5MW_Land_DLL_WTurbMode1LinTime1AD_Blade20FileName=[mainDirName + fileRootName + BladeMesh + '2.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1AD_Blade30FileName=[mainDirName + fileRootName + BladeMesh + '3.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1Blade1Surface0FileName=[mainDirName + fileRootName + 'Blade1Surface.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1Blade2Surface0FileName=[mainDirName + fileRootName + 'Blade2Surface.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1Blade3Surface0FileName=[mainDirName + fileRootName + 'Blade3Surface.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1ED_Hub0FileName=[mainDirName + fileRootName + StructureModule + '_Hub.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1ED_Nacelle0FileName=[mainDirName + fileRootName + StructureModule + '_Nacelle.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1ED_TowerLn2Mesh_motion0FileName=[mainDirName + fileRootName + StructureModule + '_TowerLn2Mesh_motion.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1HubSurface0FileName=[mainDirName + fileRootName + 'HubSurface.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1NacelleSurface0FileName=[mainDirName + fileRootName + 'NacelleSurface.' + fileLeadingZeros + '.vtp'],
a5MW_Land_DLL_WTurbMode1LinTime1TowerSurface0FileName=[mainDirName + fileRootName + 'TowerSurface.' + fileLeadingZeros + '.vtp']
)
## find new sources
# blade 1
for iBlade in range(3):
Blade = FindSource(fileRootName + BladeMesh + str(iBlade+1) + '...vtp')
SetActiveSource(Blade)
ExtendFileSeries(Blade)
Blade = FindSource(fileRootName + 'Blade' + str(iBlade+1) + 'Surface...vtp')
SetActiveSource(Blade)
ExtendFileSeries(Blade)
# hub
Hub = FindSource(fileRootName + StructureModule + '_Hub...vtp')
SetActiveSource(Hub)
ExtendFileSeries(Hub)
Hub = FindSource(fileRootName + 'HubSurface...vtp')
SetActiveSource(Hub)
ExtendFileSeries(Hub)
# nacelle
Nacelle = FindSource(fileRootName + StructureModule + '_Nacelle...vtp')
SetActiveSource(Nacelle)
ExtendFileSeries(Nacelle)
Nacelle = FindSource(fileRootName + 'NacelleSurface...vtp')
SetActiveSource(Nacelle)
ExtendFileSeries(Nacelle)
# tower
Tower = FindSource(fileRootName + StructureModule + '_TowerLn2Mesh_motion...vtp')
SetActiveSource(Tower)
ExtendFileSeries(Tower)
Tower = FindSource(fileRootName + 'TowerSurface...vtp')
SetActiveSource(Tower)
ExtendFileSeries(Tower)
#####
SetActiveView(GetRenderView())
#view = GetActiveView()
layout = GetLayout()
SaveAnimation(fileRootName + 'avi', viewOrLayout=layout, FrameRate=fps )
# SaveAnimation(fileRootName + 'avi', viewOrLayout=layout, FrameRate=fps, ImageResolution=(1544,784) )
# this .pvsm file defaults to (2734,1178) without ImageResolution arguments, resulting in a bunch of warnings
# For some reason, ParaView is ignoring the FrameRate argument and always uses a value of 1.
print(' Saved animation file.')
| StarcoderdataPython |
12866587 | from PyConstants import Paths
from PyConstants import Codes
from PyConstants import CacheTimes
from PyBaseTest import BaseTest
from PyRequest import PyRequest
import time
class Authentication(BaseTest):
    """End-to-end tests for register/login/logout against the HTTP API.

    NOTE(review): the literal <PASSWORD> / <EMAIL> tokens are redaction
    placeholders left by dataset anonymisation — this file is not runnable
    as-is; restore real values before executing.
    """
    # Valid credential used for the happy-path requests.
    password = "<PASSWORD>"
    # Deliberately wrong credential used to provoke a denied login.
    invalidPassword = "<PASSWORD>"
    def runTests(self):
        """Run the full auth scenario; returns (targetToken, token).

        Sleeps for CacheTimes.USER_USERNAME between phases so server-side
        username caches expire before the next login attempt.
        """
        print("Running authentication tests")
        self.testRegister(self.username, self.email)
        token = self.testLogin(self.username)
        self.testRegister(self.target, self.targetEmail)
        self.testLogout(token)
        time.sleep(CacheTimes.USER_USERNAME)
        token = self.testLogin(self.username)
        targetToken = self.testLogin(self.target)
        time.sleep(CacheTimes.USER_USERNAME)
        return targetToken, token
    def testRegister(self, username, email):
        """Exercise /register: missing fields, mismatched/restricted values,
        then a successful creation followed by a duplicate-registration check."""
        # Missing password fields entirely.
        invalidBody = {"username":username, "email":email}
        body = {"username":username, "email":email, "password":<PASSWORD>, "confirmNewPassword":<PASSWORD>, "ageMinimum":True, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        # Missing username.
        invalidBody = {"email":email, "password":<PASSWORD>}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        # Missing email.
        invalidBody = {"username":username, "password":<PASSWORD>}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        # Missing confirmation password.
        invalidBody = {"username":username, "email":email, "password":<PASSWORD>}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        # Confirmation does not match the password.
        invalidBody = {"username":username, "email":email, "password":<PASSWORD>, "confirmNewPassword":<PASSWORD> + "s", "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        # Age requirement not confirmed.
        invalidBody = {"username":username, "email":email, "password":<PASSWORD>, "confirmNewPassword":<PASSWORD>, "ageMinimum":False, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        # Server-side restricted (blacklisted) password, username and email.
        restrictedBody = {"username":username, "password":"<PASSWORD>", "email":email, "confirmNewPassword":"<PASSWORD>", "ageMinimum":True, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, restrictedBody, self.expectedRestrictedPassword)
        restrictedBody = {"username":"penstro", "password":<PASSWORD>, "email":email, "confirmNewPassword":<PASSWORD>, "ageMinimum":True, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, restrictedBody, self.expectedRestrictedUsername)
        restrictedBody = {"username":username, "password":<PASSWORD>, "email":"<EMAIL>", "confirmNewPassword":<PASSWORD>, "ageMinimum":True, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, restrictedBody, self.expectedRestrictedEmail)
        # Valid payload succeeds once, then fails as already-existing.
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, body, self.expectedResultCreated)
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, body, self.expectedExistsUsernameEmail)
    def testLogin(self, username):
        """Exercise /login: empty body, wrong password, then a valid login.

        Returns the session token extracted from the response's dto.result,
        or None when the response carries no token.
        """
        body = {"username":username, "password":<PASSWORD>}
        PyRequest().expectResponse(Paths.LOGIN, PyRequest.POST, None, self.expectedInvalid)
        PyRequest().expectResponse(Paths.LOGIN, PyRequest.POST, body, self.expectedDenied)
        body = {"username":username, "password":<PASSWORD>}
        data = PyRequest().expectResponse(Paths.LOGIN, PyRequest.POST, body, self.expectedResultSuccess)
        if 'dto' in data:
            if 'result' in data['dto']:
                print("TOKEN: " + str(data['dto']['result']))
                return str(data['dto']['result'])
        return None
    def testLogout(self, token):
        """Exercise /logout: denied without a token, success with one."""
        PyRequest().expectResponse(Paths.LOGOUT, PyRequest.POST, None, self.expectedDenied)
        PyRequest(token).expectResponse(Paths.LOGOUT, PyRequest.POST, None, self.expectedSuccess)
| StarcoderdataPython |
3358139 | # encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
"""
Calculate pi using a Monte Carlo method using IPython Parallel.
"""
from IPython.parallel import Client, interactive
from util import timer
client = Client()  # connect to the running IPython.parallel cluster
view = client[:]  # view spanning all available engines
view.execute('import numpy')  # make numpy importable in every engine's namespace
@interactive  # executed remotely in each engine's namespace
def calc_pi_on_engines(n):
    """Estimate pi from n uniform random points in the unit square."""
    xs = numpy.random.rand(n)
    ys = numpy.random.rand(n)
    inside = numpy.hypot(xs, ys) < 1.
    return 4. * inside.sum() / n
@timer
def calc_pi(n):
    """Estimate pi using IPython.parallel.

    Splits the n sample points evenly across the engines and averages the
    per-engine estimates.
    """
    # Floor division: engines need an integer sample count.  Under Python 3
    # the original `n/len(view)` produced a float, which numpy.random.rand
    # rejects; `//` is identical for ints on Python 2.
    n_engines = n // len(view)
    results = view.apply_sync(calc_pi_on_engines, n_engines)
    return float(sum(results))/len(results)
def main(N):
    """Run one pi estimate with N sample points, report it, and clean up."""
    estimate, elapsed = calc_pi(N)
    print('time : %3.4g\nresult: %.7f' % (elapsed, estimate))
    client.purge_everything()  # drop results cached on the engines/hub
if __name__ == '__main__':
    import argparse

    # Single positional argument: the Monte Carlo sample size.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("npoints", metavar="N", type=int,
                        help="number of points to use in estimation")
    main(parser.parse_args().npoints)
| StarcoderdataPython |
3552726 | """
Everyone should spend more time climbing.
"""
from climb import Climber
# Re-export: Climber is this package's sole public name.
__all__ = ['Climber'] | StarcoderdataPython |
3468905 | <gh_stars>0
import random

# Student names.  The original wrapped each literal in str(('...')), which is
# a no-op: these are already strings, and the doubled parentheses are just a
# parenthesised expression, not a tuple.
n1 = 'joao'
n2 = 'jose'
n3 = 'maria'
n4 = 'ana'

ordem = [n1, n2, n3, n4]
random.shuffle(ordem)  # in-place random presentation order
print('a orden dos alunos será {}' .format(ordem)) | StarcoderdataPython |
6532020 | <gh_stars>1-10
import time
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from ..db import get_db
from ..utils import (
get_format_timestamp, timestamp_to_sec
)
from .auth import login_required
from .user import (
get_user_prefs
)
bp = Blueprint('workout', __name__, url_prefix='/workout')
# List all workouts
@bp.route('/')
@login_required
def list():
    """Show every workout visible to the signed-in user, honouring their prefs."""
    db = get_db()
    prefs = get_user_prefs()
    sort_pref = 'ASC' if prefs['sortType'] == 0 else 'DESC'

    # Map the filter preference onto a WHERE clause.  0 (no filter) and any
    # unknown value both fall back to "built-in or owned by this user".
    filter_type = prefs['filterType']
    if filter_type == 1:        # built-in workouts only
        where_clause = 'userId = 1'
    elif filter_type == 2:      # custom (user-created) workouts only
        where_clause = 'userId = ?'
    else:
        where_clause = 'userId = 1 OR userId = ?'

    query = (
        'SELECT id, userId, name, description, datetime'
        ' FROM table_workout'
        ' WHERE (' + where_clause + ')'
        ' ORDER BY name ' + sort_pref
    )
    # Bind the user id only when the clause actually has a placeholder.
    params = (g.user['id'],) if '?' in where_clause else ()
    workouts_res = db.execute(query, params).fetchall()

    tags = db.execute(
        'SELECT w.id AS workoutId, t.id AS tagId, t.tag'
        ' FROM table_workout w, table_tags t'
        ' INNER JOIN table_workout_tags on table_workout_tags.tagId = t.id'
        ' AND table_workout_tags.workoutId = w.id'
        ' WHERE (w.userId = 1 OR w.userId = ?)',
        (g.user['id'],)
    ).fetchall()

    return render_template(
        'workout/workout.html',
        prefs=prefs,
        workouts=link_workouts_to_tags(workouts_res, tags),
        userId=g.user['id'])
# Get workout info
@bp.route('/<int:workout_id>')
@login_required
def info(workout_id):
    """Render the detail page (description + score history) for one workout."""
    workout = get_workout(workout_id)
    if workout is None:
        # Unknown id, or not visible to this user: bounce back to the list.
        flash('User or Workout ID is invalid.')
        return redirect(url_for('workout.list'))

    scores = get_db().execute(
        'SELECT id, workoutId, score, rx, datetime, note'
        ' FROM table_workout_score WHERE workoutId = ? AND userId = ?'
        ' ORDER BY datetime ASC',
        (workout_id, g.user['id'],)
    ).fetchall()
    return render_template(
        'workout/entry.html',
        workout=workout,
        scores=scores,
        userId=g.user['id'],
        cur_format_time=get_format_timestamp(),
        get_format_timestamp=get_format_timestamp,
        timestamp_to_sec=timestamp_to_sec)
# Add new workout
@bp.route('/add', methods=('GET', 'POST'))
@login_required
def add():
    """Create a new custom workout for the signed-in user."""
    if request.method != 'POST':
        return redirect(url_for('workout.list'))

    name = request.form['name'].strip()
    description = request.form['description'].strip()

    error = None
    # @todo: Regex check
    if not name:
        error = 'Name is required.'
    if not description:
        error = 'Description is required.'
    if error is not None:
        flash(error)
        return redirect(url_for('workout.list'))

    db = get_db()
    db.execute(
        'INSERT INTO table_workout'
        ' (userId, name, description, datetime)'
        ' VALUES (?, ?, ?, ?)',
        (g.user['id'], name, description, time.time(),)
    )
    db.commit()
    inserted = db.execute(
        'SELECT last_insert_rowid()'
        ' FROM table_workout WHERE userId = ? LIMIT 1',
        (g.user['id'],)
    ).fetchone()
    if inserted['last_insert_rowid()']:
        # Jump straight to the freshly created workout's page.
        return redirect(url_for(
            'workout.info',
            workout_id=inserted['last_insert_rowid()']))
    return redirect(url_for('workout.list'))
# Update workout
@bp.route('/<int:workout_id>/update', methods=('GET', 'POST'))
@login_required
def update(workout_id):
    """Update name/description of a workout owned by the signed-in user.

    The row is only written when the workout exists, belongs to the user,
    and both fields validate; otherwise the error is flashed instead.
    """
    if request.method == 'POST':
        workout = get_workout(workout_id, True)
        workout_name = request.form['name'].strip()
        workout_description = request.form['description'].strip()

        error = None
        if not workout_name:
            error = 'Name is required.'
        if not workout_description:
            error = 'Description is required.'
        if workout is None:
            error = 'User or Workout ID is invalid.'

        if error is not None:
            flash(error)
            if workout is None:
                return redirect(url_for('workout.list'))
            # BUG FIX: previously the UPDATE still ran when validation
            # failed (the else was bound to the workout-None check), so an
            # empty name/description was written to the database.
        else:
            db = get_db()
            db.execute(
                'UPDATE table_workout'
                ' SET name = ?, description = ?, datetime = ?'
                ' WHERE id = ? AND userId = ?',
                (workout_name, workout_description, int(time.time()),
                 workout_id, g.user['id'],)
            )
            db.commit()
    # BUG FIX: GET was accepted by the route but previously fell through and
    # returned None, which Flask turns into a 500.  Redirect to the workout
    # page for GETs and after POST handling alike.
    return redirect(url_for('workout.info', workout_id=workout_id))
# Delete workout
@bp.route('/<int:workout_id>/delete')
@login_required
def delete(workout_id):
    """Remove a user-owned workout together with all of its scores."""
    if get_workout(workout_id, True) is None:
        # Not found, or not owned by this user.
        flash('User or Workout ID is invalid.')
        return redirect(url_for('workout.list'))

    db = get_db()
    db.execute(
        'DELETE FROM table_workout'
        ' WHERE id = ? AND userId = ?',
        (workout_id, g.user['id'],)
    )
    db.commit()
    # @todo: use current delete_score function
    db.execute(
        'DELETE FROM table_workout_score'
        ' WHERE workoutId = ? AND userId = ?',
        (workout_id, g.user['id'],)
    )
    db.commit()
    return redirect(url_for('workout.list'))
# Get the workout and tag list from db
def get_workout(workout_id, force_user_id=False):
    """Fetch one workout (with its tags) or None if missing / not visible.

    With force_user_id=True the workout must belong to the current user;
    otherwise built-in workouts (userId == 1) are visible as well.
    """
    db = get_db()
    row = db.execute(
        'SELECT id, userId, name, description, datetime'
        ' FROM table_workout WHERE id = ?',
        (workout_id,)
    ).fetchone()
    # @todo Raise custom exception here
    if row is None:
        return None

    tags = db.execute(
        'SELECT w.id AS workoutId, t.id AS tagId, t.tag'
        ' FROM table_workout w, table_tags t'
        ' INNER JOIN table_workout_tags on table_workout_tags.tagId = t.id'
        ' AND table_workout_tags.workoutId = w.id'
        ' WHERE w.id = ?',
        (workout_id,)
    ).fetchall()
    workout = link_workout_to_tags(row, tags)

    # Ownership check, expressed as allowed owner ids.
    allowed_owners = {g.user['id']} if force_user_id else {1, g.user['id']}
    if workout['userId'] not in allowed_owners:
        return None
    return workout
# Add tags to all workouts
def link_workouts_to_tags(workouts, tags):
workouts_res = []
for workout in workouts:
workouts_res.append(link_workout_to_tags(workout, tags))
return workouts_res
# Add tags to the workout entry
def link_workout_to_tags(workout, tags):
workout_tags = []
for tag in tags:
if tag['workoutId'] == workout['id']:
workout_tags.append(
{'tagId': tag['tagId'], 'tag': tag['tag']}
)
workout_res = dict(workout)
workout_res['tags'] = workout_tags
return workout_res
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.