id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3312115 | <reponame>gumerov-amir/Pyttcl
from threading import Thread
from wx import CallAfter
from .msg_dlg import MessageDialog
from lib.TeamTalk5 import Channel, User
# from lib import Tolk
class EventThread(Thread):
    """Background thread that pumps TeamTalk client events and updates the GUI.

    NOTE(review): relies on a global ``_`` translation function (gettext)
    being installed before the thread runs — confirm in the app entry point.
    """

    def __init__(self, pyttcl):
        Thread.__init__(self)
        self.pyttcl = pyttcl
        # When False, notify() becomes a no-op (notifications muted).
        self.allow_notify = True

    def run(self):
        # Pump the TeamTalk message queue forever; thread ends with the process.
        while True:
            msg = self.pyttcl.TeamTalk.getMessage()
            if msg.nClientEvent > 0:
                self.handleEvent(msg)

    def handleEvent(self, msg):
        """Dispatch a TeamTalk event by its numeric nClientEvent code."""
        if msg.nClientEvent == 10:  # connected to server
            self.notify(_('Connected'))
            self.pyttcl.is_connected = True
        elif msg.nClientEvent == 20:  # connection failed or lost
            self.notify(_('Not connected'))
            self.pyttcl.is_connected = False
        elif msg.nClientEvent == 210:  # error event: dump the raw message
            self.notify("error" + str(msg.__dict__))
        elif msg.nClientEvent == 220:
            pass
        elif msg.nClientEvent == 230:  # logged in
            self.notify(_('loggedin'))
            self.pyttcl.is_loggedin = True
        elif msg.nClientEvent == 280:  # user changed -> refresh tree label
            self.updateUser(msg.user)
        elif msg.nClientEvent == 290:  # user joined a channel
            self.joinUser(msg.user)
        elif msg.nClientEvent == 310:
            # Incoming text message: show the dialog on the GUI thread.
            CallAfter(lambda pyttcl, arg: MessageDialog(pyttcl, arg.textmessage), self.pyttcl, msg)
        elif msg.nClientEvent == 320:  # new channel announced
            self.addChannelToTreeview(msg.channel)

    def notify(self, txt):
        """Announce *txt* (intended for the Tolk screen reader) if enabled."""
        if self.allow_notify:
            # Tolk.speak(txt)
            # BUG FIX: the 'if' body originally contained only the commented-out
            # call above, which is a syntax error. 'pass' keeps the block valid
            # until the Tolk integration is re-enabled.
            pass

    def addChannelToTreeview(self, channel):
        """Insert *channel* into the tree under its parent (channel ID 1 is the root)."""
        tree = self.pyttcl.gui.frame.tree
        if channel.nChannelID == 1:
            # Root channel: label it with the server name and total user count.
            label = f'{self.pyttcl.TeamTalk.getServerProperties().szServerName} ({len(self.pyttcl.TeamTalk.getServerUsers())})'
            self.pyttcl.MainTreeviewData[tree.AddRoot(label)] = channel
            self.pyttcl.MessageData['C'][channel.nChannelID] = {}
        elif channel.nParentID == 1:
            # Direct child of the root channel.
            label = f'{channel.szName} ({len(self.pyttcl.TeamTalk.getChannelUsers(channel.nChannelID))})'
            self.pyttcl.MainTreeviewData[tree.AppendItem(tree.GetRootItem(), label)] = channel
            self.pyttcl.MessageData['C'][channel.nChannelID] = {}
        else:
            # Nested channel: find the tree item whose value matches the parent ID.
            for parent_item in list(self.pyttcl.MainTreeviewData.keys()):
                if self.pyttcl.MainTreeviewData[parent_item].nChannelID == channel.nParentID:
                    label = f'{channel.szName} ({len(self.pyttcl.TeamTalk.getChannelUsers(channel.nChannelID))})'
                    self.pyttcl.MainTreeviewData[tree.AppendItem(parent_item, label)] = channel
                    self.pyttcl.MessageData['C'][channel.nChannelID] = {}

    def joinUser(self, user):
        """Place *user* under their channel's tree item, removing any stale entry."""
        # Remove the user's previous tree item, if present.
        ids = [u.nUserID if isinstance(u, User) else -1
               for u in self.pyttcl.MainTreeviewData.values()]
        if user.nUserID in ids:
            items = list(self.pyttcl.MainTreeviewData.keys())
            self.pyttcl.gui.frame.tree.Delete(items[ids.index(user.nUserID)])
            # NOTE(review): the deleted item is not removed from
            # MainTreeviewData, so a stale mapping may remain — confirm intent.
        for item in list(self.pyttcl.MainTreeviewData.keys()):
            data = self.pyttcl.MainTreeviewData[item]
            if isinstance(data, Channel) and user.nChannelID == data.nChannelID:
                self.pyttcl.MainTreeviewData[
                    self.pyttcl.gui.frame.tree.AppendItem(item, user.szNickname)
                ] = user
                if user.nUserID not in self.pyttcl.MessageData['U'].keys():
                    self.pyttcl.MessageData['U'][user.nUserID] = {}
                break

    def updateUser(self, user):
        """Refresh the tree label of an existing user entry."""
        for item in list(self.pyttcl.MainTreeviewData.keys()):
            data = self.pyttcl.MainTreeviewData[item]
            if isinstance(data, User) and data.nUserID == user.nUserID:
                self.pyttcl.gui.frame.tree.SetItemText(item, user.szNickname)
| StarcoderdataPython |
4874028 | import templated_email
from django.conf import settings
def get_context():
    """Return the extra template context shared by every outgoing email."""
    context = {}
    context['footer_text'] = settings.EMAIL_FOOTER_TEXT
    return context
def send_templated_mail(
        template_name, from_email, recipient_list, context,
        *args, **kwargs):
    """Send a templated email after merging in the shared footer context.

    NOTE: intentionally mutates the caller's ``context`` dict, matching the
    original behaviour of ``context.update(...)``.
    """
    for key, value in get_context().items():
        context[key] = value
    return templated_email.send_templated_mail(
        template_name, from_email, recipient_list, context,
        *args, **kwargs)
| StarcoderdataPython |
3231487 | # BOJ 1799
import copy
import sys
# Shorthand for fast input reading.
si = sys.stdin.readline

# The four diagonal directions a bishop moves in, as (dy[i], dx[i]) pairs.
dy = [-1, 1, -1, 1]
dx = [-1, 1, 1, -1]
def backtrack(idx, maps):
    """Recursively try bishop placements (BOJ 1799).

    idx  -- number of bishops placed on the current path.
    maps -- n x n boolean board; True marks squares occupied or attacked.

    Records the best count found so far in the module-level ``max_value[0]``.
    NOTE(review): relies on globals ``n``, ``s_point`` and ``max_value``
    defined below at script level.
    """
    # Stop once as many placements as candidate squares have been made.
    if idx == len(s_point):
        return
    for i in range(len(s_point)):
        y, x = s_point[i]
        # Work on a deep copy so sibling branches are unaffected.
        cp = copy.deepcopy(maps)
        if not cp[y][x]:
            # Occupy the square...
            cp[y][x] = True
            # ...and mark every square along its four diagonals as attacked.
            for d in range(4):
                nx = x + dx[d]
                ny = y + dy[d]
                while True:
                    if ny < 0 or ny >= n or nx < 0 or nx >= n:
                        break
                    cp[ny][nx] = True
                    ny += dy[d]
                    nx += dx[d]
            max_value[0] = max(max_value[0], idx + 1)
            backtrack(idx + 1, cp)
# Read the board size and grid from stdin (1 = a bishop may stand here).
n = int(si())
graph = [list(map(int, si().split())) for _ in range(n)]
# Sample fixture kept from the original for offline testing:
# n = 5
# graph = [
#     [1, 1, 0, 1, 1],
#     [0, 1, 0, 0, 0],
#     [1, 0, 1, 0, 1],
#     [1, 0, 0, 0, 0],
#     [1, 0, 1, 1, 1],
# ]
visited = [[False for _ in range(n)] for _ in range(n)]
# Collect every candidate square for a bishop.
s_point = []
max_value = [0]
for i in range(n):
    for j in range(n):
        if graph[i][j] == 1:
            s_point.append((i, j))
backtrack(0, visited)
print(max_value[0])
| StarcoderdataPython |
4801573 | <reponame>Adel-Charef/scripts
import os
def recurse_dir(root):
    """Recursively print every file under *root* as '<absolute path> - <size>'."""
    root = os.path.abspath(root)
    for entry in os.listdir(root):
        full_path = os.path.join(root, entry)
        if not os.path.isdir(full_path):
            print(f"{full_path} - {os.stat(full_path).st_size}")
        else:
            recurse_dir(full_path)
| StarcoderdataPython |
5191076 | <reponame>Tdev95/JWM<gh_stars>0
class JWMException(Exception):
    """Base exception for every error raised by the JWM package."""
class DeserializationException(JWMException):
    """Raised when deserialization fails."""
# NOTE(review): inherits Exception rather than JWMException, unlike the other
# errors in this module — confirm whether that is intentional.
class VerificationFailure(Exception):
    """Verification of a Macaroon was attempted but failed for an unknown reason."""
class CriticalClaimException(VerificationFailure):
    """The Verifier object did not find a critical claim in a Macaroon."""
class UniqueClaimException(VerificationFailure):
    """The Verifier object found a unique claim used more than once."""
| StarcoderdataPython |
8036738 | <reponame>CornellDataScience/CoalescenceML<filename>src/coalescenceml/utils/json_utils.py
import json
from pathlib import Path
from typing import Any, Dict
from coalescenceml.io import fileio, utils
def write_json(file_path: str, contents: Dict[str, Any]) -> None:
    """Serialize *contents* as JSON and write it to *file_path*.

    Args:
        file_path: Destination path (local or remote).
        contents: JSON-serializable mapping to write.

    Raises:
        FileNotFoundError: if the parent directory of a *local* file_path
            does not exist (remote paths skip this check).
    """
    if not utils.is_remote(file_path):
        # Local path: the parent directory must already exist.
        directory = str(Path(file_path).parent)
        if not fileio.isdir(directory):
            raise FileNotFoundError(f"Directory '{directory}' does not exist")
    utils.write_file_contents_as_string(file_path, json.dumps(contents))
def read_json(file_path: str) -> Dict[str, Any]:
    """Read the JSON file at *file_path* and return its parsed contents.

    Args:
        file_path: Path to the JSON file.

    Returns:
        The decoded contents as a dict.

    Raises:
        FileNotFoundError: if *file_path* does not exist.
    """
    if not fileio.exists(file_path):
        raise FileNotFoundError(f"File '{file_path}' does not exist")
    return json.loads(utils.read_file_contents_as_string(file_path))
| StarcoderdataPython |
3531168 | <reponame>ansabgillani/binarysearchcomproblems
class Solution:
    """Rectangle intersection test; rectangles are [x1, y1, x2, y2]."""

    def solve(self, rect0, rect1):
        """Return True when *rect1* overlaps *rect0* (original edge semantics kept)."""
        # rect1's top or bottom edge falls within rect0's vertical span...
        y_edge_in_span = (rect0[1] <= rect1[3] <= rect0[3]
                          or rect0[1] <= rect1[1] <= rect0[3])
        # ...and the rectangles are not strictly separated horizontally.
        x_separated = rect1[2] <= rect0[0] or rect1[0] >= rect0[2]
        # rect1's left or right edge falls within rect0's horizontal span...
        x_edge_in_span = (rect0[0] <= rect1[0] <= rect0[2]
                          or rect0[0] <= rect1[2] <= rect0[2])
        # ...and the rectangles are not strictly separated vertically.
        y_separated = rect1[3] <= rect0[1] or rect1[1] >= rect0[3]
        return (y_edge_in_span and not x_separated) or \
               (x_edge_in_span and not y_separated)
| StarcoderdataPython |
140744 | <reponame>EliasOPrado/tour-project
from django.contrib import admin
from .models import Order, OrderLineItem
# Register your models here.
"""
TubularInline subclass defines the template used
to render the Order in the admin interface. StackInline is other one.
"""
class OrderLineAdminInline(admin.TabularInline):
    """Edit OrderLineItem rows inline (tabular layout) on the Order admin page."""
    model = OrderLineItem
class OrderAdmin(admin.ModelAdmin):
    """Admin for Order with its line items editable inline."""
    inlines = (OrderLineAdminInline, )


# Make Order manageable through the admin site with the customised admin class.
admin.site.register(Order, OrderAdmin)
| StarcoderdataPython |
221760 | <filename>backend/ringi/urls.py
from django.urls import path
from . import views
# Route table for the ringi app: index, create, and per-object show/edit/delete
# keyed by integer primary key.
urlpatterns = [
    path('', views.index, name='ringi.index'),
    path('create', views.create, name='ringi.create'),
    path('show/<int:id>', views.show, name='ringi.show'),
    path('edit/<int:id>', views.edit, name='ringi.edit'),
    path('delete/<int:id>', views.delete, name='ringi.delete'),
] | StarcoderdataPython |
1673953 | from .TS3Bot import Bot
from .guild_service import GuildService
# Public API of the package.
__all__ = ['GuildService', 'Bot']
| StarcoderdataPython |
5197975 | <gh_stars>0
# Warm-up: one-line list comprehensions over ranges and strings.
# 1)
print([x+8 for x in range(3,7)])
# 2)
print([c for c in "programa"])
# 3)
print([[z,k] for z in range(3) for k in range(3,5)])
# 4)
print([s.upper() for s in "hoy es viernes"])
# 5)
print([len(z) for z in "hoy es viernes 24".split()])

##########################################
# Exercise 1
print("\nEjercicio 1\n")
# 1. Write a function that returns a comprehension-built list of the even
#    numbers up to a number given as a parameter.
def pareshastan(n):
    """Return the even numbers from 1 to *n* (inclusive) as a list."""
    return [par for par in range(2, n + 1, 2)]
print(pareshastan(25))

# Exercise 2
print("\nEjercicio 2:\n")
# 2. Use a list comprehension to get the numbers of the form (n+1)*(n-1) for
#    n > 1. The list starts [0, 3, 8, 15, 24, ...].
# NOTE(review): the original statement asks for values below 1000 starting at
# 0, but this solution iterates n < 1000 (values grow to ~10^6) and excludes
# n = 1 — the filter probably should be on the product; confirm against the
# exercise source.
print([(n+1)*(n-1) for n in range(1000) if n>1])

# Exercise 3
print("\nEjercicio 3:\n")
# 3. Using list comprehensions, implement a function that takes a string and
#    returns it uppercased without blanks or punctuation (digits unchanged).
def cadenalimpia(cadena):
    """Return *cadena* stripped, uppercased, and with blanks and punctuation
    removed. Digits are kept unchanged, as the exercise statement requires.

    BUG FIX: the original filter string also contained '0123456789', which
    removed digits even though the statement directly above says digits must
    not be changed.
    """
    return "".join([letra.upper() for letra in cadena.strip() if letra not in " :,;."])
print(cadenalimpia("Es 1 la idea, que vale por: 100, muchas palabras"))

# Exercise 4
print("\nEjercicio 4:\n")
# 4. Given a string of dates in "MM/DD/YYYY" format separated by commas, build
#    a list of strings with the dates in "DD/MM/YYYY" format.
#    E.g. "10/11/2016,01/02/2016" -> ['11/10/2016', '02/01/2016']
print(["{}/{}/{}".format(fecha.split("/")[1],fecha.split("/")[0],fecha.split("/")[2]) for fecha in "10/11/2016,01/02/2016,05/06/2017,07/31/2017".split(",")])
# Alternative solution:
def reformatFecha(fecha):
    """Convert a date string from MM/DD/YYYY to DD/MM/YYYY."""
    month, day, year = fecha.split("/")
    return "{}/{}/{}".format(day, month, year)
fechas = ["10/11/2016","01/02/2016","05/06/2017","07/31/2017"]
print([reformatFecha(fecha) for fecha in fechas])

# Exercise 5
print("\nEjercicio 5:\n")
# 5. Given the following class skeleton:
#      class Mail(object):
#          def __init__(self, asunto, cuerpo, destinatarios):
#              self.asunto = asunto
#              self.cuerpo = cuerpo
#              self.destinatarios = [ ... ]
#              self.copia = [ ... ]
#              self.copia_oculta = [ ... ]
#    a. Using list comprehensions, complete the constructor from the list of
#       recipients (strings with email addresses) received as a parameter,
#       following these rules:
#    b. Addresses in the "ucu.edu.uy" domain go into the `copia` (CC) list.
#    c. Addresses in the "gmail.com" domain go into the `copia_oculta` (BCC)
#       list.
#    d. Every other address goes into the mail's `destinatarios` (To) list.
class Mail(object):
    """An email whose recipients are routed to To/CC/BCC by domain substring."""

    def __init__(self, asunto, cuerpo, destinatarios):
        self.asunto = asunto
        self.cuerpo = cuerpo
        # Anything that is neither a ucu.edu.uy nor a gmail.com address -> To.
        self.destinatarios = [direccion for direccion in destinatarios
                              if "ucu.edu.uy" not in direccion
                              and "gmail.com" not in direccion]
        # ucu.edu.uy addresses -> CC.
        self.copia = [direccion for direccion in destinatarios
                      if "ucu.edu.uy" in direccion]
        # gmail.com addresses -> BCC.
        self.copia_oculta = [direccion for direccion in destinatarios
                             if "gmail.com" in direccion]
# Demo of the routing. Note: the addresses were anonymised to "<EMAIL>" by the
# dataset scrubber, so none contain either domain and all land in
# `destinatarios` here.
mail = Mail("hola","saludo",["<EMAIL>", "<EMAIL>","<EMAIL>","<EMAIL>","<EMAIL>"])
print(mail.destinatarios)
print(mail.copia)
print(mail.copia_oculta)
| StarcoderdataPython |
1613939 | <reponame>ShubhamPandey28/sunpy
"""
Common solar physics coordinate systems.
This submodule implements various solar physics coordinate frames for use with
the `astropy.coordinates` module.
"""
import numpy as np
import astropy.units as u
from astropy.coordinates import Attribute, ConvertError
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping
from astropy.coordinates.representation import (CartesianRepresentation, SphericalRepresentation,
CylindricalRepresentation,
UnitSphericalRepresentation)
from sunpy.sun.constants import radius as _RSUN
from .frameattributes import TimeFrameAttributeSunPy, ObserverCoordinateAttribute
__all__ = ['HeliographicStonyhurst', 'HeliographicCarrington',
'Heliocentric', 'Helioprojective']
class SunPyBaseCoordinateFrame(BaseCoordinateFrame):
    """
    Base class for sunpy coordinate frames.

    * Defines a default longitude wrap angle of 180 degrees, which can be
      overridden by the class variable `_wrap_angle`.
    * Injects a nice way of representing the object which the coordinate
      represents (see ``__str__``).
    """
    # Subclasses may override (e.g. HeliographicCarrington uses 360 deg).
    _wrap_angle = 180*u.deg

    def __init__(self, *args, **kwargs):
        self.object_name = None

        # If wrap_longitude=False is passed in, do not impose a specific wrap angle for the frame
        if not kwargs.pop('wrap_longitude', True):
            self._wrap_angle = None

        return super().__init__(*args, **kwargs)

    def represent_as(self, base, s='base', in_frame_units=False):
        """
        If a frame wrap angle is set, use that wrap angle for any spherical representations.
        """
        data = super().represent_as(base, s, in_frame_units=in_frame_units)

        # Only spherical-type representations carry a longitude to wrap.
        if self._wrap_angle is not None and \
           isinstance(data, (UnitSphericalRepresentation, SphericalRepresentation)):
            data.lon.wrap_angle = self._wrap_angle
        return data

    def __str__(self):
        """
        We override this here so that when you print a SkyCoord it shows the
        observer as the string and not the whole massive coordinate.
        """
        if getattr(self, "object_name", None):
            return f"<{self.__class__.__name__} Coordinate for '{self.object_name}'>"
        else:
            return super().__str__()
class HeliographicStonyhurst(SunPyBaseCoordinateFrame):
    """
    A coordinate or frame in the Stonyhurst Heliographic system.

    In a cartesian representation this is also known as the Heliocentric
    Earth Equatorial (HEEQ) system. This frame has its origin at the solar
    centre and the north pole above the solar north pole, and the zero line on
    longitude pointing towards the Earth.

    A new instance can be created using the following signatures
    (note that all the arguments must be supplied as keywords)::

        HeliographicStonyhurst(lon, lat, obstime)
        HeliographicStonyhurst(lon, lat, radius, obstime)
        HeliographicStonyhurst(x, y, z, obstime, representation_type='cartesian')

    Parameters
    ----------
    representation : `~astropy.coordinates.BaseRepresentation` or `None`
        A representation object or None to have no data.
    lon : `~astropy.coordinates.Angle`, optional
        The longitude for this object (``lat`` must also be given and
        ``representation`` must be None).
    lat : `~astropy.coordinates.Angle`, optional
        The latitude for this object (``lon`` must also be given and
        ``representation`` must be None).
    radius : `~astropy.units.Quantity`, optional
        This quantity holds the radial distance. If not specified, it is, by
        default, the radius of the photosphere.
    x : `~astropy.units.Quantity`, optional
        x coordinate.
    y : `~astropy.units.Quantity`, optional
        y coordinate.
    z : `~astropy.units.Quantity`, optional
        z coordinate.
    obstime : `~sunpy.time.Time`
        The date and time of the observation, used to convert to heliographic
        carrington coordinates.

    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import sunpy.coordinates
    >>> import astropy.units as u
    >>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km,
    ...               frame="heliographic_stonyhurst",
    ...               obstime="2010/01/01T00:00:45")
    >>> sc
    <SkyCoord (HeliographicStonyhurst: obstime=2010-01-01T00:00:45.000): (lon, lat, radius) in (deg, deg, km)
        (1., 1., 2.)>
    >>> sc.frame
    <HeliographicStonyhurst Coordinate (obstime=2010-01-01T00:00:45.000): (lon, lat, radius) in (deg, deg, km)
        (1., 1., 2.)>
    >>> sc = SkyCoord(HeliographicStonyhurst(-10*u.deg, 2*u.deg))
    >>> sc
    <SkyCoord (HeliographicStonyhurst: obstime=None): (lon, lat, radius) in (deg, deg, km)
        (-10., 2., 695700.)>

    Notes
    -----
    This frame will always be converted a 3D frame where the radius defaults to
    rsun.
    """
    name = "heliographic_stonyhurst"
    default_representation = SphericalRepresentation

    # Map representation component names onto this frame's attribute names.
    frame_specific_representation_info = {
        SphericalRepresentation: [RepresentationMapping(reprname='lon',
                                                        framename='lon',
                                                        defaultunit=u.deg),
                                  RepresentationMapping(reprname='lat',
                                                        framename='lat',
                                                        defaultunit=u.deg),
                                  RepresentationMapping(reprname='distance',
                                                        framename='radius',
                                                        defaultunit=None)],
        CartesianRepresentation: [RepresentationMapping(reprname='x',
                                                        framename='x'),
                                  RepresentationMapping(reprname='y',
                                                        framename='y'),
                                  RepresentationMapping(reprname='z',
                                                        framename='z')]
    }

    obstime = TimeFrameAttributeSunPy()

    def __init__(self, *args, **kwargs):
        _rep_kwarg = kwargs.get('representation_type', None)

        # A dimensionless radius of exactly 1 means "use the solar radius".
        if ('radius' in kwargs and kwargs['radius'].unit is u.one and
                u.allclose(kwargs['radius'], 1*u.one)):
            kwargs['radius'] = _RSUN.to(u.km)

        super().__init__(*args, **kwargs)

        # Make 3D if specified as 2D
        # If representation was explicitly passed, do not change the rep.
        if not _rep_kwarg:
            # If we were passed a 3D rep extract the distance, otherwise
            # calculate it from _RSUN.
            if isinstance(self._data, UnitSphericalRepresentation):
                distance = _RSUN.to(u.km)
                self._data = SphericalRepresentation(lat=self._data.lat,
                                                     lon=self._data.lon,
                                                     distance=distance)
class HeliographicCarrington(HeliographicStonyhurst):
    """
    A coordinate or frame in the Carrington Heliographic system.

    - The origin is the centre of the Sun.
    - The z-axis is aligned with the Sun's north pole.
    - The x and y axes rotate with a period of 25.38 days. The line of zero
      longitude passed through the disk centre as seen from Earth at
      21:36 on 9th Nov 1853.

    This frame differs from the Stonyhurst version in the definition of the
    longitude, which is defined using the time-dependant offset described
    above.

    Parameters
    ----------
    representation : `~astropy.coordinates.BaseRepresentation` or None.
        A representation object. If specified, other parameters must
        be in keyword form.
    lon : `Angle` object.
        The longitude for this object (``lat`` must also be given and
        ``representation`` must be None).
    lat : `Angle` object.
        The latitude for this object (``lon`` must also be given and
        ``representation`` must be None).
    radius : `astropy.units.Quantity` object, optional, must be keyword.
        This quantity holds the radial distance. Defaults to the solar radius.
    obstime : SunPy Time
        The date and time of the observation, used to convert to heliographic
        carrington coordinates.

    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import sunpy.coordinates
    >>> import astropy.units as u
    >>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km,
    ...               frame="heliographic_carrington",
    ...               obstime="2010/01/01T00:00:30")
    >>> sc
    <SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:30.000): (lon, lat, radius) in (deg, deg, km)
        (1., 2., 3.)>

    >>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km,
    ...               obstime="2010/01/01T00:00:45", frame="heliographic_carrington")
    >>> sc
    <SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:45.000): (lon, lat, radius) in (deg, deg, km)
        [(1., 4., 5.), (2., 5., 6.), (3., 6., 7.)]>
    """
    name = "heliographic_carrington"
    default_representation = SphericalRepresentation

    frame_specific_representation_info = {
        SphericalRepresentation: [RepresentationMapping(reprname='lon',
                                                        framename='lon',
                                                        defaultunit=u.deg),
                                  RepresentationMapping(reprname='lat',
                                                        framename='lat',
                                                        defaultunit=u.deg),
                                  RepresentationMapping(reprname='distance',
                                                        framename='radius',
                                                        defaultunit=None)],
        UnitSphericalRepresentation: [RepresentationMapping(reprname='lon',
                                                            framename='lon',
                                                            defaultunit=u.deg),
                                      RepresentationMapping(reprname='lat',
                                                            framename='lat',
                                                            defaultunit=u.deg)],
    }

    # Carrington longitude is conventionally given in the range [0, 360) deg.
    _wrap_angle = 360*u.deg

    obstime = TimeFrameAttributeSunPy()
class Heliocentric(SunPyBaseCoordinateFrame):
    """
    A coordinate or frame in the Heliocentric system.

    - The origin is the centre of the Sun.
    - The z-axis points from the centre of the Sun to the observer.
    - The y-axis is perpendicular to the z-axis, and lies in the plane that
      contains the z-axis and the solar rotation axis, pointing towards the
      Sun's north pole.

    This frame may either be specified in Cartesian or cylindrical
    representation. Cylindrical representation replaces (x, y) with (rho, psi)
    where rho is the impact parameter and psi is the position angle in degrees.

    Parameters
    ----------
    representation : `~astropy.coordinates.BaseRepresentation` or None.
        A representation object. If specified, other parameters must
        be in keyword form and if x, y and z are specified, it must
        be None.
    x : `Quantity` object.
        X-axis coordinate, optional, must be keyword.
    y : `Quantity` object.
        Y-axis coordinate, optional, must be keyword.
    z : `Quantity` object. Shared by both representations.
        Z-axis coordinate, optional, must be keyword.
    observer : `~sunpy.coordinates.frames.HeliographicStonyhurst`, optional
        The coordinate of the observer in the solar system. Defaults to the
        Earth.
    obstime : SunPy Time
        The date and time of the observation, used to convert to heliographic
        carrington coordinates.

    Examples
    --------
    >>> from astropy.coordinates import SkyCoord, CartesianRepresentation
    >>> import sunpy.coordinates
    >>> import astropy.units as u
    >>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km),
    ...               obstime="2011/01/05T00:00:50", frame="heliocentric")
    >>> sc
    <SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km
        (10., 1., 2.)>

    >>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm, frame="heliocentric", obstime="2011/01/01T00:00:54")
    >>> sc
    <SkyCoord (Heliocentric: obstime=2011-01-01T00:00:54.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in (km, m, cm)
        [(1., 3., 5.), (2., 4., 6.)]>
    """
    default_representation = CartesianRepresentation

    # NOTE(review): this attribute has a leading underscore unlike the sibling
    # frames' ``frame_specific_representation_info`` — confirm against the
    # astropy BaseCoordinateFrame API which spelling is picked up.
    _frame_specific_representation_info = {
        CylindricalRepresentation: [RepresentationMapping('phi', 'psi', u.deg)]
    }

    obstime = TimeFrameAttributeSunPy()
    observer = ObserverCoordinateAttribute(HeliographicStonyhurst, default="earth")
class Helioprojective(SunPyBaseCoordinateFrame):
    """
    A coordinate or frame in the Helioprojective (Cartesian) system.

    This is a projective coordinate system centered around the observer.
    It is a full spherical coordinate system with position given as longitude
    theta_x and latitude theta_y.

    Parameters
    ----------
    representation : `~astropy.coordinates.BaseRepresentation` or None.
        A representation object. If specified, other parameters must
        be in keyword form.
    Tx : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
        X-axis coordinate.
    Ty : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`
        Y-axis coordinate.
    distance : `~astropy.units.Quantity`
        The radial distance from the observer to the coordinate point.
    obstime : SunPy Time
        The date and time of the observation, used to convert to heliographic
        carrington coordinates.
    observer : `~sunpy.coordinates.frames.HeliographicStonyhurst`, str
        The coordinate of the observer in the solar system. If you supply a string,
        it must be a solar system body that can be parsed by
        `~sunpy.coordinates.ephemeris.get_body_heliographic_stonyhurst`.
    rsun : `~astropy.units.Quantity`
        The physical (length) radius of the Sun. Used to calculate the position
        of the limb for calculating distance from the observer to the
        coordinate.

    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import sunpy.coordinates
    >>> import astropy.units as u
    >>> sc = SkyCoord(0*u.deg, 0*u.deg, 5*u.km, obstime="2010/01/01T00:00:00",
    ...               frame="helioprojective")
    >>> sc
    <SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, km)
        (0., 0., 5.)>
    >>> sc = SkyCoord(0*u.deg, 0*u.deg, obstime="2010/01/01T00:00:00", frame="helioprojective")
    >>> sc
    <SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty) in arcsec
        (0., 0.)>
    """
    default_representation = SphericalRepresentation

    frame_specific_representation_info = {
        SphericalRepresentation: [RepresentationMapping(reprname='lon',
                                                        framename='Tx',
                                                        defaultunit=u.arcsec),
                                  RepresentationMapping(reprname='lat',
                                                        framename='Ty',
                                                        defaultunit=u.arcsec),
                                  RepresentationMapping(reprname='distance',
                                                        framename='distance',
                                                        defaultunit=None)],
        UnitSphericalRepresentation: [RepresentationMapping(reprname='lon',
                                                            framename='Tx',
                                                            defaultunit=u.arcsec),
                                      RepresentationMapping(reprname='lat',
                                                            framename='Ty',
                                                            defaultunit=u.arcsec)],
    }

    obstime = TimeFrameAttributeSunPy()
    rsun = Attribute(default=_RSUN.to(u.km))
    observer = ObserverCoordinateAttribute(HeliographicStonyhurst, default="earth")

    def calculate_distance(self):
        """
        This method calculates the third coordinate of the Helioprojective
        frame. It assumes that the coordinate point is on the disk of the Sun
        at the rsun radius.

        If a point in the frame is off limb then NaN will be returned.

        Returns
        -------
        new_frame : `~sunpy.coordinates.frames.HelioProjective`
            A new frame instance with all the attributes of the original but
            now with a third coordinate.
        """
        # Skip if we already are 3D
        distance = self.spherical.distance
        if not (distance.unit is u.one and u.allclose(distance, 1*u.one)):
            return self

        if not isinstance(self.observer, BaseCoordinateFrame):
            raise ConvertError("Cannot calculate distance to the solar disk "
                               "for observer '{}' "
                               "without `obstime` being specified.".format(self.observer))

        rep = self.represent_as(UnitSphericalRepresentation)
        lat, lon = rep.lat, rep.lon
        # Angular separation of the point from disk centre as seen by the observer.
        alpha = np.arccos(np.cos(lat) * np.cos(lon)).to(lat.unit)
        # Coefficients of d**2 + b*d + c = 0 for the line-of-sight distance d.
        c = self.observer.radius**2 - self.rsun**2
        b = -2 * self.observer.radius * np.cos(alpha)
        # Ignore sqrt of NaNs
        with np.errstate(invalid='ignore'):
            # Take the nearer root; off-limb points give a negative
            # discriminant and therefore NaN.
            d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2

        return self.realize_frame(SphericalRepresentation(lon=lon,
                                                          lat=lat,
                                                          distance=d))
| StarcoderdataPython |
6555645 | <filename>web_controller.py
from flask import Flask, render_template, request
import redis_phones
app = Flask(__name__)


@app.route('/index')
def index():
    """Render the landing page."""
    return render_template('index.html')
    # BUG FIX: a stray module-level line ``self.con.`` followed this function
    # in the original. It was a SyntaxError (trailing attribute dot) and
    # referenced an undefined ``self``; it has been removed.
@app.route('/set', methods=['POST',])
def set():
    # NOTE(review): ``set`` shadows the builtin; Flask also derives the
    # endpoint name from the function name, so renaming it would break any
    # url_for('set') callers — confirm before renaming.
    # Echo the submitted name/phone back into the confirmation template,
    # defaulting missing form fields to empty strings.
    return render_template(
        'set.html',
        name=request.form.get('name', ''),
        phone=request.form.get('phone', ''),
    )
# Run the Flask development server when executed directly
# (debug=True: not suitable for production).
if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
3498119 | <gh_stars>0
from math import sqrt
from sklearn.metrics import mean_squared_error
class ImagesMeanSquareError:
@staticmethod
def get_mean_square_error(image1data, image2data):
return sqrt(mean_squared_error(image1data, image2data))
| StarcoderdataPython |
4828997 | <reponame>rohe/otest
"""
Assertion test module
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by <NAME>.
:license: APACHE 2.0, see LICENSE for more details.
"""
from future.backports.urllib.parse import parse_qs
import json
import inspect
import traceback
import sys
from otest.events import EV_PROTOCOL_REQUEST
from otest.events import EV_PROTOCOL_RESPONSE
from otest.events import EV_REDIRECT_URL
from otest.events import EV_RESPONSE
from otest.events import EV_HTTP_RESPONSE
from oic.oic import message
__author__ = 'rolandh'
INFORMATION = 0
OK = 1
WARNING = 2
ERROR = 3
CRITICAL = 4
INTERACTION = 5
INCOMPLETE = 6
NOT_APPLICABLE = 7
STATUSCODE = ["INFORMATION", "OK", "WARNING", "ERROR", "CRITICAL",
"INTERACTION", 'PARTIAL RESULT']
STATUSCODE_TRANSL = dict([(STATUSCODE[i], i) for i in range(len(STATUSCODE))])
END_TAG = "==== END ===="
class TestResult(object):
name = 'test_result'
def __init__(self, test_id, status=OK, name='', mti=False, message='',
**kwargs):
self.test_id = test_id
self.status = status
self.name = name
self.mti = mti
self.message = message
self.http_status = 0
self.cid = ''
self.extra = kwargs
def __str__(self):
if self.status:
return '{}: status={}, message={}'.format(self.test_id,
STATUSCODE[self.status],
self.message)
else:
return '{}: status=?'.format(self.test_id)
class State(object):
name = 'state'
def __init__(self, test_id, status, name='', mti=False, message='',
context='', **kwargs):
self.test_id = test_id
self.status = status
self.name = name
self.mti = mti
self.message = message
self.context = context
self.kwargs = kwargs
def __str__(self):
_info = {
'ctx': self.context, 'id': self.test_id,
'stat': STATUSCODE[self.status], 'msg': self.message
}
if self.status != OK:
if self.context:
txt = '{ctx}:{id}: status={stat}, message={msg}'.format(
**_info)
else:
txt = '{id}: status={stat}, message={msg}'.format(**_info)
else:
if self.context:
txt = '{ctx}:{id}: status={stat}'.format(**_info)
else:
txt = '{id}: status={stat}'.format(**_info)
if self.name:
txt = '{} [{}]'.format(txt, self.name)
return txt
class Check(object):
""" General test
"""
cid = "check"
msg = "OK"
mti = True
state_cls = State
def __init__(self, **kwargs):
self._status = OK
self._message = ""
self.content = None
self.url = ""
self._kwargs = kwargs
def _func(self, conv):
return TestResult('')
def __call__(self, conv=None, output=None):
_stat = self._func(conv)
if isinstance(_stat, dict):
_stat = self.response(**_stat)
if output is not None:
output.append(_stat)
return _stat
def response(self, **kwargs):
try:
name = " ".join(
[str(s).strip() for s in self.__doc__.strip().split("\n")])
except AttributeError:
name = ""
res = self.state_cls(test_id=self.cid, status=self._status, name=name,
mti=self.mti)
if self._message:
res.message = self._message
else:
if self._status != OK:
res.message = self.msg
for key, val in kwargs.items():
setattr(self, key, val)
return res
class ExpectedError(Check):
pass
class CriticalError(Check):
status = CRITICAL
class Information(Check):
status = INFORMATION
class Warnings(Check):
status = WARNING
class Error(Check):
status = ERROR
class ResponseInfo(Information):
"""Response information"""
def _func(self, conv=None):
self._status = self.status
_msg = conv.events.last_item(EV_RESPONSE)
if isinstance(_msg, str):
self._message = _msg
else:
self._message = _msg.to_dict()
return {}
class WrapException(CriticalError):
"""
A runtime exception
"""
cid = "exception"
msg = "Test tool exception"
def _func(self, conv=None):
self._status = self.status
self._message = traceback.format_exception(*sys.exc_info())
return {}
class Other(CriticalError):
""" Other error """
msg = "Other error"
class CheckHTTPResponse(CriticalError):
"""
Checks that the HTTP response status is within a specified range
"""
cid = "http_response"
msg = "Incorrect HTTP status_code"
def _func(self, conv):
_response = conv.events.last_item(EV_HTTP_RESPONSE)
res = {}
if not _response:
return res
if 'status_code' in self._kwargs:
if _response.status_code not in self._kwargs['status_code']:
self._status = self.status
self._message = self.msg
res["http_status"] = _response.status_code
else:
if _response.status_code >= 400:
self._status = self.status
self._message = self.msg
res["http_status"] = _response.status_code
return res
def factory(cid):
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj):
try:
if obj.cid == cid:
return obj
except AttributeError:
pass
return None
def get_provider_info(conv):
_pi = conv.entity.provider_info
if not _pi:
_pi = conv.provider_info
return _pi
def get_protocol_response(conv, cls):
return conv.events.get_messages(EV_PROTOCOL_RESPONSE, cls)
def get_protocol_request(conv, cls):
return conv.events.get_messages(EV_PROTOCOL_REQUEST, cls)
def get_id_tokens(conv):
res = []
# In access token responses
for inst in get_protocol_response(conv, message.AccessTokenResponse):
try:
res.append(inst["id_token"])
except KeyError:
pass
# implicit, id_token in authorization response
for inst in get_protocol_response(conv, message.AuthorizationResponse):
try:
res.append(inst["id_token"])
except KeyError:
pass
return res
def get_signed_id_tokens(conv):
res = []
for item in conv.events.get_data(EV_RESPONSE):
if isinstance(item, dict):
ent = item
else:
try:
ent = json.loads(item)
except Exception as err:
try:
ent = parse_qs(item)
except:
continue
else:
try:
res.append(ent['id_token'][0])
except KeyError:
pass
else:
continue
try:
res.append(ent['id_token'])
except KeyError:
pass
return res
def get_authorization_request(conv, cls):
authz_req = conv.events.get_data(EV_REDIRECT_URL)[0].split('?')[1]
return cls().from_urlencoded(authz_req)
| StarcoderdataPython |
3408131 | import requests
from bs4 import BeautifulSoup
import time
class Linktree:
    """Check which of a set of permuted usernames exist on Linktree and
    scrape the links listed on each existing profile."""

    def __init__(self, config, permutations_list):
        cfg = config['plateform']['linktree']
        # rate_limit is in milliseconds; requests are spaced by this delay (s).
        self.delay = cfg['rate_limit'] / 1000
        # e.g. "https://linktr.ee/{permutation}"
        self.format = cfg['format']
        # Linktree usernames are not case sensitive, so normalise to lowercase.
        self.permutations_list = [perm.lower() for perm in permutations_list]
        # Category tag for the results, e.g. "social".
        self.type = cfg['type']

    def possible_usernames(self):
        """Return the candidate profile URLs, one per permutation."""
        return [self.format.format(permutation=permutation)
                for permutation in self.permutations_list]

    def search(self):
        """Probe every candidate URL and return the accounts that exist.

        Returns a dict: {"type": <category>, "accounts": [<account>, ...]}.
        Network I/O: issues one GET per candidate, sleeping ``self.delay``
        seconds after each hit.
        """
        linktree_usernames = {
            "type": self.type,
            "accounts": []
        }
        for username in self.possible_usernames():
            try:
                r = requests.get(username, timeout=5)
            except requests.ConnectionError:
                print("failed to connect to linktree")
                # BUG FIX: the original fell through here and dereferenced the
                # unbound ``r`` below, raising NameError; skip this candidate.
                continue
            if r.status_code == 200:
                account = {"value": username}
                soup = BeautifulSoup(r.text, 'html.parser')
                # Scrape the user's links; best-effort, layout may change.
                try:
                    user_services = []
                    services = soup.find_all("div", {"data-testid": "StyledContainer"})
                    # The first container is not a service entry; skip it.
                    for service in services[1:]:
                        user_services.append({
                            "service": str(service.get_text().strip()),
                            "link": str(service.find_all('a', href=True)[0]['href'].strip())
                        })
                    account["user_services"] = {"name": "Services", "value": user_services}
                except Exception:
                    # Narrowed from a bare except; keep best-effort semantics.
                    pass
                linktree_usernames["accounts"].append(account)
                time.sleep(self.delay)
        return linktree_usernames
251133 | <reponame>baajur/benchm-ml<filename>4-DL/3-keras.py
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as K
import numpy as np
import time
import pandas as pd
from sklearn import metrics
# Load the 1M-row training set and the test set (CSV files expected in the
# working directory).
d_train = pd.read_csv("train-1m.csv")
d_test = pd.read_csv("test.csv")
# Stack train+test so the one-hot encoding below sees identical category
# levels for both splits. NOTE(review): DataFrame.append is deprecated in
# modern pandas (use pd.concat) -- this is a historical benchmark script.
d_train_test = d_train.append(d_test)
# Scale numeric features to roughly [0, 1]: DepTime is presumably an HHMM
# clock value (< 2500), and log10(Distance) stays below ~4 -- TODO confirm
# against the dataset's schema.
d_train_test["DepTime"] = d_train_test["DepTime"]/2500
d_train_test["Distance"] = np.log10(d_train_test["Distance"])/4
# Columns to one-hot encode vs. columns used as-is.
vars_categ = ["Month","DayofMonth","DayOfWeek","UniqueCarrier", "Origin", "Dest"]
vars_num = ["DepTime","Distance"]
def get_dummies(d, col):
    """One-hot encode column *col* of DataFrame *d*.

    Returns a DataFrame whose columns are renamed "<col>_<level>" so the
    dummies from different source columns never collide.
    """
    # BUG FIX: DataFrame.ix was deprecated and then removed from pandas;
    # .loc is the equivalent label-based selection.
    dd = pd.get_dummies(d.loc[:, col])
    dd.columns = [col + "_%s" % c for c in dd.columns]
    return dd
# One-hot encode every categorical column and glue the numeric columns on.
X_train_test_categ = pd.concat([get_dummies(d_train_test, col) for col in vars_categ], axis = 1)
# NOTE(review): .ix is removed in modern pandas (use .loc); kept here to
# preserve the historical benchmark verbatim.
X_train_test = pd.concat([X_train_test_categ, d_train_test.ix[:,vars_num]], axis = 1)
# Binary target: 1 when the departure was delayed >= 15 minutes.
y_train_test = np.where(d_train_test["dep_delayed_15min"]=="Y", 1, 0)
# Undo the earlier append(): the first d_train.shape[0] rows are train.
X_train = X_train_test[0:d_train.shape[0]]
y_train = y_train_test[0:d_train.shape[0]]
X_test = X_train_test[d_train.shape[0]:]
y_test = y_train_test[d_train.shape[0]:]
# NOTE(review): as_matrix() is removed in modern pandas (use .to_numpy()).
X_train = X_train.as_matrix()
X_test = X_test.as_matrix()
# Keras' categorical_crossentropy expects one-hot targets.
y_train = np_utils.to_categorical(y_train, 2)
# Two hidden layers of 200 ReLU units, softmax over the 2 classes.
model = Sequential()
model.add(Dense(200, activation = 'relu', input_dim = X_train.shape[1]))
model.add(Dense(200, activation = 'relu'))
model.add(Dense(2, activation = 'softmax'))
sgd = SGD(lr = 0.01, momentum = 0.9)
model.compile(loss = 'categorical_crossentropy', optimizer = sgd, metrics = ['accuracy'])
# Time a single training epoch (this script is a training-speed benchmark).
start = time.time()
# NOTE(review): nb_epoch is the Keras 1.x spelling; Keras 2 uses epochs=.
model.fit(X_train, y_train, batch_size = 128, nb_epoch = 1)
end = time.time()
print('Train time:', end - start, 'sec')
# Probability of the positive class, scored by AUC.
phat = model.predict_proba(X_test)[:,1]
metrics.roc_auc_score(y_test, phat)
## on Tensorflow:
## GPU:
# Train time: 34.6609380245 sec
# 0.71491495195154053
## CPU 4 cores
### export CUDA_VISIBLE_DEVICES=""
# Train time: 58.4619350433 sec
## on Theano:
## GPU
### export KERAS_BACKEND=theano
### export THEANO_FLAGS='cuda.root=/usr/local/cuda-7.5,device=gpu,floatX=float32,lib.cnmem=0.9'
## Train time: 23.2013888359 sec
## CPU - uses 1 core
### export KERAS_BACKEND=theano
# Train time: 68.615885973 sec
| StarcoderdataPython |
342953 | # -*- coding: utf-8 -*-
"""
Module for evaluating detection performance.
.. automodule:: pydriver.evaluation.evaluation
"""
from __future__ import absolute_import, division
from .evaluation import Evaluator, EvaluatorPoint
| StarcoderdataPython |
8111112 | <filename>src/sentry/utils/colors.py
from __future__ import absolute_import
import hashlib
import colorsys
def get_hashed_color(string, l=0.5, s=0.5):  # noqa: E741
    """Deterministically map *string* to a "#rrggbb" CSS color.

    The hue comes from the first three hex digits of the string's MD5;
    lightness *l* and saturation *s* default to 0.5.
    """
    digest = hashlib.md5(string.encode("utf-8")).hexdigest()
    hue = int(digest[:3], 16) / 4096.0
    rgb = colorsys.hls_to_rgb(hue, l, s)
    channels = tuple(int(channel * 255) for channel in rgb)
    return "#%02x%02x%02x" % channels
| StarcoderdataPython |
3267763 | <filename>src/rewriteu2a.py
#!/usr/bin/env python
# Replaces Unicode characters in input XML file text content with
# ASCII approximations based on file with mappings between the two.
# This is a component in a pipeline to convert PMC NXML files into
# text and standoffs. The whole pipeline can be run as
#
# python rewritetex.py FILE.xml -s | python rewriteu2a.py - -s | python respace.py - -s | python standoff.py - FILE.{txt,so}
from __future__ import with_statement
import sys
import os
import re
import codecs
from lxml import etree as ET
# The name of the file from which to read the replacement. Each line
# should contain the hex code for the unicode character, TAB, and
# the replacement string.
MAPPING_FILE_NAME = os.path.join(os.path.dirname(__file__),
'../data/entities.dat')
# XML tag to use to mark text content rewritten by this script.
REWRITTEN_TAG = 'n2t-u2a'
# XML attribute to use for storing the original for rewritten text.
ORIG_TEXT_ATTRIBUTE = 'orig-text'
# File into which to append unicode codepoints missing from the
# mapping, if any
MISSING_MAPPING_FILE_NAME = 'missing-mappings.txt'
INPUT_ENCODING="UTF-8"
OUTPUT_ENCODING="UTF-8"
# command-line options
options = None
# all codepoints for which a mapping was needed but not found
missing_mappings = set()
def read_mapping(f, fn="mapping data"):
    """
    Reads in mapping from Unicode to ASCII from the given input stream
    and returns a dictionary keyed by Unicode characters with the
    corresponding ASCII characters as values. The expected mapping
    format defines a single mapping per line, each with the format
    CODE\tASC where CODE is the Unicode code point as a hex number and
    ASC is the replacement ASCII string ("\t" is the literal tab
    character). Any lines beginning with "#" are skipped as comments.
    """
    # read in the replacement data
    linere = re.compile(r'^([0-9A-Za-z]{4,})\t(.*)$')
    mapping = {}
    for i, l in enumerate(f):
        # ignore lines starting with "#" as comments
        if len(l) != 0 and l[0] == "#":
            continue
        m = linere.match(l)
        assert m, "Format error in %s line %s: '%s'" % (fn, i+1, l.replace("\n","").encode("utf-8"))
        c, r = m.groups()
        # NOTE: unichr is Python 2 only -- this whole script targets
        # Python 2 (see the print >> usage elsewhere in the file).
        c = unichr(int(c, 16))
        # A codepoint may be listed twice only if both lines agree.
        assert c not in mapping or mapping[c] == r, "ERROR: conflicting mappings for %.4X: '%s' and '%s'" % (ord(c), mapping[c], r)
        # exception: literal '\n' maps to newline
        if r == '\\n':
            r = '\n'
        mapping[c] = r
    return mapping
def mapchar(c, mapping):
    """Return the ASCII replacement for character *c*.

    Unmapped codepoints are recorded in the module-level missing_mappings
    set; they map to '' by default, or to "<XXXX>" when --hex was given.
    """
    try:
        return mapping[c]
    except KeyError:
        pass
    # Keep a record of every codepoint we could not map.
    global missing_mappings, options
    missing_mappings.add("%.4X" % ord(c))
    # Drop the character by default; emit its codepoint as hex on request.
    if options.hex:
        return "<%.4X>" % ord(c)
    return ''
def replace_mapped_text(e, mapping):
    """Rewrite the first non-ASCII character in e.text in place.

    The replacement is wrapped in a new REWRITTEN_TAG child element (which
    records the original character) and the remaining text becomes that
    child's tail -- so later recursive processing handles the rest.
    """
    # TODO: inefficient, improve
    for i, c in enumerate(e.text):
        if ord(c) >= 128:
            s = mapchar(c, mapping)
            # create new element for the replacement
            r = ET.Element(REWRITTEN_TAG)
            r.attrib[ORIG_TEXT_ATTRIBUTE] = c
            r.text = s
            # ... make it the first child of the current element
            e.insert(0, r)
            # ... and split the text between the two
            r.tail = e.text[i+1:]
            e.text = e.text[:i]
            # terminate search; the rest of the text is now
            # in a different element
            break
def parent_index(e, parent):
    """Return the position of child *e* within *parent*, or None if it is
    not a direct child."""
    index = 0
    for child in parent:
        if child == e:
            return index
        index += 1
    return None
def replace_mapped_tail(e, mapping, parent):
    """Rewrite the first non-ASCII character in e.tail in place.

    The replacement is wrapped in a new REWRITTEN_TAG sibling inserted right
    after *e* in *parent*; the remaining tail text moves onto that sibling
    and is processed recursively until no non-ASCII characters remain.
    """
    # TODO: inefficient, improve
    for i, c in enumerate(e.tail):
        if ord(c) >= 128:
            s = mapchar(c, mapping)
            # create new element for the replacement
            r = ET.Element(REWRITTEN_TAG)
            r.attrib[ORIG_TEXT_ATTRIBUTE] = c
            r.text = s
            # ... make it the next child of the parent after the
            # current
            pidx = parent_index(e, parent)
            parent.insert(pidx+1, r)
            # ... and split the text between the two
            r.tail = e.tail[i+1:]
            e.tail = e.tail[:i]
            # process the rest in the new element
            replace_mapped_tail(r, mapping, parent)
            # terminate search; done in recursion.
            break
def replace_mapped(e, mapping, parent=None):
    """Recursively rewrite non-ASCII characters in an element tree.

    Handles this element's text, then all descendants, then this element's
    tail (the tail belongs to the parent's text flow, so the root -- which
    has no parent -- is skipped).
    """
    # Rewrite this element's own text content, if any.
    if e.text:
        replace_mapped_text(e, mapping)
    # Recurse into every child element.
    for child in e:
        replace_mapped(child, mapping, e)
    # Rewrite trailing text unless we are at the root.
    if parent is not None and e.tail:
        replace_mapped_tail(e, mapping, parent)
def process(fn, mapping):
    """Parse the XML file *fn*, rewrite its non-ASCII text, and emit it.

    Output goes to stdout with -s, otherwise to the -d directory (or the
    input's own directory). Refuses to overwrite the input unless -o was
    given. Python 2 syntax (print >>) is used throughout this script.
    """
    global options
    try:
        tree = ET.parse(fn)
    except ET.XMLSyntaxError:
        print >> sys.stderr, "Error parsing %s" % fn
        raise
    root = tree.getroot()
    replace_mapped(root, mapping)
    # processing done, output
    if options.stdout:
        tree.write(sys.stdout, encoding=OUTPUT_ENCODING)
        return True
    if options is not None and options.directory is not None:
        output_dir = options.directory
    else:
        output_dir = ""
    output_fn = os.path.join(output_dir, os.path.basename(fn))
    # TODO: better checking of path identify to protect against
    # clobbering.
    if output_fn == fn and not options.overwrite:
        print >> sys.stderr, 'rewriteu2a: skipping output for %s: file would overwrite input (consider -d and -o options)' % fn
    else:
        # OK to write output_fn
        try:
            with open(output_fn, 'w') as of:
                tree.write(of, encoding=OUTPUT_ENCODING)
        except IOError, ex:
            print >> sys.stderr, 'rewriteu2a: failed write: %s' % ex
    return True
def argparser():
    """Build the command-line argument parser for this script."""
    import argparse
    parser = argparse.ArgumentParser(description='Rewrite Unicode text content with approximately equivalent ASCII in PMC NXML files.')
    parser.add_argument('-d', '--directory', default=None, metavar='DIR', help='output directory')
    parser.add_argument('-o', '--overwrite', default=False, action='store_true', help='allow output to overwrite input files')
    parser.add_argument('-s', '--stdout', default=False, action='store_true', help='output to stdout')
    parser.add_argument('-x', '--hex', default=False, action='store_true', help='write hex sequence for missing mappings')
    parser.add_argument('file', nargs='+', help='input PubMed Central NXML file')
    return parser
def main(argv):
    """Entry point: load the codepoint mapping, then process each input file.

    Returns 0 on success, 1 when the mapping file cannot be read. Any
    codepoints that could not be mapped are appended to
    MISSING_MAPPING_FILE_NAME at the end of the run.
    """
    global options
    options = argparser().parse_args(argv[1:])
    # read in mapping
    try:
        mapfn = MAPPING_FILE_NAME
        if not os.path.exists(mapfn):
            # fall back to trying in script dir
            mapfn = os.path.join(os.path.dirname(__file__),
                                 os.path.basename(MAPPING_FILE_NAME))
        with codecs.open(mapfn, encoding="utf-8") as f:
            mapping = read_mapping(f, mapfn)
    except IOError, e:
        print >> sys.stderr, "Error reading mapping from %s: %s" % (MAPPING_FILE_NAME, e)
        return 1
    for fn in options.file:
        process(fn, mapping)
    # if there were any missing mappings and an output file name is
    # defined for these, try to append them in that file.
    if len(missing_mappings) > 0 and MISSING_MAPPING_FILE_NAME is not None:
        try:
            with open(MISSING_MAPPING_FILE_NAME, 'a+') as mmf:
                for mm in missing_mappings:
                    # NOTE(review): 'fn' here is simply the last file
                    # processed by the loop above, even if the missing
                    # codepoint came from an earlier file -- looks like a
                    # latent bug; confirm intended attribution.
                    print >> mmf, "%s\t%s" % (fn, mm)
        except IOError, e:
            print >> sys.stderr, "Warning: failed to write missing mappings to %s: %s" % (MISSING_MAPPING_FILE_NAME, e)
    return 0
# Script entry point: exit with main()'s status code.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| StarcoderdataPython |
1696228 | <reponame>Antonio-Neves/Brasil-Portugal<gh_stars>1-10
"""
Models for User Account
- The username is the Email and not a name.
- The user is staff
"""
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
class UserManager(BaseUserManager):
    """Manager for the email-keyed user model.

    The email doubles as the username; creation normalizes it, hashes the
    password and persists the instance.
    """

    use_in_migrations = True

    def _create_user(self, email, password, **extra_fields):
        """Normalize the email, build the user and save it to the DB."""
        if not email:
            raise ValueError('Email is required')
        email = self.normalize_email(email)
        # The username mirrors the email address by design.
        user = self.model(email=email, username=email, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, password=None, **extra_fields):
        """Create a regular (non-superuser) account."""
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(email, password, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Create a superuser; is_superuser and is_staff must both be True."""
        required_flags = {
            'is_superuser': 'Superuser need to be is_superuser=True',
            'is_staff': 'Superuser need to be is_staff=True',
        }
        for flag, message in required_flags.items():
            extra_fields.setdefault(flag, True)
            # Reject explicit False/None overrides from the caller.
            if extra_fields.get(flag) is not True:
                raise ValueError(message)
        return self._create_user(email, password, **extra_fields)
class CustomUser(AbstractUser):
    """User account keyed by email instead of a username.

    Accounts are staff members by default and belong to one department.
    """

    # Closed set of departments a team member can belong to.
    DEPARTMENT_CHOICES = (
        ('ad', 'Administration'),
        ('ed', 'Edition'),
        ('su', 'Subscription'),
    )
    # Email is the login identifier, so it must be unique.
    email = models.EmailField('Email', unique=True)
    # Every account is a team member by default.
    is_staff = models.BooleanField('Team member', default=True)
    # Two-letter department code; defaults to Subscription.
    department = models.CharField(
        'Department', max_length=2, default='su', choices=DEPARTMENT_CHOICES
    )
    # Authenticate with the email field; the extra fields below are prompted
    # for by createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name', 'department']

    class Meta:
        verbose_name = 'User'
        verbose_name_plural = 'Users'
        ordering = ['first_name']

    def __str__(self):
        return self.email

    # Custom manager that creates users keyed by email (see UserManager).
    objects = UserManager()
| StarcoderdataPython |
3318457 | <reponame>tomijarvi/kerrokantasi<filename>democracy/factories/user.py
# -*- coding: utf-8 -*-
import factory
import factory.fuzzy
from django.contrib.auth import get_user_model
class UserFactory(factory.django.DjangoModelFactory):
    """Test-data factory producing users of the project's user model with
    randomized username and Faker-generated personal details."""

    class Meta:
        model = get_user_model() # XXX: This makes this file not safe to import before `django.setup()`

    # Random unique-ish username; names/email come from Faker providers.
    username = factory.fuzzy.FuzzyText()
    first_name = factory.Faker("first_name", locale="fi")
    last_name = factory.Faker("last_name")
    email = factory.Faker("email")
| StarcoderdataPython |
6633587 | from math import *
from decimal import *
import numpy as np
# getcontext().prec sets how many significant decimal digits Decimal
# arithmetic carries (precision of the calculations below).
getcontext().prec = 8
# Redefine pi with far more digits than the float constant pulled in by
# `from math import *`.
pi = Decimal('3.141592653589793238462643383279502884197169399375')
# Gera valores igualmente espaçados
def igualmenteEspacados(n, inicio=0, fim=1):
    """Return n equally spaced Decimal points over [inicio, fim], endpoints
    included.

    BUG FIX: the original divided by (n - 1) unconditionally, so n == 1
    raised a division-by-zero error; a single point now yields [inicio].
    """
    if n == 1:
        return [Decimal(inicio)]
    ptos = []
    intervalo = Decimal(Decimal(fim - inicio) / (n - 1))
    for j in range(n):
        ptos.append(Decimal(inicio + j * intervalo))
    return ptos
# Gera valores com espaçamento de Chebychev
def chebyshev(n, inicio=0, fim=0):
    """Return n Chebyshev-spaced Decimal points (cos of equally spaced
    angles over [0, pi]), optionally rescaled to the interval [inicio, fim].

    NOTE(review): for n == 1 this returns a single Decimal (the interval
    midpoint offset), NOT a list like every other n -- callers must
    special-case it. Also the default fim=0 differs from
    igualmenteEspacados (fim=1); confirm both are intended.
    """
    if n == 1:
        return Decimal((fim-inicio)/2)
    # pi here is the module-level Decimal; np.arange therefore steps with
    # Decimal values -- presumably producing an object array. TODO confirm.
    cheb = list(map(cos, np.arange(0, pi+pi/(n-1), pi/(n-1))))
    if not(inicio == 0 and fim == 0):
        # Affine rescale of [-1, 1] onto [inicio, fim].
        intervalo = (fim-inicio)/2
        cheb = list(map(lambda x: inicio+intervalo+x*intervalo, cheb))
    return list(map(Decimal, cheb))
# Calcula valores de polinomios de 3º grau
def valoresPoli3(n, inicio, fim, a=(2, -7, -22, 8)):
    """Evaluate the cubic a[0]*x^3 + a[1]*x^2 + a[2]*x + a[3] at n equally
    spaced Decimal points over [inicio, fim].

    Returns the pair (Xs, Ys). The default coefficients are unchanged from
    the original code; the default is now a tuple to avoid the shared
    mutable-default-argument pitfall (callers may still pass a list).
    """
    Xs = []
    Ys = []
    intervalo = Decimal(Decimal(fim - inicio) / (n - 1))
    for i in range(n):
        Xs.append(Decimal(inicio + (i * intervalo)))
    for x in Xs:
        aux = Decimal(0.0)
        aux += a[0] * (x ** 3)
        aux += a[1] * (x ** 2)
        aux += a[2] * x
        aux += a[3]
        Ys.append(aux)
    return Xs, Ys
# função sin() para graus
def seno(numDec):
numRad = Decimal(pi/180)*Decimal(numDec)
return Decimal(sin(numRad))
# Cálculo do erro entre duas séries de valores
def erro(x, y):
if len(x) != len(y):
print('Erro: quantidade diferente de elementos')
exit()
erros = []
for i in range(len(x)):
erros.append(np.abs(y[i]-x[i]))
return erros | StarcoderdataPython |
204743 | """Given GO ids and an obo, creates a small sub-graph DAG.
Sub-graphs can be used to create shortcut paths and eliminate nodes.
"""
from collections import defaultdict
from goatools.godag_small import GODagSmall
__copyright__ = "Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved."
__author__ = "<NAME>"
class OboToGoDagSmall(object):
    """Given a list of GO ids and the full obo dag, return a sub GO DAG graph."""

    def __init__(self, **kws):
        # Initial setup: an empty small DAG that this builder populates.
        self.godag = GODagSmall()
        self.godag.go_sources = None
        # Seed go2obj/go_sources from one of the accepted kwarg forms.
        self._init_go2obj(**kws)
        # Initialize remainder of graph. Defaults: walk parents, not children.
        self.traverse_child = kws['traverse_child'] if 'traverse_child' in kws else False
        self.traverse_parent = kws['traverse_parent'] if 'traverse_parent' in kws else True
        # Visited-node sets exist only for the directions actually traversed.
        self.seen_cids = set() if self.traverse_parent else None
        self.seen_pids = set() if self.traverse_child else None
        self._init()
        assert self.traverse_child or self.traverse_parent, "NO EDGES IN GRAPH"

    def _init_go2obj(self, **kws):
        """Initialize go2obj in small dag for source gos.

        Accepts one of three input shapes: (goids + obodag), goid2goobj,
        or goea_results (enrichment records carrying .GO and .goterm).
        """
        if 'goids' in kws and 'obodag' in kws:
            self.godag.go_sources = kws['goids']
            obo = kws['obodag']
            for goid in self.godag.go_sources:
                self.godag.go2obj[goid] = obo[goid]
        elif 'goid2goobj' in kws:
            goid2goobj = kws['goid2goobj']
            self.godag.go_sources = goid2goobj.keys()
            for goid, goobj in goid2goobj.items():
                self.godag.go2obj[goid] = goobj
        elif 'goea_results' in kws:
            goea_results = kws['goea_results']
            self.godag.go_sources = [rec.GO for rec in goea_results]
            self.godag.go2obj = {rec.GO:rec.goterm for rec in goea_results}

    def _init(self):
        """Given GO ids and GOTerm objects, create mini GO dag."""
        for goid in self.godag.go_sources:
            goobj = self.godag.go2obj[goid]
            # NOTE(review): this re-assignment is a no-op (goobj was just
            # read from the same mapping).
            self.godag.go2obj[goid] = goobj
            # Traverse up parents
            if self.traverse_parent and goid not in self.seen_cids:
                self._traverse_parent_objs(goobj)
            # Traverse down children
            if self.traverse_child and goid not in self.seen_pids:
                self._traverse_child_objs(goobj)

    def _traverse_parent_objs(self, goobj_child):
        """Traverse from source GO up parents."""
        child_id = goobj_child.id
        # mark child as seen (prevents revisiting shared ancestors)
        self.seen_cids.add(child_id)
        self.godag.go2obj[child_id] = goobj_child
        # Loop through parents of child object
        for parent_obj in goobj_child.parents:
            parent_id = parent_obj.id
            # Record the parent -> {children} edge.
            self.godag.p_from_cs[parent_id].add(child_id)
            # If parent has not been seen, traverse
            if parent_id not in self.seen_cids:
                self._traverse_parent_objs(parent_obj)

    def _traverse_child_objs(self, goobj_parent):
        """Traverse from source GO down children."""
        parent_id = goobj_parent.id
        # mark parent as seen
        self.seen_pids.add(parent_id)
        self.godag.go2obj[parent_id] = goobj_parent
        # Loop through children
        for child_obj in goobj_parent.children:
            child_id = child_obj.id
            # Record the parent -> {children} edge.
            self.godag.p2cs[parent_id].add(child_id)
            # If child has not been seen
            if child_id not in self.seen_pids:
                self._traverse_child_objs(child_obj)
# Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.
| StarcoderdataPython |
11315363 | <reponame>carsonmclean/CSC411-CSC2515
import csv
import nltk
from nltk.tokenize import word_tokenize
import re, string; pattern = re.compile('[^a-zA-Z0-9_]+')
def write_fake():
    """Extract cleaned, Trump-related headlines from data/fake.csv and write
    them (deduplicated, one per line) to data/clean_fake.txt."""
    titles = set()
    try:
        # Best-effort parse: the source CSV is known to be messy, so any
        # read/parse error simply stops collection with what we have so far.
        with open("data/fake.csv") as infile:
            for line in csv.DictReader(infile):
                if not line['thread_title']:
                    continue
                otitle = line['thread_title'].lower()
                if "trump" not in otitle:
                    continue
                # Strip boilerplate markers before tokenizing.
                title = otitle.replace("(video)","") \
                    .replace("[video]","") \
                    .replace("re:","") \
                    .replace("?","") \
                    .replace("100percentfedup.com","")
                title = pattern.sub(' ', title)
                twords = word_tokenize(title)
                twords = [w for w in twords if w != 's']
                ntitle = ' '.join(twords)
                # "don t" -> "dont"; "wasn t" -> "wasnt"; etc
                ntitle = ntitle.replace("n t ", "nt ")
                titles.add(ntitle)
    except Exception:
        # Narrowed from a bare `except:`; still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        pass
    # BUG FIX: both file handles were previously opened without ever being
    # closed; context managers guarantee the output is flushed and closed.
    with open("data/clean_fake.txt", "w") as outfile:
        for ntitle in titles:
            outfile.write(ntitle + "\n")
def write_real():
    """Extract cleaned, Trump-related headlines from data/abcnews-date-text.csv
    and write them (deduplicated, one per line) to data/clean_real.txt."""
    titles = set()
    # BUG FIX: file handles were previously never closed; use `with`.
    with open("data/abcnews-date-text.csv") as infile:
        for line in csv.reader(infile):
            date = line[0]
            # Keep only recent headlines: string comparison on the "YYYYM"
            # prefix keeps 2016-10..12 and everything from 2017 onwards,
            # assuming zero-padded YYYYMMDD dates -- TODO confirm format.
            if date[:5] >= "20161":
                title = line[1].lower()
                if "trump" not in title:
                    continue
                title = pattern.sub(' ', title)
                twords = word_tokenize(title)
                twords = [w for w in twords if w != 's']
                ntitle = ' '.join(twords)
                titles.add(ntitle)
    with open("data/clean_real.txt", "w") as outfile:
        for ntitle in titles:
            outfile.write(ntitle + "\n")
# Script entry point: regenerate both cleaned headline files.
if __name__ == "__main__":
    write_fake()
    write_real()
| StarcoderdataPython |
6553510 | # Generated by Django 2.1.4 on 2019-02-11 22:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: updates choices/labels on three
    CarRealtimeTest fields. Prefer regenerating with makemigrations over
    editing this file by hand."""

    dependencies = [
        ('assets', '0005_carrealtimetest'),
    ]

    operations = [
        # Charging status gains a fourth state ("charging complete").
        migrations.AlterField(
            model_name='carrealtimetest',
            name='Chargingstatus',
            field=models.CharField(choices=[('0', '停车充电'), ('1', '行驶充电'), ('2', '未充电'), ('3', '充电完成')], max_length=24, verbose_name='充电状态'),
        ),
        # DC/DC converter working/disconnected flag.
        migrations.AlterField(
            model_name='carrealtimetest',
            name='DC_DC',
            field=models.CharField(choices=[('0', '工作'), ('1', '断开')], max_length=24, verbose_name='DC/DC状态'),
        ),
        # Vehicle state: started / off / other.
        migrations.AlterField(
            model_name='carrealtimetest',
            name='Statevehicle',
            field=models.CharField(choices=[('0', '启动'), ('1', '熄火'), ('2', '其他')], max_length=24, verbose_name='车辆状态'),
        ),
    ]
| StarcoderdataPython |
1970477 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\apartments\situations\neighbor_complaint_response.py
# Compiled at: 2016-03-24 21:44:32
# Size of source mod 2**32: 4158 bytes
from interactions.base.interaction import Interaction
from interactions.context import InteractionContext, QueueInsertStrategy
from interactions.priority import Priority
from sims4.tuning.instances import lock_instance_tunables
from sims4.utils import classproperty
from situations.bouncer.bouncer_types import BouncerExclusivityCategory
from situations.situation_complex import SituationComplexCommon, CommonSituationState, TunableSituationJobAndRoleState, SituationStateData, SituationState
class _WaitForNeighborToSpawnState(SituationState):
    # Placeholder state: the situation idles here until the neighbor sim is
    # assigned its job (see NeighborResponseSituation._on_set_sim_job, which
    # then advances to the answer-door state).
    pass
class _AnswerDoorState(CommonSituationState):
    """Situation state that pushes the tuned 'answer door' interaction on
    the neighbor sim as soon as the state activates.

    NOTE: this file is decompiled bytecode (see the header); the redundant
    parentheses below are decompiler artifacts and are preserved verbatim.
    """
    FACTORY_TUNABLES = {'interaction_to_push': Interaction.TunableReference(description='\n The interaction that will be pushed on all non-selectable sims\n when this situation state begins if there is a front door.\n ')}

    def __init__(self, *args, interaction_to_push=None, **kwargs):
        (super().__init__)(*args, **kwargs)
        # Tuned interaction to run when the state becomes active.
        self._interaction_to_push = interaction_to_push

    def on_activate(self, reader=None):
        super().on_activate(reader)
        # Only push if the owning situation has already resolved its
        # neighbor sim.
        if self.owner._neighbor_sim is not None:
            context = InteractionContext((self.owner._neighbor_sim), (InteractionContext.SOURCE_SCRIPT),
              (Priority.High),
              insert_strategy=(QueueInsertStrategy.NEXT))
            self.owner._neighbor_sim.push_super_affordance(self._interaction_to_push, self.owner._neighbor_sim, context)
class NeighborResponseSituation(SituationComplexCommon):
    """Decompiled situation driving a neighbor's response to a complaint:
    wait for the neighbor sim to spawn, then have them answer the door."""
    INSTANCE_TUNABLES = {'answer_door_state':_AnswerDoorState.TunableFactory(description='\n The situation state for the loud neighbor to answer the door.\n ',
      tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP,
      display_name='01_answer_door_situation_state'),
     'loud_neighbor_job_and_role_state':TunableSituationJobAndRoleState(description='\n The job and role state of the loud neighbor.\n ')}

    def __init__(self, *args, **kwargs):
        (super().__init__)(*args, **kwargs)
        # Filled in once the neighbor sim is assigned its job.
        self._neighbor_sim = None

    @classproperty
    def allow_user_facing_goals(cls):
        # This situation never shows player-facing goals.
        return False

    @classmethod
    def _states(cls):
        # Ordered state table: wait for spawn first, then answer the door.
        return (SituationStateData(1, _WaitForNeighborToSpawnState),
         SituationStateData(2, _AnswerDoorState, factory=(cls.answer_door_state)))

    @classmethod
    def default_job(cls):
        # No default job; the neighbor job comes from tuning below.
        pass

    def _on_set_sim_job(self, sim, job_type):
        """Remember the neighbor sim and advance to the answer-door state."""
        super()._on_set_sim_job(sim, job_type)
        self._neighbor_sim = sim
        self._change_state(self.answer_door_state())

    @classmethod
    def _get_tuned_job_and_default_role_state_tuples(cls):
        return [(cls.loud_neighbor_job_and_role_state.job, cls.loud_neighbor_job_and_role_state.role_state)]

    def start_situation(self):
        super().start_situation()
        self._change_state(_WaitForNeighborToSpawnState())
# Presumably fixes these tunable values so per-instance tuning cannot
# override them -- TODO confirm lock_instance_tunables semantics.
lock_instance_tunables(NeighborResponseSituation, exclusivity=(BouncerExclusivityCategory.NORMAL),
  _implies_greeted_status=False)
326538 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class SpotFleetRequest(pulumi.CustomResource):
    # NOTE: this class was generated by the Pulumi Terraform Bridge (tfgen);
    # regenerate rather than editing by hand (see the file header).
    allocation_strategy: pulumi.Output[str]
    """
    Indicates how to allocate the target capacity across
    the Spot pools specified by the Spot fleet request. The default is
    `lowestPrice`.
    """
    client_token: pulumi.Output[str]
    excess_capacity_termination_policy: pulumi.Output[str]
    """
    Indicates whether running Spot
    instances should be terminated if the target capacity of the Spot fleet
    request is decreased below the current size of the Spot fleet.
    """
    fleet_type: pulumi.Output[str]
    """
    The type of fleet request. Indicates whether the Spot Fleet only requests the target
    capacity or also attempts to maintain it. Default is `maintain`.
    """
    iam_fleet_role: pulumi.Output[str]
    """
    Grants the Spot fleet permission to terminate
    Spot instances on your behalf when you cancel its Spot fleet request using
    CancelSpotFleetRequests or when the Spot fleet request expires, if you set
    terminateInstancesWithExpiration.
    """
    instance_interruption_behaviour: pulumi.Output[str]
    """
    Indicates whether a Spot
    instance stops or terminates when it is interrupted. Default is
    `terminate`.
    """
    instance_pools_to_use_count: pulumi.Output[float]
    """
    The number of Spot pools across which to allocate your target Spot capacity.
    Valid only when `allocation_strategy` is set to `lowestPrice`. Spot Fleet selects
    the cheapest Spot pools and evenly allocates your target Spot capacity across
    the number of Spot pools that you specify.
    """
    launch_specifications: pulumi.Output[list]
    """
    Used to define the launch configuration of the
    spot-fleet request. Can be specified multiple times to define different bids
    across different markets and instance types.
    """
    load_balancers: pulumi.Output[list]
    """
    A list of elastic load balancer names to add to the Spot fleet.
    """
    replace_unhealthy_instances: pulumi.Output[bool]
    """
    Indicates whether Spot fleet should replace unhealthy instances. Default `false`.
    """
    spot_price: pulumi.Output[str]
    """
    The maximum bid price per unit hour.
    """
    spot_request_state: pulumi.Output[str]
    """
    The state of the Spot fleet request.
    """
    target_capacity: pulumi.Output[float]
    """
    The number of units to request. You can choose to set the
    target capacity in terms of instances or a performance characteristic that is
    important to your application workload, such as vCPUs, memory, or I/O.
    """
    target_group_arns: pulumi.Output[list]
    """
    A list of `aws_alb_target_group` ARNs, for use with Application Load Balancing.
    """
    terminate_instances_with_expiration: pulumi.Output[bool]
    """
    Indicates whether running Spot
    instances should be terminated when the Spot fleet request expires.
    """
    valid_from: pulumi.Output[str]
    """
    The start date and time of the request, in UTC [RFC3339](https://tools.ietf.org/html/rfc3339#section-5.8) format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately.
    """
    valid_until: pulumi.Output[str]
    """
    The end date and time of the request, in UTC [RFC3339](https://tools.ietf.org/html/rfc3339#section-5.8) format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. Defaults to 24 hours.
    """
    wait_for_fulfillment: pulumi.Output[bool]
    """
    If set, Terraform will
    wait for the Spot Request to be fulfilled, and will throw an error if the
    timeout of 10m is reached.
    """
    def __init__(__self__, resource_name, opts=None, allocation_strategy=None, excess_capacity_termination_policy=None, fleet_type=None, iam_fleet_role=None, instance_interruption_behaviour=None, instance_pools_to_use_count=None, launch_specifications=None, load_balancers=None, replace_unhealthy_instances=None, spot_price=None, target_capacity=None, target_group_arns=None, terminate_instances_with_expiration=None, valid_from=None, valid_until=None, wait_for_fulfillment=None, __name__=None, __opts__=None):
        """
        Provides an EC2 Spot Fleet Request resource. This allows a fleet of Spot
        instances to be requested on the Spot market.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] allocation_strategy: Indicates how to allocate the target capacity across
               the Spot pools specified by the Spot fleet request. The default is
               `lowestPrice`.
        :param pulumi.Input[str] excess_capacity_termination_policy: Indicates whether running Spot
               instances should be terminated if the target capacity of the Spot fleet
               request is decreased below the current size of the Spot fleet.
        :param pulumi.Input[str] fleet_type: The type of fleet request. Indicates whether the Spot Fleet only requests the target
               capacity or also attempts to maintain it. Default is `maintain`.
        :param pulumi.Input[str] iam_fleet_role: Grants the Spot fleet permission to terminate
               Spot instances on your behalf when you cancel its Spot fleet request using
               CancelSpotFleetRequests or when the Spot fleet request expires, if you set
               terminateInstancesWithExpiration.
        :param pulumi.Input[str] instance_interruption_behaviour: Indicates whether a Spot
               instance stops or terminates when it is interrupted. Default is
               `terminate`.
        :param pulumi.Input[float] instance_pools_to_use_count:
               The number of Spot pools across which to allocate your target Spot capacity.
               Valid only when `allocation_strategy` is set to `lowestPrice`. Spot Fleet selects
               the cheapest Spot pools and evenly allocates your target Spot capacity across
               the number of Spot pools that you specify.
        :param pulumi.Input[list] launch_specifications: Used to define the launch configuration of the
               spot-fleet request. Can be specified multiple times to define different bids
               across different markets and instance types.
        :param pulumi.Input[list] load_balancers: A list of elastic load balancer names to add to the Spot fleet.
        :param pulumi.Input[bool] replace_unhealthy_instances: Indicates whether Spot fleet should replace unhealthy instances. Default `false`.
        :param pulumi.Input[str] spot_price: The maximum bid price per unit hour.
        :param pulumi.Input[float] target_capacity: The number of units to request. You can choose to set the
               target capacity in terms of instances or a performance characteristic that is
               important to your application workload, such as vCPUs, memory, or I/O.
        :param pulumi.Input[list] target_group_arns: A list of `aws_alb_target_group` ARNs, for use with Application Load Balancing.
        :param pulumi.Input[bool] terminate_instances_with_expiration: Indicates whether running Spot
               instances should be terminated when the Spot fleet request expires.
        :param pulumi.Input[str] valid_from: The start date and time of the request, in UTC [RFC3339](https://tools.ietf.org/html/rfc3339#section-5.8) format(for example, YYYY-MM-DDTHH:MM:SSZ). The default is to start fulfilling the request immediately.
        :param pulumi.Input[str] valid_until: The end date and time of the request, in UTC [RFC3339](https://tools.ietf.org/html/rfc3339#section-5.8) format(for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance requests are placed or enabled to fulfill the request. Defaults to 24 hours.
        :param pulumi.Input[bool] wait_for_fulfillment: If set, Terraform will
               wait for the Spot Request to be fulfilled, and will throw an error if the
               timeout of 10m is reached.
        """
        # Backwards-compatibility shims for the deprecated __name__/__opts__
        # keyword spellings.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        __props__['allocation_strategy'] = allocation_strategy
        __props__['excess_capacity_termination_policy'] = excess_capacity_termination_policy
        __props__['fleet_type'] = fleet_type
        # iam_fleet_role, launch_specifications and target_capacity are the
        # required inputs of this resource.
        if iam_fleet_role is None:
            raise TypeError('Missing required property iam_fleet_role')
        __props__['iam_fleet_role'] = iam_fleet_role
        __props__['instance_interruption_behaviour'] = instance_interruption_behaviour
        __props__['instance_pools_to_use_count'] = instance_pools_to_use_count
        if launch_specifications is None:
            raise TypeError('Missing required property launch_specifications')
        __props__['launch_specifications'] = launch_specifications
        __props__['load_balancers'] = load_balancers
        __props__['replace_unhealthy_instances'] = replace_unhealthy_instances
        __props__['spot_price'] = spot_price
        if target_capacity is None:
            raise TypeError('Missing required property target_capacity')
        __props__['target_capacity'] = target_capacity
        __props__['target_group_arns'] = target_group_arns
        __props__['terminate_instances_with_expiration'] = terminate_instances_with_expiration
        __props__['valid_from'] = valid_from
        __props__['valid_until'] = valid_until
        __props__['wait_for_fulfillment'] = wait_for_fulfillment
        # Output-only properties are initialized to None.
        __props__['client_token'] = None
        __props__['spot_request_state'] = None

        super(SpotFleetRequest, __self__).__init__(
            'aws:ec2/spotFleetRequest:SpotFleetRequest',
            resource_name,
            __props__,
            opts)

    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names back to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| StarcoderdataPython |
1813766 | # Utility module for RF objects
#-------------------------------------------------------------------------------
import collections
import numpy as np
from probayes.vtypes import isscalar
from probayes.pscales import iscomplex, rescale, prod_rule, prod_pscale
#-------------------------------------------------------------------------------
def rv_prod_rule(*args, rvs, pscale=None):
  """ Returns the probability product treating all rvs as independent.
  Values (=args[0]) are keyed by RV name and rvs are a list of RVs.

  :param args: one or more dicts of values keyed by RV name; args[0] is the
               primary set, any further dicts are folded in (geometric mean
               in the linear domain, arithmetic mean in the log domain).
  :param rvs: list of RV objects providing eval_prob() and a pscale.
  :param pscale: output probability scale; defaults to the product of the
                 RVs' own pscales via prod_pscale().
  :returns: tuple (prob, pscale).
  """
  values = args[0]
  pscales = [rv.pscale for rv in rvs]
  pscale = pscale or prod_pscale(pscales)
  # A complex pscale marks the log domain (see probayes.pscales.iscomplex).
  use_logs = iscomplex(pscale)
  probs = [rv.eval_prob(values[rv.name]) for rv in rvs]
  prob, pscale = prod_rule(*tuple(probs),
                           pscales=pscales,
                           pscale=pscale)

  # This section below is there just to play nicely with conditionals
  if len(args) > 1:
    # Normalise to a canonical scale (0.j = natural log, 1. = linear) before
    # combining with the extra value sets, then rescale back at the end.
    if use_logs:
      prob = rescale(prob, pscale, 0.j)
    else:
      prob = rescale(prob, pscale, 1.)
    for arg in args[1:]:
      if use_logs:
        offs, _ = rv_prod_rule(arg, rvs=rvs, pscale=0.j)
        prob = prob + offs
      else:
        coef, _ = rv_prod_rule(arg, rvs=rvs, pscale=1.)
        prob = prob * coef
    # Average over the number of value sets: arithmetic mean of logs is the
    # geometric mean of linear probabilities, so the two branches agree.
    if use_logs:
      prob = prob / float(len(args))
      prob = rescale(prob, 0.j, pscale)
    else:
      prob = prob ** (1. / float(len(args)))
      prob = rescale(prob, 1., pscale)
  return prob, pscale
#-------------------------------------------------------------------------------
def call_scipy_prob(func, pscale, *args, **kwds):
  """Call the SciPy handler matching the domain of *pscale*.

  *func* is an indexable pair: entry 0 serves linear pscales, entry 1 serves
  log-domain (complex) pscales.  All remaining arguments are forwarded.
  """
  if iscomplex(pscale):
    handler = func[1]
  else:
    handler = func[0]
  return handler(*args, **kwds)
#-------------------------------------------------------------------------------
def sample_cond_cov(*args, cond_cov=None, unknown=None, **kwds):
  """Sample from a conditional covariance object by interpolation.

  :param args: either a single dict of named values, or positional values.
  :param cond_cov: mandatory object exposing interp(); the "coveig" object.
  :param unknown: name of the variable to solve for; its entry is replaced
                  by a placeholder before interpolation.
  :param kwds: alternative keyword form of the values; may include cond_pdf.
  :returns: whatever cond_cov.interp() returns.
  """
  kwds = dict(kwds)
  cond_pdf = False if 'cond_pdf' not in kwds else kwds.pop('cond_pdf')
  assert cond_cov, "coveig object mandatory"
  if len(args) == 1 and isinstance(args[0], dict):
    vals = dict(args[0])
    if unknown is not None:
      # Mark the unknown so it is excluded from the known-value conversion.
      vals[unknown] = None
    # NOTE(review): {0} (a set literal) appears to be the placeholder that
    # cond_cov.interp() recognises for the unknown slot — confirm against
    # the interp() implementation.
    args = [np.array(val) if val is not None else {0} \
            for val in vals.values()]
  elif not len(args) and len(kwds):
    vals = dict(kwds)
    if unknown is not None:
      kwds[unknown] = {0}
    args = list(kwds.values())
  return cond_cov.interp(*tuple(args), cond_pdf=cond_pdf)
#-------------------------------------------------------------------------------
def slice_by_keyvals(spec, vals, prob, vals_dims=None, spec_dims=None):
  """ Slices prob by values of spec in vals.
  :param spec: dictionary of {key:val} to match with vals.
  :param vals: dictionary of {key:val} describing prob.
  :param prob: a multidimensional NumPy array to slice.
  :param vals_dims: dictionary of dimensional decription for vals.
  :param spec_dims: dictionary of dimensional decription for spec.

  If vals_dims and/or spec_dims are not entered, they are 'guessed' from vals
  and spec respectively, but correct guessing is not assured. Non-none dimensions
  for vals_dims and spec_dims must be mutually ordered monotically by key.
  """

  # Check for consistent keys
  keys = list(spec.keys())
  assert set(keys) == set(vals.keys()), "Keys for spec and vals unmatched"

  # Function to default dimensions if not given
  def dims_from_vals(vals_dict):
    # Maps each key to None (scalar) or a running dimension index.
    if not isinstance(vals_dict, dict):
      raise TypeError("Dictionary type expected")
    dims = collections.OrderedDict()
    run_dim = 0
    for key, val in vals_dict.items():
      if isscalar(val):
        dims.update({key: None})
      else:
        # NOTE(review): `vals` here is the outer dict, which has no .shape —
        # if this assert is ever evaluated it raises AttributeError; the
        # intent was presumably np.product(val.shape). Confirm and fix.
        assert val.size == np.product(vals.shape), \
            "Multiple non-singleton dimensions: {}".format(val.size)
        if val.size > 1:
          run_dim = np.argmax(val.shape)
        dims.update({key: run_dim})
        run_dim += 1

  # Default spec_dims and vals_dims if not given
  if spec_dims is None:
    spec_dims = dims_from_vals(spec)
  else:
    assert set(spec.keys()) == set(spec_dims.keys()), \
        "Keys for spec and spec_dims unmatched"
  if vals_dims is None:
    vals_dims = dims_from_vals(vals)
  else:
    assert set(vals.keys()) == set(vals_dims.keys()), \
        "Keys for spec and spec_dims unmatched"

  # Determine maximum dimensionality of input
  vals_ndim = 0
  for dim in vals_dims.values():
    if dim:
      vals_ndim = max(vals_ndim, dim)

  # Determine maximum dimensionality of output from spec and spec_dims
  spec_ndim = 0
  for key, dim in spec_dims.items():
    if dim:
      spec_ndim = max(spec_ndim, dim)
    if not isscalar(spec[key]):
      spec_ndim = max(spec[key].ndim, dim)

  # Check for monotonic ordering of dimensions
  dims = [dim for dim in vals_dims.values() if dim is not None]
  if len(dims) > 1:
    assert np.min(np.diff(dims)) > 0, "Dimensionality not monotically ordered"
  dims = [dim for dim in spec_dims.values() if dim is not None]
  if len(dims) > 1:
    assert np.min(np.diff(dims)) > 0, "Dimensionality not monotically ordered"

  # Evaluate reshape and slices from matches between spec and vals
  reshape = [1] * spec_ndim
  slices = [slice(None) for _ in range(vals_ndim+1)]
  for i, key in enumerate(keys):
    if vals_dims[key] is None: # If source is scalar
      if not isscalar(spec[key]):
        assert np.all(spec[key] == vals[key]), \
            "Cannot slice by multiple values"
      elif spec[key] != vals[key]:
        # Scalar mismatch: no slice can match, return an empty array.
        return np.empty(np.zeros(len(dims), dtype=int), dtype=float)
      else:
        pass
    if spec_dims[key] is None: # If target is scalar
      dim = vals_dims[key]
      match = np.ravel(vals[key]) == spec[key]
      n_matches = match.sum()
      if n_matches == 0:
        slices[dim] = slice(0, 0)
      elif n_matches == 1:
        slices[dim] = np.nonzero(match)[0]
      else:
        raise ValueError("Non-unique matches found")
    else:
      assert np.all(np.ravel(vals[key]) == np.ravel(spec[key])), \
          "Ambiguous specification with values mismatch"
      dim = spec_dims[key]
      reshape[dim] = vals[key].size
  return prob[tuple(slices)].reshape(reshape)
#-------------------------------------------------------------------------------
| StarcoderdataPython |
143529 | <gh_stars>1000+
# Build script template for the GDBM extension; __PREFIX__ and __VERSION__
# are placeholders substituted by the packaging machinery before use.
try:
    import distutils
    from distutils import sysconfig
    from distutils.command.install import install
    from distutils.core import setup, Extension
except ImportError:
    # Fixed: the original used the Python 2-only "raise SystemExit, msg"
    # syntax (a SyntaxError on Python 3) and a bare except that would also
    # mask unrelated failures.  Only an import failure should abort here.
    raise SystemExit("Distutils problem")

prefix = "__PREFIX__"
inc_dirs = [prefix + "/include"]
lib_dirs = [prefix + "/lib"]
libs = ["gdbm"]

setup(name = "gdbm",
      version = "__VERSION__",
      description = "GDBM Extension to Python",
      ext_modules = [Extension('gdbm', ['gdbmmodule.c'],
                               include_dirs = inc_dirs,
                               libraries = libs,
                               library_dirs = lib_dirs)]
      )
| StarcoderdataPython |
1731695 | <filename>prj/Complaint.py
#main program
import spacy
import pandas as pd
import preprocess
from preprocess import data_clean #preprocess.py
import dataframes
from dataframes import dataframing
import tokenise
from tokenise import tokenisation
import frequency
from frequency import word_frequency
import topwords
from topwords import most_repeated_keywords
import testdata
from testdata import test
import department
from department import department_class
import predict
from predict import evaluate
class Complaint:
    """Wraps the complaints CSV dataset and the NLP pipeline used on it."""

    def __init__(self, dataset, nlp):
        """Load the CSV file at path *dataset* and keep the *nlp* pipeline."""
        self.dataset = pd.read_csv(dataset)
        self.nlp = nlp

    def department_class(self):
        """Return the unique department labels and the full DataFrame."""
        frame = self.dataset
        unique_departments = frame['Departments'].unique()
        return unique_departments, frame
#Execution begins
# Path to the complaints dataset; the remaining pipeline steps are kept
# commented out and can be re-enabled to run the full classification flow
# (load model -> clean -> split -> tokenise -> frequencies -> predict).
files = '/home/gayathri/project/MakeComplaint/data.csv'
#nlp = spacy.load('en_core_web_md')
#c = Complaint(files,nlp)
#category,dataset = c.department_class()
#dataset = preprocess.data_clean(dataset)
#dfwater,dfpwd,dfksrtc,dfkseb,dfenv = dataframes.dataframing(dataset)
#water_lemm,pwd_lemm,ksrtc_lemm,kseb_lemm,env_lemm = tokenise.tokenisation(dfwater,dfpwd,dfksrtc,dfkseb,dfenv)
#water_freq,pwd_freq,ksrtc_freq,kseb_freq,env_freq = frequency.word_frequency(water_lemm,pwd_lemm,ksrtc_lemm,kseb_lemm,env_lemm)
#water_lis,pwd_lis,ksrtc_lis,kseb_lis,env_lis=topwords.most_repeated_keywords(dfwater,dfpwd,dfksrtc,dfkseb,dfenv,water_freq,pwd_freq,ksrtc_freq,kseb_freq,env_freq,"manual")
#no need dept=department_class()
#keywords,item=testdata.test()
#predict.evaluate(keywords,item,water_lis,env_lis,pwd_lis,ksrtc_lis,kseb_lis,category,nlp)
5010197 | <reponame>tpimentelms/phonotactic-complexity<gh_stars>1-10
import numpy as np
import sys
sys.path.append('./')
from data_layer.parse import read_src_data
from model import opt_params
from util import argparser
from train_base import read_info, write_csv, convert_to_loader, _run_language
# Accumulates one row per (language, fold) run; the first row is the CSV header.
full_results = [['lang', 'fold', 'avg_len', 'test_shannon', 'test_loss',
                 'test_acc', 'val_loss', 'val_acc', 'best_epoch']]
def get_lang_df(lang, ffolder):
    """Load the source data from *ffolder* and keep only rows for *lang*."""
    frame = read_src_data(ffolder)
    mask = frame['Language_ID'] == lang
    return frame[mask]
def get_data_loaders_cv(ffolder, fold, nfolds, lang, token_map, concept_ids, verbose=True):
    """Build (train, val, test) loaders for one cross-validation fold of *lang*.

    Side effect: stores the concept split in the module-level global
    `data_split` so other code in this module can read the current fold.
    """
    global data_split
    data_split = get_data_split_cv(fold, nfolds, verbose=verbose)
    df = get_lang_df(lang, ffolder)
    train_loader = get_data_loader(df, data_split[0], token_map, 'train', concept_ids)
    val_loader = get_data_loader(df, data_split[1], token_map, 'val', concept_ids)
    test_loader = get_data_loader(df, data_split[2], token_map, 'test', concept_ids)
    return train_loader, val_loader, test_loader
def get_data_split_cv(fold, nfolds, verbose=True):
    """Flatten the canonical concept split from read_info() and re-split it
    into (train, val, test) for cross-validation fold *fold*."""
    _, _, data_split, _, _ = read_info()
    concepts = []
    for part in data_split:
        concepts.extend(part)
    return _get_data_split_cv(fold, nfolds, concepts, verbose=verbose)
def _get_data_split_cv(fold, nfolds, concepts, verbose=True):
part_size = int(len(concepts) / nfolds)
test_fold = (fold + 1) % nfolds
train_start_fold = 0 if test_fold > fold else (test_fold + 1)
train = concepts[train_start_fold * part_size:fold * part_size]
train += concepts[(fold + 2) * part_size:] if fold + 2 < nfolds else []
val = concepts[fold * part_size:(fold + 1) * part_size] if fold + 1 < nfolds else concepts[fold * part_size:]
test = concepts[(test_fold) * part_size:(test_fold + 1) * part_size] if test_fold + 1 < nfolds \
else concepts[(test_fold) * part_size:]
if verbose:
print('Train %d, Val %d, Test %d' % (len(train), len(val), len(test)))
return (train, val, test)
def get_data_loader(df, concepts, token_map, mode, concept_ids):
    """Encode the given concept subset and wrap it in a batched loader."""
    encoded = split_data(df, concepts, token_map, mode, concept_ids)
    return convert_to_loader(encoded, mode)
def split_data(df, concepts, token_map, mode, concept_ids):
    """Encode the IPA strings of the selected concepts as a padded matrix.

    Each row is SOW + phone tokens + EOW, padded with PAD; the last column
    stores the DataFrame index of the word.  Note the row width is derived
    from the longest raw IPA *string* (character count), not its token
    count, so every encoded row fits.  *mode* and *concept_ids* are kept
    for interface compatibility and are not used here.
    """
    wanted = set(concepts)
    subset = df[df['Concept_ID'].isin(wanted)]
    ipa_strings = subset['IPA'].values
    row_ids = subset.index
    width = max(len(s) for s in ipa_strings) + 3
    encoded = np.full((len(ipa_strings), width), token_map['PAD'], dtype=float)
    for row, (ipa, row_id) in enumerate(zip(ipa_strings, row_ids)):
        tokens = [token_map['SOW']]
        tokens += [token_map[phone] for phone in ipa.split(' ')]
        tokens.append(token_map['EOW'])
        encoded[row, :len(tokens)] = tokens
        encoded[row, -1] = row_id
    return encoded
def run_language_cv(lang, token_map, concept_ids, ipa_to_concept, args, embedding_size=None,
                    hidden_size=256, nlayers=1, dropout=0.2):
    """Train and evaluate one language across all 10 CV folds.

    Appends one row per fold to the module-level `full_results` table and
    writes it to disk after every fold.  Returns per-language averages.
    """
    global full_results, fold
    nfolds = 10
    avg_shannon, avg_test_shannon, avg_test_loss, avg_test_acc, avg_val_loss, avg_val_acc = 0, 0, 0, 0, 0, 0
    for fold in range(nfolds):
        print()
        print('Fold:', fold, end=' ')
        train_loader, val_loader, test_loader = get_data_loaders_cv(
            args.ffolder, fold, nfolds, lang, token_map, concept_ids)

        avg_len, shannon, test_shannon, test_loss, \
            test_acc, best_epoch, val_loss, val_acc = _run_language(
                lang, train_loader, val_loader, test_loader, token_map, ipa_to_concept,
                args, embedding_size=embedding_size, hidden_size=hidden_size,
                nlayers=nlayers, dropout=dropout, per_word=True)

        full_results += [[lang, fold, avg_len, test_shannon, test_loss, test_acc, val_loss, val_acc, best_epoch]]

        # Running mean over folds.
        avg_shannon += shannon / nfolds
        avg_test_shannon += test_shannon / nfolds
        avg_test_loss += test_loss / nfolds
        avg_test_acc += test_acc / nfolds
        avg_val_loss += val_loss / nfolds
        avg_val_acc += val_acc / nfolds

        # Persist partial results so an interrupted run keeps completed folds.
        write_csv(full_results, '%s/%s__full-results.csv' % (args.rfolder, args.model))

    return avg_len, avg_shannon, avg_test_shannon, avg_test_loss, avg_test_acc, avg_val_loss, avg_val_acc
def run_opt_language_cv(lang, token_map, concept_ids, ipa_to_concept, args):
    """Run cross-validation for *lang* using its pre-tuned hyperparameters."""
    emb, hidden, layers, drop = opt_params.get_opt_params(args.model, lang)
    print('Optimum hyperparams emb-hs: %d, hs: %d, nlayers: %d, drop: %.4f' %
          (emb, hidden, layers, drop))
    return run_language_cv(
        lang, token_map, concept_ids, ipa_to_concept, args,
        embedding_size=emb, hidden_size=hidden, nlayers=layers, dropout=drop)
def run_languages(args):
    """Run cross-validated training for every language and write summary CSVs."""
    languages, token_map, data_split, concept_ids, ipa_to_concept = read_info()
    print('Train %d, Val %d, Test %d' % (len(data_split[0]), len(data_split[1]), len(data_split[2])))
    results = [['lang', 'avg_len', 'shannon', 'test_shannon', 'test_loss', 'test_acc', 'val_loss', 'val_acc']]
    for i, lang in enumerate(languages):
        print()
        print('Lang:', i, end=' ')

        # --opt uses per-language tuned hyperparameters; otherwise defaults.
        if args.opt:
            avg_len, shannon, test_shannon, test_loss, \
                test_acc, val_loss, val_acc = run_opt_language_cv(lang, token_map, concept_ids, ipa_to_concept, args)
        else:
            avg_len, shannon, test_shannon, test_loss, \
                test_acc, val_loss, val_acc = run_language_cv(lang, token_map, concept_ids, ipa_to_concept, args)

        results += [[lang, avg_len, shannon, test_shannon, test_loss, test_acc, val_loss, val_acc]]
        # Re-written after each language so progress is never lost.
        write_csv(results, '%s/%s__results.csv' % (args.rfolder, args.model))

    write_csv(results, '%s/%s__results-final.csv' % (args.rfolder, args.model))
if __name__ == '__main__':
    # Entry point: this cross-validation script is only valid for northeuralex.
    args = argparser.parse_args(csv_folder='cv')
    assert args.data == 'northeuralex', 'this script should only be run with northeuralex data'
    run_languages(args)
| StarcoderdataPython |
244122 | from OctaHomeCore.OctaFiles.menus import *
class DeviceUsersSettingsNavBarItem(SettingsSideNavBarItem):
	# Sidebar entry for the "Device Logins" settings page.
	# Priority controls ordering within the settings side nav.
	Priority = 60
	DisplayName = "Device Logins"
	
	@property
	def Link(self):
		# URL of the DeviceUsers settings page, resolved by route name.
		return reverse('SettingsPage', kwargs={'page':'DeviceUsers'})
11309405 | import importlib
from django.conf import settings
def str_to_class(s):
    """Resolve a dotted path such as 'pkg.mod.Name' to the named attribute.

    Everything before the last dot is imported as a module; the final
    component is looked up on it.
    """
    module_path, _, attr_name = s.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, attr_name)
def concrete_list(l):
    """Resolve every dotted path in *l* to its class/attribute, as a list."""
    return [str_to_class(name) for name in l]
def unpack_requirements(reqs):
    """Resolve dotted-string keys inside every requirement dict in *reqs*."""
    return [concrete_nested_dict(req) for req in reqs]
def concrete_nested_dict(d):
    """Recursively replace dotted-string keys in *d* with the objects they name.

    A key is resolved via str_to_class() when it is a string containing an
    interior dot (e.g. 'app.models.User'); nested dict values are processed
    the same way.  The dict is modified in place and returned; non-dict
    inputs are returned unchanged.
    """
    if not isinstance(d, dict):
        return d
    # Fixed: iterate a snapshot of the keys.  The original iterated the live
    # keys view while popping and re-inserting entries, which can skip or
    # revisit keys (and is generally unsafe dict mutation during iteration).
    for key in list(d.keys()):
        if isinstance(key, str) and "." in key and not key.startswith(".") and not key.endswith("."):
            # An interior dot means we assume it names a submoduled class.
            d[str_to_class(key)] = d.pop(key)
    # Recurse into nested dict values (reassigning a value is size-safe).
    for key in d:
        if isinstance(d[key], dict):
            d[key] = concrete_nested_dict(d[key])
    return d
# Module-level configuration resolved once at import time from Django settings;
# dotted paths in the settings are converted to actual classes here.
IGNORE = concrete_list(settings.DB_POPULATOR_IGNORE)
ALLOW_MULTIPLE = concrete_list(settings.DB_POPULATOR_ALLOW_MULTIPLE)
REQUIREMENTS = unpack_requirements(settings.DB_POPULATOR_REQUIREMENTS)
| StarcoderdataPython |
3439417 | #!/usr/bin/env python3
from __future__ import print_function
import sys
import os
from array import array
import cv2
import numpy as np
import time
import openncc as ncc
import struct
# Detections below this confidence score are discarded.
min_score = 0.8
# struct format of the header prepended to every media packet.
media_head = 'iII13I'

def coordinate_is_valid(x1, y1, x2, y2):
    """Return True when (x1, y1)-(x2, y2) is a well-formed normalised box."""
    inside_unit_square = all(0 <= v <= 1 for v in (x1, y1, x2, y2))
    return inside_unit_square and x1 < x2 and y1 < y2

def print_hex(bytes):
    """Print each element of *bytes* as a space-separated hex literal."""
    print(" ".join(hex(int(item)) for item in bytes))

def get_header_info(spec):
    """Unpack a media header buffer and return its first three fields."""
    fields = struct.unpack(media_head, bytes(spec))
    return fields[0], fields[1], fields[2]
def main():
    """Run the license-plate detection/recognition demo on an OpenNCC camera.

    Loads firmware, configures a two-stage network (vehicle/plate detector
    cascaded into a plate recogniser), then loops pulling YUV frames and
    metadata, drawing boxes and decoded plate strings with OpenCV.
    """
    res = ncc.load_fw("./moviUsbBoot", "fw/flicRefApp.mvcmd")
    if res < 0:
        # Fixed: the original called printf(), which is not defined in Python
        # and raised a NameError instead of reporting the firmware failure.
        print('load firmware error!')
        sys.exit(1)

    print("get usb %d sdk versin %s" % (ncc.get_usb_version(), ncc.get_sdk_version()))
    print("get fw version: %s and ncc id %s" % (ncc.camera_get_fw_version(),
                                                ncc.camera_get_ncc_id()))

    # Enumerate the attached camera sensors and report their capabilities.
    sensors = ncc.CameraSensor()
    sensor1 = ncc.SensorModesConfig()
    if sensors.GetFirstSensor(sensor1) == 0:
        print("camera: %s, %dX%d@%dfps, AFmode:%d, maxEXP:%dus,gain[%d, %d]\n" % (
            sensor1.moduleName, sensor1.camWidth, sensor1.camHeight, sensor1.camFps,
            sensor1.AFmode, sensor1.maxEXP, sensor1.minGain, sensor1.maxGain))

    sensor2 = ncc.SensorModesConfig()
    while sensors.GetNextSensor(sensor2) == 0:
        print("camera: %s, %dX%d@%dfps, AFmode:%d, maxEXP:%dus,gain[%d, %d]\n" % (
            sensor2.moduleName, sensor2.camWidth, sensor2.camHeight, sensor2.camFps,
            sensor2.AFmode, sensor2.maxEXP, sensor2.minGain, sensor2.maxGain))

    ncc.camera_select_sensor(0)  # 0 1080p 1 4k
    cameraCfg = sensor1

    # First-stage network: vehicle/plate detector over the full frame.
    net1_info = ncc.Network1Par()
    net1_info.inputFormat = ncc.IMG_FORMAT_BGR_PLANAR
    net1_info.meanValue = [0.0, 0.0, 0.0]
    net1_info.stdValue = 1
    net1_info.isOutputYUV = 1
    net1_info.isOutputH26X = 1
    net1_info.isOutputJPEG = 1
    net1_info.imageWidth = cameraCfg.camWidth
    net1_info.imageHeight = cameraCfg.camHeight
    net1_info.startX = 0
    net1_info.startY = 0
    net1_info.endX = cameraCfg.camWidth
    net1_info.endY = cameraCfg.camHeight
    net1_info.inputDimWidth = 300
    net1_info.inputDimHeight = 300
    net1_info.extInputs = [0] * ncc.MAX_EXTINPUT_SIZE
    net1_info.modelCascade = 1
    net1_info.inferenceACC = 1

    # Second-stage network: plate recogniser on crops from stage one.
    net2_info = ncc.Network2Par()
    # Grow each detection box slightly before cropping for recognition.
    net2_info.startXAdj = -5
    net2_info.startYAdj = -5
    net2_info.endXAdj = 5
    net2_info.endYAdj = 5
    labelMask = [0] * ncc.MAX_LABEL_SIZE
    labelMask[2] = 1  # only label 2 is forwarded to the recogniser
    net2_info.labelMask = labelMask
    net2_info.minConf = 0.7
    net2_info.inputDimWidth = 94
    net2_info.inputDimHeight = 24
    net2_info.inputFormat = ncc.IMG_FORMAT_BGR_PLANAR
    net2_info.meanValue = [0.0, 0.0, 0.0]
    net2_info.stdValue = 1
    # Extra inputs are packed as fp16 values (two bytes each).
    count = ncc.MAX_EXTINPUT_SIZE // 2
    extInputs = array('h', [0] * count)
    extInputs[0] = int(ncc.f32Tof16(0))
    for i in range(1, 88):
        extInputs[i] = int(ncc.f32Tof16(1.0))
    net2_info.extInputs = extInputs.tobytes()
    net2_info.modelCascade = 0
    print("input 0={} 1={} type={}".format(int(ncc.f32Tof16(1.0)), extInputs[1], type(extInputs[1])))

    blob1 = "./blob/2020.3/vehicle-license-plate-detection-barrier-0106/vehicle-license-plate-detection-barrier-0106.blob"
    blob2 = "./blob/2020.3/license-plate-recognition-barrier-0001/license-plate-recognition-barrier-0001.blob"  # if par_Len=0 , cal param_size auto
    ret = ncc.sdk_net2_init(None, None,
                            blob1, net1_info, 0,
                            blob2, net2_info, 0)

    metasize = 2 * 1024 * 1024
    print("xlink_init ret=%d %d" % (ret, metasize))
    if (ret < 0):
        return

    # Region of interest used to map normalised detections back to pixels.
    oft_x = net1_info.startX
    oft_y = net1_info.startY
    dis_w = net1_info.endX - net1_info.startX
    dis_h = net1_info.endY - net1_info.startY

    offset = struct.calcsize(media_head)  # 64-byte packet header
    size = cameraCfg.camWidth * cameraCfg.camHeight * 2
    yuvbuf = bytearray(size + offset)
    metabuf = bytearray(metasize)

    ncc.camera_video_out(ncc.YUV420p, ncc.VIDEO_OUT_CONTINUOUS)
    bmeta = False
    while (True):
        size = ncc.GetYuvData(yuvbuf)
        if (size <= 0):
            time.sleep(0.1)  # wait 0.1 second for the next frame
            continue

        # Convert the raw I420 frame (after the 64-byte header) to BGR.
        numarry = np.array(yuvbuf[offset:size])
        yuv = numarry.reshape((int(cameraCfg.camHeight * 3 / 2), cameraCfg.camWidth))
        bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420, 3)

        size, outsize0, outsize1, item_num = ncc.GetMetaDataExt(metabuf)
        if (size > 0) and (item_num > 0):
            bmeta = True
            print("ret=%d size0=%d size1=%d,num=%d" % (size, outsize0, outsize1, item_num))

            # Stage-one output: fp16 detection records after header and index.
            from_i = offset + ncc.OUTPUT_INDEX_SIZE
            count = outsize0 // 2
            detMetadata = struct.unpack('H' * count, metabuf[from_i:from_i + outsize0])
            from_i += outsize0  # skip to stage-two output
            count = outsize1 // 2
            secondMeta = struct.unpack('H' * count * item_num, metabuf[from_i:size])

            for i in range(item_num):
                # Each detection record is 7 fp16 values:
                # image_id, label, score, x0, y0, x1, y1 (normalised coords).
                image_id = int(ncc.f16Tof32(detMetadata[i * 7 + 0]))
                if (image_id < 0):
                    break
                label = int(ncc.f16Tof32(detMetadata[i * 7 + 1]))
                score = ncc.f16Tof32(detMetadata[i * 7 + 2])
                x0 = ncc.f16Tof32(detMetadata[i * 7 + 3])
                y0 = ncc.f16Tof32(detMetadata[i * 7 + 4])
                x1 = ncc.f16Tof32(detMetadata[i * 7 + 5])
                y1 = ncc.f16Tof32(detMetadata[i * 7 + 6])

                if ((not coordinate_is_valid(x0, y0, x1, y1)) or (score < min_score) or (labelMask[label] == 0)):
                    continue

                # Map the normalised box into pixel coordinates and draw it.
                x = int(x0 * dis_w + oft_x)
                y = int(y0 * dis_h + oft_y)
                w = int((x1 - x0) * dis_w)
                h = int((y1 - y0) * dis_h)
                cv2.rectangle(bgr, (x, y), (x + w, y + h), (255, 128, 128), 2)

                # Decode the recogniser output for this detection: a list of
                # character indices terminated by -1.
                regMetadata = secondMeta[i * count:i * count + count]
                regRet = array('i')
                for j in range(88):
                    regRet.append(int(ncc.f16Tof32(regMetadata[j])))

                items = [
                    "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", \
                    "<Anhui>", "<Beijing>", "<Chongqing>", "<Fujian>", \
                    "<Gansu>", "<Guangdong>", "<Guangxi>", "<Guizhou>", \
                    "<Hainan>", "<Hebei>", "<Heilongjiang>", "<Henan>", \
                    "<HongKong>", "<Hubei>", "<Hunan>", "<InnerMongolia>", \
                    "<Jiangsu>", "<Jiangxi>", "<Jilin>", "<Liaoning>", \
                    "<Macau>", "<Ningxia>", "<Qinghai>", "<Shaanxi>", \
                    "<Shandong>", "<Shanghai>", "<Shanxi>", "<Sichuan>", \
                    "<Tianjin>", "<Tibet>", "<Xinjiang>", "<Yunnan>", \
                    "<Zhejiang>", "<police>", \
                    "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", \
                    "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", \
                    "U", "V", "W", "X", "Y", "Z" \
                ];

                result = ''
                for j in range(0, len(regRet)):
                    if (regRet[j] == -1):
                        break
                    result = result + items[regRet[j]]
                cv2.putText(bgr, result, (x, y - 20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)

        # Display at 70% scale; ESC (27) quits the loop.
        img_scaled = cv2.resize(bgr, None, fx=0.7, fy=0.7, interpolation=cv2.INTER_CUBIC)
        cv2.namedWindow('openncc', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('openncc', img_scaled)
        if (cv2.waitKey(20) == 27):
            break

    ncc.sdk_uninit()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Propagate main()'s return code to the shell, defaulting to success.
    sys.exit(main() or 0)
| StarcoderdataPython |
11253170 | """A Tcl kernel for Jupyter"""
__version__ = '0.0.4'
| StarcoderdataPython |
1920127 | <gh_stars>0
import datetime
from flask import *
import MySQLdb
import gc
def connection():
    """Open a connection to the local `inventory` MySQL database.

    Returns (cursor, connection).  Callers commit on the connection and
    query through the cursor.
    NOTE(review): credentials are hard-coded (root user, placeholder
    password) and connections are never explicitly closed by the callers
    in this module — move credentials to configuration and consider
    closing/reusing connections.
    """
    conn = MySQLdb.connect(host="localhost",
                           user="root",
                           passwd="<PASSWORD>",
                           db="inventory")
    c = conn.cursor()
    return c, conn
# Flask application instance shared by all route handlers below.
app = Flask(__name__)
@app.route('/', methods=["GET", "POST"])
def login():
    """Login page: GET renders the form, POST checks the credentials.

    NOTE(review): passwords are compared in plaintext against the users
    table, and an "admin" account is hard-coded — both should be replaced
    with salted password hashes and configuration.  Any lookup failure is
    reported as invalid credentials by the broad except handler.
    """
    error=''
    try:
        c,conn = connection()
        if request.method == "POST":
            attempted_username = request.form['username']
            attempted_password = request.form['password']
            if attempted_username == "admin" and attempted_password == "<PASSWORD>":
                return render_template('home.html')
            # Parameterized lookup of the submitted username.
            data = c.execute("SELECT * FROM users WHERE username = (%s)",
                             request.form['username'])
            data = c.fetchone()
            if attempted_username == data[0] and attempted_password == data[1]:
                return render_template('home.html')
            else:
                error="Invalid Credentials , try again."
        gc.collect()
        return render_template('login.html',error =error)
    except Exception as e:
        # flash(e)
        error = "Invalid credentials, try again."
        return render_template('login.html', error=error)
@app.route('/stock/')
def print_stock():
    """Render the full stock table (id, name, quantity, price)."""
    c, conn = connection()
    cursor = c.execute('SELECT * FROM stock')
    item = [dict(pid=row[0],
                 pname=row[1],
                 quantity=row[2],
                 price=row[3]) for row in c.fetchall()]
    return render_template('stock.html', items=item)
@app.route('/home/')
def home():
    """Render the main dashboard page."""
    return render_template('home.html')
@app.route('/bill/',methods = ["GET","POST"])
def bill():
    """Billing page: GET shows the product list, POST records a sale.

    A POST validates stock for every line item, inserts the bill header and
    its items, and decrements stock quantities.
    NOTE(review): SQL statements are built with %-formatting from form
    input (SQL injection risk) and the insert/update sequence is not a
    single transaction — use parameterized queries and one commit.
    """
    error =''
    try:
        c, conn = connection()
        cursor = c.execute('SELECT * FROM stock')
        item = [dict(pname=row[1], price=row[3]) for row in c.fetchall()]
        if request.method == "POST":
            # Collect the dynamic product_name<i>/qty<i>/price<i> form rows.
            i = 0
            name = "123"
            productarr = []
            qtyarr = []
            pricearr =[]
            while 'product_name' + str(i) in request.form:
                n = "product_name" + str(i)
                q = "qty" + str(i)
                p = "price" + str(i)
                name = request.form[n]
                qty = request.form[q]
                price = request.form[p]
                productarr.append(name)
                qtyarr.append(qty)
                pricearr.append(price)
                i=i+1
            sql=[]
            date = request.form['date']
            total = request.form['total']
            # Next bill number = highest existing bill number + 1.
            data = c.execute('SELECT * FROM bill ORDER BY bill_no DESC LIMIT 1')
            data = c.fetchone()
            bill_number = data[0]
            bill_number = bill_number + 1
            # First pass: verify every requested quantity is in stock.
            for j in range(1, len(productarr) + 1):
                pname = productarr[j - 1]
                qty = qtyarr[j - 1]
                data = c.execute("SELECT * FROM stock WHERE pname = '%s'" % (pname))
                data = c.fetchone()
                qty_total = int(data[2]) - int(qty)
                if qty_total < 0:
                    error = "Product Out Of Stock!!"
                    return render_template('bill.html',items=item,error=error)
            # Record the bill header, then its line items.
            sql.append("INSERT INTO bill VALUES('%s','%s','%s')" %(bill_number,date,total))
            cursor = c.execute(sql[0])
            conn.commit()
            for j in range(1,len(productarr)+1) :
                pname = productarr[j - 1]
                qty = qtyarr[j - 1]
                price = pricearr[j - 1]
                sql.append("INSERT INTO items VALUES('%s','%s','%s','%s')" %(bill_number,pname,qty,price))
                cursor = c.execute(sql[j])
                conn.commit()
            # Final pass: decrement the stock quantities that were sold.
            for j in range(1, len(productarr) + 1):
                pname = productarr[j - 1]
                qty = qtyarr[j - 1]
                data = c.execute("SELECT * FROM stock WHERE pname = '%s'" % (pname))
                data = c.fetchone()
                qty_total = int(data[2]) - int(qty)
                sqlBill="UPDATE stock SET qty ='%s' WHERE pname='%s'" % (qty_total, pname)
                cursor = c.execute(sqlBill)
                conn.commit()
            return render_template('success.html')
        return render_template('bill.html',items=item,error=error)
    except Exception as e:
        print(e)
        return render_template('error.html')
@app.route('/ae/',methods = ["GET","POST"])
def add_employee():
    """Create an employee: login row, person row, and employee details.

    NOTE(review): passwords are stored in plaintext and SQL is built via
    %-formatting from form input — hash passwords and use parameterized
    queries.
    """
    error=''
    try:
        c, conn = connection()
        if request.method == "POST":
            username = request.form['username']
            password = request.form['password']
            confirmpassword = request.form['confirmpassword']
            if( password != confirmpassword):
                error='Password Mismatch'
                return render_template('add_employee.html', error=error)
            firstname = request.form['firstname']
            middlename = request.form['middlename']
            lastname = request.form['lastname']
            dob = request.form['dob']
            # Form sends dd-mm-YYYY; the database stores YYYY-mm-dd.
            dob = datetime.datetime.strptime(dob, "%d-%m-%Y").strftime("%Y-%m-%d")
            ph_number = request.form['ph_number']
            address = request.form['address']
            # Next person id = highest existing id + 1.
            data = c.execute('SELECT * FROM person ORDER BY id DESC LIMIT 1')
            data = c.fetchone()
            id = data[0]
            id = id + 1
            salary = request.form['salary']
            sqllogin = "INSERT INTO users VALUES('%s','%s')" %(username,password)
            cursor = c.execute(sqllogin)
            conn.commit()
            sqlpersonal = "INSERT INTO person VALUES('%s','%s','%s','%s','%s')" % (id,firstname,middlename,lastname,ph_number)
            cursor = c.execute(sqlpersonal)
            conn.commit()
            sqlothers = "INSERT INTO employee VALUES('%s','%s','%s','%s')" % (id,salary,address,dob)
            cursor = c.execute(sqlothers)
            conn.commit()
            return render_template('home.html')
        return render_template('add_employee.html',error=error)
    except Exception as e:
        print(e)
        return render_template('error.html')
@app.route('/as/',methods = ["GET","POST"])
def add_supplier():
    """Create a supplier: inserts a person row plus a supplier row.

    NOTE(review): SQL is built via %-formatting from form input — use
    parameterized queries.
    """
    try:
        c, conn = connection()
        if request.method == "POST":
            firstname = request.form['fname']
            middlename = request.form['mname']
            lastname = request.form['lname']
            ph_number = request.form['ph_number']
            email = request.form['email']
            # Next person id = highest existing id + 1.
            data = c.execute('SELECT * FROM person ORDER BY id DESC LIMIT 1')
            data = c.fetchone()
            id = data[0]
            id = id + 1
            sqlpersonal = "INSERT INTO person VALUES('%s','%s','%s','%s','%s')" % (id, firstname, middlename, lastname, ph_number)
            cursor = c.execute(sqlpersonal)
            conn.commit()
            sqlsupplier = "INSERT INTO supplier VALUES('%s','%s')" %(id,email)
            cursor = c.execute(sqlsupplier)
            conn.commit()
            return render_template('home.html')
        return render_template('add_supplier.html')
    except Exception as e:
        print(e)
        return render_template('error.html')
@app.route('/vs/',methods = ["GET","POST"])
def view_supplier():
    """List all suppliers (person joined with supplier details)."""
    try:
        c, conn = connection()
        cursor = c.execute('SELECT * FROM person NATURAL JOIN supplier')
        # Row layout: id, firstname, middlename, lastname, ph_number, email.
        item = [dict(id=row[0], name=row[1] + " " + row[3],ph_number = row[4],email = row[5]) for row in c.fetchall()]
        return render_template('view_supplier.html',items = item)
    except Exception as e:
        print(e)
        return render_template('error.html')
@app.route('/ve/',methods = ["GET","POST"])
def view_employee():
    """List all employees (person joined with employee details)."""
    try:
        c, conn = connection()
        cursor = c.execute('SELECT * FROM person NATURAL JOIN employee')
        # Row layout: id, firstname, middlename, lastname, ph_number, salary, address, dob.
        item = [dict(id=row[0], name=row[1] + " " + row[3],ph_number = row[4],salary = row[5],dob=row[7]) for row in c.fetchall()]
        return render_template('view_employee.html',items=item)
    except Exception as e:
        print(e)
        return render_template('error.html')
@app.route('/order/',methods = ["GET","POST"])
def order():
    """Purchase-order page: POST records an order and restocks products.

    Inserts a purchase header and its order rows, then adds the ordered
    quantities to stock (initialising quantity/price for products that had
    none).
    NOTE(review): SQL is built via %-formatting from form input — use
    parameterized queries.
    """
    try:
        c, conn = connection()
        cursor = c.execute('SELECT * FROM stock')
        item = [dict(pname=row[1]) for row in c.fetchall()]
        if request.method == "POST":
            # Collect the dynamic product_name<i>/qty<i>/price<i> form rows.
            i = 0
            name = "123"
            productarr = []
            qtyarr = []
            pricearr = []
            while 'product_name' + str(i) in request.form:
                n = "product_name" + str(i)
                q = "qty" + str(i)
                p = "price" + str(i)
                name = request.form[n]
                qty = request.form[q]
                price = request.form[p]
                productarr.append(name)
                qtyarr.append(qty)
                pricearr.append(price)
                i = i + 1
            sql = []
            date = request.form['date']
            total = request.form['total']
            # Next order id = highest existing order id + 1.
            data = c.execute('SELECT * FROM purchase ORDER BY oid DESC LIMIT 1')
            data = c.fetchone()
            oid = data[0]
            oid = oid + 1
            sql.append("INSERT INTO purchase VALUES('%s','%s','%s')" % (oid, date, total))
            cursor = c.execute(sql[0])
            conn.commit()
            # Record one order_table row per line item.
            for j in range(1, len(productarr) +1 ):
                pname = productarr[j - 1]
                qty = qtyarr[j - 1]
                price = pricearr[j - 1]
                data = c.execute("SELECT * FROM stock WHERE pname = '%s'" %(pname))
                data = c.fetchone()
                pid = data[0]
                sqlOrder = "INSERT INTO order_table VALUES('%s','%s','%s','%s')" % (oid, pid, qty, price)
                cursor = c.execute(sqlOrder)
                conn.commit()
            # Add the ordered quantities to stock.
            for j in range(1, len(productarr) + 1):
                pname = productarr[j - 1]
                qty = qtyarr[j - 1]
                price = pricearr[j - 1]
                data = c.execute("SELECT * FROM stock WHERE pname = '%s'" % (pname))
                data = c.fetchone()
                nullCheck = data[2]
                if nullCheck is None:
                    # Product had no quantity yet: set both quantity and price.
                    qty_total = qty
                    sql.append("UPDATE stock SET qty ='%s' WHERE pname='%s'" % (qty_total, pname))
                    cursor = c.execute(sql[j])
                    conn.commit()
                    c.execute("UPDATE stock SET price ='%s' WHERE pname='%s'" %(price, pname))
                    conn.commit()
                else:
                    qty_total = int(data[2]) + int(qty)
                    sql.append("UPDATE stock SET qty ='%s' WHERE pname='%s'" % (qty_total,pname))
                    cursor = c.execute(sql[j])
                    conn.commit()
            return render_template('success.html')
        return render_template('order.html', items=item)
    except Exception as e:
        print(e)
        return render_template('error.html')
@app.route('/ue/',methods = ["GET","POST"])
def update_employee():
    """Employee maintenance page: show, update, or delete one employee.

    The POST branch is selected by the submit button's value:
    "Show Details" loads the record into the form, "DELETE" removes the
    person row (employee follows via the schema), "Submit" writes edits.
    NOTE(review): SQL is built via %-formatting from form input — use
    parameterized queries.
    """
    try:
        c, conn = connection()
        cursor = c.execute("SELECT id FROM employee")
        item = [dict(id=row[0]) for row in c.fetchall()]
        if request.method == "POST":
            if request.form['submit'] == "Show Details":
                id = request.form['id']
                c.execute("SELECT * FROM employee NATURAL JOIN person WHERE id ='%s'" %id )
                row = c.fetchone()
                # Row layout: id, salary, address, dob, firstname,
                # middlename, lastname, ph_number.
                salary = int(row[1])
                address = str(row[2])
                dob = str(row[3])
                firstanme = str(row[4])
                middlename =str(row[5])
                lastname= str(row[6])
                ph_number = str(row[7])
                return render_template('update_employee.html',id=id, items=item, salary=salary, address=address, dob=dob,
                                       firstname=firstanme,userid=id,
                                       middlename=middlename, lastname=lastname, ph_number=ph_number)
            if request.form['submit'] == "DELETE" :
                id = request.form['id']
                c.execute("DELETE FROM person WHERE id ='%s'" %id)
                conn.commit()
                return render_template('home.html')
            if request.form['submit'] == "Submit" :
                # Write each edited field back, one UPDATE per column.
                id = request.form['userid']
                firstname = request.form['firstname']
                middlename = request.form['middlename']
                lastname = request.form['lastname']
                dob = request.form['dob']
                ph_number = request.form['ph_number']
                address = request.form['address']
                salary = request.form['salary']
                c.execute("UPDATE person SET firstname='%s' WHERE id='%s'" %(firstname,id))
                conn.commit()
                c.execute("UPDATE person SET middlename='%s' WHERE id='%s'" % (middlename, id))
                conn.commit()
                c.execute("UPDATE person SET lastname='%s' WHERE id='%s'" % (lastname, id))
                conn.commit()
                c.execute("UPDATE person SET ph_number='%s' WHERE id='%s'" % (ph_number, id))
                conn.commit()
                c.execute("UPDATE employee SET salary='%s' WHERE id='%s'" % (salary, id))
                conn.commit()
                c.execute("UPDATE employee SET address='%s' WHERE id='%s'" % (address, id))
                conn.commit()
                c.execute("UPDATE employee SET dob='%s' WHERE id='%s'" % (dob, id))
                conn.commit()
                return render_template('home.html')
        return render_template('update_employee.html',items=item)
    except Exception as e:
        print(e)
        return render_template('error.html')
@app.route('/new_product/', methods=["GET", "POST"])
def new_product():
    """Create one or more products, each with a matching stock row.

    The form posts fields ``product_name0``, ``product_name1``, ...; each
    product receives the next id after the current maximum ``pid`` in the
    ``stock`` table.
    """
    try:
        c, conn = connection()
        if request.method == "POST":
            c.execute('SELECT * FROM stock ORDER BY pid DESC LIMIT 1')
            last = c.fetchone()
            # Robustness: an empty stock table previously crashed with a
            # TypeError on last[0]; start the ids at 1 in that case.
            # NOTE(review): max+1 id assignment is racy under concurrent
            # requests -- an AUTO_INCREMENT column would be safer.
            next_id = (last[0] + 1) if last else 1
            names = []
            i = 0
            while 'product_name' + str(i) in request.form:
                names.append(request.form['product_name' + str(i)])
                i += 1
            for pname in names:
                # Parameterized statements (the original %-formatting was an
                # SQL-injection hole).
                c.execute("INSERT INTO product VALUES(%s, %s)", (next_id, pname))
                c.execute("INSERT INTO stock VALUES(%s, %s, NULL, NULL)", (next_id, pname))
                conn.commit()
                next_id += 1
            return render_template('home.html')
        return render_template('new_product.html')
    except Exception as e:
        # Broad catch preserved from the original behaviour.
        print(e)
        return render_template('error.html')
if __name__ == '__main__':
    # Removed the dataset artifact that was fused onto this line.
    # debug=True exposes the Werkzeug interactive debugger (arbitrary code
    # execution) -- never enable it in production.
    app.run(debug=True)
'''
A collection of example models that can be fed into zap-cosmics.
'''
from .imports import *
class Model():
    '''
    Base class for light-curve models: provides a shared plotting helper.
    Subclasses are expected to be callable on an array of times.
    '''

    def plot(self, tmin=-0.5, tmax=0.5, n=1000, **plotkw):
        '''Evaluate the model on a uniform time grid and plot it, labelled
        with the model's repr.  Extra keywords pass through to plt.plot.'''
        grid = np.linspace(tmin, tmax, n)
        values = self(grid)
        plt.plot(grid, values, label='{}'.format(self), **plotkw)
class Flare(Model):
    '''
    A fast-rising exponential decay: zero before ``start``, then
    amplitude * exp(-(t - start)/decay) afterwards.
    '''
    def __init__(self, start=0.02, decay=0.1, amplitude=0.5):
        self.start = start          # time at which the flare turns on
        self.decay = decay          # e-folding decay timescale
        self.amplitude = amplitude  # peak height at t == start

    def __call__(self, t):
        # dtype=float generalizes the model to integer time arrays (the
        # original zeros_like inherited an int dtype and truncated).
        m = np.zeros_like(t, dtype=float)
        afterstart = t > self.start
        m[afterstart] += self.amplitude*np.exp(-(t[afterstart]-self.start)/self.decay)
        return m

    def __repr__(self):
        # Bug fix: the original signature was __repr__(arg) and referenced
        # self.ampltiude, so repr() always raised NameError/AttributeError.
        return '<flare with {:.2}*exp(-t/{:.2})>'.format(self.amplitude, self.decay)
class ManyFlares(Model):
    '''
    A superposition of N randomly drawn flares on top of a unit baseline.
    '''
    def __init__(self, N=25):
        self.N = N
        # Draw a start time, decay constant and amplitude for each flare.
        starts = np.random.uniform(-0.5, 0.5, N)
        decays = np.random.uniform(0, 0.05, N)
        amplitudes = np.random.uniform(0, 0.5, N)
        self.flares = [Flare(start=s, decay=d, amplitude=a)
                       for s, d, a in zip(starts, decays, amplitudes)]

    def __call__(self, t):
        total = np.ones_like(t)
        for flare in self.flares:
            total += flare(t)
        return total

    def __repr__(self):
        return '<{} random flares>'.format(self.N)
class Transit(Model):
    '''
    A transit model: a thin wrapper around the ``batman`` package.
    '''
    def __init__(self, epoch=0.0,
                       period=1.0,
                       rp_over_rs=0.1,
                       a_over_rs=15.0,
                       impact_parameter=0.5,
                       eccentricity=0.0,
                       omega=90.0,
                       limb_coef=None,
                       limb_type="quadratic"
                       ):
        '''
        Initialize a transit object and set its batman parameters.

        epoch/period give the ephemeris; rp_over_rs and a_over_rs the
        geometry; limb_coef/limb_type select the limb-darkening law
        (limb_coef defaults to [0.1, 0.3] -- a None sentinel replaces the
        original mutable default argument).
        '''
        import batman
        self.batman = batman
        self.params = batman.TransitParams()
        self.params.t0 = epoch
        self.params.per = period
        self.params.rp = rp_over_rs
        self.params.a = a_over_rs
        # Convert impact parameter to an orbital inclination in degrees.
        inclination = np.arccos(impact_parameter/a_over_rs)*180.0/np.pi
        self.params.inc = inclination
        self.params.ecc = eccentricity
        self.params.w = omega
        self.params.u = [0.1, 0.3] if limb_coef is None else limb_coef
        self.params.limb_dark = limb_type

    def __call__(self, t):
        # Rebuild the batman TransitModel only when the time grid changes.
        # Bug fix: the original compared against ``self.t``, which was never
        # assigned, so the cached model was rebuilt on every single call;
        # np.array_equal also handles grids of different lengths safely.
        if getattr(self, 'batmant', None) is None or not np.array_equal(self.batmant, t):
            self.batmant = t
            self.batmanmodel = self.batman.TransitModel(self.params, t)
        return self.batmanmodel.light_curve(self.params)

    def __repr__(self):
        return '<transit with rp_over_rs={:.3}>'.format(self.params.rp)
#def test():
if __name__ == '__main__':
    # Smoke test / demo: build a few models and overplot them on one axis.
    constant = np.ones                   # trivial flat baseline (not plotted below)
    polynomial = np.poly1d([1,0,0.02])   # quadratic baseline (not plotted below)
    t_deep = Transit(rp_over_rs=0.3)     # deep (large-planet) transit
    t_shallow = Transit(rp_over_rs=0.05) # shallow transit
    few = ManyFlares(N=2)
    many = ManyFlares(N=25)
    # Each Model subclass provides .plot() over a default [-0.5, 0.5] grid.
    for model in [t_deep, t_shallow, few, many]:
        model.plot()
    plt.legend(bbox_to_anchor=(1,1), loc='upper left', frameon=False)
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# _____ ______________
# | __ \ /\|__ ____ __|
# | |__) | / \ | | | |
# | _ / / /\ \ | | | |
# | | \ \/ ____ \| | | |
# |_| \_\/ \_\_| |_| ... RFID ALL THE THINGS!
#
# A resource access control and telemetry solution for Makerspaces
#
# Developed at MakeIt Labs - New Hampshire's First & Largest Makerspace
# http://www.makeitlabs.com/
#
# Copyright 2018 MakeIt Labs
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
#
# Author: <NAME> (<EMAIL>)
#
from PyQt5.QtCore import Qt, QThread, QWaitCondition, QMutex, QIODevice, QByteArray, pyqtSlot, pyqtSignal, pyqtProperty
from PyQt5.QtSerialPort import QSerialPort
from Logger import Logger
import logging
import calendar
import time
import hashlib
import queue
class RFID(QThread):
    """Background worker that reads tags from a Gwiot 7941E RFID module over
    a serial port and re-emits them as Qt signals.

    The thread also owns the serial port for unrelated outbound messages
    (see serialOut/run), because the port can only be opened in one place.
    """

    # Emitted on a successful scan: (tag, sha224-hash, unix-time, debug text)
    tagScan = pyqtSignal(int, str, int, str, name='tagScan', arguments=['tag', 'hash', 'time', 'debugText'])
    # Emitted on a bad packet/checksum: (error code, unix-time, debug text)
    tagScanError = pyqtSignal(int, int, str, name='tagScanError', arguments=['error', 'time', 'debugText'])

    @pyqtProperty(int)
    def errPacket(self):
        # Error code: malformed packet (wrong length or framing bytes)
        return -1

    @pyqtProperty(int)
    def errChecksum(self):
        # Error code: framing OK but XOR checksum failed
        return -2

    @pyqtSlot(int)
    def simulateTagScan(self, tag):
        """Emit a tagScan signal as if ``tag`` had just been read (testing aid)."""
        hash = self.hash_tag(tag)
        now = calendar.timegm(time.gmtime())
        self.tagScan.emit(tag, hash, now, 'simulated')

    @pyqtSlot()
    def simulateScanError(self):
        """Emit a tagScanError signal with a dummy code (testing aid)."""
        now = calendar.timegm(time.gmtime())
        self.tagScanError.emit(0, now, 'simulated')

    def __init__(self, portName='', baudRate=QSerialPort.Baud9600, loglevel='WARNING'):
        QThread.__init__(self)

        self.logger = Logger(name='ratt.rfid')
        self.logger.setLogLevelStr(loglevel)
        self.debug = self.logger.isDebug()

        # Condition/mutex pair: the mutex also guards outQueue access.
        self.cond = QWaitCondition()
        self.mutex = QMutex()

        self.portName = portName
        self.baudRate = baudRate
        self.waitTimeout = 250     # ms to block in waitForReadyRead per loop
        self.quit = False          # set True to terminate run()

        self.outQueue = queue.Queue()   # messages queued for serial output

    def serialOut(self, msg):
        """Queue ``msg`` (str) for transmission by the worker thread."""
        self.mutex.lock()
        self.outQueue.put(msg)
        self.mutex.unlock()

    def monitor(self):
        """Start the worker thread, or wake it if it is already running."""
        if not self.isRunning():
            self.start();
        else:
            self.cond.wakeOne();

    def dump_pkt(self, bytes):
        """Render a packet as a hex string for debug/diagnostic output.

        NOTE(review): ord(b) assumes iterating the buffer yields 1-char
        strings (PyQt QByteArray under Python 2) -- confirm on the target
        interpreter.
        """
        bstr = ''
        for b in bytes:
            byte = ord(b)
            bstr += '%02x ' % byte
        return bstr

    def decode_gwiot(self, bytes):
        """Validate and decode one packet; return the tag id or an error code.

        Returns the 32-bit tag value on success, errChecksum (-2) on a
        checksum failure, or errPacket (-1) on a framing failure.
        """
        #
        # sample packet from "Gwiot 7941e V3.0" eBay RFID module:
        #
        #  00  01  02  03  04  05  06  07  08  09   BYTE #
        # -----------------------------------------------------------
        #  02  0A  02  11  00  0D  EF  80  7B  03
        #  |   |   |   |   |   |   |   |   |   |
        # STX  ??  ??  ?? id3 id2 id1 id0 sum ETX
        #
        # checksum (byte 8, 'sum') is a simple 8-bit XOR of bytes 01 through 07
        # starting the checksum with the value of 'sum' or starting with 0 and
        # including the value of 'sum' in the calculation will net a checksum
        # value of 0 when correct
        #
        # the actual tag value is contained in bytes 04 through 07 as a 32-bit
        # unsigned integer.  byte 04 ('id3') is the most significant byte, while
        # byte 07 ('id0') is the least significant byte.
        #
        # Framing check: exactly 10 bytes, STX (0x02) first, ETX (0x03) last.
        if bytes.length() == 10 and ord(bytes[0]) == 0x02 and ord(bytes[9]) == 0x03:
            # Seed with 'sum' and XOR bytes 1..7; a valid packet nets 0.
            checksum = ord(bytes[8])
            for i in range(1, 8):
                checksum = checksum ^ ord(bytes[i])
                self.logger.debug('calc checksum byte %d (%2x), checksum = %2x' % (i, ord(bytes[i]), checksum))
            if checksum == 0:
                # Assemble the big-endian 32-bit tag from bytes 4..7.
                tag = (ord(bytes[4]) << 24) | (ord(bytes[5]) << 16) | (ord(bytes[6]) << 8) | ord(bytes[7])
                self.logger.debug("serial read: " + self.dump_pkt(bytes))
                self.logger.debug('tag = %10.10d' % tag)
                return tag
            else:
                self.logger.warning("checksum error")
                return self.errChecksum
        else:
            self.logger.warning("packet error")
            return self.errPacket

    def hash_tag(self, tag):
        """Return the SHA-224 hex digest of the zero-padded 10-digit tag."""
        m = hashlib.sha224()
        tag_str = '%.10d' % int(tag)
        m.update(str(tag_str).encode())
        tag_hash = m.hexdigest()
        self.logger.debug('hash: %s' % tag_hash)
        return tag_hash

    def run(self):
        """Thread main loop: drain outbound messages, then poll for RFID data."""
        self.serial = QSerialPort()
        self.serial.setPortName(self.portName)
        self.serial.setBaudRate(self.baudRate)

        # open read-write for serial output function, which is unrelated to RFID,
        # but lives here because the port can only be opened in one place
        if not self.serial.open(QIODevice.ReadWrite):
            self.logger.error("can't open serial port")
            return;

        while not self.quit:
            # look for queued serial output items, write them to serial port
            writeItem = None
            self.mutex.lock()
            if not self.outQueue.empty():
                writeItem = self.outQueue.get()
                self.outQueue.task_done()
            self.mutex.unlock()

            if self.serial.isWritable() and writeItem is not None:
                self.logger.debug('writing msg to serial: %s' % writeItem)
                self.serial.writeData(writeItem.encode("utf-8"))

            # look for incoming data from RFID module, read and process it
            if self.serial.waitForReadyRead(self.waitTimeout):
                bytes = self.serial.readAll()
                # Keep reading while more bytes trickle in (short 10 ms waits)
                # so a whole packet is accumulated before decoding.
                while self.serial.waitForReadyRead(10):
                    bytes += self.serial.readAll()

                tag = self.decode_gwiot(bytes)
                now = calendar.timegm(time.gmtime())
                self.logger.debug("tag=%d, now=%d" % (tag, now))

                # Positive values are tags; <= 0 values are error codes.
                if tag > 0:
                    self.tagScan.emit(tag, self.hash_tag(tag), now, self.dump_pkt(bytes))
                else:
                    self.tagScanError.emit(tag, now, self.dump_pkt(bytes))
#!/usr/bin/env python
# coding=utf-8
"""
Unit tests for pygit module/PyGit class.
Created: <NAME>, 24.04.2019
Modified: <NAME>, 25.05.2019
"""
import unittest
from pyutilities.tests.pyutils_test_helper import get_test_logger
from pyutilities.pygit import PyGit
class PyGitTest(unittest.TestCase):
    """Unit tests for the pygit module's PyGit class (currently scaffolding:
    fixtures are wired up but no real assertions exist yet)."""

    def setUp(self):
        """Run before each test: log entry and build a fresh PyGit client."""
        self.log.debug('setUp() is working.')
        self.pygit = PyGit('http://stash.server.com/scm')

    def tearDown(self):
        """Run after each test: log exit."""
        self.log.debug('tearDown() is working.')

    @classmethod
    def setUpClass(cls):
        """Run once for the class: create the shared test logger."""
        cls.log = get_test_logger(__name__)
        cls.log.debug('setUpClass() is working.')

    @classmethod
    def tearDownClass(cls):
        """Run once after all tests in the class."""
        cls.log.debug('tearDownClass() is working.')

    def test(self):
        # Placeholder test -- always passes; real assertions still to be added.
        pass
# dansandu/praline
from os.path import normpath
from praline.client.project.pipeline.stages.load_clang_format import clang_format_style_file_contents, ClangFormatConfigurationError, load_clang_format
from praline.common.testing.file_system_mock import FileSystemMock
from unittest import TestCase
class LoadClangFormatStageTest(TestCase):
    """Tests for the load_clang_format pipeline stage.

    Each test builds a FileSystemMock, runs the stage, and checks that the
    ``resources`` dict gains the clang-format executable path and the path of
    the ``.clang-format`` style file (generated with default contents unless
    the user supplied one).
    """

    def test_load_clang_format_stage_with_client_configuration(self):
        """Executable path comes from the client configuration; a default style file is generated."""
        normalized_executable_path = normpath('path/to/clang_format_executable')
        normalized_style_file_path = normpath('my/project/.clang-format')
        file_system = FileSystemMock({'path/to', 'my/project'}, {normalized_executable_path: b''})
        resources = {'project_directory': 'my/project'}
        configuration = {'clang-format-executable-path': normalized_executable_path}
        load_clang_format(file_system, resources, None, None, configuration, None)
        self.assertEqual(resources['clang_format_executable'], normalized_executable_path)
        self.assertEqual(normpath(resources['clang_format_style_file']), normalized_style_file_path)
        # No pre-existing style file, so the stage writes the default contents.
        self.assertEqual(file_system.files[normalized_style_file_path].decode('utf-8'), clang_format_style_file_contents)

    def test_load_clang_format_stage_with_file_configuration(self):
        """No configured path: the executable is discovered via which('clang-format')."""
        normalized_executable_path = normpath('path/to/clang_format_executable')
        normalized_style_file_path = normpath('my/project/.clang-format')
        file_system = FileSystemMock({'path/to', 'my/project'}, {normalized_executable_path: b''}, on_which=lambda t: normalized_executable_path if t == 'clang-format' else None)
        resources = {'project_directory': 'my/project'}
        configuration = {}
        load_clang_format(file_system, resources, None, None, configuration, None)
        self.assertEqual(resources['clang_format_executable'], normalized_executable_path)
        self.assertEqual(normpath(resources['clang_format_style_file']), normalized_style_file_path)
        self.assertEqual(file_system.files[normalized_style_file_path].decode('utf-8'), clang_format_style_file_contents)

    def test_load_clang_format_stage_with_user_supplied_style_file(self):
        """An existing .clang-format file must be left untouched, not overwritten."""
        normalized_executable_path = normpath('path/to/clang_format_executable')
        normalized_style_file_path = normpath('my/project/.clang-format')
        file_system = FileSystemMock({'path/to', 'my/project'}, {normalized_executable_path: b'', normalized_style_file_path: b'IndentWidth: 8'})
        resources = {'project_directory': 'my/project'}
        configuration = {'clang-format-executable-path': normalized_executable_path}
        load_clang_format(file_system, resources, None, None, configuration, None)
        self.assertEqual(normpath(resources['clang_format_executable']), normalized_executable_path)
        self.assertEqual(normpath(resources['clang_format_style_file']), normalized_style_file_path)
        # The user's custom contents survive the stage.
        self.assertEqual(file_system.files[normalized_style_file_path], b'IndentWidth: 8')

    def test_load_clang_format_stage_with_no_configuration(self):
        """With no configured path and no discoverable executable the stage must raise."""
        file_system = FileSystemMock({'my/project'})
        resources = {
            'project_directory': 'my/project'
        }
        configuration = {}
        self.assertRaises(ClangFormatConfigurationError, load_clang_format, file_system, resources, None, None, configuration, None)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 08:49:20 2019
@author: john.onwuemeka; <NAME>
"""
import numpy as np
from obspy.core import UTCDateTime
def read_eventlist(self):
    """
    Description:
    ------------
    Read event information from events.dat and populate ``self.evlist``,
    mapping event-id -> [UTCDateTime origin, [lat, lon, depth, magnitude]].

    Input:
    -------
    events.dat:
        year month day hour minute second lat lon depth magnitude eventID

    Returns:
    ---------
    None
    """
    data = np.genfromtxt(self.maindir + '/input/events.dat', skip_header=1, dtype='U24')
    # Robustness: a file with a single event yields a 1-D record from
    # genfromtxt; promote it so the per-row indexing below works either way.
    data = np.atleast_2d(data)

    # Resolve the effective id selection from the white/black lists.
    if self.whitelist_evl and not self.blacklist_evl:
        evids = self.whitelist_evl
    elif self.whitelist_evl and self.blacklist_evl:
        evids = [i for i in self.whitelist_evl if i not in self.blacklist_evl]
    elif self.blacklist_evl and not self.whitelist_evl:
        evids = [str(int(row[10])) for row in data if str(int(row[10])) not in self.blacklist_evl]
    else:
        evids = [str(int(row[10])) for row in data]

    allids = np.asarray([str(int(row[10])) for row in data])
    for evid in evids:
        # Bug fix: the original accumulated times/metadata in separate lists
        # and zipped them back against ``evids`` at the end -- any id missing
        # from the file shifted the pairing and attached origin times and
        # metadata to the wrong events.  Building each entry per-id keeps
        # everything aligned; unknown ids are simply skipped.
        index = np.where(allids == evid)[0]
        if index.size == 0:
            continue
        row = data[index[0]]
        origin_time = UTCDateTime(int(row[0]), int(row[1]), int(row[2]),
                                  int(row[3]), int(row[4]), float(row[5]))
        metadata = [float(row[6]), float(row[7]), float(row[8]), float(row[9])]
        self.evlist[evid] = [origin_time, metadata]
    return None
# 010.py
# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
# Find the sum of all the primes below two million.
def prastevilo(n):
    """Return True if n is a prime number, False otherwise.

    (Translated from Slovenian: "pogleda, če je n praštevilo".)
    Uses math.isqrt for an exact integer square-root bound; the original
    round(n ** 0.5 + 1) is subject to floating-point error for large n.
    """
    if n <= 1:
        return False
    from math import isqrt  # local import: this script has no import section
    for i in range(2, isqrt(n) + 1):
        if n % i == 0:
            return False
    return True
# Sieve of Eratosthenes: one linear pass over the 2e6 flags instead of
# trial-dividing every candidate separately (and the stray dataset artifact
# on the original print line has been removed).
limit = 2000000
sieve = bytearray([1]) * limit
sieve[0] = sieve[1] = 0
for p in range(2, int(limit ** 0.5) + 1):
    if sieve[p]:
        # Knock out every multiple of p starting at p*p.
        sieve[p * p::p] = bytearray(len(range(p * p, limit, p)))
prastevila = [i for i in range(limit) if sieve[i]]
print(sum(prastevila))
import sys,os,pickle,argparse
import numpy as np
from models import *
from sklearn.model_selection import KFold
#deprecated
#from sklearn.cross_validation import KFold
from scrape import *
import argparse
from torch.multiprocessing import Pool
from random import shuffle
def fit(X,Y,model,criterion= nn.NLLLoss(),epochs=20,batch_size=1,verbose=True,print_batches=1000,opt='Adam'):
    """
    Fits a choice model with pytorch's SGD

    X- Indicator vectors for choice sets
    Y- indices of choices
    model- choice model to fit
    criterion- which loss function to use (default to negative log likelihood for MLE)
    epochs- number of times to loop over the training data
    batch_size- how large to make batches
    verbose- whether to print updates as training goes on
    print_batches- how often to print updates on training
    opt- which optimizer to use 'SGD' or 'Adam'

    Returns the (trained in-place) model.
    Raises ValueError for an unrecognized optimizer name (the original fell
    through to a confusing NameError on ``optimizer``).
    """
    X = torch.Tensor(X)
    Y = torch.LongTensor(Y.astype(int))
    dataset = torch.utils.data.TensorDataset(X,Y)
    # batch_size == 1 uses the DataLoader default; both paths shuffle.
    if batch_size>1:
        dataloader = torch.utils.data.DataLoader(dataset,batch_size=batch_size,shuffle=True)
    else:
        dataloader = torch.utils.data.DataLoader(dataset,shuffle=True)
    if opt=='SGD':
        optimizer = optim.SGD(model.parameters(), lr=0.001,momentum=0.9)
    elif opt=='Adam':
        optimizer = optim.Adam(model.parameters())
    else:
        raise ValueError("opt must be 'SGD' or 'Adam', got " + str(opt))

    for epoch in range(epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        # Respect the verbose flag (the original printed this unconditionally).
        if verbose:
            print('Starting epoch '+str(epoch)+' of '+str(epochs))
        for i, data in enumerate(dataloader, 0):
            # Variable() wrapping removed: it has been a deprecated no-op
            # since torch 0.4 -- tensors autograd directly.
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward pass, loss, backprop, and an optimizer step
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # accumulate statistics (loss.item() replaces loss.data.item())
            running_loss += loss.item()
            if not verbose:
                continue
            if i % print_batches == print_batches-1:  # print every print_batches mini-batches
                print('(epoch %2d, %5d samples), avg loss: %.3f' %
                      (epoch + 1, (i + 1)*batch_size, running_loss / print_batches))
                running_loss = 0.0
    return model
def cv(L,n,models,save_path,K=5,epochs=20,batch_size=1,opt='Adam',seed=True,RE=False):
    """
    Trains and saves choosing-to-rank models with SGD via K-fold CV.

    Args:
        L- list of data rankings
        n- number of items ranked
        models- choice models to fit
        save_path- file prefix the pickled results are written under
        K- number of folds
        epochs- number of times to loop over the data
        batch_size/opt- passed through to fit()
        seed- if True, re-initialize each PCMC model's gamma from the first
              model's learned utilities before fitting
        RE- if True, tag the output file as repeated-elimination results

    Returns 0; results are pickled to disk as a side effect.
    """
    kf = KFold(n_splits=K,shuffle=True)
    splits = kf.split(L)
    split_store = {'train':[],'test':[],'data':L}
    for model in models:
        split_store[str(model)]=[]
    for k,(train,test) in enumerate(splits):
        print('Beginning fold'+str(k)+' of '+str(K))
        #scrape training choices and fit model
        X_train,Y_train = RS_choices([L[x] for x in train],n)
        for model in models:
            print('training RS-'+str(model))
            if seed and str(model) == 'PCMC':
                # Seed PCMC's gamma from the first model's learned utilities.
                # NOTE(review): .next() is Python 2 only; under Python 3 this
                # must be next(models[0].parameters()).
                utils = models[0].parameters().next().data.numpy()
                g = np.exp(utils)
                g /= np.sum(g)
                model = PCMC(n,gamma=g)
            model = fit(X_train,Y_train,model,criterion=nn.NLLLoss(),epochs=epochs,batch_size=batch_size,opt=opt)
            split_store[str(model)].append(model)
        #store everything
        split_store['train'].append(train)
        split_store['test'].append(test)
    # Write results; the with-block closes the handle deterministically
    # (the original pickle.dump(..., open(...)) leaked the file object).
    out_path = save_path + ('-RE.p' if RE else '.p')
    with open(out_path, 'wb') as out_file:
        pickle.dump(split_store, out_file)
    return 0
def parallel_helper(tup):
    """
    Unpack a job tuple and forward it to ``cv``.

    Needed because multiprocessing.Pool can only map a picklable, named,
    module-level function -- not an anonymous lambda.
    """
    (L, n, models, save_path, epochs, batch_size, opt, seed, RE, K) = tup
    return cv(L, n, models, save_path,
              epochs=epochs, batch_size=batch_size, opt=opt,
              seed=seed, RE=RE, K=K)
def ensure_dir(file_path):
    """
    Create the directory portion of ``file_path`` if it does not exist.

    Originally adapted from:
    https://stackoverflow.com/questions/273192/how-can-i-safely-create-a-nested-directory-in-python

    Uses makedirs(exist_ok=True), which removes the exists()/makedirs()
    check-then-create race of the original, and tolerates a path with no
    directory component (the original crashed on makedirs('')).
    """
    directory = os.path.dirname(file_path)
    if directory:
        os.makedirs(directory, exist_ok=True)
def trawl(dset,dtype,epochs,parallel=False,batch_size=1,max_n=30,max_rankings=1000,opt='Adam',num_dsets=10,seed=True,RE=False,K=5):
    """
    trawls over a directory and fits models to all data files
    Args:
    dset- name of dataset(s) considered
    dtype- 'soi' for partial rankings, 'soc' for complete rankings
    epochs- number of times to loop over the data
    parallel- whether to train models in parallel over the datasets in the directory
    batch_size- number of choices to train on at a time
    max_n- largest number of alternatives allowed to train on a dataset
    max_rankings- maximum number of rankings to fit a dataset
    opt- which optimizer to use
    num_dsets- number of datasets to fit
    seed- whether to seed PCMC
    RE- whether to compute repeated elimination (RS if false)
    K- number of CV folds for each dataset
    """
    #we will loop over the datasets stored in this directory
    path = os.getcwd()+os.sep+'data'+os.sep+dset
    files = os.listdir(path)
    #shuffle(files)

    #this is where we'll save the output models
    save_path = os.getcwd()+os.sep+'cache'+os.sep+'learned_models'+os.sep+dset+os.sep

    job_list = []
    batch = (batch_size>1)
    for filename in files:  #loop over the directory
        print(filename)
        if filename.endswith(dtype):
            filepath = path+os.sep+filename
            if dtype=='soi':
                L,n = scrape_soi(filepath)
            else:
                L,n = scrape_soc(filepath)
            # Skip datasets outside the configured size limits.
            if len(L)<=10 or len(L)>max_rankings or n>max_n:
                if len(L)<=10:
                    reason = 'too few rankings- '+str(len(L))+', min is 10'
                elif len(L)>max_rankings:
                    reason = 'too many rankings- '+str(len(L))+', max is '+str(max_rankings)
                else:
                    reason = 'too many alternatives- '+str(n)+', max is '+str(max_n)
                print(filename+' skipped, '+reason)
                continue
            else:
                print(filename+' added')

            #collect models: CDM at several ranks (where rank <= n) plus MNL
            models = []
            for d in [1,4,8]:
                if d>n:
                    continue
                models.append(CDM(n=n,d=d))
            models.append(MNL(n))
            #models.append(PCMC(n,batch=batch))
            #models.append(BP(n=n,k=3,d=2))

            #append a tuple containing all the objects needed to train on the dataset
            job_list.append((L,n,models,save_path+filename[:-4]+'-'+dtype,epochs,batch_size,opt,seed,False,K))
            if RE:
                # Repeated elimination trains on reversed rankings.
                # NOTE(review): under Python 3 this map() is a lazy iterator;
                # wrap in list(...) if downstream code indexes or pickles it.
                job_list.append((map(lambda x:x[::-1],L),n,models,save_path+filename[:-4]+'-'+dtype,epochs,batch_size,opt,seed,True,K))
            if len(job_list)>=num_dsets:
                print('maximum number of datasets reached')
                continue

    print(str(len(job_list))+' datasets total')
    print(str(sum(map(lambda x: len(x[0]),job_list)))+ ' total rankings')

    #sort jobs by number of alternatives * number of (partial) rankings,
    #which is roughly the number of choices, up to partial ranking length.
    #Bug fix: the original called sorted(job_list, ...) and discarded the
    #returned list, so nothing was actually sorted.
    job_list.sort(key=lambda x: x[1]*len(x[0]))

    #training for each dataset can be done in parallel with this
    if parallel:
        p = Pool(4)
        p.map(parallel_helper,job_list)
    else:
        [x for x in map(parallel_helper,job_list)]
def parse():
    """
    Parse command line args and launch trawl(); run when train.py is __main__.

    Invalid option values now abort with an argparse usage error (via the
    ``choices`` lists) instead of bare asserts -- asserts are stripped under
    ``python -O``, and the original seed_pcmc check only printed a warning
    without stopping.  The old unused ``path`` computation was dead code and
    has been removed.
    """
    np.set_printoptions(suppress=True, precision=3)
    parser = argparse.ArgumentParser(description='ctr data parser')
    parser.add_argument('-dset', help="dataset name", default=None)
    parser.add_argument('-dtype', help="dataset type", default='soi', choices=['soi', 'soc'])
    parser.add_argument('-epochs', help="number of epochs to use", default='10')
    parser.add_argument('-batch_size', help='batch_size for training', default='1')
    parser.add_argument('-max_n', help='maximum number of items ranked', default='10')
    parser.add_argument('-max_rankings', help='maximum number of rankings', default='1000')
    parser.add_argument('-opt', help='SGD or Adam', default='Adam', choices=['SGD', 'Adam'])
    parser.add_argument('-num_dsets', help='how many datasets to use', default='100')
    parser.add_argument('-seed_pcmc', help='whether to seed pcmc with MNL (y/n)', default='n', choices=['y', 'n'])
    parser.add_argument('-re', help='whether to train RE models (y/n)', default='n', choices=['y', 'n'])
    parser.add_argument('-folds', help='number of folds for cv on each dataset', default='5')
    args = parser.parse_args()

    # The complete-rankings dataset is always of type 'soc'.
    if args.dset == 'soc':
        args.dtype = 'soc'

    trawl(args.dset, args.dtype, epochs=int(args.epochs), batch_size=int(args.batch_size),
          max_n=int(args.max_n), max_rankings=int(args.max_rankings), opt=args.opt,
          num_dsets=int(args.num_dsets), seed=(args.seed_pcmc == 'y'),
          RE=(args.re == 'y'), K=int(args.folds))
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 20:19:31 2015
@author: matt
"""
import ete2
from taxonomy import taxonomy
def build_tree_from_dict(dict_tree, tree=None):
    """Recursively grow an ete2 Tree that mirrors a nested-dict taxonomy.

    Each key becomes a child node; a truthy value (a sub-dict) is recursed
    into.  When ``tree`` is None a fresh root named "root" is created.
    Returns the (possibly newly created) tree.
    """
    if tree is None:
        tree = ete2.Tree(name="root")
    for name, subdict in dict_tree.iteritems():
        child = tree.add_child(name=name)
        if subdict:
            build_tree_from_dict(subdict, child)
    return tree
def named_internal_node_layout(node):
    """ete2 layout callback: label every node with its name.

    Internal nodes get a smaller font (fsize=10) than leaves.
    """
    face = ete2.AttrFace("name") if node.is_leaf() else ete2.AttrFace("name", fsize=10)
    ete2.faces.add_face_to_node(face, node, column=0, position="branch-right")
def make_equal_depth_tree(tree):
    """Return a copy of ``tree`` padded so every leaf sits at the same depth.

    Top-down variant: each shallowest leaf repeatedly grows a single child
    carrying the same name (drawn in red to mark it as padding) until all
    root-to-leaf path lengths are equal.  The input tree is not modified.
    """
    tree = tree.copy()
    leaves = tree.get_leaves()
    depths = [tree.get_distance(leaf) for leaf in leaves]
    equalised = max(depths) == min(depths)
    while not equalised:
        min_depth = min(depths)
        for i, leaf in enumerate(leaves):
            if depths[i] == min_depth:
                # Extend the shallow branch by one dummy node and track the
                # new node as that branch's current leaf.
                leaves[i] = leaf.add_child(name=leaf.name)
                leaves[i].img_style['fgcolor'] = 'red'  # highlight padding nodes
                depths[i] += 1
        equalised = max(depths) == min(depths)
    return tree
def make_equal_depth_tree_bottom_up(tree):
    """Equalise leaf depths by inserting grouping nodes from the leaves upward.

    Walks generation by generation toward the root: whenever only some of a
    parent's children belong to the current frontier, those children are
    detached and re-attached under a new intermediate node (named with the
    joined child names, drawn in red), which pushes them one level deeper.
    Returns a padded copy; the input tree is not modified.
    """
    tree = tree.copy()
    nodes = tree.get_leaves()
    parents = set([node.up for node in nodes])
    # Iterate until the only remaining parent is the root itself.
    while parents != set([tree.get_tree_root()]):
        for parent in parents:
            children = set(parent.get_children())
            children_in_nodes = children.intersection(set(nodes))
            if children_in_nodes != children:
                # Mixed generation under this parent: wrap the frontier
                # children in a new intermediate node to deepen them by one.
                map(lambda n: n.detach(), children_in_nodes)
                name = '\n'.join([n.name for n in children_in_nodes])
                new_parent = ete2.TreeNode(name=name)
                new_parent.img_style['fgcolor'] = 'red'  # mark inserted nodes
                map(lambda n: new_parent.add_child(n), children_in_nodes)
                parent.add_child(new_parent)
                # Replace the old parent with the inserted node in the frontier.
                # NOTE(review): map() here relies on Python 2 eager semantics;
                # under Python 3 the detach/add_child calls would never run.
                parents.remove(parent)
                parents.add(new_parent)
        nodes = parents
        parents = set([parent.up for parent in parents])
    return tree
def get_equal_depth_tree_layers(tree):
    """Group the nodes of an equal-depth tree into per-generation layers.

    Requires all leaves at identical depth (raises otherwise).  Builds the
    layer list from the leaves upward, then reverses it so layers are ordered
    root-side first.  Every node is annotated in place with ``layer`` (its
    layer number) and ``layer_index`` (its position within the layer).
    NOTE(review): the upward walk stops at the direct children of ``tree``,
    so the root node itself is never placed in a layer -- confirm intended.
    """
    leaves = tree.get_leaves()
    depths = [tree.get_distance(leaf) for leaf in leaves]
    if min(depths) != max(depths):
        raise Exception('Tree with equal depth leaf nodes required')
    layers = [ set(leaves) ]
    # Climb one generation per iteration; set() deduplicates shared parents.
    while next(iter(layers[-1])).up is not tree:
        layers.append(list(set([node.up for node in layers[-1]])))
    layers = layers[::-1]  # root-side layers first
    for l, layer in enumerate(layers):
        for k, node in enumerate(layer):
            node.layer = l
            node.layer_index = k
    return layers
def change_tree_node_size(tree, size):
    """Set the rendered marker size on every node of ``tree``, in place."""
    for current in tree.traverse():
        current.img_style['size'] = size
# Build a circular tree style and render both depth-equalisation variants.
# (The stray dataset artifact fused onto the final show() line was removed.)
ts = ete2.TreeStyle()
ts.mode = 'c'  # circular layout
#ts.show_leaf_name = False
#ts.layout_fn = named_internal_node_layout
ts.scale = None
ts.optimal_scale_level = 'full'

tree = build_tree_from_dict(taxonomy)
eq_tree_td = make_equal_depth_tree(tree)
change_tree_node_size(eq_tree_td, 10)
eq_tree_bu = make_equal_depth_tree_bottom_up(tree)
change_tree_node_size(eq_tree_bu, 10)
#layers = get_equal_depth_tree_layers(eq_tree)
eq_tree_td.show(tree_style=ts)
eq_tree_bu.show(tree_style=ts)
"""
Various processing utility functions
Usage:
import only
"""
import os
import pandas as pd
from pycytominer.cyto_utils import infer_cp_features
def load_data(
    batch,
    profile_dir="profiles",
    suffix="normalized_feature_selected.csv.gz",
    combine_dfs=False,
    add_cell_count=False,
    cell_count_dir="cell_counts",
):
    """Load all per-plate profile CSVs for one batch.

    Returns a dict mapping plate name -> dataframe (tagged with
    Metadata_batch), or a single combined dataframe when ``combine_dfs``.
    Cell counts are merged in when ``add_cell_count`` is set.
    """
    batch_dir = os.path.join(profile_dir, batch)
    # Bug fix: filter junk entries (e.g. macOS .DS_Store) from the FOLDER
    # list before pairing names with file paths.  The original filtered only
    # the derived file list, so with a .DS_Store present the two lists fell
    # out of alignment and plates were keyed by the wrong name.
    plate_folders = [x for x in os.listdir(batch_dir) if ".DS_Store" not in x]

    plate_data = {}
    for plate in plate_folders:
        plate_file = os.path.join(batch_dir, plate, "{}_{}".format(plate, suffix))
        df = pd.read_csv(plate_file).assign(Metadata_batch=batch)
        if add_cell_count:
            df = merge_cell_count(df, batch, cell_count_dir=cell_count_dir)
        plate_data[plate] = df

    if combine_dfs:
        plate_data = convert_data(plate_data)

    return plate_data
def merge_cell_count(df, batch, cell_count_dir="cell_counts"):
    """Attach per-well cell counts to a profile dataframe.

    Reads every file in ``cell_count_dir`` whose name contains ``batch``,
    concatenates them, and merges the resulting ``Metadata_cell_count``
    column onto ``df`` using all of the count frames' remaining columns as
    join keys.  Returns the merged dataframe.
    """
    # Load cell counts for the specific plates
    count_files = [
        os.path.join(cell_count_dir, x)
        for x in os.listdir(cell_count_dir)
        if batch in x
    ]

    all_plate_dfs = []
    for count_file in count_files:
        # Derive the plate name from the file name (strip batch + suffix).
        # NOTE(review): ``plate`` is never used after this point -- confirm
        # whether it was meant to be attached as a column.
        plate = os.path.basename(count_file)
        plate = plate.replace(batch, "").replace("cell_count.tsv", "").strip("_")
        plate_df = pd.read_csv(count_file, sep="\t").rename(
            {"cell_count": "Metadata_cell_count"}, axis="columns"
        )
        all_plate_dfs.append(plate_df)

    # Merge all plates and append cell count information as a metadata feature
    plate_df = pd.concat(all_plate_dfs, sort=True)
    df = plate_df.merge(
        df, on=plate_df.drop("Metadata_cell_count", axis="columns").columns.tolist()
    )
    return df
def convert_data(df_dict):
    """Concatenate per-plate frames into one dataframe with metadata columns
    first and CellProfiler feature columns last."""
    combined = pd.concat(df_dict.values(), ignore_index=True, sort=True).reset_index(
        drop=True
    )
    feature_cols = infer_cp_features(combined)
    metadata_cols = [c for c in combined.columns if c not in feature_cols]
    return combined.reindex(metadata_cols + feature_cols, axis="columns")
# jappa/PyFR
# -*- coding: utf-8 -*-
"""Converts .pyfr[m, s] files to a Paraview VTK UnstructuredGrid File"""
from copy import copy
import numpy as np
import sympy as sy
from pyfr.bases import BaseBasis, get_std_ele_by_name
from pyfr.inifile import Inifile
from pyfr.readers.nodemaps import GmshNodeMaps
from pyfr.util import subclass_map
from pyfr.writers import BaseWriter
class ParaviewWriter(BaseWriter):
    """Wrapper for writing serial .vtu Paraview files"""
    # Supported file types and extensions
    name = 'paraview'
    extn = ['.vtu']

    # PyFR to VTK element types, node mapping, (number of nodes)
    # value = (VTK cell type id, PyFR->VTK corner-node order, node count)
    vtk_to_pyfr = {'tri': (5, [0, 1, 2], 3),
                   'quad': (9, [0, 1, 3, 2], 4),
                   'tet': (10, [0, 1, 2, 3], 4),
                   'pyr': (14, [0, 1, 3, 2, 4], 5),
                   'pri': (13, [0, 1, 4, 2, 3, 5], 6),
                   'hex': (12, [0, 1, 3, 2, 4, 5, 7, 6], 8)}

    def write_out(self):
        """Controls the writing of serial .vtu Paraview files

        Writes .vtu pieces for each element type, in each partition of
        the PyFR files.  The Paraview data type used is "appended",
        which concatenates all data into a single block of binary data
        at the end of the file.  ASCII headers written at the top of
        the file describe the structure of this data.

        Two different methods for outputting the data are used; one
        that involves subdividing a high order element into low
        order elements (at the shape points), and another that appends
        high-order data to be read by an external plugin detailed here:
        http://perso.uclouvain.be/sebastien.blaise/tools.html
        The methods are switched such that the latter is used when
        self.args.divisor is None.  When it is not none, the former
        method is used on a high order element of shape order =
        self.args.divisor.  An initial 0 value sets itself to the
        solution order.
        """
        # Set default divisor to solution order
        if self.args.divisor == 0:
            self.args.divisor = self.cfg.getint('solver', 'order')

        # Write .vtu file header
        self.outf.write('<?xml version="1.0" ?>\n<VTKFile '
                        'byte_order="LittleEndian" type="UnstructuredGrid" '
                        'version="0.1">\n<UnstructuredGrid>\n')

        # Initialise offset (in bytes) to end of appended data
        off = np.array([0])

        # Write data description header.  A vtk "piece" is used for each
        # element in a partition.
        # NOTE(review): the dict-key indexing below (.iterkeys()/.keys()[i])
        # is Python 2 only and assumes mesh_inf and soln_inf iterate in the
        # same order -- verify on the project's target interpreter.
        for i, mk in enumerate(self.mesh_inf.iterkeys()):
            sk = self.soln_inf.keys()[i]
            _write_vtu_header(self.args, self.outf, self.mesh_inf[mk],
                              self.soln_inf[sk], off)

        # Write end/start of header/data sections
        self.outf.write('</UnstructuredGrid>\n<AppendedData '
                        'encoding="raw">\n_')

        # Write data "piece"wise; each piece appends its raw binary block
        # at the byte offsets declared in the header above.
        for i, mk in enumerate(self.mesh_inf.iterkeys()):
            sk = self.soln_inf.keys()[i]
            _write_vtu_data(self.args, self.outf, copy(self.cfg),
                            self.mesh[mk], self.mesh_inf[mk], self.soln[sk],
                            self.soln_inf[sk])

        # Write .vtu file footer
        self.outf.write('\n</AppendedData>\n</VTKFile>')
def _write_vtk_darray(array, vtuf, numtyp):
    """Writes a numpy array to a vtu file (in binary as type numtyp)

    The .vtu appended-data format requires each data block to be prefixed
    with its payload size in bytes as a little-endian uint32.

    :param array: Array to be written to file.
    :param vtuf: File to write array to.
    :param numtyp: Type of number representation to use. e.g. 'float32'
    :type array: numpy.ndarray
    :type vtuf: file
    :type numtyp: string
    """
    payload = array.astype(numtyp)
    np.array(payload.nbytes, dtype='uint32').tofile(vtuf)
    payload.tofile(vtuf)
def _component_to_physical_soln(soln, gamma):
    """Convert PyFR solution of rho, rho(u, v, [w]), E to rho, u, v, [w], p

    The conversion is performed IN PLACE on ``soln``; statement order below
    matters.

    :param soln: PyFR solution array to be converted.
    :param gamma: Ratio of specific heats.
    :type soln: numpy.ndarray of shape [nupts, neles, ndims + 2]
    :type gamma: integer
    """
    # Convert rhou, rhov, [rhow] to u, v, [w]: divide momenta by density.
    soln[...,1:-1] /= soln[...,0,None]

    # Convert total energy to pressure: p = (gamma - 1)*(E - 0.5*rho*|u|^2).
    # The velocity slots already hold u, v, [w] (divided above), so the
    # kinetic term is 0.5 * rho * sum(u_i^2).
    soln[...,-1] -= 0.5 * soln[...,0] * np.sum(soln[...,1:-1] ** 2, axis=2)
    soln[...,-1] *= (gamma - 1)
def _ncells_after_subdiv(ms_inf, divisor):
"""Calculates total number of vtu cells in partition after subdivision
:param ms_inf: Mesh/solninformation. ('ele_type', [npts, nele, ndims])
:type ms_inf: tuple: (str, list)
:rtype: integer
"""
# Catch all for cases where cell subdivision is not performed
if divisor is None:
divisor = 1
# Calculate the number of low-order cells in each high order element
n_sub_ele = divisor ** ms_inf[1][2]
# Pyramids require the further addition of an arithmetic series
if ms_inf[0] == 'pyr':
n_sub_ele += (divisor - 1) * divisor / 2
# Multiply by number of elements
return n_sub_ele * ms_inf[1][1]
def _npts_from_order(order, m_inf, total=True):
"""Calculates the number of nodes in an element of order n
:param order: Shape order of element
:param ms_inf: Mesh/soln information. ('ele_type', [npts, nele, ndims])
:param total: True/False return nodes in element/nodes in partition
:type order: integer
:type ms_inf: tuple: (str, list)
:type total: bool
:rtype: integer
"""
# Calculate number of nodes in element of type and order
if m_inf[0] in ['quad', 'hex']:
gen_npts = (order + 1)**m_inf[1][2]
elif m_inf[0] in ['tri', 'pri']:
gen_npts = (order + 2) * (order + 1)**(m_inf[1][2] - 1) / 2
elif m_inf[0] == 'tet':
gen_npts = (order + 1) * (order + 2) * (order + 3) / 6
elif m_inf == 'pyr':
gen_npts = (order + 1) * (order + 2) * (2 * order + 3) / 6
# Multiply by number of elements
if total:
return gen_npts * m_inf[1][1]
else:
return gen_npts
def _quadcube_con(ndim, nsubdiv):
"""Generate node connectivity for vtu hex/quad in high-order elements
:param ndim: Number of dimensions [2,3]
:param nsubdiv: Number of subdivisions (equal to element shape order)
:type ndim: integer
:type nsubdiv: integer
:rtype: list
"""
# Mapping from pyfr to vtk quad nodes
conbase = np.array([0, 1, nsubdiv + 2, nsubdiv + 1], dtype=int)
# Extend quad mapping to hex mapping if 3-d
if ndim == 3:
conbase = np.hstack((conbase, conbase + (1 + nsubdiv) ** 2))
# Calculate offset of each subdivided element's nodes from std. mapping
nodeoff = np.zeros((nsubdiv,)*ndim)
for dim, off in enumerate(np.ix_(*(xrange(nsubdiv),)*ndim)):
nodeoff += off * (nsubdiv + 1) ** dim
# Tile standard element node ordering mapping, then apply offsets
internal_con = np.tile(conbase, (nsubdiv ** ndim, 1))
internal_con += nodeoff.T.flatten()[:, None]
return np.hstack(internal_con)
def _tri_con(ndim, nsubdiv):
"""Generate node connectivity for vtu triangles in high-order elements
:param ndim: Number of dimensions [2,3]
:param nsubdiv: Number of subdivisions (equal to element shape order)
:type ndim: integer
:type nsubdiv: integer
:rtype: list
"""
conlst = []
for row in xrange(nsubdiv, 0, -1):
# Lower and upper indices
l = (nsubdiv - row)*(nsubdiv + row + 3) // 2
u = l + row + 1
# Base offsets
off = [l, l + 1, u, u + 1, l + 1, u]
# Generate current row
subin = np.ravel(np.arange(row - 1)[...,None] + off)
subex = [ix + row - 1 for ix in off[:3]]
# Extent list
conlst.extend([subin, subex])
return np.hstack(conlst)
def _base_con(etype, ndim, nsubdiv):
    """Select the node connectivity generator for a supported vtu element.

    PyFR high-order elements are subdivided into low-order vtu
    cells to permit visualisation in Paraview. Cells are defined
    in vtk files by specifying connectivity between nodes. To
    reduce memory requirements, it is possible to use a single
    high-order node multiple times in defining low-order cells.

    :param etype: PyFR element type
    :param ndim: Number of dimensions [2,3]
    :param nsubdiv: Number of subdivisions (equal to element shape order)
    :type etype: string
    :type ndim: integer
    :type nsubdiv: integer
    :rtype: list
    """
    # Catch-all for cases where cell subdivision is not performed
    if nsubdiv is None:
        nsubdiv = 1
    # Dispatch on element type
    if etype == 'tri':
        return _tri_con(ndim, nsubdiv)
    elif etype in ('quad', 'hex'):
        return _quadcube_con(ndim, nsubdiv)
    raise RuntimeError('Node connectivity is not yet implemented for %s '
                       'elements.' % etype)
def _write_vtu_header(args, vtuf, m_inf, s_inf, off):
    """Write headers for .vtu piece objects and subsequent appended DataArrays

    The .vtk data format used by the Paraview converter is "appended",
    which requires ASCII headers to be written to define the contents
    of the appended data arrays.
    The function handles both "append" and "divide" high-order data
    output options. The "append" option requires analogous data
    to the "divide" option to define the low-order cells. The
    high-order data is appended to the end of the piece as "CellData".

    :param args: pyfr-postp command line arguments from argparse
    :param vtuf: .vtu output file
    :param m_inf: Tuple of element type and array shape of mesh
    :param s_inf: Tuple of element type and array shape of soln
    :param off: Offset (in bytes) to end of appended data; a one-element
        ndarray so the in-place `off +=` updates are seen by the caller
    :type args: class 'argparse.Namespace'
    :type vtuf: file
    :type m_inf: tuple
    :type s_inf: tuple
    :type off: type 'numpy.ndarray'
    """
    # Set vtk name for float, set size in bytes
    if args.precision == 'single':
        flt = ['Float32', 4]
    else:
        flt = ['Float64', 8]
    # Assign variables dependent on output mode
    if args.divisor is not None:
        nele = _ncells_after_subdiv(m_inf, args.divisor)
        npts = _npts_from_order(args.divisor, m_inf)
    else:
        nele = m_inf[1][1]
        npts = m_inf[1][1] * ParaviewWriter.vtk_to_pyfr[m_inf[0]][2]
    # Standard template for vtk DataArray string
    darray = '<DataArray Name="%s" type="%s" NumberOfComponents="%s" '\
             'format="appended" offset="%d"/>\n'
    # Write headers for vtu elements
    vtuf.write('<Piece NumberOfPoints="%s" NumberOfCells="%s">\n<Points>\n'
               % (npts, nele))
    # Lists of DataArray "names", "types" and "NumberOfComponents"
    nams = ['', 'connectivity', 'offsets', 'types', 'Density', 'Velocity',
            'Pressure']
    typs = [flt[0], 'Int32', 'Int32', 'UInt8'] + [flt[0]] * 3
    ncom = ['3', '', '', '', '1', str(m_inf[1][2]), '1']
    # Calculate size of described DataArrays (in bytes).  offs[0] is a
    # dummy leading entry so sum(offs[:i+1]) yields the start offset of
    # array i; entries 1, 5, 6 and 7 are per-point component counts that
    # are scaled to bytes on the next line.
    offs = np.array([0, 3, 4 * nele * ParaviewWriter.vtk_to_pyfr[m_inf[0]][2],
                     4 * nele, nele, 1, m_inf[1][2], 1])
    offs[[1,5,6,7]] *= flt[1] * npts
    # Write vtk DaraArray headers; the i*4 term accounts for the uint32
    # size prefix written before each appended array
    for i in xrange(len(nams)):
        vtuf.write(darray % (nams[i], typs[i], ncom[i],
                             sum(offs[:i+1]) + i*4 + off))
        # Write ends/starts of vtk file objects
        if i == 0:
            vtuf.write('</Points>\n<Cells>\n')
        elif i == 3:
            vtuf.write('</Cells>\n<PointData>\n')
    # Write end of vtk element data
    vtuf.write('</PointData>\n')
    # Store total offset (bytes) to end of appended data
    off += sum(offs) + 4*len(nams)
    # Write headers for appended high-order data, if required
    if args.divisor is None:
        vtuf.write('<CellData>\n')
        # Size of described high order data (in bytes)
        hooffs = np.array([3, 1, m_inf[1][2], 1]) * nele * flt[1]
        # Number of high-order nodes per ele (less those written as low-order)
        nhpts = s_inf[1][0] - ParaviewWriter.vtk_to_pyfr[m_inf[0]][2]
        # Lists of requisite DataArray "names" and "NumberOfComponents"
        nams = ['HOcoord', 'Density_HOsol', 'Velocity_HOsol', 'Pressure_HOsol']
        ncom = ['3', 1, str(m_inf[1][2]), 1]
        # Equivalent high-order nodes are written to the same array
        for spt in xrange(nhpts):
            # Write DataArrays as named in nams for each node
            for i in xrange(len(nams)):
                vtuf.write(darray % ('_'.join((nams[i], str(spt))), flt[0],
                                     ncom[i], sum(hooffs[:i]) + off + 4*i))
            # Update total byte offset to current end of appended data
            off += sum(hooffs) + 4*len(nams)
        # Write ends of vtk objects
        vtuf.write('</CellData>\n')
    vtuf.write('</Piece>\n')
def _write_vtu_data(args, vtuf, cfg, mesh, m_inf, soln, s_inf):
    """ Writes mesh and solution data for appended (binary) data .vtu files

    :param args: pyfr-postp command line arguments from argparse
    :param vtuf: .vtu output file
    :param cfg: PyFR config file used in the respective simulation
    :param mesh: Single PyFR mesh array (corresponding to soln)
    :param m_inf: Tuple of element type and array shape of mesh
    :param soln: Single PyFR solution array (corresponding to mesh)
    :param s_inf: Tuple of element type and array shape of soln
    :type args: class 'argparse.Namespace'
    :type vtuf: file
    :type cfg: class 'pyfr.inifile.Inifile'
    :type mesh: numpy.ndarray
    :type m_inf: tuple
    :type soln: numpy.ndarray
    :type s_inf: tuple
    """
    # Set numpy name for float; set size in bytes
    if args.precision == 'single':
        flt = ['float32', 4]
    else:
        flt = ['float64', 8]
    # Construct basismap and dimensions (sympy symbols, one per dimension)
    basismap = subclass_map(BaseBasis, 'name')
    dims = sy.symbols('p q r')[:m_inf[1][2]]
    # Set npts for divide/append cases
    if args.divisor is not None:
        npts = _npts_from_order(args.divisor, m_inf, total=False)
    else:
        npts = ParaviewWriter.vtk_to_pyfr[m_inf[0]][2]
    # Generate basis objects for solution and vtu output
    soln_b = basismap[m_inf[0]](dims, m_inf[1][0], cfg)
    vtu_b = basismap[m_inf[0]](dims, npts, cfg)
    # Generate operator matrices to move points and solutions to vtu nodes
    mesh_vtu_op = np.array(soln_b.sbasis_at(vtu_b.spts), dtype=float)
    soln_vtu_op = np.array(soln_b.ubasis_at(vtu_b.spts), dtype=float)
    # Calculate node locations of vtu elements
    pts = np.dot(mesh_vtu_op, mesh.reshape(m_inf[1][0],
                 -1)).reshape(npts, -1, len(dims))
    # Calculate solution at node locations of vtu elements
    sol = np.dot(soln_vtu_op, soln.reshape(s_inf[1][0],
                 -1)).reshape(npts, s_inf[1][1], -1)
    # Append dummy z dimension for points in 2-d (required by Paraview)
    if len(dims) == 2:
        pts = np.append(pts, np.zeros(pts.shape[:-1])[...,None], axis=2)
    # Write element node locations to file
    _write_vtk_darray(pts.swapaxes(0,1), vtuf, flt[0])
    # Prepare vtu cell arrays (connectivity, offsets, types):
    # Generate and extend vtu sub-cell node connectivity across all elements
    vtu_con = np.tile(_base_con(m_inf[0], len(dims), args.divisor),
                      (m_inf[1][1], 1))
    vtu_con += (np.arange(m_inf[1][1]) * npts)[:, None]
    # Generate offset into the connectivity array for the end of each element
    vtu_off = np.arange(_ncells_after_subdiv(m_inf, args.divisor)) + 1
    vtu_off *= ParaviewWriter.vtk_to_pyfr[m_inf[0]][2]
    # Tile vtu cell type numbers
    vtu_typ = np.tile(ParaviewWriter.vtk_to_pyfr[m_inf[0]][0],
                      _ncells_after_subdiv(m_inf, args.divisor))
    # Write vtu node connectivity, connectivity offsets and cell types
    _write_vtk_darray(vtu_con, vtuf, 'int32')
    _write_vtk_darray(vtu_off, vtuf, 'int32')
    _write_vtk_darray(vtu_typ, vtuf, 'uint8')
    # Convert rhou, rhov, [rhow] to u, v, [w] and energy to pressure
    _component_to_physical_soln(sol, cfg.getfloat('constants', 'gamma'))
    # Write Density, Velocity and Pressure
    _write_vtk_darray(sol[...,0].T, vtuf, flt[0])
    _write_vtk_darray(sol[...,1:-1].swapaxes(0,1), vtuf, flt[0])
    _write_vtk_darray(sol[...,-1].swapaxes(0,1), vtuf, flt[0])
    # Append high-order data as CellData if not dividing cells
    if args.divisor is None:
        # Calculate number of points written as low-order, and left to append
        nlpts = ParaviewWriter.vtk_to_pyfr[s_inf[0]][2]
        nhpts = s_inf[1][0] - nlpts
        # Generate basis objects for mesh, solution and vtu output
        mesh_b = basismap[m_inf[0]](dims, m_inf[1][0], cfg)
        # Get location of spts in standard element of solution order
        uord = cfg.getint('solver', 'order')
        ele_spts = get_std_ele_by_name(m_inf[0], uord)
        # Generate operator matrices to move points and solutions to vtu nodes
        mesh_hpts_op = np.array(mesh_b.sbasis_at(ele_spts), dtype=float)
        soln_hpts_op = np.array(mesh_b.ubasis_at(ele_spts), dtype=float)
        # Calculate node locations of vtu elements.
        # NOTE(review): (-1,) + m_inf[1][1:] assumes m_inf[1] is a tuple;
        # the docstrings elsewhere describe it as a list -- confirm.
        pts = np.dot(mesh_hpts_op, mesh.reshape(m_inf[1][0], -1))
        pts = pts.reshape((-1,) + m_inf[1][1:])
        # Append dummy z dimension to 2-d points (required by Paraview)
        if len(dims) == 2:
            pts = np.append(pts, np.zeros(pts.shape[:-1])[...,None], axis=2)
        # Calculate solution at node locations
        sol = np.dot(soln_hpts_op, soln.reshape(s_inf[1][0], -1))
        sol = sol.reshape((-1,) + s_inf[1][1:])
        # Convert rhou, rhov, [rhow] to u, v, [w] and energy to pressure
        _component_to_physical_soln(sol, cfg.getfloat('constants', 'gamma'))
        # Write data arrays, one set of high order points at a time
        for gmshpt in xrange(nhpts):
            # Convert Gmsh node number to equivalent in PyFR
            pt = GmshNodeMaps.to_pyfr[s_inf[0], s_inf[1][0]][gmshpt + nlpts]
            # Write node locations, density, velocity and pressure
            _write_vtk_darray(pts[pt,...], vtuf, flt[0])
            _write_vtk_darray(sol[pt,...,0], vtuf, flt[0])
            _write_vtk_darray(sol[pt,...,1:-1], vtuf, flt[0])
            _write_vtk_darray(sol[pt,...,-1], vtuf, flt[0])
| StarcoderdataPython |
5085310 | <filename>apps/snippet/models/__init__.py<gh_stars>0
from .snippet_models import Snippet
| StarcoderdataPython |
3403447 | # -*- coding: utf-8 -*-
from functools import wraps
from datetime import datetime
import socket
from flask import current_app, request, make_response
# Logging utils
#
def after_request_log(response):
    """Log request/response details (client, agent, protocol, status).

    Intended to be registered as a Flask after_request handler; returns
    *response* unchanged.  The reverse-DNS name of the client is looked
    up via dns_resolve() and rendered as '?' when unavailable.
    """
    name = dns_resolve(request.remote_addr)
    current_app.logger.warn(u"""[client {ip} {host}] {http} "{method} {path}" {status}
Request: {method} {path}
Version: {http}
Status: {status}
Url: {url}
IP: {ip}
Hostname: {host}
Agent: {agent_platform} | {agent_browser} | {agent_browser_version}
Raw Agent: {agent}
""".format(method=request.method,
           path=request.path,
           url=request.url,
           ip=request.remote_addr,
           host=name if name is not None else '?',
           agent_platform=request.user_agent.platform,
           agent_browser=request.user_agent.browser,
           agent_browser_version=request.user_agent.version,
           agent=request.user_agent.string,
           http=request.environ.get('SERVER_PROTOCOL'),
           status=response.status))
    return response
def dns_resolve(ip_addr):
    """Safe DNS query.

    Perform a reverse lookup of *ip_addr* and return the primary host
    name, or None when the address cannot be resolved.
    """
    try:
        return socket.gethostbyaddr(ip_addr)[0]
    except (socket.herror, socket.gaierror):
        # herror: IP not known; gaierror: probably badly formatted IP
        return None
# Cache utils
#
def cache(timeout):
    """Decorator factory: mark a view's response as cacheable for
    *timeout* seconds via the Cache-Control max-age directive."""
    def _cache(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            response = make_response(f(*args, **kwargs))
            response.cache_control.max_age = timeout  # seconds
            return response
        return wrapper
    return _cache
def nocache(f):
    """Decorator: stamp a view's response with headers that disable all
    client and proxy caching."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        response = make_response(f(*args, **kwargs))
        # NOTE(review): a datetime object is assigned directly; the header
        # layer may stringify it in a non-HTTP-date format -- confirm
        response.headers['Last-Modified'] = datetime.now()
        response.headers['Cache-Control'] = ('no-store, no-cache, must-revalidate, '
                                             'post-check=0, pre-check=0, max-age=0')
        response.headers['Pragma'] = 'no-cache'
        response.headers['Expires'] = '-1'
        return response
    return wrapper
| StarcoderdataPython |
# Metadata describing this rules folder to the browser-fuzzing framework.
META = {
    'author': '<NAME>, <NAME>',
    'description': 'Rules for browser fuzzing',
    'type': 'Folder',
    'comments': ['All rules must trigger a() function']
}
| StarcoderdataPython |
11295997 | <filename>barcode_parser.py
'''
Descripttion:
version:
Author: zpliu
Date: 2021-07-15 20:29:47
LastEditors: zpliu
LastEditTime: 2021-08-04 16:09:10
@param:
'''
import pandas as pd
import os
import re
import gzip
import argparse
import logging
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def get_sequence_fasta(sequenceFile: str):
    """Parse a fasta file into a two-column DataFrame.

    Header lines (starting with '>') supply the sequence id, built by
    joining the last three ':'-separated fields of the first
    whitespace-delimited token.  Every subsequent sequence line then
    yields one (seq_id, seq) row.

    args: input fasta file@str
    returns: @pd.DataFrame with columns ['seq_id', 'seq']
    """
    records = []
    with open(sequenceFile, 'r') as handle:
        for line in handle:
            if line.startswith('>'):
                current_id = "".join(line.split(" ")[0].split(":")[-3:])
            else:
                records.append((current_id, line.strip("\n")))
    return pd.DataFrame(records, columns=['seq_id', 'seq'])
def reversed_sequence(sequence):
    """Return the reverse complement of a DNA *sequence*, upper-cased.

    A KeyError is raised for any character outside {A, T, C, G, N},
    matching the original lookup-table behaviour.
    """
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}
    return ''.join(complement[base] for base in reversed(sequence.upper()))
def fastq2fasta(fastqFile, reversedSeq=False):
    """Transform a fastq file into a (seq_id, seq) DataFrame.

    Transparently handles both gzip-compressed and plain-text fastq.

    args:
        -fastqFile: raw sequence fastq file (optionally gzipped)
        -reversedSeq: reverse-complement each sequence (for R2 reads)
    returns: pandas.DataFrame with columns ['seq_id', 'seq']
    """
    # Sniff for gzip by attempting a read; fall back to plain text.
    # gzip raises OSError (BadGzipFile) on a bad magic number and may
    # raise EOFError on a truncated/empty stream.
    try:
        File = gzip.open(fastqFile, 'rt')
        File.readline()
        File.seek(0)
    except (OSError, EOFError):
        File = open(fastqFile, 'rt')
    out = []
    try:
        for index, line in enumerate(File):
            if index % 4 == 0:
                # Header line: strip the trailing /1 or /2 read suffix
                sequenceId = "".join(re.split(r"/[12]", line.split(" ")[0])[0])
            elif index % 4 == 1:
                # Sequence line
                sequenceLine = line.strip("\n")
                if reversedSeq:
                    sequenceLine = reversed_sequence(sequenceLine)
                out.append((sequenceId, sequenceLine))
            # Separator ('+') and quality lines are skipped
    finally:
        # Bug fix: the handle was previously never closed
        File.close()
    return pd.DataFrame(out, columns=['seq_id', 'seq'])
if __name__=="__main__":
    # Command line: R1/R2 fastq files, the vector sequences flanking the
    # barcodes, and the output TSV path
    parser=argparse.ArgumentParser()
    parser.add_argument('-R1',help='R1 sequence file')
    parser.add_argument('-R2',help='R2 sequence file')
    parser.add_argument('-vector5',help='verctor sequence adjacent to 5\' barcode',
        default='TATAAGCGAAAGAAGCATCAGATGGGCAAACAAAGCACCAGTGGTCTAGTGGTAGAATAGTACCCTGCCACGGTACAGACCCGGGTTCGATTCCCGGCTGGTGCA')
    parser.add_argument('-vector3',help='verctor sequence adjacent to 3\' barcode',
        default='TAAAATAAGGCTAGTCCGTTATCAACTTGAAAAAGTGGCACCGAGTCGGTGCTTTTTTGTTTTAGAGCTAGAAATAGCAAGTTAAAATAAGGCTAGTCCGTTTTTAGCGCGTGCATGCCTGCAGGTCCACAAATTCGGGTC')
    parser.add_argument('-o',help='out put file')
    args=parser.parse_args()
    ##################################
    #! barcode id
    ##################################
    # Map each "R1seq-R2seq" barcode pair to a human-readable id such as
    # "R1-3~R2-7" (32 x 12 combinations)
    barcodeDict={}
    R1barcode=["GACGCCTAG","GACGCGCTA","GACGCGGAT","GACGCGTCA","GACGCTACG","GACGTACGA","GACGTCACG",
    "GACGTCGCT","GACGTCGTA","GACGTGGAC","GACGACATG","GACGACGTA","GACGACTGT","GACGAGTCA","GACGATCGA",
    "GACGCACCG","CGAGCCTAG","CGAGCGCTA","CGAGCGGAT","CGAGCGTCA","CGAGCTACG","CGAGTACGA","CGAGTCACG",
    "CGAGTCGCT","CGAGTCGTA","CGAGTGGAC","CGAGACATG","CGAGACGTA","CGAGACTGT","CGAGAGTCA","CGAGATCGA","CGAGCACCG"]
    R2barcode=[
        "ACGTCA","ACCATG","ACGAGC","ACAGCG","ATGATG",
        "ATCATC","ATGGTC","ATGCTG","AGCACG",
        "AGCTCA","AGATGT","AGACGA"
    ]
    for R1index,R1 in enumerate(R1barcode):
        for R2index,R2 in enumerate(R2barcode):
            barcodeKey="R1-"+str(R1index+1)+"~R2-"+str(R2index+1)
            barcodeDict[R1+"-"+R2]=barcodeKey
    #############################
    #!merge sequence
    #############################
    # R2 is reverse-complemented so both reads lie on the same strand,
    # then the reads are joined on the shared read id
    logger.info("read fastq file...")
    R1sequences=fastq2fasta(args.R1,reversedSeq=False)
    R2sequences=fastq2fasta(args.R2,reversedSeq=True)
    logger.info("merge the R1 and R2 file...")
    mergeSequence=pd.merge(left=R1sequences, right=R2sequences, left_on='seq_id', right_on='seq_id')
    # mergeSequence.to_csv("test.fa",header=False,sep="\t",index=False)
    # R1sequences.to_csv("R1.fa",header=False,sep="\t",index=False)
    # R2sequences.to_csv("R2.fa",header=False,sep="\t",index=False)
    #########################################
    # regular expression
    #########################################
    # Capture: 9 bp R1 barcode + vector5 + insert + vector3 + 6 bp R2 barcode
    searchParrernStr='[ATCG]*([ATCG]{}{}.*{}[ATGC]{})[ATCG]*'.format('{9}',args.vector5,args.vector3,'{6}')
    # print(searchParrernStr)
    #! search barcode sequence
    barcodePattern=re.compile(searchParrernStr)
    containBarcodeSequence=mergeSequence.apply(
        lambda x: barcodePattern.match(x['seq_x']+x['seq_y'])[1] if barcodePattern.match(x['seq_x']+x['seq_y']) else None ,
        axis=1)
    barcodesequence=[]
    logger.info("count the barcode numbers")
    for sequence in containBarcodeSequence:
        if sequence:
            #! barcode sequence
            # NOTE(review): if reversed_sequence raises KeyError, `barcode`
            # keeps its value from the previous iteration (or is unbound on
            # the first hit) yet is still appended below -- confirm intended
            try:
                barcode=sequence[0:9]+"-"+reversed_sequence(sequence)[0:6]
            except KeyError:
                print(sequence)
            #! sgRNA sequence
            sgRNAstart=9+len(args.vector5)
            sgRNA=sequence[sgRNAstart:sgRNAstart+20]
            barcodesequence.append((barcode,sgRNA))
        else:
            pass
    #####################################################
    #!barcode and sgRNA sequence
    #####################################################
    # Per barcode pair: total read count plus a per-sgRNA breakdown
    barcodeData=pd.DataFrame(barcodesequence,columns=['barcode','sgRNA'])
    outData=[]
    for barcodesequence in barcodeDict.keys():
        sgRNAData=barcodeData.loc[barcodeData['barcode']==barcodesequence]
        if sgRNAData.empty:
            #! without sequence sgRNA
            pass
        else:
            #! count the sgRNA number
            totalCount=sgRNAData.shape[0]
            sgRNAcount=dict(sgRNAData['sgRNA'].value_counts())
            for key,value in sgRNAcount.items():
                outData.append((
                    barcodeDict[barcodesequence],
                    barcodesequence, #barcode sequence
                    totalCount, #barcode count
                    key, #sgRNA sequence
                    value #sgRNA count
                ))
    if not outData:
        logger.warning("Sorry no barcode were detect in the sequence file!")
    outData=pd.DataFrame(outData,columns=['barcodeID','barcodesequence','barcodeCount','sgRNAsequence','sgRNACount'])
    outData.to_csv(args.o,header=True,index=False,sep="\t")
    logger.info("complete!")
321472 | <reponame>zhengfaning/vnpy_andy<gh_stars>1-10
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# noinspection PyUnresolvedReferences
from win32api import *
# Try and use XP features, so we get alpha-blending etc.
try:
from winxpgui import *
except ImportError:
# noinspection PyUnresolvedReferences
from win32gui import *
# noinspection PyUnresolvedReferences
import win32con
import sys
import os
import struct
import time
# noinspection PyUnresolvedReferences
import win32com.client as com
# noinspection PyUnresolvedReferences
import win32event as w32e
# noinspection PyUnresolvedReferences
import mmapfile as mmf
# noinspection PyUnresolvedReferences
import win32api as win_api
from ..UtilBu.ABuStrUtil import to_native_str
def show_msg(title, msg):
    """
    Windows pop-up notification, mainly used during long-running,
    time-consuming tasks to surface important messages.

    :param title: notification title
    :param msg: notification message body
    """
    MainWindow(to_native_str(title), to_native_str(msg))
def socket_bind_recv(socket_fn, cmd_handler):
    """
    Inter-process communication for non-BSD systems: receive and handle
    messages using Windows global shared memory.  The function name is
    kept identical to the BSD socket-based interface.

    Blocks forever: each iteration creates/waits on a named global event,
    reads up to 1024 bytes from the shared memory map and dispatches the
    decoded command to *cmd_handler*.

    :param socket_fn: shared memory file name
    :param cmd_handler: command handler, callable
    """
    if not callable(cmd_handler):
        print('socket_bind_recv cmd_handler must callable!')
    while True:
        global_fn = 'Global\\{}'.format(socket_fn)
        event = w32e.CreateEvent(None, 0, 0, global_fn)
        event_mmap = mmf.mmapfile(None, socket_fn, 1024)
        w32e.WaitForSingleObject(event, -1)
        socket_cmd = event_mmap.read(1024).decode()
        # Pass the received command to the external handler
        cmd_handler(socket_cmd)
        event_mmap.close()
        win_api.CloseHandle(event)
def socket_send_msg(socket_fn, msg):
    """
    Inter-process communication for non-BSD systems: send a message via
    Windows global shared memory.  The function name is kept identical
    to the BSD socket-based interface.

    :param socket_fn: shared memory name
    :param msg: message payload; note the receiver decodes bytes, so
        callers are expected to pass bytes (or pre-encoded data)
    """
    global_fn = 'Global\\{}'.format(socket_fn)
    event = w32e.OpenEvent(w32e.EVENT_ALL_ACCESS, 0, global_fn)
    event_mmap = mmf.mmapfile(None, socket_fn, 1024)
    # Signal the waiting receiver, then write the payload into the map
    w32e.SetEvent(event)
    event_mmap.write(msg)
    event_mmap.close()
    win_api.CloseHandle(event)
def fold_free_size_mb(folder):
    """
    Query the remaining disk space for *folder* under Windows.

    NOTE(review): despite the function name and the original docstring
    claiming MB, the byte count is divided by 1024 three times, which
    yields GB -- confirm against callers before renaming or changing.

    :param folder: target directory
    :return: float, bytes / 1024^3
    """
    return drive_free_space(folder) / 1024 / 1024 / 1024
def drive_free_space(drive):
    """Return the free space (in bytes) of *drive* using the Windows
    Scripting.FileSystemObject COM interface, or 0 on any failure
    (e.g. a non-existent drive or an unavailable COM subsystem)."""
    try:
        fso = com.Dispatch("Scripting.FileSystemObject")
        drv = fso.GetDrive(drive)
        return drv.FreeSpace
    # Catch Exception rather than a bare except, which would also have
    # swallowed KeyboardInterrupt/SystemExit
    except Exception:
        return 0
def max_drive():
    """Return the mounted volume (A: .. Z:) with the most free space.

    If the winner is the system (C:) drive, the user's home directory is
    returned instead of the drive root.
    """
    space_array = dict()
    # chr(65)..chr(90) is 'A'..'Z'
    for i in range(65, 91):
        vol = chr(i) + '://'
        if os.path.isdir(vol):
            space_array[vol] = drive_free_space(vol)
    max_v = max(zip(space_array.values(), space_array.keys()))[1]
    # Bug fix: volumes are built from uppercase letters, so the old
    # lowercase startswith('c') test could never match
    if max_v.lower().startswith('c'):
        return os.path.expanduser('~')
    return max_v
# noinspection PyClassHasNoInit
class PyNOTIFYICONDATA:
    """Hand-rolled NOTIFYICONDATA struct for Shell_NotifyIcon.

    Mirrors the Win32 NOTIFYICONDATA layout field-by-field so the newer
    balloon-tip members (szInfo, szInfoTitle, dwInfoFlags), which the
    wrapped win32gui structure does not expose, can be passed to the raw
    ctypes Shell_NotifyIconA call.

    NOTE(review): pack() feeds str defaults into the "...s" struct
    fields, which only works on Python 2 (Python 3 requires bytes) --
    confirm the targeted interpreter.
    """
    _struct_format = (
        "I" # DWORD cbSize;
        "I" # HWND hWnd;
        "I" # UINT uID;
        "I" # UINT uFlags;
        "I" # UINT uCallbackMessage;
        "I" # HICON hIcon;
        "128s" # TCHAR szTip[128];
        "I" # DWORD dwState;
        "I" # DWORD dwStateMask;
        "256s" # TCHAR szInfo[256];
        "I" # union {
        # UINT uTimeout;
        # UINT uVersion;
        # } DUMMYUNIONNAME;
        "64s" # TCHAR szInfoTitle[64];
        "I" # DWORD dwInfoFlags;
        # GUID guidItem;
    )
    _struct = struct.Struct(_struct_format)
    # Class-level defaults; __setattr__ below restricts instances to
    # exactly these field names
    hWnd = 0
    uID = 0
    uFlags = 0
    uCallbackMessage = 0
    hIcon = 0
    szTip = ''
    dwState = 0
    dwStateMask = 0
    szInfo = ''
    uTimeoutOrVersion = 0
    szInfoTitle = ''
    dwInfoFlags = 0
    def pack(self):
        """Serialise the fields into the binary NOTIFYICONDATA layout;
        the first member is the structure's own size (cbSize)."""
        return self._struct.pack(
            self._struct.size,
            self.hWnd,
            self.uID,
            self.uFlags,
            self.uCallbackMessage,
            self.hIcon,
            self.szTip,
            self.dwState,
            self.dwStateMask,
            self.szInfo,
            self.uTimeoutOrVersion,
            self.szInfoTitle,
            self.dwInfoFlags)
    def __setattr__(self, name, value):
        """Reject assignments to names outside the declared field set."""
        # avoid wrong field names
        if not hasattr(self, name):
            raise NameError(name)
        self.__dict__[name] = value
# noinspection PyUnresolvedReferences,PyUnusedLocal
class MainWindow:
    """Throwaway hidden window that shows a tray balloon notification.

    Constructing an instance registers a window class, creates the
    window, adds a tray icon, shows the balloon with *title*/*msg*,
    sleeps 20 seconds so the balloon stays visible, then destroys the
    window (removing the icon via WM_DESTROY -> on_destroy).
    """
    def __init__(self, title, msg):
        """Create the window, display the balloon and tear down.

        :param title: balloon title text
        :param msg: balloon body text
        """
        message_map = {
            win32con.WM_DESTROY: self.on_destroy,
        }
        # Register the Window class.
        wc = WNDCLASS()
        hinst = wc.hInstance = GetModuleHandle(None)
        wc.lpszClassName = "PythonTaskbarDemo"
        wc.lpfnWndProc = message_map # could also specify a wndproc.
        class_atom = RegisterClass(wc)
        # Create the Window.
        style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
        self.hwnd = CreateWindow(class_atom, "Taskbar Demo", style,
                                 0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT,
                                 0, 0, hinst, None)
        UpdateWindow(self.hwnd)
        # Prefer the interpreter's icon; fall back to the stock app icon
        icon_path_name = os.path.abspath(os.path.join(sys.prefix, "pyc.ico"))
        icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
        # noinspection PyBroadException
        try:
            hicon = LoadImage(hinst, icon_path_name, win32con.IMAGE_ICON, 0, 0, icon_flags)
        except:
            hicon = LoadIcon(0, win32con.IDI_APPLICATION)
        flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
        nid = (self.hwnd, 0, flags, win32con.WM_USER + 20, hicon, "Balloon tooltip demo")
        Shell_NotifyIcon(NIM_ADD, nid)
        self.show_balloon(title, msg)
        # Keep the process alive long enough for the balloon to be seen
        time.sleep(20)
        DestroyWindow(self.hwnd)
    def show_balloon(self, title, msg):
        """Display a balloon tip on the tray icon via the raw ctypes
        Shell_NotifyIconA call (the wrapped structure lacks the fields)."""
        # For this message I can't use the win32gui structure because
        # it doesn't declare the new, required fields
        nid = PyNOTIFYICONDATA()
        nid.hWnd = self.hwnd
        nid.uFlags = NIF_INFO
        # type of balloon and text are random
        nid.dwInfoFlags = NIIF_INFO
        nid.szInfo = msg
        nid.szInfoTitle = title
        # Call the Windows function, not the wrapped one
        from ctypes import windll
        shell_notify_icon = windll.shell32.Shell_NotifyIconA
        shell_notify_icon(NIM_MODIFY, nid.pack())
    def on_destroy(self, hwnd, msg, wparam, lparam):
        """WM_DESTROY handler: remove the tray icon and quit the loop."""
        nid = (self.hwnd, 0)
        Shell_NotifyIcon(NIM_DELETE, nid)
        # Terminate the app.
        PostQuitMessage(0)
| StarcoderdataPython |
9760013 | <reponame>Gwarglemar/PythonExercises
class Card():
    """A single playing card, identified by a suit and a rank."""

    def __init__(self, suit, num):
        """
        :param suit: suit name, e.g. 'Spades'
        :param num: rank, e.g. 'Ace' or 7
        """
        self.suit = suit
        self.num = num

    def __str__(self):
        # str.format handles non-string ranks (e.g. ints); the original
        # '+' concatenation raised TypeError for anything but str
        return '{0} of {1}'.format(self.num, self.suit)

    __repr__ = __str__
| StarcoderdataPython |
4837385 | <reponame>daojunL/Art-Event-Gallery
# Generated by Django 2.1.5 on 2020-04-21 04:09
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: reset the Artist model's Meta
    options to just managed=True, dropping any previous options."""
    dependencies = [
        ('dashboard', '0005_auto_20200421_0331'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='artist',
            options={'managed': True},
        ),
    ]
| StarcoderdataPython |
3228691 | <filename>checks/checksls.py
import yaml
import sys
from jinja2 import Environment, PackageLoader, Template
import os
def check(fn):
    """Run pylint in errors-only mode on *fn*; raise unless the wait
    status is 0 (clean) or 512 (pylint exit code 2: check failed but
    the tool itself ran)."""
    print("Consider {0}".format(fn))
    # os.system returns the raw wait status (exit code << 8 on POSIX)
    r = os.system("~/.local/bin/pylint -E --rcfile=pylint.rc %s" % fn)
    if r not in (0, 512) : # 512= check failed
        raise Exception('fail %s' % r)
# Directory paths already emitted as file_roots entries (shared with proc)
seen = {}
# Emit the head of a salt file_roots YAML document; directory entries
# are appended below by proc() as os.walk() visits them
print """
file_roots:
  base:
"""
def proc(root):
    """Emit *root* as a file_roots list entry unless any path prefix of
    it was already seen; record the path and its subdirectories in the
    module-level `seen` dict.

    NOTE(review): reads the module-level `dirs` variable that the
    os.walk() loop below rebinds on every iteration -- works only when
    called from inside that loop; confirm before reuse.
    """
    # check if all parts of path are not seen
    parts = os.path.split(root)
    #print 'check parts', parts
    np = []
    for p in parts :
        np.append(p)
        x = '/'.join(np)
        if x in seen :
            return False
        else:
            #print "new part ", p, "joined", x, "newpath",np
            pass
    print """ - """ + os.path.abspath(root)
    seen[root]=root
    #print "ROOT",root
    # Mark every immediate subdirectory as covered by this root
    for x in dirs :
        d= os.path.join(root,x)
        seen[d]=root
# Walk the tree; any directory containing at least one .sls file is
# registered via proc() (which also reads this loop's `dirs` variable)
for root, dirs, files in os.walk("./"):
    for filen in files:
        if filen.endswith(".sls"):
            proc(root)
| StarcoderdataPython |
5102374 | <reponame>wang-junjian/scrapy-hub
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
import os
import logging
class ChinesePipeline(object):
    """Scrapy item pipeline that downloads each item's mp3 into mp3_dir,
    naming files by the item's pinyin and skipping existing downloads."""
    # Output directory for downloaded audio (relative to the crawl cwd)
    mp3_dir = 'mp3/'
    def process_item(self, item, spider):
        """Schedule the item's mp3 for download unless already present.

        Returns the item directly when the file exists; otherwise
        returns a Deferred so scrapy waits for the download to finish.
        """
        logging.info(item)
        mp3_url = item['mp3_url']
        if not os.path.exists(self.mp3_dir):
            os.makedirs(self.mp3_dir)
        mp3_file = self.__get_pinyin_mp3_path(item['pinyin'])
        if os.path.exists(mp3_file):
            logging.info('downloaded mp3: ' + mp3_file)
            return item
        logging.info('downloading mp3: ' + mp3_url)
        # Download through the crawl engine; return_item fires on both
        # success and failure (addBoth) and hands the item back to scrapy
        request = scrapy.Request(mp3_url)
        dfd = spider.crawler.engine.download(request, spider)
        dfd.addBoth(self.return_item, item)
        return dfd
    def return_item(self, response, item):
        """Deferred callback: persist the body of a 200 response to disk
        and return the item either way."""
        if response.status != 200:
            return item
        mp3_file = self.__get_pinyin_mp3_path(item['pinyin'])
        with open(mp3_file, "wb") as f:
            f.write(response.body)
        logging.info('save mp3: ' + mp3_file)
        return item
    def __get_pinyin_mp3_path(self, pinyin):
        """Build the output path 'mp3/<pinyin>.mp3' for a given pinyin."""
        return self.mp3_dir + pinyin + '.mp3'
11334643 | import cv2
import torch
import numpy as np
import sys
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from .baseline.utils.tensorboard import TensorBoard
from .baseline.Renderer.model import FCN
from .baseline.Renderer.stroke_gen import *
from argparse import Namespace
# Pin RNG seeds and force deterministic cuDNN kernels (autotuning off)
# so benchmark runs are reproducible
torch.manual_seed(1337)
np.random.seed(1337)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Model:
    """Benchmark harness for the neural stroke renderer (FCN).

    Builds a batch of random 10-parameter strokes, rasterises each with
    draw() as the regression target, and trains the network to
    reproduce the rasterisation using MSE loss and Adam.
    """
    def __init__(self, device=None, jit=False):
        """
        :param device: torch device for the net and data (e.g. 'cpu')
        :param jit: when True, compile the net with torch.jit.script
        """
        self.device = device
        self.jit = jit
        self.criterion = nn.MSELoss()
        net = FCN()
        self.step = 0
        # Namespace mimics argparse output expected elsewhere
        self.opt = Namespace(**{
            'batch_size': 64,
            'debug': '',
            'script': False,
        })
        # One random 10-parameter stroke per batch entry; draw() gives
        # the ground-truth rasterisation the net must reproduce
        train_batch = []
        self.ground_truth = []
        for i in range(self.opt.batch_size):
            f = np.random.uniform(0, 1, 10)
            train_batch.append(f)
            self.ground_truth.append(draw(f))
        train_batch = torch.tensor(train_batch).float()
        self.ground_truth = torch.tensor(self.ground_truth).float()
        if self.jit:
            net = torch.jit.script(net)
        net = net.to(self.device)
        train_batch = train_batch.to(self.device)
        self.ground_truth = self.ground_truth.to(self.device)
        self.module = net
        self.example_inputs = train_batch
        self.optimizer = optim.Adam(self.module.parameters(), lr=3e-6)
    def get_module(self):
        """Return the network and its example inputs (as a one-item list)."""
        return self.module,[self.example_inputs]
    def train(self, niter=1):
        """Run *niter* optimisation steps against the fixed batch."""
        self.module.train()
        for _ in range(niter):
            gen = self.module(self.example_inputs)
            self.optimizer.zero_grad()
            loss = self.criterion(gen, self.ground_truth)
            loss.backward()
            self.optimizer.step()
    def eval(self, niter=1):
        """Run *niter* forward passes in eval mode.

        NOTE: gradients are not explicitly disabled (no torch.no_grad()
        context); outputs are simply discarded.
        """
        self.module.eval()
        for _ in range(niter):
            self.module(self.example_inputs)
if __name__ == '__main__':
    # Smoke-run: 100 training steps on CPU; since step%100 == 0 is only
    # true at step 0, eval() runs exactly once (before any training)
    m = Model(device='cpu', jit=False)
    module,example_inputs = m.get_module()
    while m.step < 100:
        m.train(niter=1)
        if m.step%100 == 0:
            m.eval(niter=1)
        m.step += 1
| StarcoderdataPython |
1602483 | # (c) Copyright 2022 <NAME>
#
# Short test program for arduino_dbg.io.
import arduino_dbg.io as io
import os
if __name__ == "__main__":
    # Exercise the bidirectional pipe: fork, exchange one line in each
    # direction between child (left end) and parent (right end)
    (left, right) = io.make_bidi_pipe()
    pid = os.fork()
    if pid == 0:
        # Child
        left.open()
        s = "Major Tom to ground control\n"
        left.write(s.encode("utf-8"))
        ln = left.readline().decode("utf-8")
        print(f'Child received: {ln}')
        left.close()
    else:
        # Parent
        right.open()
        ln2 = right.readline().decode("utf-8")
        print(f'Parent received: {ln2}')
        s2 = "ground control to Major Tom\n"
        right.write(s2.encode("utf-8"))
        right.close()
| StarcoderdataPython |
12843857 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 12:16:14 2018
@author: gdavila
This is a example to get Full Band Channel information
On Docsis 3.0 Full Band Channels is a feature that allows to get detailed info
about the power distribution of the espectrum
"""
import docsisMon.cmDevices as cmDevices
from docsisMon.snmp import SnmpError
import time
import ggplot
import sys
def asint(s):
    """Sort key: numeric strings order by value ahead of everything else;
    non-numeric strings sort last (maxsize) and then lexicographically."""
    try:
        return int(s), ''
    except ValueError:
        return sys.maxsize, s
def format_fb_data(data):
    """Decode raw full-band-capture hex blobs into spectrum axes.

    Each value in *data* is a hex string: characters 2..34 hold the
    center frequency, span, sample count and resolution bandwidth as
    four 32-bit fields; amplitude samples (16-bit, hundredths of a dB,
    values above 0x7fff negative) start at character offset 42.

    :returns: (frequencies, powers) lists, or None when *data* is None
    """
    if data is None:
        return None
    spectrum_freq = []
    spectrum_pot = []
    for key in sorted(data, key=asint):
        blob = data[key]
        center_frec = int('0x' + blob[2:10], 16)
        span = int('0x' + blob[10:18], 16)
        samples = int('0x' + blob[18:26], 16)
        resolution_bw = int('0x' + blob[26:34], 16)
        offset = 42
        for i in range(samples):
            frec = (center_frec - span/2) + i*resolution_bw
            raw = int('0x' + blob[offset + i*4:offset + i*4 + 4], 16)
            # 16-bit sign convention: values above 0x7fff wrap negative
            value = (raw - 65535)/100 if raw > 32767 else raw/100
            spectrum_freq.append(frec)
            spectrum_pot.append(round(value, 2))
    return spectrum_freq, spectrum_pot
def main():
    """Connect to a hard-coded cable modem, print its identity, then
    configure a full-band capture sweep and poll (up to ~10 minutes)
    until spectrum data arrives.

    :returns: format_fb_data(data) tuple on success.
        NOTE(review): when an SnmpError is caught there is no return, so
        None comes back and the top-level `freq, pot = main()` unpacking
        below would fail -- confirm intended.
    """
    try:
        myIP = '10.218.49.38'
        myCm = cmDevices.Cm(myIP)
        myCmModel = myCm.getModel()
        print ("CM IP:\t", myIP)
        print ("CM Model:\t", myCmModel)
        print ("CM Firmware:\t", myCm.getSw_rev())
        #Accesing to Docsis Interfaces
        myCmDocIf = myCm.DocsIf()
        #Getting the MAC address of Docsis Interfaces (CM)
        myCmMac = myCmDocIf.getMac()
        print ("CM Mac:\t", myCmMac)
        #Gettingfull band capture information;
        print ("Full Band Capture Information:")
        print("Modelo \t\tTiempo Espera SET/GET(s) \tTiempo de Descarga FB data(s)\t Result\t\t Nro Muestras")
        for i in range(1,2):
            data = {}
            fbc = myCm.fbc()
            fbc.turnOff()
            time.sleep(2)
            # Sweep parameters: 50 MHz - 1 GHz in 10 MHz spans
            fbc.inactivityTimeout = 300
            fbc.firstFrequency = 50000000
            fbc.lastFrequency = 1000000000
            fbc.span = 10000000
            fbc.binsPerSegment = 250
            fbc.noisebandwidth = 150
            fbc.numberOfAverages = 1
            fbc.config()
            timeConfig = time.time()
            result = 'data OK'
            timeGet = time.time()
            data = fbc.get()
            timeResponse = time.time()
            # Poll once per second until data arrives or 600 s elapse
            while(data == {}):
                time.sleep(1)
                if (time.time() - timeConfig > 600): break
                timeGet = time.time()
                data = fbc.get()
                timeResponse = time.time()
            print(str(i)+" "+myCm.getModel() +'\t\t\t' + str(round(timeGet-timeConfig)) + \
                '\t\t\t'+ str(round(timeResponse - timeGet)) + '\t\t\t'+ str(result)+'\t\t'+ str(len(format_fb_data(data)[0])))
        return(format_fb_data(data))
    except SnmpError as e:
        print(e)
        result = e
freq, pot= main()
ggplot.qplot(freq[0:], pot[0:], geom="line")
| StarcoderdataPython |
11303370 | import os
import glob
import datetime
import calendar
def configuration_file(ncpu=16, start_date='19990101', end_date='19990131'):
    """Return the text of a PBS batch script that processes radar data
    between *start_date* and *end_date* (YYYYMMDD strings) on *ncpu* CPUs,
    requesting 2 GB of memory per CPU."""
    memory_gb = int(ncpu * 2)
    return f"""#!/bin/bash
#PBS -P en0
#PBS -q normal
#PBS -l walltime=15:00:00
#PBS -l mem={memory_gb}GB
#PBS -l wd
#PBS -l ncpus={ncpu}
#PBS -lother=gdata2
source activate radar
python rajin_multiproc_processing.py -s {start_date} -e {end_date} -j {ncpu}
"""
# Emit one PBS job script per month of CPOL data (2014-2016), skipping
# months with no data directory and the August-September gap.
for year in range(2014, 2017):
    for month in range(1, 13):
        # skip August and September
        if month > 7 and month < 10:
            continue
        indir = "/g/data2/rr5/vhl548/CPOL_level_1"
        indir += "/%i/%i%02i" % (year, year, month)
        dirlist = glob.glob(indir + "*")
        print(indir)
        if len(dirlist) == 0:
            continue
        # last day of the month, for the job's end date
        _, ed = calendar.monthrange(year, month)
        sdatestr = "%i%02i%02i" % (year, month, 1)
        edatestr = "%i%02i%02i" % (year, month, ed)
        f = configuration_file(16, sdatestr, edatestr)
        fname = "qlevel1b_%i%02i.pbs" % (year, month)
        with open(fname, 'w') as fid:
            fid.write(f)
| StarcoderdataPython |
287181 | #############################################################################
# Dynamic time warping for 2D images
import sys
from java.awt import *
from java.awt.image import *
from java.io import *
from java.lang import *
from java.util import *
from java.nio import *
from javax.swing import *
from edu.mines.jtk.awt import *
from edu.mines.jtk.dsp import *
from edu.mines.jtk.io import *
from edu.mines.jtk.mosaic import *
from edu.mines.jtk.sgl import *
from edu.mines.jtk.util import *
from edu.mines.jtk.util.ArrayMath import *
from dtw import *
#from dtw.Util import *
#############################################################################
#pngDir = "./png"
pngDir = './migpics/Layered/'  # output directory for PNG figures
#pngDir = './migpics/Marmousi/'
saveDir = './tmpdata/'  # directory for intermediate .dat arrays
seed = abs(Random().nextInt()/1000000)
seed = 580
seed = 588  # NOTE: overrides the two assignments above; seed is fixed at 588
#print "seed =",seed
nrms = 2.00  # rms noise level (relative to signal rms) added by addNoise
npass = 5  # how many of the shiftsN estimation passes are attempted
stretchMax = 0.25  # stretch limit passed to DynamicWarping.setStretchMax
showLsf = False  # also compute shifts with LocalShiftFinder for comparison?
smoothShifts = False  # Gaussian-smooth the estimated shifts in smooth()?
nv = 51 # number of datasets
_s = 1 # number of shifts
J1 = zerofloat(nv)  # objective values 0.5*sum((f-h)^2), one per model
J2 = zerofloat(nv)  # objective values 0.5*sum(u^2*h^2)
J3 = zerofloat(nv)  # objective values 0.5*sum(u^2)
J4 = zerofloat(nv)  # objective values 0.5*sum((f-g)^2)
J41 = zerofloat(nv)  # 1-D variant of J4 (writer only called from commented code)
y0 = 150  # row/trace index used by the 1-D objective and plot helpers
def main(args):
    """Entry point: load the stored objective-function arrays, run the
    iterative image-shift experiment, and save the updated arrays."""
    #go1Dwrite()
    #goTestImages()
    #goFaultImages()
    #goTestShifts() #smax = 0.20, nrms = 2.0
    #goFaultShifts() #smax = 0.25, npass = 3
    readJs()
    goImageShift2()
    writeJs()
def goImageShift2():
    """For each shift pass s (1.._s) and each velocity model it in [5,10):
    warp the image for model it toward the reference image (model 50) up to
    s times in succession via goImageShift, then plot the objective curves."""
    for s in range(1,_s+1):
        for it in range(5,10):
            #f = readMMImage(25)
            #g = readMMImage(it)
            f = readLayImage(50)  # reference image (true model)
            g = readLayImage(it)  # image for the trial velocity model
            _shift = s
            h1 = goImageShift(f,g,it,_shift)
            if (s==2 or s>2):
                _shift = s
                h2 = goImageShift(f,h1,it,_shift)
            if (s==3 or s>3):
                _shift = s
                h3 = goImageShift(f,h2,it,_shift)
            if (s==4 or s>4):
                _shift = s
                h4 = goImageShift(f,h3,it,_shift)
            if s==5:
                _shift = s
                h5 = goImageShift(f,h4,it,_shift)
        goPlotJ(s)
    # Dead code kept as a no-op string literal in the original:
    """
    g = zerofloat(len(f[0]),len(f))
    readImage("h5",g)
    h1 = goImageShift(f,g,it)
    h2 = goImageShift(f,h1,it)
    h3 = goImageShift(f,h2,it)
    h4 = goImageShift(f,h3,it)
    h5 = goImageShift(f,h4,it)
    #writeImage("h5",h5)
    writeImage("h10",h5)
    """
def goPlotJ(s):
    """Plot each stored objective-function curve for shift pass *s*."""
    for J,tag in ((J1,'F-H'),(J2,'U*H'),(J3,'U'),(J4,'F-G')):
        plotJ(J,s,png=tag)
    #plotJ(J41,s,png='F-G_1D')
def writeJs():
    """Save every objective-function array to saveDir as <name>.dat."""
    for name,J in (('J1',J1),('J2',J2),('J3',J3),('J4',J4)):
        writeImage(name,J)
def readJs():
    """Load every objective-function array from saveDir."""
    for name,J in (('J1',J1),('J2',J2),('J3',J3),('J4',J4)):
        readJ(name,J)
def readJ(name,J):
    """Fill array *J* with floats read from saveDir/<name>.dat."""
    stream = ArrayInputStream(saveDir+name+'.dat')
    stream.readFloats(J)
    stream.close()
def goImageShift(f,g,it,_shift):
    """Warp image g toward reference image f with dynamic warping, record
    the objective-function values for velocity model *it*, plot the
    intermediate images, and return the aligned image h.
    NOTE(review): h is only assigned when npass >= 5 (npass is 5 here);
    for smaller npass the final return would raise NameError."""
    nm = ":v"+str(it)  # suffix used in plot titles
    shift = 25
    sigma = shift
    ml = 2*shift  # maximum lag for the dynamic warping
    uclips = (-shift,shift)
    dw = DynamicWarping(-ml,ml)
    dw.setStretchMax(stretchMax)
    #fclips = (min(f),max(f))
    fclips = None
    plot(f,"f"+nm,fclips,png='f'+str(it))#+'_shift='+str(_shift))
    plot(g,"g"+nm,fclips,png='g'+str(it))#+'_shift='+str(_shift))
    plot(sub(f,g),"dc-do"+nm,fclips,png='f-g'+str(it))#+'_shift='+str(_shift))
    #plot1(f[y0],"f"+nm)
    #plot1(g[y0],"g"+nm)
    #plot1(sub(f[y0],g[y0]),"dc-do"+nm)
    #objectiveFunctionFG1(f,g,it)
    #h = 1
    e = dw.computeErrors(f,g)
    if npass>=5:
        u = shifts12121(dw,e)
        print "errors u12121: sum =",dw.sumErrors(e,u)
        plot(u,"u12121"+nm,uclips,png='u'+str(it))#+'_shift='+str(_shift))
        objectiveFunctionU(u,it)
        h = align(u,f,g)
        objectiveFunctionFH(f,h,it)
        objectiveFunctionFG(f,g,it)
        objectiveFunctionUH(u,h,it)
        plot(h,"h"+nm,fclips,png='h'+str(it))#+'_shift='+str(_shift))
        plot(sub(f,h),"dc-h"+nm,fclips,png='f-h'+str(it))#+'_shift='+str(_shift))
        plot(mul(u,g),"u*g"+nm,fclips,png='u*g'+str(it))#+'_shift='+str(_shift))
    return h
def readMMImage(t):
    """Read Marmousi migration image number *t* (421x5101 floats) from the
    migclass data directory."""
    ddir = "../../../../../data/migclass/"
    num = str(t)
    n1,n2 = 421,5101
    v = zerofloat(n2,n1)
    fileName = ddir+"marmousiTest-v"+num+".dat"
    ais = ArrayInputStream(fileName)
    ais.readFloats(v)
    ais.close()
    return v
def readLayImage(t):
    """Read layered-model migration image number *t* (301x3501 floats) from
    the migclass data directory."""
    ddir = "../../../../../data/migclass/"
    num = str(t)
    n1,n2 = 301,3501
    v = zerofloat(n2,n1)
    fileName = ddir+"layered-sineSourceTapered2-"+num+".dat"
    ais = ArrayInputStream(fileName)
    ais.readFloats(v)
    ais.close()
    return v
def objectiveFunctionFH(f,h,it):
    """Store J1[it] = 0.5*sum((f-h)^2) over all samples."""
    n1,n2 = len(f[0]),len(f)
    total = 0
    for i2 in range(n2):
        for i1 in range(n1):
            diff = f[i2][i1]-h[i2][i1]
            total += diff*diff
    J1[it] = 0.5*total
def objectiveFunctionUH(u,h,it):
    """Store J2[it] = 0.5*sum(u^2 * h^2) over all samples."""
    n1,n2 = len(u[0]),len(u)
    total = 0
    for i2 in range(n2):
        for i1 in range(n1):
            uu = u[i2][i1]*u[i2][i1]
            hh = h[i2][i1]*h[i2][i1]
            total += uu*hh
    J2[it] = 0.5*total
def objectiveFunctionU(u,it):
    """Store J3[it] = 0.5*sum(u^2) over all samples."""
    n1,n2 = len(u[0]),len(u)
    total = 0
    for i2 in range(n2):
        for i1 in range(n1):
            uu = u[i2][i1]*u[i2][i1]
            total += uu
    J3[it] = 0.5*total
def objectiveFunctionFG(f,g,it):
    """Store J4[it] = 0.5*sum((f-g)^2) over all samples."""
    n1,n2 = len(f[0]),len(f)
    total = 0
    for i2 in range(n2):
        for i1 in range(n1):
            diff = f[i2][i1]-g[i2][i1]
            total += diff*diff
    J4[it] = 0.5*total
def objectiveFunctionFG1(f,g,it):
    """Store J41[it] = 0.5*sum((f[y0]-g[y0])^2) along the single trace y0."""
    n = len(f[0])
    total = 0
    for i in range(n):
        diff = f[y0][i]-g[y0][i]
        total += diff*diff
    J41[it] = 0.5*total
def goTestShifts():
    """Estimate shifts between a synthetic image and its warped copy using
    1 to npass alternating accumulation passes, plotting each estimate and
    finally the aligned image."""
    shift = 16
    sigma = shift
    ml = 2*shift  # maximum lag for the dynamic warping
    uclips = (-shift,shift)
    dw = DynamicWarping(-ml,ml)
    dw.setStretchMax(stretchMax)
    f,g,s = makeTestImages()
    fclips = (min(f),max(f))
    plot(f,"f",fclips)
    plot(g,"g",fclips)
    e = dw.computeErrors(f,g)
    if npass>=1:
        u = shifts1(dw,e);
        print "errors u1: sum =",dw.sumErrors(e,u)
        plot(u,"u1",uclips)
    if npass>=2:
        u = shifts12(dw,e)
        print "errors u12: sum =",dw.sumErrors(e,u)
        plot(u,"u12",uclips)
    if npass>=3:
        u = shifts121(dw,e)
        print "errors u121: sum =",dw.sumErrors(e,u)
        plot(u,"u121",uclips)
    if npass>=4:
        u = shifts1212(dw,e)
        print "errors u1212: sum =",dw.sumErrors(e,u)
        plot(u,"u1212",uclips)
    if npass>=5:
        u = shifts12121(dw,e)
        print "errors u12121: sum =",dw.sumErrors(e,u)
        plot(u,"u12121",uclips)
    if showLsf:
        # Compare against a local-shift-finder estimate.
        v = copy(u)
        LocalShiftFinder(ml,sigma).find1(-ml,ml,f,g,v)
        print "errors u lsf: sum =",dw.sumErrors(e,v)
        plot(v,"u lsf",uclips)
    if s:
        print "errors s: sum =",dw.sumErrors(e,s)
        plot(s,"s")
    # NOTE(review): u must exist here, so npass is assumed >= 1.
    h = align(u,f,g)
    plot(h,"h",fclips)
def goTestImages():
    """Make and display the synthetic test image, its warped copy and the
    known shifts."""
    f,g,s = makeTestImages()
    plot(f,"f")
    plot(g,"g")
    plot(s,"s")
def makeTestImages():
    """Return (f,g,s): a synthetic seismic image f, its sinusoidally warped
    copy g (both with noise added), and the exact shifts s used to warp."""
    dip = 30.0
    shift = 16
    n1,n2 = 501,501; f = FakeData.seismic2d2011A(n1,n2,dip)
    #n1,n2 = 462,951; f = readImage("/data/seis/f3d/f3d75.dat",n1,n2)
    f = sub(f,sum(f)/n1/n2)  # remove the mean
    #w = Warp2.constant(shift,0.0,n1,n2)
    w = Warp2.sinusoid(shift,0.0,n1,n2)
    g = w.warp(f)
    f = addNoise(nrms,f,seed=10*seed+1)
    g = addNoise(nrms,g,seed=10*seed+2)
    s = zerofloat(n1,n2)
    for i2 in range(n2):
        for i1 in range(n1):
            s[i2][i1] = w.u1x(i1,i2)
    return f,g,s
def go1Dwrite():
    """Extract trace y=150 from layered images 5 and 50 and save both traces
    to a single 2-row file."""
    y = 150
    f = readLayImage(5)
    g = readLayImage(50)
    fng = zerofloat(len(f[y]),2)
    for i in range(len(f[y])):
        fng[0][i] = f[y][i]
        fng[1][i] = g[y][i]
    writeImage("f=5_g=50_y=150_len="+str(len(f[y])),fng)
#############################################################################
# shifts
def smooth(u):
    """Return a copy of u, Gaussian-smoothed when the smoothShifts flag is set."""
    v = copy(u)
    if smoothShifts:
        rgf = RecursiveGaussianFilter(8); rgf.apply00(u,v)
    return v
def normalize(x):
    """Rescale x linearly into [0,1].
    NOTE(review): min/max/mul/sub here are presumably the whole-array
    versions from the edu.mines.jtk ArrayMath star import, which shadow the
    Python builtins -- confirm before restructuring."""
    xmin = min(x)
    xmax = max(x)
    return mul(sub(x,xmin),1.0/(xmax-xmin))
def align(u,f,g):
    """Warp g by the shifts u (sinc interpolation) so it aligns with f."""
    n1,n2 = len(u[0]),len(u)
    si = SincInterpolator()
    si.setUniformSampling(n1,1.0,0.0)
    h = copy(g)
    r = rampfloat(0.0,1.0,n1)
    for i2 in range(n2):
        t = add(r,u[i2])  # sample times shifted by the estimated u
        si.setUniformSamples(g[i2])
        si.interpolate(n1,t,h[i2])
    return h
def _prepare(dw,e,order):
    """Apply alternating accumulate/normalize smoothing passes to the error
    array e; *order* is a sequence of dimension tags (1 or 2)."""
    for dim in order:
        if dim == 1:
            e = dw.accumulate1(e)
        else:
            e = dw.accumulate2(e)
        e = normalize(e)
    return e
def shifts1(dw,e):
    """One-pass shift estimate (dimension 1 only)."""
    d = dw.accumulateForward1(e)
    return smooth(dw.findShiftsReverse1(d,e))
def shifts12(dw,e):
    """Two-pass shift estimate: smooth over dim 1, then solve along dim 2."""
    e = _prepare(dw,e,(1,))
    d = dw.accumulateForward2(e)
    return smooth(dw.findShiftsReverse2(d,e))
def shifts121(dw,e):
    """Three-pass shift estimate: smooth over dims 1,2, solve along dim 1."""
    e = _prepare(dw,e,(1,2))
    d = dw.accumulateForward1(e)
    return smooth(dw.findShiftsReverse1(d,e))
def shifts1212(dw,e):
    """Four-pass shift estimate: smooth over dims 1,2,1, solve along dim 2."""
    e = _prepare(dw,e,(1,2,1))
    d = dw.accumulateForward2(e)
    return smooth(dw.findShiftsReverse2(d,e))
def shifts12121(dw,e):
    """Five-pass shift estimate: smooth over dims 1,2,1,2, solve along dim 1."""
    e = _prepare(dw,e,(1,2,1,2))
    d = dw.accumulateForward1(e)
    return smooth(dw.findShiftsReverse1(d,e))
#############################################################################
# utilities
def readImage(fileName,n1,n2):
    """Read an n1*n2 float image from *fileName*.
    NOTE(review): this definition is shadowed by the 2-argument readImage
    below; it is only referenced from commented-out code."""
    x = zerofloat(n1,n2)
    ais = ArrayInputStream(fileName)
    ais.readFloats(x)
    ais.close()
    return x
def readImage(name,x):
    """Fill array x with floats read from saveDir/<name>.dat."""
    fileName = saveDir+name+'.dat'
    ais = ArrayInputStream(fileName)
    ais.readFloats(x)
    ais.close()
def writeImage(name,image):
    """Write float array *image* to saveDir/<name>.dat."""
    fileName = saveDir+name+'.dat'
    aos = ArrayOutputStream(fileName)
    aos.writeFloats(image)
    aos.close()
def addRickerWavelet(fpeak,f):
    """Return a new image: every trace of the 2-D array f convolved with a
    Ricker wavelet of peak frequency *fpeak*."""
    n1,n2 = len(f[0]),len(f)
    half = int(3.0/fpeak)  # half-length of the sampled wavelet
    nw = 1+2*half
    w = zerofloat(nw)
    for j in range(nw):
        w[j] = ricker(fpeak,j-half)
    out = zerofloat(n1,n2)
    for i2 in range(n2):
        Conv.conv(nw,-half,w,n1,0,f[i2],n1,0,out[i2])
    return out
def ricker(fpeak,time):
    """Ricker wavelet value at *time* for peak frequency *fpeak*."""
    x = PI*fpeak*time
    xx = x*x
    return (1.0-2.0*xx)*exp(-xx)
def addNoise(nrms,f,seed=0):
    """Return f plus band-limited random noise whose rms amplitude is
    nrms times the rms of f. A nonzero *seed* makes the noise repeatable."""
    n1,n2 = len(f[0]),len(f)
    if seed!=0:
        r = Random(seed)
    else:
        r = Random()
    # uniform noise in [-1,1]
    g = mul(2.0,sub(randfloat(r,n1,n2),0.5))
    g = addRickerWavelet(0.125,g) # same wavelet used in signal
    rgf = RecursiveGaussianFilter(1.0)
    rgf.applyX0(g,g)
    frms = sqrt(sum(mul(f,f))/n1/n2)
    #frms = max(abs(f))
    grms = sqrt(sum(mul(g,g))/n1/n2)
    g = mul(g,nrms*frms/grms)  # scale noise rms relative to signal rms
    return add(f,g)
#############################################################################
# plotting
def plot(f,title=None,clips=None,png=None):
    """Display 2-D array f as pixels with a color bar; optional clip range,
    title and PNG export (file name pngDir/<png>.png)."""
    #sp = SimplePlot.asPixels(f)
    sp = SimplePlot(SimplePlot.Origin.UPPER_LEFT)
    pv = sp.addPixels(f)
    if clips:
        pv.setClips(clips[0],clips[1])
    if title:
        sp.setTitle(title)
    sp.addColorBar()
    sp.setSize(900,900)
    if png:
        sp.paintToPng(360,3.33,pngDir+png+'.png')
def plot1(f,title=None):
    """Display 1-D array f as points, with an optional title."""
    sp = SimplePlot()
    sp.addPoints(f)
    sp.setSize(900,500)
    if title:
        sp.setTitle(title)
def plotJ(J,it,png=None):
    """Plot objective-function values J; when *png* is given, also export
    a PNG named pngDir/<png>-<it>.png."""
    it = str(it)
    sp = SimplePlot()
    pv = sp.addPoints(J)
    sp.setHLabel("Velocity Models")
    #sp.setHInterval(1.0)
    sp.setVLabel("J- Objective Function")
    # BUG FIX: the old code computed "Data misfit "+png unconditionally,
    # which raised TypeError whenever png was None (its default).
    if png:
        sp.setTitle("Data misfit "+png)
    else:
        sp.setTitle("Data misfit")
    sp.setSize(1200,500)
    if png:
        sp.paintToPng(360,3.33,pngDir+png+'-'+it+'.png')
#############################################################################
# Do everything on Swing thread.
import sys,time
class RunMain(Runnable):
    """Runnable that executes main() and prints the elapsed wall-clock time
    as hh:mm:ss."""
    def run(self):
        start = time.time()
        main(sys.argv)
        s = time.time()-start
        h = int(s/3600); s -= h*3600
        m = int(s/60); s -= m*60
        print '%02d:%02d:%02ds'%(h,m,s)
#main(sys.argv)
# Schedule main() on the Swing event-dispatch thread (see header comment).
SwingUtilities.invokeLater(RunMain())
| StarcoderdataPython |
6635490 | <reponame>EneaMapelli/SafecastPy
# -*- coding: utf-8 -*-
"""
SafecastPy.exceptions
~~~~~~~~~~~~~~~~~~
This module contains SafecastPy specific Exception classes.
"""
class SafecastPyError(Exception):
    """Generic error class, catch-all for most SafecastPy issues.

    When *error_code* is given, the raw response body is preserved on
    ``self.html`` and the message is replaced with a generic one that
    mentions the HTTP status code.
    """
    def __init__(self, message, error_code=None):
        final_message = message
        if error_code is not None:
            # Keep the raw (HTML) payload around for inspection.
            self.html = message
            final_message = "Safecast API returned a {0} error".format(error_code)
        super(SafecastPyError, self).__init__(final_message)
class SafecastPyAuthError(SafecastPyError):
    """Raised when access to a protected resource fails because of a
    problem with your authentication."""
| StarcoderdataPython |
class Car:
    """Simple value object describing a car by maker and model."""

    def __init__(self, maker, model):
        self.maker = maker
        self.model = model

    def __repr__(self):
        return f'<Car {self.maker} {self.model}>'


class Garage:
    """Container of Car objects; anything that is not a Car is rejected."""

    def __init__(self):
        self.cars = []

    def __len__(self):
        return len(self.cars)

    def add_car(self, car):
        """Store *car*; raise TypeError for non-Car arguments."""
        if isinstance(car, Car):
            self.cars.append(car)
        else:
            raise TypeError('Car argument required')
# Demo: adding a valid Car succeeds, so only the finally branch prints;
# the except branches show how add_car failures would be handled.
garage = Garage()
car1 = Car('Maker 1', 'Car 1')
try:
    garage.add_car(car1)
except TypeError:
    # raised by add_car for non-Car arguments
    print('You did not provide a Car')
except ValueError:
    print('In case any ValueError happens')
finally:
    print('Whatever happens, run this anyway')
| StarcoderdataPython |
3325831 | <reponame>AguilaDFG/Python
from turtle import Turtle, Screen
from random import randint
# Random walk: the turtle repeatedly turns to a random right-angle heading
# (0/90/180/270/360 degrees) and steps 10 units with a random pen color;
# speed and pen size grow with the iteration counter.
turtle = Turtle()
screen = Screen()
screen.colormode(255)  # accept 0-255 RGB components in pencolor()
for a in range(50,randint(100,200)):
    turtle.setheading(90*randint(0,4))
    turtle.speed(a/20)
    turtle.pensize(a/20)
    turtle.pencolor(randint(0,255), randint(0,255), randint(0,255))
    turtle.fd(10)
screen.exitonclick()  # keep the window open until it is clicked
12806037 | import pandas as pd
from bs4 import BeautifulSoup
import requests
import os
# scrapes and saves average player stats when imported.
from scraping_scripts.player_per_game_scrape import player_per_game_std
def avg_dfs_score():
    """Compute each player's Average Daily Fantasy Score (ADFS).

    Reads the scraped per-game stats, weights each stat column by its
    daily-fantasy point value, sums the weighted columns into an ADFS
    column, prints the top 10, writes the result to disk and returns the
    DataFrame sorted by ADFS (descending).
    """
    # read in data that was scraped via the import statement above
    df = pd.read_csv(r'scraped_data\player_per_game_std_2020.csv')
    # DFS point value per stat, keyed by the stat's DataFrame column name
    # (the former pair of parallel dicts, merged as the old TODO suggested).
    dfs_points = {'PTS': 1.0,    # Point
                  '3P': 0.5,     # Made 3pt
                  'TRB': 1.25,   # Rebound
                  'AST': 1.5,    # Assist
                  'STL': 2.0,    # Steal
                  'BLK': 2.0,    # Block
                  'TOV': -0.5}   # Turnover
    # 'DD%': 1.5,  # Double-double, future feature
    # 'TD%': 3.0   # Triple-double, future feature
    # converting raw per-game stats to dfs scoring totals per stat
    for column, points in dfs_points.items():
        df[column] = df[column] * points
    # creating average daily fantasy score total per player
    df['ADFS'] = df['PTS'] + df['3P'] + df['TRB'] + df['AST'] + df['STL'] + df['BLK'] + df['TOV']
    # BUG FIX: sort_values returns a new DataFrame; without the assignment
    # the sort was silently discarded and head(10) did not show the top 10.
    df = df.sort_values(by='ADFS', ascending=False)
    # printing top 10 ADFS
    print(df[['Player', 'PTS', '3P', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'ADFS']].head(10))
    # writing data
    df.to_csv(r'computed_data\avg_dfs_score_exclude_dubs.csv')
    return df


# calling function to apply computations
# and write to csv for storage.
avg_dfs_score()
9792669 | <gh_stars>1-10
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from data import get_train_transform, get_test_transform
class CustomDataset(Dataset):
    """Image-classification dataset backed by a 'path|label' listing file."""

    # Class-level defaults, overwritten per instance in __init__.
    img_aug = True
    imgs = []
    transform = None

    def __init__(self, label_file, image_set, input_size):
        with open(label_file, 'r', encoding="utf-8") as listing:
            self.imgs = [line.strip().split('|') for line in listing]
        if image_set == 'train':
            self.transform = get_train_transform(size=input_size)
        else:
            self.transform = get_test_transform(size=input_size)
        self.input_size = input_size

    def __getitem__(self, index):
        img_path, label = self.imgs[index]
        image = Image.open(img_path).convert('RGB')
        if self.img_aug:
            image = self.transform(image)
        else:
            # No augmentation: hand back the raw pixels as a tensor.
            image = torch.from_numpy(np.array(image))
        return image, torch.from_numpy(np.array(int(label)))

    def __len__(self):
        return len(self.imgs)
def get_datasets_and_dataloader(label_path, image_set, batch_size, input_size):
    """Build a CustomDataset for *label_path* and wrap it in a shuffling
    DataLoader (2 workers); return both."""
    dataset = CustomDataset(label_path, image_set=image_set, input_size=input_size)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=2)
    return dataset, loader
| StarcoderdataPython |
3404196 | <reponame>rrgaya-zz/client-manager<filename>clientes/templatetags/filters.py
from django import template
register = template.Library()
@register.filter
def meu_filtro(data):
    """Template filter: append a marker showing the value passed through it."""
    suffix = " - " + "Alterado pelo filtro"
    return data + suffix
@register.filter
def arredonda(value, casas):
    """Template filter: round *value* to *casas* decimal places."""
    digits = casas
    return round(value, digits)
@register.filter
def footer_message(value=""):
    """Template filter returning the developer credit line.

    BUG FIX: Django always passes the piped value as the first positional
    argument to a filter, so the original zero-argument function raised
    TypeError whenever it was used in a template. The value is accepted and
    ignored; the default keeps direct zero-argument calls working too.
    """
    return "Desenvolvido por <NAME>"
| StarcoderdataPython |
8098173 | <gh_stars>0
class MocketException(Exception):
    """Base class for all Mocket errors."""


class StrictMocketException(MocketException):
    """Mocket error raised while operating in strict mode."""
| StarcoderdataPython |
1648621 | <filename>concurrenflict/forms.py
import simplejson
from django import forms
from django.core import serializers
from django.utils.html import mark_safe
class ConcurrenflictFormMixin(forms.ModelForm):
    """
    Compares model instance between requests: first at for render, then upon submit but before save (i.e. on clean).
    If model instances are different, the Form fails validation and displays what has been changed.
    """
    # Hidden field that carries the JSON snapshot of the instance as rendered.
    concurrenflict_initial = forms.CharField(widget=forms.HiddenInput, label="", required=False)
    _concurrenflict_json_data = ''
    concurrenflict_field_name = 'concurrenflict_initial'

    def _get_concurrenflict_field(self):
        # The bound form field that stores the serialized snapshot.
        return self.fields[self.concurrenflict_field_name]

    def __init__(self, *args, **kwargs):
        super(ConcurrenflictFormMixin, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance', None)
        if instance:
            # Snapshot the instance so clean() can compare against it later.
            self._concurrenflict_json_data = serializers.serialize('json', [instance])
        self._get_concurrenflict_field().initial = self._concurrenflict_json_data

    def clean(self):
        """Fail validation when the record changed since the form was
        rendered, attaching a per-field error that shows the new value."""
        self.cleaned_data = super(ConcurrenflictFormMixin, self).clean()
        # Snapshot round-tripped through the POSTed hidden field...
        json_at_get = self.cleaned_data[self.concurrenflict_field_name]
        del self.cleaned_data[self.concurrenflict_field_name]
        # ...versus the snapshot of the instance handed to this form now.
        json_at_post = self._concurrenflict_json_data
        # we want to keep using the initial data set in __init__()
        self.data = self.data.copy()
        self.data[self.concurrenflict_field_name] = self._concurrenflict_json_data
        have_diff = False
        # if json_at_post is None then this is an add() rather than a change(), so
        # there's no old record that could have changed while this one was being worked on
        if json_at_post and json_at_get and (json_at_post != json_at_get):
            json_data_before = simplejson.loads(json_at_get)
            json_data_after = simplejson.loads(json_at_post)
            serial_data_before = serializers.deserialize('json', json_at_get).next()
            model_before = serial_data_before.object
            m2m_before = serial_data_before.m2m_data
            serial_data_after = serializers.deserialize('json', json_at_post).next()
            model_after = serial_data_after.object
            m2m_after = serial_data_after.m2m_data
            # Throw-away form used only to render the "new value" widgets.
            fake_form = self.__class__(instance=model_after, prefix='concurrenflict')
            for field in model_before._meta.fields + m2m_before.keys():
                try:
                    key = field.name
                except AttributeError:
                    key = field  # m2m_before is dict, model._meta.fields is list of Fields
                if key == self.concurrenflict_field_name:
                    continue
                if key not in fake_form.fields.keys():
                    continue
                json_value_before = json_data_before[0]['fields'].get(key, None)
                json_value_after = json_data_after[0]['fields'].get(key, None)
                if json_value_after != json_value_before:
                    # NOTE(review): value_before is computed but never used.
                    value_before = getattr(model_before, key, m2m_before.get(key))
                    value_after = getattr(model_after, key, m2m_after.get(key, ''))
                    have_diff = True
                    fake_form.data[key] = value_after
                    # this does not work for MultiSelect widget (and other Multi-something) widgets:
                    # ANDDD this appears to not be thread-safe! (faceplam)
                    #fake_form[key].field.widget.attrs['disabled'] = 'disabled'
                    # so to make sure:
                    js_fix = '''
                    <script type="text/javascript">
                    (function($){
                        $(function(){
                            $('[name^="%(html_name)s"]').attr('disabled', 'disabled').attr('readonly', 'readonly');
                            $('#add_id_%(html_name)s').remove();
                        });
                    })(window.jQuery || django.jQuery);
                    </script>
                    ''' % {'html_name': fake_form[key].html_name}
                    temp_field = unicode(fake_form[key])
                    msg = mark_safe(u'This field has changed! New Value: <div class="concurrenflict_disabled_widget">%s</div>%s'
                                    % (temp_field, js_fix,))
                    #@TODO Django 1.7: use Form.add_error()
                    self._errors[key] = self.error_class([msg])
                    # These fields are no longer valid. Remove them from the
                    # cleaned data. As if that has any effect...
                    del self.cleaned_data[key]
        if have_diff:
            raise forms.ValidationError(u"This record has changed since you started editing it.")
        return self.cleaned_data
class ModelForm(ConcurrenflictFormMixin, forms.ModelForm):
    """Drop-in ModelForm with concurrent-edit detection mixed in."""
| StarcoderdataPython |
4962085 | #
# Copyright (C) <NAME> 2020 <<EMAIL>>
#
import configparser
import keyring
import sqlite3
import subprocess
import bluetooth
import gi
import datetime
from . import config
from .config import log
from . import utility
from .utility import WorkerThread
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib # noqa: E402
class InitBarcodeThread(WorkerThread):
    """Worker thread that discovers bluetooth devices exposing an RFCOMM
    service (candidate barcode readers)."""
    def __init__(self):
        super().__init__()

    # Run the discovery (executes in the worker thread).
    def run(self):
        resultList = []
        try:
            devices = bluetooth.discover_devices()
            for device in devices:
                name = bluetooth.lookup_name(device)
                services = bluetooth.find_service(address=device)
                for svc in services:
                    if (svc['protocol'] == 'RFCOMM'):
                        resultList.append([name, device, svc['port']])
                        log.debug("Device found: %s %s %s" % (name, device, svc['port']))
        except Exception as e:
            self.setError(e)
        else:
            self.status = self.DONE
        finally:
            # Hand the result back to the GTK main loop.
            # NOTE(review): assumes self.progressDialog is provided by
            # WorkerThread or set by the caller -- confirm.
            GLib.idle_add(self.progressDialog.close, resultList)
        return False


# Worker thread that opens the RFCOMM connection to the bluetooth barcode reader.
class ConnectBarcodeThread(WorkerThread):
    def __init__(self, addr, port):
        super().__init__()
        self.addr = addr
        self.port = port
        self.sock = None

    def stop(self):
        log.debug("Barcode connect thread: STOP")
        WorkerThread.stop(self)
        self.__closeSocket(True)

    def __closeSocket(self, force=False):
        # Close the socket; with force=True also shut it down first so any
        # blocked operation is interrupted.
        if self.sock:
            if force:
                try:
                    self.sock.shutdown(2)  # blocks both sides of the socket
                except Exception:
                    pass
                log.debug("Barcode connect thread: socket shutdown!")
            try:
                self.sock.close()
            except Exception:
                pass
            log.debug("Barcode connect thread: socket closed.")
            self.sock = None

    # Open the connection (executes in the worker thread).
    def run(self):
        try:
            self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            log.debug("Connecting Socket..")
            self.sock.connect((self.addr, self.port))
        except Exception as e:
            self.setError(e)
            self.__closeSocket()
        else:
            self.status = self.DONE
        finally:
            GLib.idle_add(self.progressDialog.close, self.sock)
        return False
# Worker thread that reads scanned barcodes from the bluetooth reader.
class ReadBarcodeThread(WorkerThread):
    # Terminator the reader appends to every scanned code.
    # BUG FIX: BluetoothSocket.recv() returns bytes, so the suffix must be
    # bytes as well -- bytes.endswith(str) raises TypeError.
    BARCODE_SUFFIX = b'\r\n'

    def __init__(self, updateCallback, errorCallback, sock):
        super().__init__()
        self.sock = sock
        self.updateCallback = updateCallback
        self.errorCallback = errorCallback

    def stop(self):
        log.debug("Barcode read thread: STOP")
        WorkerThread.stop(self)
        self.__closeSocket(True)

    def __closeSocket(self, force=False):
        # Close the socket; with force=True also shut it down first so a
        # blocked recv() is interrupted.
        if self.sock:
            if force:
                try:
                    self.sock.shutdown(2)  # blocks both sides of the socket
                except Exception:
                    pass
                log.debug("Barcode read thread: socket shutdown!")
            try:
                self.sock.close()
            except Exception:
                pass
            log.debug("Barcode read thread: socket closed.")
            self.sock = None

    # Read scanned codes from the bluetooth device until stopped.
    def run(self):
        try:
            # The loop only exits when the socket is shut down externally.
            while not (self.status == self.STOPPED):
                data = self.sock.recv(1024)
                if data:
                    # log.debug("data: %s", data)
                    if (len(data) > 0) and data.endswith(self.BARCODE_SUFFIX):
                        GLib.idle_add(self.updateCallback, data[:-len(self.BARCODE_SUFFIX)])
                    else:
                        raise Exception("Invalid data from barcode reader.")
        except Exception as e:
            self.setError(e)
            GLib.idle_add(self.errorCallback, e)
        finally:
            self.__closeSocket()
        return False
# Application preferences, persisted via configparser plus the system keyring.
class Preferences(utility.Preferences):
    # Keyring "service" name under which the Logista website password is stored.
    TABACCHI_STR = "Logista Website password"
    # Location of the application's SQLite database.
    DB_PATHNAME = config.user_data_dir / f'{config.PACKAGE_NAME}.sqlite'

    def __init__(self):
        super().__init__(config.__desc__, config.CONF_PATHNAME)
        self.numRivendita = ""
        self.codCliente = ""
        self.nome = ""
        self.cognome = ""
        self.citta = ""
        self.telefono = ""
        self.pianoConsegneDaSito = False
        self.giornoLevata = 3  # default Thursday (Monday = 0); user-configurable
        self.ggPerOrdine = 2   # working days needed to prepare an order
        self.oraInvio = 11     # cut-off hour of day for sending an order
        self.timbro = ""
        self.firma = ""
        self.u88 = ""
        self.u88urg = ""
        self.timbroW = 0
        self.timbroH = 0
        self.firmaW = 0
        self.firmaH = 0
        self.tabacchiPwd = ""
        self.tabacchiUser = ""
        self.catalogoUrl = ""
        self.loginUrl = ""
        self.dataCatalogo = datetime.date.today()
        self.defaultBarcode = -1
        self.barcodeList = []
        self.pianoConsegneList = []

    # Return True when a connection can be obtained with the current
    # configuration, i.e. the SQLite database file already exists.
    def checkDB(self):
        return self.DB_PATHNAME.exists()

    # Return a new SQLite connection with row access by name and
    # foreign-key enforcement enabled.
    def getConn(self):
        conn = sqlite3.connect(self.DB_PATHNAME, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.execute("pragma foreign_keys=ON;")
        return conn

    # Return a cursor for the given connection.
    def getCursor(self, conn):
        return conn.cursor()

    # Read the preferences from the configuration file and the keyring.
    def load(self):
        config = super().load()
        self.barcodeList[:] = []
        # BUG FIX: clear the delivery plan too, otherwise repeated load()
        # calls keep appending duplicate entries.
        self.pianoConsegneList[:] = []
        if config.has_section('Barcode'):
            barcode = config['Barcode']
            i = 0
            self.defaultBarcode = barcode.getint('defaultbarcode', -1)
            while config.has_option('Barcode', f'device{i}'):
                device = barcode[f'device{i}']
                port = barcode.getint(f'port{i}')
                addr = barcode[f'addr{i}']
                i += 1
                self.barcodeList.append([device, addr, port])
        if config.has_section("Tabacchi"):
            tabacchi = config['Tabacchi']
            self.numRivendita = tabacchi.get('numRivendita', '')
            self.codCliente = tabacchi.get('codCliente', '')
            # BUG FIX: the fallback must be a *string* -- passing a date
            # object to strptime() raised TypeError whenever the option was
            # missing. .date() keeps the attribute a datetime.date, matching
            # the default set in __init__().
            self.dataCatalogo = datetime.datetime.strptime(
                tabacchi.get('dataCatalogo', datetime.date.today().strftime('%Y-%m-%d')),
                '%Y-%m-%d').date()
            self.nome = tabacchi.get('nome', '')
            self.cognome = tabacchi.get('cognome', '')
            self.citta = tabacchi.get('citta', '')
            # BUG FIX: missing fallbacks made these None instead of the
            # defaults established in __init__().
            self.telefono = tabacchi.get('telefono', '')
            self.pianoConsegneDaSito = tabacchi.getboolean('pianoConsegneDaSito', False)
            self.giornoLevata = tabacchi.getint('giornoLevata', 3)
            self.oraInvio = tabacchi.getint('oraInvio', 11)
            self.ggPerOrdine = tabacchi.getint('ggPerOrdine', 2)
            self.timbro = tabacchi.get('timbro', '')
            self.firma = tabacchi.get('firma', '')
            self.u88 = tabacchi.get('u88', '')
            self.u88urg = tabacchi.get('u88urg', '')
            self.timbroW = tabacchi.getfloat('timbroW', 0)
            self.timbroH = tabacchi.getfloat('timbroH', 0)
            self.firmaW = tabacchi.getfloat('firmaW', 0)
            self.firmaH = tabacchi.getfloat('firmaH', 0)
            self.tabacchiUser = tabacchi.get('user', '')
            self.catalogoUrl = tabacchi.get('catalogoUrl', '')
            self.loginUrl = tabacchi.get('loginUrl', '')
            value = keyring.get_password(self.TABACCHI_STR, self.tabacchiUser)
            if value:
                self.tabacchiPwd = value
        # Load into the preferences a list with the delivery plan ("piano
        # levate") that will be used to build the delivery-plan treeview in
        # the main window.
        if self.pianoConsegneDaSito:
            if config.has_section("PianoLevate"):
                tmpList = config.items("PianoLevate")
                for row in tmpList:
                    consegna = datetime.datetime.strptime(row[0], "%d/%m/%Y").date()
                    dataLimiteStr, ordine, stato, canale, tipo = row[1].split(',')
                    dataLimite = datetime.datetime.strptime(dataLimiteStr, "%d/%m/%Y - %H:%M")
                    self.pianoConsegneList.append([consegna, dataLimite, ordine, stato, canale, tipo])

    # Persist the preferences to the configuration file and the keyring.
    def save(self):
        config = configparser.ConfigParser()
        if len(self.barcodeList) > 0:
            config['Barcode'] = {}
            barcode = config['Barcode']
            i = 0
            barcode['defaultbarcode'] = str(self.defaultBarcode)
            for code in self.barcodeList:
                barcode[f'device{i}'] = code[0]
                barcode[f'addr{i}'] = code[1]
                barcode[f'port{i}'] = str(code[2])
                i += 1
        config['Tabacchi'] = {'numRivendita': self.numRivendita,
                              'codCliente': self.codCliente,
                              'nome': self.nome,
                              'cognome': self.cognome,
                              'citta': self.citta,
                              'telefono': self.telefono,
                              'pianoConsegneDaSito': self.pianoConsegneDaSito,
                              'giornoLevata': self.giornoLevata,
                              'oraInvio': self.oraInvio,
                              'ggPerOrdine': self.ggPerOrdine,
                              'dataCatalogo': self.dataCatalogo.strftime('%Y-%m-%d'),
                              'timbro': self.timbro,
                              'firma': self.firma,
                              'u88': self.u88,
                              'u88urg': self.u88urg,
                              'timbroW': self.timbroW,
                              'timbroH': self.timbroH,
                              'firmaW': self.firmaW,
                              'firmaH': self.firmaH,
                              'user': self.tabacchiUser,
                              'catalogoUrl': self.catalogoUrl,
                              'loginUrl': self.loginUrl
                              }
        keyring.set_password(self.TABACCHI_STR, self.tabacchiUser, self.tabacchiPwd)
        if self.pianoConsegneDaSito:
            config["PianoLevate"] = {}
            for row in self.pianoConsegneList:
                consegna = datetime.datetime.strftime(row[0], "%d/%m/%Y")
                dataLimite = datetime.datetime.strftime(row[1], "%d/%m/%Y - %H:%M")
                config["PianoLevate"][consegna] = dataLimite + ',' + ','.join(map(str, row[2:]))
        super().save(config)
# Global singleton instance of the application preferences
prefs = Preferences()
def nextWeekday(d, weekday, time=datetime.time(0, 0, 0, 0)):
    """Return the next occurrence of *weekday* (Monday = 0) after datetime
    *d*, combined with *time*.

    If *d* already falls on that weekday, the same day is returned only when
    its clock time is still before *time*; otherwise the result is one week
    later. Examples:
      nextWeekday('Friday 12 Dec 9:00', Thursday) -> Thursday 18 Dec 00:00
      nextWeekday('Tuesday 3 May 10:27', Tuesday, 11:00) -> Tuesday 3 May 11:00
    """
    offset = (weekday - d.weekday()) % 7
    if offset == 0 and d.time() >= time:
        # Same weekday but the cut-off time has passed: jump a full week.
        offset = 7
    return datetime.datetime.combine(d.date() + datetime.timedelta(offset), time)
# Given a reference date, return the timestamp by which the order must be
# sent and the date on which the order will be delivered ("levata").
#
# Constraints:
# - prefs.ggPerOrdine working days are needed to prepare the order
# - no deliveries on Saturday and Sunday
# - the delivery weekday is configurable in the prefs (prefs.giornoLevata)
# - Monday = 0, .. Sunday = 6
# - orders must be sent before a cut-off hour (prefs.oraInvio)
def dataLimiteOrdine(data):
    dataOrdine = None
    levata = None
    # Prefer the delivery plan scraped from the Logista website, if enabled.
    if prefs.pianoConsegneDaSito:
        log.debug("[dataLimiteOrdine] check prefs.pianoConsegneList: %s" % prefs.pianoConsegneList)
        for row in prefs.pianoConsegneList:
            if data < row[1]:
                dataOrdine = row[1]  # deadline for sending the order
                levata = row[0]      # delivery date
                break
    # Fallback when scanning of the delivery plan from the Logista website
    # is disabled, or when the delivery-plan list is empty/exhausted.
    if dataOrdine is None or levata is None:
        # Weekday of the week by which the order must be sent
        giornoOrdine = (prefs.giornoLevata - prefs.ggPerOrdine) % 5
        # Exact deadline by which the order must be sent
        dataOrdine = nextWeekday(data, giornoOrdine, datetime.time(prefs.oraInvio, 0, 0, 0))
        # Exact date on which the order will be delivered
        levata = nextWeekday(dataOrdine, prefs.giornoLevata)
        levata = levata.date()
    return (dataOrdine, levata)
# Dialog for setting the program options
class PreferencesDialog(utility.PreferencesDialog):
    """Preferences dialog extended with tobacco-shop and barcode-reader tabs.

    Loads extra widgets from ``preferencesTabacchi.glade``, appends two pages
    to the base preferences notebook and wires their widgets to ``prefs``.
    """
    def __init__(self, parent):
        super().__init__(parent, prefs)
        self.add_from_file("preferencesTabacchi.glade")
        self.preferencesNotebook = self.builder.get_object("preferencesNotebook")
        self.tabacchiVbox = self.builder.get_object("tabacchiVbox")
        self.barcodeVbox = self.builder.get_object("barcodeVbox")
        self.preferencesNotebook.append_page(self.tabacchiVbox)
        self.preferencesNotebook.append_page(self.barcodeVbox)
        self.preferencesNotebook.set_tab_label_text(self.tabacchiVbox, "Tabacchi")
        self.preferencesNotebook.set_tab_label_text(self.barcodeVbox, "Lettore codici a barre")
        self.__buildTabacchi(self.builder)
        self.__buildBarcode(self.builder)
        self.__loadBarcode(prefs.barcodeList)
        self.builder.connect_signals({
            "on_refreshBarcodeButton_clicked": self.refreshBarcode,
        })
        # File filters used by the file-chooser buttons (stamp/signature/forms).
        self.allfilter = Gtk.FileFilter()
        self.allfilter.set_name("All files")
        self.allfilter.add_pattern("*")
        self.pdffilter = Gtk.FileFilter()
        self.pdffilter.set_name("PDF files")
        self.pdffilter.add_pattern("*.pdf")
        self.imgfilter = Gtk.FileFilter()
        self.imgfilter.set_name("Images")
        self.imgfilter.add_mime_type("image/png")
        self.imgfilter.add_mime_type("image/jpeg")
        self.imgfilter.add_mime_type("image/gif")
        self.imgfilter.add_pattern("*.png")
        self.imgfilter.add_pattern("*.jpg")
        self.imgfilter.add_pattern("*.gif")
        self.imgfilter.add_pattern("*.tif")
        self.imgfilter.add_pattern("*.xpm")
    # Specialised backup thread factory (overrides the base-class hook)
    def __getBackupThread(self, preferences, history):
        return BackupThread(preferences, history)
    def __buildTabacchi(self, builder):
        """Look up the 'Tabacchi' tab widgets and populate them from prefs."""
        self.numRivenditaEntry = builder.get_object("numRivenditaEntry")
        self.codClienteEntry = builder.get_object("codClienteEntry")
        self.cittaEntry = builder.get_object("cittaEntry")
        self.nomeEntry = builder.get_object("nomeEntry")
        self.cognomeEntry = builder.get_object("cognomeEntry")
        self.telefonoEntry = builder.get_object("telefonoEntry")
        self.levataCombobox = builder.get_object("levataCombobox")
        self.timbrofchooser = builder.get_object("timbroFilechooserbutton")
        self.firmafchooser = builder.get_object("firmaFilechooserbutton")
        self.u88fchooser = builder.get_object("u88Filechooserbutton")
        self.u88urgfchooser = builder.get_object("u88urgFilechooserbutton")
        timbroBox = builder.get_object("timbroBox")
        firmaBox = builder.get_object("firmaBox")
        self.giorniConsegnaCombobox = builder.get_object("giorniConsegnaCombobox")
        self.ordineEntroCombobox = builder.get_object("ordineEntroCombobox")
        self.consegneFrame = builder.get_object("consegneFrame")
        self.pianoConsegneDaSitoSwitch = builder.get_object("pianoConsegneDaSitoSwitch")
        # Width/height numeric entries for the signature (firma) and stamp (timbro)
        self.firmaWEntry = utility.NumEntry(firmaBox, 1, 2, 2)
        self.firmaHEntry = utility.NumEntry(firmaBox, 3, 2, 2)
        self.timbroWEntry = utility.NumEntry(timbroBox, 1, 2, 2)
        self.timbroHEntry = utility.NumEntry(timbroBox, 3, 2, 2)
        self.tabacchiUserEntry = builder.get_object("tabacchiUserEntry")
        self.catalogoUrlEntry = builder.get_object("catalogoUrlEntry")
        self.loginUrlEntry = builder.get_object("loginUrlEntry")
        self.tabacchiPwdEntry = builder.get_object("tabacchiPwdEntry")
        # Delivery weekdays model (Monday..Friday)
        self.dateModel = Gtk.ListStore(str)
        self.dateModel.append(["Lunedì"])
        self.dateModel.append(["Martedì"])
        self.dateModel.append(["Mercoledì"])
        self.dateModel.append(["Giovedì"])
        self.dateModel.append(["Venerdì"])
        self.giorniConsegnaModel = Gtk.ListStore(str)
        for x in range(0, 6):
            self.giorniConsegnaModel.append([str(x)])
        self.ordiniEntroModel = Gtk.ListStore(str)
        for x in range(0, 24):
            self.ordiniEntroModel.append([str(x)])
        self.numRivenditaEntry.set_text(prefs.numRivendita)
        self.codClienteEntry.set_text(prefs.codCliente)
        self.cittaEntry.set_text(prefs.citta)
        self.nomeEntry.set_text(prefs.nome)
        self.cognomeEntry.set_text(prefs.cognome)
        self.telefonoEntry.set_text(prefs.telefono)
        self.pianoConsegneDaSitoSwitch.set_active(prefs.pianoConsegneDaSito)
        # The manual delivery settings are disabled when the plan comes from the site
        self.consegneFrame.set_sensitive(not prefs.pianoConsegneDaSito)
        self.levataCombobox.set_model(self.dateModel)
        self.giorniConsegnaCombobox.set_model(self.giorniConsegnaModel)
        self.ordineEntroCombobox.set_model(self.ordiniEntroModel)
        if prefs.timbro:
            self.timbrofchooser.set_filename(prefs.timbro)
        if prefs.firma:
            self.firmafchooser.set_filename(prefs.firma)
        if prefs.u88:
            self.u88fchooser.set_filename(prefs.u88)
        if prefs.u88urg:
            self.u88urgfchooser.set_filename(prefs.u88urg)
        self.firmaWEntry.set_value(prefs.firmaW)
        self.firmaHEntry.set_value(prefs.firmaH)
        self.timbroWEntry.set_value(prefs.timbroW)
        self.timbroHEntry.set_value(prefs.timbroH)
        cell = Gtk.CellRendererText()
        self.levataCombobox.pack_start(cell, True)
        self.levataCombobox.add_attribute(cell, 'text', 0)
        self.levataCombobox.set_active(prefs.giornoLevata)
        cell = Gtk.CellRendererText()
        self.giorniConsegnaCombobox.pack_start(cell, True)
        self.giorniConsegnaCombobox.add_attribute(cell, 'text', 0)
        self.giorniConsegnaCombobox.set_active(prefs.ggPerOrdine)
        cell = Gtk.CellRendererText()
        self.ordineEntroCombobox.pack_start(cell, True)
        self.ordineEntroCombobox.add_attribute(cell, 'text', 0)
        self.ordineEntroCombobox.set_active(prefs.oraInvio)
        # NOTE(review): numRivendita/codCliente are set a second time here,
        # duplicating the assignments above — confirm whether intentional.
        self.numRivenditaEntry.set_text(prefs.numRivendita)
        self.codClienteEntry.set_text(prefs.codCliente)
        self.catalogoUrlEntry.set_text(prefs.catalogoUrl)
        self.loginUrlEntry.set_text(prefs.loginUrl)
        self.tabacchiUserEntry.set_text(prefs.tabacchiUser)
        self.tabacchiPwdEntry.set_text(prefs.tabacchiPwd)
        self.pianoConsegneDaSitoSwitch.connect("notify::active", self.consegneToggled)
    #
    def consegneToggled(self, switch, gparam):
        """Enable the manual delivery frame only when the site switch is off."""
        toggled = switch.get_active()
        self.consegneFrame.set_sensitive(not toggled)
    def refreshBarcode(self, widget):
        """Scan for RFCOMM bluetooth barcode readers in a background thread."""
        initBarcodeThread = InitBarcodeThread()
        progressDialog = utility.ProgressDialog(self.preferencesDialog, "Searching bluetooth devices..", "", "RFCOMM Bluetooth devices", initBarcodeThread)
        progressDialog.setResponseCallback(self.__loadBarcode)
        progressDialog.setStopCallback(self.barcodeModel.clear)
        progressDialog.setErrorCallback(self.barcodeModel.clear)
        progressDialog.startPulse()
    def __loadBarcode(self, barcodeList):
        """Replace the barcode combobox model contents with *barcodeList*."""
        self.barcodeModel.clear()
        for row in barcodeList:
            self.barcodeModel.append(row)
        self.barcodeCombobox.set_active(0)
    def __buildBarcode(self, builder):
        """Look up and configure the barcode-reader tab widgets."""
        self.barcodeCombobox = builder.get_object("barcodeCombobox")
        self.barcodeModel = Gtk.ListStore(str, str, int)
        self.barcodeCombobox.set_model(self.barcodeModel)
        cell = Gtk.CellRendererText()
        self.barcodeCombobox.pack_start(cell, True)
        self.barcodeCombobox.add_attribute(cell, 'text', 0)
    def check(self, widget, other=None):
        """Delegate field validation to the base dialog."""
        utility.PreferencesDialog.check(self, widget, other)
    def save(self):
        """Copy all widget values back into ``prefs`` and persist via super()."""
        prefs.numRivendita = self.numRivenditaEntry.get_text()
        prefs.codCliente = self.codClienteEntry.get_text()
        prefs.citta = self.cittaEntry.get_text()
        prefs.nome = self.nomeEntry.get_text()
        prefs.cognome = self.cognomeEntry.get_text()
        prefs.telefono = self.telefonoEntry.get_text()
        prefs.pianoConsegneDaSito = self.pianoConsegneDaSitoSwitch.get_active()
        prefs.oraInvio = self.ordineEntroCombobox.get_active()
        prefs.giornoLevata = self.levataCombobox.get_active()
        prefs.ggPerOrdine = self.giorniConsegnaCombobox.get_active()
        # If the first operand is None return the second (False or True = True)
        prefs.timbro = self.timbrofchooser.get_filename() or ''
        prefs.firma = self.firmafchooser.get_filename() or ''
        prefs.u88 = self.u88fchooser.get_filename() or ''
        prefs.u88urg = self.u88urgfchooser.get_filename() or ''
        prefs.firmaW = self.firmaWEntry.get_value()
        prefs.firmaH = self.firmaHEntry.get_value()
        prefs.timbroW = self.timbroWEntry.get_value()
        prefs.timbroH = self.timbroHEntry.get_value()
        prefs.catalogoUrl = self.catalogoUrlEntry.get_text()
        prefs.loginUrl = self.loginUrlEntry.get_text()
        prefs.tabacchiUser = self.tabacchiUserEntry.get_text()
        prefs.tabacchiPwd = self.tabacchiPwdEntry.get_text()
        prefs.barcodeList[:] = []
        for row in self.barcodeModel:
            prefs.barcodeList.append(list(row))
        prefs.defaultBarcode = self.barcodeCombobox.get_active()
        super().save()
# Performs the program backup to extended Cloud storage
class BackupThread(utility.BackupThread):
    """Backup thread specialised to archive this application's data."""
    def __init__(self, preferences, history=None):
        super().__init__(preferences, history)
    #
    def __dataBackup(self, workdir, backupDir):
        """Create a bzip2 tar archive of *backupDir* inside *workdir*.

        Returns the archive path without the ``.tar.bz2`` extension.
        NOTE(review): the command is built via string interpolation and run
        with ``shell=True`` — paths containing spaces or shell metacharacters
        would break or be shell-interpreted; confirm inputs are trusted.
        """
        dataPathName = f"{workdir}/{config.__desc__.replace(' ', '_')}"
        command = f"tar cjfP {dataPathName}.tar.bz2 --exclude=.* {backupDir}"
        subprocess.check_call(command, shell=True)
        return dataPathName
| StarcoderdataPython |
12846147 | __author__ = '<NAME>'
from pymongo import MongoClient
import detectlanguage
import json
import logging
import time
logging.basicConfig(
    filename='emovix_twitter_detectlang.log',
    level=logging.WARNING,
    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
    datefmt='%d-%m-%y %H:%M')
# Configuration parameters (overwritten from config.json at startup)
detectlanguage_api_key = ""
database_host = ""
database_name = ""
twitterStatusCol = ""
client = None
db = None
if __name__ == '__main__':
    logging.debug('emovix_twitter_detectlang.py starting ...')
    # Load configuration
    with open('config.json', 'r') as f:
        config = json.load(f)
    detectlanguage_api_key = config['detectlanguage_api_key']
    database_host = config['database_host']
    database_name = config['database_name']
    twitterStatusCol = config['source_box'] + "_twitterStatus"
    client = MongoClient('mongodb://' + database_host + ':27017/')
    db = client[database_name]
    detectlanguage.configuration.api_key = detectlanguage_api_key
    # Main loop: pull tweets without a language detection, batch them in
    # groups of 500, detect languages and write results back to MongoDB.
    while True:
        try:
            # NOTE(review): when the daily quota is exhausted this only
            # sleeps 60s and then proceeds anyway — confirm intended.
            if detectlanguage.user_status()['requests'] >= detectlanguage.user_status()['daily_requests_limit']:
                logging.debug("Number of requests over daily limit.")
                time.sleep(60)
            statuses = db[twitterStatusCol].find({ "language_detections.language": { "$exists": False } })
            if statuses:
                count = 0
                batch_request = []
                batch_status = []
                for twitterStatus in statuses:
                    # Flush a full batch of 500 texts through the API.
                    # NOTE(review): a final partial batch (< 500) is never
                    # flushed before the cursor is re-queried — those tweets
                    # are only retried on the next while-iteration.
                    if count >= 500:
                        logging.debug("Processing batch ...")
                        detections = detectlanguage.detect(batch_request)
                        if len(detections) != 500:
                            logging.error("ABNORMAL NUMBER OF LANGUAGE DETECTIONS: " + str(len(detections)))
                            break
                        count = 0
                        for detection in detections:
                            # Empty detection list -> store a placeholder
                            # record with an empty language code.
                            if len(detection) == 0:
                                detection = {}
                                detection['source'] = 'detectlanguage'
                                detection['language'] = ''
                                batch_status[count]['language_detections'] = []
                                batch_status[count]['language_detections'].append(detection)
                            else:
                                detection[0]['source'] = 'detectlanguage'
                                batch_status[count]['language_detections'] = []
                                batch_status[count]['language_detections'].append(detection[0])
                            db[twitterStatusCol].update( { "_id": batch_status[count]['_id']}, batch_status[count], upsert=True)
                            count += 1
                        count = 0
                        batch_request = []
                        batch_status = []
                    text = twitterStatus['text'].encode('utf-8')
                    batch_request.append(text)
                    batch_status.append(twitterStatus)
                    count += 1
        except Exception as e:
            # Oh well, just keep going
            logging.error(e.__class__)
            logging.error(e)
            continue
        except KeyboardInterrupt:
            break
| StarcoderdataPython |
11346981 | <reponame>asijit123/Python<filename>text_cleaning_comparisson.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[63]:
from string import punctuation
from unidecode import unidecode
from time import process_time
from re import sub, compile
from nltk.corpus import stopwords, gutenberg
from nltk.tokenize import sent_tokenize
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
# Data for testing
emma = gutenberg.words('austen-emma.txt')
example_text = ' '.join(emma)
df = pd.DataFrame(data={'sentences': sent_tokenize(example_text)})

# The tokenizer closes over clean_text, which is looked up at call time.
tokenizer = lambda s: clean_text(s).split()
vectorizer = CountVectorizer(encoding='ascii', decode_error='ignore',
                             strip_accents='ascii',
                             tokenizer=tokenizer, lowercase=False,
                             max_df=0.7,
                             min_df=0.0001
                             )

STOP_WORDS = stopwords.words('english')

# Remove any character that is non alphanumeric or space
pattern_cleaning = compile(r'[^\w\s]|\d')
pattern_stop_words = compile(r'\b(' + r'|'.join(STOP_WORDS) + r')\b\s*')

# First remove punctuation and numbers, then remove stop words
remove_punctuation_r = lambda s: sub(pattern_stop_words, '', sub(pattern_cleaning, '', s.lower()))
remove_short_words = lambda s: ' '.join(filter(lambda w: len(w) > 2, s.split()))

# Remove numbers, short words (one or two characters),
# punctuation, non ascii characters and stop words
clean_text = lambda s: remove_short_words(remove_punctuation_r(s))

# BUG FIX: fit() invokes `tokenizer`, which calls clean_text(); the fit must
# therefore run only AFTER clean_text and its helpers are defined, otherwise
# a NameError is raised at import time.
vectorizer.fit(df['sentences'])
# Data cleaning functions
pattern_cleaning = compile(r'[^\w\s]|\d')
pattern_stop_words = compile(r'\b(' + r'|'.join(stopwords.words('english')) + r')\b\s*')
pattern_short_words = compile(r'\b[^\s]{0,2}\b')

exclude = punctuation

# Punctuation removal: translate-based (t) and regex-based (r) variants.
remove_punctuation_t = lambda s : unidecode(s).translate(str.maketrans('', '', exclude)).lower()
remove_punctuation_r = lambda s : sub(pattern_stop_words, '', sub(pattern_cleaning, '', s.lower()))

# Stop-word removal variants (list comprehension, regex, filter).
remove_stop_words = lambda s : ' '.join([word for word in s.split() if word not in STOP_WORDS])
remove_stop_words_2 = lambda s : sub(pattern_stop_words, '', s)
remove_stop_words_3 = lambda s : ' '.join(filter(lambda w: len(w) > 2 and not w in STOP_WORDS, s.split()))

# Short-word (<= 2 chars) removal variants.
remove_short_words = lambda s : ' '.join(filter(lambda w: len(w) > 2, s.split()))
# BUG FIX: this previously substituted `pattern_stop_words`, duplicating
# remove_stop_words_2 and never removing short words (pattern_short_words
# was defined but unused); use the short-word pattern as the name implies.
remove_short_words_2 = lambda s : sub(pattern_short_words, '', s)

# Composite cleaning pipelines compared by the benchmark below.
clean_text_1 = lambda s: remove_short_words_2(remove_punctuation_r(s))
clean_text_2 = lambda s: remove_short_words(remove_punctuation_r(s))
clean_text_3 = lambda s: remove_stop_words(remove_short_words(remove_punctuation_t(s)))
clean_text_4 = lambda s: remove_stop_words_3(remove_punctuation_t(s))
clean_text_5 = lambda s: remove_stop_words_3(remove_punctuation_r(s))
# Comparing data cleaning ways: time each pipeline and show a result sample.
func = (clean_text_1, clean_text_2, clean_text_3, clean_text_4, clean_text_5)
title = ('Regex and unidecode, loop (short words)',
         'Regex and unidecode, filter (short words)',
         'Translate and unidecode, filter (short words) ,loops (stop words)',
         'Translate and unidecode, filter (short words, stop words)',
         'Regex, loop (short words, stop words)'
         )

for cleaner, heading in zip(func, title):
    banner = '*' * len(heading)
    print(banner)
    print(heading)
    print(banner)
    started = process_time()
    print(df['sentences'].apply(cleaner).head())
    print(f'Time: {process_time()-started}')
| StarcoderdataPython |
1947968 | <gh_stars>1-10
def random_search(space, num_samples):
    """Draw ``num_samples`` random configurations from a search space.

    ``space`` maps each hyper-parameter name to a zero-argument sampler
    callable; each configuration is a dict with one fresh draw per parameter.
    """
    return [
        {param_name: sample() for param_name, sample in space.items()}
        for _ in range(num_samples)
    ]
| StarcoderdataPython |
140980 | <reponame>UT-Covid/compartmental_model_case_studies
import datetime

from .param import (Param, String, Float, Integer, ListStrings, ListFloats,
                    ListInts)
# Expected element counts for the epi-model parameter types below.
EPI_DEMOGRAPHIC_COHORTS = 5
EPI_SCENARIOS = 7
# Public API of this module.
# NOTE(review): several classes defined below (Demographics, Scenarios,
# DemographicsString, ...) are not listed here — confirm if intentional.
__all__ = ['MatlabIntDate', 'ListMatlabIntDates', 'Triangular', 'IntDateRange']
class Triangular(ListFloats):
    """A triangular distribution: exactly three float parameters."""
    MINLEN = 3
    MAXLEN = 3
    help = 'Triangular Distribution'
class MatlabIntDate(Integer):
    # Calendar date encoded as the integer YYYYMMDD (e.g. 20200131).
    help = 'Integer Date'
    @classmethod
    def validate(cls, value):
        """Check that *value* is an 8-digit integer encoding a real date.

        Raises ValueError when the value is not 8 digits long, or when it
        does not parse as a valid YYYYMMDD calendar date (e.g. 20210230).
        Returns True on success.
        """
        text = str(value)
        if len(text) != 8:
            raise ValueError('{0} must be 8 digits in length'.format(value))
        try:
            # Resolves the old TODO: reject impossible dates (month 13 etc.)
            datetime.datetime.strptime(text, '%Y%m%d')
        except ValueError:
            raise ValueError(
                '{0} is not a valid YYYYMMDD date'.format(value))
        return True
class ListMatlabIntDates(ListInts):
    """A non-empty list of YYYYMMDD integer dates."""
    PTYPE = MatlabIntDate
    MINLEN = 1
    help = 'List of Integer Dates'
class IntDateRange(ListMatlabIntDates):
    """A start/end integer-date range (two or three dates)."""
    MAXLEN = 3
    MINLEN = 2
    help = 'Start / End Date'
class MatlabTriangular(String):
    """A string encoding of a Matlab 'Triangular(...)' distribution."""
    help = 'Matlab Triangular'
    @classmethod
    def validate(cls, value):
        """Run the base string validation, then require a 'Triangular' prefix."""
        super().validate(value)
        if value.startswith('Triangular'):
            return True
        raise ValueError(
            '{} does not appear to be a Triangular'.format(value))
class Demographics(ListFloats):
    """Comma-separated demographic variables: one float per cohort (D,D,D,D,...)."""
    help = 'Comma-separated Demographic Variables (Float)'
    @classmethod
    def validate(cls, value):
        """Require exactly EPI_DEMOGRAPHIC_COHORTS float values."""
        super().validate(value)
        if len(value) == EPI_DEMOGRAPHIC_COHORTS:
            return True
        raise ValueError('{} must contain {} values'.format(
            value, EPI_DEMOGRAPHIC_COHORTS))
class Scenarios(ListFloats):
    """Comma-separated scenario variables: one float per scenario (S,S,S,...)."""
    help = 'Comma-separated Scenario Variables (Float)'
    @classmethod
    def validate(cls, value):
        """Require exactly EPI_SCENARIOS float values."""
        super().validate(value)
        if len(value) == EPI_SCENARIOS:
            return True
        raise ValueError('{} must contain {} values'.format(
            value, EPI_SCENARIOS))
class DemographicsString(Demographics):
    """Demographics rendered back into a single comma-separated string."""
    help = 'Stringified Comma-separated Demographic Variables (Float)'
    @classmethod
    def cast(cls, value):
        """Cast through the parent type, then join the floats with commas."""
        parsed = super().cast(value)
        return ','.join(str(item) for item in parsed)
    @classmethod
    def validate(cls, value):
        """Re-parse the string as floats and check the cohort count."""
        tval = ListFloats(value).value
        if len(tval) == EPI_DEMOGRAPHIC_COHORTS:
            return True
        raise ValueError('{} must contain {} values'.format(
            value, EPI_DEMOGRAPHIC_COHORTS))
class DemographicsStringByScenarios(Param):
    # D,D,D,D,D; D,D,D,D,D; ...
    help = 'Semicolon-separated Scenario Variables (Comma-separated Demographics Variables)'
    @classmethod
    def cast(cls, value, delim=';'):
        """Cast a delimited string (or list) into demographics strings.

        Raises TypeError — chained to the underlying error — when the
        input cannot be parsed.
        """
        try:
            if isinstance(value, str):
                # Split on the delimiter (';' by default)
                temp = [i.strip() for i in value.split(delim)]
            elif isinstance(value, list):
                temp = value
            temp2 = [DemographicsString(i).value for i in temp]
            return temp2
        except Exception as exc:
            # BUG FIX: chain the original exception so the real cause
            # (e.g. a float parse error) is not silently discarded.
            raise TypeError(
                'Unable to cast {0} to DemographicsStringByScenarios'.format(
                    value)) from exc
    @classmethod
    def validate(cls, value):
        """Validate list type, scenario count, and each interior entry."""
        super().validate(value)
        # type
        if not isinstance(value, list):
            raise TypeError('Must be a list')
        # length
        if len(value) != EPI_SCENARIOS:
            raise ValueError('{} must contain {} values'.format(
                value, EPI_SCENARIOS))
        # interior contents
        for v in value:
            Demographics(v)
        return True
| StarcoderdataPython |
11368517 | from pyrogram import Client, Filters
from config import cmds
from utils import meval
import traceback
import html
@Client.on_message(Filters.command("eval", prefixes=".") & Filters.me)
async def evals(client, message):
    """Evaluate the text after ``.eval`` and edit the message with the result.

    On evaluation failure the formatted traceback replaces the message;
    if the result cannot be rendered as HTML, the edit error is shown.
    """
    text = message.text[6:]
    try:
        res = await meval(text, locals())
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; catch Exception so those still propagate.
        ev = traceback.format_exc()
        await message.edit(ev)
        return
    else:
        try:
            await message.edit(f"<code>{html.escape(str(res))}</code>")
        except Exception as e:
            await message.edit(e)

cmds.update({'.eval':'Run commands on eval'})
| StarcoderdataPython |
5184832 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import matplotlib.dates as mdates
import os
from datetime import datetime
# Model-run configuration: which scenario set to plot and its dimensions.
TRANSIENT = False
#Model = 'Bertrand'
#steps = 1000
#start = 0
#drainages = 46
#path = '../modelruns_BC_2016/'
Model = 'WRIA1_s'
steps = 16894
start = 15069 # plot only part of the series
drainages = 172
path = '../modelruns_1946_2006/'
# Depth-to-water arrays, one row per drainage, one column per timestep.
zbar_ref = np.zeros((drainages, steps), dtype='d')
zbar_ss = np.zeros((drainages, steps), dtype='d')
if TRANSIENT:
    zbar_transient = np.zeros((drainages, steps), dtype='d')
else:
    zbar_irr = np.zeros((drainages, steps), dtype='d')
    zbar_nirr = np.zeros((drainages, steps), dtype='d')
# Per-timestep dates, collected while reading the reference file.
dates = []
def _load_zbar(filename, dest, collect_dates=False):
    """Read a whitespace-delimited zbar results file into ``dest``.

    Each line is: step, subbasin, year, month, day, zbar.  Values are
    stored at dest[nsub, step] (both 1-based in the file).  When
    *collect_dates* is True, one date per timestep is gathered from the
    rows of subbasin 2 (nsub == 1) and returned as a list of datetimes.
    """
    collected = []
    with open(path + filename, 'r') as fh:
        for line in fh:
            splitLine = line.split()
            step = int(splitLine[0]) - 1
            nsub = int(splitLine[1]) - 1
            dest[nsub, step] = float(splitLine[5])
            if collect_dates and nsub == 1:
                year = int(splitLine[2])
                month = int(splitLine[3])
                day = int(splitLine[4])
                collected.append(np.datetime64('{0:04d}-{1:02d}-{2:02d}'.format(year, month, day)).astype(datetime))
    return collected

# Read the four (or three) result files; previously this was four nearly
# identical copy-pasted read loops — behavior is unchanged.
print('Reading date_zbar.dat')
dates = _load_zbar('results/date_zbar.dat', zbar_ref, collect_dates=True)
print('Reading date_zbar_ss.dat')
_load_zbar('results/date_zbar_ss.dat', zbar_ss)
if TRANSIENT:
    print('Reading date_zbar_transient.dat')
    _load_zbar('results/date_zbar_transient.dat', zbar_transient)
else:
    print('Reading date_zbar_irr.dat')
    _load_zbar('results/date_zbar_irr.dat', zbar_irr)
    print('Reading date_zbar_nirr.dat')
    _load_zbar('results/date_zbar_nirr.dat', zbar_nirr)
# Ensure the per-model output directory exists before saving figures.
if not os.access('images_'+Model, os.F_OK):
    os.mkdir('images_'+Model)
years = mdates.YearLocator()   # every year
months = mdates.MonthLocator()  # every month
years_fmt = mdates.DateFormatter('%Y')
# One depth-to-water-table figure per drainage, saved as a PNG.
for n in range(drainages):
    fig, ax = plt.subplots(figsize=(20,9))
    ax.plot(dates[start:], zbar_ref[n,start:], label='Reference')
    ax.plot(dates[start:], zbar_ss[n,start:], label='Steady State')
    if TRANSIENT:
        ax.plot(dates[start:], zbar_transient[n,start:], label='transient')
    else:
        ax.plot(dates[start:], zbar_irr[n,start:], label='Irrigated')
        ax.plot(dates[start:], zbar_nirr[n,start:], label='Non-Irrigated')
    ax.legend()
    # format the ticks
    ax.xaxis.set_major_locator(years)
    ax.xaxis.set_major_formatter(years_fmt)
    ax.xaxis.set_minor_locator(months)
    # round to nearest years.
    datemin = np.datetime64(dates[start], 'Y')
    datemax = np.datetime64(dates[-1], 'Y') + np.timedelta64(1, 'Y')
    ax.set_xlim(datemin, datemax)
    # format the coords message box
    ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
    ax.grid(True)
    fig.autofmt_xdate()
    #ax.set_xlabel('Timesteps (days)')
    ax.set_ylabel('Depth to Water Table (m)')
    ax.set_title(Model + ' Drainage {0:2d}'.format(n+1))
    print('Saving images_'+Model+'/D_to_W_sub_{0:03d}.png'.format(n+1))
    plt.savefig('images_'+Model+'/D_to_W_sub_{0:03d}.png'.format(n+1))
    #plt.show()
    # Close the figure to free memory across the long loop.
    plt.close()
# If GM ImageMagick is installed, in shell terminal run:
# gm convert -delay 2000 *.png depth_to_water.gif
11286420 | <filename>flask_jwt_extended/jwt_manager.py
from flask import jsonify
class JWTManager:
    """Registry of JWT-related callbacks for a Flask application.

    Each ``*_loader`` method installs a user-supplied callback and returns
    it unchanged, so the methods double as decorators.
    """

    def __init__(self, app=None):
        def _json_message(message, status):
            # jsonify is resolved lazily, only when a callback actually fires.
            return jsonify({'msg': message}), status

        # Adds custom user claims to a JWT; by default no extra claims.
        self.user_claims_callback = lambda _identity: {}
        # Called when an expired token is received.
        self.expired_token_callback = lambda: _json_message('Token has expired', 401)
        # Called when an invalid token is received (gets the error message).
        self.invalid_token_callback = lambda err: (jsonify({'msg': err}), 422)
        # Called when a protected endpoint is hit without a valid token.
        self.unauthorized_callback = lambda: _json_message('Missing Authorization Header', 401)
        # Called when a fresh_jwt_required endpoint receives a stale token.
        self.needs_fresh_token_callback = lambda: _json_message('Fresh token required', 401)
        # Called when a revoked (blacklisted) token is presented.
        self.revoked_token_callback = lambda: _json_message('Token has been revoked', 401)

        # The app may be passed here or provided later via init_app().
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Register this extension on *app* as ``app.jwt_manager``."""
        app.jwt_manager = self

    def user_claims_loader(self, callback):
        """Register the callback that adds custom user claims to a JWT.

        The callback takes the identity the JWT is created for and returns
        a mapping of extra claims (by default, none are added).
        """
        self.user_claims_callback = callback
        return callback

    def expired_token_loader(self, callback):
        """Register the zero-argument callback fired on expired JWTs.

        Default response: json ``{"msg": "Token has expired"}`` with 401.
        """
        self.expired_token_callback = callback
        return callback

    def invalid_token_loader(self, callback):
        """Register the callback fired on invalid JWTs.

        The callback receives the error message explaining why the token
        is invalid.  Default response: json ``{"msg": <err>}`` with 422.
        """
        self.invalid_token_callback = callback
        return callback

    def unauthorized_loader(self, callback):
        """Register the callback fired when no valid token is supplied.

        Default response: json ``{"msg": "Missing Authorization Header"}``
        with 401.
        """
        self.unauthorized_callback = callback
        return callback

    def needs_fresh_token_loader(self, callback):
        """Register the zero-argument callback fired when a valid but
        non-fresh token hits a ``@fresh_jwt_required`` endpoint.

        Default response: json ``{"msg": "Fresh token required"}`` with 401.
        """
        self.needs_fresh_token_callback = callback
        return callback

    def revoked_token_loader(self, callback):
        """Register the zero-argument callback fired on revoked tokens.

        Default response: json ``{"msg": "Token has been revoked"}``
        with 401.
        """
        self.revoked_token_callback = callback
        return callback
367248 | import torch
import numpy as np
def measure(vector):
    """Squared L2 norm of a tensor: the sum of its squared entries."""
    return (vector * vector).sum()
def expm(q, rtol=1e-3, maxStep=15):
    """Matrix exponential of ``q`` via a truncated Taylor series.

    Terms q^k / k! are accumulated onto the identity until the squared norm
    of the current term falls below ``rtol * measure(q)``, or until more
    than ``maxStep`` terms have been generated.
    """
    result = torch.eye(q.shape[-1]).to(q)
    term = q
    k = 1
    threshold = rtol * measure(q)
    while measure(term) >= threshold:
        result = term + result
        k += 1
        term = torch.matmul(term, q) / k
        if k > maxStep:
            break
    return result
def expmv(q, v, rtol=1e-3, maxStep=15):
    """Compute exp(q) @ v by a truncated Taylor series, without forming exp(q).

    Terms (q^k / k!) v are accumulated onto ``v`` until the squared norm of
    the current term falls below ``rtol`` times the first term's squared
    norm, or until more than ``maxStep`` terms have been generated.
    """
    result = v
    term = torch.matmul(q, v)
    k = 1
    threshold = rtol * measure(term)
    while measure(term) >= threshold:
        result = term + result
        k += 1
        term = torch.matmul(q, term) / k
        if k > maxStep:
            break
    return result
9643718 | <filename>fsm.py
from transitions.extensions import GraphMachine
import time
import random
# Pet stats: health (Hp), fullness (Lp) and likeability (Lk).
Hp = 60
Lp = 60
Lk = 60

def TAMAGOCHI():
    """Render the pet's current stats, one 'Label: value' line each."""
    stats = (('Lp', Lp), ('Hp', Hp), ('Like', Lk))
    return ''.join('{}: {}\n'.format(label, value) for label, value in stats)
#def FULL_TEST():
# if Lp <= 50:
# ret = "尚未吃飽\n"+"[a] 繼續餵食\n"+"[b] 停止餵食\n"
# elif Lp > 50:
# ret = "吃尬飽飽\n"+"[b] 停止餵食\n"+"[c] 繼續餵食\n"
# return ret
#def STATE_CHANGE(Lpa, Hpa, Lka):
# Lp = Lp + Lpa
# Hp = Hp + Hpa
# Lk = Lk + Lka
# if Hp<=0 or Lp<=0 or Lk<=0:
# return -1
# else:
# return 0
class TocMachine(GraphMachine):
    """Finite-state machine driving a Tamagotchi-style chat bot.

    ``is_going_to_stateN`` methods are transition conditions matched against
    the incoming message text; ``on_enter_stateN`` methods reply to the user
    and mutate the global pet stats (Lp/Hp/Lk).  All user-facing strings are
    in Traditional Chinese and must not be altered.
    """
    def __init__(self, **machine_configs):
        self.machine = GraphMachine(
            model = self,
            **machine_configs
        )
    # new: reset the pet's stats on entering the initial state
    def on_enter_state0(self, update):
        global Lp, Hp, Lk
        Lp = 60
        Hp = 60
        Lk = 60
    def is_going_to_state1(self, update):
        text = update.message.text
        return text.lower() == '/start'
    def on_enter_state1(self, update):
        # Greet with current stats, then return so the next input is handled.
        ret = TAMAGOCHI() + "\n來與蛋子做點事情吧\n開始冒險?[y/n]\n"
        update.message.reply_text(ret)
        self.go_back(update)
    # intro: main menu, shown only while the pet is still alive
    def is_going_to_state2(self, update):
        text = update.message.text
        return text.lower() == 'y'
    def on_enter_state2(self, update):
        if Lp<0 or Hp<0 or Lk<0:
            self.go_back(update)
        else:
            update.message.reply_text("[3] 吃點什麼\n[17] 講個笑話R\n[12] 來點其他的\n")
    # A: food menu
    def is_going_to_state3(self, update):
        text = update.message.text
        return text.lower() == '3'
    def on_enter_state3(self, update):
        ret = TAMAGOCHI() + "[4]便當\n[5]速食\n[6]牛排\n[7]麵包\n[8]泡麵\n[9]點心\n"
        update.message.reply_text(ret)
    def is_going_to_state4(self, update):
        text = update.message.text
        return text.lower() == '4'
    def on_enter_state4(self, update): ## bendong (lunch box): +Lp +Hp -Lk
        update.message.reply_text("[3]繼續餵食\n[10]停止餵食\n")
        global Lp, Hp, Lk
        Lp=Lp+5
        Hp=Hp+5
        Lk=Lk-1
    # B
    def is_going_to_state5(self, update):
        text = update.message.text
        return text.lower() == '5'
    def on_enter_state5(self, update): ## fast_food: -Lp -Hp +Lk
        update.message.reply_text("[3]繼續餵食\n[10]停止餵食\n")
        global Lp, Hp, Lk
        Lp=Lp-5
        Hp=Hp-5
        Lk=Lk+10
    # C
    def is_going_to_state6(self, update):
        text = update.message.text
        return text.lower() == '6'
    def on_enter_state6(self, update): ## steak: +Lp -Hp -Lk
        update.message.reply_text("[3]繼續餵食\n[10]停止餵食\n")
        global Lp, Hp, Lk
        Lp=Lp+5
        Hp=Hp-5
        Lk=Lk-1
    # D
    def is_going_to_state7(self, update):
        text = update.message.text
        return text.lower() == '7'
    def on_enter_state7(self, update): ## bread: +Lp +Hp -Lk
        update.message.reply_text("[3]繼續餵食\n[10]停止餵食\n")
        global Lp, Hp, Lk
        Lp=Lp+5
        Hp=Hp+1
        Lk=Lk-5
    # E
    def is_going_to_state8(self, update):
        text = update.message.text
        return text.lower() == '8'
    def on_enter_state8(self, update): ## fast_food (instant noodles): -Hp -Lp +Lk
        update.message.reply_text("[3]繼續餵食\n[10]停止餵食\n")
        global Lp, Hp, Lk
        Hp=Hp-10
        Lp=Lp-10
        Lk=Lk+5
    # F
    def is_going_to_state9(self, update):
        text = update.message.text
        return text.lower() == '9'
    def on_enter_state9(self, update): ## snack: -Hp -Lp +Lk
        update.message.reply_text("[3]繼續餵食\n[10]停止餵食\n")
        global Lp, Hp, Lk
        Hp=Hp-5
        Lp=Lp-5
        Lk=Lk+5
    # DIE: any stat below zero triggers the death state.
    # NOTE(review): returns the string '11' (truthy) instead of True, and
    # implicitly None otherwise — transitions treats any truthy value as a
    # satisfied condition; confirm this is intentional.
    def is_going_to_state11(self, update):
        if Lp<0 or Hp<0 or Lk<0:
            return '11'
        # update.message.reply_text("TAMAGOCHI IS DEAD")
    def on_enter_state11(self, update):
        update.message.reply_text("DIE")
        self.go_back(update)
    # back to option (the prints are debug traces of the fullness level)
    def is_going_to_state10(self, update):
        if Lp<=50:
            print("Lp<50")
        else:
            print("Lp>50")
        text = update.message.text
        return text.lower() == '10'
    def on_enter_state10(self, update):
        self.go_back(update)
    # Other-activities submenu
    def is_going_to_state12(self, update):
        text = update.message.text
        return text.lower() == '12'
    def on_enter_state12(self, update):
        update.message.reply_text("[13] 睡覺\n[14] 滑手機\n")
    def is_going_to_state13(self, update):
        text = update.message.text
        return text.lower() == '13'
    def on_enter_state13(self, update):
        # Sleeping: all three stats go up.
        update.message.reply_text("睡飽飽^^")
        global Hp, Lp, Lk
        Hp = Hp + 10
        Lp = Lp + 10
        Lk = Lk + 10
        self.go_back(update)
    def is_going_to_state14(self, update):
        text = update.message.text
        return text.lower() == '14'
    def on_enter_state14(self, update):
        update.message.reply_text("[15] FB\n[16] Google\n")
    def is_going_to_state15(self, update):
        text = update.message.text
        return text.lower() == '15'
    def on_enter_state15(self, update):
        # Browsing FB: +Lk, -Hp, -Lp
        update.message.reply_text("http://www.facebook.com")
        global Lk, Hp, Lp
        Lk = Lk + 5
        Hp = Hp - 5
        Lp = Lp - 5
        self.go_back(update)
    def is_going_to_state16(self, update):
        text = update.message.text
        return text.lower() == '16'
    def on_enter_state16(self, update):
        # Browsing Google: same stat changes as FB.
        update.message.reply_text("www.google.com")
        global Lk, Hp, Lp
        Lk = Lk + 5
        Hp = Hp - 5
        Lp = Lp - 5
        self.go_back(update)
    def is_going_to_state17(self, update):
        text = update.message.text
        return text.lower() == '17'
    def on_enter_state17(self, update):
        # Tell a random joke; dice in [0, 3], with 0 meaning "no joke today".
        dice = random.randint(0,3)
        if dice == 1:
            update.message.reply_text("xx個人電腦維修工作室\n這天半夜有個客人打電話來問說\n客人:「xx電腦維修公司嗎?」\n工程師:「是的!請問客人有什麼問題?」\n客人:「我的電腦不能開機。」\n工程師:「您電源插頭有插嗎?」\n客人:「有的。」\n工程師:「請檢查一下power電源插頭是否有鬆落,接觸不良。」\n客人:「沒!」\n工程師:「那請您拿出紙、筆來。」\n客人:「喔好。稍等一下,我先找一下拿手電筒。」\n工程師:「為什麼要拿手電筒?」\n客人:「我家停電啊!」\n工程師:「…………」\n")
        elif dice == 2:
            update.message.reply_text("明明:我爸爸在公車上會讓人,你爸爸會嗎?\n笑笑:不會.\n明明:為什麼?\n笑笑:因為,他是公車師機.")
        elif dice == 3:
            update.message.reply_text("第一天上學的小朋友哭的很可憐,老師問他原因,他說:「我不喜歡學校,可是以後我得天天來這你,一直到15歲。」老師安慰她道:「我比你更可憐,我得天天來這裡,一直到60歲呢!」")
        else:
            update.message.reply_text("突然想不到笑話拉>.<")
        self.go_back(update)
    def force_exit(self, update):
        text = update.message.text
        return text.lower() == '/exit'
3244339 | import unittest
from base import BaseTestCase
class CreditCardTestCase(unittest.TestCase, BaseTestCase):
    """
    Test cases for credit card number removal.

    Each test method's docstring is the fixture itself: BaseTestCase's
    compare_before_after() presumably parses the BEFORE/AFTER lines from
    the calling test's docstring and asserts that scrubbing the BEFORE
    text yields the AFTER text -- TODO confirm against BaseTestCase.
    All these numbers will clash with PASSPORT filth.
    """
    # NOTE: the method docstrings below are data, not documentation.
    # Do not reword or reformat them; the card numbers are the standard
    # test numbers published by the card networks.

    def test_american_express(self):
        """
        BEFORE: My credit card is 378282246310005.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_american_express2(self):
        """
        BEFORE: My credit card is 371449635398431.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_american_corporate(self):
        """
        BEFORE: My credit card is 378734493671000.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_diners_club(self):
        """
        BEFORE: My credit card is 30569309025904.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_diners_club2(self):
        """
        BEFORE: My credit card is 38520000023237.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_discover(self):
        """
        BEFORE: My credit card is 6011111111111117.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_discover2(self):
        """
        BEFORE: My credit card is 6011000990139424.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_jcb(self):
        """
        BEFORE: My credit card is 3530111333300000.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_jcb2(self):
        """
        BEFORE: My credit card is 3566002020360505.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_mastercard(self):
        """
        BEFORE: My credit card is 5555555555554444.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_mastercard2(self):
        """
        BEFORE: My credit card is 5105105105105100.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_visa(self):
        """
        BEFORE: My credit card is 4111111111111111.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()

    def test_visa2(self):
        """
        BEFORE: My credit card is 4012888888881881.
        AFTER: My credit card is {{CREDIT_CARD}}.
        """
        self.compare_before_after()
| StarcoderdataPython |
9746845 | <filename>NCPWD/apps/topics/urls.py
from django.urls import path, include
from .views import TopicAPIView
from rest_framework import routers

# URL namespace for reversing these routes (e.g. "topics:...").
app_name = "topics"

# DefaultRouter auto-generates the standard list/detail routes (plus a
# browsable API root) for the registered viewset.
router = routers.DefaultRouter()
# NOTE(review): router.register expects a ViewSet; despite its name,
# TopicAPIView is presumably a ViewSet subclass -- confirm in .views.
router.register(r"topics", TopicAPIView)

urlpatterns = [path('', include(router.urls))]
| StarcoderdataPython |
74982 | import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as Data
import numpy as np
import time
import sys
import utils
# Build a small high-dimensional regression problem (20 train / 100 test
# samples, 200 features) so the model overfits without regularization.
print('生成测试数据')
n_train, n_test, num_inputs = 20, 100, 200
# Ground truth: y = 0.01 * sum(x) + 0.05, plus Gaussian noise below.
true_w, true_b = torch.ones(num_inputs, 1) * 0.01, 0.05
features = torch.randn((n_train + n_test, num_inputs))
labels = torch.matmul(features, true_w) + true_b
# Add N(0, 0.01) observation noise.
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
# First n_train rows are the training split, the rest the test split.
train_features, test_features = features[:n_train, :], features[n_train:, :]
train_labels, test_labels = labels[:n_train], labels[n_train:]
print('初始化模型参数')
def init_params():
    """Create freshly initialized weight and bias tensors that track gradients."""
    weight = torch.randn((num_inputs, 1), requires_grad=True)
    bias = torch.zeros(1, requires_grad=True)
    return [weight, bias]
print('定义 L2 惩罚项')
def l2_penalty(w):
    """Return the L2 penalty of ``w``: half the sum of its squared entries."""
    return w.pow(2).sum() / 2
print('定义训练和测试')
# Hyper-parameters: single-sample batches, 100 epochs, small learning rate.
batch_size, num_epochs, lr = 1, 100, 0.003
# Reuse the linear-regression model and squared loss from utils.
net, loss = utils.linreg, utils.squared_loss
dataset = torch.utils.data.TensorDataset(train_features, train_labels)
train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
def fit_and_plot(lambd):
    """Train linear regression with an L2 penalty weighted by ``lambd``.

    Plots train/test loss per epoch on a semilog scale and prints the
    final L2 norm of the learned weights.  ``lambd == 0`` disables
    weight decay entirely.
    """
    w, b = init_params()
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            # Data loss plus the weight-decay term (bias is not penalized).
            l = loss(net(X, w, b), y) + lambd * l2_penalty(w)
            l = l.sum()
            # Gradients accumulate in PyTorch; clear them before backward().
            # w.grad is None only before the very first backward pass.
            if w.grad is not None:
                w.grad.data.zero_()
                b.grad.data.zero_()
            l.backward()
            utils.sgd([w, b], lr, batch_size)
        # Record full-split losses once per epoch for plotting.
        train_ls.append(loss(net(train_features, w, b), train_labels).mean().item())
        test_ls.append(loss(net(test_features, w, b), test_labels).mean().item())
    utils.semilogy(range(1, num_epochs+1), train_ls, 'epochs', 'loss',
                   range(1, num_epochs+1), test_ls, ['train', 'test'])
    print('L2 norm of w:', w.norm().item())
print('观察过拟合')
# No weight decay: the tiny training set is fit almost perfectly (overfitting).
fit_and_plot(lambd=0)
print('使用权重衰减')
# With weight decay (lambda = 4) the train/test loss gap shrinks.
fit_and_plot(lambd=4)
| StarcoderdataPython |
# Re-export the package's public class at the top level.
from .datetimewindow import DatetimeWindow

# Package version string.
__version__ = '0.1'
| StarcoderdataPython |
6629882 | <filename>slingen/src/algogen/BackEnd/trsm2lgen.py
from core.expression import Equal, Times, Minus, Inverse, Transpose, NList, Predicate, PatternDot
import core.properties as props
from core.functional import RewriteRule, Constraint, Replacement
import Config
import PredicateMetadata as pm
# Register predicate metadata for every triangular-solve variant.
#
# Names encode: side ("ldiv" solves A \ B, "rdiv" solves B / A),
# triangle ("l" lower / "u" upper), transposition ("n" none / "t"
# transposed) and diagonal kind ("i" implicit unit / "n" non-unit /
# "u" unit).  Each variant also has an "_ow" twin whose overwrite list
# is [(1, 0)] -- presumably "operand 1 is overwritten by output 0",
# i.e. the solve happens in place over B; confirm in PredicateMetadata.
for _side in ("ldiv", "rdiv"):
    for _tri in ("l", "u"):
        for _trans in ("n", "t"):
            for _diag in ("i", "n", "u"):
                _name = "%s_%s%s%s" % (_side, _tri, _trans, _diag)
                pm.DB[_name] = pm.PredicateMetadata(_name, tuple())
                pm.DB[_name].overwrite = []
                _name_ow = _name + "_ow"
                pm.DB[_name_ow] = pm.PredicateMetadata(_name_ow, tuple())
                pm.DB[_name_ow].overwrite = [(1, 0)]
# Pattern placeholders used by the rewrite rules below: A is the
# (triangular) system matrix, B the right-hand side, X the output operand.
A = PatternDot("A")
B = PatternDot("B")
X = PatternDot("X")
def _trsm_constraint(tri, diag, overwrite):
    """Build the Constraint for one solve variant.

    tri: "l"/"u" selects lower/upper triangular A.
    diag: "i" adds isImplicitUnitDiagonal(), "u" adds isUnitDiagonal(),
          "n" adds no diagonal predicate.
    overwrite: True for the "_ow" variants, which require that X is
          stored somewhere other than itself (st_info name differs).
    """
    parts = ["A.isLowerTriangular()" if tri == "l" else "A.isUpperTriangular()"]
    if diag == "i":
        parts.append("A.isImplicitUnitDiagonal()")
    elif diag == "u":
        parts.append("A.isUnitDiagonal()")
    parts.append("X.st_info[1].name %s X.name" % ("!=" if overwrite else "=="))
    return Constraint(" and ".join(parts))


def _trsm_rule(side, tri, trans, diag, overwrite, negated=False):
    """Build one RewriteRule mapping X = inv(A) B (or a variant) to a Predicate.

    side: "ldiv" matches Times([inv(A), B]); "rdiv" matches Times([B, inv(A)]).
    trans: "t" wraps the inverse in Transpose.
    negated: matches Minus([inv(A)]) and wraps the Predicate in Minus
          (only used for the ldiv_lnn/_ow variants, mirroring the
          original hand-written rule set).
    """
    name = "%s_%s%s%s%s" % (side, tri, trans, diag, "_ow" if overwrite else "")
    inv = Inverse([ A ])
    if trans == "t":
        inv = Transpose([ inv ])
    if negated:
        inv = Minus([ inv ])
    product = Times([ inv, B ]) if side == "ldiv" else Times([ B, inv ])

    # Bind name/negated via defaults to avoid the late-binding-closure trap.
    def _replacement(d, _name=name, _negated=negated):
        pred = Predicate(_name, [d["A"], d["B"]],
                         [d["A"].get_size(), d["B"].get_size()])
        if _negated:
            pred = Minus([ pred ])
        return Equal([ NList([ d["X"] ]), pred ])

    return RewriteRule(
        (
            Equal([ NList([ X ]), product ]),
            _trsm_constraint(tri, diag, overwrite)
        ),
        Replacement(_replacement)
    )


def _build_trsm2lgen_rules():
    """Generate the full rule list in the same order as the original table.

    Order matters: within each (side, triangle, transposition) group the
    diagonal-specific rules ("i", then "u") must precede the generic
    non-unit rule ("n"), whose constraint would also match them.
    """
    rules = []
    for side in ("ldiv", "rdiv"):
        for tri in ("l", "u"):
            for trans in ("n", "t"):
                for diag in ("i", "u", "n"):
                    for overwrite in (False, True):
                        rules.append(_trsm_rule(side, tri, trans, diag, overwrite))
                        # The original rule set additionally handles the
                        # negated product X = -inv(A) B for ldiv_lnn/_ow.
                        if (side, tri, trans, diag) == ("ldiv", "l", "n", "n"):
                            rules.append(
                                _trsm_rule(side, tri, trans, diag, overwrite,
                                           negated=True))
    return rules


trsm2lgen_rules = _build_trsm2lgen_rules()
| StarcoderdataPython |
# Compute a worker's net salary from the number of hours worked, the price
# per hour, and a fixed 20% tax deduction applied to the base salary.
horas = float(input("Ingrese el numero de horas trabajadas: "))
precio_hora = float(input("Ingrese el precio por hora trabajada: "))
sueldo_base = float(input("Ingrese el valor del sueldo base: "))
# Pay for the hours actually worked (not taxed here).
pago_hora = horas * precio_hora
# Tax rate deducted from the base salary.
impuesto = 0.2
# Net salary = hourly pay plus the base salary after the deduction.
# (Uses the declared rate instead of a hard-coded 0.8 factor.)
salario_neto = pago_hora + (sueldo_base * (1 - impuesto))
print(f"Si el trabajador tiene un sueldo base de {sueldo_base}$ (al cual se le descuenta un 20% por impuestos), trabaja {horas} horas, y la hora se le paga a {precio_hora}$.")
print(f"El salario neto del trabajador es de {salario_neto}$") | StarcoderdataPython |
6649996 | <filename>challenges/counting_syllabes.py
def count(text):
    """Count the syllables in *text*, where syllables are separated by hyphens."""
    return len(text.split('-'))
8094375 | <reponame>jcarrete5/drexel-api
from django.db import models
class Course(models.Model):
    """A course offering and its prerequisite/corequisite relationships."""
    # Human-readable course title.
    title = models.CharField(max_length=50)
    # 5-character registration identifier (presumably the Course
    # Reference Number -- confirm against the registrar data source).
    crn = models.CharField(max_length=5)
    # Catalog number within the subject, e.g. "101".
    course_num = models.CharField(max_length=4)
    # Subject/department code, e.g. "CS".
    subject_code = models.CharField(max_length=4)
    description = models.TextField()
    # Directed relation: symmetrical=False because "A requires B" is
    # one-way; a prerequisite's reverse accessor is `dependents`.
    prerequisites = models.ManyToManyField(
        "self", symmetrical=False, related_name="dependents"
    )
    # Symmetric by default for self-relations; related_name="+" disables
    # the reverse accessor.
    corequisites = models.ManyToManyField("self", related_name="+")
    # e.g. 3.00 credit hours (max 9.99 with 2 decimal places).
    credits = models.DecimalField(max_digits=3, decimal_places=2)
| StarcoderdataPython |
1864968 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for hypothesis testing of bijectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import inspect
from absl import logging
import hypothesis.strategies as hps
import six
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import tensorshape_util
tfb = tfp.bijectors
tfd = tfp.distributions

# Bijectors that need special construction and are therefore excluded from
# the generic instantiation scan (Invert wraps another bijector).
SPECIAL_BIJECTORS = ['Invert']

# INSTANTIABLE_BIJECTORS is a map from str->(BijectorClass,)
# Lazily populated cache; see instantiable_bijectors().
INSTANTIABLE_BIJECTORS = None
def instantiable_bijectors():
  """Identifies bijectors that are trivially instantiable.

  Scans `tfb` for Bijector subclasses whose constructors take no required
  arguments beyond `self`/`name`/`validate_args`, skipping the special
  cases in SPECIAL_BIJECTORS and any non-injective bijector.

  Returns:
    Dict mapping bijector name to a 1-tuple `(BijectorClass,)`.  The
    result is cached in the module-level INSTANTIABLE_BIJECTORS.
  """
  global INSTANTIABLE_BIJECTORS
  if INSTANTIABLE_BIJECTORS is not None:
    return INSTANTIABLE_BIJECTORS
  result = {}
  for (bijector_name, bijector_class) in six.iteritems(tfb.__dict__):
    if (not inspect.isclass(bijector_class) or
        not issubclass(bijector_class, tfb.Bijector) or
        bijector_name in SPECIAL_BIJECTORS):
      continue
    # ArgSpec(args, varargs, keywords, defaults)
    # NOTE(review): inspect.getargspec is deprecated (removed in Python
    # 3.11) and rejects keyword-only arguments; switch to getfullargspec
    # once Python 2 support (the `six` usage above) is dropped.
    spec = inspect.getargspec(bijector_class.__init__)
    ctor_args = set(spec.args) | set(
        [arg for arg in (spec.varargs, spec.keywords) if arg is not None])
    # Only bijectors constructible without user-supplied arguments qualify.
    unsupported_args = set(ctor_args) - set(['name', 'self', 'validate_args'])
    if unsupported_args:
      logging.warning('Unable to test tfb.%s: unsupported args %s',
                      bijector_name, unsupported_args)
      continue
    if not bijector_class()._is_injective:  # pylint: disable=protected-access
      logging.warning('Unable to test non-injective tfb.%s.', bijector_name)
      continue
    result[bijector_name] = (bijector_class,)
  # Invert is special-cased: it becomes instantiable once given an inner
  # bijector (see unconstrained_bijectors below).
  result['Invert'] = (tfb.Invert,)
  for bijector_name in sorted(result):
    logging.warning('Supported bijector: tfb.%s', bijector_name)
  INSTANTIABLE_BIJECTORS = result
  return INSTANTIABLE_BIJECTORS
class Support(object):
  """Enumeration of supports for trivially instantiable bijectors.

  String tags describing the set a bijector's domain (forward) or
  codomain (inverse) lives in, so tests can generate valid inputs.
  """
  # Scalar supports.
  SCALAR_UNCONSTRAINED = 'SCALAR_UNCONSTRAINED'
  SCALAR_NON_NEGATIVE = 'SCALAR_NON_NEGATIVE'
  SCALAR_NON_ZERO = 'SCALAR_NON_ZERO'
  SCALAR_GT_NEG1 = 'SCALAR_GT_NEG1'
  SCALAR_IN_NEG1_1 = 'SCALAR_IN_NEG1_1'
  SCALAR_IN_0_1 = 'SCALAR_IN_0_1'
  # Vector supports.
  VECTOR_UNCONSTRAINED = 'VECTOR_UNCONSTRAINED'
  VECTOR_SIZE_TRIANGULAR = 'VECTOR_SIZE_TRIANGULAR'
  VECTOR_WITH_L1_NORM_1_SIZE_GT1 = 'VECTOR_WITH_L1_NORM_1_SIZE_GT1'
  VECTOR_STRICTLY_INCREASING = 'VECTOR_STRICTLY_INCREASING'
  # Matrix supports.
  MATRIX_LOWER_TRIL_POSITIVE_DEFINITE = 'MATRIX_LOWER_TRIL_POSITIVE_DEFINITE'
  MATRIX_POSITIVE_DEFINITE = 'MATRIX_POSITIVE_DEFINITE'
  CORRELATION_CHOLESKY = 'CORRELATION_CHOLESKY'
  # Catch-all for supports not otherwise classified (e.g. Invert).
  OTHER = 'OTHER'
# Pair of Support tags for a bijector's forward domain and inverse domain.
BijectorSupport = collections.namedtuple('BijectorSupport', 'forward,inverse')

# Lazily populated cache; see bijector_supports().
BIJECTOR_SUPPORTS = None
def bijector_supports():
  """Returns a dict of support mappings for each instantiable bijector.

  The table is hand-maintained; a ValueError is raised if it drifts out
  of sync with instantiable_bijectors().  The result is cached in the
  module-level BIJECTOR_SUPPORTS.
  """
  global BIJECTOR_SUPPORTS
  if BIJECTOR_SUPPORTS is not None:
    return BIJECTOR_SUPPORTS
  supports = {
      'CholeskyOuterProduct':
          BijectorSupport(Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE,
                          Support.MATRIX_POSITIVE_DEFINITE),
      'CholeskyToInvCholesky':
          BijectorSupport(Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE,
                          Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE),
      'CorrelationCholesky':
          BijectorSupport(Support.VECTOR_SIZE_TRIANGULAR,
                          Support.CORRELATION_CHOLESKY),
      'MatrixInverseTriL':
          BijectorSupport(Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE,
                          Support.MATRIX_LOWER_TRIL_POSITIVE_DEFINITE),
      'NormalCDF':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_NON_NEGATIVE),
      'Exp':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_NON_NEGATIVE),
      'Expm1':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_GT_NEG1),
      'Identity':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_UNCONSTRAINED),
      'Invert':
          BijectorSupport(Support.OTHER, Support.OTHER),
      'IteratedSigmoidCentered':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_WITH_L1_NORM_1_SIZE_GT1),
      'Ordered':
          BijectorSupport(Support.VECTOR_STRICTLY_INCREASING,
                          Support.VECTOR_UNCONSTRAINED),
      'Reciprocal':
          BijectorSupport(Support.SCALAR_NON_ZERO, Support.SCALAR_NON_ZERO),
      'Sigmoid':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED, Support.SCALAR_IN_0_1),
      'Softsign':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_IN_NEG1_1),
      'SoftmaxCentered':
          BijectorSupport(Support.VECTOR_UNCONSTRAINED,
                          Support.VECTOR_WITH_L1_NORM_1_SIZE_GT1),
      'Square':
          BijectorSupport(Support.SCALAR_NON_NEGATIVE,
                          Support.SCALAR_NON_NEGATIVE),
      'Tanh':
          BijectorSupport(Support.SCALAR_UNCONSTRAINED,
                          Support.SCALAR_IN_NEG1_1),
  }
  # Fail loudly if the hand-written table and the auto-discovered set of
  # instantiable bijectors disagree in either direction.
  missing_keys = set(instantiable_bijectors().keys()) - set(supports.keys())
  unexpected_keys = set(supports.keys()) - set(instantiable_bijectors().keys())
  if missing_keys:
    raise ValueError('Missing bijector supports: {}'.format(missing_keys))
  if unexpected_keys:
    raise ValueError('Unexpected bijector names: {}'.format(unexpected_keys))
  BIJECTOR_SUPPORTS = supports
  return BIJECTOR_SUPPORTS
@hps.composite
def unconstrained_bijectors(draw):
  """Draws bijectors which can act on [numerically] unconstrained events."""
  # Strategy over every instantiable bijector name.
  name_strategy = hps.one_of(map(hps.just, instantiable_bijectors().keys()))

  def acts_on_unconstrained(name):
    # 'Invert' is let through here; its wrapped bijector is checked below.
    return (name == 'Invert' or
            'UNCONSTRAINED' in bijector_supports()[name].forward)

  chosen = draw(name_strategy.filter(acts_on_unconstrained))
  if chosen != 'Invert':
    return instantiable_bijectors()[chosen][0](validate_args=True)
  # For Invert, the underlying bijector must be unconstrained on the
  # inverse side, since Invert swaps forward/inverse.
  inner_name = draw(name_strategy.filter(
      lambda name: 'UNCONSTRAINED' in bijector_supports()[name].inverse))
  inner = instantiable_bijectors()[inner_name][0](validate_args=True)
  return tfb.Invert(inner, validate_args=True)
def distribution_filter_for(bijector):
  """Returns filter function f s.t. f(dist)=True => bijector can act on dist."""
  def _square_matrix_event(dist):
    # CholeskyToInvCholesky requires a square (rank-2) event shape.
    return (tensorshape_util.rank(dist.event_shape) == 2 and
            int(dist.event_shape[0]) == int(dist.event_shape[1]))

  def _lkj_cholesky(dist):
    # CorrelationCholesky only acts on LKJ samples in Cholesky form.
    return isinstance(dist, tfd.LKJ) and dist.input_output_cholesky

  if isinstance(bijector, tfb.CholeskyToInvCholesky):
    additional_check = _square_matrix_event
  elif isinstance(bijector, tfb.CorrelationCholesky):
    additional_check = _lkj_cholesky
  else:
    additional_check = lambda dist: True

  def distribution_filter(dist):
    # Floating dtype and sufficient event rank are required for any bijector.
    if not dtype_util.is_floating(dist.dtype):
      return False
    if bijector.forward_min_event_ndims > tensorshape_util.rank(
        dist.event_shape):
      return False
    return additional_check(dist)
  return distribution_filter
| StarcoderdataPython |
3502657 | <filename>python/8896.py<gh_stars>1-10
import sys
input = lambda: sys.stdin.readline().rstrip()
def conv(c):
    """Map a hand character to an index: 'R' -> 0, 'S' -> 1, anything else -> 2."""
    return {'R': 0, 'S': 1}.get(c, 2)
# For each test case: simulate a rock-paper-scissors elimination tournament.
# All surviving players show a hand each round; when exactly two distinct
# hands appear, every player showing the losing hand is eliminated.
# Print the 1-based index of the sole survivor, or 0 if there is none.
for _ in range(int(input())):
    n = int(input())
    v = [input() for _ in range(n)]
    check = [0] * n  # 1 marks an eliminated player
    for i in range(len(v[0])):
        cur = [0] * 3
        for j in range(n):
            if not check[j]:
                cur[conv(v[j][i])] = 1
        if sum(cur) != 2:
            continue  # one or three distinct hands: nobody is eliminated
        # The winning hand of the two present: R beats S, S beats P, P beats R.
        win = 0 if cur[0] and cur[1] else 1 if cur[1] and cur[2] else 2
        for j in range(n):
            if not check[j] and conv(v[j][i]) != win:
                check[j] = 1
    # Exactly one survivor iff n-1 players were eliminated.
    print(check.index(0) + 1 if sum(check) == n - 1 else 0)
5089641 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
# Selection values for the applicant "Appreciation" (priority) field:
# (stored string value, human-readable label), ordered worst to best.
AVAILABLE_PRIORITIES = [
    ('0', 'Bad'),
    ('1', 'Below Average'),
    ('2', 'Average'),
    ('3', 'Good'),
    ('4', 'Excellent')
]
class hr_recruitment_source(osv.osv):
    """ Sources of HR Recruitment """
    # Recruitment channel through which an applicant was found (job board,
    # referral, ...); referenced by hr.applicant via source_id.
    _name = "hr.recruitment.source"
    _description = "Source of Applicants"
    _columns = {
        'name': fields.char('Source Name', required=True, translate=True),
    }
class hr_recruitment_stage(osv.osv):
    """ Stage of HR Recruitment """
    # One kanban column of the recruitment pipeline. A stage may be specific
    # to a department (department_id set) or shared by all departments.
    _name = "hr.recruitment.stage"
    _description = "Stage of Recruitment"
    _order = 'sequence'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of stages."),
        'department_id':fields.many2one('hr.department', 'Specific to a Department', help="Stages of the recruitment process may be different per department. If this stage is common to all departments, keep this field empty."),
        'requirements': fields.text('Requirements'),
        # When set, moving an applicant into this stage posts a message
        # rendered from this email template (see hr_applicant.write).
        'template_id': fields.many2one('email.template', 'Use template', help="If set, a message is posted on the applicant using the template when the applicant is set to the stage."),
        'fold': fields.boolean('Folded in Kanban View',
                               help='This stage is folded in the kanban view when'
                               'there are no records in that stage to display.'),
    }
    _defaults = {
        'sequence': 1,
    }
class hr_recruitment_degree(osv.osv):
    """ Degree of HR Recruitment """
    # Education level of an applicant (referenced by hr.applicant.type_id).
    _name = "hr.recruitment.degree"
    _description = "Degree of Recruitment"
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of degrees."),
    }
    _defaults = {
        'sequence': 1,
    }
    # Degree names must be unique database-wide.
    _sql_constraints = [
        ('name_uniq', 'unique (name)', 'The name of the Degree of Recruitment must be unique!')
    ]
class hr_applicant(osv.Model):
    """Job applicant: one record per application for a job position."""
    _name = "hr.applicant"
    _description = "Applicant"
    _order = "priority desc, id desc"
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    _track = {
        'stage_id': {
            # this is only an heuristics; depending on your particular stage configuration it may not match all 'new' stages
            'hr_recruitment.mt_applicant_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
            'hr_recruitment.mt_applicant_stage_changed': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
        },
    }
    _mail_mass_mailing = _('Applicants')

    def _get_default_department_id(self, cr, uid, context=None):
        """ Gives default department by checking if present in the context """
        return (self._resolve_department_id_from_context(cr, uid, context=context) or False)

    def _get_default_stage_id(self, cr, uid, context=None):
        """ Gives default stage_id """
        department_id = self._get_default_department_id(cr, uid, context=context)
        return self.stage_find(cr, uid, [], department_id, [('fold', '=', False)], context=context)

    def _resolve_department_id_from_context(self, cr, uid, context=None):
        """ Returns ID of department based on the value of 'default_department_id'
            context key, or None if it cannot be resolved to a single
            department.
        """
        if context is None:
            context = {}
        if type(context.get('default_department_id')) in (int, long):
            return context.get('default_department_id')
        if isinstance(context.get('default_department_id'), basestring):
            # The context may carry a department *name*; resolve it only if
            # the name matches exactly one department.
            department_name = context['default_department_id']
            department_ids = self.pool.get('hr.department').name_search(cr, uid, name=department_name, context=context)
            if len(department_ids) == 1:
                return int(department_ids[0][0])
        return None

    def _get_default_company_id(self, cr, uid, department_id=None, context=None):
        """Return the company of the given department, falling back to the
        user's default company."""
        company_id = False
        if department_id:
            department = self.pool['hr.department'].browse(cr, uid, department_id, context=context)
            company_id = department.company_id.id if department and department.company_id else False
        if not company_id:
            company_id = self.pool['res.company']._company_default_get(cr, uid, 'hr.applicant', context=context)
        return company_id

    def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
        """Return every kanban stage column (and its fold state), not only the
        stages that currently hold records."""
        access_rights_uid = access_rights_uid or uid
        stage_obj = self.pool.get('hr.recruitment.stage')
        order = stage_obj._order
        # lame hack to allow reverting search, should just work in the trivial case
        if read_group_order == 'stage_id desc':
            order = "%s desc" % order
        # retrieve section_id from the context and write the domain
        # - ('id', 'in', 'ids'): add columns that should be present
        # - OR ('department_id', '=', False), ('fold', '=', False): add default columns that are not folded
        # - OR ('department_id', 'in', department_id), ('fold', '=', False) if department_id: add department columns that are not folded
        department_id = self._resolve_department_id_from_context(cr, uid, context=context)
        search_domain = []
        if department_id:
            search_domain += ['|', ('department_id', '=', department_id)]
        search_domain += ['|', ('id', 'in', ids), ('department_id', '=', False)]
        stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
        result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
        # restore order of the search
        result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
        fold = {}
        for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
            fold[stage.id] = stage.fold or False
        return result, fold

    def _compute_day(self, cr, uid, ids, fields, args, context=None):
        """Compute 'day_open'/'day_close': days (float) elapsed between the
        creation of the applicant and its assignment / closing dates."""
        res = dict((res_id, {}) for res_id in ids)
        for issue in self.browse(cr, uid, ids, context=context):
            values = {
                'day_open': 0.0,
                'day_close': 0.0,
            }
            if issue.date_open:
                date_create = datetime.strptime(issue.create_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
                date_open = datetime.strptime(issue.date_open, tools.DEFAULT_SERVER_DATETIME_FORMAT)
                values['day_open'] = (date_open - date_create).total_seconds() / (24.0 * 3600)
            if issue.date_closed:
                date_create = datetime.strptime(issue.create_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
                date_closed = datetime.strptime(issue.date_closed, tools.DEFAULT_SERVER_DATETIME_FORMAT)
                values['day_close'] = (date_closed - date_create).total_seconds() / (24.0 * 3600)
            # filter only required values
            for field in fields:
                res[issue.id][field] = values[field]
        return res

    def _get_attachment_number(self, cr, uid, ids, fields, args, context=None):
        """Count the ir.attachment records linked to each applicant."""
        res = dict.fromkeys(ids, 0)
        for app_id in ids:
            res[app_id] = self.pool['ir.attachment'].search_count(cr, uid, [('res_model', '=', 'hr.applicant'), ('res_id', '=', app_id)], context=context)
        return res

    _columns = {
        'name': fields.char('Subject / Application Name', required=True),
        'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the case without removing it."),
        'description': fields.text('Description'),
        'email_from': fields.char('Email', size=128, help="These people will receive email."),
        'email_cc': fields.text('Watchers Emails', size=252, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
        'probability': fields.float('Probability'),
        'partner_id': fields.many2one('res.partner', 'Contact'),
        'create_date': fields.datetime('Creation Date', readonly=True, select=True),
        'write_date': fields.datetime('Update Date', readonly=True),
        'stage_id': fields.many2one('hr.recruitment.stage', 'Stage', track_visibility='onchange',
                                    domain="['|', ('department_id', '=', department_id), ('department_id', '=', False)]"),
        'last_stage_id': fields.many2one('hr.recruitment.stage', 'Last Stage',
                                         help='Stage of the applicant before being in the current stage. Used for lost cases analysis.'),
        'categ_ids': fields.many2many('hr.applicant_category', string='Tags'),
        'company_id': fields.many2one('res.company', 'Company'),
        'user_id': fields.many2one('res.users', 'Responsible', track_visibility='onchange'),
        'date_closed': fields.datetime('Closed', readonly=True, select=True),
        'date_open': fields.datetime('Assigned', readonly=True, select=True),
        'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
        'date_action': fields.date('Next Action Date'),
        'title_action': fields.char('Next Action', size=64),
        'priority': fields.selection(AVAILABLE_PRIORITIES, 'Appreciation'),
        'job_id': fields.many2one('hr.job', 'Applied Job'),
        'salary_proposed_extra': fields.char('Proposed Salary Extra', help="Salary Proposed by the Organisation, extra advantages"),
        'salary_expected_extra': fields.char('Expected Salary Extra', help="Salary Expected by Applicant, extra advantages"),
        'salary_proposed': fields.float('Proposed Salary', help="Salary Proposed by the Organisation"),
        'salary_expected': fields.float('Expected Salary', help="Salary Expected by Applicant"),
        'availability': fields.integer('Availability', help="The number of days in which the applicant will be available to start working"),
        'partner_name': fields.char("Applicant's Name"),
        'partner_phone': fields.char('Phone', size=32),
        'partner_mobile': fields.char('Mobile', size=32),
        'type_id': fields.many2one('hr.recruitment.degree', 'Degree'),
        'department_id': fields.many2one('hr.department', 'Department'),
        'survey': fields.related('job_id', 'survey_id', type='many2one', relation='survey.survey', string='Survey'),
        'response_id': fields.many2one('survey.user_input', "Response", ondelete='set null', oldname="response"),
        'reference': fields.char('Referred By'),
        'source_id': fields.many2one('hr.recruitment.source', 'Source'),
        'day_open': fields.function(_compute_day, string='Days to Open',
                                    multi='day_open', type="float",
                                    store={'hr.applicant': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
        'day_close': fields.function(_compute_day, string='Days to Close',
                                     multi='day_close', type="float",
                                     store={'hr.applicant': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
        'color': fields.integer('Color Index'),
        'emp_id': fields.many2one('hr.employee', string='Employee', help='Employee linked to the applicant.'),
        'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
        'attachment_number': fields.function(_get_attachment_number, string='Number of Attachments', type="integer"),
    }

    _defaults = {
        'active': lambda *a: 1,
        'user_id': lambda s, cr, uid, c: uid,
        'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
        'department_id': lambda s, cr, uid, c: s._get_default_department_id(cr, uid, c),
        'company_id': lambda s, cr, uid, c: s._get_default_company_id(cr, uid, s._get_default_department_id(cr, uid, c), c),
        'color': 0,
        'priority': '0',
        'date_last_stage_update': fields.datetime.now,
    }

    _group_by_full = {
        'stage_id': _read_group_stage_ids
    }

    def onchange_job(self, cr, uid, ids, job_id=False, context=None):
        """Propagate the department and responsible of the selected job."""
        department_id = False
        user_id = False
        if job_id:
            job_record = self.pool.get('hr.job').browse(cr, uid, job_id, context=context)
            department_id = job_record and job_record.department_id and job_record.department_id.id or False
            user_id = job_record and job_record.user_id and job_record.user_id.id or False
        return {'value': {'department_id': department_id, 'user_id': user_id}}

    def onchange_department_id(self, cr, uid, ids, department_id=False, stage_id=False, context=None):
        """Recompute the default stage and company for the new department."""
        values = {}
        if not stage_id:
            values['stage_id'] = self.stage_find(cr, uid, [], department_id, [('fold', '=', False)], context=context)
        if department_id:
            department = self.pool['hr.department'].browse(cr, uid, department_id, context=context)
            values['company_id'] = department.company_id.id
        return {'value': values}

    def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
        """Copy phone/mobile/email from the selected contact."""
        data = {'partner_phone': False,
                'partner_mobile': False,
                'email_from': False}
        if partner_id:
            addr = self.pool.get('res.partner').browse(cr, uid, partner_id, context)
            data.update({'partner_phone': addr.phone,
                         'partner_mobile': addr.mobile,
                         'email_from': addr.email})
        return {'value': data}

    def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
        """Set/clear date_closed depending on whether the stage is folded."""
        if not stage_id:
            return {'value': {}}
        stage = self.pool['hr.recruitment.stage'].browse(cr, uid, stage_id, context=context)
        if stage.fold:
            return {'value': {'date_closed': fields.datetime.now()}}
        return {'value': {'date_closed': False}}

    def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
        """ Override of the base.stage method
            Parameter of the stage search taken from the lead:
            - department_id: if set, stages must belong to this section or
              be a default case
        """
        if isinstance(cases, (int, long)):
            cases = self.browse(cr, uid, cases, context=context)
        # collect all section_ids
        department_ids = []
        if section_id:
            department_ids.append(section_id)
        for case in cases:
            if case.department_id:
                department_ids.append(case.department_id.id)
        # OR all section_ids and OR with case_default
        search_domain = []
        if department_ids:
            search_domain += ['|', ('department_id', 'in', department_ids)]
        search_domain.append(('department_id', '=', False))
        # AND with the domain in parameter
        search_domain += list(domain)
        # perform search, return the first found
        stage_ids = self.pool.get('hr.recruitment.stage').search(cr, uid, search_domain, order=order, context=context)
        if stage_ids:
            return stage_ids[0]
        return False

    def action_makeMeeting(self, cr, uid, ids, context=None):
        """ This opens Meeting's calendar view to schedule meeting on current applicant
            @return: Dictionary value for created Meeting view
        """
        applicant = self.browse(cr, uid, ids[0], context)
        applicant_ids = []
        if applicant.partner_id:
            applicant_ids.append(applicant.partner_id.id)
        if applicant.department_id and applicant.department_id.manager_id and applicant.department_id.manager_id.user_id and applicant.department_id.manager_id.user_id.partner_id:
            applicant_ids.append(applicant.department_id.manager_id.user_id.partner_id.id)
        category = self.pool.get('ir.model.data').get_object(cr, uid, 'hr_recruitment', 'categ_meet_interview', context)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
        res['context'] = {
            'default_partner_ids': applicant_ids,
            'default_user_id': uid,
            'default_name': applicant.name,
            'default_categ_ids': category and [category.id] or False,
        }
        return res

    def action_start_survey(self, cr, uid, ids, context=None):
        """Create (or reuse) the survey response of the applicant and open it."""
        context = dict(context or {})
        applicant = self.browse(cr, uid, ids, context=context)[0]
        survey_obj = self.pool.get('survey.survey')
        response_obj = self.pool.get('survey.user_input')
        # create a response and link it to this applicant
        if not applicant.response_id:
            response_id = response_obj.create(cr, uid, {'survey_id': applicant.survey.id, 'partner_id': applicant.partner_id.id}, context=context)
            self.write(cr, uid, ids[0], {'response_id': response_id}, context=context)
        else:
            response_id = applicant.response_id.id
        # grab the token of the response and start surveying
        response = response_obj.browse(cr, uid, response_id, context=context)
        context.update({'survey_token': response.token})
        return survey_obj.action_start_survey(cr, uid, [applicant.survey.id], context=context)

    def action_print_survey(self, cr, uid, ids, context=None):
        """ If response is available then print this response otherwise print survey form (print template of the survey) """
        context = dict(context or {})
        applicant = self.browse(cr, uid, ids, context=context)[0]
        survey_obj = self.pool.get('survey.survey')
        response_obj = self.pool.get('survey.user_input')
        if not applicant.response_id:
            return survey_obj.action_print_survey(cr, uid, [applicant.survey.id], context=context)
        else:
            response = response_obj.browse(cr, uid, applicant.response_id.id, context=context)
            context.update({'survey_token': response.token})
            return survey_obj.action_print_survey(cr, uid, [applicant.survey.id], context=context)

    def action_get_attachment_tree_view(self, cr, uid, ids, context=None):
        """Open the attachment list filtered on this applicant."""
        model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'action_attachment')
        action = self.pool.get(model).read(cr, uid, action_id, context=context)
        action['context'] = {'default_res_model': self._name, 'default_res_id': ids[0]}
        action['domain'] = str(['&', ('res_model', '=', self._name), ('res_id', 'in', ids)])
        return action

    def message_get_reply_to(self, cr, uid, ids, context=None):
        """ Override to get the reply_to of the parent project. """
        applicants = self.browse(cr, SUPERUSER_ID, ids, context=context)
        job_ids = set([applicant.job_id.id for applicant in applicants if applicant.job_id])
        aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(job_ids), context=context)
        return dict((applicant.id, aliases.get(applicant.job_id and applicant.job_id.id or 0, False)) for applicant in applicants)

    def message_get_suggested_recipients(self, cr, uid, ids, context=None):
        """Suggest the applicant's contact (or raw email) as mail recipient."""
        recipients = super(hr_applicant, self).message_get_suggested_recipients(cr, uid, ids, context=context)
        for applicant in self.browse(cr, uid, ids, context=context):
            if applicant.partner_id:
                self._message_add_suggested_recipient(cr, uid, recipients, applicant, partner=applicant.partner_id, reason=_('Contact'))
            elif applicant.email_from:
                self._message_add_suggested_recipient(cr, uid, recipients, applicant, email=applicant.email_from, reason=_('Contact Email'))
        return recipients

    def message_new(self, cr, uid, msg, custom_values=None, context=None):
        """ Overrides mail_thread message_new that is called by the mailgateway
            through message_process.
            This override updates the document according to the email.
        """
        if custom_values is None:
            custom_values = {}
        # Guard against mails without a 'from' header (was a hard crash).
        val = (msg.get('from') or '').split('<')[0]
        defaults = {
            'name': msg.get('subject') or _("No Subject"),
            'partner_name': val,
            'email_from': msg.get('from'),
            'email_cc': msg.get('cc'),
            'user_id': False,
            'partner_id': msg.get('author_id', False),
        }
        if msg.get('priority'):
            defaults['priority'] = msg.get('priority')
        defaults.update(custom_values)
        return super(hr_applicant, self).message_new(cr, uid, msg, custom_values=defaults, context=context)

    def create(self, cr, uid, vals, context=None):
        """Create an applicant, deriving defaults from the job/stage and
        notifying the job position of the new application."""
        context = dict(context or {})
        context['mail_create_nolog'] = True
        if vals.get('department_id') and not context.get('default_department_id'):
            context['default_department_id'] = vals.get('department_id')
        if vals.get('job_id') or context.get('default_job_id'):
            job_id = vals.get('job_id') or context.get('default_job_id')
            vals.update(self.onchange_job(cr, uid, [], job_id, context=context)['value'])
        if vals.get('user_id'):
            vals['date_open'] = fields.datetime.now()
        if 'stage_id' in vals:
            vals.update(self.onchange_stage_id(cr, uid, None, vals.get('stage_id'), context=context)['value'])
        obj_id = super(hr_applicant, self).create(cr, uid, vals, context=context)
        applicant = self.browse(cr, uid, obj_id, context=context)
        if applicant.job_id:
            name = applicant.partner_name if applicant.partner_name else applicant.name
            self.pool['hr.job'].message_post(
                cr, uid, [applicant.job_id.id],
                body=_('New application from %s') % name,
                subtype="hr_recruitment.mt_job_applicant_new", context=context)
        return obj_id

    def write(self, cr, uid, ids, vals, context=None):
        """Write override: tracks assignment/stage dates, records the previous
        stage, notifies the job of new applications and sends the stage's
        email template when one is configured."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = True

        # user_id change: update date_open
        if vals.get('user_id'):
            vals['date_open'] = fields.datetime.now()
        # stage_id: track last stage before update
        if 'stage_id' in vals:
            vals['date_last_stage_update'] = fields.datetime.now()
            vals.update(self.onchange_stage_id(cr, uid, ids, vals.get('stage_id'), context=context)['value'])
            # Pass the live context (was context=None, inconsistent with the
            # other ORM calls in this method).
            for applicant in self.browse(cr, uid, ids, context=context):
                vals['last_stage_id'] = applicant.stage_id.id
                res = super(hr_applicant, self).write(cr, uid, [applicant.id], vals, context=context)
        else:
            res = super(hr_applicant, self).write(cr, uid, ids, vals, context=context)

        # post processing: if job changed, post a message on the job
        if vals.get('job_id'):
            for applicant in self.browse(cr, uid, ids, context=context):
                name = applicant.partner_name if applicant.partner_name else applicant.name
                self.pool['hr.job'].message_post(
                    cr, uid, [vals['job_id']],
                    body=_('New application from %s') % name,
                    subtype="hr_recruitment.mt_job_applicant_new", context=context)

        # post processing: if stage changed, post a message in the chatter
        if vals.get('stage_id'):
            stage = self.pool['hr.recruitment.stage'].browse(cr, uid, vals['stage_id'], context=context)
            if stage.template_id:
                # TDENOTE: probably factorize me in a message_post_with_template generic method FIXME
                compose_ctx = dict(context,
                                   active_id=False,
                                   active_ids=ids)
                compose_id = self.pool['mail.compose.message'].create(
                    cr, uid, {
                        'model': self._name,
                        'composition_mode': 'mass_mail',
                        'template_id': stage.template_id.id,
                        'post': True,
                        'notify': True,
                    }, context=compose_ctx)
                values = self.pool['mail.compose.message'].onchange_template_id(
                    cr, uid, [compose_id], stage.template_id.id, 'mass_mail', self._name, False, context=compose_ctx)['value']
                if values.get('attachment_ids'):
                    values['attachment_ids'] = [(6, 0, values['attachment_ids'])]
                self.pool['mail.compose.message'].write(
                    cr, uid, [compose_id],
                    values,
                    context=compose_ctx)
                self.pool['mail.compose.message'].send_mail(cr, uid, [compose_id], context=compose_ctx)
        return res

    def create_employee_from_applicant(self, cr, uid, ids, context=None):
        """ Create an hr.employee from the hr.applicants """
        if context is None:
            context = {}
        hr_employee = self.pool.get('hr.employee')
        model_data = self.pool.get('ir.model.data')
        act_window = self.pool.get('ir.actions.act_window')
        emp_id = False
        for applicant in self.browse(cr, uid, ids, context=context):
            address_id = contact_name = False
            if applicant.partner_id:
                address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']
                contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]
            if applicant.job_id and (applicant.partner_name or contact_name):
                applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})
                create_ctx = dict(context, mail_broadcast=True)
                emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or contact_name,
                                                      'job_id': applicant.job_id.id,
                                                      'address_home_id': address_id,
                                                      'department_id': applicant.department_id.id or False,
                                                      'address_id': applicant.company_id and applicant.company_id.partner_id and applicant.company_id.partner_id.id or False,
                                                      'work_email': applicant.department_id and applicant.department_id.company_id and applicant.department_id.company_id.email or False,
                                                      'work_phone': applicant.department_id and applicant.department_id.company_id and applicant.department_id.company_id.phone or False,
                                                      }, context=create_ctx)
                self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)
                # Parenthesized: '%' binds tighter than the conditional
                # expression, so without parentheses the message body degraded
                # to the bare applicant name when partner_name was not set.
                self.pool['hr.job'].message_post(
                    cr, uid, [applicant.job_id.id],
                    body=_('New Employee %s Hired') % (applicant.partner_name if applicant.partner_name else applicant.name),
                    subtype="hr_recruitment.mt_job_applicant_hired", context=context)
            else:
                raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))

        action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')
        dict_act_window = act_window.read(cr, uid, [action_id], [])[0]
        if emp_id:
            dict_act_window['res_id'] = emp_id
        dict_act_window['view_mode'] = 'form,tree'
        return dict_act_window

    def get_empty_list_help(self, cr, uid, help, context=None):
        """Customize the empty-kanban help message with the job position."""
        context = dict(context or {})
        context['empty_list_help_model'] = 'hr.job'
        context['empty_list_help_id'] = context.get('default_job_id', None)
        context['empty_list_help_document_name'] = _("job applicants")
        return super(hr_applicant, self).get_empty_list_help(cr, uid, help, context=context)
class hr_job(osv.osv):
    # Extends hr.job with recruitment features: an interview survey, a mail
    # alias that turns inbound emails into applicants, and counters for
    # applications and attached documents.
    _inherit = "hr.job"
    _name = "hr.job"
    _inherits = {'mail.alias': 'alias_id'}

    def _get_attached_docs(self, cr, uid, ids, field_name, arg, context=None):
        # Attachments of the job itself plus those of all its applications.
        res = {}
        attachment_obj = self.pool.get('ir.attachment')
        for job_id in ids:
            applicant_ids = self.pool.get('hr.applicant').search(cr, uid, [('job_id', '=', job_id)], context=context)
            res[job_id] = attachment_obj.search(
                cr, uid, [
                    '|',
                    '&', ('res_model', '=', 'hr.job'), ('res_id', '=', job_id),
                    '&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', applicant_ids)
                ], context=context)
        return res

    def _count_all(self, cr, uid, ids, field_name, arg, context=None):
        # Multi-field function: counts applications and attached documents.
        Applicant = self.pool['hr.applicant']
        return {
            job_id: {
                'application_count': Applicant.search_count(cr,uid, [('job_id', '=', job_id)], context=context),
                'documents_count': len(self._get_attached_docs(cr, uid, [job_id], field_name, arg, context=context)[job_id])
            }
            for job_id in ids
        }

    _columns = {
        'survey_id': fields.many2one('survey.survey', 'Interview Form', help="Choose an interview form for this job position and you will be able to print/answer this interview from all applicants who apply for this job"),
        'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
                                    help="Email alias for this job position. New emails will automatically "
                                         "create new applicants for this job position."),
        'address_id': fields.many2one('res.partner', 'Job Location', help="Address where employees are working"),
        'application_ids': fields.one2many('hr.applicant', 'job_id', 'Applications'),
        'application_count': fields.function(_count_all, type='integer', string='Applications', multi=True),
        'manager_id': fields.related('department_id', 'manager_id', type='many2one', string='Department Manager', relation='hr.employee', readonly=True, store=True),
        'document_ids': fields.function(_get_attached_docs, type='one2many', relation='ir.attachment', string='Applications'),
        'documents_count': fields.function(_count_all, type='integer', string='Documents', multi=True),
        'user_id': fields.many2one('res.users', 'Recruitment Responsible', track_visibility='onchange'),
        'color': fields.integer('Color Index'),
    }

    def _address_get(self, cr, uid, context=None):
        # Default job location: the current user's company address.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return user.company_id.partner_id.id

    _defaults = {
        'address_id': _address_get
    }

    def _auto_init(self, cr, context=None):
        """Installation hook to create aliases for all jobs and avoid constraint errors."""
        return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(hr_job, self)._auto_init,
                                                            'hr.applicant', self._columns['alias_id'], 'name', alias_prefix='job+', alias_defaults={'job_id': 'id'}, context=context)

    def create(self, cr, uid, vals, context=None):
        # Create the job with its mail alias, then point the alias back at
        # this job so inbound mail creates applicants for it.
        alias_context = dict(context, alias_model_name='hr.applicant', alias_parent_model_name=self._name)
        job_id = super(hr_job, self).create(cr, uid, vals, context=alias_context)
        job = self.browse(cr, uid, job_id, context=context)
        self.pool.get('mail.alias').write(cr, uid, [job.alias_id.id], {'alias_parent_thread_id': job_id, "alias_defaults": {'job_id': job_id}}, context)
        return job_id

    def unlink(self, cr, uid, ids, context=None):
        # Cascade-delete mail aliases as well, as they should not exist without the job position.
        mail_alias = self.pool.get('mail.alias')
        alias_ids = [job.alias_id.id for job in self.browse(cr, uid, ids, context=context) if job.alias_id]
        res = super(hr_job, self).unlink(cr, uid, ids, context=context)
        mail_alias.unlink(cr, uid, alias_ids, context=context)
        return res

    def action_print_survey(self, cr, uid, ids, context=None):
        # Print the blank interview form of the (first) selected job.
        job = self.browse(cr, uid, ids, context=context)[0]
        survey_id = job.survey_id.id
        return self.pool.get('survey.survey').action_print_survey(cr, uid, [survey_id], context=context)

    def action_get_attachment_tree_view(self, cr, uid, ids, context=None):
        #open attachments of job and related applicantions.
        model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'action_attachment')
        action = self.pool.get(model).read(cr, uid, action_id, context=context)
        applicant_ids = self.pool.get('hr.applicant').search(cr, uid, [('job_id', 'in', ids)], context=context)
        action['context'] = {'default_res_model': self._name, 'default_res_id': ids[0]}
        action['domain'] = str(['|', '&', ('res_model', '=', 'hr.job'), ('res_id', 'in', ids), '&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', applicant_ids)])
        return action

    def action_set_no_of_recruitment(self, cr, uid, id, value, context=None):
        # Kanban helper: set the expected number of new employees to hire.
        return self.write(cr, uid, [id], {'no_of_recruitment': value}, context=context)
class applicant_category(osv.osv):
    """ Category of applicant """
    # Free-form tag attached to applicants (hr.applicant.categ_ids).
    _name = "hr.applicant_category"
    _description = "Category of applicant"
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| StarcoderdataPython |
5113323 | import http.server
import socketserver
import json
import geocode
import logging
from http_service import Request, Response
from geocode_service import handle_geocode_request
logging.basicConfig(level=logging.INFO)
# TCP port the HTTP server binds to.
PORT = 8000
# Listening on all network interfaces on the given port
listening_address = ("", PORT)
# The only URL path this server answers; everything else is a 404.
LOOKUP_REQUEST_PATH = '/geo_lookup.json'
class RequestHandler(http.server.BaseHTTPRequestHandler):
    """Routes GET requests to the geocode service and writes its response."""

    def do_GET(self):
        request = Request(self.path, method='GET')
        if request.path == LOOKUP_REQUEST_PATH:
            try:
                response = handle_geocode_request(request)
            except Exception as ex:
                # Top-level boundary: log the failure and answer with a 500.
                logging.exception(ex)
                response = Response.server_error()
        else:
            response = Response.not_found(request.path)
        self.send_response(response.status_code)
        body = response.encoded_body()
        headers = dict(response.headers)
        headers['Content-Length'] = str(len(body))
        for name, value in headers.items():
            self.send_header(name, value)
        self.end_headers()
        self.wfile.write(body)
logging.info('Initializing TCP server')
# Bind the server and serve requests until interrupted; the context
# manager also closes the socket on exit.
with socketserver.TCPServer(listening_address, RequestHandler) as httpd:
    logging.info('Starting server on port %s', httpd.server_address)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: fall through to an orderly shutdown.
        pass
    logging.info('Server will shut down')
    httpd.shutdown()
    httpd.server_close()
| StarcoderdataPython |
9775548 | # Decimal > Hex > ASCII converter.
# by MikeTheScriptKid.
# I made this for a CTF challenge on THM to decode a flag.
# Read a base-10 integer, then print its hexadecimal and ASCII forms.
decimal_value = int(input("Enter your decimal: "))
# format(..., "x") avoids shadowing the hex() builtin used by the original.
hex_value = format(decimal_value, "x")
# bytearray.fromhex() requires an even number of hex digits, so pad an
# odd-length string (e.g. 10 -> "a" -> "0a") instead of crashing.
if len(hex_value) % 2:
    hex_value = "0" + hex_value
ascii_value = bytearray.fromhex(hex_value).decode()
print("HEX:", hex_value, "\n" "ASCII:", ascii_value)
| StarcoderdataPython |
3334513 | # Readable: can read during runtime/compile time
# Writeable: can write during runtime and compile time
"""
============
| List/Array |
============
Ordered
Readable
Writeable
"""
# Demonstrates basic list operations: creation, access, append, delete.
# (Semicolons removed: Python statements do not need terminators.)
fooList = ["fooListItem", 123, True]
anotherList = list(("anotherListItem", 456, False))
print(fooList)
print(anotherList)
print ("===================================")
# Accessing an element based on given index; list indices start at 0.
print(fooList[1])  # print the second element
# Adding a new element to the end of the list.
fooList.append("hello world")
# Removing the element at a specific index from the list.
del fooList[0]
# Removing the first element with a matching value from the list.
fooList.remove(123)
print ("===================================")
print("New modified list: ")
print(fooList)
| StarcoderdataPython |
def bubble_sort(items):
    """Sort *items* in place in ascending order and return it.

    Classic bubble sort: repeatedly sweep the list, swapping adjacent
    out-of-order pairs. Each sweep floats the largest remaining value to
    the end, so the scanned range shrinks by one per pass. An early-exit
    flag stops as soon as a sweep performs no swap (list already sorted).

    The original compared non-adjacent pairs ``items[a]``/``items[b]``,
    which is an O(n^2) exchange-sort variant, not bubble sort.

    Parameters
    ----------
    items : list
        List of mutually comparable values; modified in place.

    Returns
    -------
    list
        The same list object, sorted in ascending order.
    """
    n = len(items)
    for end in range(n - 1, 0, -1):
        swapped = False
        for j in range(end):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:
            # No swaps in a full sweep: the list is sorted.
            break
    return items
#------------------------------------------------------------------------------
def merge_sort(items):
    """Return a new list with the elements of *items* in ascending order.

    True recursive merge sort: split in half, sort each half recursively,
    then merge the two sorted halves in linear time (O(n log n) overall).

    Fixes two defects in the original: it returned ``None`` for inputs of
    length 0 or 1 (no ``return`` outside the ``if``), and it bubble-sorted
    the halves instead of recursing, making it O(n^2).

    Parameters
    ----------
    items : list
        List of mutually comparable values; left unmodified.

    Returns
    -------
    list
        New list containing the elements of *items* in ascending order.
    """
    if len(items) <= 1:
        # Base case: already sorted; return a copy so callers may mutate it.
        return list(items)
    mid = len(items) // 2
    left = merge_sort(items[:mid])
    right = merge_sort(items[mid:])
    # Merge the two sorted halves.
    i = j = 0
    merged = []
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One of these is empty; the other holds the sorted remainder.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
#---------------------------------------------------------------------------
def quick_sort(items):
    """Return a new list with the elements of *items* in ascending order.

    Three-way quicksort: partition around the last element into values
    smaller than, equal to, and larger than the pivot, recursively sort
    the outer partitions, and concatenate. Grouping duplicates with the
    pivot keeps inputs with many repeated values from degrading.

    (Removes the original's dead, unassigned triple-quoted string after
    the docstring and the needless ``index`` indirection.)

    Parameters
    ----------
    items : list
        List of mutually comparable values; left unmodified.

    Returns
    -------
    list
        List containing the elements of *items* in ascending order.
    """
    if len(items) <= 1:
        # Zero or one element is already sorted.
        return items
    pivot = items[-1]
    smaller = [x for x in items if x < pivot]
    equal = [x for x in items if x == pivot]
    larger = [x for x in items if x > pivot]
    return quick_sort(smaller) + equal + quick_sort(larger)
| StarcoderdataPython |
11214104 | import unittest
from rekcurd.utils import RekcurdConfig, ModelModeEnum
from rekcurd.data_servers import CephHandler
from . import patch_predictor
class CephHandlerTest(unittest.TestCase):
    """Tests for CephHandlerTest."""

    def setUp(self):
        test_config = RekcurdConfig("./test/test-settings.yml")
        test_config.set_configurations(
            model_mode=ModelModeEnum.CEPH_S3.value,
            ceph_access_key="xxx",
            ceph_secret_key="xxx",
            ceph_host="127.0.0.1",
            ceph_port=443,
            ceph_is_secure=True,
            ceph_bucket_name="xxx")
        self.handler = CephHandler(test_config)

    @patch_predictor()
    def test_download(self):
        # download() has no return value; success means no exception raised.
        self.assertIsNone(self.handler.download("remote", "local"))

    @patch_predictor()
    def test_upload(self):
        self.assertIsNone(self.handler.upload("remote", "local"))
| StarcoderdataPython |
8057389 | from pyDiamondsBackground import Background
from pyDiamondsBackground.models import WhiteNoiseOnlyModel
# Fit a white-noise-only background model to the KIC 123456789 example
# data set, then write the results to disk.
background = Background(kicID='123456789', modelObject=WhiteNoiseOnlyModel, rootPath="exampleFiles")
background.run()
background.writeResults("exampleFiles/results/KIC123456789/run", "background_")
326403 | <filename>benchmarks/benchmark.py
import os
import sys
import re
import subprocess
import traceback
import statistics
# Interpreter and target program used for every benchmarked command line.
python = "python3"
progname = "/Users/emery/git/scalene/benchmarks/julia1_nopil.py"
number_of_runs = 1 # We take the average of this many runs.
# Output timing string from the benchmark. Raw string so "\." is a regex
# escape rather than an invalid Python string escape (DeprecationWarning).
result_regexp = re.compile(r"calculate_z_serial_purepython took ([0-9]*\.[0-9]+) seconds")
# Characteristics of the tools, kept in one table instead of five
# hand-maintained parallel dicts (which were easy to get out of sync).
# Columns: line_level, cpu_profiler, separate_profiler (Python vs. C
# attribution), memory_profiler, unmodified_code. "baseline" is not a
# profiler, hence all None.
_TOOL_TRAITS = {
    # tool:                  (line,  cpu,   separate, memory, unmodified)
    "baseline":              (None,  None,  None,     None,   None),
    "cProfile":              (False, True,  False,    False,  True),
    "Profile":               (False, True,  False,    False,  True),
    "line_profiler":         (True,  True,  False,    False,  False),
    "pyinstrument":          (False, True,  False,    False,  True),
    "yappi_cputime":         (False, True,  False,    False,  True),
    "yappi_wallclock":       (False, True,  False,    False,  True),
    "pprofile_deterministic": (True, True,  False,    False,  True),
    "pprofile_statistical":  (True,  True,  False,    False,  True),
    "memory_profiler":       (True,  False, False,    True,   False),
    "scalene_cpu":           (True,  True,  True,     False,  True),
    "scalene_cpu_memory":    (True,  True,  True,     True,   True),
}
# The rest of the script indexes these per-trait dicts by tool name.
line_level = {tool: traits[0] for tool, traits in _TOOL_TRAITS.items()}
cpu_profiler = {tool: traits[1] for tool, traits in _TOOL_TRAITS.items()}
separate_profiler = {tool: traits[2] for tool, traits in _TOOL_TRAITS.items()}
memory_profiler = {tool: traits[3] for tool, traits in _TOOL_TRAITS.items()}
unmodified_code = {tool: traits[4] for tool, traits in _TOOL_TRAITS.items()}
# Command lines for the various tools.
baseline = f"{python} {progname}"
cprofile = f"{python} -m cProfile {progname}"
profile = f"{python} -m profile {progname}"
pyinstrument = f"pyinstrument {progname}"
line_profiler = f"{python} -m kernprof -l -v {progname}"
pprofile_deterministic = f"pprofile {progname}"
pprofile_statistical = f"pprofile --statistic 0.001 {progname}" # Same as Scalene
yappi_cputime = f"yappi {progname}"
yappi_wallclock = f"yappi -c wall {progname}"
scalene_cpu = f"{python} -m scalene {progname}"
scalene_cpu_memory = f"{python} -m scalene {progname}" # see below for environment variables
# (command line, trait-dict key, Markdown label) triples, in the order
# their rows should appear in the generated results table.
benchmarks = [(baseline, "baseline", "_original program_"), (cprofile, "cProfile", "`cProfile`"), (profile, "Profile", "`Profile`"), (pyinstrument, "pyinstrument", "`pyinstrument`"), (line_profiler, "line_profiler", "`line_profiler`"), (pprofile_deterministic, "pprofile_deterministic", "`pprofile` _(deterministic)_"), (pprofile_statistical, "pprofile_statistical", "`pprofile` _(statistical)_"), (yappi_cputime, "yappi_cputime", "`yappi` _(CPU)_"), (yappi_wallclock, "yappi_wallclock", "`yappi` _(wallclock)_"), (scalene_cpu, "scalene_cpu", "`scalene` _(CPU only)_"), (scalene_cpu_memory, "scalene_cpu_memory", "`scalene` _(CPU + memory)_")]
# benchmarks = [(baseline, "baseline", "_original program_"), (pprofile_deterministic, "`pprofile` _(deterministic)_")]
# benchmarks = [(baseline, "baseline", "_original program_"), (pprofile_statistical, "pprofile_statistical", "`pprofile` _(statistical)_")]
# Mean wall-clock time per tool, keyed by the trait-dict tool name.
average_time = {}
check = ":heavy_check_mark:"
# Emit a GitHub-flavored Markdown table: header row, then one row per tool.
print("| | Time (seconds) | Slowdown | Line-level? | CPU? | Python vs. C? | Memory? | Unmodified code? |")
print("| :--- | ---: | ---: | :---: | :---: | :---: | :---: | :---: |")
for bench in benchmarks:
    # print(bench)
    times = []
    for i in range(0, number_of_runs):
        my_env = os.environ.copy()
        if bench[1] == "scalene_cpu_memory":
            # Scalene's memory profiling requires its allocator interposition
            # library, injected via the platform's preload mechanism.
            my_env["PYTHONMALLOC"] = "malloc"
            if sys.platform == 'darwin':
                my_env["DYLD_INSERT_LIBRARIES"] = "./libscalene.dylib"
            if sys.platform == 'linux':
                my_env["LD_PRELOAD"] = "./libscalene.so"
        result = subprocess.run(bench[0].split(), env = my_env, stdout = subprocess.PIPE)
        output = result.stdout.decode('utf-8')
        # Extract the benchmark's self-reported timing from its stdout.
        match = result_regexp.search(output)
        if match is not None:
            # Round to two decimal places before averaging.
            times.append(round(100 * float(match.group(1))) / 100.0)
        else:
            print("failed run")
    average_time[bench[1]] = statistics.mean(times) # sum_time / (number_of_runs * 1.0)
    print(str(average_time[bench[1]]))
    if bench[1] == "baseline":
        # Baseline row: slowdown is by definition 1.0x; no trait columns.
        print(f"| {bench[2]} | {average_time[bench[1]]}s | 1.0x | | | | | |")
        print("| | | | | |")
    else:
        try:
            if bench[1].find("scalene") >= 0:
                if bench[1].find("scalene_cpu") >= 0:
                    # Separator row before the scalene rows.
                    print("| | | | | |")
                # Scalene rows get a bold slowdown figure.
                print(f"| {bench[2]} | {average_time[bench[1]]}s | **{round(100 * average_time[bench[1]] / average_time['baseline']) / 100}x** | {check if line_level[bench[1]] else 'function-level'} | {check if cpu_profiler[bench[1]] else ''} | {check if separate_profiler[bench[1]] else ''} | {check if memory_profiler[bench[1]] else ''} | {check if unmodified_code[bench[1]] else 'needs `@profile` decorators'} |")
            else:
                print(f"| {bench[2]} | {average_time[bench[1]]}s | {round(100 * average_time[bench[1]] / average_time['baseline']) / 100}x | {check if line_level[bench[1]] else 'function-level'} | {check if cpu_profiler[bench[1]] else ''} | {check if separate_profiler[bench[1]] else ''} | {check if memory_profiler[bench[1]] else ''} | {check if unmodified_code[bench[1]] else 'needs `@profile` decorators'} |")
        except Exception as err:
            traceback.print_exc()
            print("err = " + str(err))
            print("WOOPS")
    # print(bench[1] + " = " + str(sum_time / 5.0))
| StarcoderdataPython |
1795466 | <reponame>PolusAI/toil
# Copyright (C) 2015-2022 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import getpass
from toil.lib.misc import get_user_name
from toil.test import ToilTest
logger = logging.getLogger(__name__)
# Verbose logging for the whole test module.
logging.basicConfig(level=logging.DEBUG)
class UserNameAvailableTest(ToilTest):
    """
    Make sure we can get user names when they are available.
    """

    def test_get_user_name(self):
        # We assume we have the user in /etc/passwd when running the tests,
        # so get_user_name() should agree with getpass.
        expected = getpass.getuser()
        self.assertEqual(get_user_name(), expected)
class UserNameUnvailableTest(ToilTest):
    """
    Make sure we can get something for a user name when user names are not
    available.
    """

    def setUp(self):
        super().setUp()
        # Swap getpass.getuser for a stub that raises the error getpass
        # produces when no passwd entry exists for the current UID.
        self.original_getuser = getpass.getuser

        def broken_getuser():
            raise KeyError('Fake key error')

        getpass.getuser = broken_getuser

    def tearDown(self):
        # Put the real getpass.getuser back.
        getpass.getuser = self.original_getuser
        super().tearDown()

    def test_get_user_name(self):
        name = get_user_name()
        # We should still get a usable, nonempty string.
        self.assertTrue(isinstance(name, str))
        self.assertNotEqual(name, '')
class UserNameVeryBrokenTest(ToilTest):
    """
    Make sure we can get something for a user name when user name fetching is
    broken in ways we did not expect.
    """

    def setUp(self):
        super().setUp()
        # Swap getpass.getuser for a stub raising an exception type that
        # get_user_name() does not specifically anticipate.
        self.original_getuser = getpass.getuser

        def broken_getuser():
            raise RuntimeError('Fake error that we did not anticipate')

        getpass.getuser = broken_getuser

    def tearDown(self):
        # Put the real getpass.getuser back.
        getpass.getuser = self.original_getuser
        super().tearDown()

    def test_get_user_name(self):
        name = get_user_name()
        # We should still get a usable, nonempty string.
        self.assertTrue(isinstance(name, str))
        self.assertNotEqual(name, '')
| StarcoderdataPython |
3476110 | <reponame>RhnSaxena/KonfHub
import requests
import json
# Function that a link as a string parameter,
# performs GET operation on the link
# and returns the response.
def requestGet(link):
    """Perform an HTTP GET on *link* and return its body parsed as JSON.

    On any non-200 status, prints an error message and terminates the
    program via exit().
    """
    response = requests.get(link)
    if response.status_code != 200:
        print("Some error occured. Please check the link")
        exit()
    return json.loads(response.text)
| StarcoderdataPython |
6476817 | <reponame>paras55/officialWebsite
from django.db import models
from officialWebsite.members.models import Member
class Project(models.Model):
    """A project displayed on the official website."""
    name = models.CharField(max_length=255)
    description = models.TextField()
    # Single lead member, distinct from the general members list below.
    project_lead = models.ForeignKey(Member, related_name='lead', on_delete=models.CASCADE)
    # Optional cover image stored under MEDIA_ROOT/project-images/.
    image = models.ImageField(upload_to='project-images/', blank=True)
    members = models.ManyToManyField(Member)
    github_link = models.URLField()
    # Optional free-text metadata about the project.
    funding = models.CharField(max_length=255, blank=True)
    faculty = models.CharField(max_length=255, blank=True)
    extra = models.TextField(blank=True)

    def __str__(self):
        # Use the project name as the display representation (e.g. in admin).
        return self.name
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.