id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
'''
save csv in mysql
'''
# -*- coding:utf-8 -*-
import csv
import os
import numpy as np
import pandas as pd
import pymysql
from pymysql import connect
class CsvToMysql(object):
    """Load CSV files into MySQL: one table per CSV file, one column per CSV column."""

    def __init__(self, hostname, port, user, passwd, db):
        self.dbname = db
        self.conn = connect(host=hostname, port=port, user=user, passwd=passwd, db=db)
        self.cursor = self.conn.cursor()

    def read_csv(self, filename):
        """Read *filename* into a DataFrame and store it as a table named after the file."""
        df = pd.read_csv(filename, keep_default_na=False, encoding='utf-8')
        # Table name: file basename without extension, spaces -> underscores, backtick-quoted.
        table_name = '`' + os.path.split(filename)[-1].split('.')[0].replace(' ', '_') + '`'
        self.csv2mysql(db_name=self.dbname, table_name=table_name, df=df)

    def make_table_sql(self, df):
        """Map pandas dtypes to MySQL column types.

        Returns (column_definitions, column_names), each comma-joined for use
        in CREATE TABLE / INSERT statements.
        """
        columns = df.columns.tolist()
        # df.ftypes was removed in pandas 1.0; dtypes provides the same information here.
        types = df.dtypes
        make_table = []
        make_field = []
        for item in columns:
            item1 = '`' + item.replace(' ', '_').replace(':', '') + '`'
            dtype = str(types[item])
            if 'int' in dtype:
                char = item1 + ' INT'
            elif 'float' in dtype:
                char = item1 + ' FLOAT'
            elif 'datetime' in dtype:
                char = item1 + ' DATETIME'
            else:
                # 'object' (strings) and anything unrecognized fall back to VARCHAR.
                char = item1 + ' VARCHAR(255)'
            make_table.append(char)
            make_field.append(item1)
        return ','.join(make_table), ','.join(make_field)

    def csv2mysql(self, db_name, table_name, df):
        """(Re)create *table_name* and bulk-insert all rows of *df*.

        NOTE: identifiers (table/column names) cannot be parameterized, so they
        are interpolated; only load trusted CSV files.
        """
        field1, field2 = self.make_table_sql(df)
        self.cursor.execute('drop table if exists {}'.format(table_name))
        # The original appended "foreign key(id_index) references result(id)" here,
        # but no id_index column exists, so the CREATE TABLE always failed.
        self.cursor.execute(
            'create table {} (id int AUTO_INCREMENT not null primary key,{})'.format(table_name, field1))
        values = df.values.tolist()
        placeholders = ','.join(['%s'] * len(df.columns))
        try:
            self.cursor.executemany(
                'insert into {}({}) values ({})'.format(table_name, field2, placeholders), values)
        except Exception as e:
            # Exception.message does not exist on Python 3; print the exception itself.
            print(e)
        finally:
            self.conn.commit()
if __name__ == "__main__":
    hostname = 'localhost'
    port = 3306
    user = 'root'
    # Fill in the real database password before running; the original passed the
    # bare placeholder <PASSWORD>, which is a SyntaxError.
    passwd = '<PASSWORD>'
    db = 'test'
    M = CsvToMysql(hostname=hostname, port=port, user=user, passwd=passwd, db=db)
    # To batch-load a directory of CSV files, iterate os.listdir(dir) and call
    # M.read_csv() on each regular file.
    M.read_csv('/media/hkuit164/WD20EJRX/CNN_classification/weight/ceiling_action-FrogFreestyle/3/3/train_log.csv')
# Repository: deepnox-io/python-wipbox
#!/usr/bin/env python3
"""
This package provides simple routines for defining models or schemas.
Package: deepnox.models
This file is a part of python-wipbox project.
(c) 2021, Deepnox SAS.
"""
__import__("pkg_resources").declare_namespace(__name__)
from typing import Union, Any, Dict
import pydantic
class ExtendedBaseModel(pydantic.BaseModel):
    """
    A Pydantic model whose dict() output also includes the values of
    @property attributes declared on the class.

    Resources:
    - https://github.com/samuelcolvin/pydantic/issues/935#issuecomment-554378904
    - https://stackoverflow.com/questions/63264888/pydantic-using-property-getter-decorator-for-a-field-with-an-alias
    """

    @classmethod
    def get_properties(cls):
        """Names of all `property` descriptors declared directly on this class."""
        return [name for name, attr in cls.__dict__.items() if isinstance(attr, property)]

    def dict(
        self,
        *,
        include: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,
        exclude: Union['AbstractSetIntStr', 'DictIntStrAny'] = None,
        by_alias: bool = False,
        skip_defaults: bool = None,
        exclude_unset: bool = False,
        exclude_defaults: bool = False,
        exclude_none: bool = True,
    ) -> Dict[str, Any]:
        """Like BaseModel.dict(), with class properties merged into the result,
        honouring include/exclude/exclude_none."""
        data = super().dict(
            include=include,
            exclude=exclude,
            by_alias=by_alias,
            skip_defaults=skip_defaults,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            exclude_none=exclude_none,
        )
        names = self.get_properties()
        # Apply the same filtering rules to properties as to regular fields.
        if include:
            names = [n for n in names if n in include]
        if exclude:
            names = [n for n in names if n not in exclude]
        if exclude_none:
            names = [n for n in names if getattr(self, n) is not None]
        if names:
            data.update({n: getattr(self, n) for n in names})
        return data
| StarcoderdataPython |
# Repository: ReubenJ/fltk-testbed
import torch.nn as nn
import torch.nn.functional as F
class Cifar10CNN(nn.Module):
    """Small VGG-style CNN for 32x32 CIFAR-10 images.

    Three conv-conv-pool stages (32 -> 64 -> 128 channels) followed by two
    fully-connected layers; forward() returns a softmax distribution over
    the 10 classes.

    NOTE(review): the model returns probabilities; if trained with
    nn.CrossEntropyLoss (which applies log-softmax itself) the final softmax
    would be applied twice — confirm against the training loop.
    """

    def __init__(self):
        super(Cifar10CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(32)
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(64)
        self.conv4 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv5 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn5 = nn.BatchNorm2d(128)
        self.conv6 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.bn6 = nn.BatchNorm2d(128)
        self.pool3 = nn.MaxPool2d(kernel_size=2)
        self.fc1 = nn.Linear(128 * 4 * 4, 128)
        # dim=1 (the class dimension) made explicit: nn.Softmax() without dim is
        # deprecated and only implicitly picked dim=1 for 2D inputs.
        self.softmax = nn.Softmax(dim=1)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """x: (batch, 3, 32, 32) image tensor -> (batch, 10) class probabilities."""
        x = self.bn1(F.relu(self.conv1(x)))
        x = self.bn2(F.relu(self.conv2(x)))
        x = self.pool1(x)
        x = self.bn3(F.relu(self.conv3(x)))
        x = self.bn4(F.relu(self.conv4(x)))
        x = self.pool2(x)
        x = self.bn5(F.relu(self.conv5(x)))
        x = self.bn6(F.relu(self.conv6(x)))
        x = self.pool3(x)
        # 32x32 input halved by three pools -> 4x4 spatial, 128 channels.
        x = x.view(-1, 128 * 4 * 4)
        x = self.fc1(x)
        x = self.softmax(self.fc2(x))
        return x
# File: app/backend/backend/migrations/0004_teams_school.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-19 13:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a required `school` CharField to Teams.

    `default=1` with `preserve_default=False` means the default is only used
    to backfill existing rows during this migration; the model field keeps
    no default afterwards.
    """

    dependencies = [
        ('backend', '0003_auto_20181019_1316'),
    ]
    operations = [
        migrations.AddField(
            model_name='teams',
            name='school',
            # NOTE(review): default=1 (an int) on a CharField — Django coerces it
            # to '1' when backfilling; confirm that is the intended value.
            field=models.CharField(default=1, max_length=128),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
# Repository: BrunoMalard/pybbox
from .bboxConstant import BboxConstant
import netaddr as net
import socket
class BboxAPIUrl:
    """
    Builds and holds the URL (and authentication type) for a Bbox API call.
    """

    API_PREFIX = "api/v1"

    def __init__(self, api_class, api_method, ip=BboxConstant.DEFAULT_LOCAL_IP):
        """
        :param api_class: string, API resource class (may be None)
        :param api_method: string, API method (may be None)
        :param ip: string, box IP/hostname (defaults to the local box)
        :return:
        """
        self.api_class = api_class
        self.api_method = api_method
        self.ip = ip
        self.authentication_type = None
        self.url = None
        self.build_url_request()

    def get_api_class(self):
        """Accessor for the API resource class."""
        return self.api_class

    def get_api_method(self):
        """Accessor for the API method."""
        return self.api_method

    def get_ip(self):
        """Accessor for the target IP/hostname."""
        return self.ip

    def get_url(self):
        """Accessor for the built URL (set by build_url_request)."""
        return self.url

    def get_authentication_type(self):
        """Accessor for the detected authentication type (local vs remote)."""
        return self.authentication_type

    def set_api_name(self, api_class, api_method):
        """Change the API class/method and rebuild the URL."""
        self.api_class = api_class
        self.api_method = api_method
        self.build_url_request()

    def build_url_request(self):
        """
        Build the url to use for making a call to the Bbox API
        :return: url string
        """
        # A private (LAN) address means we talk to the box locally over its
        # default URL; otherwise use remote HTTPS on the dedicated port.
        resolved = socket.gethostbyname(self.ip)
        if net.IPAddress(resolved).is_private():
            base = BboxConstant.DEFAULT_BBOX_URL
            self.authentication_type = BboxConstant.AUTHENTICATION_TYPE_LOCAL
        else:
            base = "https://{}:{}".format(self.ip, BboxConstant.DEFAULT_REMOTE_PORT)
            self.authentication_type = BboxConstant.AUTHENTICATION_TYPE_REMOTE
        segments = [base, self.API_PREFIX]
        if self.api_class is not None:
            segments.append(self.api_class)
            if self.api_method is not None:
                segments.append(self.api_method)
        self.url = "/".join(segments)
| StarcoderdataPython |
#!/usr/bin/python
# -*- coding:utf-8 -*-
from utils.mysql_util import MysqlUtil
from sqlalchemy import desc
from models import Article
import datetime
class ArticleDao:
    """Data-access object for Article rows: paged queries, creation, deletion."""

    def __init__(self):
        self.session = MysqlUtil().get_session()

    def _check_page(self, page, size):
        """Shared pagination guard (extracted from three duplicated copies).

        Returns (error, info): *error* is the out-of-range message, or None
        when the page is valid; *info* is u'more' when at least one further
        page exists, else u'nomore'.

        NOTE: the bound is always computed from the TOTAL article count, even
        for category/tag-filtered queries (preserves the original behavior).
        """
        article_num = self.get_article_num()
        if page * size > article_num:
            return u'分页数超出最大值', None
        info = u'more' if (page + 1) * size < article_num else u'nomore'
        return None, info

    def get_articles(self, page=0, size=20):
        """Return (articles, info) for one page, most recently modified first."""
        error, info = self._check_page(page, size)
        if error:
            return None, error
        return self.session.query(Article).order_by(desc(Article.modified_time)).offset(size * page).limit(size).all(), info

    def get_article_num(self):
        """Total number of Article rows."""
        return self.session.query(Article).count()

    def get_article_by_title(self, title):
        """First article with exactly this title, or None."""
        return self.session.query(Article).filter_by(title=title).first()

    def get_article_by_id(self, id):
        """Article with this id, or None."""
        return self.session.query(Article).filter_by(id=id).first()

    def new_article(self, title, author_id, author_name, cate_id, cate_name, intro, filepath, tags):
        """Create a public article unless the title already exists.

        Returns (ok, message).
        """
        if self.get_article_by_title(title) is not None:
            return False, u'文章标题已经存在!'
        article = Article(
            title=title,
            intro=intro,
            is_public=1,
            auth_id=author_id,
            auth_name=author_name,
            cate_id=cate_id,
            cate_name=cate_name,
            file_path=filepath,
            tags=tags,
            create_time=datetime.datetime.now(),
            modified_time=datetime.datetime.now()
        )
        self.session.add(article)
        self.session.commit()
        return True, u'成功添加新文章!'

    def delete_article(self, article):
        """Delete *article* and commit. Returns (True, message)."""
        self.session.delete(article)
        self.session.commit()
        return True, u'删除成功!'

    def get_articles_by_cate(self, cate, page=0, size=20):
        """Return (articles, info) for one page filtered by category id."""
        error, info = self._check_page(page, size)
        if error:
            return None, error
        return self.session.query(Article).filter_by(cate_id=cate).order_by(desc(Article.modified_time)).offset(size * page).limit(size).all(), info

    def get_article_by_tag(self, tag, page=0, size=20):
        """Return (articles, info) for one page whose tags contain *tag* (LIKE match)."""
        error, info = self._check_page(page, size)
        if error:
            return None, error
        return self.session.query(Article)\
            .filter(Article.tags.like(u'%{tag}%'.format(tag=tag)))\
            .order_by(desc(Article.modified_time)).offset(size * page).limit(size).all(), info
if __name__ == "__main__":
    # Original used the Python-2 `print` statement; the function form works on
    # both Python 2 and 3 for a single argument.
    print(ArticleDao().get_article_by_tag('hive'))
def soma_elementos(lista):
    """Return the sum of the elements of *lista*.

    Equivalent to the original manual accumulation loop (starting from 0),
    but uses the built-in ``sum``; returns 0 for an empty list.
    """
    return sum(lista)
4849649 |
import rospy
# from ros_homebot_msgs import srv as srvs
#from ros_homebot_python import constants as c
#from ros_homebot_python.node import (
# subscribe_to_topic,
#get_service_proxy,
# packet_to_message_type,
# set_line_laser,
# say,
#)
from std_msgs.msg import UInt8MultiArray, Int16
class Robot(object):
    """
    A helper class to simplify interacting with ROS.

    Instantiating the class initializes a ROS node and then BLOCKS inside
    __init__ in a spin loop, calling on_loop() at `sleep_hertz` until ROS
    shuts down. Subclasses are expected to set node_name and override
    init_subscriptions(), on_loop() and on_shutdown().
    """
    # Declared for subclasses; unused by this base class.
    enabled_topics = [
    ]
    # Node name passed to rospy.init_node(); must be set by subclasses.
    node_name = None
    # Frequency (Hz) of the on_loop() polling loop.
    sleep_hertz = 60
    # http://wiki.ros.org/rospy/Overview/Logging
    log_level = 'ERROR' # 'DEBUG'
    def __init__(self):
        self.running = True
        rospy.init_node(self.node_name, log_level=getattr(rospy, self.log_level))
        rospy.on_shutdown(self._on_shutdown)
        self.init_subscriptions()
        # Publishers are created lazily via the properties below.
        self._head_rgb_set_pub = None
        self._head_ultrabright_set_pub = None
        # NOTE: this constructor does not return until rospy shuts down.
        r = rospy.Rate(self.sleep_hertz)
        while not rospy.is_shutdown():
            self.on_loop()
            r.sleep()
    def init_subscriptions(self):
        # Hook for subclasses to register their topic subscriptions.
        pass
    @property
    def head_rgb_set_pub(self):
        # Lazily-created publisher for the head RGB LED topic.
        if self._head_rgb_set_pub is None:
            self._head_rgb_set_pub = rospy.Publisher('/head_arduino/rgb_set', UInt8MultiArray, queue_size=1)
        return self._head_rgb_set_pub
    @property
    def head_ultrabright_set_pub(self):
        # Lazily-created publisher for the head ultrabright LED topic.
        if self._head_ultrabright_set_pub is None:
            self._head_ultrabright_set_pub = rospy.Publisher('/head_arduino/ultrabright_set', Int16, queue_size=1)
        return self._head_ultrabright_set_pub
    def is_shutdown(self):
        # True once _on_shutdown() has run (i.e. ROS began shutting us down).
        return not self.running
    def subscribe(self, *args, **kwargs):
        # Thin pass-through to rospy.Subscriber.
        rospy.Subscriber(*args, **kwargs)#topic_name, msg_type, callback)
    def on_loop(self):
        # Hook called once per loop iteration (sleep_hertz times per second).
        pass
    def _on_shutdown(self):
        # rospy shutdown callback: flip the flag, then let subclasses clean up.
        self.running = False
        self.on_shutdown()
    def on_shutdown(self):
        # Hook for subclasses to run cleanup at shutdown.
        pass
    def say(self, *args, **kwargs):
        # Speak text via the helper in ros_homebot_python.node (imported lazily
        # to avoid a hard dependency at module import time).
        from ros_homebot_python.node import say
        say(*args, **kwargs)
    def move(self, distance, duration):
        """
        Moves the platform a given distance within a given time.
        distance := (x,y) in meters
        duration := scalar in seconds
        The positive y-axis points away from the front of the robot.
        The vector (0, 1) means forward 1 meter
        The vector (0, 0.5) means forward 0.5 meters
        The vector (0, 0) means stop.
        The vector (0, -1) means reverse 1 meter
        The vector (1, 0) means turn counter clockwise and move forward 1 meter
        The vector (-1, 0) means turn clockwise and move forward 1 meter
        """
        raise NotImplementedError
    def turn(self, theta, duration):
        """
        Rotates platform in place by a given angle in radians with a given duration.
        theta := angle in radians
        duration := scalar in seconds
        """
        raise NotImplementedError
    def set_rgbled(self, value):
        """
        Value is a 3-part tuple representing the RGB values of the color, each value being an integer in the range 0-254.
        """
        # NOTE(review): validation uses assert, which is stripped under
        # `python -O`; consider raising ValueError instead.
        assert len(value) == 3
        for i, v in enumerate(value):
            assert 0 <= v <= 254
            #get_service_proxy(c.HEAD, c.ID_LED)(i, v)
        self.head_rgb_set_pub.publish(UInt8MultiArray(data=value))
    def set_ultrabright(self, value):
        """
        Value is a single integer between 0-254 representing brightness with 0 being off and 254 being full brightness.
        """
        assert 0 <= value <= 254
        #get_service_proxy(c.HEAD, c.ID_LED)(3, value)
        self.head_ultrabright_set_pub.publish(Int16(value))
| StarcoderdataPython |
"""
NOTE: these functions are copied from "gpu_extract.py" in the hackathon branch;
the pieces have not yet been put together into a working GPU extraction
in this branch.
"""
import math
import numpy as np
import cupy as cp
import cupyx.scipy.special
from numba import cuda
from ..io import native_endian
from ..util import Timer
import numpy.polynomial.legendre
@cuda.jit
def _hermevander(x, deg, output_matrix):
    """CUDA kernel: fill output_matrix[i, j, :] with the probabilists' Hermite
    (Hermite_e) Vandermonde row for sample x[i, j], using the recurrence
    He_k(x) = x*He_{k-1}(x) - (k-1)*He_{k-2}(x).

    One thread block per row i (blockIdx.x); threads stride over columns j.
    """
    i = cuda.blockIdx.x
    _, j = cuda.grid(2)
    _, stride = cuda.gridsize(2)
    for j in range(j, x.shape[1], stride):
        output_matrix[i][j][0] = 1
        if deg > 0:
            output_matrix[i][j][1] = x[i][j]
        for k in range(2, deg + 1):
            output_matrix[i][j][k] = output_matrix[i][j][k-1]*x[i][j] - output_matrix[i][j][k-2]*(k-1)
def hermevander(x, deg):
    """Temporary wrapper that allocates the output and launches _hermevander.

    1D inputs are promoted to 2D for the kernel and squeezed back on return.
    Returns a cupy array of shape x.shape + (deg+1,) (squeezed).
    """
    if x.ndim == 1:
        x = cp.expand_dims(x, 0)
    output = cp.ndarray(x.shape + (deg+1,))
    blocksize = 256
    # One block row per sample row; ceil-divide columns over the block size.
    numblocks = (x.shape[0], (x.shape[1] + blocksize - 1) // blocksize)
    _hermevander[numblocks, blocksize](x, deg, output)
    return cp.squeeze(output)
@cuda.jit
def _legvander(x, deg, output_matrix):
    """CUDA kernel: fill output_matrix[i, :] with the Legendre Vandermonde row
    for x[i], via Bonnet's recurrence
    j*P_j(x) = (2j-1)*x*P_{j-1}(x) - (j-1)*P_{j-2}(x).

    Threads stride over samples i.
    NOTE(review): index 1 is written unconditionally, so this assumes deg >= 1.
    """
    i = cuda.grid(1)
    stride = cuda.gridsize(1)
    for i in range(i, x.shape[0], stride):
        output_matrix[i][0] = 1
        output_matrix[i][1] = x[i]
        for j in range(2, deg + 1):
            output_matrix[i][j] = (output_matrix[i][j-1]*x[i]*(2*j - 1) - output_matrix[i][j-2]*(j - 1)) / j
def legvander(x, deg):
    """Temporary wrapper that allocates memory and defines grid before calling legvander.
    Probably won't be needed once cupy has the corresponding legvander function.
    Input: Same as cpu version of legvander
    Output: legvander matrix, cp.ndarray
    """
    output = cp.ndarray((len(x), deg + 1))
    blocksize = 256
    # Ceil-divide samples over the block size.
    numblocks = (len(x) + blocksize - 1) // blocksize
    _legvander[numblocks, blocksize](x, deg, output)
    return output
def evalcoeffs(psfdata, wavelengths, specmin=0, nspec=None):
    '''
    evaluate PSF coefficients parameterized as Legendre polynomials
    Args:
        psfdata: PSF data from io.read_psf() of Gauss Hermite PSF file
        wavelengths: 1D array of wavelengths
    Options:
        specmin: first spectrum to include
        nspec: number of spectra to include (default: all)
    Returns a dictionary params[paramname] = value[nspec, nwave]
    The Gauss Hermite coefficients are treated differently:
        params['GH'] = value[i,j,nspec,nwave]
    The dictionary also contains scalars with the recommended spot size
    2*(HSIZEX, HSIZEY)+1 and Gauss-Hermite degrees GHDEGX, GHDEGY
    (which is also derivable from the dimensions of params['GH'])
    '''
    if nspec is None:
        nspec = psfdata['PSF']['COEFF'].shape[1]
    p = dict(WAVE=wavelengths)
    #- Evaluate X and Y which have different dimensionality from the
    #- PSF coefficients (and might have different WAVEMIN, WAVEMAX)
    meta = psfdata['XTRACE'].meta
    wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
    # Rescale wavelengths to the Legendre domain [-1, 1].
    ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
    # TODO: Implement cuda legval
    p['X'] = cp.asarray(numpy.polynomial.legendre.legval(ww, psfdata['XTRACE']['X'][specmin:specmin+nspec].T))
    meta = psfdata['YTRACE'].meta
    wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
    ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
    # TODO: Implement cuda legval
    p['Y'] = cp.asarray(numpy.polynomial.legendre.legval(ww, psfdata['YTRACE']['Y'][specmin:specmin+nspec].T))
    #- Evaluate the remaining PSF coefficients with a shared dimensionality
    #- and WAVEMIN, WAVEMAX
    meta = psfdata['PSF'].meta
    wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
    ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
    L = legvander(ww, meta['LEGDEG'])
    # NOTE(review): nparam and ndeg are computed but never used below.
    nparam = psfdata['PSF']['COEFF'].shape[0]
    ndeg = psfdata['PSF']['COEFF'].shape[2]
    nwave = L.shape[0]
    nghx = meta['GHDEGX']+1
    nghy = meta['GHDEGY']+1
    p['GH'] = cp.zeros((nghx, nghy, nspec, nwave))
    coeff_gpu = cp.array(native_endian(psfdata['PSF']['COEFF']))
    for name, coeff in zip(psfdata['PSF']['PARAM'], coeff_gpu):
        name = name.strip()
        coeff = coeff[specmin:specmin+nspec]
        # 'GH-i-j' parameters are gathered into the 4D 'GH' array; everything
        # else becomes its own [nspec, nwave] entry.
        if name.startswith('GH-'):
            i, j = map(int, name.split('-')[1:3])
            p['GH'][i,j] = L.dot(coeff.T).T
        else:
            p[name] = L.dot(coeff.T).T
    #- Include some additional keywords that we'll need
    for key in ['HSIZEX', 'HSIZEY', 'GHDEGX', 'GHDEGY']:
        p[key] = meta[key]
    return p
def calc_pgh(ispec, wavelengths, psfparams):
    '''
    Calculate the pixelated Gauss Hermite for all wavelengths of a single spectrum
    ispec : integer spectrum number
    wavelengths : array of wavelengths to evaluate
    psfparams : dictionary of PSF parameters returned by evalcoeffs
    returns pGHx, pGHy
    where pGHx[ghdeg+1, nwave, nbinsx] contains the pixel-integrated Gauss-Hermite polynomial
    for all degrees at all wavelengths across nbinsx bins spaning the PSF spot, and similarly
    for pGHy. The core PSF will then be evaluated as
    PSFcore = sum_ij c_ij outer(pGHy[j], pGHx[i])

    NOTE: mutates *psfparams* in place (X, Y, GHSIGX, GHSIGY entries are
    converted to cupy arrays).
    '''
    #- shorthand
    p = psfparams
    #- spot size (ny,nx)
    nx = 2*p['HSIZEX'] + 1
    ny = 2*p['HSIZEY'] + 1
    nwave = len(wavelengths)
    #- convert to cupy arrays
    for k in ['X', 'Y', 'GHSIGX', 'GHSIGY']:
        p[k] = cp.asarray(p[k])
    #- x and y edges of bins that span the center of the PSF spot
    xedges = cp.repeat(cp.arange(nx+1) - nx//2 - 0.5, nwave).reshape(nx+1, nwave)
    yedges = cp.repeat(cp.arange(ny+1) - ny//2 - 0.5, nwave).reshape(ny+1, nwave)
    #- Shift to be relative to the PSF center and normalize
    #- by the PSF sigma (GHSIGX, GHSIGY).
    #- Note: x,y = 0,0 is center of pixel 0,0 not corner
    #- Dimensions: xedges[nx+1, nwave], yedges[ny+1, nwave]
    dx = (p['X'][ispec]+0.5)%1 - 0.5
    dy = (p['Y'][ispec]+0.5)%1 - 0.5
    xedges = ((xedges - dx)/p['GHSIGX'][ispec])
    yedges = ((yedges - dy)/p['GHSIGY'][ispec])
    #- Degree of the Gauss-Hermite polynomials
    ghdegx = p['GHDEGX']
    ghdegy = p['GHDEGY']
    #- Evaluate the Hermite polynomials at the pixel edges
    #- HVx[ghdegx+1, nwave, nx+1]
    #- HVy[ghdegy+1, nwave, ny+1]
    HVx = hermevander(xedges, ghdegx).T
    HVy = hermevander(yedges, ghdegy).T
    #- Evaluate the Gaussians at the pixel edges
    #- Gx[nwave, nx+1]
    #- Gy[nwave, ny+1]
    Gx = cp.exp(-0.5*xedges**2).T / cp.sqrt(2. * cp.pi)
    Gy = cp.exp(-0.5*yedges**2).T / cp.sqrt(2. * cp.pi)
    #- Combine into Gauss*Hermite
    GHx = HVx * Gx
    GHy = HVy * Gy
    #- Integrate over the pixels using the relationship
    #  Integral{ H_k(x) exp(-0.5 x^2) dx} = -H_{k-1}(x) exp(-0.5 x^2) + const
    #- pGHx[ghdegx+1, nwave, nx]
    #- pGHy[ghdegy+1, nwave, ny]
    pGHx = cp.zeros((ghdegx+1, nwave, nx))
    pGHy = cp.zeros((ghdegy+1, nwave, ny))
    # Degree 0 integrates the plain Gaussian: differences of the error function.
    pGHx[0] = 0.5 * cp.diff(cupyx.scipy.special.erf(xedges/cp.sqrt(2.)).T)
    pGHy[0] = 0.5 * cp.diff(cupyx.scipy.special.erf(yedges/cp.sqrt(2.)).T)
    # Higher degrees use the antiderivative identity above: edge differences of GH.
    pGHx[1:] = GHx[:ghdegx,:,0:nx] - GHx[:ghdegx,:,1:nx+1]
    pGHy[1:] = GHy[:ghdegy,:,0:ny] - GHy[:ghdegy,:,1:ny+1]
    return pGHx, pGHy
@cuda.jit()
def _multispot(pGHx, pGHy, ghc, mspots):
    """CUDA kernel: accumulate the PSF spot image for every wavelength,
    mspots[iwave] += sum_ij ghc[i,j,iwave] * outer(pGHy[j,iwave], pGHx[i,iwave]).

    One thread per wavelength.
    NOTE(review): nx, ny, n and m are assigned but unused.
    """
    nx = pGHx.shape[-1]
    ny = pGHy.shape[-1]
    nwave = pGHx.shape[1]
    #this is the magic step
    iwave = cuda.grid(1)
    n = pGHx.shape[0]
    m = pGHy.shape[0]
    if (0 <= iwave < nwave):
        #yanked out the i and j loops in lieu of the cuda grid of threads
        for i in range(pGHx.shape[0]):
            px = pGHx[i,iwave]
            for j in range(0, pGHy.shape[0]):
                py = pGHy[j,iwave]
                c = ghc[i,j,iwave]
                # Accumulate the rank-1 outer product scaled by the coefficient.
                for iy in range(len(py)):
                    for ix in range(len(px)):
                        mspots[iwave, iy, ix] += c * py[iy] * px[ix]
def get_spots(specmin, nspec, wavelengths, psfdata):
    '''Calculate PSF spots for the specified spectra and wavelengths
    Args:
        specmin: first spectrum to include
        nspec: number of spectra to evaluate spots for
        wavelengths: 1D array of wavelengths
        psfdata: PSF data from io.read_psf() of Gauss Hermite PSF file
    Returns:
        spots: 4D array[ispec, iwave, ny, nx] of PSF spots
        corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
    '''
    nwave = len(wavelengths)
    p = evalcoeffs(psfdata, wavelengths, specmin, nspec)
    nx = 2*p['HSIZEX']+1
    ny = 2*p['HSIZEY']+1
    spots = cp.zeros((nspec, nwave, ny, nx))
    #use mark's numblocks and blocksize method
    blocksize = 256
    numblocks = (nwave + blocksize - 1) // blocksize
    for ispec in range(nspec):
        pGHx, pGHy = calc_pgh(ispec, wavelengths, p)
        ghc = cp.asarray(p['GH'][:,:,ispec,:])
        mspots = cp.zeros((nwave, ny, nx)) #empty every time!
        _multispot[numblocks, blocksize](pGHx, pGHy, ghc, mspots)
        spots[ispec] = mspots
    #- ensure positivity and normalize
    #- TODO: should this be within multispot itself?
    spots = spots.clip(0.0)
    norm = cp.sum(spots, axis=(2,3))  #- norm[nspec, nwave] = sum over each spot
    spots = (spots.T / norm.T).T      #- transpose magic for numpy array broadcasting
    #- Define corners of spots
    #- extra 0.5 is because X and Y are relative to center of pixel not edge
    xc = np.floor(p['X'] - p['HSIZEX'] + 0.5).astype(int)
    yc = np.floor(p['Y'] - p['HSIZEY'] + 0.5).astype(int)
    return spots, (xc, yc)
@cuda.jit()
def _cuda_projection_matrix(A, xc, yc, xmin, ymin, ispec, iwave, nspec, nwave, spots):
    """CUDA kernel: scatter each spot image into the projection matrix,
    A[y, x, i, j] += spots[ispec+i, iwave+j] placed at that spot's corner.

    One thread per (spectrum, wavelength) pair.
    """
    #this is the heart of the projection matrix calculation
    ny, nx = spots.shape[2:4]
    i, j = cuda.grid(2)
    #no loops, just a boundary check
    if (0 <= i < nspec) and (0 <= j <nwave):
        # Corner of this spot, relative to the patch origin (xmin, ymin).
        ixc = xc[ispec+i, iwave+j] - xmin
        iyc = yc[ispec+i, iwave+j] - ymin
        #A[iyc:iyc+ny, ixc:ixc+nx, i, j] = spots[ispec+i,iwave+j]
        #this fancy indexing is not allowed in numba gpu (although it is in numba cpu...)
        #try this instead
        for iy, y in enumerate(range(iyc,iyc+ny)):
            for ix, x in enumerate(range(ixc,ixc+nx)):
                temp_spot = spots[ispec+i, iwave+j][iy, ix]
                A[y, x, i, j] += temp_spot
def get_xyrange(ispec, nspec, iwave, nwave, spots, corners):
    """
    Find xy ranges that these spectra cover
    Args:
        ispec: starting spectrum index
        nspec: number of spectra
        iwave: starting wavelength index
        nwave: number of wavelengths
        spots: 4D array[ispec, iwave, ny, nx] of PSF spots
        corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
    Returns (xmin, xmax, ymin, ymax)
    spots[ispec:ispec+nspec,iwave:iwave+nwave] touch pixels[ymin:ymax,xmin:xmax]
    """
    ny, nx = spots.shape[2:4]
    # Note: transfer corners back to host (.get() copies device -> host).
    xc = corners[0][ispec:ispec+nspec, iwave:iwave+nwave].get()
    yc = corners[1][ispec:ispec+nspec, iwave:iwave+nwave].get()
    # Max corner plus the spot extent gives the exclusive upper bound.
    xmin = np.min(xc)
    xmax = np.max(xc) + nx
    ymin = np.min(yc)
    ymax = np.max(yc) + ny
    return xmin, xmax, ymin, ymax
def projection_matrix(ispec, nspec, iwave, nwave, spots, corners):
    '''
    Create the projection matrix A for p = Af
    Args:
        ispec: starting spectrum index
        nspec: number of spectra
        iwave: starting wavelength index
        nwave: number of wavelengths
        spots: 4D array[ispec, iwave, ny, nx] of PSF spots
        corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
    Returns (A[iy, ix, ispec, iwave], (xmin, xmax, ymin, ymax))
    '''
    xc, yc = corners
    xmin, xmax, ymin, ymax = get_xyrange(ispec, nspec, iwave, nwave, spots, corners)
    A = cp.zeros((ymax-ymin,xmax-xmin,nspec,nwave), dtype=np.float64)
    # NOTE(review): the launch grid is sized from the patch's pixel dimensions,
    # but the kernel indexes (spec, wave); this only covers every (ispec, iwave)
    # pair when the patch is at least nspec x nwave pixels — verify.
    threads_per_block = (16, 16)
    blocks_per_grid_y = math.ceil(A.shape[0] / threads_per_block[0])
    blocks_per_grid_x = math.ceil(A.shape[1] / threads_per_block[1])
    blocks_per_grid = (blocks_per_grid_x, blocks_per_grid_y)
    _cuda_projection_matrix[blocks_per_grid, threads_per_block](
        A, xc, yc, xmin, ymin, ispec, iwave, nspec, nwave, spots)
    return A, (xmin, xmax, ymin, ymax)
from .cpu import get_spec_padding
from .both import xp_ex2d_patch
def ex2d_padded(image, imageivar, ispec, nspec, iwave, nwave, spots, corners,
                wavepad, bundlesize=25):
    """
    Extracted a patch with border padding, but only return results for patch
    Args:
        image: full image (not trimmed to a particular xy range)
        imageivar: image inverse variance (same dimensions as image)
        ispec: starting spectrum index relative to `spots` indexing
        nspec: number of spectra to extract (not including padding)
        iwave: starting wavelength index
        nwave: number of wavelengths to extract (not including padding)
        spots: array[nspec, nwave, ny, nx] pre-evaluated PSF spots
        corners: tuple of arrays xcorners[nspec, nwave], ycorners[nspec, nwave]
        wavepad: number of extra wave bins to extract (and discard) on each end
    Options:
        bundlesize: size of fiber bundles; padding not needed on their edges

    Returns a dict with keys 'flux', 'ivar' (each [nspec, nwave]) and
    'Rdiags' [nspec, 2*ndiag+1, nwave].
    """
    # timer = Timer()
    specmin, nspecpad = get_spec_padding(ispec, nspec, bundlesize)
    #- Total number of wavelengths to be extracted, including padding
    nwavetot = nwave+2*wavepad
    # timer.split('init')
    #- Get the projection matrix for the full wavelength range with padding
    cp.cuda.nvtx.RangePush('projection_matrix')
    A4, xyrange = projection_matrix(specmin, nspecpad,
        iwave-wavepad, nwave+2*wavepad, spots, corners)
    cp.cuda.nvtx.RangePop()
    # timer.split('projection_matrix')
    xmin, xmax, ypadmin, ypadmax = xyrange
    #- But we only want to use the pixels covered by the original wavelengths
    #- TODO: this unnecessarily also re-calculates xranges
    cp.cuda.nvtx.RangePush('get_xyrange')
    xlo, xhi, ymin, ymax = get_xyrange(specmin, nspecpad, iwave, nwave, spots, corners)
    cp.cuda.nvtx.RangePop()
    # timer.split('get_xyrange')
    # Trim the padded-wavelength rows back to the unpadded y range.
    # NOTE(review): A4[ypadlo:-ypadhi] assumes ypadhi > 0 — verify wavepad > 0.
    ypadlo = ymin - ypadmin
    ypadhi = ypadmax - ymax
    A4 = A4[ypadlo:-ypadhi]
    #- Number of image pixels in y and x
    ny, nx = A4.shape[0:2]
    #- Check dimensions
    assert A4.shape[2] == nspecpad
    assert A4.shape[3] == nwave + 2*wavepad
    #- Diagonals of R in a form suited for creating scipy.sparse.dia_matrix
    ndiag = spots.shape[2]//2
    cp.cuda.nvtx.RangePush('Rdiags allocation')
    Rdiags = cp.zeros( (nspec, 2*ndiag+1, nwave) )
    cp.cuda.nvtx.RangePop()
    # Only extract when the patch lies fully within the image vertically;
    # `&` works here because both comparisons yield Python bools.
    if (0 <= ymin) & (ymin+ny < image.shape[0]):
        xyslice = np.s_[ymin:ymin+ny, xmin:xmin+nx]
        # timer.split('ready for extraction')
        cp.cuda.nvtx.RangePush('extract patch')
        fx, ivarfx, R = xp_ex2d_patch(image[xyslice], imageivar[xyslice], A4)
        cp.cuda.nvtx.RangePop()
        # timer.split('extracted patch')
        #- Select the non-padded spectra x wavelength core region
        cp.cuda.nvtx.RangePush('select slices to keep')
        specslice = np.s_[ispec-specmin:ispec-specmin+nspec,wavepad:wavepad+nwave]
        cp.cuda.nvtx.RangePush('slice flux')
        specflux = fx[specslice]
        cp.cuda.nvtx.RangePop()
        cp.cuda.nvtx.RangePush('slice ivar')
        specivar = ivarfx[specslice]
        cp.cuda.nvtx.RangePop()
        cp.cuda.nvtx.RangePush('slice R')
        # Band mask selecting the 2*ndiag+1 central diagonals of each
        # per-spectrum block of the resolution matrix R.
        mask = (
            ~cp.tri(nwave, nwavetot, (wavepad-ndiag-1), dtype=bool) &
            cp.tri(nwave, nwavetot, (wavepad+ndiag), dtype=bool)
        )
        i0 = ispec-specmin
        for i in range(i0, i0+nspec):
            ii = slice(nwavetot*i, nwavetot*(i+1))
            Rdiags[i-i0] = R[ii, ii][:,wavepad:-wavepad].T[mask].reshape(nwave, 2*ndiag+1).T
        # timer.split('saved Rdiags')
        cp.cuda.nvtx.RangePop()
        cp.cuda.nvtx.RangePop()
    else:
        #- TODO: this zeros out the entire patch if any of it is off the edge
        #- of the image; we can do better than that
        specflux = cp.zeros((nspec, nwave))
        specivar = cp.zeros((nspec, nwave))
    #- TODO: add chi2pix, pixmask_fraction, optionally modelimage; see specter
    cp.cuda.nvtx.RangePush('prepare result')
    result = dict(
        flux = specflux,
        ivar = specivar,
        Rdiags = Rdiags,
    )
    cp.cuda.nvtx.RangePop()
    # timer.split('done')
    # timer.print_splits()
    return result
| StarcoderdataPython |
from accurate_bg_check.client import BgCheck

# Add your client key, client secrete here
client = BgCheck('CLIENT_KEY', 'CLIENT_SECRETE')
# File: contacts/app/views.py
from django.shortcuts import render
from .models import Contacts
import uuid
from django.http import HttpResponse, HttpResponseRedirect
# Create your views here.
def index(request):
    """Render the contact list page with every stored contact."""
    return render(request, "index.html", {"data": Contacts.objects.all()})
def new(request):
    """Show the empty new-contact form."""
    return render(request, "new.html")
def add(request):
    """Create a contact from the POSTed form fields and return to the index."""
    contact = Contacts(
        unique_id=str(uuid.uuid4()),
        name=request.POST['name'],
        relationship=request.POST['rel'],
        mobile=request.POST['num'],
    )
    contact.save()
    return HttpResponseRedirect("/")
def views(request, id):
    """Render the detail page for the contact with this unique id."""
    contact = Contacts.objects.get(unique_id=id)
    return render(request, "view.html", {"data": contact})
def e(request, id):
    """Show the edit form pre-filled with this contact's data.

    (The terse name is kept — it is referenced by the URLconf.)
    """
    contact = Contacts.objects.get(unique_id=id)
    return render(request, "edit.html", {"data": contact})
def edit(request, id):
    """Apply the POSTed changes to the contact and return to the index.

    (The original return line carried a trailing dataset artifact that made
    it a syntax error.)
    """
    contact = Contacts.objects.get(unique_id=id)
    contact.name = request.POST['name']
    contact.relationship = request.POST['rel']
    contact.mobile = request.POST['num']
    contact.save()
    return HttpResponseRedirect("/")
# Repository: zongdaoming/TinyTransformer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from ..initializer import initialize_from_cfg
from ...extensions import DeformableConvInOne
from ...utils.bn_helper import setup_bn, rollback_bn, FREEZE
__all__ = ['bqnnv1_large']
class Block(nn.Module):
def __init__(self, in_channel, channel, deformable=None):
"""Deformable: indices of op need to use deformalbe conv"""
super(Block, self).__init__()
op_conv2d = [nn.Conv2d] * 10
if deformable:
for i in deformable:
op_conv2d[i] = DeformableConvInOne
self.op1 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(in_channel, channel, kernel_size=(1, 1), stride=(1, 1)),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
nn.ReLU(),
op_conv2d[1](
channel, channel, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=channel, bias=False),
nn.Conv2d(channel, channel, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
)
self.op4 = nn.Sequential(
nn.ReLU(),
op_conv2d[4](
in_channel + channel,
in_channel + channel,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1),
groups=in_channel + channel,
bias=False),
nn.Conv2d(in_channel + channel, channel, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
)
self.op5 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(
in_channel + channel * 2,
in_channel + channel * 2,
kernel_size=(1, 1),
stride=(1, 1),
groups=in_channel + channel * 2,
bias=False),
nn.Conv2d(in_channel + channel * 2, channel, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
)
self.op6 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(in_channel, in_channel, kernel_size=(1, 1), stride=(1, 1), groups=in_channel, bias=False),
nn.Conv2d(in_channel, channel, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
)
self.op7 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(channel, channel, kernel_size=(1, 1), stride=(1, 1), groups=channel, bias=False),
nn.Conv2d(channel, channel, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
)
self.op8 = nn.Sequential(
nn.ReLU(),
op_conv2d[8](
in_channel + channel * 2,
in_channel + channel * 2,
kernel_size=(5, 5),
stride=(1, 1),
padding=(2, 2),
groups=in_channel + channel * 2,
bias=False),
nn.Conv2d(in_channel + channel * 2, channel, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
)
self.op9 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(in_channel, channel, kernel_size=(1, 1), stride=(1, 1)),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
nn.ReLU(),
op_conv2d[9](
channel, channel, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=channel, bias=False),
nn.Conv2d(channel, channel, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
)
self.op10 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(in_channel, in_channel, kernel_size=(1, 1), stride=(1, 1), groups=in_channel, bias=False),
nn.Conv2d(in_channel, channel, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(channel, eps=0.001, momentum=0.0003, affine=True),
)
    def forward(self, x):
        """Cell forward pass: fan the input through the parallel branch ops
        and concatenate the surviving branch outputs along the channel dim.

        NOTE(review): self.op2 / self.op3 are never applied here -- x2 and x3
        are plain concatenations of earlier tensors. Confirm intentional.
        """
        x0 = x
        x1 = self.op1(x0)
        # Progressively wider skip-concats consumed by later ops.
        x2 = torch.cat([x0, x1], 1)
        x3 = torch.cat([x1, x2], 1)
        x4 = self.op4(x2)
        x5 = self.op5(x3)
        x6 = self.op6(x0)
        x7 = self.op7(x6)
        x8 = self.op8(x3)
        x9 = self.op9(x0)
        x10 = self.op10(x0)
        # Aliases / elementwise merge of branch outputs.
        x11 = x6
        x12 = x5 + x8
        x13 = x10
        # Output: 6 branches concatenated -> 6 * channel feature maps.
        x = torch.cat([x4, x7, x9, x11, x12, x13], 1)
        return x
class DqnnaNetLargeV1(nn.Module):
    """NAS-style backbone built from stacked ``Block`` cells.

    Produces five feature maps (c1..c5) and returns the subset selected by
    ``out_layers`` together with their strides.
    """

    def __init__(self,
                 out_layers,
                 out_strides,
                 bn=None,
                 frozen_layers=None,
                 deformable=None,
                 initializer=None):
        """
        Args:
            out_layers: indices of output layers, {0,1,2,3,4}
            out_strides: strides of output features
            bn: batch-norm config; defaults to ``{FREEZE: True}``
            frozen_layers: indices of layers to freeze, subset of {0,1,2,3,4}
            deformable: indices of deformable op in Block
            initializer: initializer method
        """
        super(DqnnaNetLargeV1, self).__init__()
        # Fix: avoid sharing a mutable default dict across instances.
        if bn is None:
            bn = {FREEZE: True}
        # setup bn before building model
        setup_bn(bn)
        channels = [80, 192, 384, 640]
        Hin = 3
        if frozen_layers is not None and len(frozen_layers) > 0:
            assert min(frozen_layers) >= 0, frozen_layers
            assert max(frozen_layers) <= 4, frozen_layers
        assert min(out_layers) >= 0, out_layers
        assert max(out_layers) <= 4, out_layers
        # Fix: freeze_layer() iterates this attribute, so the old code raised
        # TypeError in __init__ when frozen_layers was left at its None
        # default. Normalize to an empty list instead.
        self.frozen_layers = frozen_layers if frozen_layers is not None else []
        self.out_layers = out_layers
        self.out_strides = out_strides
        # Each Block emits 6 concatenated branches of `channel` maps each.
        # NOTE(review): out_layer 0 indexes channels[-1] here; confirm that
        # layer 0 (c1) is never queried through get_outplanes().
        self.outplanes = [channels[i - 1] * 6 for i in self.out_layers]
        self.conv1 = nn.Conv2d(Hin, channels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels[0])
        self.conv2 = nn.Conv2d(channels[0], channels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels[0])
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # frozen layers should not use deformable conv
        self.layer1 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            Block(channels[0], channels[0]),
            Block(channels[0] * 6, channels[0]),
            Block(channels[0] * 6, channels[0]),
        )
        self.layer2 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            Block(channels[0] * 6, channels[1]),
            Block(channels[1] * 6, channels[1]),
            Block(channels[1] * 6, channels[1], deformable),
        )
        self.layer3 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            Block(channels[1] * 6, channels[2]),
            Block(channels[2] * 6, channels[2]),
            Block(channels[2] * 6, channels[2]),
            Block(channels[2] * 6, channels[2], deformable),
        )
        self.layer4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            Block(channels[2] * 6, channels[3]),
            Block(channels[3] * 6, channels[3]),
            Block(channels[3] * 6, channels[3], deformable),
        )
        if initializer is not None:
            initialize_from_cfg(initializer)
        self.freeze_layer()
        # rollback bn after model builded
        rollback_bn()

    def forward(self, input):
        """Run the backbone on ``input['image']`` and return the selected
        feature maps plus their strides."""
        x = input['image']
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.maxpool(x)
        c1 = self.relu(self.bn2(self.conv2(x)))
        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)
        outs = [c1, c2, c3, c4, c5]
        features = [outs[i] for i in self.out_layers]
        return {'features': features, 'strides': self.out_strides}

    def get_outplanes(self):
        """Channel counts of the selected output layers."""
        return self.outplanes

    def freeze_layer(self):
        """Put every frozen layer into eval mode and stop its gradients."""
        layers = [
            nn.Sequential(self.conv1, self.bn1, self.conv2, self.bn2, self.relu, self.maxpool), self.layer1,
            self.layer2, self.layer3, self.layer4
        ]
        for layer_idx in self.frozen_layers:
            layer = layers[layer_idx]
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False

    def train(self, mode=True):
        """
        Sets the module in training mode.
        This has any effect only on modules such as Dropout or BatchNorm.
        Frozen layers are re-frozen afterwards so they stay in eval mode.
        Returns:
            Module: self
        """
        self.training = mode
        for module in self.children():
            module.train(mode)
        self.freeze_layer()
        return self
def bqnnv1_large(pretrain=False, **kwargs):
    """Factory for :class:`DqnnaNetLargeV1`.

    ``pretrain`` is accepted for API compatibility but is not used here.
    """
    model = DqnnaNetLargeV1(**kwargs)
    return model
| StarcoderdataPython |
11284133 | from django.conf import settings
from sqlalchemy.orm import sessionmaker
from EOSS.vassar.api import VASSARClient
from asgiref.sync import async_to_sync, sync_to_async
from EOSS.graphql.client.Dataset import DatasetGraphqlClient
from EOSS.graphql.client.Admin import AdminGraphqlClient
from EOSS.graphql.client.Problem import ProblemGraphqlClient
from EOSS.graphql.client.Abstract import AbstractGraphqlClient
if 'EOSS' in settings.ACTIVE_MODULES:
import EOSS.historian.models as models
import EOSS.data.problem_specific as problem_specific
# Each entry is a (code, template) pair; templates may contain ${...}
# placeholders filled in by the chatbot front end.
general_commands = [
    ('0000', 'Stop')
]
# Fix: the ('2008', ...) entry appeared twice, producing a duplicated
# suggestion in the command list; the second occurrence was removed.
engineer_commands = [
    ('2000', 'Why does design ${design_id} have this science benefit?'),
    ('2012', 'Why does this design have this science benefit?'),
    ('2017', 'Why does this design have this cost?'),
    ('2001', 'How does ${design_id} satisfy ${subobjective}?'),
    ('2002', 'Why does ${design_id} not satisfy ${subobjective}?'),
    ('2008', 'What is the ${engineer_instrument_parameter} of ${engineer_instrument}?'),
    ('2010', 'What is the requirement for ${engineer_instrument_parameter} for ${engineer_measurement}?'),
    ('2013', 'Explain the stakeholder ${engineer_stakeholder} science benefit for this design.'),
    ('2014', 'Explain the objective ${engineer_objective} science benefit for this design.'),
    ('2016', 'Which instruments improve the science score for stakeholder ${engineer_stakeholder}?'),
    ('2015', 'Which instruments improve the science score for objective ${engineer_objective}?'),
]
analyst_commands = [
    ('1000', 'What are the driving features?'),
]
# No explorer commands defined yet.
explorer_commands = [
]
historian_commands = [
    ('4000', 'Which missions [from ${historian_space_agency}] can measure ${historian_measurement} [between ${year} and ${year}]?'),
    ('4001', 'Which missions [from ${historian_space_agency}] do we currently use to measure ${measurement}?'),
    ('4002', 'Which instruments [from ${historian_space_agency}] can measure ${historian_measurement} [between ${year} and ${year}]?'),
    ('4003', 'Which instruments [from ${historian_space_agency}] do we currently use to measure ${historian_measurement}?'),
    ('4004', 'Which missions [from ${historian_space_agency}] have flown ${historian_technology} [between ${year} and ${year}]?'),
    ('4005', 'Which missions [from ${historian_space_agency}] are currently flying ${historian_technology}?'),
    ('4006', 'Which orbit is the most typical for ${historian_technology}?'),
    ('4007', 'Which orbit is the most typical for ${historian_measurement}?'),
    ('4008', 'When was mission ${historian_mission} launched?'),
    ('4009', 'Which missions have been designed by ${historian_space_agency}?'),
    ('4010', 'Show me a timeline of missions [from ${historian_space_agency}] which measure ${historian_measurement}')
]
critic_commands = [
    ('3000', 'What do you think of design ${design_id}?'),
    ('3005', 'What do you think of this design?')
    #'What does agent ${agent} think of design ${design_id}?'
]
def commands_list(command_list, restricted_list=None):
    """Return the template strings of *command_list*.

    When *restricted_list* is given, only commands whose code appears in
    it are kept; otherwise every template is returned.
    """
    if restricted_list is None:
        return [template for _code, template in command_list]
    return [template for code, template in command_list if code in restricted_list]
def general_commands_list(restricted_list=None):
    """Texts of the general commands, optionally filtered to codes in *restricted_list*."""
    return commands_list(general_commands, restricted_list)
def engineer_commands_list(restricted_list=None):
    """Texts of the engineer commands, optionally filtered to codes in *restricted_list*."""
    return commands_list(engineer_commands, restricted_list)
def analyst_commands_list(restricted_list=None):
    """Texts of the analyst commands, optionally filtered to codes in *restricted_list*."""
    return commands_list(analyst_commands, restricted_list)
def explorer_commands_list(restricted_list=None):
    """Texts of the explorer commands (currently always empty)."""
    return commands_list(explorer_commands, restricted_list)
def historian_commands_list(restricted_list=None):
    """Texts of the historian commands, optionally filtered to codes in *restricted_list*."""
    return commands_list(historian_commands, restricted_list)
def critic_commands_list(restricted_list=None):
    """Texts of the critic commands, optionally filtered to codes in *restricted_list*."""
    return commands_list(critic_commands, restricted_list)
def orbits_info(vassar_client: VASSARClient, problem_id: int):
    """Orbit metadata for *problem_id* (delegates to problem_specific)."""
    return problem_specific.get_orbits_info(vassar_client, problem_id)
def instruments_info(vassar_client: VASSARClient, problem_id: int):
    """Instrument metadata for *problem_id* (delegates to problem_specific)."""
    return problem_specific.get_instruments_info(vassar_client, problem_id)
def engineer_instrument_list(vassar_client: VASSARClient, problem_id: int):
    """Instrument names for *problem_id*.

    NOTE(review): `vassar_client` is accepted but unused here -- kept for
    signature parity with the sibling helpers; confirm before removing.
    """
    return [instr["name"] for instr in problem_specific.get_instrument_dataset(problem_id)]
def engineer_instrument_parameter_list(vassar_client: VASSARClient, group_id: int):
    """Instrument parameter names for *group_id* (note: group, not problem)."""
    return problem_specific.get_instruments_parameters(vassar_client, group_id)
def engineer_measurement_list(vassar_client: VASSARClient, problem_id: int):
    """Measurement names for *problem_id* (delegates to problem_specific)."""
    return problem_specific.get_problem_measurements(vassar_client, problem_id)
def engineer_stakeholder_list(vassar_client: VASSARClient, problem_id: int):
    """Stakeholder panel for *problem_id*, fetched synchronously via the
    GraphQL client (replaces the old problem_specific lookup below)."""
    result = async_to_sync(AbstractGraphqlClient.get_stakeholders)(problem_id, True, False, False)
    return result['panel']
    # return problem_specific.get_stakeholders_list(vassar_client, problem_id)
def engineer_objectives_list(vassar_client: VASSARClient, problem_id: int):
    """Objective names for *problem_id* (delegates to problem_specific)."""
    return problem_specific.get_objectives_list(vassar_client, problem_id)
def engineer_subobjectives_list(vassar_client: VASSARClient, problem_id: int):
    """Subobjective names for *problem_id* (delegates to problem_specific)."""
    return problem_specific.get_subobjectives_list(vassar_client, problem_id)
def historian_measurements_list():
    """Stripped names of all Measurement rows in the historian database.

    Fix: the session is now closed in a ``finally`` block -- the old code
    leaked one DB session per call.
    """
    engine = models.db_connect()
    session = sessionmaker(bind=engine)()
    try:
        return [measurement.name.strip() for measurement in session.query(models.Measurement).all()]
    finally:
        session.close()
def historian_missions_list():
    """Stripped names of all Mission rows in the historian database.

    Fix: the session is now closed in a ``finally`` block -- the old code
    leaked one DB session per call.
    """
    engine = models.db_connect()
    session = sessionmaker(bind=engine)()
    try:
        return [mission.name.strip() for mission in session.query(models.Mission).all()]
    finally:
        session.close()
def historian_technologies_list():
    """Static technology names plus stripped InstrumentType names from the
    historian database.

    Fix: the session is now closed in a ``finally`` block -- the old code
    leaked one DB session per call.
    """
    engine = models.db_connect()
    session = sessionmaker(bind=engine)()
    try:
        technologies = list(models.technologies)
        technologies = technologies + [type.name.strip() for type in session.query(models.InstrumentType).all()]
        return technologies
    finally:
        session.close()
def historian_agencies_list():
    """Stripped names of all Agency rows in the historian database.

    Fix: the session is now closed in a ``finally`` block -- the old code
    leaked one DB session per call.
    """
    engine = models.db_connect()
    session = sessionmaker(bind=engine)()
    try:
        return [agency.name.strip() for agency in session.query(models.Agency).all()]
    finally:
        session.close()
| StarcoderdataPython |
3206554 | from boto3 import client
from json import dumps
from os import environ
'''
Coding notes:
1) If the source language is the same target language, a JSON is also stored
using the original transcription text.
2) AWS translate_text() allows 5000 bytes per request, so original
transcription must be split in case its size is bigger.
https://docs.aws.amazon.com/translate/latest/dg/what-is-limits.html
3) Best practices for working with AWS Lambda functions
https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html
4) Lambda, API Gateway and Bucket must be in the same region.
5) Functionalities:
*) Detects if transcription (key) exists in Bucket. If not, returns 400
*) Returns translations already stored to improve efficiency
*) If no translation found, automatically detects transcription language.
If language can't be detected, returns 400
*) If the target language is the same transcription language, converts
transcription to JSON, store it and return it
*) If the target language is different than transcription language, it
translates it, converts transcription to JSON, store it, return it.
Stored file name: key_targetLanguage.srt (e.g: video1_es.srt)
*) Translates transcriptions bigger than 5000 bytes (maximum allowed by
request), doing split without cutting sentences. Step for split can be
4990 for Unicode-8 general languages, but for very special characters
languages, step must be as low as 600.
*) Returns 500 in case any error occurs during the translate/store task.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.get_object
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/translate.html
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/translate.html#Translate.Client.start_text_translation_job
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/comprehend.html
'''
# AWS service clients created once per Lambda container (reused on warm starts).
s3_client = client('s3')
comprehend_client = client('comprehend')
translate_client = client('translate')
# Bucket name is injected through the Lambda environment.
bucket = environ.get('bucket')
# 5000 is the hard AWS translate_text byte limit per request; 4990 leaves
# slack for UTF-8 sources (see module docstring re: dense scripts).
step = 4990
# Threshold to accept an identified language match [0...1]
lan_threshold = 0.6
# Response dictionary.
# NOTE(review): module-level mutable state is shared across warm Lambda
# invocations -- statusCode/headers written while serving one request can
# leak into the next. Confirm, and consider building `resp` per invocation.
resp = {'statusCode': 200,
        'headers': {'Content-Type': 'application/json',
                    'Access-Control-Allow-Origin': '*',
                    'targetKey': ''},
        'body': dumps([])}
'''
languages_list = ["af","am","ar","as","az","ba","be","bn","bs","bg","ca","ceb",
"cs","cv","cy","da","de","el","en","eo","et","eu","fa","fi",
"fr","gd","ga","gl","gu","ht","he","hi","hr","hu","hy","ilo",
"id","is","it","jv","ja","kn","ka","kk","km","ky","ko","ku",
"la","lv","lt","lb","ml","mr","mk","mg","mn","ms","my","ne",
"new","nl","no","or","pa","pl","pt","ps","qu","ro","ru","sa",
"si","sk","sl","sd","so","es","sq","sr","su","sw","sv","ta",
"tt","te","tg","tl","th","tk","tr","ug","uk","ur","uz","vi",
"yi","yo","zh","zh-TW"]
'''
def lambda_handler(event, context):
    """API Gateway (translate-API) entry point.

    Reads two query-string parameters from *event* and delegates to
    :func:`lambda_function`:
      - ``TargetLang``: target language for the translation
      - ``Bucketkey``: S3 object key holding the transcription
    """
    params = event.get('queryStringParameters')
    bucket_key = params.get('Bucketkey')
    target_language = params.get('TargetLang')
    return lambda_function(bucket_key, target_language)
def lambda_function(key, target_lang):
    """Return (caching in S3) the *target_lang* translation of the
    transcription stored under *key*.

    Mutates and returns the module-level ``resp`` dict. Fix: corrected the
    user-facing typo 'soported' -> 'supported' in the 412 error message.
    """
    resp['headers']['targetKey'] = key[:key.rfind('.')] + '_' + target_lang + \
        '.srt'
    ''' IF ORIGINAL TRANSCRIPTION STORED IN S3 IS NOT FOUND, RETURN 40X '''
    # key_exist() sets resp['statusCode'] = 404 as a side effect.
    if not key_exist(key):
        resp['body'] = dumps('Video transcription not found')
        return resp
    '''GET STORED TRANSLATION, IF NOT, TRANSLATE TRANSCRIPTION AND STORE IT'''
    if key_exist(resp.get('headers').get('targetKey')):
        # Cache hit: serve the previously stored translation.
        translation_file = s3_client.get_object(
            Bucket=bucket,
            Key=resp.get('headers').get('targetKey'))
        resp['body'] = translation_file['Body'].read().decode("utf-8")
        resp['statusCode'] = 200  # Ok
        return resp
    else:
        # To get S3 object with original transcription
        response = s3_client.get_object(Bucket=bucket, Key=key)
        text = str(response.get('Body').read().decode('utf-8'))
        # Detect source language by sending first 100 characters to check
        response_language = comprehend_client.detect_dominant_language(
            Text=text[:100])
        if response_language.get('Languages')[0].get('Score') <= lan_threshold:
            resp['statusCode'] = 412  # Precondition Failed
            resp['body'] = dumps('Video language not supported')
            return resp
        source_lang = response_language.get('Languages')[0].get('LanguageCode')
        # Translate text if target language is not the source language
        if target_lang != source_lang:
            text = translate_batches(source_lang, target_lang, text)
            if not text:
                # translate_batches already set resp['statusCode'] = 500.
                resp['body'] = dumps('Translation was not possible')
                return resp
        # Convert translated text into a list of dicts for every sentence
        # (SRT cues are separated by blank lines: index / timing / content).
        list_translated = []
        for i in text.split('\n\n'):
            phrase = i.split('\n')
            list_translated.append({'index': phrase[0],
                                    'start': phrase[1][:8],
                                    'content': phrase[2]})
        # ######### ALWAYS ACTIVATE THIS IN PRODUCTION ##########
        # #### Store translation in a S3 object for future requests #####
        s3_client.put_object(Bucket=bucket,
                             Key=resp.get('headers').get('targetKey'),
                             Body=dumps(list_translated),
                             Metadata={'lang': target_lang,
                                       'src_trscpt_file': key})
        resp['statusCode'] = 200  # Ok
        resp['body'] = dumps(list_translated)
        return resp
def translate_batches(source_lang, target_lang, text):
    '''Translate *text* in batches of at most `step` bytes, splitting only
    at blank lines ("\\n\\n") so SRT cues are never cut mid-entry.

    Returns the translated text, or None on an AWS client error (in which
    case the module-level ``resp['statusCode']`` is set to 500).

    NOTE(review): if the first `step` characters contain no blank line,
    rfind() returns -1, the loop is skipped and the whole text is sent in
    one request that may exceed the API limit -- confirm inputs always
    contain cue separators.
    '''
    lenn = len(text)
    if lenn <= step:
        # Small enough for a single request.
        try:
            translated_text = translate_client.translate_text(
                Text=text,
                SourceLanguageCode=source_lang,
                TargetLanguageCode=target_lang)
        except translate_client.exceptions.ClientError:
            # if e.response['Error']['Code'] == "500":
            resp['statusCode'] = 500  # Internal error
            return None
        return translated_text.get('TranslatedText')
    else:
        srt_temp = ''
        index_low = 0
        # End each batch at the last blank line within the size budget.
        index_high = text.rfind('\n\n', 0, step)
        while(index_high < lenn and index_high != -1):
            try:
                translated_text = translate_client.translate_text(
                    Text=text[index_low:index_high],
                    SourceLanguageCode=source_lang,
                    TargetLanguageCode=target_lang)
            except translate_client.exceptions.ClientError:
                # if e.response['Error']['Code'] == "500":
                resp['statusCode'] = 500  # Internal error
                return None
            # Restore the cue separator consumed by the split.
            srt_temp += translated_text.get('TranslatedText') + '\n\n'
            index_low = index_high + 2
            index_high = text.rfind('\n\n', index_low, index_low + step)
        # Translate the remaining tail after the last blank line.
        try:
            translated_text = translate_client.translate_text(
                Text=text[index_low:lenn],
                SourceLanguageCode=source_lang,
                TargetLanguageCode=target_lang)
        except translate_client.exceptions.ClientError:
            # if e.response['Error']['Code'] == "500":
            resp['statusCode'] = 500  # Internal error
            return None
        srt_temp += translated_text.get('TranslatedText')
        return srt_temp
def key_exist(s3_key):
    """Return True iff *s3_key* exists in the module-level bucket.

    Side effect: sets ``resp['statusCode'] = 404`` when the key is missing
    (the original contract). Fix: non-404 ClientErrors used to fall through
    the ``except`` and return an implicit None; they now explicitly return
    False, which is behavior-compatible (both are falsy) but unambiguous.
    """
    try:
        s3_client.head_object(Bucket=bucket, Key=s3_key)
    except s3_client.exceptions.ClientError as e:
        if e.response.get('Error').get('Code') == "404":
            resp['statusCode'] = 404  # Not found
        return False
    else:
        return True
| StarcoderdataPython |
3412835 | <reponame>kivzcu/heatmap.zcu<gh_stars>0
import yaml
import os
from typing import Dict, Set
from shared_types import StringSetType
from Utilities.Database import database_record_logs
from Utilities.helpers import should_skip
# Directory that holds one YAML configuration file per dataset.
CONFIG_FILES_PATH = "DatasetConfigs/"
# File extension (with leading dot) of the dataset configuration files.
CONFIG_FILE_TYPE = ".yaml"
def load_configuration(dataset_name: str) -> Dict[str, any]:
    """
    Loads yaml configuration file into memory.

    The on-disk "devices" entry is a list of single-key dicts; it is merged
    into one flat dict (an empty dict when the entry is None/missing).

    Args:
        dataset_name: name of dataset that has existing configuration file
    Returns:
        yaml configuration file as dictionary

    NOTE(review): the annotation uses the builtin ``any``; ``typing.Any``
    was probably intended -- kept for interface parity.
    """
    config_path = CONFIG_FILES_PATH + dataset_name + CONFIG_FILE_TYPE
    with open(config_path, "r") as config_file:
        config = yaml.load(config_file, Loader=yaml.FullLoader)
    merged_devices = {}
    if config["devices"] is not None:
        for entry in config["devices"]:
            merged_devices.update(entry)
    config["devices"] = merged_devices
    return config
def update_configuration(dataset_name: str,
                         new_devices: StringSetType) -> None:
    """
    Appends *new_devices* to the end of the dataset's configuration file.

    Each device is written as a YAML list entry with UNKNOWN! placeholder
    coordinates; empty device names are skipped.

    Args:
        dataset_name: name of dataset that has existing configuration file
        new_devices: list or set of new devices for dataset
    """
    config_path = CONFIG_FILES_PATH + dataset_name + CONFIG_FILE_TYPE
    with open(config_path, "a") as config_file:
        for device in new_devices:
            if device == "":
                continue
            entry = (" - " + device + ":\n"
                     " x: UNKNOWN!\n"
                     " y: UNKNOWN!\n"
                     "\n")
            config_file.write(entry)
def check_if_there_is_a_config_file(dataset_name: str) -> bool:
    """
    Goes through all config files (representing valid datasets in the
    database) and checks whether *dataset_name* has one.

    Fix: the old ``split('.')[0]`` truncated dataset names containing
    dots (e.g. "my.data.yaml" compared as "my"); ``os.path.splitext``
    strips only the extension.

    Args:
        dataset_name: name of dataset that has existing configuration file
    Returns:
        True - if contains
        False - if not
    """
    for config_file in os.listdir(CONFIG_FILES_PATH):
        base_name, _ext = os.path.splitext(config_file)
        if base_name == dataset_name:
            return True
    return False
def return_dictionary_of_valid_devices(
        devices: Dict[str, any]) -> Dict[str, Dict[str, any]]:
    """
    Filters *devices* (as found in a config file) down to the valid ones:
    entries whose coordinates are present and not flagged to skip
    (UNKNOWN! / SKIP), as decided by ``should_skip``.

    Args:
        devices: dictionary of devices contained in config file
    Returns:
        Dictionary mapping device name -> {'name', 'x', 'y'}
    """
    return {
        name: {'name': name, 'x': spec['x'], 'y': spec['y']}
        for name, spec in devices.items()
        if not should_skip(spec)
    }
import datanog

# Calibrate the first detected DAQ device.
daq_session = datanog.daq()
daq_session.calibrate(daq_session.dev[0])
1779831 | <gh_stars>0
# name1 = "Helder"
# name2 = "Ragle"
# name3 = "Anna"
# name4 = "Tina"
# name5 = "Anni"
# name6 = "Marvin"
names = ["Helder", "Ragle", "Anna", "Tina", "Anni", "Marvin"]
# Print every name. Fix: iterate the list directly instead of the old
# while-loop with a hard-coded count of 6, which would silently drop or
# over-index entries if the list changed.
for name in names:
    print(name)
| StarcoderdataPython |
3514061 | from PySide.QtGui import QWidget, QGridLayout, QLabel, QLineEdit, QPushButton
from PySide.QtCore import Qt
class Connect(QWidget):
    """Base widget for the connection dialogs: a grid-layout QWidget shown
    application-modally (blocks input to all other windows)."""
    def __init__(self):
        QWidget.__init__(self)
        self.grid = QGridLayout()
        self.setLayout(self.grid)
        # Modal across the whole application, not just the parent window.
        self.setWindowModality(Qt.ApplicationModal)
    def showUI(self):
        """Display the dialog."""
        self.show()
    def finish(self):
        """Close the dialog."""
        self.close()
class AskConnect(Connect):
    """Dialog that asks the user for an IP address to connect to."""
    def __init__(self):
        Connect.__init__(self)
        self.initUI()
        Connect.showUI(self)
    def initUI(self):
        """Build the grid: prompt, IP label + line edit, Connect/Cancel buttons."""
        title = QLabel("Enter IP address of the person you wish to connect to.")
        self.grid.addWidget(title, 0, 0, 2, 1)
        ipL = QLabel("IP:")
        self.grid.addWidget(ipL, 0, 1)
        ip = QLineEdit()
        self.grid.addWidget(ip, 1, 1)
        attemptConnection = QPushButton("Connect")
        attemptConnection.clicked.connect(self.tryConnect)
        self.grid.addWidget(attemptConnection, 0, 2)
        cancel = QPushButton("Cancel")
        cancel.clicked.connect(self.cancel)
        self.grid.addWidget(cancel, 1, 2)
    def tryConnect(self):
        """Attempt the connection. NOTE(review): not implemented yet -- the
        entered IP is not read anywhere."""
        pass
    def cancel(self):
        """Abort and close the dialog."""
        Connect.finish(self)
class AskedConnect(Connect):
    """Dialog shown on the receiving side of a connection request.
    Currently only initializes the base dialog."""
    def __init__(self):
        Connect.__init__(self)
| StarcoderdataPython |
9713032 | # -*- coding: utf-8 -*-
"""Provides sync for podio."""
__all__ = [
'add_notification',
'AddNotification'
]
from pyramid_torque_engine import unpack
from pyramid_torque_engine import operations as ops
from pyramid_torque_engine import repo
from pyramid import path
from pyramid_simpleauth.model import get_existing_user
import colander
import notification_table_executer
import datetime
import pyramid_basemodel as bm
import requests
import json
import os
def send_email_from_notification_dispatch(request, notification_dispatch_id):
    """Boilerplate to extract information from the notification
    dispatch and send an email.

    Returns False when the dispatch id does not exist, True after sending.
    Marks the dispatch as sent (timestamps it) on success.

    Please note that no verification if it should
    be sent is made prior to sending.
    """
    lookup = repo.LookupNotificationDispatch()
    dotted_name_resolver = path.DottedNameResolver()
    notification_dispatch = lookup(notification_dispatch_id)
    if not notification_dispatch:
        return False
    # Get our spec.
    spec = notification_dispatch.single_spec
    # Get our Address to send to.
    send_to = notification_dispatch.address
    # Resolve the dotted-name view that renders and sends the email.
    view = dotted_name_resolver.resolve(notification_dispatch.view)
    # The context is the parent resource of the triggering event.
    context = notification_dispatch.notification.event.parent
    # Send the email.
    view(request, context, spec, send_to)
    # Record the send time so this dispatch is not sent again.
    notification_dispatch.sent = datetime.datetime.now()
    bm.save(notification_dispatch)
    return True
def notification_email_single_view(request):
    """View to handle a single email notification dispatch.

    Expects a JSON body with an integer ``notification_dispatch_id``;
    returns 400 on malformed input, 404 when the dispatch is unknown,
    otherwise dispatches the email and returns ``{'dispatched': 'ok'}``.
    """
    class SingleNotificationSchema(colander.Schema):
        notification_dispatch_id = colander.SchemaNode(
            colander.Integer(),
        )
    schema = SingleNotificationSchema()
    # Decode JSON.
    # NOTE(review): the local name `json` shadows the stdlib module name --
    # harmless here, but worth renaming.
    try:
        json = request.json
    except ValueError as err:
        request.response.status_int = 400
        return {'JSON error': str(err)}
    # Validate.
    try:
        appstruct = schema.deserialize(json)
    except colander.Invalid as err:
        request.response.status_int = 400
        return {'error': err.asdict()}
    # Get data out of JSON.
    notification_dispatch_id = appstruct['notification_dispatch_id']
    # Send the email.
    r = send_email_from_notification_dispatch(request, notification_dispatch_id)
    if not r:
        request.response.status_int = 404
        return {'error': u'Notification dispatch not Found.'}
    # Return 200.
    return {'dispatched': 'ok'}
def notification_email_batch_view(request):
    """View to handle a batch email notification dispatch.

    NOTE(review): not implemented yet -- returns None, which the JSON
    renderer serializes as null.
    """
    pass
class AddNotification(object):
    """Standard boilerplate to add a notification.

    An engine-operation callable: when invoked for an event on a resource
    of ``iface``, it creates one Notification per user holding ``role``
    and optimistically dispatches any that are already due."""
    def __init__(self, iface, role, dispatch_mapping, delay=None):
        """Store the spec: resource interface, role to notify, dispatch
        mapping and optional delay before sending."""
        self.dispatch_mapping = dispatch_mapping
        self.notification_factory = repo.NotificationFactory
        self.role = role
        self.delay = delay
        self.iface = iface
    def __call__(self, request, context, event, op, **kwargs):
        """Create notifications for every interested user and try to send
        them immediately."""
        # Unpack.
        dispatch_mapping = self.dispatch_mapping
        notification_factory = self.notification_factory(request)
        delay = self.delay
        iface = self.iface
        role = self.role
        # Prepare.
        notifications = []
        # Resolve the users holding `role` via the registered roles mapping.
        interested_users_func = get_roles_mapping(request, iface)
        interested_users = interested_users_func(request, context)
        for user in interested_users[role]:
            notification = notification_factory(event, user, dispatch_mapping, delay)
            notifications.append(notification)
        # Tries to optimistically send the notification.
        dispatch_notifications(request, notifications)
def add_notification(config,
                     iface,
                     role,
                     state_or_action_changes,
                     dispatch_mapping,
                     delay=None):
    """Pyramid directive: subscribe an ``AddNotification`` operation so the
    given state/action changes on ``iface`` notify users holding ``role``."""
    # Unpack.
    _, o, _, s = unpack.constants()
    _, on, _ = unpack.directives(config)
    # Register the operation constant, then bind our handler to it.
    o.register(
        'CREATE_NOTIFICATION',
    )
    create_notification_in_db = AddNotification(iface, role, dispatch_mapping, delay)
    on(iface, state_or_action_changes, o.CREATE_NOTIFICATION, create_notification_in_db)
def add_roles_mapping(config, iface, mapping):
    """Register *mapping* as the roles mapping for *iface*.

    First registration wins: later calls for the same iface are no-ops.
    """
    roles_mapping = config.registry.roles_mapping
    if iface not in roles_mapping:
        roles_mapping[iface] = mapping
def get_roles_mapping(request, iface):
    """Return the roles mapping registered for *iface*, or None."""
    return request.registry.roles_mapping.get(iface, None)
def get_operator_user(request, registry=None):
    """We have a special user in our db representing the operator user. Here
    we look them up by username, constructed from the client server name.

    The operator should be the one to receive e-mails that target
    the website / administration. Raises ``Exception`` for unknown sites.

    Fix: compare with ``is None`` instead of ``== None``.
    """
    if registry is None:
        # Unpack.
        settings = request.registry.settings
    else:
        settings = registry.settings
    # NOTE(review): `settings` is computed but never used below -- confirm
    # whether the lookup was meant to read the site title from settings
    # rather than the environment.
    # Get the user, which depends on the server.
    server = os.environ.get('INI_site__title', '')
    if server.lower() == 'opendesk':
        username = u'opendesk_operator'
    elif server.lower() == 'fabhub':
        username = u'fabhub_operator'
    else:
        raise Exception('Operator user not configured.')
    return get_existing_user(username=username)
def dispatch_notifications(request, notifications):
    """Dispatches notifications directly without waiting for the
    background process.

    Only email-channel notifications whose dispatch is already due are
    sent; everything else is left for the background worker.
    """
    lookup = repo.LookupNotificationDispatch()
    now = datetime.datetime.now()
    # Loop through the notifications and check if we should send them.
    for notification in notifications:
        # Get or create the user's notification preferences.
        preference = repo.get_or_create_notification_preferences(notification.user)
        # Check if it's an email and if it's due to dispatch; if so, dispatch.
        if preference.channel == 'email':
            for dispatch in lookup.by_notification_id(notification.id):
                if dispatch.due <= now:
                    send_email_from_notification_dispatch(request, dispatch.id)
class IncludeMe(object):
    """Set up the state change event subscription system and provide an
    ``add_engine_subscriber`` directive.

    Keyword arguments exist so tests can inject replacement directives.
    """
    def __init__(self, **kwargs):
        self.add_notification = kwargs.get('add_notification', add_notification)
        self.add_roles_mapping = kwargs.get('add_roles_mapping', add_roles_mapping)
        self.get_roles_mapping = kwargs.get('get_roles_mapping', get_roles_mapping)
    def __call__(self, config):
        """Handle `/events` requests and provide subscription directive."""
        # Dispatch the notifications.
        config.add_request_method(dispatch_notifications, 'dispatch_notifications', reify=True)
        # Adds a notification to the resource.
        config.add_directive('add_notification', self.add_notification)
        # Registry-wide store used by the roles-mapping directives below.
        config.registry.roles_mapping = {}
        # Adds / gets role mapping.
        config.add_directive('add_roles_mapping', self.add_roles_mapping)
        config.add_directive('get_roles_mapping', self.get_roles_mapping)
        # Operator user to receive admin related emails.
        config.add_request_method(get_operator_user, 'operator_user', reify=True)
        # Expose webhook views to notifications such as single / batch emails / sms's.
        config.add_route('notification_email_single', '/notifications/email_single')
        config.add_view(notification_email_single_view, renderer='json',
                        request_method='POST', route_name='notification_email_single')
        config.add_route('notification_email_batch', '/notifications/email_batch')
        config.add_view(notification_email_batch_view, renderer='json',
                        request_method='POST', route_name='notification_email_batch')
# Conventional Pyramid entry point resolved by ``config.include(...)``.
includeme = IncludeMe().__call__
| StarcoderdataPython |
3532769 | <filename>STED_analysis/img_preprocess.py
# -*- coding: utf-8 -*-
import cv2,os
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from removehighlight import remove_connectivity
#Binary processing
#Set the threshold threshold, the pixel value is less than the threshold, the value is 0, the pixel value is greater than the threshold, the value is 1
#The specific threshold needs to be tried many times, and the effect of different thresholds is different
def get_table(threshold=115):
table = []
for i in range(256):
if i < threshold:
table.append(0)
else:
table.append(1)
return table
def calcGrayHist(image):
rows,clos = image.shape
grahHist = np.zeros([256],np.uint64)
for r in range(rows):
for c in range(clos):
grahHist[image[r][c]] +=1
return grahHist
#gray image generation
def tif2gray(img_path):
# img_path = '/home/zat/project/STED-20200915T020146Z-001/STED/STED (20200909)/Sample 1/Cell 3/Image_Area01_Channel_tubulin.tif'
uint16_img = cv2.imread(img_path, -1)
uint16_img -= uint16_img.min()
uint16_img = uint16_img / (uint16_img.max() - uint16_img.min())
uint16_img *= 255
new_uint16_img = uint16_img.astype(np.uint8)
#cv2.imshow('UINT8', uint8_img)
# cv2.imshow('UINT16', new_uint16_img)
#hsv=cv2.cvtColor(new_uint16_img,cv2.COLOR_RGB2HSV)
#cv2.imshow('hsv', hsv)
name = img_path[-15:]
cv2.imwrite(name, new_uint16_img)
# new_path=os.path.join('/home/zat/project/STED_NEW/',path,subpath,subsubpath)
# print('---------------')
# if not os.path.exists(new_path):
# print(new_path)
# os.makedirs(new_path)
# cv2.imwrite(new_path+'/'+file, new_uint16_img)
return new_uint16_img
def denoising(new_img_path):
new_img = cv2.imread(new_img_path,cv2.IMREAD_GRAYSCALE)
Maximg = np.max(new_img)
Minimg = np.min(new_img)
Omin,Omax = 0,260
a = float(Omax - Omin)/(Maximg - Minimg)
b = Omin - a*Minimg
O = a*new_img + b
O = O.astype(np.uint8)
# a=1
# O = float(a)*new_img
# O[0>255] = 255
# O = np.round(O)
# O = O.astype(np.uint8)
# binary_im=cv2.GaussianBlur(new_img, (7, 7), 0)
# binary_im = cv2.medianBlur(O,5)
# binary_im = cv2.blur(O,(3,3))
return O
def coloring_local(new_img_path,file_name):
new_img = cv2.imread(new_img_path)
HSV=cv2.cvtColor(new_img,cv2.COLOR_BGR2HSV)
HSV_ex_high=cv2.cvtColor(new_img,cv2.COLOR_BGR2HSV)
HSV_ex_low=cv2.cvtColor(new_img,cv2.COLOR_BGR2HSV)
retn_img=HSV
h,w,_=HSV.shape
thresh_point=np.array([0,0,0])
z=range(0,h)
d=range(0,w)
thresh_point_num=0
for x in z:
for y in d:
b = HSV[x,y]
if b.any()!=np.array([0,0,0]).any():
thresh_point_num+=1
thresh_point=thresh_point+b
thresh_point=thresh_point/thresh_point_num
print('thresh_point',thresh_point)
for x in z:
for y in d:
b = HSV[x,y]
if 'vash' in file_name:
if b[2] >= round(thresh_point[2])+30 and b[2] <= 255:
HSV_ex_low[x,y]=[0 ,0 ,221]
else:
HSV_ex_low[x,y]=[0 ,0 ,0]
else:
if b[2] >= round(thresh_point[2])+30 and b[2] <= 255:
HSV_ex_high[x,y]=[34 ,255 ,255]
else:
HSV_ex_high[x,y]=[0 ,0 ,0]
if 'vash' in file_name:
# HSV_ex_low = cv2.GaussianBlur(HSV_ex_low, (3, 3), 0)
thresh, binary = cv2.threshold(HSV_ex_low, round(thresh_point[2]), 255,cv2.THRESH_BINARY)
retn_img=binary
else:
# HSV_ex_high = cv2.GaussianBlur(HSV_ex_high, (3, 3), 0)
thresh, binary = cv2.threshold(HSV_ex_high, round(thresh_point[2]), 255,cv2.THRESH_BINARY)
retn_img=binary
return retn_img
def coloring_whole(new_img_path,file_name):
    """Whole-image HSV thresholding variant of coloring_local.

    Adds hue/saturation bounds (H in [0,180], S in [0,43], V <= 220) on top of
    the mean-brightness test, blurs the recolored image, and binarizes it.
    Intermediate images are written to files named from slices of *file_name*
    (debug output).  Returns the binarized image.
    """
    #remove noise
    # new_img_path = 'Image_Area02_Channel_tubulin.jpg'
    new_img = cv2.imread(new_img_path)
    #dst = cv2.fastNlMeansDenoisingColored(new_img,None,20,20,9,21)
    #cv2.imwrite('no_noise.jpg', dst)
    HSV=cv2.cvtColor(new_img,cv2.COLOR_BGR2HSV)
    HSV_ex_high=cv2.cvtColor(new_img,cv2.COLOR_BGR2HSV)
    HSV_ex_low=cv2.cvtColor(new_img,cv2.COLOR_BGR2HSV)
    retn_img=HSV
    h,w,_=HSV.shape
    # Debug dump of the raw HSV image, named from the file-name suffix.
    name = file_name[-10:]
    cv2.imwrite(name, HSV)
    thresh_point=np.array([0,0,0])
    z=range(0,h)
    d=range(0,w)
    thresh_point_num=0
    # Mean HSV of all non-black pixels (see NOTE in coloring_local about
    # the `.any()` comparison and the possible division by zero).
    for x in z:
        for y in d:
            b = HSV[x,y]
            if b.any()!=np.array([0,0,0]).any():
                thresh_point_num+=1
                thresh_point=thresh_point+b
    thresh_point=thresh_point/thresh_point_num
    print('thresh_point',thresh_point)
    # remove highlight
    for x in z:
        for y in d:
            b = HSV[x,y]
            ## 04/09
            # if 'Channel_vash' in file_name:
            #     if b[0]>=0 and b[0]<=180 and b[1] >= 0 and b[1] <= 40 and b[2] >= 60 and b[2] <= 220:
            #         HSV_ex_low[x,y]=[0 ,0 ,221]
            #     else:
            #         HSV_ex_low[x,y]=[0 ,0 ,0]
            #
            # else:
            #     if b[0]>=0 and b[0]<=180 and b[1] >= 0 and b[1] <= 43 and b[2] >= 90 and b[2] <= 220:
            #         HSV_ex_high[x,y]=[34 ,255 ,255]
            #     else:
            #         HSV_ex_high[x,y]=[0 ,0 ,0]
            ## 09/09
            # if 'Channel_vash' in file_name:
            #     if b[0]>=0 and b[0]<=180 and b[1] >= 0 and b[1] <= 43 and b[2] >= 100 and b[2] <= 220:
            #         HSV_ex_low[x,y]=[0 ,0 ,221]
            #     else:
            #         HSV_ex_low[x,y]=[0 ,0 ,0]
            #
            # else:
            #     if b[0]>=0 and b[0]<=180 and b[1] >= 0 and b[1] <= 43 and b[2] >= 100 and b[2] <= 220:
            #         HSV_ex_high[x,y]=[34 ,255 ,255]
            #     else:
            #         HSV_ex_high[x,y]=[0 ,0 ,0]
            # Keep only low-saturation pixels brighter than mean V + 30.
            if 'vash' in file_name:
                if b[0]>=0 and b[0]<=180 and b[1] >= 0 and b[1] <= 43 and b[2] >= round(thresh_point[2])+30 and b[2] <= 220:
                    HSV_ex_low[x,y]=[0 ,0 ,221]
                else:
                    HSV_ex_low[x,y]=[0 ,0 ,0]
            else:
                if b[0]>=0 and b[0]<=180 and b[1] >= 0 and b[1] <= 43 and b[2] >= round(thresh_point[2])+30 and b[2] <= 220:
                    HSV_ex_high[x,y]=[34 ,255 ,255]
                else:
                    HSV_ex_high[x,y]=[0 ,0 ,0]
    # Blur (kernel depends on the brightness level) and binarize.
    if 'vash' in file_name:
        name = file_name[-9:]
        cv2.imwrite(name, HSV_ex_low)
        HSV_ex_low = cv2.GaussianBlur(HSV_ex_low, (3, 3), 0)
        name = file_name[-8:]
        cv2.imwrite(name, HSV_ex_low)
        # cv2.imshow("imageHSV",HSV_ex_low)
        # cv2.imwrite(new_img_path+'.tif', HSV_ex_low)
        # HSV_ex_low = cv2.fastNlMeansDenoisingColored(HSV_ex_high,None,10,10,7,21)
        # HSV_ex_low = cv2.medianBlur(HSV_ex_low,3)
        thresh, binary = cv2.threshold(HSV_ex_low, round(thresh_point[2])+30, 255,cv2.THRESH_BINARY)
        retn_img=binary
    else:
        if round(thresh_point[2])+30>=65:
            name = file_name[-7:]
            cv2.imwrite(name, HSV_ex_high)
            # HSV_ex_high = cv2.bilateralFilter(HSV_ex_high, int(round(thresh_point[2]))+30, 75, 75)
            HSV_ex_high = cv2.GaussianBlur(HSV_ex_high, (5, 5), 0)
            name = file_name[-6:]
            cv2.imwrite(name, HSV_ex_high)
            # HSV_ex_high = cv2.medianBlur(HSV_ex_high,3)
            # HSV_ex_high = cv2.cvtColor(HSV_ex_high, cv2.COLOR_BGR2GRAY)
            thresh, binary = cv2.threshold(HSV_ex_high, round(thresh_point[2])+30, 255,cv2.THRESH_BINARY)
            retn_img=binary
        else:
            name = file_name[-7:]
            cv2.imwrite(name, HSV_ex_high)
            HSV_ex_high = cv2.GaussianBlur(HSV_ex_high, (3, 3), 0)
            name = file_name[-6:]
            cv2.imwrite(name, HSV_ex_high)
            # HSV_ex_high = cv2.bilateralFilter(HSV_ex_high, int(round(thresh_point[2]))+30, 75, 75)
            # HSV_ex_high = cv2.medianBlur(HSV_ex_high,3)
            # HSV_ex_high = cv2.cvtColor(HSV_ex_high, cv2.COLOR_BGR2GRAY)
            thresh, binary = cv2.threshold(HSV_ex_high, round(thresh_point[2])+30, 255,cv2.THRESH_BINARY)
            retn_img=binary
    # retn_img = cv2.dilate(retn_img,np.ones((2,1),np.uint8),iterations = 2)
    # remove isolated points
    # num_labels,labels,stats,centers = cv2.connectedComponentsWithStats(retn_img, connectivity=8,ltype=cv2.CV_32S)
    # new_image = retn_img.copy()
    # for label in range(num_labels):
    #     if stats[label,cv2.CC_STAT_AREA] == 1:
    #         new_image[labels == label] = 0
    # retn_img=new_image
    # retn_img = cv2.medianBlur(retn_img,3)
    # retn_img = cv2.fastNlMeansDenoising(retn_img,3,3,7,21)
    # cv2.imshow('HSV_ex',HSV_ex_high)
    # cv2.imwrite(new_img_path+'.tif', HSV_ex_high)
    return retn_img
def merge(path1, path2):
    """Blend two images: full weight for *path1*, half weight for *path2*.

    Both paths are read with cv2.imread; the images must have identical
    shapes for addWeighted to succeed.  Returns the blended image.
    """
    # path1='Image_Area02_Channel_tubulin.jpg.tif'
    # path2='Image_Area02_Channel_vash2.jpg.tif'
    new_img1 = cv2.imread(path1)
    new_img2 = cv2.imread(path2)
    # weighted sum: 1.0 * img1 + 0.5 * img2 + 1 (gamma)
    htich = cv2.addWeighted(new_img1,1, new_img2, 0.5, 1)
    # htich = cv2.add(new_img1,new_img2)
    # cv2.imshow("merged_img", htich)
    return htich
# cv2.imshow("merged_img", htich)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
if __name__ == '__main__':
    # Pass 1: colorize every STED "Channel" image, mirroring the
    # directory tree under ./STED_COLOR/.
    data_path = './STED-20200915T020146Z-001/STED'
    fileList = os.listdir(data_path)
    for path in fileList:
        for subpath in os.listdir(os.path.join(data_path,path)):
            for subsubpath in os.listdir(os.path.join(data_path,path,subpath)):
                for file in os.listdir(os.path.join(data_path,path,subpath,subsubpath)):
                    if 'Channel' in file:
                        print(file)
                        img_gray=tif2gray(os.path.join(data_path,path,subpath,subsubpath,file))
                        # NOTE(review): coloring_whole() calls cv2.imread on its
                        # first argument (a path), but here it is handed the
                        # image array returned by tif2gray — confirm intent.
                        img_color=coloring_whole(img_gray,file)
                        new_path=os.path.join('./STED_COLOR/',path,subpath,subsubpath)
                        print('---------------')
                        if not os.path.exists(new_path):
                            print(new_path)
                            os.makedirs(new_path)
                        cv2.imwrite(new_path+'/'+file, img_color)
    # Pass 2: denoise images from ./1 into ./STED_DENOISE/1.
    data_path = './1'
    fileList = os.listdir(data_path)
    for path in fileList:
        for subpath in os.listdir(os.path.join(data_path,path)):
            for subsubpath in os.listdir(os.path.join(data_path,path,subpath)):
                for file in os.listdir(os.path.join(data_path,path,subpath,subsubpath)):
                    if 'Ch' in file:
                        print(file)
                        img_gray=tif2gray(os.path.join(data_path,path,subpath,subsubpath,file))
                        img_denoise=denoising(os.path.join(data_path,path,subpath,subsubpath,file))
                        new_path=os.path.join('./STED_DENOISE/1',path,subpath,subsubpath)
                        print('---------------')
                        if not os.path.exists(new_path):
                            print(new_path)
                            os.makedirs(new_path)
                        # NOTE(review): img_denoise is computed but img_gray is
                        # written — looks like the wrong variable is saved.
                        cv2.imwrite(new_path+'/'+file, img_gray)
    # Pass 3: colorize the denoised images; '局部' ("local") folders use
    # coloring_local, everything else uses coloring_whole.  Note the
    # inconsistent output roots ('./' vs './STED_COLOR/20201202').
    data_path = './STED_DENOISE/1'
    fileList = os.listdir(data_path)
    for path in fileList:
        for subpath in os.listdir(os.path.join(data_path,path)):
            for subsubpath in os.listdir(os.path.join(data_path,path,subpath)):
                for file in os.listdir(os.path.join(data_path,path,subpath,subsubpath)):
                    if 'merge' not in file:
                        if '局部' not in path:
                            # filename=file.split('_')
                            img_denoise=coloring_whole(os.path.join(data_path,path,subpath,subsubpath,file),file)
                            new_path=os.path.join('./',path,subpath,subsubpath)
                            print(new_path)
                            if not os.path.exists(new_path):
                                os.makedirs(new_path)
                            cv2.imwrite(new_path+'/'+file, img_denoise)
                        else:
                            # filename=file.split('_')
                            img_denoise=coloring_local(os.path.join(data_path,path,subpath,subsubpath,file),file)
                            new_path=os.path.join('./STED_COLOR/20201202',path,subpath,subsubpath)
                            print(new_path)
                            if not os.path.exists(new_path):
                                os.makedirs(new_path)
                            cv2.imwrite(new_path+'/'+file, img_denoise)
    # merge
    # Pass 4: blend consecutive file pairs (sorted order) into merge_* files.
    data_path = './STED_COLOR1/'
    fileList = os.listdir(data_path)
    for path in fileList:
        for subpath in os.listdir(os.path.join(data_path,path)):
            for subsubpath in os.listdir(os.path.join(data_path,path,subpath)):
                file_list=sorted(os.listdir(os.path.join(data_path,path,subpath,subsubpath)))
                for index,file in enumerate(file_list):
                    # NOTE(review): file_list[index+1] raises IndexError when the
                    # directory holds an odd number of files — confirm inputs
                    # are always paired.
                    if index%2==0 and ('merge' not in file):
                        print(index,file)
                        img=merge(os.path.join(data_path,path,subpath,subsubpath,file_list[index+1]),os.path.join(data_path,path,subpath,subsubpath,file_list[index]))
                        new_path=os.path.join('./STED_COLOR1/',path,subpath,subsubpath)
                        cv2.imwrite(new_path+'/merge_'+file, img)
| StarcoderdataPython |
9743616 | '''
@author: <NAME>
@summary: Test cases to check the behavior when the batch request is
not constructed properly.
'''
import json
from django.test import TestCase
from tests import ensure_text_content
class TestBadBatchRequest(TestCase):
    '''
    Check the behavior of malformed batch requests: invalid HTTP methods,
    missing fields, non-list bodies, and views that raise.
    '''
    def _batch_request(self, method, path, data, headers={}):
        '''
        Prepares a single batch request definition dict.

        NOTE(review): the mutable default ``headers={}`` is harmless here
        because it is never mutated, but ``headers=None`` would be safer.
        '''
        return {"url": path, "method": method, "headers": headers, "body": data}
    def test_invalid_http_method(self):
        '''
        Make a batch request with invalid HTTP method ("select") and expect
        a 400 with a method-validation message.
        '''
        resp = ensure_text_content(
            self.client.post(
                "/api/v1/batch/",
                json.dumps([self._batch_request("select", "/views", "", {})]),
                content_type="application/json"
            )
        )
        self.assertEqual(resp.status_code, 400, "Method validation is broken!")
        self.assertEqual(resp.text.lower(), "invalid request method.", "Method validation is broken!")
    def test_missing_http_method(self):
        '''
        Make a batch request without HTTP method; the API must reject it.
        '''
        resp = ensure_text_content(
            self.client.post(
                "/api/v1/batch/",
                json.dumps([{"body": "/views"}]),
                content_type="application/json"
            )
        )
        self.assertEqual(resp.status_code, 400, "Method & URL validation is broken!")
        self.assertEqual(resp.text.lower(), "request definition should have url, method defined.", "Method validation is broken!")
    def test_missing_url(self):
        '''
        Make a batch request without the URL; the API must reject it.
        '''
        resp = ensure_text_content(
            self.client.post(
                "/api/v1/batch/",
                json.dumps([{"method": "get"}]),
                content_type="application/json"
            )
        )
        self.assertEqual(resp.status_code, 400, "Method & URL validation is broken!")
        self.assertEqual(resp.text.lower(), "request definition should have url, method defined.",
                         "Method validation is broken!")
    def test_invalid_batch_request(self):
        '''
        Make a batch request whose body is a dict instead of a list of
        request definitions; the API must reject it.
        '''
        resp = ensure_text_content(
            self.client.post(
                "/api/v1/batch/",
                json.dumps({"method": "get", "url": "/views/"}),
                content_type="application/json"
            )
        )
        self.assertEqual(resp.status_code, 400, "Batch requests should always be in list.")
        self.assertEqual(resp.text.lower(), "the body of batch request should always be list!",
                         "List validation is broken!")
    def test_view_that_raises_exception(self):
        '''
        Make a batch request to a view that raises; the per-request result
        must carry status 500 and the exception body.
        '''
        resp = ensure_text_content(
            self.client.post(
                "/api/v1/batch/",
                json.dumps([{"method": "get", "url": "/exception/"}]),
                content_type="application/json"
            )
        )
        resp = json.loads(resp.text)[0]
        self.assertEqual(resp['status_code'], 500, "Exceptions should return 500.")
        self.assertEqual(resp['body'].lower(), "exception", "Exception handling is broken!")
| StarcoderdataPython |
3235703 | #!/usr/bin/env python
"""
Given a string, return a new string where "not " has been added to the front. However, if the string already begins with
"not", return the string unchanged.
not_string('candy') == 'not candy'
not_string('x') == 'not x'
not_string('not bad') == 'not bad'
"""
def not_string(str):
    """Return *str* prefixed with 'not ', unless it already starts with 'not'."""
    # startswith() is equivalent to comparing the first three characters.
    return str if str.startswith('not') else 'not ' + str
def test_function():
    """Exercise not_string against the documented examples."""
    cases = [
        ('candy', 'not candy'),
        ('x', 'not x'),
        ('not bad', 'not bad'),
        ('bad', 'not bad'),
        ('not', 'not'),
        ('is not', 'not is not'),
        ('no', 'not no'),
    ]
    for given, expected in cases:
        assert not_string(given) == expected
if __name__ == '__main__':
    # Run the self-checks when executed as a script.
    test_function()
| StarcoderdataPython |
8176956 | from java import lang
lang.System.loadLibrary('GraphMolWrap')
from org.RDKit import *
from threading import Thread
import os
from find_props import funct_dict
def filter_prop(request):
    """Filter a list of molecules by a named property.

    ``request.function`` names a property in ``funct_dict`` (e.g. 'num_hba');
    ``request.max_ans`` / ``request.min_ans`` are optional upper/lower bounds.
    The computed value is attached to every molecule via ``setProp``, and
    molecules outside the bounds are dropped.  The filtered list is written
    back to ``request.body`` and the request object is returned.
    """
    prop_name = request.function
    compute = funct_dict[prop_name]
    kept = []
    for mol in request.body:
        value = compute(mol)
        # Record the value on the molecule regardless of the filter outcome.
        mol.setProp(prop_name, str(value))
        # NOTE(review): truthiness tests mean a bound of 0 is ignored —
        # presumably acceptable here, but worth confirming.
        if request.max_ans and value > request.max_ans:
            continue
        if request.min_ans and value < request.min_ans:
            continue
        kept.append(mol)
    request.body = kept
    return request
if __name__ == "__main__":
    # Call this function when calling the script
    # NOTE(review): `request` is not defined at module scope, so running this
    # file directly raises NameError — confirm the intended entry point.
    filter_prop(request)
| StarcoderdataPython |
344063 | """Preprocessing module."""
# pylint: disable=C0330
# pylint: disable=R0902
#import tensorflow
import argparse
import csv
import json
import logging
import re
import string
import sys
from typing import List
import preprocessor # type: ignore
import nltk # type: ignore
from nltk.stem.wordnet import WordNetLemmatizer # type: ignore
from nltk.tag import pos_tag # type: ignore
from nltk.tokenize import word_tokenize # type: ignore
MAX_TWEETS = -1
DIVISION = 25
def preprocess_tweets(infile: str, outfile: str) -> None:
    """Remove redundant and non-objective posts.

    Reads Tweet rows from *infile*, drops every Tweet whose text contains a
    URL (per Ejieh's thesis such posts lack subjectivity), wraps the rest in
    Tweet objects (which also clean/tokenize the text), and writes the
    surviving rows plus the derived columns to *outfile*.
    """
    logger = logging.getLogger("preprocessor")
    # Number of Tweets read
    counter: int = 0
    # List of all Tweets
    tweets: List[Tweet] = []
    # Begin reading
    with open(infile, "r") as csv_file:
        # CSV reader
        csv_reader = csv.reader(csv_file, delimiter=",")
        logger.info("Attached CSV reader")
        # Number of Tweets deleted due to URL
        # NOTE(review): url_blocked is counted but never reported.
        url_blocked = 0
        # Iterate
        for tweet in csv_reader:
            # Messaging checkpoints
            if not counter % DIVISION:
                logger.info("Processed %s Tweets", counter)
            # Break at limit (MAX_TWEETS == -1 effectively disables this,
            # since counter never reaches -1).
            if counter == MAX_TWEETS:
                break
            # Only add Tweet if it doesn't contain a URL.
            # As per Ejieh's master's thesis, the vast majority
            # of posts with URLs lack any subjectivity.
            ptn = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
            if not bool(re.search(ptn, tweet[0])):
                tweets.append(Tweet(tweet))
            else:
                url_blocked += 1
            counter += 1
        logger.info("Read %s Tweets in total", counter)
    # Finishing message
    logger.info("Only %s Tweets were kept", len(tweets))
    # Write the kept Tweets, including the derived cleaned text/tokens.
    with open(outfile, "w", encoding="utf-8") as output_file:
        tweet_writer = csv.writer(output_file)
        i = 1
        for tweet in tweets:  # type: ignore
            tweet_writer.writerow(
                [
                    tweet.full_text,  # type: ignore
                    tweet.created_at,  # type: ignore
                    tweet.source,  # type: ignore
                    tweet.tweet_id,  # type: ignore
                    tweet.retweet_count,  # type: ignore
                    tweet.favorite_count,  # type: ignore
                    tweet.user_name,  # type: ignore
                    tweet.user_id_str,  # type: ignore
                    tweet.user_handle,  # type: ignore
                    tweet.user_location,  # type: ignore
                    tweet.user_desc,  # type: ignore
                    tweet.user_protected,  # type: ignore
                    tweet.user_followers,  # type: ignore
                    tweet.user_created,  # type: ignore
                    tweet.user_verified,  # type: ignore
                    tweet.user_tweet_count,  # type: ignore
                    tweet.cleaned_text,  # type: ignore
                    json.dumps(tweet.cleaned_tokens),  # type: ignore
                ]
            )
            if not i % DIVISION:
                logger.info("Wrote Tweet #%s", i)
            i += 1
        logger.info("Wrote %s Tweets in total", len(tweets))
class Tweet:
    """In-memory representation of one scraped Tweet CSV row plus cleaned text."""
    def __init__(self, tweet_row: List[str]) -> None:
        """Initialize Tweet object from one CSV row (16 columns expected)."""
        # Existing members (copied straight from the CSV row)
        self.full_text = tweet_row[0]
        self.created_at = tweet_row[1]
        self.source = tweet_row[2]
        self.tweet_id = tweet_row[3]
        self.retweet_count = tweet_row[4]
        self.favorite_count = tweet_row[5]
        self.user_name = tweet_row[6]
        self.user_id_str = tweet_row[7]
        self.user_handle = tweet_row[8]
        self.user_location = tweet_row[9]
        self.user_desc = tweet_row[10]
        self.user_protected = tweet_row[11]
        self.user_followers = tweet_row[12]
        self.user_created = tweet_row[13]
        self.user_verified = tweet_row[14]
        self.user_tweet_count = tweet_row[15]
        # New members (derived during preprocessing)
        self.cleaned_text = Tweet.clean_tweet(self.full_text)
        self.cleaned_tokens = Tweet.normalize(word_tokenize(self.cleaned_text))
    @staticmethod
    def clean_tweet(full_text: str) -> str:
        """Return *full_text* stripped of URLs/emojis, mentions, and non-alpha characters."""
        # Said Ozcan's preprocessor (strips URLs, emojis, etc.)
        cleaned = str(preprocessor.clean(full_text))
        # Remove any remnant mentions
        cleaned = str(re.sub(r"@[A-Za-z0-9_]+", "", cleaned))
        # Remove non-alpha
        cleaned = str(re.sub(r"[^A-Za-z ]+", "", cleaned))
        return cleaned
    @staticmethod
    def normalize(tweet_tokens: List[str]) -> List[str]:
        """Lemmatize tokens (POS-aware) and lowercase them, dropping punctuation."""
        cleaned_tokens = []
        # Part of Speech tagging: map Penn tags to WordNet POS codes.
        for token, tag in pos_tag(tweet_tokens):
            if tag.startswith("NN"):
                pos = "n"
            elif tag.startswith("VB"):
                pos = "v"
            else:
                pos = "a"
            # Lemmatize
            # NOTE(review): a new WordNetLemmatizer is constructed per token;
            # hoisting it out of the loop would avoid repeated setup.
            lemmatizer = WordNetLemmatizer()
            token = lemmatizer.lemmatize(token, pos)
            if len(token) > 0 and token not in string.punctuation:
                cleaned_tokens.append(token.lower())
        return cleaned_tokens
def main() -> int:
    """Execute standalone: parse args, fetch NLTK data, run preprocessing.

    Returns 0 on success (used as the process exit code).
    """
    arg_p = argparse.ArgumentParser()
    arg_p.add_argument("infile", help="input .CSV file")
    arg_p.add_argument("outfile", help="output .CSV file")
    args = arg_p.parse_args()
    logging.basicConfig(
        level=logging.INFO, format="[%(levelname)s | %(name)s] %(message)s",
    )
    # NLTK caches these locally, so repeated runs are cheap no-ops.
    nltk.download("punkt")
    nltk.download("averaged_perceptron_tagger")
    nltk.download("wordnet")
    nltk.download("twitter_samples")
    nltk.download("stopwords")
    preprocess_tweets(args.infile, args.outfile)
    return 0
if __name__ == "__main__":
sys.exit(main())
| StarcoderdataPython |
1806448 | from django.shortcuts import render
from wechatpy.utils import check_signature
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from wechatpy import WeChatClient, parse_message
from wechatpy.replies import TextReply
from wechatpy.events import ScanCodeWaitMsgEvent
from wechatpy.exceptions import InvalidSignatureException, InvalidAppIdException
from wx_app.wxcrypt import wxdecrypt, wxencrypt
# Create your views here.
def wx_web(request):
    # Render the WeChat landing page template.
    return render(request, 'wx_app/wx_main.html')
# WeChat official-account credentials used for signature checking and
# message encryption/decryption.
# NOTE(review): secrets are hard-coded here; consider loading them from
# environment variables or Django settings instead of committing them.
TOKEN = 'w<PASSWORD>'
APPID = 'wx1c53a7cf55314e85'
ENCODING_AES_KEY = '<KEY>'
APPSCRET = 'd28ecedbf5ff74fe268595ea035a6b6f'
@csrf_exempt
def wx_main(request):
    """WeChat official-account webhook.

    GET:  server URL verification — echo ``echostr`` back when the
          signature checks out.
    POST: encrypted message push — decrypt, parse, and reply according to
          the message/event type; replies are re-encrypted before return.
    """
    if request.method == 'GET':
        signature = request.GET.get("signature", None)
        timestamp = request.GET.get("timestamp", None)
        nonce = request.GET.get("nonce", None)
        echostr = request.GET.get("echostr", None)
        try:
            check_signature(TOKEN, signature, timestamp, nonce)
            return HttpResponse(echostr)
        except InvalidSignatureException:
            return HttpResponse('微信接口Token验证失败!')
    else:
        msg = parse_message(wxdecrypt(request, APPID, ENCODING_AES_KEY, TOKEN))
        print(msg)
        if msg.type == 'text':
            # echo the received text back to the sender
            content = msg.content
            try:
                reply = TextReply(content=content, message=msg)
                # render() returns the reply as an XML string
                r_xml = reply.render()
                encryptmsg = wxencrypt(r_xml, request, APPID, ENCODING_AES_KEY, TOKEN)
                return HttpResponse(encryptmsg)
            except Exception as e:
                # handle as appropriate
                return HttpResponse('回复文本信息给你的时候出错啦')
        elif msg.type == 'image':
            return HttpResponse('还未处理')
        elif msg.type == 'location':
            reply = TextReply(
                content="我发现你了,你在" + msg.label, message=msg)
            r_xml = reply.render()
            encryptmsg = wxencrypt(
                r_xml, request, APPID, ENCODING_AES_KEY, TOKEN)
            return HttpResponse(encryptmsg)
        elif msg.type == 'voice':
            return HttpResponse('还未处理')
        elif msg.type == 'link':
            return HttpResponse('还未处理')
        elif msg.type == 'video':
            return HttpResponse('还未处理')
        elif msg.type == 'shortvideo':
            return HttpResponse('还未处理')
        elif msg.type == 'event':
            try:
                if msg.event == 'subscribe':
                    reply = TextReply(content="欢迎关注我,你真帅!", message=msg)
                    r_xml = reply.render()
                    encryptmsg = wxencrypt(
                        r_xml, request, APPID, ENCODING_AES_KEY, TOKEN)
                    return HttpResponse(encryptmsg)
                elif msg.event == 'unsubscribe':
                    return HttpResponse('还未处理')
                elif msg.event == 'subscribe_scan':
                    return HttpResponse('还未处理')
                elif msg.event == 'scan':
                    return HttpResponse('还未处理')
                elif msg.event == 'location':
                    # BUG FIX: the attribute is ``label``; the previous
                    # ``msg.lable`` raised AttributeError on every
                    # location event.
                    reply = TextReply(
                        content="我发现你了,你在" + msg.label, message=msg)
                    r_xml = reply.render()
                    encryptmsg = wxencrypt(
                        r_xml, request, APPID, ENCODING_AES_KEY, TOKEN)
                    return HttpResponse(encryptmsg)
                elif msg.event == 'click':
                    reply = TextReply(
                        content="我知道你刚才click了某个菜单", message=msg)
                    r_xml = reply.render()
                    encryptmsg = wxencrypt(
                        r_xml, request, APPID, ENCODING_AES_KEY, TOKEN)
                    return HttpResponse(encryptmsg)
                elif msg.event == 'view':
                    print('hhhh哈哈哈哈哈啊哈')
                    reply = TextReply(
                        content="我知道你刚才点了哪个菜单", message=msg)
                    r_xml = reply.render()
                    encryptmsg = wxencrypt(
                        r_xml, request, APPID, ENCODING_AES_KEY, TOKEN)
                    # For URL ("view") menu items, Django may only get here
                    # after handling the redirect target, so an earlier reply
                    # to the client is not possible at this point.
                    return HttpResponse(encryptmsg)
                # mass-send job finished notification
                elif msg.event == 'masssendjobfinish':
                    return HttpResponse('还未处理')
                # template-message job finished notification
                elif msg.event == 'templatesendjobfinish':
                    return HttpResponse('还未处理')
                # scan-code push event
                elif msg.event == 'scancode_push':
                    return HttpResponse('还未处理')
                # scan-code push event that shows a "receiving message" prompt
                elif msg.event == 'scancode_waitmsg':
                    return HttpResponse('还未处理')
                # system-camera photo event
                elif msg.event == 'pic_sysphoto':
                    return HttpResponse('还未处理')
                # camera-or-album photo event
                elif msg.event == 'pic_photo_or_album':
                    return HttpResponse('还未处理')
                # WeChat album picker event
                elif msg.event == 'pic_weixin':
                    return HttpResponse('还未处理')
                # location picker event
                elif msg.event == 'location_select':
                    return HttpResponse('还未处理')
                # WeChat verification event pushes
                # qualification verification succeeded
                elif msg.event == 'qualification_verify_success':
                    return HttpResponse('还未处理')
                # qualification verification failed
                elif msg.event == 'qualification_verify_fail':
                    return HttpResponse('还未处理')
                # name verification succeeded
                elif msg.event == 'naming_verify_success':
                    return HttpResponse('还未处理')
                # name verification failed
                elif msg.event == 'naming_verify_fail':
                    return HttpResponse('还未处理')
                # annual review notice
                elif msg.event == 'annual_renew':
                    return HttpResponse('还未处理')
                # verification expired notice
                elif msg.event == 'verify_expired':
                    return HttpResponse('还未处理')
                # WeChat product-scan events
                # product home page opened
                elif msg.event == 'user_scan_product':
                    return HttpResponse('还未处理')
                # official account entered from a product page
                elif msg.event == 'user_scan_product_enter_session':
                    return HttpResponse('还未处理')
                # asynchronous geo-location push
                elif msg.event == 'user_scan_product_async':
                    return HttpResponse('还未处理')
                # product review result
                elif msg.event == 'user_scan_product_verify_action':
                    return HttpResponse('还未处理')
                # user followed the account from a product page
                elif msg.event == 'subscribe_scan_product':
                    return HttpResponse('还未处理')
                # user authorized an invoice (carries an order id)
                elif msg.event == 'user_authorize_invoice':
                    return HttpResponse('还未处理')
                # invoice status updated
                elif msg.event == 'update_invoice_status':
                    return HttpResponse('还未处理')
                # user submitted an invoice title
                elif msg.event == 'submit_invoice_title':
                    return HttpResponse('还未处理')
                else:
                    reply = TextReply(content="干嘛呢!"+msg.event, message=msg)
                    r_xml = reply.render()
                    encryptmsg = wxencrypt(
                        r_xml, request, APPID, ENCODING_AES_KEY, TOKEN)
                    return HttpResponse(encryptmsg)
            except Exception as e:
                # swallow handler errors and return a generic failure message
                return HttpResponse('出错啦')
        else:
            return HttpResponse('不明消息')
else:
return HttpResponse('不明消息')
def create_menu(request):
    """Create the official account's custom menu via the WeChat API.

    Builds a three-entry menu (visitor booking, product demo, about) and
    pushes it with WeChatClient.  Always returns HTTP 'ok'.
    """
    # First argument is the account's appID, second is the appsecret.
    # NOTE(review): these duplicate the APPID/APPSCRET module constants.
    client = WeChatClient("wx1c53a7cf55314e85",
                          "d28ecedbf5ff74fe268595ea035a6b6f")
    client.menu.create({
        "button": [
            {
                "name": "智能访客",
                "sub_button": [
                    {
                        "type": "view",
                        "name": "访客预约",
                        "url": "http://3fcba1c69bb95399.natapp.cc/wx",
                        "sub_button": []
                    }
                ]
            },
            {
                "name": "产品演示",
                "sub_button": [
                    {
                        "type": "view",
                        "name": "资产管理",
                        "url": "http://127.0.0.1",
                        "sub_button": []
                    }
                ]
            },
            {
                "type": "click",
                "name": "关于戊辰",
                "key": "<KEY>"
            }
        ]
    })
    return HttpResponse('ok')
| StarcoderdataPython |
4960331 | <filename>train/abstract2vec.py
import os, nltk, csv, re, gensim, logging
from nltk import RegexpTokenizer
from nltk.corpus import stopwords
from os.path import isfile, join
from random import shuffle
from gensim import utils
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
from operator import itemgetter
from sqlalchemy import create_engine, MetaData, Table, select
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='.pypatent.log')
tokenizer = RegexpTokenizer(r'\w+')
eng_stopwords = nltk.corpus.stopwords.words('english')
eng_stopwords.append('abstract')
'''
Modified LabeledLineSentence Class from
https://medium.com/@klintcho/doc2vec-tutorial-using-gensim-ab3ac03d3a1
Results in 1 vec per 1 doc, rather than 1 vec for each sentence in a doc.
'''
class LabeledLineSentence(object):
    """Yield one LabeledSentence per document (not one per sentence).

    Adapted from the doc2vec tutorial referenced above: each document in
    ``doc_list`` is paired with the label at the same position in
    ``labels_list``.
    """

    def __init__(self, doc_list, labels_list):
        self.labels_list = labels_list
        self.doc_list = doc_list

    def __iter__(self):
        for position, document in enumerate(self.doc_list):
            yield LabeledSentence(document.split(), [self.labels_list[position]])
# Pre-process text: lowercase, tokenize, and drop English stopwords.
def clean_text(unprocessed_text):
    """Return *unprocessed_text* lowercased, tokenized, stopwords removed, space-joined."""
    tokens = tokenizer.tokenize(unprocessed_text.lower())
    kept = (word for word in tokens if word not in eng_stopwords)
    return ' '.join(kept)
# Function called to actually train our doc2vec model.
def train_d2v():
    """Train a Doc2Vec model on local patent/abstract files plus MIMIC notes.

    Reads every .txt file next to this module ("US" in the name = patent,
    otherwise abstract), appends note texts from the sqlite mimic.db, trains
    a 300-dim Doc2Vec model, saves it to ./pypatent.d2v, and returns the
    list of document labels that were kept for training.
    """
    filedir = os.path.abspath(os.path.join(os.path.dirname(__file__)))
    files = os.listdir(filedir)
    docLabels = [f for f in files if f.endswith('.txt')]
    logging.info(docLabels)
    keep_labels = [] #not going to keep labels for Null Text docs
    data = []
    logging.info("reading through patents and abstracts ... ")
    for doc in docLabels:
        #print(doc)
        source = os.path.abspath(os.path.join(os.path.dirname(__file__), doc))
        with open(source, "r", encoding="ISO-8859-1") as f:
            if re.match(".*US.*", doc): #for patents
                #print("PATENT: " + doc)
                the_text = f.read()
                keep_text = the_text.rstrip()
                clean_string = clean_text(keep_text)
                data.append(clean_string)
                keep_labels.append(doc)
            if not re.match(".*US.*", doc): #for abstracts, only get the abstract parts
                #print("ABSTRACT: " + doc)
                try:
                    the_text = f.readlines() #all abstract files are at least 9 lines
                    authors = the_text[0] #don't train on author names or titles!
                    titles = the_text[3]
                    the_real_text = the_text[4:]
                    joined_text = (' ').join(the_real_text) #make it a string
                    keep_text = joined_text.rstrip() #remove \n\t\r etc.
                    clean_string = clean_text(keep_text) #pre-process the text
                    if clean_string.startswith("null"): #skip training of "Null Text" docs
                        pass
                    else:
                        data.append(clean_string)
                        keep_labels.append(doc)
                except Exception as e: #if a document is empty skip it
                    logging.info(e)
    logging.info("done reading through patents and abstracts!!")
    #Now add the database stuff
    logging.info("Now going to add the database stuff!")
    engine = create_engine('sqlite:///mimic.db')
    logging.info("initiated database engine")
    conn = engine.connect()
    logging.info("setting metadata")
    metadata = MetaData(bind=engine) #init metadata. will be empty
    metadata.reflect(engine) #retrieve db info for metadata (tables, columns, types)
    mydata = Table('mydata', metadata)
    logging.info("metadata set!!!")
    #Query db for index and text
    logging.info("beginning db query")
    s = select([mydata.c.index, mydata.c.TEXT])
    result = conn.execute(s)
    logging.info("retrieved result form db!")
    for row in result:
        #label: MIMIC rows are labeled "mimic<row index>"
        index = row["index"]
        index_label = "mimic" + str(index)
        keep_labels.append(index_label)
        #text
        the_text = row["TEXT"]
        keep_text = the_text.rstrip()
        clean_string = clean_text(keep_text)
        data.append(clean_string)
    logging.info("done appending MIMIC texts+labels to data and keep_labels")
    logging.info("* Creating LabeledLineSentence Class ...")
    it = LabeledLineSentence(data, keep_labels)
    logging.info("* Created LabeledLineSentences!!! ")
    logging.info("* Initializing Doc2Vec Model ... ")
    # NOTE: this uses the old gensim API (size=, train() without epochs).
    model = gensim.models.Doc2Vec(size=300, window=10, min_count=5, workers=11,alpha=0.025, min_alpha=0.025) # use fixed learning rate
    logging.info("* Training Doc2Vec Model ... ")
    model.build_vocab(it)
    for epoch in range(10):
        model.train(it)
        model.alpha -= 0.002 # decrease the learning rate
        model.min_alpha = model.alpha # fix the learning rate, no decay
        # NOTE(review): train() is called twice per epoch (before and after
        # the learning-rate decay) — confirm whether this is intentional.
        model.train(it)
    #model.save('./a2v.d2v')
    model.save('./pypatent.d2v')
    logging.info("* Saving Doc2Vec Model !!!")
    return keep_labels
# Load a previously saved Doc2Vec model from disk.
def load_model(model_dot_d2v):
    """Return the Doc2Vec model stored at *model_dot_d2v*."""
    logging.info("* Loading Doc2Vec Model ... ")
    loaded = Doc2Vec.load(model_dot_d2v)
    logging.info("* Loaded Saved Doc2Vec Model !!!")
    return loaded
def get_data(keep_labels):
    """Split document labels into abstract and patent records.

    Args:
        keep_labels: document file names ("Null Text" files already removed).

    Returns:
        ``(abstracts, patents)`` where each patent is ``{"label": name}`` and
        each abstract is ``{"label": name, "author": line0, "title": line3}``
        read from the file of that name next to this module.  Labels starting
        with 'mimic' are skipped entirely.

    Raises:
        OSError: if an abstract file cannot be opened.
        IndexError: if an abstract file has fewer than four lines.
    """
    filedir = os.path.abspath(os.path.dirname(__file__))
    abstracts = []
    patents = []
    for doc in keep_labels:
        if re.match(".*US.*", doc):  # documents with "US" in the name are patents
            patents.append({"label": doc})
        elif doc.startswith('mimic'):  # we don't want MIMIC data here
            continue
        else:
            # BUG FIX: use a context manager so the file handle is always
            # closed (the previous version leaked one handle per abstract).
            with open(os.path.join(filedir, doc), 'r') as abstract:
                abstract_lines = abstract.readlines()
            authors = abstract_lines[0]  # first line holds the author list
            titles = abstract_lines[3]   # fourth line holds the title
            abstracts.append({"label": doc, "author": authors, "title": titles})
    return abstracts, patents
def compare_patents_to_abstracts(keep_labels):
    """Score each patent against its matching abstracts and write a CSV.

    Loads the trained Doc2Vec model, pairs every patent with abstracts whose
    file name shares the patent's numeric prefix, computes cosine similarity
    between their document vectors, and writes the results (one row per
    patent/abstract pair) to FINALresults.csv.
    """
    results_list = []
    model = load_model('pypatent.d2v')
    abstracts, patents = get_data(keep_labels)
    #sort patents
    sorted_patents = sorted(patents, key=itemgetter('label'))
    for p in sorted_patents:
        p_label = p["label"]
        # strip the "_US<number>.txt" suffix to recover the pairing prefix
        p_number = re.sub("(\_US.*\.txt)", '', p_label)
        p_vec = model.docvecs[p_label] #Patent vector
        P = sparse.csr_matrix(p_vec) #Sparse Patent Vector
        for a in abstracts:
            if a["label"].startswith( str(p_number)+"_" ):
                a_label = a["label"]
                a_authors = (a["author"]).strip('\t\n\r')
                a_title = (a["title"]).strip('\t\n\r')
                a_vec = model.docvecs[a_label]
                A = sparse.csr_matrix(a_vec)
                sim = cosine_similarity(P, A) #cos(patent, abstract) #
                percent = str((sim[0][0]) * 100) + "%"
                # [patent_name, abstract_label, percent, abstract_title, abstract_authors ]
                r_list = [p_label, a_label, percent, a_title]
                results_list.append(r_list)
                #print(str(p_label) + " is " + str(sim) + " similar to " + str(a_label))
    with open('FINALresults.csv', 'w') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        filewriter.writerow(['PatentName', 'AbstractFile', 'PercentSimilarity', 'AbstractTitle'])
        for row in results_list:
            filewriter.writerow(row)
#keep_labels = train_d2v()
#compare_patents_to_abstracts(keep_labels)
#model = load_model('pypatent.d2v')
#print(model.docvecs['98_US20050142162.txt'])
# print (model.most_similar('invention'))
| StarcoderdataPython |
3269981 | # Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import os
import requests
MFN_ELASTICSEARCH = os.getenv("MFN_ELASTICSEARCH", os.getenv("MFN_HOSTNAME"))
ELASTICSEARCH_HOST = MFN_ELASTICSEARCH.split(':')[0]
try:
ELASTICSEARCH_PORT = MFN_ELASTICSEARCH.split(':')[1]
except:
ELASTICSEARCH_PORT = 9200
ELASTICSEARCH_URL = "http://" + ELASTICSEARCH_HOST + ":" + str(ELASTICSEARCH_PORT)
def delete_workflow_index(index_name):
    """Delete the Elasticsearch index holding a workflow's logs.

    Connection failures are reported and swallowed (best effort); any other
    error from the HTTP client is re-raised.
    """
    target = ELASTICSEARCH_URL + "/" + index_name
    try:
        requests.delete(target, proxies={"http": None})
    except Exception as exc:
        if type(exc).__name__ != 'ConnectionError':
            raise exc
        print('Could not connect to: ' + ELASTICSEARCH_URL)
def handle(value, sapi):
    """Delete a user's workflow and all of its associated artifacts.

    *value* must carry "email", "storage_userid" and a "workflow" dict with
    an "id".  Deletion only proceeds when the workflow's status is
    'undeployed' or 'failed'.  Removes the workflow from the user's list,
    deletes its Elasticsearch log index, its metadata/json/requirements
    entries, and unhooks it from any triggerable tables.  Returns a
    {"status": ..., "data": {"message": ...}} response dict.
    """
    assert isinstance(value, dict)
    data = value
    response = {}
    response_data = {}
    success = False
    email = data["email"]
    storage_userid = data["storage_userid"]
    if "workflow" in data:
        workflow = data["workflow"]
        if "id" in workflow:
            # Mapping of workflow name -> workflow id for this user.
            workflows = sapi.get(email + "_list_workflows", True)
            if workflows is not None and workflows != "":
                workflows = json.loads(workflows)
                if workflow["id"] in workflows.values():
                    wf = sapi.get(email + "_workflow_" + workflow["id"], True)
                    if wf is not None and wf != "":
                        wf = json.loads(wf)
                        if wf["status"] == "undeployed" or wf["status"] == "failed":
                            # Remove the (single) name entry pointing at this id;
                            # the break makes the delete-during-iteration safe.
                            for wn in workflows:
                                if workflows[wn] == workflow["id"]:
                                    del workflows[wn]
                                    break
                            # delete workflow logs
                            delete_workflow_index("mfnwf-" + workflow["id"])
                            sapi.delete(email + "_workflow_" + workflow["id"], True, True)
                            #sapi.delete(email + "_workflow_json_" + workflow["id"], True, True)
                            #sapi.delete(email + "_workflow_requirements_" + workflow["id"], True, True)
                            dlc = sapi.get_privileged_data_layer_client(storage_userid)
                            dlc.delete("workflow_json_" + workflow["id"])
                            dlc.delete("workflow_requirements_" + workflow["id"])
                            print("Current workflow metadata: " + str(wf))
                            # Detach the workflow from every table that could
                            # trigger it.
                            if "associatedTriggerableTables" in wf:
                                for table in wf["associatedTriggerableTables"]:
                                    removeWorkflowFromTableMetadata(email, table, wf["name"], dlc)
                            dlc.shutdown()
                            sapi.put(email + "_list_workflows", json.dumps(workflows), True, True)
                            response_data["message"] = "Deleted workflow " + workflow["id"] + "."
                            success = True
                        else:
                            response_data["message"] = "Couldn't delete workflow; workflow is still deployed. Undeploy workflow first."
                    else:
                        response_data["message"] = "Couldn't delete workflow; workflow metadata is not valid."
                else:
                    response_data["message"] = "Couldn't delete workflow; no such workflow."
            else:
                response_data["message"] = "Couldn't delete workflow; no such workflow."
        else:
            response_data["message"] = "Couldn't delete workflow; malformed input."
    else:
        response_data["message"] = "Couldn't delete workflow; malformed input."
    if success:
        response["status"] = "success"
    else:
        response["status"] = "failure"
    response["data"] = response_data
    sapi.log(json.dumps(response))
    return response
def removeWorkflowFromTableMetadata(email, tablename, workflowname, dlc):
    """Remove a workflow's entry from a triggerable table's metadata list.

    Loads the JSON metadata stored under ``tablename`` in the
    'triggersInfoTable' data-layer table, removes the first entry whose
    "wfname" equals ``workflowname``, writes the list back, then re-reads
    it for logging.

    Args:
        email: user identifier, used only in log output.
        tablename: triggerable table whose metadata is updated; also the
            storage key within the triggers metadata table.
        workflowname: workflow to detach from the table.
        dlc: data layer client exposing get()/put() with a tableName kwarg.
    """
    metadata_key = tablename
    triggers_metadata_table = 'triggersInfoTable'
    print("[removeWorkflowFromTableMetadata] User: " + email + ", Workflow: " + workflowname + ", Table: " + tablename)
    current_meta = dlc.get(metadata_key, tableName=triggers_metadata_table)
    meta_list = json.loads(current_meta)
    # BUG FIX: the original guard was `type(meta_list == type([]))`, which is
    # `type(False)` -> bool -> always truthy, so the check never did anything.
    # Use isinstance to actually verify the stored metadata is a list.
    if isinstance(meta_list, list):
        for i in range(len(meta_list)):
            if meta_list[i]["wfname"] == workflowname:
                del meta_list[i]
                break
        dlc.put(metadata_key, json.dumps(meta_list), tableName=triggers_metadata_table)
        # Brief pause so the follow-up read observes the write
        # (presumably an eventual-consistency workaround — TODO confirm).
        time.sleep(0.2)
        updated_meta = dlc.get(metadata_key, tableName=triggers_metadata_table)
        updated_meta_list = json.loads(updated_meta)
        print("[removeWorkflowFromTableMetadata] User: " + email + ", Workflow: " + workflowname + ", Table: " + tablename + ", Updated metadata: " + str(updated_meta_list))
| StarcoderdataPython |
1695218 | <gh_stars>1-10
import os
from prediction.src.opt import model_path, classes
from prediction.src.utils import load_model, load_image
# Load the trained model once at import time so every prediction reuses it.
if os.path.exists(model_path):
    model = load_model(model_path)
    print("Model successfully loaded!")
else:
    # NOTE(review): `model` stays undefined on this branch, so a later call to
    # predict_image() will raise NameError — confirm this is the intended
    # failure mode rather than exiting here.
    print("Model file path does not exist or is incorrect! Please check prediction/src/opt.py")
def predict_image(image_path):
    """Run the globally loaded model on one image.

    Returns a list of dicts of the form ``{'name': <class name>, 'proba': <float>}``,
    one per class, in the order defined by ``classes``.
    """
    img = load_image(image_path)
    raw_scores = model.predict(img)
    predictions = []
    for idx, score in enumerate(list(raw_scores[0])):
        predictions.append({'name': classes[idx], 'proba': float(score)})
    return predictions
| StarcoderdataPython |
9799936 | <reponame>sma-software/openviriato.algorithm-platform.py-client<filename>py_client/conversion/convert_json_to_aidm_for_end_to_end_test_tools.py
from enum import Enum
from typing import List, Union, Dict, Type
from py_client.aidm import StopStatus, UpdateTimesTrainPathNode, UpdateStopTimesTrainPathNode, IncomingRoutingEdge, \
OutgoingRoutingEdge, CrossingRoutingEdge, UpdateTrainRoute, StationEntryOrExit, TableDefinition, TableCellDataType, \
TableTextCell, TableColumnDefinition, TableAlgorithmNodeCell, TableRow, TableAlgorithmTrainCell, \
UpdateRunTimesTrainPathSegment, TimeWindow, OutgoingNodeTrackRoutingEdge, \
IncomingNodeTrackRoutingEdge, AlgorithmRosterLinkDefinition
from py_client.conversion.algorithm_platform_json_to_aidm_converter import convert
from py_client.conversion.converter_helpers import convert_keys_to_snake_case, parse_to_datetime, \
parse_to_timedelta_or_none, RoutingEdgeType
# TODO: VPLAT-8570: Remove this module from py_client (used only for End-to-End-Test)
def convert_list_of_json_to_update_train_times_node(attribute_dict: dict) -> List[UpdateTimesTrainPathNode]:
    """Convert every entry under "train_path_nodes" into an UpdateTimesTrainPathNode."""
    converted_nodes = []
    for node_json in attribute_dict["train_path_nodes"]:
        converted_nodes.append(convert_json_to_update_train_times_node(node_json))
    return converted_nodes
def convert_json_to_update_train_times_node(attribute_dict: dict) -> UpdateTimesTrainPathNode:
    """Build an UpdateTimesTrainPathNode from a camelCase JSON dict, parsing both times."""
    snake_case_dict = convert_keys_to_snake_case(attribute_dict)
    snake_case_dict["arrival_time"] = parse_to_datetime(snake_case_dict["arrival_time"])
    snake_case_dict["departure_time"] = parse_to_datetime(snake_case_dict["departure_time"])
    return convert(UpdateTimesTrainPathNode, snake_case_dict)
def convert_json_to_update_stop_times_train_path_node(attribute: dict) -> UpdateStopTimesTrainPathNode:
    """Build an UpdateStopTimesTrainPathNode, parsing times and mapping the stop status enum."""
    snake_case_dict = convert_keys_to_snake_case(attribute)
    for time_key in ('arrival_time', 'departure_time'):
        snake_case_dict[time_key] = parse_to_datetime(snake_case_dict[time_key])
    snake_case_dict["stop_status"] = convert_to_aidm_enum_from_string(snake_case_dict["stop_status"], StopStatus)
    return convert(UpdateStopTimesTrainPathNode, snake_case_dict)
def convert_json_to_update_run_times_train_path_segment(attribute: dict) -> UpdateRunTimesTrainPathSegment:
    """Build an UpdateRunTimesTrainPathSegment, parsing node times and the optional minimum run time."""
    snake_case_dict = convert_keys_to_snake_case(attribute)
    snake_case_dict["to_node_arrival_time"] = parse_to_datetime(snake_case_dict["to_node_arrival_time"])
    snake_case_dict["from_node_departure_time"] = parse_to_datetime(snake_case_dict["from_node_departure_time"])
    snake_case_dict["minimum_run_time"] = parse_to_timedelta_or_none(snake_case_dict["minimum_run_time"])
    return convert(UpdateRunTimesTrainPathSegment, snake_case_dict)
def extract_first_dict_value(attribute_dict: dict) -> object:
    """Return the value stored under the dict's first key (insertion order).

    Raises IndexError for an empty dict, matching the original behavior.
    """
    keys_in_order = list(attribute_dict)
    return attribute_dict[keys_in_order[0]]
def create_update_train_route_for_end_to_end_test(evaluated_parameter_mapping: dict) -> UpdateTrainRoute:
    """Build an UpdateTrainRoute for end-to-end tests from an evaluated parameter mapping.

    The mapping must contain "start_train_path_node_id", "end_train_path_node_id"
    and "routing_edges"; each routing-edge dict carries a "type" discriminator
    selecting the concrete routing-edge class.

    Raises:
        TypeError: if an edge's "type" is not a known RoutingEdgeType value.
    """
    start_train_path_node_id: int = evaluated_parameter_mapping["start_train_path_node_id"]
    end_train_path_node_id: int = evaluated_parameter_mapping["end_train_path_node_id"]
    routing_edges: List[Dict[str, Union[str, dict]]] = evaluated_parameter_mapping["routing_edges"]
    # Dispatch table from the JSON "type" discriminator to the edge class;
    # replaces the if/elif chain and makes new edge types a one-line addition.
    edge_class_by_type = {
        RoutingEdgeType.outgoing.value: OutgoingRoutingEdge,
        RoutingEdgeType.outgoing_node_track.value: OutgoingNodeTrackRoutingEdge,
        RoutingEdgeType.incoming.value: IncomingRoutingEdge,
        RoutingEdgeType.incoming_node_track.value: IncomingNodeTrackRoutingEdge,
        RoutingEdgeType.crossing.value: CrossingRoutingEdge,
    }
    converted_routing_edges = []
    # The original enumerated with an unused index `i`; iterate directly.
    for edge_as_dict in routing_edges:
        class_fields_as_dict = convert_keys_to_snake_case(edge_as_dict)
        edge_type = class_fields_as_dict.pop("type")
        edge_class = edge_class_by_type.get(edge_type)
        if edge_class is None:
            raise TypeError(f"{edge_type} is not defined as a routing edge")
        converted_routing_edges.append(edge_class(**class_fields_as_dict))
    return UpdateTrainRoute(start_train_path_node_id, end_train_path_node_id, converted_routing_edges)
def create_table_definition_for_end_to_end_to_test(object_as_json: dict) -> TableDefinition:
    """Build a TableDefinition from the "table_definition" section of a JSON payload."""
    table_definition_json = convert_keys_to_snake_case(object_as_json)["table_definition"]
    table_name = table_definition_json["name"]
    column_definitions = []
    for raw_column in table_definition_json["columns"]:
        column_json = convert_keys_to_snake_case(raw_column)
        column_key = column_json["key"]
        header_cell = TableTextCell(column_key, column_json["header"])
        header_type = convert_to_aidm_enum_from_string(
            column_json["header_data_type"],
            TableCellDataType)
        column_type = convert_to_aidm_enum_from_string(
            column_json["column_data_type"],
            TableCellDataType)
        column_definitions.append(TableColumnDefinition(column_key, header_cell, header_type, column_type))
    return TableDefinition(table_name, column_definitions)
def create_table_rows_for_end_to_end_to_test(rows_as_json: dict) -> List[TableRow]:
    """Build TableRow objects from the "rows" section of a JSON payload.

    The cell subtype is chosen by which discriminating key is present:
    "value" -> text cell, "node_id" -> node cell, "train_id" -> train cell.
    Cells carrying none of these keys are silently skipped (original behavior).
    """
    table_rows = []
    for row_as_json in rows_as_json["rows"]:
        raw_cells = convert_keys_to_snake_case(row_as_json)["row"]
        converted_cells = []
        for raw_cell in raw_cells:
            cell_json = convert_keys_to_snake_case(raw_cell)
            column_key = cell_json['column_key']
            if "value" in cell_json:
                converted_cells.append(TableTextCell(column_key, cell_json['value']))
            elif "node_id" in cell_json:
                converted_cells.append(TableAlgorithmNodeCell(column_key, cell_json['node_id']))
            elif "train_id" in cell_json:
                converted_cells.append(TableAlgorithmTrainCell(column_key, cell_json['train_id']))
        table_rows.append(TableRow(converted_cells))
    return table_rows
def convert_to_aidm_enum_from_string(
        enum_as_string: str,
        enum_to_convert_to: Type[Union[StopStatus, StationEntryOrExit, TableCellDataType]]) -> \
        Union[StopStatus, StationEntryOrExit, TableCellDataType]:
    """Map a raw string onto the enum member whose value equals it.

    Raises:
        TypeError: if no member of the target enum carries that value.
    """
    matching_members = [member for member in enum_to_convert_to if member.value == enum_as_string]
    if matching_members:
        return matching_members[0]
    raise TypeError("{0} is not defined as member of {1}".format(enum_as_string, str(enum_to_convert_to)))
def convert_string_to_stop_status(dict_with_stop_status_as_string) -> StopStatus:
    """Read "stop_status_as_string" from the dict and map it to a StopStatus member."""
    return convert_to_aidm_enum_from_string(
        dict_with_stop_status_as_string["stop_status_as_string"], StopStatus)
def convert_string_to_station_entry_or_exit(
        dict_with_station_entry_or_exit_as_string: Dict[str, str]) -> StationEntryOrExit:
    """Read "station_entry_or_exit_as_string" from the dict and map it to a StationEntryOrExit member."""
    return convert_to_aidm_enum_from_string(
        dict_with_station_entry_or_exit_as_string["station_entry_or_exit_as_string"], StationEntryOrExit)
def convert_json_with_url_encoding_to_time_window(url_encoded_time_window_dict: Dict[str, object]) -> TimeWindow:
    """Decode "%3A" back to ":" in every value (mutating the input dict), then convert to a TimeWindow.

    NOTE(review): only the colon escape is decoded, not general URL encoding —
    presumably the values are ISO timestamps; confirm with the caller.
    """
    for field_name in url_encoded_time_window_dict.keys():
        decoded_value = str(url_encoded_time_window_dict[field_name]).replace("%3A", ":")
        url_encoded_time_window_dict[field_name] = decoded_value
    return convert(TimeWindow, url_encoded_time_window_dict)
def concatenate_argument_values_to_list(*kwargs) -> list:
    """Build a dict from the positional arguments and return its values as a list.

    Accepts whatever ``dict(...)`` accepts: nothing, a mapping, or an iterable
    of key/value pairs.
    """
    values_by_key = dict(*kwargs)
    return list(values_by_key.values())
class EndToEndTestParameterEnum(Enum):
    # Single known value used by the end-to-end test parameter mapping.
    optionValue3 = "optionValue3"
def convert_request_body_to_algorithm_roster_link_definitions(
    request_body: Dict[str, List[Dict[str, Union[int, str]]]]
) -> List[AlgorithmRosterLinkDefinition]:
    """Convert the request body's roster-link entries into AlgorithmRosterLinkDefinition objects.

    The "type" discriminator is first stripped from every entry (mutating the
    input), then the remaining keys become constructor arguments.
    """
    definitions_as_json = request_body["algorithm_roster_link_definitions"]
    for definition in definitions_as_json:
        definition.pop("type")
    converted_definitions = []
    for definition in definitions_as_json:
        converted_definitions.append(
            AlgorithmRosterLinkDefinition(**convert_keys_to_snake_case(definition)))
    return converted_definitions
| StarcoderdataPython |
4926400 | """
This template is written by @Nocturnal-2
What does this quickstart script aim to do?
- I do some unfollow and like by tags mostly
NOTES:
- I am an one month old InstaPy user, with a small following. So my numbers
in settings are bit conservative.
"""
from instapy import InstaPy
from instapy import smart_run
# get a session!
session = InstaPy(username='', password='')
# let's go! :>
with smart_run(session):
""" Start of parameter setting """
# don't like if a post already has more than 150 likes
session.set_delimit_liking(enabled=True, max=150, min=0)
# don't comment if a post already has more than 4 comments
session.set_delimit_commenting(enabled=True, max=4, min=0)
"""I used to have potency_ratio=-0.85 and max_followers=1200 for
set_relationship_bounds()
Having a stricter relationship bound to target only low profiles
users was not very useful,
as interactions/sever calls ratio was very low. I would reach the
server call threshold for
the day before even crossing half of the presumed safe limits for
likes, follow and comments (yes,
looks like quiet a lot of big(bot) managed accounts out there!!).
So I relaxed it a bit to -0.50 and 2000 respectively.
"""
session.set_relationship_bounds(enabled=True,
potency_ratio=-0.50,
delimit_by_numbers=True,
max_followers=2000,
max_following=3500,
min_followers=25,
min_following=25)
session.set_do_comment(True, percentage=20)
session.set_do_follow(enabled=True, percentage=20, times=2)
session.set_comments(['Amazing!', 'Awesome!!', 'Cool!', 'Good one!',
'Really good one', 'Love this!', 'Like it!',
'Beautiful!', 'Great!', 'Nice one'])
session.set_sleep_reduce(200)
""" Get the list of non-followers
I duplicated unfollow_users() to see a list of non-followers which I
run once in a while when I time
to review the list
"""
# session.just_get_nonfollowers()
# my account is small at the moment, so I keep smaller upper threshold
session.set_quota_supervisor(enabled=True,
sleep_after=["likes", "comments_d", "follows",
"unfollows", "server_calls_h"],
sleepyhead=True, stochastic_flow=True,
notify_me=True,
peak_likes=(100, 700),
peak_comments=(25, 200),
peak_follows=(48, 125),
peak_unfollows=(35, 400),
peak_server_calls=(None, 3000))
""" End of parameter setting """
""" Actions start here """
# Unfollow users
""" Users who were followed by InstaPy, but not have followed back will
be removed in
One week (168 * 60 * 60)
Yes, I give a liberal one week time to follow [back] :)
"""
session.unfollow_users(amount=25, InstapyFollowed=(True, "nonfollowers"),
style="RANDOM",
unfollow_after=168 * 60 * 60,
sleep_delay=600)
# Remove specific users immediately
""" I use InstaPy only for my personal account, I sometimes use custom
list to remove users who fill up my feed
with annoying photos
"""
# custom_list = ["sexy.girls.pagee", "browneyedbitch97"]
#
# session.unfollow_users(amount=20, customList=(True, custom_list,
# "all"), style="RANDOM",
# unfollow_after=1 * 60 * 60, sleep_delay=200)
# Like by tags
""" I mostly use like by tags. I used to use a small list of targeted
tags with a big 'amount' like 300
But that resulted in lots of "insufficient links" messages. So I
started using a huge list of tags with
'amount' set to something small like 50. Probably this is not the
best way to deal with "insufficient links"
message. But I feel it is a quick work around.
"""
session.like_by_tags(['tag1', 'tag2', 'tag3', 'tag4'], amount=300)
""" Joining Engagement Pods...
"""
session.join_pods(topic='fashion')
"""
-- REVIEWS --
@uluQulu:
- @Nocturnal-2, your template looks stylish, thanks for preparing it.
@nocturnal-2:
- I think it is good opportunity to educate and get educated [using templates of other people] :) ...
"""
| StarcoderdataPython |
3594346 | # -*- coding: utf-8 -*-
"""Top-level package for Coding Assignments."""
# Package metadata placeholders (filled in when the project template is rendered).
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| StarcoderdataPython |
9637397 | <reponame>TachikakaMin/client<filename>wandb/sdk/service/grpc_server.py
#!/usr/bin/env python
"""wandb grpc server.
- GrpcServer:
- StreamMux:
- StreamRecord:
- WandbServicer:
"""
from concurrent import futures
import logging
import multiprocessing
import os
import sys
import tempfile
import threading
from threading import Event
import time
from typing import Any, Callable, Dict, List, Optional, Union
from typing import TYPE_CHECKING
import grpc
from six.moves import queue
import wandb
from wandb.proto import wandb_internal_pb2 as pb
from wandb.proto import wandb_server_pb2 as spb
from wandb.proto import wandb_server_pb2_grpc as spb_grpc
from wandb.proto import wandb_telemetry_pb2 as tpb
from .. import lib as wandb_lib
from ..interface.interface_queue import InterfaceQueue
if TYPE_CHECKING:
from google.protobuf.internal.containers import MessageMap
class GrpcServerType(object):
    """Minimal interface a gRPC server must expose to the servicer.

    Placeholder base type: ``stop`` is a no-op here and is provided by the
    real server object passed to WandbServicer at runtime.
    """

    def __init__(self) -> None:
        pass

    def stop(self, num: int) -> None:
        """Stop the server within ``num`` seconds (no-op in this stub)."""
        pass
def _dict_from_pbmap(pbmap: "MessageMap[str, spb.SettingsValue]") -> Dict[str, Any]:
    """Convert a protobuf settings map into a plain dict.

    Each SettingsValue is a oneof; the populated field name (reported by
    WhichOneof("value_type")) selects how the Python value is extracted.
    An explicit null_value — and any unrecognized oneof name — maps to None.
    """
    extractors: Dict[str, Callable[[Any], Any]] = {
        "int_value": lambda obj: obj.int_value,
        "string_value": lambda obj: obj.string_value,
        "float_value": lambda obj: obj.float_value,
        "bool_value": lambda obj: obj.bool_value,
        "null_value": lambda obj: None,
        "tuple_value": lambda obj: tuple(obj.tuple_value.string_values),
    }
    converted: Dict[str, Any] = dict()
    for key in pbmap:
        value_obj = pbmap[key]
        which = value_obj.WhichOneof("value_type")
        extract = extractors.get(which, lambda obj: None)
        converted[key] = extract(value_obj)
    return converted
class StreamThread(threading.Thread):
    """Daemon thread that runs the internal-process entry point in-process."""

    def __init__(self, target: Callable, kwargs: Dict[str, Any]) -> None:
        """Remember the callable and its keyword arguments; mark the thread daemonic."""
        super().__init__()
        self._target = target
        self._kwargs = kwargs
        self.daemon = True
        # Mimic a process-like attribute for callers that expect a pid.
        self.pid = 0

    def run(self) -> None:
        # TODO: catch exceptions and report errors to scheduler
        self._target(**self._kwargs)
class StreamRecord:
    """Bookkeeping for one stream: its record/result queues, the interface
    wrapping them, and the worker thread that services them."""

    _record_q: "queue.Queue[pb.Record]"
    _result_q: "queue.Queue[pb.Result]"
    _iface: InterfaceQueue
    _thread: StreamThread

    def __init__(self) -> None:
        # Multiprocessing queues so records/results can cross the boundary
        # between this process and the internal worker.
        self._record_q = multiprocessing.Queue()
        self._result_q = multiprocessing.Queue()
        process = multiprocessing.current_process()
        self._iface = InterfaceQueue(
            record_q=self._record_q,
            result_q=self._result_q,
            process=process,
            process_check=False,
        )

    def start_thread(self, thread: StreamThread) -> None:
        """Launch the worker and block until it answers a status request."""
        self._thread = thread
        thread.start()
        self._wait_thread_active()

    def _wait_thread_active(self) -> None:
        result = self._iface.communicate_status()
        # TODO: using the default communicate timeout, is that enough? retries?
        assert result

    def join(self) -> None:
        # Shut the interface down first, then wait for the worker thread.
        self._iface.join()
        if self._thread:
            self._thread.join()

    @property
    def interface(self) -> InterfaceQueue:
        """The InterfaceQueue used to publish/communicate with this stream."""
        return self._iface
class StreamAction:
    """A queued mux command ("add"/"del"/"teardown") with a completion event.

    The producer enqueues the action and may block in wait_handled(); the
    mux loop thread calls set_handled() once the action has been processed.
    The underscore attributes are read directly by StreamMux, so their names
    are part of the internal contract.
    """

    _action: str
    _stream_id: str
    _processed: Event
    _data: Any

    def __init__(self, action: str, stream_id: str, data: Any = None):
        self._action = action
        self._stream_id = stream_id
        self._data = data
        self._processed = Event()

    def __repr__(self) -> str:
        return "StreamAction({},{})".format(self._action, self._stream_id)

    @property
    def stream_id(self) -> str:
        """Identifier of the stream this action targets."""
        return self._stream_id

    def set_handled(self) -> None:
        """Mark this action as processed, releasing any waiter."""
        self._processed.set()

    def wait_handled(self) -> None:
        """Block until the mux marks this action as processed."""
        self._processed.wait()
class StreamMux:
    """Multiplexes wandb streams (runs): owns the stream table and processes
    add/del/teardown actions from a queue on its loop thread.

    Public mutators (add_stream, del_stream, teardown) enqueue a StreamAction
    and block until the loop thread has handled it, so callers observe the
    change as synchronous.
    """

    _streams_lock: threading.Lock
    _streams: Dict[str, StreamRecord]
    _port: Optional[int]
    _pid: Optional[int]
    _action_q: "queue.Queue[StreamAction]"
    _stopped: Event

    def __init__(self) -> None:
        self._streams_lock = threading.Lock()
        self._streams = dict()
        self._port = None
        self._pid = None
        self._stopped = Event()
        self._action_q = queue.Queue()

    def set_port(self, port: int) -> None:
        self._port = port

    def set_pid(self, pid: int) -> None:
        self._pid = pid

    def add_stream(self, stream_id: str, settings: Dict[str, Any]) -> None:
        """Create a stream for `stream_id`; blocks until the loop handles it."""
        action = StreamAction(action="add", stream_id=stream_id, data=settings)
        self._action_q.put(action)
        action.wait_handled()

    def del_stream(self, stream_id: str) -> None:
        """Drop the stream record; blocks until the loop handles it."""
        action = StreamAction(action="del", stream_id=stream_id)
        self._action_q.put(action)
        action.wait_handled()

    def teardown(self, exit_code: int) -> None:
        """Finish every stream and stop the loop; blocks until complete."""
        action = StreamAction(action="teardown", stream_id="na", data=exit_code)
        self._action_q.put(action)
        action.wait_handled()

    def stream_names(self) -> List[str]:
        with self._streams_lock:
            names = list(self._streams.keys())
            return names

    def has_stream(self, stream_id: str) -> bool:
        with self._streams_lock:
            return stream_id in self._streams

    def get_stream(self, stream_id: str) -> StreamRecord:
        # Raises KeyError if the stream id is unknown.
        with self._streams_lock:
            stream = self._streams[stream_id]
            return stream

    def _process_add(self, action: StreamAction) -> None:
        # Spin up a StreamRecord plus its internal worker thread, then
        # register it under the stream id once it is confirmed active.
        stream = StreamRecord()
        # run_id = action.stream_id  # will want to fix if a streamid != runid
        settings = action._data
        thread = StreamThread(
            target=wandb.wandb_sdk.internal.internal.wandb_internal,
            kwargs=dict(
                settings=settings,
                record_q=stream._record_q,
                result_q=stream._result_q,
                port=self._port,
                user_pid=self._pid,
            ),
        )
        stream.start_thread(thread)
        with self._streams_lock:
            self._streams[action._stream_id] = stream

    def _process_del(self, action: StreamAction) -> None:
        with self._streams_lock:
            _ = self._streams.pop(action._stream_id)
        # TODO: we assume stream has already been shutdown.  should we verify?

    def _finish_all(self, streams: Dict[str, StreamRecord], exit_code: int) -> None:
        # Publish exit to every stream, poll until each reports done, then join.
        if not streams:
            return
        for sid, stream in streams.items():
            wandb.termlog(f"Finishing run: {sid}...")  # type: ignore
            stream.interface.publish_exit(exit_code)
        streams_to_join = []
        while streams:
            for sid, stream in list(streams.items()):
                poll_exit_resp = stream.interface.communicate_poll_exit()
                if poll_exit_resp and poll_exit_resp.done:
                    streams.pop(sid)
                    streams_to_join.append(stream)
            time.sleep(0.1)
        # TODO: this would be nice to do in parallel
        for stream in streams_to_join:
            stream.join()
        wandb.termlog("Done!")  # type: ignore

    def _process_teardown(self, action: StreamAction) -> None:
        exit_code: int = action._data
        with self._streams_lock:
            # TODO: mark streams to prevent new modifications?
            streams_copy = self._streams.copy()
        self._finish_all(streams_copy, exit_code)
        with self._streams_lock:
            self._streams = dict()
        self._stopped.set()

    def _process_action(self, action: StreamAction) -> None:
        if action._action == "add":
            self._process_add(action)
            return
        if action._action == "del":
            self._process_del(action)
            return
        if action._action == "teardown":
            self._process_teardown(action)
            return
        raise AssertionError(f"Unsupported action: {action._action}")

    def _loop(self) -> None:
        # Drain the action queue until teardown sets _stopped; the 1s get
        # timeout keeps the loop responsive to the stop flag.
        while not self._stopped.is_set():
            # TODO: check for parent process going away
            try:
                action = self._action_q.get(timeout=1)
            except queue.Empty:
                continue
            self._process_action(action)
            action.set_handled()
            self._action_q.task_done()

    def loop(self) -> None:
        try:
            self._loop()
        except Exception as e:
            raise e

    def cleanup(self) -> None:
        pass
class WandbServicer(spb_grpc.InternalServiceServicer):
    """gRPC servicer for the wandb internal service.

    Routes each RPC to the per-stream InterfaceQueue obtained from the mux
    (keyed by the request's ``_info.stream_id``). Methods named ``*Record``
    publish asynchronously and fabricate an empty result; ``*Request``
    methods communicate synchronously. The previous docstring ("route guide
    server") was a copy-paste artifact from the gRPC tutorial.
    """

    _server: "GrpcServerType"
    _mux: StreamMux

    def __init__(self, server: "GrpcServerType", mux: StreamMux) -> None:
        self._server = server
        self._mux = mux

    def RunUpdate(  # noqa: N802
        self, run_data: pb.RunRecord, context: grpc.ServicerContext
    ) -> pb.RunUpdateResult:
        if not run_data.run_id:
            run_data.run_id = wandb_lib.runid.generate_id()
        # Record telemetry info about grpc server
        run_data.telemetry.feature.grpc = True
        run_data.telemetry.cli_version = wandb.__version__
        stream_id = run_data._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        result = iface._communicate_run(run_data)
        assert result  # TODO: handle errors
        return result

    def RunStart(  # noqa: N802
        self, run_start: pb.RunStartRequest, context: grpc.ServicerContext
    ) -> pb.RunStartResponse:
        # initiate run (stats and metadata probing)
        stream_id = run_start._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        result = iface._communicate_run_start(run_start)
        assert result  # TODO: handle errors
        return result

    def CheckVersion(  # noqa: N802
        self, check_version: pb.CheckVersionRequest, context: grpc.ServicerContext
    ) -> pb.CheckVersionResponse:
        # result = self._servicer._interface._communicate_check_version(check_version)
        # assert result  # TODO: handle errors
        result = pb.CheckVersionResponse()
        return result

    def Attach(  # noqa: N802
        self, attach: pb.AttachRequest, context: grpc.ServicerContext
    ) -> pb.AttachResponse:
        stream_id = attach._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        result = iface._communicate_attach(attach)
        assert result  # TODO: handle errors
        return result

    def PollExit(  # noqa: N802
        self, poll_exit: pb.PollExitRequest, context: grpc.ServicerContext
    ) -> pb.PollExitResponse:
        stream_id = poll_exit._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        result = iface.communicate_poll_exit()
        assert result  # TODO: handle errors
        return result

    def GetSummary(  # noqa: N802
        self, get_summary: pb.GetSummaryRequest, context: grpc.ServicerContext
    ) -> pb.GetSummaryResponse:
        stream_id = get_summary._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        result = iface.communicate_get_summary()
        assert result  # TODO: handle errors
        return result

    def SampledHistory(  # noqa: N802
        self, sampled_history: pb.SampledHistoryRequest, context: grpc.ServicerContext
    ) -> pb.SampledHistoryResponse:
        stream_id = sampled_history._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        result = iface.communicate_sampled_history()
        assert result  # TODO: handle errors
        return result

    def Shutdown(  # noqa: N802
        self, shutdown: pb.ShutdownRequest, context: grpc.ServicerContext
    ) -> pb.ShutdownResponse:
        stream_id = shutdown._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._communicate_shutdown()
        result = pb.ShutdownResponse()
        return result

    def RunExit(  # noqa: N802
        self, exit_data: pb.RunExitRecord, context: grpc.ServicerContext
    ) -> pb.RunExitResult:
        stream_id = exit_data._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface.publish_exit(exit_data.exit_code)
        result = pb.RunExitResult()
        return result

    def RunPreempting(  # noqa: N802
        self, preempt: pb.RunPreemptingRecord, context: grpc.ServicerContext
    ) -> pb.RunPreemptingResult:
        stream_id = preempt._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_preempting(preempt)
        result = pb.RunPreemptingResult()
        return result

    def Artifact(  # noqa: N802
        self, art_data: pb.ArtifactRecord, context: grpc.ServicerContext
    ) -> pb.ArtifactResult:
        stream_id = art_data._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_artifact(art_data)
        result = pb.ArtifactResult()
        return result

    def ArtifactSend(  # noqa: N802
        self, art_send: pb.ArtifactSendRequest, context: grpc.ServicerContext
    ) -> pb.ArtifactSendResponse:
        stream_id = art_send._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        resp = iface._communicate_artifact_send(art_send)
        assert resp
        return resp

    def ArtifactPoll(  # noqa: N802
        self, art_poll: pb.ArtifactPollRequest, context: grpc.ServicerContext
    ) -> pb.ArtifactPollResponse:
        stream_id = art_poll._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        resp = iface._communicate_artifact_poll(art_poll)
        assert resp
        return resp

    def TBSend(  # noqa: N802
        self, tb_data: pb.TBRecord, context: grpc.ServicerContext
    ) -> pb.TBResult:
        stream_id = tb_data._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_tbdata(tb_data)
        result = pb.TBResult()
        return result

    def Log(  # noqa: N802
        self, history: pb.HistoryRecord, context: grpc.ServicerContext
    ) -> pb.HistoryResult:
        stream_id = history._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_history(history)
        # make up a response even though this was async
        result = pb.HistoryResult()
        return result

    def Summary(  # noqa: N802
        self, summary: pb.SummaryRecord, context: grpc.ServicerContext
    ) -> pb.SummaryResult:
        stream_id = summary._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_summary(summary)
        # make up a response even though this was async
        result = pb.SummaryResult()
        return result

    def Telemetry(  # noqa: N802
        self, telem: tpb.TelemetryRecord, context: grpc.ServicerContext
    ) -> tpb.TelemetryResult:
        stream_id = telem._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_telemetry(telem)
        # make up a response even though this was async
        result = tpb.TelemetryResult()
        return result

    def Output(  # noqa: N802
        self, output_data: pb.OutputRecord, context: grpc.ServicerContext
    ) -> pb.OutputResult:
        stream_id = output_data._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_output(output_data)
        # make up a response even though this was async
        result = pb.OutputResult()
        return result

    def Files(  # noqa: N802
        self, files_data: pb.FilesRecord, context: grpc.ServicerContext
    ) -> pb.FilesResult:
        stream_id = files_data._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_files(files_data)
        # make up a response even though this was async
        result = pb.FilesResult()
        return result

    def Config(  # noqa: N802
        self, config_data: pb.ConfigRecord, context: grpc.ServicerContext
    ) -> pb.ConfigResult:
        stream_id = config_data._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_config(config_data)
        # make up a response even though this was async
        result = pb.ConfigResult()
        return result

    def Metric(  # noqa: N802
        self, metric: pb.MetricRecord, context: grpc.ServicerContext
    ) -> pb.MetricResult:
        stream_id = metric._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_metric(metric)
        # make up a response even though this was async
        result = pb.MetricResult()
        return result

    def Pause(  # noqa: N802
        self, pause: pb.PauseRequest, context: grpc.ServicerContext
    ) -> pb.PauseResponse:
        stream_id = pause._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_pause(pause)
        # make up a response even though this was async
        result = pb.PauseResponse()
        return result

    def Resume(  # noqa: N802
        self, resume: pb.ResumeRequest, context: grpc.ServicerContext
    ) -> pb.ResumeResponse:
        stream_id = resume._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_resume(resume)
        # make up a response even though this was async
        result = pb.ResumeResponse()
        return result

    def Alert(  # noqa: N802
        self, alert: pb.AlertRecord, context: grpc.ServicerContext
    ) -> pb.AlertResult:
        stream_id = alert._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        iface._publish_alert(alert)
        # make up a response even though this was async
        result = pb.AlertResult()
        return result

    def Status(  # noqa: N802
        self, status: pb.StatusRequest, context: grpc.ServicerContext
    ) -> pb.StatusResponse:
        stream_id = status._info.stream_id
        iface = self._mux.get_stream(stream_id).interface
        result = iface._communicate_status(status)
        assert result
        return result

    # --- server-level RPCs (not tied to a single stream's interface) ---

    def ServerShutdown(  # noqa: N802
        self, request: spb.ServerShutdownRequest, context: grpc.ServicerContext,
    ) -> spb.ServerShutdownResponse:
        result = spb.ServerShutdownResponse()
        # Grace period of 5 seconds before forcing the server down.
        self._server.stop(5)
        return result

    def ServerStatus(  # noqa: N802
        self, request: spb.ServerStatusRequest, context: grpc.ServicerContext,
    ) -> spb.ServerStatusResponse:
        result = spb.ServerStatusResponse()
        return result

    def ServerInformInit(  # noqa: N802
        self, request: spb.ServerInformInitRequest, context: grpc.ServicerContext,
    ) -> spb.ServerInformInitResponse:
        stream_id = request._info.stream_id
        settings = _dict_from_pbmap(request._settings_map)
        self._mux.add_stream(stream_id, settings=settings)
        result = spb.ServerInformInitResponse()
        return result

    def ServerInformFinish(  # noqa: N802
        self, request: spb.ServerInformFinishRequest, context: grpc.ServicerContext,
    ) -> spb.ServerInformFinishResponse:
        stream_id = request._info.stream_id
        self._mux.del_stream(stream_id)
        result = spb.ServerInformFinishResponse()
        return result

    def ServerInformAttach(  # noqa: N802
        self, request: spb.ServerInformAttachRequest, context: grpc.ServicerContext,
    ) -> spb.ServerInformAttachResponse:
        # TODO
        result = spb.ServerInformAttachResponse()
        return result

    def ServerInformDetach(  # noqa: N802
        self, request: spb.ServerInformDetachRequest, context: grpc.ServicerContext,
    ) -> spb.ServerInformDetachResponse:
        # TODO
        result = spb.ServerInformDetachResponse()
        return result

    def ServerInformTeardown(  # noqa: N802
        self, request: spb.ServerInformTeardownRequest, context: grpc.ServicerContext,
    ) -> spb.ServerInformTeardownResponse:
        exit_code = request.exit_code
        self._mux.teardown(exit_code)
        result = spb.ServerInformTeardownResponse()
        return result
class GrpcServer:
def __init__(
self,
port: int = None,
port_fname: str = None,
address: str = None,
pid: int = None,
debug: bool = False,
) -> None:
self._port = port
self._port_fname = port_fname
self._address = address
self._pid = pid
self._debug = debug
debug = True
if debug:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def _inform_used_port(self, port: int) -> None:
if not self._port_fname:
return
dname, bname = os.path.split(self._port_fname)
f = tempfile.NamedTemporaryFile(prefix=bname, dir=dname, mode="w", delete=False)
tmp_filename = f.name
try:
with f:
f.write("%d" % port)
os.rename(tmp_filename, self._port_fname)
except Exception:
os.unlink(tmp_filename)
raise
def _launch(self, mux: StreamMux) -> int:
address: str = self._address or "127.0.0.1"
port: int = self._port or 0
pid: int = self._pid or 0
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
servicer = WandbServicer(server=server, mux=mux)
try:
spb_grpc.add_InternalServiceServicer_to_server(servicer, server)
port = server.add_insecure_port(f"{address}:{port}")
mux.set_port(port)
mux.set_pid(pid)
server.start()
self._inform_used_port(port)
except KeyboardInterrupt:
mux.cleanup()
server.stop(0)
raise
except Exception:
mux.cleanup()
server.stop(0)
raise
return port
def serve(self) -> None:
mux = StreamMux()
port = self._launch(mux=mux)
setproctitle = wandb.util.get_optional_module("setproctitle")
if setproctitle:
service_ver = 0
service_id = f"{service_ver}-{port}"
proc_title = f"wandb-service({service_id})"
setproctitle.setproctitle(proc_title)
mux.loop()
| StarcoderdataPython |
11388412 | # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
from deprecated import deprecated
def deprecate_and_set_removal(since: str, remove_in: str, message: str):
    """
    Decorator factory for deprecating functions in ogr.

    Args:
        since: Version since which the attribute is deprecated.
        remove_in: Version in which the attribute will be removed.
        message: Message to be included with the deprecation.

    Returns:
        Decorator wrapping `deprecated` with a standard removal notice.
    """
    removal_reason = f"will be removed in {remove_in}: {message}"
    return deprecated(version=since, reason=removal_reason)
| StarcoderdataPython |
5154416 | <reponame>thompsnm/capture-the-pi
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import RPi.GPIO as GPIO
import Tkinter
import time
import httplib2 as http
import json
class app_tk(Tkinter.Tk):
    """Tk window for a capture-the-flag base station.

    Flow: pick a team (Red/Blue) -> register players -> show the score.
    Team selection also drives the two GPIO pins (PIN_1/PIN_2, module
    globals) that light the base in the chosen team's color.
    """

    def __init__(self, parent):
        Tkinter.Tk.__init__(self,parent)
        self.parent = parent
        self.initialize()

    def initialize(self):
        """Build the initial UI: a prompt label plus Red/Blue team buttons."""
        self.grid()
        self.grid_columnconfigure(0, weight=1)
        self.grid_columnconfigure(1, weight=1)
        self.labelVariable = Tkinter.StringVar()
        self.scoreVariable = Tkinter.StringVar()
        self.label = Tkinter.Label(self, textvariable=self.labelVariable, font=("Courier", 44));
        self.label.grid(column=0, row=0, columnspan = 2);
        self.labelVariable.set(u"Select a Team:");
        self.buttonRed = Tkinter.Button(self, text=u"Red Base", font=("Courier", 22), command=self.setRed)
        self.buttonRed.grid(column=0, row=1);
        self.buttonBlue = Tkinter.Button(self, text=u"Blue Base", font=("Courier", 22), command=self.setBlue)
        self.buttonBlue.grid(column=1, row=1);

    def setRed(self):
        """Select the red team: set GPIO pins, recolor the UI, start registration."""
        print 'Setting RED'
        GPIO.output(PIN_1, GPIO.LOW);
        GPIO.output(PIN_2, GPIO.HIGH);
        self.destroyTeamButtons();
        self.label.config(fg="white", bg='red')
        self.configure(background='red')
        self.labelVariable.set(u"Red Team\nRegister Players");

    def setBlue(self):
        """Select the blue team: set GPIO pins, recolor the UI, start registration."""
        print 'Setting BLUE'
        GPIO.output(PIN_1, GPIO.HIGH);
        GPIO.output(PIN_2, GPIO.LOW);
        self.destroyTeamButtons();
        self.label.config(fg="white", bg='blue')
        self.configure(background='blue')
        self.labelVariable.set(u"Blue Team\nRegister Players");

    def destroyTeamButtons(self):
        """Replace the team buttons with a single Done button for registration."""
        print 'Destroying Buttons'
        self.buttonRed.destroy()
        self.buttonBlue.destroy()
        self.buttonDone = Tkinter.Button(self, text=u"Done", font=("Courier", 22), command=self.doneRegistering)
        self.buttonDone.grid(column=0, row=1, columnspan=2);

    def doneRegistering(self):
        """Finish registration and switch the window to the score display."""
        self.buttonDone.destroy()
        self.labelVariable.set(u"Score:");
        self.score = Tkinter.Label(self, textvariable=self.scoreVariable, font=("Courier", 44));
        self.score.grid(column=0, row=1, columnspan = 2);

    def setScore(self, red, blue):
        """Render the score as "red|blue" in the score label."""
        print('Updating score')
        self.scoreVariable.set(str(red) + "|" + str(blue));
if __name__ == "__main__":
    # Configure the two GPIO output pins used to signal the base colour.
    PIN_1 = 26
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN_1, GPIO.OUT)
    PIN_2 = 19
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN_2, GPIO.OUT)
    app = app_tk(None)
    app.title('Capture The Pi')
    app.minsize(width=800, height=400)
    # Manual event loop: poll the remote score service each pass, then
    # pump Tk (update_idletasks/update) instead of calling mainloop().
    while True:
        try:
            response, content = http.Http().request(
                'https://fathomless-inlet-46417.herokuapp.com/score',
                'GET',
                '',
                {
                    'Accept': 'application/json',
                    'Content-Type': 'application/json; charset=UTF-8'
                }
            )
            score = json.loads(content)
            print("Received score from aws:")
            print(score)
            app.setScore(score.get("red"), score.get("blue"))
        except:
            # NOTE(review): bare except hides every failure (JSON errors,
            # typos, KeyboardInterrupt), not just connection refusals.
            print('Connection refused; will try again')
        app.update_idletasks()
        app.update()
| StarcoderdataPython |
3258304 | <gh_stars>0
# progressão aritmética (PA): print the first 10 terms of an arithmetic
# progression given its first term and common difference.
primeiro = int(input('Primeiro termo:'))
razao = int(input('Razão:'))
# Iterate over term indices rather than term values: the original
# range(primeiro, decimo + razao, razao) raised ValueError ("step must not
# be zero") when razao == 0; for any other razao the output is identical.
for i in range(10):
    print('{}'.format(primeiro + i * razao), end='->')
print('ACABOU')
3372545 | <filename>python/foundation/suite/suite.py
# Copyright © 2019 by <NAME>
# All rights reserved. No part of this publication/code may not be reproduced,
# distributed, or transmitted in any form or by any means, including
# photocopying, recording, or other electronic or mechanical methods,
# without the prior written permission of the publisher/author, except in the
# case noncommercial uses permitted by copyright law. For permission requests,
# write to the publisher, addressed “Attention: Nirvana Project,”
# at the address below.
# email: <EMAIL>
import os
import shutil
import subprocess
from nlogger.logger import logging
class Suite(object):
    """Thin wrapper around a rez suite directory.

    Knows how to create the suite via the rez-suite CLI, publish it as the
    'current' release symlink, and validate a clone source.
    """
    def __init__(self, suite_root, suite_name):
        self._suite_root = suite_root
        self._suite_name = suite_name

    def create(self):
        """Create an empty rez suite on disk via the rez-suite CLI."""
        logging.info("Creating a new rez-suite names {0}".format(self.suite_name))
        command = "rez-suite {0} --create".format(self.suite_dir)
        logging.debug("CMD EXEC: {0}".format(command))
        proc = subprocess.Popen([command], shell=True)
        proc.communicate()  # block until the CLI finishes
        return True

    def release(self):
        """Point the 'current' symlink at this suite, replacing any old link."""
        link_path = os.path.join(self._suite_root, "current")
        if os.path.exists(link_path):
            os.remove(link_path)
        os.symlink(self.suite_dir, link_path)
        logging.info("Releasing {0} to production".format(self._suite_name))
        return True

    def clone(self, source_suite):
        """Cloning an existing suite is not supported yet."""
        raise NotImplementedError

    def _validate_source_suite(self, source_suite):
        """A valid source suite is a directory containing a suite.yaml file."""
        return os.path.isfile(os.path.join(source_suite, 'suite.yaml'))

    @property
    def suite_dir(self):
        """Absolute path of this suite under the suite root."""
        return os.path.join(self._suite_root, self._suite_name)

    @property
    def suite_name(self):
        return self._suite_name
51474 | import os
import time
# Supervisor loop: keep restarting the bot process forever; the 1s sleep
# throttles restarts if the command exits immediately.
while (True):
    # NOTE(review): token appears partially redacted (<PASSWORD>) -- the
    # command line will leak whatever token is placed here to `ps`.
    os.system("pipenv run start --token 1<PASSWORD> --board 1 --time-factor=1 --logic LowerRight")
    time.sleep(1)
3387290 | import torch
import torch.utils.model_zoo as model_zoo
from urllib.parse import urlparse
def load_weights(network, save_path, partial=True):
    """Load pretrained weights into *network*, tolerating layer mismatches.

    Args:
        network: torch.nn.Module to initialize in place.
        save_path: local checkpoint path, or a URL (fetched via model_zoo).
        partial: when True, layers whose shapes differ are still filled over
            the overlapping region instead of being skipped entirely.
    """
    # Accept either a URL or a local file path.
    if urlparse(save_path).scheme != '':
        pretrained_dict = model_zoo.load_url(save_path)
    else:
        pretrained_dict = torch.load(save_path)
    # Unwrap checkpoints saved as {'state_dict': ...}.
    if 'state_dict' in pretrained_dict:
        pretrained_dict = pretrained_dict['state_dict']
    try:
        network.load_state_dict(pretrained_dict)
        print('Pretrained network has absolutely the same layers!')
    except RuntimeError:
        # load_state_dict raises RuntimeError on missing/unexpected keys or
        # size mismatches (was a bare `except:` that also hid real bugs).
        model_dict = network.state_dict()
        # Keep only the keys the target network actually has.
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        try:
            network.load_state_dict(pretrained_dict)
            print('Pretrained network has excessive layers; Only loading layers that are used')
        except RuntimeError:
            print('Pretrained network has fewer layers; The following are not initialized:')
            not_initialized = set()
            partially_initialized = set()
            for k, v in pretrained_dict.items():
                if v.size() == model_dict[k].size():
                    model_dict[k] = v
                elif partial:
                    # Copy the overlapping sub-tensor of mismatched layers.
                    min_shape = [min(v.size()[i], model_dict[k].size()[i]) for i in range(len(min(v.size(), model_dict[k].size())))]
                    if len(model_dict[k].size()) in [2, 4]:  # fc and conv layers
                        model_dict[k][:min_shape[0], :min_shape[1], ...] = \
                            v[:min_shape[0], :min_shape[1], ...]
                    elif len(model_dict[k].size()) == 1:
                        model_dict[k][:min_shape[0]] = v[:min_shape[0]]
                    else:
                        # Fixed format string: the original had one placeholder
                        # but two arguments, so the size was never printed.
                        print('{} has size: {}'.format(k, model_dict[k].size()))
            for k, v in model_dict.items():
                if k not in pretrained_dict or (not partial and v.size() != pretrained_dict[k].size()):
                    not_initialized.add(k[:k.rfind('.')])
                elif partial and v.size() != pretrained_dict[k].size():
                    partially_initialized.add(k[:k.rfind('.')])
            print(sorted(not_initialized))
            if partial:
                print('Partially initialized:')
                print(sorted(partially_initialized))
            network.load_state_dict(model_dict)
1910731 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/8/008 21:15
# @Author : Woe
# @Site :
# @File : login.py
# @Software: PyCharm
from http.client import HTTPConnection
from src.utils import get_logger
logger = get_logger('Tieba_login')
def getCookieValue(cookies, cookieName):
    """Return the value of the named cookie from a list of "name=value"
    strings, or None when the cookie is absent.

    Fixed: the original used `cookieName in cookie`, a substring test that
    could match the wrong cookie (e.g. looking up "UID" matched "BAIDUID").
    Now only an exact "name=" prefix matches.
    """
    prefix = cookieName + "="
    for cookie in cookies:
        if cookie.startswith(prefix):
            return cookie[len(prefix):]
    return None
def getCookiesFromHeaders(headers):
    """Collect every cookie from (name, value) header pairs.

    For each header pair containing "Set-Cookie", keep only the
    "name=value" part before the first ';' (attributes are dropped).
    """
    return [header[1].split(";")[0]
            for header in headers
            if "Set-Cookie" in header]
def formatCookies(headers, cookies):
    """Append each cookie (terminated by ';') to the "Cookie" header.

    Mutates *headers* in place and also returns it for convenience.
    """
    headers["Cookie"] += "".join(cookie + ";" for cookie in cookies)
    return headers
def tblogin(username, password):
    """
    Log in to Baidu Tieba through the WAP password endpoint.

    :param username: Baidu account name, interpolated into the form body
    :param password: account password, interpolated into the form body
    :return: (header, cookies) -- the request headers with the session
        cookies folded into the "Cookie" field, and the raw cookie list
    """
    cookies = []
    # Browser-like headers; the "Cookie" field starts empty and is filled
    # from the Set-Cookie response headers after the login POST.
    headers = {
        "Connection": "keep-alive",
        "Cache-Control": "max-age=0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Accept-Encoding": "gzip,deflate,sdch",
        "Accept-Language": "zh-CN,zh;q=0.8",
        "Cookie": "",
        "Host":"wappass.baidu.com"
    }
    # Pre-encoded WAP login form body; the fixed uid/bdcm values mimic a
    # recorded browser session -- NOTE(review): these may expire server-side.
    body = f"username={username}&password={password}&submit=%E7%99%BB%E5%BD%95&quick_user=0&isphone=0&sp_login=waprate&uname_login=&loginmerge=1&vcodestr=&u=http%253A%252F%252Fwap.baidu.com%253Fuid%253D1392873796936_247&skin=default_v2&tpl=&ssid=&from=&uid=1392873796936_247&pu=&tn=&bdcm=3f7d51b436d12f2e83389b504fc2d56285356820&type=&bd_page_type="
    conn = HTTPConnection("wappass.baidu.com", 80)
    conn.request("POST", "/passport/login", body, headers)
    resp = conn.getresponse()
    # Harvest session cookies from the response and fold them into the
    # headers so they can be reused for subsequent authenticated requests.
    cookies += getCookiesFromHeaders(resp.getheaders())
    headers = formatCookies(headers, cookies)
    logger.info('login success')
    return (headers, cookies)
if __name__ == '__main__':
    # Demo run with placeholder credentials; tblogin performs network I/O.
    headers, cookies = tblogin("XXX", "XXX")
    print(headers)
    print(cookies)
11351032 | import cv2
import numpy as np
# Skin-region detection on a single image via HSV colour thresholding.
img = cv2.imread('ct1.jpg')
# img = cv2.resize(img,(512,512))
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# HSV bounds for typical skin tones -- tuned empirically for this image.
lower = np.array([0, 48, 80], dtype='uint8')
upper = np.array([20, 255, 255], dtype='uint8')
mask = cv2.inRange(hsv, lower, upper)
# Morphological open (erode then dilate) removes speckle noise, then a
# Gaussian blur softens the mask edges.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
mask = cv2.erode(mask, kernel, iterations=2)
mask = cv2.dilate(mask, kernel, iterations=2)
mask = cv2.GaussianBlur(mask, (3, 3), 0)
op = cv2.bitwise_and(img, img, mask=mask)
# NOTE(review): the 3-value unpack matches the OpenCV 3 findContours API;
# OpenCV 4 returns only (contours, hierarchy) -- confirm the pinned version.
_, contour, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(op, contour, -1, (0, 255, 0), thickness=2)
# cv2.imwrite('mask.png',mask)
# cv2.imwrite('op.png',op)
# cv2.imwrite('mask.jpg',mask)
cv2.imshow('Original Image', img)
cv2.imshow("HSV Conversion", hsv)
cv2.imshow("Mask", mask)
cv2.imshow("Hand", op)
cv2.waitKey(0)
# cam.release()
cv2.destroyAllWindows()
3249730 | """
mongodb
https://github.com/MongoEngine/mongoengine
"""
from mongoengine import * | StarcoderdataPython |
4940580 | <filename>koabot/utils/net.py
"""Handle network requests"""
import io
from datetime import datetime
from typing import List
import aiohttp
class NetResponse():
    """Small wrapper pairing an aiohttp response with its decoded body.

    The body is exposed under an attribute named after the decoding that
    was requested: ``json`` / ``image`` / ``plain_text``.
    """
    def __init__(self, response: aiohttp.ClientResponse, **kwargs):
        self.client_response = response
        self.status: int = response.status
        body = kwargs.get('response_body', None)
        self.response_body = body
        # Mirror the body under the attribute matching the requested format.
        if kwargs.get('json'):
            self.json = body
        elif kwargs.get('image'):
            self.image = body
        else:
            self.plain_text = body
async def http_request(url: str, **kwargs) -> NetResponse:
    """Make an http request

    Arguments:
        url::str
            The url to point to

    Keywords:
        auth::aiohttp.BasicAuth
            Authentication object to make the connection with
        data::json dump str
            Stringified json object
        headers::json object
            object containing headers
        post::bool
            whether or not the request is a POST request (GET otherwise)
    """
    auth: aiohttp.BasicAuth = kwargs.get('auth')
    headers = kwargs.get('headers')
    data = kwargs.get('data')
    post: bool = kwargs.get('post')
    # A fresh session per request; auth is bound at the session level.
    async with aiohttp.ClientSession(auth=auth) as session:
        if post:
            async with session.post(url, data=data, headers=headers) as response:
                # Remaining kwargs (json/image/err_msg...) are interpreted downstream.
                return await handle_request(response, **kwargs)
        else:
            async with session.get(url, data=data, headers=headers) as response:
                return await handle_request(response, **kwargs)
async def handle_request(response: aiohttp.ClientResponse, **kwargs) -> NetResponse:
    """Handle the response made by either POST or GET requests

    Arguments:
        response::ClientResponse

    Keywords:
        json::bool
            True = must return json
            False/unset = returns plain text
        err_msg::str
            message to display on failure
    """
    json: bool = kwargs.get('json')
    err_msg: str = kwargs.get('err_msg')
    if response.status != 200:
        # Timeout error
        # if response.status == 524
        # Non-200: log the failure and return a NetResponse with no body.
        failure_msg = f"> {datetime.now()}\nFailed connecting to {response.real_url}\n"
        failure_msg += f"[Network status {response.status}]: {response.reason} \"{err_msg}\""
        print(failure_msg)
        return NetResponse(response)
    if json:
        # content_type=None disables aiohttp's strict mimetype check.
        response_body = await response.json(content_type=None)
    else:
        response_body = await response.read()
    return NetResponse(response, response_body=response_body, **kwargs)
async def fetch_image(url: str, /, **kwargs) -> io.BytesIO:
    """Download the image at *url* and return its bytes in a BytesIO buffer."""
    response = await http_request(url, image=True, **kwargs)
    return io.BytesIO(response.image)
def get_url_filename(url: str, /) -> str:
    """Return the final path segment of *url* (text after the last '/')."""
    return url.rsplit('/', 1)[-1]
def get_url_fileext(url: str, /) -> str:
    """Return the file extension of *url*'s filename, query string stripped."""
    # Inlined filename extraction (same as get_url_filename): last '/' segment.
    filename = url.rsplit('/', 1)[-1]
    return filename.rsplit('.', 1)[-1].partition('?')[0]
def get_domain(url: str, /) -> str:
    """Return the host of *url*: after the scheme's '//', before '/' or '?'."""
    without_scheme = url.split('//')[-1]
    host = without_scheme.split('/')[0]
    return host.split('?')[0]
def get_domains(lst: List[str], /) -> List[str]:
    """Get domains from a list of urls.

    https://stackoverflow.com/questions/9626535/get-protocol-host-name-from-url#answer-36609868
    """
    # Comprehension replaces the original manual append loop (same results).
    return [get_domain(url) for url in lst]
| StarcoderdataPython |
3494398 | <reponame>maxi7587/DRFContact<filename>DRFContact/DRFContact/serializers.py
from rest_framework import serializers
from DRFContact.DRFContact.models import Address, Phone, Web, SocialMedia, Contact
class AddressSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing postal address fields; timestamps are read-only."""
    class Meta:
        model = Address
        fields = ('id', 'url', 'country', 'state', 'city', 'street_name', 'street_number', 'zip', 'detail')
        read_only_fields = ('created_at', 'updated_at', 'deleted_at')
class PhoneSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer for phone numbers split into country/area/number parts."""
    class Meta:
        model = Phone
        fields = ('id', 'url', 'country_code', 'area_code', 'phone_number')
        read_only_fields = ('created_at', 'updated_at', 'deleted_at')
class SocialMediaSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer for a social-media link attached to a Web contact."""
    class Meta:
        model = SocialMedia
        fields = ('id', 'url', 'web_contact', 'social_network', 'link')
        read_only_fields = ('created_at', 'updated_at', 'deleted_at')
class WebSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer for Web contact info with writable nested social media."""
    social_media = SocialMediaSerializer(many=True)

    class Meta:
        model = Web
        fields = ('id', 'url', 'email', 'web_url', 'social_media')
        read_only_fields = ('created_at', 'updated_at', 'deleted_at')

    def create(self, validated_data):
        """Create a Web row plus one SocialMedia row per nested entry."""
        social_media_data = validated_data.pop('social_media')
        web = Web.objects.create(**validated_data)
        for social_media in social_media_data:
            SocialMedia.objects.create(web_contact=web, **social_media)
        return web

    def update(self, instance, validated_data):
        """Update the existing Web row and replace its nested social media.

        Fixed: the original `update` was a copy of `create` -- it was missing
        the *instance* parameter that DRF passes (so every serializer.save()
        on an existing object raised TypeError) and it created a brand-new
        Web row instead of updating the existing one.
        """
        social_media_data = validated_data.pop('social_media', None)
        for attr, value in validated_data.items():
            setattr(instance, attr, value)
        instance.save()
        if social_media_data is not None:
            # Replace nested rows wholesale: simplest consistent semantics.
            SocialMedia.objects.filter(web_contact=instance).delete()
            for social_media in social_media_data:
                SocialMedia.objects.create(web_contact=instance, **social_media)
        return instance
class ContactSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer for a Contact.

    Writes accept primary keys/links for address/phone/web; reads render
    them as fully nested objects (see to_representation).
    """
    class Meta:
        model = Contact
        fields = ('id', 'url', 'name', 'address', 'phone', 'web')
        read_only_fields = ('created_at', 'updated_at', 'deleted_at')

    def to_representation(self, instance):
        # Swap the related fields to nested read-only serializers so GET
        # responses embed the full related objects instead of links.
        self.fields['address'] = AddressSerializer(read_only=True)
        self.fields['phone'] = PhoneSerializer(read_only=True)
        self.fields['web'] = WebSerializer(read_only=True)
        return super(ContactSerializer, self).to_representation(instance)
| StarcoderdataPython |
3354428 | import unittest
import numpy as np
from flowutils import transforms
class TransformsTestCase(unittest.TestCase):
    """Unit tests for flowutils.transforms.

    Each transform is checked two ways: a fixed input vector against
    hard-coded expected values, and a round-trip (transform then inverse)
    over a dense data range.
    """
    def setUp(self):
        # Dense non-negative range used by every round-trip test below.
        self.test_data_range = np.linspace(0.0, 1000.0, 10001)

    @staticmethod
    def test_logicle_range():
        """Test a range of input values"""
        data_in = np.array([-10, -5, -1, 0, 0.3, 1, 3, 10, 100, 1000])
        correct_output = np.array(
            [
                0.067574,
                0.147986,
                0.228752,
                0.25,
                0.256384,
                0.271248,
                0.312897,
                0.432426,
                0.739548,
                1.0
            ]
        )
        # noinspection PyProtectedMember
        data_out = transforms._logicle(data_in, t=1000, m=4.0, w=1.0, a=0)
        np.testing.assert_array_almost_equal(data_out, correct_output, decimal=6)

    def test_inverse_logicle_transform(self):
        # Round-trip: logicle followed by logicle_inverse must reproduce input.
        xform_data = transforms.logicle(
            self.test_data_range.reshape(-1, 1),
            [0],
            t=10000,
            w=0.5,
            m=4.5,
            a=0
        )
        x = transforms.logicle_inverse(
            xform_data,
            [0],
            t=10000,
            w=0.5,
            m=4.5,
            a=0
        )
        np.testing.assert_array_almost_equal(self.test_data_range, x[:, 0], decimal=10)

    @staticmethod
    def test_asinh_range():
        """Test a range of input values"""
        # NOTE(review): np.float is removed in NumPy >= 1.24 -- this test
        # assumes an older NumPy; confirm the pinned version.
        data_in = np.array([-10.0, -5.0, -1.0, 0.0, 0.3, 1.0, 3.0, 10.0, 100.0, 1000.0], dtype=np.float)
        data_in = data_in.reshape((-1, 1))
        correct_output = np.array(
            [[
                -0.200009,
                -0.139829,
                -0.000856,
                0.2,
                0.303776,
                0.400856,
                0.495521,
                0.600009,
                0.8,
                1.0
            ]]
        ).reshape((-1, 1))
        data_out = transforms.asinh(data_in, channel_indices=0, t=1000, m=4.0, a=1.0)
        np.testing.assert_array_almost_equal(data_out, correct_output, decimal=6)

    def test_inverse_asinh_transform(self):
        xform_data = transforms.asinh(
            self.test_data_range.reshape(-1, 1),
            [0],
            t=10000,
            m=4.5,
            a=0
        )
        x = transforms.asinh_inverse(
            xform_data,
            [0],
            t=10000,
            m=4.5,
            a=0
        )
        np.testing.assert_array_almost_equal(self.test_data_range, x[:, 0], decimal=10)

    @staticmethod
    def test_hyperlog_range():
        """Test a range of input values"""
        data_in = np.array([-10, -5, -1, 0, 0.3, 1, 3, 10, 100, 1000])
        correct_output = np.array(
            [
                0.08355406,
                0.15586819,
                0.2294768,
                0.25,
                0.25623887,
                0.2705232,
                0.30909185,
                0.41644594,
                0.73187469,
                1.
            ]
        )
        # noinspection PyProtectedMember
        data_out = transforms._hyperlog(data_in, t=1000, m=4.0, w=1.0, a=0)
        np.testing.assert_array_almost_equal(data_out, correct_output, decimal=6)

    def test_inverse_hyperlog_transform(self):
        xform_data = transforms.hyperlog(
            self.test_data_range.reshape(-1, 1),
            [0],
            t=10000,
            w=0.5,
            m=4.5,
            a=0
        )
        x = transforms.hyperlog_inverse(
            xform_data,
            [0],
            t=10000,
            w=0.5,
            m=4.5,
            a=0
        )
        np.testing.assert_array_almost_equal(self.test_data_range, x[:, 0], decimal=10)

    @staticmethod
    def test_log_range():
        """Test a range of input values"""
        data_in = np.array(
            [-1., 0., 0.5, 1., 10., 100., 1000., 1023., 10000., 100000, 262144],
            dtype=np.float
        )
        data_in = data_in.reshape((-1, 1))
        # log of negative input is NaN, log of zero is -inf by design.
        correct_output = np.array(
            [[
                np.nan,
                -np.inf,
                0.139794,
                0.2,
                0.4,
                0.6,
                0.8,
                0.801975,
                1.0,
                1.2,
                1.283708
            ]]
        ).reshape((-1, 1))
        with np.errstate(divide='ignore', invalid='ignore'):
            data_out = transforms.log(data_in, channel_indices=0, t=10000, m=5.0)
        np.testing.assert_array_almost_equal(data_out, correct_output, decimal=6)

    def test_inverse_log_transform(self):
        with np.errstate(divide='ignore'):
            xform_data = transforms.log(
                self.test_data_range.reshape(-1, 1),
                [0],
                t=10000,
                m=4.5
            )
        x = transforms.log_inverse(
            xform_data,
            [0],
            t=10000,
            m=4.5
        )
        np.testing.assert_array_almost_equal(self.test_data_range, x[:, 0], decimal=10)
| StarcoderdataPython |
1785608 | <gh_stars>0
from dwave.system import EmbeddingComposite, DWaveSampler
# simple crossroad
h = {0,0,0,0}
J = {
(0, 1): 1,
(0, 3): 1,
(1, 2): 1,
(2, 3): 1
}
# chessboard 3 x 3
h = {0,0,0,0}
J = {
(0, 1): 1,
(0, 3): 1,
(1, 2): 1,
(1, 4): 1,
(2, 5): 1,
(3, 4): 1,
(3, 6): 1,
(4, 5): 1,
(4, 7): 1,
(5, 8): 1
}
# Define the sampler that will be used to run the problem
sampler = EmbeddingComposite(DWaveSampler())
# Run the problem on the sampler
sampleset = sampler.sample_ising(h, J, num_reads = 10)
print(sampleset) | StarcoderdataPython |
9780841 | from .actor import Actor
from .control import ActorHandler
def find_actor_handlers(actor, must_allow_standalones=False, include_same_level=False):
    """
    Returns a list of actor handlers, starting from the current node (excluded).
    The search goes up in the actor hierarchy, up to the root (i.e., the last
    item in the returned list will be most likely a "Flow" actor).

    :param actor: the starting point
    :type actor: Actor
    :param must_allow_standalones: whether the handler must allow standalones
    :type must_allow_standalones: bool
    :param include_same_level: allows adding of actor handlers that are on
                               the same level as the current actor, but
                               are before it
    :return: the handlers
    :rtype: list
    """
    result = []
    root = actor.root
    child = actor
    parent = actor.parent
    # Walk up the tree: at each ActorHandler ancestor, optionally collect
    # sibling handlers that precede `child`, then the ancestor itself.
    while parent is not None:
        if isinstance(parent, ActorHandler):
            handler = parent
            if include_same_level:
                # Scan siblings before `child` in reverse (nearest first).
                index = handler.index(child.name)
                for i in range(index - 1, -1, -1):
                    sub_handler = None
                    # TODO external flows
                    if isinstance(handler.actors[i], ActorHandler):
                        sub_handler = handler.actors[i]
                    if sub_handler is None:
                        continue
                    if must_allow_standalones:
                        if sub_handler.actor_handler_info.can_contain_standalones:
                            result.append(sub_handler)
                    else:
                        result.append(sub_handler)
            if must_allow_standalones:
                if handler.actor_handler_info.can_contain_standalones:
                    result.append(handler)
            else:
                result.append(handler)
        # Stop once the root has been processed; otherwise climb one level.
        if parent == root:
            parent = None
        else:
            child = parent
            parent = parent.parent
    return result
def _find_closest_type(handler, cls):
    """
    Return the first direct child of *handler* that is an instance of *cls*.

    :param handler: the actor handler to search
    :type handler: ActorHandler
    :param cls: the type of actor to look for
    :type cls: type
    :return: the located actor or None if none found
    :rtype: Actor
    """
    # TODO external actors
    return next((child for child in handler.actors if isinstance(child, cls)), None)
def find_closest_type(actor, cls, include_same_level=False):
    """
    Find the nearest actor of type *cls* in the tree, starting at *actor*.

    Walks the handlers returned by find_actor_handlers (nearest first); a
    handler that is itself a *cls* wins, otherwise its direct children are
    searched.

    :param actor: the starting point
    :type actor: Actor
    :param cls: the type to look for
    :type cls: type
    :param include_same_level: whether to look on the same level or higher up
    :type include_same_level: bool
    :return: the located actor or None if none found
    :rtype: Actor
    """
    for handler in find_actor_handlers(actor, True, include_same_level=include_same_level):
        if isinstance(handler, cls):
            return handler
        match = _find_closest_type(handler, cls)
        if match is not None:
            return match
    return None
| StarcoderdataPython |
3460008 | <reponame>keflavich/TurbuStat
"""
From <NAME>'s AG_fft_tools:
Copyright (c) 2009 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy
try:
import matplotlib.pyplot as pyplot
pyplotOK = True
except ImportError:
pyplotOK = False
from radialProfile import azimuthalAverageBins,radialAverageBins
def hanning2d(M, N):
    """
    A 2D hanning window, as per IDL's hanning function. See numpy.hanning
    for the 1d description. Degenerates to the 1D window when either
    dimension is too small to window.
    """
    # Guard clauses replace the if/elif/else chain.
    if N <= 1:
        return numpy.hanning(M)
    if M <= 1:
        return numpy.hanning(N)
    return numpy.outer(numpy.hanning(M), numpy.hanning(N))
def power_spectrum(*args, **kwargs):
    """Thin wrapper of PSD2: returns the 1D power spectrum instead of the
    2D Power Spectral Density by forcing oned=True."""
    return PSD2(*args, **dict(kwargs, oned=True))
def PSD2(image, image2=None, oned=False,
         fft_pad=False, real=False, imag=False,
         binsize=1.0, radbins=1, azbins=1, radial=False, hanning=False,
         wavnum_scale=False, twopi_scale=False, **kwargs):
    """
    Two-dimensional Power Spectral Density.
    NAN values are treated as zero.

    image2 - can specify a second image if you want to see the cross-power-spectrum instead of the
        power spectrum.
    oned - return radial profile of 2D PSD (i.e. mean power as a function of spatial frequency)
        freq,zz = PSD2(image); plot(freq,zz) is a power spectrum
    fft_pad - Add zeros to the edge of the image before FFTing for a speed
        boost?  (the edge padding will be removed afterwards)
    real - Only compute the real part of the PSD (Default is absolute value)
    imag - Only compute the complex part of the PSD (Default is absolute value)
    hanning - Multiply the image to be PSD'd by a 2D Hanning window before performing the FTs.
        Reduces edge effects.  This idea courtesy Paul Ricchiazzia (May 1993), author of the
        IDL astrolib psd.pro
    wavnum_scale - multiply the FFT^2 by the wavenumber when computing the PSD?
    twopi_scale - multiply the FFT^2 by 2pi?
    azbins - Number of azimuthal (angular) bins to include.  Default is 1, or
        all 360 degrees.  If azbins>1, the data will be split into [azbins]
        equally sized pie pieces.  Azbins can also be a numpy array.  See
        AG_image_tools.azimuthalAverageBins for details
    radial - An option to return the *azimuthal* power spectrum (i.e., the spectral power as a function
        of angle).  Not commonly used.
    radbins - number of radial bins (you can compute the azimuthal power spectrum in different annuli)
    """
    # prevent modification of input image (i.e., the next two lines of active code)
    image = image.copy()
    # remove NANs (but not inf's); x != x is True only for NaN
    image[image != image] = 0
    if hanning:
        image = hanning2d(*image.shape) * image
    if image2 is None:
        # auto-power spectrum: correlate the image with itself
        image2 = image
    else:
        image2 = image2.copy()
        image2[image2 != image2] = 0
        if hanning:
            image2 = hanning2d(*image2.shape) * image2
    if real:
        psd2 = numpy.real(correlate2d(image, image2, return_fft=True, fft_pad=fft_pad))
    elif imag:
        psd2 = numpy.imag(correlate2d(image, image2, return_fft=True, fft_pad=fft_pad))
    else:  # default is absolute value
        psd2 = numpy.abs(correlate2d(image, image2, return_fft=True, fft_pad=fft_pad))
    # normalization is approximately (numpy.abs(image).sum()*numpy.abs(image2).sum())
    if wavnum_scale:
        # Build a normalized radial wavenumber map and weight the PSD by it.
        wx = numpy.concatenate([numpy.arange(image.shape[0]/2, dtype='float'), image.shape[0]/2 - numpy.arange(image.shape[0]/2, dtype='float') - 1]) / (image.shape[0]/2.)
        wy = numpy.concatenate([numpy.arange(image.shape[1]/2, dtype='float'), image.shape[1]/2 - numpy.arange(image.shape[1]/2, dtype='float') - 1]) / (image.shape[1]/2.)
        wx /= wx.max()
        wy /= wy.max()
        # NOTE(review): the second outer() term uses wx where wy looks
        # intended -- confirm against the upstream AG_fft_tools source.
        wavnum = numpy.sqrt(numpy.outer(wx, numpy.ones(wx.shape))**2 + numpy.outer(numpy.ones(wy.shape), wx)**2)
        psd2 *= wavnum
    if twopi_scale:
        psd2 *= numpy.pi * 2
    if radial:
        # azimuthal power spectrum: power as a function of angle, per annulus
        azbins, az, zz = radialAverageBins(psd2, radbins=radbins, interpnan=True, binsize=binsize, **kwargs)
        if len(zz) == 1:
            return az, zz[0]
        else:
            return az, zz
    if oned:
        return pspec(psd2, azbins=azbins, binsize=binsize, **kwargs)
    # else...
    return psd2
def pspec(psd2, return_index=True, wavenumber=False, return_stddev=False, azbins=1, binsize=1.0, view=False, **kwargs):
    """
    Create a Power Spectrum (radial profile of a PSD) from a Power Spectral Density image

    return_index - if true, the first return item will be the indexes
    wavenumber - if one dimensional and return_index set, will return a normalized wavenumber instead
    view - Plot the PSD (in logspace)?
    """
    #freq = 1 + numpy.arange( numpy.floor( numpy.sqrt((image.shape[0]/2)**2+(image.shape[1]/2)**2) ) )
    azbins, (freq, zz) = azimuthalAverageBins(psd2, azbins=azbins, interpnan=True, binsize=binsize, **kwargs)
    if len(zz) == 1:
        zz = zz[0]  # collapse the single-azimuthal-bin case to a 1D array
    # the "Frequency" is the spatial frequency f = 1/x for the standard numpy fft, which follows the convention
    # A_k =  \sum_{m=0}^{n-1} a_m \exp\left\{-2\pi i{mk \over n}\right\}
    # or
    # F_f = Sum( a_m e^(-2 pi i f x_m)  over the range m,m_max where a_m are the values of the pixels, x_m are the
    # indices of the pixels, and f is the spatial frequency
    freq = freq.astype('float')  # there was a +1.0 here before, presumably to deal with div-by-0, but that shouldn't happen and shouldn't have been "accounted for" anyway
    if return_index:
        if wavenumber:
            # normalized wavenumber axis derived from the FFT frequency grid
            fftwavenum = (numpy.fft.fftfreq(zz.size*2)[:zz.size])
            return_vals = list((fftwavenum, zz))
            #return_vals = list((len(freq)/freq,zz))
        else:
            return_vals = list((freq, zz))
            # return_vals = list((freq/len(freq),zz))
    else:
        return_vals = list(zz)
    if return_stddev:
        # per-bin standard deviation appended as an extra return element
        zzstd = azimuthalAverageBins(psd2, azbins=azbins, stddev=True, interpnan=True, binsize=binsize, **kwargs)
        return_vals.append(zzstd)
    if view and pyplotOK:
        pyplot.loglog(freq, zz)
        pyplot.xlabel("Spatial Frequency")
        pyplot.ylabel("Spectral Power")
    return return_vals
####################################################################################
def correlate2d(im1, im2, boundary='wrap', **kwargs):
    """
    Cross-correlation of two images of arbitrary size.  Returns an image
    cropped to the largest of each dimension of the input images

    Options
    -------
    return_fft - if true, return fft(im1)*fft(im2[::-1,::-1]), which is the power
        spectral density
    fftshift - if true, return the shifted psd so that the DC component is in
        the center of the image
    pad - Default on.  Zero-pad image to the nearest 2^n
    crop - Default on.  Return an image of the size of the largest input image.
        If the images are asymmetric in opposite directions, will return the largest
        image in both directions.
    boundary: str, optional
        A flag indicating how to handle boundaries:
            * 'fill' : set values outside the array boundary to fill_value
              (default)
            * 'wrap' : periodic boundary

    WARNING: Normalization may be arbitrary if you use the PSD
    """
    # Fixed: convolve lives in astropy.convolution (the old import path
    # "astropy.convolve" does not exist), and this module imports numpy as
    # "numpy", so the previous "np.conjugate" raised NameError at call time.
    from astropy.convolution import convolve
    # NOTE(review): ignore_edge_zeros was removed from modern astropy's
    # convolve -- this call targets the old API; confirm the pinned version.
    return convolve(numpy.conjugate(im1), im2[::-1, ::-1], normalize_kernel=False,
                    boundary=boundary, ignore_edge_zeros=False, **kwargs)
337203 | <filename>dependents/imageloads.py<gh_stars>0
# This file will take care of loading all of the image files in the game
import pyglet, os
# ********Functions*********
#This function returns a list of the png images pyglet image file.
#This function returns a list of the png images pyglet image file.
def image_compiler(path):
    """Load tile000.png .. tile099-style frames from *path*.

    Returns a list of pyglet texture regions packed into one TextureBin,
    each with its anchor centred. Missing/unloadable tiles are silently
    skipped by the bare except below.
    """
    i = 0  #This is the index number for each image
    imageList = []
    while i <= 99:
        try:  #Just go until out of bounds
            #This is to make format like this: tile001.png ... tile052.png and so on...
            number = str(i)
            numOfZeros = 3 - len(number)
            number = (numOfZeros * '0') + number
            pathName = 'tile' + number + '.png'
            i += 1  # increment happens before the load, so failures still advance
            image = pyglet.image.load(os.path.join(path, pathName))
            imageList.append(image)
        except:
            # nothing here -- missing tiles are expected and skipped
            pass
    #For efficiency: pack all frames into a single texture atlas.
    # NOTE(review): `bin` shadows the builtin of the same name.
    bin = pyglet.image.atlas.TextureBin()
    imageList = [bin.add(image) for image in imageList]
    # Centre the anchor of every frame so sprites rotate/position about
    # their middle.
    for image in imageList:
        image.anchor_x = image.width // 2
        image.anchor_y = image.height // 2
    return imageList
# Some info:: DataFrame = [Durration, DownFrames, UpFrames, RightFrames, LeftFrames]]
def animation_maker(images, DataFrame = [], isDyingFrame = 0, loop=True):
animationList = []
if isDyingFrame == 0:
duration = DataFrame[0]
for i in range(1,4):
(start,end) = DataFrame[i]
currentFrames = images[start:end+1]
ani = pyglet.image.Animation.from_image_sequence(currentFrames, duration=duration, loop=loop)
animationList.append(ani)
else:
#Use isDyingFrame as the duration for the DyingFrame
duration = isDyingFrame
ani = pyglet.image.Animation.from_image_sequence(images, duration=duration, loop=loop)
animationList.append(ani)
return animationList
# ********Paths*********
# Image directory (resources/images relative to this file's parent package)
dirname = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources', 'images'))
# Gatherer
gathererHandsPath = os.path.join(dirname, "imp1", "imp_hands")
gathererHandsPathAttack = os.path.join(gathererHandsPath, "attack")
gathererHandsPathWalk = os.path.join(gathererHandsPath, "walk")
gathererHandsPathdie = os.path.join(gathererHandsPath, "die")
gathererSpearPath = os.path.join(dirname, "imp1", "imp_spear")
# Fixed copy-paste bug: these three previously joined against
# gathererHandsPath, so the spear variants loaded the hands sprites.
gathererSpearPathAttack = os.path.join(gathererSpearPath, "attack")
gathererSpearPathWalk = os.path.join(gathererSpearPath, "walk")
gathererSpearPathdie = os.path.join(gathererSpearPath, "die")
# Builder
BuilderHandsPath = os.path.join(dirname, "golem")
BuilderHandsPathAttack = os.path.join(BuilderHandsPath, "attack")
BuilderHandsPathWalk = os.path.join(BuilderHandsPath, "walk")
BuilderHandsPathdie = os.path.join(BuilderHandsPath, "die")
# Warrior
WarriorHandsPath = os.path.join(dirname, "goblin", "hand")
WarriorHandsPathAttack = os.path.join(WarriorHandsPath, "attack")
WarriorHandsPathWalk = os.path.join(WarriorHandsPath, "walk")
WarriorHandsPathdie = os.path.join(WarriorHandsPath, "die")
WarriorSwordPath = os.path.join(dirname, "goblin", "sword")
WarriorSwordPathAttack = os.path.join(WarriorSwordPath, "attack")
WarriorSwordPathWalk = os.path.join(WarriorSwordPath, "walk")
WarriorSwordPathdie = os.path.join(WarriorSwordPath, "die")
# Projectiles
ArcaneBolt_path = os.path.join(dirname, "Arcane_Effect", "bestone")
# FireExplosion
FireExplosion_path = os.path.join(dirname, "FireExplosion")
# ********Compiled Image File Names*********
# List Formats
# DataFrame = [Durration, DownFrames, UpFrames, RightFrames, LeftFrames]
# FinishedImageAniData_XxxXxx = [[StillImages], [AttackAnimation], [WalkAnimation], [DieAnimation]]
# Gatherer
# Hands
GathererDataFrame = [0.2, (0,3), (4,7), (8,11), (12,15)]
gathererHandsAttackImages = image_compiler(gathererHandsPathAttack)
gathererHandsWalkImages = image_compiler(gathererHandsPathWalk)
gathererHandsDieImages = image_compiler(gathererHandsPathdie)
# Still frames: the first frame of each directional walk cycle.
gathererHandsstillImages = [gathererHandsWalkImages[0], gathererHandsWalkImages[4], gathererHandsWalkImages[8], gathererHandsWalkImages[12]]
gathererHandsAttackAnimation = animation_maker(gathererHandsAttackImages ,GathererDataFrame)
gathererHandsWalkAnimation = animation_maker(gathererHandsWalkImages ,GathererDataFrame)
gathererHandsDieAnimation = animation_maker(gathererHandsDieImages ,GathererDataFrame, isDyingFrame = 0.2)
FinishedImageAniData_gathererHands = [gathererHandsstillImages] + [gathererHandsAttackAnimation] + [gathererHandsWalkAnimation] + [gathererHandsDieAnimation]
# Spear
gathererSpearAttackImages = image_compiler(gathererSpearPathAttack)
gathererSpearWalkImages = image_compiler(gathererSpearPathWalk)
gathererSpearDieImages = image_compiler(gathererSpearPathdie)
gathererSpearstillImages = [gathererSpearWalkImages[0],gathererSpearWalkImages[4],gathererSpearWalkImages[8],gathererSpearWalkImages[12]]
gathererSpearAttackAnimation = animation_maker(gathererSpearAttackImages ,GathererDataFrame)
gathererSpearWalkAnimation = animation_maker(gathererSpearWalkImages ,GathererDataFrame)
gathererSpearDieAnimation = animation_maker(gathererSpearDieImages ,GathererDataFrame, isDyingFrame = 0.2)
# Bug fix: this assignment previously reused the name
# FinishedImageAniData_gathererHands, clobbering the hands data built above.
FinishedImageAniData_gathererSpear = [gathererSpearstillImages] + [gathererSpearAttackAnimation] + [gathererSpearWalkAnimation] + [gathererSpearDieAnimation]
# Builder
# Hands
BuilderDataFrame = [0.2, (14,20), (0,6), (21,27), (7,13)]
BuilderHandsAttackImages = image_compiler(BuilderHandsPathAttack)
BuilderHandsWalkImages = image_compiler(BuilderHandsPathWalk)
BuilderHandsDieImages = image_compiler(BuilderHandsPathdie)
BuilderHandsstillImages = [BuilderHandsAttackImages[14],BuilderHandsAttackImages[0],BuilderHandsAttackImages[21],BuilderHandsAttackImages[7]]
# NOTE(review): unlike Gatherer/Warrior, no animations or
# FinishedImageAniData_* list are built for the Builder -- confirm whether
# that is intentional.
# Warrior
# Hands
WarriorDataFrame = [0.2, (0,4), (10,14), (5,9), (15,19)]
WarriorHandsAttackImages = image_compiler(WarriorHandsPathAttack)
WarriorHandsWalkImages = image_compiler(WarriorHandsPathWalk)
WarriorHandsDieImages = image_compiler(WarriorHandsPathdie)
WarriorHandsstillImages = [WarriorHandsAttackImages[0],WarriorHandsAttackImages[10],WarriorHandsAttackImages[5],WarriorHandsAttackImages[15]]
# Sword
WarriorSwordAttackImages = image_compiler(WarriorSwordPathAttack)
WarriorSwordWalkImages = image_compiler(WarriorSwordPathWalk)
WarriorSwordDieImages = image_compiler(WarriorSwordPathdie)
# Bug fix: this assignment previously reused the name WarriorHandsstillImages,
# discarding the hands still images built just above.
WarriorSwordstillImages = [WarriorSwordAttackImages[0],WarriorSwordAttackImages[10],WarriorSwordAttackImages[5],WarriorSwordAttackImages[15]]
# Projectiles
#Arcane_bolt
ArcaneBolt_Images = image_compiler(ArcaneBolt_path)
ArcaneBolt_Animation = animation_maker(ArcaneBolt_Images, isDyingFrame = 0.1)
#FireExplosion
FireExplosion_Images = image_compiler(FireExplosion_path)
FireExplosion_Animation = animation_maker(FireExplosion_Images, isDyingFrame = 0.01, loop = False)
# To print the Image files
if __name__ == '__main__':
    pass
| StarcoderdataPython |
1701990 | <gh_stars>0
"""
Users
Equipment
"""
class Users:
    """Registry of users keyed by barcode, with a teaching-assistant subset."""

    def __init__(self):
        self.users = {}                  # barcode -> User
        self.teaching_assistants = {}    # barcode -> User (subset of users)

    def insert(self, user):
        """Register *user*; also track it as a TA when user.ta == 'yes'."""
        self.users[user.barcode] = user
        if user.ta == 'yes':
            self.teaching_assistants[user.barcode] = user

    def get_user(self, barcode):
        """Return the User registered under *barcode*, or None."""
        return self.users.get(barcode)

    def barcode_registered(self, barcode):
        """Return True if *barcode* belongs to a registered user."""
        return barcode in self.users

    def is_registered(self, user):
        """Return True if *user* has been inserted into the registry."""
        # Bug fix: this called the non-existent self.barcodeRegistered,
        # which raised AttributeError on every call.
        return self.barcode_registered(user.barcode)

    def is_TA(self, user):
        """Return True if *user* is a teaching assistant."""
        return user.barcode in self.teaching_assistants
class User:
    """One registered user, built from the first six columns of a CSV row."""

    def __init__(self, row):
        # Map each positional column onto its named attribute.
        field_names = ("barcode", "pid", "first_name", "last_name",
                       "class_name", "ta")
        for column, name in enumerate(field_names):
            setattr(self, name, row[column])
class Equipment:
    """One equipment check-out record, built from a spreadsheet row."""

    def __init__(self, row):
        self.date = row[0]
        self.first_name = row[1]
        self.last_name = row[2]
        self.pid = row[3]
        self.class_name = row[4]
        self.laptop_num = row[5]
        self.mouse_num = row[6]
        self.ta_sign = row[7]
        self.checked_in = row[8]
        # NOTE(review): this second assignment immediately overwrites the
        # row[8] value above; one of the two was probably meant to be a
        # different attribute (e.g. checked_out) -- confirm against the
        # spreadsheet's column layout.
        self.checked_in = row[9]
| StarcoderdataPython |
144218 | <gh_stars>1-10
"""
ABC Class of any Experiment Matrix Generating Module
"""
from abc import ABC, abstractmethod
from typing import Any, Dict
import numpy as np
class _Generator(ABC):
"""
ABC Class of Experiment Matrix Generator
Method
------
get_exmatrix(**info: Dict[str, Any]) -> np.ndarray
get_alias_matrix(max_dim: int) -> np.ndarray
"""
@abstractmethod
def __init__(self, **kwargs: Dict[str, Any]):
"""
Parameters
----------
kwargs: Dict[str, Any]
generate method options of each Generator
"""
pass
@abstractmethod
def get_exmatrix(self, **info: Dict[str, Any]) -> np.ndarray:
"""
Generate Experiment Matrix
Parameters
----------
info: Dict[str, Any]
it is expected to contain following info
1. n_factor: int
number of factors you use in this experiment
2. n_level: int
number of levels, requires every factor has the same level.
3. mode: str default = "mode"
mode of factor, 'cont' or 'cat' or ''
requires every factor is under the same mode.
Return
------
exmatrix: np.ndarray
Experiment Matrix (n_experiment x n_factor)
"""
pass
| StarcoderdataPython |
# Expected parser output: ISIS tag "1" is configured with flex-algo 131.
expected_output = {"tag": {"1": {"flex-algo": 131}}}
11324764 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-15 20:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the campaign_image_url field to the Campaign model.

    Auto-generated by Django 1.9.1. The default below is only used to
    back-fill existing rows; preserve_default=False drops it from the
    model afterwards.
    """

    dependencies = [
        ('crowdcop_web', '0002_campaign_num_tips'),
    ]

    operations = [
        migrations.AddField(
            model_name='campaign',
            name='campaign_image_url',
            # NOTE(review): this default points at a Wikimedia *page*, not a
            # direct image URL -- confirm templates expect that.
            field=models.TextField(default='https://commons.wikimedia.org/wiki/File:Pup2.JPG'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
6620621 | <filename>eveil/map.py
# Copyright (C) 2018 <NAME>
# <pierrejean dot fichet at posteo dot net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# This submodule implements a directed graph.
# The path algorithm comes from: https://www.python.org/doc/essays/graphs/
# Some other ideas come from:
# https://triangleinequality.wordpress.com/2013/08/21/graphs-as-objects-in-python/
from .template import Template
from .item import Container
from .message import pose, expose_format, info
from datetime import datetime, timedelta
class Map():
    """A directed graph of rooms (nodes) connected by links (edges).

    Room and Link instances should be created through new_room() and
    new_link() so the map keeps a consistent view of the graph.
    """

    def __init__(self, game):
        self.game = game
        self.rooms = []      # all Room nodes
        self.links = []      # all Link edges
        self.linklists = []  # plain [source, target] pairs for duplicate checks

    def new_room(self, region, uid):
        """Create a Room, register it on the map, and return it."""
        room = Room(self.game, region, uid)
        self.rooms.append(room)
        return room

    def new_link(self, source, target):
        """Create a directed Link from source to target and return it.

        Logs and returns None if such a link already exists.
        """
        # First, our link is a simple list
        link = [source, target]
        if link in self.linklists:
            self.game.log("There is yet a link from room {} to room {}."
                          .format(source.shortdesc, target.shortdesc)
                          )
            return
        self.linklists.append(link)
        # Now, we create a true Link instance
        link = Link(source, target)
        self.links.append(link)
        source.add_link(link)
        return link

    def path(self, source, target, path=None):
        """Return the shortest path from source to target as a list of rooms
        (both endpoints included), or None if target is unreachable.

        Comes from: https://www.python.org/doc/essays/graphs/
        """
        # Idiom fix: the default was the mutable `path=[]`; it was never
        # mutated in place, but None is the safe Python idiom.
        if path is None:
            path = []
        path = path + [source]
        if source == target:
            return path
        if source not in self.rooms:
            return None
        shortest = None
        for room in source.get_targets():
            if room not in path:
                newpath = self.path(room, target, path)
                if newpath:
                    if not shortest or len(newpath) < len(shortest):
                        shortest = newpath
        return shortest
class Link():
    """ An unidirectional link between two rooms."""

    def __init__(self, source, target):
        self.rooms = [source, target]
        self.source = source
        self.target = target
        # pose_leave and pose_enter define the characters'
        # automated poses when they leave a room and
        # enter another.
        # self.pose_move = None
        # self.pose_leave = None
        # self.pose_enter = None
        self.door = None

    def move(self, character):
        """Move *character* along this link: announce the departure now and
        queue the actual room change on the character's action queue."""
        self.leave(character)
        character.queue.add(self.enter, character)

    def leave(self, character):
        """Pose, in the source room, that the character heads for the target."""
        pose(character, "/Il se dirige vers {}."
             .format(self.target.shortdesc))

    def enter(self, character):
        """Complete the queued move: pose the departure in the source room,
        transfer the character to the target room, then pose the arrival."""
        pose(character, "/Il quitte les environs en rejoignant {}."
             .format(self.target.shortdesc))
        self.source.del_character(character)
        self.target.send_longdesc(character)
        self.target.add_character(character)
        character.set_room(self.target)
        pose(character, "/Il arrive par ici depuis {}."
             .format(self.source.shortdesc))
class Door():
    """State flags for a door sitting on a link between two rooms.

    A freshly created door is open, cannot be closed, and has no lock.
    """

    def __init__(self):
        # Opening/closing state and the capability governing it.
        self.is_opened = True
        self.can_close = False
        self.is_closed = False
        # Locking state; `key` holds the item that unlocks this door.
        self.can_lock = False
        self.is_locked = False
        self.key = None
class Room():
    """A node of the Map graph: a place characters occupy and act in.

    Persistent state (currently only the item container id) is stored in
    the game database under ('room', '<region>_<uid>').
    """

    # Sentinel meaning "no RP scheduled", and the window during which a room
    # hosting several characters stays RP-active.
    NEVER = datetime(2000, 1, 1)
    RPDELTA = timedelta(minutes=15)

    def __init__(self, game, region, uid):
        self.game = game
        self.region = region
        self.uid = region + '_' + uid
        if self.game.db.has('room', self.uid):
            # Room already persisted: reuse its stored container.
            self.data = self.game.db.get('room', self.uid)
            self.container = Container(self.game, self.data['container'])
        else:
            # First sighting: create a container and persist the room record.
            self.data = {
                'container': self.game.db.uid()
            }
            self.container = Container(self.game, self.data['container'])
            self.container.set_volume(10000)
            self.game.db.put('room', self.uid, self.data)
        self.shortdesc = None
        self.longdesc = None
        self.links = []      # every Link touching this room
        self.sources = []    # incoming links (this room is the target)
        self.targets = []    # outgoing links (this room is the source)
        self.characters = []
        self.next_rp = Room.NEVER

    def short(self, text):
        """Sets the short description (title)."""
        self.shortdesc = text

    def long(self, text, dictionary=None):
        """Sets the long description template.

        Args:
            text: body of the description (template markup allowed).
            dictionary: optional extra template substitutions.
        """
        # Idiom fix: `dictionary={}` was a mutable default argument; use the
        # None idiom and build a fresh dict per call instead.
        if dictionary is None:
            dictionary = {}
        self.longdesc = Template(
            "<h3>{{room.shortdesc}}</h3>"
            + text
            #+ "<p>{{list_char}}</p>",
            + "<p>{{list_item}}</p><p>{{list_char}}</p>",
            dictionary)

    def add_link(self, link):
        """Register *link* on this room, sorting it into sources/targets."""
        if self in link.rooms and link not in self.links:
            self.links.append(link)
            if link.source == self:
                self.targets.append(link)
            else:
                self.sources.append(link)

    def get_sources(self):
        """ Return the list of rooms from which one can come here."""
        # Bug fix: this iterated self.links, which also holds *outgoing*
        # links whose source is this very room; iterate the incoming links
        # only, mirroring get_targets().
        return [link.source for link in self.sources]

    def get_targets(self):
        """ Return the list of rooms one can go to from here."""
        return [link.target for link in self.targets]

    def get_target_link(self, target):
        """ Return the link leading to the target room."""
        for link in self.targets:
            if link.target == target:
                return link

    def get_source_link(self, source):
        """ Return the link by which one can come from the source room."""
        for link in self.sources:
            if link.source == source:
                return link

    def add_character(self, character):
        """ Add a character to the room."""
        self.characters.append(character)
        # Give a chance to players to have RP, even if they're not
        # exposing yet.
        if len(self.characters) > 1:
            self.next_rp = datetime.now() + Room.RPDELTA

    def del_character(self, character):
        """Removes a character from the room."""
        self.characters.remove(character)
        if len(self.characters) < 2:
            # A character alone is not having RP.
            self.next_rp = Room.NEVER

    def send_longdesc(self, character):
        """ Send the rendered long description to a character."""
        list_char = ", ".join(
            [expose_format(char, character, char.data['pose'])
             for char in self.characters])
        if list_char:
            list_char += "."
        list_item = ""
        if self.container.items:
            list_item = ", ".join([item.data['roomdesc']
                                   for item in self.container.items])
            list_item = list_item.capitalize() + "."
        character.player.client.send(self.longdesc.render({
            "character": character,
            "room": self,
            "list_char": list_char,
            "list_item": list_item,
        }))

    def move(self, character, word):
        """ Move a character to the adjacent room whose title contains *word*. """
        for link in self.targets:
            if word in link.target.shortdesc:
                link.move(character)
                return
        info(character.player, "Aller où?")

    def rp(self):
        """ Update the RP status of the room."""
        if len(self.characters) > 1:
            self.next_rp = datetime.now() + Room.RPDELTA

    def has_rp(self, now):
        """ Check if the room is RP active at time *now*."""
        # self.del_character() takes care a lonely character
        # won't have RP.
        return bool(self.next_rp >= now)
| StarcoderdataPython |
11360116 | <filename>docs/plots/peak_detection_baseline_example.py
import tidyms as ms
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1234)

signal_height = 100
snr = 10
n_col = 4
x = np.arange(200)
noise_level = signal_height / snr
noise = np.random.normal(size=x.size, scale=noise_level)

fig, ax = plt.subplots(nrows=3, ncols=n_col, figsize=(12, 12), sharex=True,
                       sharey=True)


def _detect_and_plot(axis, y):
    """Estimate noise and baseline on *y*, detect peaks, and draw the signal,
    the estimated baseline and the shaded peak regions onto *axis*."""
    noise_estimation = ms.peaks.estimate_noise(y)
    # Light smoothing before baseline estimation and peak detection.
    ys = ms.lcms.gaussian_filter1d(y, 1)
    baseline_estimation = ms.peaks.estimate_baseline(ys, noise_estimation)
    peaks = ms.peaks.detect_peaks(ys, noise_estimation, baseline_estimation)
    axis.plot(x, y)
    axis.plot(x, baseline_estimation)
    for p in peaks:
        axis.fill_between(x[p.start:p.end + 1],
                          baseline_estimation[p.start:p.end + 1],
                          y[p.start:p.end + 1], alpha=0.25)


# first row: one peak, different baselines
row = 0
baselines = [4, ms.utils.gauss(x, 100, 40, 20), x ** 2 * 0.002,
             np.sin(x * np.pi / 400) * 50]
for col in range(n_col):
    signal = ms.utils.gauss(x, 100, 3, signal_height)
    # Bug fix: the per-column baseline was defined but never added here, so
    # all four panels of this row showed the same baseline-free signal.
    y = signal + baselines[col] + noise
    _detect_and_plot(ax[row, col], y)

# second row: two peaks, same baselines as first row
row = 1
for col in range(n_col):
    gaussian_params = np.array([[100, 3, signal_height],
                                [110, 3, signal_height]])
    signal = ms.utils.gaussian_mixture(x, gaussian_params).sum(axis=0)
    y = signal + baselines[col] + noise
    _detect_and_plot(ax[row, col], y)

# third row: different peak widths (constant baseline)
row = 2
widths = [3, 5, 7, 10]
for col in range(n_col):
    w = widths[col]
    signal = ms.utils.gauss(x, 100, w, signal_height)
    y = signal + baselines[0] + noise
    _detect_and_plot(ax[row, col], y)
3425504 | <reponame>Ron423c/chromium<filename>third_party/blink/tools/blinkpy/tool/commands/rebaseline.py
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR/ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import json
import logging
import optparse
import re
from blinkpy.common.path_finder import WEB_TESTS_LAST_COMPONENT
from blinkpy.common.memoized import memoized
from blinkpy.common.net.results_fetcher import Build
from blinkpy.tool.commands.command import Command
from blinkpy.web_tests.models import test_failures
from blinkpy.web_tests.models.test_expectations import SystemConfigurationRemover, TestExpectations
from blinkpy.web_tests.port import base, factory
# Module-level logger for rebaseline commands.
_log = logging.getLogger(__name__)
# For CLI compatibility, we would like a list of baseline extensions without
# the leading dot.
# TODO(robertma): Investigate changing the CLI.
BASELINE_SUFFIX_LIST = tuple(ext[1:] for ext in base.Port.BASELINE_EXTENSIONS)
class AbstractRebaseliningCommand(Command):
    """Base class for rebaseline-related commands.

    Holds the shared optparse option definitions and baseline file-name
    helpers used by the concrete rebaseline commands.
    """
    # Not overriding execute() - pylint: disable=abstract-method

    # Generic option groups (list of options):
    platform_options = factory.platform_options(use_globs=True)
    wpt_options = factory.wpt_options()

    no_optimize_option = optparse.make_option(
        '--no-optimize',
        dest='optimize',
        action='store_false',
        default=True,
        help=
        ('Do not optimize (de-duplicate) the expectations after rebaselining '
         '(default is to de-dupe automatically). You can use "blink_tool.py '
         'optimize-baselines" to optimize separately.'))
    results_directory_option = optparse.make_option(
        '--results-directory', help='Local results directory to use.')
    suffixes_option = optparse.make_option(
        '--suffixes',
        default=','.join(BASELINE_SUFFIX_LIST),
        action='store',
        help='Comma-separated-list of file types to rebaseline.')
    builder_option = optparse.make_option(
        '--builder',
        help=('Name of the builder to pull new baselines from, '
              'e.g. "WebKit Mac10.12".'))
    port_name_option = optparse.make_option(
        '--port-name',
        help=('Fully-qualified name of the port that new baselines belong to, '
              'e.g. "mac-mac10.12". If not given, this is determined based on '
              '--builder.'))
    test_option = optparse.make_option('--test', help='Test to rebaseline.')
    build_number_option = optparse.make_option(
        '--build-number',
        default=None,
        type='int',
        help='Optional build number; if not given, the latest build is used.')
    step_name_option = optparse.make_option(
        '--step-name',
        help=('Name of the step which ran the actual tests, and which '
              'should be used to retrieve results from.'))
    flag_specific_option = optparse.make_option(
        '--flag-specific',
        default=None,
        action='store',
        help=(
            'Name of a flag-specific configuration defined in '
            'FlagSpecificConfig. This option will rebaseline '
            'results for the given FlagSpecificConfig while ignoring results '
            'from other builders. Highdpi is the only suported config '
            'at this time.'))

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
        # Accumulates expectation lines to remove across sub-commands.
        self.expectation_line_changes = ChangeSet()
        # Set by callers before execution; holds the Host/tool object.
        self._tool = None

    def baseline_directory(self, builder_name):
        """Returns the baseline directory of the port for *builder_name*."""
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        return port.baseline_version_dir()

    @property
    def _host_port(self):
        # Default (host platform) port, used for output file naming.
        return self._tool.port_factory.get()

    def _file_name_for_actual_result(self, test_name, suffix):
        """Returns the *-actual.<suffix> file name for *test_name*."""
        # output_filename takes extensions starting with '.'.
        return self._host_port.output_filename(
            test_name, test_failures.FILENAME_SUFFIX_ACTUAL, '.' + suffix)

    def _file_name_for_expected_result(self, test_name, suffix, is_wpt=False):
        """Returns the *-expected.<suffix> file name for *test_name*.

        For WPT tests, returns the <wpt test file>.ini metadata name instead.
        """
        if is_wpt:
            # *-actual.txt produced by wptrunner are actually manifest files
            # that can make the test pass if renamed to *.ini.
            # WPT bots do not include "external/wpt" in test names.
            file_name = self._host_port.get_file_path_for_wpt_test(
                'external/wpt/' + test_name)
            assert file_name, ('Cannot find %s in WPT' % test_name)
            return file_name + '.ini'
        # output_filename takes extensions starting with '.'.
        return self._host_port.output_filename(
            test_name, test_failures.FILENAME_SUFFIX_EXPECTED, '.' + suffix)
class ChangeSet(object):
    """Records TestExpectations lines to remove, keyed by test name.

    Note: This class is probably more complicated than necessary; it is
    mainly used to track the list of lines that we want to remove from
    TestExpectations.
    """

    def __init__(self, lines_to_remove=None):
        self.lines_to_remove = lines_to_remove or {}

    def remove_line(self, test, port_name):
        """Record that the expectation for (test, port_name) should go."""
        self.lines_to_remove.setdefault(test, []).append(port_name)

    def to_dict(self):
        """Serialize as {'remove-lines': [{'test': ..., 'port_name': ...}]}."""
        remove_lines = [
            {'test': test, 'port_name': port_name}
            for test, port_names in self.lines_to_remove.items()
            for port_name in port_names
        ]
        return {'remove-lines': remove_lines}

    @staticmethod
    def from_dict(change_dict):
        """Inverse of to_dict(); unknown keys are ignored."""
        change_set = ChangeSet()
        for entry in change_dict.get('remove-lines', []):
            change_set.remove_line(entry['test'], entry['port_name'])
        return change_set

    def update(self, other):
        """Merge another ChangeSet's lines-to-remove into this one."""
        assert isinstance(other, ChangeSet)
        assert isinstance(other.lines_to_remove, dict)
        for test, port_names in other.lines_to_remove.items():
            self.lines_to_remove.setdefault(test, []).extend(port_names)
class TestBaselineSet(object):
    """Represents a collection of tests and platforms that can be rebaselined.

    A TestBaselineSet specifies tests to rebaseline along with information
    about where to fetch the baselines from.
    """

    def __init__(self, host, prefix_mode=True):
        """Args:
            host: A Host object.
            prefix_mode: (Optional, default to True) Whether the collection
                contains test prefixes or specific tests.
        """
        self._host = host
        # Set self._port to None to avoid accidentally calling port.tests when
        # we are not in prefix mode.
        self._port = self._host.port_factory.get() if prefix_mode else None
        self._builder_names = set()
        self._prefix_mode = prefix_mode
        self._test_prefix_map = collections.defaultdict(list)

    def __iter__(self):
        return iter(self._iter_combinations())

    def __bool__(self):
        return bool(self._test_prefix_map)

    def _iter_combinations(self):
        """Iterates through (test, build, port) combinations."""
        # Consistency fix: this used the Python-2-only dict.iteritems(),
        # which raises AttributeError under Python 3 -- the interpreter the
        # __bool__ protocol above targets. items() behaves the same on both.
        for test_prefix, build_port_pairs in self._test_prefix_map.items():
            if not self._prefix_mode:
                for build, port_name in build_port_pairs:
                    yield (test_prefix, build, port_name)
                continue
            for test in self._port.tests([test_prefix]):
                for build, port_name in build_port_pairs:
                    yield (test, build, port_name)

    def __str__(self):
        if not self._test_prefix_map:
            return '<Empty TestBaselineSet>'
        return '<TestBaselineSet with:\n %s>' % '\n '.join(
            '%s: %s, %s' % triple for triple in self._iter_combinations())

    def test_prefixes(self):
        """Returns a sorted list of test prefixes (or tests) added thus far."""
        return sorted(self._test_prefix_map)

    def all_tests(self):
        """Returns a sorted list of all tests without duplicates."""
        tests = set()
        for test_prefix in self._test_prefix_map:
            if self._prefix_mode:
                tests.update(self._port.tests([test_prefix]))
            else:
                tests.add(test_prefix)
        return sorted(tests)

    def build_port_pairs(self, test_prefix):
        """Returns the (build, port name) pairs recorded for *test_prefix*."""
        # Return a copy in case the caller modifies the returned list.
        return list(self._test_prefix_map[test_prefix])

    def add(self, test_prefix, build, port_name=None):
        """Adds an entry for baselines to download for some set of tests.

        Args:
            test_prefix: This can be a full test path; if the instance was
                constructed in prefix mode (the default), this can also be a
                directory of tests or a path with globs.
            build: A Build object. This specifies where to fetch baselines from.
            port_name: This specifies what platform the baseline is for.
        """
        port_name = port_name or self._host.builders.port_name_for_builder_name(
            build.builder_name)
        self._builder_names.add(build.builder_name)
        self._test_prefix_map[test_prefix].append((build, port_name))

    def all_builders(self):
        """Returns all builder names in this collection."""
        return self._builder_names
class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
"""Base class for rebaseline commands that do some tasks in parallel."""
# Not overriding execute() - pylint: disable=abstract-method
    def __init__(self, options=None):
        # Thin pass-through constructor; all state lives in the base class.
        super(AbstractParallelRebaselineCommand,
              self).__init__(options=options)
    def _release_builders(self):
        """Returns a list of builder names for continuous release builders.

        The release builders cycle much faster than the debug ones and cover
        all the platforms.
        """
        release_builders = []
        for builder_name in self._tool.builders.all_continuous_builder_names():
            port = self._tool.port_factory.get_from_builder_name(builder_name)
            if port.test_configuration().build_type == 'release':
                release_builders.append(builder_name)
        return release_builders
    def _builders_to_fetch_from(self, builders_to_check):
        """Returns the subset of builders that will cover all of the baseline
        search paths used in the input list.

        In particular, if the input list contains both Release and Debug
        versions of a configuration, we *only* return the Release version
        (since we don't save debug versions of baselines).

        Args:
            builders_to_check: A collection of builder names.

        Returns:
            A set of builders that we may fetch from, which is a subset
            of the input list.
        """
        release_builders = set()
        debug_builders = set()
        for builder in builders_to_check:
            port = self._tool.port_factory.get_from_builder_name(builder)
            if port.test_configuration().build_type == 'release':
                release_builders.add(builder)
            else:
                debug_builders.add(builder)
        builders_to_fallback_paths = {}
        # Release builders are visited first, so when a Release and a Debug
        # builder share the same fallback path the Release one wins.
        for builder in list(release_builders) + list(debug_builders):
            port = self._tool.port_factory.get_from_builder_name(builder)
            fallback_path = port.baseline_search_path()
            if fallback_path not in builders_to_fallback_paths.values():
                builders_to_fallback_paths[builder] = fallback_path
        return set(builders_to_fallback_paths)
    def _rebaseline_commands(self, test_baseline_set, options):
        """Builds the copy/rebaseline sub-commands for a TestBaselineSet.

        Returns:
            A (copy_baseline_commands, rebaseline_commands, lines_to_remove)
            tuple; the first two are lists of (argv, cwd) pairs to run in
            parallel, the last maps test names to lists of port names whose
            expectation lines should be removed.
        """
        path_to_blink_tool = self._tool.path()
        cwd = self._tool.git().checkout_root
        copy_baseline_commands = []
        rebaseline_commands = []
        lines_to_remove = {}
        builders_to_fetch_from = self._builders_to_fetch_from(
            test_baseline_set.all_builders())
        for test, build, port_name in test_baseline_set:
            if build.builder_name not in builders_to_fetch_from:
                continue
            suffixes = self._suffixes_for_actual_failures(test, build)
            if not suffixes:
                # Only try to remove the expectation if the test
                # 1. ran and passed ([ Skip ], [ WontFix ] should be kept)
                # 2. passed unexpectedly (flaky expectations should be kept)
                if self._test_passed_unexpectedly(test, build, port_name):
                    _log.debug(
                        'Test %s passed unexpectedly in %s. '
                        'Will try to remove it from TestExpectations.', test,
                        build)
                    if test not in lines_to_remove:
                        lines_to_remove[test] = []
                    lines_to_remove[test].append(port_name)
                continue
            args = []
            if options.verbose:
                args.append('--verbose')
            args.extend([
                '--test',
                test,
                '--suffixes',
                ','.join(suffixes),
                '--port-name',
                port_name,
            ])
            # TODO(crbug.com/1154085): Undo this special case when we have WPT
            # bots on more ports.
            # We may be rebaselining only a subset of all platforms, in which
            # case we need to copy any existing baselines first to avoid clobbering
            # results from platforms that were not run. See
            # https://chromium.googlesource.com/chromium/src/+/master/docs/testing/web_test_baseline_fallback.md#rebaseline
            #
            # However when running in modes that don't interact with the optimizer,
            # we don't want to do this copying.
            if (not self._tool.builders.is_wpt_builder(build.builder_name)
                    and not self._tool.builders.is_flag_specific_builder(
                        build.builder_name)):
                copy_command = [
                    self._tool.executable, path_to_blink_tool,
                    'copy-existing-baselines-internal'
                ] + args
                copy_baseline_commands.append(tuple([copy_command, cwd]))
            # The remaining flags only apply to the rebaseline sub-command,
            # not to the baseline-copy step above.
            args.extend(['--builder', build.builder_name])
            if build.build_number:
                args.extend(['--build-number', str(build.build_number)])
            if options.results_directory:
                args.extend(['--results-directory', options.results_directory])
            if (options.flag_specific
                    and self._tool.builders.is_flag_specific_builder(
                        build.builder_name)):
                args.extend(['--flag-specific', options.flag_specific])
            step_name = self._tool.results_fetcher.get_layout_test_step_name(
                build)
            if step_name:
                args.extend(['--step-name', step_name])
            rebaseline_command = [
                self._tool.executable, path_to_blink_tool,
                'rebaseline-test-internal'
            ] + args
            rebaseline_commands.append(tuple([rebaseline_command, cwd]))
        return copy_baseline_commands, rebaseline_commands, lines_to_remove
    @staticmethod
    def _extract_expectation_line_changes(command_results):
        """Parses the JSON lines from sub-command output and returns the result as a ChangeSet."""
        change_set = ChangeSet()
        for _, stdout, _ in command_results:
            updated = False
            for line in filter(None, stdout.splitlines()):
                try:
                    parsed_line = json.loads(line)
                    change_set.update(ChangeSet.from_dict(parsed_line))
                    updated = True
                except ValueError:
                    # Non-JSON output (e.g. logging) is expected; skip it.
                    _log.debug('"%s" is not a JSON object, ignoring', line)
            if not updated:
                # TODO(crbug.com/649412): This could be made into an error.
                _log.debug('Could not add file based off output "%s"', stdout)
        return change_set
    def _optimize_baselines(self, test_baseline_set, verbose=False):
        """Returns a list of commands to run in parallel to de-duplicate baselines."""
        tests_to_suffixes = collections.defaultdict(set)
        builders_to_fetch_from = self._builders_to_fetch_from(
            test_baseline_set.all_builders())
        for test, build, _ in test_baseline_set:
            if build.builder_name not in builders_to_fetch_from:
                continue
            # TODO(crbug.com/1154085): Undo this special case when we have WPT
            # bots on more ports.
            if self._tool.builders.is_wpt_builder(build.builder_name):
                continue
            # For flag_specific(highdpi) we skip both 'copy existing baselines'
            # and optimizer.
            if self._tool.builders.is_flag_specific_builder(
                    build.builder_name):
                continue
            tests_to_suffixes[test].update(
                self._suffixes_for_actual_failures(test, build))
        optimize_commands = []
        # NOTE(review): iteritems() is Python-2-only; under Python 3 this
        # would need to be .items().
        for test, suffixes in tests_to_suffixes.iteritems():
            # No need to optimize baselines for a test with no failures.
            if not suffixes:
                continue
            # FIXME: We should propagate the platform options as well.
            # Prevent multiple baseline optimizer to race updating the manifest.
            # The manifest has already been updated when listing tests.
            args = ['--no-manifest-update']
            if verbose:
                args.append('--verbose')
            args.extend(['--suffixes', ','.join(suffixes), test])
            path_to_blink_tool = self._tool.path()
            cwd = self._tool.git().checkout_root
            command = [
                self._tool.executable, path_to_blink_tool, 'optimize-baselines'
            ] + args
            optimize_commands.append(tuple([command, cwd]))
        return optimize_commands
    def _update_expectations_files(self, lines_to_remove):
        """Removes OS-version-specific expectation lines for rebaselined tests.

        Args:
            lines_to_remove: Dict mapping test names to lists of port names
                whose expectation lines should be removed.
        """
        tests = lines_to_remove.keys()
        to_remove = collections.defaultdict(set)
        # All known OS version names (lower-cased) for the default port.
        all_versions = frozenset([
            config.version.lower() for config in self._tool.port_factory.get().
            all_test_configurations()
        ])
        # This is so we remove lines for builders that skip this test.
        # For example, Android skips most tests and we don't want to leave
        # stray [ Android ] lines in TestExpectations.
        # This is only necessary for "blink_tool.py rebaseline".
        for port_name in self._tool.port_factory.all_port_names():
            port = self._tool.port_factory.get(port_name)
            for test in tests:
                if (port.test_configuration().version.lower() in all_versions
                        and port.skips_test(test)):
                    to_remove[test].add(
                        port.test_configuration().version.lower())
        # Get configurations to remove based on builders for each test
        for test, port_names in lines_to_remove.items():
            for port_name in port_names:
                port = self._tool.port_factory.get(port_name)
                if port.test_configuration().version.lower() in all_versions:
                    to_remove[test].add(
                        port.test_configuration().version.lower())
        # Load the generic TestExpectations file, strip the versions
        # collected above, and write the result back to disk.
        port = self._tool.port_factory.get()
        path = port.path_to_generic_test_expectations_file()
        test_expectations = TestExpectations(
            port,
            expectations_dict={
                path: self._tool.filesystem.read_text_file(path)
            })
        system_remover = SystemConfigurationRemover(test_expectations)
        for test, versions in to_remove.items():
            system_remover.remove_os_versions(test, versions)
        system_remover.update_expectations()
def _run_in_parallel(self, commands):
if not commands:
return {}
command_results = self._tool.executive.run_in_parallel(commands)
for _, _, stderr in command_results:
if stderr:
_log.error(stderr)
change_set = self._extract_expectation_line_changes(command_results)
return change_set.lines_to_remove
    def rebaseline(self, options, test_baseline_set):
        """Fetches new baselines and removes related test expectation lines.

        Args:
            options: An object with the command line options.
            test_baseline_set: A TestBaselineSet instance, which represents
                a set of tests/platform combinations to rebaseline.
        """
        # Refuse to run on a dirty web tests directory: the command stages
        # baseline files at the end, so local edits could be swept up.
        if self._tool.git().has_working_directory_changes(
                pathspec=self._web_tests_dir()):
            _log.error(
                'There are uncommitted changes in the web tests directory; aborting.'
            )
            return
        for test in sorted({t for t, _, _ in test_baseline_set}):
            _log.info('Rebaselining %s', test)
        # extra_lines_to_remove are unexpected passes, while lines_to_remove are
        # failing tests that have been rebaselined.
        copy_baseline_commands, rebaseline_commands, extra_lines_to_remove = self._rebaseline_commands(
            test_baseline_set, options)
        lines_to_remove = {}
        # Run the copy commands first, then the rebaseline commands, whose
        # output determines which expectation lines to remove.
        self._run_in_parallel(copy_baseline_commands)
        lines_to_remove = self._run_in_parallel(rebaseline_commands)
        # Merge the unexpected passes into the removal set.
        for test in extra_lines_to_remove:
            if test in lines_to_remove:
                lines_to_remove[test] = (
                    lines_to_remove[test] + extra_lines_to_remove[test])
            else:
                lines_to_remove[test] = extra_lines_to_remove[test]
        if lines_to_remove:
            self._update_expectations_files(lines_to_remove)
        if options.optimize:
            self._run_in_parallel(
                self._optimize_baselines(test_baseline_set, options.verbose))
        # Stage every new/changed baseline so the user can commit directly.
        self._tool.git().add_list(self.unstaged_baselines())
def unstaged_baselines(self):
"""Returns absolute paths for unstaged (including untracked) baselines."""
baseline_re = re.compile(r'.*[\\/]' + WEB_TESTS_LAST_COMPONENT +
r'[\\/].*-expected\.(txt|png|wav)$')
unstaged_changes = self._tool.git().unstaged_changes()
return sorted(self._tool.git().absolute_path(path)
for path in unstaged_changes
if re.match(baseline_re, path))
def _generic_baseline_paths(self, test_baseline_set):
"""Returns absolute paths for generic baselines for the given tests.
Even when a test does not have a generic baseline, the path where it
would be is still included in the return value.
"""
filesystem = self._tool.filesystem
baseline_paths = []
for test in test_baseline_set.all_tests():
filenames = [
self._file_name_for_expected_result(test, suffix)
for suffix in BASELINE_SUFFIX_LIST
]
baseline_paths += [
filesystem.join(self._web_tests_dir(), filename)
for filename in filenames
]
baseline_paths.sort()
return baseline_paths
def _web_tests_dir(self):
return self._tool.port_factory.get().web_tests_dir()
def _suffixes_for_actual_failures(self, test, build):
"""Gets the baseline suffixes for actual mismatch failures in some results.
Args:
test: A full test path string.
build: A Build object.
Returns:
A set of file suffix strings.
"""
test_result = self._result_for_test(test, build)
if not test_result:
return set()
# Regardless of the test type, we only need the text output (i.e. the
# INI manifest) on a WPT bot (a reftest produces both text and image
# output, but the image is only informative).
if self._tool.builders.is_wpt_builder(build.builder_name):
return {'txt'}
return test_result.suffixes_for_test_result()
def _test_passed_unexpectedly(self, test, build, port_name):
"""Determines if a test passed unexpectedly in a build.
The routine also takes into account the port that is being rebaselined.
It is possible to use builds from a different port to rebaseline the
current port, e.g. rebaseline-cl --fill-missing, in which case the test
will not be considered passing regardless of the result.
Args:
test: A full test path string.
build: A Build object.
port_name: The name of port currently being rebaselined.
Returns:
A boolean.
"""
if self._tool.builders.port_name_for_builder_name(
build.builder_name) != port_name:
return False
test_result = self._result_for_test(test, build)
if not test_result:
return False
return test_result.did_pass() and not test_result.did_run_as_expected()
    @memoized
    def _result_for_test(self, test, build):
        """Fetches (and memoizes) the result for `test` in `build`, or None."""
        # We need full results to know if a test passed or was skipped.
        # TODO(robertma): Make memoized support kwargs, and use full=True here.
        results = self._tool.results_fetcher.fetch_results(build, True)
        if not results:
            _log.debug('No results found for build %s', build)
            return None
        test_result = results.result_for_test(test)
        if not test_result:
            _log.info('No test result for test %s in build %s', test, build)
            return None
        return test_result
class Rebaseline(AbstractParallelRebaselineCommand):
    """blink_tool.py command that pulls new baselines from the bots."""

    name = 'rebaseline'
    help_text = 'Rebaseline tests with results from the continuous builders.'
    show_in_main_help = True
    argument_names = '[TEST_NAMES]'

    def __init__(self):
        super(Rebaseline, self).__init__(options=[
            self.no_optimize_option,
            # FIXME: should we support the platform options in addition to (or instead of) --builders?
            self.results_directory_option,
            optparse.make_option(
                '--builders',
                default=None,
                action='append',
                help=
                ('Comma-separated-list of builders to pull new baselines from '
                 '(can also be provided multiple times).')),
        ])

    def _builders_to_pull_from(self):
        # Interactively ask which release builders to fetch results from.
        return self._tool.user.prompt_with_list(
            'Which builder to pull results from:',
            self._release_builders(),
            can_choose_multiple=True)

    def execute(self, options, args, tool):
        """Entry point: rebaselines the tests named on the command line."""
        self._tool = tool
        if not args:
            _log.error('Must list tests to rebaseline.')
            return
        if options.builders:
            # --builders may be repeated; each value may be comma-separated.
            builders_to_check = []
            for builder_names in options.builders:
                builders_to_check += builder_names.split(',')
        else:
            builders_to_check = self._builders_to_pull_from()
        test_baseline_set = TestBaselineSet(tool)
        for builder in builders_to_check:
            for test_prefix in args:
                test_baseline_set.add(test_prefix, Build(builder))
        _log.debug('Rebaselining: %s', test_baseline_set)
        self.rebaseline(options, test_baseline_set)
# --- dataset separator: StarcoderdataPython ---
import codecs
import os
import re
from setuptools import setup
from setuptools.command.test import test as TestCommand
# Extract __version__ from sphinxcontrib/asyncio.py by regex instead of
# importing the module (importing would require sphinx at setup time).
with codecs.open(os.path.join(os.path.abspath(os.path.dirname(
        __file__)), 'sphinxcontrib', 'asyncio.py'), 'r', 'latin1') as fp:
    try:
        version = re.findall(r"^__version__ = '([^']+)'\r?$",
                             fp.read(), re.M)[0]
    except IndexError:
        raise RuntimeError('Unable to determine version.')

install_requires = ['sphinx>=3.0']
def read(f):
    """Return the stripped contents of file *f*, resolved next to setup.py.

    Uses a context manager so the file handle is closed deterministically
    (the original relied on garbage collection to close it).
    """
    path = os.path.join(os.path.dirname(__file__), f)
    with open(path) as fp:
        return fp.read().strip()
class PyTest(TestCommand):
    """`setup.py test` command that shells out to pytest on the tests dir."""

    user_options = []

    def run(self):
        import subprocess
        import sys
        # Run pytest in a subprocess and propagate its exit status.
        errno = subprocess.call([sys.executable, '-m', 'pytest', 'tests'])
        raise SystemExit(errno)
tests_require = install_requires + ['pytest']

# Package metadata; long_description is README + changelog concatenated.
setup(
    name='sphinxcontrib-asyncio',
    version=version,
    description=('sphinx extension to support coroutines in markup'),
    long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))),
    classifiers=[
        'Environment :: Plugins',
        'Framework :: AsyncIO',
        'Framework :: Sphinx :: Extension',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Documentation :: Sphinx',
        'Topic :: Software Development :: Documentation'],
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/aio-libs/sphinxcontrib-asyncio',
    license='Apache 2',
    packages=['sphinxcontrib'],
    install_requires=install_requires,
    tests_require=tests_require,
    include_package_data=True,
    cmdclass=dict(test=PyTest))
# --- dataset separator: StarcoderdataPython ---
import json
import os
from datetime import datetime
from croniter import croniter
from boto3.dynamodb.types import TypeDeserializer
from lambda_client import invoke_lambda
from model import client, table_name, cron_table_name, cron_table
from util import make_chunks
from scheduler import schedule_cron_events
deserializer = TypeDeserializer()
def run():
    """Fire due cron events and dispatch due one-shot scheduled events.

    Two passes:
      1. Scan the cron table, compute each valid event's next fire time with
         croniter, and for events due within the next minute update their
         ``last_date`` and hand them to ``schedule_cron_events``.
      2. Query the one-shot event table for the minute segment ten minutes
         ahead and fan the matching ids out to the schedule lambda in
         chunks of 200.
    """
    # Scan the minute that is 10 minutes away, not the one that is already
    # progressing.
    current_segment = int(datetime.utcnow().replace(second=0, microsecond=0).timestamp() + 10 * 60)
    count = 0
    # get events which match the current time and add those to ids
    for page in client.get_paginator('scan').paginate(
        TableName=cron_table_name,
    ):
        items = []
        items_to_execute = []
        current_date = datetime.utcnow().replace(second=0, microsecond=0)
        # NOTE(review): removed two dead locals from the original
        # (`current_date_str` and the '' initializer for next_date).
        for item in page.get('Items', []):
            event = {k: deserializer.deserialize(v) for k, v in item.items()}
            if "eventIdentifier" in event:
                items.append({
                    'pk': event['pk'],
                    'eventIdentifier': event['eventIdentifier'],
                    'application': event['application'],
                    'last_date': event['last_date'],
                    'cronExpression': event['cronExpression'],
                    'payload': event['payload'],
                    'target': event['target'],
                    'end_date': event['end_date'] if 'end_date' in event else "",
                    'start_date': event['start_date'],
                })
        for item in items:
            print(f"current date time : {datetime.utcnow()} {datetime.fromisoformat(item['start_date'])}")
            if croniter.is_valid(item['cronExpression']) and datetime.fromisoformat(item['start_date']) < datetime.utcnow():
                # Renamed from `iter`, which shadowed the builtin.
                cron_iter = croniter(item['cronExpression'], datetime.fromisoformat(item['last_date']))
                next_date = cron_iter.get_next(datetime)
                # Fast-forward past occurrences that are already in the past.
                while next_date < current_date:
                    next_date = cron_iter.get_next(datetime)
                if (next_date - current_date).total_seconds() <= 60:
                    # Honour end_date when present (empty string means none).
                    if (len(item['end_date']) > 0 and next_date < datetime.fromisoformat(item['end_date'])) or (len(item['end_date']) == 0):
                        item['next_date'] = next_date.isoformat()
                        print(f" item : {item}")
                        items_to_execute.append(item)
        print('Items needs to be executed: ')
        print(items_to_execute)
        for item in items_to_execute:
            # Persist the new last_date so the event is not re-fired.
            response = cron_table.update_item(
                Key={
                    'pk': item['pk']
                },
                UpdateExpression="set last_date=:l",
                ExpressionAttributeValues={
                    ':l': item['next_date'],
                },
                ReturnValues="UPDATED_NEW"
            )
        schedule_cron_events(items_to_execute)

    # One-shot events: collect (pk, sk) ids for the target segment and
    # invoke the schedule lambda in bounded chunks.
    for page in client.get_paginator('query').paginate(
        TableName=table_name,
        ProjectionExpression='pk,sk',
        KeyConditionExpression='pk = :s',
        ExpressionAttributeValues={
            ':s': {
                'N': str(current_segment)
            }
        }):
        ids = []
        for item in page.get('Items', []):
            event = {k: deserializer.deserialize(v) for k, v in item.items()}
            ids.append({
                'pk': int(event['pk']),
                'sk': event['sk']
            })
        for chunk in make_chunks(ids, 200):
            invoke_lambda(os.environ.get('SCHEDULE_FUNCTION'), json.dumps(chunk).encode('utf-8'))
        count += page['Count']
    print('Batched %d entries' % count)
# --- dataset separator: StarcoderdataPython ---
import json
import uuid
from dataclasses import dataclass
from typing import Callable, Sequence, Any, Optional, Tuple, Union, List, Generic, TypeVar
from serflag import SerFlag
from handlers.graphql.graphql_handler import ContextProtocol
from handlers.graphql.utils.string import camelcase
from xenadapter.task import get_userids
from xenadapter.xenobject import XenObject
from functools import partial
import constants.re as re
from sentry_sdk import capture_exception
def call_mutation_from_string(mutable_object, changes, function):
    """Builds a zero-argument mutator for setting named after `function`.

    The returned callable reads the current value via get_<function>, writes
    the value taken from `changes` via set_<function>, and returns the pair
    ({function: old_value}, {function: new_value}).
    """
    def mutate():
        previous = {function: getattr(mutable_object, 'get_' + function)()}
        proposed = {function: getattr(changes, function)}
        getattr(mutable_object, 'set_' + function)(proposed[function])
        return previous, proposed

    return mutate
@dataclass
class MutationMethod:
    '''
    Represents a mutation method - a function equipped with action name that is passed to check_access.

    Attributes:
        func: A mutation performer name without set prefix: a function that accepts an argument that is a part of used input named after the func.
            i.e. if func == "name_label", it'll invoke set_name_label(user_input.name_label)
            OR
            a tuple of functions: 1st is going to be called with user input,
            and 2nd is a validator, taking user input and returning a tuple of validation result and reason
        access_action: An access action required for performing this mutation. None means this mutation is for administrators only
        deps: Tuple of dependencies: lambdas that are called with our object as first argument and returning tuple of Boolean and reason string
    '''
    # Type variables/aliases for the two callables a `func` tuple may hold.
    Input = TypeVar('Input')
    InputArgument = TypeVar('InputArgument')
    # Performs the mutation; returns an (old, new) value pair.
    MutationFunction = Callable[[Input, "XenObject"], Tuple[InputArgument, InputArgument]]
    # Validates user input; returns (granted, reason).
    MutationCheckerFunction = Callable[[Input, "XenObject"], Tuple[bool, Optional[str]]]
    func: Union[str, Tuple[MutationFunction, MutationCheckerFunction]]
    access_action: Optional[SerFlag]
    deps: Tuple[Callable[["XenObject"], Tuple[bool, str]]] = tuple()
def call_mutation_from_function(mutable_object, changes, function: MutationMethod.MutationFunction):
return partial(function, changes, mutable_object)
@dataclass
class MutationHelper:
    """
    A Mutation helper. Parameters:

    - mutations: Sequence of mutations to be performed
    - ctx: Request's context
    - mutable_object: A Xen object to perform mutation on
    """
    mutations: Sequence[MutationMethod]
    ctx: ContextProtocol
    mutable_object: XenObject

    def prepare_mutations_for_item(self, item, changes):
        """Validates one MutationMethod against `changes` and access rules.

        Returns one of:
          - None: nothing to do (user supplied no value for this mutation);
          - str: human-readable reason the mutation is denied or invalid;
          - callable: zero-argument function that performs the mutation and
            returns the (old_value, new_value) pair.
        """
        dep_checks : List[Callable[[], Tuple[bool, str]]] = []
        # Filling dependency checks in
        for dep in item.deps:
            dep_checks.append(partial(dep, self.mutable_object))

        if isinstance(item.func, str):
            if getattr(changes, item.func) is None:
                return
        else:
            granted, reason = item.func[1](changes, self.mutable_object, self.ctx)
            if not granted:
                if not reason: # if Reason is None, we're instructed to skip this mutation as user didn't supply anything
                    return
                else:
                    return reason

        # Checking access
        if not(item.access_action is None and \
            self.ctx.user_authenticator.is_admin() or \
            self.mutable_object.check_access(self.ctx.user_authenticator, item.access_action)):
            if item.access_action:
                return f"{camelcase(item.func if isinstance(item.func, str) else item.func[0].__name__)}: Access denied: object {self.mutable_object}; action: {item.access_action}"
            else:
                return f"{camelcase(item.func if isinstance(item.func, str) else item.func[0].__name__)}: Access denied: not an administrator"
        else:
            if isinstance(item.func, str):
                function_candidate = call_mutation_from_string(self.mutable_object, changes, item.func)
            else:
                function_candidate = call_mutation_from_function(self.mutable_object, changes, item.func[0])
            # Running dependency checks
            for dep_check in dep_checks:
                ret = dep_check()
                if not ret[0]:
                    return f"{camelcase(item.func if isinstance(item.func, str) else '')}: {ret[1]}"
            return function_candidate

    def perform_mutations(self, changes: MutationMethod.Input) -> Tuple[bool, Optional[str]]:
        '''
        Perform mutations in a transaction fashion: Either all or nothing.
        This method also inserts tasks in task table for each mutation.
        If mutation fails, "failure" status is set.
        In the result field, there's a JSON document of the following structure:
        {
        "old_val": {
            "setting_name": "old_setting_value"
         }
         "new_val" : {
            "setting_name": "new_setting_value"
         }
        }
        :param changes: Graphene Input type instance with proposed changes
        :return: Tuple [True, None] or [False, "String reason what's not granted"] where access is not granted]
        '''
        tasks : List[dict] = []
        # First pass: validate everything; abort before any mutation runs.
        for item in self.mutations:
            function_or_error = self.prepare_mutations_for_item(item, changes)
            if not function_or_error:
                continue
            new_uuid = str(uuid.uuid4())
            action = item.access_action.serialize()[0]
            who = "users/" + self.ctx.user_authenticator.get_id() if not self.ctx.user_authenticator.is_admin() else None
            object_ref = self.mutable_object.ref
            object_type = self.mutable_object.__class__
            task = {
                "ref": new_uuid,
                "object_ref": object_ref,
                "object_type": object_type.__name__,
                "action": action,
                "error_info" : [],
                "created": re.r.now().run(),
                "name_label": f"{object_type.__name__}.{action}",
                "name_description": "",
                "uuid": new_uuid,
                "progress": 1,
                "resident_on": None,
                "who": who,
                "access" : {user: ['remove'] for user in get_userids(object_type, object_ref, action)}
            }
            if isinstance(function_or_error, str):
                # Validation failed: record a failed task and abort the batch.
                task['status'] = 'failure'
                task['error_info'].append(function_or_error)
                task['finished'] = re.r.now().run()
                re.db.table('tasks').insert(task).run()
                return False, function_or_error
            else:
                task['call'] = function_or_error
                tasks.append(task)

        # Second pass: execute the prepared mutations, one task record each.
        for task in tasks:
            try:
                # BUG FIX: mutation callables return (old_value, new_value) —
                # see call_mutation_from_string — so unpack in that order.
                # Previously the pair was swapped, storing the new value
                # under "old_val" and vice versa.
                old_value, new_value = task['call']()
                task['status'] = 'success'
                task['result'] = json.dumps({"old_val": old_value, "new_val": new_value})
                task['finished'] = re.r.now().run()
            except Exception as e:
                capture_exception(e)
                task['status'] = 'failure'
                task['error_info'].append(str(e))
                task['result'] = ""
                task['finished'] = re.r.now().run()
            finally:
                del task['call']
                re.db.table('tasks').insert(task).run()

        return True, None
# --- dataset separator: StarcoderdataPython ---
# repo: osrf/cloudsim-legacy
#!/usr/bin/env python
from __future__ import with_statement
from __future__ import print_function
import cgitb
import json
from common import get_javascripts
cgitb.enable()
import common
from common import authorize
email = authorize()
udb = common.UserDatabase()
role = udb.get_role(email)
user_info = json.dumps({'user':email, 'role':role})
scripts = get_javascripts([ 'jquery.flot.js' ])
print("Content-Type: text/html")
print("\n")
# <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
# <script language="javascript" type="text/javascript" src="/js/jquery-1.8.3.min.js"></script>
# <script language="javascript" type="text/javascript" src="/js/jquery.flot.js"></script>
#
template = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>CloudSim seeder</title>
<link href="/js/layout.css" rel="stylesheet" type="text/css">
<link rel="stylesheet" href="/js/jquery-ui.css" />
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/jqueryui/1.9.2/jquery-ui.min.js"></script>
<script language="javascript" type="text/javascript" src="/js/jquery.flot.js"></script>
""" + scripts +"""
<script language="javascript">
/////////////////////////////////////////////
// "You Can Recognize A Pioneer By The Arrows In His Back."
function cloudseed(email, key, secret )
{
alert('user: ' +email + " key: '"+ key + "' secret: '" + secret + "'");
var url = '/cloudsim/inside/cgi-bin/cloudsim_cmd.py?command=cloudseed';
url += '&email=' + email;
url += '&key=' + key;
url += '&secret=' + secret;
console.log(url);
msg = httpGet(url);
console.log(msg);
}
function create_cloudseed_widget(div_name)
{
var div = document.getElementById(div_name);
var username_text = document.createElement('input');
username_text.setAttribute('type','text');
var key_text = document.createElement('input');
key_text.setAttribute('type','text');
var secret_text = document.createElement('input');
secret_text.setAttribute('type','text');
var launch_button= document.createElement('input');
launch_button.setAttribute('type','button');
launch_button.setAttribute('value','Launch');
launch_button.onclick = function()
{
var x = confirm("Are you sure?");
if(!x) return;
var email = username_text.value;
var key = key_text.value;
var secret = secret_text.value;
cloudseed(email, key, secret);
};
var status_img = document.createElement('img');
status_img.src = "/js/images/gray_status.png";
div.appendChild(document.createTextNode("email: "));
div.appendChild(username_text);
div.appendChild(document.createTextNode("AWS key: "));
div.appendChild(key_text);
div.appendChild(document.createTextNode("AWS secret key: "));
div.appendChild(secret_text);
div.appendChild(launch_button);
var div = document.getElementById("prog_div");
$( "#"+ div_name ).progressbar({
value: 37
});
var progress = 0;
var test_button= document.createElement('input');
test_button.setAttribute('type','button');
test_button.setAttribute('value','Test');
test_button.onclick = function()
{
progress += 10;
}
div.appendChild(test_button);
}
////////////////////////////////////////////
function create_progress_widget(div_name)
{
var div = document.getElementById(div_name);
$( "#"+ div_name ).progressbar({
value: 37
});
}
////////////////////////////////////////////
function on_load_page()
{
var user_info = """ + user_info + """;
if(user_info.role == "admin")
{
$('.admin_only').show();
}
// create_progress_widget("prog_div");
create_server_monitor_widget("server_monitor_div");
create_cloudseed_widget("cloudseed_div");
stream();
}
var log_events = true;
function stream()
{
var stream_url = '/cloudsim/inside/cgi-bin/console_stream.py';
console.log(stream_url);
var es = new EventSource(stream_url);
es.addEventListener("cloudsim", function(event)
{
var str_data = event.data;
if(log_events)
console.log(str_data);
var data = eval( '(' + str_data + ')' );
$.publish("/cloudsim", data);
}, false);
es.addEventListener("done", function(event)
{
alert("Unexpected 'done' msg received");
es.close();
},false);
}
</script>
</head>
<body onload = "on_load_page()">
<h1>CloudSeed</h1>
<div id="server_monitor_div"></div>
<div id="cloudseed_div"></div>
<div id="prog_div"></div>
<br>
<hr>
<div style="float:right;">
<img src="/js/images/osrf.png" width="200px"/>
</div>
<div style="float:left;">
Logged in as: """ + email + """<br>
<a href="/cloudsim/inside/cgi-bin/logout">Logout</a><br>
<div class="admin_only" style="display:none;">
<a href="/cloudsim/inside/cgi-bin/admin_download">SSH key download</a><br>
</div>
</div>
</body>
</html>
"""
page = template
print(page )
# --- dataset separator: StarcoderdataPython ---
# file: 3.py
class Edge:
    """A wire segment between two points, with cumulative steps to its start."""

    def __init__(self, p1, p2, steps):
        self.p1, self.p2, self.steps = p1, p2, steps
        # A segment is vertical when both endpoints share an x coordinate.
        self.vertical = (p1.x == p2.x)
class Point:
    """An (x, y) coordinate on the wire grid."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Added for debuggability; instances previously printed as opaque
        # object addresses.
        return f"Point(x={self.x!r}, y={self.y!r})"
class Intersection:
    """A crossing point plus each wire's cumulative steps to reach it."""

    def __init__(self, point, steps1, steps2):
        self.point, self.steps1, self.steps2 = point, steps1, steps2
class Movement:
    """A single wire instruction: a direction letter and a distance."""

    def __init__(self, direction, amount):
        self.direction, self.amount = direction, amount
def get_edges(path):
    """Converts a sequence of Movements into Edge segments from the origin.

    Each Edge records the cumulative number of steps taken before its start.
    """
    deltas = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}
    edges = []
    steps = 0
    x = y = 0
    for move in path:
        try:
            dx, dy = deltas[move.direction]
        except KeyError:
            raise Exception("Unknown direction: " + move.direction)
        nx = x + dx * move.amount
        ny = y + dy * move.amount
        edges.append(Edge(Point(x, y), Point(nx, ny), steps))
        x, y = nx, ny
        steps += move.amount
    return edges
def parse_path(line):
    """Parses a comma-separated path like 'R990,U475,...' into Movements.

    Returns a list rather than the original single-use ``map`` iterator, so
    the result can be iterated more than once; callers that only iterate are
    unaffected.
    """
    return [Movement(token[0], int(token[1:])) for token in line.split(",")]
def intersect_point(edge1, edge2):
    """Returns the Intersection of two perpendicular edges, or None.

    Comparisons are deliberately strict: edges that only touch at an
    endpoint — including the shared origin at (0, 0) — do not count.
    """
    if edge1.vertical == edge2.vertical:
        # Parallel segments: no crossing (collinear overlap is ignored).
        return None
    horizontal = edge2 if edge1.vertical else edge1
    vertical = edge1 if edge1.vertical else edge2
    hy = horizontal.p1.y
    vy1 = min(vertical.p1.y, vertical.p2.y)
    vy2 = max(vertical.p1.y, vertical.p2.y)
    vx = vertical.p1.x
    hx1 = min(horizontal.p1.x, horizontal.p2.x)
    hx2 = max(horizontal.p1.x, horizontal.p2.x)
    if vy1 < hy < vy2 and hx1 < vx < hx2:
        # Steps to the crossing = steps to each edge's start + distance
        # travelled along that edge to the crossing point.
        return Intersection(Point(vx, hy),
                            horizontal.steps + abs(horizontal.p1.x - vx),
                            vertical.steps + abs(vertical.p1.y - hy))
    return None
# Puzzle input: the two wire paths (Advent of Code 2019, day 3 format).
text1 = "R990,U475,L435,D978,L801,D835,L377,D836,L157,D84,R329,D342,R931,D522,L724,U891,L508,U274,L146,U844,R686," \
        "D441,R192,U992,L781,D119,R436,D286,R787,D85,L801,U417,R619,D710,R42,U261,R296,U697,L354,D843,R613,U880,R789," \
        "D134,R636,D738,L939,D459,L338,D905,R811,D950,L44,U992,R845,U771,L563,D76,L69,U839,L57,D311,L615,D931,L437," \
        "D201,L879,D1,R978,U415,R548,D398,L560,D112,L894,D668,L708,D104,R622,D768,R901,D746,L793,D26,R357,U216,L216," \
        "D33,L653,U782,R989,U678,L7,D649,R860,D281,L988,U362,L525,U652,R620,D376,L983,U759,R828,D669,L297,U207,R68," \
        "U77,R255,U269,L661,U310,L309,D490,L55,U471,R260,D912,R691,D62,L63,D581,L289,D366,L862,D360,L485,U946,R937," \
        "D470,L792,D614,R936,D963,R611,D151,R908,D195,R615,U768,L166,D314,R640,U47,L161,U872,R50,U694,L917,D149,L92," \
        "U244,L337,U479,R755,U746,L196,D759,L936,U61,L744,D774,R53,U439,L185,D504,R769,D696,L285,D396,R791,U21,L35," \
        "D877,L9,U398,R447,U101,R590,U862,L351,D210,L935,U938,R131,U758,R99,U192,L20,U142,L946,D981,R998,U214,R174," \
        "U710,L719,D879,L411,U839,L381,U924,L221,D397,R380,U715,R139,D367,R253,D973,L9,U624,L426,D885,R200,U940,R214," \
        "D75,R717,D2,R578,U161,R421,U326,L561,U311,L701,U259,R836,D920,R35,D432,R610,D63,R664,D39,L119,D47,L605,D228," \
        "L364,D14,L226,D365,R796,D233,R476,U145,L926,D907,R681,U267,R844,U735,L948,U344,L629,U31,L383,U694,L666,U158," \
        "R841,D27,L150,D950,L335,U275,L184,D157,R504,D602,R605,D185,L215,D420,R700,U809,L139,D937,L248,U693,L56,U92," \
        "L914,U743,R445,U417,L504,U23,R332,U865,R747,D553,R595,U845,R693,U915,R81"
text2 = "L1004,U406,L974,D745,R504,D705,R430,D726,R839,D550,L913,D584,R109,U148,L866,U664,R341,U449,L626,D492,R716," \
        "U596,L977,D987,L47,U612,L478,U928,L66,D752,R665,U415,R543,U887,R315,D866,R227,D615,R478,U180,R255,D316,L955," \
        "U657,R752,U561,R786,U7,R918,D755,R506,U131,L875,D849,R823,D755,L604,U944,R186,D326,L172,U993,L259,D765,R427," \
        "D193,R663,U470,L294,D437,R645,U10,L926,D814,L536,D598,R886,D290,L226,U156,R754,D105,L604,D136,L883,U87,R839," \
        "D807,R724,U184,L746,D79,R474,U186,R727,U9,L69,U565,R459,D852,R61,U370,L890,D439,L431,U846,R460,U358,R51," \
        "D407,R55,U179,L385,D652,R193,D52,L569,U980,L185,U813,R636,D275,L585,U590,R215,U947,R851,D127,L249,U954,L884," \
        "D235,R3,U735,R994,D883,L386,D506,L963,D751,L989,U733,L221,U890,L711,D32,L74,U437,L700,D977,L49,U478,R438," \
        "D27,R945,D670,L230,U863,L616,U461,R267,D25,L646,D681,R426,D918,L791,U712,L730,U715,L67,U359,R915,D524,L722," \
        "U374,L582,U529,L802,D865,L596,D5,R323,U235,R405,D62,R304,U996,L939,U420,L62,D299,R802,D803,L376,U430,L810," \
        "D334,L67,U395,L818,U953,L817,D411,L225,U383,R247,D234,L430,U315,L418,U254,L964,D372,R979,D301,R577,U440," \
        "R924,D220,L121,D785,L609,U20,R861,U288,R388,D410,L278,D748,L800,U755,L919,D985,L785,U676,R916,D528,L507," \
        "D469,L582,D8,L900,U512,L764,D124,L10,U567,L379,D231,R841,D244,R479,U145,L769,D845,R651,U712,L920,U791,R95," \
        "D958,L608,D755,R967,U855,R563,D921,L37,U699,L944,U718,R959,D195,L922,U726,R378,U258,R340,D62,L555,D135,L690," \
        "U269,L273,D851,L60,D851,R1,D315,R117,D855,L275,D288,R25,U503,R569,D596,L823,U687,L450"

# Build edge lists for both wires and collect every pairwise crossing.
path1 = parse_path(text1)
path2 = parse_path(text2)

edges1 = get_edges(path1)
edges2 = get_edges(path2)

intersections = []
for e1 in edges1:
    for e2 in edges2:
        intersection = intersect_point(e1, e2)
        if intersection is not None:
            intersections.append(intersection)

# Part 1: Manhattan distance of the crossing closest to the origin.
print(min(map(lambda i: abs(i.point.x) + abs(i.point.y), intersections)))
print(min(map(lambda i: i.steps1 + i.steps2, intersections)))
import numpy as np
import scipy.interpolate as interpolate
import h5py as h5
import os
from lxml import objectify, etree
import sharpy.utils.generator_interface as generator_interface
import sharpy.utils.settings as settings
import sharpy.utils.cout_utils as cout
@generator_interface.generator
class TurbVelocityField(generator_interface.BaseGenerator):
    r"""
    Turbulent Velocity Field Generator

    ``TurbVelocityField`` is a class inherited from ``BaseGenerator``

    The ``TurbVelocityField`` class generates a velocity field based on the input from an [XDMF](http://www.xdmf.org) file.
    It supports time-dependent fields as well as frozen turbulence.

    To call this generator, the ``generator_id = TurbVelocityField`` shall be used.
    This is parsed as the value for the ``velocity_field_generator`` key in the desired aerodynamic solver's settings.

    Supported files:
        - `field_id.xdmf`: Steady or Unsteady XDMF file

    This generator also performs time interpolation between two different time steps. For now, only linear interpolation is possible.

    Space interpolation is done through `scipy.interpolate` trilinear interpolation. However, turbulent fields are
    read directly from the binary file and not copied into memory. This is performed using `np.memmap`.
    The overhead of this procedure is ~18% for the interpolation stage, however, initially reading the binary velocity field
    (which will be much more common with time-domain simulations) is faster by a factor of 1e4.
    Also, memory savings are quite substantial: from 6Gb for a typical field to a handful of megabytes for the whole program.

    Args:
        in_dict (dict): Input data in the form of dictionary. See acceptable entries below:

    Attributes:

    See Also:
        .. py:class:: sharpy.utils.generator_interface.BaseGenerator
    """
    generator_id = 'TurbVelocityField'
    generator_classification = 'velocity-field'

    # Declarative settings table: types, defaults and descriptions are used
    # both for input validation and for generating the class docs.
    settings_types = dict()
    settings_default = dict()
    settings_description = dict()

    settings_types['print_info'] = 'bool'
    settings_default['print_info'] = True
    settings_description['print_info'] = 'Output solver-specific information in runtime.'

    settings_types['turbulent_field'] = 'str'
    settings_default['turbulent_field'] = None
    settings_description['turbulent_field'] = 'XDMF file path of the velocity field'

    settings_types['offset'] = 'list(float)'
    settings_default['offset'] = np.zeros((3,))
    settings_description['offset'] = 'Spatial offset in the 3 dimensions'

    settings_types['centre_y'] = 'bool'
    settings_default['centre_y'] = True
    settings_description['centre_y'] = 'Flat for changing the domain to [``-y_max/2``, ``y_max/2``]'

    settings_types['periodicity'] = 'str'
    settings_default['periodicity'] = 'xy'
    settings_description['periodicity'] = 'Axes in which periodicity is enforced'

    settings_types['frozen'] = 'bool'
    settings_default['frozen'] = True
    settings_description['frozen'] = 'If ``True``, the turbulent field will not be updated in time'

    settings_types['store_field'] = 'bool'
    settings_default['store_field'] = False
    settings_description['store_field'] = 'If ``True``, the xdmf snapshots are stored in memory. Only two at a time for the linear interpolation'

    settings_table = settings.SettingsTable()
    __doc__ += settings_table.generate(settings_types, settings_default, settings_description)
    def __init__(self):
        # Raw input settings and the validated/converted settings dict.
        self.in_dict = dict()
        self.settings = dict()

        # Path extension of the input turbulence file (set in initialise).
        self.file = None
        self.extension = None

        # Grid metadata parsed from the XDMF header.
        self.grid_data = dict()

        # One spatial interpolator per velocity dimension.
        self.interpolator = 3*[None]
        self.x_periodicity = False
        self.y_periodicity = False

        # variables for interpolator wrapper
        self._t0 = -1
        self._t1 = -1
        self._it0 = -1
        self._it1 = -1
        self._interpolator0 = None
        self._interpolator1 = None
        # Linear blending weight between the two time snapshots.
        self.coeff = 0.
        self.double_initialisation = True

        # Velocity data holders for the two snapshots bracketing current time.
        self.vel_holder0 = 3*[None]
        self.vel_holder1 = 3*[None]
def initialise(self, in_dict):
self.in_dict = in_dict
settings.to_custom_types(self.in_dict, self.settings_types, self.settings_default)
self.settings = self.in_dict
_, self.extension = os.path.splitext(self.settings['turbulent_field'])
if self.extension is '.h5':
self.read_btl(self.settings['turbulent_field'])
if self.extension in '.xdmf':
self.read_xdmf(self.settings['turbulent_field'])
if 'z' in self.settings['periodicity']:
raise ValueError('Periodicitiy setting in TurbVelocityField cannot be z.\n A turbulent boundary layer is not periodic in the z direction!')
if 'x' in self.settings['periodicity']:
self.x_periodicity = True
if 'y' in self.settings['periodicity']:
self.y_periodicity = True
# ADC: VERY VERY UGLY. NEED A BETTER WAY
def interpolator_wrapper0(self, coords, i_dim=0):
coeff = self.get_coeff()
return (1.0 - self.coeff)*self._interpolator0[i_dim](coords) + self.coeff*self._interpolator1[i_dim](coords)
def interpolator_wrapper1(self, coords, i_dim=1):
coeff = self.get_coeff()
return (1.0 - self.coeff)*self._interpolator0[i_dim](coords) + self.coeff*self._interpolator1[i_dim](coords)
def interpolator_wrapper2(self, coords, i_dim=2):
coeff = self.get_coeff()
return (1.0 - self.coeff)*self._interpolator0[i_dim](coords) + self.coeff*self._interpolator1[i_dim](coords)
    def get_coeff(self):
        """Return the current linear time-interpolation coefficient (0..1)."""
        return self.coeff
def init_interpolator(self):
if self.settings['frozen']:
self.interpolator = self._interpolator0
return
# continuing the ugliness
self.interpolator[0] = self.interpolator_wrapper0
self.interpolator[1] = self.interpolator_wrapper1
self.interpolator[2] = self.interpolator_wrapper2
    # these functions need to define the interpolators
    def read_btl(self, in_file):
        """
        Legacy function, not using the custom format based on HDF5 anymore.

        Kept only so ``initialise`` has a hook for ``.h5`` inputs; always
        raises, since the BTL reader was never updated to the current
        grid_data layout.
        """
        raise NotImplementedError('The BTL reader is not up to date!')
    def read_xdmf(self, in_file):
        """
        Reads the xml file `<case_name>.xdmf`. Writes the self.grid_data data
        structure with all the information necessary.

        Note: this function does not load any turbulence data (such as ux000, ...),
        it only reads the header information contained in the xdmf file.
        """
        # store route of file for the other files
        self.route = os.path.dirname(os.path.abspath(in_file))

        # file to string (newlines stripped so objectify sees one blob)
        with open(in_file, 'r') as self.file:
            data = self.file.read().replace('\n', '')

        # parse data
        # this next line is necessary to avoid problems with parsing in the Time part:
        # <!--Start....
        # 0.0, 1.0 ...
        # see https://stackoverflow.com/a/18313932
        parser = objectify.makeparser(remove_comments=True)
        tree = objectify.fromstring(data, parser=parser)

        # mesh dimensions
        # NOTE(review): stored as written in the file, which the comments
        # below treat as (nz, ny, nx) ordering -- confirm against writer.
        self.grid_data['dimensions'] = np.fromstring(tree.Domain.Topology.attrib['Dimensions'],
                                                     sep=' ',
                                                     count=3,
                                                     dtype=int)

        # origin
        self.grid_data['origin'] = np.fromstring(tree.Domain.Geometry.DataItem[0].text,
                                                 sep=' ',
                                                 count=int(tree.Domain.Geometry.DataItem[0].attrib['Dimensions']),
                                                 dtype=float)

        # dxdydz
        # because of how XDMF does it, it is actually dzdydx
        self.grid_data['dxdydz'] = (
            np.fromstring(tree.Domain.Geometry.DataItem[1].text,
                          sep=' ',
                          count=int(tree.Domain.Geometry.DataItem[1].attrib['Dimensions']),
                          dtype=float))

        # now onto the grid
        # time information
        # [0] is start, [1] is stride
        self.grid_data['time'] = np.fromstring(tree.Domain.Grid.Time.DataItem.text,
                                               sep=' ',
                                               count=2,
                                               dtype=float)

        self.grid_data['n_grid'] = len(tree.Domain.Grid.Grid)
        # self.grid_data['grid'] = [dict()]*self.grid_data['n_grid']
        self.grid_data['grid'] = []
        for i, i_grid in enumerate(tree.Domain.Grid.Grid):
            self.grid_data['grid'].append(dict())
            # cycle through attributes
            for k_attrib, v_attrib in i_grid.attrib.items():
                self.grid_data['grid'][i][k_attrib] = v_attrib

            # get Attributes (upper case A is not a mistake)
            for i_attrib, attrib in enumerate(i_grid.Attribute):
                self.grid_data['grid'][i][attrib.attrib['Name']] = dict()
                # file name with blanks stripped, as referenced on disk
                self.grid_data['grid'][i][attrib.attrib['Name']]['file'] = (
                    attrib.DataItem.text.replace(' ', ''))
                # record the on-disk float width so read_grid can map it
                if attrib.DataItem.attrib['Precision'].strip() == '4':
                    self.grid_data['grid'][i][attrib.attrib['Name']]['Precision'] = np.float32
                elif attrib.DataItem.attrib['Precision'].strip() == '8':
                    self.grid_data['grid'][i][attrib.attrib['Name']]['Precision'] = np.float64

        # now we have the file names and the dimensions
        self.grid_data['initial_x_grid'] = np.array(np.arange(0,
                                                              self.grid_data['dimensions'][2]))*self.grid_data['dxdydz'][2]
        # z in the file is -y for us in sharpy (y_sharpy = right)
        self.grid_data['initial_y_grid'] = np.array(np.arange(0,
                                                              self.grid_data['dimensions'][1]))*self.grid_data['dxdydz'][1]
        # y in the file is z for us in sharpy (up)
        self.grid_data['initial_z_grid'] = np.array(np.arange(0,
                                                              self.grid_data['dimensions'][0]))*self.grid_data['dxdydz'][0]

        # the domain now goes:
        # x \in [0, dimensions[0]*dx]
        # y \in [-dimensions[2]*dz, 0]
        # z \in [0, dimensions[1]*dy]
        centre_z_offset = 0.
        if self.settings['centre_y']:
            # NOTE(review): despite the setting name 'centre_y', this
            # offsets initial_z_grid (file axes are remapped above) --
            # confirm the axis mapping is as intended.
            centre_z_offset = -0.5*(self.grid_data['initial_z_grid'][-1] - self.grid_data['initial_z_grid'][0])

        self.grid_data['initial_x_grid'] += self.settings['offset'][0] + self.grid_data['origin'][0]
        # shift so the streamwise grid ends at x = 0
        self.grid_data['initial_x_grid'] -= np.max(self.grid_data['initial_x_grid'])
        self.grid_data['initial_y_grid'] += self.settings['offset'][1] + self.grid_data['origin'][1]
        self.grid_data['initial_z_grid'] += self.settings['offset'][2] + self.grid_data['origin'][2] + centre_z_offset

        self.bbox = self.get_field_bbox(self.grid_data['initial_x_grid'],
                                        self.grid_data['initial_y_grid'],
                                        self.grid_data['initial_z_grid'],
                                        frame='G')
        if self.settings['print_info']:
            cout.cout_wrap('The domain bbox is:', 1)
            cout.cout_wrap(' x = [' + str(self.bbox[0, 0]) + ', ' + str(self.bbox[0, 1]) + ']', 1)
            cout.cout_wrap(' y = [' + str(self.bbox[1, 0]) + ', ' + str(self.bbox[1, 1]) + ']', 1)
            cout.cout_wrap(' z = [' + str(self.bbox[2, 0]) + ', ' + str(self.bbox[2, 1]) + ']', 1)
def generate(self, params, uext):
zeta = params['zeta']
for_pos = params['for_pos']
t = params['t']
self.update_cache(t)
self.update_coeff(t)
self.init_interpolator()
self.interpolate_zeta(zeta,
for_pos,
uext)
    def update_cache(self, t):
        """Make sure the two cached snapshots bracket time ``t``.

        Loads snapshot files lazily via ``read_grid`` and reuses the
        previous upper snapshot as the new lower one when possible.
        """
        self.double_initialisation = False
        if self.settings['frozen']:
            # Frozen field: only snapshot 0 is ever needed, load it once.
            if self._interpolator0 is None:
                self._t0 = self.timestep_2_time(0)
                self._it0 = 0
                self._interpolator0 = self.read_grid(self._it0, i_cache=0)
            return

        # most common case: t already in the [t0, t1] interval
        if self._t0 <= t <= self._t1:
            return

        # t < t0, something weird (time going backwards)
        if t < self._t0:
            raise ValueError('Please make sure everything is ok. Your time is going backwards.')

        # t > t1, need initialisation
        if t > self._t1:
            new_it = self.time_2_timestep(t)
            # new timestep requires initialising the two of them (not likely at all)
            # this means that the simulation timestep > LES timestep
            if new_it > self._it1:
                self.double_initialisation = True
            else:
                # t1 goes to t0
                self._t0 = self._t1
                self._it0 = self._it1
                self._interpolator0 = self._interpolator1.copy()

                # t1 updates to the next (new_it + 1)
                self._it1 = new_it + 1
                self._t1 = self.timestep_2_time(self._it1)
                self._interpolator1 = self.read_grid(self._it1, i_cache=1)
                return

        # last case, both interp need to be initialised
        # NOTE(review): self._t0 is initialised to -1, never None, so this
        # condition effectively depends on double_initialisation alone --
        # confirm the None check is intentional.
        if (self._t0 is None or self.double_initialisation):
            self._t0 = self.timestep_2_time(new_it)
            self._it0 = new_it
            self._interpolator0 = self.read_grid(self._it0, i_cache=0)

            self._it1 = new_it + 1
            self._t1 = self.timestep_2_time(self._it1)
            self._interpolator1 = self.read_grid(self._it1, i_cache=1)
def update_coeff(self, t):
if self.settings['frozen']:
self.coeff = 0.0
return
self.coeff = self.linear_coeff([self._t0, self._t1], t)
return
def time_2_timestep(self, t):
return int(max(0, np.floor((t - self.grid_data['time'][0])/self.grid_data['time'][1])))
def timestep_2_time(self, it):
return it*self.grid_data['time'][1] + self.grid_data['time'][0]
def get_field_bbox(self, x_grid, y_grid, z_grid, frame='G'):
bbox = np.zeros((3, 2))
bbox[0, :] = [np.min(x_grid), np.max(x_grid)]
bbox[1, :] = [np.min(y_grid), np.max(y_grid)]
bbox[2, :] = [np.min(z_grid), np.max(z_grid)]
if frame == 'G':
bbox[:, 0] = self.gstar_2_g(bbox[:, 0])
bbox[:, 1] = self.gstar_2_g(bbox[:, 1])
return bbox
    def create_interpolator(self, data, x_grid, y_grid, z_grid, i_dim):
        """Build a regular-grid interpolator over ``data`` on the given axes.

        Out-of-bounds queries return 0.0 instead of raising.
        ``i_dim`` is accepted for interface symmetry with the callers but
        is not used here; ``self`` is likewise unused.
        """
        # NOTE(review): ``interpolate`` is presumably
        # scipy.interpolate, imported at the top of the file -- verify.
        interpolator = interpolate.RegularGridInterpolator((x_grid, y_grid, z_grid),
                                                           data,
                                                           bounds_error=False,
                                                           fill_value=0.0)
        return interpolator
def interpolate_zeta(self, zeta, for_pos, u_ext, interpolator=None, offset=np.zeros((3))):
if interpolator is None:
interpolator = self.interpolator
for isurf in range(len(zeta)):
_, n_m, n_n = zeta[isurf].shape
for i_m in range(n_m):
for i_n in range(n_n):
coord = self.g_2_gstar(self.apply_periodicity(zeta[isurf][:, i_m, i_n] + for_pos[0:3] + offset))
for i_dim in range(3):
try:
u_ext[isurf][i_dim, i_m, i_n] = self.interpolator[i_dim](coord)
except ValueError:
print(coord)
raise ValueError()
u_ext[isurf][:, i_m, i_n] = self.gstar_2_g(u_ext[isurf][:, i_m, i_n])
@staticmethod
def periodicity(x, bbox):
try:
new_x = bbox[0] + divmod(x - bbox[0], bbox[1] - bbox[0])[1]
except ZeroDivisionError:
new_x = x
return new_x
def apply_periodicity(self, coord):
new_coord = coord.copy()
if self.x_periodicity:
i = 0
new_coord[i] = self.periodicity(new_coord[i], self.bbox[i, :])
if self.y_periodicity:
i = 1
new_coord[i] = self.periodicity(new_coord[i], self.bbox[i, :])
# if self.x_periodicity:
#TODO I think this does not work when bbox is not ordered (bbox[i, 0] is not < bbox[i, 1])
# i = 0
# # x in interval:
# if self.bbox[i, 0] <= new_coord[i] <= self.bbox[i, 1]:
# pass
# # lower than min bbox
# elif new_coord[i] < self.bbox[i, 0]:
# temp = divmod(new_coord[i], self.bbox[i, 0])[1]
# if np.isnan(temp):
# pass
# else:
# new_coord[i] = temp
# # greater than max bbox
# elif new_coord[i] > self.bbox[i, 1]:
# temp = divmod(new_coord[i], self.bbox[i, 1])[1]
# if np.isnan(temp):
# pass
# else:
# new_coord[i] = temp
# if self.y_periodicity:
# i = 1
# # y in interval:
# if self.bbox[i, 0] <= new_coord[i] <= self.bbox[i, 1]:
# pass
# # lower than min bbox
# elif new_coord[i] < self.bbox[i, 0]:
# try:
# temp = divmod(new_coord[i], self.bbox[i, 0])[1]
# except ZeroDivisionError:
# temp = new_coord[i]
# if np.isnan(temp):
# pass
# else:
# new_coord[i] = temp
# if new_coord[i] < 0.0:
# new_coord[i] = self.bbox[i, 1] + new_coord[i]
# # greater than max bbox
# elif new_coord[i] > self.bbox[i, 1]:
# temp = divmod(new_coord[i], self.bbox[i, 1])[1]
# if np.isnan(temp):
# pass
# else:
# new_coord[i] = temp
# if new_coord[i] < 0.0:
# new_coord[i] = self.bbox[i, 1] + new_coord[i]
return new_coord
@staticmethod
def linear_coeff(t_vec, t):
# this is 0 when t == t_vec[0]
# 1 when t == t_vec[1]
return (t - t_vec[0])/(t_vec[1] - t_vec[0])
def read_grid(self, i_grid, i_cache=0):
"""
This function returns an interpolator list of size 3 made of `scipy.interpolate.RegularGridInterpolator`
objects.
"""
velocities = ['ux', 'uy', 'uz']
interpolator = list()
for i_dim in range(3):
file_name = self.grid_data['grid'][i_grid][velocities[i_dim]]['file']
if i_cache == 0:
if not self.settings['store_field']:
# load file, but dont copy it
self.vel_holder0[i_dim] = np.memmap(self.route + '/' + file_name,
# dtype='float64',
dtype=self.grid_data['grid'][i_grid][velocities[i_dim]]['Precision'],
shape=(self.grid_data['dimensions'][2],
self.grid_data['dimensions'][1],
self.grid_data['dimensions'][0]),
order='F')
else:
# load and store file
self.vel_holder0[i_dim] = (np.fromfile(open(self.route + '/' + file_name, 'rb'),
dtype=self.grid_data['grid'][i_grid][velocities[i_dim]]['Precision']).\
reshape((self.grid_data['dimensions'][2],
self.grid_data['dimensions'][1],
self.grid_data['dimensions'][0]),
order='F'))
interpolator.append(self.create_interpolator(self.vel_holder0[i_dim],
self.grid_data['initial_x_grid'],
self.grid_data['initial_y_grid'],
self.grid_data['initial_z_grid'],
i_dim=i_dim))
elif i_cache == 1:
if not self.settings['store_field']:
# load file, but dont copy it
self.vel_holder1[i_dim] = np.memmap(self.route + '/' + file_name,
# dtype='float64',
dtype=self.grid_data['grid'][i_grid][velocities[i_dim]]['Precision'],
shape=(self.grid_data['dimensions'][2],
self.grid_data['dimensions'][1],
self.grid_data['dimensions'][0]),
order='F')
else:
# load and store file
self.vel_holder1[i_dim] = (np.fromfile(open(self.route + '/' + file_name, 'rb'),
dtype=self.grid_data['grid'][i_grid][velocities[i_dim]]['Precision']).\
reshape((self.grid_data['dimensions'][2],
self.grid_data['dimensions'][1],
self.grid_data['dimensions'][0]),
order='F'))
interpolator.append(self.create_interpolator(self.vel_holder1[i_dim],
self.grid_data['initial_x_grid'],
self.grid_data['initial_y_grid'],
self.grid_data['initial_z_grid'],
i_dim=i_dim))
else:
raise Error('i_cache has to be 0 or 1')
return interpolator
@staticmethod
def g_2_gstar(coord_g):
return np.array([coord_g[0], coord_g[2], -coord_g[1]])
@staticmethod
def gstar_2_g(coord_star):
return np.array([coord_star[0], -coord_star[2], coord_star[1]])
| StarcoderdataPython |
8029582 | <gh_stars>1-10
def simulate(registers, instructions):
    """Run an Advent-of-Code 'Turing Lock' program.

    Mutates ``registers`` (a dict like ``{'a': 0, 'b': 0}``) in place.

    Instructions: ``hlf r`` (halve), ``tpl r`` (triple), ``inc r``
    (increment), ``jmp ±n`` (relative jump), ``jie r, ±n`` (jump if even),
    ``jio r, ±n`` (jump if one).

    Raises
    ------
    ValueError
        On an unrecognized instruction.
    """
    def _offset(arg):
        # Jump arguments look like '+7' or '-3'.
        sign = 1 if arg[0] == '+' else -1
        return sign * int(arg[1:])

    i = 0
    while i < len(instructions):
        args = instructions[i].split()
        op = args[0]
        if op == 'hlf':
            registers[args[1]] //= 2
            i += 1
        elif op == 'tpl':
            registers[args[1]] *= 3
            i += 1
        elif op == 'inc':
            registers[args[1]] += 1
            i += 1
        elif op == 'jmp':
            i += _offset(args[1])
        elif op == 'jie':
            # args[1] carries a trailing comma, e.g. 'a,'.
            i += _offset(args[2]) if registers[args[1][:-1]] % 2 == 0 else 1
        elif op == 'jio':
            # 'jio' is jump-if-ONE, not jump-if-odd.
            i += _offset(args[2]) if registers[args[1][:-1]] == 1 else 1
        else:
            # BUG FIX: the original printed a warning without advancing
            # ``i``, which looped forever on an unknown instruction.
            raise ValueError('Unrecognized instruction: %s' % instructions[i])
def main():
    """Solve both puzzle parts using the program in 'input.txt'."""
    with open('input.txt', 'r') as f:
        rows = f.read().strip().split('\n')

    # Part 1 starts with a = 0, part 2 with a = 1; b is reported each time.
    for a_start, message in ((0, 'Starting both at 0, b ends up with: %d.'),
                             (1, 'Starting a at 1, b ends up with: %d.')):
        registers = {'a': a_start, 'b': 0}
        simulate(registers, rows)
        print(message % registers['b'])


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6532459 | # output.__init__.py
from .dict_format import OutputModule
from .sqlite import SQLiteModule
from .textcloud import TextCloudModule
| StarcoderdataPython |
6650887 | from typing import Callable
from typing import List
from typing import Optional
from typing import Union
from torch import nn
from caldera.defaults import CalderaDefaults as D
from caldera.utils import pairwise
class MLPBlock(nn.Module):
"""A multilayer perceptron block."""
def __init__(
self,
input_size: int,
output_size: int = None,
layer_norm: bool = True,
dropout: float = None,
activation: Callable = D.activation,
):
super().__init__()
if output_size is None:
output_size = input_size
layers = [nn.Linear(input_size, output_size), activation()]
if layer_norm:
layers.append(nn.LayerNorm(output_size))
if dropout:
layers.append(nn.Dropout(dropout))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class MLP(nn.Module):
"""A multilayer perceptron."""
def __init__(
self,
*latent_sizes: List[int],
layer_norm: bool = True,
dropout: float = None,
activation: Callable = D.activation
):
super().__init__()
self.layers = nn.Sequential(
*[
MLPBlock(
n1,
n2,
layer_norm=layer_norm,
dropout=dropout,
activation=activation,
)
for n1, n2 in pairwise(latent_sizes)
]
)
def forward(self, x):
return self.layers(x)
| StarcoderdataPython |
11275622 | <reponame>bigfoolliu/liu_aistuff
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu


"""Search and replace within strings (demo script)."""

import re

s = "this is china, and i love it"
target = "loe"

# Search: list all non-overlapping matches of ``target`` in ``s``.
# NOTE(review): "loe" never occurs in ``s``, so this prints [] -- possibly
# a typo for "love"; confirm the intended target.
ret = re.findall(target, s)
print(s, ret)

# Replace: regex substitution of every "i" with "we", then a plain
# literal replacement via str.replace.
ret = re.sub("i", "we", s)
print(s, ret)
print(s.replace("china", "world"))
3470332 | # @Time : 2021/4/19
# @Author : <NAME>
# @Email : <EMAIL>
"""
textbox.evaluator.averagelength_evaluator
##########################################
"""
import numpy as np
from textbox.evaluator.abstract_evaluator import AbstractEvaluator
class AvgLenEvaluator(AbstractEvaluator):
    """Evaluator reporting the length of each generated sentence."""

    def _calc_metrics_info(self, generate_corpus, reference_corpus=None):
        """Return {'avg-length': [len(sentence), ...]} for the corpus.

        ``reference_corpus`` is accepted for interface compatibility but
        not used by this metric.
        """
        lengths = [len(sentence) for sentence in generate_corpus]
        return {'avg-length': lengths}
| StarcoderdataPython |
4812410 | <reponame>shreyaphirke/interbotix_ros_manipulators
from interbotix_xs_modules.arm import InterbotixManipulatorXS
# This script commands some arbitrary positions to the arm joints
#
# To get started, open a terminal and type...
# 'roslaunch interbotix_xsarm_control xsarm_control.launch robot_model:=wx250s'
# Then change to this directory and type 'python joint_position_control.py'
def main():
    """Drive a WidowX-250s arm: home -> joint targets -> home -> sleep."""
    bot = InterbotixManipulatorXS("wx250s", "arm", "gripper")
    joint_positions = [-1.0, 0.5, 0.5, 0, -0.5, 1.57]

    bot.arm.go_to_home_pose()
    bot.arm.set_joint_positions(joint_positions)
    bot.arm.go_to_home_pose()
    bot.arm.go_to_sleep_pose()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
134347 | <filename>MLlib/models.py
from MLlib.optimizers import GradientDescent
from MLlib.activations import sigmoid
from MLlib.utils.misc_utils import generate_weights
from MLlib.utils.decision_tree_utils import partition, find_best_split
from MLlib.utils.decision_tree_utils import Leaf, Decision_Node
from MLlib.utils .knn_utils import get_neighbours
from MLlib.utils.naive_bayes_utils import make_likelihood_table
from MLlib.utils.gaussian_naive_bayes_utils import get_mean_var, p_y_given_x
from collections import Counter
import numpy as np
import pickle
from datetime import datetime
import math
# Timestamp format used when naming saved model snapshot files.
DATE_FORMAT = '%d-%m-%Y_%H-%M-%S'
class LinearRegression():
    """
    Implement Linear Regression Model.

    METHODS
    =======

    fit(X, Y, optimizer=GradientDescent, epochs=25, zeros=False, save_best=False):
        Train the model on the dataset with the chosen optimizer.

    predict(X):
        Predict outputs for the given inputs using the trained weights.

    save(name):
        Persist the trained model to disk in .rob (pickle) format.
    """

    def fit(
        self,
        X,
        Y,
        optimizer=GradientDescent,
        epochs=25,
        zeros=False,
        save_best=False
    ):
        """
        Train the model by fitting its weights to the dataset.

        PARAMETERS
        ==========

        X: ndarray(dtype=float)
            Array of Dataset's Input.

        Y: ndarray(dtype=float)
            Array of Dataset's Output.

        optimizer: class
            One of the optimizers (GradientDescent, SGD, MBGD, RMSprop,
            AdamDelta, ...); must expose ``iterate`` and ``loss_func``.

        epochs: int
            Number of optimization iterations.

        zeros: boolean
            If True, initialize weights to zeros instead of random values.

        save_best: boolean
            If True, save a timestamped snapshot whenever the epoch loss
            improves on the best seen so far.

        RETURNS
        =======

        None
        """
        self.weights = generate_weights(X.shape[1], 1, zeros=zeros)
        # Track the lowest-loss weights observed during training.
        self.best_weights = {"weights": None, "loss": float('inf')}

        print("Starting training with loss:",
              optimizer.loss_func.loss(X, Y, self.weights))
        for epoch in range(1, epochs + 1):
            print("======================================")
            print("epoch:", epoch)
            self.weights = optimizer.iterate(X, Y, self.weights)
            epoch_loss = optimizer.loss_func.loss(X, Y, self.weights)
            if save_best and epoch_loss < self.best_weights["loss"]:
                print("updating best weights (loss: {})".format(epoch_loss))
                self.best_weights['weights'] = self.weights
                self.best_weights['loss'] = epoch_loss
                # Version strings are timestamped (DATE_FORMAT) so every
                # snapshot gets a unique file name.
                version = "model_best_" + datetime.now().strftime(DATE_FORMAT)
                print("Saving best model version: ", version)
                self.save(version)
            print("Loss in this step: ", epoch_loss)

        # The final model is always saved, regardless of save_best.
        version = "model_final_" + datetime.now().strftime(DATE_FORMAT)
        print("Saving final model version: ", version)
        self.save(version)

        print("======================================\n")
        print("Finished training with final loss:",
              optimizer.loss_func.loss(X, Y, self.weights))
        print("=====================================================\n")

    def predict(self, X):
        """
        Predict output values for inputs ``X`` as ``X @ weights``.

        PARAMETERS
        ==========

        X: ndarray(dtype=float)
            Array of Dataset's Input.

        RETURNS
        =======

        ndarray(dtype=float)
            Predicted values corresponding to each input.
        """
        return np.dot(X, self.weights)

    def save(self, name):
        """
        Save the model to ``<name>.rob`` (a pickle file) on disk.

        PARAMETERS
        ==========

        name: str
            File name (without extension) to save under.

        RETURNS
        =======

        None
        """
        with open(name + '.rob', 'wb') as robfile:
            pickle.dump(self, robfile)
class LogisticRegression(LinearRegression):
    """
    Logistic Regression model: sigmoid over a linear combination.

    Inherits training (``fit``) and persistence (``save``) from
    ``LinearRegression``; only prediction and classification differ.

    METHODS
    =======

    predict(X):
        Return the probability of each input belonging to class 1.

    classify(X):
        Return hard 0/1 class labels for each input.
    """

    def predict(self, X):
        """
        Return ``sigmoid(X @ weights)`` as a (1, n_samples) array of
        probabilities of class membership.

        PARAMETERS
        ==========

        X: ndarray(dtype=float)
            Array of Dataset's Input.

        RETURNS
        =======

        ndarray(dtype=float)
            Probability of each input belonging to class 1.
        """
        prediction = np.dot(X, self.weights).T
        return sigmoid(prediction)

    def classify(self, X):
        """
        Return hard class labels (0.0 or 1.0), shape (1, n_samples).

        A sample is labelled 1 when its predicted probability is
        strictly greater than 0.5.

        PARAMETERS
        ==========

        X: ndarray(dtype=float)
            Array of Dataset's Input.

        RETURNS
        =======

        ndarray(dtype=float)
            Predicted classes (0.0 or 1.0) for each input.
        """
        # Reuse predict() instead of duplicating the forward pass, and
        # threshold with a vectorised comparison instead of the original
        # O(n) Python loop.  float64 matches np.zeros' default dtype.
        probabilities = self.predict(X)
        return (probabilities > 0.5).astype(np.float64)
class DecisionTreeClassifier():
    """CART-style decision tree classifier built on information gain."""

    # Root node of the fitted tree (Leaf or Decision_Node); None until fit().
    root = None

    def fit(self, rows):
        """
        Build the tree from training ``rows``.

        BUG FIX: the original recursed through ``fit`` itself, which
        assigned ``self.root`` instead of returning nodes, so every
        recursive call returned None and the tree was never linked up.
        The recursion now lives in ``_build``, which returns nodes.
        """
        self.root = self._build(rows)

    def _build(self, rows):
        """Recursively build and return the subtree for ``rows``."""
        # Try partitioning the dataset on each unique attribute, calculate
        # the information gain, and take the question with the highest gain.
        gain, question = find_best_split(rows)

        # Base case: no further info gain -- return a leaf.
        if gain == 0:
            return Leaf(rows)

        # A useful feature/value was found: split and recurse.
        true_rows, false_rows = partition(rows, question)
        true_branch = self._build(true_rows)
        false_branch = self._build(false_rows)

        # Record the best feature/value to ask at this point.
        return Decision_Node(question, true_branch, false_branch)

    def print_tree(self, spacing=""):
        """
        Pretty-print the fitted tree.

        BUG FIX: the original recursed as ``self.print_tree(branch, ...)``
        which passed the branch node as the ``spacing`` argument.
        """
        self._print_node(self.root, spacing)

    def _print_node(self, node, spacing=""):
        """Print ``node`` and its subtrees, indented by ``spacing``."""
        if isinstance(node, Leaf):
            print(spacing + "Predict", node.predictions)
            return

        print(spacing + str(node.question))

        print(spacing + '--> True:')
        self._print_node(node.true_branch, spacing + "  ")

        print(spacing + '--> False:')
        self._print_node(node.false_branch, spacing + "  ")

    def classify(self, row):
        """
        Classify ``row``, returning the matching Leaf's predictions.

        BUG FIX: the original recursed as ``self.classify(row, branch)``
        although ``classify`` accepts only ``row``.
        """
        return self._classify_node(row, self.root)

    def _classify_node(self, row, node):
        """Walk the tree from ``node`` following question matches."""
        if isinstance(node, Leaf):
            return node.predictions

        # Compare the feature/value stored in the node to this example.
        if node.question.match(row):
            return self._classify_node(row, node.true_branch)
        return self._classify_node(row, node.false_branch)
class KNN():
    """
    K Nearest Neighbours model, usable as either a classifier or a
    regressor depending on the ``classify`` flag passed to ``predict``.
    """

    def predict(self, train, test_row, num_neighbours=7, classify=True):
        """Predict the label or value of ``test_row`` from its neighbours.

        PARAMETERS
        ==========

        train: ndarray
            Collection of training points with their features and target
            value in the last position.

        test_row: ndarray
            The query point, in the same layout as the training rows.

        num_neighbours: int
            How many nearest neighbours to consider.

        classify: Boolean
            True -> majority vote over neighbour labels (classification);
            False -> mean of neighbour values (regression).

        RETURNS
        =======

        prediction: int/float
            Majority class when classifying, otherwise the neighbours'
            mean value.
        """
        nearest = get_neighbours(
            train, test_row, num_neighbours, distance_metrics="block")
        targets = [row[-1] for row in nearest]
        if classify:
            # Majority vote among the neighbours' labels.
            return max(set(targets), key=targets.count)
        # Regression: average of the neighbours' target values.
        return sum(targets) / len(targets)
class Naive_Bayes():
    """
    pyx: P(y/X) is proportional to p(x1/y)*p(x2/y)...*p(y)
    using log and adding as multiplying for smaller
    numbers can make them very small
    As denominator P(X)=P(x1)*P(x2), is common we can ignore it.
    """

    def predict(self, x_label, y_class):
        # Candidate [log-probability, feature value, class] triples.
        pyx = []
        likelihood = make_likelihood_table(x_label, y_class)
        Y = np.unique(y_class)
        X = np.unique(x_label)
        for j in range(len(Y)):
            total = 0
            for i in range(len(X)):
                # NOTE(review): zero likelihoods are skipped rather than
                # making the class impossible (no smoothing) -- confirm
                # this is the intended behaviour.
                if(likelihood[i][j] == 0):
                    continue
                total += math.log(likelihood[i][j])
            y_sum = (y_class == Y[j]).sum()
            if y_sum:
                # Add the log prior P(y = Y[j]).
                total += math.log(y_sum / len(y_class))
            # NOTE(review): X[i] here is the loop variable *after* the
            # inner loop, i.e. always the last feature value -- looks
            # suspicious; verify which feature is meant to be reported.
            pyx.append([total, X[i], Y[j]])
        # max() compares on the log-probability (first element).
        prediction = max(pyx)
        return [prediction[1], prediction[2]]
class Gaussian_Naive_Bayes():
    """Gaussian Naive Bayes classifier for continuous features."""

    # data is variable input given by user for which we predict the label.
    # Here we predict the gender from given list of height, weight, foot_size
    def predict(self, data, x_label, y_class):
        """Return the class whose posterior P(y | data) is highest.

        Returns None if every class has zero posterior.
        """
        mean, var = get_mean_var(x_label, y_class)
        best_posterior = 0
        prediction = None
        for (k1, v1), (k2, v2) in zip(mean.items(), var.items()):
            # NOTE(review): the prior counts k1 in x_label -- confirm the
            # label/class argument ordering upstream is as intended.
            pre_prob = Counter(x_label)[k1] / len(x_label)
            pro = 1
            for i in range(len(v1)):
                pro *= p_y_given_x(data[i], v1[i], v2[i])
            pxy = pro * pre_prob
            # BUG FIX: the original compared against a running maximum it
            # never updated, so the *last* class with pxy > 0 always won.
            if pxy > best_posterior:
                best_posterior = pxy
                prediction = k1
        return prediction
| StarcoderdataPython |
4893902 | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.api import test_share_networks
class ShareNetworkAdminTest(
        base.BaseSharesAdminTest,
        test_share_networks.ShareNetworkListMixin):
    # Admin-scoped tests for listing/filtering share networks across tenants.

    @classmethod
    def resource_setup(cls):
        """Create two share networks with attached security services.

        One (LDAP) belongs to the admin client and one (Kerberos) to an
        isolated alt-tenant client, so the all_tenants / project_id list
        filters exercised below have data from two tenants.
        """
        super(ShareNetworkAdminTest, cls).resource_setup()
        ss_data = cls.generate_security_service_data()
        cls.ss_ldap = cls.create_security_service(**ss_data)

        cls.data_sn_with_ldap_ss = {
            'name': 'sn_with_ldap_ss',
            'neutron_net_id': '1111',
            'neutron_subnet_id': '2222',
            'created_at': '2002-02-02',
            'updated_at': None,
            'network_type': 'vlan',
            'segmentation_id': 1000,
            'cidr': '10.0.0.0/24',
            'ip_version': 4,
            'description': 'fake description',
        }
        cls.sn_with_ldap_ss = cls.create_share_network(
            cleanup_in_class=True,
            **cls.data_sn_with_ldap_ss)

        cls.shares_client.add_sec_service_to_share_network(
            cls.sn_with_ldap_ss["id"],
            cls.ss_ldap["id"])

        # Separate (alt) tenant credentials for the second network.
        cls.isolated_client = cls.get_client_with_isolated_creds(
            type_of_creds='alt')

        cls.data_sn_with_kerberos_ss = {
            'name': 'sn_with_kerberos_ss',
            'created_at': '2003-03-03',
            'updated_at': None,
            'neutron_net_id': 'test net id',
            'neutron_subnet_id': 'test subnet id',
            'network_type': 'local',
            'segmentation_id': 2000,
            'cidr': '10.0.0.0/13',
            'ip_version': 6,
            'description': 'fake description',
        }

        # NOTE(review): the Kerberos security service is created from the
        # *LDAP* data dict -- looks like a copy/paste slip; confirm intent.
        cls.ss_kerberos = cls.isolated_client.create_security_service(
            ss_type='kerberos',
            **cls.data_sn_with_ldap_ss)

        cls.sn_with_kerberos_ss = cls.isolated_client.create_share_network(
            cleanup_in_class=True,
            **cls.data_sn_with_kerberos_ss)

        cls.isolated_client.add_sec_service_to_share_network(
            cls.sn_with_kerberos_ss["id"],
            cls.ss_kerberos["id"])

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    def test_list_share_networks_all_tenants(self):
        # With all_tenants=1 the admin must see both tenants' networks.
        listed = self.shares_client.list_share_networks_with_detail(
            {'all_tenants': 1})
        self.assertTrue(any(self.sn_with_ldap_ss['id'] == sn['id']
                            for sn in listed))
        self.assertTrue(any(self.sn_with_kerberos_ss['id'] == sn['id']
                            for sn in listed))

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    def test_list_share_networks_filter_by_project_id(self):
        # Filtering by the alt tenant's project id must return that
        # tenant's networks only, and include the Kerberos network.
        listed = self.shares_client.list_share_networks_with_detail(
            {'project_id': self.sn_with_kerberos_ss['project_id']})
        self.assertTrue(any(self.sn_with_kerberos_ss['id'] == sn['id']
                            for sn in listed))
        self.assertTrue(all(self.sn_with_kerberos_ss['project_id'] ==
                            sn['project_id'] for sn in listed))
1739608 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import pyauto_functional # Must be imported before pyauto
import pyauto
import pyauto_utils
class AboutPluginsUITest(pyauto.PyUITest):
  """Testcase for chrome://plugins UI."""

  def testAboutPluginDetailInfo(self):
    """Verify chrome://plugins page shows plugin details."""
    self.NavigateToURL('chrome://plugins/')
    driver = self.NewWebDriver()
    details_link = driver.find_element_by_id('details-link')
    self.assertTrue(self.WaitUntil(lambda: details_link.is_displayed()),
                    msg='Details link could not be found.')
    details_link.click()
    # Verify that detail info for Remote Viewer plugin shows up.
    # Remote Viewer plugin is expected to be present on all platforms.
    remoting_xpath = '//*[@jscontent="path"][text()="internal-remoting-viewer"]'
    remoting_shown = lambda: len(driver.find_elements_by_xpath(remoting_xpath))
    self.assertTrue(self.WaitUntil(remoting_shown))
class ChromeAboutPluginsUITest(pyauto.PyUITest):
    """Testcase for official build only plugins in chrome://plugins UI."""

    def Debug(self):
        """chrome://plugins test debug method.

        This method will not run automatically.
        """
        self.NavigateToURL('chrome://plugins/')
        driver = self.NewWebDriver()
        import pdb
        pdb.set_trace()

    def _IsEnabled(self, plugin_name):
        """Checks if plugin is enabled.

        Args:
          plugin_name: Plugin name to verify.

        Returns:
          True, if plugin is enabled, or False otherwise.
        """
        for plugin in self.GetPluginsInfo().Plugins():
            if re.search(plugin_name, plugin['name']):
                return plugin['enabled']
        # Fix: previously fell through returning None when no plugin matched;
        # return an explicit bool as the docstring promises (None and False
        # are both falsy, so callers are unaffected).
        return False

    def _ExpandDetailInfoLink(self, driver):
        """Expand detail info link.

        Args:
          driver: A Chrome driver object.
        """
        detail_link = driver.find_element_by_id('details-link')
        self.assertTrue(self.WaitUntil(lambda: detail_link.is_displayed()),
                        msg='Details link could not be found.')
        detail_link.click()

    def _OverridePluginPageAnimation(self, driver):
        """Override the animation for expanding detail info to make sure element
        remain at the same location where web driver found it.

        Args:
          driver: A Chrome driver object.
        """
        override_animation_style_js = """
            style = document.createElement('style');
            style.innerHTML = "* { -webkit-transition: all 0s ease-in !important}";
            document.head.appendChild(style);
        """
        driver.execute_script(override_animation_style_js)

    def testAboutPluginEnableAndDisablePDFPlugin(self):
        """Verify enable and disable pdf plugins from about:plugins page."""
        self.NavigateToURL('chrome://plugins/')
        driver = self.NewWebDriver()
        self._OverridePluginPageAnimation(driver)
        self._ExpandDetailInfoLink(driver)
        pdf_disable_path = '//*[@class="plugin-name"][text()="Chrome PDF Viewer"' \
            ']//ancestor::*[@class="plugin-text"]//a[text()="Disable"]'
        pdf_enable_path = '//*[@class="plugin-name"][text()="Chrome PDF Viewer"' \
            ']//ancestor::*[@class="plugin-text"]//a[text()="Enable"]'
        # Confirm Chrome PDF Viewer plugin is found and find disable PDF link.
        pdf_disable_link = pyauto_utils.WaitForDomElement(self, driver,
                                                          pdf_disable_path)
        # Disable PDF viewer plugin in about:plugins.
        pdf_disable_link.click()
        self.assertTrue(self.WaitUntil(lambda: not
                                       self._IsEnabled('Chrome PDF Viewer')))
        # Re-enable PDF viewer plugin.
        pdf_enable_link = driver.find_element_by_xpath(pdf_enable_path)
        pdf_enable_link.click()
        self.assertTrue(self.WaitUntil(lambda:
                                       self._IsEnabled('Chrome PDF Viewer')))

    def testEnableAndDisableFlashPlugin(self):
        """Verify enable and disable flash plugins from about:plugins page."""
        self.NavigateToURL('chrome://plugins/')
        driver = self.NewWebDriver()
        self._OverridePluginPageAnimation(driver)
        self._ExpandDetailInfoLink(driver)
        flash_plugins_elem = driver.find_element_by_xpath(
            '//*[@jscontent="name"][text()="Flash"]//ancestor' \
            '::*[@class="plugin-text"]')
        # Disable flash plugin from flash detail info.
        flash_disable_link = flash_plugins_elem.find_element_by_xpath(
            './/a[text()="Disable"]')
        flash_disable_link.click()
        self.assertTrue(self.WaitUntil(lambda: not
                                       self._IsEnabled('Shockwave Flash')))
        # Re-enable Flash plugin from flash detail info.
        flash_enable_link = flash_plugins_elem.find_element_by_xpath(
            './/a[text()="Enable"]')
        flash_enable_link.click()
        self.assertTrue(self.WaitUntil(lambda:
                                       self._IsEnabled('Shockwave Flash')))
if __name__ == '__main__':
pyauto_functional.Main()
| StarcoderdataPython |
9676823 | <reponame>yanshengjia/nlp
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Fix: this demo was written in Python 2 (print statements) and is a syntax
# error under Python 3; converted to print() calls.  A trailing comma in a
# Python 2 print maps to end=' '.  All runtime strings are unchanged.
from snownlp import SnowNLP

s = SnowNLP(u':#微感动#【一次搀扶,四载照顾[心]】路遇一摔倒老人蜷缩在地,她将老人扶起送回家。不放心老人她次日再去拜访,得知老人孤身一人后从此义务照顾!每天看望,帮洗衣服,陪着聊天…她坚持至今4年!她说当下好多人不敢扶老人,想用行动改变大家看法!31岁山西籍好人赵艳善心无畏惧[赞](央视记者杨晓东)')

print('中文分词:')
for each in s.words:
    print(each, end=' ')
print('\n')

print('词性标注:')
for each in s.tags:
    print("('" + each[0] + "','" + each[1] + "')")
print('\n')

print('拼音:')
for each in s.pinyin:
    print(each, end=' ')
print('\n')

print('提取文本关键字:')
for each in s.keywords(3):
    print(each, end=' ')
print('\n')

print('提取文本摘要:')
for each in s.summary(3):
    print(each)
print('\n')

print('分割成句子:')
for each in s.sentences:
    print(each)
print('\n')

print("积极情感度:" + str(s.sentiments) + '\n')
| StarcoderdataPython |
10560 |
class IOEngine(object):
    """Tracks input/output dependency links between a node and its peers.

    ``inputs`` holds the ids of nodes this node reads from; ``outputs`` holds
    the ids of nodes that read from this node.  Peer engines are kept in sync
    through the owning node's model (``node.model``).
    """

    def __init__(self, node):
        self.node = node
        self.inputs = []
        self.outputs = []

    def release(self):
        """Drop all references so the engine and its node can be collected."""
        self.inputs = None
        self.outputs = None
        self.node = None

    def updateInputs(self, names):
        """Replace the input list with ``names``, keeping peers in sync."""
        # Unregister this node from inputs that are no longer referenced.
        for inputNode in self.inputs:
            if inputNode not in names:
                if self.node.model.existNode(inputNode):
                    self.node.model.getNode(inputNode).ioEngine.removeOutput(
                        self.node.identifier)
        # Register with newly added inputs and rebuild the list, keeping only
        # ids that actually exist in the model.
        newInputs = []
        for nodeId in names:
            if self.node.model.existNode(nodeId):
                newInputs.append(nodeId)
                if nodeId not in self.inputs:
                    self.node.model.getNode(nodeId).ioEngine.addOutput(
                        self.node.identifier)
        self.inputs = newInputs

    def removeOutput(self, nodeId):
        """Forget that ``nodeId`` reads from this node (no-op if unknown)."""
        if nodeId in self.outputs:
            self.outputs.remove(nodeId)

    def removeInput(self, nodeId):
        """Forget that this node reads from ``nodeId`` (no-op if unknown)."""
        if nodeId in self.inputs:
            self.inputs.remove(nodeId)

    def addOutput(self, nodeId):
        """Record that ``nodeId`` reads from this node."""
        self.outputs.append(nodeId)

    def updateNodeId(self, oldId, newId):
        """Propagate this node's rename to every connected peer engine."""
        for inputNode in self.inputs:
            if self.node.model.existNode(inputNode):
                self.node.model.getNode(
                    inputNode).ioEngine.updateOutputId(oldId, newId)
        for outputNode in self.outputs:
            if self.node.model.existNode(outputNode):
                self.node.model.getNode(
                    outputNode).ioEngine.updateInputId(oldId, newId)

    def updateOnDeleteNode(self):
        """Unlink this node from all peers before it is deleted."""
        for inputNode in self.inputs:
            if self.node.model.existNode(inputNode):
                self.node.model.getNode(inputNode).ioEngine.removeOutput(
                    self.node.identifier)
        for outputNode in self.outputs:
            if self.node.model.existNode(outputNode):
                self.node.model.getNode(outputNode).ioEngine.removeInput(
                    self.node.identifier)

    def updateOutputId(self, oldId, newId):
        """Rename ``oldId`` to ``newId`` in the output list, if present."""
        if oldId in self.outputs:
            self.outputs.remove(oldId)
            self.outputs.append(newId)

    def updateInputId(self, oldId, newId):
        """Rename ``oldId`` to ``newId`` in the input list, if present."""
        if oldId in self.inputs:
            self.inputs.remove(oldId)
            self.inputs.append(newId)
            # The node's stored definition references input ids by name, so it
            # must be rewritten as well.
            self.node.updateDefinitionForChangeId(oldId, newId)
| StarcoderdataPython |
1943197 | """
__new__()方法, 对象创建的过程,
1- new方法返回一个对象 2- init利用new返回的对象进行属性的添加
"""
class Person(object):
    """Demo of how __new__ and __init__ cooperate during instantiation."""

    # __new__ intercepts instance creation; the object it returns is what the
    # caller's variable is bound to.  If __new__ does not return an instance
    # of the class, __init__ is never executed.
    def __new__(cls, *args, **kwargs):
        print("new")
        # Printed object is a throwaway instance created only to show its
        # address; the one actually returned is created on the next line.
        print((object.__new__(cls)))
        return object.__new__(cls)

    # Constructor: by the time __init__ runs the object **already exists**;
    # this method only attaches attributes to it.
    def __init__(self, name):
        print("init")
        self.name = name

    # toString-style method of the class (kept commented out in the original):
    # def __str__(self):
    #     return "我的名字是: %s" % self.name

    # Invoked when the reference count drops to zero.
    def __del__(self):
        print("再见")
# xioaming的地址和new中return的obj的地址一样,说明new中返回的obj就是xiaoming
xiaoming = Person("小明")
print(xiaoming)
print("=" * 28)
"""
python的单例模式,需要使用到new关键方法
1- 保证返回的对象是同一个,在new中修改
2- 保证对象的属性只能赋值一次,在init方法中修改
3- 一般单例模式中的包含静态方法, 类似于Tools.XX, 不需要创建多个对象来调用同一个静态方法
"""
class Student(object):
    """Singleton demo: __new__ always returns one shared instance and
    __init__ assigns attributes only the first time."""

    # Class attribute caching the single shared instance.
    __instance = None
    # Flag guaranteeing the instance attributes are assigned only once.
    __is_first = True

    # s1 and s2 must share one object, so __new__ returns the cached instance.
    def __new__(cls, *args, **kwargs):
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
        return cls.__instance

    def __init__(self, name, age):
        # Reads the class attribute on the first call; afterwards the
        # instance attribute set below shadows it and skips reassignment.
        if self.__is_first:
            self.name = name
            self.age = age
            self.__is_first = False

    # Static method: callable as Student.add_num without an instance.
    @staticmethod
    def add_num(a, b):
        return a + b
s1 = Student("小明", 25)
s2 = Student("小红", 28)
print(s1)
print(s2)
print(s1.name)
print(s2.name)
| StarcoderdataPython |
3426861 | # -*- coding: utf-8 -*-
from music21 import *
import copy
def richardBreedGetWell():
    '''
    <NAME> is a donor who supports the purchases of early music materials at M.I.T. --
    I used this code as part of a get well card for him, it finds the name BREED in the Beethoven
    quartets. (well something close, B-rest-E-E-D returned nothing, so I instead did b-r-E-d, where
    the e has to be long...)

    finds a few places in opus132 and nothing else
    '''
    for workName in corpus.getBeethovenStringQuartets('.xml'):
        if 'opus132' not in workName:
            continue
        beethovenScore = converter.parse(workName)
        for partNum in range(len(beethovenScore)):
            print(workName, str(partNum))
            thisPart = beethovenScore[partNum]
            thisPart.title = workName + str(partNum)
            display = stream.Stream()
            notes = thisPart.flat.notesAndRests
            # Slide a 4-element window over the part looking for the pattern
            # B, rest, E, D where the E is longer than its three neighbors.
            for i in range(len(notes) - 5):
                if (notes[i].isNote and notes[i].name == 'B') and \
                   notes[i+1].isRest is True and \
                   (notes[i+2].isNote and notes[i+2].name == 'E') and \
                   (notes[i+3].isNote and notes[i+3].name == 'D') and \
                   (notes[i+2].duration.quarterLength > notes[i].duration.quarterLength) and \
                   (notes[i+2].duration.quarterLength > notes[i+1].duration.quarterLength) and \
                   (notes[i+2].duration.quarterLength > notes[i+3].duration.quarterLength):
                    measureNumber = 0
                    lastMeasure = None
                    # Collect the distinct measures spanned by the match so
                    # they can be displayed together.
                    for j in range(4):
                        thisMeasure = notes[i+j].getContextByClass(stream.Measure)
                        if thisMeasure is not None and thisMeasure is not lastMeasure:
                            lastMeasure = thisMeasure
                            measureNumber = thisMeasure.number
                            thisMeasure.insert(0, thisMeasure.bestClef())
                            display.append(thisMeasure)
                    # Label the hit with work, part and measure number.
                    notes[i].lyric = workName + " " + str(thisPart.id) + " " + str(measureNumber)
            if len(display) > 0:
                display.show()
def annotateWithGerman():
    '''
    annotates a score with the German notes for each note
    '''
    from music21 import corpus
    bwv295 = corpus.parse('bach/bwv295')
    # Attach each pitch's German note name as a lyric under the note.
    for thisNote in bwv295.flat.notes:
        thisNote.addLyric(thisNote.pitch.german)
    bwv295.show()
def bachParallels():
    '''
    find all instances of parallel fifths or octaves in Bach chorales.

    Checking the work of <NAME> and <NAME>,
    "Parallel successions of perfect fifths in the Bach chorales"
    Proceedings of the fourth Conference on Interdisciplinary Musicology (CIM08)
    Thessaloniki, Greece, 3-6 July 2008, http://web.auth.gr/cim08/
    '''
    from music21 import corpus
    for fn in corpus.getBachChorales():
        print (fn)
        c = corpus.parse(fn)
        displayMe = False
        # Compare every unordered pair of named SATB parts.
        for i in range(len(c.parts) - 1):
            iName = c.parts[i].id
            if iName.lower() not in ['soprano', 'alto', 'tenor', 'bass']:
                continue
            ifn = c.parts[i].flat.notesAndRests
            omi = ifn.offsetMap
            for j in range(i+1, len(c.parts)):
                jName = c.parts[j].id
                if jName.lower() not in ['soprano', 'alto', 'tenor', 'bass']:
                    continue
                jfn = c.parts[j].flat.notesAndRests
                # Walk consecutive note pairs in part i and find what part j
                # sounds at the same two moments.
                for k in range(len(omi) - 1):
                    n1pi = omi[k]['element']
                    n2pi = omi[k+1]['element']
                    # The -.001 nudges the lookup just inside the first
                    # note's span so a note still sounding is returned.
                    n1pj = jfn.getElementsByOffset(offsetStart = omi[k]['endTime'] - .001, offsetEnd = omi[k]['endTime'] - .001, mustBeginInSpan = False)[0]
                    n2pj = jfn.getElementsByOffset(offsetStart = omi[k+1]['offset'], offsetEnd = omi[k+1]['offset'], mustBeginInSpan = False)[0]
                    if n1pj is n2pj:
                        continue # no oblique motion
                    if n1pi.isRest or n2pi.isRest or n1pj.isRest or n2pj.isRest:
                        continue
                    if n1pi.isChord or n2pi.isChord or n1pj.isChord or n2pj.isChord:
                        continue
                    vlq = voiceLeading.VoiceLeadingQuartet(n1pi, n2pi, n1pj, n2pj)
                    # Keep only parallel octaves (P8) or fifths (P5).
                    if vlq.parallelMotion('P8') is False and vlq.parallelMotion('P5') is False:
                        continue
                    displayMe = True
                    n1pi.addLyric('par ' + str(vlq.vIntervals[0].name))
                    n2pi.addLyric(' w/ ' + jName)
                    # Alternative excerpt-display code retained from the original:
                    # m1 = stream.Measure()
                    # m1.append(n1pi)
                    # m1.append(n2pi)
                    # r1 = note.Rest()
                    # r1.duration.quarterLength = 8 - m1.duration.quarterLength
                    # m1.append(r1)
                    # m2 = stream.Measure()
                    # m2.append(n1pj)
                    # m2.append(n2pj)
                    # r2 = note.Rest()
                    # r2.duration.quarterLength = 8 - m2.duration.quarterLength
                    # m2.append(r2)
                    #
                    # p1.append(m1)
                    # p2.append(m2)
                    # sc.append(p1)
                    # sc.append(p2)
                    # sc.show()
        if displayMe:
            c.show()
def towersOfHanoi(show = False, numParts = 6, transpose = False):
    '''
    generates a score solution to the Tower of Hanoi problem
    similar in spirit to the one that <NAME> made, but
    with any number of parts. iterating over numParts(1...8) and
    setting transpose to False gives the same solution as
    <NAME> found.
    '''
    sc = stream.Score()
    lowPitch = pitch.Pitch("C5")
    medPitch = pitch.Pitch("D5")
    highPitch = pitch.Pitch("E5")
    descendingPitches = [medPitch, lowPitch, highPitch]
    ascendingPitches = [lowPitch, medPitch, highPitch]
    # Parity of the total voice count decides which cycle the odd-numbered
    # and even-numbered voices use.
    if (numParts/2.0) == int(numParts/2.0):
        oddPitches = descendingPitches
        evenPitches = ascendingPitches
    else:
        oddPitches = ascendingPitches
        evenPitches = descendingPitches
    # One part per disc; each successive part moves half as often.
    for i in range(1, numParts + 1):
        baseQuarterLength = 2**(i-2) # .5, 1, 2, 4, etc.
        firstNote = note.Note("E5")
        firstNote.quarterLength = baseQuarterLength
        if (i/2.0) == int(i/2.0):
            pitchCycle = copy.deepcopy(evenPitches)
        else:
            pitchCycle = copy.deepcopy(oddPitches)
        if transpose == True and i != 1:
            for pe in pitchCycle: # take down P4s
                pe.transpose(-5 * (i-1), inPlace = True)
            firstNote.transpose(-5 * (i-1), inPlace = True)
        p = stream.Part()
        p.id = "v. " + str(i)
        p.append(firstNote)
        pc = -1
        # Number of moves this disc makes in a 2**numParts-move solution.
        maxNumber = 2**(numParts-i)
        for j in range(maxNumber):
            pc += 1
            if pc > 2:
                pc = 0
            n = note.Note()
            n.duration.quarterLength = baseQuarterLength * 2
            n.pitch = pitchCycle[pc]
            if j == maxNumber - 1: # last note
                n.duration.quarterLength = (baseQuarterLength) + 3.0
            p.append(n)
        finalRest = note.Rest()
        finalRest.duration.quarterLength = 1
        p.append(finalRest)
        sc.insert(0, p)
    if show == True:
        sc.show()
def pcsFromHumdrum(show = False):
    '''
    show how music21 can read Humdrum code to append the forte name to
    each vertical simultaneity in a score.

    Asked by <NAME> on 12/8/2010
    '''
    from music21.humdrum import testFiles
    myScore = converter.parse(testFiles.mazurka6)
    # Collapse all parts into a single chordal reduction.
    onePartScore = myScore.chordify()
    # Fix: build the report with a single join instead of quadratic
    # string concatenation inside the loop; output is byte-identical.
    output = "".join(thisChord.forteName + "\n"
                     for thisChord in onePartScore.flat.getElementsByClass(chord.Chord))
    if show == True:
        print (output)
#-------------------------------------------------------------------------------
if (__name__ == "__main__"):
# richardBreedGetWell()
# annotateWithGerman()
# countCs()
bachParallels()
# towersOfHanoi(show = False, transpose = False, numParts = 8)
# pcsFromHumdrum(show = True)
#------------------------------------------------------------------------------
# eof
| StarcoderdataPython |
6509824 | # Copyright (c) 2020 <NAME>
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
import os
import json
import logging
from moneysocket.beacon.beacon import MoneysocketBeacon
PERSIST_FILENAME = "connect-persist.json"

# Schema of a freshly initialized database file.
EMPTY_DB = {"asset_beacons": [],
            }


class ConnectDb():
    """JSON-file-backed persistence for the set of asset beacon strings."""

    def __init__(self, persist_dir):
        ConnectDb._make_dirs_exist(persist_dir)
        self.filename = os.path.join(persist_dir, PERSIST_FILENAME)
        logging.info("using: %s" % self.filename)
        self.make_exist(self.filename)
        # In-memory copy of the db; call persist() after mutating it.
        self.db = self.read_json(self.filename)

    ###########################################################################

    @staticmethod
    def _make_dirs_exist(dir_path):
        """Create dir_path (and parents) if it does not already exist."""
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

    ###########################################################################

    def make_exist(self, filename):
        """Write an empty db file at filename unless one already exists."""
        if os.path.exists(filename):
            return
        logging.info("initializing new connect persit db: %s" % filename)
        dir_path = os.path.dirname(filename)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        record = EMPTY_DB.copy()
        self.write_json(filename, record)

    def write_file(self, path, content):
        # Fix: use a context manager so the handle is closed even if the
        # write raises (the original leaked the handle on error).
        with open(path, 'w') as f:
            f.write(content)

    def write_json(self, path, info, quick=True):
        """Serialize info to path; quick=False pretty-prints with sorted keys."""
        content = (json.dumps(info) if quick else
                   json.dumps(info, indent=1, sort_keys=True))
        self.write_file(path, content)

    def read_json(self, path):
        # Fix: context manager instead of manual open/close.
        with open(path, 'r') as f:
            return json.loads(f.read())

    def persist(self):
        """Flush the in-memory db to disk."""
        self.write_json(self.filename, self.db)

    def depersist(self):
        """Delete the on-disk db file."""
        os.remove(self.filename)

    ###########################################################################

    def add_beacon(self, beacon):
        """Record a beacon (idempotent) and persist immediately."""
        beacon_str = beacon.to_bech32_str()
        if beacon_str not in self.db['asset_beacons']:
            self.db['asset_beacons'].append(beacon_str)
            self.persist()

    def remove_beacon(self, beacon):
        """Remove a beacon and persist; raises ValueError if not present."""
        beacon_str = beacon.to_bech32_str()
        self.db['asset_beacons'].remove(beacon_str)
        self.persist()

    def get_asset_beacons(self):
        """Decode and return all stored beacons as MoneysocketBeacon objects."""
        return [MoneysocketBeacon.from_bech32_str(b)[0] for b in
                self.db['asset_beacons']]

    def has_beacon(self, beacon):
        """Return True if the beacon is already stored."""
        beacon_str = beacon.to_bech32_str()
        return beacon_str in self.db['asset_beacons']
| StarcoderdataPython |
8148812 | <reponame>slowmosteve/news-app<gh_stars>0
import os
import json
import time
import datetime
import gcsfs
import requests
import uuid
import logging
import papermill
from flask import Flask, request
from subscriber import Subscriber
from loader import Loader
from google.cloud import pubsub, bigquery, storage
import google.auth
from google.auth import impersonated_credentials
app = Flask(__name__)
logging.basicConfig(level=logging.INFO)
GCP_PROJECT_ID = os.getenv('GCP_PROJECT_ID')
ENV = os.getenv('ENV')
gcsfs = gcsfs.GCSFileSystem(project=GCP_PROJECT_ID)
def get_news():
    """Fetch yesterday-and-later articles from the News API and write them to
    the articles GCS bucket as newline-delimited JSON.
    """
    logger = logging.getLogger('app.get_news')
    credentials, gcp_project_id = google.auth.default()
    gcs_client = storage.Client(credentials=credentials)
    # In production the API key lives in a GCS secrets bucket; in other
    # environments it comes straight from the environment.
    if ENV=='prod':
        secrets_bucket_name = os.getenv('SECRETS_BUCKET')
        secrets_bucket = gcs_client.get_bucket(secrets_bucket_name)
        secret_blob = secrets_bucket.get_blob('news-api-key.json').download_as_string()
        secret_json = json.loads(secret_blob.decode('utf-8'))
        api_key = secret_json['key']
    else:
        api_key = os.getenv('NEWS_API_KEY')
    # Articles published since yesterday.
    date_filter = (datetime.date.today() - datetime.timedelta(1)).strftime('%Y-%m-%d')
    # Whitelist of source domains passed to the News API.
    domain_list_string = """
    abcnews.go.com, apnews.com, aljazeera.com, axios.com, bbc.co.uk, bloomberg.com,
    cbc.ca, us.cnn.com, engadget.com, ew.com, espn.go.com, business.financialpost.com,
    fortune.com, foxnews.com, news.google.com, news.ycombinator.com, ign.com,
    mashable.com, msnbc.com, mtv.com, nationalgeographic.com, nbcnews.com,
    newscientist.com, newsweek.com, nymag.com, nextbigfuture.com, polygon.com,
    reuters.com, techcrunch.com, techradar.com, theglobeandmail.com,
    huffingtonpost.com, thenextweb.com, theverge.com, wsj.com, washingtonpost.com,
    time.com, usatoday.com, news.vice.com, wired.com
    """
    url_base = 'https://newsapi.org'
    url_path = '/v2/everything'
    url_params = {
        'from': date_filter,
        'language': 'en',
        'apiKey': api_key,
        'pageSize': 100,
        'sortBy': 'publishedAt',
        'domains': domain_list_string
    }
    # NOTE(review): logging url_params here includes the API key -- confirm
    # whether that is acceptable for these logs.
    print('requesting news for endpoint: {}, params: {}'.format(url_path, url_params))
    logger.info('requesting news for endpoint: {}, params: {}'.format(url_path, url_params))
    # NOTE(review): redundant -- 'apiKey' was already set in the dict above.
    url_params['apiKey'] = api_key
    request_url = str(url_base + url_path)
    response = requests.get(request_url, params=url_params)
    print('status: '+str(response.json()['status']))
    logger.info('status: '+str(response.json()['status']))
    # Reshape the API response into one flat record per article.
    articles = []
    current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    filename_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    for i in range(len(response.json()['articles'])):
        # create empty list of article details
        details = {}
        # populate fields from news results
        result = response.json()['articles'][i]
        # print('\n{}\n'.format(result))
        columns = ['title','author','description','content','url','urlToImage','publishedAt']
        details['article_id'] = str(uuid.uuid4())
        details['article_order'] = i
        details['load_timestamp'] = current_time
        for column in columns:
            details[column] = result[column]
        articles.append(details)
    # Write the batch as newline-delimited JSON for BigQuery loading.
    bucket_path = os.getenv('ARTICLES_BUCKET')
    filename = 'news-{}'.format(filename_time)
    output_file = gcsfs.open("{}/{}.ndjson".format(bucket_path, filename), 'w')
    for item in articles:
        output_file.write(json.dumps(item))
        output_file.write('\n')
    print('wrote file {}.ndjson to bucket'.format(filename))
    logger.info('wrote file {}.ndjson to bucket'.format(filename))
    return "Retrieved news data", 200
def load_news():
    """This function will load news files in the storage bucket to the BigQuery tables
    """
    logger = logging.getLogger('app.load_news')
    credentials, gcp_project_id = google.auth.default()
    gcs_client = storage.Client(project=gcp_project_id, credentials=credentials)
    bigquery_client = bigquery.Client(project=gcp_project_id, credentials=credentials)
    # instantiate Loader class and load file to BigQuery
    loader = Loader(bigquery_client, gcs_client)
    dataset_id = 'news'
    # Files are moved to the processed bucket after a successful load
    # (presumably by Loader.load_from_bucket -- behavior defined elsewhere).
    articles_bucket = os.getenv('ARTICLES_BUCKET')
    articles_processed_bucket = os.getenv('ARTICLES_PROCESSED_BUCKET')
    articles_table_id = 'articles'
    print('loading news from bucket')
    logger.info('loading news from bucket')
    articles_load_job = loader.load_from_bucket(articles_bucket, articles_processed_bucket, dataset_id, articles_table_id)
    return "Loaded news data to BigQuery", 200
def get_tracking():
    """Pull impression and click messages from their Pub/Sub subscriptions and
    write them to the corresponding GCS buckets.
    """
    logger = logging.getLogger('app.get_tracking')
    credentials, gcp_project_id = google.auth.default()
    # instantiate a pubsub subscriber client and subscriber class
    subscriber_client = pubsub.SubscriberClient(credentials=credentials)
    subscriber = Subscriber(subscriber_client, gcsfs)
    # use current time for filenames
    current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    # subscribe to impressions topic to retrieve messages and write to bucket
    print('*** processing impressions ***')
    logger.info('*** processing impressions ***')
    impressions_bucket = os.getenv('IMPRESSIONS_BUCKET')
    subscription_path = subscriber_client.subscription_path(gcp_project_id, 'news_impressions')
    impressions_filename = 'impression-{}'.format(current_time)
    impressions_messages = subscriber.get_messages(subscription_path, impressions_bucket, impressions_filename)
    # subscribe to clicks topic to retrieve messages and write to bucket
    print('*** processing clicks ***')
    logger.info('*** processing clicks ***')
    clicks_bucket = os.getenv('CLICKS_BUCKET')
    subscription_path = subscriber_client.subscription_path(gcp_project_id, 'news_clicks')
    clicks_filename = 'clicks-{}'.format(current_time)
    clicks_messages = subscriber.get_messages(subscription_path, clicks_bucket, clicks_filename)
    # return impressions_messages
    return "Pulled tracking messages from topic", 200
def load_tracking():
    """This function will load tracking files in the storage bucket to the BigQuery tables
    """
    credentials, gcp_project_id = google.auth.default()
    bigquery_client = bigquery.Client(project=gcp_project_id, credentials=credentials)
    gcs_client = storage.Client(project=gcp_project_id, credentials=credentials)
    loader = Loader(bigquery_client, gcs_client)
    dataset_id = 'tracking'
    # Impressions: bucket -> BigQuery, then moved to the processed bucket.
    impressions_bucket = os.getenv('IMPRESSIONS_BUCKET')
    impressions_processed_bucket = os.getenv('IMPRESSIONS_PROCESSED_BUCKET')
    impressions_table_id = 'impressions'
    impressions_load_job = loader.load_from_bucket(impressions_bucket, impressions_processed_bucket, dataset_id, impressions_table_id)
    # Clicks: same pipeline, separate table.
    clicks_bucket = os.getenv('CLICKS_BUCKET')
    clicks_processed_bucket = os.getenv('CLICKS_PROCESSED_BUCKET')
    clicks_table_id = 'clicks'
    clicks_load_job = loader.load_from_bucket(clicks_bucket, clicks_processed_bucket, dataset_id, clicks_table_id)
    return "Loaded tracking data to BigQuery", 200
@app.route('/', methods=['GET'])
def index():
    """Health-check endpoint."""
    return ('Backend server running', 200)
@app.route('/get_and_load_news', methods=['POST'])
def get_and_load_news():
    """Retrieve news data to cloud storage, then load it into BigQuery.

    Returns HTTP 200 on success, 204 when loading fails after all retries.
    """
    logger = logging.getLogger('app.get_and_load_news')
    print('requesting news')
    logger.info('requesting news')
    get_news()
    retries = 3
    count = 1
    status = None
    # Bug fix: the original condition used `or`, so a persistently failing
    # load looped forever (status != 200 kept the loop alive regardless of
    # count). `and` with <= bounds the loop at `retries` attempts.
    while (status != 200 and count <= retries):
        time.sleep(10)
        print('loading news (attempt: {}'.format(count))
        logger.info('loading news (attempt: {}'.format(count))
        status = load_news()[1]
        print('loading news status {}'.format(status))
        logger.info('loading news status {}'.format(status))
        if status == 200:
            return "Retrieved news and loaded data to BigQuery", 200
        count += 1
    return "Unable to retrieve and load news", 204
@app.route('/get_and_load_tracking', methods=['POST'])
def get_and_load_tracking():
    """Pull tracking messages from Pub/Sub, then load them into BigQuery.

    Returns HTTP 200 on success, 204 when loading fails after all retries.
    """
    logger = logging.getLogger('app.get_and_load_tracking')
    print('retrieving tracking messages')
    logger.info('retrieving tracking messages')
    get_tracking()
    retries = 3
    count = 1
    status = None
    # Bug fix: same as get_and_load_news -- the original `or` condition made
    # this loop infinite when loading kept failing; `and` with <= bounds it
    # at `retries` attempts.
    while (status != 200 and count <= retries):
        time.sleep(10)
        print('loading tracking (attempt: {}'.format(count))
        logger.info('loading tracking (attempt: {}'.format(count))
        status = load_tracking()[1]
        print('loading tracking status {}'.format(status))
        logger.info('loading tracking status {}'.format(status))
        if status == 200:
            return "Retrieved tracking and loaded data to BigQuery", 200
        count += 1
    return "Unable to retrieve and load tracking", 204
@app.route('/get_recommendations', methods=['POST'])
def get_recommendations():
    """This route will run the topic model used to populate the recommended articles for all users
    """
    logger = logging.getLogger('app.get_recommendations')
    print('Updating topic model and recommendations')
    logger.info('Updating topic model and recommendations')
    notebook_bucket = os.getenv('NOTEBOOK_BUCKET')
    run_time = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    # Execute the production notebook straight from GCS; the executed copy is
    # written back under /out with a timestamped name for auditing.
    papermill.execute_notebook(
        'gs://{}/prod/topic-prod.ipynb'.format(notebook_bucket),
        'gs://{}/out/topic-out-{}.ipynb'.format(notebook_bucket, run_time),
        kernel_name = 'python3'
    )
    return "Ran topic model and updated recommendations", 200
if __name__ == '__main__':
PORT = int(os.getenv('PORT')) if os.getenv('PORT') else 8081
# This is used when running locally. Gunicorn is used to run the
# application on Cloud Run. See entrypoint in Dockerfile.
# app.run(host='127.0.0.1', port=PORT, debug=True)
app.run(host='0.0.0.0', port=PORT, debug=True) | StarcoderdataPython |
6500598 | from unittest import TestCase
from expects import expect, equal, raise_error
from slender import Set
class TestIntersection(TestCase):
    """Tests for Set.intersection."""

    # NOTE(review): "inersection" is a typo for "intersection" in this method
    # name; left as-is because renaming would change the reported test id.
    def test_intersection_other_with_inersection(self):
        e = Set({1, 2, 3, 4})
        o = {3, 4, 5, 6}
        expect(e.intersection(o).to_set()).to(equal({3, 4}))

    def test_intersection_other_without_intersection(self):
        # Disjoint sets intersect to the empty set.
        e = Set({1, 2, 3})
        o = Set({4, 5, 6})
        expect(e.intersection(o).to_set()).to(equal(set()))

    def test_intersection_other_is_different(self):
        # A non-set/non-iterable argument should raise TypeError.
        e = Set({1, 2, 3})
        expect(lambda: e.intersection(None)).to(raise_error(TypeError))
| StarcoderdataPython |
6508193 | from .mime import (
get_by_filename
)
__all__ = [
'get_by_filename'
]
| StarcoderdataPython |
37366 | <reponame>DramatikMan/mlhl-01-python-bot
from typing import Any
from telegram.ext import CallbackContext, Dispatcher
# CallbackContext specialized with plain dicts for the user_data, chat_data
# and bot_data generic parameters (python-telegram-bot).
CCT = CallbackContext[
    dict[Any, Any],
    dict[Any, Any],
    dict[Any, Any]
]

# Dispatcher using the CCT context type plus the same three dict stores.
DP = Dispatcher[
    CCT,
    dict[Any, Any],
    dict[Any, Any],
    dict[Any, Any]
]

# Row shape of a stored record: nine ints, two strings and a float.
# NOTE(review): column meanings are not documented here -- confirm against
# the query that produces these rows.
DataRecord = tuple[
    int, int, int, int, int, int, int, int, int, str, str, float
]
| StarcoderdataPython |
3264720 | <filename>allauthdemo/auth/insurance.py
import datetime
from django.db import models
class Automotive(models.Model):
    """Auto insurance policy: vehicle profile, coverage limits, contract data."""
    id = models.AutoField(primary_key=True)
    customer_id = models.IntegerField(default=0)
    make = models.CharField(max_length=25, default='Toyota')
    model = models.CharField(max_length=100, default='Corolla')
    year = models.IntegerField(default=2010)
    abs = models.BooleanField(default=False)
    anti_theft = models.BooleanField(default=False)
    experience = models.IntegerField(default=0)
    # Coverage
    third_party = models.FloatField(default=200000)
    statutory_accident = models.FloatField(default=65000)
    uninsured_auto = models.FloatField(default=200000)
    income_replacement = models.FloatField(default=0)
    medical = models.FloatField(default=0)
    caregiver = models.FloatField(default=0)
    housekeeping = models.FloatField(default=0)
    death = models.FloatField(default=0)
    dependent = models.FloatField(default=0)
    indexation = models.FloatField(default=0)
    specified_perils = models.FloatField(default=0)
    comprehensive = models.FloatField(default=0)
    collision = models.FloatField(default=0)
    all_perils = models.FloatField(default=0)
    # Contract
    company = models.CharField(max_length=25, default='The Co-operators')
    start = models.DateField(default=datetime.date.today)
    end = models.DateField(default=datetime.date.today)
    monthly_premium = models.FloatField(default=230)

    class Meta:
        verbose_name = ('automotive')

    def get_fields(self):
        """Return (field_name, value) pairs for every concrete model field."""
        # Fix: use self._meta instead of the hard-coded model class so the
        # method survives copy-paste/subclassing unchanged; output identical.
        return [(field.name, getattr(self, field.name)) for field in self._meta.fields]
class Disability(models.Model):
    """Disability insurance policy: rating factors and contract data."""
    id = models.AutoField(primary_key=True)
    customer_id = models.IntegerField(default=0)
    # Factors
    coverage = models.FloatField(default=35000)
    benefit_period = models.IntegerField(default=1)
    waiting_period = models.IntegerField(default=0)
    age = models.IntegerField(default=40)
    health = models.IntegerField(default=10)
    occupation = models.CharField(max_length=1, default='B')
    # Contract
    start = models.DateField(default=datetime.date.today)
    end = models.DateField(default=datetime.date.today)
    monthly_premium = models.FloatField(default=120)

    class Meta:
        verbose_name = ('disability')

    def get_fields(self):
        """Return (field_name, value) pairs for every concrete model field."""
        # Fix: self._meta instead of the hard-coded class name (see Automotive).
        return [(field.name, getattr(self, field.name)) for field in self._meta.fields]
class Health(models.Model):
    """Health insurance policy: rating factors and contract data."""
    id = models.AutoField(primary_key=True)
    customer_id = models.IntegerField(default=0)
    # Factors
    age = models.IntegerField(default=40)
    sex = models.CharField(max_length=2,default='M')
    bmi = models.FloatField(default=20.0)
    children = models.IntegerField(default=2)
    smoker = models.BooleanField(default=False)
    region = models.TextField(max_length=10, default='southeast')
    # Contract
    company = models.CharField(max_length=25, default='Anthem')
    start = models.DateField(default=datetime.date.today)
    end = models.DateField(default=datetime.date.today)
    monthly_premium = models.FloatField(default=236)

    class Meta:
        verbose_name = ('health')

    def get_fields(self):
        """Return (field_name, value) pairs for every concrete model field."""
        # Fix: self._meta instead of the hard-coded class name (see Automotive).
        return [(field.name, getattr(self, field.name)) for field in self._meta.fields]
class House(models.Model):
    """Home insurance policy: coverage amounts and contract data."""
    id = models.AutoField(primary_key=True)
    customer_id = models.IntegerField(default=0)
    # Coverage
    dwelling = models.FloatField(default=10000)
    contents = models.FloatField(default=10000)
    personal_liability = models.FloatField(default=10000)
    flood = models.FloatField(default=0)
    windstorm = models.FloatField(default=0)
    sewer_backup = models.FloatField(default=0)
    scheduled_articles = models.FloatField(default=0)
    equipment_breakdown = models.FloatField(default=0)
    guaranteed_replacement = models.FloatField(default=0)
    earthquake = models.FloatField(default=0)
    # Contract
    start = models.DateField(default=datetime.date.today)
    end = models.DateField(default=datetime.date.today)
    monthly_premium = models.FloatField(default=100)

    class Meta:
        verbose_name = ('house')

    def get_fields(self):
        """Return (field_name, value) pairs for every concrete model field."""
        # Fix: self._meta instead of the hard-coded class name (see Automotive).
        return [(field.name, getattr(self, field.name)) for field in self._meta.fields]
class Life(models.Model):
    """Life insurance policy: coverage, needs/type flags and contract data."""
    id = models.AutoField(primary_key=True)
    customer_id = models.IntegerField(default=0)
    # Coverage
    coverage = models.FloatField(default=10000)
    # Needs
    permanent_need = models.BooleanField(default=False)
    permanent_need_but_can_be_changed = models.BooleanField(default=False)
    only_for_a_year = models.BooleanField(default=False)
    less_than_10 = models.BooleanField(default=False)
    small_budget = models.BooleanField(default=False)
    # Types
    whole = models.BooleanField(default=False)
    universal = models.BooleanField(default=False)
    annual_renewable = models.BooleanField(default=False)
    fixed_traditional = models.BooleanField(default=False)
    fixed_reentry = models.BooleanField(default=False)
    decreasing_level = models.BooleanField(default=False)
    decreasing_mortgage = models.BooleanField(default=False)
    # Contract
    start = models.DateField(default=datetime.date.today)
    end = models.DateField(default=datetime.date.today)
    next_premium = models.FloatField(default=125)

    class Meta:
        verbose_name = ('life')

    def get_fields(self):
        """Return (field_name, value) pairs for every concrete model field."""
        # Fix: self._meta instead of the hard-coded class name (see Automotive).
        return [(field.name, getattr(self, field.name)) for field in self._meta.fields]
| StarcoderdataPython |
12856133 | """
Author: <NAME>
Created in: September 19, 2019
Python version: 3.6
"""
from Least_SRMTL import Least_SRMTL
import libmr
from matplotlib import pyplot, cm
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d import Axes3D, art3d
import numpy as np
import numpy.matlib
import sklearn.metrics
class EVeP(object):
"""
evolving Extreme Value Machine
Ruled-based predictor with EVM at the definition of the antecedent of the rules.
1. Create a new instance and provide the model parameters;
2. Call the predict(x) method to make predictions based on the given input;
3. Call the train(x, y) method to evolve the model based on the new input-output pair.
"""
    # Model initialization
    def __init__(self, sigma=0.5, delta=50, N=np.Inf, rho=None, columns_ts=None):
        """Configure the rule base; no rules exist until train() is called.

        sigma: firing-degree threshold for adding a sample to / merging rules.
        delta: number of steps between maintenance passes (pruning + merging).
        N: maximum number of samples stored per rule.
        rho: SRMTL regularization; when not None consequents are trained jointly.
        columns_ts: columns averaged for the fallback prediction (None = all).
        """
        # Setting EVM algorithm parameters
        self.sigma = sigma
        self.tau = 99999
        self.delta = delta
        self.N = N
        self.rho = rho
        self.columns_ts = columns_ts
        if self.rho is not None:
            # init_theta == 2 tells the SRMTL trainer to (re)start from scratch.
            self.init_theta = 2
            self.srmtl = Least_SRMTL(rho)
            self.R = None
        # Parallel per-rule lists; index i of each list belongs to rule i.
        self.mr_x = list()
        self.mr_y = list()
        self.x0 = list()
        self.y0 = list()
        self.X = list()
        self.y = list()
        self.step = list()
        self.last_update = list()
        self.theta = list()
        # Number of rules currently in the model.
        self.c = 0
    # Initialization of a new instance of EV.
    def add_EV(self, x0, y0, step):
        """Create a new rule centred at (x0, y0), seeded with that single sample."""
        self.mr_x.append(libmr.MR())
        self.mr_y.append(libmr.MR())
        self.x0.append(x0)
        self.y0.append(y0)
        self.X.append(x0)
        self.y.append(y0)
        self.step.append(step)
        self.last_update.append(np.max(step))
        self.theta.append(np.zeros_like(x0))
        self.c = self.c + 1
        if self.rho is None:
            # coefficients of the consequent part
            self.theta[-1] = np.insert(self.theta[-1], 0, y0, axis=1).T
        else:
            self.init_theta = 2
            # coefficients of the consequent part
            self.theta[-1] = np.insert(self.theta[-1], 0, y0, axis=1)
    # Add the sample(s) (X, y) as covered by the extreme vector. Remove repeated points.
    def add_sample_to_EV(self, index, X, y, step):
        """Append (X, y, step) to rule *index*, cap storage at N newest samples,
        recentre the rule and (without SRMTL) refit its local linear model."""
        self.X[index] = np.concatenate((self.X[index], X))
        self.y[index] = np.concatenate((self.y[index], y))
        self.step[index] = np.concatenate((self.step[index], step))
        if self.X[index].shape[0] > self.N:
            # Keep only the N most recent samples (largest step values).
            indexes = np.argsort(-self.step[index].reshape(-1))
            self.X[index] = self.X[index][indexes[: self.N], :]
            self.y[index] = self.y[index][indexes[: self.N]]
            self.step[index] = self.step[index][indexes[: self.N]]
        # Rule centre = mean of the stored samples.
        self.x0[index] = np.average(self.X[index], axis=0).reshape(1, -1)
        self.y0[index] = np.average(self.y[index], axis=0).reshape(1, -1)
        self.last_update[index] = np.max(self.step[index])
        if self.rho is None:
            # Least-squares consequent with a prepended bias column.
            self.theta[index] = np.linalg.lstsq(np.insert(self.X[index], 0, 1, axis=1), self.y[index], rcond=None)[0]
def delete_from_list(self, list_, indexes):
for i in sorted(indexes, reverse=True):
del list_[i]
return list_
    # Calculate the firing degree of the sample to the psi curve
    def firing_degree(self, index, x=None, y=None):
        """Membership of (x, y) in rule *index* via the Weibull psi curves.

        With only x (or only y) the corresponding curve is evaluated; with
        both, the pointwise minimum of the two memberships is returned.
        """
        if y is None:
            return self.mr_x[index].w_score_vector(sklearn.metrics.pairwise.pairwise_distances(self.x0[index], x).reshape(-1))
        elif x is None:
            return self.mr_y[index].w_score_vector(sklearn.metrics.pairwise.pairwise_distances(self.y0[index], y).reshape(-1))
        else:
            return np.minimum(self.mr_x[index].w_score_vector(sklearn.metrics.pairwise.pairwise_distances(self.x0[index], x).reshape(-1)), self.mr_y[index].w_score_vector(sklearn.metrics.pairwise.pairwise_distances(self.y0[index], y).reshape(-1)))
    # Fit the psi curve of the EVs according to the external samples
    def fit(self, index, X_ext, y_ext):
        """Refit both psi curves of rule *index* against samples of other rules."""
        self.fit_x(index, sklearn.metrics.pairwise.pairwise_distances(self.x0[index], X_ext)[0])
        self.fit_y(index, sklearn.metrics.pairwise.pairwise_distances(self.y0[index], y_ext)[0])
    # Fit the psi curve to the extreme values with distance D to the center of the EV
    def fit_x(self, index, D):
        """Fit the input-side psi curve to half-distances D (EVM margin rule)."""
        self.mr_x[index].fit_low(1/2 * D, min(D.shape[0], self.tau))
    # Fit the psi curve to the extreme values with distance D to the center of the EV
    def fit_y(self, index, D):
        """Fit the output-side psi curve to half-distances D (EVM margin rule)."""
        self.mr_y[index].fit_low(1/2 * D, min(D.shape[0], self.tau))
# Get the distance from the origin of the input EV which has the given probability to belong to the curve
def get_distance_input(self, percentage, index=None):
if index is None:
return [self.mr_x[i].inv(percentage) for i in range(self.c)]
else:
return self.mr_x[index].inv(percentage)
# Get the distance from the origin of the output EV which has the given probability to belong to the curve
def get_distance_output(self, percentage, index=None):
if index is None:
return [self.mr_y[i].inv(percentage) for i in range(self.c)]
else:
return self.mr_y[index].inv(percentage)
# Obtain the samples that do not belong to the given EV
def get_external_samples(self, index=None):
if index is None:
X = np.concatenate(self.X)
y = np.concatenate(self.y)
else:
if self.c > 1:
X = np.concatenate(self.X[:index] + self.X[index + 1 :])
y = np.concatenate(self.y[:index] + self.y[index + 1 :])
else:
X = np.array([])
y = np.array([])
return (X, y)
    # Merge two EVs of different clusters whenever the origin of one is inside the sigma probability of inclusion of the psi curve of the other
    def merge(self):
        """Absorb any later rule whose centre fires rule *index* above sigma.

        Rules are first sorted by recency; merging walks the list while it
        shrinks, folding absorbed rules' samples into the keeper and deleting
        them from highest index down so pending indices stay valid.
        """
        self.sort_EVs()
        index = 0
        while index < self.c:
            if index + 1 < self.c:
                # Centres of all rules after the current one.
                x0 = np.concatenate(self.x0[index + 1 : ])
                y0 = np.concatenate(self.y0[index + 1 : ])
                S_index = self.firing_degree(index, x0, y0)
                # Absolute indices (offset past the current rule) to absorb.
                index_to_merge = np.where(S_index > self.sigma)[0] + index + 1
                if index_to_merge.size > 0:
                    self.init_theta = 2
                    for i in reversed(range(len(index_to_merge))):
                        self.add_sample_to_EV(index, self.X[index_to_merge[i]], self.y[index_to_merge[i]], self.step[index_to_merge[i]])
                        self.remove_EV([index_to_merge[i]])
            index = index + 1
# Plot the granules that form the antecedent part of the rules
def plot(self, name_figure_input, name_figure_output, step):
# Input fuzzy granules plot
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
ax.axes.set_xlim3d(left=-2, right=2)
ax.axes.set_ylim3d(bottom=-2, top=2)
z_bottom = -0.3
ax.set_zticklabels("")
colors = cm.get_cmap('Dark2', self.c)
for i in range(self.c):
self.plot_EV_input(i, ax, '.', colors(i), z_bottom)
legend.append('$\lambda$ = ' + str(round(self.mr_x[new_order[i]].get_params()[0], 1)) + ' $\kappa$ = ' + str(round(self.mr_x[new_order[i]].get_params()[1], 1)))
# Plot axis' labels
ax.set_xlabel('u(t)', fontsize=15)
ax.set_ylabel('y(t)', fontsize=15)
ax.set_zlabel('$\mu_x$', fontsize=15)
ax.legend(legend, fontsize=10, loc=2)
# Save figure
fig.savefig(name_figure_input)
# Close plot
pyplot.close(fig)
# Output fuzzy granules plot
fig = pyplot.figure()
ax = fig.add_subplot(111)
ax.axes.set_xlim(left=-2, right=2)
for i in range(self.c):
self.plot_EV_output(i, ax, '.', colors(i), z_bottom)
# Plot axis' labels
ax.set_xlabel('y(t + 1)', fontsize=15)
ax.set_ylabel('$\mu_y$', fontsize=15)
ax.legend(legend, fontsize=10, loc=2)
# Save figure
fig.savefig(name_figure_output)
# Close plot
pyplot.close(fig)
    # Plot the probability of sample inclusion (psi-model) together with the samples associated with the EV for the input fuzzy granules
    def plot_EV_input(self, index, ax, marker, color, z_bottom):
        """Draw rule *index*'s input samples, sigma-radius circle and psi surface.

        NOTE(review): assumes 2-D inputs (columns 0 and 1 only) -- confirm.
        """
        # Plot the input samples in the XY plan
        ax.scatter(self.X[index][:, 0], self.X[index][:, 1], z_bottom * np.ones((self.X[index].shape[0], 1)), marker=marker, color=color)
        # Plot the radius for which there is a probability sigma to belong to the EV
        radius = self.get_distance_input(self.sigma, index)
        p = Circle((self.x0[index][0, 0], self.x0[index][0, 1]), radius, fill=False, color=color)
        ax.add_patch(p)
        art3d.pathpatch_2d_to_3d(p, z=z_bottom, zdir="z")
        # Plot the psi curve of the EV
        r = np.linspace(0, self.get_distance_input(0.05, index), 100)
        theta = np.linspace(0, 2 * np.pi, 145)
        radius_matrix, theta_matrix = np.meshgrid(r,theta)
        X = self.x0[index][0, 0] + radius_matrix * np.cos(theta_matrix)
        Y = self.x0[index][0, 1] + radius_matrix * np.sin(theta_matrix)
        points = np.array([np.array([X, Y])[0, :, :].reshape(-1), np.array([X, Y])[1, :, :].reshape(-1)]).T
        Z = self.firing_degree(index, points)
        ax.plot_surface(X, Y, Z.reshape((X.shape[0], X.shape[1])), antialiased=False, cmap=cm.coolwarm, alpha=0.1)
    # Plot the probability of sample inclusion (psi-model) together with the samples associated with the EV for the output fuzzy granules
    def plot_EV_output(self, index, ax, marker, color, z_bottom):
        """Draw rule *index*'s output samples and its 1-D psi membership curve."""
        # Plot the output data points in the X axis
        ax.scatter(self.y[index], np.zeros_like(self.y[index]), marker=marker, color=color)
        # Plot the psi curve of the EV
        r = np.linspace(0, self.get_distance_output(0.01, index), 100)
        # Symmetric grid around the rule's output centre.
        points = np.concatenate((np.flip((self.y0[index] - r).T, axis=0), (self.y0[index] + r).T), axis=0)
        Z = self.firing_degree(index, y=points)
        #ax.plot(points, Z, antialiased=False, cmap=cm.coolwarm, alpha=0.1)
        ax.plot(points, Z, color=color)
# Predict the output given the input sample x
def predict(self, x):
num = 0
den = 0
for i in range(self.c):
p = self.predict_EV(i, x)
num = num + self.firing_degree(i, x, p) * p
den = den + self.firing_degree(i, x, p)
if den == 0:
if self.columns_ts is None:
return np.mean(x)
return np.mean(x[:, self.columns_ts])
return num / den
# Predict the local output of x based on the linear regression of the samples stored at the EV
def predict_EV(self, index, x):
if self.rho is None:
return np.insert(x, 0, 1).reshape(1, -1) @ self.theta[index]
return np.insert(x, 0, 1).reshape(1, -1) @ self.theta[index].T
    # Calculate the degree of relationship of all the rules to the rule of index informed as parameter
    def relationship_rules(self, index):
        """Vector of relatedness between rule *index* and every rule.

        Combines memberships of the other rules' centres and of their centres
        shifted by the sigma radius, on both the input and output psi curves;
        the elementwise maximum of the four scores is returned.
        """
        distance_x = sklearn.metrics.pairwise.pairwise_distances(self.x0[index], np.concatenate(self.x0)).reshape(-1)
        distance_y = sklearn.metrics.pairwise.pairwise_distances(self.y0[index], np.concatenate(self.y0)).reshape(-1)
        relationship_x_center = self.mr_x[index].w_score_vector(distance_x)
        relationship_y_center = self.mr_y[index].w_score_vector(distance_y)
        relationship_x_radius = self.mr_x[index].w_score_vector(distance_x - self.get_distance_input(self.sigma))
        relationship_y_radius = self.mr_y[index].w_score_vector(distance_y - self.get_distance_output(self.sigma))
        return np.maximum(np.maximum(relationship_x_center, relationship_x_radius), np.maximum(relationship_y_center, relationship_y_radius))
# Remove the EV whose index was informed by parameter
def remove_EV(self, index):
self.mr_x = self.delete_from_list(self.mr_x, index)
self.mr_y = self.delete_from_list(self.mr_y, index)
self.x0 = self.delete_from_list(self.x0, index)
self.y0 = self.delete_from_list(self.y0, index)
self.X = self.delete_from_list(self.X, index)
self.y = self.delete_from_list(self.y, index)
self.step = self.delete_from_list(self.step, index)
self.last_update = self.delete_from_list(self.last_update, index)
self.theta = self.delete_from_list(self.theta, index)
self.c = len(self.mr_x)
# Remove the EVs that didn't have any update in the last threshold steps
def remove_outdated_EVs(self, threshold):
indexes_to_remove = list()
for index in range(self.c):
if self.last_update[index] <= threshold:
indexes_to_remove.append(index)
if len(indexes_to_remove) > 0:
self.remove_EV(indexes_to_remove)
if self.rho is not None:
self.update_R()
self.init_theta = 2
# Sort the EVs according to the last update
def sort_EVs(self):
new_order = (-np.array(self.last_update)).argsort()
self.mr_x = list(np.array(self.mr_x)[new_order])
self.mr_y = list(np.array(self.mr_y)[new_order])
self.x0 = list(np.array(self.x0)[new_order])
self.y0 = list(np.array(self.y0)[new_order])
self.X = list(np.array(self.X)[new_order])
self.y = list(np.array(self.y)[new_order])
self.step = list(np.array(self.step)[new_order])
self.last_update = list(np.array(self.last_update)[new_order])
    # Evolves the model (main method)
    def train(self, x, y, step):
        """Incorporate one (x, y) pair observed at *step* and evolve the rules.

        NOTE(review): *step* is indexed as ``step[0, 0]`` below, so it is
        presumably a (1, 1) array rather than a scalar -- confirm at callers.
        """
        best_EV = None
        best_EV_value = 0
        # check if it is possible to insert the sample in an existing model
        for index in range(self.c):
            tau = self.firing_degree(index, x, y)
            if tau > best_EV_value and tau > self.sigma:
                best_EV = index
                best_EV_value = tau
        update = False
        # Add the sample to an existing EV
        if best_EV is not None:
            self.add_sample_to_EV(best_EV, x, y, step)
        # Create a new EV
        else:
            self.add_EV(x, y, step)
            update = True
        self.update_EVs()
        # Periodic maintenance every delta steps: prune stale rules, merge.
        if step != 0 and (step % self.delta) == 0:
            self.remove_outdated_EVs(step[0, 0] - self.delta)
            self.merge()
            update = True
        if self.rho is not None:
            if update:
                self.update_R()
            # Joint (multi-task) re-estimation of all consequents via SRMTL.
            self.theta = self.srmtl.train(self.X, self.y, self.init_theta)
            self.init_theta = 1
# Update the psi curve of the EVs
def update_EVs(self):
for i in range(self.c):
(X_ext, y_ext) = self.get_external_samples(i)
if X_ext.shape[0] > 0:
self.fit(i, X_ext, y_ext)
    def update_R(self):
        """Rebuild the SRMTL relationship matrix R from pairwise rule scores.

        Each column of R encodes one related pair (i, j) with +w at row i and
        -w at row j, where w is the stronger of the two directed scores; the
        result is handed to the SRMTL trainer. R stays None when no pair is
        related.
        """
        S = np.zeros((self.c, self.c))
        for i in range(self.c):
            S[i, :] = self.relationship_rules(i)
        self.R = None
        for i in range(self.c):
            for j in range(i + 1, self.c):
                if S[i, j] > 0 or S[j, i] > 0:
                    edge = np.zeros((self.c, 1))
                    edge[i] = max(S[i, j], S[j, i])
                    edge[j] = - max(S[i, j], S[j, i])
                    if self.R is None:
                        self.R = edge
                    else:
                        self.R = np.concatenate((self.R, edge), axis=1)
        self.srmtl.set_RRt(self.R)
12816639 | import os
import socket
import subprocess
from collections import deque
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException, httplib
from . logger import info
from . config import DASHCORE_DIR
def simplemovingaverage(period):
    """Return a closure computing a simple moving average over *period* samples.

    The returned function accepts numbers or numeric strings; a decimal
    comma is accepted and treated as a decimal point. Until *period*
    samples have been seen, the average is taken over the samples so far.
    """
    assert period == int(period) and period > 0, "Period must be an integer >0"
    state = {"summ": 0.0, "n": 0.0}
    window = deque([0.0] * period)
    def sma(sample):
        # Normalise "1,5"-style decimal-comma strings before converting.
        sample = float(str(sample).replace(",", "."))
        window.append(sample)
        # Running sum: add the newcomer, drop the value leaving the window.
        state["summ"] += sample - window.popleft()
        state["n"] = min(state["n"] + 1, period)
        return state["summ"] / state["n"]
    return sma
class DashRPC(object):
    """Thin JSON-RPC client for a local dashd node, driven by dash.conf."""
    def __init__(self,
                 mainnet=False,
                 conf=None
                 ):
        # NOTE(review): *conf* is accepted but never used -- the config path
        # is always DASHCORE_DIR/dash.conf; confirm whether that is intended.
        self.mainnet = mainnet
        self.datadir = os.path.join(os.environ['HOME'],
                                    'dash', (not mainnet and 'testnet' or ''))
        self.conffile=DASHCORE_DIR + '/dash.conf'
        self.config = {}
        # 5-sample moving average smooths the noisy per-call CPU readings.
        self.cpu_pct = simplemovingaverage(5)
        self._parse_conffile()
        # Fall back to dashd defaults when dash.conf omits the RPC endpoint.
        if 'rpcbind' not in self.config:
            self.config['rpcbind'] = '127.0.0.1'
        if 'rpcport' not in self.config:
            self.config['rpcport'] = mainnet and 9998 or 19998
    def _parse_conffile(self):
        """Load key=value pairs from dash.conf (blank lines/comments skipped)."""
        with open(self.conffile, 'r') as f:
            lines = list(
                line
                for line in
                (l.strip() for l in f)
                if line and not line.startswith('#'))
            for line in lines:
                conf = line.split('=')
                self.config[conf[0].strip()] = conf[1].strip()
    def connect(self):
        """Build and cache an AuthServiceProxy for the configured endpoint."""
        protocol = 'http'
        if ('rpcssl' in self.config and
                bool(self.config['rpcssl']) and
                int(self.config['rpcssl']) > 0):
            protocol = 'https'
        serverURL = protocol + '://' + self.config['rpcuser'] + ':' + \
            self.config['rpcpassword'] + '@' + str(self.config['rpcbind']) + \
            ':' + str(self.config['rpcport'])
        self._proxy = AuthServiceProxy(serverURL)
        return self._proxy
    def get_cpu_average(self):
        """Sample dashd's CPU%% via top/awk and feed the moving average."""
        pidfile = self.mainnet and DASHCORE_DIR + '/dashd.pid' or DASHCORE_DIR + '/testnet/testnet3/dashd.pid' # noqa
        cmd = "top -p `cat %s` -n1 | awk '/ dashd /{print $10}'" % pidfile
        # Empty awk output (process not found) is treated as 100%% busy.
        cpu = subprocess.check_output(str.encode(cmd), shell=True, universal_newlines=True).rstrip('\n') or 100
        return self.cpu_pct(cpu)
    def ready(self):
        """Return True when dashd both answers RPC and reports itself synced."""
        self.responding = False
        self.synchronised = False
        self.get_cpu_average()
        try:
            self._proxy.getbalance()
            self.responding = True
        except (ValueError, socket.error, httplib.CannotSendRequest) as e:
            info(e)
            pass
        except JSONRPCException as e:
            info(e)
            pass
        try:
            status = self._proxy.mnsync("status")
            self.synchronised = (status["IsSynced"] and status["IsBlockchainSynced"])
        except (ValueError, socket.error, httplib.CannotSendRequest) as e:
            info(e)
            pass
        except JSONRPCException as e:
            resp = str(e.error['message'])
            # Older dashd masternode builds reject mnsync; use a low CPU load
            # as a heuristic that initial sync has finished.
            if 'masternode' in resp:
                if self.get_cpu_average() < 50:
                    self.synchronised = True
        logmsg = self.responding and 'responding, ' or 'not responding, '
        logmsg += self.synchronised and 'synchronised, ' or 'not synchronised, '
        logmsg += 'cpu: ' + "{0:.2f}".format(self.get_cpu_average())
        info(logmsg)
        return (self.responding and self.synchronised)
| StarcoderdataPython |
3484434 | <reponame>defrex/graphql-core<gh_stars>0
from ...error import GraphQLError
from ...language.printer import print_ast
from ...type.definition import GraphQLNonNull
from ...utils.is_valid_literal_value import is_valid_literal_value
from .base import ValidationRule
class DefaultValuesOfCorrectType(ValidationRule):
    """Validation rule: variable default values must match their declared type."""
    def enter_VariableDefinition(self, node, key, parent, path, ancestors):
        """Return a GraphQLError for a bad default, or None when valid."""
        name = node.variable.name.value
        default_value = node.default_value
        type = self.context.get_input_type()
        # A non-null variable is always required, so a default is meaningless.
        if isinstance(type, GraphQLNonNull) and default_value:
            return GraphQLError(
                self.default_for_non_null_arg_message(name, type, type.of_type),
                [default_value]
            )
        if type and default_value:
            errors = is_valid_literal_value(type, default_value)
            if errors:
                return GraphQLError(
                    self.bad_value_for_default_arg_message(name, type, print_ast(default_value), errors),
                    [default_value]
                )
    @staticmethod
    def default_for_non_null_arg_message(var_name, type, guess_type):
        """Message for a default supplied on a non-null variable."""
        return u'Variable "${}" of type "{}" is required and will not use the default value. ' \
               u'Perhaps you meant to use type "{}".'.format(var_name, type, guess_type)
    @staticmethod
    def bad_value_for_default_arg_message(var_name, type, value, verbose_errors):
        """Message for a default whose literal does not match the variable type."""
        message = (u'\n' + u'\n'.join(verbose_errors)) if verbose_errors else u''
        return u'Variable "${}" of type "{}" has invalid default value: {}.{}'.format(var_name, type, value, message)
| StarcoderdataPython |
3307341 | <gh_stars>10-100
# Append the words from to_add.md to korean-bad-words.md, skipping duplicates.
with open("./to_add.md", 'r', encoding='UTF8') as giver:
    words_to_add = set(line.rstrip() for line in giver)
# Bug fix: 'a+' positions the stream at end-of-file, so the original read
# nothing here and existing_words was always empty (every word, including
# duplicates, got appended). Seek to the start before reading.
with open("./korean-bad-words.md", 'a+', encoding='UTF8') as taker:
    taker.seek(0)
    existing_words = set(line.rstrip() for line in taker)
    for word_to_add in words_to_add:
        if word_to_add not in existing_words:
            taker.write(word_to_add + '\n')
            print(word_to_add)
6444801 | from covgen.run.inputgenerator import execute
execute() | StarcoderdataPython |
235919 | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the custom user model's manager."""
    def test_create_user_with_emamil_successful(self):
        # NOTE(review): "emamil" is a typo for "email" in the test name;
        # renaming is safe for the test runner but left untouched here.
        """Test creating a new user with an email is successful"""
        email = '<EMAIL>'
        password = '<PASSWORD>'
        user = get_user_model().objects.create_user( #call the create user function from the user model do not import models directly
            email=email, #adds email note all these are custom properties since the user model will be changed
            password=password #add password note all these are custom properties since the user model will be changed
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password)) #you use the check_password function because passwords are encrypted
    def test_new_user_email_normalized(self):
        """Test the email for a new user is normalized"""
        email = '<EMAIL>'
        user = get_user_model().objects.create_user(email, 'adsfhkjhd' );
        self.assertEqual(user.email, email.lower()) #test if email is equal to lowercase version of the email.
    def test_new_user_invalid_email(self):
        """Test creating user with no email raises error """
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')
    def test_create_new_superuser(self):
        """Testing create new superuser"""
        user = get_user_model().objects.create_superuser(
            '<EMAIL>',
            'test1234'
        )
        #assert true is used for boolean operations.
        self.assertTrue(user.is_superuser) #get the boolean value
        self.assertTrue(user.is_staff) #get the boolean value.
11276534 | <gh_stars>1000+
import pandas as pd
from random import random, randint, choice
from faker import Faker
fake = Faker()
def superstore(count=50):
    """Generate *count* rows of fake 'superstore' order data.

    Returns a pandas DataFrame with one row per synthetic order, using
    the module-level Faker instance for identifiers, dates and locations.
    """
    data = []
    for id in range(count):
        dat = {}
        dat['Row ID'] = id
        dat['Order ID'] = '{}-{}'.format(fake.ein(), fake.zipcode())
        dat['Order Date'] = fake.date_this_year()
        # Ship date is drawn after the order date, then both are formatted.
        dat['Ship Date'] = fake.date_between_dates(dat['Order Date']).strftime('%Y-%m-%d')
        dat['Order Date'] = dat['Order Date'].strftime('%Y-%m-%d')
        # Bug fix: 'Ship Mode' was assigned twice in a row; the second draw
        # simply overwrote the first, so a single draw suffices.
        dat['Ship Mode'] = choice(['First Class', 'Standard Class', 'Second Class'])
        dat['Customer ID'] = fake.zipcode()
        dat['Segment'] = choice(['A', 'B', 'C', 'D'])
        dat['Country'] = 'US'
        dat['City'] = fake.city()
        dat['State'] = fake.state()
        dat['Postal Code'] = fake.zipcode()
        dat['Region'] = choice(['Region %d' % i for i in range(5)])
        dat['Product ID'] = fake.bban()
        sector = choice(['Industrials', 'Technology', 'Financials'])
        industry = choice(['A', 'B', 'C'])
        dat['Category'] = sector
        dat['Sub-Category'] = industry
        dat['Sales'] = randint(1, 100) * 100
        dat['Quantity'] = randint(1, 100) * 10
        dat['Discount'] = round(random() * 100, 2)
        dat['Profit'] = round(random() * 1000, 2)
        data.append(dat)
    return pd.DataFrame(data)
| StarcoderdataPython |
98473 | import json
import pytest
from custom_components.hacs.validate.brands import Validator
from tests.sample_data import response_rate_limit_header
@pytest.mark.asyncio
async def test_added_to_brands(repository, aresponses):
    """Validator passes when the repository domain is listed in brands."""
    aresponses.add(
        "brands.home-assistant.io",
        "/domains.json",
        "get",
        aresponses.Response(
            body=json.dumps({"custom": ["test"]}),
            headers=response_rate_limit_header,
        ),
    )
    repository.data.domain = "test"
    check = Validator(repository)
    await check.execute_validation()
    assert not check.failed
@pytest.mark.asyncio
async def test_not_added_to_brands(repository, aresponses):
    """Validator fails when the repository domain is missing from brands."""
    aresponses.add(
        "brands.home-assistant.io",
        "/domains.json",
        "get",
        aresponses.Response(
            body=json.dumps({"custom": []}),
            headers=response_rate_limit_header,
        ),
    )
    repository.data.domain = "test"
    check = Validator(repository)
    await check.execute_validation()
    assert check.failed
| StarcoderdataPython |
8140592 | <reponame>Rhoana/dataspec<gh_stars>0
import setuptools
from dataspec.loader import DATASPEC_GROUP
VERSION = "1.1.2"
setuptools.setup(
    description="Tilespec data model",
    dependency_links=[
        'http://github.com/Rhoana/rh_renderer/tarball/master'
        '#egg=rh_renderer-0.0.1'],
    entry_points={
        # Registers the tilespec backend under the dataspec plugin group so
        # dataspec.loader can discover it at runtime.
        DATASPEC_GROUP: {
            "tilespec = dataspec.backends.backend_tilespec:load"
        }
    },
    install_requires=[
        "h5py>=2.5",
        "numpy>=1.6"
    ],
    name="dataspec",
    packages=["dataspec", "dataspec.backends"],
    url="https://github.com/Rhoana/dataspec",
    version=VERSION)
| StarcoderdataPython |
4889082 | <reponame>subhadarship/nlp4if-2021
import logging
from typing import List
import pandas as pd
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from transformers import BertTokenizer
from .data import COLUMN_NAMES
from .field import LabelField
logger = logging.getLogger(__name__)
class BertInfodemicDataset(Dataset):
    """Bert Infodemic dataset"""
    def __init__(self, df: pd.DataFrame, bert_tokenizer: BertTokenizer, label_fields: List[LabelField]):
        """Tokenize the text column and encode the seven question labels.

        df: frame whose first column is text and next seven are q1..q7 labels.
        bert_tokenizer: tokenizer used for encoding (sentences longer than
            model_max_length are trimmed, keeping the trailing SEP token).
        label_fields: one LabelField per question, mapping label string to id.
        """
        self.bert_tokenizer = bert_tokenizer
        self.label_fields = label_fields
        self.all_original_sentences = df[COLUMN_NAMES[0]].astype(str).tolist()
        self.labels = {
            f'q{idx + 1}': df[COLUMN_NAMES[idx + 1]].astype(str).tolist() for idx in range(7)
        }
        self.all_sent_ids = []
        self.all_label_ids = []
        for sample_idx, sentence in enumerate(
                tqdm(self.all_original_sentences, desc='prepare bert data', unit=' samples')):
            ids = self.bert_tokenizer.encode(sentence)  # [CLS idx, ..., SEP idx]
            if len(ids) > self.bert_tokenizer.model_max_length:
                logger.warning(
                    f'trimming sentence {sample_idx} of length {len(ids)} to {self.bert_tokenizer.model_max_length} tokens '
                    f'(trimmed tokens include {self.bert_tokenizer.cls_token} and {self.bert_tokenizer.sep_token} tokens)'
                )
                # Keep the first max_length-1 ids and re-append SEP at the end.
                ids = ids[:self.bert_tokenizer.model_max_length - 1] + [self.bert_tokenizer.sep_token_id]
            self.all_sent_ids.append(torch.LongTensor(ids))
            label_ids = {}
            for idx in range(7):
                label_ids[f'q{idx + 1}'] = torch.LongTensor(
                    [self.label_fields[idx].stoi[self.labels[f'q{idx + 1}'][sample_idx]]])
            self.all_label_ids.append(label_ids)
    def __getitem__(self, idx):
        """Return encoded text/labels plus the original strings for sample *idx*."""
        return {
            'text': self.all_sent_ids[idx],
            'labels': self.all_label_ids[idx],
            'orig': self.all_original_sentences[idx],
            'orig_labels': ' '.join([self.labels[f'q{label_idx + 1}'][idx] for label_idx in range(7)]),
        }
    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.all_sent_ids)
| StarcoderdataPython |
8067317 | #BMI calculator
def calculate_bmi(weight, height):
    """Return the body-mass index (kg / m^2) rounded to two decimals."""
    return round(weight / height ** 2, 2)

def main():
    """Prompt for weight and height, then print the BMI."""
    print("This is a program to calculte your BMI\n")
    # float() instead of int(): weights such as 70.5 are valid input
    # (the original raised ValueError on any decimal weight).
    weight = float(input("Enter your weight\n"))
    height = float(input("Enter your height\n"))
    print(f"your bmi is {calculate_bmi(weight, height)}")

if __name__ == "__main__":
    main()
| StarcoderdataPython |
4981722 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <<EMAIL>>
# ----------
#
# ----------
from .common import Stop, Help
from .bool import pick_bool
from .list import pick_item
from .obj import pick_method
| StarcoderdataPython |
11329470 | #
# discinfo.py
#
# Copyright (C) 2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Red Hat Author(s): <NAME> <<EMAIL>>
#
import logging
logger = logging.getLogger("pylorax.discinfo")
import time
class DiscInfo(object):
    """Holder for the release/basearch pair written to a ``.discinfo`` file."""
    def __init__(self, release, basearch):
        self.release = release
        self.basearch = basearch
    def write(self, outfile):
        """Write timestamp, release and basearch, one per line, to *outfile*."""
        logger.info("writing .discinfo file")
        with open(outfile, "w") as fobj:
            # Same layout as the original: "%f" timestamp, then the two fields.
            lines = ["%f" % time.time(), self.release, self.basearch]
            fobj.write("\n".join(lines) + "\n")
| StarcoderdataPython |
9759390 | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path
import logging
import datetime
from mediagoblin import mg_globals as mgg
from mediagoblin.processing import \
create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from . import transcoders
from .util import skip_transcode
_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)
MEDIA_TYPE = 'mediagoblin.media_types.video'
class VideoTranscodingFail(BaseProcessingFail):
    '''
    Error raised if video transcoding fails
    '''
    # Lazily translated so the message is localized at render time.
    general_message = _(u'Video transcoding failed')
def sniff_handler(media_file, **kw):
    """Return MEDIA_TYPE when *media_file* contains a video stream, else None.

    Probes the file with the GStreamer-backed VideoTranscoder discoverer;
    ``kw['media']`` (if given) is only used for the failure log message.
    """
    transcoder = transcoders.VideoTranscoder()
    data = transcoder.discover(media_file.name)
    _log.info('Sniffing {0}'.format(MEDIA_TYPE))
    _log.debug('Discovered: {0}'.format(data))
    if not data:
        _log.error('Could not discover {0}'.format(
            kw.get('media')))
        return None
    # Truth-test instead of the original "== True" comparison.
    if data['is_video']:
        return MEDIA_TYPE
    return None
def process_video(proc_state):
    """
    Process a video entry, transcode the queued media files (originals) and
    create a thumbnail for the entry.
    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
    queued_filepath = entry.queued_media_file
    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)
    medium_basename = name_builder.fill('{basename}-640p.webm')
    medium_filepath = create_pub_filepath(entry, medium_basename)
    thumbnail_basename = name_builder.fill('{basename}.thumbnail.jpg')
    thumbnail_filepath = create_pub_filepath(entry, thumbnail_basename)
    # Create a temporary file for the video destination (cleaned up with workbench)
    tmp_dst = os.path.join(workbench.dir, medium_basename)
    # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
    progress_callback = ProgressCallback(entry)
    dimensions = (
        mgg.global_config['media:medium']['max_width'],
        mgg.global_config['media:medium']['max_height'])
    # Extract metadata and keep a record of it
    metadata = transcoders.VideoTranscoder().discover(queued_filename)
    store_metadata(entry, metadata)
    # Figure out whether or not we need to transcode this video or
    # if we can skip it
    if skip_transcode(metadata):
        _log.debug('Skipping transcoding')
        dst_dimensions = metadata['videowidth'], metadata['videoheight']
        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])
        did_transcode = False
    else:
        transcoder = transcoders.VideoTranscoder()
        transcoder.transcode(queued_filename, tmp_dst,
                             vp8_quality=video_config['vp8_quality'],
                             vp8_threads=video_config['vp8_threads'],
                             vorbis_quality=video_config['vorbis_quality'],
                             progress_callback=progress_callback,
                             dimensions=dimensions)
        dst_dimensions = transcoder.dst_data.videowidth,\
            transcoder.dst_data.videoheight
        # Push transcoded video to public storage
        _log.debug('Saving medium...')
        mgg.public_store.copy_local_to_storage(tmp_dst, medium_filepath)
        _log.debug('Saved medium')
        entry.media_files['webm_640'] = medium_filepath
        did_transcode = True
    # Save the width and height of the transcoded video
    entry.media_data_init(
        width=dst_dimensions[0],
        height=dst_dimensions[1])
    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = os.path.join(workbench.dir, thumbnail_basename)
    # Create a thumbnail.jpg that fits in a 180x180 square
    transcoders.VideoThumbnailerMarkII(
        queued_filename,
        tmp_thumb,
        180)
    # Push the thumbnail to public storage
    _log.debug('Saving thumbnail...')
    mgg.public_store.copy_local_to_storage(tmp_thumb, thumbnail_filepath)
    entry.media_files['thumb'] = thumbnail_filepath
    # save the original... but only if we did a transcoding
    # (if we skipped transcoding and just kept the original anyway as the main
    # media, then why would we save the original twice?)
    if video_config['keep_original'] and did_transcode:
        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])
    # Remove queued media file from storage and database
    proc_state.delete_queue_file()
def store_metadata(media_entry, metadata):
    """
    Store metadata from this video for this media entry.
    """
    # Let's pull out the easy, not having to be converted ones first
    stored_metadata = dict(
        [(key, metadata[key])
         for key in [
             "videoheight", "videolength", "videowidth",
             "audiorate", "audiolength", "audiochannels", "audiowidth",
             "mimetype"]
         if key in metadata])
    # We have to convert videorate into a sequence because it's a
    # special type normally..
    if "videorate" in metadata:
        videorate = metadata["videorate"]
        stored_metadata["videorate"] = [videorate.num, videorate.denom]
    # Also make a whitelist conversion of the tags.
    if "tags" in metadata:
        tags_metadata = metadata['tags']
        # we don't use *all* of these, but we know these ones are
        # safe...
        tags = dict(
            [(key, tags_metadata[key])
             for key in [
                 "application-name", "artist", "audio-codec", "bitrate",
                 "container-format", "copyright", "encoder",
                 "encoder-version", "license", "nominal-bitrate", "title",
                 "video-codec"]
             if key in tags_metadata])
        if 'date' in tags_metadata:
            date = tags_metadata['date']
            tags['date'] = "%s-%s-%s" % (
                date.year, date.month, date.day)
        # TODO: handle timezone info; gst.get_time_zone_offset +
        # python's tzinfo should help
        if 'datetime' in tags_metadata:
            dt = tags_metadata['datetime']
            # Rebuild as an ISO-8601 string from the GStreamer datetime fields.
            tags['datetime'] = datetime.datetime(
                dt.get_year(), dt.get_month(), dt.get_day(), dt.get_hour(),
                dt.get_minute(), dt.get_second(),
                dt.get_microsecond()).isoformat()
        metadata['tags'] = tags
    # Only save this field if there's something to save
    if len(stored_metadata):
        media_entry.media_data_init(
            orig_metadata=stored_metadata)
| StarcoderdataPython |
5059989 | <filename>dsvidgp/__init__.py
"""Module to build deep Gaussian process models."""
| StarcoderdataPython |
11385957 | from .teacher import build_teacher
from .roi_heads import TeacherROIHeads
from .rpn import TeacherRPN | StarcoderdataPython |
3295844 | <reponame>kelseykm/kelchat
#!/usr/bin/env python3
#Written by kelseykm
##Creates TCP chatroom server with messages encrypted in TLS
import os
import socket
import sys
import threading
import ssl
# '' means bind on all available interfaces.
HOST = ''
PORT = 1999
ADDR = (HOST, PORT)
# TLS key/certificate are expected next to the current working directory.
SSL_KEY = os.path.abspath('key.pem')
SSL_CERT = os.path.abspath('cert.pem')
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
server.listen()
# Wrap the listening socket in TLS so all chat traffic is encrypted.
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ssl_context.load_cert_chain(SSL_CERT, SSL_KEY)
server_s = ssl_context.wrap_socket(server, server_side=True)
print(f"[SERVER] Listening on port {PORT}...")
# Parallel lists shared across handler threads: connections[i] is the
# socket whose nickname is nicknames[i].
# NOTE(review): these are mutated from multiple threads without a lock —
# presumably relying on the GIL; confirm this is acceptable.
connections = []
nicknames = []
def broadcast(conn, mesg):
    """Send *mesg* to every connected client except *conn*.

    A client whose send fails is treated as disconnected: it is removed
    from the shared connections/nicknames lists, the remaining clients
    are notified, and its socket is closed.
    """
    # Iterate over a snapshot so dead clients can be removed from the
    # shared list without mutating the sequence being iterated.
    for client in list(connections):
        if client is conn:
            continue
        try:
            client.send(mesg)
        except OSError:
            # Look up the nickname before touching either list so the
            # parallel indices stay consistent.
            index = connections.index(client)
            nickname = nicknames[index]
            connections.remove(client)
            # Delete by position, not by value, so duplicate nicknames
            # cannot remove the wrong entry.
            del nicknames[index]
            for rem_client in connections:
                try:
                    rem_client.send(f"[SERVER] {nickname} has left...\n".encode())
                except OSError:
                    # This client will be reaped on its own next failure;
                    # don't let it abort the current cleanup.
                    pass
            client.close()
def listener(conn):
    """Per-client receive loop; runs inside the client's handler thread.

    Relays every incoming message to the other clients.  Cleans up the
    shared client lists and ends the thread when the client sends
    'LEAVE' or closes its end of the connection.
    """
    while True:
        data = conn.recv(1024)
        # recv() returns b'' once the peer has closed the connection;
        # treat that like an explicit LEAVE instead of busy-looping on
        # empty reads forever.
        if not data or data.decode().upper() == 'LEAVE':
            index = connections.index(conn)
            connections.remove(conn)
            print(f"[SERVER] A client disconnected. Current active connections: {threading.active_count()-2}")
            broadcast(conn, f"[SERVER] {nicknames[index]} has left...".encode())
            # Delete by position rather than by value so two clients
            # sharing a nickname cannot remove each other's entry.
            del nicknames[index]
            conn.close()
            sys.exit()  # raises SystemExit, ending only this thread
        broadcast(conn, f"[{nicknames[connections.index(conn)]}] {data.decode()}".encode())
def handle(conn, addr):
    """Handshake with a new client, register it, then relay its messages.

    Asks the client for a nickname, announces the arrival to the room,
    and hands the connection to listener(); *addr* is kept for the
    Thread signature but not used here.
    """
    try:
        conn.send("NICK".encode())  # prompt the client for its nickname
        nick = conn.recv(1024).decode()
        connections.append(conn)
        nicknames.append(nick)
        broadcast(conn, f"[SERVER] {nick} has joined...".encode())
        if len(nicknames) > 1:
            conn.send(f"[SERVER] PEOPLE IN THE CHAT ROOM RIGHT NOW:\n\t{nicknames}\n".encode())
        else:
            conn.send("[SERVER] YOU ARE CURRENTLY THE ONLY ONE IN THE CHAT ROOM\n".encode())
        listener(conn)
    except OSError:
        # Network failure during handshake or relay: make sure this dead
        # client does not linger in the shared lists.  (The narrow catch
        # also stops SystemExit/KeyboardInterrupt being swallowed.)
        if conn in connections:
            index = connections.index(conn)
            connections.remove(conn)
            del nicknames[index]
        conn.close()
        sys.exit()
def accept():
    """Accept loop: spawn one handler thread per incoming TLS client.

    Runs until Ctrl-C; clients presenting bad TLS credentials are
    reported and skipped.
    """
    while True:
        try:
            connection, address = server_s.accept()
            # The address must be inside the f-string; passing it as a
            # second print argument rendered the raw tuple after the text.
            print(f"[SERVER] New connection from {address}")
            # active_count() includes the main thread, so subtract one to
            # report only the client handler threads.
            print(f"[SERVER] Currently connected to {threading.active_count() - 1} clients")
            thread = threading.Thread(target=handle, args=(connection, address))
            thread.start()
        except KeyboardInterrupt:
            server.close()
            sys.exit()
        except ssl.SSLError:
            print("[SERVER] A CLIENT TRIED TO CONNECT WITH WRONG CERTIFICATE INFORMATION. THE CONNECTION FAILED")
if __name__ == '__main__':
    # Run the accept loop in the main thread until interrupted.
    accept()
| StarcoderdataPython |
3430163 | <filename>src/compare_files.py
import filecmp
from .types import FsPath
class CompareFiles():
    """Basic file-comparison interface used by the filehandler.

    This implementation delegates to :func:`filecmp.cmp` after a cheap
    size pre-check.
    """

    def compare(self, f1: FsPath, f2: FsPath) -> bool:
        """Return True when the two files have identical contents."""
        # Files of different length can never be equal, so skip the
        # byte-by-byte comparison entirely in that case.
        same_size = f1.stat().st_size == f2.stat().st_size
        return same_size and filecmp.cmp(f1, f2, shallow=False)
| StarcoderdataPython |
9607734 | <filename>test/test_parsers/test_broken_parse_data_from_jena.py
import os
from test.data import TEST_DATA_DIR
import pytest
import rdflib
# Recovered from
# https://github.com/RDFLib/rdflib/tree/6b4607018ebf589da74aea4c25408999f1acf2e2
# Directory containing N3 fixtures that historically failed to parse.
broken_parse_data = os.path.join(TEST_DATA_DIR, "broken_parse_test")
@pytest.fixture
def xfail_broken_parse_data(request):
    """Mark the current test as xfail when its data file is a known failure."""
    fname = request.getfixturevalue("testfile")
    # Fixtures that the N3 round-trip is known to choke on.
    expected_failures = {
        "n3-writer-test-02.n3",
        "n3-writer-test-25.n3",
        "rdf-test-01.n3",
        "rdf-test-08.n3",
        "rdf-test-10.n3",
        "rdf-test-24.n3",
    }
    if fname in expected_failures:
        marker = pytest.mark.xfail(reason=f"Expected failure with {fname}")
        request.node.add_marker(marker)
@pytest.mark.parametrize("testfile", os.listdir(broken_parse_data))
@pytest.mark.usefixtures("xfail_broken_parse_data")
def test_n3_serializer_roundtrip(testfile) -> None:
    """Each broken N3 fixture must parse into a fresh ConjunctiveGraph."""
    graph = rdflib.ConjunctiveGraph()
    fixture_path = os.path.join(broken_parse_data, testfile)
    graph.parse(fixture_path, format="n3")
| StarcoderdataPython |
class SearchAPI:
    """
    This module can be used to search through the database.
    A user should supply two things: the query as string,
    and a dictionairy with table names as keys, and a list of
    table columns as values. This dictionairy indicates which
    part of the database is to be search.
    The query will be split on spaces (' '), allowing any of
    the query's words to match on any part of the given part of
    the database.
    """
    @staticmethod
    def search(stack, needle, case_insensitive=True):
        """
        Search generically on specified database tables and columns.
        :param stack is a list of (table, [columns]) tuples.
        :param needle is a string which will be split for seperate words.
        :param case_insensitive indicates case sensitive or insensitive
               search, default is True
        :returns A set of db.models that have matched the queries
        Example:
            > stack = [ (Examination, [Examination.title]),
                        (Course, [Course.name]),
                        (Education, [Education.name])]
            > needle = "inf"
            > print searchAPI.search(stack, needle)
            set([<app.models.education.Education object at
                0x7f4f5c2ac650>, <app.models.education.Education object
                at 0x7f4f5c2ac710>])
        """
        # One SQL LIKE pattern per whitespace-separated query word.
        patterns = ["%%%s%%" % word for word in needle.split()]
        matches = []
        for model, columns in stack:
            for pattern in patterns:
                if case_insensitive:
                    criteria = [column.ilike(pattern) for column in columns]
                else:
                    criteria = [column.like(pattern) for column in columns]
                # NOTE(review): filter(*criteria) joins the column criteria
                # with AND, so a multi-column entry only matches rows where
                # the word appears in every listed column — confirm that is
                # the intended semantics (the docstring suggests OR).
                matches.extend(model.query.filter(*criteria).all())
        # Deduplicate results that matched more than one word/column.
        return set(matches)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.