id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4900284 | from pso import pso
from optitestfuns import ackley
import unittest
import math
# Demo: minimise the 2-D Ackley benchmark with particle swarm optimisation.
intVar = []  # indices of integer-constrained decision variables (none here)
# Search box is [-5, 5] in each of the two dimensions.
result = pso(ackley, [-5,-5], [5,5], intVar)
print(result.exit)
print('x_opt: {}'.format(result.xopt))
# NOTE(review): '{:2e}' is width-2 scientific notation; '{:.2e}' (two decimal
# places) was probably intended — confirm.
print('FO: {:2e}'.format(result.FO)) | StarcoderdataPython |
8136335 | <gh_stars>0
import math
import numpy as np
def flattenpoints(points, verticalz = True):
    """Project points onto the horizontal plane by zeroing their vertical
    coordinate, in place.

    The vertical axis is index 2 when ``verticalz`` is True (z-up), else
    index 1 (y-up). Works on lists of lists and on 2-D numpy arrays.
    """
    axis = 2 if verticalz else 1
    for point in points:
        point[axis] = 0
def slice(mesh, vertical, verticalz = True, json_serialize=False):
    """Intersect a triangle mesh with an axis-aligned horizontal plane.

    NOTE: this shadows the builtin ``slice``; the name is kept for
    backward compatibility with existing callers.

    Parameters
    ----------
    mesh : object with ``vertices`` and ``triangles`` attributes
        (e.g. an open3d TriangleMesh) — assumed, confirm against callers.
    vertical : float
        Height of the cutting plane along the vertical axis.
    verticalz : bool
        True if z (index 2) is the vertical axis, else y (index 1).
    json_serialize : bool
        When True, intersection points are converted to plain lists.

    Returns
    -------
    (newpoints, lines)
        All intersection points, and the per-triangle point pairs forming
        the cut polyline segments.
    """
    points = np.asarray(mesh.vertices)
    triangles = np.asarray(mesh.triangles)
    newpoints = []
    vaxis = 2 if verticalz else 1
    # Plane normal (unit along the vertical axis) and a point on the plane.
    vnorm = [0, abs(vaxis - 2), abs(vaxis - 1)]
    vcord = [x * vertical for x in vnorm]
    lines = []
    # Iterate over triangles; each triangle is three vertex indices.
    for i in range(len(triangles)):
        intersecting_points = []
        for j in range(3):
            # Edge from vertex j to vertex (j+1) mod 3.
            p0 = points[triangles[i, j]]
            p1 = points[triangles[i, (j + 1) % 3]]
            if splitpoints(p0, p1, vcord, vnorm):
                # NOTE(review): lp_intersect returns None for an edge lying
                # in the plane; that None would be appended here (and
                # .tolist() would raise) — same as the original behaviour.
                pi = lp_intersect(p0, p1, vcord, vnorm)
                if json_serialize:
                    newpoints.append(pi.tolist())
                    intersecting_points.append(pi.tolist())
                else:
                    newpoints.append(pi)
                    intersecting_points.append(pi)
        # A triangle properly cut by the plane yields exactly two points.
        if len(intersecting_points) == 2:
            lines.append([intersecting_points[0], intersecting_points[1]])
    return newpoints, lines
def lp_intersect(p0, p1, p_co, p_no, epsilon=1e-6):
    """Intersect the infinite line through p0-p1 with a plane.

    p0, p1: Define the line.
    p_co, p_no: define the plane:
        p_co is a point on the plane (plane coordinate).
        p_no is a normal vector defining the plane direction
        (does not need to be normalized).

    Returns the intersection point, or None when the line is parallel to
    the plane (|normal . direction| <= epsilon).

    The scale factor along p0->p1 may fall outside [0, 1]: < 0 means the
    hit is behind p0, > 1 means it is beyond p1.
    """
    direction = np.subtract(p1, p0)
    denom = np.dot(p_no, direction)
    if abs(denom) <= epsilon:
        # Line (segment) parallel to the plane within tolerance.
        return None
    offset = np.subtract(p0, p_co)
    fac = -1 * np.dot(p_no, offset) / denom
    return np.add(p0, direction * fac)
def planeside(p0, pcord, pnorm):
    """Signed (unnormalized) distance of point p0 from the plane defined by
    point pcord and normal pnorm; sign tells which side p0 is on."""
    point_proj = np.dot(p0, pnorm)
    plane_proj = np.dot(pcord, pnorm)
    return point_proj - plane_proj
def splitpoints(p0, p1, pcord, pnorm):
    """Return True when the plane (point pcord, normal pnorm) separates — or
    touches — the points p0 and p1.

    The signed projections have opposite signs when the points straddle the
    plane; ``<= 0`` also counts an endpoint lying exactly on the plane.
    """
    v0 = np.dot(p0, pnorm) - np.dot(pcord, pnorm)
    v1 = np.dot(p1, pnorm) - np.dot(pcord, pnorm)
    # bool() keeps the original's plain-Python-bool return (numpy comparisons
    # would otherwise yield np.bool_).
    return bool(v0 * v1 <= 0)
| StarcoderdataPython |
3586075 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models import Prefetch
from django.db.models.fields import NOT_PROVIDED
from django.template.loader import render_to_string
from comments.models import Comment
User = get_user_model()
class SearchField(object):
    """Base class for fields declared on a SearchIndex.

    ``name`` is filled in by SearchIndexMeta with the attribute name the
    field was bound to in the index class body.
    """
    name = None
    def get_model_field(self):
        """Return the backing model field path, or None if not model-backed."""
        return None
    def get_value(self, obj):
        """Return the value to index for ``obj``; subclasses must implement."""
        raise NotImplementedError()
class ModelField(SearchField):
    """Search field backed by a model attribute, addressed with Django's
    '__'-separated lookup syntax (e.g. 'author__username')."""
    def __init__(self, model_field):
        self._model_field = model_field
    def get_model_field(self):
        return self._model_field
    def get_value(self, obj):
        """Resolve the lookup path on ``obj``.

        Walks intermediate relations and returns None as soon as one of
        them is None. When the final attribute resolves to None but the
        model field is NOT nullable, falls back to the field's declared
        default (or '' when no default is provided).
        """
        field_split = self._model_field.split('__')
        # Follow all but the last segment through related objects.
        for subfield in field_split[:-1]:
            obj = getattr(obj, subfield)
            if obj is None:
                return None
        field_split = field_split[-1]
        value = getattr(obj, field_split, None)
        if value is None:
            field = obj.__class__._meta.get_field(field_split)
            if not field.null:
                if field.default == NOT_PROVIDED:
                    value = ''
                else:
                    value = field.default
        return value
class TemplateField(ModelField):
    """Search field whose value is produced by rendering a Django template.

    The template ``fulltext/<app_label>/<model_name>_<field_name>.txt`` is
    rendered with the object (and, if set, the backing model field name)
    in the context.
    """
    def __init__(self, model_field=None):
        # Unlike ModelField, the backing model field is optional here.
        super().__init__(model_field)
    def get_template_name(self, obj):
        """Template path derived from the object's model metadata."""
        meta = obj.__class__._meta
        return f'fulltext/{meta.app_label}/{meta.model_name}_{self.name}.txt'
    def get_context_data(self, obj):
        """Template context: the object plus the optional field name."""
        ctx = {}
        if self._model_field:
            ctx['document_field'] = self._model_field
        ctx['object'] = obj
        return ctx
    def get_value(self, obj):
        """Render the template to the string that will be indexed."""
        return render_to_string(self.get_template_name(obj), self.get_context_data(obj))
class CommentsField(TemplateField):
    """Template-based field that indexes an object's related comments.

    Uses a single shared template rather than a per-model one, and a
    context containing only the object's ``comments`` queryset.
    """
    def __init__(self):
        super().__init__()
    def get_context_data(self, obj):
        return {'comments': obj.comments.all()}
    def get_template_name(self, obj):
        return f'fulltext/comments/comments.txt'
class SearchIndexMeta(type):
    """Metaclass that stamps every declared SearchField with the attribute
    name it was bound to, so each field knows its own index key."""
    def __new__(cls, name, bases, dct):
        cls_instance = super().__new__(cls, name, bases, dct)
        # NOTE: the loop variable rebinds ``name`` (the class-name argument);
        # harmless here since the argument is no longer needed afterwards.
        for name, field in dct.items():
            if not isinstance(field, SearchField):
                continue
            field.name = name
        return cls_instance
class SearchIndex(object, metaclass=SearchIndexMeta):
    """Declarative description of how to build a search document for a model.

    Subclasses set ``model`` (and are attached to a ``register``) and declare
    SearchField attributes; get_index() turns a model instance into a
    populated index document.
    """
    register = None
    model = None
    def get_model(self):
        return self.model
    def get_index_queryset(self, using=None):
        """Queryset of objects to index, optionally on a specific database."""
        return self.get_model()._default_manager.using(using)
    def get_language_code(self, obj): # pylint: disable=unused-argument
        return settings.LANGUAGE_CODE
    def get_index(self, obj):
        """Build an index document for ``obj`` from all declared SearchFields."""
        instance = self.register.index_class()
        instance.language_code = self.get_language_code(obj)
        # Iterate the class dict so only fields declared on this index class
        # (already name-stamped by the metaclass) are evaluated.
        for instance_key, field in self.__class__.__dict__.items():
            if not isinstance(field, SearchField):
                continue
            setattr(instance, instance_key, field.get_value(obj))
        return instance
class CommentsPrefetch(Prefetch):
    """Prefetch for the 'comments' relation restricted to the columns the
    fulltext templates actually read."""
    def __init__(self, lookup=None, queryset=None, **kwargs):
        lookup = lookup or 'comments'
        if queryset is None:
            queryset = (Comment.objects.only(
                'pk', 'object_id', 'content_type_id', 'parent_id',
                'subject', 'filtered_comment', 'is_public', 'is_removed'
            ))
        super().__init__(lookup, queryset=queryset, **kwargs)
class AuthorPrefetch(Prefetch):
    """Prefetch for the 'author' relation restricted to the user columns
    needed for display/indexing."""
    def __init__(self, lookup=None, queryset=None, **kwargs):
        lookup = lookup or 'author'
        if queryset is None:
            queryset = (User.objects.only(
                'pk', 'avatar', 'first_name', 'last_name', 'username',
            ))
        super().__init__(lookup, queryset=queryset, **kwargs)
| StarcoderdataPython |
4881457 | <filename>web/ptonprowl/students/views.py
# django imports
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
# project imports
from .models import Student
def index(request):
    """Plain-text placeholder view for the students app root URL."""
    return HttpResponse("Hello, world!")
class AccountDetails( LoginRequiredMixin, generic.DetailView):
    """Detail view of a Student's account page; requires login."""
    login_url = '/login/'
    redirect_field_name = 'redirect_to'
    model = Student
    template_name = 'students/account.html'
    # delivers user object upon request
    def get_object(self):
        # Looked up by the 'netid' URL kwarg rather than the default pk.
        return get_object_or_404(Student, netid=self.kwargs.get('netid'))
| StarcoderdataPython |
3263097 | <filename>scripts/x_model_gen.py
"""
This is a small script for generating the initial Go model from the
olca-schema yaml files. To run this script you need to have PyYAML
installed:
pip install pyyaml
You also have to configure the YAML_DIR in this script to point to
the directory where the YAML files are located:
# clone the olca-schema repository to some folder
cd <folder>
git clone https://github.com/GreenDelta/olca-schema.git
# <folder>/olca-schema/yaml is the path for the YAML_DIR
After this you can run this script. It will print the generated structs
to the console:
python x_model_gen.py > [.. path to generated file].go
"""
YAML_DIR = 'C:/Users/Besitzer/Downloads/olca-schema/yaml'
import yaml
from os import listdir
def print_class(class_model):
    """Print the Go struct (and, when applicable, a constructor) for one
    olca-schema class definition."""
    name = class_model['name']
    # Doc comment links back to the schema reference page for the type.
    print('// %s http://greendelta.github.io/olca-schema/html/%s.html' % (name, name))
    t = 'type %s struct {\n' % name
    if 'superClass' in class_model:
        # Go struct embedding stands in for schema inheritance.
        t += '\t%s\n' % class_model['superClass']
    if 'properties' in class_model:
        for prop in class_model['properties']:
            t += '\t' + convert_property(prop) + '\n'
    t += '}\n'
    print(t)
    print_constructor(class_model)
def convert_property(prop):
    """Translate one olca-schema property into a Go struct field line.

    ``prop`` is a dict with 'name' and 'type' keys. Returns
    ``<GoField> <GoType> `json:"<name>[,omitempty]"``` — primitive numeric
    and boolean types stay required; all other types get ``omitempty``.
    """
    name = prop['name']
    go_field = name[0].upper() + name[1:]
    schema_type = prop['type']  # renamed: original shadowed the builtin `type`
    if schema_type == 'integer':
        go_type, tag = ' int', ' `json:"%s"`' % name
    elif schema_type == 'double':
        go_type, tag = ' float64', ' `json:"%s"`' % name
    elif schema_type == 'boolean':
        go_type, tag = ' bool', ' `json:"%s"`' % name
    elif schema_type == 'date' or schema_type == 'dateTime':
        # Dates are serialized as strings in the Go model.
        go_type, tag = ' string', ' `json:"%s,omitempty"`' % name
    elif schema_type == 'List[string]':
        go_type, tag = ' []string', ' `json:"%s,omitempty"`' % name
    elif schema_type.startswith('List['):
        # Generic list: List[X] -> []X
        sub = schema_type[5:(len(schema_type) - 1)]
        go_type, tag = ' []' + sub, ' `json:"%s,omitempty"`' % name
    else:
        # Reference to another schema type; used verbatim as the Go type.
        go_type, tag = ' ' + schema_type, ' `json:"%s,omitempty"`' % name
    return go_field + go_type + tag
def print_constructor(class_model):
    """Print a Go ``New<Name>(id, name)`` constructor for root/categorized
    entities; does nothing for other classes."""
    if 'superClass' not in class_model:
        return
    name = class_model['name']
    parent = class_model['superClass']
    # Only entity types carry Context/Type/ID/Name and get a constructor.
    if parent != 'RootEntity' and parent != 'CategorizedEntity':
        return
    recv = name[0].lower()
    body = [
        '// New%s initializes a new %s with the given id and name' % (name, name),
        'func New%s(id, name string) *%s {' % (name, name),
        '\t%s := %s{}' % (recv, name),
        '\t%s.Context = ContextURL' % recv,
        '\t%s.Type = "%s"' % (recv, name),
        '\t%s.ID = id' % recv,
        '\t%s.Name = name' % recv,
        '\treturn &%s' % recv,
        '}',
    ]
    # Trailing '\n' plus print's own newline reproduces the original's
    # blank line after the closing brace.
    print('\n'.join(body) + '\n')
if __name__ == '__main__':
    print('package schema\n')
    # Emit one Go struct per YAML class definition found in YAML_DIR.
    for f in listdir(YAML_DIR):
        path = YAML_DIR + '/' + f
        with open(path, 'r', encoding='utf-8') as stream:
            # safe_load avoids arbitrary Python object construction and the
            # PyYAML deprecation warning for load() without an explicit Loader.
            model = yaml.safe_load(stream)
        if 'class' in model:
            print_class(model['class'])
| StarcoderdataPython |
5050308 | from concurrent import futures
import time
import logging
import sys
import grpc
import util
import example_pb2_grpc as pbgrpc
import example_pb2 as pb
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# functions to validate
def xor_cipher(key: str, in_str: str) -> str:
    """XOR each UTF-8 byte of ``in_str`` with the repeating ``key`` bytes.

    Applying the same key twice restores the input (for results that decode
    as valid UTF-8, e.g. ASCII in / ASCII key).
    """
    key_bytes = bytearray(key, encoding='utf-8')
    n_key = len(key_bytes)
    out = bytearray(
        byte ^ key_bytes[i % n_key]
        for i, byte in enumerate(bytearray(in_str, encoding='utf-8'))
    )
    return out.decode()
def my_sqrt(x: float, epsilon: float) -> float:
    """Newton's-method square root of |x|, converged to ``epsilon``.

    0 and 1 are returned unchanged. For negative x the iteration converges
    to the NEGATIVE root of |x| (e.g. my_sqrt(-4, eps) -> -2) — a quirk of
    seeding the estimate with x itself, preserved for compatibility.
    """
    if x == 0 or x == 1:
        return x
    target = abs(x)
    estimate = x  # initial guess is the input itself
    while True:
        improved = (estimate + target / estimate) / 2
        # Stop when successive estimates agree to within epsilon.
        if abs(estimate - improved) < epsilon:
            return improved
        estimate = improved
class Servicer(pbgrpc.ExampleServicer):
    """gRPC service implementation wrapping the pure functions above."""
    def Ping(self, request, _context):
        """Health check: logs the call and always answers Status('OK')."""
        logging.info('Ping received')
        return pb.Status(status='OK')
    def XorCipher(self, request, _context):
        """Apply xor_cipher to the request's (key, in_str) pair."""
        out_str = xor_cipher(request.key, request.in_str)
        return pb.XorCipherResponse(out_str=out_str)
    def MySqrt(self, request, context):
        """Approximate a square root via my_sqrt with the request's epsilon."""
        value = my_sqrt(request.value, request.epsilon)
        return pb.Calc(value=value, epsilon=request.epsilon)
def serve(port_num):
    """Start the gRPC Example server on 0.0.0.0:``port_num`` and block forever.

    Raises RuntimeError if the port cannot be bound; stops cleanly on
    KeyboardInterrupt.
    """
    port_str = f'0.0.0.0:{port_num}'
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    servicer = Servicer()
    pbgrpc.add_ExampleServicer_to_server(servicer, server)
    # add_insecure_port returns 0 on bind failure, else the bound port.
    port_num = server.add_insecure_port(port_str)
    if port_num == 0:
        err_str = f'Could not connect to server at port {port_str}'
        logging.error(err_str)
        raise RuntimeError(err_str)
    else:
        server.start()
        msg_str = f'Server listening on: {port_str}'
        logging.info(msg_str)
        try:
            # gRPC serves from background threads; keep the main thread alive.
            while True:
                time.sleep(_ONE_DAY_IN_SECONDS)
        except KeyboardInterrupt:
            server.stop(0)
def set_logger(filename=None):
    """Configure root logging at INFO level with the server's format.

    With ``filename`` set, logs go to that file (truncated) AND to stdout
    via an explicitly attached StreamHandler. Without it, plain stdout-only
    basicConfig is used.

    Returns the root logger.
    """
    fmt_str = '[%(levelname)s:Py Server] -> %(message)s'
    if filename is not None:  # file and stdout
        logging.basicConfig(filename=filename, filemode='w',
                            level=logging.INFO, format=fmt_str)
        # The original called basicConfig a second time here; once handlers
        # exist that call is a no-op, so it was removed.
        root = logging.getLogger()
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(logging.INFO)
        handler.setFormatter(logging.Formatter(fmt_str))
        root.addHandler(handler)
    else:
        logging.basicConfig(level=logging.INFO, format=fmt_str)
        root = logging.getLogger()
    return root
if __name__ == '__main__':
    root_logger = set_logger()
    # util is a project-local helper; config.yaml may override the port.
    config = util.read_config('config.yaml')
    port = 50051  # default gRPC port
    if 'servers' in config and 'py' in config['servers'] and 'port' in config['servers']['py']:
        port = config['servers']['py']['port']
    serve(port)
| StarcoderdataPython |
93187 | """Python package treeplot vizualizes a tree based on a randomforest or xgboost model."""
# --------------------------------------------------
# Name : treeplot.py
# Author : E.Taskesen
# Contact : <EMAIL>
# github : https://github.com/erdogant/treeplot
# Licence : See Licences
# --------------------------------------------------
# %% Libraries
import os
import sys
import zipfile
import numpy as np
from sklearn.tree import export_graphviz
# from sklearn.tree.export import export_text
from sklearn.tree import export_text
from subprocess import call
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from graphviz import Source
import wget
URL = 'https://erdogant.github.io/datasets/graphviz-2.38.zip'
# %% Plot tree
def plot(model, featnames=None, num_trees=None, plottype='horizontal', figsize=(25,25), verbose=3):
    """Make tree plot for the input model.
    Parameters
    ----------
    model : model
        xgboost or randomforest model.
    featnames : list, optional
        list of feature names. The default is None.
    num_trees : int, default None
        The best performing tree is choosen. Specify any other ordinal number for another target tree
    plottype : str, (default : 'horizontal')
        Works only in case of xgb model.
        * 'horizontal'
        * 'vertical'
    figsize: tuple, default (25,25)
        Figure size, (height, width)
    verbose : int, optional
        Print progress to screen. The default is 3.
        0: NONE, 1: ERROR, 2: WARNING, 3: INFO (default), 4: DEBUG, 5: TRACE
    Returns
    -------
    ax : Figure axis
        Figure axis of the input model.
    """
    # Dispatch on the model's repr rather than its type so any xgboost /
    # sklearn-tree / lightgbm flavour is accepted without importing them here.
    modelname = str(model).lower()
    if ('xgb' in modelname):
        if verbose>=4: print('xgboost plotting pipeline.')
        ax = xgboost(model, featnames=featnames, num_trees=num_trees, figsize=figsize, plottype=plottype, verbose=verbose)
    elif ('tree' in modelname) or ('forest' in modelname) or ('gradientboosting' in modelname):
        if verbose>=4: print('tree plotting pipeline.')
        ax = randomforest(model, featnames=featnames, num_trees=num_trees, figsize=figsize, verbose=verbose)
    elif ('lgb' in modelname):
        ax = lgbm(model, featnames=featnames, num_trees=num_trees, figsize=figsize, verbose=verbose)
    else:
        print('[treeplot] >Model not recognized: %s' %(modelname))
        ax = None
    return ax
# %% Plot tree
def lgbm(model, featnames=None, num_trees=None, figsize=(25,25), verbose=3):
    """Plot a tree and the feature importances for a LightGBM model.

    Parameters mirror plot(); ``num_trees`` defaults to the model's best
    iteration when available, else 0.

    Returns (ax_tree, ax_importance); either may be None on failure.
    """
    try:
        from lightgbm import plot_tree, plot_importance
    except ImportError:
        # Narrowed from a bare `except:` so ^C etc. are not swallowed.
        if verbose>=1: raise ImportError('lightgbm must be installed. Try to: <pip install lightgbm>')
        return None
    # Check model
    _check_model(model, 'lgb')
    # Set env
    _set_graphviz_path()
    if (num_trees is None) and hasattr(model, 'best_iteration_'):
        num_trees = model.best_iteration_
        if verbose>=3: print('[treeplot] >Best detected tree: %.0d' %(num_trees))
    elif num_trees is None:
        num_trees = 0
    ax1 = None
    try:
        fig, ax1 = plt.subplots(1, 1, figsize=figsize)
        plot_tree(model, tree_index=num_trees, dpi=200, ax=ax1)
    except Exception:
        if _get_platform() != "windows":
            print('[treeplot] >Install graphviz first: <sudo apt install python-pydot python-pydot-ng graphviz>')
    # Plot importance
    ax2 = None
    try:
        fig, ax2 = plt.subplots(1, 1, figsize=figsize)
        plot_importance(model, max_num_features=50, ax=ax2)
    except Exception:
        print('[treeplot] >Error: importance can not be plotted. Booster.get_score() results in empty. This maybe caused by having all trees as decision dumps.')
    return(ax1, ax2)
# %% Plot tree
def xgboost(model, featnames=None, num_trees=None, plottype='horizontal', figsize=(25,25), verbose=3):
    """Plot tree based on a xgboost.
    Parameters
    ----------
    model : model
        xgboost model.
    featnames : list, optional
        list of feature names. The default is None.
    num_trees : int, default None
        The best performing tree is choosen. Specify any other ordinal number for another target tree
    plottype : str, optional
        Make 'horizontal' or 'vertical' plot. The default is 'horizontal'.
    figsize: tuple, default (25,25)
        Figure size, (height, width)
    verbose : int, optional
        Print progress to screen. The default is 3.
        0: NONE, 1: ERROR, 2: WARNING, 3: INFO (default), 4: DEBUG, 5: TRACE
    Returns
    -------
    ax : Figure axis
        Figure axis of the input model.
    """
    try:
        from xgboost import plot_tree, plot_importance
    except ImportError:
        # Narrowed from a bare `except:`; also return None for verbose<1
        # (previously execution fell through to a NameError), matching lgbm().
        if verbose>=1: raise ImportError('xgboost must be installed. Try to: <pip install xgboost>')
        return None
    _check_model(model, 'xgb')
    # Set env
    _set_graphviz_path()
    # xgboost's rankdir: 'UD' = top-down, 'LR' = left-right.
    if plottype=='horizontal': plottype='UD'
    if plottype=='vertical': plottype='LR'
    if (num_trees is None) and hasattr(model, 'best_iteration'):
        num_trees = model.best_iteration
        if verbose>=3: print('[treeplot] >Best detected tree: %.0d' %(num_trees))
    elif num_trees is None:
        num_trees = 0
    ax1 = None
    try:
        fig, ax1 = plt.subplots(1, 1, figsize=figsize)
        plot_tree(model, num_trees=num_trees, rankdir=plottype, ax=ax1)
    except Exception:
        if _get_platform() != "windows":
            print('[treeplot] >Install graphviz first: <sudo apt install python-pydot python-pydot-ng graphviz>')
    # Plot importance
    ax2 = None
    try:
        fig, ax2 = plt.subplots(1, 1, figsize=figsize)
        plot_importance(model, max_num_features=50, ax=ax2)
    except Exception:
        print('[treeplot] >Error: importance can not be plotted. Booster.get_score() results in empty. This maybe caused by having all trees as decision dumps.')
    return(ax1, ax2)
# %% Plot tree
def randomforest(model, featnames=None, num_trees=None, filepath='tree', export='png', resolution=100, figsize=(25,25), verbose=3):
    """Plot tree based on a randomforest.
    Parameters
    ----------
    model : model
        randomforest model.
    featnames : list, optional
        list of feature names. The default is None.
    num_trees : int, default 0
        Specify the ordinal number of target tree
    filepath : str, optional
        filename to export. The default is 'tree'.
    export : list of str, optional
        Export type. The default is 'png'.
        Alternatives: 'pdf', 'png'
    resolution : int, optional
        resolution of the png file. The default is 100.
    figsize: tuple, default (25,25)
        Figure size, (height, width)
    verbose : int, optional
        Print progress to screen. The default is 3.
        0: NONE, 1: ERROR, 2: WARNING, 3: INFO (default), 4: DEBUG, 5: TRACE
    Returns
    -------
    ax : Figure axis
        Figure axis of the input model.
    """
    ax = None
    dotfile = None
    pngfile = None
    if num_trees is None: num_trees = 0
    # Check model
    _check_model(model, 'randomforest')
    # Set env
    _set_graphviz_path()
    if export is not None:
        dotfile = filepath + '.dot'
        pngfile = filepath + '.png'
    if featnames is None:
        featnames = np.arange(0, len(model.feature_importances_)).astype(str)
    # Get model parameters: gradient boosting nests its estimators one level
    # deeper; plain classifiers/regressors have no estimators_ at all.
    if ('gradientboosting' in str(model).lower()):
        estimator = model.estimators_[num_trees][0]
    else:
        if hasattr(model, 'estimators_'):
            estimator = model.estimators_[num_trees]
        else:
            estimator = model
    # Make dot file (returns the dot source only when out_file is None).
    dot_data = export_graphviz(estimator,
                               out_file=dotfile,
                               feature_names=featnames,
                               class_names=model.classes_.astype(str),
                               rounded=True,
                               proportion=False,
                               precision=2,
                               filled=True,
                               )
    # Save to pdf
    if export == 'pdf':
        s = Source.from_file(dotfile)
        s.view()
    # Save to png
    elif export == 'png':
        try:
            call(['dot', '-Tpng', dotfile, '-o', pngfile, '-Gdpi=' + str(resolution)])
            fig, ax = plt.subplots(1, 1, figsize=figsize)
            img = mpimg.imread(pngfile)
            plt.imshow(img)
            plt.axis('off')
            plt.show()
        except Exception:
            # Narrowed from a bare `except:` so ^C etc. are not swallowed.
            if _get_platform() != "windows":
                print('[treeplot] >Install graphviz first: <sudo apt install python-pydot python-pydot-ng graphviz>')
    else:
        # NOTE(review): this Source object is never rendered and plt.show()
        # has no figure to display here — preserved from the original;
        # confirm the intended behaviour for export=None.
        graph = Source(dot_data)
        plt.show()
    return(ax)
# %% Import example dataset from github.
def import_example(data='random', n_samples=1000, n_feat=10):
    """Import example dataset from sklearn.
    Parameters
    ----------
    data : str
        'random' : str, two-class
        'breast' : str, two-class
        'titanic': str, two-class
        'iris' : str, multi-class
    n_samples : int, optional
        Number of samples to generate. The default is 1000.
    n_feat : int, optional
        Number of features to generate. The default is 10.
    Returns
    -------
    tuple (X,y).
        X is the dataset and y the response variable.
    Raises
    ------
    ValueError
        If ``data`` is not one of the supported dataset names.
    """
    try:
        from sklearn import datasets
    except ImportError:
        print('This requires: <pip install sklearn>')
        return None, None
    if data=='iris':
        X, y = datasets.load_iris(return_X_y=True)
    elif data=='breast':
        X, y = datasets.load_breast_cancer(return_X_y=True)
    elif data=='titanic':
        X, y = datasets.fetch_openml("titanic", version=1, as_frame=True, return_X_y=True)
    elif data=='random':
        X, y = datasets.make_classification(n_samples=n_samples, n_features=n_feat)
    else:
        # Previously an unknown name fell through to a NameError on X, y.
        raise ValueError('[treeplot] >Unknown dataset name: %s' % data)
    return X, y
# %% Get graphiz path and include into local PATH
def _set_graphviz_path(verbose=3):
    """Ensure the graphviz `dot` binary is reachable via PATH.

    On Windows: download/unzip a bundled graphviz release and append its
    bin directory to os.environ["PATH"]. On other platforms this is a
    no-op (graphviz is expected to be installed system-wide).

    Returns the bin path added ('' on non-Windows).
    """
    finPath=''
    if _get_platform()=="windows":
        # Download from github
        [gfile, curpath] = _download_graphviz(URL, verbose=verbose)
        # curpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'RESOURCES')
        # filesindir = os.listdir(curpath)[0]
        # Strip the file extension to get the archive's directory name.
        idx = gfile[::-1].find('.') + 1
        dirname = gfile[:-idx]
        getPath = os.path.abspath(os.path.join(curpath, dirname))
        getZip = os.path.abspath(os.path.join(curpath, gfile))
        # Unzip if path does not exists
        if not os.path.isdir(getPath):
            if verbose>=3: print('[treeplot] >Extracting graphviz files..')
            [pathname, _] = os.path.split(getZip)
            # Unzip
            zip_ref = zipfile.ZipFile(getZip, 'r')
            zip_ref.extractall(pathname)
            zip_ref.close()
            getPath = os.path.join(pathname, dirname)
        # Point directly to the bin
        finPath = os.path.abspath(os.path.join(getPath, 'release', 'bin'))
    else:
        pass
        # sudo apt install python-pydot python-pydot-ng graphviz
        # dpkg -l | grep graphviz
        # call(['dpkg', '-l', 'grep', 'graphviz'])
        # call(['dpkg', '-s', 'graphviz'])
    # Add to system
    # NOTE(review): this is a substring test — on non-Windows finPath is ''
    # which is "in" every string, so nothing is appended (harmless), and a
    # genuine path could in theory be a substring of another PATH entry.
    if finPath not in os.environ["PATH"]:
        if verbose>=3: print('[treeplot] >Set path in environment.')
        os.environ["PATH"] += os.pathsep + finPath
    return(finPath)
# %%
def _get_platform():
platforms = {
'linux1':'linux',
'linux2':'linux',
'darwin':'osx',
'win32':'windows'
}
if sys.platform not in platforms:
return sys.platform
return platforms[sys.platform]
# %% Check input model
def _check_model(model, expected):
modelname = str(model).lower()
if (expected=='randomforest'):
if ('forest' in modelname) or ('tree' in modelname) or ('gradientboosting' in modelname):
pass
else:
print('[treeplot] >>Warning: The input model seems not to be a tree-based model?')
if (expected=='xgb'):
if ('xgb' not in modelname):
print('[treeplot] >Warning: The input model seems not to be a xgboost model?')
if (expected=='lgb'):
if ('lgb' not in modelname):
print('[treeplot] >Warning: The input model seems not to be a lightgbm model?')
# %% Import example dataset from github.
def _download_graphviz(url, verbose=3):
    """Import example dataset from github.
    Parameters
    ----------
    url : str, optional
        url-Link to graphviz. The default is 'https://erdogant.github.io/datasets/graphviz-2.38.zip'.
    verbose : int, optional
        Print message to screen. The default is 3.
    Returns
    -------
    tuple : (gfile, curpath).
        gfile : filename
        curpath : currentpath
    """
    # Cache next to this module, under RESOURCES/.
    curpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'RESOURCES')
    gfile = wget.filename_from_url(url)
    PATH_TO_DATA = os.path.join(curpath, gfile)
    if not os.path.isdir(curpath):
        if verbose>=3: print('[treeplot] >Downloading graphviz..')
        os.makedirs(curpath, exist_ok=True)
    # Check file exists.
    if not os.path.isfile(PATH_TO_DATA):
        # Download data from URL
        if verbose>=3: print('[treeplot] >Downloading graphviz..')
        wget.download(url, curpath)
    return(gfile, curpath)
| StarcoderdataPython |
3432672 | <filename>setup.py
# -*- coding: utf-8 -*-
from setuptools import setup
# Package metadata for the triggercmd CLI.
# NOTE(review): author/author_email contain dataset-anonymised placeholders
# (<NAME>, <EMAIL>) — restore real values before publishing.
setup(
    name="triggercmd_cli",
    version="0.1.0",
    url="https://github.com/GussSoares/triggercmd-cli",
    license="MIT License",
    author="<NAME>",
    author_email="<EMAIL>",
    keywords="triggercmd alexa echo-dot cli archlinux manjaro",
    description=u"CLI to TriggerCMD to archlinux distribuction based.",
    packages=["triggercmd_cli"],
    install_requires=[],
)
| StarcoderdataPython |
11314771 | import os
import re
import shutil
from sinolify.converters.base import ConverterBase
from sinolify.converters.mapping import ConversionMapping
from sinolify.utils.log import log, warning_assert, error_assert, die
from sinolify.heuristics.limits import pick_time_limits
class SowaToSinolConverter(ConverterBase):
    """ A converter used to convert Sowa packages to Sinol packages.
    See /dev/null for Sowa format specification and /dev/null for Sinol format
    specification.
    """
    # Regex fragment matching recognised program-source extensions.
    _prog_ext = '(?:cpp|c|cc|pas)'
    def __init__(self, *args, auto_time_limits=True, threads=1, checkers=None, **kwargs):
        """ Instantiates new SowaToSinolConverter.
        :param auto_time_limits: If true, automatically sets time limits.
        :param threads: Number of threads for parallel execution.
        :param checkers: Optional (find, replace) pair used to build a
            ConversionMapping for checker translation.
        """
        super().__init__(*args, **kwargs)
        self.auto_time_limits = auto_time_limits
        self.threads = threads
        if checkers:
            checkers_find, checkers_replace = checkers
            log.info(f"Setting up checker mapping {checkers_find} -> {checkers_replace}")
            self.checkers_mapper = ConversionMapping(checkers_find, checkers_replace)
        else:
            self.checkers_mapper = None
    @property
    def _id(self) -> str:
        """ A helper extracting source package ID. """
        return self._source.id
    def make_tests(self) -> None:
        """ Copies tests. """
        # Inputs are mandatory; missing outputs only warn (they can be generated).
        error_assert(self.copy(rf'in/{self._id}\d+[a-z]*.in') > 0,
                     'No input files')
        warning_assert(self.copy(rf'out/{self._id}\d+[a-z]*.out') > 0,
                       'No output files')
    def make_doc(self):
        """ Copies documents.
        Extracts the statement and its source to doc, along with anything
        that might be some sort of dependency.
        """
        error_assert(self.copy_rename(rf'doc/{self._id}\.pdf',
                                      f'doc/{self._id}zad.pdf') > 0,
                     'No problem statement')
        warning_assert(self.copy_rename(rf'desc/{self._id}\.tex',
                                        f'doc/{self._id}zad.tex'),
                       'No statement source')
        self.ignore(f'desc/{self._id}_opr.tex')
        # Copy remaining statement dependencies (images, style files, ...).
        self.copy_rename(rf'desc/(.*\.(?:pdf|tex|cls|png|jpg|JPG|sty|odg))', rf'doc/\1',
                         ignore_processed=True)
    def make_solutions(self) -> None:
        """ Copies solutions.
        Copies the main solution (i.e. named exactly {task id}) to {task id}1.
        The remaining solutions are copied in unspecified order and are
        numbered with integers starting from 2.
        """
        log.debug('Making solutions')
        error_assert(self.copy_rename(rf'sol/{self._id}\.({self._prog_ext})',
                                      rf'prog/{self._id}1.\1'),
                     'No main model solution')
        # Alternative solutions become {id}2, {id}3, ... in find order.
        for i, p in enumerate(self.find(rf'sol/{self._id}.+\.{self._prog_ext}')):
            self.copy(p, lambda p: re.sub(rf'.*\.({self._prog_ext})',
                                          rf'prog/{self._id}{i + 2}.\1', p))
        self.copy(rf'utils/.*\.({self._prog_ext}|sh)', lambda p: f'prog/{p}')
        # Everything else from sol/ is preserved under prog/other/.
        self.copy_rename(rf'sol/(.*{self._prog_ext})', r'prog/other/\1', ignore_processed=True)
    def make_checker(self) -> None:
        """ Converts a checker.
        If no checker is found in check/, then no action is performed.
        Otherwise a checker is looked up in the checker mapper. If no
        match is found, checker is put in the mapper as `todo` and
        the program terminates with an error. Otherwise the mapped
        replacement is copied as package's checker.
        """
        original_checker = self.one(rf'check/.*\.{self._prog_ext}')
        if not original_checker:
            log.warning('No checker found.')
            return
        self.ignore(original_checker)
        original_checker = self._source.abspath(original_checker)
        error_assert(self.checkers_mapper, 'Checker found but no checker mapper was set up.')
        try:
            replacement = self.checkers_mapper.find(original_checker)
            if not replacement:
                log.info('Ignoring the checker.')
                return
            log.info(f'Copying the checker from mapper: {replacement}')
            # Keep the replacement's extension when naming {id}chk.*.
            shutil.copy(replacement, self._target.abspath(f'prog/{self._id}chk{os.path.splitext(replacement)[1]}'))
        except ConversionMapping.FindError:
            todo_filename = self.checkers_mapper.todo(original_checker)
            die(f'Putting the checker in mapper as {todo_filename}. Please fix it.')
        except ConversionMapping.ReplaceError:
            die('Unable to find a replacement for the checker in checker mapper.')
        self.ignore('check/[^.]*')
    def make_time_limits_config(self) -> str:
        """ Heuristically chooses time limit and returns config entry setting it """
        main_solution = self.one(rf'sol/{self._id}\.{self._prog_ext}')
        error_assert(main_solution, 'No main solution found')
        main_solution = self._source.abspath(main_solution)
        inputs = [self._source.abspath(p) for p in self.find(rf'in/{self._id}\d+[a-z]*.in')]
        # Limit is measured in seconds by the heuristic; config wants ms.
        limit = int(pick_time_limits(main_solution, inputs, threads=self.threads) * 1000)
        config = 'time_limits:\n'
        # NOTE(review): lstrip/rstrip strip CHARACTER SETS, not prefixes /
        # suffixes — a test name starting with an id character or ending in
        # 'i'/'n'/'.' would be over-stripped. Confirm with real package names
        # (str.removeprefix/removesuffix would be the exact operation).
        tests = [os.path.basename(i).lstrip(self._id).rstrip('.in') for i in inputs]
        config += '\n'.join([f' {test}: {limit}' for test in sorted(tests)])
        return config
    def make_title_config(self):
        """ Extracts title from LaTeX and outputs config entry. """
        statement = self.one(rf'desc/{self._id}\.tex')
        if not statement:
            log.warning('Title requires manual setting.')
            return "title: TODO\n"
        else:
            latex = open(self._source.abspath(statement)).read()
            # Grab \title{...} (optionally wrapped in \mbox{...}) and drop
            # LaTeX-only markup (~ ties, $ math delimiters).
            title = re.search(r'\\title{(?:\\mbox{)?([^}]*)}', latex).group(1).replace('~', ' ').replace('$', '')
            return f'title: {title}\n'
    def make_config(self):
        """ Generates Sinol config. """
        # NOTE(review): the file handle is never closed/flushed explicitly;
        # relies on interpreter cleanup — consider a `with` block.
        config = open(self._target.abspath('config.yml'), 'w')
        config.write(self.make_title_config())
        if self.auto_time_limits:
            config.write(self.make_time_limits_config())
    def convert(self) -> None:
        """ Executes a conversion from Sowa to Sinol.
        Emits a warning if some unexpected files are not processed.
        """
        self.make_tests()
        self.make_solutions()
        self.make_doc()
        self.make_checker()
        self.make_config()
        # Ignore editor backup files
        self.ignore(r'.*(~|\.swp|\.backup|\.bak)')
        # Ignore package creation system files
        self.ignore(r'.sowa-sign')
        self.ignore(r'(.*/)?Makefile(\.in)?')
        # Ignore utils
        self.ignore(r'utils/.*')
        # Ignore LaTeX's leftovers
        self.ignore(r'desc/.*\.(aux|log|synctex)')
        # Ignore solution description
        self.ignore(f'info/{self._id}_opr.pdf')
        # Ignore some common temporary files left by authors
        self.ignore(r'tmpdesc/.*')
        self.ignore(r'(sol|check)/.*(\.o|_PAS|_CPP|_C|\.out)')
        self.ignore(rf'[^/]*\.{self._prog_ext}')
        if self.not_processed():
            log.warning('%d file(s) not processed: %s', len(self.not_processed()),
                        ', '.join(self.not_processed()))
| StarcoderdataPython |
9744911 | <reponame>mikeengland/fireant<gh_stars>1-10
from .builder import *
| StarcoderdataPython |
9617264 | import os
import pathlib
import torch
SEED_VALUE = 42  # global RNG seed shared across experiments
# Repository root: the parent of the directory containing this module.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
LOCAL_DATA_LIMIT = 75000  # NOTE(review): presumed cap on locally loaded rows — confirm
# Prefer GPU when available; used as the default device everywhere.
TORCH_DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
DATA_FOLDER = os.path.join(PROJECT_ROOT, "data")
EMBEDDINGS_FOLDER = os.path.join(PROJECT_ROOT, "embeddings")
MODELS_FOLDER = os.path.join(PROJECT_ROOT, "models")
def data_path(path, *paths):
    """Build a path under DATA_FOLDER, creating its parent directory."""
    return construct_path(DATA_FOLDER, path, *paths)
def embeddings_path(path, *paths):
    """Build a path under EMBEDDINGS_FOLDER, creating its parent directory."""
    return construct_path(EMBEDDINGS_FOLDER, path, *paths)
def models_path(path, *paths):
    """Build a path under MODELS_FOLDER, creating its parent directory."""
    return construct_path(MODELS_FOLDER, path, *paths)
def construct_path(folder_dir, path, *paths):
    """Join path segments under ``folder_dir`` and ensure the parent exists.

    Side effect: performs a ``mkdir -p`` of the resulting path's directory,
    so callers can immediately write to the returned location.
    (Trailing dataset-dump residue on the return line was removed.)
    """
    constructed_path = os.path.join(folder_dir, path, *paths)
    directory = os.path.dirname(constructed_path)
    pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
    return constructed_path
3304006 | <reponame>meals-app/django-graphql-auth
from .settings import *
# django-graphql-auth test configuration overrides.
GRAPHQL_AUTH = {
    "ALLOW_DELETE_ACCOUNT": True,
    # Registration asks only for email + username (GraphQL scalar types).
    "REGISTER_MUTATION_FIELDS": {"email": "String", "username": "String"},
    "UPDATE_MUTATION_FIELDS": ["first_name", "last_name"],
    # Users must verify their account before they can log in.
    "ALLOW_LOGIN_NOT_VERIFIED": False,
}
INSTALLED_APPS += ["tests"]
# Tests run against the custom user model defined in the tests app.
AUTH_USER_MODEL = "tests.CustomUser"
| StarcoderdataPython |
4826038 | import math
from pathlib import Path
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
class Net(nn.Module):
    """Small MNIST CNN: two 3x3 conv layers, 2x2 max-pool, dropout, two FC
    layers producing 10 class scores."""

    def __init__(self):
        super().__init__()
        # Spatial sizes follow N = (W - F + 2P) / S + 1 for 28x28 input:
        self.conv1 = nn.Conv2d(1, 32, 3, 1)      # -> [32, 26, 26]
        self.conv2 = nn.Conv2d(32, 64, 3, 1)     # -> [64, 24, 24]
        # (max_pool2d in forward() -> [64, 12, 12])
        self.dropout1 = nn.Dropout(0.25)
        self.fc1 = nn.Linear(64 * 12 * 12, 128)  # -> [128]
        self.dropout2 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(128, 10)            # -> [10]

    def forward(self, x):
        """Map a [B, 1, 28, 28] batch to [B, 10] class scores."""
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, (2, 2))
        out = self.dropout1(out)
        out = out.view(-1, self.num_flat_features(out))
        out = F.relu(self.fc1(out))
        out = self.dropout2(out)
        # NOTE(review): ReLU over the final logits is unusual (it zeroes all
        # negative class scores before the caller's softmax) — preserved
        # because the shipped checkpoint was presumably trained this way.
        return F.relu(self.fc2(out))

    def num_flat_features(self, x):
        """Number of features when flattening all non-batch dimensions."""
        trailing_dims = x.size()[1:]  # all dimensions except the batch dimension
        count = 1
        for dim in trailing_dims:
            count *= dim
        return count
if __name__ == '__main__':
    # Run the saved MNIST model over every image in ./data and plot each
    # predicted digit with its softmax confidence.
    data_dir = Path('data')
    checkpoint = '../../checkpoints/mnist.pt'
    net = Net()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = net.to(device)
    model.load_state_dict(torch.load(checkpoint))
    model.eval()
    print(model)
    transform = transforms.Compose([
        transforms.Resize((28, 28)),
        transforms.ToTensor(),
    ])
    paths = list(data_dir.iterdir())
    total = len(paths)
    # Arrange the plots in a 4-column grid.
    col = 4
    row = math.ceil(total / col)
    with torch.no_grad():
        for i, path in enumerate(paths, start=1):
            plt.subplot(row, col, i)
            img_path = str(path)
            img = Image.open(img_path).convert('L')
            # Binarize + invert: pixels below 128 become white (255), the
            # rest become 0 — matching MNIST's light-on-dark convention.
            img = img.point(lambda p: p < 128 and 255)
            tensor = transform(img)
            tensor = tensor.unsqueeze(0).to(device)
            output = model(tensor)
            preds = F.softmax(output, 1)
            v, idx = preds.topk(1)
            img = img.resize((28, 28))
            plt.imshow(img, cmap='gray')
            plt.title("{}: {:.3f}".format(idx.item(), v.item()))
    plt.show()
| StarcoderdataPython |
1759474 | from graph_tool import GraphView
import numpy as np
def number_of_classes(D, edge_labels=np.empty(0), stats=None, print_stats=False):
    """Count the distinct classes of the RDF graph D.

    A class is any target vertex of an rdf:type edge.

    Args:
        D: graph_tool graph; edge property ``c0`` holds hashed predicates.
        edge_labels: optional precomputed array of hashed edge predicates.
        stats: optional dict receiving ``'distinct_classes'``.  The old
            ``stats=dict()`` default was a single shared dict mutated on
            every call; a fresh dict is now created per call.
        print_stats: if True, print the count.

    Returns:
        numpy array of the distinct class vertex ids.
    """
    if stats is None:
        stats = {}
    if edge_labels is None or edge_labels.size == 0:
        edge_labels = np.array([D.ep.c0[p] for p in D.get_edges()])
    # ae98476863dc6ec5 = http://www.w3.org/1999/02/22-rdf-syntax-ns#type
    rdf_type = hash('ae98476863dc6ec5')
    # Keep only rdf:type edges; their targets are the classes.
    C_G = GraphView(D, efilt=edge_labels == rdf_type)
    C_G = np.unique(C_G.get_edges()[:, 1])
    if print_stats:
        print("number of different classes C_G: %s" % C_G.size)
    stats['distinct_classes'] = C_G.size
    return C_G
def ratio_of_typed_subjects(D, edge_labels=np.empty(0), stats=None, print_stats=False):
    """Compute (1) the number of distinct typed subjects and (2) the ratio
    of typed subjects among all subjects of the RDF graph D.

    Args:
        D: graph_tool graph; edge property ``c0`` holds hashed predicates.
        edge_labels: optional precomputed array of hashed edge predicates.
        stats: optional dict receiving ``'typed_subjects'`` and
            ``'ratio_of_typed_subjects'``.  The old ``stats=dict()`` default
            was a shared mutable dict; a fresh dict is now created per call.
        print_stats: if True, print both values.

    Returns:
        numpy array of the distinct typed-subject vertex ids.
    """
    if stats is None:
        stats = {}
    if edge_labels is None or edge_labels.size == 0:
        edge_labels = np.array([D.ep.c0[p] for p in D.get_edges()])
    # ae98476863dc6ec5 = http://www.w3.org/1999/02/22-rdf-syntax-ns#type
    rdf_type = hash('ae98476863dc6ec5')
    # Sources of rdf:type edges are the typed subjects.
    S_C_G = GraphView(D, efilt=edge_labels == rdf_type)
    S_C_G = np.unique(S_C_G.get_edges()[:, 0])
    if print_stats:
        print("number of different typed subjects S^{C}_G: %s" % S_C_G.size)
    # Subjects are the vertices with at least one outgoing edge.
    S_G = GraphView(D, vfilt=D.get_out_degrees(D.get_vertices()))
    if print_stats:
        print("ratio of typed subjects r_T(G): %s" % (float(S_C_G.size) / S_G.num_vertices()))
    stats['typed_subjects'], stats['ratio_of_typed_subjects'] = S_C_G.size, (float(S_C_G.size) / S_G.num_vertices())
    return S_C_G
def collect_number_of_classes(D, edge_labels, vals=None, stats=None, print_stats=False):
    """Map-side collector: merge the classes found in D into ``vals``.

    The old defaults ``vals=set()`` / ``stats=dict()`` were mutable objects
    shared across calls (and made the ``vals is None`` guard dead code);
    both now default to None and are created fresh per call.
    """
    if vals is None:
        vals = set()
    if stats is None:
        stats = {}
    return vals | set(number_of_classes(D, edge_labels, stats, print_stats))
def reduce_number_of_classes(vals, D, C_G, stats=None):
    """Reduce-side: record the number of distinct collected classes.

    ``stats`` previously defaulted to a shared mutable dict (``{}``) that
    this function mutates, leaking results across calls; a fresh dict is
    now created per call.  ``D`` and ``C_G`` are accepted for interface
    symmetry with the other reducers but are not used here.
    """
    if stats is None:
        stats = {}
    stats['distinct_classes'] = len(vals)
def collect_ratio_of_typed_subjects(D, edge_labels, vals=None, stats=None, print_stats=False):
    """Map-side collector: merge the typed subjects found in D into ``vals``.

    The old defaults ``vals=set()`` / ``stats=dict()`` were mutable objects
    shared across calls; both now default to None and are created fresh.
    """
    if vals is None:
        vals = set()
    if stats is None:
        stats = {}
    return vals | set(ratio_of_typed_subjects(D, edge_labels, stats, print_stats))
def reduce_ratio_of_typed_subjects(vals, D, S_G, stats=None):
    """Reduce-side: record the typed-subject count and ratio in ``stats``.

    ``stats`` previously defaulted to a shared mutable dict (``{}``) that
    this function mutates; a fresh dict is now created per call.  The
    ``S_G`` parameter is overwritten before use (kept for interface
    compatibility with callers).
    """
    if stats is None:
        stats = {}
    # Subjects are the vertices of D with at least one outgoing edge.
    S_G = GraphView(D, vfilt=D.get_out_degrees(D.get_vertices()))
    stats['typed_subjects'], stats['ratio_of_typed_subjects'] = len(vals), (float(len(vals)) / S_G.num_vertices())
# Metric functions exported by this module, and the group name under which
# they are registered.
METRICS = [ number_of_classes, ratio_of_typed_subjects ]
METRICS_SET = { 'TYPED_SUBJECTS_OBJECTS': METRICS }
# Keys these metrics write into the shared stats dict.
LABELS = [ 'distinct_classes', 'typed_subjects', 'ratio_of_typed_subjects' ]
| StarcoderdataPython |
6545317 | from old import *
import graphics
import spin2008
import run5note
| StarcoderdataPython |
30642 | from serendipity.linear_structures.singly_linked_list import LinkedList
class Set:
    """Unordered collection of unique elements backed by a singly linked
    list.

    Membership is checked by a linear scan of the underlying list, so
    ``contains`` and ``add`` are O(n).
    """
    def __init__(self):
        self._list = LinkedList()
    def get_size(self):
        """Return the number of stored elements."""
        return self._list.get_size()
    def is_empty(self):
        """Return True if the set holds no elements."""
        return self._list.is_empty()
    def contains(self, e):
        """Return True if e is in the set."""
        return self._list.contains(e)
    def add(self, e):
        """Insert e unless it is already present."""
        if not self._list.contains(e):
            self._list.add_first(e)
    # Pythonic protocol support (backward-compatible additions): allow
    # len(s) and `e in s` in addition to the explicit methods above.
    def __len__(self):
        return self._list.get_size()
    def __contains__(self, e):
        return self._list.contains(e)
    # TODO: remove(e) could delegate to the linked list's element removal;
    # deliberately left unimplemented (as in the original).
| StarcoderdataPython |
6700977 | <filename>treadmill/api/trace.py
"""Implementation of state API."""
import logging
from .. import context
from .. import schema
from .. import exc
from .. import zknamespace as z
_LOGGER = logging.getLogger(__name__)
class API(object):
    """Treadmill State REST api.

    Maintains an in-memory mirror of the ZooKeeper /tasks subtree via
    children watches and exposes a single ``get`` accessor.
    """
    def __init__(self):
        zkclient = context.GLOBAL.zk.conn
        # Mutable closure state shared by the watch callbacks below:
        # maps app name -> list of instance ids.
        cell_state = {
            'tasks': {}
        }
        def _watch_app_tasks(app_task_path, app_task_info):
            """Define a watcher that will update app_task_info for each of the
            app names.
            """
            @exc.exit_on_unhandled
            @zkclient.ChildrenWatch(app_task_path)
            def _watch_task(instanceids):
                # Replace list contents in place so the shared reference
                # held in cell_state stays valid.
                app_task_info[:] = instanceids
                return True
        @exc.exit_on_unhandled
        @zkclient.ChildrenWatch(z.TASKS)
        def _watch_tasks(tasks):
            """Watch /tasks data."""
            tasks_set = set(tasks)
            # Start a per-app watch for any newly appeared task node.
            for new_task in tasks_set - cell_state['tasks'].keys():
                app_task_path = z.path.task(new_task)
                app_task_info = cell_state['tasks'].setdefault(new_task, [])
                _watch_app_tasks(app_task_path, app_task_info)
            # Drop tasks that disappeared.  Iterate over a snapshot: the
            # original iterated the dict while popping from it, which can
            # raise "dictionary changed size during iteration".
            for task in list(cell_state['tasks']):
                if task not in tasks_set:
                    cell_state['tasks'].pop(task)
            return True
        @schema.schema({'$ref': 'app.json#/resource_id'})
        def get(rsrc_id):
            """Get trace information for a given application name.
            """
            tasks = cell_state['tasks'].get(rsrc_id)
            if tasks:
                return {
                    'name': rsrc_id,
                    'instances': tasks
                }
            return None
        self.get = get
def init(_authorizer):
    """Returns module API wrapped with authorizer function.

    The authorizer argument is intentionally ignored: there is no
    authorization for the state api.
    """
    # There is no authorization for state api.
    return API()
| StarcoderdataPython |
11357112 | <filename>scrapper_app/spiders/kp_movies_spider.py
from scrapy_redis.spiders import RedisSpider
from scrapper_app.items import MovieDetailsItem
from scrapper_app.loaders import load_movie_details
import os
class KpMoviesSpider(RedisSpider):
    """
    Parser for movie pages from kinopoisk.ru.

    Instances consume URLs from the Redis key below (scrapy-redis) and emit
    MovieDetailsItem objects persisted by the configured pipelines.
    """
    name = 'kp_movies_spider'
    redis_key = 'movies_2021'
    custom_settings = {
        'ITEM_PIPELINES': {
            'scrapy_redis.pipelines.RedisPipeline': 300,
            'scrapper_app.pipelines.DbPipeline': 500,
        },
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
            # Free proxies, as an alternative to scrapy_zyte_smartproxy
            # 'rotating_free_proxies.middlewares.RotatingProxyMiddleware': 610,
            # 'scrapper_app.ban_policy.CaptchaPolicy': 620,
            'scrapy_zyte_smartproxy.ZyteSmartProxyMiddleware': 610,
            'scrapy_user_agents.middlewares.RandomUserAgentMiddleware': 800,
        },
        # Very conservative throttling (5-minute delays) to avoid bans.
        'AUTOTHROTTLE_ENABLED': True,
        'AUTOTHROTTLE_START_DELAY': 60*5,
        'DOWNLOAD_DELAY': 60*5,
        'DOWNLOAD_TIMEOUT': 600,
        'CONCURRENT_REQUESTS': 64,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 64,
    }
    zyte_smartproxy_enabled = True
    zyte_smartproxy_apikey = os.environ.get('ZYTE_API_KEY')
    def parse(self, response, **kwargs):
        # The movie id arrives via request meta set by the scheduler.
        yield load_movie_details(MovieDetailsItem(), response, id=response.meta['movie_ref'])
| StarcoderdataPython |
8035704 | import csv, codecs, cStringIO
from django.http import HttpResponse
# also include UnicodeWriter from the Python docs http://docs.python.org/library/csv.html
class UTF8Recoder:
    """
    Iterator that reads an encoded stream and reencodes the input to UTF-8

    (Python 2 helper from the stdlib csv docs: uses the old-style ``next``
    iterator protocol.)
    """
    def __init__(self, f, encoding):
        self.reader = codecs.getreader(encoding)(f)
    def __iter__(self):
        return self
    def next(self):
        return self.reader.next().encode("utf-8")
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    (Python 2 recipe from the stdlib csv docs: each row is written as UTF-8
    into an in-memory queue, then re-encoded into the target encoding and
    flushed to the output stream.)
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()
    def writerow(self, row):
        # NOTE(review): assumes every cell is a unicode string with an
        # .encode method; non-string values would need str() first — confirm
        # against callers.
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)
    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
class ExportModel(object):
    """Serialises a queryset description to a CSV file on disk."""
    @staticmethod
    def as_csv(meta):
        """Write meta['queryset'] to meta['file'] as UTF-8 CSV.

        ``meta`` is a dict with keys 'file' (output path), 'header' (column
        titles), 'fields' (attribute/key names per row) and 'queryset'
        (rows may be dicts or model-like objects).

        Returns the path of the written file.
        """
        with open(meta['file'], 'w+') as f:
            writer = UnicodeWriter(f, encoding='utf-8')
            writer.writerow(meta['header'])
            for obj in meta['queryset']:
                # Rows may be plain dicts (values queryset) or objects.
                if isinstance(obj, dict):
                    row = [unicode(obj[field]) for field in meta['fields']]
                else:
                    row = [unicode(getattr(obj, field)) for field in meta['fields']]
                writer.writerow(row)
            path = f.name
        return path
def get_model_as_csv_file_response(meta, content_type, filename):
    """
    Call this function from your model admin

    Writes the queryset described by ``meta`` to disk via
    ExportModel.as_csv, then returns its contents as an attachment
    HttpResponse named ``filename``.
    """
    with open(ExportModel.as_csv(meta), 'r') as f:
        response = HttpResponse(f.read(), content_type=content_type)
        # response['Content-Disposition'] = 'attachment; filename=Extraction.csv;'
        response['Content-Disposition'] = 'attachment; filename=%s;' % filename
    return response
| StarcoderdataPython |
3247941 | <gh_stars>0
# coding: utf-8
"""Módulo para funciones de preprocesamiento de texto."""
import re
def filtrar_cortas(texto, chars=0):
    """Filtra líneas en texto de longitud chars o inferior.

    Drops every line whose length is ``chars`` or less; each kept line is
    followed by a newline in the result.

    Parameters
    ----------
    texto : str
        Texto que se quiere filtrar.
    chars : int
        Mínimo número de caracteres en una línea de texto.

    Returns
    -------
    str
        Texto filtrado.
    """
    # Join once instead of the original quadratic `filtrado += linea + "\n"`.
    return "".join(
        linea + "\n" for linea in texto.splitlines() if len(linea) > chars
    )
def unir_fragmentos(texto):
    """Une fragmentos de palabras partidas por final de línea.

    Removes a hyphen together with the newline(s) that follow it, so
    "pala-\\nbra" becomes "palabra".

    Parameters
    ----------
    texto : str

    Returns
    -------
    str
        Texto con palabras de fin de línea unidas si estaban partidas.
    """
    # ord('-') == 45 (ASCII hyphen-minus).
    patron = re.compile(r'-\n+')
    return patron.sub('', texto)
def separar_guiones(texto):
    """Separa guiones de primera y última palabra de fragmentos de texto.

    Runs of em/en dashes are first collapsed to a single en dash; a dash
    glued to the start or end of a word is then separated by a space on
    the word side.

    Parameters
    ----------
    texto : str

    Returns
    -------
    str
        Texto con guiones de fragmentos separados de las palabras.
    """
    # Collapse any run of em/en dashes (ord 8212 / 8211) into one en dash.
    colapsado = re.sub(r'[—–]{1,}', '–', texto)
    # "<non-word>–word" -> "<non-word>– word"
    colapsado = re.sub(r'(\W)–([A-Za-zÀ-Üà-ü]+)', r'\1– \2', colapsado)
    # "word–<non-word>" -> "word –<non-word>"
    return re.sub(r'([A-Za-zÀ-Üà-ü]+)–(\W)', r'\1 –\2', colapsado)
def separar_numeros(texto):
    """Separa números de palabras que los tienen.

    Inserts a space between digits and the words (2+ letters) or
    parentheses glued to them.

    Parameters
    ----------
    texto : str

    Returns
    -------
    str
        Texto con números separados de palabras.
    """
    # digits followed by a word (2+ letters) or an opening parenthesis
    con_espacios = re.sub(r'(\d+)([A-Za-zÀ-Üà-ü]{2,}?|\()', r'\1 \2', texto)
    # a word (2+ letters) or a closing parenthesis followed by digits
    return re.sub(r'([A-Za-zÀ-Üà-ü]{2,}?|\))(\d+)', r'\1 \2', con_espacios)
def limpiar_extraccion(texto, chars=0, basura=None):
    """Limpieza de texto extraido.

    Full cleanup pipeline: joins hyphen-split words, detaches dashes and
    digits from words, optionally drops short lines and unwanted
    characters, and finally normalizes all whitespace to single spaces.

    Parameters
    ----------
    texto : str
    chars : int
        Mínimo número de caracteres en una línea de texto.
    basura : Iterable
        Caracteres a eliminar.

    Returns
    -------
    str
        Texto procesado.
    """
    limpio = separar_numeros(separar_guiones(unir_fragmentos(texto)))
    if chars:
        limpio = filtrar_cortas(limpio, chars=chars)
    if basura:
        limpio = re.sub(f"[{''.join(basura)}]", '', limpio)
    return ' '.join(limpio.split())
| StarcoderdataPython |
9624530 | import pandas as pd
import os
import pyspark.sql
def python_location():
    """work out the location of the python interpretter - this is needed for Pyspark to initialise

    Windows-only: shells out to ``where python`` and returns the first
    interpreter whose path contains ``\\envs\\`` (i.e. a conda environment).
    Raises IndexError if no such interpreter is listed.
    """
    import subprocess
    import pathlib
    with subprocess.Popen("where python", shell=True, stdout=subprocess.PIPE) as subprocess_return:
        python_exe_paths = subprocess_return.stdout.read().decode('utf-8').split('\r\n')
        env_path = pathlib.Path([x for x in python_exe_paths if '\\envs\\' in x][0])
    return str(env_path)
def initialise_spark():
    """This function creates a spark session if one doesn't already exist (i.e. within databricks this will do nothing)"""
    # NOTE(review): 'spark' can never be in this function's locals() at this
    # point, so this guard is always True; SparkSession.getOrCreate() already
    # reuses an existing session — confirm the guard can be removed.
    if 'spark' not in locals():
        os.environ["PYSPARK_PYTHON"] = python_location()
        spark = pyspark.sql.SparkSession.builder.getOrCreate()
        spark.sparkContext.setLogLevel("ERROR")
    return spark
class DataStore(object):
    """Middle man between the ETL processes and wherever the data is being
    stored.  Initially this is just local CSV files accessed through Spark.
    """
    def __init__(self, source='local', data_format='csv', spark_session=None):
        """Create a store; a Spark session is obtained lazily if not given.

        The original default ``spark_session=initialise_spark()`` was
        evaluated once at import time (Python evaluates defaults when the
        function is defined), starting Spark even if the class was never
        instantiated; session creation is now deferred to construction.
        """
        self.source = source
        self.format = data_format
        self.spark = spark_session if spark_session is not None else initialise_spark()
    def get_table(self, table_name):
        """Extract the 'table_name' data from the 'source' (where it is
        stored in the specified 'format').  Returns a Spark DataFrame, or
        None for unsupported source/format combinations."""
        if self.source=='local' and self.format == 'csv':
            #pdf_table = pd.read_csv(table_name)
            #sdf_table = self.spark.createDataFrame(pdf_table)
            sdf_table = self.spark.read.load(table_name, format='csv', sep=',', inferSchema='true',header='true')
            return sdf_table
    def save_table(self, sdf, table_name):
        """save the 'table_name' to the 'source' in the specified 'format'"""
        if self.source=='local' and self.format == 'csv':
            # Need to take a dataframe and save it in the required format or whatever.
            #sdf.repartition(1).write.csv(table_name) # struggling to get this to work
            sdf.toPandas().to_csv(table_name, index=False, sep=',')
| StarcoderdataPython |
6688405 | <reponame>mikimaus78/ml_monorepo<filename>statarb/src/python/bin/fsck_attr.py
#!/usr/bin/env python
import util
import newdb
def main():
    # Consistency check over the attribute tables: every (key, type, date)
    # group must form a chain of non-overlapping [born, died) intervals,
    # with a value change whenever one interval ends exactly where the
    # next begins.  (Python 2 module: print statements.)
    util.info("Checking Attributes")
    for table in ('co_attr_d', 'co_attr_n', 'co_attr_s', 'sec_attr_s', 'sec_attr_n'):
        print "Looking at %s" % table
        # co_* tables key on company id, sec_* tables on security id.
        if table.startswith('co'):
            key = 'coid'
        else:
            key = 'secid'
        db.execute("SELECT DISTINCT %s, type, date FROM %s" % (key, table))
        rows_read = 0
        for row in db._curs.fetchall():
            rows_read += 1
            if rows_read % 10000 == 0: print "Lines seen: %d" % rows_read
            #skip backfill data once that stuff is worked out...
            # NOTE(review): the first %s binds the key COLUMN NAME as a query
            # parameter; most drivers will quote it as a string literal —
            # confirm this comparison does what is intended.
            db.execute("SELECT type, value, born, died FROM " + table + " WHERE %s=%s AND type=%s AND date=%s ORDER BY born", (key, row[key], row['type'], row['date']))
            attrs = db._curs.fetchall()
            if len(attrs)==1:
                assert attrs[0]['died'] is None or attrs[0]['died'] > attrs[0]['born']
                continue
            # Multiple versions: each must die before the next is born.
            for i in range(len(attrs)):
                if i < len(attrs)-1:
                    assert attrs[i]['died'] is not None, attrs[i]
                    assert attrs[i]['died'] > attrs[i]['born'], attrs[i]
                    assert attrs[i]['died'] <= attrs[i+1]['born'], (attrs[i], attrs[i+1])
                    if attrs[i]['died'] == attrs[i+1]['born']:
                        assert attrs[i]['value'] != attrs[i+1]['value'], (attrs[i], attrs[i+1])
                else:
                    assert attrs[i]['died'] is None or attrs[i]['died'] > attrs[i]['born'], attrs[i]
        print "Top 10 company attribute counts"
        db.execute("SELECT %s, COUNT(*) AS count FROM %s GROUP BY %s ORDER BY count DESC LIMIT 10" % (key, table, key))
        for row in db._curs.fetchall():
            print row
        print "Top 10 attribute counts"
        db.execute("SELECT a.name, COUNT(*) AS count FROM " + table + " JOIN attribute_type a on type = a.code GROUP BY a.name ORDER BY count DESC LIMIT 10")
        for row in db._curs.fetchall():
            print row
if __name__ == "__main__":
    util.set_debug()
    newdb.init_db()
    # `db` is read as a module-level global by main().
    db = newdb.get_db()
    main()
| StarcoderdataPython |
232081 | <reponame>ywen666/CodeXGLUE<filename>Code-Code/CodeCompletion-token/code/dataset.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import gc
import shutil
import json
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
class TextDataset(Dataset):
    """Pre-training dataset: tokenizes whole functions and packs the token
    stream into fixed-size blocks, sharded across distributed ranks and
    cached on disk under args.output_dir."""
    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        # Resolve this process's shard identity for distributed training.
        if args.local_rank==-1:
            local_rank=0
            world_size=1
        else:
            local_rank=args.local_rank
            world_size=torch.distributed.get_world_size()
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        # Cache path encodes language set, block size and shard identity.
        cached_file = os.path.join(args.output_dir, file_type+"_langs_%s"%(args.langs)+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            if file_type == 'train':
                logger.warning("Loading features from cached file %s", cached_file)
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)
        else:
            self.inputs = []
            if args.langs == 'all':
                langs = os.listdir(args.data_dir)
            else:
                langs = [args.langs]
            data=[]
            for lang in langs:
                datafile = os.path.join(args.data_dir, lang, file_type+'.pkl')
                if file_type == 'train':
                    logger.warning("Creating features from dataset file at %s", datafile)
                # with open(datafile) as f:
                #     data.extend([json.loads(x)['code'] for idx,x in enumerate(f.readlines()) if idx%world_size==local_rank])
                dataset = pickle.load(open(datafile, 'rb'))
                # Each rank keeps only every world_size-th example (its shard),
                # wrapping each function in <s> ... </s> markers.
                data.extend(['<s> '+' '.join(x['function'].split())+' </s>' for idx,x in enumerate(dataset) if idx%world_size==local_rank])
            # random.shuffle(data)
            data = data
            length = len(data)
            logger.warning("Data size: %d"%(length))
            input_ids = []
            for idx,x in enumerate(data):
                # Examples that fail to tokenize are silently skipped.
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    pass
                if idx % (length//10) == 0:
                    percent = idx / (length//10) * 10
                    logger.warning("Rank %d, load %d"%(local_rank, percent))
            del data
            gc.collect()
            # Chop the flat token stream into contiguous block_size windows.
            length = len(input_ids)
            for i in range(0, length-block_size, block_size):
                self.inputs.append(input_ids[i : i + block_size])
            del input_ids
            gc.collect()
            if file_type == 'train':
                logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
                logger.warning("Saving features into cached file %s", cached_file)
            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)
    def __len__(self):
        return len(self.inputs)
    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])
class finetuneDataset(Dataset):
    """Fine-tuning dataset: reads one example per line from
    ``{data_dir}/{file_type}.txt``, wraps lines in <s>...</s> if needed,
    concatenates all tokens and slices this rank's share into fixed-size
    blocks.  Results are cached per (block size, world size, rank)."""
    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if args.local_rank==-1:
            local_rank=0
            world_size=1
        else:
            local_rank=args.local_rank
            world_size=torch.distributed.get_world_size()
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            if file_type == 'train':
                logger.warning("Loading features from cached file %s", cached_file)
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)
        else:
            self.inputs = []
            datafile = os.path.join(args.data_dir, f"{file_type}.txt")
            if file_type == 'train':
                logger.warning("Creating features from dataset file at %s", datafile)
            with open(datafile) as f:
                data = f.readlines()
            length = len(data)
            logger.info("Data size: %d"%(length))
            input_ids = []
            for idx,x in enumerate(data):
                x = x.strip()
                # Ensure every example carries <s> ... </s> boundary tokens.
                if x.startswith("<s>") and x.endswith("</s>"):
                    pass
                else:
                    x = "<s> " + x + " </s>"
                # Examples that fail to tokenize are silently skipped.
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    pass
                if idx % (length//10) == 0:
                    percent = idx / (length//10) * 10
                    logger.warning("Rank %d, load %d"%(local_rank, percent))
            del data
            gc.collect()
            # Split the token stream evenly across ranks, then into blocks.
            length = len(input_ids) // world_size
            logger.info(f"tokens: {length*world_size}")
            input_ids = input_ids[local_rank*length: (local_rank+1)*length]
            for i in range(0, length-block_size, block_size):
                self.inputs.append(input_ids[i : i + block_size])
            del input_ids
            gc.collect()
            if file_type == 'train':
                logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
                logger.warning("Saving features into cached file %s", cached_file)
            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)
    def __len__(self):
        return len(self.inputs)
    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])
class EvalDataset(Dataset):
    """Evaluation dataset: tokenizes ``{data_dir}/{file_type}.txt`` into one
    stream and cuts it into blocks that end on token boundaries (see
    ``split``), padding each block to block_size."""
    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)
        else:
            self.inputs = []
            datafile = os.path.join(args.data_dir, f"{file_type}.txt")
            with open(datafile) as f:
                data = f.readlines()
            length = len(data)
            logger.info("Data size: %d"%(length))
            input_ids = []
            for idx,x in enumerate(data):
                x = x.strip()
                # Ensure every example carries <s> ... </s> boundary tokens.
                if x.startswith("<s>") and x.endswith("</s>"):
                    pass
                else:
                    x = "<s> " + x + " </s>"
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    pass
                if idx % (length//10) == 0:
                    percent = idx / (length//10) * 10
                    logger.warning("load %d"%(percent))
            del data
            gc.collect()
            logger.info(f"tokens: {len(input_ids)}")
            self.split(input_ids, tokenizer, logger, block_size=block_size)
            del input_ids
            gc.collect()
            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)
    def split(self, input_ids, tokenizer, logger, block_size=1024):
        """Cut input_ids into blocks of at most block_size tokens, backing
        up from each block's end to the start of a word (a '\u0120'-prefixed
        token, a <NUM_LIT token, or a bos/eos/sep boundary) so no word is
        split across blocks; each block is right-padded with pad tokens."""
        sample = []
        i = 0
        while i < len(input_ids):
            sample = input_ids[i: i+block_size]
            if len(sample) == block_size:
                # Scan backwards for a safe cut point.
                for j in range(block_size):
                    if tokenizer.convert_ids_to_tokens(sample[block_size-1-j])[0] == '\u0120' or tokenizer.convert_ids_to_tokens(sample[block_size-1-j]).startswith("<NUM_LIT"):
                        break
                    if sample[block_size-1-j] in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id]:
                        if sample[block_size-1-j] != tokenizer.bos_token_id:
                            j -= 1
                        break
                # No boundary inside the whole block: cannot split safely.
                if j == block_size-1:
                    print(tokenizer.decode(sample))
                    exit()
                sample = sample[: block_size-1-j]
                # print(len(sample))
            i += len(sample)
            pad_len = block_size-len(sample)
            sample += [tokenizer.pad_token_id]*pad_len
            self.inputs.append(sample)
            if len(self.inputs) % 10000 == 0:
                logger.info(f"{len(self.inputs)} samples")
    def __len__(self):
        return len(self.inputs)
    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])
class lineDataset(Dataset):
    """Line-completion dataset: each line of ``{data_dir}/{file_type}.json``
    is a JSON object with an "input" context (kept as the last block_size
    tokens) and a "gt" ground-truth string."""
    def __init__(self, tokenizer, args, logger, file_type='test', block_size=924):
        datafile = os.path.join(args.data_dir, f"{file_type}.json")
        with open(datafile) as f:
            datas = f.readlines()
        length = len(datas)
        logger.info("Data size: %d"%(length))
        self.inputs = []
        self.gts = []
        for data in datas:
            data = json.loads(data.strip())
            # Keep only the trailing block_size tokens of the context.
            self.inputs.append(tokenizer.encode(data["input"])[-block_size:])
            self.gts.append(data["gt"])
    def __len__(self):
        return len(self.inputs)
    def __getitem__(self, item):
        return torch.tensor(self.inputs[item]), self.gts[item]
class Example(object):
    """One source/target text pair used for training or evaluation."""

    def __init__(self, idx, source, target):
        # Sequential identifier of the example within its dataset.
        self.idx = idx
        # Raw input text.
        self.source = source
        # Expected output text.
        self.target = target
def read_examples(filename):
    """Read examples from filename.

    ``filename`` is a comma-separated pair "source_path,target_path"; the
    two files are read line by line in lockstep and each pair of stripped
    lines becomes one Example.
    """
    parts = filename.split(',')
    assert len(parts) == 2
    src_filename, trg_filename = parts
    examples = []
    with open(src_filename) as src_file, open(trg_filename) as trg_file:
        for idx, (src_line, trg_line) in enumerate(zip(src_file, trg_file)):
            examples.append(
                Example(
                    idx=idx,
                    source=src_line.strip(),
                    target=trg_line.strip(),
                )
            )
    return examples
class InputFeatures(object):
    """Tokenized model inputs derived from a single Example."""

    def __init__(self, example_id, source_ids, target_ids, source_mask, target_mask):
        # Index of the originating Example.
        self.example_id = example_id
        # Padded token-id sequences for source and target.
        self.source_ids = source_ids
        self.target_ids = target_ids
        # Attention masks: 1 for real tokens, 0 for padding.
        self.source_mask = source_mask
        self.target_mask = target_mask
def convert_examples_to_features(examples, tokenizer, args, logger, stage=None):
    """Tokenize, truncate to args.block_size and pad each Example into an
    InputFeatures.  At stage=="test" the target is the placeholder "None".
    The first 5 training examples are logged for inspection."""
    features = []
    for example_index, example in enumerate(examples):
        #source
        # Some tokenizers (e.g. GPT-2) have no pad token; reuse EOS.
        if tokenizer.pad_token_id is None:
            tokenizer.pad_token = tokenizer.eos_token
        # tokenizer.add_special_tokens({
        #     'pad_token': '<pad>',
        #     'cls_token': '<s>',
        #     'sep_token': '</s>'
        # })
        #source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2]
        #source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token]
        source_tokens = tokenizer.tokenize(example.source)[:args.block_size]
        source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
        source_mask = [1] * (len(source_tokens))
        # Right-pad ids and mask up to the block size.
        padding_length = args.block_size - len(source_ids)
        source_ids+=[tokenizer.pad_token_id]*padding_length
        source_mask+=[0]*padding_length
        #target
        if stage=="test":
            target_tokens = tokenizer.tokenize("None")
        else:
            #target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2]
            target_tokens = tokenizer.tokenize(example.target)[:args.block_size]
        #target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token]
        target_tokens = target_tokens
        target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
        target_mask = [1] *len(target_ids)
        padding_length = args.block_size - len(target_ids)
        target_ids+=[tokenizer.pad_token_id]*padding_length
        target_mask+=[0]*padding_length
        if example_index < 5:
            if stage=='train':
                logger.info("*** Example ***")
                logger.info("idx: {}".format(example.idx))
                logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens]))
                logger.info("source_ids: {}".format(' '.join(map(str, source_ids))))
                logger.info("source_mask: {}".format(' '.join(map(str, source_mask))))
                logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens]))
                logger.info("target_ids: {}".format(' '.join(map(str, target_ids))))
                logger.info("target_mask: {}".format(' '.join(map(str, target_mask))))
        features.append(
            InputFeatures(
                example_index,
                source_ids,
                target_ids,
                source_mask,
                target_mask,
            )
        )
    return features
class JavafinetuneDataset(torch.utils.data.Dataset):
    """Dataset over parallel source/target files ("src,tgt" filename pair)
    that packs several question/answer pairs into one max_length sample."""
    def __init__(self, tokenizer, filename, max_length=512, test=False):
        self.max_length = max_length
        self.tokenizer = tokenizer
        self.examples = self.read_examples(filename)
        self.test = test
    def read_examples(self, filename):
        """Read examples from filename."""
        examples = {'source': [], 'target': []}
        assert len(filename.split(','))==2
        src_filename = filename.split(',')[0]
        trg_filename = filename.split(',')[1]
        with open(src_filename) as f1,open(trg_filename) as f2:
            for line1,line2 in zip(f1,f2):
                examples['source'].append(line1.strip()),
                examples['target'].append(line2.strip()),
        return examples
    def pack_samples(self, idx):
        """
        Repeatedly pick question, answer pairs from self.dataroot until we hit max_tokens.
        This will not include the tokens for the QUESTION and ANSWER prompt, as well as the
        self.question_prefix. These will be added later and the total input will be
        truncated if necessary.
        Always include the sample at idx at the beginning.
        """
        curr_num_tokens = 0
        curr_samples = []
        curr_q, curr_a = self.examples['source'][idx], self.examples['target'][idx]
        if self.test:
            curr_samples.append((curr_q, curr_a))
            return curr_samples
        while curr_num_tokens < self.max_length:
            # Never remove. Fixes stalling bug.
            curr_q = curr_q[:150000]
            curr_a = curr_a[:150000]
            curr_num_tokens += len(self.tokenizer.tokenize(curr_q))
            curr_num_tokens += len(self.tokenizer.tokenize(curr_a))
            curr_samples.append((curr_q, curr_a))
            # Pad the sample with additional randomly chosen pairs.
            random_idx = random.choice(range(len(self.examples['target'])))
            curr_q = self.examples['source'][random_idx]
            curr_a = self.examples['target'][random_idx]
        return curr_samples
    def __len__(self):
        return min(len(self.examples['source']),
                   len(self.examples['target']))
    def __getitem__(self, idx):
        input_ids = []
        label_ids = []
        raw_samples = self.pack_samples(idx)
        # NOTE(review): q_str/a_str from raw_samples are immediately
        # overwritten with the pair at `idx`, so every loop iteration encodes
        # the same example — confirm whether the packed pairs were meant to
        # be used here.
        for q_str, a_str in raw_samples:
            q_str = self.tokenizer.cls_token + self.examples['source'][idx] + \
                self.tokenizer.sep_token
            a_str = self.examples['target'][idx]
            question_token_ids = self.tokenizer.encode(q_str, verbose=False)
            answer_token_ids = self.tokenizer.encode(a_str, verbose=False)
            answer_token_ids.append(self.tokenizer.eos_token_id)
            input_ids.extend(question_token_ids)
            input_ids.extend(answer_token_ids)
            # Question positions are masked out of the loss with -100.
            label_ids.extend([-100] * len(question_token_ids))
            label_ids.extend(answer_token_ids)
        if self.test:
            return {'q_str': q_str}
        else:
            # Cut off the excess
            input_ids = input_ids[:self.max_length]
            label_ids = label_ids[:self.max_length]
            padding_length = self.max_length - len(input_ids)
            if padding_length > 0:
                input_ids += [self.tokenizer.pad_token_id] * padding_length
                label_ids += [self.tokenizer.pad_token_id] * padding_length
            retval = {
                "input_ids" : torch.LongTensor(input_ids),
                "labels" : torch.LongTensor(label_ids)
            }
            gc.collect()
            return retval
if __name__ == '__main__':
    # Smoke test: build the dataset over the dev split and print one
    # decoded example with its input_ids and (unmasked) labels.
    import argparse
    import transformers
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    data_prefix='/scratch1/08401/ywen/data/c2c_data'
    args.trainfile='{}/context_data.final,{}/body_data.final'.format(data_prefix, data_prefix)
    args.devfile='{}/context_data.final.100,{}/body_data.final.100'.format(data_prefix, data_prefix)
    args.output_dir = 'save/c2c_data_gptneo'
    tokenizer = transformers.GPT2Tokenizer.from_pretrained(
        'EleutherAI/gpt-neo-125M',
        do_lower_case=False,
        sep_token='</s>', cls_token='<s>',
        pad_token='<pad>', unk_token='<|UNKNOWN|>')
    dataset = JavafinetuneDataset(tokenizer, args.devfile, max_length=512)
    #dev_dataset = JavafinetuneDataset(tokenizer, args, logger, file_type='dev', block_size=1024)
    e = dataset[0]
    print(e)
    print("------- input_ids ------------------------------------------------------------------------------------")
    print(tokenizer.decode(e['input_ids']))
    print("------- labels ------------------------------------------------------------------------------------")
    labels = e['labels']
    # Replace the -100 loss-mask positions so the labels can be decoded.
    labels[labels == -100] = tokenizer.eos_token_id
    labels_str = tokenizer.decode(labels)
    print(labels_str)
import pdb; pdb.set_trace() | StarcoderdataPython |
6652209 | <reponame>pylangstudy/201707<gh_stars>0
# dir() is provided mainly as a convenience for use at the interactive prompt.
# It returns an "interesting" set of names rather than a rigorously or
# consistently defined one.
print(dir())
class A: pass
print()
print(dir(A))    # attributes of the class object itself
print()
print(dir(A()))  # attributes of an instance
class B:
    # __dir__ lets a class customise what dir() reports for its instances.
    def __dir__(self): return ['BBB']
print()
print(dir(B))    # dir() on the class uses the default lookup
print()
print(dir(B()))  # dir() on an instance honours B.__dir__ -> ['BBB']
| StarcoderdataPython |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Use Mayavi to visualize the structure of a VolumeGrid
"""
from enthought.mayavi import mlab
import numpy as np
from enthought.tvtk.api import tvtk
# Build a 4x4x4 lattice of points ...
dims = (4, 4, 4)
x, y, z = np.mgrid[0.:dims[0], 0:dims[1], 0:dims[2]]
x = np.reshape(x.T, (-1,))
y = np.reshape(y.T, (-1,))
z = np.reshape(z.T, (-1,))
# ... and warp it so the grid structure is visible.
y += 0.3*np.sin(x)
z += 0.4*np.cos(x)
x += 0.05*y**3
sgrid = tvtk.StructuredGrid(dimensions=(dims[0], dims[1], dims[2]))
sgrid.points = np.c_[x, y, z]
# Random scalar per point, used only for colouring.
s = np.random.random((dims[0]*dims[1]*dims[2]))
sgrid.point_data.scalars = np.ravel(s.copy())
sgrid.point_data.scalars.name = 'scalars'
mlab.figure(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))
mlab.clf()
# Translucent surface + black edges + cube glyphs at each grid point.
mlab.pipeline.surface(sgrid, opacity=0.4)
mlab.pipeline.surface(mlab.pipeline.extract_edges(sgrid), color=(0, 0, 0))
mlab.pipeline.glyph(sgrid, mode='cube', scale_factor=0.2, scale_mode='none')
mlab.savefig('volume_grid.jpg')
mlab.show()
| StarcoderdataPython |
370209 | # from app import app
import urllib.request
import json
from .models import Source,Article
# Source = source.Source
# API key, populated by configure_request() at app start-up.
api_key = None
# URL templates for the news API, populated by configure_request().
# NOTE(review): configure_request assigns the global `sources_base_url`
# (with an "s"), so this `source_base_url` is never updated — confirm which
# spelling the rest of the app expects.
source_base_url = None
article_base_url = None
def configure_request(app):
    '''
    Function to acquire the api key and base urls

    Pulls NEWS_API_KEY, NEWS_API_BASE_URL and EVERYTHING_SOURCE_BASE_URL
    from the Flask app config into module-level globals.

    NOTE(review): this binds the global `sources_base_url`, while the module
    initialises `source_base_url` (no "s") above — the two names never meet;
    confirm which spelling is intended.
    '''
    global api_key,sources_base_url,article_base_url
    api_key = app.config['NEWS_API_KEY']
    sources_base_url = app.config['NEWS_API_BASE_URL']
    article_base_url = app.config['EVERYTHING_SOURCE_BASE_URL']
def get_sources(category):
    '''
    Function that gets the json response to our url request

    Fetches the news sources for `category` and returns them as a list of
    Source objects, or None if the response contains no sources.
    '''
    get_sources_url = sources_base_url.format(category)
    with urllib.request.urlopen(get_sources_url) as url:
        get_sources_data = url.read()
        get_sources_response = json.loads(get_sources_data)
        sources_results = None
        if get_sources_response['sources']:
            sources_results_list = get_sources_response['sources']
            sources_results = process_sources(sources_results_list)
    return sources_results
def process_sources(sources_results):
    '''
    Function that processes the sources result and transform them to a list of Objects

    Args:
        sources_results: A list of dictionaries that contain sources details

    Returns :
        sources_list: A list of sources objects
    '''
    return [
        Source(
            item.get('id'),
            item.get('name'),
            item.get('description'),
            item.get('url'),
            item.get('category'),
        )
        for item in sources_results
    ]
def get_article(source):
    '''
    Fetch articles for a single news source from the news API.
    Args:
        source: id of the news source whose articles are requested
    Returns:
        A list of Article objects, or None when the response has no articles
    '''
    # BUG FIXES vs. original: the url template is the module global
    # ``article_base_url`` set by configure_request (``base_url`` was
    # undefined); the module is ``urllib`` (was misspelled ``urrlib``); the
    # URL *string* must be opened (the original passed the function object
    # itself); the helper is ``process_article`` (``process_results`` does
    # not exist); and NewsAPI returns its payload under the 'articles' key.
    get_article_url = article_base_url.format(source, api_key)
    with urllib.request.urlopen(get_article_url) as url:
        get_article_data = url.read()
        get_article_response = json.loads(get_article_data)
        article_results = None
        if get_article_response.get('articles'):
            article_results_list = get_article_response['articles']
            article_results = process_article(article_results_list)
    return article_results
def process_article(article_results):
    '''
    Function that processes the article result and transform them to a list of objects
    Args:
        article_results: A list of dictionaries that contains article details
    Returns :
        article_list: A list of Article objects; entries missing a date,
        author or image are skipped
    '''
    article_list = []
    for article_item in article_results:
        author = article_item.get('author')
        title = article_item.get('title')
        description = article_item.get('description')
        url = article_item.get('url')
        image = article_item.get('urlToImage')
        # BUG FIX: the news API field is camelCase 'publishedAt'; the
        # original looked up 'publishedat', which always returned None and
        # (because of the guard below) silently dropped every article.
        date = article_item.get('publishedAt')
        if date and author and image:
            article_object = Article(author, title, description, url, image, date)
            article_list.append(article_object)
    return article_list
| StarcoderdataPython |
11204832 | from django.contrib import admin
from .models import Book
@admin.register(Book)
class ExamplesAdmin(admin.ModelAdmin):
    """Django admin configuration for the Book model."""
    # Columns shown on the Book changelist page in the admin site.
    list_display = ('id', 'title', 'publication_date', 'author', 'price', 'pages', 'book_type', 'timestamp', 'editora')
6403004 | <reponame>VirtualVFix/AndroidTestFramework
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "09/27/17 14:58"
from .base import Base
class Jenkins(Base):
    """
    Test default config. This config is part of :class:`src.libs.core.register.Register`
    """
    INTEGRATE = False #: Use Jenkins integration when True
    def __init__(self):
        # No Jenkins-specific state yet; defer entirely to the Base config.
        super(Jenkins, self).__init__()
| StarcoderdataPython |
3597183 | # coding: utf-8
"""
Module `chatette.units.ast`
Contains the data structure holding the Abstract Syntax Tree generated
when parsing the template files.
NOTE: this is not exactly an AST as it is not a tree, but it has the same
purpose as an AST in a compiler, i.e. an intermediate representation
of the parsed information used to generate the output.
"""
from chatette.utils import \
Singleton, UnitType, remove_duplicates, extend_list_in_dict
from chatette.statistics import Stats
from chatette.units.modifiable.definitions.alias import AliasDefinition
from chatette.units.modifiable.definitions.slot import SlotDefinition
from chatette.units.modifiable.definitions.intent import IntentDefinition
class AST(Singleton):
    """
    Intermediate representation of the parsed template files: three
    dictionaries of unit definitions (aliases, slots and intents), each
    keyed by unit identifier. Implemented as a singleton.
    """
    _instance = None  # singleton storage used by `Singleton`
    def __init__(self):
        self._alias_definitions = dict()
        self._slot_definitions = dict()
        self._intent_definitions = dict()
        # Global statistics collector, updated on every add/remove below.
        self.stats = Stats.get_or_create()
    def _get_relevant_dict(self, unit_type):
        """Returns the dict that stores units of type `unit_type`."""
        if unit_type == UnitType.alias:
            return self._alias_definitions
        elif unit_type == UnitType.slot:
            return self._slot_definitions
        elif unit_type == UnitType.intent:
            return self._intent_definitions
        else:
            raise TypeError(
                "Tried to get a definition with wrong type " + \
                "(expected alias, slot or intent)"
            )
    def __getitem__(self, unit_type):
        """
        Returns the dictionary requested.
        `unit_type` can be either a str ("alias", "slot" or "intent")
        or a `UnitType`.
        @raises: - `KeyError` if `unit_type` is an invalid str.
                 - `TypeError` if `unit_type` is neither a str nor a `UnitType`.
        """
        if isinstance(unit_type, str):
            # Map the textual name onto the corresponding enum member.
            if unit_type == UnitType.alias.value:
                unit_type = UnitType.alias
            elif unit_type == UnitType.slot.value:
                unit_type = UnitType.slot
            elif unit_type == UnitType.intent.value:
                unit_type = UnitType.intent
            else:
                raise KeyError(
                    "Invalid key for the AST: '" + unit_type + "'."
                )
        elif not isinstance(unit_type, UnitType):
            raise TypeError(
                "Invalid type of key: " + unit_type.__class__.__name__ + "."
            )
        return self._get_relevant_dict(unit_type)
    def _add_unit(self, unit_type, unit):
        """
        Adds the unit definition `unit` in the relevant dict.
        @raises: `ValueError` if the unit was already defined.
        """
        # NOTE(review): the variation counter is bumped before the duplicate
        # check below -- confirm stats stay consistent when ValueError fires.
        self.stats.new_variation_unit_declared(unit_type)
        relevant_dict = self._get_relevant_dict(unit_type)
        if unit.identifier in relevant_dict:
            raise ValueError(
                "Tried to declare " + unit_type.value + " '" + \
                unit.identifier + "' twice."
            )
        relevant_dict[unit.identifier] = unit
        self.stats.new_unit_declared(unit_type)
    def add_alias(self, alias):
        """
        Adds the alias definition `alias` in the relevant dict.
        @raises: `ValueError` if the alias was already defined.
        """
        # NOTE there might be a better way to check that the alias is not already defined without needing to call `get_relevant_dict`
        self._add_unit(UnitType.alias, alias)
    def add_slot(self, slot):
        """
        Adds the slot definition `slot` in the relevant dict.
        @raises: `ValueError` if the slot was already defined.
        """
        self._add_unit(UnitType.slot, slot)
    def add_intent(self, intent):
        """
        Adds the intent definition `intent` in the relevant dict.
        @raises: `ValueError` if the intent was already defined.
        """
        self._add_unit(UnitType.intent, intent)
    def add_unit(self, unit, unit_type=None):
        """
        Adds the unit definition `unit` in the relevant dict.
        If `unit_type` is `None`, detects the type of the definition
        by itself.
        @raises: - `TypeError` if the unit type is of an invalid type.
                 - `ValueError` if the unit was already declared.
        """
        if unit_type is None:
            # Infer the unit type from the concrete definition class.
            if isinstance(unit, AliasDefinition):
                unit_type = UnitType.alias
            elif isinstance(unit, SlotDefinition):
                unit_type = UnitType.slot
            elif isinstance(unit, IntentDefinition):
                unit_type = UnitType.intent
            else:
                raise TypeError(  # Should never happen
                    "Tried to add something else than a unit definition " + \
                    "to the AST."
                )
        self._add_unit(unit_type, unit)
    def rename_unit(self, unit_type, old_name, new_name):
        """
        Changes the name of the unit `old_name` to `new_name` if it exists.
        @raises: - `KeyError` if the unit of type `unit_type` and name
                   `old_name` wasn't declared.
                 - `ValueError` if the unit with name `new_name` already
                   existed.
        """
        relevant_dict = self._get_relevant_dict(unit_type)
        if old_name not in relevant_dict:
            raise KeyError(
                "Tried to rename " + unit_type.name + " '" + old_name + \
                "', but it wasn't declared."
            )
        if new_name in relevant_dict:
            raise ValueError(
                "Tried to rename " + unit_type.name + " '" + old_name + \
                "' to '" + new_name + "', but this " + unit_type.name + \
                " already existed."
            )
        # Re-key the dict and keep the unit's own identifier in sync.
        unit = relevant_dict[old_name]
        del relevant_dict[old_name]
        unit.set_identifier(new_name)
        relevant_dict[new_name] = unit
    def delete_unit(self, unit_type, unit_name, variation_name=None):
        """
        Deletes the declared unit `unit_name` of type `unit_type`.
        If `variation_name` is not `None`, only deletes this particular
        variation for this unit.
        @raises: - `KeyError` if the unit `unit_name` wasn't declared.
                 - `KeyError` if the variation `variation_name` doesn't exist.
        """
        relevant_dict = self._get_relevant_dict(unit_type)
        if unit_name not in relevant_dict:
            raise KeyError(
                "Tried to delete " + unit_type.name + " '" + unit_name + \
                "', but it wasn't declared."
            )
        if variation_name is None:
            # Whole unit removal: account for all of its variations at once.
            self.stats.one_unit_removed(unit_type)
            self.stats.several_variation_units_removed(
                unit_type, relevant_dict[unit_name].get_number_variations()
            )
            del relevant_dict[unit_name]
        else:
            relevant_dict[unit_name].delete_variation(variation_name)
            self.stats.one_variation_unit_removed(unit_type)
    def get_entities_synonyms(self):
        """
        Makes a dict of all the synonyms of entities
        based on the slot value they are assigned.
        """
        synonyms = dict()
        # Merge the per-slot synonym dicts, then strip duplicate entries.
        for slot_definition in self._slot_definitions:
            current_synonyms_dict = \
                self._slot_definitions[slot_definition].get_synonyms_dict()
            for slot_value in current_synonyms_dict:
                extend_list_in_dict(
                    synonyms, slot_value, current_synonyms_dict[slot_value]
                )
        return remove_duplicates(synonyms)
    def print_DBG(self):
        # Debug dump of every declared unit, grouped by type.
        print("Aliases (" + str(len(self._alias_definitions)) + "):")
        for alias_name in self._alias_definitions:
            self._alias_definitions[alias_name].print_DBG()
        print("Slots (" + str(len(self._slot_definitions)) + "):")
        for slot_name in self._slot_definitions:
            self._slot_definitions[slot_name].print_DBG()
        print("Intents (" + str(len(self._intent_definitions)) + "):")
        for intent_name in self._intent_definitions:
            self._intent_definitions[intent_name].print_DBG()
        print()
| StarcoderdataPython |
4942120 | # -*- coding:utf8 -*-
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from base.logger import LOG
class MusicTableWidget(QTableWidget):
    """Table widget that displays music (track) information.
    """
    signal_play_music = pyqtSignal([int], name='play_music')
    signal_remove_music_from_list = pyqtSignal([int], name='remove_music_from_list')
    def __init__(self, rows=0, columns=4, parent=None):
        super().__init__(rows, columns, parent)
        self.__row_mid_map = [] # row is the index, music id (mid) is the value
        self.__special_focus_out = False
        self.__signal_mapper = QSignalMapper() # links the remove_music button with a mid
        self._alignment = Qt.AlignLeft | Qt.AlignVCenter
        self.__set_prop()
        self.__init_signal_binding()
    def __set_objects_name(self):
        # Placeholder: object names are set per-widget where needed.
        pass
    def __init_signal_binding(self):
        # Route cell interactions to play / remove handlers.
        self.cellDoubleClicked.connect(self.on_cell_double_clicked)
        self.cellClicked.connect(self.on_remove_music_btn_clicked)
    def __set_prop(self):
        # One-time visual/behavioral configuration of the table.
        self.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
        self.horizontalHeader().setDefaultAlignment(self._alignment)
        self.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setHorizontalHeaderLabels([u'歌曲名',
                                        u'歌手',
                                        u'时长',
                                        u'移除'])
        self.setShowGrid(False) # border between items
        self.setMouseTracking(True)
        self.verticalHeader().hide()
        self.setFocusPolicy(Qt.StrongFocus)
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.Tool)
        self.setAlternatingRowColors(True)
    def focusOutEvent(self, event):
        # The widget behaves like a popup: losing focus closes it.
        self.close()
    def add_item_from_model(self, music_model):
        """Append one track row built from `music_model`; skip duplicates."""
        # Identity check against False: row 0 is a valid "already in" hit.
        if self.is_item_already_in(music_model['id']) is not False:
            return False
        artist_name = ''
        music_item = QTableWidgetItem(music_model['name'])
        if len(music_model['artists']) > 0:
            artist_name = music_model['artists'][0]['name']
        artist_item = QTableWidgetItem(artist_name)
        # Duration arrives in milliseconds; render as m:s.
        duration = music_model['duration']
        m = int(duration / 60000)
        s = int((duration % 60000) / 1000)
        duration = str(m) + ':' + str(s)
        duration_item = QTableWidgetItem(duration)
        # The full model dict is stashed on the first cell for later lookup.
        music_item.setData(Qt.UserRole, music_model)
        row = self.rowCount()
        self.setRowCount(row + 1)
        self.setItem(row, 0, music_item)
        self.setItem(row, 1, artist_item)
        self.setItem(row, 2, duration_item)
        music_item.setTextAlignment(self._alignment)
        artist_item.setTextAlignment(self._alignment)
        duration_item.setTextAlignment(self._alignment)
        btn = QLabel()
        btn.setToolTip(u'从当前播放列表中移除')
        btn.setObjectName('remove_music') # named so QSS styling can target it
        self.setCellWidget(row, 3, btn)
        self.setRowHeight(row, 30)
        self.setColumnWidth(3, 30)
        # NOTE(review): row_mid is built but never stored anywhere
        # (__row_mid_map is never updated) -- dead code, confirm intent.
        row_mid = dict()
        row_mid['mid'] = music_model['id']
        row_mid['row'] = row
        return True
    def set_songs(self, tracks):
        # Reset the table and repopulate it from `tracks`.
        self.setRowCount(0)
        for track in tracks:
            self.add_item_from_model(track)
    def is_item_already_in(self, mid):
        # Returns the row index when present, otherwise False.
        row = self.find_row_by_mid(mid)
        if row is not None:
            return row
        return False
    def focus_cell_by_mid(self, mid):
        # Select and scroll to the row holding the given music id.
        row = self.find_row_by_mid(mid)
        self.setCurrentCell(row, 0)
        self.setCurrentItem(self.item(row, 0))
        self.scrollToItem(self.item(row, 0))
    def find_row_by_mid(self, mid):
        # Linear scan over column 0; returns the row index or False.
        row = False
        total = self.rowCount()
        i = 0
        while i < total:
            item = self.item(i, 0)
            data = item.data(Qt.UserRole)
            tmp_mid = data['id']
            if tmp_mid == mid:
                row = i
                break
            i += 1
        return row
    def find_mid_by_row(self, row):
        # NOTE(review): items are stored with key 'id' (see
        # add_item_from_model); this 'mid' lookup will raise KeyError --
        # confirm and fix the key.
        item = self.item(row, 0)
        data = item.data(Qt.UserRole)
        mid = data['mid']
        return mid
    @pyqtSlot(int, int)
    def on_cell_double_clicked(self, row, column):
        # Double-click anywhere in a row requests playback of that track.
        item = self.item(row, 0)
        music_model = item.data(Qt.UserRole)
        self.signal_play_music.emit(music_model['id'])
    @pyqtSlot(int, int)
    def on_remove_music_btn_clicked(self, row, column):
        # Only clicks on the "remove" column (3) are handled.
        if column != 3:
            return
        item = self.item(row, 0)
        data = item.data(Qt.UserRole)
        mid = data['id']
        row = self.find_row_by_mid(mid)
        self.removeRow(row)
        self.signal_remove_music_from_list.emit(mid)
9685194 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Part of slugdetection package
@author: <NAME>
github: dapolak
"""
name = "slugdetection"
__all__ = ["Data_Engineering", "confusion_mat", "Slug_Labelling",
"Flow_Recognition", "Slug_Detection", "Slug_Forecasting"]
from slugdetection.Data_Engineering import Data_Engineering
from slugdetection.Data_Engineering import confusion_mat
from slugdetection.Slug_Labelling import Slug_Labelling
from slugdetection.Flow_Recognition import Flow_Recognition
from slugdetection.Slug_Detection import Slug_Detection
from slugdetection.Slug_Forecasting import Slug_Forecasting
from slugdetection.test_Data_Engineering import Test_Data_Engineering
from slugdetection.test_Slug_Labelling import Test_Slug_Labelling
from slugdetection.test_Flow_Recognition import Test_Flow_Recognition
from slugdetection.test_Slug_Detection import Test_Slug_Detection
from slugdetection.test_Slug_Forecasting import Test_Slug_Forecasting
| StarcoderdataPython |
3322203 | try:
import RPi.GPIO as gpio
except:
import x007007007.RPi.GPIO as gpio
class _GPIOPins(object):
def __init__(self):
self._pins = {}
def __getitem__(self, key):
return self._pins[key]
def __setitem__(self, key, value):
self._pins[key] = value
def __getattribute__(self, key):
if not key.startswith("_"):
return object.__getattribute__(self, "_pins")[key]
else:
return object.__getattribute__(self, key)
def __setattribute__(self, key, value):
if not key.startswith("_"):
print(key,value)
object.__getattribute__(self, "_pins")[key] = value
else:
object.__setattribute__(self, key, value)
class GPIOPin(object):
    """Descriptor stub for declaring a GPIO pin on a module class."""
    def __init__(self, required=False, pwm=False):
        # NOTE(review): ``pwm`` is accepted but never stored -- confirm
        # whether PWM support is still planned.
        self.required = required
    def __set__(self, instance, value):
        # Debug placeholder: the descriptor protocol is not implemented yet.
        print(instance, value)
    def __get__(self, instance, owner):
        # Debug placeholder: prints instead of returning a pin value.
        print(instance, owner)
class BaseGPIOModule(object):
    """Context-manager base class for hardware modules built on RPi GPIO."""
    def __init__(self):
        # Per-module registry of pin numbers, exposed via attribute access.
        self.gpio_pin = _GPIOPins()
        self.gpio = gpio
        # All subclasses use Broadcom (BCM) pin numbering.
        gpio.setmode(gpio.BCM)
    def setup(self, *args, **kwargs):
        # Subclasses configure their pins here; invoked by __enter__.
        raise NotImplementedError
    def __enter__(self, *args, **kwargs):
        self.setup(*args, **kwargs)
        return self
    def __exit__(self, *args, **kwargs):
        # On context exit, return every registered pin to input mode.
        for pin in self.gpio_pin._pins.values():
            gpio.setup(pin, gpio.IN)
    @classmethod
    def required_io_name(cls):
        # Subclasses report the I/O names they require.
        raise NotImplementedError
| StarcoderdataPython |
8153738 | <reponame>Genomicsplc/ukb-pret
import numpy
import os
import pandas
from tempfile import TemporaryDirectory
import unittest
from ukb_pret._io import load_phenotype_dictionary
from ukb_pret.error import UkbPretImportError
from ukb_pret.evaluate import _get_overlapping_results, _filter_null_values, _infer_ancestry_from_pcs, evaluate_prs
def generate_prs_data():
    """Build a small PRS score frame indexed by sample id (eid)."""
    frame = pandas.DataFrame(
        {'eid': ['FK1', 'FK2', 'FK3'], 'prs_data': [0.4, -0.87, -2.1]}
    )
    return frame.set_index('eid')
def generate_binary_pheno_data():
    """Build a binary asthma (AST) phenotype frame indexed by eid."""
    frame = pandas.DataFrame({'eid': ['FK1', 'FK2', 'FK3'], 'AST': [1, 0, 0]})
    return frame.set_index('eid')
def generate_quantitative_pheno_data():
    """Build a quantitative LDL phenotype frame indexed by eid."""
    frame = pandas.DataFrame(
        {'eid': ['FK1', 'FK2', 'FK3'], 'LDL': [50.32, 90, -999999999.12345]}
    )
    return frame.set_index('eid')
def generate_pc_data():
    """Build a frame with the first four principal components per eid."""
    columns = {
        'eid': ['FK1', 'FK2', 'FK3'],
        'pc1': [0.5, 0.5, 0.5],
        'pc2': [-0.3, 0.3, 0.7],
        'pc3': [0.1, 0.3, 0.5],
        'pc4': [0.7, -0.7, 0.0],
    }
    return pandas.DataFrame(columns).set_index('eid')
def generate_population_clusters():
    """Build per-ancestry PC centroids, indexed by population label."""
    clusters = pandas.DataFrame({
        'population': ['AFR', 'AMR', 'EAS', 'EUR', 'SAS'],
        'pc1': [0.5, 0.4, 0.3, 0.2, 0.1],
        'pc2': [0.1, 0.2, 0.3, 0.4, 0.5],
        'pc3': [-0.5, -0.4, -0.3, -0.2, -0.1],
        'pc4': [-0.1, -0.2, -0.3, -0.4, -0.5],
    })
    return clusters.set_index('population')
def generate_sex_and_ukb_testing_data():
    """Build sex and UKB-testing-flag covariates, indexed by eid."""
    covariates = {
        'eid': ['FK1', 'FK2', 'FK3'],
        'sex': [0, 0, 1],
        'in_ukb_wbu_testing': [0, 1, 1],
    }
    return pandas.DataFrame(covariates).set_index('eid')
def generate_custom_df(*args):
    """Column-concatenate the frames produced by the given factory callables."""
    frames = [factory() for factory in args]
    return pandas.concat(frames, axis=1)
class TestEvaluate(unittest.TestCase):
    """Unit tests for ukb_pret.evaluate helpers and evaluate_prs."""
    # NOTE(review): DataFrame.append was removed in pandas 2.0; these tests
    # assume an older pandas -- confirm the pinned version or port to concat.
    def test_overlapping_ids(self):
        """Rows present in only one frame are dropped by the intersection."""
        prs_df = generate_prs_data()
        pheno_df = generate_binary_pheno_data()
        pheno_df = pheno_df.append(pandas.DataFrame.from_dict({'eid': ['FK4'], 'AST': [0]}).set_index('eid'))
        prs_df_intersect, pheno_df_intersect = _get_overlapping_results(prs_df, pheno_df)
        self.assertDictEqual(prs_df_intersect.to_dict(), generate_prs_data().to_dict())
        self.assertDictEqual(pheno_df_intersect.to_dict(), generate_binary_pheno_data().to_dict())
    def test_duplicate_column_names(self):
        """Identical column names across frames must raise UkbPretImportError."""
        prs_df = generate_prs_data()
        pheno_df = generate_binary_pheno_data()
        prs_df = prs_df.rename(columns={'prs_data': 'AST'})
        with self.assertRaises(UkbPretImportError):
            _, _ = _get_overlapping_results(prs_df, pheno_df)
    def test_removal_of_nulls(self):
        """Null PRS/phenotype rows are filtered and counted per input."""
        prs_df = generate_prs_data()
        prs_df = prs_df.append(pandas.DataFrame.from_dict({'eid': ['FK4', 'FK5'],
                                                           'prs_data': [None, numpy.nan]}).set_index('eid'))
        pheno_df = generate_binary_pheno_data()
        pheno_df = pheno_df.append(pandas.DataFrame.from_dict({'eid': ['FK4', 'FK5'],
                                                               'AST': [numpy.nan, None]}).set_index('eid'))
        out_prs, out_pheno, n_prs, n_pheno = _filter_null_values(prs_df, pheno_df, 'AST')
        self.assertEqual(n_prs, 2)
        self.assertEqual(n_pheno, 2)
        self.assertDictEqual(out_prs.to_dict(), generate_prs_data().to_dict())
        self.assertDictEqual(out_pheno.to_dict(), generate_binary_pheno_data().to_dict())
    def test_infer_ancestry_from_pcs(self):
        """Each sample is assigned the ancestry of its nearest PC centroid."""
        df = generate_custom_df(generate_prs_data, generate_binary_pheno_data, generate_pc_data)
        pop_clusters = generate_population_clusters()
        new_df = _infer_ancestry_from_pcs(df, pop_clusters)
        expected_ancestries = {'FK1': 'AFR', 'FK2': 'SAS', 'FK3': 'SAS'}
        self.assertDictEqual(new_df['ancestry'].to_dict(), expected_ancestries)
    def test_evaluate_prs(self):
        """End-to-end evaluate_prs run writes plot dirs and returns both dicts."""
        df = generate_custom_df(generate_prs_data, generate_quantitative_pheno_data, generate_pc_data,
                                generate_sex_and_ukb_testing_data)
        pop_clusters = generate_population_clusters()
        new_df = _infer_ancestry_from_pcs(df, pop_clusters)
        # Increasing the size of the df to allow prs binning by PCs
        larger_df = pandas.concat([new_df]*40, ignore_index=True)
        larger_df['ancestry'] = ['AFR', 'EAS', 'EUR', 'SAS'] * 30
        larger_df.index = larger_df.index.astype(str)
        traits_yaml = load_phenotype_dictionary('LDL_SF')
        tmp_dir = TemporaryDirectory()
        os.mkdir(os.path.join(tmp_dir.name, 'plots'))
        eval_dict, cross_ancestry_eval_dict = evaluate_prs(larger_df, 'LDL_SF', 'prs_data', traits_yaml, tmp_dir.name)
        self.assertTrue(os.path.isdir(os.path.join(tmp_dir.name, 'plots')))
        self.assertTrue(os.path.isdir(os.path.join(tmp_dir.name, 'plots', 'prs_data_cross_ancestry')))
        expected_files = ['prs_data_prs_hist.png', 'prs_data_prs_box_plot.png'] + \
                         [f'prs_data_pc{i}_by_ancestry.png' for i in range(1, 5)]
        self.assertTrue(all(x in expected_files for x in os.listdir(os.path.join(tmp_dir.name, 'plots',
                                                                                 'prs_data_cross_ancestry'))))
        self.assertIsNotNone(eval_dict)
        self.assertIsNotNone(cross_ancestry_eval_dict)
| StarcoderdataPython |
4975123 | <filename>cloudmesh/nn/service/nfl_2019.py
import sys
import os
import pandas
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance
# prompt user for file name to read, assuming this is a csv
pwd = os.getcwd()
pwd = str(pwd) + "/"
file_name = input("Enter the name of the file including the extension: ")
file_path = str(pwd) + str(file_name)
print(file_path)
with open(str(file_path), 'r') as csvfile:
    my_file = pandas.read_csv(csvfile)
player_selection = input("Enter the player name you want to explore: ")
nfl = my_file
# Manipulate file for the nfl example
# Keep numeric columns only, then z-score normalize them so every stat
# contributes equally to the distance computation below.
nfl_numeric = nfl.select_dtypes(include=[np.number])
nfl_normalized = (nfl_numeric - nfl_numeric.mean()) / nfl_numeric.std()
nfl_normalized.fillna(0, inplace=True)
# One-row frame holding the normalized stats of the selected player.
player_normalized = nfl_normalized[nfl["Player"] == str(player_selection)]
# NOTE(review): distance.euclidean is given a one-row DataFrame here, not a
# 1-D vector -- confirm scipy accepts this shape for the installed version.
euclidean_distances = nfl_normalized.apply(
    lambda row: distance.euclidean(row, player_normalized), axis=1)
def euclidean_distance(row, selected_player):
    """Row-wise Euclidean distance between `row` and `selected_player`."""
    squared_diff = (row - selected_player) ** 2
    return squared_diff.sum(axis=1) ** 0.5
nfl_dist = euclidean_distance(nfl_normalized, player_normalized)
# Create a new dataframe with distances.
distance_frame = pandas.DataFrame(
    data={"dist": euclidean_distances, "idx": euclidean_distances.index})
distance_frame.sort_values("dist", inplace=True)
# Position 0 is the selected player himself (distance 0); positions 1-5
# are the five most similar other players.
second_smallest = distance_frame.iloc[1]["idx"]
five_smallest = [distance_frame.iloc[1]["idx"], distance_frame.iloc[2]["idx"],
                 distance_frame.iloc[3]["idx"],
                 distance_frame.iloc[4]["idx"], distance_frame.iloc[5]["idx"]]
# NOTE(review): np.zeros(5) is immediately overwritten inside the loop and
# lst holds only the last player each iteration -- dead initializer.
lst = np.zeros(5)
i = 0
for i in range(5):
    lst = (nfl.iloc[int(five_smallest[i])]["Player"])
    print(i, lst)
"""
def euclidean_distance(row, selected_player):
diff = row - selected_player
squares = diff ** 2
sum_squares = squares.sum(axis=1)
sqrt_squares = sum_squares ** 0.5
return sqrt_squares
# look at it to make sure we have a real data set
# We need to extract only the numeric volumns for obvious reasons
nfl_numeric = nfl.select_dtypes(include=[np.number])
distance_columns = list(nfl_numeric.head(0))
selected_player = nfl_numeric[nfl["Player"] == "<NAME>"].iloc[0]
# Test box plot
# Normalize all of the numeric columns
nfl_normalized = (nfl_numeric - nfl_numeric.mean()) / nfl_numeric.std()
# Fill in NA values in nfl_normalized
nfl_normalized.fillna(0, inplace=True)
# Find the normalized vector for lebron james.
brady_normalized = nfl_normalized[nfl["Player"] == "<NAME>"]
# Find the distance between lebron james and everyone else.
euclidean_distances = nfl_normalized.apply(lambda row: distance.euclidean(row, brady_normalized), axis=1)
# Create a new dataframe with distances.
distance_frame = pandas.DataFrame(data={"dist": euclidean_distances, "idx": euclidean_distances.index})
distance_frame.sort_values("dist", inplace=True)
# Find the most similar player to lebron (the lowest distance to lebron is lebron,
# the second smallest is the most similar non-lebron player)
second_smallest = distance_frame.iloc[1]["idx"]
five_smallest = [distance_frame.iloc[1]["idx"], distance_frame.iloc[2]["idx"], distance_frame.iloc[3]["idx"],
distance_frame.iloc[4]["idx"], distance_frame.iloc[5]["idx"]]
lst = np.zeros(5)
i=0
for i in range(5):
lst = (nfl.iloc[int(five_smallest[i])]["Player"])
print(i, lst)
most_similar_to_brady = nfl.iloc[int(second_smallest)]["Player"]
print("The player most similar to %s is: %s" % ("<NAME>", most_similar_to_brady))
"""
| StarcoderdataPython |
4925275 | import pwd
import gevent
import pytest
from mock import MagicMock
from volttron.platform import is_rabbitmq_available
from volttron.platform import get_services_core
from volttron.platform.agent.utils import execute_command
from volttron.platform.vip.agent import *
from volttrontesting.fixtures.volttron_platform_fixtures import build_wrapper, cleanup_wrapper, rmq_skipif
from volttrontesting.utils.utils import get_rand_vip
# skip if running on travis because initial secure_user_permissions scripts needs to be run as root/sudo
# TODO: Should we spin a separate docker image just to test this one test case alone?
# May be we can do this along with testing for remote RMQ instance setup for which we need sudo access too.
pytestmark = pytest.mark.skipif(os.environ.get("CI") is not None,
reason="Can't run on travis as this test needs root to run "
"setup script before running test case")
# Run as root or sudo scripts/secure_user_permissions.sh for both the below instance names before running these tests
INSTANCE_NAME1 = "volttron1"
INSTANCE_NAME2 = "volttron2"
def get_agent_user_from_dir(agent_uuid, home):
    """
    Read the Unix user name recorded for an installed VOLTTRON agent.
    :param agent_uuid: uuid of the installed agent
    :param home: path to volttron home
    :return: first line of the agent's USER_ID file (trailing newline kept)
    """
    user_id_file = os.path.join(home, "agents", agent_uuid, "USER_ID")
    with open(user_id_file, 'r') as handle:
        return handle.readline()
@pytest.fixture(scope="module", params=(
        dict(messagebus='zmq', ssl_auth=False, instance_name=INSTANCE_NAME1),
        pytest.param(dict(messagebus='rmq', ssl_auth=True,
                          instance_name=INSTANCE_NAME1), marks=rmq_skipif),
))
def secure_volttron_instance(request):
    """
    Fixture that returns a single instance of volttron platform for testing,
    parametrized over the zmq and (when available) rmq message buses.
    """
    address = get_rand_vip()
    # secure_agent_users=True is the feature under test in this module.
    wrapper = build_wrapper(address,
                            instance_name=request.param['instance_name'],
                            messagebus=request.param['messagebus'],
                            ssl_auth=request.param['ssl_auth'],
                            secure_agent_users=True)
    gevent.sleep(3)
    yield wrapper
    cleanup_wrapper(wrapper)
@pytest.fixture(scope="module")
def query_agent(request, secure_volttron_instance):
    # Start a fake agent to query the security agent
    agent = secure_volttron_instance.build_agent()
    agent.publish_callback = MagicMock(name="publish_callback")
    # Subscribe to the topic the security agent publishes to in these tests.
    agent.vip.pubsub.subscribe(
        peer='pubsub',
        prefix="test/publish",
        callback=agent.publish_callback).get()
    # Add a tear down method to stop the fake agent
    def stop_agent():
        print("In teardown method of query_agent")
        agent.core.stop()
    request.addfinalizer(stop_agent)
    return agent
@pytest.fixture(scope="module")
def security_agent(request, secure_volttron_instance):
    """Install and start the SecurityAgent, asserting it runs as its own user."""
    agent = secure_volttron_instance.install_agent(
        vip_identity="security_agent",
        agent_dir=f"{secure_volttron_instance.volttron_root}/volttrontesting/platform/security/SecurityAgent",
        start=False,
        config_file=None)
    secure_volttron_instance.start_agent(agent)
    gevent.sleep(3)
    assert secure_volttron_instance.is_agent_running(agent)
    # The agent's recorded USER_ID must correspond to a real Unix account.
    users = [user[0] for user in pwd.getpwall()]
    agent_user = get_agent_user_from_dir(agent, secure_volttron_instance.volttron_home)
    assert agent_user in users
    def stop_agent():
        print("stopping security agent")
        if secure_volttron_instance.is_running():
            secure_volttron_instance.stop_agent(agent)
            secure_volttron_instance.remove_agent(agent)
    request.addfinalizer(stop_agent)
    return agent
@pytest.fixture(scope="module")
def secure_multi_messagebus_forwarder(volttron_multi_messagebus):
    """Install a ForwardHistorian from one instance to another (zmq or rmq)."""
    from_instance, to_instance = volttron_multi_messagebus(INSTANCE_NAME1, INSTANCE_NAME2)
    to_instance.allow_all_connections()
    forwarder_config = {"custom_topic_list": ["foo"]}
    if to_instance.messagebus == 'rmq':
        # RMQ destinations are addressed over the web interface with CSR auth.
        remote_address = to_instance.bind_web_address
        to_instance.enable_auto_csr()
        print("REQUEST CA: {}".format(os.environ.get('REQUESTS_CA_BUNDLE')))
        os.environ['REQUESTS_CA_BUNDLE'] = to_instance.requests_ca_bundle
        forwarder_config['destination-address'] = remote_address
    else:
        remote_address = to_instance.vip_address
        forwarder_config['destination-vip'] = remote_address
        forwarder_config['destination-serverkey'] = to_instance.serverkey
    forwarder_uuid = from_instance.install_agent(
        agent_dir=get_services_core("ForwardHistorian"),
        config_file=forwarder_config,
        start=False
    )
    from_instance.start_agent(forwarder_uuid)
    gevent.sleep(5)
    assert from_instance.is_agent_running(forwarder_uuid)
    yield from_instance, to_instance
    from_instance.stop_agent(forwarder_uuid)
def publish(publish_agent, topic, header, message):
    """Publish `message` on `topic` via the agent's pubsub and block (10 s)."""
    result = publish_agent.vip.pubsub.publish(
        'pubsub', topic, headers=header, message=message)
    result.get(timeout=10)
@pytest.mark.secure
def test_agent_rpc(secure_volttron_instance, security_agent, query_agent):
    """
    Test agent running in secure mode can make RPC calls without any errors
    :param secure_volttron_instance: secure volttron instance
    :param security_agent: Test agent which runs secure mode as a user other than platform user
    :param query_agent: Fake agent to do rpc calls to test agent
    """
    """if multiple copies of an agent can be installed successfully"""
    # Make sure the security agent can receive an RPC call, and respond
    assert query_agent.vip.rpc.call(
        "security_agent", "can_receive_rpc_calls").get(timeout=5)
    # Try installing a second copy of the agent
    agent2 = None
    try:
        agent2 = secure_volttron_instance.install_agent(
            vip_identity="security_agent2",
            agent_dir=f"{secure_volttron_instance.volttron_root}/volttrontesting/platform/security/SecurityAgent",
            start=False,
            config_file=None)
        secure_volttron_instance.start_agent(agent2)
        gevent.sleep(3)
        assert secure_volttron_instance.is_agent_running(agent2)
        # The first copy must be able to RPC into the second copy.
        assert query_agent.vip.rpc.call("security_agent", "can_make_rpc_calls",
                                        "security_agent2").get(timeout=5)
    # NOTE(review): catching BaseException and asserting False loses the
    # original traceback; consider letting the exception propagate.
    except BaseException as e:
        print("Exception {}".format(e))
        assert False
    finally:
        if agent2:
            secure_volttron_instance.remove_agent(agent2)
@pytest.mark.secure
def test_agent_pubsub(secure_volttron_instance, security_agent,
                      query_agent):
    """
    Test agent running in secure mode can publish and subscribe to message bus without any errors
    :param secure_volttron_instance: secure volttron instance
    :param security_agent: Test agent which runs secure mode as a user other than platform user
    :param query_agent: Fake agent to do rpc calls to test agent
    """
    # Ask the security agent to publish; the query_agent's MagicMock
    # subscription (see the query_agent fixture) records the delivery.
    query_agent.vip.rpc.call("security_agent", "can_publish_to_pubsub")
    gevent.sleep(6)
    assert "security_agent" == query_agent.publish_callback.call_args[0][1]
    assert "Security agent test message" == \
           query_agent.publish_callback.call_args[0][5]
    # Subscription check: counter is 0 before any publish on test/read ...
    assert 0 == query_agent.vip.rpc.call(
        "security_agent", "can_subscribe_to_messagebus").get(timeout=5)
    query_agent.vip.pubsub.publish(peer='pubsub', topic="test/read",
                                   message="test message")
    gevent.sleep(3)
    # ... and 1 after the security agent received our message.
    assert 1 == query_agent.vip.rpc.call(
        "security_agent", "can_subscribe_to_messagebus").get(timeout=5)
@pytest.mark.secure
def test_install_dir_permissions(secure_volttron_instance, security_agent, query_agent):
    """
    Test to make sure agent user only has read and execute permissions for all sub folders of agent install directory
    except <agent>.agent-data directory. Agent user should have rwx to agent-data directory
    :param secure_volttron_instance: secure volttron instance
    :param security_agent: Test agent which runs secure mode as a user other than platform user
    :param query_agent: Fake agent to do rpc calls to test agent
    """
    assert secure_volttron_instance.is_agent_running(security_agent)
    # The RPC returns None on success, or a description of the violation.
    results = query_agent.vip.rpc.call("security_agent", "verify_install_dir_permissions").get(timeout=10)
    print(results)
    assert results is None
@pytest.mark.secure
def test_install_dir_file_permissions(secure_volttron_instance, security_agent, query_agent):
    """
    Test to make sure agent user only has read access to all files in install-directory except for files in
    <agent>.agent-data directory. Agent user will be the owner of files in agent-data directory and hence we
    need not check files in this dir
    :param secure_volttron_instance: secure volttron instance
    :param security_agent: Test agent which runs secure mode as a user other than platform user
    :param query_agent: Fake agent to do rpc calls to test agent
    """
    results = query_agent.vip.rpc.call("security_agent", "verify_install_dir_file_permissions").get(timeout=5)
    assert results is None
@pytest.mark.secure
def test_vhome_dir_permissions(secure_volttron_instance, security_agent, query_agent):
    """
    Test to make sure we have read and execute access to relevant folder outside of agent's install dir.
    Agent should have read access to the below directories other than its own agent install dir. Read access to other
    folder are based on default settings in the machine. We restrict only file access when necessary.
     - vhome
     - vhome/certificates and its subfolders
    :param secure_volttron_instance: secure volttron instance
    :param security_agent: Test agent which runs secure mode as a user other than platform user
    :param query_agent: Fake agent to do rpc calls to test agent
    """
    assert secure_volttron_instance.is_agent_running(security_agent)
    results = query_agent.vip.rpc.call("security_agent", "verify_vhome_dir_permissions").get(timeout=10)
    print(results)
    assert results is None
@pytest.mark.secure
def test_vhome_file_permissions(secure_volttron_instance, security_agent, query_agent):
    """
    Test to make sure agent does not have any permissions on files outside agent's directory but for the following
    exceptions.
    Agent user should have read access to
     - vhome/config
     - vhome/known_hosts
     - vhome/rabbitmq_config.yml
     - vhome/certificates/certs/<agent_vip_id>.<instance_name>.crt
     - vhome/certificates/private/<agent_vip_id>.<instance_name>.pem
    :param secure_volttron_instance: secure volttron instance
    :param security_agent: Test agent which runs secure mode as a user other than platform user
    :param query_agent: Fake agent to do rpc calls to test agent
    """
    assert secure_volttron_instance.is_agent_running(security_agent)
    # Try installing a second copy of the agent. First agent should not have read/write/execute access to any
    # of the files of agent2. rpc call checks all files in vhome
    agent2 = None
    try:
        agent2 = secure_volttron_instance.install_agent(
            vip_identity="security_agent2",
            agent_dir=f"{secure_volttron_instance.volttron_root}/volttrontesting/platform/security/SecurityAgent",
            start=False,
            config_file=None)
        secure_volttron_instance.start_agent(agent2)
        gevent.sleep(3)
        assert secure_volttron_instance.is_agent_running(agent2)
        # Now verify that security_agent has read access to only its own files
        results = query_agent.vip.rpc.call("security_agent",
                                           "verify_vhome_file_permissions",
                                           INSTANCE_NAME1).get(timeout=10)
        print(results)
        assert results is None
    except BaseException as e:
        print("Exception {}".format(e))
        assert False
    finally:
        if agent2:
            secure_volttron_instance.remove_agent(agent2)
@pytest.mark.secure
def test_config_store_access(secure_volttron_instance, security_agent, query_agent):
    """
    Test config-store isolation between agents running in secure mode.

    An agent should be able to read and update entries in its *own* config
    store, but must have no access to another agent's config store. A second
    copy of the test agent is installed, both stores are seeded, and the
    agent-side ``verify_config_store_access`` RPC performs the checks.
    :param secure_volttron_instance: secure volttron instance
    :param security_agent: Test agent which runs secure mode as a user other than platform user
    :param query_agent: Fake agent to do rpc calls to test agent
    """
    assert secure_volttron_instance.is_agent_running(security_agent)
    agent2 = None
    try:
        # Install and start a second copy of the agent to act as the "other"
        # agent whose config store must be inaccessible.
        agent2 = secure_volttron_instance.install_agent(
            vip_identity="security_agent2",
            agent_dir=f"{secure_volttron_instance.volttron_root}/volttrontesting/platform/security/SecurityAgent",
            start=False,
            config_file=None)
        secure_volttron_instance.start_agent(agent2)
        gevent.sleep(3)
        assert secure_volttron_instance.is_agent_running(agent2)
        # make initial entry in config store for both agents
        config_path = os.path.join(secure_volttron_instance.volttron_home, "test_secure_agent_config")
        with open(config_path, "w+") as f:
            f.write('{"test":"value"}')
        gevent.sleep(1)
        execute_command(['volttron-ctl', 'config', 'store', "security_agent", "config", config_path, "--json"],
                        cwd=secure_volttron_instance.volttron_home, env=secure_volttron_instance.env)
        execute_command(['volttron-ctl', 'config', 'store', "security_agent2", "config", config_path, "--json"],
                        cwd=secure_volttron_instance.volttron_home, env=secure_volttron_instance.env)
        # Store security_agent's entry a second time to exercise the overwrite
        # path as well. NOTE(review): confirm this repeat is intentional.
        execute_command(['volttron-ctl', 'config', 'store', "security_agent", "config", config_path, "--json"],
                        cwd=secure_volttron_instance.volttron_home, env=secure_volttron_instance.env)
        # this rpc method will check agents own config store and access and agent's access to other agent's config store
        results = query_agent.vip.rpc.call("security_agent",
                                           "verify_config_store_access", "security_agent2").get(timeout=30)
        print("RESULTS :::: {}".format(results))
        # BUG FIX: the result was previously only printed, so the test could
        # never fail on a bad verification. Assert None on success, matching
        # the convention of the sibling permission tests above.
        assert results is None
    except BaseException as e:
        print("Exception {}".format(e))
        assert False
    finally:
        # Clean up the second agent regardless of outcome.
        if agent2:
            secure_volttron_instance.remove_agent(agent2)
# Please note: This test case needs updated.
# secure_multi_messagebus_forwarder and volttron_multi_messagebus fixtures need to be modified
# to pass secure_agent_user flag as True
# @pytest.mark.secure
# def secure_test_multi_messagebus_forwarder(secure_multi_messagebus_forwarder):
# """
# Forward Historian test with multi message bus combinations
# :return:
# """
# from_instance, to_instance = secure_multi_messagebus_forwarder
# gevent.sleep(5)
# publish_agent = from_instance.dynamic_agent
# subscriber_agent = to_instance.dynamic_agent
#
# subscriber_agent.callback = MagicMock(name="callback")
# subscriber_agent.callback.reset_mock()
# subscriber_agent.vip.pubsub.subscribe(peer='pubsub',
# prefix='devices',
# callback=subscriber_agent.callback).get()
#
# subscriber_agent.analysis_callback = MagicMock(name="analysis_callback")
# subscriber_agent.analysis_callback.reset_mock()
# subscriber_agent.vip.pubsub.subscribe(peer='pubsub',
# prefix='analysis',
# callback=subscriber_agent.analysis_callback).get()
# sub_list = subscriber_agent.vip.pubsub.list('pubsub').get()
# gevent.sleep(10)
#
# # Create timestamp
# now = utils.format_timestamp(datetime.utcnow())
# print("now is ", now)
# headers = {
# headers_mod.DATE: now,
# headers_mod.TIMESTAMP: now
# }
#
# for i in range(0, 5):
# topic = "devices/PNNL/BUILDING1/HP{}/CoolingTemperature".format(i)
# value = 35
# publish(publish_agent, topic, headers, value)
# topic = "analysis/PNNL/BUILDING1/WATERHEATER{}/ILCResults".format(i)
# value = {'result': 'passed'}
# publish(publish_agent, topic, headers, value)
# gevent.sleep(0.5)
#
# gevent.sleep(1)
#
# assert subscriber_agent.callback.call_count == 5
# assert subscriber_agent.analysis_callback.call_count == 5
| StarcoderdataPython |
1848978 | <filename>11/specop.py
#!/usr/bin/python3
import sys
from brilpy import *
PROFILE = 'profile.txt'
def main():
    """Prepend a recorded trace to the 'main' function of a Bril program.

    Reads the program (JSON) from stdin and the trace from PROFILE, then
    rewrites main's instructions to: <trace> + a 'recover' label + the
    original body, so the traced fast path can bail out to 'recover'.
    The transformed program is written to stdout as JSON.
    """
    prog = json.load(sys.stdin)
    # Hack to make brench work: we wait to open 'profile.txt' until *after*
    # we've finished reading from stdin
    # FIX: use a context manager so the profile file handle is closed
    # (previously `json.load(open(PROFILE))` leaked the handle).
    with open(PROFILE) as profile_file:
        trace = json.load(profile_file)
    # FIX: next(...) over a generator replaces list(filter(lambda ...))[0] —
    # no throwaway list, and the intent (first match) is explicit.
    mainfunc = next(func for func in prog['functions'] if func['name'] == 'main')
    original_instrs = mainfunc["instrs"]
    mainfunc["instrs"] = trace["instrs"] + [{"label": "recover"}] + original_instrs
    json.dump(prog, sys.stdout)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4937101 | <gh_stars>0
from typing import List
from pybm import __version__
from pybm.command import CLICommand
from pybm.status_codes import SUCCESS, ERROR
class BaseCommand(CLICommand):
    """
    Commands:
    apply  - Run a benchmarking workflow specified in a YAML file.
    config - Display and change pybm configuration values.
    env    - Create and manage benchmarking environments.
    init   - Initialize a git repository for pybm benchmarking.
    report - Report results of successful benchmarking runs.
    run    - Run specified benchmarks in different environments.
    """

    # Usage string surfaced by the argument parser for the top-level command.
    usage = "pybm <command> [<options>]"

    def __init__(self):
        # The root command has no name of its own; subcommands carry names.
        super(BaseCommand, self).__init__(name="")

    def add_arguments(self):
        # special version action and version kwarg
        self.parser.add_argument(
            "--version",
            action="version",
            help="Show pybm version and exit.",
            version=f"%(prog)s version {__version__}",
        )

    def run(self, args: List[str]):
        """Parse top-level options; print help and return ERROR when no args given."""
        self.add_arguments()
        if not args:
            self.parser.print_help()
            return ERROR
        options = self.parser.parse_args(args)
        # NOTE(review): `verbose` is not added here, so it presumably comes from
        # the CLICommand base parser — confirm.
        if options.verbose:
            print(vars(options))
        return SUCCESS
| StarcoderdataPython |
251168 | from lxml import html
from base_test import BaseTest
import model
from database import db_session
class ProblemsTestCase(BaseTest):
    """
    Contains tests for the problems blueprint
    """

    def _problem_add(self, init_problem_name):
        # Helper: POST a new input-output problem and verify it appears in the
        # admin problem listing.
        rv = self.app.post(
            "/admin/problems/add/",
            data={
                "problem_type_id": model.ProblemType.query.filter_by(
                    name="input-output"
                ).one().id,
                "slug": "initprob",
                "name": init_problem_name,
                "problem_statement": "## is there a problem here",
                "sample_input": "1",
                "sample_output": "2",
                "secret_input": "1 2 3",
                "secret_output": "4 5 6",
            },
            follow_redirects=True,
        )
        self.assertEqual(rv.status_code, 200, "Failed to add problem")
        # Scrape the listing page and check the new name is rendered.
        rv = self.app.get("/admin/problems/")
        root = html.fromstring(rv.data)
        page_problem_names = [x.text for x in root.cssselect(".problem_name")]
        self.assertIn(init_problem_name, page_problem_names, "Problem was not added")

    def _problem_edit(self, old_name, new_name):
        # Helper: re-POST to the add endpoint with an existing problem_id to
        # edit the problem, then verify the new name shows up in the listing.
        problem_id = model.Problem.query.filter_by(name=old_name).one().id
        rv = self.app.post(
            "/admin/problems/add/",
            data={
                "problem_id": problem_id,
                "problem_type_id": model.ProblemType.query.filter_by(
                    name="input-output"
                ).one().id,
                "slug": "initprob",
                "name": new_name,
                "problem_statement": "## there is a problem here",
                "sample_input": "1",
                "sample_output": "2",
                "secret_input": "1 2 3",
                "secret_output": "4 5 6",
            },
            follow_redirects=True,
        )
        self.assertEqual(rv.status_code, 200, "Failed to edit problem")
        rv = self.app.get("/admin/problems/")
        root = html.fromstring(rv.data)
        page_problem_names = [x.text for x in root.cssselect(".problem_name")]
        self.assertIn(new_name, page_problem_names, "Problem was not edited")

    def _problem_del(self, name):
        # Helper: GET the delete endpoint and verify the name disappears from
        # the listing.
        problem_id = model.Problem.query.filter_by(name=name).one().id
        rv = self.app.get(
            "/admin/problems/del/{}".format(problem_id), follow_redirects=True
        )
        self.assertEqual(rv.status_code, 200, "Failed to delete problem")
        rv = self.app.get("/admin/problems/")
        root = html.fromstring(rv.data)
        page_problem_names = [x.text for x in root.cssselect(".problem_name")]
        self.assertNotIn(name, page_problem_names, "Problem was not deleted")

    def test_problem_crud(self):
        # End-to-end create/edit/delete round trip as an admin user.
        init_problem_name = "fibbonaci49495885"
        edit_problem_name = "shortestpath31231137"
        self.login("admin", "<PASSWORD>")
        self._problem_add(init_problem_name)
        self._problem_edit(init_problem_name, edit_problem_name)
        self._problem_del(edit_problem_name)
| StarcoderdataPython |
8120185 | <reponame>tstu92197t/SC-project
"""
File: hangman.py
name: <NAME>
-----------------------------
This program plays hangman game.
Users sees a dashed word, trying to
correctly figure the un-dashed word out
by inputting one character each round.
If the user input is correct, show the
updated word on console. Players have N_TURNS
to try in order to win this game.
"""
import random
# This constant controls the number of guess the player has
N_TURNS = 7
def main():
    """
    Play one round of hangman on the console.
    The user sees a dashed word and inputs one character each round.
    Pre-condition: the user sees a dashed word and inputs one character.
    Post-condition: if the input is correct, the updated word is shown on the
    console; otherwise the user is told the guess was wrong and loses a turn.
    """
    s = random_word()
    dashed = ''
    for i in range(len(s)):  # forming the dashed at the beginning
        dashed += '-'
    print('The word looks like: '+dashed)
    count = N_TURNS
    print('You have '+str(count)+' guesses left.')
    input_all = ''  # input_all includes all the input in each round
    while True:
        input_ch = input('Your guess: ')
        check_alpha = input_ch.isalpha()
        # check whether the input is an alphabet or string that its length is over 1
        if check_alpha == False or len(input_ch) != 1:
            print('illegal format.')
        else:
            input_ch = input_ch.upper()  # case-insensitive
            # New guesses are *prepended*, so input_all[:1] below is always the
            # most recent guess; older guesses follow it.
            input_all = input_ch + input_all
            j = s.find(input_all[:1])  # check if we can find the input at this round in s
            if j != -1:  # your guess is right (found)
                # Rebuild the display from scratch: reveal every position whose
                # letter matches any guess made so far.
                dashed = ''
                for base in s:
                    ans = '-'		# the default output is a dash
                    for k in range(len(input_all)):
                        if base == input_all[k]:
                            ans = input_all[k]
                    dashed += ans
                print('You are correct!')
                i = dashed.find('-')
                if i != -1:  # found '-' in dashed string
                    print('The word looks like: ' + dashed)
                    print('You have ' + str(count) + ' guesses left.')
                else:  # does not find '-' in dashed string
                    print('You win!!')
                    print('The word was: '+str(s))
                    return
            else:  # the case that the user guess wrong
                # NOTE(review): repeating an earlier wrong guess costs another
                # turn, and repeating a correct guess costs nothing — confirm
                # whether duplicate guesses should be rejected instead.
                print('There is no '+str(input_ch)+"'s in the word.")
                count -= 1  # the number of guess the player has minus one
                if count == 0:
                    print('You are completely hung : (')
                    print('The word was: '+str(s))
                    return
                else:
                    print('The word looks like: ' + dashed)
                    print('You have ' + str(count) + ' guesses left.')
def random_word():
    """Return one of nine fixed vocabulary words, chosen uniformly at random.

    Replaces the original ``random.choice(range(9))`` + if/elif ladder with a
    direct choice over the word list; the distribution is unchanged (uniform
    over the same nine words).
    """
    words = (
        "NOTORIOUS",
        "GLAMOROUS",
        "CAUTIOUS",
        "DEMOCRACY",
        "BOYCOTT",
        "ENTHUSIASTIC",
        "HOSPITALITY",
        "BUNDLE",
        "REFUND",
    )
    return random.choice(words)
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
if __name__ == '__main__':
main()
| StarcoderdataPython |
4859315 | <filename>common/__init__.py
# -*- coding: utf-8 -*-
# @Time : 2021/3/25 10:37 AM
import base64
import hashlib
import hmac
import json
import time
from hashlib import md5
from Crypto.Cipher import AES
def token(message, enc=False, expire=3600 * 24):
    """Create or verify an AES-CBC + HMAC-SHA256 token.

    Token layout: <base64 ciphertext><10-digit epoch seconds><64-hex hmac>.
    The per-token key is md5(epoch + salt); its first 16 hex chars are the AES
    key and the next 16 the IV.

    :param message: JSON-serializable payload when ``enc`` is True, otherwise
        the token string to verify and decrypt.
    :param enc: True to encode, False (default) to decode.
    :param expire: token lifetime in seconds, checked on decode.
    :return: token string when encoding; decoded payload on successful
        verification, otherwise ``False``.
    """
    # The salt must never change: tokens signed with it would stop verifying.
    salt = '<KEY>'
    bs = 16
    # PKCS#7-style padding helpers for AES's 16-byte block size.
    pad = lambda s: s + (bs - len(s) % bs) * chr(bs - len(s) % bs)
    unpad = lambda s: s[:-ord(s[len(s) - 1:])]
    if not enc:
        # Split the token into its three fixed-width trailing fields.
        content, _time, _hmac = message[:-74], message[-74:-64], message[-64:]
        key = md5((_time + salt).encode("utf-8")).hexdigest()
        _i_hmac = hmac.new(bytes.fromhex(key), (content + _time).encode('utf-8'), hashlib.sha256).hexdigest()
        # FIX: constant-time comparison — tokens are untrusted input and a
        # plain != comparison leaks timing information.
        if not hmac.compare_digest(_i_hmac, _hmac):
            return False
        if time.time() - int(_time) > expire:
            return False
        return json.loads(unpad(AES.new(key[:16], AES.MODE_CBC, key[16:]).decrypt(base64.b64decode(content))))
    else:
        # BUG FIX: the timestamp was hard-coded to '1620110661' (a debug
        # leftover, see the commented-out line it replaced), so every issued
        # token was timestamped May 2021 and immediately failed the expiry
        # check on decode. Use the current time, truncated to 10 digits to
        # match the fixed-width decode slicing above.
        _time = str(int(time.time()))[:10]
        key = md5((_time + salt).encode("utf-8")).hexdigest()
        raw = pad(json.dumps(message))
        cipher = AES.new(key[:16], AES.MODE_CBC, key[16:])
        __time = _time.encode('utf-8')
        # NOTE(review): pycryptodome requires bytes for encrypt(); the original
        # passed a str. json.dumps is ASCII by default so the utf-8 encoding
        # does not change padding lengths — confirm against the crypto library
        # version in use.
        ret = base64.b64encode(cipher.encrypt(raw.encode('utf-8'))) + __time
        ret_hmac = hmac.new(bytes.fromhex(key), ret, hashlib.sha256).hexdigest()
        return str(ret, encoding='utf-8') + ret_hmac
def token_uid(message):
    """
    Decode an encrypted uid token.
    :param message: encrypted uid token string
    :return: the user's primary key as an int
    """
    payload = token(message)
    return int(payload)
def pad(s):
    """Right-pad *s* to a multiple of 16 characters, PKCS#7 style: the pad
    character's ordinal equals the number of characters appended (a full
    16-char block is added when *s* is already block-aligned)."""
    block = 16
    fill = block - len(s) % block
    return s + chr(fill) * fill
if __name__ == '__main__':
e_key = '<KEY>'
# print(token(123456, True))
print(token('<KEY>'))
| StarcoderdataPython |
41942 | <gh_stars>0
import git
from zeppos_root.root import Root
from zeppos_logging.app_logger import AppLogger
from cachetools import cached, TTLCache
class Branch:
    """Helper for querying the current git branch of this project."""

    @staticmethod
    @cached(cache=TTLCache(maxsize=1024, ttl=600))  # cache the answer for 10 minutes
    def get_current():
        """Return the current branch name, or None if no '*' line is found.

        Parses the plain ``git branch`` output and returns the entry marked
        with '*'. NOTE(review): in a detached-HEAD state git prints
        '* (HEAD detached at ...)', which would be returned verbatim — confirm
        callers handle that.
        """
        g = git.cmd.Git(Root.find_root_of_project(__file__))
        for line in g.branch().split('\n'):
            if line.startswith("*"):
                AppLogger.logger.debug(f"Current Git Branch: {line[1:].strip()}")
                return line[1:].strip()
        return None
| StarcoderdataPython |
3379522 | <gh_stars>10-100
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def rotateRight(self, head, k):
        """Rotate a singly linked list to the right by k places.

        :type head: ListNode
        :type k: int
        :rtype: ListNode
        """
        if not head:
            return []
        # Walk to the tail, measuring the list length on the way.
        length = 1
        tail = head
        while tail.next:
            tail = tail.next
            length += 1
        # Rotating by the length is a no-op, so reduce k modulo length.
        k = k % length
        # Close the list into a ring, then advance (length - k) steps from the
        # old tail to find the new tail; the node after it is the new head.
        tail.next = head
        new_tail = tail
        for _ in range(length - k):
            new_tail = new_tail.next
        new_head = new_tail.next
        new_tail.next = None
        return new_head
| StarcoderdataPython |
5004638 | <gh_stars>0
from flask import Flask, render_template, jsonify, redirect
from splinter import Browser
from flask_pymongo import PyMongo
import pymongo
import scrape_mars
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
# app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
# mongo = PyMongo(app)
# conn="mongodb://localhost:27017"
# client=pymongo.MongoClient(conn)
# db=client.mars_app
# collection=db.mars
mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_app")
@app.route("/")
def index():
    # Render the landing page using the single scraped-Mars document stored in
    # the 'mars' collection (None on first run, before any scrape).
    mars = mongo.db.mars.find_one()
    print(mars)
    return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
    # Re-run the scraper and upsert the result as the single stored document,
    # then bounce the browser back to the landing page.
    mars_info = scrape_mars.scrape()
    # NOTE(review): collection.update() is deprecated in modern pymongo —
    # replace_one with upsert=True is the current equivalent.
    mongo.db.mars.update({}, mars_info, upsert=True)
    return redirect("/", code=302)
if __name__ == "__main__":
    app.run(debug=True)
| StarcoderdataPython |
12851152 | <gh_stars>0
import struct
import zlib
from wrpg.piaf.common import (
header_structure,
file_entry_structure,
file_entry_size,
get_data_offset,
header_size,
header_check_size,
header_data_size)
class ParserError(Exception):
    """Base class for all PIAF archive parsing failures."""
    pass
class ParserMagicHeaderError(ParserError):
    """The buffer does not start with the 'WRPGPIAF' magic header."""
    pass
class ParserChecksumError(ParserError):
    """Header or file-table CRC32 does not match the stored checksum."""
    pass
class ParserDatasizeError(ParserError):
    """Buffer length disagrees with the data size declared in the header."""
    pass
def load_data(buffer, archive, file_sizes):
    """Attach each file's raw bytes to its entry in ``archive["file_entries"]``.

    Files are stored back-to-back after the header and file table, in the same
    order as the entries; ``file_sizes`` gives each file's length in bytes.

    :param buffer: the whole archive as bytes
    :param archive: archive dict whose "file_entries" list is mutated in place
    :param file_sizes: byte length of each file, parallel to the entries
    """
    entries = archive["file_entries"]
    # FIX: the data region's start depends only on the entry count, so compute
    # it once instead of on every loop iteration (loop-invariant hoist).
    offset = get_data_offset(len(entries))
    for entry, size in zip(entries, file_sizes):
        entry["data"] = buffer[offset:offset + size]
        offset += size
def unpack_archive(buffer):
    """Parse a serialized PIAF archive from ``buffer``.

    Validates the magic header, the header CRC32 and the file-table CRC32,
    then returns ``{"version": int, "file_entries": [...]}`` where each entry
    carries "file_type", "compression_type" and the raw "data" bytes.
    Raises a ParserError subclass on any integrity failure.
    """
    def parse_header():
        # Fixed-size header at the very start of the buffer.
        header = buffer[:header_size()]
        ( magic_header,
          header_checksum,
          filetable_checksum,
          version,
          nb_files,
          data_size ) = struct.unpack(header_structure(), header)
        magic_header = magic_header.decode('utf-8')
        archive = {
            "version": version }
        if magic_header != 'WRPGPIAF':
            raise ParserMagicHeaderError('Bad Magic Header')
        # The header checksum covers the header bytes *after* the checksum
        # fields themselves (offsets come from the common module).
        calculated_header_checksum = zlib.crc32(buffer[
            header_check_size()
            :header_check_size()+header_data_size() ]) & 0xffffffff
        if calculated_header_checksum != header_checksum:
            raise ParserChecksumError('Bad Header Checksum : {} != {}'
                .format(calculated_header_checksum, header_checksum))
        # The file-table checksum covers all nb_files fixed-size entries that
        # immediately follow the header.
        calculated_file_table_checksum = zlib.crc32(buffer[
            header_size()
            :header_size()+nb_files*file_entry_size()]
            ) & 0xffffffff
        if calculated_file_table_checksum != filetable_checksum:
            raise ParserChecksumError('Bad Filetable Checksum : {} != {}'
                .format(calculated_file_table_checksum, filetable_checksum))
        # Sanity check: declared payload size + header/table must equal the
        # whole buffer.
        if len(buffer) != data_size + get_data_offset(nb_files):
            raise ParserDatasizeError('Bad Data Size')
        return archive, nb_files
    def parse_filetable():
        result = []
        file_sizes = []
        for i in range(nb_files):
            file_entry_offset = header_size()+file_entry_size()*i
            # NOTE(review): file_name and data_offset are unpacked but unused —
            # offsets are recomputed from the running total in load_data;
            # confirm whether the stored data_offset should be honored instead.
            file_name, file_type, compression_type, file_size, data_offset =\
                struct.unpack(
                    file_entry_structure(),
                    buffer[ file_entry_offset: file_entry_offset+file_entry_size()]
                )
            file_entry = { "file_type": file_type,
                           "compression_type": compression_type }
            result.append(file_entry)
            file_sizes.append(file_size)
        return result, file_sizes
    archive, nb_files = parse_header()
    archive["file_entries"], file_sizes = parse_filetable()
    # Slice each file's payload out of the data region (mutates the entries).
    load_data(buffer, archive, file_sizes)
    return archive
| StarcoderdataPython |
1893927 | # locust -f locustfile.py
from locust import HttpUser, between, task
class WebsiteUser(HttpUser):
    """Locust load-test user: logs in once, then repeatedly polls a set of
    experiment-data endpoints with a 5-15s think time between tasks."""
    wait_time = between(5, 15)

    def on_start(self):
        # Runs once per simulated user before any task: authenticate with a
        # fixed test account (empty password is intentional for this test env).
        self.client.post("/login", {
            "username": "test_user",
            "password": ""
        })

    @task
    def index(self):
        # NOTE(review): despite its name, this task hits several experiment
        # endpoints on localtest.me rather than the site index — consider
        # renaming or splitting into separate @task methods.
        # self.client.get("/")
        # self.client.get("/static/assets.js")
        self.client.get("http://localtest.me:8000/getexpcnc/3")
        self.client.get("http://localtest.me:8000/getexperimentdatamachine/2/Prep")
        self.client.get("http://localtest.me:8000/knowexpmachiningfinalized/yes")
        self.client.get("http://localtest.me:8000/getdatamachiningfinalized/no")
        self.client.get("http://localtest.me:8000/knowexpwornstatus/worn")
        self.client.get("http://localtest.me:8000/getdatatool/worn")
# @task
# def about(self):
# self.client.get("/about/")
# @task
# def about(self):
# self.client.get("http://localtest.me:8000/getexpcnc/3")
# @task
# def about(self):
# self.client.get("http://localtest.me:8000/getexperimentdatamachine/2/Prep")
# @task
# def about(self):
# self.client.get("http://localtest.me:8000/knowexpwornstatus/worn")
# @task
# def about(self):
# self.client.get("http://localtest.me:8000/getdatatool/worn")
# @task
# def about(self):
# self.client.get("http://localtest.me:8000/knowexppassvisual/no")
# @task
# def about(self):
# self.client.get("http://localtest.me:8000/getdatavisualinspection/no")
# @task
# def about(self):
# self.client.get("http://localtest.me:8000/knowexpmachiningfinalized/yes")
# @task
# def about(self):
# self.client.get("http://localtest.me:8000/getdatamachiningfinalized/no")
| StarcoderdataPython |
1783927 | r"""
Interfaces for primitives of the :py:mod:`cobald` model
Each :py:class:`~.Pool` provides a varying number of resources.
A :py:class:`~.Controller` adjusts the number of resources that
a :py:class:`~.Pool` must provide.
Several :py:class:`~.Pool`\ s can be combined in a single
:py:class:`~.CompositePool` to appear as one.
To modify how a :py:class:`~.Pool` presents or digests data,
any number of :py:class:`~.PoolDecorator` may proceed it.
.. graphviz::
digraph graphname {
graph [rankdir=LR, splines=lines, bgcolor="transparent"]
controller [label=Controller]
composite [label=CompositePool]
decoa [label=PoolDecorator]
poola, poolb [label=Pool]
controller -> decoa -> composite
composite -> poola
composite -> poolb
}
"""
from ._composite import CompositePool
from ._controller import Controller
from ._pool import Pool
from ._proxy import PoolDecorator
from ._partial import Partial
__all__ = [
cls.__name__ for cls in (Pool, PoolDecorator, Controller, CompositePool, Partial)
]
| StarcoderdataPython |
8084280 | <reponame>RAIJ95/https-github.com-failys-cairis
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from flask_restful import fields
from flask_restful_swagger import swagger
import PseudoClasses
__author__ = '<NAME>, <NAME>'
import ModelDefinitions
def gen_message_fields(class_ref):
    """Build the flask-restful marshalling fields for a message that wraps a
    single *class_ref* object together with the caller's session id."""
    return {
        "session_id": fields.String,
        "object": fields.Nested(class_ref.resource_fields),
    }
def gen_message_multival_fields(class_ref):
    """Build the flask-restful marshalling fields for a message that wraps a
    *list* of *class_ref* objects together with the caller's session id."""
    return {
        "session_id": fields.String,
        "object": fields.List(fields.Nested(class_ref.resource_fields)),
    }
class DefaultMessage(object):
    """Base for all swagger message wrappers: every message requires at least
    its 'object' payload field."""
    required = ['object']
# region Swagger Doc
@swagger.model
@swagger.nested(
    object=ModelDefinitions.AssetEnvironmentPropertiesModel.__name__
)
# endregion
class AssetEnvironmentPropertiesMessage(DefaultMessage):
    # Wraps a *list* of asset environment properties (multival) plus the
    # caller's session id.
    resource_fields = gen_message_multival_fields(ModelDefinitions.AssetEnvironmentPropertiesModel)
    required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.AssetModel.__name__
)
# endregion
class AssetMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.AssetModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.AssetAssociationModel.__name__
)
# endregion
class AssetAssociationMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.AssetAssociationModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.GoalAssociationModel.__name__
)
# endregion
class GoalAssociationMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.GoalAssociationModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.AttackerModel.__name__,
)
# endregion
class AttackerMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.AttackerModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.CImportParams.__name__
)
# endregion
class CImportMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.CImportParams)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.CExportParams.__name__
)
# endregion
class CExportMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.CExportParams)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.DocumentationParams.__name__
)
# endregion
class DocumentationMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.DocumentationParams)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.DependencyModel.__name__
)
# endregion
class DependencyMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.DependencyModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.EnvironmentModel.__name__
)
# endregion
class EnvironmentMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.EnvironmentModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.GoalModel.__name__
)
# endregion
class GoalMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.GoalModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ObstacleModel.__name__
)
# endregion
class ObstacleMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ObstacleModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.DomainPropertyModel.__name__
)
# endregion
class DomainPropertyMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.DomainPropertyModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.CountermeasureModel.__name__
)
# endregion
class CountermeasureMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.CountermeasureModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=PseudoClasses.ProjectSettings.__name__
)
# endregion
class ProjectMessage(DefaultMessage):
resource_fields = gen_message_fields(PseudoClasses.ProjectSettings)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.RequirementModel.__name__
)
# endregion
class RequirementMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.RequirementModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ResponseModel.__name__
)
# endregion
class ResponseMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ResponseModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.RiskModel.__name__
)
# endregion
class RiskMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.RiskModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.RoleModel.__name__,
property_0=ModelDefinitions.RoleEnvironmentPropertiesModel.__name__
)
# endregion
class RoleMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.RoleModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ThreatModel.__name__
)
# endregion
class ThreatMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ThreatModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ValueTypeModel.__name__
)
# endregion
class ValueTypeMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ValueTypeModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.VulnerabilityModel.__name__
)
# endregion
class VulnerabilityMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.VulnerabilityModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.PersonaEnvironmentPropertiesModel.__name__
)
# endregion
class PersonaEnvironmentPropertiesMessage(DefaultMessage):
resource_fields = gen_message_multival_fields(ModelDefinitions.PersonaEnvironmentPropertiesModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.PersonaModel.__name__,
)
# endregion
class PersonaMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.PersonaModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.TaskModel.__name__,
)
# endregion
class TaskMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.TaskModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.UseCaseModel.__name__,
)
# endregion
class UseCaseMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.UseCaseModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.FindModel.__name__,
)
# endregion
class FindMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.FindModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ExternalDocumentModel.__name__,
)
# endregion
class ExternalDocumentMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ExternalDocumentModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.DocumentReferenceModel.__name__,
)
# endregion
class DocumentReferenceMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.DocumentReferenceModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.PersonaCharacteristicModel.__name__,
)
# endregion
class PersonaCharacteristicMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.PersonaCharacteristicModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ObjectDependencyModel.__name__,
)
# endregion
class ObjectDependencyMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ObjectDependencyModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ArchitecturalPatternModel.__name__,
)
# endregion
class ArchitecturalPatternMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ArchitecturalPatternModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
    object=ModelDefinitions.ValueTypeModel.__name__,
)
# endregion
class ValueTypeMessage(DefaultMessage):
    # NOTE(review): this is a duplicate of the ValueTypeMessage defined earlier
    # in this module; it silently rebinds the name to an identical class.
    # One of the two definitions should be removed.
    resource_fields = gen_message_fields(ModelDefinitions.ValueTypeModel)
    required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.TemplateGoalModel.__name__,
)
# endregion
class TemplateGoalMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.TemplateGoalModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.TemplateAssetModel.__name__,
)
# endregion
class TemplateAssetMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.TemplateAssetModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
    object=ModelDefinitions.LocationsModel.__name__,
)
# endregion
class LocationsMessage(DefaultMessage):
    # BUG FIX: the swagger.nested decorator referenced TemplateRequirementModel
    # while the marshalled payload is a LocationsModel (a copy/paste shift that
    # also affected the next two classes), so the generated API documentation
    # described the wrong nested schema.
    resource_fields = gen_message_fields(ModelDefinitions.LocationsModel)
    required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
    object=ModelDefinitions.TraceModel.__name__,
)
# endregion
class TraceMessage(DefaultMessage):
    # BUG FIX: decorator previously referenced LocationsModel; the payload is a
    # TraceModel.
    resource_fields = gen_message_fields(ModelDefinitions.TraceModel)
    required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
    object=ModelDefinitions.TemplateRequirementModel.__name__,
)
# endregion
class TemplateRequirementMessage(DefaultMessage):
    # BUG FIX: decorator previously referenced TraceModel; the payload is a
    # TemplateRequirementModel.
    resource_fields = gen_message_fields(ModelDefinitions.TemplateRequirementModel)
    required = DefaultMessage.required
class CountermeasureTaskMessage(DefaultMessage):
    """Swagger message schema for lists of CountermeasureTask resources."""
    # NOTE(review): unlike the sibling messages, resource_fields here is a
    # fields.List rather than a generated field dict -- confirm consumers
    # of this message expect a list payload.
    resource_fields = fields.List(fields.Nested(ModelDefinitions.CountermeasureTask.resource_fields))
    required = DefaultMessage.required
class SummaryMessage(DefaultMessage):
    """Swagger message schema exposing SummaryModel fields."""
    resource_fields = gen_message_fields(ModelDefinitions.SummaryModel)
    required = DefaultMessage.required
| StarcoderdataPython |
1944739 | <reponame>StephenRoille/flit
from pathlib import Path
import pytest
from flit.inifile import read_flit_config, ConfigError
samples_dir = Path(__file__).parent / 'samples'
def test_invalid_classifier():
    """A config declaring an unknown trove classifier must raise ConfigError."""
    with pytest.raises(ConfigError):
        read_flit_config(samples_dir / 'invalid_classifier.ini')
def test_classifiers_with_space():
    """
    Check that any empty lines (including the first one) for
    classifiers are stripped
    """
    # Parsing must succeed; blank classifier lines must not raise ConfigError.
    read_flit_config(samples_dir / 'classifiers_with_space.ini')
| StarcoderdataPython |
6694720 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.signature_key_templates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
from tink.proto import common_pb2
from tink.proto import ecdsa_pb2
from tink.proto import rsa_ssa_pkcs1_pb2
from tink.proto import rsa_ssa_pss_pb2
from tink.proto import tink_pb2
from tink import tink_config
from tink.signature import public_key_sign_key_manager
from tink.signature import signature_key_templates
# Parameter tuples consumed by the parameterized tests below:
#   ECDSA cases:  [key template, hash type, curve]
#   RSA cases:    [key template, hash type, modulus bits, public exponent]
ECDSA_DER_PARAMS_P256 = [
    signature_key_templates.ECDSA_P256, common_pb2.SHA256, common_pb2.NIST_P256
]
ECDSA_DER_PARAMS_P384 = [
    signature_key_templates.ECDSA_P384, common_pb2.SHA512, common_pb2.NIST_P384
]
ECDSA_DER_PARAMS_P521 = [
    signature_key_templates.ECDSA_P521, common_pb2.SHA512, common_pb2.NIST_P521
]
ECDSA_IEEE_PARAMS_P256 = [
    signature_key_templates.ECDSA_P256_IEEE_P1363, common_pb2.SHA256,
    common_pb2.NIST_P256
]
ECDSA_IEEE_PARAMS_P384 = [
    signature_key_templates.ECDSA_P384_IEEE_P1363, common_pb2.SHA512,
    common_pb2.NIST_P384
]
ECDSA_IEEE_PARAMS_P521 = [
    signature_key_templates.ECDSA_P521_IEEE_P1363, common_pb2.SHA512,
    common_pb2.NIST_P521
]
RSA_PKCS1_PARAMS_3072 = [
    signature_key_templates.RSA_SSA_PKCS1_3072_SHA256_F4, common_pb2.SHA256,
    3072, 65537
]
RSA_PKCS1_PARAMS_4096 = [
    signature_key_templates.RSA_SSA_PKCS1_4096_SHA512_F4, common_pb2.SHA512,
    4096, 65537
]
RSA_PSS_PARAMS_3072 = [
    signature_key_templates.RSA_SSA_PSS_3072_SHA256_SHA256_32_F4,
    common_pb2.SHA256, 3072, 65537
]
RSA_PSS_PARAMS_4096 = [
    signature_key_templates.RSA_SSA_PSS_4096_SHA512_SHA512_64_F4,
    common_pb2.SHA512, 4096, 65537
]
def bytes_to_num(data):
    """Interpret *data* as a big-endian unsigned integer.

    Accepts any bytes-like object or iterable of byte values; returns 0 for
    empty input. Replaces the manual shift/or loop with the built-in
    `int.from_bytes`, which implements exactly this conversion.
    """
    return int.from_bytes(bytes(bytearray(data)), byteorder='big')
def setUpModule():
    # Register all Tink key managers once, before any test in this module runs.
    tink_config.register()
class SignatureKeyTemplatesTest(parameterized.TestCase):
  """Sanity checks for the predefined signature key templates.

  Each parameterized case parses a template's serialized key-format proto,
  verifies the expected algorithm parameters, and confirms the matching
  key manager can generate key material from the template.
  """

  def test_bytes_to_num(self):
    # Round trip: _num_to_bytes followed by bytes_to_num is the identity.
    for i in range(100000):
      res = bytes_to_num(signature_key_templates._num_to_bytes(i))
      self.assertEqual(res, i)

  @parameterized.named_parameters(('0', 0, b'\x00'), ('256', 256, b'\x01\x00'),
                                  ('65537', 65537, b'\x01\x00\x01'))
  def test_num_to_bytes(self, number, expected):
    self.assertEqual(signature_key_templates._num_to_bytes(number), expected)
    # Negative numbers have no unsigned byte encoding.
    with self.assertRaises(OverflowError):
      signature_key_templates._num_to_bytes(-1)

  @parameterized.named_parameters(
      ['ecdsa_p256'] + ECDSA_DER_PARAMS_P256,
      ['ecdsa_p384'] + ECDSA_DER_PARAMS_P384,
      ['ecdsa_p521'] + ECDSA_DER_PARAMS_P521,
  )
  def test_ecdsa_der(self, key_template, hash_type, curve):
    """ECDSA templates with DER signature encoding carry the expected params."""
    self.assertEqual(key_template.type_url,
                     'type.googleapis.com/google.crypto.tink.EcdsaPrivateKey')
    self.assertEqual(key_template.output_prefix_type, tink_pb2.TINK)
    key_format = ecdsa_pb2.EcdsaKeyFormat()
    key_format.ParseFromString(key_template.value)
    self.assertEqual(key_format.params.hash_type, hash_type)
    self.assertEqual(key_format.params.curve, curve)
    self.assertEqual(key_format.params.encoding, ecdsa_pb2.DER)

    # Check that the template works with the key manager
    key_manager = public_key_sign_key_manager.from_cc_registry(
        key_template.type_url)
    key_manager.new_key_data(key_template)

  @parameterized.named_parameters(
      ['ecdsa_p256'] + ECDSA_IEEE_PARAMS_P256,
      ['ecdsa_p384'] + ECDSA_IEEE_PARAMS_P384,
      ['ecdsa_p521'] + ECDSA_IEEE_PARAMS_P521,
  )
  def test_ecdsa_ieee(self, key_template, hash_type, curve):
    """ECDSA templates with IEEE P1363 signature encoding carry the expected params."""
    self.assertEqual(key_template.type_url,
                     'type.googleapis.com/google.crypto.tink.EcdsaPrivateKey')
    self.assertEqual(key_template.output_prefix_type, tink_pb2.TINK)
    key_format = ecdsa_pb2.EcdsaKeyFormat()
    key_format.ParseFromString(key_template.value)
    self.assertEqual(key_format.params.hash_type, hash_type)
    self.assertEqual(key_format.params.curve, curve)
    self.assertEqual(key_format.params.encoding, ecdsa_pb2.IEEE_P1363)

    # Check that the template works with the key manager
    key_manager = public_key_sign_key_manager.from_cc_registry(
        key_template.type_url)
    key_manager.new_key_data(key_template)

  def test_ed25519(self):
    """The ED25519 template targets the right key type and prefix."""
    key_template = signature_key_templates.ED25519
    self.assertEqual(
        key_template.type_url,
        'type.googleapis.com/google.crypto.tink.Ed25519PrivateKey')
    self.assertEqual(key_template.output_prefix_type, tink_pb2.TINK)

    # Check that the template works with the key manager
    key_manager = public_key_sign_key_manager.from_cc_registry(
        key_template.type_url)
    key_manager.new_key_data(key_template)

  @parameterized.named_parameters(
      ['rsa_pkcs1_3072'] + RSA_PKCS1_PARAMS_3072,
      ['rsa_pkcs1_4096'] + RSA_PKCS1_PARAMS_4096,
  )
  def test_rsa_pkcs1(self, key_template, hash_algo, modulus_size, exponent):
    """RSASSA-PKCS1 templates carry the expected modulus size, hash and exponent."""
    self.assertEqual(
        key_template.type_url,
        'type.googleapis.com/google.crypto.tink.RsaSsaPkcs1PrivateKey')
    self.assertEqual(key_template.output_prefix_type, tink_pb2.TINK)
    key_format = rsa_ssa_pkcs1_pb2.RsaSsaPkcs1KeyFormat()
    key_format.ParseFromString(key_template.value)
    self.assertEqual(key_format.modulus_size_in_bits, modulus_size)
    self.assertEqual(key_format.params.hash_type, hash_algo)
    self.assertEqual(bytes_to_num(key_format.public_exponent), exponent)

    # Check that the template works with the key manager
    key_manager = public_key_sign_key_manager.from_cc_registry(
        key_template.type_url)
    key_manager.new_key_data(key_template)

  @parameterized.named_parameters(
      ['rsa_pss_3072'] + RSA_PSS_PARAMS_3072,
      ['rsa_pss_4096'] + RSA_PSS_PARAMS_4096,
  )
  def test_rsa_pss(self, key_template, hash_algo, modulus_size, exponent):
    """RSASSA-PSS templates use the same hash for signature and MGF1."""
    self.assertEqual(
        key_template.type_url,
        'type.googleapis.com/google.crypto.tink.RsaSsaPssPrivateKey')
    self.assertEqual(key_template.output_prefix_type, tink_pb2.TINK)
    key_format = rsa_ssa_pss_pb2.RsaSsaPssKeyFormat()
    key_format.ParseFromString(key_template.value)
    self.assertEqual(key_format.modulus_size_in_bits, modulus_size)
    self.assertEqual(key_format.params.sig_hash, hash_algo)
    self.assertEqual(key_format.params.mgf1_hash, hash_algo)
    self.assertEqual(bytes_to_num(key_format.public_exponent), exponent)

    # Check that the template works with the key manager
    key_manager = public_key_sign_key_manager.from_cc_registry(
        key_template.type_url)
    key_manager.new_key_data(key_template)
# Standard absl test runner entry point.
if __name__ == '__main__':
  absltest.main()
| StarcoderdataPython |
1730244 | from tkinter import *
from tkinter import ttk
from tkinter import filedialog
# your code goes between window and window.mainloop
# Root window of the minimal "Writy" editor (dark background).
window = Tk()
window.title('Writy')
window.configure(background = 'gray1')
# function for saving file
def save(event):
    """Write the editor contents to disk (bound to Ctrl-S).

    NOTE(review): the target path 'text.txt' is hard-coded and relative to
    the current working directory -- presumably a placeholder; confirm the
    intended save location (e.g. a save-as dialog).
    """
    with open('text.txt', 'w') as file:
        # 'end-1c' drops the trailing newline Tk appends to the widget text.
        file.write(text1.get("1.0",'end-1c'))
# function for opening file
def openfile(event):
    """Prompt for a file and insert its contents into the editor (Ctrl-O).

    Fixes two defects in the original: the chosen file handle was never
    closed, and cancelling the dialog returned an empty path, which made
    open() raise instead of doing nothing.
    """
    path = filedialog.askopenfilename()
    if not path:  # user cancelled the dialog
        return
    with open(path) as f:
        text1.insert(1.0, f.read())
# text area to write
text1 = Text(window, bg = 'gray1', fg = 'white', insertbackground = 'white')
text1.focus_force() # give focus to text area
# Keyboard shortcuts: Ctrl-S save, Ctrl-O open, Ctrl-Q quit.
text1.bind('<Control-s>', save)
text1.bind('<Control-o>', openfile)
text1.bind('<Control-q>', lambda e: window.destroy())
text1.grid(row = 2, column = 0, columnspan = 50, padx = 5, pady = 5)
window.mainloop()
| StarcoderdataPython |
12863227 | # SPDX-FileCopyrightText: 2021 Carnegie Mellon University
#
# SPDX-License-Identifier: Apache-2.0
import logging
import cv2
from busedge_protocol import busedge_pb2
from gabriel_protocol import gabriel_pb2
from sign_filter import SignFilter
logger = logging.getLogger(__name__)
import argparse
import multiprocessing
import time
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import CompressedImage, Image, NavSatFix
from std_msgs.msg import UInt8MultiArray
DEFAULT_SOURCE_NAME = "sign_filter3"
# Latest GPS fix; written by gps_callback and read by img_callback.
CUR_GPS = NavSatFix()
def run_node(source_name):
    """Start a ROS node running the sign detector on one camera stream.

    *source_name* must end with the camera id digit (e.g. "sign_filter3"
    drives camera3); detections are published on the `source_name` topic.
    Blocks in rospy.spin() until the node is shut down.
    """
    cam_id = source_name[-1]
    camera_name = "camera" + cam_id
    rospy.init_node(camera_name + "_sign_filter_node")
    rospy.loginfo("Initialized node sign_filter for " + camera_name)

    model_dir = "./model/ssd_mobilenet_v1_mtsd_hunter/saved_model"
    model = SignFilter(model_dir)

    pub = rospy.Publisher(source_name, UInt8MultiArray, queue_size=1)
    image_sub = rospy.Subscriber(
        camera_name + "/image_raw/compressed",
        CompressedImage,
        img_callback,
        callback_args=(model, camera_name, pub),
        queue_size=1,
        buff_size=2 ** 24,  # large buffer so the single-slot queue keeps the newest frame
    )
    gps_sub = rospy.Subscriber("/fix", NavSatFix, gps_callback, queue_size=1)

    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()
def img_callback(image, args):
    """Run sign detection on one compressed frame; publish frames with hits.

    *args* is (model, camera_name, publisher) as registered in run_node.
    When at least one detection clears the score threshold, the frame is
    JPEG-encoded into a Gabriel InputFrame with GPS metadata and published;
    all other frames are dropped.

    Changes vs. original: deprecated ndarray.tostring() replaced by
    tobytes(), unused `camera_id` local removed, redundant `== True` and
    empty else branch dropped. Behavior is otherwise unchanged.
    """
    global CUR_GPS
    model = args[0]
    camera_name = args[1]
    pub = args[2]

    bridge = CvBridge()
    frame = bridge.compressed_imgmsg_to_cv2(
        image, desired_encoding="passthrough"
    )  # BGR images
    frame = frame[:, :, ::-1]  # BGR to RGB
    frame_copy = frame.copy()

    # FILTER
    min_score_thresh = 0.75
    output_dict = model.detect(frame_copy, min_score_thresh)
    send_flag = output_dict["num_detections"] > 0

    if send_flag:
        _, jpeg_frame = cv2.imencode(".jpg", frame)
        input_frame = gabriel_pb2.InputFrame()
        input_frame.payload_type = gabriel_pb2.PayloadType.IMAGE
        # tobytes() is the documented replacement for the deprecated tostring().
        input_frame.payloads.append(jpeg_frame.tobytes())

        engine_fields = busedge_pb2.EngineFields()
        engine_fields.gps_data.latitude = CUR_GPS.latitude
        engine_fields.gps_data.longitude = CUR_GPS.longitude
        engine_fields.gps_data.altitude = CUR_GPS.altitude

        # Filename encodes the ROS timestamp: cameraN_<secs>_<nsecs>.jpg
        secs = image.header.stamp.secs
        nsecs = image.header.stamp.nsecs
        time_stamps = "_{:0>10d}_{:0>9d}".format(secs, nsecs)
        image_filename = camera_name + time_stamps + ".jpg"
        engine_fields.image_filename = image_filename

        input_frame.extras.Pack(engine_fields)
        serialized_message = input_frame.SerializeToString()
        rospy.loginfo(
            "Sent image msg with size {:.2f} KB".format(len(serialized_message) / 1024)
        )
        pub_data = UInt8MultiArray()
        pub_data.data = serialized_message
        pub.publish(pub_data)
        time.sleep(0.1)
def gps_callback(data):
    """Cache the most recent valid GPS fix in the module-level CUR_GPS."""
    global CUR_GPS
    if data.status.status != -1:
        # Valid fix: remember it for the next published detection.
        CUR_GPS = data
    else:
        rospy.logdebug("Sign filter node cannot get valid GPS data")
if __name__ == "__main__":
    # run_node('camera3')
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-n",
        "--source-name",
        nargs="+",
        default=[DEFAULT_SOURCE_NAME],
        help="Set source name for this pipeline",
    )
    args = parser.parse_args()
    # One worker process per requested source/camera pipeline.
    for source in args.source_name:
        multiprocessing.Process(target=run_node, args=(source,)).start()
| StarcoderdataPython |
11318136 | <reponame>netinvent/ofunctions<gh_stars>1-10
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of ofunctions package
"""
ofunctions is a general library for basic repetitive tasks that should be no brainers :)
Versioning semantics:
Major version: backward compatibility breaking changes
Minor version: New functionality
Patch version: Backwards compatible bug fixes
"""
__intname__ = "ofunctions.random"
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2014-2021 <NAME>"
__description__ = "Simple random string generator including password generator"
__licence__ = "BSD 3 Clause"
__version__ = "0.1.1"
__build__ = "2020102801"
import string
import random
def random_string(
    size: int = 8, chars: str = string.ascii_letters + string.digits
) -> str:
    """Return a random string of *size* characters drawn from *chars*.

    Fix: the `chars` parameter was annotated `list` while its default (and
    typical usage) is a `str`; any sequence of characters works.
    NOTE: uses the non-cryptographic `random` module; prefer the `secrets`
    module for security-sensitive tokens.
    """
    return "".join(random.choice(chars) for _ in range(size))
def pw_gen(size: int = 16, chars: str = string.ascii_letters + string.digits) -> str:
    """Generate a random password of *size* characters drawn from *chars*.

    Thin wrapper around random_string with a longer default length.
    Fix: `chars` was annotated `list` while its default is a `str`.
    NOTE: backed by the non-cryptographic `random` module; use `secrets`
    for security-sensitive passwords.
    """
    return random_string(size, chars)
| StarcoderdataPython |
8077349 | <filename>transfer_learning/neuralnet.py
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Dropout, Conv1D, Flatten, Reshape, MaxPooling1D, BatchNormalization, Conv2D, GlobalMaxPooling2D, Lambda
from tensorflow.keras.optimizers import Adam, Adadelta
from tensorflow.keras.losses import categorical_crossentropy
# Edge Impulse-style transfer-learning snippet: MobileNetV2 (alpha=0.35,
# 96x96 RGB) backbone with a small classification head on top.
INPUT_SHAPE = (96, 96, 3)

base_model = tf.keras.applications.MobileNetV2(
    input_shape=INPUT_SHAPE, alpha=0.35,
    weights='./transfer-learning-weights/keras/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.35_96.h5',
    include_top=True
)
# Freeze the pretrained backbone for the initial training phase.
base_model.trainable = False

model = Sequential()
model.add(InputLayer(input_shape=INPUT_SHAPE, name='x_input'))
# Reuse the backbone up to (but excluding) its final classification layers.
model.add(Model(inputs=base_model.inputs, outputs=base_model.layers[-3].output))
model.add(Dense(16))
model.add(Dropout(0.1))
model.add(Flatten())
# NOTE(review): `classes` is not defined anywhere in this file -- presumably
# injected by the surrounding training harness, as are set_batch_size,
# callbacks, BatchLoggerCallback, train_sample_count and both datasets below.
model.add(Dense(classes, activation='softmax'))

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

BATCH_SIZE = 32
train_dataset, validation_dataset = set_batch_size(BATCH_SIZE, train_dataset, validation_dataset)
callbacks.append(BatchLoggerCallback(BATCH_SIZE, train_sample_count))

model.fit(train_dataset, validation_data=validation_dataset, epochs=10, verbose=2, callbacks=callbacks)

print('')
print('Initial training done.', flush=True)
# Fine-tuning phase: unfreeze the top FINE_TUNE_PERCENTAGE of layers and
# retrain at a much lower learning rate.
import math  # fix: math.ceil is used below but math was never imported in this file

# How many epochs we will fine tune the model
FINE_TUNE_EPOCHS = 10
# What percentage of the base model's layers we will fine tune
FINE_TUNE_PERCENTAGE = 65

print('Fine-tuning best model for {} epochs...'.format(FINE_TUNE_EPOCHS), flush=True)

# Load best model from initial training
# NOTE(review): load_best_model is expected to be provided by the surrounding
# training harness; it is not defined in this file.
model = load_best_model()

# Determine which layer to begin fine tuning at
model_layer_count = len(model.layers)
fine_tune_from = math.ceil(model_layer_count * ((100 - FINE_TUNE_PERCENTAGE) / 100))

# Allow the entire base model to be trained
model.trainable = True
# Freeze all the layers before the 'fine_tune_from' layer
for layer in model.layers[:fine_tune_from]:
    layer.trainable = False

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.000045),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_dataset,
          epochs=FINE_TUNE_EPOCHS,
          verbose=2,
          validation_data=validation_dataset,
          callbacks=callbacks)
| StarcoderdataPython |
6477709 | <filename>resolucao/numpy/x2.py
import matplotlib.pyplot as plt
import numpy as np
# Plot y = x**2 sampled at 15 evenly spaced points on [0, 2], circle markers.
x = np.linspace(0, 2, 15)
plt.plot(x, x**2, marker='o')
plt.show()
| StarcoderdataPython |
4855729 | # CWrap imports
from cwrap.backend import cw_ast
from cwrap.config import ASTContainer
# Local package imports
import c_ast
def find_toplevel_items(items):
    """ Finds and returns the toplevel items given a list of items, one
    of which should be a toplevel namespace node.

    Returns a shallow copy of the '::' namespace's members.
    Raises RuntimeError if no toplevel namespace is present.

    Change vs. original: removed a dead `res_items = []` local that was
    assigned immediately before the return and never used.
    """
    for item in items:
        if isinstance(item, c_ast.Namespace) and item.name == '::':
            return item.members[:]
    raise RuntimeError('Toplevel namespace not found.')
def sort_toplevel_items(items):
    """ Sorts the items first by their filename, then by lineno. Returns
    a new list of items (relies on node.location being a comparable
    (filename, lineno) pair).
    """
    def sort_key(node):
        return node.location

    return sorted(items, key=sort_key)
def _flatten_container(container, items=None, context_name=None):
    """ Given a struct or union, replaces nested structs or unions
    with toplevel struct/unions and a typdef'd member. This will
    recursively expand everything nested. The `items` and `context_name`
    arguments are used internally. Returns a list of flattened nodes.

    NOTE(review): the `context_name` parameter is accepted (and passed on
    recursive calls) but never read -- confirm whether it can be dropped.
    """
    if items is None:
        items = []

    parent_context = container.context
    parent_name = container.name

    # (index, nested node, replacement typedef) records, applied after the scan.
    mod_context = []
    for i, field in enumerate(container.members):
        if isinstance(field, (c_ast.Struct, c_ast.Union)):
            # Create the necessary mangled names
            mangled_name = '__%s_%s' % (parent_name, field.name)
            mangled_typename = mangled_name + '_t'

            # Change the name of the nested item to the mangled
            # item the context to the parent context
            field.name = mangled_name
            field.context = parent_context

            # Expand any nested definitions for this container.
            _flatten_container(field, items, parent_name)

            # Create a typedef for the mangled name with the parent_context
            typedef = c_ast.Typedef(mangled_typename, field, parent_context)

            # Add the typedef to the list of items
            items.append(typedef)

            # Add the necessary information to the mod_context so
            # we can modify the list of members at the end.
            mod_context.append((i, field, typedef))

    # Use the mod_context to remove the nest definitions and replace
    # any fields that reference them with the typedefs.
    # (reversed so earlier pops don't shift pending indices)
    for idx, field, typedef in reversed(mod_context):
        container.members.pop(idx)
        for member in container.members:
            if isinstance(member, c_ast.Field):
                if member.typ is field:
                    member.typ = typedef

    items.append(container)

    return items
def flatten_nested_containers(items):
    """ Searches for Struct/Union nodes with nested Struct/Union
    definitions, when it finds them, it creates a similar definition
    in the namespace with an approprately mangled name, and reorganizes
    the nodes appropriately. This is required since Cython doesn't support
    nested definitions. Returns a new list of items.
    """
    res_items = []
    for node in items:
        if not isinstance(node, (c_ast.Struct, c_ast.Union)):
            res_items.append(node)
        else:
            # Expand the container and append it plus any typedefs
            # generated for its nested definitions.
            res_items.extend(_flatten_container(node))
    return res_items
def _ignore_filter(node):
    # Keep nodes that are not c_ast.Ignored placeholders.
    return not isinstance(node, c_ast.Ignored)

def _location_filter(node):
    # Keep nodes that carry source-location information.
    return node.location is not None

def _ignore_and_location_filter(node):
    # Keep nodes that are both non-ignored and located in a source file.
    return _ignore_filter(node) and _location_filter(node)
def filter_ignored(items):
    """ Searches a list of toplevel items and removed any instances
    of c_ast.Ignored nodes. Node members are search as well. Returns
    a new list of items.

    NOTE(review): this module uses Python 2 syntax elsewhere (print
    statements), so filter() returns a list here; under Python 3 these
    calls would need to be wrapped in list().
    """
    res_items = filter(_ignore_and_location_filter, items)
    for item in res_items:
        if isinstance(item, (c_ast.Struct, c_ast.Union)):
            item.members = filter(_ignore_filter, item.members)
        elif isinstance(item, c_ast.Enumeration):
            item.values = filter(_ignore_filter, item.values)
        elif isinstance(item, (c_ast.Function, c_ast.FunctionType)):
            item.arguments = filter(_ignore_filter, item.arguments)
    return res_items
def apply_c_ast_transformations(c_ast_items):
    """ Applies the necessary transformations to a list of c_ast nodes
    which are the output of the gccxml_parser. The returned list of items
    are appropriate for passing the CAstTransformer class.

    The following transformations are applied:
        1) find and extract the toplevel items
        2) sort the toplevel items into the order they appear
        3) extract and replace nested structs and unions
        4) get rid of any c_ast.Ignored nodes
    """
    # Each step consumes the previous step's output, in the order above.
    items = find_toplevel_items(c_ast_items)
    items = sort_toplevel_items(items)
    items = flatten_nested_containers(items)
    items = filter_ignored(items)
    return items
class CAstContainer(object):
    """A plain record pairing a list of ast items with the names of the
    modules they should be rendered to.
    """
    def __init__(self, items, header_name, extern_name, implementation_name):
        # Pure data carrier: store every argument verbatim.
        self.header_name = header_name
        self.extern_name = extern_name
        self.implementation_name = implementation_name
        self.items = items
class CAstTransformer(object):
    """Translates parsed c_ast toplevel items into CWrap/Cython AST modules.

    For each CAstContainer, visits the items belonging to that container's
    header and accumulates `pxd_nodes`; transform() finally wraps them in a
    `cdef extern from` block and yields one ASTContainer per input.
    """

    def __init__(self, ast_containers):
        # XXX - work out the symbols
        self.ast_containers = ast_containers
        self.pxd_nodes = []
        self.modifier_stack = []

    def transform(self):
        """Yield one ASTContainer (.pxd module) per input container."""
        for container in self.ast_containers:
            items = container.items
            # Per-container state; reset before visiting each header.
            self.pxd_nodes = []
            self.modifier_stack = []
            header_name = container.header_name
            for item in items:
                # only transform items for this header (not #inlcude'd
                # or other __builtin__ stuff)
                if item.location is not None:
                    if not item.location[0].endswith(header_name):
                        continue
                self.visit(item)
            extern = cw_ast.ExternFrom(container.header_name, self.pxd_nodes)
            cdef_decl = cw_ast.CdefDecl([], extern)
            mod = cw_ast.Module([cdef_decl])
            yield ASTContainer(mod, container.extern_name + '.pxd')

    def visit(self, node):
        # Visitor-pattern dispatch on the node's class name.
        visitor_name = 'visit_' + node.__class__.__name__
        visitor = getattr(self, visitor_name, self.generic_visit)
        res = visitor(node)
        return res

    def generic_visit(self, node):
        # Fallback for node types without a dedicated visitor (Python 2 print).
        print 'unhandled node in generic_visit: %s' % node

    #--------------------------------------------------------------------------
    # Toplevel visitors
    #--------------------------------------------------------------------------
    def visit_Struct(self, struct):
        """Append a cdef struct declaration for `struct` to pxd_nodes."""
        name = struct.name
        body = []
        for member in struct.members:
            body.append(self.visit_translate(member))
        if not body:
            body.append(cw_ast.Pass)
        struct_def = cw_ast.StructDef(name, body)
        cdef = cw_ast.CdefDecl([], struct_def)
        self.pxd_nodes.append(cdef)

    def visit_Union(self, union):
        """Append a cdef union declaration for `union` to pxd_nodes."""
        name = union.name
        body = []
        for member in union.members:
            body.append(self.visit_translate(member))
        if not body:
            body.append(cw_ast.Pass)
        union_def = cw_ast.UnionDef(name, body)
        cdef = cw_ast.CdefDecl([], union_def)
        self.pxd_nodes.append(cdef)

    def visit_Enumeration(self, enum):
        """Append a cdef enum declaration for `enum` to pxd_nodes."""
        name = enum.name
        body = []
        for value in enum.values:
            body.append(self.visit_translate(value))
        if not body:
            body.append(cw_ast.Pass)
        enum_def = cw_ast.EnumDef(name, body)
        cdef = cw_ast.CdefDecl([], enum_def)
        self.pxd_nodes.append(cdef)

    def visit_Function(self, func):
        """Append a C function declaration for `func` to pxd_nodes."""
        name = func.name
        args = []
        for arg in func.arguments:
            args.append(self.visit_translate(arg))
        args = cw_ast.arguments(args, None, None, [])
        returns = self.visit_translate(func.returns)
        func_def = cw_ast.CFunctionDecl(name, args, returns, None)
        self.pxd_nodes.append(func_def)

    def visit_Variable(self, var):
        """Append a typed variable declaration for `var` to pxd_nodes."""
        name = var.name
        type_name = self.visit_translate(var.typ)
        expr = cw_ast.Expr(cw_ast.CName(type_name, name))
        self.pxd_nodes.append(expr)

    def visit_Typedef(self, td):
        """Append a ctypedef declaration for `td` to pxd_nodes."""
        name = td.name
        type_name = self.visit_translate(td.typ)
        expr = cw_ast.Expr(cw_ast.CName(type_name, name))
        ctypedef = cw_ast.CTypedefDecl(expr)
        self.pxd_nodes.append(ctypedef)

    #--------------------------------------------------------------------------
    # render nodes
    #--------------------------------------------------------------------------
    def visit_translate(self, node):
        # Second dispatch table: converts type/member nodes into cw_ast types.
        name = 'translate_' + node.__class__.__name__
        res = getattr(self, name, lambda arg: None)(node)
        if res is None:
            print 'Unhandled node in translate: ', node
        return res

    def translate_Field(self, field):
        name = field.name
        type_name = self.visit_translate(field.typ)
        return cw_ast.Expr(cw_ast.CName(type_name, name))

    def translate_Enumeration(self, enum):
        name = enum.name
        return cw_ast.TypeName(cw_ast.Name(name, cw_ast.Param))

    def translate_EnumValue(self, value):
        name = value.name
        return cw_ast.Expr(cw_ast.Name(name, cw_ast.Param))

    def translate_Struct(self, struct):
        name = struct.name
        return cw_ast.TypeName(cw_ast.Name(name, cw_ast.Param))

    def translate_Union(self, union):
        name = union.name
        return cw_ast.TypeName(cw_ast.Name(name, cw_ast.Param))

    def translate_Argument(self, arg):
        name = arg.name
        type_name = self.visit_translate(arg.typ)
        # Unnamed parameters are rendered with an empty name.
        if name is None:
            name = ''
        cname = cw_ast.CName(type_name, name)
        return cname

    def translate_PointerType(self, pointer):
        return cw_ast.Pointer(self.visit_translate(pointer.typ))

    def translate_ArrayType(self, array):
        # gccxml gives inclusive [min, max] index bounds; dim is the length.
        min = int(array.min)
        max = int(array.max)
        dim = max - min + 1
        return cw_ast.Array(self.visit_translate(array.typ), dim)

    def translate_CvQualifiedType(self, qual):
        # const/volatile qualifiers are dropped; translate the base type.
        return self.visit_translate(qual.typ)

    def translate_Typedef(self, typedef):
        return cw_ast.TypeName(cw_ast.Name(typedef.name, cw_ast.Param))

    def translate_FundamentalType(self, fund_type):
        return cw_ast.TypeName(cw_ast.Name(fund_type.name, cw_ast.Param))

    def translate_FunctionType(self, func_type):
        args = []
        for arg in func_type.arguments:
            args.append(self.visit_translate(arg))
        args = cw_ast.arguments(args, None, None, [])
        returns = self.visit_translate(func_type.returns)
        func_type = cw_ast.CFunctionType(args, returns)
        return func_type
| StarcoderdataPython |
11293878 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from .layers import Encoder, Decoder, Discriminator
from .utils_deep import Optimisation_AAE
from ..utils.kl_utils import compute_mse
import numpy as np
from torch.autograd import Variable
import pytorch_lightning as pl
class jointAAE(pl.LightningModule, Optimisation_AAE):
    '''
    Multi-view Adversarial Autoencoder model with a joint latent representation.

    The joint latent z is the mean of the per-view encoder outputs; a
    discriminator adversarially matches z to a standard normal prior.
    Manual optimization is used (automatic_optimization = False): the
    Optimisation_AAE mixin is expected to drive the reconstruction /
    discriminator / generator phases via optimise_batch / validate_batch.

    NOTE(review): the loss methods read self.eps, which is only set when
    passed through **kwargs -- confirm callers always supply it. Also,
    recon_loss / generator_loss / discriminator_loss are declared
    @staticmethod yet take an explicit `self` first argument; presumably
    the mixin calls them as fn(self, ...) -- verify.
    '''
    def __init__(
                self,
                input_dims,
                z_dim=1,
                hidden_layer_dims=[],
                discriminator_layer_dims=[],
                non_linear=False,
                learning_rate=0.002,
                **kwargs):
        '''
        :param input_dims: columns of input data e.g. [M1 , M2] where M1 and M2 are number of the columns for views 1 and 2 respectively
        :param z_dim: number of latent vectors
        :param hidden_layer_dims: dimensions of hidden layers for encoder and decoder networks.
        :param discriminator_layer_dims: dimensions of hidden layers for encoder and decoder networks.
        :param non_linear: non-linearity between hidden layers. If True ReLU is applied between hidden layers of encoder and decoder networks
        :param learning_rate: learning rate of optimisers.
        '''
        super().__init__()
        self.automatic_optimization = False
        self.save_hyperparameters()
        self.model_type = 'joint_AAE'
        self.input_dims = input_dims
        # Copy so the (mutable default) argument list is never modified in place.
        self.hidden_layer_dims = hidden_layer_dims.copy()
        self.z_dim = z_dim
        self.hidden_layer_dims.append(self.z_dim)
        self.non_linear = non_linear
        self.learning_rate = learning_rate
        self.n_views = len(input_dims)
        self.joint_representation = True
        self.wasserstein = False
        self.sparse = False
        self.variational = False
        # Arbitrary extra keyword args become attributes (e.g. eps).
        self.__dict__.update(kwargs)

        self.encoders = torch.nn.ModuleList([Encoder(input_dim = input_dim, hidden_layer_dims=self.hidden_layer_dims, variational=False, non_linear=self.non_linear) for input_dim in self.input_dims])
        self.decoders = torch.nn.ModuleList([Decoder(input_dim = input_dim, hidden_layer_dims=self.hidden_layer_dims, variational=False, non_linear=self.non_linear) for input_dim in self.input_dims])
        self.discriminator = Discriminator(input_dim = self.z_dim, hidden_layer_dims=discriminator_layer_dims, output_dim=1)

    def configure_optimizers(self):
        # One Adam optimizer per submodule. The encoders appear twice:
        # the first set drives the reconstruction phase, the second set the
        # generator (adversarial) phase; the discriminator comes last.
        optimizers = []
        [optimizers.append(torch.optim.Adam(list(self.encoders[i].parameters()), lr=self.learning_rate)) for i in range(self.n_views)]
        [optimizers.append(torch.optim.Adam(list(self.decoders[i].parameters()), lr=self.learning_rate)) for i in range(self.n_views)]
        [optimizers.append(torch.optim.Adam(list(self.encoders[i].parameters()), lr=self.learning_rate)) for i in range(self.n_views)]
        optimizers.append(torch.optim.Adam(list(self.discriminator.parameters()), lr=self.learning_rate))
        return optimizers

    def encode(self, x):
        """Encode each view and average the per-view latents into one joint z."""
        z = []
        for i in range(self.n_views):
            z_ = self.encoders[i](x[i])
            z.append(z_)
        z = torch.stack(z)
        mean_z = torch.mean(z, axis=0)
        return mean_z

    def decode(self, z):
        """Decode the joint latent back into one reconstruction per view."""
        x_recon = []
        for i in range(self.n_views):
            x_out = self.decoders[i](z)
            x_recon.append(x_out)
        return x_recon

    def disc(self, z):
        """Score a prior sample (d_real) and the encoded latent (d_fake)."""
        # z_real ~ N(0, 1) with the same batch size as z.
        z_real = Variable(torch.randn(z.size()[0], self.z_dim) * 1.).to(self.device)
        d_real = self.discriminator(z_real)
        d_fake = self.discriminator(z)
        return d_real, d_fake

    def forward_recon(self, x):
        """Reconstruction phase forward pass."""
        z = self.encode(x)
        x_recon = self.decode(z)
        fwd_rtn = {'x_recon': x_recon,
                   'z': z}
        return fwd_rtn

    def forward_discrim(self, x):
        """Discriminator phase forward pass (encoders held in eval mode)."""
        [encoder.eval() for encoder in self.encoders]
        z = self.encode(x)
        d_real, d_fake = self.disc(z)
        fwd_rtn = {'d_real': d_real,
                   'd_fake': d_fake,
                   'z': z}
        return fwd_rtn

    def forward_gen(self, x):
        """Generator phase forward pass (discriminator held in eval mode)."""
        [encoder.train() for encoder in self.encoders]
        self.discriminator.eval()
        z = self.encode(x)
        _, d_fake = self.disc(z)
        fwd_rtn = {'d_fake': d_fake,
                   'z': z}
        return fwd_rtn

    @staticmethod
    def recon_loss(self, x, fwd_rtn):
        """Mean per-view reconstruction MSE."""
        x_recon = fwd_rtn['x_recon']
        recon = 0
        for i in range(self.n_views):
            recon += compute_mse(x[i], x_recon[i])
        return recon/self.n_views

    @staticmethod
    def generator_loss(self, fwd_rtn):
        """Non-saturating generator loss: -E[log D(z_fake)]."""
        z = fwd_rtn['z']
        d_fake = fwd_rtn['d_fake']
        gen_loss = -torch.mean(torch.log(d_fake+self.eps))
        return gen_loss

    @staticmethod
    def discriminator_loss(self, fwd_rtn):
        """Standard GAN discriminator loss: -E[log D(z_real) + log(1 - D(z_fake))]."""
        z = fwd_rtn['z']
        d_real = fwd_rtn['d_real']
        d_fake = fwd_rtn['d_fake']
        disc_loss = -torch.mean(torch.log(d_real+self.eps)+torch.log(1-d_fake+self.eps))
        return disc_loss

    def training_step(self, batch, batch_idx):
        # optimise_batch (from Optimisation_AAE) runs all three phases and
        # returns the component losses for logging.
        loss = self.optimise_batch(batch)
        self.log(f'train_loss', loss['total'], on_epoch=True, prog_bar=True, logger=True)
        self.log(f'train_recon_loss', loss['recon'], on_epoch=True, prog_bar=True, logger=True)
        self.log(f'train_disc_loss', loss['disc'], on_epoch=True, prog_bar=True, logger=True)
        self.log(f'train_gen_loss', loss['gen'], on_epoch=True, prog_bar=True, logger=True)
        return loss['total']

    def validation_step(self, batch, batch_idx):
        loss = self.validate_batch(batch)
        self.log(f'val_loss', loss['total'], on_epoch=True, prog_bar=True, logger=True)
        self.log(f'val_recon_loss', loss['recon'], on_epoch=True, prog_bar=True, logger=True)
        self.log(f'val_disc_loss', loss['disc'], on_epoch=True, prog_bar=True, logger=True)
        self.log(f'val_gen_loss', loss['gen'], on_epoch=True, prog_bar=True, logger=True)
        return loss['total']
5182561 | import argparse
from oie_readers.extraction import Extraction
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='combine conll')
    parser.add_argument('-inp', type=str, help='input conll files separated by ":"')
    parser.add_argument('-gold', type=str, help='gold conll file', default=None)
    parser.add_argument('-out', type=str, help='output conll file')
    args = parser.parse_args()
    # The two input files arrive as one colon-separated -inp argument.
    f1, f2 = args.inp.split(':')
    Extraction.combine_conll(f1, f2, args.out, gold_fn=args.gold)
158939 | <filename>setup.py<gh_stars>1-10
#!/usr/bin/env python
from setuptools import setup, find_packages
# Minimal setuptools configuration for the shares_count package; the only
# runtime dependency is socialshares.
setup(name='shares_count',
      version='1.0',
      author='vlinhart',
      author_email='<EMAIL>',
      packages=find_packages(),
      include_package_data=True,
      install_requires=['socialshares~=1.0.0'],
      scripts=[],
      dependency_links=[],
      setup_requires=('setuptools',),
      tests_require=[],
      zip_safe=False,
      )
| StarcoderdataPython |
6638399 | <gh_stars>1-10
# built-in
import json
import pickle
from pathlib import Path
from time import time
from typing import List
# app
from .cached_property import cached_property
from .config import config
class BaseCache:
    """Filesystem cache entry addressed by key path components.

    Subclasses set `ext` and implement load/dump with a concrete
    serialization format (pickle, text, JSON, ...).
    """
    ext = ''

    def __init__(self, *keys, ttl: int = -1):
        # Cache file lives under the configured cache root, one path
        # component per key.
        self.path = Path(config['cache']['path'], *keys)
        if self.ext:
            self.path = self.path.with_suffix(self.ext)
        self.ttl = ttl
        self._check_ttl()

    def _check_ttl(self) -> None:
        """Delete the cache file if older than `ttl` seconds (ttl < 0 = never expires)."""
        if self.ttl < 0:
            return
        if not self.path.exists():
            return
        if time() - self.path.stat().st_mtime > self.ttl:
            self.path.unlink()

    def __str__(self):
        return str(self.path)

    def __repr__(self):
        return '{}({})'.format(type(self), str(self.path))
class BinCache(BaseCache):
    """Cache that stores its payload with pickle."""
    ext = '.bin'

    def load(self):
        """Return the unpickled payload, or None when no cache file exists."""
        if self.path.exists():
            with self.path.open('rb') as stream:
                return pickle.load(stream)
        return None

    def dump(self, data) -> None:
        """Pickle *data* into the cache file, creating parent directories."""
        self.path.parent.mkdir(parents=True, exist_ok=True)
        with self.path.open('wb') as stream:
            pickle.dump(data, stream)
class TextCache(BaseCache):
    """Cache for a list of text lines, stored newline-separated in a .txt file."""
    ext = '.txt'

    def load(self):
        """Return the cached lines as a list, or None if the file is absent."""
        if self.path.exists():
            return self.path.read_text().split('\n')
        return None

    def dump(self, data: List[str]) -> None:
        """Write *data* joined with newlines, creating parent directories."""
        self.path.parent.mkdir(parents=True, exist_ok=True)
        self.path.write_text('\n'.join(data))
class JSONCache(BaseCache):
    """Cache for a JSON-serializable document, stored as a .json file."""
    ext = '.json'

    def load(self):
        """Return the parsed JSON content, or None if the file is missing or corrupt."""
        if not self.path.exists():
            return None
        with self.path.open('r') as stream:
            try:
                return json.load(stream)
            except json.JSONDecodeError:
                # A corrupt cache entry is treated the same as a missing one.
                return None
        # NOTE: the original had an extra unreachable `return None` here — the
        # try/except above always returns; removed.

    def dump(self, data):
        """Serialize *data* as JSON into the cache file, creating parent directories."""
        self.path.parent.mkdir(parents=True, exist_ok=True)
        with self.path.open('w') as stream:
            json.dump(data, stream)
class RequirementsCache(BaseCache):
    """Cache of a project's requirements in pip's requirements.txt format."""
    ext = '.txt'

    @cached_property
    def converter(self):
        """Lazily-built PIP converter (imported here to avoid import cycles)."""
        from .converters import PIPConverter
        return PIPConverter(lock=False)

    def load(self):
        """Parse the cached requirements file and return its dependencies.

        Returns None when no cache file exists.
        """
        if not self.path.exists():
            return None
        root = self.converter.load(self.path)
        return root.dependencies

    def dump(self, root):
        """Serialize *root*'s dependency graph into the cache file.

        NOTE(review): Graph/Requirement come from this package's controllers
        and models; `lock=False` presumably writes unpinned requirements —
        confirm against PIPConverter's API.
        """
        from .controllers import Graph
        from .models import Requirement
        self.path.parent.mkdir(parents=True, exist_ok=True)
        self.converter.dump(
            path=self.path,
            project=root,
            reqs=Requirement.from_graph(graph=Graph(root), lock=False),
        )
| StarcoderdataPython |
3594425 | <reponame>gmaterni/teimed2html<filename>writehtmlfile.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import argparse
from teixml2lib.ualog import Log
__date__ = "04-01-2021"
__version__ = "0.1.0"
__author__ = "<NAME>"
logerr = Log("a")
if __name__ == "__main__":
logerr.open("log/writehtmlfile.ERR.log", 1)
parser = argparse.ArgumentParser()
if len(sys.argv) == 1:
print("release: %s %s" % (__version__, __date__))
parser.print_help()
sys.exit(1)
try:
parser.add_argument('-i',
dest="inh",
required=True,
metavar="",
help="-i <file_in.html>")
parser.add_argument('-o',
dest="ouh",
required=True,
metavar="",
help="-o <file_out.html>")
parser.add_argument('-wa',
dest="wa",
required=False,
metavar="",
default="a",
help="[-wa w/a (w)rite a)ppend) default a")
args = parser.parse_args()
html_in=args.inh
html_ou=args.ouh
write_append=args.wa
with open(html_in, "rt") as f:
txt = f.read()
with open(html_ou, write_append) as f:
f.write(txt)
except Exception as e:
logerr.log("ERROR writehtmlfile.py")
logerr.log(e)
sys.exit(1)
| StarcoderdataPython |
4888526 | <reponame>emencia/seantis-questionnaire
# Create your views here.
from django.shortcuts import render_to_response
from django.conf import settings
from django.template import RequestContext
from django import http
from django.utils import translation
from models import Page
def page(request, page):
    """Render a public Page identified by its slug; 404 when missing or private."""
    try:
        page_obj = Page.objects.get(slug=page, public=True)
    except Page.DoesNotExist:
        raise http.Http404('%s page requested but not found' % page)
    return render_to_response(
        "page.html",
        {"request": request, "page": page_obj},
        context_instance=RequestContext(request),
    )
def langpage(request, lang, page):
    """Serve the given page slug with *lang* activated for this request.

    Two fixes versus the original:
    * ``django.utils.translation`` has no ``activate_language``; the
      documented API is ``activate()``.
    * the ``page`` parameter shadows the module-level ``page`` view, so the
      original called the slug string as a function (TypeError); the view is
      now looked up explicitly from module globals.
    """
    translation.activate(lang)
    page_view = globals()['page']
    return page_view(request, page)
def set_language(request):
    """Store the requested UI language in the session (or a cookie) and redirect back."""
    # Redirect target: explicit ?next=, then the referrer, then the site root.
    next = (request.REQUEST.get('next', None)
            or request.META.get('HTTP_REFERER', None)
            or '/')
    response = http.HttpResponseRedirect(next)
    if request.method == 'GET':
        lang_code = request.GET.get('language', None)
        if lang_code and translation.check_for_language(lang_code):
            # Prefer the session when middleware provides one; otherwise
            # fall back to the language cookie.
            if hasattr(request, 'session'):
                request.session['django_language'] = lang_code
            else:
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
    return response
| StarcoderdataPython |
8179887 | <filename>tests/unit/dataactvalidator/test_c14_award_financial_2.py
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'c14_award_financial_2'


def test_column_headers(database):
    """The rule's query must expose the expected identifier columns."""
    expected_subset = {'row_number', 'fain', 'uri', 'piid', 'uniqueid_TAS', 'uniqueid_PIID', 'uniqueid_FAIN',
                       'uniqueid_URI'}
    actual = set(query_columns(_FILE, database))
    assert expected_subset <= actual
def test_success(database):
    """ Test cases with different combinations of fain, uri, and piid """
    # Exactly one identifier present on each row -> no validation errors.
    only_fain = AwardFinancialFactory(uri=None, piid=None)
    only_uri = AwardFinancialFactory(fain=None, piid=None)
    only_piid = AwardFinancialFactory(fain=None, uri=None)
    models = [only_fain, only_uri, only_piid]
    assert number_of_errors(_FILE, database, models=models) == 0
def test_failure(database):
    """ Test with fain, uri, and piid all present """
    all_three = AwardFinancialFactory()
    # Any two identifiers present should also be flagged.
    piid_and_uri = AwardFinancialFactory(fain=None)
    piid_and_fain = AwardFinancialFactory(uri=None)
    fain_and_uri = AwardFinancialFactory(piid=None)
    models = [all_three, piid_and_uri, piid_and_fain, fain_and_uri]
    assert number_of_errors(_FILE, database, models=models) == 4
| StarcoderdataPython |
4905165 | <reponame>A-kriti/Amazing-Python-Scripts<filename>Zoom-Auto-Attend/zoomzoom.py
import json
import pyautogui
import re
import pyfiglet
import getpass
import platform
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from clint.textui import colored, puts, indent
from time import sleep
from os import system
class ZoomZoom:
    """Interactive helper that joins a Zoom meeting via browser + desktop automation.

    Class attributes are evaluated once at import time (screen size, mouse
    position, Chrome options). The pyautogui click coordinates below are
    screen-fraction heuristics tuned for a maximized window — they are
    fragile across Zoom UI versions.
    """
    user = getpass.getuser()
    data_path = 'data/meetings.json'
    screen_width, screen_height = pyautogui.size()
    currentMouseX, currentMouseY = pyautogui.position()
    chrome_options = Options()
    chrome_options.add_argument(
        f'--window-size={screen_width},{screen_height}')
    # finding the user's operating system because the process differs depending.
    # NOTE(review): platform.system() reports 'Darwin' on macOS, not 'Mac',
    # so Macs actually fall through to the else branch — same command either way.
    operating_system = platform.system()
    if operating_system == 'Linux' or operating_system == 'Mac':
        clear = 'clear'
    elif operating_system == 'Windows':
        clear = 'cls'
    else:
        clear = 'clear'
    # this function loads in the meeting data from a the json file in the data directory
    def load_meeting_data(self):
        """Load and return the saved-meetings dict from data/meetings.json."""
        with open(self.data_path, 'r') as stored_data:
            meeting_data = json.load(stored_data)
        return meeting_data
    # this function (which is much too large) is retrieving the url and password form the user
    # the url and password are then sent to the automatic_join function for use.
    def meeting_link(self, data):
        """Prompt for a meeting id/url (or a saved-meeting number) and a password.

        Returns a 3-tuple (meeting_id_or_url, password, was_saved) where
        password is 0 when no password is required and was_saved tells
        save_meeting() whether the entry already exists.
        """
        while True:
            title = pyfiglet.figlet_format('Zoom Zoom !', font='slant')
            with indent(4):
                puts(colored.cyan(title))
            print()
            with indent(4, quote=' $'):
                puts(colored.green(f'Welcome to ZoomZoom {self.user}!'))
            print()
            with indent(4, quote=' *'):
                puts(
                    colored.yellow(
                        'WARNING: make you disable any window tiling before running this script or else it will not work.'
                    ))
                puts(
                    colored.yellow(
                        'WARNING: also make sure you download and add a compatible webdriver for your browser and put it in the "webdriver" directory.'
                    ))
            print()
            with indent(4):
                puts(colored.cyan('=== List of saved zoom meetings ==='))
            print()
            # Map "1", "2", ... to saved meeting names for quick selection.
            meeting_url_list = {
                str(count): meeting_name
                for count, meeting_name in enumerate(data['meetings'], 1)
            }
            if len(meeting_url_list) == 0:
                with indent(4, quote=' *'):
                    puts(
                        colored.red(
                            'there are currently no saved zoom meetings...'))
            else:
                for key, value in meeting_url_list.items():
                    with indent(4, quote=' >'):
                        puts(colored.blue(f'{key}: {value}'))
                print()
            print()
            meeting_url = input(
                'Enter zoom meeting meeting id/url or choose the number of a saved meeting id: '
            )
            # A numeric choice of a saved meeting short-circuits the prompts.
            if meeting_url in meeting_url_list:
                system(self.clear)
                saved_meeting_url = data["meetings"][
                    meeting_url_list[meeting_url]]["id"]
                saved_meeting_psw = data["meetings"][
                    meeting_url_list[meeting_url]]["psw"]
                return (saved_meeting_url, saved_meeting_psw, True)
            print()
            verify = input('Are you sure about the entered link/id? [y/n]: ')
            if verify != 'y':
                system(self.clear)
            else:
                break
        print()
        password = input('Does this meeting require a password? [y/n]: ')
        if password == 'y':
            # Ask twice until both entries match.
            pass_check = ""
            while password != pass_check:
                print()
                password = getpass.getpass('Enter meeting password: ')
                pass_check = getpass.getpass('Enter password again: ')
                if password != pass_check:
                    print()
                    with indent(4, quote=' *'):
                        puts(colored.red('PASSWORDS DID NOT MATCH!'))
                    print()
        else:
            # 0 is the "no password" sentinel checked in automatic_join().
            password = 0
        return (meeting_url, password, False)
    def save_meeting(self, meeting_info, data):
        """Optionally persist a newly-entered meeting (id + password) to disk.

        Does nothing when meeting_info[2] is True (already a saved meeting).
        """
        if not meeting_info[2]:
            meeting_url = meeting_info[0]
            meeting_psw = meeting_info[1]
            meeting_data = data
            print()
            save_meeting = input(
                'Would you like to save this meeting for future use? [y/n]: ')
            if save_meeting == 'y':
                print()
                meeting_name = input('Enter name for saved meeting: ')
                meeting_data["meetings"].update(
                    {meeting_name: {
                        "id": 0,
                        "psw": 0
                    }})
                meeting_data["meetings"][meeting_name]["id"] = meeting_url
                meeting_data["meetings"][meeting_name]["psw"] = meeting_psw
                # NOTE(review): the password is written to meetings.json in
                # plain text.
                with open(self.data_path, 'w') as stored_data:
                    json.dump(meeting_data, stored_data)
                print()
                with indent(4, quote=' $'):
                    puts(
                        colored.green(
                            f'{meeting_name} has been saved at {self.data_path}!'
                        ))
                sleep(1)
                system(self.clear)
    # This is the main function for joining the zoom session.
    # This function takes either a meeting id or url and its password
    # Then uses selenium and pyautogui to enter the information into the browser
    # And then enter the information into the zoom app on your desktop.
    def automatic_join(self, meeting_info):
        """Drive zoom.us in Chrome, then the desktop Zoom app, to join the meeting."""
        meeting_id = meeting_info[0]
        # Full URLs carry the numeric id after a slash; extract just the digits.
        if meeting_id[:5] == 'https':
            meeting_id = re.search(r"\b/\d+", meeting_info[0]).group()[1:]
        meeting_psw = meeting_info[1]
        browser = webdriver.Chrome(options=self.chrome_options,
                                   executable_path='webdriver/chromedriver')
        browser.get('https://zoom.us/')
        join_meeting = browser.find_element_by_xpath(
            '//*[@id="btnJoinMeeting"]')
        join_meeting.click()
        sleep(1)
        # Type the meeting id one key at a time into the focused input.
        for char in meeting_id:
            pyautogui.press(char)
        join_button = browser.find_element_by_xpath('//*[@id="btnSubmit"]')
        join_button.click()
        sleep(1)
        # For linux users.
        # This clicks the open window in the browser to open the zoom app on your computer.
        if self.operating_system == 'Linux':
            pyautogui.click(0.552 * self.screen_width,
                            0.152 * self.screen_height)
        else:
            pyautogui.click(0.552 * self.screen_width,
                            0.152 * self.screen_height + 75)
        # clicks join without video button when no password is needed.
        sleep(1)
        pyautogui.click(0.597 * self.screen_width, 0.625 * self.screen_height)
        # closes the zoom window to expose the password window
        sleep(1)
        if self.operating_system == 'Linux':
            pyautogui.press('escape')
        # Enters the password into the zoom password box only if a password is needed.
        if meeting_psw != 0:
            sleep(1)
            for char in meeting_psw:
                pyautogui.press(char)
            # clicks to submit entered password on the screen
            sleep(1)
            pyautogui.click(self.screen_width / 2, self.screen_height * 0.594)
            # clicks to join without video if a password is needed
            sleep(1)
            pyautogui.click(0.588 * self.screen_width, 0.628 * self.screen_height)
if __name__ == '__main__':
    # Clear the terminal, gather meeting details interactively, optionally
    # save them, then drive the browser/desktop join flow.
    zoom_zoom = ZoomZoom()
    clear = zoom_zoom.clear
    system(clear)
    meeting_data = zoom_zoom.load_meeting_data()
    meeting_info = zoom_zoom.meeting_link(meeting_data)
    zoom_zoom.save_meeting(meeting_info, meeting_data)
    zoom_zoom.automatic_join(meeting_info)
| StarcoderdataPython |
11356482 | import RPi.GPIO as GPIO
import os, sys, kintone, time
from kintone import getCurrentTimeStamp
GPIO.setmode(GPIO.BCM)
# Start writing your program below
# kintone connection settings — placeholders to be filled in by the user.
sdomain = "SUB-DOMAIN-NAME"
appId = "APP-ID-NUMBER"
token = "APP-TOKEN"
# BCM pin numbers: 19 drives the button output, 26 reads the push switch.
button = 19
buttonSwitch = 26
GPIO.setup(button, GPIO.OUT)
GPIO.setup(buttonSwitch, GPIO.IN)
GPIO.output(button, GPIO.HIGH)
while True:
    try:
        # Switch reads LOW when pressed.
        if GPIO.input(buttonSwitch) == GPIO.LOW:
            print("Button Pushed!", end=" ")
            timeStamp = getCurrentTimeStamp()
            picFile = "pic/" + timeStamp + ".jpg"
            # Capture an 800x600 still after a 500 ms warm-up.
            command = "raspistill -t 500 -w 800 -h 600 -o " + picFile
            status = os.system(command)
            if(status==0):
                print(timeStamp, end=" ")
                print("Photo captured.")
            else:
                print("Failed to capture a picture")
                # sys.exit() raises SystemExit, which the bare except below
                # catches, so this actually breaks out of the loop.
                sys.exit()
            # Upload the photo, then create a record referencing it.
            fileKey = kintone.uploadFile(subDomain=sdomain,
                                         apiToken=token,
                                         filePath=picFile)
            if fileKey is None:
                sys.exit()
            memo = "Hi from Raspi!"
            payload = {"app": appId,
                       "record": {"photo": {"value": [{"fileKey": fileKey}] },
                                  "memo": {"value": memo} }}
            recordId = kintone.uploadRecord(subDomain=sdomain,
                                            apiToken=token,
                                            record=payload)
            if recordId is None:
                sys.exit()
            # time.sleep(2)
    except:
        # Bare except: any failure (including Ctrl-C and the sys.exit()
        # calls above) exits the loop so GPIO.cleanup() below still runs.
        break
# Write your program above this line
GPIO.cleanup()
6629082 | <reponame>Nightwish-cn/my_leetcode
class Solution:
    def checkPerfectNumber(self, num):
        """Return True iff *num* equals the sum of its proper divisors.

        :type num: int
        :rtype: bool

        Fix: the original returned True for num == 0 (the divisor loop never
        ran and sum stayed at -num == 0 == num); values <= 1 are never
        perfect, so they are rejected up front.
        """
        if num <= 1:
            return False
        total = 1  # 1 divides every num > 1; num itself is excluded
        i = 2
        while i * i <= num:
            if num % i == 0:
                total += i
                # Add the paired divisor, avoiding double-counting sqrt(num).
                if i != num // i:
                    total += num // i
            i += 1
        return total == num
11383402 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
class TestSchemaRef(object):

    def test_source_to_ref_map_is_complete(
        self,
        schema_ref,
        good_source_ref,
        bad_source_ref,
        source
    ):
        """Known sources resolve to their ref; unknown ones get the fallback."""
        resolved = schema_ref.get_source_ref(source)
        fallback = schema_ref.get_source_ref('bad_source')
        assert resolved == good_source_ref
        assert fallback == bad_source_ref
        assert len(schema_ref._source_to_ref_map) == 2

    def test_source_to_ref_map_can_be_empty(self, schema_ref):
        """An empty schema_ref yields an empty source-to-ref map."""
        schema_ref.schema_ref = {}
        assert len(schema_ref._source_to_ref_map) == 0

    def test_defaults_are_respected(self, schema_ref, schema_ref_defaults):
        """Every configured default is returned for an unknown source."""
        for key, expected in schema_ref_defaults.items():
            assert schema_ref.get_source_val('bad_source', key) == expected
| StarcoderdataPython |
331922 |
import unittest
from programy.storage.stores.sql.dao.trigger import Trigger
class TriggerTests(unittest.TestCase):

    def test_init(self):
        """A Trigger's repr shows 'n/a' without an id and the id when given."""
        no_id = Trigger(name='name', trigger_class='class')
        self.assertIsNotNone(no_id)
        expected = "<Trigger(id='n/a', name='name', trigger_class='class')>"
        self.assertEqual(expected, str(no_id))

        with_id = Trigger(id=1, name='name', trigger_class='class')
        self.assertIsNotNone(with_id)
        expected = "<Trigger(id='1', name='name', trigger_class='class')>"
        self.assertEqual(expected, str(with_id))
| StarcoderdataPython |
11263610 | <filename>flask_reddit/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, render_template, url_for
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug.routing import BaseConverter
# Application factory-less setup: create the app, load config from the
# top-level `config` module, and bind SQLAlchemy to it.
app = Flask(__name__, static_url_path='/static')
app.config.from_object('config')
db = SQLAlchemy(app)
class RegexConverter(BaseConverter):
    """Werkzeug URL converter whose pattern is supplied by the route rule."""

    def __init__(self, url_map, *items):
        BaseConverter.__init__(self, url_map)
        # The first converter argument in the rule is the regex itself.
        self.regex = items[0]


app.url_map.converters['regex'] = RegexConverter
@app.errorhandler(404)
def not_found(error):
    """Render the custom 404 page."""
    return render_template('404.html'), 404


@app.errorhandler(500)
def internal_error(error):
    """Render the custom 500 page.

    Renamed from ``not_found``: the original defined two functions with the
    same name, so the second silently shadowed the first at module level
    (both handlers still registered via the decorator, but the duplicate
    name is misleading and trips F811).
    """
    return render_template('500.html'), 500
# Blueprint registration. NOTE(review): these imports are deliberately
# placed after `app` is created — presumably because the view modules import
# `app`/`db` from this package (the standard Flask circular-import pattern);
# confirm before moving them to the top of the file.
from flask_reddit.users.views import mod as users_module
app.register_blueprint(users_module)
from flask_reddit.threads.views import mod as threads_module
app.register_blueprint(threads_module)
from flask_reddit.frontends.views import mod as frontends_module
app.register_blueprint(frontends_module)
from flask_reddit.apis.views import mod as apis_module
app.register_blueprint(apis_module)
from flask_reddit.subreddits.views import mod as subreddits_module
app.register_blueprint(subreddits_module)
def custom_render(template, *args, **kwargs):
    """Thin pass-through wrapper around Flask's render_template for flask_reddit views."""
    rendered = render_template(template, *args, **kwargs)
    return rendered
app.debug = app.config['DEBUG']
if __name__ == '__main__':
    # NOTE: Python 2 print statement — this module targets Python 2.
    print 'We are running flask via main()'
    app.run()
| StarcoderdataPython |
1744912 | from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.chainer_experimental.datasets.sliceable.sliceable_dataset \
import _is_iterable
class TransformDataset(GetterDataset):
    """A sliceable version of :class:`chainer.datasets.TransformDataset`.
    Note that it requires :obj:`keys` to determine the names of returned
    values.
    Here is an example.
    >>> def transfrom(in_data):
    >>>     img, bbox, label = in_data
    >>>     ...
    >>>     return new_img, new_label
    >>>
    >>> dataset = TramsformDataset(dataset, ('img', 'label'), transform)
    >>> dataset.keys  # ('img', 'label')
    Args:
        dataset: The underlying dataset.
            This dataset should have :meth:`__len__` and :meth:`__getitem__`.
        keys (string or tuple of strings): The name(s) of data
            that the transform function returns.
            If this parameter is omitted, :meth:`__init__` fetches a sample
            from the underlying dataset to determine the number of data.
        transform (callable): A function that is called to transform values
            returned by the underlying dataset's :meth:`__getitem__`.
    """
    def __init__(self, dataset, keys, transform=None):
        # Two-argument call form: TransformDataset(dataset, transform).
        # In that case `keys` actually holds the transform, and key names
        # are inferred from a sample below.
        if transform is None:
            keys, transform = None, keys
        super(TransformDataset, self).__init__()
        self._dataset = dataset
        self._transform = transform
        if keys is None:
            # Probe one transformed sample to count the returned values.
            sample = self._get(0)
            if isinstance(sample, tuple):
                keys = (None,) * len(sample)
            else:
                keys = None
        self.add_getter(keys, self._get)
        if not _is_iterable(keys):
            # Single-value datasets expose the lone getter as `keys = 0`.
            self.keys = 0
    def __len__(self):
        # Length is delegated to the underlying dataset.
        return len(self._dataset)
    def _get(self, index):
        # Fetch the raw sample and run it through the transform.
        return self._transform(self._dataset[index])
| StarcoderdataPython |
3314841 | """
You are given an array A of strings.
A move onto S consists of swapping any two even indexed characters of S, or any two odd indexed characters of S.
Two strings S and T are special-equivalent if after any number of moves onto S, S == T.
For example, S = "zzxy" and T = "xyzz" are special-equivalent because we may make the moves "zzxy" -> "xzzy" -> "xyzz" that swap S[0] and S[2], then S[1] and S[3].
Now, a group of special-equivalent strings from A is a non-empty subset of A such that:
Every pair of strings in the group are special equivalent, and;
The group is the largest size possible (ie., there isn't a string S not in the group such that S is special equivalent to every string in the group)
Return the number of groups of special-equivalent strings from A.
"""
class Solution:
    def numSpecialEquivGroups(self, A: List[str]) -> int:
        """Count groups of special-equivalent strings.

        Two strings are special-equivalent iff their even-indexed characters
        are a permutation of each other, and likewise for odd-indexed
        characters. Each string therefore reduces to a canonical signature
        (sorted even-index chars, sorted odd-index chars), and the answer is
        the number of distinct signatures.

        Improvement over the original: the per-string `counter not in group`
        list scan was O(len(A)) per element (quadratic overall); a set of
        hashable signatures makes membership O(1).
        """
        return len({(tuple(sorted(s[::2])), tuple(sorted(s[1::2]))) for s in A})
| StarcoderdataPython |
8123586 | <gh_stars>1-10
import re
from w3af.plugins.attack.payloads.base_payload import Payload
from w3af.core.ui.console.tables import table
class gcc_version(Payload):
    """
    This payload shows the current GCC Version
    """
    def api_read(self):
        """Return {'gcc_version': <version string>} or {} when parsing fails."""
        result = {}

        def parse_gcc_version(proc_version):
            # /proc/version ends with e.g. "(gcc version 4.8.4 (Ubuntu ...))".
            gcc_version = re.search(r'(?<=gcc version ).*?\)', proc_version)
            if gcc_version:
                return gcc_version.group(0)
            else:
                return ''

        version = parse_gcc_version(self.shell.read('/proc/version'))
        if version:
            result['gcc_version'] = version
        return result

    def run_read(self):
        """Draw the version as a console table and return the rows.

        Returns an explanatory string instead when no version was found.
        """
        api_result = self.api_read()
        # api_read() omits the key entirely on failure, so the original
        # direct index raised KeyError here instead of ever reaching the
        # friendly message; .get() handles both the missing and empty cases.
        if not api_result.get('gcc_version'):
            return 'GCC version could not be identified.'
        else:
            rows = []
            rows.append(['GCC Version', api_result['gcc_version']])
            result_table = table(rows)
            result_table.draw(80)
            return rows
| StarcoderdataPython |
4909849 | <reponame>frankier/sklearn-ann
from sklearn.neighbors import KNeighborsTransformer
from functools import partial
BallTreeTransformer = partial(KNeighborsTransformer, algorithm="ball_tree")
KDTreeTransformer = partial(KNeighborsTransformer, algorithm="kd_tree")
BruteTransformer = partial(KNeighborsTransformer, algorithm="brute")
__all__ = ["BallTreeTransformer", "KDTreeTransformer", "BruteTransformer"]
| StarcoderdataPython |
11575 | import shutil
from pathlib import Path
from tempfile import mkdtemp
import pytest
from click.testing import CliRunner
import ape
# NOTE: Ensure that we don't use local paths for these
# Redirect ape's data and project folders to throwaway temp directories so
# the test session never touches the user's real configuration on disk.
ape.config.DATA_FOLDER = Path(mkdtemp()).resolve()
ape.config.PROJECT_FOLDER = Path(mkdtemp()).resolve()
@pytest.fixture(scope="session")
def config():
yield ape.config
@pytest.fixture(scope="session")
def data_folder(config):
yield config.DATA_FOLDER
@pytest.fixture(scope="session")
def plugin_manager():
yield ape.networks.plugin_manager
@pytest.fixture(scope="session")
def accounts():
yield ape.accounts
@pytest.fixture(scope="session")
def compilers():
yield ape.compilers
@pytest.fixture(scope="session")
def networks():
yield ape.networks
@pytest.fixture(scope="session")
def chain():
yield ape.chain
@pytest.fixture(scope="session")
def project_folder(config):
yield config.PROJECT_FOLDER
@pytest.fixture(scope="session")
def project(config):
yield ape.Project(config.PROJECT_FOLDER)
@pytest.fixture
def keyparams():
    # Sample eth-keyfile (web3 secret storage v3) dict for account tests.
    # NOTE(review): several values below contain `<PASSWORD>` placeholders
    # left by data anonymization — this fixture is not a valid keyfile as-is.
    # NOTE: password is 'a'
    return {
        "address": "7e5f4552091a69125d5dfcb7b8c2659029395bdf",
        "crypto": {
            "cipher": "aes-128-ctr",
            "cipherparams": {"iv": "7bc492fb5dca4fe80fd47645b2aad0ff"},
            "ciphertext": "43beb65018a35c31494f642ec535315897634b021d7ec5bb8e0e2172387e2812",
            "kdf": "scrypt",
            "kdfparams": {
                "dklen": 32,
                "n": 262144,
                "r": 1,
                "p": 8,
                "salt": "<PASSWORD>",
            },
            "mac": "6a1d520975a031e11fc16cff610f5ae7476bcae4f2f598bc59ccffeae33b1caa",
        },
        "id": "ee<PASSWORD>",
        "version": 3,
    }
@pytest.fixture
def temp_accounts_path(config):
    # Per-test accounts directory under the temporary DATA_FOLDER,
    # removed again on teardown.
    path = Path(config.DATA_FOLDER) / "accounts"
    path.mkdir(exist_ok=True, parents=True)
    yield path
    if path.exists():
        shutil.rmtree(path)
@pytest.fixture
def runner(project):
    # Click CLI test runner; depends on `project` so the project folder exists.
    yield CliRunner()
| StarcoderdataPython |
8039464 | # -*- coding: utf-8 -*-
"""
校验提交信息是否包含规范的前缀
"""
from __future__ import absolute_import, print_function, unicode_literals
import sys
try:
    # Python 2 only: switch the default string encoding to utf-8 so the
    # Chinese prefix descriptions below can be printed safely.
    reload(sys)
    sys.setdefaultencoding("utf-8")
except NameError:
    # py3: reload() is not a builtin and the default encoding is already
    # utf-8, so there is nothing to do.
    pass
ALLOWED_COMMIT_MSG_PREFIX = [
    ("feature", "新特性"),
    ("bugfix", "线上功能bug"),
    ("minor", "不重要的修改(换行,拼写错误等)"),
    ("optimization", "功能优化"),
    ("sprintfix", "未上线代码修改 (功能模块未上线部分bug)"),
    ("refactor", "功能重构"),
    ("test", "增加测试代码"),
    ("docs", "编写文档"),
    ("merge", "分支合并及冲突解决"),
]


def get_commit_message():
    """Return the stripped, lower-cased commit message text.

    Returns the int 0 (skip marker) when no COMMIT_EDITMSG path was passed
    on the command line.
    """
    args = sys.argv
    if len(args) <= 1:
        print("Warning: The path of file `COMMIT_EDITMSG` not given, skipped!")
        return 0
    commit_message_filepath = args[1]
    with open(commit_message_filepath, "r") as fd:
        content = fd.read()
    return content.strip().lower()


def main():
    """Return 0 when the commit message starts with an allowed prefix, else 1.

    Fix: when get_commit_message() returned the int 0 (no file path given),
    the original called `content.startswith(...)` on it and crashed with
    AttributeError; the check is now explicitly skipped in that case.
    """
    content = get_commit_message()
    if not isinstance(content, str):
        # No COMMIT_EDITMSG path supplied — treat the check as skipped.
        return 0
    for prefix, _description in ALLOWED_COMMIT_MSG_PREFIX:
        if content.startswith(prefix):
            return 0
    print("Commit Message 不符合规范!必须包含以下前缀之一:")
    # Plain loop instead of the original side-effect list comprehension.
    for prefix in ALLOWED_COMMIT_MSG_PREFIX:
        print("%-12s\t- %s" % prefix)
    return 1


if __name__ == "__main__":
    exit(main())
| StarcoderdataPython |
4987514 | <reponame>gupta19avaneesh/DataScience
import numpy as np
import l21cca
from sklearn.preprocessing import StandardScaler
# Demo: run l21-regularized CCA on ten random 10x10 views.
X=[np.random.randn(10,10) for i in range(10)]
# Reduce to 5 components with default hyper-parameters.
reduced1=l21cca.l21_cca(X,5)
# NOTE(review): the extra positional arguments (10, 20, 5, 5) presumably set
# dimensionality/iteration hyper-parameters — confirm against l21cca's API.
reduced2=l21cca.l21_cca(X,10,20,5,5)
| StarcoderdataPython |
6423884 | from rdisq.service import RdisqService, remote_method
from rdisq.redis_dispatcher import PoolRedisDispatcher
class GrumpyException(Exception):
    """Exception raised by SimpleWorker.grumpy's remote method."""
    pass
class SimpleWorker(RdisqService):
    """Example rdisq service exposing a few methods callable over Redis."""
    # NOTE(review): service_name presumably names the Redis queue/channel the
    # service listens on — confirm against RdisqService.
    service_name = "MyClass"
    response_timeout = 10  # seconds
    redis_dispatcher = PoolRedisDispatcher(host='127.0.0.1', port=6379, db=0)
    @staticmethod
    @remote_method
    def add(a, b):
        """Return a + b — a pure, stateless remote call."""
        # Do some simple work
        return a + b
    @remote_method
    def build(self, what, tool=None):
        """Print a build message; demonstrates positional and keyword args."""
        # Showing here that args and kwargs can be used
        if tool is not None:
            print("%s: I built you %s, using a %s" % (self.service_name, what, tool,))
        else:
            print("%s: I built you %s, using a my bear [sic] hands" % (self.service_name, what, ))
        # Return a dict, just to spice things up a bit
        return {"message from the worker": "I'm done!"}
    @staticmethod
    @remote_method
    def grumpy():
        """Always raise, to demonstrate error propagation back to the caller."""
        raise GrumpyException("I'M ALWAYS GRUMPY!")
if __name__ == '__main__':
    # Instantiate the worker and enter its message-processing loop.
    myClass = SimpleWorker()
    myClass.process()
| StarcoderdataPython |
8199706 | <gh_stars>0
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Extract used apis from markdown and reStructured documents.
"""
import re
import inspect
import os
import argparse
import logging
from contextlib import contextmanager
import docutils
import docutils.core
import docutils.nodes
import markdown
# Configure the root logger with a detailed formatter, reusing an existing
# handler when one is already attached.
logger = logging.getLogger()
if logger.handlers:
    # we assume the first handler is the one we want to configure
    console = logger.handlers[0]
else:
    console = logging.StreamHandler()
    logger.addHandler(console)
console.setFormatter(
    logging.Formatter(
        "%(asctime)s - %(funcName)s:%(lineno)d - %(levelname)s - %(message)s"))
def extract_code_blocks_from_rst(docstr):
    """
    extract code-blocks from the given docstring.
    DON'T include the multiline-string definition in code-blocks.
    Args:
        docstr - docstring
    Return:
        A list of code-blocks, indent removed.
    """
    # Line-oriented state machine: a block opens at a `code::`/`code-block::`
    # python directive and closes when a line dedents past the block's
    # first-content indent (or at end of input).
    code_blocks = []
    ds_list = docstr.expandtabs(tabsize=4).split("\n")
    lastlineindex = len(ds_list) - 1
    cb_started = False        # inside a code-block directive?
    cb_start_pat = re.compile(r"((code)|(code-block))::\s*i?python[23]?")
    cb_cur = []               # lines collected for the current block
    cb_cur_indent = -1        # indent of the block's first non-empty line (-1 = unknown)
    for lineno, linecont in enumerate(ds_list):
        if re.search(cb_start_pat, linecont):
            if not cb_started:
                cb_started = True
                continue
            else:
                # cur block end
                # A new directive while collecting: flush the previous block
                # and start over.
                if len(cb_cur):
                    code_blocks.append(inspect.cleandoc("\n".join(cb_cur)))
                cb_started = True  # another block started
                cb_cur_indent = -1
                cb_cur = []
        else:
            # check indent for cur block ends.
            if cb_started:
                if lineno == lastlineindex:
                    # Last line of input: take it if still indented enough,
                    # then flush whatever was collected.
                    mo = re.search(r"\S", linecont)
                    if mo is not None and cb_cur_indent <= mo.start():
                        cb_cur.append(linecont)
                    if len(cb_cur):
                        code_blocks.append(inspect.cleandoc("\n".join(cb_cur)))
                    break
                if cb_cur_indent < 0:
                    # First content line fixes the block's reference indent;
                    # blank lines before it are skipped.
                    mo = re.search(r"\S", linecont)
                    if mo is None: continue
                    cb_cur_indent = mo.start()
                    cb_cur.append(linecont)
                else:
                    mo = re.search(r"\S", linecont)
                    if mo is None: continue
                    if cb_cur_indent <= mo.start():
                        cb_cur.append(linecont)
                    else:
                        # Dedented line: a comment is tolerated, anything
                        # else terminates the block.
                        if linecont[mo.start()] == '#':
                            continue
                        else:
                            # block end
                            if len(cb_cur):
                                code_blocks.append(
                                    inspect.cleandoc("\n".join(cb_cur)))
                            cb_started = False
                            cb_cur_indent = -1
                            cb_cur = []
    logger.info('extracted %d code blocks.', len(code_blocks))
    return code_blocks
def extract_code_blocks_from_md(docstr):
    """
    Extract python code blocks from markdown content.

    Returns the dedented code inside each fenced ```python block, without
    the fence lines themselves.
    """
    pat = re.compile(r"```i?python[23]?(.*?)```", re.MULTILINE + re.DOTALL)
    # group(1) is the captured fenced content; the original appended the
    # whole match (group()), which left the ``` fences inside every
    # extracted block even though the capture group existed for this.
    return [inspect.cleandoc(m.group(1)) for m in pat.finditer(docstr)]
def extract_code_blocks_from_file(filename):
    """Extract python code blocks from a .md or .rst file.

    Returns an empty list for any other extension.

    Fix: the original called ``open(filename, 'r').read()`` without closing
    the file handle; the ``with`` blocks below close it deterministically.
    """
    ext = os.path.splitext(filename)[1].lower()
    if ext == '.md':
        with open(filename, 'r') as f:
            return extract_code_blocks_from_md(f.read())
    if ext == '.rst':
        with open(filename, 'r') as f:
            return extract_code_blocks_from_rst(f.read())
    return []
def get_all_files(p):
    """
    Recursively collect the .md/.rst filenames under directory *p*.
    """
    filelist = []
    for path, _dirs, files in os.walk(p):
        for filename in files:
            logger.info('%s found', filename)
            # os.path.splitext always returns a 2-tuple, so the original
            # `len(r) == 2` guard was dead code and has been dropped.
            if os.path.splitext(filename)[1].lower() in ('.md', '.rst'):
                filelist.append(os.path.join(path, filename))
    logger.info('find %d files from %s.', len(filelist), p)
    return filelist
def find_all_paddle_api_from_code_block(cbstr):
    """Collect every paddle.* API name referenced in a code block.

    Handles three shapes: ``from paddle.x import Y`` (recorded as
    ``paddle.x.Y``), dotted uses like ``paddle.a.b.C(...)``, and skips
    anything after a ``#`` comment or inside a triple-quoted docstring.
    """
    found = set()
    import_pat = re.compile(r'from\s+([\.\w]+)\s+import\s+(\w+)')
    normal_pat = re.compile(r'(paddle\.[\.\w]+)')
    docstr_pat = re.compile(r'((\'{3})|(\"{3}))')
    inside_docstring = False
    for raw_line in cbstr.split("\n"):
        line = raw_line.strip()
        # Every ''' or """ occurrence flips the docstring state; an odd
        # count on a line therefore toggles it overall.
        if len(docstr_pat.findall(line)) % 2:
            inside_docstring = not inside_docstring
        elif False:
            pass
        if inside_docstring:
            continue
        comment_at = line.find('#')
        from_import = import_pat.search(line)
        if from_import:
            module, name = from_import.group(1), from_import.group(2)
            if (comment_at < 0 or from_import.start() < comment_at) \
                    and module.startswith('paddle'):
                found.add('{}.{}'.format(module, name))
        else:
            for hit in normal_pat.finditer(line):
                if comment_at < 0 or hit.start() < comment_at:
                    found.add(hit.group(1))
    return found
def extract_api_from_file(filename):
    """Return the set of paddle APIs used by the code blocks of one doc file."""
    codeblocks = extract_code_blocks_from_file(filename)
    logger.info('find %d code-blocks from %s.', len(codeblocks), filename)
    api_set = set()
    for block in codeblocks:
        api_set.update(find_all_paddle_api_from_code_block(block))
    logger.info('find %d apis from %s.', len(api_set), filename)
    return api_set
def extract_doc_title_from_file(filename):
    """Return the document title of a .md or .rst file, or None otherwise.

    The original guarded on ``len(os.path.splitext(...)) != 2``, which can
    never be true (splitext always returns a 2-tuple); that dead check has
    been removed.
    """
    ext = os.path.splitext(filename)[1].lower()
    if ext == '.md':
        return extract_md_title(filename)
    if ext == '.rst':
        return extract_rst_title(filename)
    return None
@contextmanager
def find_node_by_class(doctree, node_class, remove):
    """Yield the first child of *doctree* matching *node_class*, or None.

    When *remove* is true and a node was found, it is deleted from the tree
    after the ``with`` body finishes.
    """
    index = doctree.first_child_matching_class(node_class)
    if index is None:
        yield None
        return
    yield doctree[index]
    if remove:
        del doctree[index]
def extract_rst_title(filename):
    """Return the first section title of a reStructuredText file, or None."""
    overrides = {
        # Disable the promotion of a lone top-level section title to document
        # title (and subsequent section title to document subtitle promotion).
        'docinfo_xform': 0,
        'initial_header_level': 2,
    }
    with open(filename, 'r') as fileobj:
        doctree = docutils.core.publish_doctree(
            fileobj.read(), settings_overrides=overrides)
        with find_node_by_class(
                doctree, docutils.nodes.title, remove=True) as title_node:
            if title_node is None:
                return None
            return title_node.astext()
def extract_md_title(filename):
    """Return the first <h1> title of a markdown file (unwrapping <strong>), or None."""
    with open(filename, 'r') as fileobj:
        html = markdown.markdown(fileobj.read())
    h1 = re.search(r'<h1>(.*?)</h1>', html)
    if h1 is None:
        return None
    strong = re.search(r'<strong>(.*?)</strong>', h1.group(1))
    return strong.group(1) if strong else h1.group(1)
def format_filename(filename):
    """
    Format the filename.
    filename may be "/FluidDoc/doc/paddle/guides/xxx" or "../guides/xxx";
    format it as "guides/xxx". get_all_files does not format its results.
    """
    rp = os.path.realpath(filename)
    pat_str = 'doc/paddle/'  # if the doc tree layout changes, update this
    # str.find() returns -1 when the pattern is absent; the original used
    # str.index(), which raises ValueError instead of ever reaching the
    # `ind >= 0` fallback below.
    ind = rp.find(pat_str)
    if ind >= 0:
        return rp[ind + len(pat_str):]
    return filename
def extract_all_infos(docdirs):
    """Walk every directory in *docdirs*; return ({file: [apis]}, {file: title})."""
    apis_dict = {}
    file_titles = {}
    for docdir in docdirs:
        for fn in get_all_files(docdir):
            ffn = format_filename(fn)
            file_titles[ffn] = extract_doc_title_from_file(fn)
            apis = extract_api_from_file(fn)
            # Only record files that actually reference paddle APIs.
            if apis:
                apis_dict[ffn] = list(apis)
    return apis_dict, file_titles
arguments = [
    # flags, dest, type, default, help
    [
        '--output', 'output', str, 'called_apis_from_docs.json',
        'output filename. default: called_apis_from_docs.json'
    ],
]


def parse_args():
    """
    Parse input arguments.
    """
    parser = argparse.ArgumentParser(
        description='extract all the called apis from md or reST files.')
    parser.add_argument(
        'dir',
        type=str,
        help='travel all the files include this directory',
        default='.',
        nargs='+')
    # `arguments` is only read here, so the original `global arguments`
    # declaration was unnecessary and has been removed.
    for item in arguments:
        parser.add_argument(
            item[0], dest=item[1], help=item[4], type=item[2], default=item[3])
    return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
print('{}'.format(args))
logger.setLevel(logging.DEBUG)
apis_dict, file_titles = extract_all_infos(args.dir)
import json
with open(args.output, 'w') as f:
json.dump(apis_dict, f, indent=4)
r = os.path.splitext(args.output)
with open('{}-titles{}'.format(r[0], r[1]), 'w') as f:
json.dump(file_titles, f, indent=4)
print('Done')
| StarcoderdataPython |
3273175 | from ....utils.code_utils import deprecate_module
# Legacy alias: "ediFilesUtils" was renamed "edi_files_utils" in 0.16.0.
# NOTE(review): with error=True this presumably raises (rather than warns)
# when the old path is imported — confirm against utils.code_utils.
deprecate_module("ediFilesUtils", "edi_files_utils", "0.16.0", error=True)
from .edi_files_utils import *
| StarcoderdataPython |
227893 | from linebot.models import ImageSendMessage, TextSendMessage
from config.line_bot_api import line_bot_api, heroku_url, num_list
import re
class TextMessageUtil:
    """Handle a LINE text message that may contain a bonus-photo number."""

    def __init__(self, event):
        # line-bot MessageEvent carrying the user's text and reply token.
        self.event = event

    def send_pass_image(self):
        """Reply with the bonus image whose number appears in the message.

        Silently does nothing when the text contains no digits or when the
        number is not one of the known bonus numbers.
        """
        num = re.sub(r"\D", "", self.event.message.text)
        # Guard the empty string: int('') raises ValueError, so a message
        # with no digits would previously crash the handler.
        if not num or int(num) not in num_list:
            return
        image_url = f'{heroku_url}/static/{num}.jpg'
        image_message = ImageSendMessage(
            original_content_url=image_url,
            preview_image_url=image_url
        )
        announce_message = TextSendMessage(
            text="はしれ!まえせつをプレイしてくれてありがとう!\n\n今回の特典写真はこちら!")
        thx_message = TextSendMessage(
            text="ほかにも様々な特典が隠されているので引き続きプレイしてゲットしよう!\n\nhttps://iudaichi.github.io/maesetu_run_game/")
        messages = [announce_message, image_message, thx_message]
        line_bot_api.reply_message(
            self.event.reply_token, messages=messages)
| StarcoderdataPython |
5177531 | <gh_stars>1-10
#TUI Form
def main():
    """Read three numbers from the user and report the largest."""
    prompts = ("Enter the first number:",
               "Enter the second number:",
               "Enter the third number:")
    # NOTE(review): eval() on raw user input executes arbitrary code; kept
    # only for behavioral compatibility — float()/int() would be safer.
    values = [eval(input(prompt)) for prompt in prompts]
    print("The largest number among the three is:", str(max(values)))

main()
| StarcoderdataPython |
# California state symbols, keyed by symbol category.
california_symbols = {
    'bird': 'California quail',
    'animal': 'Grizzly bear',
    'flower': 'California poppy',
    'fruit': 'Avocado',
}

# Look up and display the state flower.
california_flower = california_symbols['flower']
print(california_flower)
312824 | <reponame>wongongv/scholarship_wonjun<gh_stars>0
from __future__ import division, print_function, absolute_import, unicode_literals
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
import tensorflow.keras.layers as layers
import pandas as pd
# get the image
def load_img(path):
    """Read and decode the PNG at *path* into a float32 tensor in [0, 1]."""
    img = tf.io.read_file(path)
    # assumed image is in png extension
    img = tf.image.decode_png(img)
    img = tf.image.convert_image_dtype(img, tf.float32)
    return img
def load_data():
    """Load every PNG found under ./data/satelite as a float32 tensor."""
    images = []
    # Walk the satellite-image directory tree; adjust the root here if the
    # data layout changes.
    for dirpath, _dirnames, filenames in os.walk("./data/satelite"):
        for filename in filenames:
            if filename.endswith("png"):
                images.append(load_img(os.path.join(dirpath, filename)))
    return images
def preprocess_input(inputs):
    """Group every 8 consecutive frames (one hour of imagery) into one sample.

    inputs: sequence/tensor of frames sharing one image shape.
    Returns a tensor whose first axis indexes hours; the 8 frames of each
    hour are flattened into the following axis (a trailing partial hour is
    dropped).
    """
    inputs = tf.cast(inputs, dtype=tf.float32)
    input_size = inputs.shape[0]
    # ">=": the message promises "at least 8"; the original "> 8" wrongly
    # rejected exactly one hour of frames.
    assert input_size >= 8, "num of input images should be at least 8"
    # there are 8 images in one hour
    concat_num = 8
    # Use concat_num consistently instead of a second hard-coded 8.
    indices = np.arange(concat_num) + np.arange(0, input_size, concat_num)[:, np.newaxis]
    if input_size % concat_num != 0:
        # Drop the incomplete final hour.
        indices = indices[:-1]
    indices = indices[..., np.newaxis]
    res = tf.gather_nd(inputs, indices)
    res = tf.reshape(res, tf.concat([res.shape[0:1], [-1], res.shape[3:]], 0))
    return res
# build a model
imgs = load_data()
imgs = preprocess_input(imgs)
optimizer = tf.keras.optimizers.Adam(1e-4)
#batch_size doesnt matter. revise it.
def load_label():
    """Load the two target columns from the labels spreadsheet as float32."""
    data = pd.read_excel(io='./data/labels/data_past_time.xls')
    # DataFrame.as_matrix() was removed in pandas >= 1.0; to_numpy() is the
    # supported replacement.  [1:] skips the first data row, as before.
    data = data.iloc[:, 1:3].to_numpy()[1:].astype(np.float32)
    data = tf.cast(data, tf.float32)
    return data
#build model
class customized_model(tf.keras.Model):
    """ImageNet-pretrained VGG19 backbone plus a dense regression head
    mapping an image batch to 2 output values (the two label columns).

    Note: lower-case class name kept for compatibility with existing
    callers (PEP 8 would use CamelCase).
    """
    def __init__(self):
        super(customized_model, self).__init__()
        # Classifier head removed (include_top=False); features only.
        self.vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
        self.seq = tf.keras.Sequential([layers.Flatten(),
                                        layers.Dense(5000),
                                        layers.Dense(2)])

    def call(self, inputs):
        # Backbone features -> flattened -> 2-value regression output.
        x = self.vgg(inputs)
        x = self.seq(x)
        return x
model = customized_model()
#preprocess input
x = tf.keras.applications.vgg19.preprocess_input(imgs*255)
x = tf.image.resize(x, (224, 224))
labels = load_label()
#temporaily slice the labels so that it matches the size of the images
labels = labels[:imgs.shape[0]]
def objective(labels, res):
    """Scalar training loss: mean of the per-sample squared errors."""
    per_sample_mse = tf.keras.losses.mean_squared_error(y_true=labels, y_pred=res)
    return tf.reduce_mean(per_sample_mse)
@tf.function()
def train():
    """Run one gradient-descent step on the module-level batch (x, labels).

    Reads the globals ``model``, ``x``, ``labels`` and ``optimizer``.
    """
    with tf.GradientTape() as gt:
        res = model(x)
        obj = objective(labels, res)
    grads = gt.gradient(obj, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
epochs = 100
#add batch feature
for i in range(epochs):
train()
res = model(x)
obj = objective(labels, res)
print("%2.5f" % (obj)) | StarcoderdataPython |
#################### version 1 #################################################
# Eager variant: list() materializes each filter immediately, so the lambda's
# `i` is evaluated while it still holds 3 (then 5).  Prints the list without
# 3 and 5: [0, 1, 2, 4, 6, 7, 8, 9].
a = list(range(10))
# print(a, id(a))
res_1 = list(a)
# print(res_1, id(res_1))
for i in a:
    if i in (3, 5):
        print(">>>", i, id(i))
        res_1 = list(filter(lambda x: x != i, res_1))
        # print(type(res_1), id(res_1))
print(list(res_1))
#################### version 2 #################################################
# Exhaustion pitfall: filter() returns a one-shot iterator, and the inner
# `for w in res_2` loop consumes it completely.  The final list(res_2) is
# therefore empty — this version prints [].
b = list(range(10))
# print(b, id(b))
res_2 = list(b)
# print(res_2, id(res_2))
for i in b:
    if i in {3, 5}:
        print(">>>", i, id(i))
        res_2 = filter(lambda x: x != i, res_2)
        for w in res_2:
            pass
        # print(type(res_2), id(res_2))
res_2 = list(res_2)
print(list(res_2))
#################### version 3 #################################################
# Late-binding pitfall: the chained filters stay lazy, so both lambdas look
# up `i` only when list() finally iterates — by then the loop has finished
# and i == 9, so only 9 is removed: prints [0..8].
c = list(range(10))
# print(c, id(c))
res_3 = list(c)
# print(res_3, id(res_3))
for i in c:
    if i in (3, 5):
        print(">>>", i, id(i))
        res_3 = filter(lambda x: x != i, res_3)
        # print(type(res_3), id(res_3))
print(list(res_3))
| StarcoderdataPython |
1968334 | from math import sqrt, acos
def dist(v1, v2):
    """Euclidean distance between the 2-D points *v1* and *v2*."""
    dx = v1[0] - v2[0]
    dy = v1[1] - v2[1]
    return sqrt(dx * dx + dy * dy)
def dot(v1, v2):
    """Dot product of the 2-D vectors *v1* and *v2*."""
    return sum(v1[i] * v2[i] for i in (0, 1))
def cross(v1, v2, v3):
    """Z-component of (v2 - v1) x (v3 - v1): positive for a left (CCW) turn
    v1 -> v2 -> v3, negative for a right turn, zero when collinear."""
    ax, ay = v2[0] - v1[0], v2[1] - v1[1]
    bx, by = v3[0] - v1[0], v3[1] - v1[1]
    return ax * by - ay * bx
def norm(v1):
    """Euclidean length of the 2-D vector *v1*."""
    x, y = v1[0], v1[1]
    return sqrt(x * x + y * y)
def angle(v1, v2):
    """Angle in radians between the 2-D vectors *v1* and *v2*, in [0, pi].

    The dot product and magnitudes are computed inline (equivalent to
    dot(v1, v2) / (norm(v1) * norm(v2))).
    """
    dot_product = v1[0] * v2[0] + v1[1] * v2[1]
    magnitudes = sqrt(v1[0] * v1[0] + v1[1] * v1[1]) * sqrt(v2[0] * v2[0] + v2[1] * v2[1])
    return acos(dot_product / magnitudes)
def sort_points_by_y(vect):
    """Return the points sorted by y-coordinate, breaking ties by x."""
    def yx_key(point):
        return (point[1], point[0])
    return sorted(vect, key=yx_key)
def sort_points_by_angle(vect):
    """Sort *vect* in place by polar angle from the +x axis.

    Returns (vect, angles) where angles[i] is the angle of vect[i] after
    sorting.  Replaces the original O(n^2) exchange sort with an
    O(n log n) stable sort; points sharing an exact angle may come out in
    a different relative order than before, which downstream
    remove_collinear() collapses anyway.
    """
    angles = [angle((1, 0), v) for v in vect]
    order = sorted(range(len(vect)), key=angles.__getitem__)
    # Assign through the slice so callers holding a reference to the list
    # still observe the in-place reordering.
    vect[:] = [vect[i] for i in order]
    angles = [angles[i] for i in order]
    return vect, angles
def remove_collinear(p0, vect, angles):
    """Graham-scan preprocessing: of the points sharing a polar angle,
    keep only the one farthest from the pivot *p0*.

    vect/angles are modified in place and returned together with the
    removed points and their angles.
    NOTE(review): relies on exact float equality of angles, and with 3+
    points on one ray the nearer point can be appended to the removal
    list twice (the second list.remove would then raise ValueError) —
    confirm the inputs make this safe.
    """
    l = len(vect)
    to_remove_vect = []
    to_remove_angle = []
    for k in range(l-1):
        for w in range(k+1, l):
            if angles[k] == angles[w]:
                # Keep the farther point; mark the nearer one for removal.
                if dist(p0, vect[k]) < dist(p0, vect[w]):
                    to_remove_vect.append(vect[k])
                    to_remove_angle.append(angles[k])
                else:
                    to_remove_vect.append(vect[w])
                    to_remove_angle.append(angles[w])
    for v, a in zip(to_remove_vect, to_remove_angle):
        # list.remove drops the first equal element, fine when points are unique.
        vect.remove(v)
        angles.remove(a)
    return vect, angles, to_remove_vect, to_remove_angle
def graham_scan(p0, vect):
    """Graham scan over points *vect* pre-sorted by polar angle around *p0*
    (with collinear duplicates already removed); returns the hull stack.

    NOTE(review): the inner while loop has no guard against popping the
    stack below 2 entries, and `d < 0` is treated as a left turn, which is
    the opposite of cross()'s CCW-positive convention — verify orientation
    before relying on the output.  The print() calls look like leftover
    debugging.
    """
    if len(vect) < 2:
        # NOTE(review): string sentinel rather than an exception/None.
        return "Convex hull is empty"
    stack = [p0, vect[0], vect[1]]
    stack_size = 3
    if len(vect) == 2:
        return stack
    l = len(vect)
    for k in range(2, l):
        while(True):
            print(stack)
            # Turn direction formed by the stack top two points and vect[k].
            d = cross(stack[stack_size - 2], stack[stack_size - 1], vect[k])
            print(d)
            if d < 0: # left turn
                break
            else: # non left turn
                stack.pop()
                stack_size -= 1
        stack.append(vect[k])
        stack_size += 1
    return stack
p1 = (1,1)
p2 = (5,3)
p3 = (7,6)
p4 = (3,5)
a1 = (4,4)
a2 = (6,4)
# Pipeline
# 1 - sort_points_by_y
# 2 - sort_points_by_angle
# 3 - remove_collinear
| StarcoderdataPython |
11286168 | #!/usr/bin/env python3
"""
Polyglot v2 node server Davice WeatherLink Live weather data
Copyright (C) 2018 <NAME>
"""
CLOUD = False
try:
import polyinterface
except ImportError:
import pgc_interface as polyinterface
CLOUD = True
import sys
import time
import datetime
import requests
import socket
import math
import json
LOGGER = polyinterface.LOGGER
class Controller(polyinterface.Controller):
id = 'weather'
#id = 'controller'
hint = [0,0,0,0]
def __init__(self, polyglot):
super(Controller, self).__init__(polyglot)
self.name = 'WeatherLink'
self.address = 'weather'
self.primary = self.address
self.configured = False
self.myConfig = {}
self.ip_address = ''
self.has_soil = False
self.has_indoor = False
self.poly.onConfig(self.process_config)
# Process changes to customParameters
def process_config(self, config):
if 'customParams' in config:
# Check if anything we care about was changed...
if config['customParams'] != self.myConfig:
changed = False
if 'IP Address' in config['customParams']:
if self.ip_address != config['customParams']['IP Address']:
self.ip_address = config['customParams']['IP Address']
changed = True
self.myConfig = config['customParams']
if changed:
self.removeNoticesAll()
self.configured = True
self.discover_nodes()
if self.ip_address == '':
self.addNotice("WeatherLink IP Address parameter must be set");
self.configured = False
def start(self):
LOGGER.info('Starting node server')
self.check_params()
# TODO: Discovery
LOGGER.info('Node server started')
self.discover_nodes()
if self.has_indoor:
LOGGER.info('Creating node for indoor conditions')
self.addNode(IndoorNode(self, self.address, 'indoor', 'Indoor'))
if self.has_soil:
LOGGER.info('Creating node for soil conditions')
self.addNode(SoilNode(self, self.address, 'soil', 'Soil'))
# Do an initial query to get filled in as soon as possible
self.query_conditions()
def longPoll(self):
pass
def shortPoll(self):
self.query_conditions()
def rain_size(self, size):
if size == None:
return 0
if size == 1:
return 0.01 # inch
if size == 2:
return 0.2 # mm
if size == 3:
return 0.1 # mm
if size == 4:
return 0.001 # inch
return 0
def update(self, driver, value):
if value != None:
self.setDriver(driver, float(value), True, False)
def discover_nodes(self):
if not self.configured:
return
request = 'http://' + self.ip_address + '/v1/current_conditions'
c = requests.get(request)
jdata = c.json()
for record in jdata['data']['conditions']:
if record['data_structure_type'] == 2:
self.has_soil = True
elif record['data_structure_type'] == 1:
self.has_indoor = True
def query_conditions(self):
# Query for the current conditions. We can do this fairly
# frequently, probably as often as once a minute.
#
# By default JSON is returned
request = 'http://' + self.ip_address + '/v1/current_conditions'
LOGGER.debug('request = %s' % request)
if not self.configured:
LOGGER.info('Skipping connection because we aren\'t configured yet.')
return
try:
c = requests.get(request)
except Exception as e:
LOGGER.error('Request for data from WLL failed.')
return
jdata = c.json()
LOGGER.debug(jdata)
# 4 record types can be returned. Lets use a separate
# node for each. We'll start with the ISS current
# condition record as that has the outside info
#
# Other records are:
# Leaf/Soil Moisture
# LSS BAR
# LSS temperature/humidity
#
# { "data":
# { "did: , "ts": , "conditions": [
# {"data_structure_type": 1
for record in jdata['data']['conditions']:
if record['data_structure_type'] == 1:
# We have a local sensor ID and transmitter ID. Do we
# need to look at these?
LOGGER.info('Found current conditions')
# Update node with values in <record>
self.update('CLITEMP', record['temp'])
self.update('CLIHUM', record['hum'])
self.update('DEWPT', record['dew_point'])
self.update('WINDDIR', record['wind_dir_last'])
self.update('GV0', record['wet_bulb'])
self.update('GV1', record['heat_index'])
self.update('GV2', record['wind_chill'])
self.update('SPEED', record['wind_speed_last'])
self.update('SOLRAD', record['solar_rad'])
self.update('GV7', record['uv_index'])
self.update('GV9', record['wind_speed_hi_last_2_min'])
# rainfall is in counts and 1 count = 0.01 inches
# rain size is the tipping bucket calibration.
# size = 1 means 0.01 inches
# size = 2 means 0.2 mm
# size = 3 means 0.1 mm
# size = 4 means 0.001 inches
if record['rain_size'] == 1:
rain_cal = 0.01
elif record['rain_size'] == 2:
rain_cal = 0.0787
elif record['rain_size'] == 3:
rain_cal = 0.0394
elif record['rain_size'] == 4:
rain_cal = 0.001
else:
rain_cal = 0.01
#self.setDriver('GV5', self.rain_size(record['rain_size']), True, False)
if record['rainfall_daily'] != None:
rain = rain_cal * int(record['rainfall_daily'])
self.setDriver('GV10', rain, True, False)
if record['rain_rate_last'] != None:
rain = rain_cal * int(record['rain_rate_last'])
self.setDriver('RAINRT', rain, True, False)
if record['rainfall_year'] != None:
rain = rain_cal * int(record['rainfall_year'])
self.setDriver('GV5', rain, True, False)
# wind gust? wind_speed_hi_last_2_min
# hi temperature
# low temperature
# rain today rainfall_daily (in counts????)
elif record['data_structure_type'] == 3: # pressure
if record['bar_sea_level'] != None:
self.setDriver('BARPRES', float(record['bar_sea_level']), True, False)
if record['bar_trend'] != None:
self.setDriver('GV8', float(record['bar_trend']), True, False)
elif record['data_structure_type'] == 4 and self.has_indoor: # Indoor conditions
LOGGER.info(record)
# self.nodes['indoor'].setDriver(...
# 'temp-in'
# 'hum-in'
# 'dew_point_in'
# 'heat_index_in'
if record['temp_in'] != None:
self.nodes['indoor'].setDriver('CLITEMP', float(record['temp_in']), True, False)
if record['hum_in'] != None:
self.nodes['indoor'].setDriver('CLIHUM', float(record['hum_in']), True, False)
if record['dew_point_in'] != None:
self.nodes['indoor'].setDriver('DEWPT', float(record['dew_point_in']), True, False)
if record['heat_index_in'] != None:
self.nodes['indoor'].setDriver('GV0', float(record['heat_index_in']), True, False)
elif record['data_structure_type'] == 2 and self.has_soil: # Soil Conditions
# self.nodes['soil'].setDriver(...
if record['temp_1'] != None:
self.nodes['soil'].setDriver('GV0', float(record['temp_1']), True, False)
if record['temp_2'] != None:
self.nodes['soil'].setDriver('GV1', float(record['temp_2']), True, False)
if record['temp_3'] != None:
self.nodes['soil'].setDriver('GV2', float(record['temp_3']), True, False)
if record['temp_4'] != None:
self.nodes['soil'].setDriver('GV3', float(record['temp_4']), True, False)
if record['moist_soil_1'] != None:
self.nodes['soil'].setDriver('GV4', float(record['moist_soil_1']), True, False)
if record['moist_soil_2'] != None:
self.nodes['soil'].setDriver('GV5', float(record['moist_soil_2']), True, False)
if record['moist_soil_3'] != None:
self.nodes['soil'].setDriver('GV6', float(record['moist_soil_3']), True, False)
if record['moist_soil_4'] != None:
self.nodes['soil'].setDriver('GV7', float(record['moist_soil_4']), True, False)
if record['wet_leaf_1'] != None:
self.nodes['soil'].setDriver('GV8', float(record['wet_leaf_1']), True, False)
if record['wet_leaf_2'] != None:
self.nodes['soil'].setDriver('GV9', float(record['wet_leaf_2']), True, False)
else:
LOGGER.info('Skipping data type %d' % record['data_structure_type'])
def query(self):
for node in self.nodes:
self.nodes[node].reportDrivers()
def discover(self, *args, **kwargs):
# Create any additional nodes here
LOGGER.info("In Discovery...")
# Delete the node server from Polyglot
def delete(self):
LOGGER.info('Removing node server')
def stop(self):
LOGGER.info('Stopping node server')
def update_profile(self, command):
st = self.poly.installprofile()
return st
def check_params(self):
if 'IP Address' in self.polyConfig['customParams']:
self.ip_address = self.polyConfig['customParams']['IP Address']
self.configured = True
self.addCustomParam( {
'IP Address': self.ip_address} )
self.removeNoticesAll()
if self.ip_address == '':
self.addNotice("WeatherLink IP Address parameter must be set");
self.configured = False
def remove_notices_all(self, command):
self.removeNoticesAll()
commands = {
'DISCOVER': discover,
'UPDATE_PROFILE': update_profile,
'REMOVE_NOTICES_ALL': remove_notices_all
}
# The controller node has the main current condition data
#
drivers = [
{'driver': 'ST', 'value': 1, 'uom': 2}, # node server status
{'driver': 'CLITEMP', 'value': 0, 'uom': 17}, # temperature
{'driver': 'CLIHUM', 'value': 0, 'uom': 22}, # humidity
{'driver': 'BARPRES', 'value': 0, 'uom': 23}, # pressure
{'driver': 'DEWPT', 'value': 0, 'uom': 17}, # dew point
{'driver': 'WINDDIR', 'value': 0, 'uom': 76}, # direction
{'driver': 'SPEED', 'value': 0, 'uom': 48}, # wind speed
{'driver': 'GV9', 'value': 0, 'uom': 48}, # wind gust
{'driver': 'GV0', 'value': 0, 'uom': 17}, # wet bulb
{'driver': 'GV1', 'value': 0, 'uom': 17}, # heat index
{'driver': 'GV2', 'value': 0, 'uom': 17}, # wind chill
{'driver': 'RAINRT', 'value': 0, 'uom': 24}, # rain rate
{'driver': 'GV5', 'value': 0, 'uom': 105}, # rain size
{'driver': 'SOLRAD', 'value': 0, 'uom': 74}, # solar radiation
{'driver': 'GV7', 'value': 0, 'uom': 71}, # UV index
{'driver': 'GV8', 'value': 0, 'uom': 23}, # pressure trend
{'driver': 'GV10', 'value': 0, 'uom': 105}, # daily rainfall
{'driver': 'GV11', 'value': 0, 'uom': 17}, # indoor temp
{'driver': 'GV12', 'value': 0, 'uom': 22}, # indoor humidity
]
class IndoorNode(polyinterface.Node):
    # ISY node for the indoor sensor readings; populated by
    # Controller.query_conditions() from the data_structure_type == 4 record.
    id = 'indoor'
    drivers = [
        {'driver': 'CLITEMP', 'value': 0, 'uom': 17},  # indoor temperature (temp_in)
        {'driver': 'CLIHUM', 'value': 0, 'uom': 22},   # indoor humidity (hum_in)
        {'driver': 'DEWPT', 'value': 0, 'uom': 17},    # indoor dew point (dew_point_in)
        {'driver': 'GV0', 'value': 0, 'uom': 17},      # indoor heat index (heat_index_in)
    ]
class SoilNode(polyinterface.Node):
    # ISY node for the leaf/soil moisture station; populated by
    # Controller.query_conditions() from the data_structure_type == 2 record.
    id = 'soil'
    drivers = [
        {'driver': 'GV0', 'value': 0, 'uom': 17},  # soil temperature probe 1 (temp_1)
        {'driver': 'GV1', 'value': 0, 'uom': 17},  # soil temperature probe 2 (temp_2)
        {'driver': 'GV2', 'value': 0, 'uom': 17},  # soil temperature probe 3 (temp_3)
        {'driver': 'GV3', 'value': 0, 'uom': 17},  # soil temperature probe 4 (temp_4)
        {'driver': 'GV4', 'value': 0, 'uom': 87},  # soil moisture 1 (moist_soil_1)
        {'driver': 'GV5', 'value': 0, 'uom': 87},  # soil moisture 2 (moist_soil_2)
        {'driver': 'GV6', 'value': 0, 'uom': 87},  # soil moisture 3 (moist_soil_3)
        {'driver': 'GV7', 'value': 0, 'uom': 87},  # soil moisture 4 (moist_soil_4)
        {'driver': 'GV8', 'value': 0, 'uom': 56},  # leaf wetness 1 (wet_leaf_1)
        {'driver': 'GV9', 'value': 0, 'uom': 56},  # leaf wetness 2 (wet_leaf_2)
    ]
if __name__ == "__main__":
    try:
        # Standard Polyglot bootstrap: connect, hand control to the node
        # server, and block until shutdown.
        polyglot = polyinterface.Interface('WLL')
        polyglot.start()
        control = Controller(polyglot)
        control.runForever()
    except (KeyboardInterrupt, SystemExit):
        # Exit cleanly on Ctrl-C or service stop.
        sys.exit(0)
| StarcoderdataPython |
1962809 | <filename>pynlg/spec/list.py
# encoding: utf-8
"""Definition of the ListElement container class."""
from .base import NLGElement
from ..lexicon.feature.internal import COMPONENTS
class ListElement(NLGElement):
    """
    ListElement is used to define elements that can be grouped
    together and treated in a similar manner. The list element itself
    adds no additional meaning to the realisation. For example, the
    syntax processor takes a phrase element and produces a list element
    containing inflected word elements. Phrase elements only have
    meaning within the syntax processing while the morphology processor
    (the next in the sequence) needs to work with inflected words.
    Using the list element helps to keep the inflected word elements
    together.
    There is no sorting within the list element and components are added
    in the order they are given.
    """
    def __init__(self, element=None):
        """The ListElement inherits factory, category and all features
        from the phrase.

        *element* may be a list of components, a single NLGElement
        (whose category/features are copied), or None for an empty list.
        """
        super(ListElement, self).__init__()
        # Components live under the COMPONENTS feature key.
        self.features = {COMPONENTS: []}
        if element:
            if isinstance(element, list):
                self.extend(element)
            elif isinstance(element, NLGElement):
                self.category = element.category
                self.features.update(element.features)
                self.append(element)

    def __bool__(self):
        # Truthy when at least one component is held.
        return bool(len(self))

    def __len__(self):
        return len(self.features[COMPONENTS])

    def __getitem__(self, key):
        return self.features[COMPONENTS][key]

    def __setitem__(self, key, value):
        # Replacing a component also adopts it (parent bookkeeping).
        self.features[COMPONENTS][key] = value
        value.parent = self

    def __delitem__(self, key):
        # Orphan the component before dropping it.
        self.features[COMPONENTS][key].parent = None
        del self.features[COMPONENTS][key]

    def __iter__(self):
        return iter(self.features[COMPONENTS])

    def append(self, element):
        """Append *element* and adopt it as a child."""
        self.features[COMPONENTS].append(element)
        element.parent = self

    def extend(self, elements):
        """Append every element of *elements*, adopting each one."""
        self.features[COMPONENTS].extend(elements)
        for element in elements:
            element.parent = self

    @property
    def children(self):
        # Alias for the component list.
        return self.features[COMPONENTS]

    @children.setter
    def children(self, value):
        self.features[COMPONENTS] = value
        for child in value:
            child.parent = self

    @property
    def head(self):
        # First component, or None when empty.
        return self[0] if self else None

    def realise_syntax(self):
        """Return a new ListElement containing the syntax realisation of
        each of the current ListElement elements.
        Return None if the ListElement is elided.
        """
        if self.elided:
            return None
        realised_list = ListElement()
        for element in self:
            realised_list.append(element.realise_syntax())
        # A single-element result collapses to that element itself.
        if len(realised_list) == 1:
            return realised_list.head
        else:
            return realised_list

    def realise_morphology(self):
        """Return a new ListElement containing the morphology realisation
        of each of the current ListElement elements.
        """
        realisations = [element.realise_morphology() for element in self]
        return ListElement(realisations)

    def realise_orthography(self):
        # Orthography is not defined for bare lists.
        raise NotImplementedError
| StarcoderdataPython |
20117 | <reponame>roberthutto/aws-cfn-bootstrap
#==============================================================================
# Copyright 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
class BuildError(Exception):
    """Base exception for errors raised while building."""
class NoSuchConfigSetError(BuildError):
    """Raised when no config set with the specified name exists."""

    def __init__(self, msg):
        # Human-readable description of the missing config set.
        self.msg = msg

    def __str__(self):
        return self.msg
class NoSuchConfigurationError(BuildError):
    """Raised when no configuration with the specified name exists."""

    def __init__(self, msg):
        # Human-readable description of the missing configuration.
        self.msg = msg

    def __str__(self):
        return self.msg
class CircularConfigSetDependencyError(BuildError):
    """Raised when configSets depend on each other in a cycle."""

    def __init__(self, msg):
        # Human-readable description of the dependency cycle.
        self.msg = msg

    def __str__(self):
        return self.msg
class ToolError(BuildError):
    """
    Exception raised by Tools when they cannot successfully change reality

    Attributes:
    msg - a human-readable error message
    code - an error code, if applicable
    """

    def __init__(self, msg, code=None):
        self.msg = msg
        self.code = code

    def __str__(self):
        # Compare against None rather than truthiness so a legitimate
        # return code of 0 is still reported.
        if self.code is not None:
            return '%s (return code %s)' % (self.msg, self.code)
        return self.msg
| StarcoderdataPython |
9673493 | import sys
sys.path.append('../src/org_to_anki')
from org_to_anki.noteModels.models import NoteModels
def testNodeModelsCanBeLoaded():
    """The bundled note models should expose their canonical Anki names."""
    models = NoteModels()
    expected = (
        (models.getBasicModel(), "Basic"),
        (models.getRevseredModel(), "Basic (and reversed card)"),
        (models.getClozeModel(), "Cloze"),
    )
    for model, name in expected:
        assert model.get("name") == name
66625 | <reponame>aimo84/ProyectoRe-seikosta
# Cloud and Big Data
# Written by <NAME>, <NAME>, <NAME>
# Script name: S10
# Description: extracts the relation between the score and the number of
# comments of each subreddit
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
from pushshift import file_to_dataframe, get_file
conf = SparkConf().setAppName('S10')
sc = SparkContext(conf=conf)
ss = SparkSession(sc)
df = file_to_dataframe(get_file(), ss)
# Per subreddit: sum score and comments, express score-per-comment as a
# percentage, order by total score and write the result as JSON.
df.select(
    "subreddit",
    "score",
    "num_comments"
).groupBy(
    "subreddit"
).sum(
    "score",
    "num_comments"
).withColumn(
    "Relacion",
    (f.col("sum(score)")/f.col("sum(num_comments)"))*100
).withColumn(
    "Relacion",
    f.round(f.col("Relacion"), 2)
).orderBy(
    'sum(score)',
    ascending=False
).write.json("s10_salida")
| StarcoderdataPython |
3233622 | <gh_stars>1-10
from __future__ import annotations
from corkus.objects.base import CorkusBase
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from .ingredient import Ingredient
class PartialIngredient(CorkusBase):
    """Represents a ``Partial`` version of :py:class:`Ingredient`.

    Only the ingredient's name is available locally; :py:meth:`fetch`
    retrieves the full object from the API.
    """
    @property
    def name(self) -> str:
        """The name of the ingredient."""
        # For partial ingredients the raw payload is the bare name string.
        return self._attributes

    async def fetch(self, timeout: Optional[int] = None) -> Ingredient:
        """Fetch full ingredient information from API.
        .. include:: ../note_api_call.rst
        :param timeout: Optionally override default timeout.
        """
        return await self._corkus.ingredient.get(self.name, timeout)

    def __repr__(self) -> str:
        return f"<PartialIngredient name={self.name!r}>"
| StarcoderdataPython |
9682980 | <reponame>vidalmatheus/DS.com<filename>database/teste.py
from flask import Flask
import sqlalchemy as db
from sqlalchemy import *
app = Flask(__name__)
# connect to the db
engine = create_engine('postgresql://postgres:admin@localhost/ds')
con = engine.connect()
@app.route("/hello")
def hello():
    """Static greeting endpoint for /hello."""
    greeting = "<h1>Hello BRAZIL! !!!!!!</h1>"
    return greeting
@app.route("/hello/<name>")
def get_name(name):
    """Greet the caller by the name captured from the URL path."""
    return f"<h1>Hello, {name}!</h1>"
sql = text('SELECT * FROM paciente')
ans = con.execute(sql)
names = [row[3] for row in ans]
print(names)
@app.route("/paciente")
def get_paciente():
    """Render the patient names fetched at module import time."""
    return str(names)
con.close()
if __name__ == '__main__':
app.run(debug=True)
| StarcoderdataPython |
8041740 | # -*- coding: UTF-8 -*-
import sys
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from rklib.utils import dirDetectCreate
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.cm as cm
from matplotlib import font_manager as fm
from matplotlib_venn import venn2,venn3
import itertools
from rblib import mutilstats
import scipy.cluster.hierarchy as sch
from rklib import utils
# for projection='3d'
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster.hierarchy import fcluster; import pandas
###动态设置字体
###
from matplotlib.patches import Polygon
# to get kmeans and scipy.cluster.hierarchy
from scipy.cluster.vq import *
from scipy.cluster.hierarchy import *
###
from matplotlib.colors import LogNorm
##kmeans归一化处理 from scipy.cluster.vq import whiten
from scipy.cluster.vq import whiten
#mpl.style.use('ggplot')
from rblib import mplconfig
from rblib.mplconfig import styles,color_grad,rgb2hex,makestyles
def test_iter(num):
fig = plt.figure(dpi=300)
x = 1
y = 1
ax = fig.add_subplot(111)
ret_color,ret_lines,ret_marker = styles(num)
for i in range(num):
ax.plot([x,x+1,x+2,x+3,x+4],[y,y,y,y,y],color=ret_color[i],linestyle=ret_lines[i],marker=ret_marker[i],markeredgecolor=ret_color[i],markersize=12,alpha=0.8)
y += 1
plt.savefig("test_style.png",format='png',dpi=300)
plt.clf()
plt.close()
return 0
def admixture_plot():
    # NOTE(review): unimplemented placeholder — returns 0 without drawing anything.
    return 0
def plot_enrich(resultmark,resultothers,fig_prefix,xlabel,ylabel):
fig = plt.figure(figsize=(8,6),dpi=300)
num = len(resultmark) + 1
ret_color,ret_lines,ret_marker = styles(num)
ax = fig.add_subplot(111)
maxlim = 0
for i in range(num-1):
#ax.plot(resultmark[i][1],resultmark[i][2],ret_color[i]+ret_marker[i],label=resultmark[i][0],markeredgecolor=ret_color[i],markersize=8,alpha=0.7)
ax.plot(resultmark[i][1],resultmark[i][2],color=ret_color[i],linestyle='',marker=ret_marker[i],label=resultmark[i][0],markeredgecolor=ret_color[i],markersize=10,alpha=0.7)
if resultmark[i][2] > maxlim:
maxlim = resultmark[i][2]
xarr = []
yarr = []
for ret in resultothers:
xarr.append(ret[0])
yarr.append(ret[1])
ax.plot(xarr,yarr,'ko',label="others",markeredgecolor='k',markersize=3,alpha=0.5)
art = []
lgd = ax.legend(bbox_to_anchor=(1.02, 1),loc=0,borderaxespad=0,numpoints=1,fontsize=6)
art.append(lgd)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_ylim(0,maxlim+2)
ax.grid(True)
plt.savefig(fig_prefix+".png",format='png',additional_artists=art,bbox_inches="tight",dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',additional_artists=art,bbox_inches="tight",dpi=300)
plt.clf()
plt.close()
return 0
# 1425 ax1.scatter(xy[:,0],xy[:,1],c=colors)
#1426 ax1.scatter(res[:,0],res[:,1], marker='o', s=300, linewidths=2, c='none')
#1427 ax1.scatter(res[:,0],res[:,1], marker='x', s=300, linewidths=2)
def verrorbar(ynames,data,fig_prefix="simerrorbar",figsize=(5,4),log=False):
# data n , mean , lower, upper, sig
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
yaxis_locations = list(range(len(ynames)))
ax.errorbar(data[:,0], yaxis_locations, xerr=np.transpose(data[:,[1,2]]),markeredgewidth=1.25,elinewidth=1.25,capsize=3,fmt="s",c="k",markerfacecolor="white")
ax.set_yticks(yaxis_locations)
ax.set_yticklabels(ynames)
if log == True:
ax.set_xscale("log")
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
return 0
def sim_scatter(X,Y,xlabel,ylabel,alpha=0.3,fig_prefix="simscatter"):
fig = plt.figure(dpi=300)
ax = fig.add_subplot(111)
ax.scatter(X,Y,marker='o',linewidths=0,color='gray',alpha=alpha)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close();
return 0
def groups_scatter_flatdata(xvec,yvec,groups,xlabel,ylabel,addline=None,fig_prefix="test",alpha=0.6,colors=None,figsize=(5,4),markersize=10):
## groups is a list , like [0,0,0,0,1,1,1,1,2,2,2,2,2,4,4,4,4,4]
# ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=8)
setgroups = sorted(list(set(groups)))
xs = []
ys = []
npgroups = np.asarray(groups)
for i in setgroups:
xs.append(xvec[npgroups == i])
ys.append(yvec[npgroups == i])
group_scatter(xs,ys,setgroups,xlabel,ylabel,addline,fig_prefix,alpha,figsize=figsize,markersize=markersize)
return 0
from matplotlib.patches import Ellipse
def eigsorted(cov):
    """Eigen-decomposition of the symmetric matrix *cov*, with eigenvalues
    (and matching eigenvector columns) ordered from largest to smallest."""
    vals, vecs = np.linalg.eigh(cov)
    descending = np.argsort(vals)[::-1]
    return vals[descending], vecs[:, descending]
def group_scatter(xs,ys,groups,xlabel,ylabel,addline=None,fig_prefix="test",alpha=0.8,colors=None,figsize=(5,4),markersize=30,addEllipse=True,xlim=None,ylim=None):
if colors == None:
colors,lines,markers = styles(len(groups))
else:
lines,markers = styles(len(groups))[1:]
fig = plt.figure(figsize=figsize,dpi=300)
ax = fig.add_subplot(111)
patchs = []
nstd = 2
for i in range(len(groups)):
group = groups[i]
x = xs[i]
y = ys[i]
#patch = ax.scatter(x,y,marker=markers[i],linewidths=0,color=colors[i],alpha=alpha,s=markersize)
#patch = ax.scatter(x,y,marker=markers[i],linewidths=0,color=colors[i],alpha=alpha,s=markersize)
patch = ax.scatter(x,y,marker=markers[i],linewidths=0,color=colors[i],alpha=alpha,s=markersize)
patchs.append(patch)
if addline != None:
[x1,x2],[y1,y2] = addline[i]
ax.plot([x1,x2],[y1,y2],color=colors[i],ls='--',lw=1.0)
##
cov = np.cov(x, y)
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
w, h = 2 * nstd * np.sqrt(vals)
if addEllipse:
ell = Ellipse(xy=(np.mean(x), np.mean(y)), width=w, height=h, angle=theta, edgecolor=colors[i],alpha=1.0,facecolor='none')
ax.add_patch(ell)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if xlim is not None: ax.set_xlim(xlim)
if ylim is not None: ax.set_ylim(ylim)
#ax.set_ylim(-1.5,1.5)
ax.legend(patchs,groups,loc=0,fancybox=False,frameon=False,numpoints=1,handlelength=0.75)
ax.grid(True,ls='--')
fig.tight_layout()
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close()
return 0
def scatter2(x,y,xlabel,ylabel,addline=None,fig_prefix="test",alpha=0.3,ylog=0,xlog=0,log=0,figsize=(10,3),marker='o',linewidths=0): # line is [[x1,x2],[y1,y2]] = addline
fig = plt.figure(dpi=300,figsize=figsize)
ax = fig.add_subplot(111)
colors = styles(3)[0]
ax.scatter(x,y,marker=marker,linewidths=linewidths,color=colors[0],alpha=alpha) #,label=labels[0])
if addline is not None:
[x1,x2],[y1,y2] = addline
ax.plot([x1,x2],[y1,y2],color="gray",ls='--',lw=1.0) #ax.plot(xp,yp,color=colors[n-i-1],linestyle='--',lw=1.0)
#ax.set_xlim(x1,x2)
#ax.set_ylim(y1,y2)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
if log:
ax.set_yscale("log")
ax.set_xscale("log")
if ylog:
ax.set_yscale("log")
if xlog:
ax.set_xscale("log")
plt.savefig(fig_prefix+".png",format='png',dpi=300)
plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
plt.clf();plt.close()
return 0
def volcanoplot(siglog2fc,siglogp,totlog2fc,totlogp,xlabel = "Log2 (Fold Change)", ylabel = "-Log10(q-value)",alpha=0.3,figprefix="test"):
    """Volcano plot: all points in gray, significant up-regulated (log2fc > 0)
    in red and significant down-regulated (log2fc < 0) in blue.
    Writes <figprefix>.png/.svg and returns 0."""
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    up = siglog2fc > 0
    down = siglog2fc < 0
    ax.scatter(totlog2fc, totlogp, marker='o', linewidth=0, color='gray', alpha=alpha)
    ax.scatter(siglog2fc[up], siglogp[up], marker='o', linewidths=0, color='#F15B6C', alpha=alpha)
    ax.scatter(siglog2fc[down], siglogp[down], marker='o', linewidths=0, color='#2A5CAA', alpha=alpha)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.grid(True)
    for ext in ("png", "svg"):
        plt.savefig(figprefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def scatter(xother,yother,xsig,ysig,xlabel="X",ylabel="Y",labels =["No differential","Up regulated","Down regulated"] ,fig_prefix="DEGs_scatter_plot",alpha=0.3):
    """DEG-style scatter: background points as gray triangles; significant
    points colored red above the y = x diagonal and blue below it.
    Writes <fig_prefix>.png/.svg and returns 0."""
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    xother = np.asarray(xother)
    yother = np.asarray(yother)
    xsig = np.asarray(xsig)
    ysig = np.asarray(ysig)
    up = ysig > xsig
    down = xsig > ysig
    ax.scatter(xother, yother, marker='^', linewidths=0, color='gray', alpha=alpha, label=labels[0])
    ax.scatter(xsig[up], ysig[up], marker='o', linewidths=0, color='#F15B6C', alpha=alpha, label=labels[1])
    ax.scatter(xsig[down], ysig[down], marker='o', linewidths=0, color='#2A5CAA', alpha=alpha, label=labels[2])
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.grid(True)
    ax.legend(loc=0, scatterpoints=1)
    for ext in ("png", "svg"):
        plt.savefig(fig_prefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def venn_plot(datalist,setnames,fig_prefix="venn_plot",hplot=None,figsize=(5,4)):
    """Draw a 2- or 3-set Venn diagram and dump each region's members to files.

    datalist: list of sets (same length/order as setnames).
    Writes <fig_prefix>_venn.png/.svg plus one *.lst.xls file per diagram
    region under <fig_prefix>_venn_list/. Returns 0 on success, 1 when the
    number of sets is not 2 or 3.
    NOTE(review): `hplot` is accepted but never used — kept for API compat.
    """
    if len(setnames) == 2:
        vennfun = venn2
        colors_arr = ["magenta","cyan"]
    elif len(setnames) == 3:
        vennfun = venn3
        colors_arr = ["magenta","cyan","blue"]
    else:
        sys.stderr.write("[Warning] Only support 2 or 3 sets' venn plot")
        return 1
    fig = plt.figure(figsize=figsize,dpi=300)
    ax = fig.add_subplot(111)
    vennfun(datalist,setnames,normalize_to=1,set_colors=colors_arr,alpha=0.3)
    plt.savefig(fig_prefix+"_venn.png",format='png',dpi=300)
    plt.savefig(fig_prefix+"_venn.svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    dirDetectCreate(fig_prefix+"_venn_list")
    outdir = fig_prefix+"_venn_list"
    def _dump(fname, members):
        # one region per file; `with` guarantees the handle is closed
        # (the original leaked handles on write errors)
        with open(outdir+"/"+fname, "w") as f:
            f.write("\n".join(members))
            f.write("\n")
    if len(setnames) == 3:
        s0, s1, s2 = datalist
        comb = s0 & s1 & s2
        _dump(setnames[0]+".specific.lst.xls", s0 - (s1 | s2))
        _dump(setnames[1]+".specific.lst.xls", s1 - (s0 | s2))
        _dump(setnames[2]+".specific.lst.xls", s2 - (s0 | s1))
        # pairwise overlaps exclude the triple intersection
        _dump(setnames[0]+"_and_"+setnames[1]+".lst.xls", (s0 & s1) - comb)
        _dump(setnames[1]+"_and_"+setnames[2]+".lst.xls", (s1 & s2) - comb)
        _dump(setnames[0]+"_and_"+setnames[2]+".lst.xls", (s0 & s2) - comb)
        _dump(setnames[0]+"_and_"+setnames[1]+"_and_"+setnames[2]+".lst.xls", comb)
    if len(setnames) == 2:
        s0, s1 = datalist
        _dump(setnames[0]+".specific.lst.xls", s0 - s1)
        _dump(setnames[1]+".specific.lst.xls", s1 - s0)
        _dump(setnames[0]+"_and_"+setnames[1]+".lst.xls", s0 & s1)
    return 0
def kdensity(var_arr,num = 500,fun='pdf',cdfstart=-np.inf):
    """Gaussian kernel-density curve for a 1-D sample.

    Returns (xnew, ynew): `num` evenly spaced x positions and either the
    kde pdf at those positions or, with fun='cdf', the integral of the kde
    from `cdfstart` up to each position. Exits the process on a bad `fun`.
    """
    if fun not in ['cdf','pdf']:
        sys.stderr.write("kdensity Fun should be 'cdf' or 'pdf'")
        sys.exit(1)
    kden = stats.gaussian_kde(np.asarray(var_arr))
    # Derive a robust plotting range from resampled draws (5th-smallest /
    # 5th-largest of 5000 draws, averaged over 30 rounds) rather than the
    # raw sample min/max, which are outlier sensitive.
    lo_draws = []
    hi_draws = []
    for _ in range(30):
        draws = kden.resample(5000)
        draws.sort()
        lo_draws.append(draws[0, 4])
        hi_draws.append(draws[0, -5])
    xnew = np.linspace(np.mean(lo_draws), np.mean(hi_draws), num)
    if fun == 'cdf':
        ynew = np.asarray([kden.integrate_box_1d(cdfstart, xv) for xv in xnew])
    else:
        ynew = kden(xnew)
    return xnew,ynew
def hcluster(Xnp,samplenames,fig_prefix,figsize=(5,4)):
    """Hierarchical clustering dendrogram (Ward linkage, Euclidean distance),
    drawn horizontally and saved as <fig_prefix>_hcluster.png/.svg."""
    fig = plt.figure(dpi=300, figsize=figsize)
    ax = fig.add_subplot(111)
    merge_tree = linkage(Xnp, 'ward', 'euclidean')
    dendrogram(merge_tree, labels=samplenames, orientation='right')
    ax.grid(visible=False)
    fig.tight_layout()
    for ext in ("png", "svg"):
        plt.savefig(fig_prefix + "_hcluster." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def plot_hmc_curve(X,Y,colors,classlabels,figname_prefix="out",scale=0):
    # Harmonic (Andrews) curve plot: each sample row of X becomes one curve f(t).
    #X = n x p Y is list, colors is list
    n,p = X.shape
    # the per-sample label/color lists must match the number of rows
    if n == len(Y) and len(Y) == len(colors):pass
    else: return 1
    if scale ==1:
        X = whiten(X)
    step = 100
    t = np.linspace(-np.pi, np.pi, num=step)
    f = np.zeros((n,step))
    for i in range(n):
        # Andrews transform:
        # f_i(t) = x1/sqrt(2) + x2*sin(t) + x3*cos(t) + x4*sin(2t) + ...
        f[i,:] = X[i,0]/np.sqrt(2)
        for j in range(1,p):
            if j%2 == 1:
                f[i,:] += X[i,j]*np.sin(int((j+1)/2)*t)
            else:
                f[i,:] += X[i,j]*np.cos(int((j+1)/2)*t)
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    # draw one representative curve per distinct color first, so the legend
    # gets exactly one entry per class (in order of first appearance)
    uniq_colors = []
    for tmpcolor in colors:
        if tmpcolor not in uniq_colors:
            uniq_colors.append(tmpcolor)
    idx = [colors.index(color) for color in uniq_colors]
    labels = [classlabels[i] for i in idx]
    for i in idx:
        ax.plot(t,f[i,:],colors[i])
    ax.legend(labels,loc=0)
    # now draw every sample's curve (representatives are simply redrawn)
    for i in range(n):
        ax.plot(t,f[i,:],colors[i])
    ax.set_xlabel("$t(-\pi,\ \pi)$",fontstyle='italic')
    ax.set_ylabel("$f(t)$",fontstyle='italic')
    ax.grid(True)
    plt.savefig(figname_prefix+".png",format='png',dpi=300)
    plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def plot_simple_lr(X,Y,xlabel,ylabel,color="bo",figname_prefix="out"):
    """Scatter of X vs Y with an ordinary least-squares fit line; the
    regression statistics are shown in the title. Returns 0."""
    slope, intercept, rvalue, pvalue, stderr = stats.linregress(X, Y)
    fit_x = np.linspace(np.min(X), np.max(X), num=50)
    fit_y = fit_x * slope + intercept
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(fit_x, fit_y, 'k--')
    ax.grid(True, color='k', alpha=0.5, ls=':')
    ax.plot(X, Y, color, alpha=0.6)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title('slope:%.3g,intercept:%.3g,r:%.3g,p:%.3g,stderr:%.3g'%(slope,intercept,rvalue,pvalue,stderr))
    ax.grid(True)
    for ext in ("png", "svg"):
        plt.savefig(figname_prefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def plot_linear_regress(X,Y,xlabel,ylabel,classnum,h_uniq_colors,h_uniq_classlabels,figname_prefix="out"):
    """Scatter of X vs Y colored per class, with one OLS fit over all points.

    h_uniq_classlabels maps class number -> legend label (e.g. {0:'class1'});
    h_uniq_colors maps class number -> matplotlib style (e.g. {0:'r^'}).
    Returns 0 on success, 1 on invalid input or missing class keys.
    """
    # BUGFIX: the original chained comparison `X.size != Y.size != len(classnum)`
    # misses e.g. X.size == len(classnum) != Y.size, and `sys.stderr(...)`
    # called the stream object (TypeError) instead of writing to it.
    if not (X.size == Y.size == len(classnum)):
        sys.stderr.write("Error: X, Y should be same dimensions\n")
        return 1
    slope,intercept,rvalue,pvalue,stderr = stats.linregress(X,Y)
    tmpX = np.linspace(np.min(X),np.max(X),num=50)
    tmpY = tmpX*slope+intercept
    uniq_classnum = list(set(classnum))
    np_classnum = np.array(classnum)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(tmpX,tmpY,'k--')
    ax.grid(True,color='k',alpha=0.5,ls=':')
    for i in uniq_classnum:
        try:
            color = h_uniq_colors[i]
            label = h_uniq_classlabels[i]
        except KeyError:  # narrowed from a bare except: only key lookups can fail here
            plt.clf()
            plt.close()
            sys.stderr.write("Error: key error\n")
            return 1
        idx = np.where(np_classnum == i)
        ax.plot(X[idx],Y[idx],color,label=label,alpha=0.6)
    ax.legend(loc=0,numpoints=1)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title('slope:%.3g,intercept:%.3g,r:%.3g,p:%.3g,stderr:%.3g'%(slope,intercept,rvalue,pvalue,stderr))
    ax.grid(True)
    plt.savefig(figname_prefix+".png",format='png',dpi=300)
    plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
#def plot_vec_boxplot(Xvecs,fig_prefix,xlabels,ylabel,xticks_labels,outshow=1,colors=None,ylim=0):
def CNVgenome(X,Y,segY,CNVstatus,fig_prefix,xlabel,ylabel,ylim=[],markersize=10,xlim=[]):
    """Genome-wide CNV scatter (FREEC-style results): neutral (status == 2)
    in gray, gain (>= 3) in red, loss (<= 1) in blue; segmented values segY
    are overplotted as small black dots. Writes <fig_prefix>.png/.svg."""
    fig = plt.figure(dpi=300, figsize=(12, 1.5))
    ax = fig.add_subplot(111)
    seg_ms = max(markersize - 7, 1)
    # draw neutral first, then gains, then losses (same z-order as before)
    for mask, facecolor in ((CNVstatus == 2, "gray"),
                            (CNVstatus >= 3, "red"),
                            (CNVstatus <= 1, "blue")):
        ax.plot(X[mask], Y[mask], 'o', markeredgecolor="None",
                markerfacecolor=facecolor, alpha=0.3, markersize=markersize)
        ax.plot(X[mask], segY[mask], 'o', markeredgecolor="black",
                markerfacecolor="black", alpha=0.5, markersize=seg_ms)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if ylim: ax.set_ylim(ylim)
    if xlim: ax.set_xlim(xlim)
    plt.savefig(fig_prefix + ".png", format='png', dpi=300)
    plt.savefig(fig_prefix + ".svg", format='svg', dpi=300)
    plt.clf()
    plt.close()
    return 0
def plot_boxplotscatter(X,fig_prefix,xlabel,ylabel,xticks_labels,colors=None,ylim=[],scatter=1,markersize=7,figsize=(5,4)):
    """Boxplot of the value sequences in X, optionally overlaid with
    jittered raw points (one color per group).

    X: list of value sequences, one box per entry; xticks_labels names them.
    Writes <fig_prefix>.png/.svg and returns 0.
    """
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    bp = ax.boxplot(X)
    # BUGFIX: the caller-supplied `colors` was unconditionally overwritten;
    # only fall back to the default palette when none was given.
    if colors is None:
        colors = styles(len(xticks_labels))[0]
    for box in bp['boxes']:
        box.set( color='#7570b3', linewidth=2)
    for whisker in bp['whiskers']:
        whisker.set(color='#7570b3', linewidth=2)
    for median in bp['medians']:
        median.set(color='red', linewidth=2)
    for flier in bp['fliers']:
        flier.set(marker='o', color='#e7298a', alpha=0)  # outliers hidden (alpha 0)
    if scatter:
        for i in range(len(X)):
            # jitter x positions so overlapping points remain visible
            x = np.random.normal(i+1, 0.03, size=len(X[i]))
            ax.plot(x, X[i], 'o',color=colors[i] ,alpha=0.3,markersize=markersize)
    ax.set_xticklabels(xticks_labels,rotation=45,ha="right")
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if ylim:
        ax.set_ylim(ylim)
    ax.grid(True)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300);plt.savefig(fig_prefix+".svg",format='svg',dpi=300);
    plt.clf();plt.close()
    return 0
from rblib.plotx import retain_y,retain_xy
## statplot.CNVline(xdata,ydata,freqlables,11,"Chromsome %s"%(str(chrom)),11,sys.argv[2]+".chr%s"%(str(chrom)),[rawx_raw,],-0.5,0.5)
def chrom_scatterinfo(xdata,ydata,freqlables,xlabel,ylabel,figprefix,ylimmin=None,ylimmax=None,xlimmin=None,xlimmax=None):
    """Per-chromosome frequency curves on a log-scaled y axis.

    ydata is indexed in parallel with freqlables (one curve per label);
    curve colors come from mplconfig.inscolor. Writes <figprefix>.png/.svg
    and returns 0.
    """
    fig = plt.figure(figsize=(8,3),dpi=300)
    ax1 = fig.add_subplot(111)
    hcolor = mplconfig.inscolor(freqlables)
    for i in range(len(freqlables)):
        ax1.plot(xdata,ydata[i],color=hcolor[freqlables[i]],linestyle="-",lw=2.0,label=freqlables[i])
    if ylimmin and ylimmax:
        ax1.set_ylim(ylimmin,ylimmax)
    if xlimmin is not None:
        ax1.set_xlim(xlimmin,xlimmax)
    # cleanup: the original set the x label twice and kept an unused
    # `numberscatter` local; both removed.
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    ax1.set_yscale("log",nonposy='clip')
    retain_xy(ax1)
    ax1.legend(loc=0)
    ax1.grid(True,ls="--")
    fig.tight_layout()
    plt.savefig(figprefix+".png",format='png',dpi=300);plt.savefig(figprefix+".svg",format='svg',dpi=300);plt.clf();plt.close();
    return 0
def CNVline(xdata,ydata,zdata,xtickslabels,xlabel,ylabel,figprefix,lineplot,ylimmin=-1,ylimmax=1):
    """Gain/loss frequency profile: ydata drawn and filled in red, zdata in
    blue; a vertical dashed gray marker is drawn at each x in `lineplot`.
    NOTE: xtickslabels and ylabel are currently unused (y label is fixed
    to "Frequency"). Writes <figprefix>.png/.svg and returns 0."""
    gain_color = "#EF0000"
    loss_color = "#0076AE"
    fig = plt.figure(figsize=(8, 3), dpi=300)
    ax1 = fig.add_subplot(111)
    ax1.plot(xdata, ydata, color=gain_color, linestyle="-", lw=2.0)
    ax1.plot(xdata, zdata * 1, color=loss_color, linestyle="-", lw=2.0)
    ax1.fill(xdata, ydata, gain_color, xdata, zdata, loss_color)
    ax1.set_xlabel(xlabel)
    for xpos in lineplot:
        ax1.plot([xpos, xpos], [-1, 1], color="gray", ls="--", linewidth=0.5)
    ax1.set_ylim(ylimmin, ylimmax)
    ax1.set_ylabel("Frequency")
    retain_y(ax1)
    for ext in ("png", "svg"):
        plt.savefig(figprefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
#def plot_boxplotgroup(X,groups,legendgroups,fig_prefix,xlabel,ylabel,xticks_labels,outshow=1,colors=None,ylim=1):
def plotenrich_qipao(plotdatax,figprefix,xlabel,figsize=(8,6),aratio=1.0,color="#3E8CBF"):
    """Enrichment bubble plot: one bubble per term, x = the row's log q-value,
    bubble area proportional to the gene count M.

    plotdatax rows are (item, logpvalue, logqvalue, num, M). When `color`
    is None the bubbles are colored by -log10(p) with a colorbar; otherwise
    a fixed color is used. Writes <figprefix>.png/.svg and returns 0.
    """
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    x = []
    y = []
    area = []
    labels = []
    logpvalues = []
    nums = []
    xx = 0
    # BUGFIX: dropped a leftover debug print of every row here.
    for i in plotdatax:
        item,logpvalue,logqvalue,num,M = i
        x.append(logqvalue)
        xx += 1
        y.append(xx)  # one bubble per row, stacked bottom-to-top
        area.append((M*10 + 20)*aratio)
        labels.append(item)
        logpvalues.append(logpvalue)
        nums.append(M)
    if color is None:  # BUGFIX: was `color == None`
        cminstance = cm.get_cmap("Spectral")
        cplot = ax.scatter(x,y,s=area,alpha=0.8, c=logpvalues,cmap=cminstance,vmin=0, vmax=np.max(logpvalues),edgecolors="black",linewidths=0.5)
        cb = fig.colorbar(cplot,ax=ax,fraction=0.15,shrink=0.25,aspect=6,label='-log$_{10}$p-value')
    else:
        cplot = ax.scatter(x,y,s=area,alpha=0.8,c=color)
    ax.set_xlabel(xlabel)
    ax.set_yticks(np.arange(len(plotdatax))+1)
    ax.set_yticklabels(labels)
    ax.grid(False)
    ax.set_ylim(0,len(plotdatax) + 1)
    a,b = ax.get_xlim()
    # pad the x range so edge bubbles are not clipped
    ax.set_xlim(a-(b-a)*0.15,b+(b-a)*0.15)
    fig.tight_layout()
    plt.savefig(figprefix+".png",format='png',dpi=300);plt.savefig(figprefix+".svg",format='svg',dpi=300);
    plt.clf();plt.close()
    return 0
def plot_scatter_qipao(x,y,pvalue,status,figprefix,xlabel,ylabel,figsize=(8,6)): # status = 1 and -1
    # Bubble scatter of x vs y: bubble size encodes significance (-log p),
    # bubble color encodes (y - x) / 2; a y = x reference line is drawn.
    # NOTE(review): `status` is accepted but never used in this function.
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    # shift -log(p) by +60 so the values are positive before normalization
    pvaluetrans = np.log(pvalue) * -1 + 60
    maxpvaluetrans = np.max(pvaluetrans)
    minpvaluetrans = np.min(pvaluetrans)
    #print pvaluetrans
    #print maxpvaluetrans
    #print minpvaluetrans
    #print (pvaluetrans - minpvaluetrans) / (maxpvaluetrans - minpvaluetrans)
    x = np.asarray(x)
    y = np.asarray(y)
    # map normalized significance onto an exponential size scale:
    # 4.5 ** (3 * normalized) ranges from 1 to ~91 before the *15 factor below
    pvaluetransed = np.int64(4.5**((pvaluetrans - minpvaluetrans) / (maxpvaluetrans - minpvaluetrans) * 3))
    #nx = np.asarray(x) + (np.random.rand(len(x))-0.5) * 0.05
    #ny = np.asarray(y) + (np.random.rand(len(y))-0.5) * 0.05
    #nx[nx<0] = 0
    #ny[ny<0] = 0
    #nx = x
    #ny = y
    cplot = ax.scatter(x,y,s=pvaluetransed*15,alpha=0.8,c = (np.asarray(y)-np.asarray(x))/2,cmap=cm.Spectral,edgecolors="black",linewidths=0.5)#cmap=cm.gist_earth)
    #ax.plot([0,1],[0,1],'--',color="gray")
    cb = fig.colorbar(cplot,ax=ax)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    # diagonal reference spanning slightly beyond the joint data range
    min_all = min(np.min(x),np.min(y))-0.02
    max_all = max(np.max(x),np.max(y))+0.02
    ax.plot([min_all,max_all],[min_all,max_all],'--',color="gray")
    #ax.set_xlim(-0.02,0.3)
    #ax.set_ylim(-0.02,0.3)
    ax.grid(True,ls='--')
    fig.tight_layout()
    plt.savefig(figprefix+".png",format='png',dpi=300);plt.savefig(figprefix+".svg",format='svg',dpi=300);
    plt.clf();plt.close();
    return 0
def adjacent_values(xmin,xmax, q1, q3):
    """Tukey whisker endpoints for a box/violin plot.

    Returns (lower, upper) where upper = q3 + 1.5*IQR clipped to [q3, xmax]
    and lower = q1 - 1.5*IQR clipped to [xmin, q1].
    """
    iqr = q3 - q1
    upper = np.clip(q3 + 1.5 * iqr, q3, xmax)
    lower = np.clip(q1 - 1.5 * iqr, xmin, q1)
    return lower, upper
def plot_dfboxplot(df,fig_prefix,xlabel,ylabel,outshow=False,colors=None,ylim=[],markersize=8,showmeans=False,showscatter=False,figsize=(3,8),violin=0,uniq_xticklabels=None,linewidths=0.0,rotation=45):
    """Grouped box (or violin) plot from a long-format DataFrame.

    df must contain the columns "xlabelticks" (x categories), "group"
    (legend series) and "data" (values); one box/violin is drawn per
    (category, group) pair, with an optional jittered scatter overlay.
    Writes <fig_prefix>.png/.svg and returns 0.
    """
    if uniq_xticklabels is None:
        uniq_xticklabels = sorted(set(df["xlabelticks"]))
    nxticklabels = len(uniq_xticklabels)
    uniq_legends = sorted(set(df["group"]))
    nlegends = len(uniq_legends)
    if colors is None:  # BUGFIX: was `colors == None`
        colors = styles(nlegends)[0]
        if nlegends == 1:
            # single series: color by x category instead of by group
            colors = styles(nxticklabels)[0]
    xpos = np.arange(nxticklabels)
    width = 0.9/nlegends  # the groups at each category share 90% of the slot
    # lighter fill when raw points are overlaid so they stay visible
    if showscatter: alpha=0.4
    else:alpha=0.5
    fig = plt.figure(figsize=figsize,dpi=300) # 3,8
    ax = fig.add_subplot(111)
    slplot = []  # proxy artists used only to build the legend
    for i in range(nlegends):
        x = []
        for j in range(nxticklabels):
            tmpdata = df[ (df["xlabelticks"] == uniq_xticklabels[j]) & (df["group"] == uniq_legends[i])]["data"].values
            x.append(tmpdata)
        if not violin:
            # BUGFIX: removed a leftover debug print(i) here
            bp = ax.boxplot(x,widths=width,positions=xpos+width*i,showmeans=showmeans,meanline=showmeans,notch=False,showfliers=outshow)
            plt.setp(bp['boxes'], color="black",linewidth=1.0); plt.setp(bp['whiskers'], color='black',linewidth=1.0); plt.setp(bp['medians'], color='black',linewidth=1.0)
            if outshow: plt.setp(bp['fliers'], color=colors[i], marker='o',markersize=6)
            for j in range(nxticklabels):
                # color by legend series when grouped, by category otherwise
                if nlegends > 1: ploti = i
                else: ploti = j
                if showscatter:
                    tx = x[j]
                    ttx = np.random.normal(j+width*i, width/10, size=len(tx))
                    ax.scatter(ttx,tx,marker='o',color=colors[ploti],alpha=alpha,s=markersize,linewidths=linewidths)
                box = bp['boxes'][j]
                boxX = box.get_xdata().tolist(); boxY = box.get_ydata().tolist(); boxCoords = list(zip(boxX,boxY));
                boxPolygon = Polygon(boxCoords, facecolor=colors[ploti],alpha=alpha)
                ax.add_patch(boxPolygon)
                sp, = ax.plot([1,1],'o',color=colors[ploti])  # legend proxy
                slplot.append(sp)
        else:
            vp = ax.violinplot(x,xpos+width*i,widths=width,showmeans=False, showmedians=False,showextrema=False)
            for j in range(nxticklabels):
                if nlegends > 1: ploti = i
                else: ploti = j
                pc = vp['bodies'][j]
                pc.set_facecolor(colors[ploti]); pc.set_edgecolor('black');pc.set_alpha(0.5)
            xmin = []; xmax = []; xquartile1 = []; xmedians = []; xquartile3 = []
            for xi in x:
                quartile1, medians, quartile3 = np.percentile(xi, [25, 50, 75])
                xmin.append(np.min(xi)); xmax.append(np.max(xi))
                xquartile1.append(quartile1); xmedians.append(medians); xquartile3.append(quartile3)
            whiskers = np.array([adjacent_values(x_min,x_max, q1, q3) for x_min,x_max, q1, q3 in zip(xmin,xmax, xquartile1, xquartile3)])
            whiskersMin, whiskersMax = whiskers[:, 0], whiskers[:, 1]
            # median dots plus white quartile/whisker bars over the violins
            ax.scatter(xpos+width*i, xmedians, marker='o', color='black', s=30, zorder=3)
            ax.vlines(xpos+width*i, xquartile1, xquartile3, color='white', linestyle='-', lw=5)
            ax.vlines(xpos+width*i, whiskersMin, whiskersMax, color='white', linestyle='-', lw=1)
            sp, = ax.plot([1,1],'o',color=colors[ploti])  # legend proxy
            slplot.append(sp)
    if nlegends > 1:
        ax.legend(slplot,uniq_legends,loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0, fancybox=True,numpoints=1)
        for sp in slplot: sp.set_visible(False)  # proxies are legend-only
    ax.set_xticks(xpos+width/2*(nlegends-1))
    # BUGFIX: hafmt was computed with inverted logic and then ignored
    # (ha was hard-coded to "center"); rotated labels anchor on their right
    # edge, matching the convention used elsewhere in this module.
    hafmt = "center" if rotation in [0,90] else "right"
    ax.set_xticklabels(uniq_xticklabels,rotation=rotation,ha=hafmt)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0-width*1.4/2,xpos[-1]+1-width/2)
    if ylim:ax.set_ylim(ylim[0],ylim[-1])
    ax.grid(False)
    if nlegends > 1:
        fig.tight_layout(rect = [0,0,1,0.9])  # leave headroom for the legend
    else:
        fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300);plt.savefig(fig_prefix+".svg",format='svg',dpi=300);plt.clf();plt.close();
    return 0
def plot_boxplot(Xnp,fig_prefix,xlabel,ylabel,xticks_labels,outshow=1,colors=None,ylim=1,figsize=(6,5)):
    """Boxplot of the rows of Xnp (one box per row), each box filled with a
    per-row color.

    outshow=1 shows outliers as red '+' markers; otherwise they are hidden.
    NOTE: when ylim is truthy the y range is fixed to [-10, 10] (historical
    behavior kept for compatibility). Writes <fig_prefix>.png/.svg; returns 0.
    """
    fig = plt.figure(dpi=300,figsize=figsize)
    ax1 = fig.add_subplot(111)
    if outshow == 1:
        bp = ax1.boxplot(Xnp.T)
        plt.setp(bp['boxes'], color='white')
        plt.setp(bp['whiskers'], color='black')
        plt.setp(bp['fliers'], color='red', marker='+')
    else:
        bp = ax1.boxplot(Xnp.T,0,'')
    n,p = Xnp.shape
    if colors is None:  # BUGFIX: was `colors == None`
        colors = color_grad(n,cm.Paired)
    for i in range(n):
        box = bp['boxes'][i]
        boxX = box.get_xdata().tolist()
        boxY = box.get_ydata().tolist()
        # BUGFIX: on Python 3 zip() returns a one-shot iterator, which
        # matplotlib's Polygon cannot consume as a vertex array.
        boxCoords = list(zip(boxX,boxY))
        boxPolygon = Polygon(boxCoords, facecolor=colors[i])
        ax1.add_patch(boxPolygon)
    ax1.set_xticklabels(xticks_labels,rotation=45)
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    if ylim:
        ax1.set_ylim(-10,10)
    ax1.grid(True)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def plot_Xscore(Xnp,classnums,uniqclassnum,uniqcolor,uniqmarker,uniqclasslabel,fig_prefix,xlabel,ylabel,zlabel=None,dim=2,figsize=(5,4),markersize=30):
    """2-D or 3-D score scatter (e.g. PCA/PLS scores), one legend entry per
    class; colors and markers cycle when classes outnumber the styles.
    Returns 0 on success, 1 for an unsupported `dim`."""
    Xnp = np.asarray(Xnp)
    fig = plt.figure(figsize=figsize, dpi=300)
    if dim == 3:
        ax1 = fig.add_subplot(111, projection='3d')
    elif dim == 2:
        ax1 = fig.add_subplot(111)
    else:
        sys.stderr.write("[ERROR] Dim '%d' plot failed\n" % dim)
        return 1
    class_arr = np.array(classnums)
    for k in range(len(uniqclassnum)):
        sel = class_arr == uniqclassnum[k]
        color = uniqcolor[k % len(uniqcolor)]
        marker = uniqmarker[k % len(uniqmarker)]
        label = uniqclasslabel[k]
        if dim == 2:
            ax1.scatter(Xnp[sel, 0], Xnp[sel, 1], color=color, marker=marker,
                        label=label, alpha=0.7, s=markersize)
            ax1.grid(True)
        else:
            ax1.scatter(Xnp[sel, 0], Xnp[sel, 1], Xnp[sel, 2], color=color,
                        marker=marker, label=label, alpha=0.7, s=markersize)
    ax1.legend(loc=0, numpoints=1)
    ax1.grid(True, ls='--')
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    if dim == 3 and zlabel is not None:
        ax1.set_zlabel(zlabel)
    fig.tight_layout()
    plt.savefig(fig_prefix + ".png", format='png', dpi=300)
    plt.savefig(fig_prefix + ".svg", format='svg', dpi=300)
    plt.clf()
    plt.close()
    return 0
def plot_XYscore(Xnp,Y,classnums,uniqclassnum,uniqcolor,uniqmarker,uniqclasslabel,fig_prefix,xlabel,ylabel,zlabel=None,dim=2,figsize=(5,4)):
    """Like plot_Xscore, but the last plotted axis is replaced by the first
    column of Y. NOTE: mutates the caller's Xnp in place (original behavior)."""
    Xnp[:, dim - 1] = Y[:, 0]
    return plot_Xscore(Xnp, classnums, uniqclassnum, uniqcolor, uniqmarker,
                       uniqclasslabel, fig_prefix, xlabel, ylabel, zlabel, dim,
                       figsize=figsize)
def plot_markxy(X1,Y1,X2,Y2,xlabel,ylabel,fig_prefix):
    """Overlay two point sets: (X1, Y1) as blue crosses, (X2, Y2) as red dots.
    Writes <fig_prefix>.png/.svg and returns 0."""
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    for xs, ys, fmt in ((X1, Y1, 'b+'), (X2, Y2, 'ro')):
        ax.plot(xs, ys, fmt)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.grid(True)
    for ext in ("png", "svg"):
        plt.savefig(fig_prefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def draw_lines(data,xlabels,legends,ylabel,fig_prefix,colors=None,markers=None,lstyles=None,figsize=(5,4),linewidth=2.0,alpha=0.8,rotation=45):
    """Draw one line per row of `data` (n x p) over p x positions labelled by
    xlabels, with one legend entry per row.

    colors/markers/lstyles override the default style cycle when given.
    Writes <fig_prefix>.png/.svg and returns 0.
    """
    n,p = data.shape
    ret_color,ret_lines,ret_marker = styles(n)
    if colors is not None:
        ret_color = makestyles(colors,n)
    if lstyles is not None:
        ret_lines = makestyles(lstyles,n)
    if markers is not None:
        ret_marker= makestyles(markers,n)
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    xloc = list(range(p))
    for i in range(n):
        tmpdata = data[i,:]
        # BUGFIX: the declared linewidth/alpha arguments were silently ignored
        ax.plot(xloc,tmpdata,ls=ret_lines[i],color=ret_color[i],label=legends[i],lw=linewidth,alpha=alpha)
    ax.set_ylabel(ylabel)
    # BUGFIX: tick positions must be fixed *before* assigning tick labels,
    # otherwise the labels attach to the auto-chosen ticks and can be
    # misplaced or partially dropped.
    ax.set_xticks(np.arange(0,p))
    ax.set_xticklabels(xlabels,ha="right",rotation=rotation)
    ax.set_xlim(-0.5,p-0.5)
    # pad the y range by 10% of the data span on both sides
    yrange = np.max(data) - np.min(data)
    ax.set_ylim(np.min(data)-yrange*0.1,np.max(data)+yrange*0.1)
    ax.legend(loc=0)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def plotline(Xvector,Ys,fig_prefix,xlabel,ylabel,colors,legends=None,title=None,xlimmax = None,ylimmax = None, figsize=(6,4),linewidth=1.0,xlim=[]):
    """Plot each row of Ys (n x p) against Xvector using the per-row style
    strings in `colors`; the first len(legends) rows get legend labels.
    Writes <fig_prefix>.png/.svg and returns 0."""
    n, p = Ys.shape
    fig = plt.figure(dpi=300, figsize=figsize)
    ax = fig.add_subplot(111)
    n_labeled = len(legends) if legends is not None else 0
    for row in range(n):
        if row < n_labeled:
            ax.plot(Xvector, Ys[row, :], colors[row], label=legends[row], linewidth=linewidth)
        else:
            ax.plot(Xvector, Ys[row, :], colors[row], linewidth=linewidth)
    if legends is not None:
        ax.legend(loc=0)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if title is not None:
        ax.set_title(title)
    if ylimmax:
        ax.set_ylim(0, ylimmax)
    if xlimmax:
        ax.set_xlim(0, p)
    if xlim:
        # explicit xlim wins over the xlimmax shortcut above
        ax.set_xlim(xlim[0], xlim[-1])
    ax.grid(True, ls='--')
    fig.tight_layout()
    for ext in ("png", "svg"):
        plt.savefig(fig_prefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def plot_time_pos(hdata,fig_prefix,xlabel,colors=None,t1color=None,t2color=None,figsize=(6,4),width=5.0,hcolor=None,hshape=None):
    # Timeline plot: one horizontal arrow per id spanning [start, end], with
    # per-event markers drawn on top of each arrow.
    # hdata maps id -> (start, end, events); events yields (time, clin, tissue)
    # where hshape[clin] gives the marker shape and hcolor[tissue] its color.
    # NOTE(review): xlabel, t1color and t2color are currently unused.
    fig = plt.figure(dpi=300,figsize=figsize)
    timepos = []
    for pid in hdata:
        timepos.append([pid,hdata[pid][0],hdata[pid][1]])
    # presumably sorts rows by the start then end columns — confirm utils.us_sort
    timepos_sort = utils.us_sort(timepos,1,2)
    ax = fig.add_subplot(111)
    idx = 0
    vmin = np.inf
    vmax = -np.inf
    if colors is None:
        pcolors = styles(len(hdata))[0]
    else:
        # a single caller-supplied color is repeated for every row
        pcolors = [colors,] * len(hdata)
    for pid,start,end in timepos_sort:
        idx += 1
        pdtp1tp2 = hdata[pid]
        # ax.plot([start,end],[idx,idx],pcolors,linewidth=linewidth)
        vmin = np.min([vmin,start])
        vmax = np.max([vmax,end])
        # headless arrow acts as a thick horizontal bar for the time span
        ax.arrow(start,idx,end-start,0,fc=pcolors[idx-1], ec=pcolors[idx-1],lw=0.5,ls='-',width=width,head_width=width,head_length=0,shape='full',alpha=0.2,length_includes_head=True)
        # hcolor to plot, hshape to plot
        tmpall = hdata[pid][2]
        for ttimepos,tclin,tissue in tmpall:
            ax.scatter([ttimepos],[idx,],marker=hshape[tclin],color=hcolor[tissue],s=30) # ax.scatter(ttx,tx,marker='o',color=colors[ploti],alpha=alpha,s=markersize,linewidths=linewidths)
    ax.set_yticks(np.arange(1,idx+1,1))
    ax.yaxis.set_ticks_position('left')
    yticklabels = ax.set_yticklabels([t[0] for t in timepos_sort])
    ax.set_ylim(0,idx+1)
    ax.set_xlim(vmin,vmax)
    ax.grid(True,ls='--',axis='y')
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def barh_dict_class(hdata,fig_prefix,xlabel,ylabel,title = "",width=0.4,legends=[],colors=[],fmt="%.2f",ylog=0,rotation=0,plot_txt = 1):
    # Horizontal grouped bar chart from a dict-of-dicts:
    # hdata maps class name -> {bar label: value}; one bar color per class,
    # bars with value <= 0 are skipped. Writes <fig_prefix>.png/.svg.
    # NOTE(review): title/legends/fmt/ylog/rotation/plot_txt are currently
    # unused here, and the chosen figsize is overridden by
    # fig.set_size_inches(10, 12) near the end.
    data = []
    yticklabels = []
    classnames = []
    classnumbers = [0] * len(hdata.keys())
    if not colors:
        color_class = cm.Paired(np.linspace(0, 1, len(hdata.keys())))
    else:
        color_class = colors
    idx = 0
    plot_idx = []   # [start, end) slice of `data` belonging to each class
    plot_start = 0
    for classname in sorted(hdata.keys()):
        classnames.append(classname)
        for key in hdata[classname]:
            if hdata[classname][key] <=0:continue
            yticklabels.append(key)
            classnumbers[idx] += 1
            data.append(hdata[classname][key])
        plot_idx.append([plot_start,len(data)])
        plot_start += len(data)-plot_start
        idx += 1
    # taller figure and smaller fonts when there are many bars
    if len(data) > 16:
        fig = plt.figure(figsize=(5,15),dpi=300)
        fontsize_off = 2
    else:
        fig = plt.figure(figsize=(5,7),dpi=300)
    ax = fig.add_subplot(111)
    linewidth = 0
    alpha=0.8
    ylocations = np.arange(len(data))+width*2
    rects = []
    # one barh call per class so each class keeps its own color
    for i in range(len(plot_idx)):
        s,e = plot_idx[i]
        rect = ax.barh(ylocations[s:e],np.asarray(data[s:e]),width,color=color_class[i],linewidth=linewidth,alpha=alpha,align='center')
        rects.append(rect)
    ax.set_yticks(ylocations)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ylabelsL = ax.set_yticklabels(yticklabels)
    ax.set_ylim(0,ylocations[-1]+width*2)
    tickL = ax.yaxis.get_ticklabels()
    for t in tickL:
        t.set_fontsize(t.get_fontsize() - 2)
    ax.xaxis.grid(True)
    ax.legend(classnames,loc=0,fontsize=8)
    #print fig.get_size_inches()
    fig.set_size_inches(10,12)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf();plt.close();
    return 0
def bar_dict_class(hdata,fig_prefix,xlabel,ylabel,title = "",width=0.35,legends=[],colors=[],fmt="%.2f",ylog=0,rotation=0,plot_txt = 1):
    # Vertical grouped bar chart from a dict-of-dicts:
    # hdata maps class name -> {bar label: value}; one bar color per class,
    # bars with value <= 0 are skipped and each bar is annotated with its
    # value when plot_txt is truthy. Writes <fig_prefix>.png/.svg.
    # NOTE(review): the `legends` and `title` parameters are currently unused.
    data = []
    xticklabels = []
    classnames = []
    classnumbers = [0] * len(hdata.keys())
    if not colors:
        color_class = cm.Paired(np.linspace(0, 1, len(hdata.keys())))
    else:
        color_class = colors
    idx = 0
    plot_idx = []   # [start, end) slice of `data` belonging to each class
    plot_start = 0
    for classname in sorted(hdata.keys()):
        flagxx = 0
        for key in hdata[classname]:
            if hdata[classname][key] <=0:continue
            xticklabels.append(key)
            classnumbers[idx] += 1
            data.append(hdata[classname][key])
            flagxx = 1
        # classes whose values were all skipped get no slice and no legend entry
        if flagxx:
            plot_idx.append([plot_start,len(data)])
            plot_start += len(data)-plot_start
            idx += 1
            classnames.append(classname)
    # wider figure and smaller fonts when there are many bars
    fontsize_off = 2
    if len(data) > 16:
        fig = plt.figure(figsize=(10,5),dpi=300)
        fontsize_off = 3
    else:
        fig = plt.figure(figsize=(7,5),dpi=300)
    ax = fig.add_subplot(111)
    if ylog:
        ax.set_yscale("log",nonposy='clip')
    linewidth = 0
    alpha=0.8
    xlocations = np.arange(len(data))+width*2
    #rects = ax.bar(xlocations,np.asarray(data),width,color=plot_colors,linewidth=linewidth,alpha=alpha,align='center')
    rects = []
    # one bar call per class so each class keeps its own color
    for i in range(len(plot_idx)):
        s,e = plot_idx[i]
        rect = ax.bar(xlocations[s:e],np.asarray(data[s:e]),width,color=color_class[i],linewidth=linewidth,alpha=alpha,align='center')
        rects.append(rect)
    max_height = 0
    if plot_txt:
        # annotate each bar with its numeric value just above its top
        for rk in rects:
            for rect in rk:
                height = rect.get_height()
                if height < 0.1:continue
                ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=(8-fontsize_off))
    ax.set_xticks(xlocations)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    # horizontal/vertical labels stay centered; slanted labels anchor right
    if rotation == 0 or rotation == 90:hafmt="center"
    else:hafmt="right"
    xlabelsL = ax.set_xticklabels(xticklabels,ha=hafmt,rotation=rotation)
    ax.set_xlim(0,xlocations[-1]+width*2)
    tickL = ax.xaxis.get_ticklabels()
    for t in tickL:
        t.set_fontsize(t.get_fontsize() - 2)
    ax.yaxis.grid(True)
    # leave headroom above the tallest bar for the text annotations
    if ylog:
        ax.set_ylim(0.99,np.max(data)*2)
    else:
        ax.set_ylim(0,np.max(data)*1.35)
    ax.legend(classnames,fancybox=True, loc=0, fontsize=(8-fontsize_off))
    #ax.legend(classnames,loc='upper center', bbox_to_anchor=(0.5, 1.0),ncol=6,fancybox=True, shadow=True)
    plt.tick_params(axis='x', # changes apply to the x-axis
            which='both', # both major and minor ticks are affected
            bottom='off', # ticks along the bottom edge are off
            top='off', # ticks along the top edge are off
            labelbottom='on') # labels along the bottom edge are off
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf();plt.close();
    return 0
def lineraworder(data,xticklabels,fig_prefix,xlabel,ylabel,title = "",width=0.4,fmt="%.2f",ylog=0,rotation=0,linecolor="r",ls="--",marker='.'):
    """Plot the values of `data` in their given order as one marked line.
    NOTE: xticklabels/title/fmt/rotation are currently unused (kept for
    API compatibility). Writes <fig_prefix>.png/.svg and returns 0."""
    fig = plt.figure(figsize=(7, 5), dpi=300)
    ax = fig.add_subplot(111)
    if ylog:
        ax.set_yscale("log", nonposy='clip')
    if not linecolor:
        linecolor = styles(len(data))[0]
    positions = np.arange(len(data)) + width * 2
    ax.plot(positions, data, ls=ls, marker=marker, markerfacecolor=linecolor,
            markeredgecolor=linecolor, color=linecolor)
    ax.set_xticks(positions)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_xlim(0, positions[-1] + width * 2)
    fig.tight_layout()
    for ext in ("png", "svg"):
        plt.savefig(fig_prefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def lineplot(data,labels,fig_prefix,xlabel,ylabel,title = "",width=0.4,fmt="%.2f",ylog=0,rotation=0):
    """Plot columns 1..p-1 of `data` against column 0, one styled line per
    column, labelled from `labels`. Writes <fig_prefix>.png/.svg; returns 0."""
    fig = plt.figure(figsize=(7, 6), dpi=300)
    ax = fig.add_subplot(111)
    if ylog:
        ax.set_yscale("log", nonposy='clip')
    n, p = data.shape
    linecolors, lses, markers = styles(p)
    assert p >= 2  # need the x column plus at least one y column
    xcol = data[:, 0]
    for col in range(1, p):
        c = linecolors[col]
        ax.plot(xcol, data[:, col], ls=lses[col], marker=markers[col],
                markerfacecolor=c, markeredgecolor=c, color=c, label=labels[col])
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.legend(loc=0, numpoints=1)
    ax.grid(True)
    fig.tight_layout()
    for ext in ("png", "svg"):
        plt.savefig(fig_prefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def barlineraworder(data,xticklabels,fig_prefix,xlabel,ylabel,title = "",width=0.4,colors=[],fmt="%.2f",ylog=0,rotation=0,linecolor="r",figsize=(7,5)):
    """Bar plot of *data* in the given order with an overlaid trend line.

    Each bar is annotated with its value (bars with |value| < 0.1 are left
    unlabelled).  Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi.

    Parameters
    ----------
    data : 1-D sequence of bar heights (may contain negative values)
    xticklabels : one tick label per bar
    colors : per-bar colors; defaults to the shared style palette
    ylog : if truthy, log-scale the y axis
    rotation : x tick label rotation in degrees
    linecolor : color of the overlaid line
    """
    fig = plt.figure(figsize=figsize,dpi=300)
    ax = fig.add_subplot(111)
    if ylog: ax.set_yscale("log",nonposy='clip')
    linewidth = 0; alpha = 1.0
    if not colors:
        colors = styles(len(data))[0]
    xlocations = np.arange(len(data))+width*2
    rects = ax.bar(xlocations,np.asarray(data),width,color=colors,linewidth=linewidth,alpha=alpha,align='center')
    for idx, rect in enumerate(rects):
        height = rect.get_height()
        # BUGFIX: compare |height| so negative bars are not silently skipped
        # (the negative-label branch below was unreachable before).
        if abs(height) < 0.1: continue
        if data[idx] < 0:
            height = -abs(height)  # place the label below the bar end
            ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='top',fontsize=10)
        else:
            ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=10)
    ax.plot(xlocations,data,ls='--',marker='.',markerfacecolor=linecolor,markeredgecolor=linecolor,color=linecolor)
    ax.set_xticks(xlocations)
    ax.set_ylabel(ylabel); ax.set_xlabel(xlabel)
    # center labels for horizontal/vertical text, right-align angled text
    if rotation == 0 or rotation == 90:
        hafmt='center'
    else:hafmt = 'right'
    xlabelsL = ax.set_xticklabels(xticklabels,ha=hafmt,rotation=rotation)
    ax.set_title(title)
    ax.set_xlim(0,xlocations[-1]+width*2)
    ax.yaxis.grid(True)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf();plt.close();
    return 0
def bar_dict(hdata,fig_prefix,xlabel,ylabel,title = "",width=0.4,legends=[],colors=[],fmt="%.2f",ylog=0,hlist=None,rotation=0,filter_flag=1):
    """Bar plot of a {label: value} dict, saved as <fig_prefix>.png/.svg.

    Keys are plotted in sorted order (or sorted(hlist) order when *hlist*
    restricts which keys are drawn); values <= 0 are dropped while
    *filter_flag* is truthy.  Each bar is annotated with its value.
    """
    data = []
    xticklabels = []
    # BUGFIX/idiom: `is None` instead of `== None`; the two loop bodies were
    # byte-identical, so they are merged into one.
    keys = sorted(hdata) if hlist is None else sorted(hlist)
    for key in keys:
        if hdata[key] <= 0 and filter_flag:
            continue  # hide non-positive values unless explicitly kept
        xticklabels.append(key)
        data.append(hdata[key])
    fig = plt.figure(figsize=(7,5),dpi=300)
    ax = fig.add_subplot(111)
    if ylog:
        ax.set_yscale("log",nonposy='clip')
    linewidth = 0
    alpha = 1.0
    if not colors:
        colors = cm.Accent(np.linspace(0, 1, len(data)))
    xlocations = np.arange(len(data))+width*2
    rects = ax.bar(xlocations,np.asarray(data),width,color=colors,linewidth=linewidth,alpha=alpha,align='center')
    for idx, rect in enumerate(rects):
        height = rect.get_height()
        # BUGFIX: use |height| so negative bars get labels too (the negative
        # branch was unreachable with the old `height < 0.1` test).
        if abs(height) < 0.1: continue
        if data[idx] < 0:
            height = -abs(height)  # place the label below the bar end
            ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='top',fontsize=8)
        else:
            ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=8)
    ax.set_xticks(xlocations)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    if rotation == 0 or rotation == 90:
        hafmt = 'center'
    else:
        hafmt = 'right'
    xlabelsL = ax.set_xticklabels(xticklabels,ha=hafmt,rotation=rotation)
    ax.set_title(title)
    ax.set_xlim(0,xlocations[-1]+width*2)
    # shrink tick-label font slightly so long labels fit
    for t in ax.xaxis.get_ticklabels():
        t.set_fontsize(t.get_fontsize() - 2)
    ax.yaxis.grid(True)
    # leave headroom above the tallest bar for its text annotation
    if ylog and filter_flag:
        ax.set_ylim(0.99,np.max(data)*2)
    elif filter_flag:
        ax.set_ylim(0,np.max(data)*1.5)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf();plt.close();
    return 0
def cluster_stackv_bar_plot(data,xticks_labels,fig_prefix,xlabel,ylabel,title="",width=0.7,legends=[],colors=[],scale=0,rotation=0,nocluster=0,noline=0):
    """Horizontal stacked bar plot with an optional Ward-clustering dendrogram.

    data : (n_categories, p_samples); each sample becomes one stacked bar,
        reordered by hierarchical clustering unless *nocluster* is truthy.
    scale : if truthy, scale each bar's segments to percentages (sum = 100).
    noline : if truthy, omit the connector lines between adjacent bars.
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    NOTE(review): xlabel, ylabel, title and rotation are currently unused.
    """
    # transpose so rows are samples for the pairwise-distance computation
    Xnpdata = data.T.copy()
    #Xnpdata = np.random.random((12,9))
    lfsm = 4#8
    # grow the figure height with the number of bars, capped at 16 inches
    if len(xticks_labels) > 40:
        lfsm = int(len(xticks_labels) * 1.0 * 8/40); lfsm = np.min([lfsm,16])
    widsm = 8#8
    fig = plt.figure(figsize=(widsm,lfsm))
    # left panel (15% width) holds the dendrogram, right panel the bars
    stackmapGS = gridspec.GridSpec(1,2,wspace=0.0,hspace=0.0,width_ratios=[0.15,1])
    if not nocluster:
        col_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xnpdata,'euclidean'))
        #print col_pairwise_dists
        col_clusters = linkage(col_pairwise_dists,method='ward')
        col_denAX = fig.add_subplot(stackmapGS[0,0])
        col_denD = dendrogram(col_clusters,orientation='left')
        col_denAX.set_axis_off()
    n,p = data.shape
    ind = np.arange(p)
    if not nocluster:
        # reorder samples to match the dendrogram leaf order
        tmp = np.float64(data[:,col_denD['leaves']])
    else:
        tmp = data
    if scale:
        tmp = tmp/np.sum(tmp,0)*100
    if not colors:
        colors = styles(n)[0]
    lfsm = 8
    stackvAX = fig.add_subplot(stackmapGS[0,1])
    linewidth = 0
    alpha=0.8
    def plot_line_h(ax,rects):
        # connect the segment boundaries of adjacent bars with thin lines
        for i in range(len(rects)-1):
            rk1 = rects[i]
            rk2 = rects[i+1]
            x1 = rk1.get_x()+rk1.get_width()
            y1 = rk1.get_y()+rk1.get_height()
            x2 = rk2.get_x()+rk2.get_width()
            y2 = rk2.get_y()
            ax.plot([x1,x2],[y1,y2],'k-',linewidth=0.4)
        return 0
    for i in range(n):
        if i:
            # stack each category to the right of the accumulated widths
            # NOTE(review): the [0] index assumes tmp rows index like a
            # 1 x p numpy matrix — confirm with callers
            cumtmp = cumtmp + np.asarray(tmp[i-1,:])[0]
            rects = stackvAX.barh(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,left=cumtmp,align='edge',label=legends[i])
            if not noline:plot_line_h(stackvAX,rects)
        else:
            cumtmp = 0
            rects = stackvAX.barh(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,align='edge',label=legends[i])
            if not noline:plot_line_h(stackvAX,rects)
    stackvAX.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,fancybox=True, shadow=True)
    stackvAX.set_ylim(0-(1-width),p)
    #clean_axis(stackvAX)
    #stackvAX.set_ylabel(xlabel)
    #stackvAX.set_yticks(ind)
    #stackvAX.set_yticklabels(xticks_labels,rotation=rotation)
    if scale:
        stackvAX.set_xlim(0,100)
    if nocluster:
        t_annonames = xticks_labels
    else:
        t_annonames = [xticks_labels[i] for i in col_denD['leaves']]
    stackvAX.set_yticks(np.arange(p)+width/2)
    stackvAX.yaxis.set_ticks_position('right')
    stackvAX.set_yticklabels(t_annonames)
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def stackv_bar_plot(data,xticks_labels,fig_prefix,xlabel,ylabel,title="",width=0.8,legends=[],colors=[],scale=0,rotation=45,orientation="vertical",legendtitle="",figsize=(8,6)):
    """Stacked bar plot; *orientation* is "vertical" or "horizontal".

    data : (n_categories, p_bars); one stacked bar per column.
    scale : if truthy, normalize each bar's segments to percentages and draw
        connector lines between segment boundaries of adjacent bars.
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    n,p = data.shape
    ind = np.arange(p)
    tmp = np.float64(data.copy())
    #tmp = np.cumsum(data,0)
    #print tmp - data
    if scale:
        tmp = tmp/np.sum(tmp,0)*100
    #print tmp
    #tmp = np.cumsum(tmp,0)
    if not colors:
        #colors = cm.Dark2(np.linspace(0, 1, n))
        colors = styles(n)[0]
    # choose a figure size; auto-grow the long side for many bars
    if figsize is None:
        lfsm = 6
        widsm = 8
        if len(xticks_labels) > 40:
            lfsm = int(len(xticks_labels) * 1.0 * 8/40); lfsm = np.min([lfsm,16])
    else:
        widsm, lfsm = figsize
    if orientation == "vertical":
        fig = plt.figure(figsize=(widsm,lfsm),dpi=300)
    elif orientation == "horizontal":
        fig = plt.figure(figsize=(widsm,lfsm),dpi=300)
    ax = fig.add_subplot(121)
    linewidth = 0
    alpha=1.0
    def plot_line_h(ax,rects):
        # connect segment boundaries of adjacent horizontal bars
        for i in range(len(rects)-1):
            rk1 = rects[i]
            rk2 = rects[i+1]
            x1 = rk1.get_x()+rk1.get_width()
            y1 = rk1.get_y()+rk1.get_height()
            x2 = rk2.get_x()+rk2.get_width()
            y2 = rk2.get_y()
            ax.plot([x1,x2],[y1,y2],'k-',linewidth=0.4)
        return 0
    def plot_line_v(ax,rects):
        # connect segment boundaries of adjacent vertical bars
        for i in range(len(rects)-1):
            rk1 = rects[i]
            rk2 = rects[i+1]
            x1 = rk1.get_y()+ rk1.get_height()
            y1 = rk1.get_x()+rk1.get_width()
            x2 = rk2.get_y()+rk2.get_height()
            y2 = rk2.get_x()
            ax.plot([y1,y2],[x1,x2],'k-',linewidth=0.4)
    for i in range(n):
        if i:
            # offset each category by the running sum of the previous ones
            # NOTE(review): the [0] index assumes tmp rows index like a
            # 1 x p numpy matrix — confirm with callers
            cumtmp = cumtmp + np.asarray(tmp[i-1,:])[0]
            if orientation == "vertical":
                rects = ax.bar(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,bottom=cumtmp,align='center',label=legends[i])
                #for rk in rects:
                #    print "h",rk.get_height()
                #    print "w",rk.get_width()
                #    print "x",rk.get_x()
                #    print "y",rk.get_y()
                #break
                if scale:
                    plot_line_v(ax,rects)
            elif orientation == "horizontal":
                rects = ax.barh(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,left=cumtmp,align='center',label=legends[i])
                if scale:
                    plot_line_h(ax,rects)
                #for rk in rects:
                #print "h",rk.get_height()
                #print "w",rk.get_width()
                #print "x",rk.get_x()
                #print "y",rk.get_y()
        else:
            cumtmp = 0
            #print ind,np.asarray(tmp[i,:])[0]
            if orientation == "vertical":
                rects = ax.bar(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,align='center',label=legends[i])
                if scale:
                    plot_line_v(ax,rects)
            elif orientation == "horizontal":
                rects = ax.barh(ind,np.asarray(tmp[i,:])[0],width,color=colors[i],linewidth=linewidth,alpha=alpha,align='center',label=legends[i])
                if scale:
                    plot_line_h(ax,rects)
    #ax.legend(loc=0)
    if orientation == "vertical":
        ax.set_ylabel(ylabel)
        ax.set_xlabel(xlabel)
        ax.set_xticks(ind)
        ax.set_xticklabels(xticks_labels,rotation=rotation,ha="right")
        if scale:
            ax.set_ylim(0,100)
            ax.set_xlim(0-1,p)
        else:
            ax.set_xlim(0-1,p)
    else:
        # horizontal: axis roles are swapped
        ax.set_ylabel(xlabel)
        ax.set_xlabel(ylabel)
        ax.set_yticks(ind)
        ax.set_yticklabels(xticks_labels,rotation=rotation)
        if scale:
            ax.set_xlim(0,100)
            ax.set_ylim(0-1,p)
        else:
            ax.set_ylim(0-1,p)
    ax.set_title(title)
    #ax.grid(True)
    #ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0, fancybox=True, shadow=True, handlelength=1.1)
    #ax.legend(loc=0, fancybox=True, bbox_to_anchor=(1.02, 1),borderaxespad=0)
    plt.legend(title=legendtitle,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def bar_group(data,group_label,xticklabel,xlabel,ylabel,colors=None,fig_prefix="bar_group",title=None,width=0.3,ylog=0,text_rotation=0):
    """Grouped bar plot: one cluster of num_groups bars per x tick.

    data : (num_groups, p) array of bar heights.
    Each bar is annotated with its rounded value (heights < 0.0001 skipped).
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    num_groups,p = data.shape
    assert num_groups == len(group_label)
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    xlocations = np.arange(p)
    rects = []
    # NOTE(review): `is None` would be the safer comparison here
    if colors == None:
        """
        110 def color_grad(num,colorgrad=cm.Set2):
        111     color_class = cm.Set2(np.linspace(0, 1, num))
        112     return color_class
        """
        colors = color_grad(num_groups,colorgrad="Dark2")
    for i in range(num_groups):
        # shift each group by width*i so the groups sit side by side
        rect=ax.bar(xlocations+width*i, np.asarray(data)[i,:], width=width,linewidth=0,color=colors[i],ecolor=colors[i],alpha=0.6,label=group_label[i])
        rects.append(rect)
    for rk in rects:
        for rect in rk:
            height = rect.get_height()
            if height < 0.0001:continue
            ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, "%.0f"%float(height),ha='center', va='bottom',fontsize=(8-0),rotation=text_rotation)
    ax.legend(group_label,loc=0)
    ax.set_xticks(xlocations+width/2*num_groups)
    ax.set_xticklabels(xticklabel,ha="right",rotation=45)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    if ylog:
        ax.set_yscale("log")
    ax.grid(True)
    if title is not None:ax.set_title(title)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def err_line_group(data,error,group_label,xticklabel,xlabel,ylabel,colors,fig_prefix,title=None,xlim=None,ylim=None,figsize=(5,4)):
    """Dotted errorbar lines, one per group, sharing common x positions.

    data, error : (num_groups, p) arrays of means and error magnitudes.
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    NOTE(review): the *colors* parameter is accepted but unused — colors
    always come from the shared styles() palette.
    """
    num_groups,p = data.shape
    assert num_groups == len(group_label)
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    xlocations = np.arange(p) + 1
    ret_color,ret_lines,ret_marker = styles(num_groups)
    for i in range(num_groups):
        ax.errorbar(xlocations,data[i,:],yerr=error[i,:],marker=ret_marker[i],ms=8,ls='dotted',color=ret_color[i],capsize=5,alpha=0.6,label=group_label[i])
    ax.legend(group_label,loc=0)
    ax.set_xticks(xlocations)
    ax.set_xticklabels(xticklabel)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    if title is not None:ax.set_title(title)
    # default limits: pad 10% of the data span on each side
    xregion = (xlocations[-1] - xlocations[0]) * 0.1
    if xlim == None:
        ax.set_xlim(xlocations[0]-xregion,xlocations[-1]+xregion)
    yregion = np.max(data) - np.min(data)
    if ylim == None:
        ax.set_ylim(np.min(data)-yregion*0.1,np.max(data) + yregion*0.1)
    fig.tight_layout()
    ax.grid(False)
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def err_line_group_low_up(data,lower,upper,group_label,xticklabel,xlabel,ylabel,fig_prefix="test",title=None,xlim=None,ylim=None,figsize=(5,4),ylog=1):
    """Errorbar lines with asymmetric (lower/upper) error bounds per group.

    data, lower, upper : (num_groups, p) arrays; lower/upper are the error
    magnitudes below and above each mean.  Groups are offset horizontally so
    their error bars do not overlap.  Writes png+svg at 300 dpi; returns 0.
    """
    num_groups,p = data.shape
    assert num_groups == len(group_label)
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    xlocations = np.arange(p) + 1
    ret_color,ret_lines,ret_marker = styles(num_groups)
    # errorbar expects a (2, p) [lower; upper] array
    tmperr = np.zeros((2,p))
    tmpwidth = 0.95/num_groups
    for i in range(num_groups):
        tmperr[0,:] = lower[i,:]
        tmperr[1,:] = upper[i,:]
        ax.errorbar(xlocations + tmpwidth *i ,data[i,:],yerr=tmperr,marker=ret_marker[i],ms=8,color=ret_color[i],capsize=5,alpha=0.8,label=group_label[i])
    ax.legend(group_label,loc=0)
    ax.set_xticks(xlocations+tmpwidth*num_groups/2)
    ax.set_xticklabels(xticklabel)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    if title is not None:ax.set_title(title)
    xregion = (xlocations[-1]+0.95 - xlocations[0]) * 0.1
    if xlim is None:
        ax.set_xlim(xlocations[0]-xregion,xlocations[-1]+xregion)
    yregion = np.max(upper) - np.min(lower)
    if ylim is None:
        ax.set_ylim(np.min(data)-yregion*0.1,np.max(data) + yregion*0.1)
    if ylog:
        ax.set_yscale('log')
    fig.tight_layout()
    ax.grid(False)
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def bargrouperr(data,yerror=None,xlabel=None,ylabel=None,colors = None,fig_prefix="test",title=None,width=None,figsize=(5,4),rotation=0):
    """Grouped bar plot from a pandas DataFrame, with optional error bars.

    data : DataFrame — columns are groups, index provides the x tick labels.
    yerror : optional DataFrame with the same layout; only the upper error
        arm is drawn (the lower arm is zeroed).
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    groupnames = data.columns
    xticklabels = data.index
    num_groups = len(groupnames)
    if colors is None: colors = styles(num_groups)[0]
    # one cluster of bars fills 95% of a tick slot
    if width is None: width = 0.95/num_groups
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    xlocations = np.arange(len(xticklabels))
    for i in range(num_groups):
        groupname = groupnames[i]
        if yerror is None:
            ax.bar(xlocations+width*i, data.loc[:,groupname].tolist(),width=width,linewidth=1.0,facecolor=colors[i],edgecolor='black',alpha=0.6,label=groupnames[i])
        else:
            # (2, p) yerr with a zero lower arm -> error bars point up only
            yerrlim = np.zeros((2,len(yerror.loc[:,groupname].tolist())))
            yerrlim[1,:] = np.float64(yerror.loc[:,groupname].tolist())
            ax.bar(xlocations+width*i, data.loc[:,groupname].tolist(),yerr=yerrlim,capsize=10,error_kw={'elinewidth':1.0,'capthick':1.0,},width=width,linewidth=1.0,facecolor=colors[i],edgecolor='black',ecolor=colors[i],alpha=0.6,label=groupnames[i])
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0, fancybox=True)
    ax.set_xticks(xlocations+width/2*(num_groups-1))
    ax.set_xticklabels(xticklabels,rotation=rotation)
    if xlabel is not None:ax.set_xlabel(xlabel)
    if ylabel is not None:ax.set_ylabel(ylabel)
    if title is not None: ax.set_title(title)
    ax.set_xlim(0-width*0.75,xlocations[-1]+(num_groups-1+0.75)*width)
    fig.tight_layout()
    #ax.grid(True,axis="y")
    fig.tight_layout(rect = [0,0,1,0.9])
    plt.savefig(fig_prefix+".png",format='png',dpi=300); plt.savefig(fig_prefix+".svg",format='svg',dpi=300); plt.clf();plt.close()
    return 0
def bargroup(data,group_label,xticklabel,xlabel,ylabel,colors=None,fig_prefix="test",title=None,width=None): # group * xticks
    """Grouped bar plot: data is (num_groups, p), one bar cluster per tick.

    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    num_groups,p = data.shape
    # BUGFIX/idiom: compare against None with `is`, not `==` (equality can
    # misbehave or broadcast for array-like arguments)
    if colors is None:
        colors = styles(len(group_label))[0]
    assert num_groups == len(group_label)
    if width is None:
        width = 0.95/num_groups  # one cluster fills 95% of a tick slot
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    xlocations = np.arange(p)
    for i in range(num_groups):
        ax.bar(xlocations+width*i, data[i,:],width=width,linewidth=0,color=colors[i],ecolor=colors[i],alpha=0.6,label=group_label[i])
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0, fancybox=True)
    ax.set_xticks(xlocations+width/2*(num_groups-1))
    ax.set_xticklabels(xticklabel)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_xlim(-1,xlocations[-1]+1)
    if title is not None:ax.set_title(title)
    fig.tight_layout()
    ax.grid(True,axis="y")
    # second layout pass reserves the top band for the legend
    fig.tight_layout(rect = [0,0,1,0.9])
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf();plt.close()
    return 0
def err_bar_group(data,error,group_label,xticklabel,xlabel,ylabel,colors=None,fig_prefix="test",title=None,width=0.3,ylog=0,rotation=0):
    """Grouped bar plot with symmetric error bars, one bar color per group.

    data, error : (num_groups, p) arrays of heights and error magnitudes.
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    num_groups,p = data.shape
    # NOTE(review): `is None` would be the safer comparison here
    if colors == None:
        colors = color_grad(len(group_label))
    assert num_groups == len(group_label)
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    xlocations = np.arange(p)
    for i in range(num_groups):
        ax.bar(xlocations+width*i, data[i,:],yerr=error[i,:], width=width,linewidth=0,color=colors[i],ecolor=colors[i],alpha=0.6,label=group_label[i])# capsize=5
    #ax.legend(group_label,loc=0)
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),ncol=6,borderaxespad=0)
    ax.set_xticks(xlocations+width/2*num_groups)
    ax.set_xticklabels(xticklabel)
    ax.set_ylabel(ylabel)
    # NOTE(review): rotation is applied to the axis *label*, not the tick
    # labels — looks unintended; confirm with callers
    ax.set_xlabel(xlabel,rotation=rotation)
    if title is not None:ax.set_title(title)
    if ylog: ax.set_yscale("log",nonposy='clip')
    fig.tight_layout()
    ax.grid(True)
    fig.tight_layout(rect = [0,0,1,0.9])
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def err_bar(data,error,xlabel,ylabel,fig_prefix,title=None,mark_sig=None,mark_range=[[0,1],],width=0.3):
    """Bar plot with symmetric error bars and optional significance brackets.

    Parameters
    ----------
    data, error : per-bar values and error magnitudes
    xlabel : per-bar tick labels (one per bar)
    mark_sig : optional list of significance strings (e.g. "*", "n.s.")
    mark_range : list of [i, j] bar-index pairs bracketed by each marker
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    num = len(data)
    assert num == len(error) == len(xlabel)
    # NOTE: a hard-coded ["black","gray"] palette for num == 2 was dead code
    # (immediately overwritten below) and has been removed.
    colors,ret_lines,ret_marker = styles(num)
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    xlocations = np.arange(len(data))+width
    ax.bar(xlocations, data, yerr=error, width=width,linewidth=0.5,ecolor='r',capsize=5,color=colors,alpha=0.5)
    ax.set_xticks(xlocations+width/2)
    ax.set_xticklabels(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, xlocations[-1]+width*2)
    if title is not None:ax.set_title(title)
    if mark_sig is not None:
        xlocations = xlocations+width/2
        # stack the significance brackets above the tallest value+error bar
        ybin = np.max(np.asarray(data)+np.asarray(error))
        step = ybin/20
        assert len(mark_sig) == len(mark_range)
        for i in range(len(mark_range)):
            mark_r = mark_range[i]
            sig_string = mark_sig[i]
            ybin += step
            ax.plot([xlocations[mark_r[0]],xlocations[mark_r[1]]],[ybin,ybin],color='gray',linestyle='-',alpha=0.5)
            ax.text((xlocations[mark_r[0]]+xlocations[mark_r[1]])/2,ybin,sig_string)
        ax.set_ylim(0,ybin+step*2.5)
    ax.grid(True)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def trendsiglabel(Xvec,Yvec,meansdata,totmean,color,xticklabel,fig_prefix="trend",rotation=45):
    """Plot one highlighted trend over the background of all gene trends.

    Xvec, Yvec : x positions and the highlighted trend's values.
    meansdata : (n_sig_genes, p) trends drawn in faint blue.
    totmean : (n_total_genes, p) trends drawn in faint green.
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    num = len(Xvec)
    ngenes_sig,p = meansdata.shape
    ngenes_tot,p = totmean.shape
    assert num == len(Yvec) == len(xticklabel) == p
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    #ax.plot(Xvec,Yvec,color+'^-',markeredgecolor='None',markersize = 12)
    # background: every gene in faint green, significant genes in faint blue
    for i in range(ngenes_tot):
        #print i
        ax.plot(Xvec,totmean[i,:],'g-',lw=0.5,alpha=0.3)
    for i in range(ngenes_sig):
        ax.plot(Xvec,meansdata[i,:],'b-',lw=0.5,alpha=0.3)
    # foreground: the highlighted trend in the caller-chosen color
    ax.plot(Xvec,Yvec,color+'^-',markeredgecolor=color,markersize = 5)
    ax.set_xticks(np.arange(num))
    xlabelsL = ax.set_xticklabels(xticklabel,rotation=rotation)
    ax.grid(True)
    #clean y
    #ax.get_yaxis().set_ticks([])
    #min_a = np.min(Yvec)
    #max_a = np.max(Yvec)
    #ax.set_ylim(min_a-1,max_a+1)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def twofactor_diff_plot(Xmeanarr,Xstdarr,xticklabel,fig_prefix="Sigplot",title=None,xlabel=None,ylabel="Expression",width=0.3,labels=None,ylimmin=-0.5):
    """Errorbar plot of several series (mean +/- std) over shared x levels.

    Xmeanarr, Xstdarr : (n_series, num_levels) arrays.
    NOTE(review): the hard-coded fmts/ecolors lists support at most 6/7
    series; more series would raise IndexError.
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    num = Xmeanarr.shape[-1]
    fmts = ['o-','^--','x-.','s--','v-.','+-.']
    ecolors = ['r','b','g','c','m','y','k']
    assert num == Xstdarr.shape[-1] == len(xticklabel)
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    xlocations = np.arange(num)+width
    n,p = Xmeanarr.shape
    for i in range(n):
        ax.errorbar(xlocations, Xmeanarr[i,:], yerr=Xstdarr[i,:],fmt=fmts[i],ecolor=ecolors[i],markeredgecolor=ecolors[i])
    if labels:
        ax.legend(labels,loc=0,numpoints=1)
    ax.set_xticks(xlocations)
    ax.set_xticklabels(xticklabel)
    ax.set_ylabel(ylabel)
    if xlabel: ax.set_xlabel(xlabel)
    ax.set_xlim(0, xlocations[-1]+width*2)
    #ax.set_ylim(bottom=ylimmin)
    if title is not None:ax.set_title(title)
    ax.grid(True)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def onefactor_diff_plot(Xmeanarr,Xstdarr,xticklabel,fig_prefix="Sigplot",title=None,xlabel=None,ylabel="Expression",width=0.3):
    """Errorbar plot of one factor: mean +/- std at each level.

    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    n_levels = len(Xmeanarr)
    assert n_levels == len(Xstdarr) == len(xticklabel)
    figure = plt.figure(dpi=300)
    axes = figure.add_subplot(111)
    positions = np.arange(n_levels) + width
    axes.errorbar(positions, Xmeanarr, yerr=Xstdarr, fmt='o-', ecolor='r')
    axes.set_xticks(positions)
    axes.set_xticklabels(xticklabel)
    axes.set_ylabel(ylabel)
    if xlabel:
        axes.set_xlabel(xlabel)
    axes.set_xlim(0, positions[-1] + width * 2)
    if title is not None:
        axes.set_title(title)
    axes.grid(True)
    figure.tight_layout()
    for ext in ("png", "svg"):
        plt.savefig(fig_prefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def bar_plot(data,xticks_labels,fig_prefix,xlabel,ylabel,title="",width=0.3,rotation=0,fmt='%.2f',ylog=0,colors=None):
    """Simple annotated bar plot saved as <fig_prefix>.png/.svg.

    Bars with height < 0.1 are left unlabelled; returns 0.
    """
    ind = np.arange(len(data))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    if ylog:
        ax.set_yscale("log",nonposy='clip')
    linewidth = 0
    alpha = 0.5
    if not colors:
        colors = 'k'
    rects = ax.bar(ind,data,width,color=colors,linewidth=linewidth,alpha=alpha,align='center')
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_xticks(ind)
    ax.yaxis.grid(True)
    # center labels for horizontal/vertical text, right-align angled text
    if rotation == 0 or rotation == 90:
        hafmt = 'center'
    else:
        hafmt = 'right'
    # rotation is applied here once; the old per-label set_rotation loop
    # that followed was redundant and has been removed.
    ax.set_xticklabels(xticks_labels,ha=hafmt,rotation=rotation)
    ax.set_title(title)
    for rect in rects:
        height = rect.get_height()
        if height < 0.1:continue
        ax.text(rect.get_x()+rect.get_width()/2., 1.01*height, fmt%float(height),ha='center', va='bottom',fontsize=8)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def MA_vaco_plot2(sfc,slogq,fc,logq,fig_prefix,xlabel,ylabel,xlim=None,ylim=None,title="MAplot",figsize=(5,4)):
    """Volcano-style scatter: significant up (red) / down (blue) over all genes.

    sfc, slogq : fold changes and -log q of the significant genes; positive
        fold changes are drawn red, negative blue.
    fc, logq : the full background set, drawn in gray.
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    NOTE(review): *title* is accepted but currently unused.
    """
    fig = plt.figure(figsize=figsize,dpi=300)
    ax = fig.add_subplot(111)
    ax.plot(sfc[sfc>0],slogq[sfc>0],'o',markersize=2.0,alpha=0.5,markeredgecolor='#BC3C29',markerfacecolor='#BC3C29')
    ax.plot(sfc[sfc<0],slogq[sfc<0],'o',markersize=2.0,alpha=0.5,markeredgecolor='#00468B',markerfacecolor='#00468B')
    ax.plot(fc,logq,'o',markersize=1.0,markeredgecolor='#9E9E9E',markerfacecolor='#9E9E9E')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.grid(True,ls='--')
    if xlim is not None:ax.set_xlim(xlim[0],xlim[-1])
    if ylim is not None:ax.set_ylim(ylim[0],ylim[-1])
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf();
    plt.close()
    return 0
def MA_vaco_plot(avelogFC,logFC,totavelogFC,totlogFC,fig_prefix,xlabel,ylabel,xlim=None,ylim=None,title="MAplot",figsize=(5,4)):
    """MA scatter: highlighted points in red over the full set in blue.

    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    figure = plt.figure(figsize=figsize, dpi=300)
    axes = figure.add_subplot(111)
    # red layer first, blue layer second — preserve the original draw order
    axes.plot(avelogFC, logFC, 'ro', markersize=1.5, alpha=0.5, markeredgecolor='r')
    axes.plot(totavelogFC, totlogFC, 'bo', markersize=1.5, alpha=0.5, markeredgecolor='b')
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_title(title)
    axes.grid(True, ls='--')
    if xlim is not None:
        axes.set_xlim(xlim[0], xlim[-1])
    if ylim is not None:
        axes.set_ylim(ylim[0], ylim[-1])
    figure.tight_layout()
    for ext in ("png", "svg"):
        plt.savefig(fig_prefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def vaco_plot(X,Y,Xcut,Ycut,fig_prefix,xlabel,ylabel,title=None,figsize=(5,4)):
    """Volcano plot: points passing both cutoffs in red, the rest in blue.

    X : effect size per gene (fold change or correlation rho).
    Y : significance per gene (e.g. -log10 q); the Ycut threshold is drawn
        as a dashed red line across the full X range.
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    # X is rho or fc
    Xcutx = [np.min(X),np.max(X)]
    Ycuts = [Ycut,Ycut]
    # significant: above the Y threshold AND |effect| above the X threshold
    idx1 = (Y > Ycut) & (np.abs(X) > Xcut)
    idx2 = ~idx1
    fig = plt.figure(figsize=figsize,dpi=300)
    ax = fig.add_subplot(111)
    ax.plot(X[idx1],Y[idx1],'ro',markersize = 5,alpha=0.5,markeredgecolor='None')
    ax.plot(X[idx2],Y[idx2],'bo',markersize = 5,alpha=0.5,markeredgecolor='None')
    ax.plot(Xcutx,Ycuts,'r--')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    #ax.set_xlim(-6,6)
    if title != None:
        ax.set_title(title)
    ax.grid(True)
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def baohedu_plot(genes,reads,samples,fig_prefix,xlabel="number of reads",ylabel="number of detected genes",title=None,lim=0):
    """Saturation ("baohedu") curves: detected genes vs. sequenced reads.

    genes, reads : (n_samples, n_points) arrays of matching shape.
    samples : one legend label per sample.
    lim : if truthy, clamp the x axis to [-1, 101] (percent subsampling).
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    n1,p1 = genes.shape
    n2,p2 = reads.shape
    assert n1==n2 and p1==p2
    #types = ['ro-','b^--','gs-.','kv:','c^-.','m*--','yp:']
    ret_color,ret_lines,ret_marker = styles(n1)
    fig = plt.figure(figsize=(8,6),dpi=300)
    ax = fig.add_subplot(111)
    for i in range(n1):
        x = reads[i,:]
        y = genes[i,:]
        ax.plot(x,y,color=ret_color[i],linestyle=ret_lines[i],marker=ret_marker[i],markeredgecolor=ret_color[i],markersize = 4,alpha=0.7,label=samples[i])
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if title is not None: ax.set_title(title)
    ax.legend(loc=0,numpoints=1)
    ax.grid(True)
    ax.set_ylim(bottom=0)
    if lim:
        ax.set_xlim(-1,101)
    # BUGFIX: lay the figure out BEFORE saving; tight_layout previously ran
    # after savefig and therefore never affected the written files.
    plt.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def plotyy(Xvector,Y1np,Y2np,fig_prefix,xlabel,ylabel1,ylabel2,title=None,figsize=(6,5)):
    """Two-y-axes plot: Y1 series in blue on the left, Y2 in red on the right.

    Y1np / Y2np may be 1-D (single series) or 2-D (one series per row; the
    first row is solid, the rest dashed/dash-dotted).
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    Y1np = np.asarray(Y1np)
    Y2np = np.asarray(Y2np)
    fig = plt.figure(figsize=figsize,dpi=300)
    ax1 = fig.add_subplot(111)
    # detect 1-D vs 2-D input: unpacking .shape fails for a 1-D array
    try:
        n1,p1 = Y1np.shape
    except ValueError:
        n1 = 1
    try:
        n2,p2 = Y2np.shape
    except ValueError:
        n2 = 1
    for i in range(n1):
        if n1 == 1:
            ax1.plot(Xvector,Y1np, 'b-')
            break
        if i == 0:
            ax1.plot(Xvector,Y1np[i,:], 'b-')
        else:
            ax1.plot(Xvector,Y1np[i,:], 'b--')
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel1, color='b')
    if title: ax1.set_title(title)
    for tl in ax1.get_yticklabels():
        tl.set_color('b')
    # second y axis sharing the same x axis
    ax2 = ax1.twinx()
    for i in range(n2):
        if n2 == 1:
            ax2.plot(Xvector,Y2np, 'r-')
            break
        if i == 0:
            ax2.plot(Xvector,Y2np[i,:], 'r-')
        else:
            ax2.plot(Xvector,Y2np[i,:], 'r-.')
    ax2.set_ylabel(ylabel2, color='r')
    for tl in ax2.get_yticklabels():
        tl.set_color('r')
    ax1.grid(True,ls='--')
    plt.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def plotyy_barline(Xvec,Y1vec,Y2vec,fig_prefix,xlabel,ylabel1,ylabel2,figsize=(6,5),xticklabels=None):
    """Bars (blue, left axis) plus a line (red, right axis) on a shared x axis.

    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    assert len(Xvec) == len(Y1vec) == len(Y2vec) > 1
    figure = plt.figure(figsize=figsize, dpi=300)
    bar_ax = figure.add_subplot(111)
    # bar width derives from the spacing between consecutive x positions
    step = np.abs(Xvec[-1] - Xvec[0]) / (len(Xvec) - 1)
    bar_ax.bar(Xvec, Y1vec, step * 0.9, color='b', lw=1.0, alpha=0.5)
    bar_ax.set_xlabel(xlabel)
    bar_ax.set_ylabel(ylabel1, color='b')
    for tick in bar_ax.get_yticklabels():
        tick.set_color('b')
    if xticklabels is not None:
        bar_ax.set_xticks(Xvec)
        bar_ax.set_xticklabels(xticklabels, ha="right", rotation=45)
    line_ax = bar_ax.twinx()
    line_ax.plot(Xvec, Y2vec, 'r-', lw=1.0)
    line_ax.set_ylabel(ylabel2, color='r')
    for tick in line_ax.get_yticklabels():
        tick.set_color('r')
    bar_ax.grid(True, ls='--')
    plt.tight_layout()
    for ext in ("png", "svg"):
        plt.savefig(fig_prefix + "." + ext, format=ext, dpi=300)
    plt.clf()
    plt.close()
    return 0
def clean_axis(ax):
    """Strip an axes bare: no ticks, no tick labels, no visible frame."""
    for axis in (ax.get_xaxis(), ax.get_yaxis()):
        axis.set_ticks([])
    for spine in ax.spines.values():
        spine.set_visible(False)
def density_plt(Xarr,colors,legendlabel,figname_prefix="density",xlabel=None,ylabel=None,fun="pdf",fill=0,title=None,exclude=0.0,xlog=0,xliml=None,xlimr=None):
    """Kernel density (pdf or cdf) curves, one per dataset in Xarr.

    Each dataset is plotted on its own scale (curves are NOT normalized to a
    common scale).  Values equal to *exclude* are dropped before estimation.
    fill : if truthy, shade under each curve.
    Writes <figname_prefix>.png and <figname_prefix>.svg at 300 dpi.
    """
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(111)
    n = len(Xarr)
    assert len(colors) == len(legendlabel)
    for i in range(n):
        dat = np.asarray(Xarr[i])
        # drop the excluded sentinel value (e.g. zeros) before the KDE
        xp,yp = kdensity(dat[dat != exclude],num = 400,fun=fun)
        ax.plot(xp,yp,colors[i],label=legendlabel[i],markeredgecolor='None')
        if fill:
            ax.fill_between(xp,yp,y2=0,color=colors[i],alpha=0.2)
    ax.legend(loc=0,numpoints=1)
    if xliml is not None:
        ax.set_xlim(left = xliml)
    if xlimr is not None:
        ax.set_xlim(right = xlimr)
    #if xliml and xlimr:
    #    print "get"
    #    ax.set_xlim((xliml,xlimr))
    if xlog:
        ax.set_xscale("log")
    if xlabel: ax.set_xlabel(xlabel)
    if ylabel: ax.set_ylabel(ylabel)
    if title: ax.set_title(title)
    ax.grid(True)
    plt.savefig(figname_prefix+".png",format='png',dpi=300)
    plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def exprs_density(Xnp,colors,classlabels,figname_prefix="out",xlabel=None,ylabel=None,fun="cdf",exclude=0.0,ylim=10,figsize=(6,5)):
    """Per-sample expression density (cdf or pdf) curves, colored by class.

    Xnp : (n_samples, p) expression matrix; values equal to *exclude* are
        dropped before density estimation.
    colors / classlabels : per-sample color and class label; one legend
        entry is created per unique color.
    Writes <figname_prefix>.png and <figname_prefix>.svg at 300 dpi.
    """
    n,p = Xnp.shape
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    # collect one representative sample per unique color so the legend
    # maps each class to a single entry
    uniq_colors = []
    for tmpcolor in colors:
        if tmpcolor not in uniq_colors:
            uniq_colors.append(tmpcolor)
    idx = [colors.index(color) for color in uniq_colors]
    labels = [classlabels[i] for i in idx]
    # first pass: plot only the representatives, then attach the legend
    # (these curves are re-drawn below — a deliberate legend trick)
    for i in idx:
        dat = np.asarray(Xnp[i,:])
        if fun == "cdf":
            xp,yp = kdensity(dat[dat != exclude],fun="cdf")
        elif fun == "pdf":
            xp,yp = kdensity(dat[dat != exclude],fun="pdf")
        ax.plot(xp,yp,colors[i])
    ax.legend(labels,loc=0)
    # second pass: plot every sample's curve
    for i in range(n):
        dat = np.asarray(Xnp[i,:])
        if fun == "cdf":
            xp,yp = kdensity(dat[dat != exclude],fun="cdf")
        elif fun == "pdf":
            xp,yp = kdensity(dat[dat != exclude],fun="pdf")
        ax.plot(xp,yp,colors[i])
        #print xp
        #print yp
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.grid(True)
    if ylim:
        ax.set_xlim(0,ylim)
    fig.tight_layout()
    plt.savefig(figname_prefix+".png",format='png',dpi=300)
    plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def hist_groups(data,labels,xlabel,fig_prefix,bins=25,alpha=0.7,normed=True,colors=None,rwidth=1,histtype="stepfilled",linewidth=0.5,xlim=None,ylim=None,hist=True,figsize=(6,2)):
    """Overlaid histograms (optional) plus KDE curves for several datasets.

    histtype='bar', rwidth=0.8
    stepfilled

    data : list of 1-D samples; labels : one legend label per sample.
    hist : if True draw histogram + dashed KDE, else solid KDE curves only.
    Colors are taken from the shared palette in reverse order.
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    n = len(data)
    assert n == len(labels)
    if colors is None:
        ret_color,ret_lines,ret_marker = styles(n)
        colors = ret_color
    if normed:ylabel = "Probability density"
    else:ylabel = "Frequency"
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    for i in range(n):
        xp,yp = kdensity(data[i],fun="pdf")
        if hist:
            ax.hist(data[i],histtype=histtype,rwidth=rwidth,linewidth=linewidth,bins=bins, alpha=alpha,density=normed,color=colors[n-i-1])
            ax.plot(xp,yp,color=colors[n-i-1],linestyle='--',lw=1.0)
        else:
            ax.plot(xp,yp,color=colors[n-i-1],linestyle='-',lw=2.0)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.legend(labels,loc=0)
    if xlim is not None:
        ax.set_xlim(xlim[0],xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0],ylim[1])
    ax.grid(True,ls='--')
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def hist_groups2(data,labels,xlabel,fig_prefix,bins=25,alpha=0.7,normed=True,colors=None,rwidth=1,histtype="stepfilled",linewidth=0.5,xlim=(0,10000),cutline = 0.54,observe=0.64,figsize=(4,2.5)):
    """Filled KDE curves with vertical "cutoff" and "your data" marker lines.

    data : list of 1-D samples; labels : legend label per sample.
    cutline / observe : x positions of the dashed black cutoff line and the
    dashed red observed-value line.
    NOTE(review): bins, alpha, rwidth, histtype and linewidth are accepted
    for signature compatibility but unused (no histogram is drawn here).
    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    """
    n = len(data)
    colors = styles(n)[0]
    if normed:ylabel = "Density"
    else:ylabel = "Frequency"
    fig = plt.figure(dpi=300,figsize=figsize)
    ax = fig.add_subplot(111)
    # track the global y range so the marker lines span all curves
    miny = 1
    maxy = 0
    for i in range(n):
        xp,yp = kdensity(data[i],fun="pdf")
        miny = min(miny,np.min(yp))
        maxy = max(maxy,np.max(yp))
        ax.plot(xp,yp,color=colors[n-i-1],linestyle='-',lw=1.0,label=labels[i])
        ax.fill(xp,yp,color=colors[n-i-1],alpha=0.3)
    ax.plot([cutline,cutline],[miny,maxy],linestyle="--",color="black",lw=2,label="cutoff")
    ax.plot([observe,observe],[miny,maxy],linestyle="--",color="red",lw=3,label="your data")
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.legend(loc=0)
    if xlim:
        ax.set_xlim(xlim[0],xlim[1])
    ax.grid(True)
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf(); plt.close()
    return 0
def logdist(data,fig_prefix,cutline=0.54,observe=0.64):
    """Plot the empirical cumulative distribution of *data* as a step curve.

    Writes <fig_prefix>.png and <fig_prefix>.svg at 300 dpi; returns 0.
    NOTE(review): cutline and observe are accepted but currently unused.
    """
    # Theoretical
    #x = np.linspace(-50,50,100)
    #p = 1.0/(1+np.exp(x))
    fig = plt.figure(dpi=300,figsize=(7,5))
    ax = fig.add_subplot(111)
    #ax.plot(x,p,color="black",linestyle='--',lw=1.0,label="Theoretical")
    # BUGFIX: `normed` was removed from matplotlib's hist API; `density` is
    # the supported equivalent (already used elsewhere in this module).
    ax.hist(data, 50, density=True, histtype='step', cumulative=True, label='Empirical')
    ax.grid(True)
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf(); plt.close()
    return 0
def exprs_RLE(Xnp,mean="median",fig_prefix=None,samplenames=None,colors=None):
    """Relative Log Expression (RLE) boxplot for array/sample quality control.

    Within one experiment most genes should express at similar levels — even
    between compared groups — so each sample's RLE box is expected to sit
    near 0; a sample whose box deviates strongly from the others likely has
    a quality problem.

    mean : "median" (default, robust against outliers) or "mean" — the
        per-gene center subtracted from every sample.
    """
    # use the median by default to stay robust
    n,p = Xnp.shape
    if mean == "median":
        Xmean =np.median(Xnp,axis=0)
    elif mean == "mean":
        Xmean =np.mean(Xnp,axis=0)
    # NOTE(review): any other value of `mean` leaves Xmean unset -> NameError
    plot_boxplot(Xnp-Xmean,fig_prefix,"","Relative Log Expression",samplenames,colors=colors,ylim=0)
    return 0
def exprs_NUSE():
    """Placeholder for a Normalized Unscaled Standard Errors (NUSE) QC plot.

    Not implemented yet; always returns 0.
    """
    #1. hist
    #2. clustering
    #3. RLE
    #array corr
    #
    # Normalized Unscaled Standard Errors (NUSE) is a more sensitive QC
    # measure than RLE.  If an RLE plot makes you suspect an array, NUSE
    # usually confirms it.  NUSE is the per-array standard error of each gene
    # relative to the across-array standard error: for good-quality arrays the
    # values cluster around 1, while a problematic array deviates strongly
    # from 1 (pushing the other arrays' NUSE in the opposite direction).  In
    # the extreme case where most arrays are bad but mutually similar, the
    # *good* array may be the one that looks deviant — so always interpret
    # RLE and NUSE together.
    return 0
#from itertools import izip
# Python 2 -> 3 compatibility shim: itertools.izip no longer exists in
# Python 3, where the builtin zip is already lazy.  Kept as a module-level
# alias for existing call sites.
izip = zip
def show_values2(pc,markvalues,fmt="%.3f",fontsize=10,**kw):
    """Annotate each cell of a pcolor mesh with the matching entry of
    *markvalues* (an array with the same row-major cell order as the mesh).

    Text is black on light cells and white on dark cells.  Extra keyword
    arguments are accepted but currently unused.
    """
    pc.update_scalarmappable()
    nrow, _ = markvalues.shape
    # Flatten the annotation matrix row by row to match pcolor's cell order.
    flat_marks = []
    for r in range(nrow):
        flat_marks.extend(markvalues[r, :].tolist())
    axes = pc.axes
    cells = zip(pc.get_paths(), pc.get_facecolors(), pc.get_array())
    for idx, (path, facecolor, _cellvalue) in enumerate(cells):
        cx, cy = path.vertices[:-2, :].mean(0)
        # Pick a text color that contrasts with the cell's face color.
        textcolor = (0.0, 0.0, 0.0) if np.all(facecolor[:3] > 0.5) else (1.0, 1.0, 1.0)
        axes.text(cx, cy, fmt % flat_marks[idx], ha="center", va="center",
                  color=textcolor, fontsize=fontsize)
def show_values(pc, fmt="%.3f", **kw):
    """Write each cell's own value at the center of a pcolor mesh cell.

    Text is black on light cells and white on dark cells.  Extra keyword
    arguments are accepted but currently unused.
    """
    pc.update_scalarmappable()
    axes = pc.axes
    for path, facecolor, cellvalue in zip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
        cx, cy = path.vertices[:-2, :].mean(0)
        if np.all(facecolor[:3] > 0.5):
            textcolor = (0.0, 0.0, 0.0)
        else:
            textcolor = (1.0, 1.0, 1.0)
        axes.text(cx, cy, fmt % cellvalue, ha="center", va="center", color=textcolor)
def pcolor_plot(Xnp,xsamplenames,ylabelnames,figname_prefix,txtfmt = "%.3f",figsize=(8,6),measure="correlation"):
    """Annotated pcolor matrix: one labelled cell per entry of *Xnp*, with a
    colorbar captioned *measure* and per-cell values printed via show_values.

    Saves <figname_prefix>.png/.svg at 300 dpi and returns 0.
    """
    nrows, ncols = Xnp.shape
    print(nrows, ncols)
    fig = plt.figure(figsize=figsize, dpi=300)
    axes = fig.add_subplot(111)
    clean_axis(axes)
    mesh = axes.pcolor(Xnp, edgecolors='k', linestyle='dashed', linewidths=0.2, cmap=cm.Blues)
    # Center the tick labels on each cell (+0.5 offset).
    axes.set_yticks(np.arange(nrows) + 0.5)
    axes.set_yticklabels(ylabelnames)
    axes.set_xticks(np.arange(ncols) + 0.5)
    for ticklabel in axes.set_xticklabels(xsamplenames):
        ticklabel.set_rotation(90)
    bar = fig.colorbar(mesh, ax=axes)
    bar.set_label(measure)
    bar.outline.set_linewidth(0)
    axes.grid(visible=False)
    show_values(mesh, fmt=txtfmt)
    #fig.tight_layout()
    plt.savefig(figname_prefix + ".png", format='png', dpi=300)
    plt.savefig(figname_prefix + ".svg", format='svg', dpi=300)
    plt.clf()
    plt.close()
    return 0
def exprs_corrarray(Xnp,samplenames,figname_prefix,txtfmt = "%.2f",plottext=1,Xdist=None,cbarlabel = "correlation",figsize=(7,6)):
    """Plot a sample-by-sample matrix as an annotated pcolor heatmap.

    Parameters
    ----------
    Xnp : array with samples in rows; only used when ``Xdist`` is None, in
        which case ``|corrcoef(Xnp)|`` is plotted.
    samplenames : tick labels for both axes.
    figname_prefix : output files ``<prefix>.png`` / ``<prefix>.svg``.
    txtfmt : printf-style format for the per-cell annotations.
    plottext : if truthy, print each cell's value via :func:`show_values`.
    Xdist : optional precomputed matrix to plot instead of the correlation.
    cbarlabel : colorbar caption.

    Returns
    -------
    The matrix that was plotted.
    """
    # Idiomatic None check (was: type(Xdist) == type(None)).
    if Xdist is None:
        corr_coef = np.abs(np.corrcoef(Xnp))
    else:
        corr_coef = Xdist
    n,p = corr_coef.shape
    fig = plt.figure(figsize=figsize,dpi=300)
    ax = fig.add_subplot(111)
    clean_axis(ax)
    cplot = ax.pcolor(corr_coef, edgecolors='k', linestyle= 'dashed', linewidths=0.2, cmap = 'RdBu_r')
    #image_instance = ax.imshow(corr_coef,interpolation='nearest',aspect='auto',alpha=0.8,origin='lower',cmap=cm.coolwarm)
    # Center the tick labels on the cells.
    ax.set_yticks(np.arange(p)+ 0.5)
    ax.set_yticklabels(samplenames)
    ax.set_xticks(np.arange(n)+0.5)
    xlabelsL = ax.set_xticklabels(samplenames)
    for label in xlabelsL:
        label.set_rotation(90)
    cb = fig.colorbar(cplot,ax=ax)
    cb.set_label(cbarlabel)
    cb.outline.set_linewidth(0)
    ax.grid(visible=False)
    if plottext:
        # show_values accepts extra kwargs; fontsize is currently ignored there.
        show_values(cplot,fmt=txtfmt,fontsize=4)
    fig.tight_layout()
    plt.savefig(figname_prefix+".png",format='png',dpi=300)
    plt.savefig(figname_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return corr_coef
def pie_plot(sizes,labels,fig_prefix="pie_plot",autopct='%1.1f%%',colors=None,explode=None,shadow=False, startangle=90,radius=1):
    """Pie chart whose legend entries carry each wedge's percentage.

    sizes / labels : wedge values and names (same length).
    autopct : printf-style format used inside the legend text.
    Saves <fig_prefix>.png/.svg at 300 dpi and returns 0.
    """
    fig = plt.figure(figsize=(6, 6), dpi=300)
    axes = fig.add_subplot(111)
    if not colors:
        colors = cm.Paired(np.linspace(0, 1, len(labels)))
    #patches, texts, autotexts = ax5.pie(sizes,explode,labels=labels, colors=colors,autopct=autopct, shadow=shadow, startangle=startangle,radius=radius)
    wedges, wedge_texts = axes.pie(sizes, explode, colors=colors, shadow=shadow,
                                   startangle=startangle, radius=radius)
    # Legend entries look like "label(12.3%)", built from the autopct format.
    total = sum(sizes)
    legend_labels = [
        (labels[i] + "(" + autopct + ")") % (float(sizes[i]) / total * 100)
        for i in range(len(labels))
    ]
    axes.legend(wedges, legend_labels, loc='best')
    # Thin white borders between the wedges.
    for wedge in wedges:
        wedge.set_linewidth(0.2)
        wedge.set_edgecolor('white')
    plt.axis('equal')
    plt.tight_layout()
    plt.savefig(fig_prefix + ".png", format='png', dpi=300)
    plt.savefig(fig_prefix + ".svg", format='svg', dpi=300)
    plt.clf()
    plt.close()
    return 0
def polar_pie(percentage,labels,fig_prefix='polar_plot',figsize=(6,5),width=None,color = None,ylog=0):
    """Polar wind-rose chart: one wedge per label, radius = percentage.

    percentage : values in [0, 100] (asserted); sets each wedge's radius.
    labels : angular tick label per wedge.
    width : wedge angular widths; derived from the radii when None.
    color : optional gradient name passed to styles(); viridis shading
        by magnitude when None.
    ylog : use a logarithmic radial axis when truthy.
    Saves <fig_prefix>.png/.svg at 300 dpi and returns 0.
    """
    n = len(percentage) # 0~100
    assert np.max(percentage) <=100 and np.min(percentage) >= 0
    theta = np.linspace(0.0, 2 * np.pi, n, endpoint=False)
    radii = np.float64(percentage)
    # Clamp tiny wedges to a minimum effective radius so they stay visible
    # when deriving the default angular widths.
    radiixx = radii*1.0
    radiixx[radiixx<10] = 10.0
    if width is None:
        width = 2 * np.pi / n * (radiixx/np.max(radii))
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111,projection='polar')
    bars = ax.bar(theta, radii, width=width, bottom=0.0)
    if color is None:
        # Shade each wedge by its magnitude.
        for r, bar in zip(radii, bars):
            bar.set_facecolor(cm.viridis(r/100.0))
            bar.set_alpha(0.5)
    else:
        colors = styles(n,colorgrad=color)[0]
        idx = 0
        for r, bar in zip(radii, bars):
            bar.set_facecolor(colors[idx])
            idx +=1
            bar.set_alpha(0.8)
        ## color use the str to get grade
    if ylog:
        ax.set_yscale('log')
    ax.set_xticks(theta)
    ax.set_xticklabels(labels)
    ax.grid(True,ls='-',alpha=0.5,)
    # BUGFIX: savefig has no `ppi` keyword (typo) -- use dpi=300 like every
    # other plotting helper in this module.
    plt.savefig("%s.png"%fig_prefix,format='png',dpi=300)
    plt.savefig("%s.svg"%fig_prefix,format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def cluster_pcolor_dist(Xdist,samplenames,annos,fig_prefix="test_cluster_pcolor",colornorm = True,normratio=0.1,nosample=False,noannos=False,plotxlabel=1,plotylabel=1,cbarlabel="scaled measures",usepcolor=1,cmcolor="coolwarm",spacelinewidth=1.0,markvalues = None,markfmt = "%.2f",markfontsize=12,colorbarfmt="%.1f",figsize=(12,10),metric='euclidean'):# show_values2(pc,markvalues,fmt="%.3f",**kw):
    """Clustered pcolor heatmap of an (n x p) matrix with marginal dendrograms.

    Rows (samplenames) and columns (annos) are hierarchically clustered with
    average linkage unless ``nosample`` / ``noannos`` disable it.  Optional
    per-cell numbers (``markvalues``) are printed via :func:`show_values2`.
    Saves ``<fig_prefix>.png``/``.svg`` at 300 dpi and returns 0.
    """
    n,p = Xdist.shape
    # Keep the long axis horizontal: transpose the matrix and swap every
    # row/column-specific argument along with it.
    if n > p:
        Xdist = Xdist.T
        samplenames, annos = annos,samplenames
        n,p = p,n
        nosample,noannos = noannos,nosample
        plotxlabel,plotylabel = plotylabel,plotxlabel
        if markvalues is not None: markvalues = markvalues.T
    if colornorm:
        # Pad the color range by `normratio` on both ends so extreme cells
        # do not saturate the colormap.
        vmin = np.min(Xdist) # np.floor(np.min(Xdist))
        vmax = np.max(Xdist) # np.ceil(np.max(Xdist))
        #vmax = max([vmax,abs(vmin)])
        vrange = (vmax - vmin) * normratio
        my_norm = mpl.colors.Normalize(vmin-vrange, vmax+vrange)
    else: my_norm = None
    # NOTE(review): when colornorm=False, vmin/vmax stay undefined yet are used
    # for the colorbar tick labels below -- confirm callers always pass
    # colornorm=True.
    lfsm = 8
    if len(samplenames) > 20:
        lfsm = int(len(samplenames) * 1.0 * 4/40); lfsm = np.min([lfsm,8])
    print(n,p)
    rfsm = 8
    if len(annos) > 20:
        rfsm = int(len(annos) * 1.0 * 4/40); rfsm = np.min([rfsm,8])
    print(lfsm,rfsm)
    fig = plt.figure(figsize=figsize) # width, height, rfsm,lfsm ## 14,10
    # 2x2 grid: [0,0] colorbar, [0,1] column dendrogram,
    #           [1,0] row dendrogram, [1,1] heatmap.
    heatmapGS = gridspec.GridSpec(2,2,wspace=0.0,hspace=0.0,width_ratios=[0.14,p*1.0/n],height_ratios=[0.14,1])
    if not nosample: # gene is col
        # Cluster the rows and reorder the matrix by dendrogram leaf order.
        row_clusters = linkage(Xdist,method='average',metric=metric)
        row_denAX = fig.add_subplot(heatmapGS[1,0])
        sch.set_link_color_palette(['black'])
        row_denD = dendrogram(row_clusters,color_threshold=np.inf,orientation='left')
        row_denAX.set_axis_off()
        Xtmp = Xdist[row_denD['leaves'],:]
        if markvalues is not None: markertmp = markvalues[row_denD['leaves'],:]
    else:
        Xtmp = Xdist
        markertmp = markvalues
    if not noannos:
        # Cluster the columns the same way.
        col_clusters = linkage(Xdist.T,method='average',metric=metric)
        col_denAX = fig.add_subplot(heatmapGS[0,1])
        sch.set_link_color_palette(['black'])
        col_denD = dendrogram(col_clusters,color_threshold=np.inf,)
        col_denAX.set_axis_off()
        Xtmp = Xtmp[:,col_denD['leaves']]
        if markvalues is not None: markertmp = markertmp[:,col_denD['leaves']]
    heatmapAX = fig.add_subplot(heatmapGS[1,1])
    clean_axis(heatmapAX)
    axi = heatmapAX.pcolor(np.asarray(Xtmp), edgecolors='gray', linestyle= '-', linewidths=spacelinewidth,norm=my_norm ,cmap = cmcolor)
    #heatmapAX.grid(visible=False)
    if markvalues is not None:
        show_values2(axi,markertmp,markfmt,fontsize=markfontsize)
    # NOTE(review): row_denD is undefined when nosample=True, so this print
    # would raise NameError -- confirm row clustering is always enabled.
    print(row_denD['leaves'])
    print(samplenames)
    if plotxlabel:
        # y tick labels follow the clustered row order.
        if not nosample:
            t_samplenames = [samplenames[i] for i in row_denD['leaves']]
        else: t_samplenames = samplenames
        heatmapAX.set_yticks(np.arange(n)+0.5)
        heatmapAX.yaxis.set_ticks_position('right')
        heatmapAX.set_yticklabels(t_samplenames)
    if plotylabel:
        if not noannos: t_annonames = [annos[i] for i in col_denD['leaves']]
        else: t_annonames = annos
        heatmapAX.set_xticks(np.arange(p)+0.5)
        xlabelsL = heatmapAX.set_xticklabels(t_annonames,rotation=90)
        #for label in xlabelsL: label.set_rotation(90)
    for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines(): l.set_markersize(0)
    # Colorbar in the top-left corner, reduced to min/mid/max tick labels.
    scale_cbAX = fig.add_subplot(heatmapGS[0,0])
    scale_cbAX.set_axis_off()
    cb = fig.colorbar(axi,ax=scale_cbAX,shrink=1.0,fraction=2.0,aspect=1.5)
    font = {'size': 10}
    tl = cb.set_label(cbarlabel,fontdict=font)
    cb.ax.yaxis.set_ticks_position('right')
    cb.ax.yaxis.set_label_position('right')
    tmpticks = cb.ax.get_yticks()
    cb.ax.yaxis.set_ticks([tmpticks[0],(tmpticks[0]+tmpticks[-1])/2.0,tmpticks[-1]])
    cb.ax.yaxis.set_ticklabels(map(str,[colorbarfmt%vmin,colorbarfmt%((vmax+vmin)/2.0),colorbarfmt%vmax]))
    cb.outline.set_linewidth(0)
    tl = cb.set_label(cbarlabel,fontdict=font)
    tickL = cb.ax.yaxis.get_ticklabels()
    for t in tickL: t.set_fontsize(t.get_fontsize() - 3)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf();plt.close()
    return 0
def cluster_heatmap_dist(Xdist,samplenames,fig_prefix="test_cluster_heatmap",colornorm = True,nosample=False,plotxlabel=1,plotylabel=1,cbarlabel="scaled measures",usepcolor=0,cmcolor="autumn"):
    """Heatmap of a square (n x n) distance matrix with a top dendrogram.

    The matrix must be square and NaN-free (asserted).  Both axes are
    reordered by the same dendrogram leaf order.  Saves
    ``<fig_prefix>.png``/``.svg`` at 300 dpi and returns 0.
    """
    n,p = Xdist.shape
    assert n == p
    assert np.sum(np.isnan(Xdist)) == 0
    if colornorm:
        # Symmetric color range so the colormap is centered.
        vmin = np.floor(np.min(Xdist))
        vmax = np.ceil(np.max(Xdist))
        vmax = max([vmax,abs(vmin)])
        my_norm = mpl.colors.Normalize(vmin, vmax)
    else:my_norm = None
    # Grow the (square) figure with the number of samples, capped at 16 inch.
    lfsm = 8
    if len(samplenames) > 20:
        lfsm = int(len(samplenames) * 1.0 * 8/40); lfsm = np.min([lfsm,16])
    sys.stderr.write("[INFO] plot size is %dX%d\n"%(lfsm,lfsm))
    fig = plt.figure(figsize=(lfsm,lfsm))
    heatmapGS = gridspec.GridSpec(2,2,wspace=0.0,hspace=0.0,width_ratios=[0.15,1],height_ratios=[0.15,1])
    if not nosample:
        col_clusters = linkage(Xdist,method='average')
        col_denAX = fig.add_subplot(heatmapGS[0,1])
        sch.set_link_color_palette(['black'])
        col_denD = dendrogram(col_clusters,color_threshold=np.inf,) # use color_threshold=np.inf not to show color
        col_denAX.set_axis_off()
    heatmapAX = fig.add_subplot(heatmapGS[1,1])
    # NOTE(review): when nosample=True, Xtmp is never assigned and the
    # imshow/pcolor below raises NameError -- confirm callers always cluster.
    if nosample:pass
    else:
        # Reorder both axes by the dendrogram leaf order (matrix is square).
        Xtmp = Xdist[:,col_denD['leaves']]
        Xtmp = Xtmp[col_denD['leaves'],:]
    clean_axis(heatmapAX)
    if not usepcolor:
        axi = heatmapAX.imshow(Xtmp,interpolation='nearest',aspect='auto',origin='lower',norm=my_norm,cmap = cmcolor)
    else:
        axi = heatmapAX.pcolor(np.asarray(Xtmp), edgecolors='k', linestyle= 'dashdot', linewidths=0.2, cmap = cmcolor) # cmap = cm.coolwarm
    if plotxlabel:
        if not nosample:
            t_samplenames = [samplenames[i] for i in col_denD['leaves']]
        else:
            t_samplenames = samplenames
        heatmapAX.set_xticks(np.arange(n)+0.5)
        xlabelsL = heatmapAX.set_xticklabels(t_samplenames)
        for label in xlabelsL:
            label.set_rotation(90)
    # Hide the tick marks themselves (labels only).
    for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines():
        l.set_markersize(0)
    #heatmapAX.grid()
    # Colorbar to the left of the heatmap.
    scale_cbGSSS = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec=heatmapGS[1,0],wspace=0.0,hspace=0.0)
    scale_cbAX = fig.add_subplot(scale_cbGSSS[0,0])
    scale_cbAX.set_axis_off()
    cb = fig.colorbar(axi,ax=scale_cbAX,shrink=0.6,fraction=0.8,aspect=8)
    font = {'size': 10}
    tl = cb.set_label(cbarlabel,fontdict=font)
    cb.ax.yaxis.set_ticks_position('right')
    cb.ax.yaxis.set_label_position('right')
    cb.outline.set_linewidth(0)
    tl = cb.set_label(cbarlabel,fontdict=font)
    tickL = cb.ax.yaxis.get_ticklabels()
    for t in tickL:
        t.set_fontsize(t.get_fontsize() - 2)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def highfreq_mutmap(topgenesmuted,mut_stack,samplenames,annonames,fig_prefix="test_cluster_muted",colornorm=True,nosample=False,nogene=False,plotxlabel= 1,plotylabel=1,cbarlabel="Mutation Frequency",genecolors=None,samplecolors=None,cmap='RdYlBu_r',tree=3,stacklegends=[],colorbarlabels=[]):
    """Mutation landscape plot: clustered sample-x-gene mutation heatmap with a
    stacked per-sample mutation-count bar chart on top, optional per-sample and
    per-gene color strips, and a categorical colorbar.

    topgenesmuted : (n samples x p genes) mutation matrix.
    mut_stack : (n x k) per-sample counts for the stacked bars.
    stacklegends / colorbarlabels : legend texts for the stacks / colorbar.
    NOTE(review): mutable default arguments (stacklegends=[], colorbarlabels=[])
    are shared across calls -- harmless here only if never mutated; confirm.
    Saves ``<fig_prefix>.png``/``.svg`` and returns 0.
    """
    Xnp = topgenesmuted
    n,p = Xnp.shape
    assert n == len(samplenames) and p == len(annonames)
    if colornorm:
        # Symmetric normalization range (currently unused by the pcolor call).
        vmin = np.floor(np.min(Xnp))
        vmax = np.ceil(np.max(Xnp))
        vmax = max([vmax,abs(vmin)])
        my_norm = mpl.colors.Normalize(vmin, vmax)
    else:my_norm = None
    # Heuristic figure dimensions from the label counts; labels are dropped
    # entirely above 80 entries.  32700/300 caps the size below matplotlib's
    # pixel limit at dpi=300.
    if len(samplenames)/3 <=9:rightx = 8
    else:rightx = len(samplenames)/3
    if len(annonames)/5 <=9: leftx = 8
    else:
        leftx = int(len(annonames)/4.5)
    if len(samplenames) > 80:
        rightx = 8;plotxlabel = 0
    if len(annonames) > 80:
        leftx = 8;plotylabel = 0
    leftx = min(int(32700/300.0),leftx)
    rightx = min(int(32700/300.0),rightx)
    fig = plt.figure(figsize=(rightx,leftx))
    sys.stderr.write("[INFO] plot size is %dX%d\n"%(leftx,rightx))
    width_ratios = [0.07,0.115,1];height_ratios=[0.15,1]
    samples_l = 3; genes_l = 2;
    if samplecolors is not None:
        samples_l += 1
        width_ratios = [0.07,0.115,0.05,1]
    if genecolors is not None:
        genes_l = 3
        height_ratios = [0.1,0.05,1]
    # NOTE(review): width_ratios/height_ratios are intentionally(?) swapped
    # here (rows receive `width_ratios`, columns `height_ratios`) -- confirm.
    heatmapGS = gridspec.GridSpec(samples_l,genes_l,wspace=0.0,hspace=0.0,width_ratios=height_ratios,height_ratios=width_ratios)
    Xtmp = Xnp.T.copy()
    if not nosample:
        # Cluster samples (columns of the transposed matrix) by euclidean
        # pairwise distance, average linkage; reorder by leaf order.
        col_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xnp))
        col_clusters = linkage(col_pairwise_dists,method='average')
        #cutted_trees = cut_tree(col_clusters)
        col_denAX = fig.add_subplot(heatmapGS[0,genes_l-1])
        col_denD = dendrogram(col_clusters)
        col_denAX.set_axis_off()
        Xtmp = Xtmp[:,col_denD['leaves']]
    if not nogene:
        # Same for genes (rows), drawn on the left.
        row_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xtmp))
        row_clusters = linkage(row_pairwise_dists,method='average')
        #assignments = fcluster(row_clusters, cut_tree, 'distance')
        #row_cluster_output = pandas.DataFrame({'team':annonames, 'cluster':assignments})
        row_denAX = fig.add_subplot(heatmapGS[samples_l-1,0])
        row_denD = dendrogram(row_clusters,orientation='left')
        row_denAX.set_axis_off()
        Xtmp = Xtmp[row_denD['leaves'],:]
    # stack plot:
    stackvAX = fig.add_subplot(heatmapGS[1,genes_l-1])
    mut_stack = np.asmatrix(mut_stack)
    stackn,stackp = mut_stack.shape
    # NOTE(review): only 3 stack colors are generated; stackp > 3 would raise
    # IndexError in the loop below -- confirm mut_stack has <= 3 columns.
    stackcolors = color_grad(3,cm.Dark2)
    #mut_stackT = mut_stack.T
    if not nosample: mut_stack = mut_stack[col_denD['leaves'],:]
    ind = np.arange(stackn)
    for i in range(stackp):
        if i:
            # Stack each mutation class on top of the cumulative previous ones.
            cumtmp = cumtmp + np.asarray(mut_stack[:,i-1].T)[0]
            rects = stackvAX.bar(ind,np.asarray(mut_stack[:,i].T)[0],0.6,color=stackcolors[i],linewidth=0,alpha=0.7,align='center',bottom=cumtmp,label=stacklegends[i])
        else:
            cumtmp = 0
            rects = stackvAX.bar(ind,np.asarray(mut_stack[:,i].T)[0],0.6,color=stackcolors[i],linewidth=0,alpha=0.7,align='center',label=stacklegends[i])
    # ax.legend(alx,bbox_to_anchor=(1.02, 1),loc=0,borderaxespad=0,numpoints=1,fontsize=6)
    stackvAX.legend(loc=0, fancybox=True, bbox_to_anchor=(1.02, 1),borderaxespad=0)
    stackvAX.set_ylabel("Mutations")
    stackvAX.set_xlim(-0.5,stackn-0.5)
    heatmapAX = fig.add_subplot(heatmapGS[samples_l-1,genes_l-1])
    if samplecolors is not None:
        if not nosample:
            # Reorder the per-sample color strip to the clustered order.
            tmpxxx = []
            for x in col_denD['leaves']:
                tmpxxx.append(samplecolors[x])
            samplecolors = tmpxxx[:]
            del tmpxxx
        col_cbAX = fig.add_subplot(heatmapGS[2,genes_l-1])
        col_axi = col_cbAX.imshow([list(samplecolors)],interpolation='nearest',aspect='auto',origin='lower')
        clean_axis(col_cbAX)
    if genecolors is not None:
        if not nogene:
            genecolors = genecolors[row_denD['leaves']]
        row_cbAX = fig.add_subplot(heatmapGS[samples_l-1,1])
        row_axi = row_cbAX.imshow([genecolors.tolist(),],interpolation='nearest',aspect='auto',origin='lower')
        clean_axis(row_cbAX)
    # cmap = 'RdBu_r'
    #tmpmap = cm.Set2()
    # Categorical heatmap with white cell borders.
    axi = heatmapAX.pcolor(Xtmp,edgecolors='w', linewidths=1,cmap="Set2")
    #axi = heatmapAX.imshow(Xtmp,interpolation='nearest',aspect='auto',origin='lower',norm=my_norm,cmap = cmap)
    clean_axis(heatmapAX)
    if plotylabel:
        # Gene labels on the right, in clustered order if available.
        if not nogene:
            t_annonames = [annonames[i] for i in row_denD['leaves']]
        else:
            t_annonames = annonames
        heatmapAX.set_yticks(np.arange(p)+0.5)
        heatmapAX.yaxis.set_ticks_position('right')
        heatmapAX.set_yticklabels(t_annonames)
    if plotxlabel:
        if not nosample:
            t_samplenames = [samplenames[i] for i in col_denD['leaves']]
        else:
            t_samplenames = samplenames
        heatmapAX.set_xticks(np.arange(n)+0.5)
        xlabelsL = heatmapAX.set_xticklabels(t_samplenames)
        for label in xlabelsL:
            label.set_rotation(90)
    # Hide tick marks, keep the labels.
    for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines():
        l.set_markersize(0)
    heatmapAX.grid(False)
    #scale_cbGSSS = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec=heatmapGS[samples_l-1,0],wspace=0.0,hspace=0.0)
    scale_cbAX = fig.add_subplot(heatmapGS[samples_l-1,0])
    scale_cbAX.set_axis_off()
    cb = fig.colorbar(axi,ax=scale_cbAX,fraction=0.5,shrink=0.6)
    font = {'size': 8}
    #tl = cb.set_label(cbarlabel,fontdict=font)
    cb.ax.yaxis.set_ticks_position('left')
    cb.ax.yaxis.set_label_position('left')
    #cb.outline.set_linewidth(0)
    #tickL = cb.ax.yaxis.get_ticklabels()
    # One tick per category label.
    cb.set_ticks(np.arange(len(colorbarlabels)))
    cb.set_ticklabels(colorbarlabels)
    #for t in tickL:
    #    t.set_fontsize(t.get_fontsize() - 7)
    fig.subplots_adjust(bottom = 0)
    fig.subplots_adjust(top = 1)
    fig.subplots_adjust(right = 1)
    fig.subplots_adjust(left = 0)
    plt.savefig(fig_prefix+".png",format='png',additional_artists=fig,bbox_inches="tight",dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',additional_artists=fig,bbox_inches="tight",dpi=300)
    plt.clf()
    plt.close()
    return 0
def mesh_contour(X,Y,Z,xlabel,ylabel,zlabel,figprefix = "test",color=cm.coolwarm,alpha=0.3):
    """3-D surface + wireframe with contour projections on the three panes.

    X, Y, Z : 2-D meshgrid arrays.
    color : colormap used for the projected contour sets.
    Saves <figprefix>.png/.svg at 300 dpi and returns 0.
    """
    fig = plt.figure()
    # BUGFIX: Figure.gca(projection=...) was removed in matplotlib 3.6;
    # create the 3-D axes explicitly instead.
    ax = fig.add_subplot(projection='3d')
    ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=alpha,linewidth=0, antialiased=False)
    ax.plot_wireframe(X, Y, Z, rstride=8, cstride=8)
    #linewidth=0, antialiased=False
    # Project contours onto the bottom (z), left (x) and back (y) panes.
    cset = ax.contour(X, Y, Z, zdir='z', offset=-0.4, cmap=color)
    cset = ax.contour(X, Y, Z, zdir='x', offset=-3, cmap=color)
    cset = ax.contour(X, Y, Z, zdir='y', offset=3, cmap=color)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_zlabel(zlabel)
    # Axis limits match the hard-coded contour offsets above.
    ax.set_xlim(-3, 3)
    ax.set_ylim(-3, 3)
    ax.set_zlim(-0.4,0.4)
    plt.savefig(figprefix+".png",format='png',dpi=300)
    plt.savefig(figprefix+".svg",format='svg',dpi=300)
    plt.clf()
    plt.close()
    return 0
def plot_contest(data,ynames,xlabel=None,ylabel=None,fig_prefix="plot_ContEst"):
    """
    Horizontal error-bar plot of per-sample estimates (ContEst-style).

    data = [[mean,low,up],...]   one row per sample, plotted bottom-up
    ynames : one y-axis label per row of `data`.
    Vertical dashed reference lines are drawn at x=1 and x=5 (presumably
    1% / 5% contamination thresholds -- confirm with the caller).
    Saves <fig_prefix>.png/.svg at 300 dpi and returns 0.

    NOTE(review): `xerr=[lowdat,updat]` passes the raw low/up values, but
    matplotlib's errorbar expects *offsets* from the mean -- confirm the
    caller pre-computes deltas.
    """
    meandat = []; lowdat = []; updat = []; rangedat = []; num = len(data); yoffset = []
    for i in range(num):
        meandat.append(data[i][0]); lowdat.append(data[i][1]); updat.append(data[i][2]); yoffset.append(i+1); rangedat.append(data[i][2]-data[i][1])
    # Grow the figure height with the number of samples.
    if num < 25: heightsize = 6
    else: heightsize = int(num * 1.0 * 6/30)
    widthsize = 6
    fig = plt.figure(figsize=(widthsize,heightsize))
    ax = fig.add_subplot(111)
    ax.errorbar(meandat,yoffset,xerr=[lowdat,updat],ls="none", marker='o',color='r',markersize=4,markeredgecolor='None',capsize=2.2)
    # Extend the y range by one slot on each side for the reference lines.
    yoffset.append(num+1)
    yoffset.insert(0,0)
    # ls='',markerfacecolor=tmpcolor,marker=tmpmarker,label=tmplabel,markeredgecolor = tmpcolor,alpha=0.7
    #ax.plot([1.5,1.5],[0,yoffset[-1]],ls='--',markerfacecolor=u'#E24A33',markeredgecolor = u'#E24A33', alpha=0.7)
    ax.plot([1.0,1.0],[0,yoffset[-1]],ls='--',markerfacecolor=u'#E24A33',markeredgecolor = u'#E24A33', alpha=0.7)
    ax.plot([5,5],[0,yoffset[-1]],ls='--',markerfacecolor=u'#988ED5',markeredgecolor = u'#988ED5', alpha=0.7)
    #ax.plot([1.5,1.5],[yoffset,yoffset],ls='--',markerfacecolor=u'#E24A33',markeredgecolor = u'#E24A33', alpha=0.7)
    #ax.fill_betweenx(yoffset,0,1.5,color=u'#E24A33',alpha=0.3)
    #ax.fill_betweenx(yoffset,1.5,5,color=u'#348ABD',alpha=0.3)
    #ax.fill_betweenx(yoffset,5,np.max(updat)+1,color=u'#988ED5',alpha=0.3)
    ax.set_yticks(np.arange(1,num+1))
    ax.yaxis.set_ticks_position('left')
    ax.set_yticklabels(ynames)
    ax.grid(True)
    #ax.set_ylim(0,num+1)
    #ax.set_xlim(0,np.max(updat)+1)
    if xlabel is not None: ax.set_xlabel(xlabel)
    if ylabel is not None: ax.set_ylabel(ylabel)
    fig.tight_layout()
    plt.savefig(fig_prefix+".png",format='png',dpi=300)
    plt.savefig(fig_prefix+".svg",format='svg',dpi=300)
    plt.clf();plt.close();
    return 0
def cluster_heatmap(Xnp,samplenames,annonames,fig_prefix="test_cluster_heatmap",colornorm = True,nosample=False,nogene=False,plotxlabel=1,plotylabel=1,cbarlabel="Expression",genecolors=None,samplecolors=None,cmap='RdYlBu_r', trees = 3,numshow=80,metric="euclidean",usepcolor=0,normratio=1.0,samplecolormap="Dark2"):
    """Full clustered expression heatmap: sample dendrogram on top, gene
    dendrogram on the left, optional sample/gene color strips, colorbar.

    Xnp : (n samples x p genes) matrix.
    trees : number of clusters to cut both dendrograms into (cut_tree).
    numshow : hide axis labels beyond this many entries.
    normratio : shrink factor for the color normalization range.
    Returns (0, row_cluster_output) when gene clustering ran, else (0, '').
    """
    n,p = Xnp.shape
    #print n,p,len(samplenames),len(annonames)
    assert n == len(samplenames) and p == len(annonames)
    # make norm
    if colornorm:
        # Symmetric color range, optionally padded via normratio.
        vmin = np.floor(np.min(Xnp))
        vmax = np.ceil(np.max(Xnp))
        vmax = max([vmax,abs(vmin)]) # choose larger of vmin and vmax
        #vmin = vmax * -1
        vrange = (vmax - vmin) * (1-normratio) / normratio * 0.5
        my_norm = mpl.colors.Normalize(vmin-vrange, vmax+vrange)
    else:my_norm = None
    # heatmap with row names
    # Heuristic figure-size bookkeeping (logged but figsize is fixed below).
    if len(samplenames)/3 <=9:
        rightx = 8
    else:
        rightx = len(samplenames)/3
    if len(annonames)/3 <=9:
        leftx = 8
    else:
        leftx = int(len(annonames)/4.5)
    if len(samplenames) > numshow:
        rightx = 8
        plotxlabel = 0
    if len(annonames) > numshow:
        leftx = 8
        plotylabel = 0
    #import pdb; pdb.set_trace()
    # Cap below matplotlib's pixel limit at dpi=300.
    leftx = min(int(32700/300.0),leftx)
    rightx = min(int(32700/300.0),rightx)
    sys.stderr.write("[INFO] plot size is %dX%d\n"%(leftx,rightx))
    # rightx, leftx
    fig = plt.figure(figsize=(14,8))
    samples_l = 2; genes_l = 2;
    width_ratios = [0.15,1];height_ratios=[0.15,1]
    if samplecolors is not None:
        samples_l= 3
        width_ratios = [0.15,0.05,1]
    if (genecolors is not None) or (not nogene):
        genes_l = 5
        height_ratios = [0.15,0.015,0.025,0.015,1]
    # NOTE(review): width_ratios/height_ratios are intentionally(?) swapped
    # in this GridSpec call -- confirm.
    heatmapGS = gridspec.GridSpec(samples_l,genes_l,wspace=0.0,hspace=0.0,width_ratios=height_ratios,height_ratios=width_ratios)
    ### col dendrogram ### col is sample cluster
    #import pdb; pdb.set_trace()
    if not nosample and n >1:
        col_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xnp,metric)) # 'correlation'
        col_clusters = linkage(col_pairwise_dists,method='average')#ward, average
        assignments = cut_tree(col_clusters,[trees,])
        col_cluster_output = pandas.DataFrame({'team': samplenames, 'cluster':assignments.T[0]})
        #print col_cluster_output
        col_denAX = fig.add_subplot(heatmapGS[0,genes_l-1])
        col_denD = dendrogram(col_clusters)
        col_denAX.set_axis_off()
        ### fcluster(col_clusters,0.7*max(col_clusters[:,2]),'distance')
        ### to return the index of each sample for each cluster
    ### row dendrogram ### row is anno cluster
    if not nogene and p > 1:
        row_pairwise_dists = sp.spatial.distance.squareform(sp.spatial.distance.pdist(Xnp.T,metric))
        row_clusters = linkage(row_pairwise_dists,method='average')
        assignments = cut_tree(row_clusters,[trees,])
        row_cluster_output = pandas.DataFrame({'team':annonames, 'cluster':assignments.T[0]})
        #print row_cluster_output
        numbergenescluter = len(set(assignments.T[0].tolist()))
        row_denAX = fig.add_subplot(heatmapGS[samples_l-1,0])
        row_denD = dendrogram(row_clusters,orientation='left')
        row_denAX.set_axis_off()
    ### heatmap ####
    heatmapAX = fig.add_subplot(heatmapGS[samples_l-1,genes_l-1])
    # Reorder the matrix by the dendrogram leaf orders.
    if nogene:
        Xtmp = Xnp.T.copy()
    else:
        Xtmp = Xnp.T[row_denD['leaves'],:]
    if nosample:
        pass
    else:
        Xtmp = Xtmp[:,col_denD['leaves']]
    if samplecolors is not None:
        if not nosample:
            # Reorder the per-sample color strip to the clustered order.
            tmpxxx = []
            for x in col_denD['leaves']:
                tmpxxx.append(samplecolors[x])
            samplecolors = tmpxxx[:]
            del tmpxxx
        col_cbAX = fig.add_subplot(heatmapGS[1,genes_l-1])
        print(samplecolors)
        if not usepcolor:
            col_axi = col_cbAX.imshow([list(samplecolors)],interpolation='nearest',aspect='auto',origin='lower',cmap=samplecolormap)
        else:
            col_axi = col_cbAX.pcolor([list(samplecolors)],edgecolors='gray',linestyle= 'dashdot', linewidths=0.3, cmap = samplecolormap,norm=my_norm)
        clean_axis(col_cbAX)
    if (genecolors is not None) or (not nogene):
        if not nogene:
            # Color genes by their cut_tree cluster assignment.
            # NOTE(review): requires numbergenescluter/assignments from the
            # p > 1 branch above -- p == 1 with nogene=False would NameError.
            uniqgenecolors = color_grad(numbergenescluter,colorgrad="Accent")
            genecolors = [i for i in assignments.T[0]]
            #print genecolors
            genecolors = np.asarray(genecolors)[row_denD['leaves']]
            #print genecolors
        row_cbAX = fig.add_subplot(heatmapGS[samples_l-1,2])
        print(np.asarray([genecolors.tolist(),]).T)
        row_axi = row_cbAX.imshow(np.asarray([genecolors.tolist(),]).T,interpolation='nearest',aspect='auto',origin='lower',alpha=0.6)
        clean_axis(row_cbAX)
    # pcolor places ticks at cell edges, so shift labels by half a cell.
    tickoffset = 0
    if not usepcolor:
        axi = heatmapAX.imshow(Xtmp,interpolation='nearest',aspect='auto',origin='lower',norm=my_norm,cmap = cmap)## 'RdBu_r' 'RdYlGn_r'
    else:
        tickoffset += 0.5
        axi = heatmapAX.pcolor(Xtmp,edgecolors='gray',linestyle= 'dashdot', linewidths=0.3, cmap = cmap,norm=my_norm)
    clean_axis(heatmapAX)
    ## row labels ##
    if plotylabel:
        if not nogene:
            t_annonames = [annonames[i] for i in row_denD['leaves']]
        else:
            t_annonames = annonames
        heatmapAX.set_yticks(np.arange(p) + tickoffset)
        heatmapAX.yaxis.set_ticks_position('right')
        heatmapAX.set_yticklabels(t_annonames)
    ## col labels ##
    if plotxlabel:
        if not nosample:
            t_samplenames = [samplenames[i] for i in col_denD['leaves']]
        else:
            t_samplenames = samplenames
        heatmapAX.set_xticks(np.arange(n) + tickoffset)
        xlabelsL = heatmapAX.set_xticklabels(t_samplenames)
        #rotate labels 90 degrees
        for label in xlabelsL:
            label.set_rotation(90)
    #remove the tick lines
    for l in heatmapAX.get_xticklines() + heatmapAX.get_yticklines():
        l.set_markersize(0)
    heatmapAX.grid(False)
    #cplot = ax.pcolor(corr_coef, edgecolors='k', linestyle= 'dashed', linewidths=0.2, cmap = 'RdBu_r')
    ### scale colorbar ###
    #scale_cbGSSS = gridspec.GridSpecFromSubplotSpec(1,2,subplot_spec=heatmapGS[0,0],wspace=0.0,hspace=0.0)
    #scale_cbAX = fig.add_subplot(scale_cbGSSS[0,1])
    scale_cbGSSS = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec=heatmapGS[0,0],wspace=0.0,hspace=0.0)
    scale_cbAX = fig.add_subplot(scale_cbGSSS[0,0])
    scale_cbAX.set_axis_off()
    cb = fig.colorbar(axi,ax=scale_cbAX,fraction=0.5,shrink=1.0)
    font = {'size': 8}
    tl = cb.set_label(cbarlabel,fontdict=font)
    cb.ax.yaxis.set_ticks_position('left')
    cb.ax.yaxis.set_label_position('left')
    cb.outline.set_linewidth(0)
    #print cb.get_ticks()
    #print cb.ax.get_fontsize()
    tickL = cb.ax.yaxis.get_ticklabels()
    for t in tickL:
        t.set_fontsize(t.get_fontsize() - 7)
    #fig.tight_layout()
    fig.subplots_adjust(bottom = 0)
    fig.subplots_adjust(top = 1)
    fig.subplots_adjust(right = 1)
    fig.subplots_adjust(left = 0)
    #plt.savefig(fig_prefix+".tiff",format='tiff',additional_artists=fig,bbox_inches="tight",dpi=300)
    plt.savefig(fig_prefix+".png",format='png',additional_artists=fig,bbox_inches="tight",dpi=300)
    #if n * p < 200000:
    plt.savefig(fig_prefix+".svg",format='svg',additional_artists=fig,bbox_inches="tight",dpi=300)
    plt.clf()
    plt.close()
    # row_cluster_output only exists when gene clustering ran; fall back to ''.
    try:
        return 0, row_cluster_output
    except:
        return 0, ''
def loess_testplot(x,y,ynp,labels=[]):
    """Scatter the raw (x, y) points; intended to overlay the per-row curves
    of *ynp* on top.

    NOTE(review): unfinished -- the curves are never drawn (the loop is
    commented out), nothing is saved, and nothing is returned.  Also note the
    mutable default `labels=[]` is shared across calls (read-only here).
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    n,p = ynp.shape
    assert len(labels) == n
    ret_color,ret_lines,ret_marker = styles(n)
    # Raw observations as black circles.
    ax.plot(x,y,"ko")
    #for i in range(n)
def show_grad():
    """Render every available color gradient as one row of 10 markers, with
    the gradient's name as the y-axis label.

    Saves colorgrad_show.png/.svg at 300 dpi and returns 0.
    """
    colors = mplconfig.__getallcolors()
    fig = plt.figure(figsize=(6,34))
    ax = fig.add_subplot(111)
    x = np.arange(10)
    y = 0
    for color in colors:
        # 10-step gradient derived from this base color name.
        retcolors = styles(10,color)[0]
        for i in range(10):
            ax.plot([x[i],],y,'o',color=retcolors[i],markersize=12)
        y += 1
    ax.set_xlim(-1,10)
    ax.set_ylim(-1,y+1)
    ax.set_yticks(np.arange(0,y))
    ax.set_yticklabels(colors)
    fig.tight_layout()
    plt.savefig("colorgrad_show.png",format='png',dpi=300)
    plt.savefig("colorgrad_show.svg",format='svg',dpi=300)
    plt.clf();plt.close()
    # Removed unused locals (numcolors, ns, idx) from the original.
    return 0
def __test():
    """Smoke-test plot_hmc_curve on three Gaussian clusters of 3-D points."""
    # Draw the clusters in the same order (same RNG consumption) as before.
    clusters = [np.random.normal(mu, 0.5, (rows, 3))
                for mu, rows in ((0, 3), (3, 2), (6, 4))]
    X = np.concatenate(clusters)
    Y = [0, 0, 0, 1, 1, 2, 2, 2, 2]
    palette = ['r-', 'k--', 'g+']
    uniq_labels = ['r3', 'k2', 'g4']
    colors = [palette[i] for i in Y]
    classlabels = [uniq_labels[i] for i in Y]
    print(plot_hmc_curve(X, Y, colors, classlabels, "test_hmc_curve"))
def __testplot():
##绘制kde估计的概率密度 测试 kdensity
#======================================
aa = np.random.randn(10000)
xn,yn = kdensity(aa.tolist())
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xn,yn,'r--',label="Scott Rule")
ax.legend(loc=0)
plt.savefig("test_density.png",format='png',dpi=300)
#plt.savefig("test_density.jpg",format='jpg',dpi=300)
#plt.savefig("test_density.tif",format='tif',dpi=300)
plt.savefig("test_density.svg",format='svg',dpi=300)
plt.savefig("test_density.pdf",format='pdf',dpi=300)
plt.clf()
plt.close()
##boxplot
#======================================
mm = np.array([np.random.randn(100).tolist(),np.random.lognormal(1,1, 100).tolist()])
mm = mm.transpose()
boxColors = ['darkkhaki','royalblue']
fig = plt.figure()
ax1 = fig.add_subplot(111)
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.2)
bp = ax1.boxplot(mm)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
for i in range(2):
box = bp['boxes'][i]
boxX = box.get_xdata().tolist()
boxY = box.get_ydata().tolist()
boxCoords = zip(boxX,boxY)
boxPolygon = Polygon(boxCoords, facecolor=boxColors[i])
ax1.add_patch(boxPolygon)
#ax1.set_xticklabels(["Normal","Uniform"],rotation=45)
ax1.set_xticklabels(["Normal","Lognormal"],rotation=45)
ax1.set_title('Test Boxplot')
#ax1.set_title(u'箱图')
ax1.set_xlabel('Distribution',fontstyle='italic')
#ax1.set_xlabel('Distribution',fontstyle='oblique')
ax1.set_ylabel('Values')
#ax1.set_axis_off() 不显示坐标轴
plt.savefig("test_boxplot.png",format='png',dpi=300)
plt.savefig("test_boxplot.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#=====================================
##kmeans class plot
pt1 = np.random.normal(1, 0.2, (100,2))
pt2 = np.random.normal(2, 0.5, (300,2))
pt3 = np.random.normal(3, 0.3, (100,2))
pt2[:,0] += 1
pt3[:,0] -= 0.5
xy = np.concatenate((pt1, pt2, pt3))
##归一化处理 from scipy.cluster.vq import whiten
xy = whiten(xy)
## res 是类中心点坐标,idx为类别
res, idx = kmeans2(xy,3)
## 非常好的生成colors的方法
colors = ([([0.4,1,0.4],[1,0.4,0.4],[0.1,0.8,1])[i] for i in idx])
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(xy[:,0],xy[:,1],c=colors)
ax1.scatter(res[:,0],res[:,1], marker='o', s=300, linewidths=2, c='none')
ax1.scatter(res[:,0],res[:,1], marker='x', s=300, linewidths=2)
plt.savefig("test_kmeans.png",format='png',dpi=300)
plt.savefig("test_kmeans.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#====================================
##plot hierarchy
mat1 = np.random.normal(0,1,(3,3))
mat2 = np.random.normal(2,1,(2,3))
mat = np.concatenate((mat1,mat2))
linkage_matrix = linkage(mat,'ward','euclidean')
fig = plt.figure()
#ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
dendrogram(linkage_matrix,labels=["N1","N2","N3","P1","P2"],leaf_rotation=45)
ax3 = fig.add_subplot(223)
dendrogram(linkage_matrix,labels=["N1","N2","N3","P1","P2"],orientation='right',leaf_rotation=45)
#ax4 = fig.add_subplot(224)
plt.savefig("test_hcluster.png",format='png',dpi=300)
plt.savefig("test_hcluster.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#======================================
##plot hierarchy with image
mat1 = np.random.normal(0,1,(4,10))
mat2 = np.random.normal(5,1,(3,10))
mat = np.concatenate((mat1,mat2))
mat[:,3:] -= 20
mat -= np.mean(mat,axis=0)
samplenames = ["N1","N2","N3","N4","P1","P2","P3"]
dimensions = ["A1","A2","A3","A4","A5","A6","A7","A8","A9","A10"]
cluster_heatmap(mat,samplenames,dimensions)
#===============================================
##bar plot and err barplot
N = 5
menMeans = (20, 35, 30, 35, 27)
womenMeans = (25, 32, 34, 20, 25)
menStd = (2, 3, 4, 1, 2)
womenStd = (3, 5, 2, 3, 3)
ind = np.arange(N)
width = 0.35
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(ind, menMeans, width, color='r', yerr=womenStd,label='Men')
ax.bar(ind, womenMeans, width, color='y',bottom=menMeans, yerr=menStd,label = 'Women')
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind+width/2)
ax.set_xlim(left = -0.25)
ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
#ax.set_xticks(ind+width/2., ('G1', 'G2', 'G3', 'G4', 'G5'))
ax.set_yticks(np.arange(0,81,10))
ax.legend(loc=0)
plt.savefig("test_bar.png",format='png',dpi=300)
plt.savefig("test_bar.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#==============================================
##hist plot
mu=2
x = mu + np.random.randn(1000,3)
fig = plt.figure()
ax = fig.add_subplot(111)
n,bins,patches = ax.hist(x, 15, normed=1, histtype='bar',linewidth=0,color=['crimson', 'burlywood', 'chartreuse'],label=['Crimson', 'Burlywood', 'Chartreuse'])
ax.legend(loc=0)
plt.savefig("test_hist.png",format='png',dpi=300)
plt.savefig("test_hist.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#===============================================
##hist2D plot and image colorbar plot on the specific ax
x = np.random.randn(100000)
y = np.random.randn(100000)+5
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
counts, xedges, yedges, image_instance = ax4.hist2d(x, y, bins=40, norm=LogNorm())
ax1.set_axis_off()
plt.colorbar(image_instance,ax=ax1)
plt.savefig("test_hist2d.png",format='png',dpi=300)
plt.savefig("test_hist2d.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#===============================================
##image show plot
y,x = np.ogrid[-2:2:200j,-3:3:300j]
z = x*np.exp(-x**2 - y**2)
extent = [np.min(x),np.max(z),np.min(y),np.max(y)]
fig = plt.figure()
ax1 = fig.add_subplot(111)
#alpha: scalar The alpha blending value, between 0 (transparent) and 1 (opaque)
#设定每个图的colormap和colorbar所表示范围是一样的,即归一化
#norm = matplotlib.colors.Normalize(vmin=160, vmax=300), 用法 imshow(norm = norm)
image_instance = ax1.imshow(z,extent=extent,cmap=cm.coolwarm,alpha=0.6,origin='lower')
plt.colorbar(image_instance,ax=ax1)
plt.savefig("test_image.png",format='png',dpi=300)
plt.savefig("test_image.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#===============================================
##contour map with image
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
#cs = ax1.contour(z,5,extent = extent,origin = 'lower',linestyles='dashed')
cs = ax2.contour(z,10,extent = extent,origin = 'lower',cmap=cm.coolwarm)
plt.clabel(cs,fmt = '%1.1f',ax=ax2)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
cs1 = ax4.contour(x.reshape(-1),y.reshape(-1),z,10,origin = 'lower',colors = 'k',linestyles='solid')
cs2 = ax4.contourf(x.reshape(-1),y.reshape(-1),z,10,origin = 'lower',cmap=cm.coolwarm)
plt.clabel(cs1,fmt = '%1.1f',ax=ax4)
plt.colorbar(cs2,ax=ax4)
plt.savefig("test_contour.png",format='png',dpi=300)
plt.savefig("test_contour.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#===============================================
##meshgird plot 3D
#生成格点数据,利用griddata插值
#grid_x, grid_y = np.mgrid[275:315:1, 0.60:0.95:0.01]
#from scipy.interpolate import griddata
#grid_z = griddata((LST,EMS), TBH, (grid_x, grid_y), method='cubic')
x,y = np.mgrid[-2:2:200j,-3:3:300j]
z = x*np.exp(-x**2 - y**2)
fig = plt.figure(figsize=(20,20), dpi=300)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222,projection ='3d')
ax3 = fig.add_subplot(223,projection ='3d')
ax4 = fig.add_subplot(224,projection ='3d')
cs1 = ax1.contour(x,y,z,10,extent = extent,origin = 'lower',cmap=cm.coolwarm)
plt.clabel(cs,fmt = '%1.1f',ax=ax1)
surf = ax2.plot_surface(x,y,z, rstride=20, cstride=20, cmap=cm.coolwarm,linewidth=1, antialiased=False)
fig.colorbar(surf,ax=ax2)
surf = ax3.plot_wireframe(x,y,z, rstride=20, cstride=20, cmap=cm.coolwarm)
#仰角elevation和方位轴azimuth
#ax.view_init(elevation, azimuth) ‘elev’ stores the elevation angle in the z plane, ‘azim’ stores the azimuth angle in the x,y plane.
ax4.plot_surface(x, y, z, rstride=20, cstride=20, alpha=0.3)
cset = ax4.contour(x, y, z, 10, offset = ax4.get_zlim()[0],zdir='z',cmap=cm.coolwarm)
cset = ax4.contour(x, y, z, 10, offset = ax4.get_xlim()[0],zdir='x',cmap=cm.coolwarm)
cset = ax4.contour(x, y, z, 10, offset = ax4.get_ylim()[-1],zdir='y',cmap=cm.coolwarm)
plt.savefig("test_surface3d.png",format='png',dpi=300)
plt.savefig("test_surface3d.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#====================================================
## pie plot
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = np.array([15.2, 31, 42, 10.5])
#sizes = sizes/np.sum(sizes)
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.05, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
fig = plt.figure(figsize=(8,8),dpi=300)
ax5 = fig.add_subplot(111)
ax5.pie(sizes,explode,labels=labels, colors=colors,autopct='%1.1f%%', shadow=False, startangle=90)
plt.savefig("test_pie.png",format='png',dpi=300)
plt.savefig("test_pie.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#====================================================
## scatter
N = 100
r0 = 0.6
x = 0.9*np.random.rand(N)
y = 0.9*np.random.rand(N)
area = np.pi*(10 * np.random.rand(N))**2
c = np.sqrt(area)
r = np.sqrt(x*x+y*y)
area1 = np.ma.masked_where(r < r0, area)
area2 = np.ma.masked_where(r >= r0, area)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(x,y,s=area1, marker='^', c=c)
ax.scatter(x,y,s=area2, marker='o', c=c)
# Show the boundary between the regions:
theta = np.arange(0, np.pi/2, 0.01)
ax.plot(r0*np.cos(theta), r0*np.sin(theta))
plt.savefig("test_scatter.png",format='png',dpi=300)
plt.savefig("test_scatter.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#====================================================
## table
colheader = ['#probe_id','gene symbol','fold change','pvalue','FDR']
#rowheader = ["top1","top2"]
content = [["ge","ann1",3,4,5],["ge2","ann2",7,8,8]]
#colors = plt.cm.BuPu(np.linspace(0, 0.5, len(colheader)))
fig = plt.figure()
ax = plt.gca()
#the_table = ax.table(cellText=cell_text,rowLabels=rows,rowColours=colors,colLabels=columns,loc='bottom')
##colWidths = [0.1]*5
ax.table(cellText=content,colLabels = colheader,loc='top')
ax.set_axis_off()
plt.savefig("test_table.png",format='png',dpi=300)
plt.savefig("test_table.svg",format='svg',dpi=300)
plt.clf()
plt.close()
#====================================================
## plotyy
fig = plt.figure()
ax1 = fig.add_subplot(111)
t = np.arange(0.01, 10.0, 0.01)
s1 = np.exp(t)
ax1.plot(t, s1, 'b-')
ax1.set_xlabel('time (s)')
ax1.set_ylabel('exp', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
s2 = np.sin(2*np.pi*t)
ax2.plot(t, s2, 'ro')
ax2.set_ylabel('sin', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
plt.savefig("test_plotyy.png",format='png',dpi=300)
plt.savefig("test_plotyy.svg",format='svg',dpi=300)
plt.clf()
plt.close()
if __name__ == "__main__":
__testplot()
__test()
show_grad()
"""
The following color abbreviations are supported
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
##'w' white
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``).
the line style or marker:
================ ===============================
character description
================ ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
marker: [ ``7`` | ``4`` | ``5`` | ``6`` | ``'o'`` | ``'D'`` | ``'h'`` | ``'H'`` | ``'_'`` | ``''`` | ``'None'`` | ``' '`` | ``None`` | ``'8'`` | ``'p'`` | ``','`` | ``'+'`` | ``'.'`` | ``'s'`` | ``'*'`` | ``'d'`` | ``3`` | ``0`` | ``1`` | ``2`` | ``'1'`` | ``'3'`` | ``'4'`` | ``'2'`` | ``'v'`` | ``'<'`` | ``'>'`` | ``'^'`` | ``'|'`` | ``'x'`` | ``'$...$'`` | *tuple* | *Nx2 array* ]
"""
| StarcoderdataPython |
from typing import Optional

from crontab import CronTab
class Scheduler:
    """Thin wrapper around a file-backed crontab for adding, removing and
    listing jobs.

    All mutations are written straight back to the crontab file given at
    construction time.
    """

    def __init__(self, file_name):
        """Load the crontab stored at *file_name*.

        Args:
            file_name (str): Path of the crontab file to read and write.
        """
        # ``tabfile`` makes python-crontab read the file at that path.
        # The previous ``tab=file_name`` incorrectly handed the *path string*
        # to the parser as crontab content.
        self.cron = CronTab(tabfile=file_name)
        self.file_name = file_name

    def add_job(
            self,
            command: str,
            comment: str,
            minute: int = 0,
            hour: int = 0,
            dow: Optional[list[str]] = None
    ) -> None:
        """Add a new job with the given frequency to the cron tab.

        Args:
            command (str): The command to execute.
            comment (str): Job comment (the email address), used to find and
                remove the job later.
            minute (int, optional): Minute at which the job runs. Defaults to 0.
            hour (int, optional): Hour at which the job runs. Defaults to 0.
            dow (list[str], optional): Days of week on which the job runs,
                e.g. ``['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']``.
                When None the job runs on every day of the week.
                Defaults to None.
        """
        job = self.cron.new(command=command, comment=comment)
        job.minute.on(minute)
        job.hour.on(hour)
        if dow is not None:
            job.dow.on(*dow)
        else:
            # No restriction requested: schedule on every day of the week.
            job.every().dows()
        self.cron.write(self.file_name)

    def remove_job(self, comment: str):
        """Remove all jobs whose comment matches *comment*.

        Args:
            comment (str): The email address (comment) identifying the jobs.
        """
        self.cron.remove_all(comment=comment)
        self.cron.write(self.file_name)

    def list_all_job(self):
        """Return a list of all jobs currently in the crontab.

        Returns:
            list: All job entries in the loaded crontab.
        """
        return list(self.cron)
| StarcoderdataPython |
1917581 | import os
import shutil
# Copy, for every image in the all-task test set, the matching detection
# .txt file produced by DeepMask into a dedicated output folder.
all_task_test_images_path = "/home/maaz/PycharmProjects/VOC_EVAL/all_task_images"
all_dets_path = "/home/maaz/PycharmProjects/VOC_EVAL/dets_from_diff_methods/deep_mask/deep_mask_dets"
output_path = "/home/maaz/PycharmProjects/VOC_EVAL/dets_from_diff_methods/deep_mask/deep_mask_all_task_dets"

# Create the destination folder if it does not already exist.
os.makedirs(output_path, exist_ok=True)

if __name__ == "__main__":
    images = os.listdir(all_task_test_images_path)
    for image in images:
        # splitext strips only the final extension, so image names that
        # contain extra dots survive intact (the previous split('.')[0]
        # truncated such names at the first dot).
        base_name = os.path.splitext(image)[0]
        det_path = f"{all_dets_path}/{base_name}.txt"
        out_path = f"{output_path}/{base_name}.txt"
        shutil.copy(det_path, out_path)
| StarcoderdataPython |
9791114 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.airflow.airflow_component."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import datetime
import os
from airflow import models
import mock
import tensorflow as tf
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import base_executor
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration.airflow import airflow_component
from tfx.types import component_spec
class _FakeComponentSpec(types.ComponentSpec):
  """Minimal ComponentSpec used as a test fixture.

  Declares one input channel of artifact type 'type_a' and one output
  channel of artifact type 'type_b'; no execution parameters.
  """

  PARAMETERS = {}
  INPUTS = {
      'input': component_spec.ChannelParameter(type_name='type_a'),
  }
  OUTPUTS = {'output': component_spec.ChannelParameter(type_name='type_b')}
class _FakeComponent(base_component.BaseComponent):
  """Minimal BaseComponent test fixture backed by the no-op base executor."""

  SPEC_CLASS = types.ComponentSpec
  EXECUTOR_CLASS = base_executor.BaseExecutor

  def __init__(self, spec: types.ComponentSpec):
    super(_FakeComponent, self).__init__(spec=spec)
class AirflowComponentTest(tf.test.TestCase):
  """Tests for the glue that runs a TFX component as an Airflow task."""

  def setUp(self):
    """Creates a fake component plus pipeline/driver/metadata/DAG fixtures."""
    super(AirflowComponentTest, self).setUp()
    self._component = _FakeComponent(
        _FakeComponentSpec(
            input=types.Channel(type_name='type_a'),
            output=types.Channel(type_name='type_b')))
    self._pipeline_info = data_types.PipelineInfo('name', 'root')
    self._driver_args = data_types.DriverArgs(True)
    # SQLite-backed ML Metadata store under the test's temporary directory.
    self._metadata_connection_config = metadata.sqlite_metadata_connection_config(
        os.path.join(
            os.environ.get('TEST_TMP_DIR', self.get_temp_dir()), 'metadata'))
    self._parent_dag = models.DAG(
        dag_id=self._pipeline_info.pipeline_name,
        start_date=datetime.datetime(2018, 1, 1),
        schedule_interval=None)

  @mock.patch(
      'tfx.orchestration.component_launcher.ComponentLauncher'
  )
  def testAirflowAdaptor(self, mock_component_launcher_class):
    """Checks the launcher callable picks up the Airflow run_id.

    _airflow_component_launcher should construct a ComponentLauncher whose
    pipeline_info carries the run_id read from the Airflow task instance's
    dagrun, and call launch() exactly once.
    """
    # Airflow invokes the python callable with the task instance as `ti`;
    # fake only the piece the launcher reads (the dagrun's run_id).
    fake_dagrun = collections.namedtuple('fake_dagrun', ['run_id'])
    mock_ti = mock.Mock()
    mock_ti.get_dagrun.return_value = fake_dagrun('run_id')
    mock_component_launcher = mock.Mock()
    mock_component_launcher_class.return_value = mock_component_launcher
    airflow_component._airflow_component_launcher(
        component=self._component,
        pipeline_info=self._pipeline_info,
        driver_args=self._driver_args,
        metadata_connection_config=self._metadata_connection_config,
        additional_pipeline_args={},
        ti=mock_ti)
    mock_component_launcher_class.assert_called_once()
    # Keyword args of the single constructor call; run_id must have been
    # copied from the mocked dagrun above.
    arg_list = mock_component_launcher_class.call_args_list
    self.assertEqual(arg_list[0][1]['pipeline_info'].run_id, 'run_id')
    mock_component_launcher.launch.assert_called_once()

  @mock.patch('functools.partial')
  def testAirflowComponent(self, mock_functools_partial):
    """Checks AirflowComponent binds the launcher with the right arguments."""
    airflow_component.AirflowComponent(
        parent_dag=self._parent_dag,
        component=self._component,
        pipeline_info=self._pipeline_info,
        enable_cache=True,
        metadata_connection_config=self._metadata_connection_config,
        additional_pipeline_args={})
    # The component should partially apply _airflow_component_launcher with
    # its own configuration; driver_args is constructed internally, so only
    # its enable_cache flag is asserted below.
    mock_functools_partial.assert_called_once_with(
        airflow_component._airflow_component_launcher,
        component=self._component,
        pipeline_info=self._pipeline_info,
        driver_args=mock.ANY,
        metadata_connection_config=self._metadata_connection_config,
        additional_pipeline_args={})
    arg_list = mock_functools_partial.call_args_list
    self.assertTrue(arg_list[0][1]['driver_args'].enable_cache)
# Allow running this test module directly.
if __name__ == '__main__':
  tf.test.main()
| StarcoderdataPython |
1617650 | <gh_stars>1-10
"""Eventlist coordinate check.
"""
from gammapy.data import EventListDataset
from gammapy.datasets import gammapy_extra

# Bundled HESS test run shipped with the gammapy-extra data repository.
filename = gammapy_extra.filename('test_datasets/unbundled/hess/run_0023037_hard_eventlist.fits.gz')
event_list = EventListDataset.read(filename)

# Print a summary of the event list, then run its built-in consistency
# checks (which compare the ALT/AZ columns against RA/DEC).
print(event_list.info)
event_list.check()
"""
TODO: figure out the origin of this offset:
ALT / AZ not consistent with RA / DEC. Max separation: 726.6134257108188 arcsec
"""
| StarcoderdataPython |
8133170 | <reponame>bsridatta/robotfashion
# import necessary libraries
from PIL import Image
from train_epoch import train_one_epoch, evaluate
import matplotlib.pyplot as plt
import torch
import transforms as T
import torchvision.utils
import torchvision
import copy
import torch
import numpy as np
import cv2
import random
import utils
import coco_utils
from config import *
from torch import nn
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.rpn import AnchorGenerator
#################################################################
# Main function for creating and training faster RCNN
#################################################################
def train_RCNN(model, path2data, path2json, weight_path=None):
    """Train an R-CNN style detection model on a COCO-format dataset.

    Args:
        model: torchvision detection model to train.
        path2data: Root directory containing the images.
        path2json: Path to the COCO-format annotation json.
        weight_path: Optional checkpoint file; when given, model/optimizer
            state and the starting epoch are restored from it before training.

    Side effects:
        Writes a checkpoint (epoch + model/optimizer state) to
        ``save_weights_to`` (from config) when training finishes.
    """
    # Train on the GPU when available, otherwise fall back to the CPU.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    # Whether pretrained weights were supplied.
    load_pretrained = weight_path is not None

    # Build the COCO-style dataset and split off the last 10 shuffled samples
    # as a held-out evaluation set.  Both Subsets can share the same
    # underlying dataset object (indexing a Subset never mutates it), so the
    # previous deep copy of the entire dataset is unnecessary.
    dataset = coco_utils.get_coco(path2data, path2json, T.ToTensor())
    indices = torch.randperm(len(dataset)).tolist()
    dataset_train = torch.utils.data.Subset(dataset, indices[:-10])
    dataset_test = torch.utils.data.Subset(dataset, indices[-10:])

    # Training and validation data loaders (use num_workers for multi-gpu).
    data_loader = torch.utils.data.DataLoader(
        dataset_train, batch_size=1, shuffle=True,
        collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, shuffle=False,
        collate_fn=utils.collate_fn)

    if torch.cuda.device_count() > 1:
        print("Using", torch.cuda.device_count(), "GPUs")
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        model = nn.DataParallel(model)

    # Move model to the right device.
    model.to(device)

    # SGD over only the trainable parameters, with a step LR schedule.
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.005,
                                momentum=0.9, weight_decay=0.0005)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)

    # Optionally resume from an earlier checkpoint.
    start_epoch = 0
    if load_pretrained:
        checkpoint = torch.load(weight_path, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch']

    for epoch in range(num_epochs):
        # Train for one epoch, printing every 100 iterations.
        train_one_epoch(model, optimizer, data_loader, device,
                        epoch + start_epoch, print_freq=100)
        # Update the learning rate.
        lr_scheduler.step()
        # Evaluate on the held-out set.
        # NOTE(review): keypoint evaluation under no_grad is reported broken
        # upstream -- confirm before relying on these metrics.
        evaluate(model, data_loader_test, device=device)

    # Persist final weights plus optimizer state so training can be resumed.
    torch.save({
        'epoch': num_epochs + start_epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }, save_weights_to)
def get_model_bbox_detection(num_classes):
    """Return a COCO-pretrained Faster R-CNN with a `num_classes` box head."""
    # Start from the torchvision ResNet-50 FPN detector pretrained on COCO.
    detector = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    # Swap the classification head for one sized to our label set.
    feature_dim = detector.roi_heads.box_predictor.cls_score.in_features
    detector.roi_heads.box_predictor = FastRCNNPredictor(feature_dim, num_classes)
    return detector
| StarcoderdataPython |
6676194 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 09:04:34 2020
@author: abhijit
"""
# set a splitting point
split_point = 3
# make two empty lists
lower = []; upper = []
# Split numbers from 0 to 9 into two groups, one lower or equal to the split point and one higher than the split point
for i in range(10): # count from 0 to 9
    if (i <= split_point):
        lower.append(i)
    else:
        upper.append(i)
print("lower:", lower)
print('upper:', upper)

# Basic scalar types: float, int, two string quoting styles, bool.
a = 1.2
b = 5
c = 'hello'
d = "goodbye"
e = True

# Strings support repetition (*) and concatenation (+).
first_name = 'Abhijit'
first_name * 3
last_name = 'Dasgupta'
first_name + ' ' + last_name

# Comparison operators evaluate to booleans.
b < 10
b == 10
b >= 10
b <= 10
b != 10
# | and & are bitwise operators but also combine booleans; `not` negates.
(a > 0) | (b == 5)
(a < 0) & (b == 5)
not (a > 0)

# NOTE(review): C-style declaration -- NOT valid Python, this line raises a
# SyntaxError; apparently left in as a teaching contrast to dynamic typing.
int i = 5;

# Python names are dynamically typed: the same name can be rebound to
# values of different types.
a = 'abra'
a = 39

# Literal syntax for the three core containers.
# list
[]
#tuple
()
# dict
{}

# Lists are heterogeneous and support indexing and slicing.
l1 = ['a', 35, True, [3, 5]]
l1[0]
l1[:3]
l1[1:3]
l1[2:]
l1[-1]      # negative indices count from the end
l1[-3:-1]

# Lists can be nested.
test_nested_list = [[1,'a',2,'b'],
                    [3, 'c', 4, 'd']]

# Lists are mutable; tuples are not.
test_tuple = (1,2,3)
test_list = [1,2,3]
test_list[2] = 40
test_list
# NOTE(review): raises TypeError -- tuples do not support item assignment.
test_tuple[2] = 40

# Dicts map keys to values.
contact = {
    "first_name": "Abhijit",
    "last_name": 'Dasgupta',
    "Age": 48,
    "address": "124 Main St",
    "Employed" : True
}
# NOTE(review): raises KeyError -- dicts are keyed, not positional, and
# there is no key 2.
contact[2]
contact['Age']
contact['address'] = '123 Main St'
contact

# Membership test with `in`.
3 in [1,2,3,4,5,6,7]

# Classify each number as Negative, Odd or Even.
x = [-2,-1,0,1,2,3,4,5,6,7,8,9,10]
y = [] # an empty list
for u in x:
    if u < 0:
        y.append('Negative')
    elif u % 2 == 1: # what is remainder when dividing by 2
        y.append('Odd')
    else:
        y.append('Even')
print(y)
def my_mean(x):
    """Return the arithmetic mean of the numbers in `x`.

    Raises ZeroDivisionError when `x` is empty.
    """
    total = 0
    for value in x:
        total += value
    return total / len(x)
my_mean([0, 1, 2,3])

# Third-party packages are imported under an alias by convention.
import numpy as np
np.pi

# NOTE(review): shell command, not Python -- running this line in the
# interpreter raises a SyntaxError; it documents how to install numpy.
# in terminal
conda install numpy
190535 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 18 01:20:47 2021
@author: user
"""
import numpy as np
import matplotlib.pyplot as pt
# Fit a cubic polynomial P(v) = a*v^3 + b*v^2 + c*v + d through four
# (valuation, price) data points by solving the 4x4 Vandermonde system
# S @ X = P with Gauss-Jordan elimination (partial row pivoting).
S = np.array([[4410000*4410000*4410000,4410000*4410000, 4410000, 1],
              [4830000*4830000*4830000,4830000*4830000,4830000,1],
              [5250000*5250000*5250000,5250000*5250000,5250000,1],
              [5670000*5670000*5670000,5670000*5670000,5670000,1]])
P = np.array([[1165978],
              [1329190],
              [1501474],
              [1682830]])

S = np.array(S, dtype=float)
# Augmented matrix [S | P].
SP = np.concatenate((S, P), axis=1)
augmented_matrix = np.copy(SP)
n, m = np.shape(SP)

# Partial pivoting: for each column, move the row with the largest absolute
# entry at or below the diagonal up to the diagonal.  (Renamed the pivot
# index so it no longer shadows the builtin `max`.)
for i in range(0, n-1):
    col = abs(SP[i:, i])
    pivot_row = np.argmax(col)      # offset within the SP[i:, i] slice
    if pivot_row != 0:
        temp = np.copy(SP[i, :])
        SP[i, :] = SP[pivot_row+i, :]
        SP[pivot_row+i, :] = temp
partial_Pivot_Rows = np.copy(SP)

# Forward elimination: zero out the entries below each pivot.
for i in range(0, n-1):
    pivot = SP[i, i]
    for k in range(i+1, n):
        factor = SP[k, i]/pivot
        SP[k, :] = SP[k, :] - SP[i, :]*factor
backward_elimination = np.copy(SP)

# Backward elimination: zero out the entries above each pivot and normalise
# each row, leaving the solution vector in the last column.
last_row = n-1
last_col = m-1
for i in range(last_row, -1, -1):
    pivot = SP[i, i]
    for k in range(i-1, -1, -1):
        factor = SP[k, i]/pivot
        SP[k, :] = SP[k, :] - SP[i, :]*factor
    SP[i, :] = SP[i, :]/SP[i, i]
X = np.copy(SP[:, last_col])
X = np.transpose([X])

print('Matriz aumentada:')
print(augmented_matrix)
print('Pivoteo parcial por filas')
print(partial_Pivot_Rows)
print('eliminacion hacia foward')
print(backward_elimination)
print('eliminación hacia atrás')
print(SP)
print('solución de X: ')
print(X)

# Evaluate the fitted cubic at v = 5,000,000.
# BUG FIX: the original used a comma instead of '+' between the cubic term
# and the rest, which built a 2-tuple instead of the polynomial value.
value = 5000000
total_value = value**3*X[0] + value**2*X[1] + value*X[2] + X[3]
print('El valor total es ')
print(total_value)

# Plot the known data points together with the interpolated one.
x = [4410000, 4830000, 5000000, 5250000, 5670000]
y = [1165978, 1329190, 1397831, 1501474, 1682830]
pt.plot(x, y, 'ro')
pt.plot(x, y, color="black")
3287296 | <filename>anvil/objects/curve.py
import yaml
from collections import OrderedDict
import anvil
import anvil.config as cfg
import anvil.runtime as rt
from transform import Transform
import io
from anvil.meta_data import MetaData
from six import iteritems
class Curve(Transform):
    """Transform wrapper for a DCC nurbsCurve node.

    Adds curve-specific behavior on top of Transform: building curves from a
    named shape library (yaml file), building from node world positions,
    CV-level transforms, cluster generation and shape swapping.
    """

    DCC_TYPE = 'nurbsCurve'
    ANVIL_TYPE = cfg.CURVE_TYPE
    BUILT_IN_META_DATA = Transform.BUILT_IN_META_DATA.merge({cfg.TYPE: cfg.CURVE_TYPE}, force=True, new=True)
    # Lazily filled by populate_shape_file_data(): shape name -> yaml entry.
    SHAPE_CACHE = None
    # Fallback CV positions (a 4-point straight line) for unknown shape names.
    DEFAULT_SHAPE = [[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0]]

    @staticmethod
    def create_engine_instance(**flags):
        """Create the underlying DCC curve node and return the engine handle."""
        return rt.dcc.create.create_curve(**flags)

    @classmethod
    def build(cls, shape='cube', scale=None, **kwargs):
        """Build a curve, defaulting its CVs from the named library shape.

        Args:
            shape (str): Key into the shape yaml library; ignored when the
                caller supplies explicit cfg.POINT positions in kwargs.
            scale: Optional uniform (scalar) or per-axis (list) scale applied
                to the CVs after creation.

        Returns:
            Curve: The built curve instance.
        """
        if kwargs.get(cfg.POINT) is None:
            kwargs.update(cls._get_shape_constructor(shape, return_positions=True))
        instance = super(Curve, cls).build(**kwargs)

        # Just in case we are using PyMEL and it has returned the actual
        # shape node instead of the transform: re-point the id at the parent.
        if rt.dcc.scene.get_type(str(instance)) == cls.DCC_TYPE and instance.get_parent():
            instance._dcc_id = rt.dcc.scene.get_persistent_id(str(instance.get_parent()))

        instance.transform_shape(scale, mode=cfg.SCALE)
        return instance

    @classmethod
    def build_line_indicator(cls, object1, object2, **kwargs):
        """Build a non-selectable straight line whose ends follow two nodes.

        Each end is clustered and the cluster parented under the matching
        node, so the line stretches between the two objects as they move.

        Returns:
            tuple: (curve, [object1_cluster, object2_cluster])
        """
        kwargs[cfg.DEGREE] = 1
        kwargs[cfg.META_DATA] = MetaData(kwargs.get(cfg.META_DATA, {}))
        kwargs[cfg.META_DATA].update({cfg.NAME: '%s_to_%s' % (object1, object2), cfg.TYPE: cfg.CURVE_TYPE})
        curve = cls.build_from_nodes([object1, object2], **kwargs)

        object1_cluster, object2_cluster = curve.generate_clusters()
        object1_cluster.parent(object1)
        object2_cluster.parent(object2)

        # Display as a reference so the indicator line cannot be selected.
        curve.overrideEnabled.set(1)
        curve.overrideDisplayType.set(1)
        # BUG FIX: previously returned object1_cluster twice instead of
        # [object1_cluster, object2_cluster].
        return (curve, [object1_cluster, object2_cluster])

    @classmethod
    def build_from_nodes(cls, nodes, **kwargs):
        """Build a curve whose CVs sit at the world positions of *nodes*."""
        kwargs[cfg.POINT] = [node.get_world_position() for node in anvil.factory_list(nodes)]
        instance = cls.build(**kwargs)
        return instance

    def auto_color(self, override_color=None):
        """Colorize the curve from its meta_data side (or *override_color*).

        Returns:
            The color that was applied.
        """
        self.info('Auto coloring %s based on meta_data side: %s', self, self.meta_data.get(cfg.SIDE))
        color = override_color or cfg.RIG_COLORS.get(self.meta_data.get(cfg.SIDE, None) or cfg.DEFAULT)
        self.colorize(color)
        return color

    def get_shape(self):
        """Return the nurbsCurve shape node under this transform."""
        return self._api_class_instance.getShape()

    def cvs(self):
        """Return all CVs of the curve shape."""
        return self.get_shape().cv[:]

    def transform_shape(self, value, mode=cfg.SCALE, relative=False):
        """Transform the curve's CVs (not the transform node itself).

        Args:
            value: Scalar (applied on all three axes) or 3-element list.
                When None, this is a no-op.
            mode: Which transform to apply (e.g. cfg.SCALE); mapped through
                MODE_LOOKUP to the engine keyword.
            relative (bool): Apply relative to current CV positions instead
                of absolutely.
        """
        if value is not None:
            value = [value] * 3 if not isinstance(value, list) else value
            transform_kwargs = {cfg.PIVOTS: self.get_pivot(),
                                cfg.RELATIVE: relative,
                                cfg.ABSOLUTE: not relative,
                                cfg.WORLD_SPACE_DISTANCE: True,
                                self.MODE_LOOKUP[mode]: value}
            rt.dcc.scene.position(self.cvs(), **transform_kwargs)

    def generate_clusters(self):
        """Create one cluster handle per CV; returns them as Transforms."""
        return [Transform(rt.dcc.rigging.cluster(cv)[1]) for cv in self.cvs()]

    def swap_shape(self, new_shape, maintain_position=False):
        """Replace this curve's shape node with the library shape *new_shape*.

        Args:
            new_shape (str): Shape library key to build the replacement from.
            maintain_position (bool): Keep the new shape's world position
                instead of snapping it to this transform.
        """
        self.SHAPE_PARENT_KWARGS[cfg.RELATIVE] = not maintain_position
        self.SHAPE_PARENT_KWARGS[cfg.ABSOLUTE] = maintain_position
        curve = self.__class__.build(shape=new_shape)
        rt.dcc.scene.delete(self.get_shape())
        rt.dcc.scene.parent(curve.get_shape(), self, **self.SHAPE_PARENT_KWARGS)

    @classmethod
    def _get_shape_constructor(cls, shape_name, return_positions=False):
        """Look up *shape_name* in the shape cache.

        Args:
            shape_name (str): Library key; falsy values resolve to no entry.
            return_positions (bool): When True, return a kwargs dict with the
                entry's point/degree data (or DEFAULT_SHAPE when missing)
                instead of a constructor callable.

        Returns:
            dict when return_positions is True; otherwise a zero-argument
            callable wrapping the engine API constructor named in the yaml
            entry, or None when that constructor is not callable.
        """
        shape_entry = cls.SHAPE_CACHE.get(shape_name or '', {})
        if return_positions:
            return {key: shape_entry.get(key) for key in [cfg.POINT, cfg.DEGREE] if
                    shape_entry.get(key)} or {cfg.POINT: cls.DEFAULT_SHAPE, cfg.DEGREE: 1}

        shape_constructor = shape_entry.pop('constructor')
        api_function = getattr(rt.dcc.ENGINE_API, shape_constructor, None)
        if callable(api_function):
            cls.debug('Obtained shape constructor from yml: %s(%s)', api_function, shape_entry)
            return lambda: api_function(**shape_entry)

    @classmethod
    def populate_shape_file_data(cls, shape_file=None):
        """Load the shape library yaml into SHAPE_CACHE (only once)."""
        if shape_file is None:
            shape_file = cfg.SHAPES_FILE
        if not cls.SHAPE_CACHE:
            try:
                # Use a context manager so the file handle is not leaked.
                with open(shape_file, 'r') as f:
                    cls.SHAPE_CACHE = yaml.safe_load(f)
            except IOError:
                cls.error('Missing file %s, please reinstall or locate', shape_file)
                cls.SHAPE_CACHE = {}

    @staticmethod
    def _ordered_dump(data, stream=None, dumper=yaml.Dumper, **kwargs):
        """Taken from https://stackoverflow.com/a/21912744. Great way of dumping as OrderedDict."""

        class OrderedDumper(dumper):
            pass

        def _dict_representer(dumper, data):
            return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, iteritems(data))

        OrderedDumper.add_representer(OrderedDict, _dict_representer)
        return yaml.dump(data, stream, OrderedDumper, **kwargs)

    def _add_curve_shape_to_shape_file(self, shape_file=None):
        """Adds the currently encapsulated Curve node's shape data.

        Adds to the shape curve_shapes file based on the name of the dag
        node in the DCC, rounding CV positions to 3 decimals.
        """
        if shape_file is None:
            shape_file = cfg.SHAPES_FILE
        try:
            shape_name = self.name()
            # Use a context manager so the read handle is not leaked.
            with open(shape_file, 'r') as rf:
                shapes_data = yaml.safe_load(rf)
            target_data = shapes_data.get(shape_name, {})
            degree = self.get_shape().degree()
            target_data[cfg.DEGREE] = degree
            target_data[cfg.POINT] = [[round(p, 3) for p in cv.getPosition(space='world')] for cv in
                                      self.get_shape().cv[:]]
            shapes_data[shape_name] = target_data
            with io.open(shape_file, 'w') as f:
                self._ordered_dump(shapes_data, stream=f, encoding='utf-8', default_flow_style=None)
                self.info('Successfully wrote shape data %s to file %s', shape_name, f)
        except IOError:
            self.error('Missing file %s, please reinstall or locate', shape_file)

    @classmethod
    def _build_all_controls(cls):
        """Build (and rename) one curve per entry in the shape cache."""
        for shape in cls.SHAPE_CACHE:
            curve = cls.build(shape=shape)
            curve.rename(shape)


Curve.populate_shape_file_data()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.